Hijacked Kubernetes

Machine image details: lxkd

Properties
Description: (none)
Location: us
Machine type: e2-medium
Architecture: x86/64
Network tags: gke-nginx-1-cluster-561e0195-node
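
A listing like the one above can usually be reproduced with the gcloud CLI. The sketch below assumes the machine image really is named lxkd, that the caller is authenticated against the owning project, and that the network tag shown above is still attached to the node VMs; none of that is confirmed by the dump itself.

  # Describe the captured machine image, including its stored instance properties and metadata.
  gcloud compute machine-images describe lxkd --format=json

  # List the running node VMs that carry the network tag from the properties above.
  gcloud compute instances list --filter="tags.items=gke-nginx-1-cluster-561e0195-node"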
Custom metadata
instance-template: projects/1039184303749/global/instanceTemplates/gke-nginx-1-cluster-default-pool-2c2560d6
created-by: projects/1039184303749/zones/us-central1-a/instanceGroupManagers/gke-nginx-1-cluster-default-pool-2c2560d6-grp
serial-port-logging-enable: true
kube-labels: cloud.google.com/gke-boot-disk=pd-balanced,cloud.google.com/gke-container-runtime=containerd,cloud.google.com/gke-cpu-scaling-level=2,cloud.google.com/gke-logging-variant=DEFAULT,cloud.google.com/gke-max-pods-per-node=110,cloud.google.com/gke-nodepool=default-pool,cloud.google.com/gke-os-distribution=cos,cloud.google.com/machine-family=e2,cloud.google.com/private-node=false
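
The kube-labels value above is the label set the kubelet registers on the node, so the same node pool can be located from inside the cluster with an ordinary label selector. A minimal sketch, assuming kubectl access to the cluster:

  # Nodes belonging to the default-pool node pool, as labelled in kube-labels above.
  kubectl get nodes -l cloud.google.com/gke-nodepool=default-pool -o wide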
google-compute-enable-pcid: true
kubelet-config: |
  apiVersion: kubelet.config.k8s.io/v1beta1
  authentication:
    anonymous:
      enabled: false
    webhook:
      enabled: true
    x509:
      clientCAFile: /etc/srv/kubernetes/pki/ca-certificates.crt
  authorization:
    mode: Webhook
  cgroupRoot: /
  clusterDNS:
  - 10.64.0.10
  clusterDomain: cluster.local
  enableDebuggingHandlers: true
  evictionHard:
    memory.available: 100Mi
    nodefs.available: 10%
    nodefs.inodesFree: 5%
    pid.available: 10%
  featureGates:
    CSIMigrationGCE: false
    DynamicKubeletConfig: false
    ExecProbeTimeout: false
    InTreePluginAWSUnregister: true
    InTreePluginAzureDiskUnregister: true
    InTreePluginOpenStackUnregister: true
    InTreePluginvSphereUnregister: true
    RotateKubeletServerCertificate: true
  kernelMemcgNotification: true
  kind: KubeletConfiguration
  kubeReserved:
    cpu: 1060m
    ephemeral-storage: 41Gi
    memory: 1019Mi
  readOnlyPort: 10255
  serverTLSBootstrap: true
  staticPodPath: /etc/kubernetes/manifests
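
Worth noting in the config above: readOnlyPort: 10255 keeps the kubelet's unauthenticated read-only API enabled even though anonymous access to the main port is disabled. As a quick check (a sketch only, assuming network reach to the node and no firewall in the way; NODE_IP is a placeholder):

  # The read-only port serves pod specs for everything scheduled on the node, no credentials required.
  curl -s http://NODE_IP:10255/pods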
cluster-name: nginx-1-cluster
gci-update-strategy: update_disabled
gci-metrics-enabled: true
configure-sh: #!/usr/bin/env bash # Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Due to the GCE custom metadata size limit, we split the entire script into two # files configure.sh and configure-helper.sh. The functionality of downloading # kubernetes configuration, manifests, docker images, and binary files are # put in configure.sh, which is uploaded via GCE custom metadata. set -o errexit set -o nounset set -o pipefail ### Hardcoded constants DEFAULT_CNI_VERSION='v0.9.1' DEFAULT_CNI_HASH='b5a59660053a5f1a33b5dd5624d961864482d9dc8e5b79c9b3afc3d6f62c9830e1c30f9ccba6ee76f5fb1ff0504e58984420cc0680b26cb643f1cb07afbd1c' DEFAULT_NPD_VERSION='v0.8.9' DEFAULT_NPD_HASH_AMD64='4919c47447c5f3871c1dc1bbb817a38c8c8d07a6ce55a77d43cadc098e9ad608ceeab121eec00c13c0b6a2cc3488544d61ce84cdade1823f3fd5163a952de' DEFAULT_NPD_HASH_ARM64='8ccb42a862efdfc1f25ca9af3fd36f9fdff1ac618dd7d39e3b5991505dd610d432364420896ad71f42197a116f28a85dde58b129baa075ebb7312caa57f852' DEFAULT_CRICTL_VERSION='v1.23.0' DEFAULT_CRICTL_AMD64_SHA512='f8c40c66c8d9a85e857396f4977564890815b02658eec591114e04bd8bc6b8ea08bcc159af0088b5eda7bf0dfd16096bf0c174819c204193fb7343ae7d9d5' DEFAULT_CRICTL_ARM64_SHA512='261ac360b0ac3fc88c81f1c48f84b0df0b07ca4db61b0e647c142882d129ba11d21d0de373a27ecfd984436a08a11b19cde2ad5e3412e5d03203caedd62d92' DEFAULT_MOUNTER_TAR_SHA='7956fd42523de6b3107ddc3ce05233d2fcb78436ff07a1389b6eaac91fb2b1b72a08f7a219eaf96ba1ca4da8d45271002e0d60e0644e796c665f99bb356516' ### # This version needs to be the same as in gke/cluster/gce/gci/configure-helper.sh GKE_CONTAINERD_INFRA_CONTAINER="${CONTAINERD_INFRA_CONTAINER:-gke.gcr.io/pause:3.6@sha256:10008c36b4611b44db1229451675d5d7d86c7ddf4ef00f883d806a01547203f6}" RIPTIDE_FUSE_BUCKET="${RIPTIDE_FUSE_BUCKET:-gke-release}" RIPTIDE_SNAPSHOTTER_BUCKET="${RIPTIDE_SNAPSHOTTER_BUCKET:-gke-release}" RIPTIDE_FUSE_VERSION="${RIPTIDE_FUSE_VERSION:-v0.120.0}" RIPTIDE_SNAPSHOTTER_VERSION="${RIPTIDE_SNAPSHOTTER_VERSION:-v1.4-21}" # Standard curl flags. CURL_FLAGS='--fail --silent --show-error --retry 5 --retry-delay 3 --connect-timeout 10 --retry-connrefused' function set-broken-motd { cat > /etc/motd <<EOF Broken (or in progress) Kubernetes node setup! Check the cluster initialization status using the following commands. Master instance: - sudo systemctl status kube-master-installation - sudo systemctl status kube-master-configuration Node instance: - sudo systemctl status kube-node-installation - sudo systemctl status kube-node-configuration EOF } # A function that fetches a GCE metadata value and echoes it out. # Args: # $1 : URL path after /computeMetadata/v1/ (without heading slash). # $2 : An optional default value to echo out if the fetch fails. # # NOTE: this function is duplicated in configure-helper.sh, any changes here # should be duplicated there as well. function get-metadata-value { local default="${2:-}" local status # shellcheck disable=SC2086 curl ${CURL_FLAGS} \ -H 'Metadata-Flavor: Google' \ "http://metadata/computeMetadata/v1/${1}" \ || status="$?" 
status="${status:-0}" if [[ "${status}" -eq 0 || -z "${default}" ]]; then return "${status}" else echo "${default}" fi } # A function to fetch kube-env from GCE metadata server # or using hurl on the master if available function download-kube-env { ( umask 077 local kube_env_path="/tmp/kube-env.yaml" if [[ "$(is-master)" == "true" && $(use-hurl) = "true" ]]; then local kube_env_path="${KUBE_HOME}/kube-env.yaml" download-kube-env-hurl "${kube_env_path}" else local meta_path="http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env" echo "Downloading kube-env via GCE metadata from ${meta_path} to ${kube_env_path}" # shellcheck disable=SC2086 retry-forever 10 curl ${CURL_FLAGS} \ -H "X-Google-Metadata-Request: True" \ -o "${kube_env_path}" \ "${meta_path}" fi # Convert the yaml format file into a shell-style file. eval "$(python3 -c ''' import pipes,sys,yaml items = yaml.load(sys.stdin, Loader=yaml.BaseLoader).items() for k, v in items: print("readonly {var}={value}".format(var=k, value=pipes.quote(str(v)))) ''' < "${kube_env_path}" > "${KUBE_HOME}/kube-env")" # Leave kube-env if we are a master if [[ "$(is-master)" != "true" ]]; then rm -f "${kube_env_path}" fi ) } # A function to pull kube-env from HMS using hurl function download-kube-env-hurl { local -r kube_env_path="$1" local -r endpoint=$(get-metadata-value "instance/attributes/gke-api-endpoint") local -r kube_env_hms_path=$(get-metadata-value "instance/attributes/kube-env-path") echo "Downloading kube-env via hurl from ${kube_env_hms_path} to ${kube_env_path}" retry-forever 30 ${KUBE_HOME}/bin/hurl --hms_address $endpoint \ --dst "${kube_env_path}" \ "${kube_env_hms_path}" chmod 600 "${kube_env_path}" } function download-kubelet-config { local -r dest="$1" echo "Downloading Kubelet config file, if it exists" # Fetch kubelet config file from GCE metadata server. ( umask 077 local -r tmp_kubelet_config="/tmp/kubelet-config.yaml" # shellcheck disable=SC2086 retry-forever 10 curl ${CURL_FLAGS} \ -H "X-Google-Metadata-Request: True" \ -o "${tmp_kubelet_config}" \ http://metadata.google.internal/computeMetadata/v1/instance/attributes/kubelet-config # only write to the final location if curl succeeds mv "${tmp_kubelet_config}" "${dest}" ) } function download-kube-master-certs { # Fetch kube-master-certs from GCE metadata server. ( umask 077 local -r tmp_kube_master_certs="/tmp/kube-master-certs.yaml" # shellcheck disable=SC2086 retry-forever 10 curl ${CURL_FLAGS} \ -H "X-Google-Metadata-Request: True" \ -o "${tmp_kube_master_certs}" \ http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-master-certs # Convert the yaml format file into a shell-style file. eval "$(python3 -c ''' import pipes,sys,yaml items = yaml.load(sys.stdin, Loader=yaml.BaseLoader).items() for k, v in items: print("readonly {var}={value}".format(var=k, value=pipes.quote(str(v)))) ''' < "${tmp_kube_master_certs}" > "${KUBE_HOME}/kube-master-certs")" rm -f "${tmp_kube_master_certs}" ) } function validate-hash { local -r file="$1" local -r expected="$2" actual_sha1=$(sha1sum "${file}" | awk '{ print $1 }') || true actual_sha512=$(sha512sum "${file}" | awk '{ print $1 }') || true if [[ "${actual_sha1}" != "${expected}" ]] && [[ "${actual_sha512}" != "${expected}" ]]; then echo "== ${file} corrupted, sha1 ${actual_sha1}/sha512 ${actual_sha512} doesn't match expected ${expected} ==" return 1 fi } # Get default service account credentials of the VM. 
GCE_METADATA_INTERNAL="http://metadata.google.internal/computeMetadata/v1/instance" function get-credentials { # shellcheck disable=SC2086 curl ${CURL_FLAGS} \ -H "Metadata-Flavor: Google" \ "${GCE_METADATA_INTERNAL}/service-accounts/default/token" \ | python3 -c 'import sys; import json; print(json.loads(sys.stdin.read())["access_token"])' } function valid-storage-scope { # shellcheck disable=SC2086 curl ${CURL_FLAGS} \ -H "Metadata-Flavor: Google" \ "${GCE_METADATA_INTERNAL}/service-accounts/default/scopes" \ | grep -E "auth/devstorage|auth/cloud-platform" } # Determine if this node is a master using metadata function is-master { local -r is_master_val=${KUBERNETES_MASTER:-$(get-metadata-value "instance/attributes/is-master-node")} local result="false" if [[ ${is_master_val:-} == "true" ]]; then result="true" fi echo $result } # A function that returns "true" if hurl should be used, "false" otherwise. function use-hurl { local -r enable_hms_read=${ENABLE_HMS_READ:-$(get-metadata-value "instance/attributes/enable_hms_read")} local result="false" if [[ -f "${KUBE_HOME}/bin/hurl" && "${enable_hms_read}" == "true" ]]; then result="true" fi echo $result } # Retry a download until we get it. Takes a hash and a set of URLs. # # $1 is the sha512/sha1 hash of the URL. Can be "" if the sha512/sha1 hash is unknown. # $2+ are the URLs to download. function download-or-bust { local -r hash="$1" shift 1 while true; do for url in "$@"; do local file="${url##*/}" rm -f "${file}" # if the url belongs to GCS API we should use oauth2_token in the headers if the VM service account has storage scopes local curl_headers="" if [[ "$url" =~ ^https://storage.googleapis.com.* ]] ; then local canUseCredentials=0 echo "Getting the scope of service account configured for VM." if ! valid-storage-scope ; then canUseCredentials=1 # this behavior is preserved for backward compatibility. We want to fail fast if SA is not available # and try to download without SA if scope does not exist on SA echo "No service account or service account without storage scope. Attempt to download without service account token." fi if [[ "${canUseCredentials}" == "0" ]] ; then echo "Getting the service account access token configured for VM." local access_token=""; if access_token=$(get-credentials); then echo "Service account access token is received. Downloading ${url} using this token." else echo "Cannot get a service account token. Exiting." exit 1 fi curl_headers=${access_token:+Authorization: Bearer "${access_token}"} fi fi if ! curl ${curl_headers:+-H "${curl_headers}"} -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10 --retry-connrefused "${url}"; then echo "== Failed to download ${url}. Retrying. ==" elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then echo "== Hash validation of ${url} failed. Retrying. 
==" else if [[ -n "${hash}" ]]; then echo "== Downloaded ${url} (HASH = ${hash}) ==" else echo "== Downloaded ${url} ==" fi return fi done done } function record-preload-info { echo "$1,$2" >> "${KUBE_HOME}/preload_info" } function is-preloaded { local -r key=$1 local -r value=$2 grep -qs "${key},${value}" "${KUBE_HOME}/preload_info" } function split-commas { echo -e "${1//,/'\n'}" } function remount-flexvolume-directory { local -r flexvolume_plugin_dir=$1 mkdir -p "$flexvolume_plugin_dir" mount --bind "$flexvolume_plugin_dir" "$flexvolume_plugin_dir" mount -o remount,exec "$flexvolume_plugin_dir" } function install-gci-mounter-tools { CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter" local -r mounter_tar_sha="${DEFAULT_MOUNTER_TAR_SHA}" if is-preloaded "mounter" "${mounter_tar_sha}"; then echo "mounter is preloaded." return fi echo "Downloading gci mounter tools." mkdir -p "${CONTAINERIZED_MOUNTER_HOME}" chmod a+x "${CONTAINERIZED_MOUNTER_HOME}" mkdir -p "${CONTAINERIZED_MOUNTER_HOME}/rootfs" download-or-bust "${mounter_tar_sha}" "https://storage.googleapis.com/kubernetes-release/gci-mounter/mounter.tar" cp "${KUBE_HOME}/kubernetes/server/bin/mounter" "${CONTAINERIZED_MOUNTER_HOME}/mounter" chmod a+x "${CONTAINERIZED_MOUNTER_HOME}/mounter" mv "${KUBE_HOME}/mounter.tar" /tmp/mounter.tar tar xf /tmp/mounter.tar -C "${CONTAINERIZED_MOUNTER_HOME}/rootfs" rm /tmp/mounter.tar mkdir -p "${CONTAINERIZED_MOUNTER_HOME}/rootfs/var/lib/kubelet" record-preload-info "mounter" "${mounter_tar_sha}" } # Install node problem detector binary. function install-node-problem-detector { if [[ -n "${NODE_PROBLEM_DETECTOR_VERSION:-}" ]]; then local -r npd_version="${NODE_PROBLEM_DETECTOR_VERSION}" local -r npd_hash="${NODE_PROBLEM_DETECTOR_TAR_HASH}" else local -r npd_version="${DEFAULT_NPD_VERSION}" case "${HOST_PLATFORM}/${HOST_ARCH}" in linux/amd64) local -r npd_hash="${DEFAULT_NPD_HASH_AMD64}" ;; linux/arm64) local -r npd_hash="${DEFAULT_NPD_HASH_ARM64}" ;; # no other architectures are supported currently. # Assumption is that this script only runs on linux, # see cluster/gce/windows/k8s-node-setup.psm1 for windows # https://github.com/kubernetes/node-problem-detector/releases/ *) echo "Unrecognized version and platform/arch combination:" echo "$DEFAULT_NPD_VERSION $HOST_PLATFORM/$HOST_ARCH" echo "Set NODE_PROBLEM_DETECTOR_VERSION and NODE_PROBLEM_DETECTOR_TAR_HASH to overwrite" exit 1 ;; esac fi local -r npd_tar="node-problem-detector-${npd_version}-${HOST_PLATFORM}_${HOST_ARCH}.tar.gz" if is-preloaded "${npd_tar}" "${npd_hash}"; then echo "${npd_tar} is preloaded." return fi echo "Downloading ${npd_tar}." 
local -r npd_release_path="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-https://storage.googleapis.com/kubernetes-release}" download-or-bust "${npd_hash}" "${npd_release_path}/node-problem-detector/${npd_tar}" local -r npd_dir="${KUBE_HOME}/node-problem-detector" mkdir -p "${npd_dir}" tar xzf "${KUBE_HOME}/${npd_tar}" -C "${npd_dir}" --overwrite mv "${npd_dir}/bin"/* "${KUBE_BIN}" chmod a+x "${KUBE_BIN}/node-problem-detector" rmdir "${npd_dir}/bin" rm -f "${KUBE_HOME}/${npd_tar}" record-preload-info "${npd_tar}" "${npd_hash}" } function install-cni-binaries { local -r cni_version=${CNI_VERSION:-$DEFAULT_CNI_VERSION} if [[ -n "${CNI_VERSION:-}" ]]; then local -r cni_hash="${CNI_HASH:-}" else local -r cni_hash="${DEFAULT_CNI_HASH}" fi local -r cni_tar="${CNI_TAR_PREFIX}${cni_version}.tgz" local -r cni_url="${CNI_STORAGE_URL_BASE}/${cni_version}/${cni_tar}" if is-preloaded "${cni_tar}" "${cni_hash}"; then echo "${cni_tar} is preloaded." return fi echo "Downloading cni binaries" download-or-bust "${cni_hash}" "${cni_url}" local -r cni_dir="${KUBE_HOME}/cni" mkdir -p "${cni_dir}/bin" tar xzf "${KUBE_HOME}/${cni_tar}" -C "${cni_dir}/bin" --overwrite mv "${cni_dir}/bin"/* "${KUBE_BIN}" rmdir "${cni_dir}/bin" rm -f "${KUBE_HOME}/${cni_tar}" record-preload-info "${cni_tar}" "${cni_hash}" } # Install crictl binary. # Assumptions: HOST_PLATFORM and HOST_ARCH are specified by calling detect_host_info. function install-crictl { if [[ -n "${CRICTL_VERSION:-}" ]]; then local -r crictl_version="${CRICTL_VERSION}" local -r crictl_hash="${CRICTL_TAR_HASH}" else local -r crictl_version="${DEFAULT_CRICTL_VERSION}" case "${HOST_PLATFORM}/${HOST_ARCH}" in linux/amd64) local -r crictl_hash="${DEFAULT_CRICTL_AMD64_SHA512}" ;; linux/arm64) local -r crictl_hash="${DEFAULT_CRICTL_ARM64_SHA512}" ;; *) echo "Unrecognized version and platform/arch combination:" echo "$DEFAULT_CRICTL_VERSION $HOST_PLATFORM/$HOST_ARCH" echo "Set CRICTL_VERSION and CRICTL_TAR_HASH to overwrite" exit 1 esac fi local -r crictl="crictl-${crictl_version}-${HOST_PLATFORM}-${HOST_ARCH}.tar.gz" # Create crictl config file. cat > /etc/crictl.yaml <<EOF runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock} EOF if is-preloaded "${crictl}" "${crictl_hash}"; then echo "crictl is preloaded" return fi echo "Downloading crictl" local -r crictl_path="https://storage.googleapis.com/k8s-artifacts-cri-tools/release/${crictl_version}" download-or-bust "${crictl_hash}" "${crictl_path}/${crictl}" tar xf "${crictl}" mv crictl "${KUBE_BIN}/crictl" rm -f "${crictl}" record-preload-info "${crictl}" "${crictl_hash}" } function preload-pause-image { if is-preloaded "pause" "${GKE_CONTAINERD_INFRA_CONTAINER}"; then echo "pause image is preloaded" return fi # preloading pause image. It will be used in preloader and will be # useful for staging builds where access_token is needed to pull the image local access_token=""; if access_token=$(get-credentials); then "${KUBE_BIN}/crictl" pull --creds "oauth2accesstoken:${access_token}" ${GKE_CONTAINERD_INFRA_CONTAINER} else echo "No access token. Pulling without it." "${KUBE_BIN}/crictl" pull ${GKE_CONTAINERD_INFRA_CONTAINER} fi record-preload-info "pause" "${GKE_CONTAINERD_INFRA_CONTAINER}" } function install-exec-auth-plugin { if [[ ! 
"${EXEC_AUTH_PLUGIN_URL:-}" ]]; then return fi local -r plugin_url="${EXEC_AUTH_PLUGIN_URL}" local -r plugin_hash="${EXEC_AUTH_PLUGIN_HASH}" if is-preloaded "gke-exec-auth-plugin" "${plugin_hash}"; then echo "gke-exec-auth-plugin is preloaded" return fi echo "Downloading gke-exec-auth-plugin binary" download-or-bust "${plugin_hash}" "${plugin_url}" mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}/gke-exec-auth-plugin" chmod a+x "${KUBE_BIN}/gke-exec-auth-plugin" if [[ ! "${EXEC_AUTH_PLUGIN_LICENSE_URL:-}" ]]; then return fi local -r license_url="${EXEC_AUTH_PLUGIN_LICENSE_URL}" echo "Downloading gke-exec-auth-plugin license" download-or-bust "" "${license_url}" mv "${KUBE_HOME}/LICENSE" "${KUBE_BIN}/gke-exec-auth-plugin-license" record-preload-info "gke-exec-auth-plugin" "${plugin_hash}" } function install-kube-manifests { # Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/. local dst_dir="${KUBE_HOME}/kube-manifests" mkdir -p "${dst_dir}" local manifests_tar_urls while IFS= read -r url; do manifests_tar_urls+=("$url") done < <(split-commas "${KUBE_MANIFESTS_TAR_URL}") local -r manifests_tar="${manifests_tar_urls[0]##*/}" if [ -n "${KUBE_MANIFESTS_TAR_HASH:-}" ]; then local -r manifests_tar_hash="${KUBE_MANIFESTS_TAR_HASH}" else echo "Downloading k8s manifests hash (not found in env)" download-or-bust "" "${manifests_tar_urls[@]/.tar.gz/.tar.gz.sha512}" local -r manifests_tar_hash=$(cat "${manifests_tar}.sha512") fi if is-preloaded "${manifests_tar}" "${manifests_tar_hash}"; then echo "${manifests_tar} is preloaded." return fi echo "Downloading k8s manifests tar" download-or-bust "${manifests_tar_hash}" "${manifests_tar_urls[@]}" tar xzf "${KUBE_HOME}/${manifests_tar}" -C "${dst_dir}" --overwrite local -r kube_addon_registry="${KUBE_ADDON_REGISTRY:-k8s.gcr.io}" if [[ "${kube_addon_registry}" != "k8s.gcr.io" ]]; then find "${dst_dir}" \(-name '*.yaml' -or -name '*.yaml.in'\) -print0 | \ xargs -0 sed -ri "s@(image:\s.*)k8s.gcr.io@\1${kube_addon_registry}@" find "${dst_dir}" \(-name '*.manifest' -or -name '*.json'\) -print0 | \ xargs -0 sed -ri "s@(image\":\s+\")k8s.gcr.io@\1${kube_addon_registry}@" fi cp "${dst_dir}/kubernetes/gci-trusty/gci-configure-helper.sh" "${KUBE_BIN}/configure-helper.sh" cp "${dst_dir}/kubernetes/gci-trusty/configure-kubeapiserver.sh" "${KUBE_BIN}/configure-kubeapiserver.sh" if [[ -e "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure.sh" ]]; then cp "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure.sh" "${KUBE_BIN}/" fi if [[ -e "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" ]]; then cp "${dst_dir}/kubernetes/gci-trusty/gke-internal-configure-helper.sh" "${KUBE_BIN}/" fi if [[ -e "${dst_dir}/kubernetes/gci-trusty/node-registration-checker.sh" ]]; then cp "${dst_dir}/kubernetes/gci-trusty/node-registration-checker.sh" "${KUBE_BIN}/" fi cp "${dst_dir}/kubernetes/gci-trusty/health-monitor.sh" "${KUBE_BIN}/health-monitor.sh" cp "${dst_dir}/kubernetes/gci-trusty/networkd-monitor.sh" "${KUBE_BIN}/networkd-monitor.sh" rm -f "${KUBE_HOME}/${manifests_tar}" rm -f "${KUBE_HOME}/${manifests_tar}.sha512" record-preload-info "${manifests_tar}" "${manifests_tar_hash}" } # Installs hurl to ${KUBE_HOME}/bin/hurl if not already installed. 
function install-hurl { cd "${KUBE_HOME}" if [[ -f "${KUBE_HOME}/bin/hurl" ]]; then echo "install-hurl: hurl already installed" return fi local -r hurl_gcs_att="instance/attributes/hurl-gcs-url" local -r hurl_gcs_url=${HURL_GCS_URL:-$(get-metadata-value "${hurl_gcs_att}")} if [[ -z "${hurl_gcs_url}" ]]; then # URL not present in GCE Instance Metadata echo "install-hurl: Unable to find GCE metadata ${hurl_gcs_att}" return fi # Download hurl binary from a GCS bucket. local -r hurl_bin="hurl" echo "install-hurl: Installing hurl from ${hurl_gcs_url} ... " download-or-bust "" "${hurl_gcs_url}" if [[ -f "${KUBE_HOME}/${hurl_bin}" ]]; then chmod a+x ${KUBE_HOME}/${hurl_bin} mv "${KUBE_HOME}/${hurl_bin}" "${KUBE_BIN}/${hurl_bin}" echo "install-hurl: hurl installed to ${KUBE_BIN}/${hurl_bin}" return fi } # Installs inplace to ${KUBE_HOME}/bin/inplace if not already installed. function install-inplace { cd "${KUBE_HOME}" if [[ -f "${KUBE_HOME}/bin/inplace" ]]; then echo "install-inplace: inplace already installed" return fi local -r inplace_gcs_att="instance/attributes/inplace-gcs-url" local -r inplace_gcs_url=${INPLACE_GCS_URL:-$(get-metadata-value "${inplace_gcs_att}")} if [[ -z "${inplace_gcs_url}" ]]; then # URL not present in GCE Instance Metadata echo "install-inplace: Unable to find GCE metadata ${inplace_gcs_att}" return fi echo "install-inplace: Installing inplace from ${inplace_gcs_url} ..." download-or-bust "" "${inplace_gcs_url}" local -r inplace_bin="inplace" if [[ -f "${KUBE_HOME}/${inplace_bin}" ]]; then mv "${KUBE_HOME}/${inplace_bin}" "${KUBE_BIN}/${inplace_bin}" if [[ ! -d "${KUBE_HOME}/${inplace_bin}" ]]; then mkdir -p "${KUBE_HOME}/${inplace_bin}" fi cat > "${KUBE_HOME}/${inplace_bin}/inplace.hash" <<EOF ${inplace_gcs_url} EOF echo "install-inplace: inplace installed to ${KUBE_BIN}/${inplace_bin}" return fi } # A function to download in-place component manifests if in-place agent is # present. function inplace-run-once { if [[ -f "${KUBE_HOME}/bin/inplace" ]]; then echo "inplace-run-once: using inplace to download inplace component manefists" local dst_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty" mkdir -p "${dst_dir}/in-place" mkdir -p "${dst_dir}/gce-extras/in-place" ${KUBE_HOME}/bin/inplace --mode=run-once --in_place_addon_path="${dst_dir}/gce-extras/in-place" --master_pod_path="${dst_dir}/in-place" fi } function install-auger { echo "Downloading auger binary" if [[ -f "${KUBE_HOME}/bin/auger" ]]; then echo "auger is already installed" return fi AUGER_STORE_PATH="${AUGER_STORE_PATH:-https://storage.googleapis.com/gke-release-staging/auger}" AUGER_VERSION="${AUGER_VERSION:-v1.0.0-gke.1}" download-or-bust "" "${AUGER_STORE_PATH}/${AUGER_VERSION}/auger.sha1" sha1="$(cat auger.sha1)" readonly sha1 # Declare readonly separately to avoid masking error values. rm -f "auger.sha1" download-or-bust "${sha1}" "${AUGER_STORE_PATH}/${AUGER_VERSION}/auger" mv "${KUBE_HOME}/auger" "${KUBE_HOME}/bin/auger" chmod a+x "${KUBE_HOME}/bin/auger" record-preload-info "auger" "${sha1}" } # Extract etcdctl binary from etcd image. 
function install-etcdctl { echo "Installing etcdctl binary" if [[ -f "${KUBE_HOME}/bin/etcdctl" ]]; then echo "etcdctl is already installed" return fi local -r etcd_image="gcr.io/gke-master-images/etcd:${ETCDCTL_VERSION}" container_id="$(docker create "${etcd_image}" sh)" readonly containerId docker cp "${container_id}:usr/local/bin/etcdctl" "${KUBE_HOME}/bin/etcdctl" chmod a+x "${KUBE_HOME}/bin/etcdctl" docker rm "${container_id}" docker rmi "${etcd_image}" } function install-gcfsd { echo "Downloading Riptide FUSE client" if is-preloaded "gcfsd" "${RIPTIDE_FUSE_VERSION}"; then echo "gcfsd is preloaded." return fi if [[ "${HOST_ARCH}" == "arm64" ]]; then RIPTIDE_FUSE_STORE_PATH="https://storage.googleapis.com/${RIPTIDE_FUSE_BUCKET}/gcfsd/${RIPTIDE_FUSE_VERSION}/arm64" else RIPTIDE_FUSE_STORE_PATH="https://storage.googleapis.com/${RIPTIDE_FUSE_BUCKET}/gcfsd/${RIPTIDE_FUSE_VERSION}" fi if [[ "${RIPTIDE_FUSE_VERSION}" > v0.19.0 ]] && [[ "${RIPTIDE_FUSE_VERSION}" != v0.4.0 ]] ; then download-or-bust "" "${RIPTIDE_FUSE_STORE_PATH}/gcfsd.tar.gz" fi download-or-bust "" "${RIPTIDE_FUSE_STORE_PATH}/gcfsd" mv "${KUBE_HOME}/gcfsd" "${KUBE_HOME}/bin/gcfsd" chmod a+x "${KUBE_HOME}/bin/gcfsd" record-preload-info "gcfsd" "${RIPTIDE_FUSE_VERSION}" } function install-riptide-snapshotter { echo "Downloading Riptide snapshotter" if is-preloaded "containerd-gcfs-grpc" "${RIPTIDE_SNAPSHOTTER_VERSION}"; then echo "containerd-gcfs-grpc is preloaded." return fi RIPTIDE_SNAPSHOTTER_STORE_PATH="https://storage.googleapis.com/${RIPTIDE_SNAPSHOTTER_BUCKET}/gcfs-snapshotter/${RIPTIDE_SNAPSHOTTER_VERSION}" if [[ "${RIPTIDE_SNAPSHOTTER_VERSION}" > v1.4-2 ]]; then download-or-bust "" "${RIPTIDE_SNAPSHOTTER_STORE_PATH}/containerd-gcfs-grpc.tar.gz" fi if [[ "${HOST_ARCH}" == "arm64" ]]; then RIPTIDE_SNAPSHOTTER_BINARY="containerd-gcfs-grpc-arm64" else RIPTIDE_SNAPSHOTTER_BINARY="containerd-gcfs-grpc" fi download-or-bust "" "${RIPTIDE_SNAPSHOTTER_STORE_PATH}/${RIPTIDE_SNAPSHOTTER_BINARY}" mv "${KUBE_HOME}/${RIPTIDE_SNAPSHOTTER_BINARY}" "${KUBE_HOME}/bin/containerd-gcfs-grpc" chmod a+x "${KUBE_HOME}/bin/containerd-gcfs-grpc" record-preload-info "containerd-gcfs-grpc" "${RIPTIDE_SNAPSHOTTER_VERSION}" } # Install Riptide FUSE client and Riptide snapshotter function install-riptide { install-gcfsd install-riptide-snapshotter } function configure-cgroup-mode { if which cgroup_helper > /dev/null 2>&1; then if [[ "${CGROUP_MODE:-}" == "v1" ]] && cgroup_helper show | grep -q 'unified'; then cgroup_helper set hybrid echo "set cgroup config to hybrid, now rebooting..." reboot elif [[ "${CGROUP_MODE:-}" == "v2" ]] && cgroup_helper show | grep -q 'hybrid'; then cgroup_helper set unified echo "set cgroup config to unified, now rebooting..." reboot fi fi } # A helper function for loading a docker image. It keeps trying up to 5 times. # # $1: Full path of the docker image function try-load-docker-image { local -r img=$1 echo "Try to load docker image file ${img}" # Temporarily turn off errexit, because we don't want to exit on first failure. 
set +e local -r max_attempts=5 local -i attempt_num=1 if [[ "${CONTAINER_RUNTIME_NAME:-}" == "docker" ]]; then load_image_command=${LOAD_IMAGE_COMMAND:-docker load -i} elif [[ "${CONTAINER_RUNTIME_NAME:-}" == "containerd" || "${CONTAINERD_TEST:-}" == "containerd" ]]; then load_image_command=${LOAD_IMAGE_COMMAND:-ctr -n=k8s.io images import} else load_image_command="${LOAD_IMAGE_COMMAND:-}" fi # Deliberately word split load_image_command # shellcheck disable=SC2086 until timeout 30 ${load_image_command} "${img}"; do if [[ "${attempt_num}" == "${max_attempts}" ]]; then echo "Fail to load docker image file ${img} using ${load_image_command} after ${max_attempts} retries. Exit!!" exit 1 else attempt_num=$((attempt_num+1)) sleep 5 fi done # Re-enable errexit. set -e } # Loads kube-system docker images. It is better to do it before starting kubelet, # as kubelet will restart docker daemon, which may interfere with loading images. function load-docker-images { echo "Start loading kube-system docker images" local -r img_dir="${KUBE_HOME}/kube-docker-files" if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then try-load-docker-image "${img_dir}/kube-apiserver.tar" try-load-docker-image "${img_dir}/kube-controller-manager.tar" try-load-docker-image "${img_dir}/kube-scheduler.tar" else try-load-docker-image "${img_dir}/kube-proxy.tar" fi } # If we are on ubuntu we can try to install docker function install-docker { # bailout if we are not on ubuntu if ! command -v apt-get >/dev/null 2>&1; then echo "Unable to automatically install docker. Bailing out..." return fi # Install Docker deps, some of these are already installed in the image but # that's fine since they won't re-install and we can reuse the code below # for another image someday. apt-get update apt-get install -y --no-install-recommends \ apt-transport-https \ ca-certificates \ socat \ curl \ gnupg2 \ software-properties-common \ lsb-release release=$(lsb_release -cs) # Add the Docker apt-repository # shellcheck disable=SC2086 curl ${CURL_FLAGS} \ --location \ "https://download.docker.com/${HOST_PLATFORM}/$(. /etc/os-release; echo "$ID")/gpg" \ | apt-key add - add-apt-repository \ "deb [arch=${HOST_ARCH}] https://download.docker.com/${HOST_PLATFORM}/$(. /etc/os-release; echo "$ID") \ $release stable" # Install Docker apt-get update && \ apt-get install -y --no-install-recommends "${GCI_DOCKER_VERSION:-"docker-ce=5:19.03.*"}" rm -rf /var/lib/apt/lists/* } # If we are on ubuntu we can try to install containerd function install-containerd-ubuntu { # bailout if we are not on ubuntu if [[ -z "$(command -v lsb_release)" || $(lsb_release -si) != "Ubuntu" ]]; then echo "Unable to automatically install containerd in non-ubuntu image. Bailing out..." exit 2 fi # Install dependencies, some of these are already installed in the image but # that's fine since they won't re-install and we can reuse the code below # for another image someday. apt-get update apt-get install -y --no-install-recommends \ apt-transport-https \ ca-certificates \ socat \ curl \ gnupg2 \ software-properties-common \ lsb-release release=$(lsb_release -cs) # Add the Docker apt-repository (as we install containerd from there) # shellcheck disable=SC2086 curl ${CURL_FLAGS} \ --location \ "https://download.docker.com/${HOST_PLATFORM}/$(. /etc/os-release; echo "$ID")/gpg" \ | apt-key add - add-apt-repository \ "deb [arch=${HOST_ARCH}] https://download.docker.com/${HOST_PLATFORM}/$(. 
/etc/os-release; echo "$ID") \ $release stable" # Install containerd from Docker repo apt-get update && \ apt-get install -y --no-install-recommends containerd rm -rf /var/lib/apt/lists/* # Override to latest versions of containerd and runc systemctl stop containerd if [[ -n "${UBUNTU_INSTALL_CONTAINERD_VERSION:-}" ]]; then # TODO(https://github.com/containerd/containerd/issues/2901): Remove this check once containerd has arm64 release. if [[ $(dpkg --print-architecture) != "amd64" ]]; then echo "Unable to automatically install containerd in non-amd64 image. Bailing out..." exit 2 fi # containerd versions have slightly different url(s), so try both # shellcheck disable=SC2086 ( curl ${CURL_FLAGS} \ --location \ "https://github.com/containerd/containerd/releases/download/${UBUNTU_INSTALL_CONTAINERD_VERSION}/containerd-${UBUNTU_INSTALL_CONTAINERD_VERSION:1}-${HOST_PLATFORM}-${HOST_ARCH}.tar.gz" \ || curl ${CURL_FLAGS} \ --location \ "https://github.com/containerd/containerd/releases/download/${UBUNTU_INSTALL_CONTAINERD_VERSION}/containerd-${UBUNTU_INSTALL_CONTAINERD_VERSION:1}.${HOST_PLATFORM}-${HOST_ARCH}.tar.gz" ) \ | tar --overwrite -xzv -C /usr/ fi if [[ -n "${UBUNTU_INSTALL_RUNC_VERSION:-}" ]]; then # TODO: Remove this check once runc has arm64 release. if [[ $(dpkg --print-architecture) != "amd64" ]]; then echo "Unable to automatically install runc in non-amd64. Bailing out..." exit 2 fi # shellcheck disable=SC2086 curl ${CURL_FLAGS} \ --location \ "https://github.com/opencontainers/runc/releases/download/${UBUNTU_INSTALL_RUNC_VERSION}/runc.${HOST_ARCH}" --output /usr/sbin/runc \ && chmod 755 /usr/sbin/runc fi sudo systemctl start containerd } function ensure-container-runtime { container_runtime="${CONTAINER_RUNTIME:-docker}" if [[ "${container_runtime}" == "docker" ]]; then if ! command -v docker >/dev/null 2>&1; then install-docker if ! command -v docker >/dev/null 2>&1; then echo "ERROR docker not found. Aborting." exit 2 fi fi docker version elif [[ "${container_runtime}" == "containerd" ]]; then # Install containerd/runc if requested if [[ -n "${UBUNTU_INSTALL_CONTAINERD_VERSION:-}" || -n "${UBUNTU_INSTALL_RUNC_VERSION:-}" ]]; then install-containerd-ubuntu fi # Verify presence and print versions of ctr, containerd, runc if ! command -v ctr >/dev/null 2>&1; then echo "ERROR ctr not found. Aborting." exit 2 fi ctr --version if ! command -v containerd >/dev/null 2>&1; then echo "ERROR containerd not found. Aborting." exit 2 fi containerd --version if ! command -v runc >/dev/null 2>&1; then echo "ERROR runc not found. Aborting." exit 2 fi runc --version fi } # Downloads kubernetes binaries and kube-system manifest tarball, unpacks them, # and places them into suitable directories. Files are placed in /home/kubernetes. function install-kube-binary-config { cd "${KUBE_HOME}" local server_binary_tar_urls while IFS= read -r url; do server_binary_tar_urls+=("$url") done < <(split-commas "${SERVER_BINARY_TAR_URL}") local -r server_binary_tar="${server_binary_tar_urls[0]##*/}" if [[ -n "${SERVER_BINARY_TAR_HASH:-}" ]]; then local -r server_binary_tar_hash="${SERVER_BINARY_TAR_HASH}" else echo "Downloading binary release sha512 (not found in env)" download-or-bust "" "${server_binary_tar_urls[@]/.tar.gz/.tar.gz.sha512}" local -r server_binary_tar_hash=$(cat "${server_binary_tar}.sha512") fi if is-preloaded "${server_binary_tar}" "${server_binary_tar_hash}"; then echo "${server_binary_tar} is preloaded." 
else echo "Downloading binary release tar" download-or-bust "${server_binary_tar_hash}" "${server_binary_tar_urls[@]}" tar xzf "${KUBE_HOME}/${server_binary_tar}" -C "${KUBE_HOME}" --overwrite # Copy docker_tag and image files to ${KUBE_HOME}/kube-docker-files. local -r src_dir="${KUBE_HOME}/kubernetes/server/bin" local dst_dir="${KUBE_HOME}/kube-docker-files" mkdir -p "${dst_dir}" cp "${src_dir}/"*.docker_tag "${dst_dir}" if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then cp "${src_dir}/kube-proxy.tar" "${dst_dir}" else cp "${src_dir}/kube-apiserver.tar" "${dst_dir}" cp "${src_dir}/kube-controller-manager.tar" "${dst_dir}" cp "${src_dir}/kube-scheduler.tar" "${dst_dir}" cp -r "${KUBE_HOME}/kubernetes/addons" "${dst_dir}" fi load-docker-images mv "${src_dir}/kubelet" "${KUBE_BIN}" mv "${src_dir}/kubectl" "${KUBE_BIN}" # Some older images have LICENSES baked-in as a file. Presumably they will # have the directory baked-in eventually. rm -rf "${KUBE_HOME}"/LICENSES mv "${KUBE_HOME}/kubernetes/LICENSES" "${KUBE_HOME}" mv "${KUBE_HOME}/kubernetes/kubernetes-src.tar.gz" "${KUBE_HOME}" record-preload-info "${server_binary_tar}" "${server_binary_tar_hash}" fi if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]] || \ [[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then install-cni-binaries fi # Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/. install-kube-manifests chmod -R 755 "${KUBE_BIN}" # Install gci mounter related artifacts to allow mounting storage volumes in GCI install-gci-mounter-tools # Remount the Flexvolume directory with the "exec" option, if needed. if [[ "${REMOUNT_VOLUME_PLUGIN_DIR:-}" == "true" && -n "${VOLUME_PLUGIN_DIR:-}" ]]; then remount-flexvolume-directory "${VOLUME_PLUGIN_DIR}" fi # Install crictl on each node. install-crictl # Preload pause image preload-pause-image # Copy health check binaries to a tmpfs mount to reduce block IO usage. setup-shm-healthcheck-binaries # TODO(awly): include the binary and license in the OS image. install-exec-auth-plugin # Source GKE specific scripts. # # This must be done after install-kube-manifests where the # gke-internal-configure.sh is downloaded. if [[ -e "${KUBE_HOME}/bin/gke-internal-configure.sh" ]]; then echo "Running GKE internal configuration script gke-internal-configure.sh" . "${KUBE_HOME}/bin/gke-internal-configure.sh" fi if [[ "${KUBERNETES_MASTER:-}" == "false" ]] && \ [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then install-node-problem-detector if [[ -e "${KUBE_HOME}/bin/gke-internal-configure.sh" ]]; then install-npd-custom-plugins fi fi # Clean up. 
rm -rf "${KUBE_HOME}/kubernetes" rm -f "${KUBE_HOME}/${server_binary_tar}" rm -f "${KUBE_HOME}/${server_binary_tar}.sha512" } function setup-shm-healthcheck-binaries() { if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then return fi if [[ "${ENABLE_SHM_HEALTHCHECK_BINARIES:-}" != "true" ]];then return fi local -r shm_dir="${HEALTHCHECK_SHM_DIR:-/dev/kube_shm}" local -r shm_bin_dir="${shm_dir}/bin" mkdir -p "$shm_dir" mount -t tmpfs -o exec none "$shm_dir" mkdir "${shm_bin_dir}" cp -f "${KUBE_BIN}/crictl" "${shm_bin_dir}/crictl" cp -f "$(which curl)" "${shm_bin_dir}/curl" } function install-extra-node-requirements() { if [[ "${KUBERNETES_MASTER:-}" != "false" ]]; then return fi if [[ -e "${KUBE_HOME}/bin/gke-internal-configure.sh" ]]; then # M4A is not relevant on ARM if [[ "${HOST_ARCH}" == "amd64" ]]; then install-m4a-apparmor-profile fi fi } # This function detects the platform/arch of the machine where the script runs, # and sets the HOST_PLATFORM and HOST_ARCH environment variables accordingly. # Callers can specify HOST_PLATFORM_OVERRIDE and HOST_ARCH_OVERRIDE to skip the detection. # This function is adapted from the detect_client_info function in cluster/get-kube-binaries.sh # and kube::util::host_os, kube::util::host_arch functions in hack/lib/util.sh # This function should be synced with detect_host_info in ./configure-helper.sh function detect_host_info() { HOST_PLATFORM=${HOST_PLATFORM_OVERRIDE:-"$(uname -s)"} case "${HOST_PLATFORM}" in Linux|linux) HOST_PLATFORM="linux" ;; *) echo "Unknown, unsupported platform: ${HOST_PLATFORM}." >&2 echo "Supported platform(s): linux." >&2 echo "Bailing out." >&2 exit 2 esac HOST_ARCH=${HOST_ARCH_OVERRIDE:-"$(uname -m)"} case "${HOST_ARCH}" in x86_64*|i?86_64*|amd64*) HOST_ARCH="amd64" ;; aHOST_arch64*|aarch64*|arm64*) HOST_ARCH="arm64" ;; *) echo "Unknown, unsupported architecture (${HOST_ARCH})." >&2 echo "Supported architecture(s): amd64 and arm64." >&2 echo "Bailing out." >&2 exit 2 ;; esac } # Retries a command forever with a delay between retries. # Args: # $1 : delay between retries, in seconds. # $2... : the command to run. function retry-forever { local -r delay="$1" shift 1 until "$@"; do echo "== $* failed, retrying after ${delay}s" sleep "${delay}" done } # Initializes variables used by the log-* functions. # # get-metadata-value must be defined before calling this function. # # NOTE: this function is duplicated in configure-helper.sh, any changes here # should be duplicated there as well. function log-init { # Used by log-* functions. LOG_CLUSTER_ID=${LOG_CLUSTER_ID:-$(get-metadata-value 'instance/attributes/cluster-uid' 'get-metadata-value-error')} LOG_INSTANCE_NAME=$(hostname || echo 'hostname-error') LOG_BOOT_ID=$(journalctl --list-boots | grep -E '^ *0' | awk '{print $2}' || echo 'journalctl-error') declare -Ag LOG_START_TIMES declare -ag LOG_TRAP_STACK LOG_STATUS_STARTED='STARTED' LOG_STATUS_COMPLETED='COMPLETED' LOG_STATUS_ERROR='ERROR' } # Sets an EXIT trap. # Args: # $1:... : the trap command. # # NOTE: this function is duplicated in configure-helper.sh, any changes here # should be duplicated there as well. function log-trap-push { local t="${*:1}" LOG_TRAP_STACK+=("${t}") # shellcheck disable=2064 trap "${t}" EXIT } # Removes and restores an EXIT trap. # # NOTE: this function is duplicated in configure-helper.sh, any changes here # should be duplicated there as well. function log-trap-pop { # Remove current trap. unset 'LOG_TRAP_STACK[-1]' # Restore previous trap. 
if [ ${#LOG_TRAP_STACK[@]} -ne 0 ]; then local t="${LOG_TRAP_STACK[-1]}" # shellcheck disable=2064 trap "${t}" EXIT else # If no traps in stack, clear. trap EXIT fi } # Logs the end of a bootstrap step that errored. # Args: # $1 : bootstrap step name. # # NOTE: this function is duplicated in configure-helper.sh, any changes here # should be duplicated there as well. function log-error { local bootstep="$1" log-proto "${bootstep}" "${LOG_STATUS_ERROR}" "encountered non-zero exit code" } # Wraps a command with bootstrap logging. # Args: # $1 : bootstrap step name. # $2... : the command to run. # # NOTE: this function is duplicated in configure-helper.sh, any changes here # should be duplicated there as well. function log-wrap { local bootstep="$1" local command="${*:2}" log-trap-push "log-error ${bootstep}" log-proto "${bootstep}" "${LOG_STATUS_STARTED}" $command log-proto "${bootstep}" "${LOG_STATUS_COMPLETED}" log-trap-pop } # Logs a bootstrap step start. Prefer log-wrap. # Args: # $1 : bootstrap step name. # # NOTE: this function is duplicated in configure-helper.sh, any changes here # should be duplicated there as well. function log-start { local bootstep="$1" log-trap-push "log-error ${bootstep}" log-proto "${bootstep}" "${LOG_STATUS_STARTED}" } # Logs a bootstrap step end. Prefer log-wrap. # Args: # $1 : bootstrap step name. # # NOTE: this function is duplicated in configure-helper.sh, any changes here # should be duplicated there as well. function log-end { local bootstep="$1" log-proto "${bootstep}" "${LOG_STATUS_COMPLETED}" log-trap-pop } # Writes a log proto to stdout. # Args: # $1: bootstrap step name. # $2: status. Either 'STARTED', 'COMPLETED', or 'ERROR'. # $3: optional status reason. # # NOTE: this function is duplicated in configure-helper.sh, any changes here # should be duplicated there as well. function log-proto { local bootstep="$1" local status="$2" local status_reason="${3:-}" # Get current time. local current_time current_time="$(date --utc '+%s.%N')" # ...formatted as UTC RFC 3339. local timestamp timestamp="$(date --utc --date="@${current_time}" '+%FT%T.%NZ')" # Calculate latency. local latency='null' if [ "${status}" == "${LOG_STATUS_STARTED}" ]; then LOG_START_TIMES["${bootstep}"]="${current_time}" else local start_time="${LOG_START_TIMES["${bootstep}"]}" unset 'LOG_START_TIMES['"${bootstep}"']' # Bash cannot do non-integer math, shell out to awk. latency="$(echo "${current_time} ${start_time}" | awk '{print $1 - $2}')s" # The default latency is null which cannot be wrapped as a string so we must # do it here instead of the printf. latency="\"${latency}\"" fi printf '[cloud.kubernetes.monitoring.proto.SerialportLog] {"cluster_hash":"%s","vm_instance_name":"%s","boot_id":"%s","timestamp":"%s","bootstrap_status":{"step_name":"%s","status":"%s","status_reason":"%s","latency":%s}}\n' \ "${LOG_CLUSTER_ID}" "${LOG_INSTANCE_NAME}" "${LOG_BOOT_ID}" "${timestamp}" "${bootstep}" "${status}" "${status_reason}" "${latency}" } ######### Main Function ########## log-init detect_host_info # Preloader will source this script, and skip the main function. The preloader # will choose what to preload by calling install-X functions directly. # When configure.sh is sourced by the preload script, $0 and $BASH_SOURCE are # different. $BASH_SOURCE still contains the path of configure.sh, while $0 is # the path of the preload script. if [[ "$0" != "$BASH_SOURCE" && "${IS_PRELOADER:-"false"}" == "true" ]]; then echo "Running in preloader instead of VM bootsrapping. 
Skipping installation steps as preloader script will source configure.sh and call corresponding functions." return fi log-start 'ConfigureMain' echo "Start to install kubernetes files" # if install fails, message-of-the-day (motd) will warn at login shell log-wrap 'SetBrokenMotd' set-broken-motd KUBE_HOME="/home/kubernetes" KUBE_BIN="${KUBE_HOME}/bin" if [[ "$(is-master)" == "true" ]]; then log-wrap 'InstallHurl' install-hurl log-wrap 'InstallInplace' install-inplace fi # download and source kube-env log-wrap 'DownloadKubeEnv' download-kube-env log-wrap 'SourceKubeEnv' source "${KUBE_HOME}/kube-env" log-wrap 'ConfigureCgroupMode' configure-cgroup-mode log-wrap 'DownloadKubeletConfig' download-kubelet-config "${KUBE_HOME}/kubelet-config.yaml" if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then log-wrap 'DownloadKubeMasterCerts' download-kube-master-certs fi # ensure chosen container runtime is present log-wrap 'EnsureContainerRuntime' ensure-container-runtime # binaries and kube-system manifests log-wrap 'InstallKubeBinaryConfig' install-kube-binary-config if [[ "${ENABLE_GCFS:-""}" == "true" ]]; then log-wrap 'InstallRiptide' install-riptide fi # extra node requirements log-wrap 'InstallExtraNodeRequirements' install-extra-node-requirements # download inplace component manifests if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then log-wrap 'InplaceRunOnce' retry-forever 30 inplace-run-once fi echo "Done for installing kubernetes files" log-end 'ConfigureMain'
disable-legacy-endpoints: true
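
Everything in this metadata block is what configure-sh itself downloads at boot: kube-env, kubelet-config and the other attributes are fetched from the GCE metadata server with the Metadata-Flavor: Google header, and disable-legacy-endpoints: true only removes the old v1beta1 paths, not the v1 endpoint used here. A minimal sketch of the same fetch from a shell on the node, or from any workload that can reach metadata.google.internal:

  # Enumerate the instance attributes visible to this node.
  curl -s -H "Metadata-Flavor: Google" \
    "http://metadata.google.internal/computeMetadata/v1/instance/attributes/"

  # Pull a single attribute, e.g. the kube-env that configure.sh consumes.
  curl -s -H "Metadata-Flavor: Google" \
    "http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env"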
user-data: #cloud-config write_files: - path: /etc/systemd/system/kube-node-installation.service permissions: '0644' owner: root content: | [Unit] Description=Download and install k8s binaries and configurations After=network-online.target [Service] Type=oneshot RemainAfterExit=yes ExecStartPre=/bin/mkdir -p /home/kubernetes/bin ExecStartPre=/bin/mount --bind /home/kubernetes/bin /home/kubernetes/bin ExecStartPre=/bin/mount -o remount,exec /home/kubernetes/bin ExecStartPre=/usr/bin/curl --fail --retry 5 --retry-delay 3 --silent --show-error -H "X-Google-Metadata-Request: True" -o /home/kubernetes/bin/configure.sh http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-sh ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure.sh ExecStart=/home/kubernetes/bin/configure.sh StandardOutput=journal+console [Install] WantedBy=kubernetes.target - path: /etc/systemd/system/kube-node-configuration.service permissions: '0644' owner: root content: | [Unit] Description=Configure kubernetes node After=kube-node-installation.service [Service] Type=oneshot RemainAfterExit=yes ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/configure-helper.sh ExecStart=/home/kubernetes/bin/configure-helper.sh StandardOutput=journal+console [Install] WantedBy=kubernetes.target - path: /etc/systemd/system/kube-container-runtime-monitor.service permissions: '0644' owner: root content: | [Unit] Description=Kubernetes health monitoring for container runtime After=kube-node-configuration.service [Service] Restart=always RestartSec=10 RemainAfterExit=yes RemainAfterExit=yes ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh ExecStart=/home/kubernetes/bin/health-monitor.sh container-runtime [Install] WantedBy=kubernetes.target - path: /etc/systemd/system/kubelet-monitor.service permissions: '0644' owner: root content: | [Unit] Description=Kubernetes health monitoring for kubelet After=kube-node-configuration.service [Service] Restart=always RestartSec=10 RemainAfterExit=yes RemainAfterExit=yes ExecStartPre=/bin/chmod 544 /home/kubernetes/bin/health-monitor.sh ExecStart=/home/kubernetes/bin/health-monitor.sh kubelet [Install] WantedBy=kubernetes.target - path: /etc/systemd/system/kube-logrotate.timer permissions: '0644' owner: root content: | [Unit] Description=Hourly kube-logrotate invocation [Timer] OnCalendar=hourly [Install] WantedBy=kubernetes.target - path: /etc/systemd/system/kube-logrotate.service permissions: '0644' owner: root content: | [Unit] Description=Kubernetes log rotation After=kube-node-configuration.service [Service] Type=oneshot ExecStart=-/usr/sbin/logrotate /etc/logrotate.conf [Install] WantedBy=kubernetes.target - path: /etc/systemd/system/kubernetes.target permissions: '0644' owner: root content: | [Unit] Description=Kubernetes [Install] WantedBy=multi-user.target - path: /etc/modprobe.d/sunrpc.conf permissions: '0644' owner: root # The GKE metadata server uses ports 987-989, so the sunrpc range should be restricted to be below. content: | options sunrpc max_resvport=986 runcmd: - systemctl daemon-reload - systemctl enable kube-node-installation.service - systemctl enable kube-node-configuration.service - systemctl enable kube-container-runtime-monitor.service - systemctl enable kubelet-monitor.service - systemctl enable kube-logrotate.timer - systemctl enable kube-logrotate.service - systemctl enable kubernetes.target - systemctl start kubernetes.target
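
The cloud-config above only registers systemd units; the actual work happens when kube-node-installation.service runs configure.sh and kube-node-configuration.service runs configure-helper.sh. The broken-node motd in configure-sh points at the same units, so their state can be checked directly on the node (assuming shell access):

  sudo systemctl status kube-node-installation.service kube-node-configuration.service
  # Bootstrap output also goes to the journal and the serial console (StandardOutput=journal+console).
  sudo journalctl -u kube-node-installation.service -u kube-node-configuration.service --no-pager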
kube-env: ALLOCATE_NODE_CIDRS: "true" API_SERVER_TEST_LOG_LEVEL: --v=3 AUTOSCALER_ENV_VARS: kube_reserved=cpu=1060m,memory=1019Mi,ephemeral-storage=41Gi;node_labels=cloud.google.com/gke-boot-disk=pd-balanced,cloud.google.com/gke-container-runtime=containerd,cloud.google.com/gke-cpu-scaling-level=2,cloud.google.com/gke-logging-variant=DEFAULT,cloud.google.com/gke-max-pods-per-node=110,cloud.google.com/gke-nodepool=default-pool,cloud.google.com/gke-os-distribution=cos,cloud.google.com/machine-family=e2,cloud.google.com/private-node=false;arch=amd64;os=linux;os_distribution=cos;evictionHard=memory.available=100Mi,nodefs.available=10%,nodefs.inodesFree=5%,pid.available=10% CA_CERT: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVMRENDQXBTZ0F3SUJBZ0lRZWFkVExrVjFoQ0dlOGtJUGwvdWFQekFOQmdrcWhraUc5dzBCQVFzRkFEQXYKTVMwd0t3WURWUVFERXlSaE9HVmhOR0U1TVMweFpHRmxMVFJrWWpFdFlURTFPQzB3WTJSbE1EVTJOek5oTlRVdwpJQmNOTWpNd016QTJNREV5T0RReVdoZ1BNakExTXpBeU1qWXdNakk0TkRKYU1DOHhMVEFyQmdOVkJBTVRKR0U0ClpXRTBZVGt4TFRGa1lXVXROR1JpTVMxaE1UVTRMVEJqWkdVd05UWTNNMkUxTlRDQ0FhSXdEUVlKS29aSWh2Y04KQVFFQkJRQURnZ0dQQURDQ0FZb0NnZ0dCQUpzZml2Z2F2bkQ4V1BYYm9YdGhxRGZGTzl4REF5b1M3SEwvbmNlYwpkUmVmU0lpUGhnWDZXT3VXSVZXS3ArVW14MGJQRG1pNnRCSHFaU0RTQ084ZHpvZHpuNnZ6cTlvS2ticDlZWGdUCk9meFNMRDlYWjNYQXBTcGNqS05BV20vUVczSHdCdmtzSVZSUVlHUGpIMkxGVWFBWHJPS0FqZ0FkUlBtMmJaMGUKMXkvSTU4WG90dkZiTGlDSThjNVkvWTJEMU5vY0dLRkJVRjlxWFRFRlJSTFRtUU1rVUtWOWhNQjBHWHlCZDR4dApCTCtuempaMVNzbDlkSGh4VjRHV0hkSDZ6eG03VUsvZmdTZ0xNZTRxKzl0Y1B3aC9DOFF4OWNSSWt3S080UktKCjBzTEdnVklJNW0xT0dzSUxHNXUrSWYwOHBLVThuNEIxVlhrNGFUVVBlMC9JUXQzT2tuTTJEYmtuclpTbDcwekUKc1ZOS1ZWL0RoSXViRFBycGJRRjM5VEZLY3NrL2pNdDdwaDdLb2Nrcm92dzdGS3l0VmN4VUhqR0RrYzhJcVRaZwpEL0k2UlFjN3JCaGs1VTc3UU5BZ3FxbThDZmtLMXdua29MenNwUnh4b0Y4eXFXVUlnWUlEQnNMS3F4bG1UQVkrClJuVFNHYzNsTWN6cWc1cjV0MDRxNEY1RWJ3SURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQWdRd0R3WUQKVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVVQdzRGRnE1d3NFSmVOOExVZmh2d1JETGZncEF3RFFZSgpLb1pJaHZjTkFRRUxCUUFEZ2dHQkFHcEdtaTNteHRGYWZYVGF3czQ4bjB4c2FqUEdxU2c2eVppaXhlVEViUTNOCkluV3lpTkRFSUE5V2hoZGVDM3JUelJvOUc1MmFQVGt5RHo4eGNLSG1YdmtSOVQ1cXNZQ01PZ2tLVzRNYlNCLzcKOWxRaDJPcmJpZXNsb3RzSmtGbDhqZ3RtZ2NLOENPblpmN2x2RTVaNmlIZklqaHUyWW9HMDYzd2VoTVQ3dkFxWAoxSDhaYytXdU9zOTlhUTlPUUJjQ2luejVXeUd1UUhoUUk5amgxNTdmanBQNFlRVXN1cE5jWVBJVl3F5Z3RUCjVXTW9YRU1Qc2g4aVVaaGdiajlnaitSTVFka0dHaE1lU05WTzN6eDNOOTIxRGk5TFRscHorenZNaTNGVVZ4YUsKRitiNjdRd0Y1OUVYZTJIbXhQQlYvTCs0SVVjNnBObWVCUGIrT2YxSk5ESGpveGZtSlZ3ZWFpQjBEZkpqREw4ZQpIeDJTTy9BaW4yZUVOa2FZMmVOczB0U3U2UlBlSWZrdVNicGdkK0hrKzBzbEFZdXhBczJLZGNnZEE3a0l5QlRSCjR4NkRHSVM5ZFdZbnJOeGsxWFpSbHZoRG9aYVQxSWxmMkI3bjNXVGJUUFBjRUErZEphVFRreVJ2bnZJUTJLS0EKMDlTbi80Q0xCdUhzWk0xTk1ydWwydz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K CLUSTER_IP_RANGE: 10.60.0.0/14 CLUSTER_NAME: nginx-1-cluster CNI_HASH: ba28765e2939d398dc101ff5543e62a14d132a113102b371af8da822a4dfbfb3cc5121e6cbdb349e5d9d892e66f034ba24d0b4acd2f1df942c0cd9bcc5723a CNI_SHA1: a687a7150d8201ac374b1915829c80abf0c774 CNI_STORAGE_PATH: https://storage.googleapis.com/gke-release/cni-plugins/v1.0.1-gke.4 CNI_STORAGE_URL_BASE: https://storage.googleapis.com/gke-release/cni-plugins CNI_TAR_PREFIX: cni-plugins-linux-amd64- CNI_VERSION: v1.0.1-gke.4 CONNTRACK_EXEMPT_HC_MS: "true" CONTAINER_RUNTIME: containerd CONTAINER_RUNTIME_ENDPOINT: unix:///run/containerd/containerd.sock CONTAINER_RUNTIME_NAME: containerd CONTAINERD_MAX_CONTAINER_LOG_LINE: "262144" CREATE_BOOTSTRAP_KUBECONFIG: "true" DETECT_LOCAL_MODE: NodeCIDR DNS_DOMAIN: cluster.local DNS_SERVER_IP: 10.64.0.10 DOCKER_REGISTRY_MIRROR_URL: https://mirror.gcr.io 
ELASTICSEARCH_LOGGING_REPLICAS: "1" ENABLE_CLUSTER_DNS: "true" ENABLE_CLUSTER_LOGGING: "false" ENABLE_CLUSTER_MONITORING: none ENABLE_CLUSTER_REGISTRY: "false" ENABLE_CLUSTER_UI: "true" ENABLE_L7_LOADBALANCING: glbc ENABLE_LATEST_NPD: "true" ENABLE_METADATA_AGENT: "" ENABLE_METRICS_SERVER: "true" ENABLE_NODE_LOGGING: "false" ENABLE_NODE_PROBLEM_DETECTOR: standalone ENABLE_NODE_REGISTRATION_CHECKER: "true" ENABLE_NODELOCAL_DNS: "false" ENABLE_SYSCTL_TUNING: "true" ENV_TIMESTAMP: "2023-03-06T02:28:41+00:00" EXEC_AUTH_PLUGIN_HASH: 5e3d7c0504132820d5625aac230056becc23a20a6c0040afadf123f36c6d32b73e4625640881ec3b62e76846cfc1789d036486fc78ee6cdf40666d2dfe6ba356 EXEC_AUTH_PLUGIN_LICENSE_URL: https://storage.googleapis.com/gke-prod-binaries/gke-exec-auth-plugin/d741bb92df4f39f2fb7201460f70c861d50e442c/LICENSE EXEC_AUTH_PLUGIN_SHA1: 8fa5a3a7c7229f1b22a9d5ce9ff7192cf1f02336 EXEC_AUTH_PLUGIN_URL: https://storage.googleapis.com/gke-prod-binaries/gke-exec-auth-plugin/d741bb92df4f39f2fb7201460f70c861d50e442c/linux_amd64/gke-exec-auth-plugin EXTRA_DOCKER_OPTS: --insecure-registry 10.0.0.0/8 EXTRA_POD_SYSCTLS: net.ipv6.conf.all.disable_ipv6=1,net.ipv6.conf.default.disable_ipv6=1 FEATURE_GATES: DynamicKubeletConfig=false,InTreePluginAWSUnregister=true,InTreePluginAzureDiskUnregister=true,InTreePluginOpenStackUnregister=true,InTreePluginvSphereUnregister=true,RotateKubeletServerCertificate=true,ExecProbeTimeout=false,CSIMigrationGCE=false FLUENTD_CONTAINER_RUNTIME_SERVICE: containerd HEAPSTER_USE_NEW_STACKDRIVER_RESOURCES: "true" HEAPSTER_USE_OLD_STACKDRIVER_RESOURCES: "false" HPA_USE_REST_CLIENTS: "true" INSTANCE_PREFIX: gke-nginx-1-cluster-561e0195 KUBE_ADDON_REGISTRY: k8s.gcr.io KUBE_CLUSTER_DNS: 10.64.0.10 KUBE_DOCKER_REGISTRY: gke.gcr.io KUBE_MANIFESTS_TAR_HASH: fbc9a61dc4be8e83437923e0c75edd4fd676fb4508ccd3dcf584b35bdca529153fbae38ad43cbf9e08cb483ce9a25ebd62250b836aa3e2a6962869bce1d4b71f KUBE_MANIFESTS_TAR_URL: https://storage.googleapis.com/gke-release/kubernetes/release/v1.24.9-gke.3200/kubernetes-manifests.tar.gz,https://storage.googleapis.com/gke-release-eu/kubernetes/release/v1.24.9-gke.3200/kubernetes-manifests.tar.gz,https://storage.googleapis.com/gke-release-asia/kubernetes/release/v1.24.9-gke.3200/kubernetes-manifests.tar.gz KUBE_PROXY_TOKEN: L_j9KzhoIUL-xo4RDs0bthUBEI203bQbfbW5miP2Afo= KUBELET_ARGS: --v=2 --cloud-provider=gce --experimental-mounter-path=/home/kubernetes/containerized_mounter/mounter --cert-dir=/var/lib/kubelet/pki/ --kubeconfig=/var/lib/kubelet/kubeconfig --max-pods=110 --volume-plugin-dir=/home/kubernetes/flexvolume --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig --node-status-max-images=25 --container-runtime=remote --container-runtime-endpoint=unix:///run/containerd/containerd.sock --runtime-cgroups=/system.slice/containerd.service --registry-qps=10 --registry-burst=20 KUBELET_CERT: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURvakNDQWdxZ0F3SUJBZ0lSQVBDZXc1ek8vbzNtblpTbTZpUktTKzB3RFFZSktvWklodmNOQVFFTEJRQXcKTHpFdE1Dc0dBMVVFQXhNa1lUaGxZVFJoT1RFdE1XUmhaUzAwWkdJeExXRXhOVGd0TUdOa1pUQTFOamN6WVRVMQpNQjRYRFRJek1ETXdOakF5TWpZME1sb1hEVEk0TURNd05EQXlNamcwTWxvd0VqRVFNQTRHQTFVRUF4TUhhM1ZpClpXeGxkRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMdi9zaGxZZjMzYWlhOHIKN3dsUWpJQTFJTWl2bFdNem9qSzJvbTlkTnV2TzhObGpzaUFjc2ZEZXNnTk9XWExyeU51ejk4SG5SUFFOV3FnSwpEN01OMDJGZ1ZXQzBWUTBhTGpRRERrUzUwOGx6SFV2NnVDb1ErY2dGMkhPS0JIU3N3TWNzNklVOGhZQlpUWWpYClZIYVNvdTVVbFA0T0VxOFFJN1NsVFZka0FMTnpDdDh5QTR6YzkrM0l5TSs1SlJuc0ZGeVpPRzBYWmNtTTdJeWgKV1hJVzRvMDhLVTFidGNEaG1IeWlPRVlMMTlENTVOOVEvTzVXejQxKzlVSlFxeVFKNEZ0QjYwcFkxK1FFSXhaQgo1cDliUEZ0WmlRMDBVOUM0cnJ5Wlp5dRcDQ5OEJEN1d3SjRlL0IzZ2s3Tm1lN0xVbWUzNmFqUUtESm80UmErCjJ5RVZpVThDQXdFQUFhTldNRlF3RGdZRFZSMFBBUUgvQkFRREFnV2dNQk1HQTFVZEpRUU1NQW9HQ0NzR0FRVUYKQndNQ01Bd0dBMVVkRXdFQi93UUNNQUF3SHdZRFZSMGpCQmd3Rm9BVVB3NEZGcTV3c0VKZU44TFVmaHZ3UkRMZgpncEF3RFFZSktvWklodmNOQVFFTEJRQURnZ0dCQUR3bmx1M2p5S1RyYmpYeWVLeUlIcmVJaEdPOGhOK0c5SnNWCmhPRFRpZGcrN2J3UHRUR05LUnV4VFlyUWxRbE81Tk9kOXU3OTMyRjU5aHBab25oR3l5N0JCOEw2UXJiKzhCb2MKeE96UWxmblh3VUJpVks2YXcwQ3NXN21zRjlVSVRmN3FrMkVmODJuQ0U2QjBaaVdSN2pWb3U0R01Uelo2ZUUzUwpaKzhmTDNpdGtRK01TUUNEc2Uva1J6NHBtYTF0TUdWN250YytvbUZNMDBld0swVHhXT2Mzek5RQnoyWk9JcXZsCnFQNjRmM0hmNm0xaUJ0NWt3d3RBMS9Dckh0clJ1RWZXdCtQL1VWSmJYTnEzOXFXeEJRRmRYajhpYzh5WUdSeWsKTWpNU2c2WFhySTNkV2tsTHhicjFaRzR2M3RKNm9FLzZMK2Q0NjJ3VHFjc2RsM04zNjdFK3NaZWNoOFVnRXN0UQp0UHFqT2xqYXVNTHJ5Smt3dThHUUNGNmgyanRGY3oxME02VTNNNTJYTDZuZDlFYWFqZlhLam1pS0dFdW5FUHNTCmNNajZOU3RMM3BOM0UwRFI2ZUhaQW5Hd1dHcWNSVHFjMHJDaWFRSC8rTHRzNUorUHdYRFFpVUFqUUhvZVFNN1YKYTJOeFIvS2xLNmthQXA0MjVyWC9LSnk4cTgrTGFBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= KUBELET_KEY: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBdS8reUdWaC9mZHFKcnl2dkNWQ01nRFVneUsrVll6T2lNcmFpYjEwMjY4N3cyV095CklCeXg4TjZ5QTA1WmN1dkkyN1Azd2VkRTlBMWFxQW9Qc3czVFlXQlZZTFJWRFJvdU5BTU9STG5UeVhNZFMvcTQKS2hENXlBWFljNG9FZEt6QXh5em9oVHlGZ0ZsTmlOZFVkcEtpN2xTVS9nNFNyeEFqdEtWTlYyUUFzM01LM3pJRApqTnozN2NqSXo3a2xHZXdVWEprNGJSZGx5WXpzaktGWmNoYmlqVHdwVFZ1MXdPR1lmS0k0Umd2WDBQbmszMUQ4CjdsYlBqWDcxUWxDckpBbmdXMEhyU2xqWDVBUWpGa0htbjFzOFcxbUpEVFJUMExpdXZKbG5MUHRDbmozd0VQdGIKQW5oNzhIZUNUczJaN3N0U1o3ZnBxTkFvTW1qaEZyN2JJUldKVHdJREFRQUJBb0lCQUVyeW1mN3V3QXpkWmN4MgorRm0vU2ZRR3VpWW5rYWUxdXZENnpIRVhPVUR6QkNPMlVobnBOcXN2TE1xU25xaHBCVzlKSlBtaGlHUy9PaWdKCnNzNlFNWGY5bnF2cDJJcGJxSlhGcmhlSitidXk3N2dOb1FQc05wZ2VkSGREUzVsMEVYcVdGYkNnT3h5Ni9XVVQKYXpIR0xieFQ3ZjRHZ0Q1WWNkT2ZudUM3Rm1oelFwK3hWUm9kSHcwdlluWllLY0VBSGpTajluazMzSURnTGRSeApmUjlVKzFjekRvaGZra1huY2ZLSFVVMSs0N0VMcmZOWEVzclIwbmVNYytBZHlGVTcydFB3R09rOVB5NTVnR0hpCkQ5b0w3RXkyaFl3UUlMcTdJRllwdXlLa0NNcGxLam0ycHVzUmlNZFd4TE16ZnY4TS9ZUkkvU0ZJYW1UcmsrTFoKYysxd0kwa0NnWUVBNGhkODh2VE5zNG0vZW9LUmd0Q1VXM2QrdVNJT2hKcXFqdVorK1pQUGlkU0lQM0xuZjhYbwovSWZudVhCc243UkE5RU1sN08vYUI5RFF3U1lTQUhOWXc0Zk5lTmtjcnJuQ1gvQUdtLzlSRzB5dUYwb1ZsQVJUCkRtNDRuTmVGdElhZTJWY2tZMVBLKy94QXc1RjE1ZGIwZnJMb1RUb3Y4akhXTVJNVmNrNXpUT3NDZ1lFQTFONDAKWUVUeXhhZ1p6MFRvaUM2blEwVFB3N2o4NDZVa21HbThCcGtJZStMRFVhdjlTdDJid0pLTUNKbXNrb3FSUkVJRApqZkZBOHZRanZWNTVqTk1wRUEwUnp5R0JOdzFPV0Q4RzJBWDBMWFNwY1dkYVBKZERJckx5ZFdzTmo4RDNQT2s4Ck9nZUJMd0x4ODJ6akN3Y3NJcGwxZ0h3WDEycUFxNTdZQXlnaURDMENnWUVBdThnR1gwUTBGUW96bnUwVExqTlAKQlNrYWQybHg1WEVRZDkrNVZsRU43c3cyM1h2eUNvMjJGUWhMemhreUh2TDNmK2pGZHl1VYNTk0KzVheFVVSQpWRjVXcWlpcFNmVy9HeStnVGJDaERLdURiSlI3bDVSLzUxeGthT0FZalBmQ3Q0RkpGN1pnd1NtaUlNeVB2M1hwClc5SDhYeWdVNm5PNkVzaG01Nm1BMmtFQ2dZRUFxdUNKQ1BLd00rMENjdmtzOVEwR1ppQ1o2OUxXNXVNTm9tL1cKYVRGM
XR3Vk1wNFk1K1Z3MkNIdGJnQmZraVdwRXZKZUZoTzh5cFJWSWoxNkVrb0I3eVEvRXlSSnJTTWNjcWdoRwpOU2xxWHdiMGNqMTJHWWVQZEE1VVJkK2U3bFFINUZTU2JUN1MrK3NRVU1tSzVMSm9Rb29QNGdrN2xrZy9iQVZDCkFCNjZYaTBDZ1lFQTRhc0VleG1Hc2tlcXlPdFp3WlNMekduUzBVSUk1dzBONGpmK2xMQ2p3OU5GWU9Bbjh6NTMKa0dSVmZXREtVcy9XQzM1Y1BPa3hXbGtDcnZMUXZVcXZSUk9weTRzUDFoMWo5bStFVUJOQzZDb0wvakh0ZW8xaApxUm1ndkc0N1JWN3k0Y3dGd21ac2crbjRrbVU1UWd4Qk9zSElSdzJML1ZKWlByalU2cUFERmNZPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= KUBERNETES_MASTER: "false" KUBERNETES_MASTER_NAME: 10.128.0.3 LOAD_IMAGE_COMMAND: ctr -n=k8s.io images import LOGGING_DESTINATION: "" LOGGING_STACKDRIVER_RESOURCE_TYPES: "" MONITORING_FLAG_SET: "true" NETWORK_PROVIDER: kubenet NODE_LOCAL_SSDS_EXT: "" NODE_PROBLEM_DETECTOR_TOKEN: _9vHM3B5-gJfh9mZi5jYreFmSbG-II7EnSMN8gHS0KA= NON_MASQUERADE_CIDR: 0.0.0.0/0 REMOUNT_VOLUME_PLUGIN_DIR: "true" REQUIRE_METADATA_KUBELET_CONFIG_FILE: "true" SALT_TAR_HASH: "" SALT_TAR_URL: https://storage.googleapis.com/gke-release/kubernetes/release/v1.24.9-gke.3200/kubernetes-salt.tar.gz,https://storage.googleapis.com/gke-release-eu/kubernetes/release/v1.24.9-gke.3200/kubernetes-salt.tar.gz,https://storage.googleapis.com/gke-release-asia/kubernetes/release/v1.24.9-gke.3200/kubernetes-salt.tar.gz SERVER_BINARY_TAR_HASH: 24196c1780bdca80878000fd0130609e1c48196823c4281b175cb8f1b7a13880a2317f651cd7b904e4816cc6b9654c063a19db0b06df31a40fa617631f5a7cc4 SERVER_BINARY_TAR_URL: https://storage.googleapis.com/gke-release/kubernetes/release/v1.24.9-gke.3200/kubernetes-server-linux-amd64.tar.gz,https://storage.googleapis.com/gke-release-eu/kubernetes/release/v1.24.9-gke.3200/kubernetes-server-linux-amd64.tar.gz,https://storage.googleapis.com/gke-release-asia/kubernetes/release/v1.24.9-gke.3200/kubernetes-server-linux-amd64.tar.gz SERVICE_CLUSTER_IP_RANGE: 10.64.0.0/20 STACKDRIVER_ENDPOINT: https://logging.googleapis.com SYSCTL_OVERRIDES: "" VOLUME_PLUGIN_DIR: /home/kubernetes/flexvolume ZONE: us-central1-a
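
CA_CERT, KUBELET_CERT and KUBELET_KEY in the kube-env above are plain base64-encoded PEM blobs, so they can be inspected offline. A sketch, assuming the kube-env value has been saved locally as kube-env.yaml with each key on a single line (as the metadata server returns it) and that openssl is available:

  # Decode and inspect the cluster CA certificate.
  grep '^CA_CERT:' kube-env.yaml | awk '{print $2}' | base64 -d | openssl x509 -noout -subject -issuer -dates

  # The node's bootstrap client certificate decodes the same way.
  grep '^KUBELET_CERT:' kube-env.yaml | awk '{print $2}' | base64 -d | openssl x509 -noout -subject -dates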
cluster-uid: 561e01956f0b44689eda892e0b6d710df97e92f220994c3eae212bda90e27f99
cluster-location: us-central1-a
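
The created-by entry earlier points at the managed instance group behind the default node pool, and cluster-location gives its zone, so the group's current membership can be listed directly (a sketch, assuming gcloud access to the owning project):

  gcloud compute instance-groups managed list-instances \
      gke-nginx-1-cluster-default-pool-2c2560d6-grp \
      --zone us-central1-a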