Kubeadm: рддреНрд░реБрдЯрд┐ рдирд┐рд╖реНрдкрд╛рджрди рдЪрд░рдг рдЕрдкрд▓реЛрдб-рдХреЙрдиреНрдлрд┐рдЧ / рдХреНрдпреВрдмрд▓реЗрдЯ: рдХрдВрдЯреНрд░реЛрд▓-рдкреНрд▓реЗрди рдиреЛрдб рдХреЗ рд▓рд┐рдП рдХреНрд░рд┐рд╕реНрдХреЗрдЯ рдЬрд╛рдирдХрд╛рд░реА рд▓рд┐рдЦрдиреЗ рдореЗрдВ рддреНрд░реБрдЯрд┐: рд╕реНрдерд┐рддрд┐ рдХреЗ рдЗрдВрддрдЬрд╛рд░ рдореЗрдВ рд╕рдордп рд╕рдорд╛рдкреНрдд

рдирд┐рд░реНрдорд┐рдд: 7 рдорд╛рд░реНрдЪ 2019  ┬╖  18 рдЯрд┐рдкреНрдкрдгрд┐рдпрд╛рдБ  ┬╖  рд╕реНрд░реЛрдд: kubernetes/kubeadm

рдирдорд╕реНрддреЗ, рдореИрдВ kubeadm рдЖрдЬрд╝рдорд╛ рд░рд╣рд╛ рд╣реВрдБ рдФрд░ HA рдХреНрд▓рд╕реНрдЯрд░ рд╕реНрдерд╛рдкрд┐рдд рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдЖрдзрд┐рдХрд╛рд░рд┐рдХ рджрд╕реНрддрд╛рд╡реЗрдЬрд╝реЛрдВ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░ рд░рд╣рд╛ рд╣реВрдБред рдореИрдВ рдПрдХ etcd рдХреНрд▓рд╕реНрдЯрд░ рдмрдирд╛рдиреЗ рдореЗрдВ рдХрд╛рдордпрд╛рдм рд░рд╣рд╛ рд╣реВрдБ, рд▓реЗрдХрд┐рди рдкрд╣рд▓реЗ рдорд╛рд╕реНрдЯрд░ рдиреЛрдб рдкрд░ init рдЪрд░рдг рдирд┐рдореНрди рддреНрд░реБрдЯрд┐ рдХреЗ рд╕рд╛рде рд╡рд┐рдлрд▓ рд╣реЛ рд░рд╣рд╛ рд╣реИ:

error execution phase upload-config/kubelet: Error writing Crisocket information for the control-plane node: timed out waiting for the condition

рдЗрд╕реЗ рджрд░реНрдЬ рдХрд░рдиреЗ рд╕реЗ рдкрд╣рд▓реЗ рдЖрдкрдиреЗ kubeadm рдХреЗ рдореМрдЬреВрджрд╛ рдореБрджреНрджреЛрдВ (issues) рдореЗрдВ рдХреМрди рд╕реЗ рдХреАрд╡рд░реНрдб рдЦреЛрдЬреЗ рдереЗ?

error execution phase upload-config/kubelet

рдФрд░ рдпреЗ рдореБрджреНрджреЗ рдорд┐рд▓реЗ:

1382

1227

рдХреНрдпрд╛ рдпрд╣ рдмрдЧ рд░рд┐рдкреЛрд░реНрдЯ рд╣реИ рдпрд╛ рдлрд╝реАрдЪрд░ рдЕрдиреБрд░реЛрдз?

рдмрдЧ рд░рд┐рдкреЛрд░реНрдЯ

рд╕рдВрд╕реНрдХрд░рдг

root@k8s-c3-m1:~# lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description:    Ubuntu 16.04.3 LTS
Release:        16.04
Codename:       xenial

root@k8s-c3-m1:~# uname -a
Linux k8s-c3-m1 4.4.0-112-generic #135-Ubuntu SMP Fri Jan 19 11:48:36 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux

root@k8s-c3-m1:~# docker --version
Docker version 18.06.1-ce, build e68fc7a
root@k8s-c3-m1:~#

root@k8s-c3-m1:~# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.4", GitCommit:"c27b913fddd1a6c480c229191a087698aa92f0b1", GitTreeState:"clean", BuildDate:"2019-02-28T13:35:32Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}

root@k8s-c3-m1:~# kubectl version
Client Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.4", GitCommit:"c27b913fddd1a6c480c229191a087698aa92f0b1", GitTreeState:"clean", BuildDate:"2019-02-28T13:37:52Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}
The connection to the server localhost:8080 was refused - did you specify the right host or port?

рдиреЛрдЯ: Docker рд╕рдВрд╕реНрдХрд░рдг 17.03.3-ce рдФрд░ 18.09.3-ce рднреА рдЖрдЬрд╝рдорд╛рдП рдЧрдП

рдореИрдВ рдЗрд╕реЗ VMware рдкрд░ рдПрдХ VM рдореЗрдВ рдЪрд▓рд╛ рд░рд╣рд╛ рд╣реВрдБред

рдХреНрдпрд╛ рд╣реБрдЖ?

root@k8s-c3-m1:~# kubeadm init --config /root/kubeadmcfg.yaml -v 256
I0304 14:52:28.103162    1391 initconfiguration.go:169] loading configuration from the given file
I0304 14:52:28.107089    1391 interface.go:384] Looking for default routes with IPv4 addresses
I0304 14:52:28.107141    1391 interface.go:389] Default route transits interface "eth0"
I0304 14:52:28.107440    1391 interface.go:196] Interface eth0 is up
I0304 14:52:28.107587    1391 interface.go:244] Interface "eth0" has 1 addresses :[10.10.10.93/24].
I0304 14:52:28.107695    1391 interface.go:211] Checking addr  10.10.10.93/24.
I0304 14:52:28.107724    1391 interface.go:218] IP found 10.10.10.93
I0304 14:52:28.107759    1391 interface.go:250] Found valid IPv4 address 10.10.10.93 for interface "eth0".
I0304 14:52:28.107791    1391 interface.go:395] Found active IP 10.10.10.93 
I0304 14:52:28.107979    1391 version.go:163] fetching Kubernetes version from URL: https://dl.k8s.io/release/stable.txt
I0304 14:52:29.493555    1391 feature_gate.go:206] feature gates: &{map[]}
[init] Using Kubernetes version: v1.13.4
[preflight] Running pre-flight checks
I0304 14:52:29.494477    1391 checks.go:572] validating Kubernetes and kubeadm version
I0304 14:52:29.494609    1391 checks.go:171] validating if the firewall is enabled and active
I0304 14:52:29.506263    1391 checks.go:208] validating availability of port 6443
I0304 14:52:29.506767    1391 checks.go:208] validating availability of port 10251
I0304 14:52:29.507110    1391 checks.go:208] validating availability of port 10252
I0304 14:52:29.507454    1391 checks.go:283] validating the existence of file /etc/kubernetes/manifests/kube-apiserver.yaml
I0304 14:52:29.507728    1391 checks.go:283] validating the existence of file /etc/kubernetes/manifests/kube-controller-manager.yaml
I0304 14:52:29.507959    1391 checks.go:283] validating the existence of file /etc/kubernetes/manifests/kube-scheduler.yaml
I0304 14:52:29.508140    1391 checks.go:283] validating the existence of file /etc/kubernetes/manifests/etcd.yaml
I0304 14:52:29.508316    1391 checks.go:430] validating if the connectivity type is via proxy or direct
I0304 14:52:29.508504    1391 checks.go:466] validating http connectivity to first IP address in the CIDR
I0304 14:52:29.508798    1391 checks.go:466] validating http connectivity to first IP address in the CIDR
I0304 14:52:29.509053    1391 checks.go:104] validating the container runtime
I0304 14:52:29.749661    1391 checks.go:130] validating if the service is enabled and active
I0304 14:52:29.778962    1391 checks.go:332] validating the contents of file /proc/sys/net/bridge/bridge-nf-call-iptables
I0304 14:52:29.779324    1391 checks.go:332] validating the contents of file /proc/sys/net/ipv4/ip_forward
I0304 14:52:29.779573    1391 checks.go:644] validating whether swap is enabled or not
I0304 14:52:29.779818    1391 checks.go:373] validating the presence of executable ip
I0304 14:52:29.780044    1391 checks.go:373] validating the presence of executable iptables
I0304 14:52:29.780251    1391 checks.go:373] validating the presence of executable mount
I0304 14:52:29.780465    1391 checks.go:373] validating the presence of executable nsenter
I0304 14:52:29.780674    1391 checks.go:373] validating the presence of executable ebtables
I0304 14:52:29.780925    1391 checks.go:373] validating the presence of executable ethtool
I0304 14:52:29.781018    1391 checks.go:373] validating the presence of executable socat
I0304 14:52:29.781221    1391 checks.go:373] validating the presence of executable tc
I0304 14:52:29.781415    1391 checks.go:373] validating the presence of executable touch
I0304 14:52:29.781647    1391 checks.go:515] running all checks
I0304 14:52:29.838382    1391 checks.go:403] checking whether the given node name is reachable using net.LookupHost
I0304 14:52:29.838876    1391 checks.go:613] validating kubelet version
I0304 14:52:29.983771    1391 checks.go:130] validating if the service is enabled and active
I0304 14:52:30.011507    1391 checks.go:208] validating availability of port 10250
I0304 14:52:30.011951    1391 checks.go:307] validating the existence of file /etc/kubernetes/pki/etcd/ca.crt
I0304 14:52:30.012301    1391 checks.go:307] validating the existence of file /etc/kubernetes/pki/apiserver-etcd-client.crt
I0304 14:52:30.012360    1391 checks.go:307] validating the existence of file /etc/kubernetes/pki/apiserver-etcd-client.key
I0304 14:52:30.012408    1391 checks.go:685] validating the external etcd version
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
I0304 14:52:30.238175    1391 checks.go:833] image exists: k8s.gcr.io/kube-apiserver:v1.13.4
I0304 14:52:30.378446    1391 checks.go:833] image exists: k8s.gcr.io/kube-controller-manager:v1.13.4
I0304 14:52:30.560185    1391 checks.go:833] image exists: k8s.gcr.io/kube-scheduler:v1.13.4
I0304 14:52:30.745876    1391 checks.go:833] image exists: k8s.gcr.io/kube-proxy:v1.13.4
I0304 14:52:30.930200    1391 checks.go:833] image exists: k8s.gcr.io/pause:3.1
I0304 14:52:31.096902    1391 checks.go:833] image exists: k8s.gcr.io/coredns:1.2.6
I0304 14:52:31.097108    1391 kubelet.go:71] Stopping the kubelet
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I0304 14:52:31.256217    1391 kubelet.go:89] Starting the kubelet
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
I0304 14:52:31.530165    1391 certs.go:113] creating a new certificate authority for ca
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-c3-m1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.10.10.93 10.10.10.76 127.0.0.1 10.10.10.90 10.10.10.91 10.10.10.92 10.10.10.76]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Using existing etcd/ca certificate authority
[certs] External etcd mode: Skipping etcd/server certificate authority generation
[certs] External etcd mode: Skipping etcd/peer certificate authority generation
[certs] External etcd mode: Skipping etcd/healthcheck-client certificate authority generation
[certs] Using existing apiserver-etcd-client certificate and key on disk
I0304 14:52:33.267470    1391 certs.go:113] creating a new certificate authority for front-proxy-ca
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
I0304 14:52:33.995630    1391 certs.go:72] creating a new public/private key files for signing service account users
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
I0304 14:52:34.708619    1391 kubeconfig.go:92] creating kubeconfig file for admin.conf
[kubeconfig] Writing "admin.conf" kubeconfig file
I0304 14:52:35.249743    1391 kubeconfig.go:92] creating kubeconfig file for kubelet.conf
[kubeconfig] Writing "kubelet.conf" kubeconfig file
I0304 14:52:35.798270    1391 kubeconfig.go:92] creating kubeconfig file for controller-manager.conf
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
I0304 14:52:36.159920    1391 kubeconfig.go:92] creating kubeconfig file for scheduler.conf
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
I0304 14:52:36.689060    1391 manifests.go:97] [control-plane] getting StaticPodSpecs
I0304 14:52:36.701499    1391 manifests.go:113] [control-plane] wrote static Pod manifest for component "kube-apiserver" to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
I0304 14:52:36.701545    1391 manifests.go:97] [control-plane] getting StaticPodSpecs
I0304 14:52:36.703214    1391 manifests.go:113] [control-plane] wrote static Pod manifest for component "kube-controller-manager" to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[control-plane] Creating static Pod manifest for "kube-scheduler"
I0304 14:52:36.703259    1391 manifests.go:97] [control-plane] getting StaticPodSpecs
I0304 14:52:36.704327    1391 manifests.go:113] [control-plane] wrote static Pod manifest for component "kube-scheduler" to "/etc/kubernetes/manifests/kube-scheduler.yaml"
I0304 14:52:36.704356    1391 etcd.go:97] [etcd] External etcd mode. Skipping the creation of a manifest for local etcd
I0304 14:52:36.704377    1391 waitcontrolplane.go:89] [wait-control-plane] Waiting for the API server to be healthy
I0304 14:52:36.705892    1391 loader.go:359] Config loaded from file /etc/kubernetes/admin.conf
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
I0304 14:52:36.707216    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:36.711008    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s  in 3 milliseconds
I0304 14:52:36.711030    1391 round_trippers.go:444] Response Headers:
I0304 14:52:36.711077    1391 request.go:779] Got a Retry-After 1s response for attempt 1 to https://10.10.10.76:6443/healthz?timeout=32s
I0304 14:52:37.711365    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:37.715841    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s  in 4 milliseconds
I0304 14:52:37.715880    1391 round_trippers.go:444] Response Headers:
I0304 14:52:37.715930    1391 request.go:779] Got a Retry-After 1s response for attempt 2 to https://10.10.10.76:6443/healthz?timeout=32s
I0304 14:52:38.716182    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:38.717826    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s  in 1 milliseconds
I0304 14:52:38.717850    1391 round_trippers.go:444] Response Headers:
I0304 14:52:38.717897    1391 request.go:779] Got a Retry-After 1s response for attempt 3 to https://10.10.10.76:6443/healthz?timeout=32s
I0304 14:52:39.718135    1391 round_trippers.go:419] curl -k -v -XGET  -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" -H "Accept: application/json, */*" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:39.719946    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s  in 1 milliseconds
I0304 14:52:39.719972    1391 round_trippers.go:444] Response Headers:
I0304 14:52:39.720022    1391 request.go:779] Got a Retry-After 1s response for attempt 4 to https://10.10.10.76:6443/healthz?timeout=32s
I0304 14:52:40.720273    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:40.722069    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s  in 1 milliseconds
I0304 14:52:40.722093    1391 round_trippers.go:444] Response Headers:
I0304 14:52:40.722136    1391 request.go:779] Got a Retry-After 1s response for attempt 5 to https://10.10.10.76:6443/healthz?timeout=32s
I0304 14:52:41.722440    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:41.724033    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s  in 1 milliseconds
I0304 14:52:41.724058    1391 round_trippers.go:444] Response Headers:
I0304 14:52:41.724103    1391 request.go:779] Got a Retry-After 1s response for attempt 6 to https://10.10.10.76:6443/healthz?timeout=32s
I0304 14:52:42.724350    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:52.725613    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s  in 10001 milliseconds
I0304 14:52:52.725683    1391 round_trippers.go:444] Response Headers:
I0304 14:52:53.226097    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:53.720051    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s 500 Internal Server Error in 493 milliseconds
I0304 14:52:53.720090    1391 round_trippers.go:444] Response Headers:
I0304 14:52:53.720103    1391 round_trippers.go:447]     Content-Type: text/plain; charset=utf-8
I0304 14:52:53.720115    1391 round_trippers.go:447]     X-Content-Type-Options: nosniff
I0304 14:52:53.720125    1391 round_trippers.go:447]     Content-Length: 879
I0304 14:52:53.720135    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:53 GMT
I0304 14:52:53.720197    1391 request.go:942] Response Body: [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
[-]poststarthook/start-kube-apiserver-admission-initializer failed: reason withheld
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[-]autoregister-completion failed: reason withheld
healthz check failed
I0304 14:52:53.726022    1391 round_trippers.go:419] curl -k -v -XGET  -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" -H "Accept: application/json, */*" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:53.739616    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s 500 Internal Server Error in 13 milliseconds
I0304 14:52:53.739690    1391 round_trippers.go:444] Response Headers:
I0304 14:52:53.739705    1391 round_trippers.go:447]     X-Content-Type-Options: nosniff
I0304 14:52:53.739717    1391 round_trippers.go:447]     Content-Length: 858
I0304 14:52:53.740058    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:53 GMT
I0304 14:52:53.740083    1391 round_trippers.go:447]     Content-Type: text/plain; charset=utf-8
I0304 14:52:53.740342    1391 request.go:942] Response Body: [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[-]poststarthook/bootstrap-controller failed: reason withheld
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
[-]poststarthook/start-kube-apiserver-admission-initializer failed: reason withheld
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
healthz check failed
I0304 14:52:54.226068    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:54.232126    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s 500 Internal Server Error in 6 milliseconds
I0304 14:52:54.232149    1391 round_trippers.go:444] Response Headers:
I0304 14:52:54.232161    1391 round_trippers.go:447]     Content-Type: text/plain; charset=utf-8
I0304 14:52:54.232172    1391 round_trippers.go:447]     X-Content-Type-Options: nosniff
I0304 14:52:54.232182    1391 round_trippers.go:447]     Content-Length: 816
I0304 14:52:54.232192    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:54 GMT
I0304 14:52:54.232234    1391 request.go:942] Response Body: [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
[-]poststarthook/ca-registration failed: reason withheld
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
healthz check failed
I0304 14:52:54.726154    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:54.734050    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s 500 Internal Server Error in 7 milliseconds
I0304 14:52:54.734091    1391 round_trippers.go:444] Response Headers:
I0304 14:52:54.734111    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:54 GMT
I0304 14:52:54.734129    1391 round_trippers.go:447]     Content-Type: text/plain; charset=utf-8
I0304 14:52:54.734146    1391 round_trippers.go:447]     X-Content-Type-Options: nosniff
I0304 14:52:54.734163    1391 round_trippers.go:447]     Content-Length: 774
I0304 14:52:54.734250    1391 request.go:942] Response Body: [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
healthz check failed
I0304 14:52:55.226158    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:55.231693    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s 500 Internal Server Error in 5 milliseconds
I0304 14:52:55.231734    1391 round_trippers.go:444] Response Headers:
I0304 14:52:55.231754    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:55 GMT
I0304 14:52:55.231772    1391 round_trippers.go:447]     Content-Type: text/plain; charset=utf-8
I0304 14:52:55.231789    1391 round_trippers.go:447]     X-Content-Type-Options: nosniff
I0304 14:52:55.231805    1391 round_trippers.go:447]     Content-Length: 774
I0304 14:52:55.231998    1391 request.go:942] Response Body: [+]ping ok
[+]log ok
[+]etcd ok
[+]poststarthook/generic-apiserver-start-informers ok
[+]poststarthook/start-apiextensions-informers ok
[+]poststarthook/start-apiextensions-controllers ok
[+]poststarthook/bootstrap-controller ok
[-]poststarthook/rbac/bootstrap-roles failed: reason withheld
[+]poststarthook/scheduling/bootstrap-system-priority-classes ok
[+]poststarthook/ca-registration ok
[+]poststarthook/start-kube-apiserver-admission-initializer ok
[+]poststarthook/start-kube-aggregator-informers ok
[+]poststarthook/apiservice-registration-controller ok
[+]poststarthook/apiservice-status-available-controller ok
[+]poststarthook/apiservice-openapi-controller ok
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
healthz check failed
I0304 14:52:55.726404    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/healthz?timeout=32s'
I0304 14:52:55.733705    1391 round_trippers.go:438] GET https://10.10.10.76:6443/healthz?timeout=32s 200 OK in 7 milliseconds
I0304 14:52:55.733746    1391 round_trippers.go:444] Response Headers:
I0304 14:52:55.733766    1391 round_trippers.go:447]     Content-Type: text/plain; charset=utf-8
I0304 14:52:55.733792    1391 round_trippers.go:447]     Content-Length: 2
I0304 14:52:55.733809    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:55 GMT
I0304 14:52:55.733888    1391 request.go:942] Response Body: ok
[apiclient] All control plane components are healthy after 19.026898 seconds
I0304 14:52:55.736342    1391 loader.go:359] Config loaded from file /etc/kubernetes/admin.conf
I0304 14:52:55.738400    1391 uploadconfig.go:114] [upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I0304 14:52:55.741686    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config'
I0304 14:52:55.751480    1391 round_trippers.go:438] GET https://10.10.10.76:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config 200 OK in 9 milliseconds
I0304 14:52:55.751978    1391 round_trippers.go:444] Response Headers:
I0304 14:52:55.752324    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:55 GMT
I0304 14:52:55.752367    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:55.752586    1391 round_trippers.go:447]     Content-Length: 1423
I0304 14:52:55.752696    1391 request.go:942] Response Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"kubeadm-config","namespace":"kube-system","selfLink":"/api/v1/namespaces/kube-system/configmaps/kubeadm-config","uid":"519f6c23-3e69-11e9-8dd7-0050569c544c","resourceVersion":"13121","creationTimestamp":"2019-03-04T10:36:07Z"},"data":{"ClusterConfiguration":"apiServer:\n  certSANs:\n  - 127.0.0.1\n  - 10.10.10.90\n  - 10.10.10.91\n  - 10.10.10.92\n  - 10.10.10.76\n  extraArgs:\n    authorization-mode: Node,RBAC\n  timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io/v1beta1\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: 10.10.10.76:6443\ncontrollerManager: {}\ndns:\n  type: CoreDNS\netcd:\n  external:\n    caFile: /etc/kubernetes/pki/etcd/ca.crt\n    certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt\n    endpoints:\n    - https://10.10.10.90:2379\n    - https://10.10.10.91:2379\n    - https://10.10.10.92:2379\n    keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key\nimageRepository: k8s.gcr.io\nkind: ClusterConfiguration\nkubernetesVersion: v1.13.4\nnetworking:\n  dnsDomain: cluster.local\n  podSubnet: \"\"\n  serviceSubnet: 10.96.0.0/12\nscheduler: {}\n","ClusterStatus":"apiEndpoints:\n  k8s-c3-m1:\n    advertiseAddress: 10.10.10.93\n    bindPort: 6443\n  k8s-c3-m2:\n    advertiseAddress: 10.10.10.94\n    bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta1\nkind: ClusterStatus\n"}}
I0304 14:52:55.756813    1391 request.go:942] Request Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"kubeadm-config","namespace":"kube-system","creationTimestamp":null},"data":{"ClusterConfiguration":"apiServer:\n  certSANs:\n  - 127.0.0.1\n  - 10.10.10.90\n  - 10.10.10.91\n  - 10.10.10.92\n  - 10.10.10.76\n  extraArgs:\n    authorization-mode: Node,RBAC\n  timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io/v1beta1\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: 10.10.10.76:6443\ncontrollerManager: {}\ndns:\n  type: CoreDNS\netcd:\n  external:\n    caFile: /etc/kubernetes/pki/etcd/ca.crt\n    certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt\n    endpoints:\n    - https://10.10.10.90:2379\n    - https://10.10.10.91:2379\n    - https://10.10.10.92:2379\n    keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key\nimageRepository: k8s.gcr.io\nkind: ClusterConfiguration\nkubernetesVersion: v1.13.4\nnetworking:\n  dnsDomain: cluster.local\n  podSubnet: \"\"\n  serviceSubnet: 10.96.0.0/12\nscheduler: {}\n","ClusterStatus":"apiEndpoints:\n  k8s-c3-m1:\n    advertiseAddress: 10.10.10.93\n    bindPort: 6443\n  k8s-c3-m2:\n    advertiseAddress: 10.10.10.94\n    bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta1\nkind: ClusterStatus\n"}}
I0304 14:52:55.757443    1391 round_trippers.go:419] curl -k -v -XPOST  -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/api/v1/namespaces/kube-system/configmaps'
I0304 14:52:55.913083    1391 round_trippers.go:438] POST https://10.10.10.76:6443/api/v1/namespaces/kube-system/configmaps 409 Conflict in 155 milliseconds
I0304 14:52:55.913243    1391 round_trippers.go:444] Response Headers:
I0304 14:52:55.913271    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:55.913290    1391 round_trippers.go:447]     Content-Length: 218
I0304 14:52:55.913335    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:55 GMT
I0304 14:52:55.913438    1391 request.go:942] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"configmaps \"kubeadm-config\" already exists","reason":"AlreadyExists","details":{"name":"kubeadm-config","kind":"configmaps"},"code":409}
I0304 14:52:55.914863    1391 request.go:942] Request Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"kubeadm-config","namespace":"kube-system","creationTimestamp":null},"data":{"ClusterConfiguration":"apiServer:\n  certSANs:\n  - 127.0.0.1\n  - 10.10.10.90\n  - 10.10.10.91\n  - 10.10.10.92\n  - 10.10.10.76\n  extraArgs:\n    authorization-mode: Node,RBAC\n  timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io/v1beta1\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: 10.10.10.76:6443\ncontrollerManager: {}\ndns:\n  type: CoreDNS\netcd:\n  external:\n    caFile: /etc/kubernetes/pki/etcd/ca.crt\n    certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt\n    endpoints:\n    - https://10.10.10.90:2379\n    - https://10.10.10.91:2379\n    - https://10.10.10.92:2379\n    keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key\nimageRepository: k8s.gcr.io\nkind: ClusterConfiguration\nkubernetesVersion: v1.13.4\nnetworking:\n  dnsDomain: cluster.local\n  podSubnet: \"\"\n  serviceSubnet: 10.96.0.0/12\nscheduler: {}\n","ClusterStatus":"apiEndpoints:\n  k8s-c3-m1:\n    advertiseAddress: 10.10.10.93\n    bindPort: 6443\n  k8s-c3-m2:\n    advertiseAddress: 10.10.10.94\n    bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta1\nkind: ClusterStatus\n"}}
I0304 14:52:55.915123    1391 round_trippers.go:419] curl -k -v -XPUT  -H "Accept: application/json, */*" -H "Content-Type: application/json" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config'
I0304 14:52:55.923538    1391 round_trippers.go:438] PUT https://10.10.10.76:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config 200 OK in 8 milliseconds
I0304 14:52:55.924120    1391 round_trippers.go:444] Response Headers:
I0304 14:52:55.924437    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:55.924810    1391 round_trippers.go:447]     Content-Length: 1423
I0304 14:52:55.925107    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:55 GMT
I0304 14:52:55.925521    1391 request.go:942] Response Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"kubeadm-config","namespace":"kube-system","selfLink":"/api/v1/namespaces/kube-system/configmaps/kubeadm-config","uid":"519f6c23-3e69-11e9-8dd7-0050569c544c","resourceVersion":"13121","creationTimestamp":"2019-03-04T10:36:07Z"},"data":{"ClusterConfiguration":"apiServer:\n  certSANs:\n  - 127.0.0.1\n  - 10.10.10.90\n  - 10.10.10.91\n  - 10.10.10.92\n  - 10.10.10.76\n  extraArgs:\n    authorization-mode: Node,RBAC\n  timeoutForControlPlane: 4m0s\napiVersion: kubeadm.k8s.io/v1beta1\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kubernetes\ncontrolPlaneEndpoint: 10.10.10.76:6443\ncontrollerManager: {}\ndns:\n  type: CoreDNS\netcd:\n  external:\n    caFile: /etc/kubernetes/pki/etcd/ca.crt\n    certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt\n    endpoints:\n    - https://10.10.10.90:2379\n    - https://10.10.10.91:2379\n    - https://10.10.10.92:2379\n    keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key\nimageRepository: k8s.gcr.io\nkind: ClusterConfiguration\nkubernetesVersion: v1.13.4\nnetworking:\n  dnsDomain: cluster.local\n  podSubnet: \"\"\n  serviceSubnet: 10.96.0.0/12\nscheduler: {}\n","ClusterStatus":"apiEndpoints:\n  k8s-c3-m1:\n    advertiseAddress: 10.10.10.93\n    bindPort: 6443\n  k8s-c3-m2:\n    advertiseAddress: 10.10.10.94\n    bindPort: 6443\napiVersion: kubeadm.k8s.io/v1beta1\nkind: ClusterStatus\n"}}
I0304 14:52:55.926346    1391 request.go:942] Request Body: {"kind":"Role","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:nodes-kubeadm-config","namespace":"kube-system","creationTimestamp":null},"rules":[{"verbs":["get"],"apiGroups":[""],"resources":["configmaps"],"resourceNames":["kubeadm-config"]}]}
I0304 14:52:55.926823    1391 round_trippers.go:419] curl -k -v -XPOST  -H "Accept: application/json, */*" -H "Content-Type: application/json" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles'
I0304 14:52:55.946643    1391 round_trippers.go:438] POST https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles 409 Conflict in 19 milliseconds
I0304 14:52:55.947026    1391 round_trippers.go:444] Response Headers:
I0304 14:52:55.947441    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:55 GMT
I0304 14:52:55.947798    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:55.948105    1391 round_trippers.go:447]     Content-Length: 298
I0304 14:52:55.948447    1391 request.go:942] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"roles.rbac.authorization.k8s.io \"kubeadm:nodes-kubeadm-config\" already exists","reason":"AlreadyExists","details":{"name":"kubeadm:nodes-kubeadm-config","group":"rbac.authorization.k8s.io","kind":"roles"},"code":409}
I0304 14:52:55.949132    1391 request.go:942] Request Body: {"kind":"Role","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:nodes-kubeadm-config","namespace":"kube-system","creationTimestamp":null},"rules":[{"verbs":["get"],"apiGroups":[""],"resources":["configmaps"],"resourceNames":["kubeadm-config"]}]}
I0304 14:52:55.949653    1391 round_trippers.go:419] curl -k -v -XPUT  -H "Accept: application/json, */*" -H "Content-Type: application/json" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/kubeadm:nodes-kubeadm-config'
I0304 14:52:55.960370    1391 round_trippers.go:438] PUT https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/kubeadm:nodes-kubeadm-config 200 OK in 10 milliseconds
I0304 14:52:55.960920    1391 round_trippers.go:444] Response Headers:
I0304 14:52:55.961216    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:55.961507    1391 round_trippers.go:447]     Content-Length: 464
I0304 14:52:55.961789    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:55 GMT
I0304 14:52:55.962002    1391 request.go:942] Response Body: {"kind":"Role","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:nodes-kubeadm-config","namespace":"kube-system","selfLink":"/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/kubeadm%3Anodes-kubeadm-config","uid":"51a356c9-3e69-11e9-8dd7-0050569c544c","resourceVersion":"559","creationTimestamp":"2019-03-04T10:36:07Z"},"rules":[{"verbs":["get"],"apiGroups":[""],"resources":["configmaps"],"resourceNames":["kubeadm-config"]}]}
I0304 14:52:55.964418    1391 request.go:942] Request Body: {"kind":"RoleBinding","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:nodes-kubeadm-config","namespace":"kube-system","creationTimestamp":null},"subjects":[{"kind":"Group","name":"system:bootstrappers:kubeadm:default-node-token"},{"kind":"Group","name":"system:nodes"}],"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"Role","name":"kubeadm:nodes-kubeadm-config"}}
I0304 14:52:55.965022    1391 round_trippers.go:419] curl -k -v -XPOST  -H "Accept: application/json, */*" -H "Content-Type: application/json" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings'
I0304 14:52:55.983782    1391 round_trippers.go:438] POST https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings 409 Conflict in 18 milliseconds
I0304 14:52:55.983847    1391 round_trippers.go:444] Response Headers:
I0304 14:52:55.983890    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:55.983920    1391 round_trippers.go:447]     Content-Length: 312
I0304 14:52:55.983948    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:55 GMT
I0304 14:52:55.984007    1391 request.go:942] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"rolebindings.rbac.authorization.k8s.io \"kubeadm:nodes-kubeadm-config\" already exists","reason":"AlreadyExists","details":{"name":"kubeadm:nodes-kubeadm-config","group":"rbac.authorization.k8s.io","kind":"rolebindings"},"code":409}
I0304 14:52:55.984330    1391 request.go:942] Request Body: {"kind":"RoleBinding","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:nodes-kubeadm-config","namespace":"kube-system","creationTimestamp":null},"subjects":[{"kind":"Group","name":"system:bootstrappers:kubeadm:default-node-token"},{"kind":"Group","name":"system:nodes"}],"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"Role","name":"kubeadm:nodes-kubeadm-config"}}
I0304 14:52:55.984464    1391 round_trippers.go:419] curl -k -v -XPUT  -H "Accept: application/json, */*" -H "Content-Type: application/json" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/kubeadm:nodes-kubeadm-config'
I0304 14:52:55.994138    1391 round_trippers.go:438] PUT https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/kubeadm:nodes-kubeadm-config 200 OK in 9 milliseconds
I0304 14:52:55.994193    1391 round_trippers.go:444] Response Headers:
I0304 14:52:55.994497    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:55.994878    1391 round_trippers.go:447]     Content-Length: 678
I0304 14:52:55.995094    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:55 GMT
I0304 14:52:55.995377    1391 request.go:942] Response Body: {"kind":"RoleBinding","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:nodes-kubeadm-config","namespace":"kube-system","selfLink":"/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/kubeadm%3Anodes-kubeadm-config","uid":"51a61bf8-3e69-11e9-8dd7-0050569c544c","resourceVersion":"560","creationTimestamp":"2019-03-04T10:36:07Z"},"subjects":[{"kind":"Group","apiGroup":"rbac.authorization.k8s.io","name":"system:bootstrappers:kubeadm:default-node-token"},{"kind":"Group","apiGroup":"rbac.authorization.k8s.io","name":"system:nodes"}],"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"Role","name":"kubeadm:nodes-kubeadm-config"}}
I0304 14:52:56.001421    1391 loader.go:359] Config loaded from file /etc/kubernetes/admin.conf
I0304 14:52:56.002891    1391 uploadconfig.go:128] [upload-config] Uploading the kubelet component config to a ConfigMap
[kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster
I0304 14:52:56.005261    1391 request.go:942] Request Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"kubelet-config-1.13","namespace":"kube-system","creationTimestamp":null},"data":{"kubelet":"address: 0.0.0.0\napiVersion: kubelet.config.k8s.io/v1beta1\nauthentication:\n  anonymous:\n    enabled: false\n  webhook:\n    cacheTTL: 2m0s\n    enabled: true\n  x509:\n    clientCAFile: /etc/kubernetes/pki/ca.crt\nauthorization:\n  mode: Webhook\n  webhook:\n    cacheAuthorizedTTL: 5m0s\n    cacheUnauthorizedTTL: 30s\ncgroupDriver: cgroupfs\ncgroupsPerQOS: true\nclusterDNS:\n- 10.96.0.10\nclusterDomain: cluster.local\nconfigMapAndSecretChangeDetectionStrategy: Watch\ncontainerLogMaxFiles: 5\ncontainerLogMaxSize: 10Mi\ncontentType: application/vnd.kubernetes.protobuf\ncpuCFSQuota: true\ncpuCFSQuotaPeriod: 100ms\ncpuManagerPolicy: none\ncpuManagerReconcilePeriod: 10s\nenableControllerAttachDetach: true\nenableDebuggingHandlers: true\nenforceNodeAllocatable:\n- pods\neventBurst: 10\neventRecordQPS: 5\nevictionHard:\n  imagefs.available: 15%\n  memory.available: 100Mi\n  nodefs.available: 10%\n  nodefs.inodesFree: 5%\nevictionPressureTransitionPeriod: 5m0s\nfailSwapOn: true\nfileCheckFrequency: 20s\nhairpinMode: promiscuous-bridge\nhealthzBindAddress: 127.0.0.1\nhealthzPort: 10248\nhttpCheckFrequency: 20s\nimageGCHighThresholdPercent: 85\nimageGCLowThresholdPercent: 80\nimageMinimumGCAge: 2m0s\niptablesDropBit: 15\niptablesMasqueradeBit: 14\nkind: KubeletConfiguration\nkubeAPIBurst: 10\nkubeAPIQPS: 5\nmakeIPTablesUtilChains: true\nmaxOpenFiles: 1000000\nmaxPods: 110\nnodeLeaseDurationSeconds: 40\nnodeStatusReportFrequency: 1m0s\nnodeStatusUpdateFrequency: 10s\noomScoreAdj: -999\npodPidsLimit: -1\nport: 10250\nregistryBurst: 10\nregistryPullQPS: 5\nresolvConf: /etc/resolv.conf\nrotateCertificates: true\nruntimeRequestTimeout: 2m0s\nserializeImagePulls: true\nstaticPodPath: /etc/kubernetes/manifests\nstreamingConnectionIdleTimeout: 4h0m0s\nsyncFrequency: 
1m0s\nvolumeStatsAggPeriod: 1m0s\n"}}
I0304 14:52:56.005580    1391 round_trippers.go:419] curl -k -v -XPOST  -H "Accept: application/json, */*" -H "Content-Type: application/json" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/api/v1/namespaces/kube-system/configmaps'
I0304 14:52:56.026664    1391 round_trippers.go:438] POST https://10.10.10.76:6443/api/v1/namespaces/kube-system/configmaps 409 Conflict in 20 milliseconds
I0304 14:52:56.026763    1391 round_trippers.go:444] Response Headers:
I0304 14:52:56.026798    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:56.026852    1391 round_trippers.go:447]     Content-Length: 228
I0304 14:52:56.026931    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:56 GMT
I0304 14:52:56.027084    1391 request.go:942] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"configmaps \"kubelet-config-1.13\" already exists","reason":"AlreadyExists","details":{"name":"kubelet-config-1.13","kind":"configmaps"},"code":409}
I0304 14:52:56.027551    1391 request.go:942] Request Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"kubelet-config-1.13","namespace":"kube-system","creationTimestamp":null},"data":{"kubelet":"address: 0.0.0.0\napiVersion: kubelet.config.k8s.io/v1beta1\nauthentication:\n  anonymous:\n    enabled: false\n  webhook:\n    cacheTTL: 2m0s\n    enabled: true\n  x509:\n    clientCAFile: /etc/kubernetes/pki/ca.crt\nauthorization:\n  mode: Webhook\n  webhook:\n    cacheAuthorizedTTL: 5m0s\n    cacheUnauthorizedTTL: 30s\ncgroupDriver: cgroupfs\ncgroupsPerQOS: true\nclusterDNS:\n- 10.96.0.10\nclusterDomain: cluster.local\nconfigMapAndSecretChangeDetectionStrategy: Watch\ncontainerLogMaxFiles: 5\ncontainerLogMaxSize: 10Mi\ncontentType: application/vnd.kubernetes.protobuf\ncpuCFSQuota: true\ncpuCFSQuotaPeriod: 100ms\ncpuManagerPolicy: none\ncpuManagerReconcilePeriod: 10s\nenableControllerAttachDetach: true\nenableDebuggingHandlers: true\nenforceNodeAllocatable:\n- pods\neventBurst: 10\neventRecordQPS: 5\nevictionHard:\n  imagefs.available: 15%\n  memory.available: 100Mi\n  nodefs.available: 10%\n  nodefs.inodesFree: 5%\nevictionPressureTransitionPeriod: 5m0s\nfailSwapOn: true\nfileCheckFrequency: 20s\nhairpinMode: promiscuous-bridge\nhealthzBindAddress: 127.0.0.1\nhealthzPort: 10248\nhttpCheckFrequency: 20s\nimageGCHighThresholdPercent: 85\nimageGCLowThresholdPercent: 80\nimageMinimumGCAge: 2m0s\niptablesDropBit: 15\niptablesMasqueradeBit: 14\nkind: KubeletConfiguration\nkubeAPIBurst: 10\nkubeAPIQPS: 5\nmakeIPTablesUtilChains: true\nmaxOpenFiles: 1000000\nmaxPods: 110\nnodeLeaseDurationSeconds: 40\nnodeStatusReportFrequency: 1m0s\nnodeStatusUpdateFrequency: 10s\noomScoreAdj: -999\npodPidsLimit: -1\nport: 10250\nregistryBurst: 10\nregistryPullQPS: 5\nresolvConf: /etc/resolv.conf\nrotateCertificates: true\nruntimeRequestTimeout: 2m0s\nserializeImagePulls: true\nstaticPodPath: /etc/kubernetes/manifests\nstreamingConnectionIdleTimeout: 4h0m0s\nsyncFrequency: 
1m0s\nvolumeStatsAggPeriod: 1m0s\n"}}
I0304 14:52:56.027830    1391 round_trippers.go:419] curl -k -v -XPUT  -H "Accept: application/json, */*" -H "Content-Type: application/json" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/api/v1/namespaces/kube-system/configmaps/kubelet-config-1.13'
I0304 14:52:56.036853    1391 round_trippers.go:438] PUT https://10.10.10.76:6443/api/v1/namespaces/kube-system/configmaps/kubelet-config-1.13 200 OK in 8 milliseconds
I0304 14:52:56.036900    1391 round_trippers.go:444] Response Headers:
I0304 14:52:56.037253    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:56.037291    1391 round_trippers.go:447]     Content-Length: 2133
I0304 14:52:56.037554    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:56 GMT
I0304 14:52:56.037755    1391 request.go:942] Response Body: {"kind":"ConfigMap","apiVersion":"v1","metadata":{"name":"kubelet-config-1.13","namespace":"kube-system","selfLink":"/api/v1/namespaces/kube-system/configmaps/kubelet-config-1.13","uid":"51a9de57-3e69-11e9-8dd7-0050569c544c","resourceVersion":"561","creationTimestamp":"2019-03-04T10:36:07Z"},"data":{"kubelet":"address: 0.0.0.0\napiVersion: kubelet.config.k8s.io/v1beta1\nauthentication:\n  anonymous:\n    enabled: false\n  webhook:\n    cacheTTL: 2m0s\n    enabled: true\n  x509:\n    clientCAFile: /etc/kubernetes/pki/ca.crt\nauthorization:\n  mode: Webhook\n  webhook:\n    cacheAuthorizedTTL: 5m0s\n    cacheUnauthorizedTTL: 30s\ncgroupDriver: cgroupfs\ncgroupsPerQOS: true\nclusterDNS:\n- 10.96.0.10\nclusterDomain: cluster.local\nconfigMapAndSecretChangeDetectionStrategy: Watch\ncontainerLogMaxFiles: 5\ncontainerLogMaxSize: 10Mi\ncontentType: application/vnd.kubernetes.protobuf\ncpuCFSQuota: true\ncpuCFSQuotaPeriod: 100ms\ncpuManagerPolicy: none\ncpuManagerReconcilePeriod: 10s\nenableControllerAttachDetach: true\nenableDebuggingHandlers: true\nenforceNodeAllocatable:\n- pods\neventBurst: 10\neventRecordQPS: 5\nevictionHard:\n  imagefs.available: 15%\n  memory.available: 100Mi\n  nodefs.available: 10%\n  nodefs.inodesFree: 5%\nevictionPressureTransitionPeriod: 5m0s\nfailSwapOn: true\nfileCheckFrequency: 20s\nhairpinMode: promiscuous-bridge\nhealthzBindAddress: 127.0.0.1\nhealthzPort: 10248\nhttpCheckFrequency: 20s\nimageGCHighThresholdPercent: 85\nimageGCLowThresholdPercent: 80\nimageMinimumGCAge: 2m0s\niptablesDropBit: 15\niptablesMasqueradeBit: 14\nkind: KubeletConfiguration\nkubeAPIBurst: 10\nkubeAPIQPS: 5\nmakeIPTablesUtilChains: true\nmaxOpenFiles: 1000000\nmaxPods: 110\nnodeLeaseDurationSeconds: 40\nnodeStatusReportFrequency: 1m0s\nnodeStatusUpdateFrequency: 10s\noomScoreAdj: -999\npodPidsLimit: -1\nport: 10250\nregistryBurst: 10\nregistryPullQPS: 5\nresolvConf: /etc/resolv.conf\nrotateCertificates: 
true\nruntimeRequestTimeout: 2m0s\nserializeImagePulls: true\nstaticPodPath: /etc/kubernetes/manifests\nstreamingConnectionIdleTimeout: 4h0m0s\nsyncFrequency: 1m0s\nvolumeStatsAggPeriod: 1m0s\n"}}
I0304 14:52:56.038255    1391 request.go:942] Request Body: {"kind":"Role","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:kubelet-config-1.13","namespace":"kube-system","creationTimestamp":null},"rules":[{"verbs":["get"],"apiGroups":[""],"resources":["configmaps"],"resourceNames":["kubelet-config-1.13"]}]}
I0304 14:52:56.038523    1391 round_trippers.go:419] curl -k -v -XPOST  -H "Accept: application/json, */*" -H "Content-Type: application/json" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles'
I0304 14:52:56.052414    1391 round_trippers.go:438] POST https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles 409 Conflict in 13 milliseconds
I0304 14:52:56.052512    1391 round_trippers.go:444] Response Headers:
I0304 14:52:56.052572    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:56.052603    1391 round_trippers.go:447]     Content-Length: 296
I0304 14:52:56.052685    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:56 GMT
I0304 14:52:56.052955    1391 request.go:942] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"roles.rbac.authorization.k8s.io \"kubeadm:kubelet-config-1.13\" already exists","reason":"AlreadyExists","details":{"name":"kubeadm:kubelet-config-1.13","group":"rbac.authorization.k8s.io","kind":"roles"},"code":409}
I0304 14:52:56.053398    1391 request.go:942] Request Body: {"kind":"Role","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:kubelet-config-1.13","namespace":"kube-system","creationTimestamp":null},"rules":[{"verbs":["get"],"apiGroups":[""],"resources":["configmaps"],"resourceNames":["kubelet-config-1.13"]}]}
I0304 14:52:56.053646    1391 round_trippers.go:419] curl -k -v -XPUT  -H "Accept: application/json, */*" -H "Content-Type: application/json" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/kubeadm:kubelet-config-1.13'
I0304 14:52:56.061599    1391 round_trippers.go:438] PUT https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/kubeadm:kubelet-config-1.13 200 OK in 7 milliseconds
I0304 14:52:56.061691    1391 round_trippers.go:444] Response Headers:
I0304 14:52:56.061723    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:56 GMT
I0304 14:52:56.061779    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:56.061808    1391 round_trippers.go:447]     Content-Length: 467
I0304 14:52:56.061917    1391 request.go:942] Response Body: {"kind":"Role","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:kubelet-config-1.13","namespace":"kube-system","selfLink":"/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles/kubeadm%3Akubelet-config-1.13","uid":"51abee39-3e69-11e9-8dd7-0050569c544c","resourceVersion":"562","creationTimestamp":"2019-03-04T10:36:07Z"},"rules":[{"verbs":["get"],"apiGroups":[""],"resources":["configmaps"],"resourceNames":["kubelet-config-1.13"]}]}
I0304 14:52:56.062370    1391 request.go:942] Request Body: {"kind":"RoleBinding","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:kubelet-config-1.13","namespace":"kube-system","creationTimestamp":null},"subjects":[{"kind":"Group","name":"system:nodes"},{"kind":"Group","name":"system:bootstrappers:kubeadm:default-node-token"}],"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"Role","name":"kubeadm:kubelet-config-1.13"}}
I0304 14:52:56.062564    1391 round_trippers.go:419] curl -k -v -XPOST  -H "Accept: application/json, */*" -H "Content-Type: application/json" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings'
I0304 14:52:56.076620    1391 round_trippers.go:438] POST https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings 409 Conflict in 13 milliseconds
I0304 14:52:56.076664    1391 round_trippers.go:444] Response Headers:
I0304 14:52:56.076902    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:56.076938    1391 round_trippers.go:447]     Content-Length: 310
I0304 14:52:56.077092    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:56 GMT
I0304 14:52:56.077299    1391 request.go:942] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"rolebindings.rbac.authorization.k8s.io \"kubeadm:kubelet-config-1.13\" already exists","reason":"AlreadyExists","details":{"name":"kubeadm:kubelet-config-1.13","group":"rbac.authorization.k8s.io","kind":"rolebindings"},"code":409}
I0304 14:52:56.077657    1391 request.go:942] Request Body: {"kind":"RoleBinding","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:kubelet-config-1.13","namespace":"kube-system","creationTimestamp":null},"subjects":[{"kind":"Group","name":"system:nodes"},{"kind":"Group","name":"system:bootstrappers:kubeadm:default-node-token"}],"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"Role","name":"kubeadm:kubelet-config-1.13"}}
I0304 14:52:56.077940    1391 round_trippers.go:419] curl -k -v -XPUT  -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" -H "Accept: application/json, */*" -H "Content-Type: application/json" 'https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/kubeadm:kubelet-config-1.13'
I0304 14:52:56.084893    1391 round_trippers.go:438] PUT https://10.10.10.76:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/kubeadm:kubelet-config-1.13 200 OK in 6 milliseconds
I0304 14:52:56.084937    1391 round_trippers.go:444] Response Headers:
I0304 14:52:56.085395    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:52:56.085635    1391 round_trippers.go:447]     Content-Length: 675
I0304 14:52:56.085675    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:52:56 GMT
I0304 14:52:56.086357    1391 request.go:942] Response Body: {"kind":"RoleBinding","apiVersion":"rbac.authorization.k8s.io/v1","metadata":{"name":"kubeadm:kubelet-config-1.13","namespace":"kube-system","selfLink":"/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings/kubeadm%3Akubelet-config-1.13","uid":"51ad932c-3e69-11e9-8dd7-0050569c544c","resourceVersion":"563","creationTimestamp":"2019-03-04T10:36:07Z"},"subjects":[{"kind":"Group","apiGroup":"rbac.authorization.k8s.io","name":"system:nodes"},{"kind":"Group","apiGroup":"rbac.authorization.k8s.io","name":"system:bootstrappers:kubeadm:default-node-token"}],"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"Role","name":"kubeadm:kubelet-config-1.13"}}
I0304 14:52:56.086694    1391 uploadconfig.go:133] [upload-config] Preserving the CRISocket information for the control-plane node
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "k8s-c3-m1" as an annotation
[excluded similar lines] ...
I0304 14:53:16.587525    1391 round_trippers.go:419] curl -k -v -XGET  -H "Accept: application/json, */*" -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" 'https://10.10.10.76:6443/api/v1/nodes/k8s-c3-m1'
I0304 14:53:16.597510    1391 round_trippers.go:438] GET https://10.10.10.76:6443/api/v1/nodes/k8s-c3-m1 404 Not Found in 9 milliseconds
I0304 14:53:16.597872    1391 round_trippers.go:444] Response Headers:
I0304 14:53:16.597909    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:53:16.598117    1391 round_trippers.go:447]     Content-Length: 188
I0304 14:53:16.598141    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:53:16 GMT
I0304 14:53:16.598332    1391 request.go:942] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"nodes \"k8s-c3-m1\" not found","reason":"NotFound","details":{"name":"k8s-c3-m1","kind":"nodes"},"code":404}
[kubelet-check] Initial timeout of 40s passed.
[excluded similar lines] ...
I0304 14:53:17.111508    1391 request.go:942] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"nodes \"k8s-c3-m1\" not found","reason":"NotFound","details":{"name":"k8s-c3-m1","kind":"nodes"},"code":404}
I0304 14:54:56.095649    1391 round_trippers.go:419] curl -k -v -XGET  -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" -H "Accept: application/json, */*" 'https://10.10.10.76:6443/api/v1/nodes/k8s-c3-m1'
I0304 14:54:56.101815    1391 round_trippers.go:438] GET https://10.10.10.76:6443/api/v1/nodes/k8s-c3-m1 404 Not Found in 6 milliseconds
I0304 14:54:56.101895    1391 round_trippers.go:444] Response Headers:
I0304 14:54:56.101926    1391 round_trippers.go:447]     Content-Type: application/json
I0304 14:54:56.101945    1391 round_trippers.go:447]     Content-Length: 188
I0304 14:54:56.101996    1391 round_trippers.go:447]     Date: Mon, 04 Mar 2019 14:54:56 GMT
I0304 14:54:56.102074    1391 request.go:942] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"nodes \"k8s-c3-m1\" not found","reason":"NotFound","details":{"name":"k8s-c3-m1","kind":"nodes"},"code":404}
error execution phase upload-config/kubelet: Error writing Crisocket information for the control-plane node: timed out waiting for the condition

рдХреНрдпрд╛ рд╣реЛрдиреЗ рдХреА рдЙрдореНрдореАрдж рдереА?

рдмрд┐рдирд╛ рдХрд┐рд╕реА рд╕рдорд╕реНрдпрд╛ рдХреЗ рдЖрд░рдВрдн рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдорд╛рд╕реНрдЯрд░

рдЗрд╕реЗ рдХреИрд╕реЗ рдкреБрди: рдкреЗрд╢ рдХрд░реЗрдВ (рдЬрд┐рддрдирд╛ рд╕рдВрднрд╡ рд╣реЛ рдиреНрдпреВрдирддрдо рдФрд░ рдареАрдХ рд╣реИ)?

рдиреЛрдб рдкреНрд░реАрдкрд░реЗрд╢рди

# note! - docker needs to be installed on all nodes (it is on my 16.04 template VMs)

# install misc tools
apt-get update && apt-get install -y apt-transport-https curl

# add the Kubernetes apt signing key and repository
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -

cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF

# install required k8s tools and pin them so unattended upgrades
# cannot skew the versions across nodes
apt-get update
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl

# turn off swap (kubelet refuses to start with swap enabled)
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab

# create systemd drop-in that pins kubelet to static-pod-only mode
# NOTE(review): the official HA docs apply this drop-in on the *etcd* nodes
# only. If it is still present on a control-plane node when `kubeadm init`
# runs, kubelet never bootstraps against the API server, the Node object is
# never created, and init times out at upload-config/kubelet -- confirm the
# drop-in is removed (plus `systemctl daemon-reload`) on the masters.
# The directory may not exist yet on a clean node, so create it first.
mkdir -p /etc/systemd/system/kubelet.service.d
cat << _EOF_ > /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf
[Service]
ExecStart=
ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true
Restart=always
_EOF_

# reload systemd and start kubelet
systemctl daemon-reload
systemctl restart kubelet

etcd рдХреНрд▓рд╕реНрдЯрд░ рдХреЗ рд▓рд┐рдП рдХреЙрдиреНрдлрд╝рд┐рдЧрд░реЗрд╢рди рдмрдирд╛рдПрдБ

### on all etcd nodes
# create required variables
#
# hostname -> IP maps; the *NAMES and *IPS arrays are both sorted by IP so
# that index i of one array corresponds to index i of the other.
#
# Fix: the original built the IP arrays by iterating over the map *values*
# and looking them up as keys (`${ETCDINFO[$KEY]}` with an IP as the key),
# which is always an empty lookup and only produced the right output by
# accident (and errors out under `set -u`). Print the values directly.
declare -A ETCDINFO=([k8s-c3-e1]=10.10.10.90 [k8s-c3-e2]=10.10.10.91 [k8s-c3-e3]=10.10.10.92)
mapfile -t ETCDNAMES < <(for name in "${!ETCDINFO[@]}"; do printf '%s:::%s\n' "${ETCDINFO[$name]}" "$name"; done | sort | awk -F::: '{print $2}')
mapfile -t ETCDIPS < <(printf '%s\n' "${ETCDINFO[@]}" | sort)
declare -A MASTERINFO=([k8s-c3-m1]=10.10.10.93 [k8s-c3-m2]=10.10.10.94 [k8s-c3-m3]=10.10.10.95)
mapfile -t MASTERNAMES < <(for name in "${!MASTERINFO[@]}"; do printf '%s:::%s\n' "${MASTERINFO[$name]}" "$name"; done | sort | awk -F::: '{print $2}')
mapfile -t MASTERIPS < <(printf '%s\n' "${MASTERINFO[@]}" | sort)

# create kubeadm ClusterConfiguration for this etcd member.
# The heredoc is unquoted, so ${ETCDINFO[$HOSTNAME]}, ${ETCDNAMES[*]} and
# ${HOSTNAME} are expanded at write time: each node gets its own IP in the
# cert SANs and listen/advertise URLs, plus the full 3-member initial-cluster
# string. Run on all three etcd nodes after defining the variables above.
cat << EOF > /root/kubeadmcfg.yaml
apiVersion: "kubeadm.k8s.io/v1beta1"
kind: ClusterConfiguration
etcd:
    local:
        serverCertSANs:
        - "${ETCDINFO[$HOSTNAME]}"
        peerCertSANs:
        - "${ETCDINFO[$HOSTNAME]}"
        extraArgs:
            initial-cluster: ${ETCDNAMES[0]}=https://${ETCDIPS[0]}:2380,${ETCDNAMES[1]}=https://${ETCDIPS[1]}:2380,${ETCDNAMES[2]}=https://${ETCDIPS[2]}:2380
            initial-cluster-state: new
            name: ${HOSTNAME}
            listen-peer-urls: https://${ETCDINFO[$HOSTNAME]}:2380
            listen-client-urls: https://${ETCDINFO[$HOSTNAME]}:2379
            advertise-client-urls: https://${ETCDINFO[$HOSTNAME]}:2379
            initial-advertise-peer-urls: https://${ETCDINFO[$HOSTNAME]}:2380
EOF

рдкреНрд░рдорд╛рдгрдкрддреНрд░ (certs) рдЬрдирд░реЗрдЯ рдХрд░реЗрдВ рдФрд░ рд╡рд┐рддрд░рд┐рдд рдХрд░реЗрдВ

### run only on one etcd node (k8s-c3-e1)
# generate the main certificate authority
# (writes ca.crt and ca.key into /etc/kubernetes/pki/etcd/)
kubeadm init phase certs etcd-ca

# create this node's certificates from the config written above:
# server + peer certs for etcd itself, the healthcheck client cert,
# and the client cert the kube-apiserver will use to reach etcd
kubeadm init phase certs etcd-server --config=/root/kubeadmcfg.yaml 
kubeadm init phase certs etcd-peer --config=/root/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/root/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/root/kubeadmcfg.yaml

# copy cert files from k8s-c3-e1 to the other etcd nodes.
# A loop replaces the two copy-pasted scp/ssh blocks; "${ETCDIPS[@]:1}"
# skips index 0 (this node). Expansions are quoted so an empty/odd value
# cannot word-split, and chown uses the standard `root:root` form (the
# `root.root` dot separator is a deprecated GNU extension).
for ip in "${ETCDIPS[@]:1}"; do
  scp -rp /etc/kubernetes/pki "ubuntu@${ip}:" && \
  ssh -t "ubuntu@${ip}" "sudo mv pki /etc/kubernetes/ && \
  sudo chown -R root:root /etc/kubernetes/pki"
done

# copy cert files from k8s-c3-e1 to the master nodes.
# A loop replaces the three identical copy-pasted blocks. On each master
# we keep only what the kube-apiserver needs to talk to external etcd:
# the etcd CA cert and the apiserver-etcd-client cert/key pair; every
# other file copied over (etcd server/peer certs, CA key, ...) is deleted.
for ip in "${MASTERIPS[@]}"; do
  scp -rp /etc/kubernetes/pki "ubuntu@${ip}:" && \
  ssh -t "ubuntu@${ip}" "sudo mv pki /etc/kubernetes/ && \
  sudo find /etc/kubernetes/pki -not -name ca.crt \
  -not -name apiserver-etcd-client.crt \
  -not -name apiserver-etcd-client.key \
  -type f -delete && \
  sudo chown -R root:root /etc/kubernetes/pki"
done

### run on the other etcd nodes
# create this node's certificates (the shared CA was copied over above,
# so these are signed by the same authority as on k8s-c3-e1)
kubeadm init phase certs etcd-server --config=/root/kubeadmcfg.yaml 
kubeadm init phase certs etcd-peer --config=/root/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/root/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/root/kubeadmcfg.yaml

# create the static pod manifest for the local etcd member
kubeadm init phase etcd local --config=/root/kubeadmcfg.yaml


### run only on one etcd node (k8s-c3-e1)
# check if cluster is running
docker run --rm -it \
--net host \
-v /etc/kubernetes:/etc/kubernetes quay.io/coreos/etcd:v3.2.24 etcdctl \
--cert-file /etc/kubernetes/pki/etcd/peer.crt \
--key-file /etc/kubernetes/pki/etcd/peer.key \
--ca-file /etc/kubernetes/pki/etcd/ca.crt \
--endpoints https://${ETCDIPS[0]}:2379 cluster-health

рдХреЙрдиреНрдлрд╝рд┐рдЧрд░реЗрд╢рди рдФрд░ рдЗрдирд┐рдЯ рдорд╛рд╕реНрдЯрд░ рдиреЛрдбреНрд╕

### run on all master nodes
# create required variables.
# ETCDNAMES[i] and ETCDIPS[i] (likewise MASTERNAMES/MASTERIPS) are kept in
# matching order: both lists are sorted by IP address.
# NB: the original derived the IP lists by iterating over the map *values*
# and looking them up as keys (${ETCDINFO[$KEY]} with $KEY an IP), which only
# produced the right output because the bogus lookup expanded empty; the IPs
# are now taken directly from the map values instead.
declare -A ETCDINFO
ETCDINFO=([k8s-c3-e1]=10.10.10.90 [k8s-c3-e2]=10.10.10.91 [k8s-c3-e3]=10.10.10.92)
mapfile -t ETCDNAMES < <(for name in "${!ETCDINFO[@]}"; do printf '%s:::%s\n' "${ETCDINFO[$name]}" "$name"; done | sort | awk -F::: '{print $2}')
mapfile -t ETCDIPS < <(printf '%s\n' "${ETCDINFO[@]}" | sort)
declare -A MASTERINFO
MASTERINFO=([k8s-c3-m1]=10.10.10.93 [k8s-c3-m2]=10.10.10.94 [k8s-c3-m3]=10.10.10.95)
mapfile -t MASTERNAMES < <(for name in "${!MASTERINFO[@]}"; do printf '%s:::%s\n' "${MASTERINFO[$name]}" "$name"; done | sort | awk -F::: '{print $2}')
mapfile -t MASTERIPS < <(printf '%s\n' "${MASTERINFO[@]}" | sort)
# virtual IP of the nginx load balancer sitting in front of the apiservers
VIP=10.10.10.76

# create the ClusterConfiguration for the master nodes; the heredoc delimiter
# is unquoted, so ${ETCDIPS[*]} and ${VIP} below are expanded by the shell
# before the file is written
cat << EOF > /root/kubeadmcfg.yaml
apiVersion: "kubeadm.k8s.io/v1beta1"
kind: ClusterConfiguration
kubernetesVersion: stable
apiServer:
  certSANs:
  - "127.0.0.1"
  - "${ETCDIPS[0]}"
  - "${ETCDIPS[1]}"
  - "${ETCDIPS[2]}"
  - "${VIP}"
controlPlaneEndpoint: "${VIP}:6443"
etcd:
    external:
        endpoints:
        - https://${ETCDIPS[0]}:2379
        - https://${ETCDIPS[1]}:2379
        - https://${ETCDIPS[2]}:2379
        caFile: /etc/kubernetes/pki/etcd/ca.crt
        certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt
        keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key
EOF

### run only on the first master node (k8s-c3-m1)
# init the first master node; kubelet is stopped first — presumably so the
# pre-existing 20-etcd-service-manager.conf drop-in doesn't keep a stale
# kubelet running during init (NOTE(review): confirm this is intended)
service kubelet stop && \
kubeadm init --config /root/kubeadmcfg.yaml

рд╣рдореЗрдВ рдХреБрдЫ рдФрд░ рдЬрд╛рдирдирд╛ рдЪрд╛рд╣рд┐рдП?

рдХреНрд▓рд╕реНрдЯрд░ рдиреЛрдбреНрд╕

k8s-c3-lb - 10.10.10.76
k8s-c3-e1 - 10.10.10.90
k8s-c3-e2 - 10.10.10.91
k8s-c3-e3 - 10.10.10.92
k8s-c3-m1 - 10.10.10.93
k8s-c3-m2 - 10.10.10.94
k8s-c3-m3 - 10.10.10.95
k8s-c3-w1 - 10.10.10.96
k8s-c3-w2 - 10.10.10.97
k8s-c3-w3 - 10.10.10.98

nginx LB config

root@k8s-c3-lb:~# cat nginx.conf
worker_processes 4;
worker_rlimit_nofile 40000;

events {
    worker_connections 8192;
}

error_log /var/log/nginx/error.log info;

stream {
  upstream k8s-c3 {
    server 10.10.10.93:6443;
    server 10.10.10.94:6443;
    server 10.10.10.95:6443;
  }
  server {
    listen 6443;
    proxy_pass k8s-c3;
  }
}

LB рдЬрд╛рдВрдЪрдиреЗ рдХреЗ рд▓рд┐рдП netcat рдЖрдЙрдЯрдкреБрдЯ

root@k8s-c3-lb:~# nc -v 10.10.10.76 6443
Connection to 10.10.10.76 6443 port [tcp/*] succeeded!

etcd рдиреЛрдбреНрд╕ рдкрд░ kubeadm config

root@k8s-c3-e1:~# cat kubeadmcfg.yaml 
apiVersion: "kubeadm.k8s.io/v1beta1"
kind: ClusterConfiguration
etcd:
    local:
        serverCertSANs:
        - "10.10.10.90"
        peerCertSANs:
        - "10.10.10.90"
        extraArgs:
            initial-cluster: k8s-c3-e1=https://10.10.10.90:2380,k8s-c3-e2=https://10.10.10.91:2380,k8s-c3-e3=https://10.10.10.92:2380
            initial-cluster-state: new
            name: k8s-c3-e1
            listen-peer-urls: https://10.10.10.90:2380
            listen-client-urls: https://10.10.10.90:2379
            advertise-client-urls: https://10.10.10.90:2379
            initial-advertise-peer-urls: https://10.10.10.90:2380

рдорд╛рд╕реНрдЯрд░ рд╕реЗ рдЬрд╛рдВрдЪ рдЖрджрд┐

root@k8s-c3-m1:~# docker run --rm -it --net host -v /etc/kubernetes:/etc/kubernetes quay.io/coreos/etcd:v3.2.24 etcdctl --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --ca-file /etc/kubernetes/pki/etcd/ca.crt --endpoints https://10.10.10.90:2379 cluster-health
member 2855b88ffd64a219 is healthy: got healthy result from https://10.10.10.91:2379
member 54861c1657ba1b20 is healthy: got healthy result from https://10.10.10.92:2379
member 6fc6fbb1e152a287 is healthy: got healthy result from https://10.10.10.90:2379
cluster is healthy

рдорд╛рд╕реНрдЯрд░ рдкрд░ kubeadm config

root@k8s-c3-m1:~# cat /root/kubeadmcfg.yaml 
apiVersion: "kubeadm.k8s.io/v1beta1"
kind: ClusterConfiguration
kubernetesVersion: stable
apiServer:
  certSANs:
  - "127.0.0.1"
  - "10.10.10.90"
  - "10.10.10.91"
  - "10.10.10.92"
  - "10.10.10.76"
controlPlaneEndpoint: "10.10.10.76:6443"
etcd:
    external:
        endpoints:
        - https://10.10.10.90:2379
        - https://10.10.10.91:2379
        - https://10.10.10.92:2379
        caFile: /etc/kubernetes/pki/etcd/ca.crt
        certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt
        keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key

docker ps

root@k8s-c3-m1:~# docker ps
CONTAINER ID        IMAGE                  COMMAND                  CREATED             STATUS              PORTS               NAMES
d6e9af6f2585        dd862b749309           "kube-scheduler --adтАж"   28 minutes ago      Up 28 minutes                           k8s_kube-scheduler_kube-scheduler-k8s-c3-m1_kube-system_4b52d75cab61380f07c0c5a69fb371d4_1
76bcca06bb0c        40a817357014           "kube-controller-manтАж"   28 minutes ago      Up 28 minutes                           k8s_kube-controller-manager_kube-controller-manager-k8s-c3-m1_kube-system_3a2670bb8847c2036740fe0f0a3de429_1
74c9b34ec00d        fc3801f0fc54           "kube-apiserver --auтАж"   About an hour ago   Up About an hour                        k8s_kube-apiserver_kube-apiserver-k8s-c3-m1_kube-system_6fb1fd1d468dedcf6a62eff4d392685e_0
e68bbbc0967e        k8s.gcr.io/pause:3.1   "/pause"                 About an hour ago   Up About an hour                        k8s_POD_kube-scheduler-k8s-c3-m1_kube-system_4b52d75cab61380f07c0c5a69fb371d4_0
0d6e0d0040cf        k8s.gcr.io/pause:3.1   "/pause"                 About an hour ago   Up About an hour                        k8s_POD_kube-controller-manager-k8s-c3-m1_kube-system_3a2670bb8847c2036740fe0f0a3de429_0
29f7974ae280        k8s.gcr.io/pause:3.1   "/pause"                 About an hour ago   Up About an hour                        k8s_POD_kube-apiserver-k8s-c3-m1_kube-system_6fb1fd1d468dedcf6a62eff4d392685e_0

journalctl -xeu kubelet

root@k8s-c3-m1:~# journalctl -xeu kubelet
Mar 04 15:44:05 k8s-c3-m1 kubelet[1512]: I0304 15:44:05.502619    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:44:15 k8s-c3-m1 kubelet[1512]: I0304 15:44:15.582590    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:44:24 k8s-c3-m1 kubelet[1512]: I0304 15:44:24.567123    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:44:25 k8s-c3-m1 kubelet[1512]: I0304 15:44:25.622999    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:44:35 k8s-c3-m1 kubelet[1512]: I0304 15:44:35.669595    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:44:45 k8s-c3-m1 kubelet[1512]: I0304 15:44:45.742763    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:44:49 k8s-c3-m1 kubelet[1512]: I0304 15:44:49.566491    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:44:55 k8s-c3-m1 kubelet[1512]: I0304 15:44:55.812636    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:44:58 k8s-c3-m1 kubelet[1512]: I0304 15:44:58.566265    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:45:05 k8s-c3-m1 kubelet[1512]: I0304 15:45:05.890388    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:45:15 k8s-c3-m1 kubelet[1512]: I0304 15:45:15.971426    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:45:26 k8s-c3-m1 kubelet[1512]: I0304 15:45:26.043344    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:45:36 k8s-c3-m1 kubelet[1512]: I0304 15:45:36.117636    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:45:36 k8s-c3-m1 kubelet[1512]: I0304 15:45:36.566338    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:45:46 k8s-c3-m1 kubelet[1512]: I0304 15:45:46.190995    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:45:51 k8s-c3-m1 kubelet[1512]: I0304 15:45:51.566093    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:45:56 k8s-c3-m1 kubelet[1512]: I0304 15:45:56.273010    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:46:06 k8s-c3-m1 kubelet[1512]: I0304 15:46:06.346175    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:46:16 k8s-c3-m1 kubelet[1512]: I0304 15:46:16.384087    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach

рд╡реНрдпрд╡рд╕реНрдерд┐рдд

root@k8s-c3-m1:~# systemctl status kubelet          
тЧП kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/lib/systemd/system/kubelet.service; enabled; vendor preset: enabled)
  Drop-In: /etc/systemd/system/kubelet.service.d
           тФФтФА10-kubeadm.conf, 20-etcd-service-manager.conf
   Active: active (running) since Mon 2019-03-04 14:52:31 GMT; 55min ago
     Docs: https://kubernetes.io/docs/home/
 Main PID: 1512 (kubelet)
    Tasks: 17
   Memory: 42.1M
      CPU: 2min 45.226s
   CGroup: /system.slice/kubelet.service
           тФФтФА1512 /usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true

Mar 04 15:46:26 k8s-c3-m1 kubelet[1512]: I0304 15:46:26.450692    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:46:27 k8s-c3-m1 kubelet[1512]: I0304 15:46:27.566498    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:46:36 k8s-c3-m1 kubelet[1512]: I0304 15:46:36.519582    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:46:46 k8s-c3-m1 kubelet[1512]: I0304 15:46:46.621611    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:46:56 k8s-c3-m1 kubelet[1512]: I0304 15:46:56.566111    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:46:56 k8s-c3-m1 kubelet[1512]: I0304 15:46:56.568601    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:46:56 k8s-c3-m1 kubelet[1512]: I0304 15:46:56.706182    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:47:06 k8s-c3-m1 kubelet[1512]: I0304 15:47:06.778864    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:47:16 k8s-c3-m1 kubelet[1512]: I0304 15:47:16.852441    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach
Mar 04 15:47:26 k8s-c3-m1 kubelet[1512]: I0304 15:47:26.893380    1512 kubelet_node_status.go:278] Setting node annotation to enable volume controller attach/detach

рд╕рдмрд╕реЗ рдЙрдкрдпреЛрдЧреА рдЯрд┐рдкреНрдкрдгреА

рдЕрдЧрд░ рдореИрдВ рдЧрд▓рдд рдирд╣реАрдВ рд╣реВрдБ, рддреЛ рдХрдВрдЯреНрд░реЛрд▓-рдкреНрд▓реЗрди рдиреЛрдбреНрд╕ рдкрд░ 20-etcd-service-manager.conf рдбреНрд░реЙрдк-рдЗрди рдореМрдЬреВрдж рдирд╣реАрдВ рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдП

рдбреНрд░реЙрдк-рдЗрди: /etc/systemd/system/kubelet.service.d
тФФтФА10-kubeadm.conf, 20-etcd-service-manager.conf

20-etcd-service-manager.conf рд╣рдЯрд╛рдиреЗ рдкрд░ рдпрд╣ рдХрд╛рдо рдХрд░рддрд╛ рд╣реИ!

рд╕рднреА 18 рдЯрд┐рдкреНрдкрдгрд┐рдпрд╛рдБ

рдирдорд╕реНрддреЗ, @timothysc , рдореИрдВ рджреЗрдЦрддрд╛ рд╣реВрдВ рдХрд┐ рдЖрдкрдиреЗ рдЗрд╕реЗ v1.15 рдХреЗ рд▓рд┐рдП рдПрдХ рдореАрд▓ рдХреЗ рдкрддреНрдерд░ рдореЗрдВ рдЬреЛрдбрд╝рд╛ рд╣реИ, рдХреНрдпрд╛ рдореИрдВ рдПрдХ рдЬреНрдЮрд╛рдд рд╕рдорд╕реНрдпрд╛ рдХреЛ рдорд╛рд░ рд░рд╣рд╛ рд╣реВрдВ?

@hreidar
рдореАрд▓ рдХрд╛ рдкрддреНрдерд░ рдкрд░рд┐рд╡рд░реНрддрди рдХрд╛ рдорддрд▓рдм рд╣реИ рдХрд┐ рдЯреАрдо рдЗрд╕ рдЪрдХреНрд░ рдкрд░ рдХрд╛рдо рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдмреИрдВрдбрд╡рд┐рдбреНрде рд╕реЗ рдмрд╛рд╣рд░ рд╣реИ (рд░рд┐рд▓реАрдЬ 1.14 рдЬрд▓реНрдж рд╣реА рд╣реИ)ред

рдореБрдЭреЗ рд╡рд┐рд╢реНрд╡рд╛рд╕ рдирд╣реАрдВ рд╣реИ рдХрд┐ рд╣рдордиреЗ рдмрд╛рд╣рд░реА рдИрдЯреАрд╕реАрдбреА рд╣рд╛ рдХреНрд▓рд╕реНрдЯрд░ рдХреЗ рдмрд╛рд░реЗ рдореЗрдВ рдПрдХ рд╕рдорд╛рди рд░рд┐рдкреЛрд░реНрдЯ рдкреНрд░рд╛рдкреНрдд рдХреА рд╣реИ рдЬреЛ рд╕реАрдЖрд░рдЖрдИ рд╕реЙрдХреЗрдЯ рд▓рд┐рдЦрдиреЗ рдореЗрдВ рд╕рдХреНрд╖рдо рдирд╣реАрдВ рд╣реИред рднреА, рдореИрдВ рдЕрдкрдиреЗ рд╕реЗрдЯрдЕрдк рдХреЗ рд╕рд╛рде рдПрдХ рд╕рдорд╕реНрдпрд╛ рдЦреЛрдЬрдиреЗ рдХреЗ рд▓рд┐рдП рдкреНрд░рддреАрдд рдирд╣реАрдВ рдХрд░ рд╕рдХрддреЗред

рдпрджрд┐ рдЖрдк рдЕрдкрдиреЗ рдиреЛрдбреНрд╕ рдкрд░ kubeadm init --ignore-preflight-errors=all (рдпрд╛рдиреА рд╕рд┐рдВрдЧрд▓ рдХрдВрдЯреНрд░реЛрд▓ рдкреНрд▓реЗрди рд╕реЗрдЯрдЕрдк рдЪрд▓рд╛рдиреЗ) рдХреА рдХреЛрд╢рд┐рд╢ рдХрд░рддреЗ рд╣реИрдВ рддреЛ рдХреНрдпрд╛ рд╣реЛрдЧрд╛? рдХреНрдпрд╛ рдЖрдкрдХреЛ рд╡рд╣реА рддреНрд░реБрдЯрд┐ рдорд┐рд▓рддреА рд╣реИ?

root @ k8s-c3-m1 : ~ # journalctl -xeu рдХреНрдпреВрдмрд▓реЗрдЯ

рдХреБрдЫ рдФрд░ рдЬреЛ рдХреНрдпреВрдмрд▓реЗрдЯ рд▓реЙрдЧ рдореЗрдВ рджрд┐рд▓рдЪрд╕реНрдк рд╣реИ?

@ neolit123 рдЬрд╛рдирдХрд╛рд░реА рдХреЗ рд▓рд┐рдП рдзрдиреНрдпрд╡рд╛рджред

рдореИрдВ рд╕рд╣рдордд рд╣реВрдВред рдореБрдЭреЗ рдХреБрдЫ рднреА рдЧрд▓рдд рдирд╣реАрдВ рджрд┐рдЦ рд░рд╣рд╛ рд╣реИ рдФрд░ рддреНрд░реБрдЯрд┐ рдХреЗ рдмрд╛рдж рдореИрдВ kubernetes API рдХреЛ рдХреНрд╡реЗрд░реА рдХрд░рдиреЗ рдореЗрдВ рд╕рдХреНрд╖рдо рд╣реВрдВ рдФрд░ рдореИрдВ рдЕрдкрдиреЗ etcd рдХреНрд▓рд╕реНрдЯрд░ рдореЗрдВ k8s рдбреЗрдЯрд╛ рджреЗрдЦ рд╕рдХрддрд╛ рд╣реВрдВред

рдореИрдВ рдХрд▓ рдХреБрдЫ рдФрд░ рдкрд░реАрдХреНрд╖рдг рдХрд░рдиреЗ рдХреА рдХреЛрд╢рд┐рд╢ рдХрд░реВрдВрдЧрд╛ред рдЕрдЧрд░ рдореБрдЭреЗ рдХреБрдЫ рджрд┐рд▓рдЪрд╕реНрдк рд▓рдЧрддрд╛ рд╣реИ рддреЛ рдореИрдВ рд░рд┐рдкреЛрд░реНрдЯ рдХрд░реВрдВрдЧрд╛ред

рдХреНрдпрд╛ рдЖрдк рдЕрдкрдиреЗ рдХреНрдпреВрдм-рдЕрдкреАрдЬрд╝рд░ рдХрдВрдЯреЗрдирд░ @hreidar рдХреЗ рд▓реЙрдЧ рдХреЛ рд╕рд╛рдЭрд╛ рдХрд░ рд╕рдХрддреЗ рд╣реИрдВ

рдЕрдЧрд░ рдореИрдВ рдЧрд▓рдд рдирд╣реАрдВ рд╣реВрдБ, рддреЛ рдХрдВрдЯреНрд░реЛрд▓-рдкреНрд▓реЗрди рдиреЛрдбреНрд╕ рдкрд░ 20-etcd-service-manager.conf рдбреНрд░реЙрдк-рдЗрди рдореМрдЬреВрдж рдирд╣реАрдВ рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдП

рдбреНрд░реЙрдк-рдЗрди: /etc/systemd/system/kubelet.service.d
тФФтФА10-kubeadm.conf, 20-etcd-service-manager.conf

@CodeAholic рдореИрдВ рд▓реЗрдХрд┐рди рдордВрдЧрд▓рд╡рд╛рд░ рддрдХ рдирд╣реАрдВ рдХрд░ рд╕рдХрддрд╛ рдХреНрдпреЛрдВрдХрд┐ рдореИрдВ рдЕрдкрдиреА рджреЛ рд▓рдбрд╝рдХрд┐рдпреЛрдВ рдХреЗ рд╕рд╛рде рдШрд░ рдкрд░ рд╣реВрдБ рдЬреЛ рдЗрд╕ рд╕рдордп рдмреАрдорд╛рд░ рд╣реИрдВред

@fabriziopandini рдЗрд╕ рдмрд╛рдд рдХреА рд╕рдВрднрд╛рд╡рдирд╛ рд╣реИ рдХрд┐ рдореИрдВрдиреЗ рдЗрд╕ рдлрд╛рдЗрд▓ рдХреЛ рджреБрд░реНрдШрдЯрдирд╛ рд╕реЗ рдЙрддреНрдкрдиреНрди рдХрд┐рдпрд╛ рд╣реИ рдХреНрдпреЛрдВрдХрд┐ рдореИрдВ рдПрдХ рд╣реА рдмрд╛рд░ рдореЗрдВ рдиреЛрдбреНрд╕ рддреИрдпрд╛рд░ рдХрд░ рд░рд╣рд╛ рдерд╛ред рдХреНрдпрд╛ рдпрд╣ рдХрд┐рд╕реА рднреА рддрд░рд╣ рд╕реЗ рдЕрдиреНрдп k8 рдШрдЯрдХреЛрдВ рдХреЛ рдкреНрд░рднрд╛рд╡рд┐рдд рдХрд░рддрд╛ рд╣реИ? рдореЗрд░реЗ рдорд╛рдорд▓реЗ рдореЗрдВ рдорд╛рд╕реНрдЯрд░ рдкрд░ etcd рд╕реНрдерд╛рдкрд┐рдд рдирд╣реАрдВ рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдПред

рдирдорд╕реНрддреЗ, @CodeAholic , @fabriziopandini , рдореБрдЭреЗ рдЕрдкрдиреЗ рдкрд░реАрдХреНрд╖рдг рдХреНрд▓рд╕реНрдЯрд░ рдХреЛ рдлрд┐рд░ рд╕реЗ рдХрд░рдирд╛ рдкрдбрд╝рд╛ рдХреНрдпреЛрдВрдХрд┐ рдордВрдЧрд▓рд╡рд╛рд░ рдХреЛ рдХрд╛рдо рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдореЗрд░рд╛ рд╕реЗрдЯрдЕрдк

hreidar@k8s-c3-m1:~# sudo kubeadm init --config /root/kubeadmcfg.yaml
...
...
Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join 10.10.10.76:6443 --token z9bf7c.8tl3crfvd6onov70 --discovery-token-ca-cert-hash sha256:863eb2eab4e4623f5751306f181e00e8fe4c01da00cc8161cdee9f35b0ed944c

hreidar@k8s-c3-m1:~#

рдХреЗрд╡рд▓ рдПрдХ рдЪреАрдЬ рдЬреЛ рдореИрдВрдиреЗ рдЗрд╕ рдмрд╛рд░ рдЕрд▓рдЧ-рдЕрд▓рдЧ рдХреА рд╣реИ рд╡рд╣ рд╣реИ рдореИрдВ рд░реВрдЯ рдХреЗ рдмрдЬрд╛рдп рд╕реБрдбреЛ рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдХреЗ рдХреБрдмреЗрджрдо рдЖрджреЗрд╢ рдЪрд▓рд╛рддрд╛ рд╣реВрдВред рдпрд╣ рдХреБрдЫ рдРрд╕рд╛ рд╣реИ рдЬреЛ рдЪреАрдЬреЛрдВ рдХреЛ рдЦрд░рд╛рдм рдХрд░рдиреЗ рдХреЗ рд▓рд┐рдП рдЬрд╛рдирд╛ рдЬрд╛рддрд╛ рд╣реИ рдпрд╛ ....?

рдпрджрд┐ рдРрд╕рд╛ рд╣реИ рддреЛ рдпрд╣ рд╢рд╛рдпрдж рдХрд╣реАрдВ рджрд╕реНрддрд╛рд╡реЗрдЬ рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдПред
рдЖрдкрдХреА рдорджрдж рдХреЗ рд▓рд┐рдП рдзрдиреНрдпрд╡рд╛рдж, рдореИрдВ рдЗрд╕ рдореБрджреНрджреЗ рдХреЛ рдмрдВрдж рдХрд░ рджреВрдВрдЧрд╛ред

рдореЗрд░реЗ рд╕реНрдерд╛рдиреАрдп рд╕реЗрдЯрдЕрдк рдкрд░ рдПрдХ рд╣реА рдореБрджреНрджрд╛ рдорд┐рд▓рд╛

рдЕрдЧрд░ рдореИрдВ рдЧрд▓рдд рдирд╣реАрдВ рд╣реВрдБ, рддреЛ рдХрдВрдЯреНрд░реЛрд▓-рдкреНрд▓реЗрди рдиреЛрдбреНрд╕ рдкрд░ 20-etcd-service-manager.conf рдбреНрд░реЙрдк-рдЗрди рдореМрдЬреВрдж рдирд╣реАрдВ рд╣реЛрдирд╛ рдЪрд╛рд╣рд┐рдП

рдбреНрд░реЙрдк-рдЗрди: /etc/systemd/system/kubelet.service.d
тФФтФА10-kubeadm.conf, 20-etcd-service-manager.conf

20-etcd-service-manager.conf рд╣рдЯрд╛рдиреЗ рдкрд░ рдпрд╣ рдХрд╛рдо рдХрд░рддрд╛ рд╣реИ!

рдореИрдВрдиреЗ рдЗрд╕ рдореБрджреНрджреЗ рдХреЛ рд╣рд▓ рдХрд┐рдпрд╛ред

docker info | grep -i cgroup
---> рдпрд╣ "systemd" рд╣реЛ рд╕рдХрддрд╛ рд╣реИ

/var/lib/kubelet/config.yaml
---> cgroupDriver: cgroupfs

рдЗрд╕ рд╡рдЬрд╣ рд╕реЗ, рдореИрдВ "рдХреНрдпреВрдмрд▓реЗрдЯ" рдЪрд▓рд╛ рд╕рдХрддрд╛ рд╣реВрдВред рдЗрд╕рд▓рд┐рдП рдореИрдВрдиреЗ рдЗрд╕ рдореВрд▓реНрдп рдХреЗ рд▓рд┐рдП рдорд┐рд▓рд╛рди рдХрд░рдиреЗ рдХреА рдХреЛрд╢рд┐рд╢ рдХреАред

/etc/systemd/system/kubelet.service.d/ рдореЗрдВ
10-kubeadm.conf
20-etcd-service-manager.conf (рдпрджрд┐ рдЖрдк рдмрд╛рд╣рд░реА etcd рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рдирд╛ рдЪрд╛рд╣рддреЗ рд╣реИрдВ, рддреЛ рдпрд╣ рдлрд╝рд╛рдЗрд▓ рд╡рд╣рд╛рдВ рд╕реНрдерд┐рдд рд╣реИ)

10-kubeadm.conf рдореЗрдВ

рдХреЛрдИ рднреА рдЬреЛрдбрд╝рд╛ рдЧрдпрд╛ рд╡рд┐рдХрд▓реНрдк рдЬреИрд╕реЗ "--cgroup-driver = systemd" рдХрд╛рдо рдирд╣реАрдВ рдХрд░реЗрдЧрд╛ред рдЗрд╕рдХреЗ рд▓рд┐рдП рдЖрдкрдХреЛ config.yaml рдореЗрдВ рдЬреЛрдбрд╝рдирд╛ рд╣реЛрдЧрд╛ред

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd

20-etcd-service-manager.conf рдореЗрдВ
рдореИрдВ рдиреАрдЪреЗ рдХреА рддрд░рд╣ "--cgroup-driver = systemd" рдЬреЛрдбрд╝ рд╕рдХрддрд╛ рд╣реВрдВред
ExecStart=/usr/bin/kubelet --address=0.0.0.0 --pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true --cgroup-driver=systemd

рдЙрд╕рдХреЗ рдмрд╛рдж, "kubeadm" рдФрд░ "kubelet" рдХреЛ рдкреБрдирд░рд╛рд░рдВрдн рдХрд░реЗрдВред рдпрд╣ рдХрд╛рдо рдХрд░реЗрдЧрд╛ред

рдореЗрд░реЗ рдмрд╛рд╣рд░реА рд╕реЗрдЯрдЕрдк рдкрд░ рдПрдХ рд╣реА рд╕рдорд╕реНрдпрд╛ рд╣реИ:

I0618 20:52:58.265837   31415 round_trippers.go:419] curl -k -v -XGET  -H "User-Agent: kubeadm/v1.13.4 (linux/amd64) kubernetes/c27b913" -H "Accept: application/json, */*" 'https://k8s-master:60443/api/v1/nodes/10-10-40-93'
I0618 20:52:58.268370   31415 round_trippers.go:438] GET https://k8s-master:60443/api/v1/nodes/10-10-40-93 404 Not Found in 2 milliseconds
I0618 20:52:58.268389   31415 round_trippers.go:444] Response Headers:
I0618 20:52:58.268396   31415 round_trippers.go:447]     Content-Type: application/json
I0618 20:52:58.268402   31415 round_trippers.go:447]     Content-Length: 192
I0618 20:52:58.268409   31415 round_trippers.go:447]     Date: Tue, 18 Jun 2019 12:52:58 GMT
I0618 20:52:58.268430   31415 request.go:942] Response Body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"nodes \"10-10-40-93\" not found","reason":"NotFound","details":{"name":"10-10-40-93","kind":"nodes"},"code":404}
error execution phase upload-config/kubelet: Error writing Crisocket information for the control-plane node: timed out waiting for the condition

рдореИрдВ Ubuntu 16.04.2 LTS рдкрд░ kubeadm v1.15.0 рдХрд╛ рдЙрдкрдпреЛрдЧ рдХрд░рддрд╛ рд╣реВрдВ рдФрд░ sudo kubeadm init --pod-network-cidr=10.244.0.0/16 рдЪрд▓рдиреЗ рдкрд░ рдЗрд╕реА рдореБрджреНрджреЗ рдкрд░ред kubeadm v1.15.1 рдПрдХрджрдо рд╕рд╣реА рдХрд╛рдо рдХрд░рддрд╛ рд╣реИред

рдореБрдЭреЗ рдпрд╣ рддрдм рдорд┐рд▓рд╛ рдЬрдм рдореИрдВ init рднрд╛рдЧрд╛ рдерд╛, рдлрд┐рд░ рдореИрдиреНрдпреБрдЕрд▓ рд░реВрдк рд╕реЗ рдорд╢реАрди рдХреЛ рд╕рд╛рдл рдХрд┐рдпрд╛, рдФрд░ рдлрд┐рд░ рдЗрд╕реЗ рдлрд┐рд░ рд╕реЗ рдЪрд▓рд╛рдпрд╛, рдЬрдмрдХрд┐ рдкрд░реАрдХреНрд╖рдг рдЖрджрд┐ рдореЗрдВ sudo kubectl reset рдЪрд▓ рд░рд╣рд╛ рдерд╛, рдЗрд╕рдХреЗ рдмрд╛рдж рдлрд┐рд░ рд╕реЗ init, sudo kubeadm init --pod-network-cidr=10.244.0.0/16 etc, рдареАрдХ рдХрд╛рдо рдХрд┐рдпрд╛ред

sudo kubectl reset

рд╕рд╣реА рдХрдорд╛рдВрдб sudo kubeadm reset рд╣реИ, рдФрд░ рдХрдо рд╕реЗ рдХрдо /etc/cni/net.d рдФрд░ $HOME/.kube/config рдХреЛ рд╣рдЯрд╛рдиреЗ рдХреЗ рд▓рд┐рдП рдЗрд╕рдХреЗ рдЖрдЙрдЯрдкреБрдЯ рдХрд╛ рдкрд╛рд▓рди рдХрд░реЗрдВред

[kubelet-check] Initial timeout of 40s passed.
error execution phase kubelet-start: error uploading crisocket: timed out waiting for the condition

рдЗрд╕ рдореБрджреНрджреЗ рдХреЛ рдХреИрд╕реЗ рд╣рд▓ рдХрд┐рдпрд╛ рдЬрд╛рдП

рдЗрд╕ рдореБрджреНрджреЗ рдХреЛ рдХреИрд╕реЗ рд╣рд▓ рдХрд┐рдпрд╛ рдЬрд╛рдП

рдмрд╣реБрдд рдХрдо рдЬрд╛рдирдХрд╛рд░реА

рд╕рдорд░реНрдерди рдЪреИрдирд▓ рдЖрдЬрд╝рдорд╛рдПрдВ:
https://github.com/kubernetes/kubernetes/blob/master/SUPPORT.md

рдпрджрд┐ рдХреЛрдИ рдЧреНрд░рд╛рд╣рдХ рдЯрд╛рдЗрдордЖрдЙрдЯ рдХреЗ рдХрд╛рд░рдг рдХрд┐рд╕реА рдиреЛрдб рдСрдмреНрдЬреЗрдХреНрдЯ (рдЬреИрд╕реЗ CRI рд╕реЙрдХреЗрдЯ рдЬрд╛рдирдХрд╛рд░реА) рдХреЛ рдЬрд╛рдирдХрд╛рд░реА рдирд╣реАрдВ рд▓рд┐рдЦ рд╕рдХрддрд╛ рд╣реИ, рддреЛ рдХреНрд▓рд╕реНрдЯрд░ рдореЗрдВ рдПрдкреАрдЖрдИ-рд╕рд░реНрд╡рд░ рдХреЗ рд╕рд╛рде рдХреБрдЫ рдареАрдХ рдирд╣реАрдВ рд╣реИред

рдХреНрдпрд╛ рдпрд╣ рдкреГрд╖реНрда рдЙрдкрдпреЛрдЧреА рдерд╛?
0 / 5 - 0 рд░реЗрдЯрд┐рдВрдЧреНрд╕

рд╕рдВрдмрдВрдзрд┐рдд рдореБрджреНрджреЛрдВ

shenshouer picture shenshouer  ┬╖  3рдЯрд┐рдкреНрдкрдгрд┐рдпрд╛рдБ

kvaps picture kvaps  ┬╖  3рдЯрд┐рдкреНрдкрдгрд┐рдпрд╛рдБ

helphi picture helphi  ┬╖  3рдЯрд┐рдкреНрдкрдгрд┐рдпрд╛рдБ

tmjd picture tmjd  ┬╖  4рдЯрд┐рдкреНрдкрдгрд┐рдпрд╛рдБ

bruceauyeung picture bruceauyeung  ┬╖  4рдЯрд┐рдкреНрдкрдгрд┐рдпрд╛рдБ