5-7 About kubectl apply -f calico.yaml

Source: 5-7 Network Plugin - Calico_1

yl_testimooc3804939

2022-01-04

[root@node-1 ~]# kubectl apply -f calico.yaml
configmap/calico-config configured
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org configured
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers configured
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers unchanged
clusterrole.rbac.authorization.k8s.io/calico-node configured
clusterrolebinding.rbac.authorization.k8s.io/calico-node unchanged
daemonset.apps/calico-node configured
serviceaccount/calico-node unchanged
deployment.apps/calico-kube-controllers configured
serviceaccount/calico-kube-controllers unchanged
poddisruptionbudget.policy/calico-kube-controllers unchanged


[root@node-1 ~]# kubectl get po -n kube-system
NAME                                       READY   STATUS              RESTARTS   AGE
calico-kube-controllers-558995777d-kh2l7   0/1     ContainerCreating   0          67m
calico-node-88lc4                          0/1     CrashLoopBackOff    21         67m
calico-node-g4shr                          0/1     CrashLoopBackOff    21         67m
nginx-proxy-node-3                         1/1     Running             1          134m

[root@node-1 ~]# kubectl get nodes -o wide
NAME     STATUS   ROLES    AGE    VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION           CONTAINER-RUNTIME
node-2   Ready    <none>   121m   v1.20.2   172.16.1.22   <none>        CentOS Linux 7 (Core)   3.10.0-1160.el7.x86_64   containerd://1.4.3
node-3   Ready    <none>   121m   v1.20.2   172.16.1.23   <none>        CentOS Linux 7 (Core)   3.10.0-1160.el7.x86_64   containerd://1.4.3


[root@node-2 ~]# crictl images
IMAGE                                                          TAG                 IMAGE ID            SIZE
docker.io/calico/cni                                           v3.19.1             5749e8b276f9b       48.3MB
docker.io/calico/cni                                           v3.21.2             4c5c325303915       81.7MB
docker.io/calico/kube-controllers                              v3.21.2             b206524060289       54.8MB
docker.io/calico/node                                          v3.19.1             c4d75af7e098e       58.7MB
docker.io/calico/node                                          v3.21.2             f1bca4d4ced28       74.1MB
docker.io/calico/pod2daemon-flexvol                            v3.19.1             5660150975fb8       9.33MB
docker.io/calico/pod2daemon-flexvol                            v3.21.2             7778dd57e5063       9.17MB
registry.cn-hangzhou.aliyuncs.com/kubernetes-kubespray/pause   3.2                 80d28bedfe5de       298kB
k8s.gcr.io/pause                                               3.2                 80d28bedfe5de       298kB

[root@node-3 ~]# crictl images
IMAGE                                                          TAG                 IMAGE ID            SIZE
docker.io/calico/cni                                           v3.19.1             5749e8b276f9b       48.3MB
docker.io/calico/cni                                           v3.21.2             4c5c325303915       81.7MB
docker.io/calico/kube-controllers                              v3.21.2             b206524060289       54.8MB
docker.io/calico/node                                          v3.19.1             c4d75af7e098e       58.7MB
docker.io/calico/node                                          v3.21.2             f1bca4d4ced28       74.1MB
docker.io/calico/pod2daemon-flexvol                            v3.19.1             5660150975fb8       9.33MB
docker.io/calico/pod2daemon-flexvol                            v3.21.2             7778dd57e5063       9.17MB
docker.io/calico/typha                                         v3.19.1             1680f4d98085a       24.1MB
docker.io/library/nginx                                        1.19                f0b8a9a541369       53.7MB
k8s.gcr.io/pause                                               3.2                 80d28bedfe5de       298kB
registry.cn-hangzhou.aliyuncs.com/kubernetes-kubespray/pause   3.2                 80d28bedfe5de       298kB



[root@node-2 ~]# journalctl -f

Jan 04 03:13:09 node-2 kubelet[2409]: E0104 03:13:09.655827    2409 kuberuntime_sandbox.go:70] 
CreatePodSandbox for pod "calico-kube-controllers-558995777d-8nflx_kube-system
(4d2fbb59-cc98-4939-9f7e-7dcba17c6e97)" failed: rpc error: code = Unknown 
desc = failed to setup network for sandbox "bc484fc3d5056b856b719050d82d5c6e829a72e7b47227d19c3fe896c9a3583c": 
error getting ClusterInformation: 
Get "https://[10.233.0.1]:443/apis/crd.projectcalico.org/v1/clusterinformations/default": 
x509: certificate is valid for 172.16.1.21, 172.16.1.22, 172.16.1.23, 127.0.0.1, not 10.233.0.1

Jan 04 03:13:09 node-2 kubelet[2409]: E0104 03:13:09.655877    2409 kuberuntime_manager.go:755] 
createPodSandbox for pod "calico-kube-controllers-558995777d-8nflx_kube-system
(4d2fbb59-cc98-4939-9f7e-7dcba17c6e97)" failed: rpc error: code = Unknown 
desc = failed to setup network for sandbox "bc484fc3d5056b856b719050d82d5c6e829a72e7b47227d19c3fe896c9a3583c": 
error getting ClusterInformation: 
Get "https://[10.233.0.1]:443/apis/crd.projectcalico.org/v1/clusterinformations/default": 
x509: certificate is valid for 172.16.1.21, 172.16.1.22, 172.16.1.23, 127.0.0.1, not 10.233.0.1

Jan 04 03:13:09 node-2 kubelet[2409]: E0104 03:13:09.655967    2409 pod_workers.go:191] 
Error syncing pod 4d2fbb59-cc98-4939-9f7e-7dcba17c6e97 
("calico-kube-controllers-558995777d-8nflx_kube-system(4d2fbb59-cc98-4939-9f7e-7dcba17c6e97)"), 
skipping: failed to "CreatePodSandbox" for "calico-kube-controllers-558995777d-8nflx_kube-system
(4d2fbb59-cc98-4939-9f7e-7dcba17c6e97)" 
with CreatePodSandboxError: "CreatePodSandbox for 
pod \"calico-kube-controllers-558995777d-8nflx_kube-system(4d2fbb59-cc98-4939-9f7e-7dcba17c6e97)\" 
failed: rpc error: code = Unknown desc = failed to setup network for sandbox 
\"bc484fc3d5056b856b719050d82d5c6e829a72e7b47227d19c3fe896c9a3583c\": 
error getting ClusterInformation: Get \"https://[10.233.0.1]:443/apis/crd.projectcalico.org/v1/clusterinformations/default\": 
x509: certificate is valid for 172.16.1.21, 172.16.1.22, 172.16.1.23, 127.0.0.1, not 10.233.0.1"
[root@node-3 ~]# journalctl -f

Jan 04 03:15:33 node-3 kubelet[2531]: E0104 03:15:33.468978    2531 cri_stats_provider.go:691] 
failed updating cpu usage nano core: zero or negative interval (1641237333465683928 - 1641264006409590269)

Jan 04 03:15:33 node-3 kubelet[2531]: E0104 03:15:33.679163    2531 pod_workers.go:191] 
Error syncing pod 18aacfc6-1d9e-4dc8-852c-f55c5da25b87 
("calico-typha-98b98f7b8-bw4d5_kube-system(18aacfc6-1d9e-4dc8-852c-f55c5da25b87)"), 
skipping: failed to "StartContainer" for "calico-typha" with CrashLoopBackOff: 
"back-off 5m0s restarting failed container=calico-typha 
pod=calico-typha-98b98f7b8-bw4d5_kube-system(18aacfc6-1d9e-4dc8-852c-f55c5da25b87)"

Jan 04 03:15:35 node-3 kubelet[2531]: I0104 03:15:35.103070    2531 prober.go:117] 
Liveness probe for "calico-node-85jr2_kube-system
(944c3206-9a04-4d2c-ac06-9a09e92328a2):calico-node" failed (failure): 
calico/node is not ready: Felix is not live: Get "http://localhost:9099/liveness": 
dial tcp 127.0.0.1:9099: connect: connection refused

Jan 04 03:15:38 node-3 kubelet[2531]: I0104 03:15:38.431855    2531 prober.go:117] 
Readiness probe for "calico-node-85jr2_kube-system(944c3206-9a04-4d2c-ac06-9a09e92328a2):
calico-node" failed (failure): calico/node is not ready: BIRD is not ready: 
Error querying BIRD: unable to connect to BIRDv4 socket: 
dial unix /var/run/bird/bird.ctl: connect: no such file or directory

[root@node-2 ~]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube Proxy
   Loaded: loaded (/etc/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2022-01-04 10:38:38 CST; 7h left
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 795 (kube-proxy)
    Tasks: 6
   Memory: 49.6M
   CGroup: /system.slice/kube-proxy.service
           └─795 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy-config.yaml

Jan 04 10:38:47 node-2 kube-proxy[795]: I0104 10:38:47.459277     795 conntrack.go:52] Setting nf_conntrack_max to 131072
Jan 04 10:38:47 node-2 kube-proxy[795]: I0104 10:38:47.478490     795 conntrack.go:83] Setting conntrack hashsize to 32768
Jan 04 10:38:47 node-2 kube-proxy[795]: I0104 10:38:47.481362     795 conntrack.go:100] Set sysctl 'net/netfilter/nf_connt...o 86400
Jan 04 10:38:47 node-2 kube-proxy[795]: I0104 10:38:47.481391     795 conntrack.go:100] Set sysctl 'net/netfilter/nf_connt...to 3600
Jan 04 10:38:47 node-2 kube-proxy[795]: I0104 10:38:47.483502     795 config.go:315] Starting service config controller
Jan 04 10:38:47 node-2 kube-proxy[795]: I0104 10:38:47.483521     795 shared_informer.go:240] Waiting for caches to sync f... config
Jan 04 10:38:47 node-2 kube-proxy[795]: I0104 10:38:47.483550     795 config.go:224] Starting endpoint slice config controller
Jan 04 10:38:47 node-2 kube-proxy[795]: I0104 10:38:47.483555     795 shared_informer.go:240] Waiting for caches to sync f... config
Jan 04 10:38:47 node-2 kube-proxy[795]: I0104 10:38:47.584043     795 shared_informer.go:247] Caches are synced for endpoi... config
Jan 04 10:38:47 node-2 kube-proxy[795]: I0104 10:38:47.584165     795 shared_informer.go:247] Caches are synced for service config
Hint: Some lines were ellipsized, use -l to show in full.
[root@node-2 ~]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/etc/systemd/system/kubelet.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2022-01-04 10:39:13 CST; 7h left
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 2409 (kubelet)
    Tasks: 0
   Memory: 35.5M
   CGroup: /system.slice/kubelet.service
           └─2409 /usr/local/bin/kubelet --config=/etc/kubernetes/kubelet-config.yaml --container-runtime=remote --container-runt...

Jan 04 03:18:26 node-2 kubelet[2409]: E0104 03:18:26.551097    2409 pod_workers.go:191] Error syncing pod be3e5c3c-069e-4756-8675...
Jan 04 03:18:33 node-2 kubelet[2409]: I0104 03:18:33.550115    2409 kuberuntime_manager.go:439] No sandbox for pod "calico...new one
Jan 04 03:18:33 node-2 kubelet[2409]: E0104 03:18:33.656473    2409 remote_runtime.go:116] RunPodSandbox from runtime service fai...
Jan 04 03:18:33 node-2 kubelet[2409]: E0104 03:18:33.656511    2409 kuberuntime_sandbox.go:70] CreatePodSandbox for pod "c...e5e0fe6
Jan 04 03:18:33 node-2 kubelet[2409]: E0104 03:18:33.656521    2409 kuberuntime_manager.go:755] createPodSandbox for pod "...ee5e0fe
Jan 04 03:18:33 node-2 kubelet[2409]: E0104 03:18:33.656552    2409 pod_workers.go:191] Error syncing pod 4d2fbb59-cc98-49...kube-sy
Jan 04 03:18:34 node-2 kubelet[2409]: I0104 03:18:34.047379    2409 setters.go:86] Using node IP: "172.16.1.22"
Jan 04 03:18:41 node-2 kubelet[2409]: I0104 03:18:41.549763    2409 scope.go:95] [topologymanager] RemoveContainer - Conta...f1f9f82
Jan 04 03:18:42 node-2 kubelet[2409]: I0104 03:18:42.076818    2409 kubelet.go:1926] SyncLoop (PLEG): "calico-node-kzdf9_kube-sys...
Jan 04 03:18:44 node-2 kubelet[2409]: I0104 03:18:44.072525    2409 setters.go:86] Using node IP: "172.16.1.22"
Hint: Some lines were ellipsized, use -l to show in full.
[root@node-3 ~]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/etc/systemd/system/kubelet.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2022-01-04 10:39:15 CST; 7h left
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 2531 (kubelet)
    Tasks: 0
   Memory: 40.9M
   CGroup: /system.slice/kubelet.service
           └─2531 /usr/local/bin/kubelet --config=/etc/kubernetes/kubelet-config.yaml --container-runtime=remote --container-runt...

Jan 04 03:18:07 node-3 kubelet[2531]: E0104 03:18:07.677822    2531 pod_workers.go:191] Error syncing pod 944c3206-9a04-4d2c-ac06...
Jan 04 03:18:08 node-3 kubelet[2531]: I0104 03:18:08.539757    2531 setters.go:86] Using node IP: "172.16.1.23"
Jan 04 03:18:13 node-3 kubelet[2531]: E0104 03:18:13.812854    2531 cri_stats_provider.go:691] failed updating cpu usage n...590269)
Jan 04 03:18:18 node-3 kubelet[2531]: I0104 03:18:18.593744    2531 setters.go:86] Using node IP: "172.16.1.23"
Jan 04 03:18:18 node-3 kubelet[2531]: I0104 03:18:18.598780    2531 kubelet_getters.go:176] "Pod status updated" pod="kube...Running
Jan 04 03:18:18 node-3 kubelet[2531]: I0104 03:18:18.677989    2531 scope.go:95] [topologymanager] RemoveContainer - Conta...9265509
Jan 04 03:18:18 node-3 kubelet[2531]: E0104 03:18:18.678389    2531 pod_workers.go:191] Error syncing pod 944c3206-9a04-4d2c-ac06...
Jan 04 03:18:21 node-3 kubelet[2531]: I0104 03:18:21.678132    2531 scope.go:95] [topologymanager] RemoveContainer - Conta...cd747d4
Jan 04 03:18:21 node-3 kubelet[2531]: E0104 03:18:21.678353    2531 pod_workers.go:191] Error syncing pod 18aacfc6-1d9e-4dc8-852c...
Jan 04 03:18:23 node-3 kubelet[2531]: E0104 03:18:23.825785    2531 cri_stats_provider.go:691] failed updating cpu usage n...590269)
Hint: Some lines were ellipsized, use -l to show in full.
[root@node-3 ~]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube Proxy
   Loaded: loaded (/etc/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
   Active: active (running) since Tue 2022-01-04 10:38:19 CST; 7h left
     Docs: https://github.com/kubernetes/kubernetes
 Main PID: 813 (kube-proxy)
    Tasks: 6
   Memory: 46.6M
   CGroup: /system.slice/kube-proxy.service
           └─813 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy-config.yaml

Jan 04 10:38:57 node-3 kube-proxy[813]: E0104 10:38:57.830170     813 reflector.go:138] k8s.io/client-go/informers/factory.go:134...
Jan 04 10:39:00 node-3 kube-proxy[813]: E0104 10:39:00.375374     813 reflector.go:138] k8s.io/client-go/informers/factory.go:134...
Jan 04 10:39:00 node-3 kube-proxy[813]: E0104 10:39:00.416638     813 reflector.go:138] k8s.io/client-go/informers/factory.go:134...
Jan 04 10:39:02 node-3 kube-proxy[813]: E0104 10:39:02.825677     813 event.go:273] Unable to write event: '&v1.Event{Type...stamp:v
Jan 04 10:39:04 node-3 kube-proxy[813]: E0104 10:39:04.365442     813 reflector.go:138] k8s.io/client-go/informers/factory.go:134...
Jan 04 10:39:06 node-3 kube-proxy[813]: E0104 10:39:06.279529     813 reflector.go:138] k8s.io/client-go/informers/factory.go:134...
Jan 04 10:39:12 node-3 kube-proxy[813]: E0104 10:39:12.774175     813 reflector.go:138] k8s.io/client-go/informers/factory.go:134...
Jan 04 10:39:12 node-3 kube-proxy[813]: E0104 10:39:12.827544     813 event.go:273] Unable to write event: '&v1.Event{Type...stamp:v
Jan 04 10:39:18 node-3 kube-proxy[813]: I0104 10:39:18.393627     813 shared_informer.go:247] Caches are synced for service config
Jan 04 10:39:34 node-3 kube-proxy[813]: I0104 10:39:34.493388     813 shared_informer.go:247] Caches are synced for endpoi... config
Hint: Some lines were ellipsized, use -l to show in full.

Question 1: After running kubectl get po -n kube-system, several pods never reach the READY (1/1) state, even though I pulled all the images in advance.
Question 2: The errors in the journalctl -f output.
Question 3: The errors in the systemctl status kubelet/kube-proxy output.

Note: Teacher, I'm really sorry. I've already searched Baidu and GitHub for all of these problems and tried change after change, with no effect. Those pods simply never reach the normal READY state, and I don't understand how these three issues are related.

Environment info below (all other software versions and config files are the same as the teacher's):

MASTERS=(node-1 node-2)
WORKERS=(node-2 node-3)
ETCDS=(node-1 node-2 node-3)

cat /etc/redhat-release 
CentOS Linux release 7.9.2009 (Core)

uname -r
3.10.0-1160.el7.x86_64

1 Answer

刘果国

2022-01-04

Unfortunately, this error basically means you have to start over. Focus on this error: Get "https://[10.233.0.1]:443/apis/crd.projectcalico.org/v1/clusterinformations/default": x509: certificate is valid for 172.16.1.21, 172.16.1.22, 172.16.1.23, 127.0.0.1, not 10.233.0.1

It means the list of valid IP addresses in the apiserver certificate does not include 10.233.0.1, so you need to regenerate the certificates.
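A minimal sketch of how to verify and fix this, assuming a cfssl-based manual install with the apiserver certificate at /etc/kubernetes/pki/kubernetes.pem and its CSR in kubernetes-csr.json (both paths are assumptions; adjust them to your own layout). 10.233.0.1 is normally the first IP of the Service CIDR, i.e. the ClusterIP of the kubernetes Service:

# Check which IPs the current apiserver certificate is valid for:
[root@node-1 ~]# openssl x509 -in /etc/kubernetes/pki/kubernetes.pem -noout -text | grep -A1 'Subject Alternative Name'

# Confirm the in-cluster apiserver address that must be covered:
[root@node-1 ~]# kubectl get svc kubernetes

# If 10.233.0.1 is missing from the SANs, add it to the "hosts" list of the
# CSR (e.g. in kubernetes-csr.json):
#   "hosts": ["127.0.0.1", "172.16.1.21", "172.16.1.22", "172.16.1.23", "10.233.0.1", ...]
# then regenerate the certificate with cfssl, redistribute it to all master
# nodes, and restart kube-apiserver before re-applying calico.yaml.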

查看课程