sudo kubeadm init --upload-certs --config=/home/weiyigeek/k8s-init/kubeadm-init-config.yaml -v 5 | tee kubeadm_init.log # W1104 21:29:36.119447 198575 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io] # [init] Using Kubernetes version: v1.20.1 # [preflight] Running pre-flight checks # [preflight] Pulling images required for setting up a Kubernetes cluster # [preflight] This might take a minute or two, depending on the speed of your internet connection # [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' # [certs] Using certificateDir folder "/etc/kubernetes/pki" # ..... # [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace # [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key # [addons] Applied essential addon: CoreDNS # [addons] Applied essential addon: kube-proxy
# Your Kubernetes control-plane has initialized successfully! # 表示控制平面初始化成功
# 部署集群 pod 网络可以选择 flannel # Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: # https://kubernetes.io/docs/concepts/cluster-administration/addons/ # 例如 安装 flannel 网络插件 kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml podsecuritypolicy.policy/psp.flannel.unprivileged created clusterrole.rbac.authorization.k8s.io/flannel created clusterrolebinding.rbac.authorization.k8s.io/flannel created serviceaccount/flannel created configmap/kube-flannel-cfg created daemonset.apps/kube-flannel-ds created
# (1) 你现在可以以root身份加入任意数量的控制平面节点,在每个节点上运行以下命令: kubeadm join weiyigeek.k8s:6443 --token 123456.httpweiyigeektop \ --discovery-token-ca-cert-hash sha256:95e1bb846a09a4523be6c1ee6d3860eec1dcfdd16200efec5177ff25a1de49a6 \ --control-plane --certificate-key e05180fc473a8b89e4616412dac61b95cf02808fe1a27f9f72c2be921acc63f8 Please note that the certificate-key gives access to cluster sensitive data, keep it secret! As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use "kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
# (2) 你可以加入任意数量的worker节点,在每个worker节点上以root用户运行如下命令: sudo kubeadm join weiyigeek.k8s:6443 --token 123456.httpweiyigeektop --discovery-token-ca-cert-hash sha256:95e1bb846a09a4523be6c1ee6d3860eec1dcfdd16200efec5177ff25a1de49a6 [sudo] password for weiyigeek: [preflight] Reading configuration from the cluster... [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" [kubelet-start] Starting the kubelet [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
# This node has joined the cluster: # 表示该节点已经加入到集群中
# Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# Install the keepalived health-check script: it probes for a running haproxy
# process and, if haproxy is gone, stops keepalived so the VIP fails over to a
# healthy control-plane node. Heredoc delimiter is quoted ('EOF'), so the body
# is written literally with no variable expansion.
sudo tee /etc/keepalived/check_apiserver.sh <<'EOF'
#!/bin/bash
err=0
# Probe up to 3 times, 1 second apart, to tolerate a transient haproxy restart.
for k in {1..3}
do
  check_code=$(pgrep haproxy)
  if [[ $check_code == "" ]]; then
    err=$((err + 1))   # arithmetic expansion instead of deprecated 'expr'
    sleep 1
    continue
  else
    err=0
    break
  fi
done

if [[ $err != "0" ]]; then
  # haproxy never came back: log the action, then stop keepalived so the
  # virtual IP moves to another node. (Fixed: original had echo"..." with a
  # missing space, which fails with "command not found" at runtime.)
  echo "systemctl stop keepalived"
  /usr/bin/systemctl stop keepalived
  exit 1
else
  exit 0
fi
EOF
sudo chmod +x /etc/keepalived/check_apiserver.sh
# (2) 初始化Master节点控制平面以及Worker加入的参数生成(# 根据您服务器网速的情况,您需要等候 3 - 10 分钟建议采用下面备注中的操作) sudo kubeadm init --config=/home/weiyigeek/k8s-init/kubeadm-init-config.yaml --upload-certs | tee kubeadm_init.log # [certs] Using certificateDir folder "/etc/kubernetes/pki" # [certs] Generating "ca" certificate and key # [certs] Generating "apiserver" certificate and key # [certs] apiserver serving cert is signed for DNS names [weiyigeek-107 weiyigeek-lb-vip.k8s kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.107 192.168.1.110] # [certs] Generating "apiserver-kubelet-client" certificate and key # [certs] Generating "front-proxy-ca" certificate and key # [certs] Generating "front-proxy-client" certificate and key # [certs] Generating "etcd/ca" certificate and key # [certs] Generating "etcd/server" certificate and key # [certs] etcd/server serving cert is signed for DNS names [weiyigeek-107 localhost] and IPs [192.168.1.107 127.0.0.1 ::1] # [certs] Generating "etcd/peer" certificate and key # [certs] etcd/peer serving cert is signed for DNS names [weiyigeek-107 localhost] and IPs [192.168.1.107 127.0.0.1 ::1] # [certs] Generating "etcd/healthcheck-client" certificate and key # [certs] Generating "apiserver-etcd-client" certificate and key # [certs] Generating "sa" key and public key # [kubeconfig] Using kubeconfig folder "/etc/kubernetes" # [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address # [kubeconfig] Writing "admin.conf" kubeconfig file # [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address # [kubeconfig] Writing "kubelet.conf" kubeconfig file # [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address # [kubeconfig] Writing "controller-manager.conf" kubeconfig file # [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the 
controlplane address # [kubeconfig] Writing "scheduler.conf" kubeconfig file # [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" # [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" # [kubelet-start] Starting the kubelet # [control-plane] Using manifest folder "/etc/kubernetes/manifests" # [control-plane] Creating static Pod manifest for "kube-apiserver" # [control-plane] Creating static Pod manifest for "kube-controller-manager" # [control-plane] Creating static Pod manifest for "kube-scheduler" # [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" # [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s # [apiclient] All control plane components are healthy after 21.750931 seconds # [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace # [kubelet] Creating a ConfigMap "kubelet-config-1.19" in namespace kube-system with the configuration for the kubelets in the cluster # [upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace # [upload-certs] Using certificate key: # bb5b2f0b287d35e179ef4efawwww9f61a38f62343a9b06fc143e3b # [mark-control-plane] Marking the node weiyigeek-107 as control-plane by adding the label "node-role.kubernetes.io/master=''" # [mark-control-plane] Marking the node weiyigeek-107 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] # [bootstrap-token] Using token: 2021wq.httpweiyigeektop # [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles # [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes # [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials # 
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token # [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster # [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace # [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key # [addons] Applied essential addon: CoreDNS # [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address # [addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
# (3) 部署到集群中 kubectl apply -f calico-etcd.yaml # secret/calico-etcd-secrets created # configmap/calico-config created # clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created # clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created # clusterrole.rbac.authorization.k8s.io/calico-node created # clusterrolebinding.rbac.authorization.k8s.io/calico-node created # daemonset.apps/calico-node created # serviceaccount/calico-node created # deployment.apps/calico-kube-controllers created # serviceaccount/calico-kube-controllers created # poddisruptionbudget.policy/calico-kube-controllers created
# 方式1.不推荐(配置标签以及标签选择需要添加参数,比较麻烦) $ kubectl run nginx-deployment --image=harbor.weiyigeek.top/test/nginx:v2.2 --port=80 # pod/nginx-deployment created
$ kubectl get pod -o wide # NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES # nginx-deployment 1/1 Running 0 108s 10.244.1.2 k8s-node-4 <none> <none>
$ kubectl apply -f nginx-deployment.yaml # deployment.apps/nginx-deployment created
$ kubectl get deployment # NAME READY UP-TO-DATE AVAILABLE AGE # nginx-deployment 1/1 1 1 87s # weiyigeek@ubuntu:~/nginx$ kubectl get pod # NAME READY STATUS RESTARTS AGE # nginx-deployment-7f5d9779c6-flmsf 1/1 Running 0 92s # weiyigeek@ubuntu:~/nginx$ kubectl get pod -o wide # NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES # nginx-deployment-7f5d9779c6-flmsf 1/1 Running 0 99s 10.244.1.4 k8s-node-4 <none> <none>
$ kubectl delete pod nginx-deployment-7f5d9779c6-flmsf # pod "nginx-deployment-7f5d9779c6-flmsf" deleted $ kubectl get pod -o wide # NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES # nginx-deployment-7f5d9779c6-hhl7k 1/1 Running 0 60s 10.244.1.5 k8s-node-4 <none> <none>
$ kubectl scale --replicas=3 deployment/nginx-deployment # deployment.apps/nginx-deployment scaled $ kubectl get pod -o wide # NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES # nginx-deployment-7f5d9779c6-dr5h8 1/1 Running 0 4s 10.244.1.7 k8s-node-4 <none> <none> # nginx-deployment-7f5d9779c6-hhl7k 1/1 Running 0 109s 10.244.1.5 k8s-node-4 <none> <none> # nginx-deployment-7f5d9779c6-sk2f4 1/1 Running 0 4s 10.244.1.6 k8s-node-4 <none> <none>
方式1.请访问本博主的B站【WeiyiGeek】首页关注UP主, 将自动随机获取解锁验证码。
Method 2. Please visit 【My Twitter】. There is an article verification code on the homepage.
方式3.扫一扫下方二维码,关注本站官方公众号
回复:验证码
将获取解锁(有效期7天)本站所有技术文章哟!