1. Overview

192.168.33.11 etcd cluster node 1
192.168.33.12 etcd cluster node 2
192.168.33.13 etcd cluster node 3
192.168.33.21 apiserver cluster node 1; also runs kubelet and dockerd, labeled master. keepalived + haproxy installed, with KUBE_MASTER_VIP="https://192.168.33.100:8443"
192.168.33.22 apiserver cluster node 2; also runs kubelet and dockerd, labeled master. keepalived + haproxy installed, with KUBE_MASTER_VIP="https://192.168.33.100:8443"
192.168.33.23 apiserver cluster node 3; also runs kubelet and dockerd, labeled master. keepalived + haproxy installed, with KUBE_MASTER_VIP="https://192.168.33.100:8443"
192.168.33.26 runs only kubelet and dockerd, labeled worker
192.168.33.27 runs only kubelet and dockerd, labeled worker
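
The kubectl commands later on refer to the nodes as n21.dev, n27.dev and so on. A convenience sketch, assuming the n<i>/n<i>.dev naming scheme from the Vagrantfile below, that makes those names resolvable on every machine:

cat >> /etc/hosts <<EOF
192.168.33.11 n11 n11.dev
192.168.33.12 n12 n12.dev
192.168.33.13 n13 n13.dev
192.168.33.21 n21 n21.dev
192.168.33.22 n22 n22.dev
192.168.33.23 n23 n23.dev
192.168.33.26 n26 n26.dev
192.168.33.27 n27 n27.dev
EOF
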
#!/bin/bash
# Run on every machine
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.163.com/.help/CentOS7-Base-163.repo
rm -f /etc/yum.repos.d/epel.repo
yum clean all
yum makecache
yum install -y epel-release
yum clean all
yum makecache
#yum repolist enabled
#yum repolist all
yum update -y
yum upgrade -y
yum install -y net-tools curl wget git vim jq socat conntrack ipvsadm ipset sysstat libseccomp gcc gcc-c++ cmake make bzip2 automake autoconf libtool flex bison pcre-devel zlib-devel openssl openssl-devel lsof htop
yum install -y libnfnetlink-devel libnl3 libnl3-devel systemd-devel
yum install -y bridge-utils bind-utils
yum install -y device-mapper-persistent-data lvm2
sudo timedatectl set-timezone Asia/Shanghai
cat > /tmp/mysysctl.conf <<EOF
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
fs.inotify.max_user_watches=89100
user.max_user_namespaces=15000
vm.max_map_count=262144
EOF
sudo cp /tmp/mysysctl.conf  /etc/sysctl.d/mysysctl.conf
sudo modprobe br_netfilter
sudo sysctl -p /etc/sysctl.d/mysysctl.conf
sudo groupadd docker
sudo usermod -aG docker vagrant
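
modprobe br_netfilter only lasts until the next reboot, so the bridge sysctls above would fail to apply afterwards. A small addition, assuming the standard systemd modules-load.d mechanism, to load the module at boot:

echo br_netfilter | sudo tee /etc/modules-load.d/br_netfilter.conf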

2. Vagrantfile

# The snippet assumes a CentOS 7 base box, matching the yum-based setup above.
Vagrant.configure("2") do |config|
  config.vm.box = "centos/7"
  config.ssh.username = "vagrant"
  config.ssh.password = "vagrant"
  #config.ssh.insert_key = false

  [11,12,13,21,22,23,26,27].each do |i|
    config.vm.define "n#{i}" do |node|
      node.vm.network "private_network", ip: "192.168.33.#{i}"
      node.vm.synced_folder "/data/vagrant/shell", "/shell"
      node.vm.network :forwarded_port, guest: 22, host: "2#{i}22", host_ip: "0.0.0.0"

      node.vm.provider "virtualbox" do |vb|
        vb.memory = "2048"
        vb.cpus = 2
      end

      node.vm.provision "shell", inline: <<-SHELL
        echo "vagrant:vagrant" | sudo chpasswd
        mkdir -p /data
        echo IP=\"192.168.33.#{i}\" >>/data/env.sh
        echo ETCD_IP=\"192.168.33.#{i}\" >>/data/env.sh
        echo ETCD_NAME=\"etcd-#{i}\" >>/data/env.sh
        echo KUBE_NODENAME=\"n#{i}\" >>/data/env.sh
        echo KUBE_NODE_HOSTNAME=\"n#{i}.dev\" >>/data/env.sh
        echo HOSTNAME_=\"n#{i}\" >>/data/env.sh
        chown -R vagrant:vagrant /data
        hostnamectl set-hostname n#{i}
        timedatectl set-timezone Asia/Shanghai
      SHELL
    end
  end
end
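
With this Vagrantfile in place, the lab can be booted in one shot or node by node, and each guest is reachable via vagrant ssh or the forwarded SSH port:

vagrant up               # boot all eight nodes
vagrant up n11 n12 n13   # or just the etcd nodes
vagrant ssh n21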

3. Prepare all certificates
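
The original post gives no commands here, but every later step expects a CA plus server/peer/client certificates for etcd (etcd-ca.pem, etcd-server.pem, etcd-peer.pem, etcd-client.pem) and a Kubernetes CA with client certs (kubernetes-ca.pem, kubectl.pem). A minimal sketch with cfssl, where ca-csr.json, ca-config.json and etcd-server-csr.json are hypothetical request files you would write to taste:

# generate the etcd CA (ca-csr.json is an assumed CSR request file)
cfssl gencert -initca ca-csr.json | cfssljson -bare etcd-ca
# sign a server certificate for the three etcd nodes
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem \
  -config=ca-config.json -profile=server \
  -hostname=192.168.33.11,192.168.33.12,192.168.33.13,127.0.0.1 \
  etcd-server-csr.json | cfssljson -bare etcd-server
# repeat for etcd-peer, etcd-client, kubernetes-ca, kubectl and so on,
# then copy the results into $ETCD_PKI_DIR and $K8S_PKI_DIR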

4. etcd cluster

cat > /tmp/etcd.service <<EOF
[Unit]
Description=etcd
Documentation=https://github.com/coreos/etcd
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
#User=etcd
Type=notify
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) $ETCD_BIN_DIR/etcd \\
--name=$ETCD_NAME \\
--data-dir=$ETCD_DATA_DIR \\
--advertise-client-urls=https://$ETCD_IP:2379 \\
--listen-peer-urls=https://$ETCD_IP:2380 \\
--listen-client-urls=https://$ETCD_IP:2379 \\
--initial-advertise-peer-urls=https://$ETCD_IP:2380 \\
--initial-cluster=$ETCD_CLUS \\
--initial-cluster-state=new \\
--cert-file=$ETCD_PKI_DIR/etcd-server.pem \\
--key-file=$ETCD_PKI_DIR/etcd-server-key.pem \\
--trusted-ca-file=$ETCD_PKI_DIR/etcd-ca.pem \\
--client-cert-auth \\
--peer-cert-file=$ETCD_PKI_DIR/etcd-peer.pem \\
--peer-key-file=$ETCD_PKI_DIR/etcd-peer-key.pem \\
--peer-trusted-ca-file=$ETCD_PKI_DIR/etcd-ca.pem \\
--peer-client-cert-auth"
Restart=on-failure
RestartSec=10s
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
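
The unit file is only written to /tmp, so it still has to be installed and started on each etcd node, for example:

sudo cp /tmp/etcd.service /etc/systemd/system/etcd.service
sudo systemctl daemon-reload
sudo systemctl enable --now etcd
sudo systemctl status etcd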

5. Master nodes

  • keepalived
  • haproxy
  • kube-apiserver
  • kube-controller-manager
  • kube-scheduler
  • dockerd
  • kubelet
  • kube-proxy
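
The post does not include the keepalived/haproxy configs. The idea is that keepalived floats the VIP 192.168.33.100 across the three masters, while haproxy on each master listens on the VIP port 8443 and balances to the kube-apiservers on 6443. A minimal haproxy sketch under those assumptions:

cat >> /etc/haproxy/haproxy.cfg <<EOF
listen kube-apiserver
    bind 0.0.0.0:8443
    mode tcp
    balance roundrobin
    server n21 192.168.33.21:6443 check
    server n22 192.168.33.22:6443 check
    server n23 192.168.33.23:6443 check
EOF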

6. Worker nodes

  • dockerd
  • kubelet
  • kube-proxy
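
On masters and workers alike, the kubelet is what consumes the bootstrap.conf generated by the script below. A sketch of the relevant kubelet flags, assuming the $K8S_ETC_DIR layout from these scripts (kubelet.conf is an assumed name for the kubeconfig the kubelet writes once its CSR is approved):

kubelet --bootstrap-kubeconfig=$K8S_ETC_DIR/bootstrap.conf \
  --kubeconfig=$K8S_ETC_DIR/kubelet.conf \
  --hostname-override=$KUBE_NODE_HOSTNAME \
  --network-plugin=cni
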
#!/bin/bash
# Create a bootstrap token and write bootstrap.conf for kubelet TLS bootstrapping.
# $1 is the node name; it becomes part of the system:bootstrappers group below.
pwdoo=$(dirname "$(readlink -fn "$0")")
source "$pwdoo/__global_env.sh"
echo "$1"
TOKEN=$(kubeadm token create --groups system:bootstrappers:$1:default-node-token)
echo $TOKEN
kubectl --kubeconfig=$K8S_ETC_DIR/bootstrap.conf config set-cluster kubernetes --certificate-authority=$K8S_PKI_DIR/kubernetes-ca.pem --embed-certs=true --server=https://192.168.33.100:8443
kubectl --kubeconfig=$K8S_ETC_DIR/bootstrap.conf config set-credentials kubelet-bootstrap --token=$TOKEN
kubectl --kubeconfig=$K8S_ETC_DIR/bootstrap.conf config set-context default --cluster=kubernetes --user=kubelet-bootstrap
kubectl --kubeconfig=$K8S_ETC_DIR/bootstrap.conf config use-context default

kubeadm token list;
cat $K8S_ETC_DIR/bootstrap.conf
#!/bin/bash
# Approve every pending CSR so the bootstrapping kubelets get their certificates.
nodes=$(kubectl get csr -o json | jq -r '[.items | .[].metadata | .name ] | join(" ")')
for node in $nodes; do
	kubectl certificate approve $node
done
kubectl get csr;
kubectl get no;

7. Bringing up CNI with the bridge plugin

vi /etc/cni/net.d/19-bridge.conf;
{
	"name": "mynet",
	"type": "bridge",
	"bridge": "mynet0",
	"isDefaultGateway": true,
	"forceAddress": false,
	"ipMasq": true,
	"hairpinMode": true,
	"ipam": {
		"type": "host-local",
		"subnet": "172.19.0.0/16"
	}
}
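
Once the kubelet schedules a pod with this config, the mynet0 bridge should appear on the node and hold the gateway address of the 172.19.0.0/16 subnet. A quick check (bridge-utils was installed earlier):

ip addr show mynet0
brctl show mynet0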

8. Deploying Calico

Download the v3.4 manifest, then make two changes in a local copy (calico_deploy.yaml): pin IP autodetection to eth1 (the Vagrant private-network interface) and set the IPv4 pool CIDR to 172.20.0.0/16, as the diff shows:

wget https://docs.projectcalico.org/v3.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
diff calico_deploy.yaml calico.yaml
298,299d297
<             - name: IP_AUTODETECTION_METHOD
<               value: "interface=eth1"  
313c311
<               value: "172.20.0.0/16"
---
>               value: "192.168.0.0/16"
kubectl apply -f calico_deploy.yaml
# Once Calico is deployed, the bridge CNI config from the previous section is no longer used
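
It's worth confirming that the calico-node pods reach Ready on every node before moving on, for example:

kubectl get po -n kube-system -o wide | grep calico
kubectl get no -o wide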

9. Deploying CoreDNS

#!/bin/bash
#export http_proxy=
#export https_proxy=
# 10.254.0.2 is the DNS service cluster IP; hard-code it to match your service CIDR
# cluster.local. is the cluster domain
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
chmod +x deploy.sh
./deploy.sh -i 10.254.0.2 -d cluster.local. > dns.yaml
kubectl apply -f dns.yaml;
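
A quick way to confirm DNS works, reusing the busybox commands from the common-commands section:

kubectl get svc,po -n kube-system -o wide | grep -i dns
kubectl run -it --rm --restart=Never busybox --image=busybox sh
# inside the pod:
nslookup kubernetes.default.svc.cluster.local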

10. Testing

kubectl label node n21.dev node-role.kubernetes.io/master=master
kubectl label node n21.dev node-role.kubernetes.io/worker=worker
kubectl label node n22.dev node-role.kubernetes.io/master=master
kubectl label node n22.dev node-role.kubernetes.io/worker=worker
kubectl label node n23.dev node-role.kubernetes.io/master=master
kubectl label node n23.dev node-role.kubernetes.io/worker=worker
kubectl label node n26.dev node-role.kubernetes.io/worker=worker
kubectl label node n27.dev node-role.kubernetes.io/worker=worker
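
With the nodes labeled, a simple deployment makes a reasonable smoke test. A sketch reusing the my-nginx/nginx-svc-demo names that appear in the commands below (old kubectl run syntax, which creates a deployment):

kubectl run my-nginx --image=nginx:1.9.1 --replicas=2 --port=80
kubectl expose deployment my-nginx --port=80 --name=nginx-svc-demo
kubectl get po -o wide
kubectl run -it --rm --image=radial/busyboxplus:curl mytest /bin/sh
# inside the pod:
curl http://nginx-svc-demo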

11. Common commands

kubectl api-resources
kubectl api-versions
kubectl cluster-info
kubectl get all --all-namespaces
kubectl get apiservices
kubectl get replicaset
kubectl get --raw "/apis" | jq
kubectl get svc,ep,deploy,po,no,rs,ns,sa --all-namespaces -o wide
kubectl get ds
kubectl get cs
kubectl get csr
kubectl certificate approve <CSR>
kubectl logs calico-node-2nct7 --namespace kube-system
# Rolling update
kubectl set image deployment/my-nginx nginx=nginx:1.9.1
kubectl rollout status deployment/my-nginx
kubectl rollout history deployment/my-nginx
kubectl apply -f <deploy.yaml> --record
# spec.revisionHistoryLimit
# Mark a node as unschedulable or schedulable
kubectl cordon
kubectl uncordon
# Taints
kubectl taint
# Evict the pods running on a node
kubectl drain
kubectl run -it --rm --image=busybox:latest busybox /bin/sh
kubectl run -it --rm --image=radial/busyboxplus:curl mytest /bin/sh
docker run -it --rm radial/busyboxplus:curl
kubectl run -it --rm --image=alpine alpine /bin/sh
kubectl run -it --rm --image=infoblox/dnstools dns-client
kubectl run -it --rm --image=buildpack-deps:18.04-curl ubuntu /bin/sh
kubectl exec -it <pod> /bin/sh
kubectl exec -it $(kubectl get pods -o jsonpath='{.items[0].metadata.name}') /bin/sh
# Access the API server from inside a pod, using the service-account token
curl -ik -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://kubernetes/api/v1/namespaces/default/pods
kubectl get secret --namespace default dokuwiki-dokuwiki -o jsonpath="{.data.dokuwiki-password}" | base64 --decode
# Show node labels
kubectl get nodes --show-labels
# You can remove incorrect label with <label>-
kubectl label node 192.168.1.1 networkSpeed-
# Label nodes
kubectl label node n11.dev node-role.kubernetes.io/master=master
kubectl label node n12.dev node-role.kubernetes.io/worker=worker
# Create a bootstrap token
kubeadm token create  --groups system:bootstrappers:kube-n11 --kubeconfig ~/.kube/config
# Delete a node
kubectl delete no n11.dev;
curl --cacert ca.pem --cert etcd-client.pem --key etcd-client-key.pem https://192.168.33.11:2379/health
curl --cacert ca.pem --cert kubectl.pem --key kubectl-key.pem https://192.168.33.12:6443
curl --cacert ca.pem --cert kubectl.pem --key kubectl-key.pem https://192.168.33.100:8443/api/v1/nodes | jq '.items | .[] | .metadata | .name';

export ETCDCTL_API=3;
etcdctl endpoint health --endpoints "https://192.168.33.11:2379,https://192.168.33.12:2379,https://192.168.33.13:2379" --cacert=/data/k8s/certs/ca.pem --cert=/data/k8s/certs/etcd-client.pem --key=/data/k8s/certs/etcd-client-key.pem --cluster=true
kubectl run -it --rm --restart=Never busybox --image=busybox sh
kubectl run -it --rm --image=radial/busyboxplus:curl mytest /bin/sh
nslookup kubernetes
nslookup nginx-svc-demo
kubectl get role,rolebindings,serviceaccount,clusterrole,clusterrolebindings --all-namespaces -o wide

12. References