1、Overview

192.168.33.11  etcd cluster node 1: install etcd.
192.168.33.12  etcd cluster node 2: install etcd.
192.168.33.13  etcd cluster node 3: install etcd.
192.168.33.21  apiserver cluster node 1: install kube-apiserver, kube-controller-manager and kube-scheduler, plus kubelet, kube-proxy and dockerd; label the node as master. Also install keepalived + haproxy and set KUBE_MASTER_VIP="https://192.168.33.100:8443".
192.168.33.22  apiserver cluster node 2: same as 192.168.33.21.
192.168.33.23  apiserver cluster node 3: same as 192.168.33.21.
192.168.33.26  install only kubelet, kube-proxy and dockerd; label the node as worker.
192.168.33.27  install only kubelet, kube-proxy and dockerd; label the node as worker.
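The three apiservers are fronted by keepalived + haproxy on the VIP 192.168.33.100, port 8443. As a minimal sketch (not from the original; the backend port 6443 matches the apiserver address used later in this guide, everything else is a conventional default), the haproxy piece could look like:

cat > /etc/haproxy/haproxy.cfg <<EOF
# TCP passthrough from the VIP port 8443 to the three kube-apiservers
defaults
    mode tcp
    timeout connect 5s
    timeout client  30s
    timeout server  30s
frontend kube-apiserver
    bind 0.0.0.0:8443
    default_backend apiservers
backend apiservers
    balance roundrobin
    server n21 192.168.33.21:6443 check
    server n22 192.168.33.22:6443 check
    server n23 192.168.33.23:6443 check
EOF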

2、System

1)、Vagrantfile

# -*- mode: ruby -*-
Vagrant.configure("2") do |config|
  config.vm.box = "centos/7"        # box name assumed; any CentOS 7 base box works
  config.ssh.username = "vagrant"
  config.ssh.password = "vagrant"
  #config.ssh.insert_key = false

  [11,12,13,21,22,23,26,27].each do |i|
    config.vm.define "n#{i}" do |node|
      node.vm.network "private_network", ip: "192.168.33.#{i}"
      node.vm.synced_folder "/data/vagrant/shell", "/shell"
      node.vm.network :forwarded_port, guest: 22, host: "2#{i}22", host_ip: "0.0.0.0"

      node.vm.provider "virtualbox" do |vb|
        vb.memory = "2048"
        vb.cpus = 2
      end

      node.vm.provision "shell", inline: <<-SHELL
        echo "vagrant:vagrant" | sudo chpasswd
        mkdir -p /data
        echo IP=\"192.168.33.#{i}\" >>/data/env.sh
        echo ETCD_IP=\"192.168.33.#{i}\" >>/data/env.sh
        echo ETCD_NAME=\"etcd-#{i}\" >>/data/env.sh
        echo KUBE_NODENAME=\"n#{i}\" >>/data/env.sh
        echo KUBE_NODE_HOSTNAME=\"n#{i}.dev\" >>/data/env.sh
        echo HOSTNAME_=\"n#{i}\" >>/data/env.sh
        chown -R vagrant:vagrant /data
        hostnamectl set-hostname n#{i}
        timedatectl set-timezone Asia/Shanghai
      SHELL
    end
  end
end

2)、CentOS 7

#!/bin/bash
# Run on every machine
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.163.com/.help/CentOS7-Base-163.repo
rm -rf /etc/yum.repos.d/epel.repo
yum clean all
yum makecache
yum install -y epel-release
yum clean all
yum makecache
#yum repolist enabled
#yum repolist all
yum update -y
yum upgrade -y
yum install -y net-tools curl wget git vim jq socat conntrack ipvsadm ipset sysstat libseccomp gcc gcc-c++ cmake make bzip2 automake autoconf libtool flex bison pcre-devel zlib-devel openssl openssl-devel lsof htop
yum install -y libnfnetlink-devel libnl3 libnl3-devel systemd-devel
yum install -y bridge-utils bind-utils psmisc
yum install -y device-mapper-persistent-data lvm2
#yum install kernel-devel-$(uname -r) kernel-headers-$(uname -r)
sudo timedatectl set-timezone Asia/Shanghai
cat > /tmp/mysysctl.conf <<EOF
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
fs.inotify.max_user_watches=89100
user.max_user_namespaces=15000
vm.max_map_count=262144
EOF
sudo cp /tmp/mysysctl.conf  /etc/sysctl.d/mysysctl.conf
sudo modprobe br_netfilter
sudo sysctl -p /etc/sysctl.d/mysysctl.conf
sudo groupadd docker
sudo usermod -aG docker vagrant
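A quick check that the module and sysctl settings took effect (and, optionally, persisting the module across reboots):

lsmod | grep br_netfilter
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables
# optional: load br_netfilter at boot
echo br_netfilter | sudo tee /etc/modules-load.d/br_netfilter.conf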

3、Prepare certificates
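A rough sketch only, assuming cfssl/cfssljson are used and that ca-config.json plus *-csr.json files named after the certificates referenced in the following sections exist:

cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json \
  -profile=server etcd-server-csr.json | cfssljson -bare etcd-server
cfssl gencert -initca kubernetes-ca-csr.json | cfssljson -bare kubernetes-ca
cfssl gencert -initca kubernetes-front-proxy-ca-csr.json | cfssljson -bare kubernetes-front-proxy-ca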

4、Deploy etcd

cat > /tmp/etcd.service <<EOF
[Unit]
Description=etcd
Documentation=https://github.com/coreos/etcd
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
#User=etcd
Type=notify
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) $ETCD_BIN_DIR/etcd \\
--name=$ETCD_NAME \\
--data-dir=$ETCD_DATA_DIR \\
--advertise-client-urls=https://$ETCD_IP:2379 \\
--listen-peer-urls=https://$ETCD_IP:2380 \\
--listen-client-urls=https://$ETCD_IP:2379 \\
--initial-advertise-peer-urls=https://$ETCD_IP:2380 \\
--initial-cluster=$ETCD_CLUS \\
--initial-cluster-state=new \\
--cert-file=$ETCD_PKI_DIR/etcd-server.pem \\
--key-file=$ETCD_PKI_DIR/etcd-server-key.pem \\
--trusted-ca-file=$ETCD_PKI_DIR/etcd-ca.pem \\
--client-cert-auth \\
--peer-cert-file=$ETCD_PKI_DIR/etcd-peer.pem \\
--peer-key-file=$ETCD_PKI_DIR/etcd-peer-key.pem \\
--peer-trusted-ca-file=$ETCD_PKI_DIR/etcd-ca.pem \\
--peer-client-cert-auth"
Restart=on-failure
RestartSec=10s
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
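After rendering the unit on each etcd node, it can be installed and the cluster checked; a sketch that reuses the client certificate paths from the commands in section 14:

sudo cp /tmp/etcd.service /etc/systemd/system/etcd.service
sudo systemctl daemon-reload
sudo systemctl enable etcd
sudo systemctl start etcd
export ETCDCTL_API=3
etcdctl endpoint health \
  --endpoints "https://192.168.33.11:2379,https://192.168.33.12:2379,https://192.168.33.13:2379" \
  --cacert=/data/k8s/certs/ca.pem --cert=/data/k8s/certs/etcd-client.pem --key=/data/k8s/certs/etcd-client-key.pem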

5、Master nodes

  • keepalived
  • haproxy
  • kube-apiserver
  • kube-controller-manager
  • kube-scheduler
  • dockerd
  • kubelet
  • kube-proxy
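For the VIP itself, a minimal keepalived sketch (interface eth1 is assumed from the Vagrant private network; lower the priority on the BACKUP nodes):

cat > /etc/keepalived/keepalived.conf <<EOF
vrrp_instance VI_1 {
    state MASTER              # BACKUP on the other two masters
    interface eth1
    virtual_router_id 51
    priority 100              # e.g. 90/80 on the BACKUP nodes
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s-vip
    }
    virtual_ipaddress {
        192.168.33.100
    }
}
EOF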

6、Worker nodes

  • dockerd
  • kubelet
  • kube-proxy
#!/bin/bash
# create bootstrap token
pwdoo=$(dirname "$(readlink -fn "$0")")
source $pwdoo/__global_env.sh
echo $1
TOKEN=$(kubeadm token create --groups system:bootstrappers:$1:default-node-token)
echo $TOKEN
kubectl --kubeconfig=$K8S_ETC_DIR/bootstrap.conf config set-cluster kubernetes --certificate-authority=$K8S_PKI_DIR/kubernetes-ca.pem --embed-certs=true --server=https://192.168.33.100:8443
kubectl --kubeconfig=$K8S_ETC_DIR/bootstrap.conf config set-credentials kubelet-bootstrap --token=$TOKEN
kubectl --kubeconfig=$K8S_ETC_DIR/bootstrap.conf config set-context default --cluster=kubernetes --user=kubelet-bootstrap
kubectl --kubeconfig=$K8S_ETC_DIR/bootstrap.conf config use-context default

kubeadm token list;
cat $K8S_ETC_DIR/bootstrap.conf
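The resulting bootstrap.conf is what each new kubelet points at to request its certificate; the relevant kubelet flags would be along these lines (the kubelet.conf name is an assumption):

--bootstrap-kubeconfig=$K8S_ETC_DIR/bootstrap.conf
--kubeconfig=$K8S_ETC_DIR/kubelet.conf   # written out by the kubelet once its CSR is approved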
#!/bin/bash
nodes=$(kubectl get csr -o json | jq -r '[.items | .[].metadata | .name ] | join(" ")')
for node in $nodes; do
	kubectl certificate approve $node
done
kubectl get csr;
kubectl get no;

7、Deploy CNI: bridge

vi /etc/cni/net.d/19-bridge.conf;
{
	"name": "mynet",
	"type": "bridge",
	"bridge": "mynet0",
	"isDefaultGateway": true,
	"forceAddress": false,
	"ipMasq": true,
	"hairpinMode": true,
	"ipam": {
		"type": "host-local",
		"subnet": "172.19.0.0/16"
	}
}
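For this config to be picked up, the CNI plugin binaries have to be installed and the kubelet started in CNI mode; the paths below are the conventional defaults, not taken from the original:

ls /opt/cni/bin/          # should contain at least: bridge host-local loopback
# kubelet flags enabling CNI
--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin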

8、Deploy CNI: calico

wget https://docs.projectcalico.org/v3.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
diff calico_deploy.yaml calico.yaml
298,299d297
<             - name: IP_AUTODETECTION_METHOD
<               value: "interface=eth1"  
313c311
<               value: "172.20.0.0/16"
---
>               value: "192.168.0.0/16"
kubectl apply -f calico_deploy.yaml
# Once calico is deployed, the bridge CNI config above stops being used
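A quick way to confirm the rollout (the k8s-app=calico-node label comes from the calico manifest of that release; treat it as an assumption):

kubectl -n kube-system get pods -l k8s-app=calico-node -o wide
kubectl get no -o wide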

9、Deploy CoreDNS

1)、Deploy

#!/bin/bash
#export http_proxy=
#export https_proxy=
# 10.254.0.2 is the DNS Service cluster IP; it is hard-coded here, adjust it to your deployment
# cluster.local. is the cluster domain
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
chmod +x deploy.sh
./deploy.sh -i 10.254.0.2 -d cluster.local. > dns.yaml
kubectl apply -f dns.yaml;
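The IP passed to deploy.sh has to match what the kubelets hand out to pods; the corresponding kubelet flags for this setup would be:

--cluster-dns=10.254.0.2
--cluster-domain=cluster.local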

2)、Verify

kubectl run -it --rm --image=infoblox/dnstools dns-client

## Run inside the pod
nslookup kubernetes
## Expected output
Server:		10.254.0.2
Address:	10.254.0.2#53

Name:	kubernetes.default.svc.cluster.local
Address: 10.254.0.1

10、Deploy metrics-server

1)、Aggregation layer

# https://kubernetes.io/docs/tasks/access-kubernetes-api/configure-aggregation-layer/#enable-kubernetes-apiserver-flags
--requestheader-client-ca-file=<path to aggregator CA cert>
--requestheader-allowed-names=front-proxy-client
--requestheader-extra-headers-prefix=X-Remote-Extra-
--requestheader-group-headers=X-Remote-Group
--requestheader-username-headers=X-Remote-User
--proxy-client-cert-file=<path to aggregator proxy cert>
--proxy-client-key-file=<path to aggregator proxy key>
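For this cluster those flags would point at the front-proxy CA and client certificate prepared earlier; a hedged example (the front-proxy-client file names are assumptions, only kubernetes-front-proxy-ca.pem appears elsewhere in this guide):

--requestheader-client-ca-file=$K8S_PKI_DIR/kubernetes-front-proxy-ca.pem
--requestheader-allowed-names=front-proxy-client
--proxy-client-cert-file=$K8S_PKI_DIR/front-proxy-client.pem
--proxy-client-key-file=$K8S_PKI_DIR/front-proxy-client-key.pem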

2)、Deploy

mkdir metrics-server
cd metrics-server
# https://github.com/kubernetes-incubator/metrics-server/tree/master/deploy
url=https://raw.githubusercontent.com/kubernetes-incubator/metrics-server/master/deploy/1.8%2B
wget --no-check-certificate $url/aggregated-metrics-reader.yaml
wget --no-check-certificate $url/auth-delegator.yaml
wget --no-check-certificate $url/auth-reader.yaml
wget --no-check-certificate $url/metrics-apiservice.yaml
wget --no-check-certificate $url/metrics-server-deployment.yaml
wget --no-check-certificate $url/metrics-server-service.yaml
wget --no-check-certificate $url/resource-reader.yaml
# k8s.gcr.io/metrics-server-amd64:v0.3.1
# Edit metrics-server-deployment.yaml and add the certificate-related args:
#        command:
#        - /metrics-server
#        - --metric-resolution=20s
#        - --requestheader-client-ca-file=/etc/kubernetes/pki/kubernetes-front-proxy-ca.pem
#        - --kubelet-insecure-tls

kubectl create -f .
# https://github.com/kubernetes-incubator/metrics-server/issues/45
# W0312 01:45:37.102313       1 x509.go:172] x509: subject with cn=front-proxy-client is not in the allowed list: [aggregator]
# E0312 01:45:37.102354       1 authentication.go:62] Unable to authenticate the request due to an error: [x509: subject with cn=front-proxy-client is not allowed, x509: certificate signed by unknown author
# https://github.com/kubernetes-incubator/metrics-server/issues/58
# error: Metrics not available for pod default
# https://github.com/kubernetes-incubator/metrics-server/issues/150
# unable to fetch metrics from Kubelet
# https://github.com/kubernetes-incubator/metrics-server/issues/146


# E0311 16:43:38.416358       1 manager.go:102] unable to fully collect metrics: [unable to fully scrape metrics from source kubelet_summary:n22.dev: unable to fetch metrics from Kubelet n22.dev (192.168.33.22): Get https://192.168.33.22:10250/stats/summary/: x509: cannot validate certificate for 192.168.33.22 because it doesn't contain any IP SANs, unable to fully scrape metrics from source kubelet_summary:n26.dev: unable to fetch metrics from Kubelet n26.dev (192.168.33.26): Get https://192.168.33.26:10250/stats/summary/: x509: cannot validate certificate for 192.168.33.26 because it doesn't contain any IP SANs, unable to fully scrape metrics from source kubelet_summary:n23.dev: unable to fetch metrics from Kubelet n23.dev (192.168.33.23): Get https://192.168.33.23:10250/stats/summary/: x509: cannot validate certificate for 192.168.33.23 because it doesn't contain any IP SANs, unable to fully scrape metrics from source kubelet_summary:n21.dev: unable to fetch metrics from Kubelet n21.dev (192.168.33.21): Get https://192.168.33.21:10250/stats/summary/: x509: cannot validate certificate for 192.168.33.21 because it doesn't contain any IP SANs, unable to fully scrape metrics from source kubelet_summary:n27.dev: unable to fetch metrics from Kubelet n27.dev (192.168.33.27): Get https://192.168.33.27:10250/stats/summary/: x509: cannot validate certificate for 192.168.33.27 because it doesn't contain any IP SANs]

# Error from server (Forbidden): nodes.metrics.k8s.io is forbidden: User “system:kube-proxy” cannot list nodes.metrics.k8s.io at the cluster scope
# Forbidden (user=system:anonymous, verb=get, resource=nodes, subresource=proxy)
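For the "doesn't contain any IP SANs" errors above, either re-issue the kubelet serving certificates with IP SANs, or relax metrics-server; a sketch of the container args, extending the commented block above (--kubelet-preferred-address-types is an addition of mine, not from the original):

#        - --kubelet-insecure-tls
#        - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP

The two Forbidden errors at the end are what the RBAC objects below take care of.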
---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: view-metrics
rules:
- apiGroups:
    - metrics.k8s.io
  resources:
    - pods
    - nodes
  verbs:
    - get
    - list
    - watch

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: view-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view-metrics
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: system:kube-proxy

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: anonymous-logs-role
rules:
- apiGroups: [""]
  resources: ["nodes/proxy"]
  verbs: ["create", "get"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: anonymous-logs-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: anonymous-logs-role
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:anonymous
kubectl get apiservice v1beta1.metrics.k8s.io -o json
kubectl get pods -n kube-system -l k8s-app=metrics-server
kubectl get svc -n kube-system -l k8s-app=metrics-server
kubectl get --raw "/apis/metrics.k8s.io"
# node metrics
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"
# pod metrics
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/pods"
kubectl get --raw "/apis/metrics.k8s.io/v1beta1" | jq .
kubectl logs $(kubectl get po -o json -n kube-system -l k8s-app=metrics-server | jq -r '.items | .[] | .metadata | .name') -n kube-system

# kubectl get --raw "/apis/metrics.k8s.io/v1beta1" | jq .
# error: You must be logged in to the server (Unauthorized)
# front-proxy CA certificate problem; check the --requestheader-client-ca-file flag
# error: metrics not available yet
# no data right after the rollout; metrics appear after a few minutes
# '/sys/fs/cgroup/cpuset': operation not permitted
# Could not configure a source for OOM detection, disabling OOM events: open /dev/kmsg: no such file or directory

3)、Verify

kubectl top node
kubectl top pod

11、Deploy dashboard

1)、Deploy

# https://github.com/kubernetes/dashboard
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml

2)、Account

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system 
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard2
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 443
    targetPort: 8443
    nodePort: 30014
  selector:
    k8s-app: kubernetes-dashboard

3)、Login token

#kubectl create sa dashboard-admin -n kube-system
#kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') | grep -E '^token' | awk '{print $2}'
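With the NodePort service above, the dashboard is reachable on port 30014 of any node, e.g.:

# open https://<any-node-ip>:30014/ in a browser and paste the token printed above
curl -k https://192.168.33.21:30014/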

12、ingress-nginx

1)、Deploy

kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/mandatory.yaml

2)、Deploy the Service

apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      targetPort: 80
      protocol: TCP
    - name: https
      port: 443
      targetPort: 443
      protocol: TCP
    - name: proxied-tcp-3306
      port: 3306
      targetPort: 3306
      protocol: TCP
      nodePort: 30016
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

3)、L4 forwarding

kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
data:
  3306: "default/mysql-svc:3306"
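Together with the nodePort 30016 exposed on the ingress-nginx Service earlier, TCP traffic to any node on 30016 should now reach default/mysql-svc:3306; a quick check (assumes a mysql-svc Service actually exists and nc is installed):

nc -zv 192.168.33.27 30016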

4)、L7 forwarding

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: my-ingress
  annotations:
    nginx.ingress.kubernetes.io/use-regex: "true"
spec:
  rules:
  - host: demo.mydomain.com
    http:
      paths:
      - backend:
          serviceName: api
          servicePort: 80
# Test
curl -H "host:demo.mydomain.com" http://192.168.33.27:31025

5)、Running pods on the host network

hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet

13、Scheduling

1)、Taints and tolerations
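An illustrative example, not from the original: taint one of the workers and declare a matching toleration.

kubectl taint node n26.dev dedicated=db:NoSchedule     # pods without a matching toleration are not scheduled onto n26
kubectl taint node n26.dev dedicated-                  # remove the taint again
# a pod that tolerates the taint carries, under spec:
#   tolerations:
#   - key: "dedicated"
#     operator: "Equal"
#     value: "db"
#     effect: "NoSchedule"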

2)、Affinity
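An illustrative example, not from the original, using the node-role.kubernetes.io/worker=worker label applied in section 14:

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: affinity-demo
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: node-role.kubernetes.io/worker
            operator: In
            values: ["worker"]
  containers:
  - name: app
    image: busybox:latest
    command: ["sleep", "3600"]
EOF
kubectl get pod affinity-demo -o wide   # should land on a node carrying the worker label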

14、Common commands

kubectl label node n21.dev node-role.kubernetes.io/master=master
kubectl label node n21.dev node-role.kubernetes.io/worker=worker
kubectl label node n22.dev node-role.kubernetes.io/master=master
kubectl label node n22.dev node-role.kubernetes.io/worker=worker
kubectl label node n23.dev node-role.kubernetes.io/master=master
kubectl label node n23.dev node-role.kubernetes.io/worker=worker
kubectl label node n26.dev node-role.kubernetes.io/worker=worker
kubectl label node n27.dev node-role.kubernetes.io/worker=worker
#########
kubectl api-resources
kubectl api-versions
kubectl cluster-info
kubectl get all --all-namespaces
kubectl get apiservices
kubectl get replicaset
kubectl get --raw "/apis" | jq
kubectl get svc,ep,deploy,po,no,rs,ns,sa --all-namespaces -o wide
kubectl get ds
kubectl get cs
kubectl get csr
kubectl certificate approve <CSR>
kubectl logs calico-node-2nct7 --namespace kube-system
# Rolling update
kubectl set image deployment/my-nginx nginx=nginx:1.9.1
kubectl rollout status deployment/my-nginx
kubectl rollout history deployment/my-nginx
kubectl apply -f <deploy.yaml> --record
# spec.revisionHistoryLimit
# Mark node as unschedulable or schedulable
kubectl cordon <node>
kubectl uncordon <node>
# Taints
kubectl taint
# Evict the pods running on a node
kubectl drain
kubectl run -it --rm --image=busybox:latest busybox /bin/sh
kubectl run -it --rm --image=radial/busyboxplus:curl mytest /bin/sh
docker run -it --rm radial/busyboxplus:curl
kubectl run -it --rm --image=alpine alpine /bin/sh
kubectl run -it --rm --image=infoblox/dnstools dns-client
kubectl run -it --rm --image=buildpack-deps:18.04-curl ubuntu /bin/sh
kubectl exec -it <pod> /bin/sh
kubectl exec -it $(kubectl get pods -o jsonpath='{.items[0].metadata.name}') /bin/sh
kubectl exec -it $(kubectl get po -o json | jq -r '.items | sort_by(.spec.nodeName)[] | [.metadata.name] | @tsv' |grep test) /bin/sh
# Access the API server from inside a pod using the service account token
curl -ik -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://kubernetes/api/v1/namespaces/default/pods
kubectl get secret --namespace default dokuwiki-dokuwiki -o jsonpath="{.data.dokuwiki-password}" | base64 --decode
# Show node labels
kubectl get nodes --show-labels
# You can remove incorrect label with <label>-
kubectl label node 192.168.1.1 networkSpeed-
# Label a node
kubectl label node n11.dev node-role.kubernetes.io/master=master
kubectl label node n12.dev node-role.kubernetes.io/worker=worker
# Create a bootstrap token
kubeadm token create  --groups system:bootstrappers:kube-n11 --kubeconfig ~/.kube/config
# Delete a node
kubectl delete no n11.dev;
curl --cacert ca.pem --cert etcd-client.pem --key etcd-client-key.pem https://192.168.33.11:2379/health
curl --cacert ca.pem --cert kubectl.pem --key kubectl-key.pem https://192.168.33.12:6443
curl --cacert ca.pem --cert kubectl.pem --key kubectl-key.pem https://192.168.33.100:8443/api/v1/nodes | jq '.items | .[] | .metadata | .name';

export ETCDCTL_API=3;
etcdctl endpoint health --endpoints "https://192.168.33.11:2379,https://192.168.33.12:2379,https://192.168.33.13:2379" --cacert=/data/k8s/certs/ca.pem --cert=/data/k8s/certs/etcd-client.pem --key=/data/k8s/certs/etcd-client-key.pem --cluster=true
kubectl run -it --rm --restart=Never busybox --image=busybox sh
kubectl run -it --rm --image=radial/busyboxplus:curl mytest /bin/sh
nslookup kubernetes
nslookup nginx-svc-demo
kubectl get role,rolebindings,serviceaccount,clusterrole,clusterrolebindings --all-namespaces -o wide
yaml2json_linux_amd64 < ~/.kube/config |jq '.clusters | .[] | .cluster.server'
kubectl get pods/{NAME} -n {NAMESPACE} --export=true -o yaml
kubectl get configmap extension-apiserver-authentication  -n kube-system
kubectl delete configmap extension-apiserver-authentication  -n kube-system

15、References