# Kubernetes

# 1. Concepts

# 1.1 Master node (control plane)

# 1.1.1 api server

The cluster's unified entry point, exposed as a RESTful API.

# 1.1.2 scheduler

Schedules pods by selecting the node on which an application is deployed.

# 1.1.3 controller-manager

Handles the cluster's routine background tasks; each resource type has a corresponding controller (see the sketch after this list):

  • ensuring the expected number of pod replicas (ReplicaSet)
  • stateless application deployment (Deployment)
  • stateful application deployment (StatefulSet)
  • ensuring every node runs a copy of the same pod (DaemonSet)
  • one-off and scheduled tasks (Job / CronJob)
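The bullets above map to the standard workload controllers; as a quick check, the corresponding resource kinds live in the apps and batch API groups:

# List the workload resource kinds managed by these controllers
kubectl api-resources --api-group=apps
kubectl api-resources --api-group=batch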

# 1.1.4 etcd

The storage system that holds the cluster's state data.

# 1.2 Worker node

# 1.2.1 kubelet

Manages the containers on its own node.

# 1.2.2 kube-proxy

Provides network proxying, load balancing, and related functions.

# 1.2.3 pod

  • the smallest deployable unit
  • a group of one or more containers
  • shared network namespace
  • ephemeral by design

# 2. Creating a cluster with kubeadm

Three Ubuntu Server 22.04 machines, named control, worker1, and worker2.

# 2.1 Preparation (all nodes)

sudo apt install vim git -y
git clone https://github.com/sandervanvugt/cka
cd cka/
sudo ./setup-container.sh
sudo ./setup-kubetools.sh

# 2.2 Initialize the cluster (control)

sudo kubeadm init
# Create the kubeconfig as the init output instructs, so clients can manage the cluster
# As root you can instead run: export KUBECONFIG=/etc/kubernetes/admin.conf
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Verify
tpxcer@k8s-control:~/cka$ kubectl get all
NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   4m53s
# Install the pod network (Calico)
tpxcer@k8s-control:~/cka$ kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml

# 2.3 Join the two workers to the cluster (worker1, worker2)

# The simplest way is to copy the join command printed after kubeadm init; requires sudo
sudo kubeadm join 192.168.50.159:6443 --token ysql4d.j09d54mxkl8ocr4o \
	--discovery-token-ca-cert-hash sha256:7848d0ceb05c588d4acf6fbb25d782398e5f721c1eb119dce770dd63aa26fe71
	
# Back on control, check the status; if a node shows NotReady, wait a moment
tpxcer@k8s-control:~/cka$ kubectl get nodes
NAME          STATUS   ROLES           AGE   VERSION
k8s-control   Ready    control-plane   12m   v1.28.2
k8s-worker1   Ready    <none>          98s   v1.28.2
k8s-worker2   Ready    <none>          88s   v1.28.2

# 2.4 If the join token is lost or expired

sudo kubeadm token create --print-join-command

# 2.5 Resetting k8s (control)

If you've broken the cluster...

sudo kubeadm reset

# 2.6 Show the current configuration

This is simply the contents of ~/.kube/config.

tpxcer@k8s-control:~/cka$ kubectl config view
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.50.159:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: DATA+OMITTED
    client-key-data: DATA+OMITTED

# 2.7 Installing Helm

Download the binary package from the official Helm releases: https://github.com/helm/helm

tpxcer@k8s-control:~$ tar -xzf helm-v3.12.3-linux-amd64.tar.gz
tpxcer@k8s-control:~$ sudo cp linux-amd64/helm /usr/local/bin/

# 2.8 Installing the official web UI (Dashboard)

  1. Deploy

The official Kubernetes dashboard UI: https://github.com/kubernetes/dashboard

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
  2. Expose the access port
# Edit the service to expose the port
# Change type: ClusterIP to type: NodePort
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
  3. Find the port and access it
[root@master01 ~]# kubectl get svc -A |grep kubernetes-dashboard
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.96.191.17    <none>        8000/TCP                 8m13s
kubernetes-dashboard   kubernetes-dashboard        NodePort    10.96.146.196   <none>        443:31564/TCP            8m14s

Visit https://master01:31564/; a token is still required.

  4. Create an access account (token)
# Create the access account: prepare a yaml file; vi dash.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
[root@master01 ~]# vi dash.yaml
[root@master01 ~]# kubectl apply -f dash.yaml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
  5. Get the token
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
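Note: on Kubernetes 1.24 and later, a ServiceAccount no longer gets a long-lived token Secret automatically, so the command above may return nothing; in that case request a short-lived token instead:

kubectl -n kubernetes-dashboard create token admin-user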

Then paste the token into the token field on the UI login page.

# 2.9 Command completion

sudo apt-get install -y bash-completion
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc

# 3. Creating a highly available cluster

# 3.1 Prepare the hosts

See the separate notes (link).

# 3.2 Configure the load balancer

See the separate notes (link).

# 3.3 Container runtime (all control and worker nodes)

  1. Container runtime: installing and configuring the prerequisites
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# Set the required sysctl parameters; they persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

# Apply the sysctl parameters without rebooting
sudo sysctl --system 
sudo apt-get update && sudo apt-get install -y containerd
sudo systemctl stop containerd
wget https://github.com/containerd/containerd/releases/download/v1.7.8/containerd-1.7.8-linux-amd64.tar.gz
tar xvf containerd-1.7.8-linux-amd64.tar.gz
sudo mv bin/* /usr/bin/
sudo mkdir -p /etc/containerd
# Configure containerd
cat <<- TOML | sudo tee /etc/containerd/config.toml
version = 2
[plugins]
  [plugins."io.containerd.grpc.v1.cri"]
    [plugins."io.containerd.grpc.v1.cri".containerd]
      discard_unpacked_layers = true
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v2"
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            SystemdCgroup = true
	TOML
# Restart containerd
sudo systemctl restart containerd	

# 3.4 Installing the kube tools

Official docs: Installing kubeadm

sudo apt-get update && sudo apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF

sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
sudo swapoff -a

sudo sed -i 's/\/swap/#\/swap/' /etc/fstab
sudo crictl config --set \
    runtime-endpoint=unix:///run/containerd/containerd.sock
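Alternatively, the setup scripts from the course repository (as in section 2.1) perform the same steps: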
sudo apt install vim git -y
git clone https://github.com/sandervanvugt/cka
cd cka/
sudo ./setup-container.sh
sudo ./setup-kubetools.sh

# 3.5 Create the cluster

sudo kubeadm init --control-plane-endpoint "192.168.50.223:8443" --upload-certs
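When init completes with --upload-certs, it prints two join commands: one with --control-plane --certificate-key for additional control-plane nodes, and one for workers. As a sketch (token, hash, and key are placeholders; copy the real commands from the init output):

# Join an additional control-plane node (placeholders)
sudo kubeadm join 192.168.50.223:8443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <key>
# Join a worker node
sudo kubeadm join 192.168.50.223:8443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash>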

# 4. Deploying Kubernetes applications

# 4.1 Deployment

  1. View the help
# Help for the create deploy command, with examples
tpxcer@k8s-control:~/cka$ kubectl create deploy -h |less
  2. Create an nginx application
# Create a deployment named my-dep that runs the nginx image with 3 replicas
tpxcer@k8s-control:~/cka$ kubectl create deployment firstnginx --image=nginx --replicas=3
deployment.apps/firstnginx created
  3. Check the status
tpxcer@k8s-control:~/cka$ kubectl get all
NAME                             READY   STATUS    RESTARTS   AGE
pod/firstnginx-d8679d567-4rj9n   1/1     Running   0          58s
pod/firstnginx-d8679d567-jtcwt   1/1     Running   0          58s
pod/firstnginx-d8679d567-s7gcn   1/1     Running   0          58s

NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   83m

NAME                         READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/firstnginx   3/3     3            3           58s

NAME                                   DESIRED   CURRENT   READY   AGE
replicaset.apps/firstnginx-d8679d567   3         3         3       58s

# 4.2 DaemonSet

  1. What is a DaemonSet?

A DaemonSet ensures that all (or some) nodes run a copy of a Pod. When nodes join the cluster, Pods are added for them; when nodes are removed from the cluster, those Pods are garbage-collected. Deleting a DaemonSet cleans up all the Pods it created.

Official docs: https://kubernetes.io/zh-cn/docs/concepts/workloads/controllers/daemonset/

Some typical uses of a DaemonSet:

  • running a cluster daemon on every node
  • running a log-collection daemon on every node
  • running a monitoring daemon on every node
# -A shows all namespaces
tpxcer@k8s-control:~/cka$ kubectl get ds -A
NAMESPACE     NAME          DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
kube-system   calico-node   3         3         3       3            3           kubernetes.io/os=linux   84m
kube-system   kube-proxy    3         3         3       3            3           kubernetes.io/os=linux   92m

View the calico-node configuration in kube-system; it is itself a DaemonSet:

tpxcer@k8s-control:~/cka$ kubectl get ds -n kube-system calico-node -o yaml
  2. Convert a deployment into a DaemonSet
tpxcer@k8s-control:~$ kubectl create deploy mydaemon --image=nginx --dry-run=client -o yaml >mydaemon.yaml

Modify the yaml file as follows:

apiVersion: apps/v1
kind: DaemonSet
metadata:
  creationTimestamp: null
  labels:
    app: mydaemon
  name: mydaemon
spec:
  selector:
    matchLabels:
      app: mydaemon
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: mydaemon
    spec:
      containers:
      - image: nginx
        name: nginx
        resources: {}
status: {}

Apply the configuration:

tpxcer@k8s-control:~$ kubectl apply -f mydaemon.yaml
daemonset.apps/mydaemon created

# View the DaemonSet; "ds" is shorthand for the DaemonSet resource type
tpxcer@k8s-control:~$ kubectl get ds
NAME       DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
mydaemon   2         2         2       2            2           <none>          49s

# View all pods; the control-plane node does not schedule regular workloads, so mydaemon runs only on worker1 and worker2
tpxcer@k8s-control:~$ kubectl get pods -o wide
NAME                         READY   STATUS    RESTARTS   AGE    IP              NODE          NOMINATED NODE   READINESS GATES
firstnginx-d8679d567-4rj9n   1/1     Running   0          17h    172.16.194.65   k8s-worker1   <none>           <none>
firstnginx-d8679d567-jtcwt   1/1     Running   0          17h    172.16.194.66   k8s-worker1   <none>           <none>
firstnginx-d8679d567-s7gcn   1/1     Running   0          17h    172.16.126.1    k8s-worker2   <none>           <none>
mydaemon-9tgmn               1/1     Running   0          4m8s   172.16.194.67   k8s-worker1   <none>           <none>
mydaemon-pphsv               1/1     Running   0          4m8s   172.16.126.2    k8s-worker2   <none>           <none>

# 4.3 StatefulSet

Official docs: https://kubernetes.io/zh-cn/docs/concepts/workloads/controllers/statefulset/
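The official docs cover the details; a minimal sketch of a StatefulSet with the headless Service it requires (all names are illustrative):

apiVersion: v1
kind: Service
metadata:
  name: web-headless
spec:
  clusterIP: None              # headless Service, required by the StatefulSet
  selector:
    app: web
  ports:
  - port: 80
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: web-headless    # gives the pods stable DNS names web-0, web-1, ...
  replicas: 2
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - name: nginx
        image: nginx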

# 4.4 A single pod

tpxcer@k8s-control:~$ kubectl run sleepy --image=busybox -- sleep 3600
pod/sleepy created
tpxcer@k8s-control:~$ kubectl get pods
NAME                         READY   STATUS    RESTARTS   AGE
firstnginx-d8679d567-4rj9n   1/1     Running   0          18h
firstnginx-d8679d567-jtcwt   1/1     Running   0          18h
firstnginx-d8679d567-s7gcn   1/1     Running   0          18h
mydaemon-9tgmn               1/1     Running   0          43m
mydaemon-pphsv               1/1     Running   0          43m
sleepy                       1/1     Running   0          56s

# 4.5 Init containers

Official docs: https://kubernetes.io/zh-cn/docs/concepts/workloads/pods/init-containers/

Init containers are special containers that run before the app containers in a Pod are started. They can contain utilities or setup scripts that are not present in the application image.
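A minimal sketch (names are illustrative): the init container blocks until cluster DNS resolves, then the app container starts.

apiVersion: v1
kind: Pod
metadata:
  name: init-demo
spec:
  initContainers:              # must run to completion before the app container starts
  - name: wait-for-dns
    image: busybox
    command: ['sh', '-c', 'until nslookup kubernetes.default; do sleep 2; done']
  containers:
  - name: app
    image: nginx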

# 4.6 Scaling applications

tpxcer@k8s-control:~$ kubectl scale deployment firstnginx --replicas 4
deployment.apps/firstnginx scaled
tpxcer@k8s-control:~$ kubectl get all --selector app=firstnginx
NAME                             READY   STATUS    RESTARTS   AGE
pod/firstnginx-d8679d567-4rj9n   1/1     Running   0          19h
pod/firstnginx-d8679d567-jg25f   1/1     Running   0          51s
pod/firstnginx-d8679d567-jtcwt   1/1     Running   0          19h
pod/firstnginx-d8679d567-s7gcn   1/1     Running   0          19h

NAME                         READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/firstnginx   4/4     4            4           19h

NAME                                   DESIRED   CURRENT   READY   AGE
replicaset.apps/firstnginx-d8679d567   4         4         4       19h

# 4.7 Sidecar containers

Sidecar containers have become a common Kubernetes deployment pattern, typically used for network proxies or as part of a logging system. Until recently, sidecars were a pattern that Kubernetes users applied without native support.

Official docs: https://kubernetes.io/zh-cn/blog/2023/08/25/native-sidecar-containers/
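sidecarlog.yaml comes from the cka repository and is not reproduced in these notes; a minimal sketch of the shared-volume pattern it demonstrates, consistent with the output below (names assumed), might look like:

apiVersion: v1
kind: Pod
metadata:
  name: two-containers
spec:
  volumes:
  - name: shared-data
    emptyDir: {}
  containers:
  - name: nginx-container
    image: nginx
    volumeMounts:
    - name: shared-data
      mountPath: /usr/share/nginx/html
  - name: debian-container          # the sidecar writes into the shared volume
    image: debian
    volumeMounts:
    - name: shared-data
      mountPath: /pod-data
    command: ["/bin/sh"]
    args: ["-c", "echo hello from the cluster > /pod-data/index.html && sleep 3600"]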

tpxcer@k8s-control:~/cka$ kubectl apply -f sidecarlog.yaml
pod/two-containers created
tpxcer@k8s-control:~/cka$ kubectl exec -it two-containers -c nginx-container -- cat /usr/share/nginx/html/index.html
hello from the cluster

# 5. Managing storage

# 5.1 Accessing storage

tpxcer@k8s-control:~/cka$ kubectl explain pod.spec.volumes |less
tpxcer@k8s-control:~/cka$ kubectl apply -f morevolumes.yaml
pod/morevol created
tpxcer@k8s-control:~/cka$ kubectl get pods
# Check the status
tpxcer@k8s-control:~/cka$ kubectl  describe pod morevol
tpxcer@k8s-control:~/cka$ kubectl exec -it morevol -c centos1 -- touch /centos1/centos1file
tpxcer@k8s-control:~/cka$ kubectl exec -it morevol -c centos2 -- ls /centos2/
centos1file
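morevolumes.yaml is not reproduced here; a sketch consistent with the commands above (two containers sharing one emptyDir volume) might look like:

apiVersion: v1
kind: Pod
metadata:
  name: morevol
spec:
  volumes:
  - name: sharedvol
    emptyDir: {}
  containers:
  - name: centos1
    image: centos:7
    command: ['sleep', '3600']
    volumeMounts:
    - name: sharedvol
      mountPath: /centos1
  - name: centos2
    image: centos:7
    command: ['sleep', '3600']
    volumeMounts:
    - name: sharedvol
      mountPath: /centos2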

# 5.2 Configuring PV (PersistentVolume) storage

tpxcer@k8s-control:~/cka$ kubectl apply -f pv.yaml
persistentvolume/pv-volume created
tpxcer@k8s-control:~/cka$ kubectl get pv
NAME        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
pv-volume   2Gi        RWO            Retain           Available           demo                    22s
tpxcer@k8s-control:~/cka$ kubectl get pv -o yaml
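pv.yaml is not shown above; a reconstruction consistent with this output and with the describe output in 5.4 (hostPath /mydata, storageClassName demo):

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-volume
  labels:
    type: local
spec:
  storageClassName: demo
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mydata"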

# 5.3 Configuring a PVC

tpxcer@k8s-control:~/cka$ kubectl apply -f pvc.yaml
persistentvolumeclaim/pv-claim created
tpxcer@k8s-control:~/cka$ kubectl get pvc,pv
NAME                             STATUS   VOLUME      CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/pv-claim   Bound    pv-volume   2Gi        RWO            demo           8m37s

NAME                         CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM              STORAGECLASS   REASON   AGE
persistentvolume/pv-volume   2Gi        RWO            Retain           Bound    default/pv-claim   demo                    12m
tpxcer@k8s-control:~/cka$ kubectl describe pvc pv-claim
Name:          pv-claim
Namespace:     default
StorageClass:  demo
Status:        Bound
Volume:        pv-volume
Labels:        <none>
Annotations:   pv.kubernetes.io/bind-completed: yes
               pv.kubernetes.io/bound-by-controller: yes
Finalizers:    [kubernetes.io/pvc-protection]
Capacity:      2Gi
Access Modes:  RWO
VolumeMode:    Filesystem
Used By:       <none>
Events:        <none>
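pvc.yaml is not shown above; a sketch that binds to the PV from 5.2 (any request up to the PV's 2Gi works):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-claim
spec:
  storageClassName: demo       # must match the PV's storage class
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi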

# 5.4 Using the PV and PVC

tpxcer@k8s-control:~/cka$ kubectl apply -f pv-pod.yaml
pod/pv-pod created
tpxcer@k8s-control:~/cka$ kubectl exec -it pv-pod -- touch /usr/share/nginx/html/hellothere
tpxcer@k8s-control:~/cka$ kubectl describe pv
Name:            pv-volume
Labels:          type=local
Annotations:     pv.kubernetes.io/bound-by-controller: yes
Finalizers:      [kubernetes.io/pv-protection]
StorageClass:    demo
Status:          Bound
Claim:           default/pv-claim
Reclaim Policy:  Retain
Access Modes:    RWO
VolumeMode:      Filesystem
Capacity:        2Gi
Node Affinity:   <none>
Message:
Source:
    Type:          HostPath (bare host directory volume)
    Path:          /mydata
    HostPathType:
Events:            <none>
tpxcer@k8s-control:~/cka$ kubectl get pods pv-pod -o wide
NAME     READY   STATUS    RESTARTS   AGE     IP              NODE          NOMINATED NODE   READINESS GATES
pv-pod   1/1     Running   0          3m11s   172.16.194.69   k8s-worker1   <none>           <none>
# The output above shows the pod runs on worker1; log in to worker1 and check the file
tpxcer@k8s-worker1:~$ cd /
tpxcer@k8s-worker1:/$ cd mydata/
tpxcer@k8s-worker1:/mydata$ ls
hellothere
tpxcer@k8s-worker1:/mydata$
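pv-pod.yaml is not shown above; a sketch consistent with the commands (nginx serving the claim-backed volume):

apiVersion: v1
kind: Pod
metadata:
  name: pv-pod
spec:
  volumes:
  - name: pv-storage
    persistentVolumeClaim:
      claimName: pv-claim
  containers:
  - name: pv-container
    image: nginx
    volumeMounts:
    - name: pv-storage
      mountPath: /usr/share/nginx/html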

# 5.5 StorageClass: NFS

# Install nfs-common on every machine
sudo apt install nfs-common
# Install the nfs provisioner
tpxcer@k8s-control:~$ helm repo add nfs-subdir-external-provisioner https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/
"nfs-subdir-external-provisioner" has been added to your repositories
tpxcer@k8s-control:~$ helm install nfs-subdir-external-provisioner nfs-subdir-external-provisioner/nfs-subdir-external-provisioner \
    --set nfs.server=192.168.50.111 \
    --set nfs.path=/mnt/apps/k8s
NAME: nfs-subdir-external-provisioner
LAST DEPLOYED: Tue Sep 26 08:03:50 2023
NAMESPACE: default
STATUS: deployed
REVISION: 1
TEST SUITE: None
k8s-control-➜  ~ kubectl get pods
NAME                                               READY   STATUS             RESTARTS        AGE
nfs-subdir-external-provisioner-7d859b6bfc-jk2qn   1/1     Running            1 (58m ago)     17h

Use the nfs provisioner:

k8s-control-➜  ~ kubectl get pv
NAME        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM              STORAGECLASS   REASON   AGE
pv-volume   2Gi        RWO            Retain           Bound    default/pv-claim   demo                    21h
k8s-control-➜  ~ cd cka
k8s-control-➜  cka git:(master) ✗ kubectl get storageclass
NAME         PROVISIONER                                     RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-client   cluster.local/nfs-subdir-external-provisioner   Delete          Immediate           true                   17h
k8s-control-➜  cka git:(master) ✗ kubectl apply -f nfs-provisioner-pvc-test.yaml
persistentvolumeclaim/nfs-pvc-test created
k8s-control-➜  cka git:(master) ✗ kubectl get pvc
NAME           STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
nfs-pvc-test   Bound    pvc-5a1c5700-5916-47fb-80e3-2e40984017c2   50Mi       RWX            nfs-client     24s
pv-claim       Bound    pv-volume                                  2Gi        RWO            demo           21h
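nfs-provisioner-pvc-test.yaml is not reproduced here; a sketch consistent with the output above (50Mi, RWX, the nfs-client storage class):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc-test
spec:
  storageClassName: nfs-client   # the class created by the provisioner chart
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 50Mi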

Using the default StorageClass:

# The yaml does not specify a StorageClass
k8s-control-➜  cka git:(master) ✗ kubectl apply -f another-pvc-test.yaml
persistentvolumeclaim/another-nfs-pvc-test created
# It stays in Pending
k8s-control-➜  cka git:(master) ✗ kubectl get pvc
NAME                   STATUS    VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
another-nfs-pvc-test   Pending                                                                                       64s
k8s-control-➜  cka git:(master) ✗ kubectl describe pvc another-nfs-pvc-test
Name:          another-nfs-pvc-test
Namespace:     default
StorageClass:
Status:        Pending
Volume:
Labels:        <none>
Annotations:   <none>
Finalizers:    [kubernetes.io/pvc-protection]
Capacity:
Access Modes:
VolumeMode:    Filesystem
Used By:       <none>
Events:
  Type    Reason         Age                  From                         Message
  ----    ------         ----                 ----                         -------
  Normal  FailedBinding  7s (x10 over 2m11s)  persistentvolume-controller  no persistent volumes available for this claim and no storage class is set
# View the current storageclasses
k8s-control-➜  cka git:(master) ✗ kubectl get storageclasses.storage.k8s.io -o yaml
apiVersion: v1
items:
- allowVolumeExpansion: true
  apiVersion: storage.k8s.io/v1
  kind: StorageClass
  metadata:
    annotations:
      meta.helm.sh/release-name: nfs-subdir-external-provisioner
      meta.helm.sh/release-namespace: default
    creationTimestamp: "2023-09-26T10:04:37Z"
    labels:
      app: nfs-subdir-external-provisioner
      app.kubernetes.io/managed-by: Helm
      chart: nfs-subdir-external-provisioner-4.0.18
      heritage: Helm
      release: nfs-subdir-external-provisioner
    name: nfs-client
    resourceVersion: "142096"
    uid: 1473c085-df04-431e-b4a8-2dfb584a4f6c
  parameters:
    archiveOnDelete: "true"
  provisioner: cluster.local/nfs-subdir-external-provisioner
  reclaimPolicy: Delete
  volumeBindingMode: Immediate
kind: List
metadata:
# Patch nfs-client to be the default storageclass
k8s-control-➜  cka git:(master) ✗ kubectl patch storageclass nfs-client -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
storageclass.storage.k8s.io/nfs-client patched
# Recreate the PVC
k8s-control-➜  cka git:(master) ✗ kubectl delete -f another-pvc-test.yaml
persistentvolumeclaim "another-nfs-pvc-test" deleted
k8s-control-➜  cka git:(master) ✗ kubectl apply -f another-pvc-test.yaml
persistentvolumeclaim/another-nfs-pvc-test created
k8s-control-➜  cka git:(master) ✗ kubectl get pvc
NAME                   STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
another-nfs-pvc-test   Bound    pvc-7d34e82b-13eb-40a5-a65a-54a714f65839   50Mi       RWX            nfs-client     18s

# 5.6 ConfigMap

k8s-control-➜  cka git:(master)echo hello world > index.html
k8s-control-➜  cka git:(master) ✗ kubectl create cm webindex --from-file=index.html
configmap/webindex created
k8s-control-➜  cka git:(master) ✗ kubectl describe cm webindex
Name:         webindex
Namespace:    default
Labels:       <none>
Annotations:  <none>

Data
====
index.html:
----
hello world


BinaryData
====

Events:  <none>
k8s-control-➜  cka git:(master) ✗ kubectl create deployment webserver --image=nginx
deployment.apps/webserver created
k8s-control-➜  cka git:(master) ✗ kubectl edit deployments.apps webserver
# Adjust the following
    spec:
      containers:
      - image: nginx
        imagePullPolicy: Always
        name: nginx
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: cmvol
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
      - configMap:
          defaultMode: 420
          name: webindex
        name: cmvol
# Verify the result
k8s-control-➜  cka git:(master) ✗ kubectl exec webserver-76d44586d-rq4dj -- cat /usr/share/nginx/html/index.html
hello world

# 6. Network access

# 6.1 Creating NodePort access

k8s-control-➜  cka git:(master) ✗ kubectl create deploy webshop --image=nginx --replicas=3
deployment.apps/webshop created

k8s-control-➜  cka git:(master) ✗ kubectl get pods --selector app=webshop -o wide
NAME                       READY   STATUS    RESTARTS   AGE   IP              NODE          NOMINATED NODE   READINESS GATES
webshop-7f9fd49d4c-2x6fq   1/1     Running   0          45s   172.16.194.78   k8s-worker1   <none>           <none>
webshop-7f9fd49d4c-gbzhj   1/1     Running   0          45s   172.16.126.12   k8s-worker2   <none>           <none>
webshop-7f9fd49d4c-h8992   1/1     Running   0          45s   172.16.194.79   k8s-worker1   <none>           <none>

k8s-control-➜  cka git:(master) ✗ kubectl expose deploy webshop --type=NodePort --port=80
service/webshop exposed

k8s-control-➜  cka git:(master) ✗ kubectl get all --selector app=webshop
NAME                           READY   STATUS    RESTARTS   AGE
pod/webshop-7f9fd49d4c-2x6fq   1/1     Running   0          3m20s
pod/webshop-7f9fd49d4c-gbzhj   1/1     Running   0          3m20s
pod/webshop-7f9fd49d4c-h8992   1/1     Running   0          3m20s

NAME              TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
service/webshop   NodePort   10.105.53.122   <none>        80:32452/TCP   27s

NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/webshop   3/3     3            3           3m20s

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/webshop-7f9fd49d4c   3         3         3       3m20s

k8s-control-➜  cka git:(master) ✗ kubectl describe svc webshop
Name:                     webshop
Namespace:                default
Labels:                   app=webshop
Annotations:              <none>
Selector:                 app=webshop
Type:                     NodePort
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.105.53.122
IPs:                      10.105.53.122
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
NodePort:                 <unset>  32452/TCP
Endpoints:                172.16.126.12:80,172.16.194.78:80,172.16.194.79:80
Session Affinity:         None
External Traffic Policy:  Cluster
k8s-control-➜  cka git:(master) ✗ kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        46h
webshop      NodePort    10.105.53.122   <none>        80:32452/TCP   2m41s
# Access it
k8s-control-➜  cka git:(master)curl 192.168.50.159:32452
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>

# 6.2 Using Ingress

Ingress only supports HTTP and HTTPS applications. Controller docs: https://kubernetes.github.io/ingress-nginx/

  1. Install the ingress controller
k8s-control-➜  cka git:(master) ✗ helm upgrade --install ingress-nginx ingress-nginx \
  --repo https://kubernetes.github.io/ingress-nginx \
  --namespace ingress-nginx --create-namespace
k8s-control-➜  cka git:(master) ✗ kubectl get all -n ingress-nginx
NAME                                           READY   STATUS             RESTARTS   AGE
pod/ingress-nginx-controller-bf4dc8789-gbqfq   0/1     ImagePullBackOff   0          73s

NAME                                         TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
service/ingress-nginx-controller             LoadBalancer   10.109.149.114   <pending>     80:32561/TCP,443:31997/TCP   73s
service/ingress-nginx-controller-admission   ClusterIP      10.105.157.230   <none>        443/TCP                      73s

NAME                                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/ingress-nginx-controller   0/1     1            0           73s

NAME                                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/ingress-nginx-controller-bf4dc8789   1         1         0       73s
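  2. Create a test application and an Ingress rule for it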
k8s-control-➜  cka git:(master) ✗ kubectl create deploy nginxsvc --image=nginx --port=80
deployment.apps/nginxsvc created
k8s-control-➜  cka git:(master) ✗ kubectl expose deploy nginxsvc
service/nginxsvc exposed
k8s-control-➜  cka git:(master) ✗ kubectl get all --selector app=nginxsvc
NAME                            READY   STATUS    RESTARTS   AGE
pod/nginxsvc-5f8b7d4f4d-wc4qw   1/1     Running   0          77s

NAME               TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
service/nginxsvc   ClusterIP   10.103.213.197   <none>        80/TCP    19s

NAME                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginxsvc   1/1     1            1           77s

NAME                                  DESIRED   CURRENT   READY   AGE
replicaset.apps/nginxsvc-5f8b7d4f4d   1         1         1       77s
tpxcer@k8s-control:~$ kubectl create ingress nginxsvc --class=nginx --rule=nginxsvc.info/*=nginxsvc:80
ingress.networking.k8s.io/nginxsvc created
# Start port forwarding, running in the background
tpxcer@k8s-control:~$ kubectl port-forward -n ingress-nginx svc/ingress-nginx-controller 8080:80
Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
# Edit the hosts file to add a mapping for nginxsvc.info
k8s-control-➜  cka git:(master)sudo vim /etc/hosts
127.0.0.1 localhost nginxsvc.info
k8s-control-➜  ~ curl nginxsvc.info:8080
k8s-control-➜  ~ kubectl get all --selector app=nginxsvc
NAME                            READY   STATUS    RESTARTS   AGE
pod/nginxsvc-5f8b7d4f4d-wc4qw   1/1     Running   0          74m

NAME               TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
service/nginxsvc   ClusterIP   10.103.213.197   <none>        80/TCP    73m

NAME                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginxsvc   1/1     1            1           74m

NAME                                  DESIRED   CURRENT   READY   AGE
replicaset.apps/nginxsvc-5f8b7d4f4d   1         1         1       74m
k8s-control-➜  ~ kubectl get ingress
NAME       CLASS   HOSTS           ADDRESS   PORTS   AGE
nginxsvc   nginx   nginxsvc.info             80      10m
k8s-control-➜  ~ kubectl describe ingress nginxsvc
Name:             nginxsvc
Labels:           <none>
Namespace:        default
Address:
Ingress Class:    nginx
Default backend:  <default>
Rules:
  Host           Path  Backends
  ----           ----  --------
  nginxsvc.info
                 /   nginxsvc:80 (172.16.126.16:80)
Annotations:     <none>
Events:
  Type    Reason  Age   From                      Message
  ----    ------  ----  ----                      -------
  Normal  Sync    11m   nginx-ingress-controller  Scheduled for sync

# 6.3 Setting the default IngressClass

k8s-control-➜  ~ kubectl get ingressclass
NAME    CONTROLLER             PARAMETERS   AGE
nginx   k8s.io/ingress-nginx   <none>       95m
# Set the default ingress class
k8s-control-➜  ~ kubectl edit ingressclass nginx   # change as follows
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  annotations:
    meta.helm.sh/release-name: ingress-nginx
    meta.helm.sh/release-namespace: ingress-nginx
    ingressclass.kubernetes.io/is-default-class: "true"

# 6.4 Converting the NodePort service from 6.1 to Ingress

k8s-control-➜  ~ kubectl get deploy
NAME                              READY   UP-TO-DATE   AVAILABLE   AGE
webshop                           3/3     3            3           123m
k8s-control-➜  ~ kubectl get svc webshop
NAME      TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
webshop   NodePort   10.105.53.122   <none>        80:32452/TCP   121m
# Two rules are created here; the newdep application does not exist yet and is handled below
k8s-control-➜  ~ kubectl create ingress webshop-ingress --rule="/=webshop:80" --rule="/hello=newdep:8080"
ingress.networking.k8s.io/webshop-ingress created
# Add webshop.info
k8s-control-➜  ~ sudo vim /etc/hosts
127.0.0.1 localhost nginxsvc.info webshop.info
# With the host wildcard, everything matches webshop
k8s-control-➜  ~ kubectl get ingress
NAME              CLASS   HOSTS           ADDRESS   PORTS   AGE
nginxsvc          nginx   nginxsvc.info             80      34m
webshop-ingress   nginx   *                         80      2m11s
k8s-control-➜  ~ kubectl describe ingress webshop-ingress
Name:             webshop-ingress
Labels:           <none>
Namespace:        default
Address:
Ingress Class:    nginx
Default backend:  <default>
Rules:
  Host        Path  Backends
  ----        ----  --------
  *
              /        webshop:80 (172.16.126.12:80,172.16.194.78:80,172.16.194.79:80)
              /hello   newdep:8080 (<error: endpoints "newdep" not found>)
Annotations:  <none>
Events:
  Type    Reason  Age   From                      Message
  ----    ------  ----  ----                      -------
  Normal  Sync    3m4s  nginx-ingress-controller  Scheduled for sync

# Create newdep
k8s-control-➜  ~ kubectl create deploy newdep --image=gcr.io/google-samples/hello-app:2.0
deployment.apps/newdep created
k8s-control-➜  ~ kubectl expose deploy newdep --port=8080
service/newdep exposed

# Now webshop-ingress looks healthy again
k8s-control-➜  ~ kubectl describe ingress webshop-ingress

# 6.5 Port forwarding

# Run in the background
k8s-control-➜  ~ kubectl port-forward pods/webserver-76d44586d-rq4dj 1235:80
k8s-control-➜  ~ curl localhost:1235
Handling connection for 1235
hello world

# 7. Cluster management

# 7.1 Checking service status

k8s-control-➜  ~ systemctl status kubelet
k8s-control-➜  ~ kubectl describe node k8s-worker1
k8s-control-➜  ~ sudo ls -lrt /var/log
k8s-control-➜  ~ journalctl -u kubelet

# 7.2 Managing nodes with crictl

k8s-control-➜  ~ sudo crictl ps
CONTAINER           IMAGE               CREATED             STATE               NAME                      ATTEMPT             POD ID              POD
bcdf3ec24b567       ead0a4a53df89       24 hours ago        Running             coredns                   1                   9b27d3816ba5d       coredns-5dd5756b68-vsl86
57899abc2c06d       1919f2787fa70       24 hours ago        Running             calico-kube-controllers   1                   f8d6b3fce6bfe       calico-kube-controllers-7ddc4f45bc-8sqw5
b48f7a99c8b2a       ead0a4a53df89       24 hours ago        Running             coredns                   1                   87f2c65a93aa8       coredns-5dd5756b68-t4dt7
65beb5c2a7776       8065b798a4d67       24 hours ago        Running             calico-node               1                   e15ccfeaa5a60       calico-node-nbgvd
b6e0dd385ae18       c120fed2beb84       24 hours ago        Running             kube-proxy                1                   d168fa054d699       kube-proxy-fzzwg
# Use an ID from above to inspect the details
k8s-control-➜  ~ sudo crictl inspect bcdf3ec24b567 
{
  "status": {
    "id": "bcdf3ec24b567c6a6ce3517283ef91bfafb48aa6ec3ae1c4e91563db52b653eb",
    "metadata": {
      "attempt": 1,
      "name": "coredns"
    },
    "state": "CONTAINER_RUNNING",
    "createdAt": "2023-09-27T02:40:13.546152354Z",
# List pods
k8s-control-➜  ~ sudo crictl pods
# Manage images
k8s-control-➜  ~ sudo crictl images               
IMAGE                                     TAG                 IMAGE ID            SIZE
docker.io/calico/cni                      v3.26.1             9dee260ef7f59       93.4MB
docker.io/calico/kube-controllers         v3.26.1             1919f2787fa70       32.8MB
docker.io/calico/node                     v3.26.1             8065b798a4d67       86.6MB
registry.k8s.io/coredns/coredns           v1.10.1             ead0a4a53df89       16.2MB
# Pull an image
k8s-control-➜  ~ sudo crictl pull docker.io/library/mysql
Image is up to date for sha256:b2013ac9910129ded1da4c96495142b2ed0638ddf7e86e65156400d9a8503777

# 7.3 Managing nodes

  1. Mark a node as unschedulable

Once a node is marked unschedulable, the Kubernetes scheduler will not start new Pods on it. Pods already running on the node are unaffected and keep running.

kubectl cordon k8s-worker2
kubectl describe node k8s-worker2
kubectl get nodes
kubectl uncordon k8s-worker2
  2. Mark a node as unschedulable and evict its running pods
kubectl drain k8s-worker2
# DaemonSet pods cannot be evicted; --ignore-daemonsets lets the drain proceed and leaves them in place
kubectl drain k8s-worker2 --ignore-daemonsets
  3. Delete a pod
k8s-control-➜  ~ kubectl delete pod two-containers
pod "two-containers" deleted

# 7.4 Etcd backup and restore

  1. Backup
k8s-control-➜  ~ sudo apt install etcd-client
# Find the certificates etcd uses
k8s-control-➜  ~ ps aux |grep etcd 
# List the keys
k8s-control-➜  ~ sudo ETCDCTL_API=3 etcdctl --endpoints=localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key get / --prefix --keys-only
# Take the snapshot
k8s-control-➜  ~ sudo ETCDCTL_API=3 etcdctl --endpoints=localhost:2379 --cacert /etc/kubernetes/pki/etcd/ca.crt --cert /etc/kubernetes/pki/etcd/server.crt --key /etc/kubernetes/pki/etcd/server.key snapshot save /tmp/etcbackup.db
2023-09-28 06:26:52.245182 I | clientv3: opened snapshot stream; downloading
2023-09-28 06:26:52.298320 I | clientv3: completed snapshot read; closing
Snapshot saved at /tmp/etcbackup.db
# Verify the backup
k8s-control-➜  ~  sudo ETCDCTL_API=3 etcdctl  --write-out=table snapshot status /tmp/etcbackup.db 
+----------+----------+------------+------------+
|   HASH   | REVISION | TOTAL KEYS | TOTAL SIZE |
+----------+----------+------------+------------+
| 4796ed31 |   450267 |       1406 |     6.0 MB |
+----------+----------+------------+------------+

  2. Restore

# First delete an application (to verify the restore later)
k8s-control-➜  ~ kubectl delete deploy newdep
deployment.apps "newdep" deleted
# Move the static pod manifests out of the way (this stops etcd and the rest of the control plane)
k8s-control-➜  ~ cd /etc/kubernetes/manifests 
k8s-control-➜  manifests ls                     
etcd.yaml  kube-apiserver.yaml  kube-controller-manager.yaml  kube-scheduler.yaml
k8s-control-➜  manifests sudo mv * ..       
# Check again after a moment; crictl will show that etcd is gone
k8s-control-➜  manifests sudo crictl ps      
# Restore the snapshot into a new data directory
k8s-control-➜  ~ sudo ETCDCTL_API=3 etcdctl snapshot restore /tmp/etcbackup.db --data-dir /var/lib/etcd-backup
[sudo] password for tpxcer:
2023-09-28 06:51:11.996876 I | mvcc: restore compact to 449348
2023-09-28 06:51:12.004159 I | etcdserver/membership: added member 8e9e05c52164694d [http://localhost:2380] to cluster cdf818194e3a8c32
k8s-control-➜  ~ sudo ls -l /var/lib/etcd-backup
total 4
drwx------ 4 root root 4096 Sep 28 06:51 member
# Edit the yaml file to point at the new path
k8s-control-➜  ~ sudo vim /etc/kubernetes/etcd.yaml
  - hostPath:
      path: /var/lib/etcd-backup
      type: DirectoryOrCreate
    name: etcd-data
# Move the manifests back; the deleted deployment starts redeploying
k8s-control-➜  manifests sudo mv ../*.yaml .

# 7.5 Upgrading k8s

Official upgrade docs: https://kubernetes.io/zh-cn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/
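As a rough sketch of the documented flow (the version strings and node names are placeholders; follow the official doc for the exact versions):

# On the control-plane node: upgrade kubeadm, then the control plane
sudo apt-mark unhold kubeadm && sudo apt-get install -y kubeadm=<version> && sudo apt-mark hold kubeadm
sudo kubeadm upgrade plan
sudo kubeadm upgrade apply v<version>
# Then per node: drain, upgrade kubelet/kubectl, restart, uncordon
kubectl drain <node> --ignore-daemonsets
sudo apt-mark unhold kubelet kubectl && sudo apt-get install -y kubelet=<version> kubectl=<version> && sudo apt-mark hold kubelet kubectl
sudo systemctl daemon-reload && sudo systemctl restart kubelet
kubectl uncordon <node>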

# 7.6 nodeSelector labels

# Label worker2 with disktype=ssd
k8s-control-➜  ~ kubectl label nodes k8s-worker2 disktype=ssd
node/k8s-worker2 labeled
# selector-pod.yaml contains a nodeSelector with disktype: ssd
k8s-control-➜  ~ cd cka   
k8s-control-➜  cka git:(master)vim selector-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    disktype: ssd
# Put k8s-worker2 into maintenance mode (cordon)
k8s-control-➜  cka git:(master) ✗ kubectl cordon k8s-worker2                  
node/k8s-worker2 cordoned
# Apply the pod
k8s-control-➜  cka git:(master) ✗ kubectl apply -f selector-pod.yaml                                                                         
pod/nginx created
# The pod stays Pending, because the remaining worker has no matching label
k8s-control-➜  cka git:(master) ✗ kubectl get pods 
NAME                                               READY   STATUS    RESTARTS        AGE
nginx                                              0/1     Pending   0               58s
# The description shows the selector does not match
k8s-control-➜  cka git:(master) ✗ kubectl describe pod nginx      
Events:
  Type     Reason            Age    From               Message
  ----     ------            ----   ----               -------
  Warning  FailedScheduling  2m32s  default-scheduler  0/3 nodes are available: 1 node(s) didn't match Pod's node affinity/selector, 1
# Take worker2 out of maintenance mode, check the pod again, and it gets scheduled onto worker2
k8s-control-➜  cka git:(master) ✗ kubectl uncordon k8s-worker2      
node/k8s-worker2 uncordoned

# 7.7 Affinity configuration

Official docs:

Assign Pods to nodes using node affinity: https://kubernetes.io/zh-cn/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/

# pod-with-node-affinity.yaml requires nodes to carry the key kubernetes.io/e2e-az-name with value e2e-az1 or e2e-az2
# preferredDuringSchedulingIgnoredDuringExecution expresses a preference rather than a requirement
# node affinity is similar to nodeSelector, but more expressive
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: kubernetes.io/e2e-az-name
            operator: In
            values:
            - e2e-az1
            - e2e-az2
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: another-node-label-key
            operator: In
            values:
            - another-node-label-value
# No node has a matching key, so the pod stays Pending
k8s-control-➜  cka git:(master) ✗ kubectl apply -f pod-with-node-affinity.yaml 
pod/with-node-affinity created
k8s-control-➜  cka git:(master) ✗ kubectl get pods                            
NAME                                               READY   STATUS    RESTARTS        AGE
with-node-affinity                                 0/1     Pending   0               19s
k8s-control-➜  cka git:(master) ✗ kubectl delete -f pod-with-node-affinity.yaml
pod "with-node-affinity" deleted

# pod-with-node-antiaffinity.yaml: because the operator is NotIn, worker1 gets the pod (continuing from 7.6 nodeSelector labels)
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: disktype
            operator: NotIn
            values:
            - ssd     

This example defines one Pod affinity rule and one Pod anti-affinity rule. The Pod affinity rule uses requiredDuringSchedulingIgnoredDuringExecution, while the Pod anti-affinity rule uses preferredDuringSchedulingIgnoredDuringExecution.

The affinity rule says the Pod may be scheduled onto a node only if that node is in the same zone as at least one running Pod labeled security=S1. More precisely, the scheduler must place the Pod on a node carrying the label topology.kubernetes.io/zone=V such that at least one node in that zone runs a Pod labeled security=S1.

The anti-affinity rule says the Pod should preferably not be scheduled onto a node if that node is in the same zone as at least one Pod labeled security=S2. More precisely, if other nodes in the same zone run Pods labeled security=S2 and the node carries the label topology.kubernetes.io/zone=R, the Pod should not be scheduled there.

# pod-with-pod-affinity.yaml; note that this one uses podAffinity
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: security
            operator: In
            values:
            - S1
        topologyKey: failure-domain.beta.kubernetes.io/zone
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            matchExpressions:
            - key: security
              operator: In
              values:
              - S2
          topologyKey: failure-domain.beta.kubernetes.io/zone

The podAntiAffinity rule tells the scheduler to avoid placing multiple replicas labeled app=store on the same node.

# redis-with-pod-affinity.yaml
# labels sets app: store, and the podAntiAffinity also matches app=store: although replicas is 3, a node never runs more than one copy, so with only two schedulable workers the third replica stays Pending
spec:
  selector:
    matchLabels:
      app: store
  replicas: 3
  template:
    metadata:
      labels:
        app: store
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - store
            topologyKey: "kubernetes.io/hostname"
k8s-control-➜  cka git:(master) ✗ kubectl apply -f redis-with-pod-affinity.yaml 
deployment.apps/redis-cache created
k8s-control-➜  cka git:(master) ✗ kubectl get pods                        
NAME                                               READY   STATUS    RESTARTS          AGE
redis-cache-8478cbdc86-hmllk                       1/1     Running   0                 99s
redis-cache-8478cbdc86-wd7zl                       0/1     Pending   0                 99s
redis-cache-8478cbdc86-z4rg2                       1/1     Running   0                 99s

Similarly, in the example below, web-store pods must not share a node with each other, and each one can only be created on a node that already runs an app=store pod.

#  webserver-with-pod-affinity.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-server
spec:
  selector:
    matchLabels:
      app: web-store
  replicas: 3
  template:
    metadata:
      labels:
        app: web-store
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - web-store
            topologyKey: "kubernetes.io/hostname"
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - store
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: web-app
        image: nginx:1.12-alpine

# 7.8 Taints and tolerations

In Kubernetes, taints are a mechanism for marking a node to restrict which Pods can be scheduled and run on it. A taint can be thought of as a marker on the node indicating that it has some special property or constraint.

Once a node is tainted, only Pods with a matching toleration can be scheduled onto it. Tolerations are Pod attributes that define which node taints the Pod can tolerate.

Kubernetes taints come in three effects:

  • NoSchedule: prevents Pods from being scheduled onto the tainted node; only Pods with a matching toleration can be scheduled there.
  • PreferNoSchedule: lowers the node's scheduling priority; the scheduler tries to avoid the node and only uses it when no other node is available.
  • NoExecute: evicts Pods already running on the node; the node refuses to run Pods until they carry a matching toleration or the taint is removed.
# Create a taint
k8s-control-➜  ~ kubectl taint nodes k8s-worker1 storage=ssd:NoSchedule
node/k8s-worker1 tainted
# View the taints on node k8s-worker1
k8s-control-➜  ~ kubectl describe nodes k8s-worker1 |grep -i taint
Taints:             storage=ssd:NoSchedule
# Create an application
k8s-control-➜  ~ kubectl create deploy nginx-taint --image=nginx                          
deployment.apps/nginx-taint created
k8s-control-➜  ~ kubectl scale deploy nginx-taint --replicas=3 
deployment.apps/nginx-taint scaled
# All replicas land on worker2, since worker1 is tainted
k8s-control-➜  ~ kubectl get pods -o wide --selector app=nginx-taint 
NAME                           READY   STATUS    RESTARTS   AGE     IP              NODE          NOMINATED NODE   READINESS GATES
nginx-taint-68bd5db674-7s7qs   1/1     Running   0          2m27s   172.16.126.32   k8s-worker2   <none>           <none>
nginx-taint-68bd5db674-shb6c   1/1     Running   0          49s     172.16.126.34   k8s-worker2   <none>           <none>
nginx-taint-68bd5db674-wv5pb   1/1     Running   0          49s     172.16.126.33   k8s-worker2   <none>           <none>
# The nginx pod (from 7.6) selects disktype=ssd
k8s-control-➜  cka git:(master) ✗ kubectl describe pod nginx |grep Node-Selectors
Node-Selectors:              disktype=ssd
# Create a toleration
# With this toleration applied, the pod can be scheduled onto the tainted worker1
k8s-control-➜  cka git:(master)vim taint-toleration.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-ssd
  labels:
    env: test
spec:
  containers:
  - name: nginx-ssd
    image: nginx
    imagePullPolicy: IfNotPresent
  tolerations:
  - key: "storage"
    operator: "Equal"
    value: "ssd"
    effect: "NoSchedule"
    
## Look at another toleration config file
## No existing taint matches this toleration
apiVersion: v1
kind: Pod
metadata:
  name: nginx-hdd
  labels:
    env: test
spec:
  containers:
  - name: nginx-hdd
    image: nginx
    imagePullPolicy: IfNotPresent
  tolerations:
  - key: "storage"
    operator: "Equal"
    value: "hdd"
    effect: "NoSchedule"
## Apply
k8s-control-➜  cka git:(master) ✗ kubectl apply -f taint-toleration2.yaml        
pod/nginx-hdd created
## It lands on worker2
k8s-control-➜  cka git:(master) ✗ kubectl get pods nginx-hdd -o wide                 
NAME        READY   STATUS    RESTARTS   AGE   IP              NODE          NOMINATED NODE   READINESS GATES
nginx-hdd   1/1     Running   0          33s   172.16.126.35   k8s-worker2   <none>           <none>

# 7.9 Resource limits

LimitRange and Quota are two different Kubernetes mechanisms for resource management and quota control.

A LimitRange constrains the resource usage of individual containers or Pods within a single namespace. It defines upper and lower bounds on what a container or Pod may use, and can set limits and requests for CPU, memory, ephemeral storage, and so on.

A Quota is a higher-level quota mechanism that applies to an entire namespace. It lets an administrator cap the namespace's total resource consumption, such as CPU, memory, number of storage volumes, and object counts, and it can also limit the number of objects of specific types such as Pods, Services, and ConfigMaps.

To summarize the difference:

LimitRange limits individual containers or Pods within a namespace; Quota limits the namespace as a whole, across many containers or Pods, with totals and per-type caps. LimitRange gives fine-grained limits, Quota gives namespace-level control, and the two can be combined for more flexible and precise resource management in a cluster.

# Quota limits

# Create a namespace and a quota
k8s-control-➜  ~ kubectl create ns limited; kubectl create quota qtest --hard pods=3,cpu=100m,memory=500Mi --namespace limited
namespace/limited created
resourcequota/qtest created
# View the quota
k8s-control-➜  ~ kubectl describe quota -n limited                                                                            
Name:       qtest
Namespace:  limited
Resource    Used  Hard
--------    ----  ----
cpu         0     100m
memory      0     500Mi
pods        0     3
# Create an application
k8s-control-➜  ~ kubectl create deploy nginx --image=nginx --replicas=3 -n limited
deployment.apps/nginx created
# None of the pods start
k8s-control-➜  ~ kubectl get all -n limited                                       
NAME                    READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginx   0/3     0            0           118s

NAME                               DESIRED   CURRENT   READY   AGE
replicaset.apps/nginx-7854ff8877   3         0         0       118s
# The events say the pods must specify their resource usage
k8s-control-➜  ~ kubectl describe -n limited rs nginx-7854ff8877    
  Warning  FailedCreate  3m56s                replicaset-controller  Error creating: pods "nginx-7854ff8877-jkx7t" is forbidden: failed quota: qtest: must specify cpu for: nginx; memory for: nginx
  Warning  FailedCreate  3m56s                replicaset-controller  Error creating: pods "nginx-7854ff8877-qb5x7" is forbidden: failed quota: qtest: must specify cpu for: nginx; memory for: nginx
# Set resource requests and limits
k8s-control-➜  ~ kubectl set resources deploy nginx --requests cpu=100m,memory=50Mi --limits cpu=200m,memory=100Mi -n limited
deployment.apps/nginx resource requirements updated
# Check again: only one pod fits, because each pod requests 100m CPU and the quota's hard cpu limit is 100m
k8s-control-➜  ~ kubectl describe quota -n limited              
Name:       qtest
Namespace:  limited
Resource    Used  Hard
--------    ----  ----
cpu         100m  100m
memory      50Mi  500Mi
pods        1     3
# Edit the quota
k8s-control-➜  ~ kubectl edit quota -n limited   
# Delete the quota
kubectl delete quota qtest -n limited

Setting a LimitRange:

# View the config
k8s-control-➜  cka git:(master)vim limitrange.yaml 
apiVersion: v1
kind: LimitRange
metadata:
  name: mem-limit-range
spec:
  limits:
  - default:
      memory: 512Mi
    defaultRequest:
      memory: 256Mi
    type: Container
# Create the limitrange
k8s-control-➜  cka git:(master) ✗ kubectl apply -f limitrange.yaml -n limited
limitrange/mem-limit-range created
# View it
k8s-control-➜  cka git:(master) ✗ kubectl describe ns limited
Resource Limits
 Type       Resource  Min  Max  Default Request  Default Limit  Max Limit/Request Ratio
 ----       --------  ---  ---  ---------------  -------------  -----------------------
 Container  memory    -    -    256Mi            512Mi       
# Look at the constrained pod config
k8s-control-➜  cka git:(master)vim limitedpod.yaml 
# First create a pod
k8s-control-➜  cka git:(master) ✗  kubectl run limited --image=nginx -n limited
pod/limited created
k8s-control-➜  cka git:(master) ✗ kubectl get pods -n limited             
NAME                    READY   STATUS             RESTARTS         AGE
limited                 1/1     Running            0                70s
# View the limits applied to it
k8s-control-➜  cka git:(master) ✗ kubectl describe pod limited -n limited     
    Restart Count:  0
    Limits:
      memory:  512Mi
    Requests:
      memory:     256Mi
    Environment:  <none>


# 8. Security settings

# 8.1 SecurityContext settings

# Security context documentation
k8s-control-➜  ~ kubectl explain pod.spec.securityContext
k8s-control-➜  ~ kubectl explain pod.spec.containers.securityContext
# An example security config: runAsGroup sets the GID that container processes run as; fsGroup sets the group ownership of files and directories in the pod's volumes
k8s-control-➜  cka git:(master)vim security-context.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: security-context-demo
spec:
  securityContext:
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 2000
  volumes:
  - name: securevol
    emptyDir: {}
  containers:
  - name: sec-demo
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: securevol
      mountPath: /data/demo
    securityContext:
      allowPrivilegeEscalation: false
# Apply
kubectl apply -f security-context.yaml             
pod/security-context-demo created   
k8s-control-➜  cka git:(master) ✗ kubectl get pods security-context-demo
NAME                    READY   STATUS    RESTARTS   AGE
security-context-demo   1/1     Running   0          43s
# Check the permissions
k8s-control-➜  cka git:(master) ✗ kubectl exec -it security-context-demo -- sh
~ $ ps
PID   USER     TIME  COMMAND
    1 1000      0:00 sh -c sleep 3600
    7 1000      0:00 sh
   13 1000      0:00 ps
~ $ ls -l /data
total 4
drwxrwsrwx    2 root     2000          4096 Oct 18 09:16 demo
~ $ id
uid=1000 gid=1000 groups=2000

# 8.2 RBAC (Role-Based Access Control)

# Viewing RBAC

# List the roles
k8s-control-➜  ~ kubectl get roles  
NAME                                             CREATED AT
leader-locking-nfs-subdir-external-provisioner   2023-09-26T10:04:37Z
# View the role's configuration
k8s-control-➜  ~ kubectl get role leader-locking-nfs-subdir-external-provisioner -o yaml
# View the role bindings
k8s-control-➜  ~ kubectl get rolebinding
NAME                                             ROLE                                                  AGE
leader-locking-nfs-subdir-external-provisioner   Role/leader-locking-nfs-subdir-external-provisioner   22d
# View role bindings in all namespaces
k8s-control-➜  ~ kubectl get rolebinding -A
NAMESPACE       NAME                                                ROLE                                                  AGE
default         leader-locking-nfs-subdir-external-provisioner      Role/leader-locking-nfs-subdir-external-provisioner   22d
ingress-nginx   ingress-nginx                                       Role/ingress-nginx                                    21d
# View the binding details
k8s-control-➜  ~ kubectl get rolebindings.rbac.authorization.k8s.io -n ingress-nginx ingress-nginx -o yaml

# Creating a pod that accesses the API with the default permissions

# Create a simple pod
k8s-control-➜  cka git:(master)vim mypod.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
  - name: alpine
    image: alpine:3.9
    command:
    - "sleep"
    - "3600"

# Create the pod
k8s-control-➜  cka git:(master) ✗ kubectl apply -f mypod.yaml                                                              
pod/mypod created
# Inspect the generated config; it uses the default service account
k8s-control-➜  cka git:(master) ✗ kubectl get pods mypod -o yaml   
  serviceAccount: default
  serviceAccountName: default
# Open a shell in the pod
k8s-control-➜  cka git:(master) ✗ kubectl exec -it mypod -- sh    
# Install the curl package
/ # apk add --update curl
fetch http://dl-cdn.alpinelinux.org/alpine/v3.9/main/x86_64/APKINDEX.tar.gz
fetch http://dl-cdn.alpinelinux.org/alpine/v3.9/community/x86_64/APKINDEX.tar.gz
(1/5) Installing ca-certificates (20191127-r2)
(2/5) Installing nghttp2-libs (1.35.1-r2)
(3/5) Installing libssh2 (1.9.0-r1)
(4/5) Installing libcurl (7.64.0-r5)
(5/5) Installing curl (7.64.0-r5)
Executing busybox-1.29.3-r10.trigger
Executing ca-certificates-20191127-r2.trigger
OK: 7 MiB in 19 packages
# Call the Kubernetes API anonymously (insecure); note the message: forbidden: User \"system:anonymous\" cannot get path \"/api/v1\"
/ # curl https://kubernetes/api/v1 --insecure
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "forbidden: User \"system:anonymous\" cannot get path \"/api/v1\"",
  "reason": "Forbidden",
  "details": {},
  "code": 403
}/ # 
# Access with the service account token
/ # TOKEN=$(cat /run/secrets/kubernetes.io/serviceaccount/token)
/ # curl -H "Authorization: Bearer $TOKEN" https://kubernetes/api/v1/ --insecure
# The default account has few permissions
/ # curl -H "Authorization: Bearer $TOKEN" https://kubernetes/api/v1/namespaces/default/pods/ --insecure
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "pods is forbidden: User \"system:serviceaccount:default:default\" cannot list resource \"pods\" in API group \"\" in the namespace \"default\"",
  "reason": "Forbidden",
  "details": {
    "kind": "pods"
  },
  "code": 403

# 创建SA和Role

# 创建SA
k8s-control-➜  cka git:(master)vim mysa.yaml    
apiVersion: v1
kind: ServiceAccount
metadata:
  name: mysa
k8s-control-➜  cka git:(master) ✗ kubectl apply -f mysa.yaml    
serviceaccount/mysa created
# 创建角色,下面配置就是提供pods的列表
k8s-control-➜  cka git:(master)vim list-pods.yaml 
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: list-pods
  namespace: default
rules:
  - apiGroups:
    - ''
    resources:
    - pods
    verbs:
    - list
k8s-control-➜  cka git:(master) ✗ kubectl apply -f list-pods.yaml            
role.rbac.authorization.k8s.io/list-pods created
# 绑定sa和角色
k8s-control-➜  cka git:(master)vim list-pods-mysa-binding.yaml 
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: list-pods-mysa-binding
  namespace: default
roleRef:
  kind: Role
  name: list-pods
  apiGroup: rbac.authorization.k8s.io
subjects:
  - kind: ServiceAccount
    name: mysa
    namespace: default
k8s-control-➜  cka git:(master) ✗ kubectl apply -f list-pods-mysa-binding.yaml 
rolebinding.rbac.authorization.k8s.io/list-pods-mysa-binding created
# 配置pod,配置里面serviceAccountName指定了mysa
k8s-control-➜  cka git:(master)vim mysapod.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: mysapod
spec:
  serviceAccountName: mysa
  containers:
  - name: alpine
    image: alpine:3.9
    command:
    - "sleep"
    - "3600"
k8s-control-➜  cka git:(master) ✗ kubectl apply -f mysapod.yaml               
pod/mysapod created
# 验证sa账号
k8s-control-➜  cka git:(master) ✗ kubectl exec -it mysapod -- sh
/ # apk add --update curl
/ # TOKEN=$(cat /run/secrets/kubernetes.io/serviceaccount/token)
# 再次查看就能看到pod列表了
/ # curl -H "Authorization: Bearer $TOKEN" https://kubernetes/api/v1/namespaces/default/pods/ --insecure |more
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0{
  "kind": "PodList",
  "apiVersion": "v1",
  "metadata": {
    "resourceVersion": "3680821"
  },
  "items": [
    {
      "metadata": {
        "name": "firstnginx-d8679d567-4rj9n",
        "generateName": "firstnginx-d8679d567-",
        "namespace": "default",
....
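除了进入 pod 用 curl 验证,也可以用 kubectl auth can-i 模拟 SA 的身份来检查权限。下面是一个示意(假设上面的 mysa 和 list-pods 角色都已创建):

# 预期返回 yes:角色允许 list pods
kubectl auth can-i list pods --as=system:serviceaccount:default:mysa -n default
# 预期返回 no:角色没有授予 delete
kubectl auth can-i delete pods --as=system:serviceaccount:default:mysa -n default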

# 7.3 创建K8S用户账户

# 创建用户环境

k8s-control-➜  cka git:(master) ✗ kubectl create ns students          
namespace/students created
k8s-control-➜  cka git:(master) ✗ kubectl create ns staff   
namespace/staff created
k8s-control-➜  cka git:(master) ✗ kubectl config get-contexts
CURRENT   NAME                          CLUSTER      AUTHINFO           NAMESPACE
*         kubernetes-admin@kubernetes   kubernetes   kubernetes-admin   
# 这个其实在我们的配置文件里面
k8s-control-➜  cka git:(master)less ~/.kube/config

# 创建用户账号

k8s-control-➜  cka git:(master)sudo useradd -m -G sudo -s /bin/bash anna
k8s-control-➜  cka git:(master)sudo passwd anna   
# 切换到anna
k8s-control-➜  cka git:(master)su - anna    
# 创建私钥
anna@k8s-control:~$ openssl genrsa -out anna.key 2048
# 生成证书签名请求
anna@k8s-control:~$ openssl req -new -key anna.key -out anna.csr -subj "/CN=anna/O=k8s"
# 生成证书
anna@k8s-control:~$ sudo openssl x509 -req -in anna.csr -CA /etc/kubernetes/pki/ca.crt -CAkey /etc/kubernetes/pki/ca.key -CAcreateserial -out anna.crt -days 1800
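可以用 openssl 检查一下签出的证书(示意),Subject 应该就是前面 CSR 里的 /CN=anna/O=k8s,有效期 1800 天:

# 示意:查看证书的 Subject 和有效期
openssl x509 -in anna.crt -noout -subject -dates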

# 配置用户

mkdir -p .kube
anna@k8s-control:~$ sudo cp -i /etc/kubernetes/admin.conf /home/anna/.kube/config
anna@k8s-control:~$ sudo chown -R anna:anna ~/.kube
anna@k8s-control:~$ kubectl config set-credentials anna --client-certificate=/home/anna/anna.crt --client-key=/home/anna/anna.key
User "anna" set.

# 为新用户创建Context,关联集群

kubectl config set-context anna-context --cluster=kubernetes --namespace=staff --user=anna
anna@k8s-control:~$ kubectl config use-context anna-context
Switched to context "anna-context".
anna@k8s-control:~$ kubectl config get-contexts
CURRENT   NAME                          CLUSTER      AUTHINFO           NAMESPACE
*         anna-context                  kubernetes   anna               staff
          kubernetes-admin@kubernetes   kubernetes   kubernetes-admin   
# 尝试获取pod清单,提示没有权限
anna@k8s-control:~$ kubectl get pods
Error from server (Forbidden): pods is forbidden: User "anna" cannot list resource "pods" in API group "" in the namespace "staff"

# 配置staff role

k8s-control-➜  cka git:(master)vim staff-role.yaml 
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: staff
  name: staff
rules:
- apiGroups: ["", "extensions", "apps"]
  resources: ["deployments", "replicasets", "pods"]
  verbs: ["list", "get", "watch", "create", "update", "patch", "delete"]
k8s-control-➜  cka git:(master) ✗ kubectl apply -f staff-role.yaml 
role.rbac.authorization.k8s.io/staff created
# 绑定
k8s-control-➜  cka git:(master)vim rolebind.yaml      
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: staff-role-binding
  namespace: staff
subjects:
- kind: User
  name: anna
  apiGroup: ""
roleRef:
  kind: Role
  name: staff
  apiGroup: ""
k8s-control-➜  cka git:(master) ✗ kubectl apply -f rolebind.yaml               
rolebinding.rbac.authorization.k8s.io/staff-role-binding created
# 再登录anna
anna@k8s-control:~$ kubectl config view
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.50.159:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    namespace: staff
    user: anna
  name: anna-context
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: anna-context
kind: Config
preferences: {}
users:
- name: anna
  user:
    client-certificate: /home/anna/anna.crt
    client-key: /home/anna/anna.key
- name: kubernetes-admin
  user:
    client-certificate-data: DATA+OMITTED
    client-key-data: DATA+OMITTED
# 创建应用
anna@k8s-control:~$ kubectl create deploy annaploy --image=nginx
deployment.apps/annaploy created
anna@k8s-control:~$ kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
annaploy-6c88f9955f-4wzbc   1/1     Running   0          17s
# 查看default空间没有权限
anna@k8s-control:~$ kubectl get pods -n default
Error from server (Forbidden): pods is forbidden: User "anna" cannot list resource "pods" in API group "" in the namespace "default"
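也可以用 kubectl auth can-i --list 直接列出 anna 在某个命名空间里能做什么(示意,建议在管理员上下文中执行):

kubectl auth can-i --list --as=anna -n staff
kubectl auth can-i --list --as=anna -n default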

# 配置 创建只读角色

k8s-control-➜  cka git:(master)vim students-role.yaml 
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default
  name: students
rules:
- apiGroups: ["", "extensions", "apps"]
  resources: ["deployments", "replicasets", "pods"]
  verbs: ["list", "get", "watch"]
k8s-control-➜  cka git:(master) ✗ kubectl apply -f students-role.yaml 
role.rbac.authorization.k8s.io/students created
k8s-control-➜  cka git:(master)vim rolebindstudents.yaml 
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: students-role-binding
  namespace: default
subjects:
- kind: User
  name: anna
  apiGroup: ""
roleRef:
  kind: Role
  name: students
  apiGroup: ""
k8s-control-➜  cka git:(master) ✗ kubectl apply -f rolebindstudents.yaml 
rolebinding.rbac.authorization.k8s.io/students-role-binding created
# 再次查看默认pod
k8s-control-➜  cka git:(master)su - anna
anna@k8s-control:~$ kubectl get pods -n default
NAME                                               READY   STATUS    RESTARTS          AGE
firstnginx-d8679d567-4rj9n                         1/1     Running   2 (3d3h ago)      23d

# 八、系统监控

# 8.1 安装Metrics Server

官方地址:https://github.com/kubernetes-sigs/metrics-server

  1. 安装
# 安装
k8s-control-➜  autoscaling git:(master) ✗ kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# 查看状态,Pod 虽然在运行,但 READY 是 0/1
k8s-control-➜  ~ kubectl -n kube-system get pods
NAME                                       READY   STATUS              RESTARTS      AGE
metrics-server-fbb469ccc-6gzjc             0/1     Running   0             7m35s
k8s-control-➜  ~ kubectl describe -n kube-system pod metrics-server
# 查看日志可以看到证书错误
k8s-control-➜  ~ kubectl logs -n kube-system metrics-server-fbb469ccc-6gzjc
E0928 05:51:40.931295       1 scraper.go:140] "Failed to scrape node" err="Get \"https://192.168.50.159:10250/metrics/resource\": x509: cannot validate certificate for 192.168.50.159 because it doesn't contain any IP SANs" node="k8s-control"
E0928 05:51:40.936551       1 scraper.go:140] "Failed to scrape node" err="Get \"https://192.168.50.164:10250/metrics/resource\": x509: cannot validate certificate for 192.168.50.164 because it doesn't contain any IP SANs" node="k8s-worker1"
E0928 05:51:40.939216       1 scraper.go:140] "Failed to scrape node" err="Get \"https://192.168.50.35:10250/metrics/resource\": x509: cannot validate certificate for 192.168.50.35 because it doesn't contain any IP SANs" node="k8s-worker2"
I0928 05:51:42.290022       1 server.go:187] "Failed probe" probe="metric-storage-ready" err="no metrics to serve"
# 修改容器,设置可以不安全访问
k8s-control-➜  ~ kubectl -n kube-system edit deployments.apps metrics-server
  template:
    metadata:
      creationTimestamp: null
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --kubelet-insecure-tls
  2. 使用
k8s-control-➜  ~ kubectl top pods
NAME                                               CPU(cores)   MEMORY(bytes)
firstnginx-d8679d567-4rj9n                         0m           29Mi
firstnginx-d8679d567-jg25f                         0m           29Mi
firstnginx-d8679d567-jtcwt                         0m           29Mi
firstnginx-d8679d567-s7gcn                         0m           34Mi
morevol                                            0m           0Mi
mydaemon-9tgmn                                     0m           29Mi
k8s-control-➜  cka git:(master) ✗ kubectl top nodes                                     
NAME          CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-control   171m         0%     2231Mi          14%       
k8s-worker1   83m          0%     1759Mi          11%       
k8s-worker2   101m         0%     2772Mi          17%     
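Metrics Server 就绪后,HPA(Horizontal Pod Autoscaler)才有数据可依。下面是一个最小示意(假设 firstnginx 这个 deployment 还在运行,且容器配置了 CPU requests,否则 HPA 算不出使用率):

kubectl autoscale deployment firstnginx --cpu-percent=50 --min=2 --max=5
kubectl get hpa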

# 九、实战一

# 9.1 创建nginx和redis在一起的pod

k8s-control-➜  cka git:(master) ✗ kubectl run lab123 --image=nginx --dry-run=client -o yaml > lab123.yaml
# 编辑yaml文件,增加redis
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: lab123
  name: lab123
spec:
  containers:
  - image: nginx
    name: lab123
  - image: redis
    name: redis
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
# 应用
k8s-control-➜  cka git:(master) ✗ kubectl apply -f lab123.yaml 
# 可以看到2/2
k8s-control-➜  cka git:(master) ✗ kubectl get pods |grep lab123 
lab123                                             2/2     Running   0                23s

# 9.2 容器初始化

相关文档:https://kubernetes.io/zh-cn/docs/concepts/workloads/pods/init-containers/

k8s-control-➜  cka git:(master) ✗ kubectl create deploy lab124deploy --image=busybox --dry-run=client -o yaml -- sleep 30 > lab124deploy.yaml
k8s-control-➜  cka git:(master)vim lab124deploy.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: lab124deploy
  name: lab124deploy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: lab124deploy
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: lab124deploy
    spec:
      containers:
      - name: nginx
        image: nginx
      initContainers:
      - command:
        - sleep
        - "30"
        image: busybox
        name: busybox
status: {}
k8s-control-➜  cka git:(master) ✗ kubectl apply -f lab124deploy.yaml                                                                         
deployment.apps/lab124deploy created
# 可以看到有一个 Pod 还在 Init 阶段
k8s-control-➜  cka git:(master) ✗ kubectl get pods |grep lab        
lab123                                             2/2     Running    0                 15m
lab124deploy-7c7c8457f9-nlk74                      0/1     Init:0/1   0                 15s

# 9.3 创建Persistent Storage

相关文档:https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/

# 直接复制文档的配置,本地的路径会自动建立
k8s-control-➜  cka git:(master)vim lab125.yaml      
apiVersion: v1
kind: PersistentVolume
metadata:
  name: lab125
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/lab125"
k8s-control-➜  cka git:(master) ✗ kubectl apply -f lab125.yaml      
persistentvolume/lab125 created
k8s-control-➜  cka git:(master) ✗ kubectl get pv              
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                          STORAGECLASS   REASON   AGE
lab125                                     10Gi       RWO            Retain           Available                                  manual                  11s
k8s-control-➜  cka git:(master) ✗ kubectl describe pv lab125                                                                                 
Name:            lab125
Labels:          type=local
Annotations:     <none>
Finalizers:      [kubernetes.io/pv-protection]
StorageClass:    manual
Status:          Available
Claim:           
Reclaim Policy:  Retain
Access Modes:    RWO
VolumeMode:      Filesystem
Capacity:        10Gi
Node Affinity:   <none>
Message:         
Source:
    Type:          HostPath (bare host directory volume)
    Path:          /lab125
    HostPathType:  
Events:            <none>
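PV 目前是 Available 状态,还需要一个 PVC 来认领它。参考官方文档补一个最小 PVC 示意(lab125-claim 这个名字是假设的,storageClassName 必须同为 manual 才能绑定):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: lab125-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi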

# 9.4 应用访问

创建应用,让用户可以通过外部端口访问

k8s-control-➜  cka git:(master) ✗ kubectl create deploy lab126deploy --image=nginx --replicas=3                                              
deployment.apps/lab126deploy created
k8s-control-➜  cka git:(master) ✗ kubectl expose deploy lab126deploy --port=80                 
service/lab126deploy exposed
k8s-control-➜  cka git:(master) ✗ kubectl get all --selector app=lab126deploy 
NAME                               READY   STATUS    RESTARTS   AGE
pod/lab126deploy-fff46cd4b-6ljcq   1/1     Running   0          60s
pod/lab126deploy-fff46cd4b-hfcg6   1/1     Running   0          60s
pod/lab126deploy-fff46cd4b-rn272   1/1     Running   0          60s

NAME                   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
service/lab126deploy   ClusterIP   10.111.146.140   <none>        80/TCP    30s

NAME                           READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/lab126deploy   3/3     3            3           60s

NAME                                     DESIRED   CURRENT   READY   AGE
replicaset.apps/lab126deploy-fff46cd4b   3         3         3       60s
# 调整网络
k8s-control-➜  cka git:(master) ✗ kubectl edit svc lab126deploy     
#  增加nodePort:32567,type改为NodePort
spec:
  clusterIP: 10.111.146.140
  clusterIPs:
  - 10.111.146.140
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 32567
  selector:
    app: lab126deploy
  sessionAffinity: None
  type: NodePort
# 再次查看,可以看到 TYPE 变了,可以通过 32567 端口访问
k8s-control-➜  cka git:(master) ✗ kubectl get all --selector app=lab126deploy 
NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
service/lab126deploy   NodePort   10.111.146.140   <none>        80:32567/TCP   5m17s
k8s-control-➜  cka git:(master)curl k8s-control:32567  
<!DOCTYPE html>
<html>
<head>

# 9.5 网络策略

相关链接:https://kubernetes.io/docs/concepts/services-networking/network-policies/

k8s-control-➜  cka git:(master) ✗ kubectl create ns restricted                                 
namespace/restricted created
k8s-control-➜  cka git:(master) ✗ kubectl create ns access    
namespace/access created
k8s-control-➜  cka git:(master) ✗ kubectl run tesginx --image=nginx -n restricted                        
pod/tesginx created
k8s-control-➜  cka git:(master) ✗ kubectl run testbox --image=busybox -n access  -- sleep 3600
pod/testbox created
k8s-control-➜  cka git:(master) ✗ kubectl run testbox --image=busybox -- sleep 3600
pod/testbox created

创建策略

# 复制官方的配置调整如下
k8s-control-➜  cka git:(master)vim lab127.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: test-network-policy
  namespace: restricted
spec:
  policyTypes:
    - Ingress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              project: myproject
        # - podSelector:
        #     matchLabels:
        #       role: frontend
      ports:
        - protocol: TCP
          port: 80
k8s-control-➜  cka git:(master) ✗ kubectl label ns access project=myproject
k8s-control-➜  cka git:(master) ✗ kubectl get ns --show-labels               
NAME              STATUS   AGE     LABELS
access            Active   22m     kubernetes.io/metadata.name=access,project=myproject
k8s-control-➜  cka git:(master) ✗ kubectl create -f lab127.yaml 
networkpolicy.networking.k8s.io/test-network-policy created

开始实验

k8s-control-➜  cka git:(master) ✗ kubectl get pods -n restricted
NAME      READY   STATUS    RESTARTS   AGE
tesginx   1/1     Running   0          23m
k8s-control-➜  cka git:(master) ✗ kubectl expose pod tesginx --port=80 -n restricted
service/tesginx exposed
# 拿到访问的ip
k8s-control-➜  cka git:(master) ✗ kubectl -n restricted get svc                     
NAME      TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
tesginx   ClusterIP   10.96.97.173   <none>        80/TCP    28s
# access 空间可以访问
k8s-control-➜  cka git:(master) ✗ kubectl exec -it testbox -n access -- wget 10.96.97.173 
Connecting to 10.96.97.173 (10.96.97.173:80)
saving to 'index.html'
index.html           100% |*************************************************************************************************************************************************************************************|   615  0:00:00 ETA
'index.html' saved
k8s-control-➜  cka git:(master)cat index.html 
hello world
#  默认空间无法访问
k8s-control-➜  cka git:(master) ✗ kubectl exec -it testbox -- wget 10.96.97.173  
Connecting to 10.96.97.173 (10.96.97.173:80)
# 查看策略
k8s-control-➜  cka git:(master) ✗ kubectl -n restricted describe networkpolicies.networking.k8s.io
Name:         test-network-policy
Namespace:    restricted
Created on:   2023-10-20 03:43:58 +0000 UTC
Labels:       <none>
Annotations:  <none>
Spec:
  PodSelector:     <none> (Allowing the specific traffic to all pods in this namespace)
  Allowing ingress traffic:
    To Port: 80/TCP
    From:
      NamespaceSelector: project=myproject
  Not affecting egress traffic
  Policy Types: Ingress
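可以反向验证一下策略:把 access 命名空间的 project 标签去掉后应该也访问不通,加回标签即可恢复(示意):

# 键名后加 - 表示移除标签
kubectl label ns access project-
# 预期 3 秒超时失败
kubectl exec -it testbox -n access -- wget -T 3 10.96.97.173
# 加回标签,恢复访问
kubectl label ns access project=myproject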

# 9.6 Quota设置

k8s-control-➜  cka git:(master) ✗ kubectl create ns limited  
# 参考例子
k8s-control-➜  cka git:(master) ✗ kubectl create quota -h | less
k8s-control-➜  cka git:(master) ✗ kubectl create quota my-quota --hard=memory=2G,pods=5 -n limited
resourcequota/my-quota created
k8s-control-➜  cka git:(master) ✗ kubectl describe ns limited
Name:         limited
Labels:       kubernetes.io/metadata.name=limited
Annotations:  <none>
Status:       Active

Resource Quotas
  Name:     my-quota
  Resource  Used   Hard
  --------  ---    ---
  memory    406Mi  2G
  pods      4      5
# 创建应用
k8s-control-➜  cka git:(master) ✗ kubectl create deploy lab128deploy --image=nginx --replicas=3 -n limited
deployment.apps/lab128deploy created
# 设置resource,也可以参考例子
k8s-control-➜  cka git:(master) ✗ kubectl set resources -h      
k8s-control-➜  cka git:(master) ✗ kubectl set resources deployment lab128deploy --limits=memory=256Mi --requests=memory=120Mi -n limited
deployment.apps/lab128deploy resource requirements updated
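注意:在设置了内存配额的命名空间里,新建的 Pod 必须声明内存 request,否则会被配额机制拒绝,这也是上面要用 kubectl set resources 的原因。设置完可以再确认一下配额占用(示意):

kubectl describe quota my-quota -n limited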

# 9.7 创建Static Pod

# 拷贝个yaml文件
k8s-control-➜  cka git:(master) ✗ kubectl run lab129pod --image=nginx --dry-run=client -o yaml           
# 登录 worker2
MAC-➜  BigData git:(master)ssh k8s-worker2
tpxcer@k8s-worker2:~$ cd /etc/kubernetes/manifests/
tpxcer@k8s-worker2:/etc/kubernetes/manifests$ sudo vim lab129pod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: lab129pod
  name: lab129pod
spec:
  containers:
  - image: nginx
    name: lab129pod
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
# 我们再回到control,就会看到这个应用已经启动
k8s-control-➜  cka git:(master) ✗ kubectl get pods |grep lab129
lab129pod-k8s-worker2                              1/1     Running   0               64s
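Static Pod 由 kubelet 直接管理,用 kubectl delete 删除后会被立刻重建;要真正移除,需要删掉对应的 manifest 文件(示意):

# 在 worker2 上执行,kubelet 会自动停掉这个 pod
sudo rm /etc/kubernetes/manifests/lab129pod.yaml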

# 9.8 创建绑定角色

相关文档:https://kubernetes.io/docs/reference/access-authn-authz/rbac/

# 拷贝例子
k8s-control-➜  cka git:(master) ✗ kubectl create role -h |less    
k8s-control-➜  cka git:(master) ✗ kubectl create role appcreator --verb=get,list,watch,create,update,patch,delete --resource=pods,deployment,daemonset,statefulset -n access
role.rbac.authorization.k8s.io/appcreator created
# 查看权限
k8s-control-➜  cka git:(master) ✗ kubectl describe role -n access appcreator
Name:         appcreator
Labels:       <none>
Annotations:  <none>
PolicyRule:
  Resources          Non-Resource URLs  Resource Names  Verbs
  ---------          -----------------  --------------  -----
  pods               []                 []              [get list watch create update patch delete]
  daemonsets.apps    []                 []              [get list watch create update patch delete]
  deployments.apps   []                 []              [get list watch create update patch delete]
  statefulsets.apps  []                 []              [get list watch create update patch delete]
# 创建sa
k8s-control-➜  cka git:(master) ✗ kubectl create sa appcreator -n access
serviceaccount/appcreator created
# 绑定role
k8s-control-➜  cka git:(master) ✗ kubectl create rolebinding appcreator --role=appcreator --serviceaccount=access:appcreator -n access
rolebinding.rbac.authorization.k8s.io/appcreator created
# 查看
k8s-control-➜  cka git:(master) ✗ kubectl get role,rolebinding,serviceaccount -n access
NAME                                        CREATED AT
role.rbac.authorization.k8s.io/appcreator   2023-10-20T05:49:30Z

NAME                                               ROLE              AGE
rolebinding.rbac.authorization.k8s.io/appcreator   Role/appcreator   53s

NAME                        SECRETS   AGE
serviceaccount/appcreator   0         4m47s
serviceaccount/default      0         157m

# 9.9 Taints and Tolerations

官方文档:https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/

配置 worker2 只能运行容忍 type=db 污点的应用

# 给节点打上 taint
k8s-control-➜  cka git:(master) ✗ kubectl taint nodes k8s-worker2 type=db:NoSchedule
node/k8s-worker2 tainted
# 创建配置
k8s-control-➜  cka git:(master) ✗ kubectl create deploy tolerateginx --image=nginx --replicas=3 --dry-run=client -o yaml > lab1212.yaml
# 添加修改一下添加tolerations 信息
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: tolerateginx
  name: tolerateginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: tolerateginx
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: tolerateginx
    spec:
      tolerations:
      - key: "type"
        operator: "Equal"
        value: "db"
        effect: "NoSchedule"
      containers:
      - image: nginx
        name: nginx
        resources: {}
status: {}
# 创建应用
k8s-control-➜  cka git:(master) ✗ kubectl apply -f lab1212.yaml                                                                        
deployment.apps/tolerateginx created
# 全部运行在worker2
k8s-control-➜  cka git:(master) ✗ kubectl get pods -o wide  | grep tolerateginx
tolerateginx-6bdbffc474-m7q7d                      1/1     Running   0               36s     172.16.126.1    k8s-worker2   <none>           <none>
tolerateginx-6bdbffc474-qs4nm                      1/1     Running   0               36s     172.16.126.62   k8s-worker2   <none>           <none>
tolerateginx-6bdbffc474-rvhcc                      1/1     Running   0               36s     172.16.126.63   k8s-worker2   <none>           <none>
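实验完可以把 taint 移除(键值后面加 - 表示删除),并确认节点上已经没有该 taint(示意):

kubectl taint nodes k8s-worker2 type=db:NoSchedule-
kubectl describe node k8s-worker2 | grep -i taint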

# 十、实战二

# 10.1 配置HA

相关文档:https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/

k8s-control-➜  cka git:(master)sudo ./setup-lb-ubuntu.sh
k8s-control-➜  ~ sudo kubeadm init --control-plane-endpoint "192.168.50.6:8443" --upload-certs

# 10.2 备份Etcd

todo
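先放一个基于 etcdctl 的备份/恢复草稿(示意,假设 etcd 按 kubeadm 默认方式以静态 Pod 运行、证书在默认路径,etcdctl 需另行安装,例如 sudo apt install etcd-client):

# 备份
sudo ETCDCTL_API=3 etcdctl snapshot save /tmp/etcd-backup.db \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key
# 恢复到新的数据目录
sudo ETCDCTL_API=3 etcdctl snapshot restore /tmp/etcd-backup.db --data-dir=/var/lib/etcd-restore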

# 10.3 设置应用日志

相关文档:https://kubernetes.io/docs/concepts/cluster-administration/logging/

# 复制例子中的配置并稍作调整
k8s-control-➜  ~ vim  lab135.yaml
apiVersion: v1
kind: Pod
metadata:
  name: counter
spec:
  containers:
  - name: count
    image: busybox
    args:
    - /bin/sh
    - -c
    - >
      i=0;
      while sleep 60;
      do
        echo "$i: $(date)" >> /output/date.lgo
      done      
    volumeMounts:
    - name: varlog
      mountPath: /output
  - name: count-log-1
    image: nginx
    volumeMounts:
    - name: varlog
      mountPath: /usr/share/nginx/html
  volumes:
  - name: varlog
    emptyDir: {}
k8s-control-➜  ~ kubectl apply -f lab135.yaml                                         
pod/counter created
kubectl exec -it counter -c count-log-1 -- cat /usr/share/nginx/html/date.lgo

# 10.4 配置Persistent Volume Claims

k8s-control-➜  cka git:(master) ✗ kubectl apply -f resize_pvc.yaml    
namespace/myvol created
storageclass.storage.k8s.io/mystorageclass created
persistentvolume/mypv created
persistentvolumeclaim/mypvc created
pod/pv-pod created
k8s-control-➜  cka git:(master) ✗ kubectl get pv,pvc -n myvol
NAME                    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM         STORAGECLASS     REASON   AGE
persistentvolume/mypv   1Gi        RWO            Recycle          Bound    myvol/mypvc   mystorageclass            2m36s

NAME                          STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS     AGE
persistentvolumeclaim/mypvc   Bound    mypv     1Gi        RWO            mystorageclass   2m36s
# 可以直接编辑pvc修改请求的空间大小
k8s-control-➜  cka git:(master) ✗ kubectl edit pvc mypvc -n myvol
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi
  storageClassName: mystorageclass
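注意:PVC 能否这样在线扩容,取决于对应的 StorageClass 是否开启了 allowVolumeExpansion。关键字段示意如下(provisioner 按实际环境填写,这里只是假设的本地卷写法):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: mystorageclass
provisioner: kubernetes.io/no-provisioner
allowVolumeExpansion: true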

# 10.5 查看pod日志

k8s-control-➜  cka git:(master) ✗ kubectl run failingdb --image=mariadb
pod/failingdb created
# 查看日志
k8s-control-➜  cka git:(master) ✗ kubectl logs failingdb                                    
2023-10-23 06:01:10+00:00 [Note] [Entrypoint]: Entrypoint script for MariaDB Server 1:11.1.2+maria~ubu2204 started.
2023-10-23 06:01:10+00:00 [Note] [Entrypoint]: Switching to dedicated user 'mysql'
2023-10-23 06:01:10+00:00 [Note] [Entrypoint]: Entrypoint script for MariaDB Server 1:11.1.2+maria~ubu2204 started.
2023-10-23 06:01:11+00:00 [ERROR] [Entrypoint]: Database is uninitialized and password option is not specified
        You need to specify one of MARIADB_ROOT_PASSWORD, MARIADB_ROOT_PASSWORD_HASH, MARIADB_ALLOW_EMPTY_ROOT_PASSWORD and MARIADB_RANDOM_ROOT_PASSWORD
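按日志提示补上环境变量就能正常启动(示意,workingdb 这个名字是随便取的):

kubectl run workingdb --image=mariadb --env="MARIADB_ROOT_PASSWORD=password"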

# 10.6 managing scheduling

创建一个pod只运行在有标签 storage=ssd的节点上

# 设定label
k8s-control-➜  cka git:(master) ✗ kubectl label node k8s-worker1 storage=ssd  
node/k8s-worker1 labeled
# 创建pod
k8s-control-➜  cka git:(master) ✗ kubectl run lab139pod --image=nginx --dry-run=client -o yaml > lab139pod.yaml
# 编辑如下
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: lab139pod
  name: lab139pod
spec:
  nodeSelector:
    storage: ssd
  containers:
  - image: nginx
    name: lab139pod
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
# 应用
k8s-control-➜  cka git:(master) ✗ kubectl apply -f lab139pod.yaml
pod/lab139pod created
# 跑在了 worker1 上
k8s-control-➜  cka git:(master) ✗ kubectl get pods -o wide                     
NAME        READY   STATUS             RESTARTS        AGE   IP              NODE          NOMINATED NODE   READINESS GATES
counter     2/2     Running            0               47m   172.16.194.65   k8s-worker1   <none>           <none>
failingdb   0/1     CrashLoopBackOff   6 (3m53s ago)   10m   172.16.126.3    k8s-worker2   <none>           <none>
lab139pod   1/1     Running            0               15s   172.16.194.66   k8s-worker1   <none>           <none>

# 10.7 配置ingress

k8s-control-➜  cka git:(master) ✗ kubectl run lab1310pod --image=nginx                                         
pod/lab1310pod created
k8s-control-➜  cka git:(master) ✗ kubectl expose pod lab1310pod --port=80 --type=NodePort
service/lab1310pod exposed
k8s-control-➜  cka git:(master) ✗ kubectl get svc                                        
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP        2d13h
lab1310pod   NodePort    10.101.141.158   <none>        80:31656/TCP   14s
# 创建ingress规则
k8s-control-➜  cka git:(master) ✗ kubectl create ingress simple --rule="lab1310.info/hi=lab1310pod:80"                                 
ingress.networking.k8s.io/simple created
# 查看
k8s-control-➜  cka git:(master) ✗ kubectl describe ingress simple                                     
Name:             simple
Labels:           <none>
Namespace:        default
Address:          
Ingress Class:    <none>
Default backend:  <default>
Rules:
  Host          Path  Backends
  ----          ----  --------
  lab1310.info  
                /hi   lab1310pod:80 (172.16.194.67:80)
Annotations:    <none>
Events:         <none>
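域名没有写进 DNS 时,可以用 Host 头模拟域名来验证规则(示意,<节点IP> 和 <http端口> 按 ingress-nginx controller 的 NodePort 信息替换,服务名以实际为准):

kubectl get svc -n ingress-nginx ingress-nginx-controller
curl -H "Host: lab1310.info" http://<节点IP>:<http端口>/hi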

# 10.8 设置节点维护

# DaemonSet 管理的 Pod 等默认不能直接驱逐,drain 会报错
k8s-control-➜  cka git:(master) ✗ kubectl drain k8s-worker1 
node/k8s-worker1 cordoned
error: unable to drain node "k8s-worker1" due to error:[cannot delete Pods with local storage (use --delete-emptydir-data to override): default/counter, cannot delete Pods declare no controller (use --force to override): default/lab1310pod, default/lab139pod, cannot delete DaemonSet-managed Pods (use --ignore-daemonsets to ignore): kube-system/calico-node-76ms2, kube-system/kube-proxy-zqdss], continuing command...
There are pending nodes to be drained:
 k8s-worker1
cannot delete Pods with local storage (use --delete-emptydir-data to override): default/counter
cannot delete Pods declare no controller (use --force to override): default/lab1310pod, default/lab139pod
cannot delete DaemonSet-managed Pods (use --ignore-daemonsets to ignore): kube-system/calico-node-76ms2, kube-system/kube-proxy-zqdss
# 彻底维护
k8s-control-➜  cka git:(master) ✗ kubectl drain k8s-worker1 --ignore-daemonsets --force --delete-emptydir-data
node/k8s-worker1 already cordoned
Warning: deleting Pods that declare no controller: default/counter, default/lab1310pod, default/lab139pod; ignoring DaemonSet-managed Pods: kube-system/calico-node-76ms2, kube-system/kube-proxy-zqdss
evicting pod default/lab139pod
evicting pod default/lab1310pod
evicting pod default/counter
pod/lab1310pod evicted
pod/lab139pod evicted
# 恢复
k8s-control-➜  cka git:(master) ✗ kubectl uncordon k8s-worker1
node/k8s-worker1 uncordoned
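如果只想停止调度新 Pod、不驱逐已有 Pod,用 cordon 就够了(示意):

kubectl cordon k8s-worker1
kubectl uncordon k8s-worker1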

# 10.9 使用port-forward 访问应用

port-forward 一般只用于测试,正式访问应该通过 Service 和 Ingress

k8s-control-➜  ~ kubectl run fwnginx --image=nginx    
pod/fwnginx created
k8s-control-➜  ~ kubectl get pods -o wide         
NAME        READY   STATUS             RESTARTS         AGE    IP              NODE          NOMINATED NODE   READINESS GATES
fwnginx     1/1     Running            0                15s    172.16.194.68   k8s-worker1   <none>           <none>
# 转发
k8s-control-➜  ~ kubectl port-forward fwnginx 8080:80 &                     
[1] 1648477
k8s-control-➜  ~ Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
# 本地访问
k8s-control-➜  ~ curl localhost:8080      
Handling connection for 8080
<!DOCTYPE html>
<html>

# 10.10 pod安全设定

# 查看配置,securityContext部分
k8s-control-➜  ckad git:(master) vim securitycontextdemo2.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: security-context-demo
spec:
  securityContext:
    runAsUser: 1000
    runAsGroup: 3000
    fsGroup: 2000
  volumes:
  - name: sec-ctx-vol
    emptyDir: {}
  containers:
  - name: sec-ctx-demo
    image: busybox
    command: [ "sh", "-c", "sleep 1h" ]
    volumeMounts:
    - name: sec-ctx-vol
      mountPath: /data/demo
    securityContext:
      allowPrivilegeEscalation: false
k8s-control-➜  ckad git:(master) kubectl apply -f securitycontextdemo2.yaml 
pod/security-context-demo created
k8s-control-➜  ckad git:(master) kubectl exec -it security-context-demo -- sh   
~ $ ps
PID   USER     TIME  COMMAND
    1 1000      0:00 sh -c sleep 1h
    7 1000      0:00 sh
   13 1000      0:00 ps
/data $ ls -l
total 4
drwxrwsrwx    2 root     2000          4096 Oct 23 09:23 demo
# 创建的文件会带上 securityContext 里设置的属主和属组(fsGroup)
/data/demo $ echo hello > testfile
/data/demo $ ls -l
total 4
-rw-r--r--    1 1000     2000             6 Oct 23 09:26 testfile
/data/demo $ id
uid=1000 gid=3000 groups=2000,3000
# 这个配置,以非root用户运行
k8s-control-➜  ckad git:(master) vim securitycontextdemo.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nginxsecure
spec:
  securityContext:
    runAsNonRoot: true
  containers:
  - image: nginx
    name: nginx
k8s-control-➜  ckad git:(master) kubectl apply -f securitycontextdemo.yaml   
pod/nginxsecure created
# 提示配置错误
k8s-control-➜  ckad git:(master) kubectl get pods
NAME                    READY   STATUS                       RESTARTS       AGE
nginxsecure             0/1     CreateContainerConfigError   0              29s
# nginx 镜像默认以 root 运行,但配置却不允许 root 运行,所以报错了
k8s-control-➜  ckad git:(master) kubectl describe pods nginxsecure 
 Warning  Failed     13s (x5 over 65s)  kubelet            Error: container has runAsNonRoot and image will run as root (pod: "nginxsecure_default(88749bfa-3745-43b2-9516-458b7f2109b0)", container: nginx)
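要让 runAsNonRoot 的约束跑通,可以换一个本身以非 root 用户运行的镜像,比如 nginxinc/nginx-unprivileged(示意,该镜像以 uid 101 运行、监听 8080 端口):

apiVersion: v1
kind: Pod
metadata:
  name: nginxsecure2
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 101
  containers:
  - image: nginxinc/nginx-unprivileged
    name: nginx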


# 10.11 管理job

Job 适合一次性任务,可用 spec.ttlSecondsAfterFinished 在任务完成后自动清理 Job

例子一:

k8s-control-➜  ckad git:(master) kubectl create job onejob --image=busybox -- date                   
job.batch/onejob created
k8s-control-➜  ckad git:(master) kubectl get jobs                                 
NAME     COMPLETIONS   DURATION   AGE
onejob   1/1           8s         62s
# 可以看到job的状态已经完成
k8s-control-➜  ckad git:(master) kubectl get jobs,pods
NAME               COMPLETIONS   DURATION   AGE
job.batch/onejob   1/1           8s         2m15s

NAME                        READY   STATUS                       RESTARTS         AGE
pod/onejob-2x9q6            0/1     Completed                    0                2m15s
# 查看内部配置
k8s-control-➜  ckad git:(master) kubectl get jobs -o yaml | less
# 删除job
k8s-control-➜  ckad git:(master) kubectl delete jobs.batch onejob
job.batch "onejob" deleted

例子二

k8s-control-➜  ~ kubectl create job mynewjob --image=busybox --dry-run=client -o yaml -- sleep 5 > mynewjob.yaml 
# 增加两个参数 completions和ttlSecondsAfterFinished
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  name: mynewjob
spec:
  completions: 3
  ttlSecondsAfterFinished: 60
  template:
    metadata:
      creationTimestamp: null
    spec:
      containers:
      - command:
        - sleep
        - "5"
        image: busybox
        name: mynewjob
        resources: {}
      restartPolicy: Never
status: {}
k8s-control-➜  ~ kubectl create -f mynewjob.yaml 
job.batch/mynewjob created
# 等任务都完成以后过60秒就会自动删除
k8s-control-➜  ~ kubectl get jobs,pods           
NAME                 COMPLETIONS   DURATION   AGE
job.batch/mynewjob   3/3           30s        84s

NAME                        READY   STATUS      RESTARTS       AGE
pod/mynewjob-9qlw6          0/1     Completed   0              74s
pod/mynewjob-vzfzs          0/1     Completed   0              64s
pod/mynewjob-z5vch          0/1     Completed   0              84s

# 10.12 cronjobs

k8s-control-➜  ~ kubectl create cronjob -h | less    
k8s-control-➜  ~ kubectl create cronjob runme --image=busybox --schedule="*/2 * * * *" -- echo greetings from the cluster
cronjob.batch/runme created
k8s-control-➜  ~ kubectl get cronjobs                         
NAME    SCHEDULE      SUSPEND   ACTIVE   LAST SCHEDULE   AGE
runme   */2 * * * *   False     0        39s             7m11s
# 从 cronjob 直接触发一次 job
k8s-control-➜  ~ kubectl create job runme --from=cronjob/runme                                                           
job.batch/runme created 
# 查看日志
k8s-control-➜  ~ kubectl get all    
pod/runme-28301948-pzcd2    0/1     Completed   0              4m18s
k8s-control-➜  ~ kubectl logs runme-28301950-8ljxc                     
greetings from the cluster

# 10.13 pod资源限制

# 资源配置
k8s-control-➜  ckad git:(master) cat frontend-resources.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: frontend
spec:
  containers:
  - name: db
    image: mysql
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: "password"
    resources:
      requests:
        memory: "64Mi"
        cpu: "250m"
      limits:
        memory: "128Mi"
        cpu: "500m"
  - name: wp
    image: wordpress
    resources:
      requests:
        memory: "64Mi"
        cpu: "250m"
      limits:
        memory: "128Mi"
        cpu: "500m"
k8s-control-➜  ckad git:(master) kubectl apply -f frontend-resources.yaml 
pod/frontend created
# 因为内存超限额挂了
k8s-control-➜  ckad git:(master) kubectl get pods                         
NAME       READY   STATUS      RESTARTS      AGE
frontend   1/2     OOMKilled   4 (53s ago)   2m4s
k8s-control-➜  ckad git:(master) kubectl delete -f frontend-resources.yaml    
pod "frontend" deleted

# 10.14 清除资源

k8s-control-➜  ckad git:(master) kubectl delete all --all
service "kubernetes" deleted
service "lab1310pod" deleted
# 当然基础的应用都还在
k8s-control-➜  ckad git:(master) kubectl get all -A  
# 强制删掉
kubectl delete all --all --force
# 所有命名空间全部强制删除,非常危险
kubectl delete all --all --force --grace-period=-1 -A

# 10.15 创建应用

  • 运行在 secret 命名空间
  • 失败重启
  • 初始内存64mb,最高128mb
k8s-control-➜  ckad git:(master) kubectl create ns secret --dry-run=client -o yaml > lesson6lab.yaml
k8s-control-➜  ckad git:(master) ✗ kubectl run secret-app --image=busybox --dry-run=client -o yaml -- sleep 3600 >> lesson6lab.yaml
# 查看配置帮助参考
k8s-control-➜  ckad git:(master) ✗ kubectl explain pod.spec | less   
# 再复制一下 resource 的配置 https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
# 最终配置如下
k8s-control-➜  ckad git:(master)cat lesson6lab.yaml                
---
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: secret
spec: {}
status: {}
---
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: secret-app
  name: secret-app
  namespace: secret
spec:
  restartPolicy: OnFailure
  containers:
  - args:
    - sleep
    - "3600"
    image: busybox
    resources:
      requests:
        memory: "64Mi"
      limits:
        memory: "128Mi"
    name: secret-app
  dnsPolicy: ClusterFirst
status: {}
k8s-control-➜  ckad git:(master) ✗ kubectl create -f lesson6lab.yaml
namespace/secret created
pod/secret-app created
k8s-control-➜  ckad git:(master) ✗ kubectl get pods -n secret
NAME         READY   STATUS    RESTARTS   AGE
secret-app   1/1     Running   0          76s

# 十一、实战三

# 11.1 Namespace

名称空间用来隔离资源(只隔离资源,不隔离网络)。部署的资源如果不指定名称空间,默认都会部署到 default 名称空间下

# 创建
kubectl create ns hello
# 删除,删除的时候会把名称空间下面的所有资源都删掉
kubectl delete ns hello
# 查看
kubectl get ns
# 查看应用的时候-A 显示命名空间
kubectl get pods -A 
# 指定名称空间查看
kubectl get pod -n kube-system

资源配置文件的名称空间写法

apiVersion: v1
kind: Namespace
metadata:
  name: hello
# 应用配置文件 
kubectl apply -f hello.yaml
# 通过配置文件删除
kubectl delete -f hello.yaml

# 11.2 Pod

运行中的一组容器,Pod是kubernetes中应用的最小单位.

  1. 创建pod
# mynginx 就是pod的名字
kubectl run mynginx --image=nginx
# 查看
kubectl get pod
kubectl get pod -n default
# 可以查看到ip
kubectl get pod -owide
# 描述pod
kubectl describe pod mynginx
# 查看日志
kubectl logs mynginx
  2. 删除pod
kubectl delete pod mynginx
kubectl delete pod myapp mynginx -n default
  3. 交互进入容器
kubectl exec -it mynginx -- /bin/bash
  4. 通过配置文件创建
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: mynginx
  # pod名
  name: mynginx
#  namespace: default
spec:
  containers:
  - image: nginx
    name: mynginx

多个container

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: myapp
  name: myapp
spec:
  containers:
  - image: nginx
    name: nginx
  - image: tomcat:8.5.68
    name: tomcat

# 11.3 Deployment

控制Pod,使Pod拥有多副本,自愈,扩缩容等能力

# 清除所有Pod,比较下面两个命令有何不同效果?
kubectl run mynginx --image=nginx

# 自愈能力
kubectl create deployment mytomcat --image=tomcat:8.5.68

# 删除
kubectl delete deploy mytomcat
  1. 多副本
kubectl create deployment my-dep --image=nginx --replicas=3
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  replicas: 3
  selector:
    matchLabels:
      app: my-dep
  template:
    metadata:
      labels:
        app: my-dep
    spec:
      containers:
      - image: nginx
        name: nginx
  2. 扩容
kubectl scale --replicas=5 deployment/my-dep
# 直接改配置
kubectl edit deployment my-dep
  3. 滚动更新
kubectl set image deployment/my-dep nginx=nginx:1.16.1 --record
kubectl rollout status deployment/my-dep
# 修改 kubectl edit deployment/my-dep
  4. 版本退回
#历史记录
kubectl rollout history deployment/my-dep

#查看某个历史详情
kubectl rollout history deployment/my-dep --revision=2

#回滚(回到上次)
kubectl rollout undo deployment/my-dep

#回滚(回到指定版本)
kubectl rollout undo deployment/my-dep --to-revision=2
  5. 自愈&故障转移
watch -n 1 kubectl get pod 
# 或者
kubectl get pod -w

更多: 除了Deployment,k8s还有 StatefulSet 、DaemonSet 、Job 等 类型资源。我们都称为 工作负载。 有状态应用使用 StatefulSet 部署,无状态应用使用 Deployment 部署 https://kubernetes.io/zh/docs/concepts/workloads/controllers/

# 11.4 Service

将一组 Pods 公开为网络服务的抽象方法。

#暴露Deploy
kubectl expose deployment my-dep --port=8000 --target-port=80
kubectl get service
# 删除
kubectl delete svc my-dep
#使用标签检索Pod
kubectl get pod -l app=my-dep
apiVersion: v1
kind: Service
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  selector:
    app: my-dep
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80

# 11.4.1 ClusterIP

外网不能访问

# 等同于没有--type的
kubectl expose deployment my-dep --port=8000 --target-port=80 --type=ClusterIP
apiVersion: v1
kind: Service
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
  selector:
    app: my-dep
  type: ClusterIP

# 11.4.2 NodePort

能被外网访问

kubectl expose deployment my-dep --port=8000 --target-port=80 --type=NodePort
apiVersion: v1
kind: Service
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
  selector:
    app: my-dep
  type: NodePort

NodePort范围在 30000-32767 之间

# 11.5 Ingress

Service的统一网关入口

# 11.5.1 安装

wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.4/deploy/static/provider/baremetal/deploy.yaml

#修改镜像
vi deploy.yaml
#将image的值改为如下值:
registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/ingress-nginx-controller:v0.46.0

# 安装
kubectl apply -f deploy.yaml

# 检查pod结果
kubectl get pod -A
# 查看service
[root@master01 ~]# kubectl get svc -n ingress-nginx
NAME                                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             NodePort    10.96.124.59   <none>        80:31060/TCP,443:32704/TCP   112m
ingress-nginx-controller-admission   ClusterIP   10.96.125.3    <none>        443/TCP                      112m
# 查看日志
kubectl logs -n ingress-nginx ingress-nginx-admission-create-vc2nb

# 11.5.2 使用

# 测试环境

测试应用的yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-server
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hello-server
  template:
    metadata:
      labels:
        app: hello-server
    spec:
      containers:
      - name: hello-server
        image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/hello-server
        ports:
        - containerPort: 9000
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-demo
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
      - image: nginx
        name: nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  selector:
    app: nginx-demo
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hello-server
  name: hello-server
spec:
  selector:
    app: hello-server
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 9000
[root@master01 ~]# kubectl apply -f deploy.yaml
deployment.apps/hello-server created
deployment.apps/nginx-demo created
service/nginx-demo created
service/hello-server created
# 域名访问
apiVersion: networking.k8s.io/v1
kind: Ingress  
metadata:
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
  name: ingress-host-bar
spec:
  ingressClassName: nginx
  rules:
  - host: "hello.bihell.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: hello-server
            port:
              number: 8000
  - host: "demo.bihell.com"
    http:
      paths:
      - pathType: Prefix
        path: "/nginx(/|$)(.*)"  # 把请求会转给下面的服务,下面的服务一定要能处理这个路径,不能处理就是404
        backend:
          service:
            name: nginx-demo  ## 配合上面的 rewrite-target 注解做路径重写,转发时去掉前缀 /nginx
            port:
              number: 8000
# 路径重写
apiVersion: networking.k8s.io/v1
kind: Ingress  
metadata:
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
  name: ingress-host-bar
spec:
  ingressClassName: nginx
  rules:
  - host: "hello.atguigu.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: hello-server
            port:
              number: 8000
  - host: "demo.atguigu.com"
    http:
      paths:
      - pathType: Prefix
        path: "/nginx(/|$)(.*)"  # 把请求会转给下面的服务,下面的服务一定要能处理这个路径,不能处理就是404
        backend:
          service:
            name: nginx-demo  ## 配合上面的 rewrite-target 注解做路径重写,转发时去掉前缀 /nginx
            port:
              number: 8000
# 流量限制
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-limit-rate
  annotations:
    nginx.ingress.kubernetes.io/limit-rps: "1"
spec:
  ingressClassName: nginx
  rules:
  - host: "haha.atguigu.com"
    http:
      paths:
      - pathType: Exact
        path: "/"
        backend:
          service:
            name: nginx-demo
            port:
              number: 8000

部署以后修改

kubectl edit ing xxxx

# 11.6 存储抽象

# 11.6.1 环境准备

1.所有节点

yum install -y nfs-utils

2.主节点

# nfs主节点
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports

mkdir -p /nfs/data
systemctl enable rpcbind --now
systemctl enable nfs-server --now
# 配置生效
exportfs -r
# 看目录列表
exportfs

3.从节点

showmount -e master01

#执行以下命令挂载 nfs 服务器上的共享目录到本机路径 /root/nfsmount
mkdir -p /nfs/data

mount -t nfs master01:/nfs/data /nfs/data
# 写入一个测试文件
echo "hello nfs server" > /nfs/data/test.txt

4.原生方式数据挂载

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-pv-demo
  name: nginx-pv-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-pv-demo
  template:
    metadata:
      labels:
        app: nginx-pv-demo
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
        - name: html
          nfs:
            server: 172.31.0.4
            path: /nfs/data/nginx-pv

# 11.6.2 PV&PVC

PV:持久卷(Persistent Volume),将应用需要持久化的数据保存到指定位置

PVC:持久卷申明(Persistent Volume Claim),申明需要使用的持久卷规格

  1. 创建pv池
#nfs主节点
mkdir -p /nfs/data/01
mkdir -p /nfs/data/02
mkdir -p /nfs/data/03
  2. 创建PV
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv01-10m
spec:
  capacity:
    storage: 10M
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/01
    server: 172.31.0.4
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv02-1gi
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/02
    server: 172.31.0.4
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv03-3gi
spec:
  capacity:
    storage: 3Gi
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/03
    server: 172.31.0.4
# 查看列表
kubectl get persistentvolume
kubectl get pv
  3. PVC创建与绑定

创建PVC

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 200Mi
  storageClassName: nfs

创建Pod绑定PVC

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-deploy-pvc
  name: nginx-deploy-pvc
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-deploy-pvc
  template:
    metadata:
      labels:
        app: nginx-deploy-pvc
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
        - name: html
          persistentVolumeClaim:
            claimName: nginx-pvc

# 11.6.3 ConfigMap

抽取应用配置,并且可以自动更新,以下以Redis为例

  1. 把之前的配置文件创建为配置集
# 创建配置,redis保存到k8s的etcd;
kubectl create cm redis-conf --from-file=redis.conf

我们可以通过 kubectl get cm redis-conf -o yaml 查看 yaml 格式,精简如下:

apiVersion: v1
data:    #data是所有真正的数据,key:默认是文件名   value:配置文件的内容
  redis.conf: |
    appendonly yes
kind: ConfigMap
metadata:
  name: redis-conf
  namespace: default

修改 kubectl edit cm redis-conf

  2. 创建Pod
apiVersion: v1
kind: Pod
metadata:
  name: redis
spec:
  containers:
  - name: redis
    image: redis
    command:
      - redis-server
      - "/redis-master/redis.conf"  #指的是redis容器内部的位置
    ports:
    - containerPort: 6379
    volumeMounts:
    - mountPath: /data
      name: data
    - mountPath: /redis-master
      name: config
  volumes:
    - name: data
      emptyDir: {}
    - name: config
      configMap:
        name: redis-conf
        items:
        - key: redis.conf
          path: redis.conf
  3. 检查默认配置
kubectl exec -it redis -- redis-cli

127.0.0.1:6379> CONFIG GET appendonly
127.0.0.1:6379> CONFIG GET requirepass

# 11.6.4 Secret

Secret 对象类型用来保存敏感信息,例如密码、OAuth 令牌和 SSH 密钥。 将这些信息放在 secret 中比放在 Pod 的定义或者 容器镜像 中来说更加安全和灵活。

kubectl create secret docker-registry leifengyang-docker \
--docker-username=leifengyang \
--docker-password=Lfy123456 \
--docker-email=534096094@qq.com

##命令格式
kubectl create secret docker-registry regcred \
  --docker-server=<你的镜像仓库服务器> \
  --docker-username=<你的用户名> \
  --docker-password=<你的密码> \
  --docker-email=<你的邮箱地址>
apiVersion: v1
kind: Pod
metadata:
  name: private-nginx
spec:
  containers:
  - name: private-nginx
    image: leifengyang/guignginx:v1.0
  imagePullSecrets:
  - name: leifengyang-docker

# 十二、实战四 deployment

# 12.1 理解deployment

Deployment 管理的 Pod 被删掉后会自动重建

# 创建myweb应用,3个副本
k8s-control-➜  ckad git:(master) ✗ kubectl create deploy myweb --image=nginx --replicas=3                                               
deployment.apps/myweb created
# 查看应用数据
k8s-control-➜  ckad git:(master) ✗ kubectl describe deployments.apps myweb
# 删掉某个节点的容器以后会自动启动一个新的
k8s-control-➜  ckad git:(master) ✗ kubectl delete pod myweb-9794cbc77-mcq5x 
pod "myweb-9794cbc77-mcq5x" deleted
k8s-control-➜  ckad git:(master) ✗ kubectl get pods                        
NAME                    READY   STATUS        RESTARTS   AGE
myweb-9794cbc77-4kths   1/1     Running       0          39m
myweb-9794cbc77-ffkxs   1/1     Running       0          39m
myweb-9794cbc77-jt5m6   0/1     Pending       0          47s
myweb-9794cbc77-mcq5x   1/1     Terminating   0          39m

# 12.2 Scalability 扩展的几种姿势

k8s-control-➜  ckad git:(master) ✗ kubectl api-resources | less
k8s-control-➜  ckad git:(master) ✗ kubectl api-versions |less  
k8s-control-➜  ckad git:(master) ✗ kubectl create -f redis-deploy.yaml                   
deployment.apps/redis created
# 直接编辑配置
k8s-control-➜  ckad git:(master) ✗ kubectl edit deployments.apps redis   
spec:
  replicas: 3
# 直接调整运行的应用
kubectl scale deployment my-deployment --replicas=4
# 创建时候指定
kubectl create deploy xxx --replicas=3

# 12.3 滚动更新rolling update

滚动更新通过逐步替换现有的应用程序副本来进行更新,而不是一次性删除并重新创建所有副本。这意味着在更新过程中,旧的应用程序副本和新的应用程序副本可以同时存在,并逐步过渡到新版本。

k8s-control-➜  ~ kubectl create deploy nginxup --image=nginx:1.14                 
deployment.apps/nginxup created
k8s-control-➜  ~ kubectl describe deploy nginxup   
k8s-control-➜  ~ kubectl get all --selector app=nginxup     
NAME                           READY   STATUS    RESTARTS   AGE
pod/nginxup-5d79d5f58b-b547x   1/1     Running   0          2m5s

NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginxup   1/1     1            1           2m5s

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/nginxup-5d79d5f58b   1         1         1       2m5s
# 更新镜像
k8s-control-➜  ~ kubectl set image deploy nginxup nginx=nginx:1.17                                                     
deployment.apps/nginxup image updated
# 新副本就绪后,旧的副本会被逐步关掉
k8s-control-➜  ~ kubectl get all --selector app=nginxup           
NAME                         READY   STATUS    RESTARTS   AGE
pod/nginxup-8dc45f7c-d458f   1/1     Running   0          107s

NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginxup   1/1     1            1           7m1s

NAME                                 DESIRED   CURRENT   READY   AGE
replicaset.apps/nginxup-5d79d5f58b   0         0         0       7m1s
replicaset.apps/nginxup-8dc45f7c     1         1         1       107s
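While an update is in flight, its progress can be followed with kubectl rollout status (an optional check, not part of the transcript above):

kubectl rollout status deployment nginxup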
k8s-control-➜  ~ kubectl create deploy bluelabel --image=ginx
deployment.apps/bluelabel created
# maxUnavailable: the maximum number of replicas that may be unavailable during a rolling update
# maxSurge: the maximum number of extra replicas allowed above the desired count during a rolling update
# Defaults are percentages; absolute numbers are also allowed
k8s-control-➜  ~ kubectl get deploy bluelabel -o yaml |grep max
      maxSurge: 25%
      maxUnavailable: 25%
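As a worked example: with replicas: 4, maxSurge: 2, and maxUnavailable: 1 (the values used in rolling.yaml below), a rollout may run up to 4 + 2 = 6 pods at once and must keep at least 4 - 1 = 3 pods available throughout.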

# 12.4 Labels

# Creating a deployment automatically adds the label app=<deployment name>
k8s-control-➜  ~ kubectl create deploy bluelabel --image=ginx    
deployment.apps/bluelabel created
# Add a new label
k8s-control-➜  ~ kubectl label deployment bluelabel state=demo
deployment.apps/bluelabel labeled
# Show labels
k8s-control-➜  ~ kubectl get deployments --show-labels        
NAME        READY   UP-TO-DATE   AVAILABLE   AGE   LABELS
bluelabel   0/1     1            0           73s   app=bluelabel,state=demo
nginxup     1/1     1            1           33m   app=nginxup
redis       3/3     3            3           18h   app=redis
# Filter by label
k8s-control-➜  ~ kubectl get deployments --selector state=demo
NAME        READY   UP-TO-DATE   AVAILABLE   AGE
bluelabel   0/1     1            0           2m7s
# Show all labels
k8s-control-➜  ~ kubectl get all --show-labels                
k8s-control-➜  ~ kubectl get all --show-labels --selector state=demo
# describe also shows all of the app's labels
kubectl describe deployments.apps bluelabel
# Remove a label by appending a minus sign: kubectl label pod <pod-name> app-
# 22gh6 has had its app label removed, so it no longer matches the ReplicaSet selector; rk7n2 is the replacement and still carries the app label
k8s-control-➜  ~ kubectl get all --show-labels                    
NAME                             READY   STATUS             RESTARTS   AGE     LABELS
pod/bluelabel-65987b48d5-22gh6   0/1     ImagePullBackOff   0          8m39s   pod-template-hash=65987b48d5
pod/bluelabel-65987b48d5-rk7n2   0/1     ImagePullBackOff   0          105s    app=bluelabel,pod-template-hash=65987b48d5

# 12.5 Rolling back an update

k8s-control-➜  ckad git:(master) ✗ cat rolling.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rolling-nginx
spec:
  replicas: 4
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 2
      maxUnavailable: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.8
k8s-control-➜  ckad git:(master) ✗ kubectl create -f rolling.yaml                
deployment.apps/rolling-nginx created
k8s-control-➜  ckad git:(master) ✗ kubectl get deploy
NAME            READY   UP-TO-DATE   AVAILABLE   AGE
rolling-nginx   4/4     4            4           44s
# Show the rollout history
k8s-control-➜  ckad git:(master) ✗ kubectl rollout history deployment                                                              
deployment.apps/rolling-nginx 
REVISION  CHANGE-CAUSE
1         <none>
# Edit the deployment, changing the image to 1.15
k8s-control-➜  ckad git:(master) ✗ kubectl edit deployments.apps rolling-nginx
    spec:
      containers:
      - image: nginx:1.15
# Check again: a new revision has appeared
k8s-control-➜  ckad git:(master) ✗ kubectl rollout history deployment rolling-nginx
deployment.apps/rolling-nginx 
REVISION  CHANGE-CAUSE
1         <none>
2         <none>
# Inspect the details of a revision
k8s-control-➜  ckad git:(master) ✗ kubectl rollout history deployment rolling-nginx --revision=2
deployment.apps/rolling-nginx with revision #2
Pod Template:
  Labels:       app=nginx
        pod-template-hash=9d6f4f858
  Containers:
   nginx:
    Image:      nginx:1.15
    Port:       <none>
    Host Port:  <none>
    Environment:        <none>
    Mounts:     <none>
  Volumes:      <none>
# Roll back to revision 1
k8s-control-➜  ckad git:(master) ✗ kubectl rollout undo deployment rolling-nginx --to-revision=1
deployment.apps/rolling-nginx rolled back
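The CHANGE-CAUSE column above shows <none>. One way to populate it (an optional extra, not shown in the original session) is the kubernetes.io/change-cause annotation, which rollout history records per revision:

kubectl annotate deployment rolling-nginx kubernetes.io/change-cause="image updated to nginx:1.15"
kubectl rollout history deployment rolling-nginx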

# 12.6 Autoscaling

# metrics-server must be installed first (see above)
# Prepare a CPU-hungry task
k8s-control-➜  autoscaling git:(master) ✗ cat index.php
<?php
        $x = 0.0001;
        for ($i = 0; $i <= 1000000; $i++) {
                $x += sqrt($x);
        }
        echo "OK!";
?>
# Building the image: https://bigdata.bihell.com/hadoop/docker.html#_2-4-%E5%88%9B%E5%BB%BA%E9%95%9C%E5%83%8F
# Pushing the image: https://bigdata.bihell.com/hadoop/docker.html#_2-8-%E6%8E%A8%E9%80%81%E8%BF%9C%E7%A8%8B%E4%BB%93%E5%BA%93
# The deployment below pulls that image
k8s-control-➜  autoscaling git:(master) ✗ cat hpa.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: php-apache
spec:
  selector:
    matchLabels:
      run: php-apache
  replicas: 1
  template:
    metadata:
      labels:
        run: php-apache
    spec:
      containers:
      - name: php-apache
        image: tpxcer/php-apache:latest
        ports:
        - containerPort: 80
        resources:
          limits:
            cpu: 500m
          requests:
            cpu: 200m
---
apiVersion: v1
kind: Service
metadata:
  name: php-apache
  labels:
    run: php-apache
spec:
  ports:
  - port: 80
  selector:
    run: php-apache
# Apply the manifests
k8s-control-➜  autoscaling git:(master) ✗ kubectl apply -f hpa.yaml                                            
deployment.apps/php-apache created
service/php-apache created
# Autoscaling
k8s-control-➜  autoscaling git:(master) ✗ kubectl autoscale -h | less
# Scale out automatically when CPU usage exceeds 50%, min 1 replica, max 10
k8s-control-➜  autoscaling git:(master) ✗ kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
horizontalpodautoscaler.autoscaling/php-apache autoscaled
k8s-control-➜  autoscaling git:(master) ✗ kubectl api-resources |grep auto
k8s-control-➜  autoscaling git:(master) ✗ kubectl get hpa                 
NAME         REFERENCE               TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
php-apache   Deployment/php-apache   0%/50%    1         10        1          9m16s
k8s-control-➜  autoscaling git:(master) ✗ kubectl describe hpa
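To actually watch the HPA scale out, one option (following the upstream HPA walkthrough; the pod name load-generator is arbitrary) is to generate load against the service from a throwaway pod and watch the HPA react:

# Generate load (Ctrl-C to stop)
kubectl run load-generator --rm -it --image=busybox -- /bin/sh -c "while sleep 0.01; do wget -q -O- http://php-apache; done"
# In another terminal, watch the replica count climb
kubectl get hpa php-apache --watch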

# 13. Hands-on 5: Services

# 13.1 Creating a NodePort service

# Create the app
k8s-control-➜  autoscaling git:(master) ✗ kubectl create deploy nginxsvc --image=nginx          
deployment.apps/nginxsvc created
# Scale out to three replicas
k8s-control-➜  autoscaling git:(master) ✗ kubectl scale deploy nginxsvc --replicas=3       
deployment.apps/nginxsvc scaled
# Check
k8s-control-➜  autoscaling git:(master) ✗ kubectl get all --selector app=nginxsvc   
NAME                            READY   STATUS    RESTARTS   AGE
pod/nginxsvc-69b846bcff-j6bvx   1/1     Running   0          37s
pod/nginxsvc-69b846bcff-nk5ds   1/1     Running   0          72s
pod/nginxsvc-69b846bcff-x9h2d   1/1     Running   0          37s

NAME                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginxsvc   3/3     3            3           72s

NAME                                  DESIRED   CURRENT   READY   AGE
replicaset.apps/nginxsvc-69b846bcff   3         3         3       72s
# Expose port 80
k8s-control-➜  autoscaling git:(master) ✗ kubectl expose deploy nginxsvc --port=80
service/nginxsvc exposed 
# View service/nginxsvc
k8s-control-➜  autoscaling git:(master) ✗ kubectl get all --selector app=nginxsvc   
NAME                            READY   STATUS    RESTARTS   AGE
pod/nginxsvc-69b846bcff-j6bvx   1/1     Running   0          2m55s
pod/nginxsvc-69b846bcff-nk5ds   1/1     Running   0          3m30s
pod/nginxsvc-69b846bcff-x9h2d   1/1     Running   0          2m55s

NAME               TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
service/nginxsvc   ClusterIP   10.111.124.138   <none>        80/TCP    26s

NAME                       READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginxsvc   3/3     3            3           3m30s

NAME                                  DESIRED   CURRENT   READY   AGE
replicaset.apps/nginxsvc-69b846bcff   3         3         3       3m30s
# A second way to look: describe shows the Endpoints list
k8s-control-➜  autoscaling git:(master) ✗ kubectl describe svc nginxsvc                    
Name:              nginxsvc
Namespace:         default
Labels:            app=nginxsvc
Annotations:       <none>
Selector:          app=nginxsvc
Type:              ClusterIP
IP Family Policy:  SingleStack
IP Families:       IPv4
IP:                10.111.124.138
IPs:               10.111.124.138
Port:              <unset>  80/TCP
TargetPort:        80/TCP
Endpoints:         172.16.126.11:80,172.16.126.12:80,172.16.194.120:80
Session Affinity:  None
Events:            <none>
# Or export the config to view it
k8s-control-➜  autoscaling git:(master) ✗ kubectl get svc nginxsvc -o yaml                 
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2023-10-26T07:00:37Z"
  labels:
    app: nginxsvc
  name: nginxsvc
  namespace: default
  resourceVersion: "1009664"
  uid: 3648eace-abdd-4cdc-830f-e35d7104c2ad
spec:
  clusterIP: 10.111.124.138
  clusterIPs:
  - 10.111.124.138
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginxsvc
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
# View the endpoints directly
k8s-control-➜  autoscaling git:(master) ✗ kubectl get endpoints           
NAME         ENDPOINTS                                                    AGE
nginxsvc     172.16.126.11:80,172.16.126.12:80,172.16.194.120:80          5m30s
# Check the service; with only a ClusterIP it cannot be reached from outside the cluster
k8s-control-➜  autoscaling git:(master) ✗ kubectl get svc                 
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
nginxsvc     ClusterIP   10.111.124.138   <none>        80/TCP    6m26s
# Change the service type to NodePort and add nodePort: 32000
k8s-control-➜  autoscaling git:(master) ✗ kubectl edit svc nginxsvc    
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 32000
  selector:
    app: nginxsvc
  sessionAffinity: None
  type: NodePort
# Check the svc again: the type has changed
k8s-control-➜  autoscaling git:(master) ✗ kubectl get svc          
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
nginxsvc     NodePort    10.111.124.138   <none>        80:32000/TCP   12m
# The app is now reachable via any node's IP plus the node port
k8s-control-➜  autoscaling git:(master) ✗ curl http://k8s-control:32000/
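The same result can be reached declaratively instead of via kubectl edit; a minimal sketch of the equivalent Service manifest:

apiVersion: v1
kind: Service
metadata:
  name: nginxsvc
spec:
  type: NodePort
  selector:
    app: nginxsvc
  ports:
  - port: 80          # service (ClusterIP) port
    targetPort: 80    # container port
    nodePort: 32000   # must be in the default 30000-32767 range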

# 13.2 DNS

# Create a test pod
k8s-control-➜  autoscaling git:(master) ✗ kubectl run testpod --image=busybox -- sleep 3000                                               
pod/testpod created
# Look at the cluster DNS
k8s-control-➜  autoscaling git:(master) ✗ kubectl get svc,pods -n kube-system              
NAME                     TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                  AGE
service/kube-dns         ClusterIP   10.96.0.10       <none>        53/UDP,53/TCP,9153/TCP   5d14h

NAME                                           READY   STATUS    RESTARTS       AGE
pod/coredns-5dd5756b68-bh2pw                   1/1     Running   1 (46h ago)    5d14h
pod/coredns-5dd5756b68-m6qgj                   1/1     Running   1 (46h ago)    5d14h
# The pod's resolver config points at the same cluster DNS address shown above
k8s-control-➜  autoscaling git:(master) ✗ kubectl exec -it testpod -- cat /etc/resolv.conf
search default.svc.cluster.local svc.cluster.local cluster.local
nameserver 10.96.0.10
options ndots:5
# nslookup nginxsvc
k8s-control-➜  autoscaling git:(master) ✗ kubectl exec -it testpod -- nslookup nginxsvc   
Server:         10.96.0.10
Address:        10.96.0.10:53

Name:   nginxsvc.default.svc.cluster.local
Address: 10.111.124.138
# The IP resolved by name matches the actual nginxsvc ClusterIP, 10.111.124.138
k8s-control-➜  autoscaling git:(master) ✗ kubectl get svc                              
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
nginxsvc     NodePort    10.111.124.138   <none>        80:32000/TCP   38m
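Services in other namespaces resolve by their fully qualified name, <service>.<namespace>.svc.cluster.local; for example, still from testpod:

kubectl exec -it testpod -- nslookup kube-dns.kube-system.svc.cluster.local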

# 13.3 Network policies

Network policies can select traffic at three levels: Pods (podSelector), Namespaces (namespaceSelector), and IP blocks (ipBlock).

k8s-control-➜  ckad git:(master) ✗ vim nwpolicy-complete-example.yaml
# The policy applies to pods labeled app: nginx
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: access-nginx
spec:
  podSelector:
    matchLabels:
      app: nginx
  ingress:
  - from:
    - podSelector:
        matchLabels:
          access: "true"
---
# the nginx pod
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  containers:
  - name: nwp-nginx
    image: nginx:1.17

---
# a non-nginx pod
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  labels:
    app: sleepy
spec:
  containers:
  - name: nwp-busybox
    image: busybox
    command:
    - sleep
    - "3600"
# Apply the config
k8s-control-➜  ckad git:(master) ✗ kubectl apply -f nwpolicy-complete-example.yaml                                                            
networkpolicy.networking.k8s.io/access-nginx created
pod/nginx created
pod/busybox created
# Expose the nginx pod
k8s-control-➜  ckad git:(master) ✗ kubectl expose pod nginx --port=80                     
service/nginx exposed
# Inspect the policy; note the from PodSelector access=true: pods need that label to connect
k8s-control-➜  ckad git:(master) ✗ kubectl describe networkpolicy    
Name:         access-nginx
Namespace:    default
Created on:   2023-10-26 07:50:32 +0000 UTC
Labels:       <none>
Annotations:  <none>
Spec:
  PodSelector:     app=nginx
  Allowing ingress traffic:
    To Port: <any> (traffic allowed to all ports)
    From:
      PodSelector: access=true
  Not affecting egress traffic
  Policy Types: Ingress
# Try to reach nginx from inside busybox
k8s-control-➜  ckad git:(master) ✗ kubectl exec -it busybox -- wget --spider --timeout=1 nginx
Connecting to nginx (10.105.250.172:80)
wget: download timed out
command terminated with exit code 1
# Set the label access=true
k8s-control-➜  ckad git:(master) ✗ kubectl label pod busybox access=true                      
pod/busybox labeled
# Access now succeeds
k8s-control-➜  ckad git:(master) ✗ kubectl exec -it busybox -- wget --spider --timeout=1 nginx
Connecting to nginx (10.105.250.172:80)
remote file exists
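A common companion (not part of this lab's files) is a namespace-wide default-deny ingress policy, which allow rules like access-nginx then selectively override; a minimal sketch:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  podSelector: {}   # empty selector matches every pod in the namespace
  policyTypes:
  - Ingress         # no ingress rules are listed, so all ingress is denied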

# 14. Hands-on 6: Storage

# 14.1 Ephemeral volumes

# Look up the definition
k8s-control-➜  ~ kubectl explain pod.spec.volumes |less
# Config file
# emptyDir here is ephemeral storage; both containers use the same volume "test"
k8s-control-➜  ckad git:(master) ✗ cat morevolumes.yaml
apiVersion: v1
kind: Pod
metadata: 
  name: morevol2
spec:
  containers:
  - name: centos1
    image: centos:7
    command:
      - sleep
      - "3600" 
    volumeMounts:
      - mountPath: /centos1
        name: test
  - name: centos2
    image: centos:7
    command:
      - sleep
      - "3600"
    volumeMounts:
      - mountPath: /centos2
        name: test
  volumes: 
    - name: test
      emptyDir: {}
# Create the pod
k8s-control-➜  ckad git:(master) ✗ kubectl create -f morevolumes.yaml                                                        
pod/morevol2 created
# Check the pod
k8s-control-➜  ckad git:(master) ✗ kubectl get pods morevol2         
NAME       READY   STATUS    RESTARTS   AGE
morevol2   2/2     Running   0          60s
# Both containers use the shared ephemeral directory
k8s-control-➜  ckad git:(master) ✗ kubectl describe pods morevol2 
# Create a file from centos1
k8s-control-➜  ckad git:(master) ✗ kubectl exec -it morevol2 -c centos1 -- touch /centos1/test
# The file is visible from centos2 as well
k8s-control-➜  ckad git:(master) ✗ kubectl exec -it morevol2 -c centos2 -- ls /centos2
test
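emptyDir also takes optional fields: medium: Memory backs the volume with tmpfs, and sizeLimit caps its size. A hypothetical variant of the pod above:

apiVersion: v1
kind: Pod
metadata:
  name: memvol
spec:
  containers:
  - name: c1
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - mountPath: /cache
      name: cache
  volumes:
  - name: cache
    emptyDir:
      medium: Memory    # tmpfs; contents count against the container's memory
      sizeLimit: 128Mi  # exceeding this can get the pod evicted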

# 14.2 Configuring a PV (Persistent Volume)

# The config creates a local (hostPath) volume
k8s-control-➜  ckad git:(master) ✗ cat pv.yaml
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv-volume
  labels:
      type: local
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mydata"
# Create the PV
k8s-control-➜  ckad git:(master) ✗ kubectl create -f pv.yaml 
persistentvolume/pv-volume created
# Inspect the PV
# Note: the /mydata directory is not created on the node until the volume is actually used
k8s-control-➜  ckad git:(master) ✗ kubectl describe pv pv-volume 

# 14.3 Configuring a PVC (PersistentVolumeClaim)

# Config file
k8s-control-➜  ckad git:(master) ✗ cat pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pv-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
# Create it
k8s-control-➜  ckad git:(master) ✗ kubectl create -f pvc.yaml   
persistentvolumeclaim/pv-claim created
# Check the PVC
k8s-control-➜  ckad git:(master) ✗ kubectl get pvc            
NAME       STATUS   VOLUME      CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pv-claim   Bound    pv-volume   2Gi        RWO                           20s
# Check the PV
k8s-control-➜  ckad git:(master) ✗ kubectl get pv           
NAME        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM              STORAGECLASS     REASON   AGE
pv-volume   2Gi        RWO            Retain           Bound    default/pv-claim                             2d18h
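Note that the claim asked for 1Gi but shows a capacity of 2Gi: a PVC binds to any available PV that satisfies its request (matching access mode and at least the requested size), and the claim then gets the PV's full capacity.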

# 14.4 Using a PVC from a Pod

# View the config
k8s-control-➜  ckad git:(master) ✗ cat pvc-pod.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
---
kind: Pod
apiVersion: v1
metadata:
   name: nginx-pvc-pod
spec:
  volumes:
    - name: site-storage
      persistentVolumeClaim:
        claimName: nginx-pvc
  containers:
    - name: pv-container
      image: nginx
      ports:
        - containerPort: 80
          name: webserver
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: site-storage
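To verify the mount end to end (commands assumed, not part of the original file), write a file through the pod and read it back; note the claim requests ReadWriteMany, so a PV supporting that access mode (or a default StorageClass) must exist for it to bind:

kubectl create -f pvc-pod.yaml
kubectl exec nginx-pvc-pod -- sh -c 'echo hello > /usr/share/nginx/html/index.html'
kubectl exec nginx-pvc-pod -- cat /usr/share/nginx/html/index.html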

# 15. Ingress

# Installation

# Install
k8s-control-➜  ~ kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.2/deploy/static/provider/cloud/deploy.yaml
# Check
kubectl get pods --namespace=ingress-nginx
# Test
kubectl create deployment demo --image=httpd --port=80
k8s-control-➜  ~ kubectl get pods --selector app=demo -o wide
NAME                   READY   STATUS    RESTARTS   AGE    IP              NODE          NOMINATED NODE   READINESS GATES
demo-b66f56cf5-svtpz   1/1     Running   0          117s   172.16.126.18   k8s-worker2   <none>           <none>
k8s-control-➜  ~ kubectl expose deployment demo
service/demo exposed
k8s-control-➜  ~ kubectl create ingress demo-localhost --class=nginx \
  --rule="demo.localdev.me/*=demo:80"
ingress.networking.k8s.io/demo-localhost created
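To test it from the control node, one option (from the ingress-nginx quick start; not in the original session) is to port-forward the controller service and request the demo hostname, which resolves to 127.0.0.1:

kubectl port-forward --namespace=ingress-nginx service/ingress-nginx-controller 8080:80
# in another terminal
curl http://demo.localdev.me:8080/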


# 16. Miscellaneous

# 16.1 Assorted commands

  1. Get the image a pod uses, output as YAML
kubectl get deploy my-dep -oyaml 
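A jsonpath variant (an extra, not in the original notes) extracts just the image names:

kubectl get deploy my-dep -o jsonpath='{.spec.template.spec.containers[*].image}'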

# 16.2 Adjusting a bound PV

  1. First dump the PV's config to a file
kubectl get pv pvc-e23c6ecc-0342-49f7-b35b-dd98b5061a11 -o yaml > data-elasticsearch-logging-discovery-0
  2. Delete the PV
kubectl delete pv pvc-e23c6ecc-0342-49f7-b35b-dd98b5061a11
  3. Remove the finalizers: edit the saved file and delete the kubernetes.io/pv-protection entry (the kubectl.kubernetes.io/last-applied-configuration annotation in the export was truncated and is omitted here)

metadata:
  annotations:
    pv.kubernetes.io/provisioned-by: k8s-sigs.io/nfs-subdir-external-provisioner
  creationTimestamp: "2021-12-27T10:10:59Z"
  finalizers:
  - kubernetes.io/pv-protection
  name: pvc-e23c6ecc-0342-49f7-b35b-dd98b5061a11
  resourceVersion: "1834686"
  uid: ee03e0ad-c0b0-49eb-a60c-6256c7eda1c9
spec:
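Alternatively (a shortcut, not from the original notes), the finalizer can be cleared in place without editing the file:

kubectl patch pv pvc-e23c6ecc-0342-49f7-b35b-dd98b5061a11 -p '{"metadata":{"finalizers":null}}'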
  4. Re-apply the fixed config
kubectl apply -f data-elasticsearch-logging-discovery-0

Reference: PV and PVC state changes in Kubernetes (Kubernetes 中 PV 和 PVC 的状态变化)

Last updated: 11/1/2023, 1:43:06 PM