# Kubernetes

## 1. Concepts

### 1.1 Master node (control-plane node)

#### 1.1.1 API server

The unified entry point to the cluster; all requests go through it via a RESTful API.

#### 1.1.2 scheduler

Handles node scheduling: selects which node a new application (Pod) should be deployed to.

#### 1.1.3 controller-manager

Runs the cluster's routine background control loops; each resource type has its own controller (see the command sketch after this list):

  • Ensures the expected number of Pod replicas (ReplicaSet)
  • Stateless application deployment (Deployment)
  • Stateful application deployment (StatefulSet)
  • Ensures every node runs a copy of the same Pod (DaemonSet)
  • One-off and scheduled tasks (Job / CronJob)
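
A quick way to see the objects these controllers manage in a running cluster (a minimal sketch; the resource names are the standard built-in kinds):

```sh
# List workload objects across all namespaces
kubectl get replicasets,deployments,statefulsets,daemonsets,jobs,cronjobs -A
```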

#### 1.1.4 etcd

The storage system: a key-value store that persists all cluster state.

### 1.2 Worker node

#### 1.2.1 kubelet

Manages the containers on its own node.

#### 1.2.2 kube-proxy

Provides network proxying, load balancing, and related functions.

#### 1.2.3 Pod

  • The smallest deployable unit
  • A group of one or more containers
  • Containers in a Pod share the network
  • Ephemeral by design

## 2. Creating a cluster with kubeadm

See the separate setup guide (link).

### 2.1 Installing the official web UI (Dashboard)

1. Deploy

Dashboard is the official Kubernetes web UI: https://github.com/kubernetes/dashboard

```sh
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
```
2. Expose the access port

```sh
# Edit the Service to expose the port:
# change "type: ClusterIP" to "type: NodePort"
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
```
3. Find the assigned port

```sh
[root@master01 ~]# kubectl get svc -A |grep kubernetes-dashboard
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.96.191.17    <none>        8000/TCP                 8m13s
kubernetes-dashboard   kubernetes-dashboard        NodePort    10.96.146.196   <none>        443:31564/TCP            8m14s
```

Open https://master01:31564/ in a browser; logging in still requires a token.

4. Create an access account (for the token)

```yaml
# Create the access account; prepare a yaml file: vi dash.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
```

```sh
[root@master01 ~]# vi dash.yaml
[root@master01 ~]# kubectl apply -f dash.yaml
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
```
5. Get the token

```sh
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
```
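
Note: on Kubernetes v1.24 and later, ServiceAccounts no longer get a token Secret created automatically, so the command above returns nothing there. On such clusters you can request a short-lived token instead:

```sh
# Kubernetes v1.24+: issue a temporary token for the ServiceAccount
kubectl -n kubernetes-dashboard create token admin-user
```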

Then paste the token into the token field on the UI login page.

## 3. Hands-on

### 3.1 Namespace

Namespaces are used to isolate resources. By default they isolate resources but not the network. Resources deployed without an explicit namespace land in the `default` namespace.

```sh
# Create
kubectl create ns hello
# Delete (this also deletes every resource inside the namespace)
kubectl delete ns hello
# List namespaces
kubectl get ns
# -A shows the namespace column when listing workloads
kubectl get pods -A
# List pods in a specific namespace
kubectl get pod -n kube-system
```

A namespace as a resource manifest:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: hello
```

```sh
# Apply the manifest
kubectl apply -f hello.yaml
# Delete via the manifest
kubectl delete -f hello.yaml
```
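
To deploy a resource into a specific namespace declaratively, set `metadata.namespace` in its manifest. A minimal sketch (the Pod name and image are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: demo-nginx       # illustrative name
  namespace: hello       # lands in "hello" instead of "default"
spec:
  containers:
  - image: nginx
    name: demo-nginx
```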

### 3.2 Pod

A group of running containers; the Pod is the smallest deployable unit of an application in Kubernetes.

1. Create a pod

```sh
# mynginx is the pod's name
kubectl run mynginx --image=nginx
# List pods
kubectl get pod
kubectl get pod -n default
# -owide also shows the pod IP
kubectl get pod -owide
# Describe the pod
kubectl describe pod mynginx
# View the logs
kubectl logs mynginx
```
2. Delete pods

```sh
kubectl delete pod mynginx
kubectl delete pod myapp mynginx -n default
```
3. Open an interactive shell in the container

```sh
kubectl exec -it mynginx -- /bin/bash
```
4. Create from a manifest

```yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: mynginx
  # pod name
  name: mynginx
#  namespace: default
spec:
  containers:
  - image: nginx
    name: mynginx
```

Multiple containers:

```yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: myapp
  name: myapp
spec:
  containers:
  - image: nginx
    name: nginx
  - image: tomcat:8.5.68
    name: tomcat
```
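
Both containers share the Pod's network namespace, so they can reach each other on localhost. To target one container of a multi-container Pod, select it with `-c` (a sketch against the `myapp` Pod above):

```sh
# Open a shell in the tomcat container specifically
kubectl exec -it myapp -c tomcat -- /bin/bash
# Inside it, nginx in the sibling container answers on localhost
# (assuming curl is available in the image)
curl http://localhost:80
```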

### 3.3 Deployment

Controls Pods, giving them multi-replica, self-healing, and scale-out/scale-in capabilities.

```sh
# Clear out all Pods first, then compare the effect of these two commands:
kubectl run mynginx --image=nginx

# Self-healing: a Deployment recreates its Pod after deletion
kubectl create deployment mytomcat --image=tomcat:8.5.68

# Delete the Deployment
kubectl delete deploy mytomcat
```
1. Multiple replicas

```sh
kubectl create deployment my-dep --image=nginx --replicas=3
```
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  replicas: 3
  selector:
    matchLabels:
      app: my-dep
  template:
    metadata:
      labels:
        app: my-dep
    spec:
      containers:
      - image: nginx
        name: nginx
```
2. Scaling

```sh
kubectl scale --replicas=5 deployment/my-dep
# Or edit the spec directly
kubectl edit deployment my-dep
```
3. Rolling update

```sh
kubectl set image deployment/my-dep nginx=nginx:1.16.1 --record
kubectl rollout status deployment/my-dep
# Or edit directly: kubectl edit deployment/my-dep
```
4. Rollback

```sh
# History
kubectl rollout history deployment/my-dep

# Details of a specific revision
kubectl rollout history deployment/my-dep --revision=2

# Roll back to the previous revision
kubectl rollout undo deployment/my-dep

# Roll back to a specific revision
kubectl rollout undo deployment/my-dep --to-revision=2
```
5. Self-healing & failover

```sh
watch -n 1 kubectl get pod
# or
kubectl get pod -w
```
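
To see self-healing in action, delete one of my-dep's Pods while watching; the Deployment controller immediately replaces it (the Pod name below is illustrative):

```sh
# Terminal 1: watch pod events
kubectl get pod -w
# Terminal 2: kill any my-dep Pod (name is illustrative)
kubectl delete pod my-dep-5b7868d854-abcde
# A replacement Pod with a fresh name appears within seconds
```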

More: besides Deployment, Kubernetes also offers StatefulSet, DaemonSet, Job, and other resource types, collectively called workloads. Deploy stateful applications with StatefulSet and stateless ones with Deployment. See https://kubernetes.io/zh/docs/concepts/workloads/controllers/

### 3.4 Service

An abstraction for exposing a group of Pods as a network service.

```sh
# Expose the Deployment
kubectl expose deployment my-dep --port=8000 --target-port=80
kubectl get service
# Delete
kubectl delete svc my-dep
# Select Pods by label
kubectl get pod -l app=my-dep
```
```yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  selector:
    app: my-dep
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
```

#### 3.4.1 ClusterIP

Not reachable from outside the cluster.

```sh
# Equivalent to omitting --type
kubectl expose deployment my-dep --port=8000 --target-port=80 --type=ClusterIP
```
```yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
  selector:
    app: my-dep
  type: ClusterIP
```
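
Inside the cluster, the Service is also reachable by its DNS name, in the form `<service>.<namespace>.svc.cluster.local`. A quick sketch from any Pod (assuming the image provides curl):

```sh
# From a shell inside any Pod, e.g. kubectl exec -it mynginx -- /bin/bash
curl http://my-dep.default.svc.cluster.local:8000
# The short name resolves too when you are in the same namespace
curl http://my-dep:8000
```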

#### 3.4.2 NodePort

Reachable from outside the cluster.

```sh
kubectl expose deployment my-dep --port=8000 --target-port=80 --type=NodePort
```
```yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
  selector:
    app: my-dep
  type: NodePort
```

NodePort values are allocated from the 30000-32767 range.
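
If you need a stable port instead of a randomly assigned one, pin it with the `nodePort` field; it must still fall inside that range. A minimal sketch (31000 is an illustrative choice):

```yaml
spec:
  type: NodePort
  selector:
    app: my-dep
  ports:
  - port: 8000
    targetPort: 80
    nodePort: 31000   # illustrative; must be within 30000-32767
```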

### 3.5 Ingress

A unified gateway entry point in front of Services.

#### 3.5.1 Installation

```sh
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.4/deploy/static/provider/baremetal/deploy.yaml

# Swap in a mirrored image
vi deploy.yaml
# Change the image value to:
registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/ingress-nginx-controller:v0.46.0

# Install
kubectl apply -f deploy.yaml

# Check the pods
kubectl get pod -A
# Check the services
[root@master01 ~]# kubectl get svc -n ingress-nginx
NAME                                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             NodePort    10.96.124.59   <none>        80:31060/TCP,443:32704/TCP   112m
ingress-nginx-controller-admission   ClusterIP   10.96.125.3    <none>        443/TCP                      112m
# View logs
kubectl logs -n ingress-nginx ingress-nginx-admission-create-vc2nb
```

#### 3.5.2 Usage

##### Test environment

Manifest for the test applications:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-server
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hello-server
  template:
    metadata:
      labels:
        app: hello-server
    spec:
      containers:
      - name: hello-server
        image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/hello-server
        ports:
        - containerPort: 9000
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-demo
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
      - image: nginx
        name: nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  selector:
    app: nginx-demo
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hello-server
  name: hello-server
spec:
  selector:
    app: hello-server
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 9000
```
```sh
[root@master01 ~]# kubectl apply -f deploy.yaml
deployment.apps/hello-server created
deployment.apps/nginx-demo created
service/nginx-demo created
service/hello-server created
```

##### Domain-based access

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
  name: ingress-host-bar
spec:
  ingressClassName: nginx
  rules:
  - host: "hello.bihell.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: hello-server
            port:
              number: 8000
  - host: "demo.bihell.com"
    http:
      paths:
      - pathType: Prefix
        path: "/nginx(/|$)(.*)"  # requests are forwarded to the service below, which must be able to handle this path; otherwise it's a 404
        backend:
          service:
            name: nginx-demo  ## e.g. a Java app; use path rewriting to strip the /nginx prefix
            port:
              number: 8000
```
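
These hostnames are test domains, so point them at any node's IP on the client machine first; traffic then enters through the ingress controller's HTTP NodePort (31060 in the svc listing above). A sketch with an illustrative node IP:

```sh
# On the client machine (node IP is illustrative)
echo "192.168.50.10 hello.bihell.com demo.bihell.com" >> /etc/hosts
# Hit the ingress controller's HTTP NodePort
curl http://hello.bihell.com:31060/
curl http://demo.bihell.com:31060/nginx
```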

##### Path rewriting

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /$2
  name: ingress-host-bar
spec:
  ingressClassName: nginx
  rules:
  - host: "hello.atguigu.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: hello-server
            port:
              number: 8000
  - host: "demo.atguigu.com"
    http:
      paths:
      - pathType: Prefix
        path: "/nginx(/|$)(.*)"  # requests are forwarded to the service below, which must be able to handle this path; otherwise it's a 404
        backend:
          service:
            name: nginx-demo  ## e.g. a Java app; use path rewriting to strip the /nginx prefix
            port:
              number: 8000
```
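
In `rewrite-target: /$2`, `$2` refers to the second capture group of the `path` regex, so the `/nginx` prefix is stripped before the request reaches the backend. A worked example (request paths are illustrative):

```sh
# path: "/nginx(/|$)(.*)"  ->  group 1 = "(/|$)", group 2 = "(.*)"
curl http://demo.atguigu.com/nginx/some/page   # backend receives /some/page
curl http://demo.atguigu.com/nginx/            # backend receives /
```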

##### Rate limiting

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-limit-rate
  annotations:
    nginx.ingress.kubernetes.io/limit-rps: "1"
spec:
  ingressClassName: nginx
  rules:
  - host: "haha.atguigu.com"
    http:
      paths:
      - pathType: Exact
        path: "/"
        backend:
          service:
            name: nginx-demo
            port:
              number: 8000
```
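
`limit-rps: "1"` caps each client IP at roughly one request per second; by default ingress-nginx answers excess requests with HTTP 503. A quick check (assumes haha.atguigu.com resolves to a node, e.g. via /etc/hosts):

```sh
# Fire several rapid requests; past 1 rps the controller returns 503
for i in $(seq 1 5); do
  curl -s -o /dev/null -w "%{http_code}\n" http://haha.atguigu.com:31060/
done
```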

To modify it after deployment:

```sh
kubectl edit ing xxxx
```

### 3.6 Storage abstraction

#### 3.6.1 Environment setup

1. On all nodes

```sh
yum install -y nfs-utils
```

2. On the master node

```sh
# On the NFS master node
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports

mkdir -p /nfs/data
systemctl enable rpcbind --now
systemctl enable nfs-server --now
# Apply the export configuration
exportfs -r
# List the exported directories
exportfs
```

3. On the worker nodes

```sh
showmount -e master01

# Mount the NFS server's shared directory at the local path /nfs/data
mkdir -p /nfs/data

mount -t nfs master01:/nfs/data /nfs/data
# Write a test file
echo "hello nfs server" > /nfs/data/test.txt
```

4. Mounting data the native way (a raw `nfs` volume; see the preparation sketch after the manifest)

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-pv-demo
  name: nginx-pv-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-pv-demo
  template:
    metadata:
      labels:
        app: nginx-pv-demo
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
        - name: html
          nfs:
            server: 172.31.0.4
            path: /nfs/data/nginx-pv
```
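
One caveat with the raw `nfs` volume: the path must already exist on the NFS server, or the Pod will fail to mount. A sketch to prepare and verify (172.31.0.4 is the server IP from the manifest above):

```sh
# On the NFS server: create the exported subdirectory first
mkdir -p /nfs/data/nginx-pv
echo "hello from nfs" > /nfs/data/nginx-pv/index.html

# After kubectl apply, every replica serves the same shared file
kubectl get pod -l app=nginx-pv-demo -owide
```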

#### 3.6.2 PV & PVC

PV: Persistent Volume, which stores the data an application needs to persist at a designated location. PVC: Persistent Volume Claim, a declaration of the persistent-volume spec the application needs.

1. Create the PV pool

```sh
# On the NFS master node
mkdir -p /nfs/data/01
mkdir -p /nfs/data/02
mkdir -p /nfs/data/03
```
2. Create the PVs

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv01-10m
spec:
  capacity:
    storage: 10M
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/01
    server: 172.31.0.4
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv02-1gi
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/02
    server: 172.31.0.4
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv03-3gi
spec:
  capacity:
    storage: 3Gi
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/03
    server: 172.31.0.4
```

```sh
# List the PVs
kubectl get persistentvolume
kubectl get pv
```
3. Create and bind a PVC

Create the PVC:

```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 200Mi
  storageClassName: nfs
```
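
When the claim is applied, Kubernetes binds it to the smallest PV in the `nfs` storage class that satisfies the request; here 200Mi cannot fit in `pv01-10m`, so `pv02-1gi` should be chosen. Verify with:

```sh
kubectl get pvc
kubectl get pv
# The PVC's STATUS becomes Bound and its VOLUME column names the chosen PV
```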

Create a Deployment whose Pods bind the PVC:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-deploy-pvc
  name: nginx-deploy-pvc
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-deploy-pvc
  template:
    metadata:
      labels:
        app: nginx-deploy-pvc
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
        - name: html
          persistentVolumeClaim:
            claimName: nginx-pvc
```

#### 3.6.3 ConfigMap

Extracts application configuration so it can be managed centrally and updated automatically. Redis is used as the example below.

1. Turn the existing config file into a ConfigMap

```sh
# Create the config; redis.conf is stored in Kubernetes' etcd
kubectl create cm redis-conf --from-file=redis.conf
```

You can view the generated YAML with `kubectl get cm redis-conf -oyaml`; trimmed down it looks like this:

```yaml
apiVersion: v1
data:    # data holds the real payload; the key defaults to the file name, the value is the file content
  redis.conf: |
    appendonly yes
kind: ConfigMap
metadata:
  name: redis-conf
  namespace: default
```

Edit it with `kubectl edit cm redis-conf`.

2. Create the Pod

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: redis
spec:
  containers:
  - name: redis
    image: redis
    command:
      - redis-server
      - "/redis-master/redis.conf"  # path inside the redis container
    ports:
    - containerPort: 6379
    volumeMounts:
    - mountPath: /data
      name: data
    - mountPath: /redis-master
      name: config
  volumes:
    - name: data
      emptyDir: {}
    - name: config
      configMap:
        name: redis-conf
        items:
        - key: redis.conf
          path: redis.conf
```
3. Check the effective configuration

```sh
kubectl exec -it redis -- redis-cli

127.0.0.1:6379> CONFIG GET appendonly
127.0.0.1:6379> CONFIG GET requirepass
```
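
You can also confirm that the ConfigMap content landed where the container expects it, matching the volumeMounts above:

```sh
kubectl exec redis -- cat /redis-master/redis.conf
# Expected output: appendonly yes
```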

#### 3.6.4 Secret

The Secret object type holds sensitive information such as passwords, OAuth tokens, and SSH keys. Putting this information in a Secret is safer and more flexible than baking it into a Pod definition or a container image.

```sh
kubectl create secret docker-registry leifengyang-docker \
--docker-username=leifengyang \
--docker-password=Lfy123456 \
--docker-email=534096094@qq.com

## Command template
kubectl create secret docker-registry regcred \
  --docker-server=<your registry server> \
  --docker-username=<your username> \
  --docker-password=<your password> \
  --docker-email=<your email>
```
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: private-nginx
spec:
  containers:
  - name: private-nginx
    image: leifengyang/guignginx:v1.0
  imagePullSecrets:
  - name: leifengyang-docker
```
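
The registry credentials are stored base64-encoded under the `.dockerconfigjson` key; decoding it is a handy sanity check (a sketch; note the escaped dot in the jsonpath):

```sh
kubectl get secret leifengyang-docker -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d
```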

## 4. Miscellaneous

### 4.1 Useful commands

1. Get the image a Pod uses, output as YAML

```sh
kubectl get deploy my-dep -oyaml
```

### 4.2 Fixing a bound PV

1. First dump the PV's manifest

```sh
kubectl get pv pvc-e23c6ecc-0342-49f7-b35b-dd98b5061a11 -o yaml > data-elasticsearch-logging-discovery-0
```
2. Delete the PV

```sh
kubectl delete pv pvc-e23c6ecc-0342-49f7-b35b-dd98b5061a11
```
3. Remove the finalizers

Edit the dumped manifest and delete the `finalizers` entry (`kubernetes.io/pv-protection`) from `metadata`, so the re-created PV does not get stuck. The relevant fragment looks like this (the long `last-applied-configuration` annotation is truncated):

```yaml
metadata:
  annotations:
    pv.kubernetes.io/provisioned-by: k8s-sigs.io/nfs-subdir-external-provisioner
  creationTimestamp: "2021-12-27T10:10:59Z"
  finalizers:                       # delete this list
  - kubernetes.io/pv-protection
  name: pvc-e23c6ecc-0342-49f7-b35b-dd98b5061a11
  resourceVersion: "1834686"
  uid: ee03e0ad-c0b0-49eb-a60c-6256c7eda1c9
spec:
```
4. Re-apply the fixed manifest

```sh
kubectl apply -f data-elasticsearch-logging-discovery-0
```

Reference: PV and PVC state transitions in Kubernetes
