云计算第四阶段: cloud二周目 07-08

发布于:2024-10-18 ⋅ 阅读:(13) ⋅ 点赞:(0)

cloud 07

一、k8s服务管理

创建服务

# 资源清单文件
[root@master ~]# kubectl create service clusterip websvc --tcp=80:80 --dry-run=client -o yaml
[root@master ~]# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80

[root@master ~]# kubectl apply -f websvc.yaml 
service/websvc created
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)
kubernetes   ClusterIP   10.245.0.1      <none>        443/TCP
websvc       ClusterIP   10.245.5.18     <none>        80/TCP
解析域名
# 安装工具软件包
[root@master ~]# dnf install -y bind-utils
# 查看 DNS 服务地址
[root@master ~]# kubectl -n kube-system get service kube-dns
NAME       TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
kube-dns   ClusterIP   10.245.0.10   <none>        53/UDP,53/TCP,9153/TCP
# 域名解析测试
[root@master ~]# host websvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases: 

websvc.default.svc.cluster.local has address 10.245.5.18

创建后端应用

[root@master ~]# vim web1.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: web1
  labels:
    app: web   # 服务靠标签寻找后端
spec:
  containers:
  - name: apache
    image: myos:httpd

[root@master ~]# kubectl apply -f web1.yaml
pod/web1 created
[root@master ~]# curl http://10.245.5.18
Welcome to The Apache.
负载均衡
[root@master ~]# sed 's,web1,web2,' web1.yaml |kubectl apply -f -
pod/web2 created
[root@master ~]# sed 's,web1,web3,' web1.yaml |kubectl apply -f -
pod/web3 created
[root@master ~]# curl -s http://10.245.5.18/info.php |grep php_host
php_host:       web1
[root@master ~]# curl -s http://10.245.5.18/info.php |grep php_host
php_host:       web2
[root@master ~]# curl -s http://10.245.5.18/info.php |grep php_host
php_host:       web3
固定 IP 服务
[root@master ~]# vim websvc.yaml 
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80    # 可以设置 ClusterIP
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80

[root@master ~]# kubectl replace --force -f websvc.yaml 
service "websvc" deleted
service/websvc replaced
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
kubernetes   ClusterIP   10.245.0.1    <none>        443/TCP
websvc       ClusterIP   10.245.1.80   <none>        80/TCP

端口别名

[root@master ~]# vim websvc.yaml 
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    targetPort: myhttp    # 使用别名查找后端服务端口

[root@master ~]# kubectl replace --force -f websvc.yaml 
service "websvc" deleted
service/websvc replaced

[root@master ~]# kubectl delete pod --all
pod "web1" deleted
pod "web2" deleted
pod "web3" deleted

[root@master ~]# vim web1.yaml 
---
kind: Pod
apiVersion: v1
metadata:
  name: web1
  labels:
    app: web
spec:
  containers:
  - name: apache
    image: myos:httpd
    ports:               # 配置端口规范
    - name: myhttp       # 端口别名
      protocol: TCP      # 协议
      containerPort: 80  # 端口号

[root@master ~]# kubectl apply -f web1.yaml
pod/web1 created
[root@master ~]# curl http://10.245.1.80
Welcome to The Apache.

服务排错

---
kind: Service
apiVersion: v1
metadata:
  name: web123
spec:
  type: ClusterIP
  clusterIP: 192.168.1.88
  selector:
    app: apache
  ports:
  - protocol: TCP
    port: 80
    targetPort: web

nodePort


对外发布服务

[root@master ~]# vim mysvc.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: mysvc
spec:
  type: NodePort            # 服务类型
  selector:
    app: web
  ports:
  - protocol: TCP
    port: 80
    nodePort: 30080         # 映射端口号
    targetPort: 80

[root@master ~]# kubectl apply -f mysvc.yaml 
service/mysvc created
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
kubernetes   ClusterIP   10.245.0.1    <none>        443/TCP
websvc       ClusterIP   10.245.1.80   <none>        80/TCP
mysvc        NodePort    10.245.3.88   <none>        80:30080/TCP

[root@master ~]# curl http://node-0001:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0002:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0003:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0004:30080
Welcome to The Apache.
[root@master ~]# curl http://node-0005:30080
Welcome to The Apache.

二、Ingress 安装与策略配置

安装控制器

[root@master ~]# cd plugins/ingress
[root@master ingress]# docker load -i ingress.tar.xz
[root@master ingress]# docker images|while read i t _;do
    [[ "${t}" == "TAG" ]] && continue
    [[ "${i}" =~ ^"harbor:443/".+ ]] && continue
    docker tag ${i}:${t} harbor:443/plugins/${i##*/}:${t}
    docker push harbor:443/plugins/${i##*/}:${t}
    docker rmi ${i}:${t} harbor:443/plugins/${i##*/}:${t}
done
[root@master ingress]# sed -ri 's,^(\s*image: )(.*/)?(.+),\1harbor:443/plugins/\3,' deploy.yaml
443:    image: registry.k8s.io/ingress-nginx/controller:v1.9.6
546:    image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06
599:    image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20231226-1a7112e06

[root@master ingress]# kubectl apply -f deploy.yaml
[root@master ingress]# kubectl -n ingress-nginx get pods
NAME                                        READY   STATUS      RESTARTS
ingress-nginx-admission-create--1-lm52c     0/1     Completed   0
ingress-nginx-admission-patch--1-sj2lz      0/1     Completed   0
ingress-nginx-controller-5664857866-tql24   1/1     Running     0
验证后端服务
[root@master ~]# kubectl get pods,services 
NAME       READY   STATUS    RESTARTS   AGE
pod/web1   1/1     Running   0          35m

NAME                 TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)
service/kubernetes   ClusterIP   10.245.0.1    <none>        443/TCP
service/websvc       ClusterIP   10.245.1.80   <none>        80/TCP
service/mysvc        NodePort    10.245.3.88   <none>        80:30080/TCP

[root@master ~]# curl http://10.245.1.80
Welcome to The Apache.
对外发布服务
# 查询 ingress 控制器类名称
[root@master ~]# kubectl get ingressclasses.networking.k8s.io 
NAME    CONTROLLER             PARAMETERS   AGE
nginx   k8s.io/ingress-nginx   <none>       5m7s

# 资源清单文件
[root@master ~]# kubectl create ingress mying --class=nginx --rule=nsd.tedu.cn/*=websvc:80 --dry-run=client -o yaml
[root@master ~]# vim mying.yaml
---
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: mying
spec:
  ingressClassName: nginx
  rules:
  - host: nsd.tedu.cn
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: websvc
            port:
              number: 80

[root@master ~]# kubectl apply -f mying.yaml 
ingress.networking.k8s.io/mying created
[root@master ~]# kubectl get ingress
NAME    CLASS   HOSTS         ADDRESS        PORTS
mying   nginx   nsd.tedu.cn   192.168.1.51   80
[root@master ~]# curl -H "Host: nsd.tedu.cn" http://192.168.1.51
Welcome to The Apache.


三、Dashboard 安装

#下面给大家介绍下新的k8s插件

web 管理插件

安装 Dashboard

[root@master ~]# cd plugins/dashboard
[root@master dashboard]# docker load -i dashboard.tar.xz
[root@master dashboard]# docker images|while read i t _;do
    [[ "${t}" == "TAG" ]] && continue
    [[ "${i}" =~ ^"harbor:443/".+ ]] && continue
    docker tag ${i}:${t} harbor:443/plugins/${i##*/}:${t}
    docker push harbor:443/plugins/${i##*/}:${t}
    docker rmi ${i}:${t} harbor:443/plugins/${i##*/}:${t}
done
[root@master dashboard]# sed -ri 's,^(\s*image: )(.*/)?(.+),\1harbor:443/plugins/\3,' recommended.yaml
193:    image: kubernetesui/dashboard:v2.7.0
278:    image: kubernetesui/metrics-scraper:v1.0.8
[root@master dashboard]# kubectl apply -f recommended.yaml
[root@master dashboard]# kubectl -n kubernetes-dashboard get pods
NAME                                         READY   STATUS    RESTARTS
dashboard-metrics-scraper-66f6f56b59-b42ng   1/1     Running   0
kubernetes-dashboard-65ff57f4cf-lwtsk        1/1     Running   0

发布服务

# 查看服务状态
[root@master dashboard]# kubectl -n kubernetes-dashboard get service
NAME                        TYPE        CLUSTER-IP       PORT(S)
dashboard-metrics-scraper   ClusterIP   10.245.205.236   8000/TCP
kubernetes-dashboard        ClusterIP   10.245.215.40    443/TCP
# 获取服务资源对象文件
[root@master dashboard]# sed -n '30,45p' recommended.yaml >dashboard-svc.yaml
[root@master dashboard]# vim dashboard-svc.yaml
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      nodePort: 30443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

[root@master dashboard]# kubectl apply -f dashboard-svc.yaml 
service/kubernetes-dashboard configured
[root@master dashboard]# kubectl -n kubernetes-dashboard get service
NAME                        TYPE        CLUSTER-IP       PORT(S)
dashboard-metrics-scraper   ClusterIP   10.245.205.236   8000/TCP
kubernetes-dashboard        NodePort    10.245.215.40    443:30443/TCP

  • #记得访问下仪表盘dashboard登录页面

四、RBAC 权限管理

服务账号与权限

创建服务账号

# 资源对象模板
[root@master ~]# kubectl -n kubernetes-dashboard create serviceaccount kube-admin --dry-run=client -o yaml
[root@master ~]# vim admin-user.yaml
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: kube-admin
  namespace: kubernetes-dashboard

[root@master ~]# kubectl apply -f admin-user.yaml 
serviceaccount/kube-admin created
[root@master ~]# kubectl -n kubernetes-dashboard get serviceaccounts 
NAME                   SECRETS   AGE
default                0         16m
kube-admin             0         11s
kubernetes-dashboard   0         16m

获取用户 token

[root@master ~]# kubectl -n kubernetes-dashboard create token kube-admin
<Base64 编码的令牌数据>

角色与鉴权

#类似网游DNF里面的角色管理,GM管理员和玩家的关系。

资源对象 描述 作用域
ServiceAccount 服务账号,为 Pod 中运行的进程提供了一个身份 单一名称空间
Role 角色,包含一组代表相关权限的规则 单一名称空间
ClusterRole 角色,包含一组代表相关权限的规则 全集群
RoleBinding 将权限赋予用户,Role、ClusterRole 均可使用 单一名称空间
ClusterRoleBinding 将权限赋予用户,只可以使用 ClusterRole 全集群

资源对象权限

create delete deletecollection get list patch update watch
创建 删除 删除集合 获取属性 获取列表 补丁 更新 监控
普通角色
[root@master ~]# kubectl cluster-info dump |grep authorization-mode
                            "--authorization-mode=Node,RBAC",

# 资源对象模板
[root@master ~]# kubectl -n default create role myrole --resource=pods --verb=get,list --dry-run=client -o yaml
[root@master ~]# kubectl -n default create rolebinding kube-admin-role --role=myrole --serviceaccount=kubernetes-dashboard:kube-admin --dry-run=client -o yaml
[root@master ~]# vim myrole.yaml 
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: myrole
  namespace: default
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kube-admin-role
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: myrole
subjects:
- kind: ServiceAccount
  name: kube-admin
  namespace: kubernetes-dashboard

[root@master ~]# kubectl apply -f myrole.yaml 
role.rbac.authorization.k8s.io/myrole created
rolebinding.rbac.authorization.k8s.io/kube-admin-role created

[root@master ~]# kubectl delete -f myrole.yaml 
role.rbac.authorization.k8s.io "myrole" deleted
rolebinding.rbac.authorization.k8s.io "kube-admin-role" deleted
集群管理员
[root@master ~]# kubectl get clusterrole
NAME                              CREATED AT
admin                             2022-06-24T08:11:17Z
cluster-admin                     2022-06-24T08:11:17Z
... ...

# 资源对象模板
[root@master ~]# kubectl create clusterrolebinding kube-admin-role --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kube-admin --dry-run=client -o yaml
[root@master ~]# vim admin-user.yaml 
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: kube-admin
  namespace: kubernetes-dashboard

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kube-admin-role
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kube-admin
  namespace: kubernetes-dashboard

[root@master ~]# kubectl apply -f admin-user.yaml 
serviceaccount/kube-admin unchanged
clusterrolebinding.rbac.authorization.k8s.io/kube-admin-role created

cloud 08

#上一小节讲过K8S的有控制组件和计算组件。现在我们一起来深入研究K8S的控制组件。

一、Deployment

资源清单文件
[root@master ~]# kubectl create deployment myweb --image=myos:httpd --dry-run=client -o yaml
[root@master ~]# vim mydeploy.yaml
---
kind: Deployment          # 资源对象类型
apiVersion: apps/v1       # 版本
metadata:                 # 元数据
  name: mydeploy          # 名称
spec:                     # 详细定义
  replicas: 3             # 副本数量
  selector:               # 定义标签选择器
    matchLabels:          # 支持 matchExpressions 表达式语法
      app: deploy-httpd   # 通过标签来确定那个 Pod 由它来管理
  template:               # 定义用来创建 Pod 的模板,以下为 Pod 定义
    metadata:
      labels:
        app: deploy-httpd
    spec:
      containers:
      - name: apache
        image: myos:httpd
配置案例
# 创建控制器
[root@master ~]# kubectl apply -f mydeploy.yaml 
deployment.apps/mydeploy created

[root@master ~]# kubectl get deployments
NAME       READY   UP-TO-DATE   AVAILABLE   AGE
mydeploy   3/3     3            3           1s

# 控制器自动创建 ReplicaSet
[root@master ~]# kubectl get replicasets 
NAME                  DESIRED   CURRENT   READY   AGE
mydeploy-76f96b85df   3         3         3       2s

# 控制器自动创建 Pod
[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-5gng9   1/1     Running   0          3s
mydeploy-76f96b85df-vsfrw   1/1     Running   0          3s
mydeploy-76f96b85df-z9x95   1/1     Running   0          3s

# 集群自维护自治理
[root@master ~]# kubectl delete pod --all 
pod "mydeploy-76f96b85df-5gng9" deleted
pod "mydeploy-76f96b85df-vsfrw" deleted
pod "mydeploy-76f96b85df-z9x95" deleted

# 删除后自动重新创建
[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-7dvwh   1/1     Running   0          7s
mydeploy-76f96b85df-kpbz4   1/1     Running   0          7s
mydeploy-76f96b85df-kr2zq   1/1     Running   0          7s
集群服务

# 创建集群服务
[root@master ~]# vim websvc.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80
  selector:
    app: deploy-httpd
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80

[root@master ~]# kubectl replace --force -f websvc.yaml 
service/websvc replaced
[root@master ~]# curl -m 3 http://10.245.1.80
Welcome to The Apache.
集群扩缩容

#抽象来说,扩容就是增加 Pod 副本的数量,让更多副本分担访问压力,类似吃鸡游戏里的扩容弹夹。

而缩容则是在负载下降后减少副本数量,回收多余资源、达到更佳的运行效率,类似古代的增兵减灶。

# 集群扩容
[root@master ~]# kubectl scale deployment mydeploy --replicas 10
deployment.apps/mydeploy scaled

[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-kg27l   1/1     Running   0          6s
mydeploy-76f96b85df-q5fzb   1/1     Running   0          6s
mydeploy-76f96b85df-rxhp4   1/1     Running   0          6s
mydeploy-76f96b85df-szf69   1/1     Running   0          6s
mydeploy-76f96b85df-tp2xj   1/1     Running   0          6s
......

# 集群缩容
[root@master ~]# kubectl scale deployment mydeploy --replicas=2
deployment.apps/mydeploy scaled

[root@master ~]# kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
mydeploy-76f96b85df-7dvwh   1/1     Running   0          51s
mydeploy-76f96b85df-kr2zq   1/1     Running   0          51s
历史版本信息
# 查看历史版本
[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         <none>

# 添加注释信息
[root@master ~]# kubectl annotate deployments mydeploy kubernetes.io/change-cause="httpd.v1"
deployment.apps/mydeploy annotated

[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         httpd.v1

# 更新资源清单文件
[root@master ~]# vim mydeploy.yaml
# 在创建容器的镜像下面添加
        imagePullPolicy: Always

[root@master ~]# kubectl apply -f mydeploy.yaml
deployment.apps/mydeploy configured

# 更新版本信息
[root@master ~]# kubectl annotate deployments mydeploy kubernetes.io/change-cause="httpd.v2"
deployment.apps/mydeploy annotated

[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         httpd.v1
2         httpd.v2
滚动更新
# 修改镜像,滚动更新集群
[root@master ~]# kubectl set image deployment mydeploy apache=myos:nginx
deployment.apps/mydeploy image updated

# 给新版本添加注释信息
[root@master ~]# kubectl annotate deployments mydeploy kubernetes.io/change-cause="nginx.v1"
deployment.apps/mydeploy annotated

# 查看历史版本信息
[root@master ~]# kubectl rollout history deployment mydeploy
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
1         httpd.v1
2         httpd.v2
3         nginx.v1

# 访问验证服务
[root@master ~]# curl -m 3 http://10.245.1.80
Nginx is running !
版本回滚

#类似游戏里面的怀旧服,而这里的版本回滚是用于恢复数据

# 历史版本与回滚
[root@master ~]# kubectl rollout undo deployment mydeploy --to-revision 1
deployment.apps/mydeploy rolled back

[root@master ~]# kubectl rollout history deployment mydeploy 
deployment.apps/mydeploy 
REVISION  CHANGE-CAUSE
2         httpd.v2
3         nginx.v1
4         httpd.v1

[root@master ~]# curl -m 3 http://10.245.1.80
Welcome to The Apache.

清理资源对象
# 删除控制器时会自动回收自己创建的 Pod
[root@master ~]# kubectl delete deployments mydeploy
deployment.apps "mydeploy" deleted

二、DaemonSet

配置案例
[root@master ~]# cp -a mydeploy.yaml myds.yaml
[root@master ~]# vim myds.yaml
---
kind: DaemonSet         # 资源对象类型
apiVersion: apps/v1
metadata:
  name: myds            # 控制器名称
spec:
  # replicas: 2         # 删除副本参数
  selector:
    matchLabels:
      app: ds-httpd     # 修改标签防止冲突
  template:
    metadata:
      labels:
        app: ds-httpd   # 修改标签防止冲突
    spec:
      containers:
      - name: apache
        image: myos:httpd
        imagePullPolicy: Always

[root@master ~]# kubectl apply -f myds.yaml 
daemonset.apps/myds created
[root@master ~]# kubectl get pods -o wide
NAME         READY   STATUS    RESTARTS   IP            NODE
myds-msrcx   1/1     Running   0          10.244.1.11   node-0001
myds-lwq8l   1/1     Running   0          10.244.2.17   node-0002
myds-4wt72   1/1     Running   0          10.244.3.14   node-0003
myds-6k82t   1/1     Running   0          10.244.4.15   node-0004
myds-9c6wc   1/1     Running   0          10.244.5.19   node-0005
清理资源对象
# 删除控制器
[root@master ~]# kubectl delete daemonsets myds
daemonset.apps "myds" deleted

三、Job、CronJob

 

 Job 控制器

# 资源文件模板
[root@master ~]# kubectl create job myjob --image=myos:8.5 --dry-run=client -o yaml -- sleep 3
[root@master ~]# vim myjob.yaml 
---
kind: Job
apiVersion: batch/v1
metadata:
  name: myjob
spec:
  template:  # 以下定义 Pod 模板
    metadata: {}
    spec:
      restartPolicy: OnFailure
      containers:
      - name: myjob
        image: myos:8.5
        command: ["/bin/sh"]
        args:
        - -c
        - |
          sleep 3
          exit $((RANDOM%2))

[root@master ~]# kubectl apply -f myjob.yaml 
job.batch/myjob created

# 失败了会重启
[root@master ~]# kubectl get pods -l job-name=myjob -w
NAME             READY   STATUS      RESTARTS     AGE
myjob--1-lrtbk   1/1     Running     0            2s
myjob--1-lrtbk   0/1     Error       0            4s
myjob--1-lrtbk   1/1     Running     1 (1s ago)   5s
myjob--1-lrtbk   0/1     Completed   1            9s

[root@master ~]# kubectl get jobs.batch 
NAME    COMPLETIONS   DURATION   AGE
myjob   1/1           8s         12s

# 删除Job控制器
[root@master ~]# kubectl delete -f myjob.yaml 
job.batch "myjob" deleted

#pod控制器创建失败,任务会确保创建成功而重启,避免失败

Cronjob

#类似ansible中的crontab模块,可以定时执行某一任务
配置案例
# 资源对象模板
[root@master ~]# kubectl create cronjob mycj --image=myos:8.5 --schedule='* * * * *' --dry-run=client -o yaml -- sleep 3
[root@master ~]# vim mycj.yaml
---
kind: CronJob
apiVersion: batch/v1
metadata:
  name: mycj
spec:
  schedule: "* * * * *"
  jobTemplate:  # 以下定义 Job 模板
    metadata: {}
    spec:
      template:
        metadata: {}
        spec:
          restartPolicy: OnFailure
          containers:
          - name: myjob
            image: myos:8.5
            command: ["/bin/sh"]
            args:
            - -c
            - |
              sleep 3
              exit $((RANDOM%2))

[root@master ~]# kubectl apply -f mycj.yaml 
cronjob.batch/mycj created
[root@master ~]# kubectl get cronjobs 
NAME   SCHEDULE        SUSPEND   ACTIVE   LAST SCHEDULE   AGE
mycj   * * * * *     False     0        <none>          4s

# 按照时间周期,每分钟触发一个任务
[root@master ~]# kubectl get jobs -w
NAME                     READY   STATUS              RESTARTS
mycj-27808172--1-w6sbx   0/1     Pending             0
mycj-27808172--1-w6sbx   0/1     ContainerCreating   0
mycj-27808172--1-w6sbx   1/1     Running             0
mycj-27808172--1-w6sbx   0/1     Completed           1

# 保留三次结果,多余的会被删除
[root@master ~]# kubectl get jobs 
NAME            COMPLETIONS   DURATION   AGE
mycj-27605367   1/1           31s        3m30s
mycj-27605368   1/1           31s        2m30s
mycj-27605369   1/1           31s        90s
mycj-27605370   0/1           30s        30s

[root@master ~]# kubectl get jobs 
NAME            COMPLETIONS   DURATION   AGE
mycj-27605368   1/1           31s        2m33s
mycj-27605369   1/1           31s        93s
mycj-27605370   1/1           31s        33s

# 删除CJ控制器
[root@master ~]# kubectl delete -f mycj.yaml 
cronjob.batch "mycj" deleted

四、StatefulSet

Headless 服务
[root@master ~]# cp websvc.yaml stssvc.yaml 
[root@master ~]# vim stssvc.yaml 
---
kind: Service
apiVersion: v1
metadata:
  name: stssvc          # 服务名称
spec:
  type: ClusterIP
  clusterIP: None       # 设置 IP 为 None
  selector:
    app: sts-httpd      # 设置 Pod 标签
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80

[root@master ~]# kubectl apply -f stssvc.yaml 
service/stssvc created

[root@master ~]# kubectl get services stssvc
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
stssvc       ClusterIP   None          <none>        80/TCP    51s
资源清单文件
[root@master ~]# cp -a mydeploy.yaml mysts.yaml
[root@master ~]# vim mysts.yaml
---
kind: StatefulSet       # 资源对象类型
apiVersion: apps/v1
metadata:
  name: mysts           # 控制器名称
spec:
  serviceName: stssvc   # 新增 headless 服务名称
  replicas: 3
  selector:
    matchLabels:
      app: sts-httpd    # 修改标签防止冲突
  template:
    metadata:
      labels:
        app: sts-httpd  # 修改标签防止冲突
    spec:
      containers:
      - name: apache
        image: myos:httpd
配置案例
# statefulset 主要解决了 Pod 创建顺序的问题
# statefulset 主要解决了访问指定 Pod 的问题
[root@master ~]# kubectl apply -f mysts.yaml 
statefulset.apps/mysts created

[root@master ~]# kubectl get pods
NAME      READY   STATUS    RESTARTS   AGE
mysts-0   1/1     Running   0          3s
mysts-1   1/1     Running   0          2s
mysts-2   1/1     Running   0          1s

# 所有 Pod IP 地址
[root@master ~]# host stssvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases: 
stssvc.default.svc.cluster.local has address 10.244.1.81
stssvc.default.svc.cluster.local has address 10.244.2.82
stssvc.default.svc.cluster.local has address 10.244.3.83

# 单个 Pod IP 地址
[root@master ~]# host mysts-0.stssvc.default.svc.cluster.local 10.245.0.10
Using domain server:
Name: 10.245.0.10
Address: 10.245.0.10#53
Aliases: 
mysts-0.stssvc.default.svc.cluster.local has address 10.244.1.81

# 删除sts控制器
[root@master ~]# kubectl delete -f mysts.yaml -f stssvc.yaml
statefulset.apps "mysts" deleted
service "stssvc" deleted
弹性云服务

五、HorizontalPodAutoscaler

 配置后端服务

# 为 Deploy 模板添加资源配额
[root@master ~]# cat mydeploy.yaml websvc.yaml >mycluster.yaml
[root@master ~]# vim mycluster.yaml 
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: mydeploy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: deploy-httpd
  template:
    metadata:
      labels:
        app: deploy-httpd
    spec:
      containers:
      - name: apache
        image: myos:httpd
        resources:           # 为该资源设置配额
          requests:          # HPA 控制器会根据配额使用情况伸缩集群
            cpu: 300m        # CPU 配额

---
kind: Service
apiVersion: v1
metadata:
  name: websvc
spec:
  type: ClusterIP
  clusterIP: 10.245.1.80
  selector:
    app: deploy-httpd
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80

[root@master ~]# kubectl replace --force -f mycluster.yaml
deployment.apps/mydeploy replaced
service/websvc replaced

# 验证服务
[root@master ~]# kubectl top pods
NAME                       CPU(cores)   MEMORY(bytes)   
mydeploy-b4f9dc786-w4x2z   6m           18Mi            

[root@master ~]# curl -s http://10.245.1.80/info.php
<pre>
Array
(
    [REMOTE_ADDR] => 10.244.219.64
    [REQUEST_METHOD] => GET
    [HTTP_USER_AGENT] => curl/7.61.1
    [REQUEST_URI] => /info.php
)
php_host:   mydeploy-b4f9dc786-w4x2z
1229

HPA 控制器

[root@master ~]# vim myhpa.yaml 
---
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2
metadata:
  name: myhpa
spec:
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 60
  scaleTargetRef:
    kind: Deployment
    apiVersion: apps/v1
    name: mydeploy
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50

[root@master ~]# kubectl apply -f myhpa.yaml 
horizontalpodautoscaler.autoscaling/myhpa created

# 刚刚创建 unknown 是正常现象,最多等待 60s 就可以正常获取数据
[root@master ~]# kubectl get horizontalpodautoscalers
NAME    REFERENCE             TARGETS         MINPODS   MAXPODS   REPLICAS
myhpa   Deployment/mydeploy   <unknown>/50%   1         5         0

[root@master ~]# kubectl get horizontalpodautoscalers
NAME    REFERENCE             TARGETS   MINPODS   MAXPODS   REPLICAS
myhpa   Deployment/mydeploy   0%/50%    1         5         3
配置案例

# 终端 1 访问提高负载
[root@master ~]# while sleep 1;do curl -s "http://10.245.1.80/info.php?id=100000" -o /dev/null; done &
# 终端 2 监控 HPA 变化
[root@master ~]# kubectl get hpa -w
NAME    REFERENCE             TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
myhpa   Deployment/mydeploy   0%/50%    1         5         1          1m
myhpa   Deployment/mydeploy   31%/50%   1         5         1          2m
myhpa   Deployment/mydeploy   70%/50%   1         5         1          2m15s
myhpa   Deployment/mydeploy   72%/50%   1         5         2          2m30s
myhpa   Deployment/mydeploy   36%/50%   1         5         2          2m45s
myhpa   Deployment/mydeploy   55%/50%   1         5         2          3m
myhpa   Deployment/mydeploy   58%/50%   1         5         3          3m15s
myhpa   Deployment/mydeploy   39%/50%   1         5         3          3m30s
... ...
myhpa   Deployment/mydeploy   66%/50%   1         5         4          5m
myhpa   Deployment/mydeploy   68%/50%   1         5         5          5m15s
myhpa   Deployment/mydeploy   55%/50%   1         5         5          5m30s
myhpa   Deployment/mydeploy   58%/50%   1         5         5          5m45s
myhpa   Deployment/mydeploy   62%/50%   1         5         5          6m

# 如果 60s 内平均负载小于标准值,就会自动缩减集群规模
[root@master ~]# kubectl get hpa -w
NAME    REFERENCE             TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
myhpa   Deployment/mydeploy   52%/50%   1         5         5          13m
myhpa   Deployment/mydeploy   44%/50%   1         5         5          13m15s
myhpa   Deployment/mydeploy   38%/50%   1         5         5          13m30s
myhpa   Deployment/mydeploy   35%/50%   1         5         5          13m45s
myhpa   Deployment/mydeploy   28%/50%   1         5         5          14m
... ...
myhpa   Deployment/mydeploy   8%/50%    1         5         5          18m30s
myhpa   Deployment/mydeploy   9%/50%    1         5         4          18m45s
myhpa   Deployment/mydeploy   9%/50%    1         5         4          19m
myhpa   Deployment/mydeploy   12%/50%   1         5         3          19m15s
myhpa   Deployment/mydeploy   15%/50%   1         5         3          19m30s
myhpa   Deployment/mydeploy   18%/50%   1         5         2          19m45s
myhpa   Deployment/mydeploy   33%/50%   1         5         1          20m

课后总结:

#我们本节学的好多控制器,都有差异和区别,可以按照类似以下的提示词,来对AI提问,得到更加符合工作使用环境的回答。


至此云计算cloud二周目内容更新完毕!

大家有想练习的,可以去华为云、阿里云等云平台,创建帐号,使用30天免费体验版云产品

熟悉相关云产品的使用与配置,里面也有一些项目的免费体验课,可以照着案例学基本项目架构

下一阶段,将重回网络阶段,深入了解云计算与云原生领域的网络架构知识。

                        

下个阶段见!!!