Deploying k8s 1.30.x on Ubuntu 24 with Docker as the underlying runtime

Published: 2025-08-06

Hosts:
192.168.109.11 k8s-master
192.168.109.12 k8s-node1
192.168.109.13 k8s-node2

References:

Ubuntu安装 kubeadm 部署k8s 1.30 - 星的博客
Ubuntu K8S完全安装指南2025最新版!(小白也能学会,超详细) - 掘金
ubuntu 24.04 安装 k8s 1.30.x 底层走docker容器 - bilibili
https://blog.csdn.net/hanhanduizhang/article/details/149200427

1. Initialization

1 Configure a static IP address

cat  /etc/netplan/50-cloud-init.yaml
network:
  version: 2
  ethernets:
    ens160:  # note: use your actual NIC name
      dhcp4: false
      dhcp6: false

      addresses: [192.168.109.11/22]  
      optional: true

      routes:
        - to: default
          via: 192.168.110.1

      nameservers:
        addresses: [114.114.114.114,8.8.8.8]
root@k8s-master:~# netplan apply
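
A quick check that the new address and default route took effect (assuming the NIC is ens160, as above):

ip addr show ens160      # should list 192.168.109.11/22
ip route | grep default  # should show: default via 192.168.110.1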

2 Enable remote root login and disable the firewall

sudo passwd root
su - root   # enter the password you just set; log out, and you can log in as root next time

# disable the firewall
systemctl status ufw.service
systemctl stop ufw.service
systemctl disable ufw.service   # keep it off across reboots

# if SSH has root login disabled, enable it
Edit /etc/ssh/sshd_config and set:

PermitRootLogin yes

Then restart the service:
systemctl restart ssh

3 Configure the Aliyun apt mirror

cd  /etc/apt/sources.list.d/
cp ubuntu.sources ubuntu.sources.bak

vim ubuntu.sources

Types: deb deb-src
URIs: https://mirrors.aliyun.com/ubuntu/
Suites: noble noble-security noble-updates noble-proposed noble-backports
Components: main restricted universe multiverse
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg

$ sudo apt update 
$ sudo apt upgrade

4 Set the hostname

# change the hostname
sudo hostnamectl set-hostname <hostname>
# refresh the hostname without rebooting
sudo hostname -F /etc/hostname

cat >> /etc/hosts << EOF
192.168.109.11 k8s-master
192.168.109.12 k8s-node1
192.168.109.13 k8s-node2
EOF
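
A quick check that the /etc/hosts entries resolve (run on each of the three machines):

ping -c 1 k8s-master
ping -c 1 k8s-node1
ping -c 1 k8s-node2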

5 Time synchronization

timedatectl set-timezone Asia/Shanghai
sudo apt install -y ntpsec-ntpdate
ntpdate ntp.aliyun.com

crontab -e   # create a cron job; choose an editor when prompted
0 0 * * * ntpdate ntp.aliyun.com
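
To confirm the timezone and the cron entry took effect:

timedatectl    # Time zone should show Asia/Shanghai
crontab -l     # should list the ntpdate job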

6 Configure kernel parameters

cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.netfilter.nf_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
# apply immediately (the net.bridge.* keys only take effect after br_netfilter is loaded in the next step)
sysctl --system

# 1. Load br_netfilter so that bridged traffic is seen by iptables
sudo modprobe br_netfilter

# 2. Make sure the module is loaded automatically on boot
echo "br_netfilter" | sudo tee /etc/modules-load.d/k8s.conf

# 3. Re-apply the sysctl settings; the bridge parameters were already written to
#    /etc/sysctl.d/k8s.conf above, so there is no need to write that file a second time
sudo sysctl --system

# 4. Verify
ls /proc/sys/net/bridge/                           # should list bridge-nf-call-iptables
cat /proc/sys/net/bridge/bridge-nf-call-iptables   # should print 1

# 5. Turn off swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
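
A final sanity check for this step:

free -h                       # the Swap line should show 0B
swapon --show                 # should print nothing
sysctl net.ipv4.ip_forward    # should report net.ipv4.ip_forward = 1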

7 Install packages

apt-get install -y conntrack ipvsadm ipset jq iptables curl sysstat wget vim net-tools git psmisc socat telnet lvm2 tar


mkdir -p /etc/sysconfig/modules/
cat > /etc/sysconfig/modules/ipvs.modules << EOF 
#!/bin/bash 
modprobe -- ip_vs 
modprobe -- ip_vs_rr 
modprobe -- ip_vs_wrr 
modprobe -- ip_vs_sh 
modprobe -- nf_conntrack
EOF 

chmod 755 /etc/sysconfig/modules/ipvs.modules 
bash /etc/sysconfig/modules/ipvs.modules 
lsmod | grep -e ip_vs -e nf_conntrack
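
Note that /etc/sysconfig/modules/*.modules is a RHEL convention; nothing on Ubuntu executes it at boot, so the script above only loads the modules for the current session. A minimal way to make the IPVS modules persistent on Ubuntu (the file name ipvs.conf is my own choice) is:

cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF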

8 Install Docker

# step 1: install required system tools
sudo apt-get update
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
# step 2: install the GPG key
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
# Step 3: add the Docker repository
sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
# Step 4: update apt and install Docker CE
sudo apt-get -y update
sudo apt-get -y install docker-ce


apt-cache madison docker-ce   ### list all installable versions
### a specific version can be installed instead; use a version string that apt-cache madison actually shows for noble (the bionic example below is from an older Ubuntu release)
apt-get install docker-ce=5:20.10.24~3-0~ubuntu-bionic  docker-ce-cli=5:20.10.24~3-0~ubuntu-bionic

Configure the registry mirrors (and the systemd cgroup driver) in /etc/docker/daemon.json:
cat /etc/docker/daemon.json
{  
  "registry-mirrors": [  
    "https://docker.1ms.run",  
    "https://doublezonline.cloud",  
    "https://dislabaiot.xyz",  
    "https://docker.fxxk.dedyn.io",  
    "https://dockerpull.org",  
    "https://docker.unsee.tech",  
    "https://hub.rat.dev",  
    "https://docker.1panel.live",  
    "https://docker.nastool.de",  
    "https://docker.zhai.cm",  
    "https://docker.5z5f.com",  
    "https://a.ussh.net",  
    "https://docker.udayun.com",  
    "https://hub.geekery.cn"  
  ],
  "insecure-registries": ["kubernetes-register.sswang.com"],
  "exec-opts": [  
    "native.cgroupdriver=systemd"  
  ]  
}

systemctl daemon-reload  
systemctl restart docker
systemctl enable docker
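
After the restart, confirm that the daemon picked up the settings from daemon.json:

docker info | grep -i "cgroup driver"      # should report: Cgroup Driver: systemd
docker info | grep -A3 "Registry Mirrors"  # should list the mirrors from daemon.json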

9 Install cri-dockerd

wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.13/cri-dockerd-0.3.13.amd64.tgz

tar -zxvf cri-dockerd-*.amd64.tgz
cp cri-dockerd/cri-dockerd /usr/bin/
chmod +x /usr/bin/cri-dockerd
 
# write the systemd service unit (quote the heredoc delimiter so $MAINPID is not expanded by the shell)
cat >  /etc/systemd/system/cri-docker.service <<'EOF'
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
 
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
 
StartLimitBurst=3
 
StartLimitInterval=60s
 
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
 
TasksMax=infinity
Delegate=yes
KillMode=process
 
[Install]
WantedBy=multi-user.target
EOF
 
# write the socket unit
cat > /etc/systemd/system/cri-docker.socket <<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
 
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
 
[Install]
WantedBy=sockets.target
EOF
 
# enable and start cri-docker
systemctl daemon-reload ; systemctl enable cri-docker --now

# check that it is running
root@k8s-master:~# ps -ef |grep cri-docker
root      116533       1  0 10:41 ?        00:00:00 /usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9
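
The CRI socket that kubeadm will use should also exist at this point:

systemctl status cri-docker.socket --no-pager
ls -l /var/run/cri-dockerd.sock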

2 Install k8s

1. Install via apt

Use the Aliyun mirror:
apt-get update && apt-get install -y apt-transport-https
mkdir -p /etc/apt/keyrings
curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/Release.key |
    gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/ /" |
    tee /etc/apt/sources.list.d/kubernetes.list

apt-get update
apt-get install -y kubelet kubeadm kubectl

Hold the packages so apt does not upgrade them automatically:
apt-mark hold kubelet kubeadm kubectl 

To keep the kubelet's cgroup driver consistent with the one Docker uses (systemd), it is recommended to set the following:

# vim /etc/default/kubelet    (Ubuntu location; on RHEL-family systems it is /etc/sysconfig/kubelet)
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"

Enable kubelet to start on boot. Since no config file has been generated yet, it will only start properly after the cluster is initialized, at which point it starts automatically.
# systemctl enable kubelet

# pre-pull the component images from the Aliyun registry
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --cri-socket=unix:///var/run/cri-dockerd.sock
# list the images required by k8s 1.30.14
root@k8s-master:~# kubeadm config images list 
I0805 11:41:27.661346  118401 version.go:256] remote version is much newer: v1.33.3; falling back to: stable-1.30
registry.k8s.io/kube-apiserver:v1.30.14
registry.k8s.io/kube-controller-manager:v1.30.14
registry.k8s.io/kube-scheduler:v1.30.14
registry.k8s.io/kube-proxy:v1.30.14
registry.k8s.io/coredns/coredns:v1.11.3
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.15-0
root@k8s-master:~# 
# pull succeeded
root@k8s-master:~# kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --cri-socket=unix:///var/run/cri-dockerd.sock
I0805 11:29:34.595821  118035 version.go:256] remote version is much newer: v1.33.3; falling back to: stable-1.30
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.30.14
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.30.14
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.30.14
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.30.14
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:v1.11.3
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.9
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.5.15-0
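
Optionally confirm the images are now in the local Docker image store:

docker images | grep registry.aliyuncs.com/google_containers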

2. Initialize the cluster

Command:
kubeadm init --kubernetes-version=v1.30.14 --pod-network-cidr=10.224.0.0/16 --apiserver-advertise-address=192.168.109.11 --image-repository registry.aliyuncs.com/google_containers  --cri-socket=unix:///var/run/cri-dockerd.sock

Output:
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.109.11:6443 --token g3xm.gifdl7r \
	--discovery-token-ca-cert-hash sha256:14698d75asfej34t3asd3a9a60 
root@k8s-master:~# 


On the master node, run:
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
  export KUBECONFIG=/etc/kubernetes/admin.conf
  
Join the other two nodes to the cluster (note the extra --cri-socket flag):
kubeadm join 192.168.109.11:6443 --token g3xlqm.gifdt4r92lvial7r \
	--discovery-token-ca-cert-hash sha256:14698d75asfej34t3asd3a9a60   --cri-socket=unix:///var/run/cri-dockerd.sock
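
If the bootstrap token has expired by the time a node joins (tokens are valid for 24 hours by default), print a fresh join command on the master:

kubeadm token create --print-join-command
# append --cri-socket=unix:///var/run/cri-dockerd.sock to the printed command before running it on a node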


# check the cluster status:
root@k8s-master:/opt/calico# kubectl get no
NAME         STATUS     ROLES           AGE    VERSION
k8s-master   NotReady   control-plane   108m   v1.30.14
k8s-node1    NotReady   <none>          17m    v1.30.14
k8s-node2    NotReady   <none>          17m    v1.30.14
root@k8s-master:/opt/calico# 

3 Install the Calico network plugin

wget --no-check-certificate https://raw.githubusercontent.com/projectcalico/calico/v3.27.3/manifests/calico.yaml
vim calico.yaml   # add/uncomment the following; note the NIC name

...
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens160" 

- name: CALICO_IPV4POOL_CIDR
  value: "10.224.0.0/16"   # must match the --pod-network-cidr passed to kubeadm init
...


Pull the Calico images (run the pulls on every node; imagePullPolicy is IfNotPresent, so locally cached images are used):
root@k8s-master:/opt/calico# cat calico.yaml |grep image
          image: docker.io/calico/cni:v3.27.3
          imagePullPolicy: IfNotPresent
          image: docker.io/calico/cni:v3.27.3
          imagePullPolicy: IfNotPresent
          image: docker.io/calico/node:v3.27.3
          imagePullPolicy: IfNotPresent
          image: docker.io/calico/node:v3.27.3
          imagePullPolicy: IfNotPresent
          image: docker.io/calico/kube-controllers:v3.27.3
          imagePullPolicy: IfNotPresent
          
docker pull docker.io/calico/cni:v3.27.3
docker pull docker.io/calico/node:v3.27.3
docker pull docker.io/calico/kube-controllers:v3.27.3

kubectl create -f calico.yaml
root@k8s-master:/opt/calico# kubectl get no
NAME         STATUS   ROLES           AGE    VERSION
k8s-master   Ready    control-plane   127m   v1.30.14
k8s-node1    Ready    <none>          35m    v1.30.14
k8s-node2    Ready    <none>          35m    v1.30.14
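
The Calico pods themselves can also be checked:

kubectl get pods -n kube-system | grep calico   # calico-node on every node and calico-kube-controllers should be Running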

4 Test

root@k8s-master:~# cat nginx.yaml 
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginxweb
  annotations:
    abc: test
spec:
  selector:
    matchLabels:
      app: nginxweb1
  replicas: 2
  template:
    metadata:
      labels:
        app: nginxweb1
    spec:
      containers:
        - name: nginxwebc
          image: nginx:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginxweb-service
spec:
  externalTrafficPolicy: Cluster
  selector:
    app: nginxweb1
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
      nodePort: 30180
  type: NodePort


kubectl create -f nginx.yaml
root@k8s-master:~# kubectl get po -o wide
NAME                        READY   STATUS    RESTARTS   AGE   IP               NODE        NOMINATED NODE   READINESS GATES
nginxweb-55dcdbb446-f8zsb   1/1     Running   0          13s   10.224.169.129   k8s-node2   <none>           <none>
nginxweb-55dcdbb446-w47j9   1/1     Running   0          13s   10.224.36.65     k8s-node1   <none>           <none>
root@k8s-master:~# kubectl get svc
NAME               TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes         ClusterIP   10.96.0.1      <none>        443/TCP        137m
nginxweb-service   NodePort    10.96.85.254   <none>        80:30180/TCP   19s

Visit any node's IP on port 30180 in a browser; the nginx page is served from every node.
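
The same check from the command line, using the node IPs of this setup:

curl -I http://192.168.109.11:30180   # expect HTTP/1.1 200 OK
curl -I http://192.168.109.12:30180
curl -I http://192.168.109.13:30180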

5 Install ingress-nginx with Helm

curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3  
chmod 700 get_helm.sh  
./get_helm.sh

helm repo add ingress-nginx "https://helm-charts.itboon.top/ingress-nginx"
helm search repo ingress-nginx/ingress-nginx --versions
helm pull ingress-nginx/ingress-nginx --version 4.13.0
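
The guide stops after pulling the chart; a minimal sketch of actually installing it (the ingress-nginx namespace and the NodePort service type are my own choices here, adjust the values for your environment):

helm install ingress-nginx ingress-nginx/ingress-nginx \
  --namespace ingress-nginx --create-namespace \
  --version 4.13.0 \
  --set controller.service.type=NodePort

kubectl get pods -n ingress-nginx   # the controller pod should be Running
kubectl get svc -n ingress-nginx    # note the assigned NodePorts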
