基于centos7.9使用shell脚本部署k8s1.25平台

发布于:2024-11-28 ⋅ 阅读:(13) ⋅ 点赞:(0)

使用脚本部署k8s1.25版本平台,网络插件使用flannel,容器运行时使用containerd,部署包括harbor仓库、服务网格istio、kubevirt服务等

使用的centos7.9资源配置如下:

主机 IP 资源
master 192.168.200.100 4C_8G_100G
node 192.168.200.101 4C_8G_100G

环境初始化

两个节点分别修改各自的主机名

# Set each node's hostname (run the first command on master, the second on node).
hostnamectl set-hostname master
hostnamectl set-hostname node
# Append name resolution for both cluster nodes (run on every node).
cat >> /etc/hosts <<eof
192.168.200.100 master
192.168.200.101 node
eof

master节点配置centos源

# Mount the CentOS installation media to serve as a local package repository.
mkdir -p /opt/centos
# Fixed: the original target had a stray double slash ("//opt/centos/").
mount /dev/sr0 /opt/centos/
# Remove the default online repos; only the local/offline repos will be used.
rm -rf /etc/yum.repos.d/*

配置repo仓库

vi /etc/yum.repos.d/local.repo
# --- contents of /etc/yum.repos.d/local.repo ---
# [local]: CentOS media mounted at /opt/centos; [k8s]: offline k8s packages.
[local]
name=local
gpgcheck=0
enabled=1
baseurl=file:///opt/centos
[k8s]
name=k8s
gpgcheck=0
enabled=1
baseurl=file:///opt/kubernetes-repo

安装Harbor

# Mount the offline Kubernetes ISO and copy its contents into /opt.
mount kubernetes_v2.1.iso /mnt/
cp -rf /mnt/* /opt/
cd /opt/
#!/bin/bash

# --- Passwordless SSH setup --------------------------------------------------
# Distribute this host's SSH public key to every IP/hostname in /etc/hosts
# (excluding the IPv6 loopback entry), answering prompts via `expect`.
ALL_SERVER_ROOT_PASSWORD=000000
all_hosts=$(awk '{print $1}' /etc/hosts | sed '/::1/d' | sort -u)
all_hostname=$(awk '{print $2}' /etc/hosts | sort -u)
a_hosts="$all_hosts  $all_hostname"
# Local addresses, used to exclude ourselves from the remote-host list.
my_ip=$(ip a | grep -w "inet" | awk '{print $2}' | sed 's/\/.*//g')
# Exact-match filtering. Fixed: the original `sed "s/$i//g"` deleted the local
# IP as a *substring*, corrupting the list when one address is a prefix of
# another (e.g. 192.168.200.10 vs 192.168.200.100).
other_ip=""
for h in $all_hosts; do
    skip=0
    for i in $my_ip; do
        [ "$h" = "$i" ] && skip=1
    done
    [ "$skip" -eq 0 ] && other_ip="$other_ip $h"
done

yum install -y expect
# Generate a key pair only if one does not already exist.
if [[ ! -s ~/.ssh/id_rsa.pub ]]; then
    ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa -q -b 2048
fi
for hosts in $a_hosts; do
    if ! ping "$hosts" -c 4 >> /dev/null 2>&1; then
        echo -e "\033[31mWarning\n$hosts IP unreachable!\033[0m"
    fi
    # Answer the host-key confirmation and password prompts non-interactively.
    expect -c "set timeout -1;
    spawn ssh-copy-id  -i /root/.ssh/id_rsa  $hosts ;
    expect {
        *(yes/no)* {send -- yes\r;exp_continue;}
        *assword:* {send -- $ALL_SERVER_ROOT_PASSWORD\r;exp_continue;}
        eof        {exit 0;}
    }";
done


# --- Time synchronization (chrony); this node serves as the NTP source -------
IP=`ip addr | grep 'state UP' -A2 | grep inet | egrep -v '(127.0.0.1|inet6|docker)' | awk '{print $2}' | tr -d "addr:" | head -n 1 | cut -d / -f1`
yum install -y chrony
# Comment out the default upstream servers (lines 3-6 of the stock
# chrony.conf) and insert this host as the NTP server on line 7.
# NOTE(review): line-number-based edits assume the stock CentOS 7 chrony.conf
# layout — verify if the base image changes.
sed -i '3,6s/^/#/g' /etc/chrony.conf
sed -i "7s|^|server $IP iburst|g" /etc/chrony.conf
echo "allow all" >> /etc/chrony.conf
echo "local stratum 10" >> /etc/chrony.conf
systemctl restart chronyd
systemctl enable chronyd
timedatectl set-ntp true
sleep 5
systemctl restart chronyd
chronyc sources


# --- Disable firewalld and SELinux -------------------------------------------
systemctl stop firewalld && systemctl disable firewalld
sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
setenforce 0

# --- Disable swap (required by kubelet) and load required kernel modules -----
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
echo -e "nameserver 114.114.114.114" > /etc/resolv.conf
# overlay + br_netfilter are needed by containerd and bridged pod traffic.
echo -e "overlay\nbr_netfilter" > /etc/modules-load.d/containerd.conf
modprobe -- overlay
modprobe -- br_netfilter
cat > /etc/sysctl.d/kubernetes.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/kubernetes.conf

# --- Install Docker CE -------------------------------------------------------
yum install -y yum-utils device-mapper-persistent-data lvm2
yum install -y docker-ce
systemctl enable docker
systemctl start docker

# Switch Docker's cgroup driver to systemd (must match the kubelet) and allow
# plain-HTTP registries.
# NOTE(review): "0.0.0.0/0" trusts *every* registry over HTTP — acceptable for
# an offline lab, unsafe on a routable network.
tee /etc/docker/daemon.json <<EOF
{
  "insecure-registries" : ["0.0.0.0/0"],
  "registry-mirrors": ["https://d8b3zdiw.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker

# --- Install docker-compose from the offline bundle --------------------------
chmod +x /opt/docker-compose/v2.10.2-docker-compose-linux-x86_64
mv /opt/docker-compose/v2.10.2-docker-compose-linux-x86_64 /usr/local/bin/docker-compose

# --- Load the offline image tarballs -----------------------------------------
for i in $(ls /opt/images|grep tar)
do
  docker load -i /opt/images/$i
done

# --- Install the Harbor registry ---------------------------------------------
IP=`ip addr | grep 'state UP' -A2 | grep inet | egrep -v '(127.0.0.1|inet6|docker)' | awk '{print $2}' | tr -d "addr:" | head -n 1 | cut -d / -f1`
cd /opt/harbor/
tar -zxvf harbor-offline-installer-v2.5.3.tgz
cd harbor
mv harbor.yml.tmpl harbor.yml
# Point Harbor at this host (line 5) and comment out the HTTPS section
# (lines 13, 15-18) — the lab serves Harbor over plain HTTP.
sed -i "5s/reg.mydomain.com/${IP}/g" harbor.yml
sed -i "13s/^/#/g" harbor.yml
sed -i "15,18s/^/#/g" harbor.yml
docker load -i harbor.v2.5.3.tar.gz
./prepare || exit
./install.sh || exit
sleep 5
docker-compose ps
echo "请在浏览器通过http://${IP}访问Harbor"
[root@localhost opt]# ./k8s_harbor_install.sh

搭建完成后界面IP访问Harbor:admin/Harbor12345

导入镜像

#!/bin/bash

#--------------------------------------------
## Image upload notes
# The `library` project must already exist in the target registry.
# Adjust the private registry address below to match your environment.
#--------------------------------------------

# Log file: sync_images_YYYY-mm-dd.log in the current working directory.
workdir=$(pwd)
log_file=${workdir}/sync_images_$(date +"%Y-%m-%d").log

# logger MESSAGE — print MESSAGE with a timestamp prefix and append it to the
# log file. Quoting the expansion preserves the message's internal whitespace
# (the original unquoted `echo ${cur_time} ${log}` collapsed repeated spaces).
logger()
{
    local log=$1
    local cur_time
    cur_time='['$(date +"%Y-%m-%d %H:%M:%S")']'
    echo "${cur_time} ${log}" | tee -a "${log_file}"
}

# images_hub — interactively collect the registry address and credentials
# into the globals: registry, registry_user, registry_password.
# Re-prompts until the operator explicitly confirms with Y/y; any other
# answer (including empty input) repeats the questions. The original test
#   [ $confirm != Y ] && [ $confirm != y ] && [ $confirm == '' ]
# crashed on empty input and accepted any non-empty answer such as "N".
images_hub() {

    while true;
    do
        read -p "输入镜像仓库地址(不加http/https): " registry
        read -p "输入镜像仓库用户名: " registry_user
        read -p "输入镜像仓库用户密码: " registry_password
        echo "您设置的仓库地址为: ${registry},用户名: ${registry_user},密码: xxx"
        read -p "是否确认(Y/N): " confirm

        if [[ "$confirm" == [Yy] ]]; then
            break
        else
            echo "输入不能为空,重新输入"
        fi
    done
}

images_hub

# Log in to the registry with the collected credentials.
# NOTE(review): passing the password via -p exposes it in `ps` output;
# consider --password-stdin.
echo "镜像仓库 $(docker login -u ${registry_user} -p ${registry_password} ${registry})"

# Every local image as name:tag, skipping the header row and Harbor's own
# goharbor/* images.
images=$(docker images -a | grep -v TAG | grep -v goharbor | awk '{print $1 ":" $2}')

#images=$(cat library-images.txt )

# Global project: when non-empty, every image is pushed into this single
# project; when empty, each image keeps its own namespace.
global_namespace=library

# docker_push — retag every image listed in $images to $registry and push it.
#
# An image reference has at most three '/'-separated parts:
#   [registry/][project/]name:tag
# The push target is ${registry}/<project>/<name:tag>, where <project> is
# $global_namespace when set, otherwise the project embedded in the image
# name (library for bare names). Anything with more than two '/' is rejected.
# (The original duplicated all four branches for the global/non-global cases
# and pushed bare names twice to library in the non-global case; this version
# pushes each image exactly once.)
docker_push() {
    local imgs slashes img_tag project
    # $images is intentionally unquoted: one whitespace-separated entry each.
    for imgs in ${images}; do
        slashes=$(echo "${imgs}" | awk -F"/" '{print NF-1}')
        # More than registry/project/name:tag is not a valid image reference.
        if [ "${slashes}" -gt 2 ]; then
            echo "No available images"
            continue
        fi

        # The last path component is always the bare name:tag.
        img_tag=${imgs##*/}

        if [[ -n "$global_namespace" ]]; then
            project=${global_namespace}
        else
            case ${slashes} in
                0) project=library ;;                                    # bare name -> library
                1) project=${imgs%%/*} ;;                                # project/name
                2) project=$(echo "${imgs}" | awk -F"/" '{print $2}') ;; # registry/project/name
            esac
        fi

        docker tag "${imgs}" "${registry}/${project}/${img_tag}"
        # Keep the original tag; uncomment to drop it after retagging.
        #docker rmi "${imgs}"
        docker push "${registry}/${project}/${img_tag}"
    done
}

docker_push
[root@localhost opt]# ./k8s_image_push.sh
输入镜像仓库地址(不加http/https): 192.168.200.100
输入镜像仓库用户名: admin
输入镜像仓库用户密码: Harbor12345
您设置的仓库地址为: 192.168.200.100,用户名: admin,密码: xxx
是否确认(Y/N): Y

安装k8s

[root@localhost opt]# cat k8s_master_install.sh
#!/bin/bash

# --- Install kubeadm / kubelet / kubectl pinned to 1.25.0 --------------------
yum install -y kubeadm-1.25.0 kubelet-1.25.0 kubectl-1.25.0

systemctl enable kubelet
systemctl start kubelet
# Sanity-check tool versions.
docker -v
kubelet --version


# --- Container runtime plumbing (crictl / containerd / nerdctl) --------------
IP=`ip addr | grep 'state UP' -A2 | grep inet | egrep -v '(127.0.0.1|inet6|docker)' | awk '{print $2}' | tr -d "addr:" | head -n 1 | cut -d / -f1`

tar -zxvf /opt/cri/crictl-v1.25.0-linux-amd64.tar.gz -C /usr/local/bin/
containerd config default > /etc/containerd/config.toml
# Use the systemd cgroup driver and pull the pause image from local Harbor.
sed -ri -e 's/(.*SystemdCgroup = ).*/\1true/' -e "s@(.*sandbox_image = ).*@\1\'$IP/library/pause:3.8\'@" /etc/containerd/config.toml
# Register docker.io mirrors plus the local Harbor registry (HTTP endpoint,
# TLS verification skipped, admin credentials).
# Fixed: the original injected the invalid key "insecure_skMASTER_IP_verify"
# (a bad search-and-replace artifact) instead of "insecure_skip_verify", so
# containerd never actually skipped TLS verification for the Harbor host.
sed -i -e "/.*registry.mirrors.*/a\        [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"docker.io\"]\n          endpoint = [\"https://registry.docker-cn.com\" ,\"http://hub-mirror.c.163.com\" ,\"https://docker.mirrors.ustc.edu.cn\"]\n        [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"$IP\"]\n          endpoint = [\"http://$IP\"]" -e "/.*registry.configs.*/a\        [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"$IP\".tls]\n          insecure_skip_verify = true\n        [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"$IP\".auth]\n          username = \"admin\"\n          password = \"Harbor12345\"" /etc/containerd/config.toml

cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

tar -zxvf /opt/cri/nerdctl-0.23.0-linux-amd64.tar.gz -C /usr/local/bin/
mkdir -p /etc/nerdctl/

cat > /etc/nerdctl/nerdctl.toml <<EOF
namespace      = "k8s.io"
insecure_registry = true
EOF

tar -zxvf /opt/cri/buildkit-v0.10.4.linux-amd64.tar.gz -C /usr/local/
# systemd socket unit for BuildKit (socket-activated daemon).
cat > /usr/lib/systemd/system/buildkit.socket <<EOF
[Unit]
Description=BuildKit
Documentation=https://github.com/moby/buildkit

[Socket]
ListenStream=%t/buildkit/buildkitd.sock
SocketMode=0660

[Install]
WantedBy=sockets.target
EOF
cat > /usr/lib/systemd/system/buildkit.service << EOF
[Unit]
Description=BuildKit
Requires=buildkit.socket
After=buildkit.socket
Documentation=https://github.com/moby/buildkit

[Service]
Type=notify
ExecStart=/usr/local/bin/buildkitd --addr fd://

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload && systemctl enable buildkit && systemctl start buildkit
systemctl daemon-reload && systemctl restart containerd && systemctl enable --now containerd

# Sanity-check every runtime tool.
ctr version && crictl version && runc -version && buildctl --version && nerdctl version

# Restart Harbor (the containerd restart above may have disturbed it), then
# log nerdctl in to the local registry.
cd /opt/harbor/harbor && docker-compose restart &> /dev/null && cd ~
nerdctl login -u admin -pHarbor12345 $IP



# --- Initialize the master node ----------------------------------------------
# Images come from the local Harbor ($IP/library); 10.244.0.0/16 matches the
# flannel default pod CIDR.
kubeadm init --kubernetes-version=1.25.0 --apiserver-advertise-address=$IP --image-repository $IP/library --pod-network-cidr=10.244.0.0/16
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sleep 50
kubectl get pod -n kube-system -owide

# --- Deploy the flannel CNI --------------------------------------------------
# Rewrite the image repository to the local Harbor. Double quotes let $IP
# expand directly; the original wrapped a single-quoted sed in `eval` solely
# to force that expansion.
sed -i "s@docker.io/flannel@$IP/library@g" /opt/yaml/flannel/kube-flannel.yaml
kubectl apply -f /opt/yaml/flannel/kube-flannel.yaml
sleep 20

# --- Deploy the Kubernetes dashboard -----------------------------------------
mkdir /opt/dashboard-certs
cd /opt/dashboard-certs/
kubectl create namespace kubernetes-dashboard
# Self-signed certificate for the dashboard, stored as a namespace secret.
openssl genrsa -out dashboard.key 2048
openssl req -days 36000 -new -out dashboard.csr -key dashboard.key -subj '/CN=dashboard-cert'
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
# Pull dashboard images from the local Harbor instead of kubernetesui/.
sed -i "s/kubernetesui/$IP\/library/g" /opt/yaml/dashboard/recommended.yaml
kubectl apply -f /opt/yaml/dashboard/recommended.yaml
kubectl apply -f /opt/yaml/dashboard/dashadmin-user.yaml

# Remove the control-plane taint so workloads can schedule on the master too.
kubectl taint nodes master node-role.kubernetes.io/control-plane-


# --- Print dashboard login info ----------------------------------------------
token=`kubectl -n kubernetes-dashboard create token admin-user`
echo ""
echo ""
echo ""
echo "dashboard地址:https://$IP:30001"
echo "登录令牌:$token"
[root@localhost opt]# ./k8s_master_install.sh

将node脚本传给node节点

# Copy the node-side install script to the node host (uses the passwordless
# SSH configured earlier).
scp /opt/k8s_node_install.sh node:/root/

配置vsftp

# Serve /opt over anonymous FTP so the node can use master as a yum mirror.
yum install -y vsftpd
echo "anon_root=/opt" >> /etc/vsftpd/vsftpd.conf
systemctl restart vsftpd

node节点配置环境

rm -rf /etc/yum.repos.d/*
vi /etc/yum.repos.d/local.repo
# --- contents of /etc/yum.repos.d/local.repo (repos served from master's FTP) ---
[centos]
name=centos
gpgcheck=0
enabled=1
baseurl=ftp://192.168.200.100/centos
[k8s]
name=k8s
gpgcheck=0
enabled=1
baseurl=ftp://192.168.200.100/kubernetes-repo

node节点加入k8s集群

[root@localhost ~]# ls
anaconda-ks.cfg  k8s_node_install.sh
[root@localhost ~]# ./k8s_node_install.sh
[root@master opt]# kubectl get nodes
NAME     STATUS   ROLES           AGE     VERSION
master   Ready    control-plane   14m     v1.25.0
node     Ready    <none>          4m30s   v1.25.0
[root@master opt]# kubectl get pod -A
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE
kube-flannel           kube-flannel-ds-275nd                        1/1     Running   0          4m48s
kube-flannel           kube-flannel-ds-jlkz7                        1/1     Running   0          14m
kube-system            coredns-76bf7f8764-gcggn                     1/1     Running   0          14m
kube-system            coredns-76bf7f8764-qwz69                     1/1     Running   0          14m
kube-system            etcd-master                                  1/1     Running   0          15m
kube-system            kube-apiserver-master                        1/1     Running   0          15m
kube-system            kube-controller-manager-master               1/1     Running   0          15m
kube-system            kube-proxy-5jvkl                             1/1     Running   0          14m
kube-system            kube-proxy-f9k7p                             1/1     Running   0          4m48s
kube-system            kube-scheduler-master                        1/1     Running   0          15m
kubernetes-dashboard   dashboard-metrics-scraper-7b645c4f85-q858n   1/1     Running   0          13m
kubernetes-dashboard   kubernetes-dashboard-568f4844dc-2tlhx        1/1     Running   0          13m

安装istio和kubevirt

#!/bin/bash

#### Deploy istio ####
# Load the offline istio images, install istioctl, apply the demo profile and
# the bundled addons, then show the istio-system workloads.
install_istio(){
  nerdctl load -i /opt/project/images/istio_image.tar
  tar -zxvf /opt/project/istio/istio-1.17.2-linux-amd64.tar.gz -C /opt/project/istio
  chmod +x /opt/project/istio/istio-1.17.2/bin/istioctl
  cp /opt/project/istio/istio-1.17.2/bin/istioctl /usr/local/bin/
  # NOTE(review): deletes the first coredns pod — presumably to force a
  # restart before installing istio; confirm why this is required.
  kubectl delete po -n kube-system `kubectl get po -n kube-system  | grep coredns | head -n 1 | awk '{print $1}'`
  istioctl install --set profile=demo -y
  kubectl apply -f /opt/project/istio/istio-1.17.2/samples/addons/
  sleep 15
  kubectl get all -n istio-system
}

#### 部署kubevirt ####
install_kubevirt(){
  yum install -y qemu-kvm libvirt virt-install bridge-utils
  nerdctl load -i /opt/project/images/kubevirt_image.tar
  kubectl apply -f /opt/project/kubevirt/deploy/kubevirt-operator.yaml
  kubectl apply -f /opt/project/kubevirt/deploy/kubevirt-cr.yaml
  kubectl apply -f /opt/project/kubevirt/deploy/multus-daemonset.yaml
  kubectl apply -f /opt/project/kubevirt/deploy/multus-cni-macvlan.yaml
  chmod +x /opt/project/kubevirt/tools/virtctl-v0.41.0-linux-amd64
  cp /opt/project/kubevirt/tools/virtctl-v0.41.0-linux-amd64 /usr/local/bin/virtctl
  sleep 15
  kubectl get all -n kubevirt
}


## Ask for confirmation before installing; Y/y installs both components,
## N/n exits cleanly, anything else is rejected.
read -p "是否安装配置istio和kubevirt(Y/N): " answer

case "$answer" in
    y|Y)
        install_istio
        install_kubevirt
        ;;
    n|N)
        exit 0
        ;;
    *)
        echo "请输入正确的选项"
        exit 1
        ;;
esac
[root@master opt]# ./k8s_project_install.sh
是否安装配置istio和kubevirt(Y/N): Y
[root@master opt]# kubectl get pod -A
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE
istio-system           grafana-56bdf8bf85-g86cz                     1/1     Running   0          2m47s
istio-system           istio-egressgateway-85649899f8-clwmg         1/1     Running   0          4m9s
istio-system           istio-ingressgateway-f56888458-24ttk         1/1     Running   0          4m9s
istio-system           istiod-64848b6c78-64zqf                      1/1     Running   0          4m13s
istio-system           jaeger-76cd7c7566-46hvk                      1/1     Running   0          2m47s
istio-system           kiali-646db7568f-h9q6h                       1/1     Running   0          2m47s
istio-system           prometheus-85949fddb-k4ndc                   2/2     Running   0          2m47s
kube-flannel           kube-flannel-ds-275nd                        1/1     Running   0          11m
kube-flannel           kube-flannel-ds-jlkz7                        1/1     Running   0          20m
kube-system            coredns-76bf7f8764-qwz69                     1/1     Running   0          21m
kube-system            coredns-76bf7f8764-rkhk5                     1/1     Running   0          4m21s
kube-system            etcd-master                                  1/1     Running   0          21m
kube-system            kube-apiserver-master                        1/1     Running   0          21m
kube-system            kube-controller-manager-master               1/1     Running   0          21m
kube-system            kube-multus-ds-pjckn                         1/1     Running   0          113s
kube-system            kube-multus-ds-qn7lv                         1/1     Running   0          113s
kube-system            kube-proxy-5jvkl                             1/1     Running   0          21m
kube-system            kube-proxy-f9k7p                             1/1     Running   0          11m
kube-system            kube-scheduler-master                        1/1     Running   0          21m
kubernetes-dashboard   dashboard-metrics-scraper-7b645c4f85-q858n   1/1     Running   0          20m
kubernetes-dashboard   kubernetes-dashboard-568f4844dc-2tlhx        1/1     Running   0          20m
kubevirt               virt-api-6c4f849c9d-nntf6                    1/1     Running   0          90s
kubevirt               virt-api-6c4f849c9d-q78kk                    1/1     Running   0          90s
kubevirt               virt-controller-67b95d99d5-bhpts             1/1     Running   0          59s
kubevirt               virt-controller-67b95d99d5-xspc7             1/1     Running   0          59s
kubevirt               virt-handler-ptsk8                           1/1     Running   0          59s
kubevirt               virt-handler-vs5kh                           1/1     Running   0          59s
kubevirt               virt-operator-798f64bdf6-4t4nc               1/1     Running   0          116s
kubevirt               virt-operator-798f64bdf6-cqqr6               1/1     Running   0          116s

网站公告

今日签到

点亮在社区的每一天
去签到