Docker Swarm Cluster Usage Notes

Published: 2025-07-19
1 Initializing the Cluster

Directory layout on the manager host:

data
├── base_data.yml
├── base_monitoring.yml
├── base_server_middleware.yml
└── docker
   ├── consul
   ├── elasticsearch
   ├── filebeat
   ├── grafana
   ├── kib
   ├── konga
   ├── mongodb
   ├── mysql
   ├── nacos
   ├── nginx
   ├── portainer
   ├── postgresql
   ├── prometheus
   ├── rabbitmq
   └── redis


1.1 Configure hostnames on each server

First set a distinct hostname on each node so they are easy to tell apart later:

hostnamectl set-hostname manager    # manager node
hostnamectl set-hostname node1      # worker node 1
hostnamectl set-hostname node2      # worker node 2

After changing the hostnames, also update the /etc/hosts records; otherwise later DNS lookups may fail with unable to resolve host xxxx: Temporary failure in name resolution.

vi /etc/hosts

Map 127.0.1.1 to the hostname set above, e.g. 127.0.1.1 manager on the manager, and node1 / node2 on the respective workers.
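
For reference, a minimal /etc/hosts on the manager could look like this (on node1/node2, put their own hostnames on the 127.0.1.1 line):

127.0.0.1 localhost
127.0.1.1 manager
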
1.2 Create the cluster and join nodes
docker swarm init --advertise-addr 10.10.6.111 --data-path-addr 10.10.6.111
docker swarm join --token SWMTKN-1-51niu3a5jh0bgj738go49re9yoo1hpzidq6nxn5ho114yx43-ekeyf6rynb6xl9rykyrx8 \
10.10.6.111:2377 --advertise-addr 172.168.1.175:2377

Note: if the servers in the Swarm live on different networks, for example cloud servers that can only reach each other over public IPs, then each node must pass --advertise-addr with its own public IP when joining. In the command above, 172.168.1.175 is the joining node's public IP and can reach 10.10.6.111; without it, services in the cluster cannot reach each other through the overlay network's DNS-based service discovery. When --advertise-addr is omitted, the node advertises the IP of its eth0 interface, which is normally a private address, so nodes on different networks end up unable to communicate. If the whole cluster sits on one internal network where the eth0 addresses can already reach each other, --advertise-addr is not needed.
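
To confirm which address each node actually advertised, inspect it from the manager with the standard Docker CLI:

docker node ls
# print the advertised address of a specific node
docker node inspect node1 --format '{{ .Status.Addr }}'
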

1.3 Open the required ports

1. In the cloud provider's management console, open ports 2377/tcp (cluster management), 7946/tcp+udp (node communication), and 4789/udp (overlay network traffic) to the other servers in the cluster.

2. On each server in the cluster, open the same ports in the firewall:
ufw allow proto tcp from any to any port 7946,4789,2377
ufw allow proto udp from any to any port 7946,4789,2377
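
A quick way to verify the ports are reachable between nodes is netcat, here checking from a worker against the manager's IP:

nc -zv 10.10.6.111 2377
nc -zv 10.10.6.111 7946
# UDP probes with nc are less reliable, but -u at least exercises the path
nc -zvu 10.10.6.111 4789
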

2 Creating the Base Database Services

base_data.yml:

version: '3.8'

networks:
  base_service_database-net:
    external: true

services:
# MySQL
  mysql:
    # mysql:8.0.20, or any other MySQL version (change as needed)
    image: mysql:8.0.20
    # container name
    container_name: mysql-8
    networks:
      - base_service_database-net
    environment:
      # root password
      - MYSQL_ROOT_PASSWORD=python
      - TZ=Asia/Shanghai
      - SET_CONTAINER_TIMEZONE=true
      - CONTAINER_TIMEZONE=Asia/Shanghai
    volumes:
      # host path on the left, container path on the right (missing host directories are created automatically)
      - /data/docker/mysql/mysql8:/etc/mysql
      - /data/docker/mysql/mysql8/logs:/logs
      - /data/docker/mysql/mysql8/data:/var/lib/mysql
      - /etc/localtime:/etc/localtime
      - /data/docker/mysql/mysql8/mysql-files:/var/lib/mysql-files
    deploy:
      placement:
        constraints:
          - node.hostname == manager
      replicas: 1  # a single replica pinned to a fixed node
    ports:
      # host port on the left, container port on the right
      - 3613:3306
    restart: always
    privileged: true


# MongoDB
  mongo:
    restart: always
    image: mongo:8.0.3
    container_name: mongodb
    networks:
      - base_service_database-net
    volumes:
      - /data/docker/mongodb/config/mongod.conf:/etc/mongod.conf
      - /data/docker/mongodb/data:/data/db
      - /data/docker/mongodb/logs:/var/log/mongodb
    ports:
      - 27017:27017
    environment:
      - MONGO_INITDB_ROOT_PASSWORD=python
      - MONGO_INITDB_ROOT_USERNAME=caipu_srv
    deploy:
      placement:
        constraints:
          - node.hostname == manager


# Redis
  redis:
    image: redis:7.0.12
    container_name: redis
    restart: always
    networks:
      - base_service_database-net
    command: redis-server /usr/local/etc/redis/redis.conf --appendonly no
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/docker/redis/config/redis.conf:/usr/local/etc/redis/redis.conf
      - /data/docker/redis/data:/data
      - /data/docker/redis/logs:/logs
    ports:
      - 6379:6379
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  kong-database:
    image: postgres:16
    container_name: kong-database
    restart: always
    networks:
      - base_service_database-net
    environment:
      - POSTGRES_USER=kong
      - POSTGRES_DB=kong
      - POSTGRES_PASSWORD=kong
    volumes:
      - /data/docker/postgresql/data:/var/lib/postgresql/data
    ports:
      - "5348:5432"
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  # one-shot job that initializes Kong's database schema
  kong-migration:
    container_name: kong-migration
    image: kong
    command: kong migrations bootstrap
    networks:
      - base_service_database-net
    restart: on-failure
    environment:
      - KONG_PG_HOST=kong-database
      - KONG_DATABASE=postgres
      - KONG_PG_USER=kong
      - KONG_PG_PASSWORD=kong
      - KONG_CASSANDRA_CONTACT_POINTS=kong-database
    links:
      - kong-database
    depends_on:
      - kong-database
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  elasticsearch:
    image: elasticsearch:7.17.7
    restart: always
    container_name: elasticsearch
    networks:
      - base_service_database-net
    environment:
      - discovery.type=single-node
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
    ports:
      - "9200:9200"
      - "9300:9300"
    volumes:
      - /data/docker/elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - /data/docker/elasticsearch/data:/usr/share/elasticsearch/data
      - /data/docker/elasticsearch/logs:/usr/share/elasticsearch/logs

    deploy:
      placement:
        constraints:
          - node.hostname == manager

First create the overlay network (--attachable also lets standalone containers join it):

docker network create --driver overlay base_service_database-net --attachable

Then deploy the stack:

docker stack deploy -c base_data.yml base_service_database
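
A note on Swarm behavior: docker stack deploy ignores per-container compose fields such as container_name, restart, privileged, links and depends_on (it warns about them at deploy time); under Swarm, restart behavior comes from deploy.restart_policy instead. After deploying, verify that every service has converged:

docker stack services base_service_database
docker service ps base_service_database_mysql   # tasks of one service, here mysql
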
3 Creating the Monitoring Services

Note: the cadvisor Docker image may not be directly pullable. You can fetch the cadvisor image tar through the download-cadvisor link and then install it offline with docker load -i.
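
A sketch of that offline workflow, assuming one machine that can pull the image and scp access to the Swarm nodes:

# on a machine that can pull the image
docker pull gcr.io/cadvisor/cadvisor:v0.52.1
docker save -o cadvisor-v0.52.1.tar gcr.io/cadvisor/cadvisor:v0.52.1
# copy to each node that will run the global service, then load it
scp cadvisor-v0.52.1.tar node1:/tmp/
ssh node1 docker load -i /tmp/cadvisor-v0.52.1.tar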

base_monitoring.yml

version: "3.8"

networks:
  monitoring:
    external: true
  base_service_database-net:
    external: true


services:

  # Prometheus server
  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - /data/docker/prometheus/data:/prometheus
      - /data/docker/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
      
    networks:
      - monitoring
      - base_service_database-net
    deploy:
      placement:
        constraints:
          - node.role == manager
    environment:
      - TZ=Asia/Shanghai

      
  # Node Exporter (global: one task on every node)
  node-exporter:
    image: prom/node-exporter:latest
    command:
      - '--path.rootfs=/host'
    pid: host
    volumes:
      - '/:/host:ro,rslave'
    environment:
      - TZ=Asia/Shanghai
    networks:
      - monitoring
    deploy:
      mode: global

  # cAdvisor (global: one task on every node)
  cadvisor:
    image: gcr.io/cadvisor/cadvisor:v0.52.1
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /proc:/proc
      - /var/lib/docker/:/var/lib/docker:ro
    security_opt:
      - apparmor:unconfined  
    devices:
      - /dev/kmsg:/dev/kmsg
    networks:
      - monitoring
    deploy:
      mode: global
    ports:
      - "8080:8080"


  # Grafana dashboards
  grafana:
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
    volumes:
      - /data/docker/grafana:/var/lib/grafana
    environment:
      - TZ=Asia/Shanghai
    networks:
      - monitoring
    deploy:
      placement:
        constraints:
          - node.role == manager

First create the network:

docker network create --driver overlay --attachable monitoring

Then deploy the stack:

docker stack deploy -c base_monitoring.yml monitoring

If Prometheus or Grafana fails to start, loosen the permissions on the mounted data directories (both containers run as non-root users and need write access):

chmod 777  /data/docker/grafana
chmod 777  /data/docker/prometheus/data
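
The stack above mounts /data/docker/prometheus/prometheus.yml but does not show its content; a minimal sketch that scrapes the two global exporters could look like this (assuming the stack is deployed under the name monitoring, so service DNS names carry that prefix):

global:
  scrape_interval: 15s

scrape_configs:
  # tasks.<service> resolves to one A record per running task,
  # so global services get scraped on every node
  - job_name: node-exporter
    dns_sd_configs:
      - names: ['tasks.monitoring_node-exporter']
        type: A
        port: 9100
  - job_name: cadvisor
    dns_sd_configs:
      - names: ['tasks.monitoring_cadvisor']
        type: A
        port: 8080
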
4 Creating the Middleware Services

base_server_middleware.yml

version: '3.8'

networks:
  base_service_database-net:
    external: true
  web_app:
    external: true
  monitoring:
    external: true
    
services:

  consul:
    image: consul:1.15.4
    restart: always
    container_name: consul
    networks:
      - web_app
    ports:
      - "8500:8500"
      - "8600:8600"
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/docker/consul/data:/consul/data

    deploy:
      placement:
        constraints:
          - node.hostname == manager

  nacos:
    image: qingpan/rnacos:stable
    container_name: nacos
    networks:
      - web_app
    ports:
      - "8848:8848"
      - "9848:9848"
      - "10848:10848"
    volumes:
      - /data/docker/nacos/logs:/home/nacos/logs
    restart: always
    environment:
      - RNACOS_HTTP_PORT=8848
      - RNACOS_ENABLE_NO_AUTH_CONSOLE=true
      - TZ=Asia/Shanghai
      - MODE=standalone
      - SPRING_DATASOURCE_PLATFORM=mysql
      - MYSQL_SERVICE_HOST=81.71.64.139
      - MYSQL_SERVICE_PORT=3306
      - MYSQL_SERVICE_USER=root
      - MYSQL_SERVICE_PASSWORD=python
      - MYSQL_SERVICE_DB_NAME=nacos_config
      - MYSQL_SERVICE_DB_PARAM=characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  portainer:
    image: 6053537/portainer-ce
    container_name: portainer
    networks:
      - monitoring
    ports:
      - "9000:9000"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /data/docker/portainer:/data
    restart: always
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  rabbitmq:
    image: rabbitmq:4.0.7-management
    container_name: rabbitmq
    networks:
      - monitoring
      - web_app
      - base_service_database-net
    environment:
      - RABBITMQ_DEFAULT_USER=root
      - RABBITMQ_DEFAULT_PASS=q123q123
    ports:
      - "5672:5672"
      - "15672:15672"
    volumes:
      - /data/docker/rabbitmq/data:/var/lib/rabbitmq
      - /data/docker/rabbitmq/logs:/var/log/rabbitmq
    restart: always
    deploy:
      placement:
        constraints:
          - node.hostname == manager


  konga:
    container_name: konga
    image: pantsel/konga:latest
    restart: always
    networks:
      - monitoring
      - base_service_database-net
    ports:
      - "1337:1337"
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  kibana:
    container_name: kibana
    image: kibana:7.17.7
    restart: always
    volumes:
      - /data/docker/kib/config/kibana.yml:/usr/share/kibana/config/kibana.yml
    networks:
      - base_service_database-net
      - monitoring
    ports:
      - "5601:5601"
    deploy:
      placement:
        constraints:
          - node.hostname == manager


  filebeat:
    container_name: filebeat
    image: elastic/filebeat:7.17.7
    restart: always
    networks:
      - base_service_database-net
    deploy:
      mode: global
    configs:
      - source: filebeat-config
        target: /usr/share/filebeat/filebeat.yml  # where the config lands inside the container
    volumes:
      - type: bind
        source: /data/logs/
        target: /data/logs/
        
      - type: bind
        source: /var/run/docker.sock
        target: /var/run/docker.sock
      - type: bind
        source: /var/lib/docker/containers
        target: /var/lib/docker/containers
        read_only: true
        

configs:
  filebeat-config:
    file: /data/docker/filebeat/config/filebeat.yml  # read from this local filebeat.yml file

First create the network:

docker network create --driver overlay --attachable web_app

Then create the filebeat.yml config. Its content is as follows:

filebeat.inputs:
- type: filestream
  enabled: true
  paths:
      - /data/logs/*/*.log

  parsers:
    - multiline:
        type: pattern
        pattern: '^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}'
        negate: true
        match: after
        max_lines: 500
        timeout: 10s

processors:
  - dissect:
      tokenizer: "%{log_timestamp} | %{log_level} | %{namespace} | %{file_path} | %{method} | %{track_id} | %{message}"
      field: "message"
      target_prefix: ""
      overwrite_keys: true
  - timestamp:
      field: log_timestamp
      layouts:
        - '2006-01-02 15:04:05.000'
      test:
        - '2025-04-14 09:16:52.758'
  - drop_fields:
      fields: ["log_timestamp"]


output.elasticsearch:
  hosts: ["http://elasticsearch:9200"]
  index: "caipu_srv-logs-%{+yyyy.MM.dd}"
  indices:
    - index: "caipu_srv-logs-%{+yyyy.MM.dd}"
      when.contains:
        tags: "xixi"
      pipeline: "xixi_processor"
      


setup.template.enabled: false
setup.template.name: "caipu_srv"
setup.template.pattern: "caipu_srv-*"
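
For reference, a (hypothetical) log line in the shape the multiline pattern and dissect tokenizer above expect, i.e. a millisecond timestamp followed by pipe-separated fields:

2025-04-14 09:16:52.758 | INFO | caipu_srv | app/views.py | get_recipe | 9f3a1c42 | request handled in 12ms
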
Register the file as a Swarm config:

docker config create filebeat-config /data/docker/filebeat/config/filebeat.yml
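
Note that Swarm configs are immutable: to ship a modified filebeat.yml, remove the services using it (or point them at a new, versioned config name), then delete and recreate the config:

docker config rm filebeat-config
docker config create filebeat-config /data/docker/filebeat/config/filebeat.yml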

Finally, deploy the stack:

docker stack deploy -c base_server_middleware.yml base_server_middleware
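
Once everything is up, you can check that Filebeat is shipping logs into Elasticsearch by looking at its task logs and listing the indices (run on the manager, where port 9200 is published):

docker service logs --tail 50 base_server_middleware_filebeat
curl 'http://localhost:9200/_cat/indices?v'    # expect a caipu_srv-logs-* index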
