1. Environment Preparation
# Create the project directory structure
mkdir -p ~/project/{web,proxy,redis,db,elk,zabbix,rsync,config}
cd ~/project
# Create a custom Docker network (simulating 192.168.115.0/24)
docker network create --subnet=192.168.115.0/24 --gateway=192.168.115.1 cluster_net
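To confirm the network was created with the intended subnet and gateway, an optional sanity check:
# Show the IPAM configuration of the new network
docker network inspect --format '{{json .IPAM.Config}}' cluster_net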
2. Shared Storage Configuration (NFS Alternative)
# Create a shared data volume (in place of NFS)
docker volume create app_data
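The volume's mountpoint on the Docker host can be checked with docker volume inspect (this assumes the default local driver):
# Print where the volume's data lives on the host
docker volume inspect --format '{{.Mountpoint}}' app_data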
3. Building Key Service Images
(1) Web Application Server (Nginx + PHP + Tomcat)
~/project/web/Dockerfile:
FROM alpine:3.18
# Nginx + PHP 8.1 (FPM) for Discuz, plus a JRE for the Tomcat-based shop application
RUN apk add --no-cache nginx php81 php81-fpm php81-mysqli php81-gd \
    openjdk11-jre bash tomcat-native tzdata
COPY config/nginx.conf /etc/nginx/http.d/default.conf
COPY config/php.ini /etc/php81/php.ini
# Start PHP-FPM in the background, then keep Nginx in the foreground as PID 1
CMD ["sh", "-c", "php-fpm81 && exec nginx -g 'daemon off;'"]
nginx.conf example (saved as ~/project/web/config/nginx.conf):
server {
    listen 80;
    server_name localhost;
    root /var/www/html;
    index index.php index.html;
    location / {
        try_files $uri $uri/ /index.php?$args;
    }
    location ~ \.php$ {
        fastcgi_pass 127.0.0.1:9000;
        fastcgi_index index.php;
        include fastcgi.conf;
    }
}
php.ini example (saved as ~/project/web/config/php.ini):
[PHP]
engine=On
short_open_tag=Off
error_reporting=E_ALL & ~E_NOTICE
Build and run the containers:
# Copy the application files into the build context
cp /root/Discuz_X3.5_SC_UTF8_20250205 ~/project/web/discuz -R
cp /root/biyesheji.war ~/project/web/
cp /root/apache-tomcat-8.5.40.tar.gz ~/project/web/
# Build the image
docker build -t web-app ~/project/web
# Run the containers (two instances)
docker run -d --name web_01 --hostname web_01 --net cluster_net \
--ip 192.168.115.113 -v app_data:/var/www/html -p 8080:80 web-app
docker run -d --name web_02 --hostname web_02 --net cluster_net \
--ip 192.168.115.114 -v app_data:/var/www/html -p 8081:80 web-app
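A quick smoke test of the two instances through their published ports (assuming content has been placed in the app_data volume for Nginx to serve):
curl -I http://localhost:8080   # web_01
curl -I http://localhost:8081   # web_02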
(2) HAProxy Proxy Layer
~/project/proxy/Dockerfile:
FROM alpine:3.18
RUN apk add --no-cache haproxy
COPY config/haproxy.cfg /etc/haproxy/haproxy.cfg
CMD ["haproxy", "-f", "/etc/haproxy/haproxy.cfg"]
Configuration file ~/project/proxy/config/haproxy.cfg (it must sit inside the build context for the COPY above):
defaults
    mode http
    timeout connect 5s
    timeout client 30s
    timeout server 30s
frontend http_in
    bind *:80
    acl host_discuz hdr(host) -i discuz.linuxha.com
    acl host_shop hdr(host) -i shop.linuxha.com
    use_backend discuz if host_discuz
    use_backend shop if host_shop
backend discuz
    server web1 192.168.115.113:80 check
backend shop
    server web2 192.168.115.114:8080 check
Build and run the container:
docker build -t haproxy ~/project/proxy
docker run -d --name lb_01 --hostname lb_01 --net cluster_net \
--ip 192.168.115.111 -p 80:80 haproxy
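Name-based routing only works when requests carry the matching Host header. To test with real hostnames from the Docker host, hypothetical /etc/hosts entries pointing both names at the proxy would do:
# Optional: map the test domains to the local proxy
echo "127.0.0.1 discuz.linuxha.com shop.linuxha.com" >> /etc/hosts
curl http://discuz.linuxha.com/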
(3) Redis High-Availability Cluster
~/project/redis/Dockerfile:
FROM alpine:3.18
RUN apk add --no-cache redis
COPY config/redis.conf /etc/redis.conf
CMD ["redis-server", "/etc/redis.conf"]
Sentinel configuration ~/project/config/redis-sentinel.conf:
port 26379
sentinel monitor mymaster 192.168.115.115 6379 2
sentinel down-after-milliseconds mymaster 5000
sentinel parallel-syncs mymaster 1
Build the image (docker build -t redis ~/project/redis), then run the containers:
# Master node
docker run -d --name redis_01 --hostname redis_01 --net cluster_net \
--ip 192.168.115.115 redis
# Replica node
docker run -d --name redis_02 --hostname redis_02 --net cluster_net \
--ip 192.168.115.116 redis
# Sentinel (mount the sentinel configuration prepared above)
docker run -d --name redis_sentinel --net cluster_net \
-v ~/project/config/redis-sentinel.conf:/etc/redis-sentinel.conf \
redis redis-server /etc/redis-sentinel.conf --sentinel
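Both Redis containers start from the same redis.conf, so redis_02 still has to be pointed at the master before the sentinel setup is meaningful; one way to do that at runtime (instead of baking replicaof into a second config file):
# Make redis_02 replicate from the master, then confirm the link
docker exec redis_02 redis-cli replicaof 192.168.115.115 6379
docker exec redis_01 redis-cli info replication   # expect role:master, connected_slaves:1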
(4) MySQL Master-Slave Database
~/project/db/Dockerfile:
FROM alpine:3.18
RUN apk add --no-cache mariadb mariadb-client
COPY config/my.cnf /etc/mysql/my.cnf
# Note: the data directory must be initialized once (e.g. mariadb-install-db --user=mysql) before mysqld will start
CMD ["mysqld", "--user=mysql"]
Configure master-slave replication:
1. Master my.cnf:
[mysqld]
server-id=1
log-bin=mysql-bin
2. Slave my.cnf:
[mysqld]
server-id=2
Build the image (docker build -t db ~/project/db), then run the containers:
# Master
docker run -d --name db_01 --hostname db_01 --net cluster_net \
--ip 192.168.115.117 -v mysql_data:/var/lib/mysql db
# Slave
docker run -d --name db_02 --hostname db_02 --net cluster_net \
--ip 192.168.115.118 -v mysql_data_slave:/var/lib/mysql db
Initialize replication: create a replication account on db_01 (the master), then run CHANGE MASTER TO on db_02 (the slave) to start replicating, as sketched below.
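A minimal sketch of that initialization, assuming a passwordless root login and a hypothetical replication account repl/repl_pass (substitute the MASTER_LOG_FILE/MASTER_LOG_POS values actually reported by SHOW MASTER STATUS):
# On the master: create the replication account and note the binlog position
docker exec db_01 mysql -e "CREATE USER 'repl'@'%' IDENTIFIED BY 'repl_pass';"
docker exec db_01 mysql -e "GRANT REPLICATION SLAVE ON *.* TO 'repl'@'%'; SHOW MASTER STATUS;"
# On the slave: point it at the master and start replicating
docker exec db_02 mysql -e "CHANGE MASTER TO MASTER_HOST='192.168.115.117', MASTER_USER='repl', MASTER_PASSWORD='repl_pass', MASTER_LOG_FILE='mysql-bin.000001', MASTER_LOG_POS=4;"
docker exec db_02 mysql -e "START SLAVE; SHOW SLAVE STATUS\G"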
(5) ELK Logging System
# Copy the installation packages
cp /root/{elasticsearch,logstash,kibana,filebeat}-7.1.1* ~/project/elk/
~/project/elk/Dockerfile:
FROM alpine:3.18
RUN apk add --no-cache openjdk11-jre bash
COPY elasticsearch-7.1.1 /opt/elasticsearch
COPY kibana-7.1.1 /opt/kibana
COPY logstash-7.1.1 /opt/logstash
COPY config/elk-setup.sh /setup.sh
CMD ["/setup.sh"]
The elk-setup.sh script starts all of the components, with Filebeat configured to ship logs to Logstash. Note that the script below takes a docker-compose-based approach using the official 7.1.1 images rather than the custom image above.
The elk-setup.sh script:
#!/bin/bash
# =================================================================
# ELK Stack automated deployment script (Docker Compose)
# What it does:
#   1. Create the standard directory layout
#   2. Generate docker-compose.yml and the component config files
#   3. Start the containers and verify service status
# Usage:
#   chmod +x elk-setup.sh && ./elk-setup.sh
# =================================================================
# Colour constants
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # reset to the default colour
# --- 1. Create the project directory structure ---
echo -e "${YELLOW}==> Creating project directories...${NC}"
PROJECT_DIR="elk-stack"
mkdir -p ${PROJECT_DIR}/{filebeat,logstash/pipeline,elasticsearch/data,logs}
echo -e "目录结构:
- ${PROJECT_DIR}/
- ${PROJECT_DIR}/filebeat/
- ${PROJECT_DIR}/logstash/pipeline/
- ${PROJECT_DIR}/elasticsearch/data/
- ${PROJECT_DIR}/logs/"
echo -e "${GREEN}目录创建成功!${NC}\n"
cd ${PROJECT_DIR}
# --- 2. Generate docker-compose.yml ---
echo -e "${YELLOW}==> Generating docker-compose.yml...${NC}"
cat <<EOF > docker-compose.yml
version: '3.8'
services:
  elasticsearch:
    image: elasticsearch:7.1.1
    container_name: elasticsearch
    environment:
      - discovery.type=single-node
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
    volumes:
      - ./elasticsearch/data:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
    networks:
      - elk
  logstash:
    image: logstash:7.1.1
    container_name: logstash
    volumes:
      - ./logstash/pipeline:/usr/share/logstash/pipeline
    ports:
      - "5044:5044"
    networks:
      - elk
    depends_on:
      - elasticsearch
  kibana:
    image: kibana:7.1.1
    container_name: kibana
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch
  filebeat:
    image: elastic/filebeat:7.1.1
    container_name: filebeat
    user: root
    volumes:
      - ./filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
      - ./logs:/var/log/app
    networks:
      - elk
    depends_on:
      - logstash
networks:
  elk:
    driver: bridge
EOF
echo -e "${GREEN}docker-compose.yml 生成成功!${NC}\n"
# --- 3. 生成 filebeat.yml 配置 ---
echo -e "${YELLOW}==> 生成 filebeat.yml...${NC}"
cat <<EOF > filebeat/filebeat.yml
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /var/log/app/*.log
output.logstash:
  hosts: ["logstash:5044"]
EOF
echo -e "${GREEN}filebeat.yml 生成成功!${NC}\n"
# --- 4. 生成 logstash.conf 管道配置 ---
echo -e "${YELLOW}==> 生成 logstash.conf...${NC}"
cat <<EOF > logstash/pipeline/logstash.conf
input {
  beats {
    port => 5044
  }
}
filter {
  # Example: parse JSON-formatted log lines
  if [message] =~ /^{.*}$/ {
    json {
      source => "message"
    }
  }
}
output {
  elasticsearch {
    hosts => ["http://elasticsearch:9200"]
    index => "app-logs-%{+YYYY.MM.dd}"
  }
}
EOF
echo -e "${GREEN}logstash.conf 生成成功!${NC}\n"
# --- 5. 创建日志测试文件 ---
echo -e "${YELLOW}==> 创建测试日志文件...${NC}"
echo "{\"level\":\"INFO\",\"message\":\"ELK 部署成功!\"}" > logs/app.log
echo -e "${GREEN}日志文件创建成功!${NC}\n"
# --- 6. 启动容器 ---
echo -e "${YELLOW}==> 启动 ELK 容器...${NC}"
docker-compose up -d
sleep 10 # wait for the services to initialize
# --- 7. Check service status ---
echo -e "${YELLOW}==> Service status check:${NC}"
if docker ps | grep -q 'elasticsearch\|logstash\|kibana\|filebeat'; then
echo -e "${GREEN}所有容器运行正常!${NC}"
else
echo -e "${YELLOW}警告: 部分容器未启动,请检查日志!${NC}"
fi
# --- 8. Usage guide ---
echo -e "\n${GREEN}======================= Deployment complete! =======================${NC}"
echo -e "Service endpoints:"
echo -e "  Kibana       : http://${HOSTNAME:-localhost}:5601"
echo -e "  Elasticsearch: http://${HOSTNAME:-localhost}:9200"
echo -e "\nTest log collection:"
echo "  echo '{\"level\":\"DEBUG\",\"message\":\"test log\"}' >> logs/app.log"
echo -e "\nManagement commands:"
echo -e "  Stop the stack : ${YELLOW}docker-compose down${NC}"
echo -e "  Follow the logs: ${YELLOW}docker-compose logs -f${NC}"
1. Make the script executable:
chmod +x elk-setup.sh
2. Run the script:
./elk-setup.sh # automatically creates the directories and config files and starts the containers
3. Verify the services:
curl http://localhost:9200 # Elasticsearch status check
4. Watch the logs:
docker-compose logs -f filebeat # monitor log collection
(6) Other Services
- Zabbix: use the official zabbix/zabbix-appliance image
- Rsync: Alpine base image + rsync configuration
- Cron backup: a scheduled job syncs the app_data volume to a backup directory (see the sketch below)
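A rough sketch of those two items, assuming the appliance's web UI listens on container port 80 (published here on host port 8082) and a hypothetical /backup/app_data directory on the Docker host:
# Zabbix all-in-one appliance on the cluster network
docker run -d --name zabbix_01 --hostname zabbix_01 --net cluster_net -p 8082:80 zabbix/zabbix-appliance
# One-shot backup of the shared volume; schedule it via cron for nightly runs
APP_DATA_DIR=$(docker volume inspect -f '{{.Mountpoint}}' app_data)
rsync -a --delete "${APP_DATA_DIR}/" /backup/app_data/
# Example crontab entry (02:00 every day):
# 0 2 * * * rsync -a --delete /var/lib/docker/volumes/app_data/_data/ /backup/app_data/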
4. Service Integration
- Application configuration:
  - Discuz connects to the MySQL master (db_01:3306)
  - The shop application connects to Redis via the sentinel (redis_sentinel:26379); see the sentinel query sketch after this list
- Log collection:
# Filebeat configuration example
filebeat.inputs:
  - type: log
    paths: [/var/log/nginx/access.log]
output.logstash:
  hosts: ["192.168.115.119:5044"]
- Monitoring configuration:
  - Add every container IP to Zabbix monitoring
  - Apply the MySQL/Redis/Nginx templates
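For the sentinel-based connection mentioned above, a quick way to verify what the application should resolve is to ask the sentinel for the current master address (a verification sketch only, not application code):
# Ask the sentinel which node is currently the master of "mymaster"
docker exec redis_sentinel redis-cli -p 26379 sentinel get-master-addr-by-name mymaster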
5. Verification Steps
- Access Discuz:
curl -H "Host: discuz.linuxha.com" http://localhost
- Test Redis high availability:
docker exec redis_01 redis-cli info replication
- Check the ELK log indices:
curl http://192.168.115.119:9200/_cat/indices