目录
环境准备
下载efk软件包
[root@k8s-master ~]# mkdir efk
[root@k8s-master ~]# cd efk/
[root@k8s-master efk]# ls
[root@k8s-master efk]# rz
rz waiting to receive.
[root@k8s-master efk]# ls
efk.zip
[root@k8s-master efk]# unzip efk.zip
Archive: efk.zip
creating: efk/
inflating: efk/class.yaml
inflating: efk/deployment.yaml
inflating: efk/elasticsearch-statefulset.yaml
inflating: efk/elasticsearch_svc.yaml
inflating: efk/fluentd.yaml
inflating: efk/kibana.yaml
extracting: efk/kube-logging.yaml
inflating: efk/pod.yaml
inflating: efk/rbac.yaml
inflating: efk/serviceaccount.yaml
安装 socat：yum -y install socat
安装 nfs-utils：yum -y install nfs-utils
下载 nfs
设置nfs开机自启
[root@k8s-master data]# systemctl enable --now nfs
创建共享存储目录
[root@k8s-master ~]# cd /data/
[root@k8s-master data]# ls
discuz mysql redis tomcat
[root@k8s-master data]# mkdir efk
[root@k8s-master data]# ls
discuz efk mysql redis tomcat
配置共享目录文件
[root@k8s-master data]# vim /etc/exports
/data/efk 192.168.158.0/24(rw,sync,no_root_squash)
加载nfs
使共享目录生效
查看
[root@k8s-master data]# showmount -e
Export list for k8s-master:
/data/efk 192.168.158.0/24
node节点验证
共享目录配置成功
[root@k8s-node1 ~]# showmount -e 192.168.158.33
Export list for 192.168.158.33:
[root@k8s-node2 ~]# showmount -e 192.168.158.33
Export list for 192.168.158.33:
进入efk配置文件目录
修改deployment.yaml文件
[root@k8s-master efk]# vim deployment.yaml
修改为master主节点ip
修改为nfs共享存储目录
修改 kibana
添加 type: NodePort（注意大小写与空格，正确写法是 NodePort）
[root@k8s-master efk]# vim kibana.yaml
因为只有两个 node 节点，这里把副本数（replicas）改为 2 就可以（注：此处指 Elasticsearch 的副本数，对应后文的 es-cluster-0/es-cluster-1；下面 kibana.yaml 中 kibana 的 replicas 仍为 1）
部署EFK
创建nfs存储访问
kubectl create -f serviceaccount.yaml
kubectl create -f rbac.yaml
kubectl create -f deployment.yaml
kubectl create -f class.yaml
构建es集群
kubectl apply -f kube-logging.yaml
kubectl create -f elasticsearch-statefulset.yaml
kubectl create -f elasticsearch_svc.yaml
部署kibana
往解压出来的 kibana.yaml文件里添加
volumeMounts:
- name: kibana-config
mountPath: /usr/share/kibana/config/
volumes:
- name: kibana-config
configMap:
name: kibana-configmap
修改好后如下
[root@k8s-master efk]# vim kibana.yaml
[root@k8s-master efk]# cat kibana.yaml
# Kibana Service + Deployment for the kube-logging namespace.
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: kube-logging
  labels:
    app: kibana
spec:
  # NodePort exposes Kibana on a high port of every node so the UI is
  # reachable from outside the cluster (seen later as 5601:32107/TCP).
  type: NodePort
  ports:
  - port: 5601
  selector:
    app: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: kube-logging
  labels:
    app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
      - name: kibana
        image: docker.elastic.co/kibana/kibana:7.2.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
        # Kibana 7.x reads ELASTICSEARCH_HOSTS; ELASTICSEARCH_URL was the
        # 6.x name and is not recognized by 7.2.0. The kibana.yml mounted
        # below sets elasticsearch.hosts and takes precedence anyway, but
        # use the correct variable for this image version.
        - name: ELASTICSEARCH_HOSTS
          value: http://elasticsearch.kube-logging.svc.cluster.local:9200
        ports:
        - containerPort: 5601
        volumeMounts:
        # Mounting over /usr/share/kibana/config/ replaces the entire
        # config directory with the ConfigMap contents (kibana.yml).
        - name: kibana-config
          mountPath: /usr/share/kibana/config/
      volumes:
      - name: kibana-config
        configMap:
          name: kibana-configmap
汉化kibana
创建 kibana.yml 文件
vim kibana.yml
# Kibana server settings, delivered to the pod via the kibana-configmap.
server.name: kibana
# "0" is shorthand for 0.0.0.0 — listen on all interfaces inside the pod.
server.host: "0"
# In-cluster Elasticsearch endpoint (the headless "elasticsearch" Service
# in the same kube-logging namespace).
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true
# Switch the Kibana UI language to Simplified Chinese.
i18n.locale: "zh-CN"
命令行创建configmap
kubectl -n kube-logging create configmap kibana-configmap --from-file=kibana.yml=./kibana.yml
提交 kibana资源清单
kubectl apply -f kibana.yaml
查看
[root@k8s-master efk]# kubectl get pods -n kube-logging
NAME READY STATUS RESTARTS AGE
es-cluster-0 1/1 Running 0 45m
es-cluster-1 1/1 Running 0 80m
kibana-7645484fc7-58bfd 1/1 Running 2 (44m ago) 51m
[root@k8s-master efk]# kubectl get svc -n kube-logging
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
elasticsearch ClusterIP None <none> 9200/TCP,9300/TCP 82m
kibana NodePort 10.100.200.93 <none> 5601:32107/TCP 77m
master节点执行
当es的po和kibana安装好之后在master节点执行
kubectl port-forward --address 192.168.158.33 es-cluster-0 9200:9200 --namespace=kube-logging
访问网页
192.168.158.33:9200
查看暴露端口号
[root@k8s-master ~]# kubectl -n kube-logging get svc -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
elasticsearch ClusterIP None <none> 9200/TCP,9300/TCP 102m app=elasticsearch
kibana NodePort 10.100.200.93 <none> 5601:32107/TCP 97m app=kibana
网页访问
192.168.158.33:32107
安装fluentd组件
查看污点并复制污点
修改配置文件
vim fluentd.yaml
修改key的污点
在 tolerations 字段中加上 operator: Exists（注意冒号后要有空格）
kubectl apply -f fluentd.yaml
网页访问
192.168.158.33:32107