- sidecar: the sidecar pattern
  Characteristics:
  The sidecar approach injects an additional container into the existing Pod, and this new container provides the auxiliary functionality (see the sketch after this list). Drawback: as the number of Pods in the K8s cluster grows, so does resource consumption, because every Pod runs one extra container.
- daemonset: the DaemonSet pattern
  Characteristics:
  Every worker node runs exactly one Pod that provides the auxiliary functionality, which is more resource-efficient than the sidecar pattern. Drawback: you need to learn the K8s RBAC authorization model.
- custom: built into the application
  Characteristics:
  The software itself supports log collection, data analysis, traffic management, load balancing, and so on; operators only need to supply the server-side IP address. Drawback: developers tend to focus on the business logic and may not think the operational features through.
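For contrast, here is a minimal sketch of the sidecar pattern. All names, images, and paths are illustrative assumptions, not part of this project, and a real setup would also mount a filebeat.yml config for the sidecar:

apiVersion: v1
kind: Pod
metadata:
  name: app-with-log-sidecar
spec:
  containers:
  - name: app
    image: nginx:1.25                        # hypothetical business container
    volumeMounts:
    - name: applogs
      mountPath: /var/log/nginx              # the app writes its logs here
  - name: filebeat-sidecar                   # the injected helper container
    image: docker.elastic.co/beats/filebeat:7.17.28
    volumeMounts:
    - name: applogs
      mountPath: /var/log/nginx              # reads the same files
      readOnly: true
  volumes:
  - name: applogs
    emptyDir: {}                             # shared by both containers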
1 ELFK + K8s hands-on project (DaemonSet)
1.1 Map the Kafka cluster to a Service inside the K8s cluster
cat 01-ep-svc.yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: kafka-k8s
subsets:
- addresses:
  - ip: 10.168.10.91
  - ip: 10.168.10.92
  - ip: 10.168.10.93
  ports:
  - port: 9092
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-k8s
spec:
  type: ClusterIP
  ports:
  - port: 9092
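To sanity-check the mapping, apply the manifest and verify that the selector-less Service picked up the manually defined Endpoints; the busybox test Pod below is just one illustrative way to test in-cluster DNS resolution:

kubectl apply -f 01-ep-svc.yaml
kubectl get svc,ep kafka-k8s
# run a throwaway Pod and resolve the Service name from inside the cluster
kubectl run dns-test --rm -it --image=busybox:1.36 --restart=Never -- nslookup kafka-k8s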
1.2 Create a service account and grant it permissions
kubectl apply -f 02-sa-rbac.yaml
serviceaccount/filebeat created
clusterrolebinding.rbac.authorization.k8s.io/filebeat created
clusterrole.rbac.authorization.k8s.io/filebeat created
[root@master-231 /oldboyedu/manifests/project/02-es-k8s]# cat 02-sa-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: default
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""]
  resources:
  - namespaces
  - pods
  - nodes
  verbs:
  - get
  - watch
  - list
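A quick way to sanity-check the binding is kubectl auth can-i, impersonating the new service account:

# both should print "yes"
kubectl auth can-i list pods --as=system:serviceaccount:default:filebeat
kubectl auth can-i get nodes --as=system:serviceaccount:default:filebeat
# anything outside the ClusterRole should print "no"
kubectl auth can-i delete pods --as=system:serviceaccount:default:filebeat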
1.3 Add the Filebeat configuration files
[root@master-231 /oldboyedu/manifests/project/02-es-k8s]# cat 03-cm-filebeat-k8s.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
data:
  filebeat.yml: |-
    filebeat.config:
      inputs:
        path: ${path.config}/inputs.d/*.yml
        reload.enabled: true
      modules:
        path: ${path.config}/modules.d/*.yml
        reload.enabled: true
    output.kafka:
      hosts:
      - kafka-k8s:9092
      topic: kafka-k8s
    # output.elasticsearch:
    #   hosts: ['https://oldboyedu-es7:9092']
    #   # hosts: ['oldboyedu-es7.default.svc.oldboyedu.com:9200']
    #   index: 'kafka-k8s-%{+yyyy.MM.dd}'
    #   # Skip certificate verification; valid values: full (default), strict, certificate, none
    #   # Reference:
    #   # https://www.elastic.co/guide/en/beats/filebeat/7.17/configuration-ssl.html#client-verification-mode
    #   #ssl.verification_mode: none
    #   #api_key: "IgF_RJcBB2nKmCHczQZS:fZQHRB8vR9SoFRNKj8ynfw"
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-inputs
data:
  kubernetes.yml: |-
    - type: docker
      containers.ids:
      - "*"
      processors:
      - add_kubernetes_metadata:
          in_cluster: true
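With the add_kubernetes_metadata processor enabled, each Docker log line is enriched with Pod metadata before it is shipped. A trimmed, purely illustrative event (all values made up):

{
  "message": "GET / HTTP/1.1 200",
  "kubernetes": {
    "namespace": "default",
    "pod": { "name": "deploy-xiuxian-568cf47956-dcl29" },
    "node": { "name": "worker-232" },
    "labels": { "app": "xiuxian" }
  }
}

These kubernetes.* fields are what the KQL query in the verification step below filters on.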
1.4 Ship container logs to the Kafka cluster with Filebeat
kubectl apply -f 04-ds-filebeat.yaml
daemonset.apps/filebeat created
[root@master-231 /oldboyedu/manifests/project/02-es-k8s]# cat 04-ds-filebeat.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
        operator: Exists
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      containers:
      - name: filebeat
        # image: docker.elastic.co/beats/filebeat:7.17.28
        image: harbor.cmy.cn/efk/fibeat-cmy:v7.17.25
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: inputs
          mountPath: /usr/share/filebeat/inputs.d
          readOnly: true
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: inputs
        configMap:
          defaultMode: 0600
          name: filebeat-inputs
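After applying the manifest, confirm that the DaemonSet scheduled exactly one Pod per node (the toleration above also allows the master):

kubectl get ds filebeat
kubectl get pods -l k8s-app=filebeat -o wide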
Verification
For example, filter the data in Kibana with a KQL query: kubernetes.namespace:"default" and kubernetes.pod.name : "deploy-xiuxian-568cf47956-dcl29"
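Before wiring up Logstash, you can also confirm that events are actually landing in the topic. A consumer sketch, assuming the Kafka CLI scripts are on the PATH of one of the brokers:

kafka-console-consumer.sh \
  --bootstrap-server 10.168.10.91:9092 \
  --topic kafka-k8s \
  --from-beginning \
  --max-messages 5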
1.5 Start Logstash to push the logs into ES
cat > /etc/logstash/conf.d/k8s-to-es.conf <<EOF
input {
  kafka {
    bootstrap_servers => "10.168.10.91:9092,10.168.10.92:9092,10.168.10.93:9092"
    topics => ["kafka-k8s"]
    group_id => "kafka-001"
    auto_offset_reset => "earliest"
  }
}

filter {
  json {
    source => "message"
  }
  mutate {
    remove_field => [ "agent","@version","ecs","input","log","event","host","fileset","service" ]
  }
}

output {
  # stdout {
  #   codec => rubydebug
  # }
  elasticsearch {
    hosts => ["10.168.10.91:9200","10.168.10.92:9200","10.168.10.93:9200"]
    index => "k8s-filebeat-kafka-logstach-to-es"
    api_key => "IgF_RJcBB2nKmCHczQZS:fZQHRB8vR9SoFRNKj8ynfw"
    ssl => true
    ssl_certificate_verification => false
  }
}
EOF
logstash -rf /etc/logstash/conf.d/k8s-to-es.conf
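Once Logstash is running, verify that the index shows up in ES. A curl sketch, assuming the same API key as in the config above (the ApiKey header takes base64 of "id:api_key") and a self-signed certificate (hence -k):

curl -k \
  -H "Authorization: ApiKey $(echo -n 'IgF_RJcBB2nKmCHczQZS:fZQHRB8vR9SoFRNKj8ynfw' | base64 -w0)" \
  "https://10.168.10.91:9200/_cat/indices/k8s-filebeat-kafka-logstach-to-es?v"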