From d0104a31198e542b81e540a3abe1ad34d2272873 Mon Sep 17 00:00:00 2001
From: Trygve Laugstøl
Date: Sun, 28 Jul 2019 06:58:17 +0200
Subject: wip

---
 README.md                              |   9 ++
 logging/README.md                      |  21 ++++
 logging/counter.yaml                   |   9 ++
 logging/elasticsearch-service.yaml     |  16 +++
 logging/elasticsearch-statefulset.yaml |  77 +++++++++++++
 logging/fluentd.yaml                   |  98 ++++++++++++++++
 logging/fluentd/fluent.conf            |  36 ++++++
 logging/fluentd/kubernetes.conf        | 201 +++++++++++++++++++++++++++++++++
 logging/fluentd/systemd.conf           |  53 +++++++++
 logging/kibana.yaml                    |  43 +++++++
 logging/kube-logging.yaml              |   4 +
 logging/local-path-storage.yaml        | 107 ++++++++++++++++++
 12 files changed, 674 insertions(+)
 create mode 100644 logging/README.md
 create mode 100644 logging/counter.yaml
 create mode 100644 logging/elasticsearch-service.yaml
 create mode 100644 logging/elasticsearch-statefulset.yaml
 create mode 100644 logging/fluentd.yaml
 create mode 100644 logging/fluentd/fluent.conf
 create mode 100644 logging/fluentd/kubernetes.conf
 create mode 100644 logging/fluentd/systemd.conf
 create mode 100644 logging/kibana.yaml
 create mode 100644 logging/kube-logging.yaml
 create mode 100644 logging/local-path-storage.yaml

diff --git a/README.md b/README.md
index 792e6ed..43ee7fd 100644
--- a/README.md
+++ b/README.md
@@ -24,3 +24,12 @@ Getting bearer token
 
     kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
 
+# Local storage
+
+* https://github.com/rancher/local-path-provisioner
+
+    kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
+
+# Logging
+
+* https://www.digitalocean.com/community/tutorials/how-to-set-up-an-elasticsearch-fluentd-and-kibana-efk-logging-stack-on-kubernetes
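Before going further it is worth checking that the provisioner actually came
up and registered its storage class (a small sketch; both names are taken
from logging/local-path-storage.yaml further down in this patch):

    kubectl get storageclass local-path
    kubectl -n local-path-storage get pods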
diff --git a/logging/README.md b/logging/README.md
new file mode 100644
index 0000000..4cd2e6c
--- /dev/null
+++ b/logging/README.md
@@ -0,0 +1,21 @@
+First, install *local-path-provisioner*.
+
+Then:
+
+    kubectl create -f kube-logging.yaml
+    kubectl create -f elasticsearch-service.yaml
+    kubectl create -f elasticsearch-statefulset.yaml
+    kubectl create -f kibana.yaml
+    kubectl create -f fluentd.yaml
+
+To get some logging output:
+
+    kubectl create -f counter.yaml
+
+To fix the broken config (the ConfigMap has to live in *kube-logging*, the
+namespace of the DaemonSet that mounts it):
+
+    kubectl -n kube-logging create configmap fluentd-config \
+        --from-file fluentd/kubernetes.conf \
+        --from-file fluentd/fluent.conf \
+        --from-file fluentd/systemd.conf
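Note that fluentd only reads its configuration at start-up, so recreating the
ConfigMap is not enough by itself; the DaemonSet's pods have to be restarted
to pick up the fix (a sketch, using the `app: fluentd` label from
fluentd.yaml below):

    kubectl -n kube-logging delete pod -l app=fluentd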
diff --git a/logging/counter.yaml b/logging/counter.yaml
new file mode 100644
index 0000000..e0d1085
--- /dev/null
+++ b/logging/counter.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: counter
+spec:
+  containers:
+  - name: count
+    image: busybox
+    args: [/bin/sh, -c, 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done']
diff --git a/logging/elasticsearch-service.yaml b/logging/elasticsearch-service.yaml
new file mode 100644
index 0000000..b4c8bbd
--- /dev/null
+++ b/logging/elasticsearch-service.yaml
@@ -0,0 +1,16 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: elasticsearch
+  namespace: kube-logging
+  labels:
+    app: elasticsearch
+spec:
+  selector:
+    app: elasticsearch
+  clusterIP: None
+  ports:
+    - port: 9200
+      name: rest
+    - port: 9300
+      name: inter-node
diff --git a/logging/elasticsearch-statefulset.yaml b/logging/elasticsearch-statefulset.yaml
new file mode 100644
index 0000000..543b06a
--- /dev/null
+++ b/logging/elasticsearch-statefulset.yaml
@@ -0,0 +1,77 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: es-cluster
+  namespace: kube-logging
+spec:
+  serviceName: elasticsearch
+  replicas: 3
+  selector:
+    matchLabels:
+      app: elasticsearch
+  template:
+    metadata:
+      labels:
+        app: elasticsearch
+    spec:
+      containers:
+      - name: elasticsearch
+        image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
+        resources:
+          limits:
+            cpu: 1000m
+          requests:
+            cpu: 100m
+        ports:
+        - containerPort: 9200
+          name: rest
+          protocol: TCP
+        - containerPort: 9300
+          name: inter-node
+          protocol: TCP
+        volumeMounts:
+        - name: data
+          mountPath: /usr/share/elasticsearch/data
+        env:
+        - name: cluster.name
+          value: k8s-logs
+        - name: node.name
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: discovery.seed_hosts
+          value: "es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch"
+        - name: cluster.initial_master_nodes
+          value: "es-cluster-0,es-cluster-1,es-cluster-2"
+        - name: ES_JAVA_OPTS
+          value: "-Xms512m -Xmx512m"
+      initContainers:
+      - name: fix-permissions
+        image: busybox
+        command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - name: data
+          mountPath: /usr/share/elasticsearch/data
+      - name: increase-vm-max-map
+        image: busybox
+        command: ["sysctl", "-w", "vm.max_map_count=262144"]
+        securityContext:
+          privileged: true
+      - name: increase-fd-ulimit
+        image: busybox
+        command: ["sh", "-c", "ulimit -n 65536"]
+        securityContext:
+          privileged: true
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+      labels:
+        app: elasticsearch
+    spec:
+      accessModes: [ "ReadWriteOnce" ]
+      storageClassName: local-path
+      resources:
+        requests:
+          storage: 10Gi
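Once the StatefulSet has settled, the claims stamped out from the
volumeClaimTemplates above should be Bound through the local-path class, and
the cluster should report green health (a sketch; it assumes kubectl is
recent enough to port-forward against a service):

    kubectl -n kube-logging get pvc -l app=elasticsearch
    kubectl -n kube-logging port-forward svc/elasticsearch 9200:9200 &
    curl 'http://localhost:9200/_cluster/health?pretty'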
diff --git a/logging/fluentd.yaml b/logging/fluentd.yaml
new file mode 100644
index 0000000..089c184
--- /dev/null
+++ b/logging/fluentd.yaml
@@ -0,0 +1,98 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: fluentd
+  namespace: kube-logging
+  labels:
+    app: fluentd
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: fluentd
+  labels:
+    app: fluentd
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: fluentd
+roleRef:
+  kind: ClusterRole
+  name: fluentd
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: fluentd
+  namespace: kube-logging
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: fluentd
+  namespace: kube-logging
+  labels:
+    app: fluentd
+spec:
+  selector:
+    matchLabels:
+      app: fluentd
+  template:
+    metadata:
+      labels:
+        app: fluentd
+    spec:
+      serviceAccount: fluentd
+      serviceAccountName: fluentd
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      containers:
+      - name: fluentd
+        image: fluent/fluentd-kubernetes-daemonset:v1.4.2-debian-elasticsearch-1.1
+        env:
+          - name: FLUENT_ELASTICSEARCH_HOST
+            value: "elasticsearch.kube-logging.svc.cluster.local"
+          - name: FLUENT_ELASTICSEARCH_PORT
+            value: "9200"
+          - name: FLUENT_ELASTICSEARCH_SCHEME
+            value: "http"
+          - name: FLUENTD_SYSTEMD_CONF
+            value: disable
+          - name: FLUENT_ELASTICSEARCH_SED_DISABLE
+            value: yeah
+        resources:
+          limits:
+            memory: 512Mi
+          requests:
+            cpu: 100m
+            memory: 200Mi
+        volumeMounts:
+        - name: fluentd-config
+          mountPath: /fluentd/etc/
+        - name: varlog
+          mountPath: /var/log
+        - name: varlibdockercontainers
+          mountPath: /var/lib/docker/containers
+          readOnly: true
+      terminationGracePeriodSeconds: 30
+      volumes:
+      - name: fluentd-config
+        configMap:
+          name: fluentd-config
+      - name: varlog
+        hostPath:
+          path: /var/log
+      - name: varlibdockercontainers
+        hostPath:
+          path: /var/lib/docker/containers
diff --git a/logging/fluentd/fluent.conf b/logging/fluentd/fluent.conf
new file mode 100644
index 0000000..f10d16c
--- /dev/null
+++ b/logging/fluentd/fluent.conf
@@ -0,0 +1,36 @@
+# FIXED
+
+@include "#{ENV['FLUENTD_SYSTEMD_CONF'] || 'systemd'}.conf"
+@include "#{ENV['FLUENTD_PROMETHEUS_CONF'] || 'prometheus'}.conf"
+@include kubernetes.conf
+@include conf.d/*.conf
+
+<match **>
+  @type elasticsearch
+  @id out_es
+  @log_level info
+  include_tag_key true
+  host "#{ENV['FLUENT_ELASTICSEARCH_HOST']}"
+  port "#{ENV['FLUENT_ELASTICSEARCH_PORT']}"
+  path "#{ENV['FLUENT_ELASTICSEARCH_PATH']}"
+  scheme "#{ENV['FLUENT_ELASTICSEARCH_SCHEME'] || 'http'}"
+  ssl_verify "#{ENV['FLUENT_ELASTICSEARCH_SSL_VERIFY'] || 'true'}"
+  ssl_version "#{ENV['FLUENT_ELASTICSEARCH_SSL_VERSION'] || 'TLSv1'}"
+  reload_connections "#{ENV['FLUENT_ELASTICSEARCH_RELOAD_CONNECTIONS'] || 'false'}"
+  reconnect_on_error "#{ENV['FLUENT_ELASTICSEARCH_RECONNECT_ON_ERROR'] || 'true'}"
+  reload_on_failure "#{ENV['FLUENT_ELASTICSEARCH_RELOAD_ON_FAILURE'] || 'true'}"
+  log_es_400_reason "#{ENV['FLUENT_ELASTICSEARCH_LOG_ES_400_REASON'] || 'false'}"
+  logstash_prefix "#{ENV['FLUENT_ELASTICSEARCH_LOGSTASH_PREFIX'] || 'logstash'}"
+  logstash_format "#{ENV['FLUENT_ELASTICSEARCH_LOGSTASH_FORMAT'] || 'true'}"
+  index_name "#{ENV['FLUENT_ELASTICSEARCH_LOGSTASH_INDEX_NAME'] || 'logstash'}"
+  type_name "#{ENV['FLUENT_ELASTICSEARCH_LOGSTASH_TYPE_NAME'] || 'fluentd'}"
+  <buffer>
+    flush_thread_count "#{ENV['FLUENT_ELASTICSEARCH_BUFFER_FLUSH_THREAD_COUNT'] || '8'}"
+    flush_interval "#{ENV['FLUENT_ELASTICSEARCH_BUFFER_FLUSH_INTERVAL'] || '5s'}"
+    chunk_limit_size "#{ENV['FLUENT_ELASTICSEARCH_BUFFER_CHUNK_LIMIT_SIZE'] || '2M'}"
+    queue_limit_length "#{ENV['FLUENT_ELASTICSEARCH_BUFFER_QUEUE_LIMIT_LENGTH'] || '32'}"
+    retry_max_interval "#{ENV['FLUENT_ELASTICSEARCH_BUFFER_RETRY_MAX_INTERVAL'] || '30'}"
+    retry_forever true
+  </buffer>
+</match>
+
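The quickest health signal for the DaemonSet is fluentd's own output;
connection errors against Elasticsearch show up there within seconds (a
sketch; `kubectl logs` accepts a daemonset/ reference on reasonably recent
clients):

    kubectl -n kube-logging get daemonset fluentd
    kubectl -n kube-logging logs daemonset/fluentd | tail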
diff --git a/logging/fluentd/kubernetes.conf b/logging/fluentd/kubernetes.conf
new file mode 100644
index 0000000..78465d3
--- /dev/null
+++ b/logging/fluentd/kubernetes.conf
@@ -0,0 +1,201 @@
+# FIXED
+
+<match fluent.**>
+  @type null
+</match>
+
+<source>
+  @type tail
+  @id in_tail_container_logs
+  path /var/log/containers/*.log
+  exclude_path ["/var/log/containers/fluentd*"]
+  pos_file /var/log/fluentd-containers.log.pos
+  tag kubernetes.*
+  read_from_head true
+  <parse>
+    @type multi_format
+    <pattern>
+      format json
+      time_format %Y-%m-%dT%H:%M:%S.%NZ
+    </pattern>
+    <pattern>
+      format regexp
+      time_format %Y-%m-%dT%H:%M:%S.%N%:z
+      expression /^(?<time>.+) (?<stream>stdout|stderr)( (?<logtag>.))? (?<log>.*)$/
+    </pattern>
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_minion
+  path /var/log/salt/minion
+  pos_file /var/log/fluentd-salt.pos
+  tag salt
+  <parse>
+    @type regexp
+    expression /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
+    time_format %Y-%m-%d %H:%M:%S
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_startupscript
+  path /var/log/startupscript.log
+  pos_file /var/log/fluentd-startupscript.log.pos
+  tag startupscript
+  <parse>
+    @type syslog
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_docker
+  path /var/log/docker.log
+  pos_file /var/log/fluentd-docker.log.pos
+  tag docker
+  <parse>
+    @type regexp
+    expression /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_etcd
+  path /var/log/etcd.log
+  pos_file /var/log/fluentd-etcd.log.pos
+  tag etcd
+  <parse>
+    @type none
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_kubelet
+  multiline_flush_interval 5s
+  path /var/log/kubelet.log
+  pos_file /var/log/fluentd-kubelet.log.pos
+  tag kubelet
+  <parse>
+    @type kubernetes
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_kube_proxy
+  multiline_flush_interval 5s
+  path /var/log/kube-proxy.log
+  pos_file /var/log/fluentd-kube-proxy.log.pos
+  tag kube-proxy
+  <parse>
+    @type kubernetes
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_kube_apiserver
+  multiline_flush_interval 5s
+  path /var/log/kube-apiserver.log
+  pos_file /var/log/fluentd-kube-apiserver.log.pos
+  tag kube-apiserver
+  <parse>
+    @type kubernetes
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_kube_controller_manager
+  multiline_flush_interval 5s
+  path /var/log/kube-controller-manager.log
+  pos_file /var/log/fluentd-kube-controller-manager.log.pos
+  tag kube-controller-manager
+  <parse>
+    @type kubernetes
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_kube_scheduler
+  multiline_flush_interval 5s
+  path /var/log/kube-scheduler.log
+  pos_file /var/log/fluentd-kube-scheduler.log.pos
+  tag kube-scheduler
+  <parse>
+    @type kubernetes
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_rescheduler
+  multiline_flush_interval 5s
+  path /var/log/rescheduler.log
+  pos_file /var/log/fluentd-rescheduler.log.pos
+  tag rescheduler
+  <parse>
+    @type kubernetes
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_glbc
+  multiline_flush_interval 5s
+  path /var/log/glbc.log
+  pos_file /var/log/fluentd-glbc.log.pos
+  tag glbc
+  <parse>
+    @type kubernetes
+  </parse>
+</source>
+
+<source>
+  @type tail
+  @id in_tail_cluster_autoscaler
+  multiline_flush_interval 5s
+  path /var/log/cluster-autoscaler.log
+  pos_file /var/log/fluentd-cluster-autoscaler.log.pos
+  tag cluster-autoscaler
+  <parse>
+    @type kubernetes
+  </parse>
+</source>
+
+# Example:
+# 2017-02-09T00:15:57.992775796Z AUDIT: id="90c73c7c-97d6-4b65-9461-f94606ff825f" ip="104.132.1.72" method="GET" user="kubecfg" as="<self>" asgroups="<lookup>" namespace="default" uri="/api/v1/namespaces/default/pods"
+# 2017-02-09T00:15:57.993528822Z AUDIT: id="90c73c7c-97d6-4b65-9461-f94606ff825f" response="200"
+<source>
+  @type tail
+  @id in_tail_kube_apiserver_audit
+  multiline_flush_interval 5s
+  path /var/log/kubernetes/kube-apiserver-audit.log
+  pos_file /var/log/kube-apiserver-audit.log.pos
+  tag kube-apiserver-audit
+  <parse>
+    @type multiline
+    format_firstline /^\S+\s+AUDIT:/
+    # Fields must be explicitly captured by name to be parsed into the record.
+    # Fields may not always be present, and order may change, so this just looks
+    # for a list of key="\"quoted\" value" pairs separated by spaces.
+    # Unknown fields are ignored.
+    # Note: We can't separate query/response lines as format1/format2 because
+    # they don't always come one after the other for a given query.
+    format1 /^(?<time>\S+) AUDIT:(?: (?:id="(?<id>(?:[^"\\]|\\.)*)"|ip="(?<ip>(?:[^"\\]|\\.)*)"|method="(?<method>(?:[^"\\]|\\.)*)"|user="(?<user>(?:[^"\\]|\\.)*)"|groups="(?<groups>(?:[^"\\]|\\.)*)"|as="(?<as>(?:[^"\\]|\\.)*)"|asgroups="(?<asgroups>(?:[^"\\]|\\.)*)"|namespace="(?<namespace>(?:[^"\\]|\\.)*)"|uri="(?<uri>(?:[^"\\]|\\.)*)"|response="(?<response>(?:[^"\\]|\\.)*)"|\w+="(?:[^"\\]|\\.)*"))*/
+    time_format %Y-%m-%dT%T.%L%Z
+  </parse>
+</source>
+
+<filter kubernetes.**>
+  @type kubernetes_metadata
+  @id filter_kube_metadata
+</filter>
+
diff --git a/logging/fluentd/systemd.conf b/logging/fluentd/systemd.conf
new file mode 100644
index 0000000..0203734
--- /dev/null
+++ b/logging/fluentd/systemd.conf
@@ -0,0 +1,53 @@
+# AUTOMATICALLY GENERATED
+# DO NOT EDIT THIS FILE DIRECTLY, USE /templates/conf/systemd.conf.erb
+
+# Logs from systemd-journal for interesting services.
+<source>
+  @type systemd
+  @id in_systemd_kubelet
+  matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
+  <storage>
+    @type local
+    persistent true
+    path /var/log/fluentd-journald-kubelet-cursor.json
+  </storage>
+  <entry>
+    fields_strip_underscores true
+  </entry>
+  read_from_head true
+  tag kubelet
+</source>
+
+# Logs from docker-systemd
+<source>
+  @type systemd
+  @id in_systemd_docker
+  matches [{ "_SYSTEMD_UNIT": "docker.service" }]
+  <storage>
+    @type local
+    persistent true
+    path /var/log/fluentd-journald-docker-cursor.json
+  </storage>
+  <entry>
+    fields_strip_underscores true
+  </entry>
+  read_from_head true
+  tag docker.systemd
+</source>
+
+# Logs from systemd-journal for interesting services.
+<source>
+  @type systemd
+  @id in_systemd_bootkube
+  matches [{ "_SYSTEMD_UNIT": "bootkube.service" }]
+  <storage>
+    @type local
+    persistent true
+    path /var/log/fluentd-journald-bootkube-cursor.json
+  </storage>
+  <entry>
+    fields_strip_underscores true
+  </entry>
+  read_from_head true
+  tag bootkube
+</source>
diff --git a/logging/kibana.yaml b/logging/kibana.yaml
new file mode 100644
index 0000000..3fe9839
--- /dev/null
+++ b/logging/kibana.yaml
@@ -0,0 +1,43 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: kibana
+  namespace: kube-logging
+  labels:
+    app: kibana
+spec:
+  ports:
+  - port: 5601
+  selector:
+    app: kibana
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kibana
+  namespace: kube-logging
+  labels:
+    app: kibana
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kibana
+  template:
+    metadata:
+      labels:
+        app: kibana
+    spec:
+      containers:
+      - name: kibana
+        image: docker.elastic.co/kibana/kibana:7.2.0
+        resources:
+          limits:
+            cpu: 1000m
+          requests:
+            cpu: 100m
+        env:
+        - name: ELASTICSEARCH_URL
+          value: http://elasticsearch:9200
+        ports:
+        - containerPort: 5601
diff --git a/logging/kube-logging.yaml b/logging/kube-logging.yaml
new file mode 100644
index 0000000..805477b
--- /dev/null
+++ b/logging/kube-logging.yaml
@@ -0,0 +1,4 @@
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: kube-logging
diff --git a/logging/local-path-storage.yaml b/logging/local-path-storage.yaml
new file mode 100644
index 0000000..7d717bd
--- /dev/null
+++ b/logging/local-path-storage.yaml
@@ -0,0 +1,107 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: local-path-storage
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: local-path-provisioner-service-account
+  namespace: local-path-storage
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: local-path-provisioner-role
+  namespace: local-path-storage
+rules:
+- apiGroups: [""]
+  resources: ["nodes", "persistentvolumeclaims"]
+  verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+  resources: ["endpoints", "persistentvolumes", "pods"]
+  verbs: ["*"]
+- apiGroups: [""]
+  resources: ["events"]
+  verbs: ["create", "patch"]
+- apiGroups: ["storage.k8s.io"]
+  resources: ["storageclasses"]
+  verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: local-path-provisioner-bind
+  namespace: local-path-storage
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: local-path-provisioner-role
+subjects:
+- kind: ServiceAccount
+  name: local-path-provisioner-service-account
+  namespace: local-path-storage
+---
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  name: local-path-provisioner
+  namespace: local-path-storage
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: local-path-provisioner
+  template:
+    metadata:
+      labels:
+        app: local-path-provisioner
+    spec:
+      serviceAccountName: local-path-provisioner-service-account
+      containers:
+      - name: local-path-provisioner
+        image: rancher/local-path-provisioner:v0.0.9
+        imagePullPolicy: IfNotPresent
+        command:
+        - local-path-provisioner
+        - --debug
+        - start
+        - --config
+        - /etc/config/config.json
+        volumeMounts:
+        - name: config-volume
+          mountPath: /etc/config/
+        env:
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+      volumes:
+      - name: config-volume
+        configMap:
+          name: local-path-config
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: local-path
+provisioner: rancher.io/local-path
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: Delete
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: local-path-config
+  namespace: local-path-storage
+data:
+  config.json: |-
+    {
+      "nodePathMap":[
+      {
+        "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
+        "paths":["/opt/local-path-provisioner"]
+      }
+      ]
+    }
+