| author | lixikang <[email protected]> | 2020-05-25 16:48:37 +0800 |
|---|---|---|
| committer | lixikang <[email protected]> | 2020-05-25 16:48:37 +0800 |
| commit | fc431fa3bddfb8ba1645accce4e7ccb74161485c | |
| tree | 18901735e8a6bf5909c5a83ab0c02976b2831de8 | /zk-kafka |
| parent | 0d1396f415a0c0b53e0e32d0672ba61383f5f96c | |
Initial k8s version of storm
Diffstat (limited to 'zk-kafka')
| -rwxr-xr-x | zk-kafka/check.sh | 5 |
| -rwxr-xr-x | zk-kafka/host.sh | 13 |
| -rw-r--r-- | zk-kafka/kafka-available/151kafka.yaml | 98 |
| -rw-r--r-- | zk-kafka/kafka-svc.yaml | 13 |
| -rw-r--r-- | zk-kafka/kafka.yaml | 85 |
| -rw-r--r-- | zk-kafka/pv.yaml | 89 |
| -rw-r--r-- | zk-kafka/zk.yaml | 157 |
7 files changed, 460 insertions, 0 deletions
diff --git a/zk-kafka/check.sh b/zk-kafka/check.sh
new file mode 100755
index 0000000..5dd58f1
--- /dev/null
+++ b/zk-kafka/check.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+for i in 0 1 2 ;do
+kubectl exec zk-$i -c kubernetes-zookeeper zkServer.sh status
+done;
+
diff --git a/zk-kafka/host.sh b/zk-kafka/host.sh
new file mode 100755
index 0000000..ba59e18
--- /dev/null
+++ b/zk-kafka/host.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+HOSTNAME=`hostname -s`
+if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
+  ORD=${BASH_REMATCH[2]}
+  PORT=$((ORD + 9092))
+  # 12.345.67.8 is the IP of the LB
+  export KAFKA_CFG_ADVERTISED_LISTENERS="PLAINTEXT://192.168.40.127:$PORT"
+else
+  echo "Failed to get index from hostname $HOST"
+  exit 1
+fi
+
+echo $KAFKA_CFG_ADVERTISED_LISTENERS
diff --git a/zk-kafka/kafka-available/151kafka.yaml b/zk-kafka/kafka-available/151kafka.yaml
new file mode 100644
index 0000000..ae5649e
--- /dev/null
+++ b/zk-kafka/kafka-available/151kafka.yaml
@@ -0,0 +1,98 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kafka-svc
+spec:
+  ports:
+  - port: 9093
+    targetPort: 9093
+    name: server
+    protocol: TCP
+    nodePort: 9093
+  type: NodePort
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: kafka
+spec:
+  serviceName: kafka-svc
+  replicas: 3
+  selector:
+    matchLabels:
+      app: kafka
+  template:
+    metadata:
+      labels:
+        app: kafka
+    spec:
+      hostAliases:
+      - ip: "192.168.40.127"
+        hostnames:
+        - "bigdata-127"
+      - ip: "192.168.40.151"
+        hostnames:
+        - "bigdata-151"
+      - ip: "192.168.40.152"
+        hostnames:
+        - "bigdata-152"
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: "app"
+                operator: In
+                values:
+                - kafka
+            topologyKey: "kubernetes.io/hostname"
+        podAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 1
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: "app"
+                  operator: In
+                  values:
+                  - zk
+              topologyKey: "kubernetes.io/hostname"
+      terminationGracePeriodSeconds: 300
+      containers:
+      - name: k8skafka
+        securityContext:
+          runAsUser: 0
+        imagePullPolicy: Always
+        image: 192.168.40.153:9080/k8s/kafka:test3
+        resources:
+          requests:
+            memory: "1Gi"
+            cpu: 500m
+        ports:
+        - containerPort: 9093
+          hostPort: 9093
+        env:
+        - name: KA_PORT
+          value: "9093"
+        - name: HOST_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: ZK_DIR
+          value: "zk-0.zk-hs.default.svc.cluster.local:2182/kafka-test"
+        volumeMounts:
+        - name: datadir
+          mountPath: /opt/kafka-logs
+      securityContext:
+        runAsUser: 1000
+        fsGroup: 1000
+  volumeClaimTemplates:
+  - metadata:
+      name: datadir
+    spec:
+      accessModes: [ "ReadWriteMany" ]
+      storageClassName: nfs
+      resources:
+        requests:
+          storage: 5Gi
diff --git a/zk-kafka/kafka-svc.yaml b/zk-kafka/kafka-svc.yaml
new file mode 100644
index 0000000..383665c
--- /dev/null
+++ b/zk-kafka/kafka-svc.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: kafka-svc
+  labels:
+    app: kafka
+spec:
+  ports:
+  - port: 9093
+    name: server
+  clusterIP: None
+  selector:
+    app: kafka
diff --git a/zk-kafka/kafka.yaml b/zk-kafka/kafka.yaml
new file mode 100644
index 0000000..5e145dc
--- /dev/null
+++ b/zk-kafka/kafka.yaml
@@ -0,0 +1,85 @@
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: kafka
+spec:
+  serviceName: kafka-svc
+  replicas: 3
+  selector:
+    matchLabels:
+      app: kafka
+  template:
+    metadata:
+      labels:
+        app: kafka
+    spec:
+      hostAliases:
+      - ip: "192.168.40.127"
+        hostnames:
+        - "bigdata-127"
+      - ip: "192.168.40.151"
+        hostnames:
+        - "bigdata-151"
+      - ip: "192.168.40.152"
+        hostnames:
+        - "bigdata-152"
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: "app"
+                operator: In
+                values:
+                - kafka
+            topologyKey: "kubernetes.io/hostname"
+        podAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 1
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: "app"
+                  operator: In
+                  values:
+                  - zk
+              topologyKey: "kubernetes.io/hostname"
+      terminationGracePeriodSeconds: 300
+      containers:
+      - name: k8skafka
+        securityContext:
+          runAsUser: 0
+        imagePullPolicy: Always
+        image: 192.168.40.153:9080/k8s/kafka:test3
+        resources:
+          requests:
+            memory: "10Gi"
+            cpu: 500m
+        ports:
+        - containerPort: 9093
+          hostPort: 9093
+        env:
+        - name: KA_PORT
+          value: "9093"
+        - name: HOST_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: ZK_DIR
+          value: "zk-0.zk-hs.default.svc.cluster.local:2182/kafka-test"
+        volumeMounts:
+        - name: datadir
+          mountPath: /opt/kafka-logs
+      securityContext:
+        runAsUser: 1000
+        fsGroup: 1000
+  volumeClaimTemplates:
+  - metadata:
+      name: datadir
+    spec:
+      accessModes: [ "ReadWriteMany" ]
+      storageClassName: nfs
+      resources:
+        requests:
+          storage: 5Gi
diff --git a/zk-kafka/pv.yaml b/zk-kafka/pv.yaml
new file mode 100644
index 0000000..87e97da
--- /dev/null
+++ b/zk-kafka/pv.yaml
@@ -0,0 +1,89 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: nfspv1
+spec:
+  capacity:
+    storage: 10Gi
+  accessModes:
+  - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: nfs
+  nfs:
+    path: /nfs/storage1
+    server: 192.168.40.127
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: nfspv2
+spec:
+  capacity:
+    storage: 10Gi
+  accessModes:
+  - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: nfs
+  nfs:
+    path: /nfs/storage2
+    server: 192.168.40.127
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: nfspv3
+spec:
+  capacity:
+    storage: 10Gi
+  accessModes:
+  - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: nfs
+  nfs:
+    path: /nfs/storage3
+    server: 192.168.40.127
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: nfspv4
+spec:
+  capacity:
+    storage: 10Gi
+  accessModes:
+  - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: nfs
+  nfs:
+    path: /nfs/storage4
+    server: 192.168.40.127
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: nfspv5
+spec:
+  capacity:
+    storage: 10Gi
+  accessModes:
+  - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: nfs
+  nfs:
+    path: /nfs/storage5
+    server: 192.168.40.127
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: nfspv6
+spec:
+  capacity:
+    storage: 10Gi
+  accessModes:
+  - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: nfs
+  nfs:
+    path: /nfs/storage6
+    server: 192.168.40.127
diff --git a/zk-kafka/zk.yaml b/zk-kafka/zk.yaml
new file mode 100644
index 0000000..d192184
--- /dev/null
+++ b/zk-kafka/zk.yaml
@@ -0,0 +1,157 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: zk-hs
+  labels:
+    app: zk
+spec:
+  ports:
+  - port: 2888
+    name: server
+  - port: 3888
+    name: leader-election
+  clusterIP: None
+  selector:
+    app: zk
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: zk-cs
+  labels:
+    app: zk
+spec:
+  type: NodePort
+  ports:
+  - port: 2182
+    name: client
+    targetPort: 2182
+    nodePort: 2182
+  selector:
+    app: zk
+---
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  name: zk-pdb
+spec:
+  selector:
+    matchLabels:
+      app: zk
+  maxUnavailable: 1
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: zk
+spec:
+  selector:
+    matchLabels:
+      app: zk
+  serviceName: zk-hs
+  replicas: 3 # create three pods
+  updateStrategy:
+    type: RollingUpdate
+  podManagementPolicy: Parallel
+  template:
+    metadata:
+      labels:
+        app: zk
+    spec:
+      nodeSelector: # match labels to schedule the pods onto the target nodes
+        travis.io/schedule-only: "kafka"
+      tolerations:
+      - key: "travis.io/schedule-only"
+        operator: "Equal"
+        value: "kafka"
+        effect: "NoSchedule"
+      - key: "travis.io/schedule-only"
+        operator: "Equal"
+        value: "kafka"
+        effect: "NoExecute"
+        tolerationSeconds: 3600
+      - key: "travis.io/schedule-only"
+        operator: "Equal"
+        value: "kafka"
+        effect: "PreferNoSchedule"
+      affinity: # allow at most one pod per node
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: "app"
+                operator: In
+                values:
+                - zk
+            topologyKey: "kubernetes.io/hostname"
+      imagePullSecrets: # secret for the private image registry
+      - name: registry-key
+      containers:
+      - name: kubernetes-zookeeper
+        securityContext:
+          runAsUser: 0
+        imagePullPolicy: Always
+        image: 192.168.40.153:9080/k8s/k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10
+        resources:
+          requests:
+            memory: "20Mi"
+            cpu: "0.1"
+        ports:
+        - containerPort: 2182
+          name: client
+        - containerPort: 2888
+          name: server
+        - containerPort: 3888
+          name: leader-election
+        command:
+        - sh
+        - -c
+        - "start-zookeeper \
+          --servers=3 \
+          --data_dir=/var/lib/zookeeper/data \
+          --data_log_dir=/var/lib/zookeeper/data/log \
+          --conf_dir=/opt/zookeeper/conf \
+          --client_port=2182 \
+          --election_port=3888 \
+          --server_port=2888 \
+          --tick_time=2000 \
+          --init_limit=10 \
+          --sync_limit=5 \
+          --heap=512M \
+          --max_client_cnxns=60 \
+          --snap_retain_count=3 \
+          --purge_interval=12 \
+          --max_session_timeout=40000 \
+          --min_session_timeout=4000 \
+          --log_level=INFO"
+        readinessProbe: # pod health check
+          exec:
+            command:
+            - sh
+            - -c
+            - "zookeeper-ready 2182"
+          initialDelaySeconds: 10
+          timeoutSeconds: 5
+        livenessProbe:
+          exec:
+            command:
+            - sh
+            - -c
+            - "zookeeper-ready 2182"
+          initialDelaySeconds: 10
+          timeoutSeconds: 5
+        volumeMounts:
+        - name: datadir
+          mountPath: /var/lib/zookeeper
+      securityContext:
+        runAsUser: 1000
+        fsGroup: 1000
+  volumeClaimTemplates: # NFS-backed volume claim template
+  - metadata:
+      name: datadir
+    spec:
+      accessModes: [ "ReadWriteMany" ]
+      storageClassName: nfs
+      resources:
+        requests:
+          storage: 10Gi
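A minimal sketch of one possible rollout and verification order for these manifests, assuming `kubectl` points at the target cluster and the `default` namespace, that the `registry-key` image pull secret referenced by zk.yaml already exists, and that the nodes carry the `travis.io/schedule-only=kafka` label and taints the ZooKeeper StatefulSet expects:

```sh
#!/bin/bash
# Sketch only: apply order and basic checks for the zk-kafka manifests.
set -e

kubectl apply -f zk-kafka/pv.yaml          # NFS-backed PersistentVolumes (storageClassName: nfs)
kubectl apply -f zk-kafka/zk.yaml          # zk-hs/zk-cs Services, PodDisruptionBudget, ZooKeeper StatefulSet
kubectl rollout status statefulset/zk      # wait for zk-0, zk-1, zk-2 to become ready

kubectl apply -f zk-kafka/kafka-svc.yaml   # headless Service for the Kafka StatefulSet
kubectl apply -f zk-kafka/kafka.yaml       # Kafka StatefulSet
kubectl rollout status statefulset/kafka

# Same check that check.sh performs: ask each ZooKeeper pod for its role.
./zk-kafka/check.sh
```

The ordering matters: the claims created by the `volumeClaimTemplates` (storage class `nfs`) can only bind once the PersistentVolumes from pv.yaml exist, and the Kafka pods reach the ZooKeeper ensemble through `ZK_DIR` (`zk-0.zk-hs.default.svc.cluster.local:2182/kafka-test`), so ZooKeeper should be up before Kafka starts.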
