Working scripts for a KRaft Kafka cluster

Local deployment with docker-compose, used for development and testing

version: '3.8'
services:
  kafka1:
    image: docker.m.daocloud.io/apache/kafka:3.9.1
    container_name: kafka1
    ports:
      - "9092:9092"
      - "9093:9093"
    volumes:
      - /home/shell81/docker/kafka/data_KRaft/kafka1:/data/kafka
      - /home/shell81/docker/kafka/data_KRaft/shared:/data/id
    networks:
      kraft_my_network: # use the custom network defined below
        ipv4_address: 172.25.0.21
    command: >
      sh -c "
      if [ ! -f /data/kafka/log/meta.properties ]; then
        echo 'Initializing storage directory...'
        mkdir -p /data/kafka/log
        echo 'Initializing config file'
        sed -i 's/^log.dirs=.*//g' /opt/kafka/config/kraft/server.properties
        sed -i 's/^node.id=.*/node.id=1/' /opt/kafka/config/kraft/server.properties
        sed -i 's/localhost/172.25.0.21/g' /opt/kafka/config/kraft/server.properties
        echo 'controller.quorum.voters=1@172.25.0.21:9093,2@172.25.0.22:9093,3@172.25.0.23:9093' >> /opt/kafka/config/kraft/server.properties
        echo 'log.dirs=/data/kafka/log' >> /opt/kafka/config/kraft/server.properties
        sleep 1
        echo 'Config file initialized...'
        export KAFKA_CLUSTER_ID=$$(/opt/kafka/bin/kafka-storage.sh random-uuid)
        echo 'Cluster ID: '$$KAFKA_CLUSTER_ID
        echo $$KAFKA_CLUSTER_ID > /data/id/cluster.id
        /opt/kafka/bin/kafka-storage.sh format --cluster-id $$KAFKA_CLUSTER_ID --config /opt/kafka/config/kraft/server.properties
        echo 'kafka1: formatted log storage directory'
        fi
      
        echo 'kafka1: starting Kafka server...'
      /opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/kraft/server.properties
      "

  kafka2:
    image: docker.m.daocloud.io/apache/kafka:3.9.1
    container_name: kafka2
    ports:
      - "9094:9092"
      - "9095:9093"
    volumes:
      - /home/shell81/docker/kafka/data_KRaft/kafka2:/data/kafka
      - /home/shell81/docker/kafka/data_KRaft/shared:/data/id
    networks:
      kraft_my_network: # use the custom network defined below
        ipv4_address: 172.25.0.22
    command: >
      sh -c "
      if [ ! -f /data/kafka/log/meta.properties ]; then
        echo 'Initializing storage directory...'
        mkdir -p /data/kafka/log
        echo 'Initializing config file'
        sed -i 's/^log.dirs=.*//g' /opt/kafka/config/kraft/server.properties
        sed -i 's/^node.id=.*/node.id=2/' /opt/kafka/config/kraft/server.properties
        sed -i 's/localhost/172.25.0.22/g' /opt/kafka/config/kraft/server.properties
        echo 'controller.quorum.voters=1@172.25.0.21:9093,2@172.25.0.22:9093,3@172.25.0.23:9093' >> /opt/kafka/config/kraft/server.properties
        echo 'log.dirs=/data/kafka/log' >> /opt/kafka/config/kraft/server.properties
        echo 'Config file initialized, waiting for kafka1 to finish initialization...'
        cat /opt/kafka/config/kraft/server.properties
        sleep 20
        export KAFKA_CLUSTER_ID=$$(cat /data/id/cluster.id)
        echo 'kafka2-cluster-id: '$$KAFKA_CLUSTER_ID
        /opt/kafka/bin/kafka-storage.sh format --cluster-id $$KAFKA_CLUSTER_ID --config /opt/kafka/config/kraft/server.properties
        echo 'kafka2: formatted log storage directory'
        fi
      
        echo 'kafka2: starting Kafka server...'
      /opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/kraft/server.properties
      "

  kafka3:
    image: docker.m.daocloud.io/apache/kafka:3.9.1
    container_name: kafka3
    ports:
      - "9096:9092"
      - "9097:9093"
    volumes:
      - /home/shell81/docker/kafka/data_KRaft/kafka3:/data/kafka
      - /home/shell81/docker/kafka/data_KRaft/shared:/data/id
    networks:
      kraft_my_network: # use the custom network defined below
        ipv4_address: 172.25.0.23
    command: >
      sh -c "
      if [ ! -f /data/kafka/log/meta.properties ]; then
        echo 'Initializing storage directory...'
        mkdir -p /data/kafka/log
        echo 'Initializing config file'
        sed -i 's/^log.dirs=.*//g' /opt/kafka/config/kraft/server.properties
        sed -i 's/^node.id=.*/node.id=3/' /opt/kafka/config/kraft/server.properties
        sed -i 's/localhost/172.25.0.23/g' /opt/kafka/config/kraft/server.properties
        echo 'controller.quorum.voters=1@172.25.0.21:9093,2@172.25.0.22:9093,3@172.25.0.23:9093' >> /opt/kafka/config/kraft/server.properties
        echo 'log.dirs=/data/kafka/log' >> /opt/kafka/config/kraft/server.properties
        echo 'Config file initialized, waiting for kafka1 to finish initialization...'
        sleep 25
        export KAFKA_CLUSTER_ID=$$(cat /data/id/cluster.id)
        echo 'kafka3-cluster-id: '$$KAFKA_CLUSTER_ID
        /opt/kafka/bin/kafka-storage.sh format --cluster-id $$KAFKA_CLUSTER_ID --config /opt/kafka/config/kraft/server.properties
        echo 'kafka3: formatted log storage directory'
        fi
      
        echo 'kafka3: starting Kafka server...'
      /opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/kraft/server.properties
      "

networks:
  kraft_my_network:
    driver: bridge
    ipam:
      config:
        - subnet: "172.25.0.0/16"  # use a less common subnet
          gateway: "172.25.0.1"
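
To bring the cluster up and smoke-test it, something like the following should work (a minimal sketch; the compose file name and the topic name are assumptions, while the tool paths and the 172.25.0.x addresses come from the configuration above):

# Start the three brokers; kafka2/kafka3 sleep ~20-25s so kafka1 can publish the cluster id
docker compose up -d

# Check the KRaft quorum from inside any broker container
docker exec kafka1 /opt/kafka/bin/kafka-metadata-quorum.sh --bootstrap-server 172.25.0.21:9092 describe --status

# Create and list a test topic (topic name is an assumption)
docker exec kafka1 /opt/kafka/bin/kafka-topics.sh --bootstrap-server 172.25.0.21:9092 --create --topic smoke-test --partitions 3 --replication-factor 3
docker exec kafka1 /opt/kafka/bin/kafka-topics.sh --bootstrap-server 172.25.0.21:9092 --list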

K8s deployment for the production environment

---
apiVersion: v1
kind: Service
metadata:
  name: kafka-hs
  namespace: kafka
spec:
  clusterIP: None
  selector:
    app: kafka
  ports:
    - port: 9092
      targetPort: 9092
      name: kafka-server
    - port: 9093
      targetPort: 9093
      name: kafka-cluster
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
  namespace: kafka
spec:
  type: ClusterIP
  selector:
    app: kafka
  ports:
    - port: 9092
      targetPort: 9092
      name: server
    - port: 9093
      targetPort: 9093
      name: server-cluster
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: kafka
  labels:
    app: kafka
spec:
  serviceName: "kafka-hs" # points to the internal headless service
  replicas: 3
  selector:
    matchLabels:
      app: kafka
  template:
    metadata:
      labels:
        app: kafka
    spec:
      initContainers:
      - name: create-data-dir
        image: docker.m.daocloud.io/library/busybox:latest
        imagePullPolicy: IfNotPresent
        command: ['sh', '-c', 'mkdir -p /host-data/$(POD_NAME) && chmod 755 /host-data/$(POD_NAME) && chown -R 1000:1000 /host-data']
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        volumeMounts:
        - name: data
          mountPath: /host-data
      containers:
      - name: kafka
        image: core.harbor.shell.com:443/library/kafka:3.9.1  # local registry
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9092
          protocol: TCP
        - containerPort: 9093
          protocol: TCP
        securityContext:
          runAsUser: 1000
          runAsGroup: 1000
        env:
        - name: KAFKA_NODE_ID
          valueFrom:
            fieldRef:
              fieldPath: metadata.labels['apps.kubernetes.io/pod-index']
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: KAFKA_CONTROLLER_QUORUM_VOTERS
          value: "0@kafka-0.kafka-hs.kafka.svc.cluster.local:9093,1@kafka-1.kafka-hs.kafka.svc.cluster.local:9093,2@kafka-2.kafka-hs.kafka.svc.cluster.local:9093"
        - name: KAFKA_ADVERTISED_LISTENERS
          value: "PLAINTEXT://$(POD_NAME).kafka-hs.kafka.svc.cluster.local:9092"
        - name: KAFKA_LOG_DIRS
          value: "/data/kafka/log"
        # Storage initialization and startup command
        command: ["sh", "-c"]
        args:
          - |
            # Format the storage directory on first start
            echo 'step 1'
            if [ ! -d /data/kafka/log ]; then
              echo "Creating log directory"
              mkdir -p /data/kafka/log
              mkdir -p /data/id
            fi

            echo 'step 2'
            echo "Initializing config file"
            sed -i 's/^log.dirs=.*//g' /opt/kafka/config/kraft/server.properties
            sed -i 's/^controller.quorum.voters=.*//g' /opt/kafka/config/kraft/server.properties
            sed -i 's/^node.id=.*/node.id=$(KAFKA_NODE_ID)/' /opt/kafka/config/kraft/server.properties
            sed -i 's/localhost/$(POD_NAME).kafka-hs.kafka.svc.cluster.local/g' /opt/kafka/config/kraft/server.properties
            echo 'controller.quorum.voters=$(KAFKA_CONTROLLER_QUORUM_VOTERS)' >> /opt/kafka/config/kraft/server.properties
            echo 'log.dirs=$(KAFKA_LOG_DIRS)' >> /opt/kafka/config/kraft/server.properties
            echo "配置文件初始化完毕..."
            echo 'step 2'
            if [ $(KAFKA_NODE_ID) -eq 0 ]; then
              if [ ! -f /data/id/cluster.id ]; then
                KAFKA_CLUSTER_ID=$(/opt/kafka/bin/kafka-storage.sh random-uuid)
                echo $KAFKA_CLUSTER_ID > /data/id/cluster.id
                echo "Cluster ID: $KAFKA_CLUSTER_ID"
              else
                KAFKA_CLUSTER_ID=$(cat /data/id/cluster.id)
                echo "Using cluster ID: $KAFKA_CLUSTER_ID"
              fi
            else
              echo 'Waiting for cluster id initialization...'
              while [ ! -f /data/id/cluster.id ]; do
                sleep 5
              done
              KAFKA_CLUSTER_ID=$(cat /data/id/cluster.id)
              echo "Using cluster ID: $KAFKA_CLUSTER_ID"
            fi

            echo 'step 4'
            if [ ! -f /data/kafka/log/meta.properties ]; then
              echo "Cluster ID: $KAFKA_CLUSTER_ID"
              /opt/kafka/bin/kafka-storage.sh format -c /opt/kafka/config/kraft/server.properties -t $KAFKA_CLUSTER_ID
              echo "kafka $KAFKA_NODE_ID: log storage directory formatted."
            else
              echo "kafka $KAFKA_NODE_ID is already initialized"
            fi

            echo 'step 5'
            if [ $(KAFKA_NODE_ID) -ne 0 ]; then
              echo 'Waiting for kafka-0 to start...'
              sleep 30
            fi
            
            echo 'step 6'
            echo "Starting Kafka node $KAFKA_NODE_ID..."
            /opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/kraft/server.properties
        volumeMounts:
        - name: data
          mountPath: /data/kafka
          subPathExpr: $(POD_NAME)
        - name: cluster-id
          mountPath: /data/id
      nodeSelector:
        cpu: x86
      volumes:
      - name: data
        hostPath:
          path: /data/juicefs-mnt/kafka
          type: DirectoryOrCreate
      - name: cluster-id
        hostPath:
          path: /data/juicefs-mnt/kafka/shared
          type: DirectoryOrCreate
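
After applying the manifests, the quorum can be checked the same way; a minimal sketch, assuming the kafka namespace does not exist yet and the manifests above are saved as kafka-kraft.yaml (both names are assumptions):

# Create the namespace and apply the manifests
kubectl create namespace kafka
kubectl apply -f kafka-kraft.yaml

# Wait for all three pods of the StatefulSet
kubectl -n kafka rollout status statefulset/kafka

# Verify the KRaft quorum from inside kafka-0
kubectl -n kafka exec kafka-0 -- /opt/kafka/bin/kafka-metadata-quorum.sh --bootstrap-server kafka-0.kafka-hs.kafka.svc.cluster.local:9092 describe --status

# In-cluster clients can bootstrap through the ClusterIP service:
#   kafka-svc.kafka.svc.cluster.local:9092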
