docker swarm 搭建生产用kafka集群

docker swarm 搭建kafka集群

创建 kafkacompose.yml 文件(Compose v3.7 格式,与下文 docker stack deploy 命令中的文件名一致)

在根目录下创建了kafkacompose.yml文件


version: '3.7'

services:
  zoo1:
    image: zookeeper:latest
    networks:
      - kafka
    hostname: zoo1
    ports:
      # Quoted: "a:b" plain scalars can be mis-typed by YAML parsers; Compose
      # best practice is to always quote port mappings.
      - "2181:2181"
    environment:
      # Unique ensemble member id; must match the server.N index below.
      ZOO_MY_ID: "1"
      # This node binds 0.0.0.0 for its own entry; peers are reached by service name.
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      - "/gfs-share/zookeeper/zoo1conf:/conf"
      - "/gfs-share/zookeeper/zoo1data:/data"
      - "/gfs-share/zookeeper/zoo1datalog:/datalog"
      - "/gfs-share/zookeeper/zoo1logs:/logs"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          # Pin to nodes labeled role1=zookeeper
          # (docker node update --label-add role1=zookeeper <node>).
          - node.labels.role1==zookeeper
  zoo2:
    image: zookeeper:latest
    networks:
      - kafka
    hostname: zoo2
    ports:
      # Quoted: "a:b" plain scalars can be mis-typed by YAML parsers; Compose
      # best practice is to always quote port mappings.
      - "2182:2181"
    environment:
      # Unique ensemble member id; must match the server.N index below.
      ZOO_MY_ID: "2"
      # This node binds 0.0.0.0 for its own entry; peers are reached by service name.
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      - "/gfs-share/zookeeper/zoo2conf:/conf"
      - "/gfs-share/zookeeper/zoo2data:/data"
      - "/gfs-share/zookeeper/zoo2datalog:/datalog"
      - "/gfs-share/zookeeper/zoo2logs:/logs"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          # Pin to nodes labeled role1=zookeeper.
          - node.labels.role1==zookeeper
  zoo3:
    image: zookeeper:latest
    networks:
      - kafka
    hostname: zoo3
    ports:
      # Quoted: "a:b" plain scalars can be mis-typed by YAML parsers; Compose
      # best practice is to always quote port mappings.
      - "2183:2181"
    environment:
      # Unique ensemble member id; must match the server.N index below.
      ZOO_MY_ID: "3"
      # This node binds 0.0.0.0 for its own entry; peers are reached by service name.
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      - "/gfs-share/zookeeper/zoo3conf:/conf"
      - "/gfs-share/zookeeper/zoo3data:/data"
      - "/gfs-share/zookeeper/zoo3datalog:/datalog"
      - "/gfs-share/zookeeper/zoo3logs:/logs"
    deploy:
      mode: replicated
      replicas: 1
      placement:
        constraints:
          # Pin to nodes labeled role1=zookeeper.
          - node.labels.role1==zookeeper
      
  kafka1:
    image: wurstmeister/kafka:latest
    hostname: kafka1
    # NOTE: depends_on is ignored by "docker stack deploy" (swarm mode);
    # kept for documentation and for plain docker-compose use.
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      # host-mode publish: 9094 binds directly on the node running this broker.
      - target: 9094
        published: 9094
        protocol: tcp
        mode: host
    environment:
      # Resolves the swarm node's address at startup; the image's entrypoint
      # substitutes it into _{HOSTNAME_COMMAND} in KAFKA_ADVERTISED_LISTENERS.
      HOSTNAME_COMMAND: "docker info -f '{{`{{.Swarm.NodeAddr}}`}}'"
      KAFKA_BROKER_ID: "1"
      KAFKA_MESSAGE_MAX_BYTES: "5000000"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      # INSIDE: overlay-network traffic; OUTSIDE: host-published 9094.
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INSIDE://:9092,OUTSIDE://_{HOSTNAME_COMMAND}:9094
      KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_LOG_DIRS: /kafka/kafka_log
      KAFKA_LOG_CLEANUP_POLICY: "delete"
      KAFKA_LOG_RETENTION_HOURS: "1"
      # NOTE(review): 1 MiB segment/retention is below KAFKA_MESSAGE_MAX_BYTES
      # (5 MB); a max-size message cannot fit in one segment — confirm sizes.
      KAFKA_LOG_RETENTION_BYTES: "1048576"
      KAFKA_LOG_SEGMENT_BYTES: "1048576"
      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: "300000"
      # Fixed typo: was KAFKA_LOG_CLEANLE_ENABLE, which maps to no broker
      # property; CLEANER maps to log.cleaner.enable.
      KAFKA_LOG_CLEANER_ENABLE: "true"
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      # Docker socket required so HOSTNAME_COMMAND can query the local daemon.
      - /var/run/docker.sock:/var/run/docker.sock
      - "/gfs-share/kafka/kafka1:/kafka"
    networks:
      - kafka
    deploy:
      placement:
        constraints:
          # Pin broker 1 to its dedicated node (label role=kafka1).
          - node.labels.role==kafka1
  
  kafka2:
    image: wurstmeister/kafka:latest
    hostname: kafka2
    # NOTE: depends_on is ignored by "docker stack deploy" (swarm mode).
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      # host-mode publish: 9094 binds directly on the node running this broker.
      - target: 9094
        published: 9094
        protocol: tcp
        mode: host
    environment:
      # Node address substituted into _{HOSTNAME_COMMAND} by the image entrypoint.
      HOSTNAME_COMMAND: "docker info -f '{{`{{.Swarm.NodeAddr}}`}}'"
      KAFKA_BROKER_ID: "2"
      KAFKA_MESSAGE_MAX_BYTES: "5000000"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INSIDE://:9092,OUTSIDE://_{HOSTNAME_COMMAND}:9094
      KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_LOG_DIRS: /kafka/kafka_log
      KAFKA_LOG_CLEANUP_POLICY: "delete"
      KAFKA_LOG_RETENTION_HOURS: "1"
      # NOTE(review): 1 MiB segment/retention < KAFKA_MESSAGE_MAX_BYTES (5 MB).
      KAFKA_LOG_RETENTION_BYTES: "1048576"
      KAFKA_LOG_SEGMENT_BYTES: "1048576"
      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: "300000"
      # Fixed typo: was KAFKA_LOG_CLEANLE_ENABLE (maps to no broker property).
      KAFKA_LOG_CLEANER_ENABLE: "true"
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      # Docker socket required by HOSTNAME_COMMAND.
      - /var/run/docker.sock:/var/run/docker.sock
      - "/gfs-share/kafka/kafka2:/kafka"
    networks:
      - kafka
    deploy:
      placement:
        constraints:
          # Pin broker 2 to its dedicated node (label role=kafka2).
          - node.labels.role==kafka2
            
  kafka3:
    image: wurstmeister/kafka:latest
    hostname: kafka3
    # NOTE: depends_on is ignored by "docker stack deploy" (swarm mode).
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      # host-mode publish: 9094 binds directly on the node running this broker.
      - target: 9094
        published: 9094
        protocol: tcp
        mode: host
    environment:
      # Node address substituted into _{HOSTNAME_COMMAND} by the image entrypoint.
      HOSTNAME_COMMAND: "docker info -f '{{`{{.Swarm.NodeAddr}}`}}'"
      KAFKA_BROKER_ID: "3"
      KAFKA_MESSAGE_MAX_BYTES: "5000000"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INSIDE://:9092,OUTSIDE://_{HOSTNAME_COMMAND}:9094
      KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_LOG_DIRS: /kafka/kafka_log
      KAFKA_LOG_CLEANUP_POLICY: "delete"
      KAFKA_LOG_RETENTION_HOURS: "1"
      # NOTE(review): 1 MiB segment/retention < KAFKA_MESSAGE_MAX_BYTES (5 MB).
      KAFKA_LOG_RETENTION_BYTES: "1048576"
      KAFKA_LOG_SEGMENT_BYTES: "1048576"
      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: "300000"
      # Fixed typo: was KAFKA_LOG_CLEANLE_ENABLE (maps to no broker property).
      KAFKA_LOG_CLEANER_ENABLE: "true"
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      # Docker socket required by HOSTNAME_COMMAND.
      - /var/run/docker.sock:/var/run/docker.sock
      - "/gfs-share/kafka/kafka3:/kafka"
    networks:
      - kafka
    deploy:
      placement:
        constraints:
          # Pin broker 3 to its dedicated node (label role=kafka3).
          - node.labels.role==kafka3
            
  kafka4:
    image: wurstmeister/kafka:latest
    hostname: kafka4
    # NOTE: depends_on is ignored by "docker stack deploy" (swarm mode).
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      # host-mode publish: 9094 binds directly on the node running this broker.
      - target: 9094
        published: 9094
        protocol: tcp
        mode: host
    environment:
      # Node address substituted into _{HOSTNAME_COMMAND} by the image entrypoint.
      HOSTNAME_COMMAND: "docker info -f '{{`{{.Swarm.NodeAddr}}`}}'"
      KAFKA_BROKER_ID: "4"
      KAFKA_MESSAGE_MAX_BYTES: "5000000"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INSIDE://:9092,OUTSIDE://_{HOSTNAME_COMMAND}:9094
      KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_LOG_DIRS: /kafka/kafka_log
      KAFKA_LOG_CLEANUP_POLICY: "delete"
      KAFKA_LOG_RETENTION_HOURS: "1"
      # NOTE(review): 1 MiB segment/retention < KAFKA_MESSAGE_MAX_BYTES (5 MB).
      KAFKA_LOG_RETENTION_BYTES: "1048576"
      KAFKA_LOG_SEGMENT_BYTES: "1048576"
      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: "300000"
      # Fixed typo: was KAFKA_LOG_CLEANLE_ENABLE (maps to no broker property).
      KAFKA_LOG_CLEANER_ENABLE: "true"
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      # Docker socket required by HOSTNAME_COMMAND.
      - /var/run/docker.sock:/var/run/docker.sock
      - "/gfs-share/kafka/kafka4:/kafka"
    networks:
      - kafka
    deploy:
      placement:
        constraints:
          # Pin broker 4 to its dedicated node (label role=kafka4).
          - node.labels.role==kafka4
            
  kafka5:
    image: wurstmeister/kafka:latest
    hostname: kafka5
    # NOTE: depends_on is ignored by "docker stack deploy" (swarm mode).
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      # host-mode publish: 9094 binds directly on the node running this broker.
      - target: 9094
        published: 9094
        protocol: tcp
        mode: host
    environment:
      # Node address substituted into _{HOSTNAME_COMMAND} by the image entrypoint.
      HOSTNAME_COMMAND: "docker info -f '{{`{{.Swarm.NodeAddr}}`}}'"
      KAFKA_BROKER_ID: "5"
      KAFKA_MESSAGE_MAX_BYTES: "5000000"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INSIDE://:9092,OUTSIDE://_{HOSTNAME_COMMAND}:9094
      KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_LOG_DIRS: /kafka/kafka_log
      KAFKA_LOG_CLEANUP_POLICY: "delete"
      KAFKA_LOG_RETENTION_HOURS: "1"
      # NOTE(review): 1 MiB segment/retention < KAFKA_MESSAGE_MAX_BYTES (5 MB).
      KAFKA_LOG_RETENTION_BYTES: "1048576"
      KAFKA_LOG_SEGMENT_BYTES: "1048576"
      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: "300000"
      # Fixed typo: was KAFKA_LOG_CLEANLE_ENABLE (maps to no broker property).
      KAFKA_LOG_CLEANER_ENABLE: "true"
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      # Docker socket required by HOSTNAME_COMMAND.
      - /var/run/docker.sock:/var/run/docker.sock
      - "/gfs-share/kafka/kafka5:/kafka"
    networks:
      - kafka
    deploy:
      placement:
        constraints:
          # Pin broker 5 to its dedicated node (label role=kafka5).
          - node.labels.role==kafka5
            
  kafka6:
    image: wurstmeister/kafka:latest
    hostname: kafka6
    # NOTE: depends_on is ignored by "docker stack deploy" (swarm mode).
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      # host-mode publish: 9094 binds directly on the node running this broker.
      - target: 9094
        published: 9094
        protocol: tcp
        mode: host
    environment:
      # Node address substituted into _{HOSTNAME_COMMAND} by the image entrypoint.
      HOSTNAME_COMMAND: "docker info -f '{{`{{.Swarm.NodeAddr}}`}}'"
      KAFKA_BROKER_ID: "6"
      KAFKA_MESSAGE_MAX_BYTES: "5000000"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INSIDE://:9092,OUTSIDE://_{HOSTNAME_COMMAND}:9094
      KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_LOG_DIRS: /kafka/kafka_log
      KAFKA_LOG_CLEANUP_POLICY: "delete"
      KAFKA_LOG_RETENTION_HOURS: "1"
      # NOTE(review): 1 MiB segment/retention < KAFKA_MESSAGE_MAX_BYTES (5 MB).
      KAFKA_LOG_RETENTION_BYTES: "1048576"
      KAFKA_LOG_SEGMENT_BYTES: "1048576"
      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: "300000"
      # Fixed typo: was KAFKA_LOG_CLEANLE_ENABLE (maps to no broker property).
      KAFKA_LOG_CLEANER_ENABLE: "true"
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      # Docker socket required by HOSTNAME_COMMAND.
      - /var/run/docker.sock:/var/run/docker.sock
      - "/gfs-share/kafka/kafka6:/kafka"
    networks:
      - kafka
    deploy:
      placement:
        constraints:
          # Pin broker 6 to its dedicated node (label role=kafka6).
          - node.labels.role==kafka6
            
  kafka7:
    image: wurstmeister/kafka:latest
    hostname: kafka7
    # NOTE: depends_on is ignored by "docker stack deploy" (swarm mode).
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      # host-mode publish: 9094 binds directly on the node running this broker.
      - target: 9094
        published: 9094
        protocol: tcp
        mode: host
    environment:
      # Node address substituted into _{HOSTNAME_COMMAND} by the image entrypoint.
      HOSTNAME_COMMAND: "docker info -f '{{`{{.Swarm.NodeAddr}}`}}'"
      KAFKA_BROKER_ID: "7"
      KAFKA_MESSAGE_MAX_BYTES: "5000000"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INSIDE://:9092,OUTSIDE://_{HOSTNAME_COMMAND}:9094
      KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_LOG_DIRS: /kafka/kafka_log
      KAFKA_LOG_CLEANUP_POLICY: "delete"
      KAFKA_LOG_RETENTION_HOURS: "1"
      # NOTE(review): 1 MiB segment/retention < KAFKA_MESSAGE_MAX_BYTES (5 MB).
      KAFKA_LOG_RETENTION_BYTES: "1048576"
      KAFKA_LOG_SEGMENT_BYTES: "1048576"
      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: "300000"
      # Fixed typo: was KAFKA_LOG_CLEANLE_ENABLE (maps to no broker property).
      KAFKA_LOG_CLEANER_ENABLE: "true"
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      # Docker socket required by HOSTNAME_COMMAND.
      - /var/run/docker.sock:/var/run/docker.sock
      - "/gfs-share/kafka/kafka7:/kafka"
    networks:
      - kafka
    deploy:
      placement:
        constraints:
          # Pin broker 7 to its dedicated node (label role=kafka7).
          - node.labels.role==kafka7
            
  kafka8:
    image: wurstmeister/kafka:latest
    hostname: kafka8
    # NOTE: depends_on is ignored by "docker stack deploy" (swarm mode).
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      # host-mode publish: 9094 binds directly on the node running this broker.
      - target: 9094
        published: 9094
        protocol: tcp
        mode: host
    environment:
      # Node address substituted into _{HOSTNAME_COMMAND} by the image entrypoint.
      HOSTNAME_COMMAND: "docker info -f '{{`{{.Swarm.NodeAddr}}`}}'"
      KAFKA_BROKER_ID: "8"
      KAFKA_MESSAGE_MAX_BYTES: "5000000"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INSIDE://:9092,OUTSIDE://_{HOSTNAME_COMMAND}:9094
      KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_LOG_DIRS: /kafka/kafka_log
      KAFKA_LOG_CLEANUP_POLICY: "delete"
      KAFKA_LOG_RETENTION_HOURS: "1"
      # NOTE(review): 1 MiB segment/retention < KAFKA_MESSAGE_MAX_BYTES (5 MB).
      KAFKA_LOG_RETENTION_BYTES: "1048576"
      KAFKA_LOG_SEGMENT_BYTES: "1048576"
      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: "300000"
      # Fixed typo: was KAFKA_LOG_CLEANLE_ENABLE (maps to no broker property).
      KAFKA_LOG_CLEANER_ENABLE: "true"
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      # Docker socket required by HOSTNAME_COMMAND.
      - /var/run/docker.sock:/var/run/docker.sock
      - "/gfs-share/kafka/kafka8:/kafka"
    networks:
      - kafka
    deploy:
      placement:
        constraints:
          # Pin broker 8 to its dedicated node (label role=kafka8).
          - node.labels.role==kafka8
            
  kafka9:
    image: wurstmeister/kafka:latest
    hostname: kafka9
    # NOTE: depends_on is ignored by "docker stack deploy" (swarm mode).
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      # host-mode publish: 9094 binds directly on the node running this broker.
      - target: 9094
        published: 9094
        protocol: tcp
        mode: host
    environment:
      # Node address substituted into _{HOSTNAME_COMMAND} by the image entrypoint.
      HOSTNAME_COMMAND: "docker info -f '{{`{{.Swarm.NodeAddr}}`}}'"
      KAFKA_BROKER_ID: "9"
      KAFKA_MESSAGE_MAX_BYTES: "5000000"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INSIDE://:9092,OUTSIDE://_{HOSTNAME_COMMAND}:9094
      KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_LOG_DIRS: /kafka/kafka_log
      KAFKA_LOG_CLEANUP_POLICY: "delete"
      KAFKA_LOG_RETENTION_HOURS: "1"
      # NOTE(review): 1 MiB segment/retention < KAFKA_MESSAGE_MAX_BYTES (5 MB).
      KAFKA_LOG_RETENTION_BYTES: "1048576"
      KAFKA_LOG_SEGMENT_BYTES: "1048576"
      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: "300000"
      # Fixed typo: was KAFKA_LOG_CLEANLE_ENABLE (maps to no broker property).
      KAFKA_LOG_CLEANER_ENABLE: "true"
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      # Docker socket required by HOSTNAME_COMMAND.
      - /var/run/docker.sock:/var/run/docker.sock
      - "/gfs-share/kafka/kafka9:/kafka"
    networks:
      - kafka
    deploy:
      placement:
        constraints:
          # Pin broker 9 to its dedicated node (label role=kafka9).
          - node.labels.role==kafka9
            
  kafka10:
    image: wurstmeister/kafka:latest
    hostname: kafka10
    # NOTE: depends_on is ignored by "docker stack deploy" (swarm mode).
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      # host-mode publish: 9094 binds directly on the node running this broker.
      - target: 9094
        published: 9094
        protocol: tcp
        mode: host
    environment:
      # Node address substituted into _{HOSTNAME_COMMAND} by the image entrypoint.
      HOSTNAME_COMMAND: "docker info -f '{{`{{.Swarm.NodeAddr}}`}}'"
      KAFKA_BROKER_ID: "10"
      KAFKA_MESSAGE_MAX_BYTES: "5000000"
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INSIDE://:9092,OUTSIDE://_{HOSTNAME_COMMAND}:9094
      KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false"
      KAFKA_LOG_DIRS: /kafka/kafka_log
      KAFKA_LOG_CLEANUP_POLICY: "delete"
      KAFKA_LOG_RETENTION_HOURS: "1"
      # NOTE(review): 1 MiB segment/retention < KAFKA_MESSAGE_MAX_BYTES (5 MB).
      KAFKA_LOG_RETENTION_BYTES: "1048576"
      KAFKA_LOG_SEGMENT_BYTES: "1048576"
      KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS: "300000"
      # Fixed typo: was KAFKA_LOG_CLEANLE_ENABLE (maps to no broker property).
      KAFKA_LOG_CLEANER_ENABLE: "true"
    volumes:
      - "/etc/timezone:/etc/timezone:ro"
      - "/etc/localtime:/etc/localtime:ro"
      # Docker socket required by HOSTNAME_COMMAND.
      - /var/run/docker.sock:/var/run/docker.sock
      - "/gfs-share/kafka/kafka10:/kafka"
    networks:
      - kafka
    deploy:
      placement:
        constraints:
          # Pin broker 10 to its dedicated node (label role=kafka10).
          - node.labels.role==kafka10
            
networks:
  kafka:
    # Pre-existing attachable overlay network; create it before deploying:
    #   docker network create -d overlay --attachable publicNetWork
    # Uses the Compose 3.5+ form (external: true + name) instead of the
    # deprecated "external: {name: ...}" mapping.
    external: true
    name: publicNetWork

启动集群

docker stack deploy -c kafkacompose.yml kafka
评论 3
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值