kafka外网链接超时,通过docker-compose搭建三节点kafka集群,通过nginx代理后,内网能访问,外网链接超时。

那人真像一条狗! 发表于: 2023-10-27   最后更新时间: 2023-10-27 16:01:09   446 游览

kafka三节点集群IP段

192.168.1.42
192.168.1.43
192.168.1.44

nginx的双网卡

内网:192.168.1.33
外网:10.173.211.80

kafka的docker-conmpose脚本

version: "3.7"
services:
  zk0:
    image: zookeeper:latest
    container_name: zookeeper
    ports:
      - 2181:2181
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888 server.2=zk1:2888:3888 server.3=zk2:2888:3888
    volumes:
      - "/etc/localtime:/etc/localtime"
      - "/root/data/zk0/data:/data"
      - "/root/data/zk0/log:/datalog"
    deploy:
      placement:
        constraints:
          - node.hostname == idc1-hrjt-cloud-act-sto1
      resources:
        limits:
          cpus: '1'
          memory: 256M
  zk1:
    image: zookeeper:latest
    container_name: zookeeper
    ports:
      - 2182:2181
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zk0:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zk2:2888:3888
    volumes:
      - "/etc/localtime:/etc/localtime"
      - "/root/data/zk1/data:/data"
      - "/root/data/zk1/log:/datalog"
    deploy:
      placement:
        constraints:
          - node.hostname == idc1-hrjt-cloud-act-sto2
      resources:
        limits:
          cpus: '1'
          memory: 256M
  zk2:
    image: zookeeper:latest
    container_name: zookeeper
    ports:
      - 2183:2181
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zk0:2888:3888 server.2=zk1:2888:3888 server.3=0.0.0.0:2888:3888
    volumes:
      - "/etc/localtime:/etc/localtime"
      - "/root/data/zk2/data:/data"
      - "/root/data/zk2/log:/datalog"
    deploy:
      placement:
        constraints:
          - node.hostname == idc1-hrjt-cloud-act-sto3
      resources:
        limits:
          cpus: '1'
          memory: 256M

  kafka0:
    image: kafka:latest
    depends_on:
      - zk0
      - zk1
      - zk2
    container_name: kafka0
    ports:
      - 9091:9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka0
      KAFKA_ADVERTISED_PORT: 9091
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.1.42:9091
      KAFKA_ZOOKEEPER_CONNECT: zk0:2181,zk1:2181,zk2:2181
      KAFKA_BROKER_ID: 0
      REPLICA_FETCH_WAIT_MAX_MS: 3000
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
      KAFKA_DEFAULT_REPLICATION_FACTOR: 3
      KAFKA_NUM_PARTITIONS: 3
      KAFKA_LOG_DIRS: /kafka/kafkalog
    volumes:
      - /root/data/kafka0/data:/kafka
      - /etc/hosts:/etc/hosts
    deploy:
      placement:
        constraints:
          - node.hostname == idc1-hrjt-cloud-act-sto1
      replicas: 1
      resources:
        limits:
          cpus: '1'
          memory: 512M
  kafka1:
    image: kafka:latest
    depends_on:
      - zk0
      - zk1
      - zk2
    container_name: kafka1
    ports:
      - 9092:9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka1
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.1.43:9092
      KAFKA_ZOOKEEPER_CONNECT: zk0:2181,zk1:2181,zk2:2181
      KAFKA_BROKER_ID: 1
      REPLICA_FETCH_WAIT_MAX_MS: 3000
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
      KAFKA_DEFAULT_REPLICATION_FACTOR: 3
      KAFKA_NUM_PARTITIONS: 3
      KAFKA_LOG_DIRS: /kafka/kafkalog
    volumes:
      - /root/data/kafka1/data:/kafka
      - /etc/hosts:/etc/hosts
    deploy:
      placement:
        constraints:
          - node.hostname == idc1-hrjt-cloud-act-sto2
      replicas: 1
      resources:
        limits:
          cpus: '1'
          memory: 512M        
  kafka2:
    image: kafka:latest
    depends_on:
      - zk0
      - zk1
      - zk2
    container_name: kafka2
    ports:
      - 9093:9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka2
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.1.44:9093
      KAFKA_ZOOKEEPER_CONNECT: zk0:2181,zk1:2181,zk2:2181
      KAFKA_BROKER_ID: 2
      REPLICA_FETCH_WAIT_MAX_MS: 3000
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 3
      KAFKA_DEFAULT_REPLICATION_FACTOR: 3
      KAFKA_NUM_PARTITIONS: 3
      KAFKA_LOG_DIRS: /kafka/kafkalog
    volumes:
      - /root/data/kafka2/data:/kafka
      - /etc/hosts:/etc/hosts
    deploy:
      placement:
        constraints:
          - node.hostname == idc1-hrjt-cloud-act-sto3
      replicas: 1
      resources:
        limits:
          cpus: '1'
          memory: 512M

nginx转发配置

    upstream kafka-cluster-9092 {
        server 192.168.1.42:9091;
        server 192.168.1.43:9092;
        server 192.168.1.44:9093;
    }
    server {
        listen 9092;
        proxy_connect_timeout 10s;
        proxy_timeout 300s;
        proxy_pass kafka-cluster-9092;
    }

然后在192.168.1网段的都能正常使用
但是在10.173.211网段不行
如:10.173.211.175

echo "Test123" | ./kafka-console-producer.sh --broker-list 10.173.211.80:9092 --topic test

报错链接超时

ERROR Error when sending message to topic test with key: null, value: 7 bytes with error: (org.apache.kafka.clients.producer.internals.ErrorLoggingCallback)
org.apache.kafka.common.errors.TimeoutException: Expiring 1 record(s) for test-0:120002 ms has passed since batch creation
发表于 2023-10-27
添加评论

挂外网是不行的,只能在容器内访问,要么你想办法在容器内配置域名映射的方式:
原因可以看下:kafka外网转发

这个文章我有看,hosts是指docker容器的还是宿主机的?我按照这个配置了好像还是不行。

docker内的kafka要配置域名形式,所以docker内的hosts要有域名的解析。

这很尴尬我是用swarm搭配docker-compose部署所以docker容器一直在变的。我试试docker-compose脚本映射hosts进去

你的答案

查看kafka相关的其他问题或提一个您自己的问题