Zookeeper Coordination and Control

This article describes the key roles ZooKeeper plays in a Kafka cluster, including dynamically managing brokers and consumers, triggering rebalancing, and maintaining consumption relationships. It also provides producer and consumer code examples that show how Kafka messages are delivered under ZooKeeper-based coordination.

Zookeeper Coordination and Control

1) Manages the dynamic joining and leaving of brokers and consumers.
2) Triggers rebalancing: whenever a broker or consumer joins or leaves, a rebalance algorithm runs so that the subscription load is spread evenly across the consumers in a consumer group.
3) Maintains consumption relationships and the consumption (offset) state of each partition. A sketch of the znodes involved follows below.
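
All of this state is kept in a handful of well-known znodes. The sketch below is not part of the original article; the connect string localhost:2181, the group name test-group, and the topic page_visits are assumptions for illustration. It uses the plain ZooKeeper Java client to peek at the broker registry and at a group's committed offsets (as used by the Kafka 0.8 ZooKeeper-based consumer):

import java.util.List;
import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: inspect the znodes that Kafka's ZooKeeper-based coordination relies on.
public class ZkInspect {
    public static void main(String[] args) throws Exception {
        final CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("localhost:2181", 10000, new Watcher() {
            public void process(WatchedEvent event) {
                if (event.getState() == Event.KeeperState.SyncConnected) connected.countDown();
            }
        });
        connected.await();

        // Brokers register ephemeral nodes under /brokers/ids; when a broker leaves,
        // its node disappears and registered watchers fire, which drives rebalancing.
        List<String> brokerIds = zk.getChildren("/brokers/ids", false);
        System.out.println("Live brokers: " + brokerIds);

        // The high-level consumer commits per-partition offsets under
        // /consumers/<group>/offsets/<topic>/<partition> (group and topic names assumed here).
        byte[] offset = zk.getData("/consumers/test-group/offsets/page_visits/0", false, null);
        System.out.println("Committed offset for partition 0: " + new String(offset));

        zk.close();
    }
}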

Producer code example:

import java.util.*;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class TestProducer {
    public static void main(String[] args) {
        long events = Long.parseLong(args[0]);
        Random rnd = new Random();

        Properties props = new Properties();
        props.put("metadata.broker.list", "broker1:9092,broker2:9092");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        props.put("partitioner.class", "example.producer.SimplePartitioner");
        props.put("request.required.acks", "1");

        ProducerConfig config = new ProducerConfig(props);

        Producer<String, String> producer = new Producer<String, String>(config);

        for (long nEvents = 0; nEvents < events; nEvents++) {
            // Build a fake page-visit event: timestamp, site, and a random client IP.
            long runtime = new Date().getTime();
            String ip = "192.168.2." + rnd.nextInt(255);
            String msg = runtime + ",www.example.com," + ip;
            // The IP is the message key, so SimplePartitioner decides which partition it goes to.
            KeyedMessage<String, String> data = new KeyedMessage<String, String>("page_visits", ip, msg);
            producer.send(data);
        }
        producer.close();
    }
}
Partitioning Code:
import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

public class SimplePartitioner implements Partitioner {
    public SimplePartitioner(VerifiableProperties props) {
    }

    public int partition(Object key, int a_numPartitions) {
        // Route by the last octet of the IP key, so all events from the same
        // client IP always land on the same partition.
        int partition = 0;
        String stringKey = (String) key;
        int offset = stringKey.lastIndexOf('.');
        if (offset > 0) {
            partition = Integer.parseInt(stringKey.substring(offset + 1)) % a_numPartitions;
        }
        return partition;
    }
}
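
As a quick sanity check (hypothetical, not from the original post), the snippet below assumes a topic with 4 partitions and shows where two sample keys would land:

// Hypothetical check of the partition mapping, assuming 4 partitions.
public class PartitionerDemo {
    public static void main(String[] args) {
        SimplePartitioner p = new SimplePartitioner(new kafka.utils.VerifiableProperties());
        System.out.println(p.partition("192.168.2.42", 4)); // 42 % 4 = 2
        System.out.println(p.partition("192.168.2.7", 4));  // 7 % 4 = 3
    }
}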
Consumer code example:
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ConsumerGroupExample {
    private final ConsumerConnector consumer;
    private final String topic;
    private ExecutorService executor;

    public ConsumerGroupExample(String a_zookeeper, String a_groupId, String a_topic) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
                createConsumerConfig(a_zookeeper, a_groupId));
        this.topic = a_topic;
    }

    public void shutdown() {
        if (consumer != null) consumer.shutdown();
        if (executor != null) executor.shutdown();
        try {
            if (executor != null && !executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
                System.out.println("Timed out waiting for consumer threads to shut down, exiting uncleanly");
            }
        } catch (InterruptedException e) {
            System.out.println("Interrupted during shutdown, exiting uncleanly");
        }
    }

    public void run(int a_numThreads) {
        // Ask Kafka for a_numThreads streams of the topic, one per worker thread.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, new Integer(a_numThreads));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

        // now launch all the threads
        executor = Executors.newFixedThreadPool(a_numThreads);

        // now create an object to consume the messages
        int threadNumber = 0;
        for (final KafkaStream stream : streams) {
            executor.submit(new ConsumerTest(stream, threadNumber));
            threadNumber++;
        }
    }

    private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);   // ZooKeeper ensemble used for coordination
        props.put("group.id", a_groupId);              // consumer group this client joins
        props.put("zookeeper.session.timeout.ms", "400");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");  // how often offsets are committed to ZooKeeper

        return new ConsumerConfig(props);
    }

    public static void main(String[] args) {
        String zooKeeper = args[0];
        String groupId = args[1];
        String topic = args[2];
        int threads = Integer.parseInt(args[3]);

        ConsumerGroupExample example = new ConsumerGroupExample(zooKeeper, groupId, topic);
        example.run(threads);

        try {
            Thread.sleep(10000);
        } catch (InterruptedException ie) {
        }
        example.shutdown();
    }
}
ConsumerTest test class:
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;

public class ConsumerTest implements Runnable {
    private KafkaStream m_stream;
    private int m_threadNumber;

    public ConsumerTest(KafkaStream a_stream, int a_threadNumber) {
        m_threadNumber = a_threadNumber;
        m_stream = a_stream;
    }

    public void run() {
        // Block on the stream iterator and print each message until shutdown() closes the stream.
        ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
        while (it.hasNext())
            System.out.println("Thread " + m_threadNumber + ": " + new String(it.next().message()));
        System.out.println("Shutting down Thread: " + m_threadNumber);
    }
}