I. Sending Code
1. Sending messages with only a value (no key)
import org.apache.kafka.clients.producer.*;
import org.apache.log4j.Logger;
import java.util.List;
import java.util.Properties;
/**
* @author shkstart
* @create 2019-07-28 17:11
*/
public class formalSend {
private static Logger logger = Logger.getLogger(formalSend.class);
/**
* @param bootstrapServers bootstrap.servers
* @param list data to send
* @param topic topic
*/
public static void sendData(String bootstrapServers, List<String> list, String topic) {
Properties props = new Properties();
props.put("bootstrap.servers", bootstrapServers);
props.put("acks", "all");
props.put("retries", 0);
props.put("batch.size", 16384);
props.put("linger.ms", 1);
props.put("buffer.memory", 33554432);
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
Producer<String, String> producer = new KafkaProducer<String, String>(props);
try {
list.forEach(v -> {
// send a message without a key
producer.send(new ProducerRecord<>(topic, v), new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
if(e != null) {
e.printStackTrace();
} else {
logger.info("The offset of the record we just sent is:-------------> " + recordMetadata.partition());
//System.out.println("The offset of the record we just sent is:-------------> " + recordMetadata.partition());
}
}
});
});
} catch (Exception e) {
logger.error("formalSend: failed to send data to Kafka!", e);
} finally {
if (producer != null) {
producer.close();
}
}
}
}
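For reference, here is a minimal sketch of how this sender might be invoked. The broker addresses, topic name, and sample messages below are placeholders, not values from the project:

import java.util.Arrays;

public class FormalSendDemo {
    public static void main(String[] args) {
        // hypothetical broker list and topic, for illustration only
        formalSend.sendData("192.168.0.1:9092,192.168.0.2:9092",
                Arrays.asList("message-1", "message-2", "message-3"),
                "test-topic");
    }
}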
2. Sending messages with both a key and a value
import org.apache.kafka.clients.producer.*;
import java.util.HashMap;
import java.util.Properties;
/**
* @author shkstart
* @create 2019-07-25 9:53
*/
public class SendKafka {
/**
*
* @param bootstrapServers bootstrap.servers
* @param map key/value pairs to send
* @param topic topic
*/
public static void sendData(String bootstrapServers, HashMap<String, String> map, String topic) {
Properties props = new Properties();
props.put("bootstrap.servers", bootstrapServers);
props.put("acks", "all");
props.put("retries", 0);
props.put("batch.size", 16384);
props.put("linger.ms", 1);
props.put("buffer.memory", 33554432);
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
Producer<String, String> producer= new KafkaProducer<String, String>(props);
try {
map.forEach((k, v) -> {
// send a message with a key; records sharing a key go to the same partition
producer.send(new ProducerRecord<String, String>(topic, k, v), new Callback() {
@Override
public void onCompletion(RecordMetadata recordMetadata, Exception e) {
if(e != null) {
e.printStackTrace();
} else {
System.out.println("The offset of the record we just sent is: " + recordMetadata.partition());
}
}
});
});
} catch (Exception e) {
e.printStackTrace();
} finally {
if(producer != null){
producer.close();
}
}
System.out.println("dd");
}
}
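Compared with the key-less version above, the practical difference is that the default partitioner hashes the key, so records sharing a key always land on the same partition and keep their relative order. A minimal usage sketch (the broker address, topic, and keyed payloads are placeholders):

import java.util.HashMap;

public class SendKafkaDemo {
    public static void main(String[] args) {
        // hypothetical keyed payloads, for illustration only
        HashMap<String, String> map = new HashMap<>();
        map.put("device-1", "payload-a");
        map.put("device-2", "payload-b");
        SendKafka.sendData("192.168.0.1:9092", map, "test-topic");
    }
}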
II. Polling Code
formalPoll:
import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.log4j.Logger;
import com.hisense.util.PointAnalysisUitls;
/**
* @author shkstart
* @create 2019-07-26 14:55
*/
public class formalPoll {
private static Logger logger = Logger.getLogger(formalPoll.class);
ExecutorService executorService = Singleton.getInstance().getThreadPool();
/**
*
* @param bootstrapServers bootstrap.servers
* @param groupid group.id
* @param arg one or more topics to subscribe to
*/
public void pollData(String bootstrapServers, String groupid, String... arg) {
Properties props = new Properties();
props.put("bootstrap.servers", bootstrapServers);
props.put("group.id", groupid);
//props.put("group.id", "taxi-scala1");
props.put("enable.auto.commit", "true");
props.put("auto.commit.interval.ms", "1000");
// added explicitly: start from the latest offset when there is no committed offset
props.put("auto.offset.reset", "latest");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> consumer = null;
try {
consumer = new KafkaConsumer<>(props);
consumer.subscribe(Arrays.asList(arg)); // subscribe to one or more topics
logger.info("PointAnalysisUitls类——树的初始化——开始!!!");
//System.out.println("PointAnalysisUitls类——树的初始化——开始!!!");
PointAnalysisUitls instance = PointAnalysisUitls.getInstance();
//String path = "/home/fbdshp/fbd_84.shp";
String path = "E:\\eclipseAll\\eclipseWorkspace\\taxi\\resources\\fbdshp\\fbd_84.shp";
instance.initData(path,"");
logger.info("PointAnalysisUitls类——树的初始化——结束!!!");
//System.out.println("PointAnalysisUitls类——树的初始化——结束!!!");
while (true) {
ConsumerRecords<String, String> records = consumer.poll(1000); // poll with a 1-second timeout
// hand the polled records to the thread pool for processing
executorService.execute(() -> new HandleData(records, instance).run());
// block this thread for 2 minutes (120000 ms); 1 minute = 60000 ms, 5 seconds = 5000 ms
Thread.sleep(120000);
}
} catch (Exception e) {
logger.error("formalPoll: failed to poll data from Kafka!", e);
} finally {
if(consumer != null){
consumer.close();
}
}
}
}
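Since pollData loops forever (while (true)), it blocks the calling thread; a caller that needs to keep running should start it on its own thread. A minimal sketch of launching the consumer, with placeholder broker address, group id, and topic names (the shapefile path inside pollData must also exist on the target machine):

public class FormalPollDemo {
    public static void main(String[] args) {
        // hypothetical connection parameters, for illustration only
        new Thread(() -> new formalPoll().pollData(
                "192.168.0.1:9092", "taxi-group", "topic-a", "topic-b")).start();
    }
}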
Singleton:
import java.util.Properties;
import java.util.concurrent.*;
/**
* @author shkstart
* @create 2019-07-26 15:07
*/
public class Singleton {
private static Singleton instance;
private static ExecutorService threadPool;
private Singleton() {
}
// synchronized so that lazy initialization is safe when called from multiple threads
public static synchronized Singleton getInstance() {
if (instance == null) {
instance = new Singleton();
}
return instance;
}
/**
* Get the shared thread pool, creating it on first use.
*
* @return the thread pool
*/
public synchronized ExecutorService getThreadPool() {
if (threadPool != null) {
return threadPool;
}
// note: with an unbounded LinkedBlockingQueue the pool never grows past its
// core size (200), so the maximum size (300) is effectively never reached
threadPool = new ThreadPoolExecutor(200, 300, 60, TimeUnit.MINUTES, new LinkedBlockingQueue<>(), Executors.defaultThreadFactory(), new ThreadPoolExecutor.AbortPolicy());
return threadPool;
}
}
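As the comment in getThreadPool notes, an unbounded LinkedBlockingQueue means the pool is effectively fixed at 200 threads and the maximum of 300 is never used. If the intent is to actually grow to 300 threads under load, the work queue must be bounded; here is a sketch under that assumption (the queue capacity of 10000 is an arbitrary tunable, not a value from the project):

import java.util.concurrent.*;

public class BoundedPoolFactory {
    public static ExecutorService newPool() {
        // with a bounded queue, the executor adds threads beyond the core size
        // (up to the maximum) once the queue fills; AbortPolicy then rejects
        // work only when both the queue and the pool are saturated
        return new ThreadPoolExecutor(200, 300, 60, TimeUnit.MINUTES,
                new LinkedBlockingQueue<>(10000),
                Executors.defaultThreadFactory(),
                new ThreadPoolExecutor.AbortPolicy());
    }
}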