1. Import the jar dependencies for the integration
<dependency>
    <groupId>org.apache.storm</groupId>
    <artifactId>storm-core</artifactId>
    <version>1.1.1</version>
    <!-- "provided" limits the dependency scope: it is needed while developing,
         but is not bundled when packaging because the Storm cluster already provides it -->
    <!-- <scope>provided</scope> -->
</dependency>
<!-- use the new kafka spout code -->
<dependency>
    <groupId>org.apache.storm</groupId>
    <artifactId>storm-kafka-client</artifactId>
    <version>1.1.1</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.10.0.0</version>
</dependency>
2. Develop the KafkaBolt
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

public class KafkaBolt extends BaseBasicBolt {
    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        // Print the whole tuple for debugging
        System.out.println(input.toString());
        // The tuple emitted by the KafkaSpout is laid out as
        // (topic, partition, offset, key, value), so index 4 holds the message value
        String value = input.getString(4);
        System.out.println(value);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // This bolt is the end of the topology and emits nothing downstream
    }
}
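If you prefer not to rely on positional indexes, the fields can also be read by name. The sketch below is a minimal alternative (the class name KafkaPrintBolt is just illustrative), assuming the spout uses storm-kafka-client's default record translator, which declares the fields "topic", "partition", "offset", "key" and "value":
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

// Sketch: same idea as the bolt above, but reading tuple fields by name.
// Assumes the default record translator, which emits
// "topic", "partition", "offset", "key", "value".
public class KafkaPrintBolt extends BaseBasicBolt {
    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        String topic = input.getStringByField("topic");
        String value = input.getStringByField("value");
        System.out.println("topic=" + topic + ", value=" + value);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // nothing emitted downstream
    }
}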
3. Develop the main method
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.kafka.spout.KafkaSpout;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
import org.apache.storm.topology.TopologyBuilder;

public class KafkaStormMain {
    public static void main(String[] args) throws InvalidTopologyException, AuthorizationException, AlreadyAliveException {
        // builder() takes two arguments: the Kafka broker addresses to connect to,
        // and the topic to consume data from
        KafkaSpoutConfig.Builder<String, String> kafkaSpoutConfigBuilder =
                KafkaSpoutConfig.builder("node01:9092,node02:9092,node03:9092", "test");
        // Set the consumer group this spout belongs to
        kafkaSpoutConfigBuilder.setGroupId("kafkaStormGroup");
        // Set where consumption starts: UNCOMMITTED_LATEST resumes from the last
        // committed offset, or from the latest offset if nothing has been committed yet
        kafkaSpoutConfigBuilder.setFirstPollOffsetStrategy(KafkaSpoutConfig.FirstPollOffsetStrategy.UNCOMMITTED_LATEST);
        // Call build() to get the KafkaSpoutConfig
        KafkaSpoutConfig<String, String> kafkaSpoutConfig = kafkaSpoutConfigBuilder.build();
        // Construct the KafkaSpout from the config
        KafkaSpout<String, String> kafkaSpout = new KafkaSpout<String, String>(kafkaSpoutConfig);

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("kafkaSpout", kafkaSpout);
        builder.setBolt("kafkaBolt", new KafkaBolt()).localOrShuffleGrouping("kafkaSpout");

        Config config = new Config();
        if (null != args && args.length > 0) {
            // Submit to the cluster; the first program argument is the topology name
            StormSubmitter.submitTopology(args[0], config, builder.createTopology());
        } else {
            // Run in a local cluster for testing
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("kafkaToStorm", config, builder.createTopology());
        }
    }
}
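To verify the topology locally, the test topic needs some data. The sketch below is one way to feed it a few messages using the kafka-clients dependency imported in step 1; the class name TestDataProducer is illustrative, and the broker list and topic name simply mirror the spout configuration above, so adjust them to your environment:
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

// Sketch: push a few test messages into the "test" topic so the running
// topology has something to print. The broker list matches the spout config.
public class TestDataProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "node01:9092,node02:9092,node03:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
        for (int i = 0; i < 10; i++) {
            producer.send(new ProducerRecord<String, String>("test", "key-" + i, "hello storm " + i));
        }
        producer.close();
    }
}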