/*
 * To guarantee that data is processed correctly, Storm tracks every tuple a spout emits.
 * For each tuple the spout sends (with a message id), the ack method receives a callback
 * on success; if processing fails or times out, the fail method is called instead.
 * Through this ack mechanism, every tuple is guaranteed to be processed at least once.
 */
package com.ning.storm;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.IRichSpout;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.trident.spout.IBatchSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;
import java.io.*;
import java.util.Map;
public class AckTest implements IRichSpout {
private SpoutOutputCollector collector = null;
int index = 0;
public void open(Map map, TopologyContext context, SpoutOutputCollector collector) {
this.collector = collector;
}
public void close() {
}
public void activate() {
}
public void deactivate() {
}
public void nextTuple() {
index++;
//这里会给每条发送的数据一个ID
collector.emit(new Values(), index);
}
//如果数据发送成功了,ack方法会获得发送数据的id
public void ack(Object o) {
System.out.println("[" + Thread.currentThread().getName() + "]" + "spout ack:" + o.toString());
}
//数据发送失败了,fail会获得发送失败的数据id
public void fail(Object o) {
System.out.println("[" + Thread.currentThread().getName() + "]" + "spout fail:" + o.toString());
}
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
}
public Map<String, Object> getComponentConfiguration() {
return null;
}
}