java实现storm+kafka+flume

主方法

/**
 * Entry point: wires up a local Storm topology that reads messages from a
 * Kafka topic and pushes them through ReadBolt -> WriteCountBolt.
 */
public class CountToMain {
    public static void main(String[] args) {
        String topic = "dsj03";            // Kafka topic to consume
        String zkRoot = "/dsj";            // ZooKeeper root path for offset storage
        String spountId = "kagkaSpount";   // component id for the spout (arbitrary)

        // ZooKeeper ensemble the Kafka spout uses for broker discovery.
        BrokerHosts hosts = new ZkHosts("zjgm01:2181,zjgm02:2181,zjgm03:2181");

        // Configure the spout: restart consumption from the beginning of the
        // topic and decode each raw message with our custom scheme.
        SpoutConfig spoutConfig = new SpoutConfig(hosts, topic, zkRoot, spountId);
        spoutConfig.forceFromStart = true;
        spoutConfig.scheme = new SchemeAsMultiScheme(new MessageScheme());

        // Assemble the processing graph: spout -> readBolt -> writeCountBolt,
        // each stage connected with shuffle grouping.
        TopologyBuilder topologyBuilder = new TopologyBuilder();
        topologyBuilder.setSpout(spountId, new KafkaSpout(spoutConfig));
        topologyBuilder.setBolt("readBolt", new ReadBolt()).shuffleGrouping(spountId);
        topologyBuilder.setBolt("writeCountBolt", new WriteCountBolt()).shuffleGrouping("readBolt");

        // Run everything inside an in-process cluster for local testing.
        Config stormConfig = new Config();
        stormConfig.setNumWorkers(4);
        LocalCluster localCluster = new LocalCluster();
        localCluster.submitTopology("count", stormConfig, topologyBuilder.createTopology());
    }
}

读数据

/**
 * Bolt that extracts the phone number from raw log lines.
 *
 * Looks for the literal marker {@code "user Phone :"} in each incoming tuple's
 * first field and emits everything after it; lines without the marker are
 * silently dropped.
 */
public class ReadBolt extends BaseBasicBolt {

    /** Marker that precedes the phone value in an incoming log line. */
    private static final String MARKER = "user Phone :";

    @Override
    public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
        String line = tuple.getString(0);
        int index = line.indexOf(MARKER); // -1 when the marker is absent
        if (index != -1) {
            // Emit the remainder of the line after the marker. Using
            // MARKER.length() instead of the previous hard-coded 12 keeps the
            // offset correct if the marker text ever changes.
            String phone = line.substring(index + MARKER.length());
            basicOutputCollector.emit(new Values(phone));
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        // Single output field consumed downstream by WriteCountBolt.
        outputFieldsDeclarer.declare(new Fields("s"));
    }
}

写数据

/**
 * Terminal bolt: counts how many times each phone value has been seen and
 * appends a running tally line to a per-task file under d:\storm\.
 */
public class WriteCountBolt extends BaseBasicBolt {
    // Output writer and in-memory counters; both initialized once in prepare().
    private FileWriter fileWriter = null;
    private Map<String, Integer> map = null;

    @Override
    public void prepare(Map stormConf, TopologyContext context) {
        map = new HashMap<>();
        try {
            // Random UUID suffix keeps files from parallel tasks / restarts
            // from overwriting each other.
            fileWriter = new FileWriter("d:\\storm\\yiju" + UUID.randomUUID().toString());
        } catch (IOException e) {
            // Fail fast: the old code only printed the stack trace, leaving
            // fileWriter null and guaranteeing an NPE on the first execute().
            throw new RuntimeException("unable to open output file", e);
        }
    }

    @Override
    public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
        String s = tuple.getString(0);
        // merge() replaces the null-check/put dance and avoids the extra
        // map.get() lookups of the original version.
        int count = map.merge(s, 1, Integer::sum);
        try {
            fileWriter.write(s + "登入" + count + "次");
            fileWriter.write("\n");
            fileWriter.flush();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void cleanup() {
        // Close the writer on bolt shutdown; the original leaked the handle.
        try {
            if (fileWriter != null) {
                fileWriter.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        // Terminal bolt: emits nothing downstream.
    }
}

设置kafka传进来的格式 不然无法读取

/**
 * Deserialization scheme for the KafkaSpout: turns each raw message's bytes
 * into a single-field tuple named "msg" so downstream bolts can read it.
 */
public class MessageScheme implements Scheme {
    @Override
    public List<Object> deserialize(byte[] bytes) {
        // Decode with an explicit charset. The original new String(bytes)
        // used the platform-default charset, which corrupts non-ASCII
        // payloads when the worker's default encoding is not UTF-8.
        String msg = new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
        return new Values(msg);
    }

    @Override
    public Fields getOutputFields() {
        // Field name matching what deserialize() emits.
        return new Fields("msg");
    }
}
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值