Implementation Principle
HBase provides support for MapReduce through two classes, TableMapper and TableReducer; all we need to do is extend them.
Write a Mapper class that extends TableMapper<Text, IntWritable>
The map method takes the parameters (ImmutableBytesWritable key, Result value, Context context):
ImmutableBytesWritable key: the row key
Result value: all cells of one row
Context context: the context object used to pass output downstream
Write a Reducer class that extends TableReducer<Text, IntWritable, KEYOUT> (the output value type is fixed to Mutation)
The reduce method takes the parameters (Text key, Iterable<IntWritable> values, Context context):
key: the key received from the map phase
values: the values received from the map phase
context: the context object used to pass output downstream
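For orientation before the full program, here is a minimal skeleton showing where the two base classes plug in. This is only a sketch: the class names are placeholders and the method bodies are intentionally empty.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;

// Skeleton only: shows the shape of the two HBase MapReduce subclasses.
public class SkeletonExample {
    public static class MyMapper extends TableMapper<Text, IntWritable> {
        @Override
        protected void map(ImmutableBytesWritable key, Result value, Context context)
                throws IOException, InterruptedException {
            // key: row key; value: all cells of the row; context: passes output on
        }
    }

    public static class MyReducer extends TableReducer<Text, IntWritable, NullWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            // whatever is written here must wrap an HBase Mutation (Put or Delete)
        }
    }
}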
Task: count how many rows the table contains.
The code is as follows:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import java.io.IOException;
/**
 * Purpose of this class:
 * counts how many rows the table contains.
 * Note: one rowkey represents one row.
 */
public class CountMR extends Configured implements Tool {
    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new CountMR(), args));
    }
    @Override
    public int run(String[] strings) throws Exception {
        // Job configuration.
        // getConf() returns the current configuration; when the job is not launched
        // from the cluster itself, use conf.set(...) to point at the target environment.
        Configuration conf = getConf();
        // Create the job from that configuration so its settings (and any -D options) reach the job.
        Job job = Job.getInstance(conf);
        // Set the class whose jar will be shipped with the job.
        job.setJarByClass(this.getClass());
        // Set the job name.
        job.setJobName("CountMR");
        // Configure the HBase mapper side: the input table name is read from the configuration,
        // the table is scanned with a default Scan, the mapper class is CMapper.class,
        // and the map output key/value types are Text and IntWritable.
        TableMapReduceUtil.initTableMapperJob(conf.get("intable"), new Scan(), CMapper.class, Text.class, IntWritable.class, job);
        // Configure the HBase reducer side: the output table name, the reducer class, and the job.
        TableMapReduceUtil.initTableReducerJob(conf.get("outtable"), CReducer.class, job);
        // Submit the job and report success or failure through the exit code.
        return job.waitForCompletion(true) ? 0 : 1;
    }
    // TableMapper reads its input directly from HBase.
    // Each call to map() processes one row, i.e. one rowkey.
    // key is the rowkey; value is the Result holding the whole row.
    public static class CMapper extends TableMapper<Text, IntWritable> {
        // ImmutableBytesWritable key: the row key. Result value: all cells of the row.
        // Context context: passes output downstream.
        // ***** value holds the actual data; most real jobs operate on value *****
        @Override
        protected void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException {
            // The input split carries the name of the table being scanned.
            TableSplit sp = (TableSplit) context.getInputSplit();
            String tb_name = Bytes.toString(sp.getTableName());
            // Use the table name as the map output key, and 1 (meaning one row) as the value.
            context.write(new Text(tb_name), new IntWritable(1));
        }
    }
    // TableReducer's output key/value:
    // whatever is written as the output key is ignored; the Put's own row key determines the target row.
    // The output value must be a Mutation, i.e. a Put or a Delete.
    // Text is the input key type, IntWritable is the input value type, NullWritable is the output key type.
    public static class CReducer extends TableReducer<Text, IntWritable, NullWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            // Sum all the values; each value contributes one row.
            long sum = 0;
            for (IntWritable v : values) {
                sum += v.get();
            }
            // Emit the result packed into a Put:
            //   rowkey     = table name
            //   info:count = row count
            // The table that receives the result must be created in advance (see the note after the listing).
            Put put = new Put(Bytes.toBytes(key.toString()));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("count"), Bytes.toBytes(String.valueOf(sum)));
            context.write(NullWritable.get(), put);
        }
    }
}
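Note that TableMapReduceUtil writes into an existing table, so the output table must be created before the job runs. For example (the table and jar names here are assumptions, not part of the code above), create the table in the hbase shell with create 'result_table', 'info' and launch the job with hadoop jar count-mr.jar CountMR -D intable=source_table -D outtable=result_table. Because CountMR is run through ToolRunner, the -D options are parsed into the Configuration that run() later reads via conf.get("intable") and conf.get("outtable").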
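The counting mapper above never touches value, but as its comment says, most real jobs operate on the value. Below is a minimal sketch of reading one cell out of the Result; the column family info and qualifier name are illustrative assumptions, and the imports are the same as in the listing above.

public static class CellReadingMapper extends TableMapper<Text, IntWritable> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context)
            throws IOException, InterruptedException {
        // Pull a single cell from the current row; getValue returns null if the cell is absent.
        byte[] raw = value.getValue(Bytes.toBytes("info"), Bytes.toBytes("name"));
        if (raw != null) {
            // Emit the cell's string value as the key, with a count of 1.
            context.write(new Text(Bytes.toString(raw)), new IntWritable(1));
        }
    }
}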