Data Conversion Between HBase and HDFS

This article is part of a series on implementing MapReduce features, covering data conversion between HBase and HDFS, sorting, Top N, deduplication, counting, and more. It shows how to read data from one HBase table and store the aggregated results in a second HBase table or in HDFS, including the Java code for creating the tables and the commands to run the job on Linux.


 

MapReduce Implementation Series:

MapReduce Implementation 1---Converting data between HBase and HDFS

MapReduce Implementation 2---Sorting

MapReduce Implementation 3---Top N

MapReduce Implementation 4---Mini project (read data from HBase, aggregate it, and output the Top 3 in descending order to HDFS)

MapReduce Implementation 5---Deduplication (Distinct) and counting (Count)

MapReduce Implementation 6---Maximum (Max), sum (Sum), and average (Avg)

MapReduce Implementation 7---Mini project (chaining multiple jobs to compute an average)

MapReduce Implementation 8---Partitioning (Partition)

MapReduce Implementation 9---PV and UV

MapReduce Implementation 10---Inverted Index

MapReduce Implementation 11---Join

 

Part 1: Read data from HBase table 1 and store the aggregated results in table 2

Create the corresponding table 1 in the HBase shell:

 
create 'hello','cf'
put 'hello','1','cf:hui','hello world'
put 'hello','2','cf:hui','hello hadoop'
put 'hello','3','cf:hui','hello hive'
put 'hello','4','cf:hui','hello hadoop'
put 'hello','5','cf:hui','hello world'
put 'hello','6','cf:hui','hello world'


Java code:

 
import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

// Counts identical cell values in table "hello" and writes the counts to table "mytb2".
// Note: this uses the old (pre-1.0) HBase client API (HBaseAdmin, Result.list(), Put.add()).
public class HBaseToHbase {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        String hbaseTableName1 = "hello";
        String hbaseTableName2 = "mytb2";

        // (Re)create the output table before the job starts
        prepareTB2(hbaseTableName2);

        Configuration conf = new Configuration();

        Job job = Job.getInstance(conf);
        job.setJarByClass(HBaseToHbase.class);
        job.setJobName("mrreadwritehbase");

        Scan scan = new Scan();
        scan.setCaching(500);       // fetch 500 rows per RPC instead of the default 1
        scan.setCacheBlocks(false); // don't pollute the block cache during a full scan

        TableMapReduceUtil.initTableMapperJob(hbaseTableName1, scan, doMapper.class, Text.class, IntWritable.class, job);
        TableMapReduceUtil.initTableReducerJob(hbaseTableName2, doReducer.class, job);
        // waitForCompletion returns true on success, so the success exit code is 0
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    public static class doMapper extends TableMapper<Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);

        @Override
        protected void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException {
            // Each row holds a single cell (cf:hui); emit its value with a count of 1
            String rowValue = Bytes.toString(value.list().get(0).getValue());
            context.write(new Text(rowValue), one);
        }
    }

    public static class doReducer extends TableReducer<Text, IntWritable, NullWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            System.out.println(key.toString());
            int sum = 0;
            Iterator<IntWritable> iter = values.iterator();
            while (iter.hasNext()) {
                sum += iter.next().get();
            }
            // Use the phrase itself as the row key and store the count in mycolumnfamily:count
            Put put = new Put(Bytes.toBytes(key.toString()));
            put.add(Bytes.toBytes("mycolumnfamily"), Bytes.toBytes("count"), Bytes.toBytes(String.valueOf(sum)));
            context.write(NullWritable.get(), put);
        }
    }

    // Drop and recreate the output table so each run starts from a clean slate
    public static void prepareTB2(String hbaseTableName) throws IOException {
        HTableDescriptor tableDesc = new HTableDescriptor(hbaseTableName);
        HColumnDescriptor columnDesc = new HColumnDescriptor("mycolumnfamily");
        tableDesc.addFamily(columnDesc);
        Configuration cfg = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(cfg);
        if (admin.tableExists(hbaseTableName)) {
            System.out.println("Table exists, dropping and recreating!");
            admin.disableTable(hbaseTableName);
            admin.deleteTable(hbaseTableName);
            admin.createTable(tableDesc);
        } else {
            System.out.println("create table: " + hbaseTableName);
            admin.createTable(tableDesc);
        }
    }
}
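
A note on the wiring: initTableMapperJob configures TableInputFormat with the given Scan, so the map tasks read the hello table directly from its regions, and initTableReducerJob sets TableOutputFormat as the job's output, so every Put the reducer emits is applied to mytb2; no output path needs to be set.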


Compile and run it on Linux:

 
[hadoop@h71 q1]$ /usr/jdk1.7.0_25/bin/javac HBaseToHbase.java
[hadoop@h71 q1]$ /usr/jdk1.7.0_25/bin/jar cvf xx.jar HBaseToHbase*class
[hadoop@h71 q1]$ hadoop jar xx.jar HBaseToHbase
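
One caveat: these commands only resolve the Hadoop and HBase classes if the corresponding jars are visible to javac and to hadoop jar; depending on the installation this may mean passing -cp to javac and running export HADOOP_CLASSPATH=$(hbase classpath) before launching the job.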

View the mytb2 table:

 
hbase(main):009:0> scan 'mytb2'
ROW            COLUMN+CELL
 hello hadoop  column=mycolumnfamily:count, timestamp=1489817182454, value=2
 hello hive    column=mycolumnfamily:count, timestamp=1489817182454, value=1
 hello world   column=mycolumnfamily:count, timestamp=1489817182454, value=3
3 row(s) in 0.0260 seconds
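
The counts line up with the six input rows: 'hello world' appears three times, 'hello hadoop' twice, and 'hello hive' once.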


Part 2: Read data from HBase table 1 and store the results in HDFS
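
The mapper is the same as in Part 1; the difference is that the reducer is a plain Hadoop Reducer and the job writes ordinary text files to HDFS via FileOutputFormat instead of emitting Puts to a table. Below is a minimal sketch of this variant, assuming the same 'hello' input table; the class name HBaseToHdfs and the output directory /output/hbasetohdfs are illustrative placeholders, not fixed by the original post.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Sketch: count identical cell values in table "hello" and write the counts to HDFS text files.
public class HBaseToHdfs {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf);
        job.setJarByClass(HBaseToHdfs.class);
        job.setJobName("hbasetohdfs");

        Scan scan = new Scan();
        scan.setCaching(500);
        scan.setCacheBlocks(false);

        // Same TableMapper pattern as in Part 1: read rows of the 'hello' table
        TableMapReduceUtil.initTableMapperJob("hello", scan, doMapper.class, Text.class, IntWritable.class, job);

        job.setReducerClass(doReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Hypothetical HDFS output directory; it must not exist before the job runs
        FileOutputFormat.setOutputPath(job, new Path("/output/hbasetohdfs"));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

    public static class doMapper extends TableMapper<Text, IntWritable> {
        private static final IntWritable one = new IntWritable(1);

        @Override
        protected void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException {
            // Each row holds a single cell (cf:hui); emit its value with a count of 1
            String rowValue = Bytes.toString(value.list().get(0).getValue());
            context.write(new Text(rowValue), one);
        }
    }

    // A plain Hadoop reducer: the output goes to HDFS text files instead of an HBase table
    public static class doReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable v : values) {
                sum += v.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }
}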


Contact me if needed: 2317348976 / yxxy1717
