1、Use a single table to hold the raw file data: each line of the file becomes one row in the table, so the newline character serves as the field delimiter, and the file name serves as the partition key.
drop table file_data;
create table file_data(context string) partitioned by (file_name string) row format delimited fields terminated by '\n' stored as textfile;
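As a quick sanity check (assuming the same Hive session), describe formatted lists context as the only data column and file_name under the partition information:
describe formatted file_data;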
2、Load the file into file_data. Note that LOAD DATA LOCAL INPATH reads from the local filesystem of the client machine, not from HDFS; to load a file that already sits on HDFS, drop the LOCAL keyword.
[root@hadoop001 hadoop]# cat /home/hadoop/demo.txt
hello world
hello hadoop
hello hive
hello spark
hello java
hello c++
load data local inpath '/home/hadoop/demo.txt' overwrite into table file_data PARTITION(file_name='/home/hadoop/demo.txt');
hive> select * from file_data;
OK
hello world /home/hadoop/demo.txt
hello hadoop /home/hadoop/demo.txt
hello hive /home/hadoop/demo.txt
hello spark /home/hadoop/demo.txt
hello java /home/hadoop/demo.txt
hello c++ /home/hadoop/demo.txt
Time taken: 0.087 seconds, Fetched: 6 row(s)
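If the file were already on HDFS, a variant without LOCAL would load it from there instead; in that case Hive moves the source file into the table's directory rather than copying it. The HDFS path below is illustrative, not from the original run:
load data inpath '/user/hadoop/demo.txt' overwrite into table file_data partition(file_name='/user/hadoop/demo.txt');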
3、Split each line on the space character ' '; each resulting word becomes one row in the result table.
select explode(split(context,' ')) from file_data where file_name='/home/hadoop/demo.txt';
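To see the intermediate step, split can be run on its own first: it turns each line into an array of words (for the first line, something like ["hello","world"]), and explode then flattens each array into one row per element:
select split(context, ' ') from file_data where file_name='/home/hadoop/demo.txt';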
drop table wordcount;
create table wordcount(context string) partitioned by (file_name string) row format delimited fields terminated by ' ' stored as textfile;
insert overwrite table wordcount partition(file_name='/home/hadoop/demo.txt') select explode(split(context,' ')) from file_data where file_name='/home/hadoop/demo.txt';
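To confirm the insert landed, a quick look at the result table (same session assumed):
select * from wordcount where file_name='/home/hadoop/demo.txt' limit 10;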
4、Aggregate with an HQL query and check that the result matches expectations.
select context, count(context) from wordcount where file_name='/home/hadoop/demo.txt' group by context;
Total MapReduce CPU Time Spent: 2 seconds 790 msec
OK
c++ 1
hadoop 1
hello 6
hive 1
java 1
spark 1
world 1
Time taken: 24.791 seconds, Fetched: 7 row(s)
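The intermediate wordcount table is not strictly necessary; a single query over file_data produces the same counts. This is a sketch of the same logic in one statement, not part of the original write-up:
select word, count(1) as cnt
from (
  select explode(split(context, ' ')) as word
  from file_data
  where file_name='/home/hadoop/demo.txt'
) t
group by word;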
[From @若泽大数据]