Hive Basic Operations

create database db_hive;
show databases;
use db_hive;
create table student(id int, name string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
show tables;
desc student;             -- view table info
desc extended student;    -- detailed table info
desc formatted student;   -- formatted detailed info
load data local inpath '/opt/datas/student.txt' into table db_hive.student;  -- load data
select * from student;

show functions;                -- list available functions
desc function upper;           -- show how to use the upper function (converts to uppercase)
desc function extended upper;  -- show usage examples for the function
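For a quick check, upper can be run against the student table created above (assuming it still holds data):
	select id, upper(name) upper_name from student;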

How to view the HDFS file system from inside the Hive CLI:
	hive (default)> dfs -ls / ;  

How to view the local file system from inside the Hive CLI:
	hive (default)> !ls /opt/datas ;


Create a managed (internal) table:
create table emp(
	empno int,
	ename string,
	job string,
	mgr int,
	hiredate string,
	sal double,
	comm double,
	deptno int
)
row format delimited fields terminated by '\t'
Load data:
load data local inpath '/opt/datas/emp.txt' overwrite into table emp;

create table dept(
	deptno int,
	dname string,
	loc string
)
row format delimited fields terminated by '\t'
Load data:
load data local inpath '/opt/datas/dept.txt' overwrite into table dept;



Truncate a table's data:
truncate table dept;

Rename a table:
alter table dept_like rename to dept_like_rename;
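The dept_like table being renamed here is presumably created from dept with CREATE TABLE ... LIKE (that step does not appear in the original notes); a minimal sketch:
	create table dept_like like dept;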




Create an external table
create EXTERNAL table emp(
	empno int,
	ename string,
	job string,
	mgr int,
	hiredate string,
	sal double,
	comm double,
	deptno int
)
row format delimited fields terminated by '\t'
location '/user/hive/warehouse/emp'

An external table points at existing data, so there is no need to load data into it.

In practice, the difference between managed and external tables shows up on drop (see the sketch below):
	dropping a managed table deletes both the table data and the metadata
	dropping an external table deletes only the metadata, not the table data
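A minimal way to observe this, assuming the external emp table defined above:
	drop table emp;                     -- external table: only the metastore entry is removed
	dfs -ls /user/hive/warehouse/emp;   -- the data files are still on HDFS
For a managed table, the same drop would also remove its directory under the warehouse.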


Create a partitioned table
create table dept( 
	deptno int, 
	dname string, 
	loc string 
)
partitioned by (month string)
row format delimited fields terminated by '\t'

load data local inpath '/opt/datas/dept.txt' overwrite into table dept
partition (month='201710');

Query the partitioned table:
select * from dept where month = '201710'

Query multiple partitions (e.g., by quarter):
select * from dept where month = '201709' 
union 
select * from dept where month = '201708' 
union 
select * from dept where month = '201707'
;

Partitioned table (two-level partitions)
create table dept( 
	deptno int, 
	dname string, 
	loc string 
)
partitioned by (month string, day string)
row format delimited fields terminated by '\t'

load data local inpath '/opt/datas/dept.txt' overwrite into table dept
partition (month='201710', day='13');

Query the two-level partitioned table:
select * from dept where month = '201710' and day = '13'

Things to note:
create table default.dept_nopart( 
	deptno int, 
	dname string, 
	loc string 
)
row format delimited fields terminated by '\t'

dfs -put /opt/datas/dept.txt /user/hive/warehouse/dept_nopart;
select * from dept_nopart;

create table default.dept_part( 
	deptno int, 
	dname string, 
	loc string 
)
partitioned by (day string)
row format delimited fields terminated by '\t'

-- manually create the directory for the partition
dfs -mkdir -p /user/hive/warehouse/dept_part/day=201710;

dfs -put /opt/datas/dept.txt /user/hive/warehouse/dept_part/day=201710;
select * from dept_part;

At this point, querying the table returns no data, because the new partition directory is not yet registered in the metastore.

We need to perform a repair operation.

First repair method:
	msck repair table dept_part;


Second repair method:
	alter table dept_part add partition(day='201710');


Check which partitions the table has:
	show partitions dept_part;



********************************************************
The main ways to load data into Hive:
	
load data [local] inpath 'filepath' [overwrite] into table tablename
[partition (partcol1=val1, ...)]

* Where the source file lives
	* local file: local
	* hdfs

* Whether to overwrite the table's existing data
	* overwrite: use overwrite
	* to append, leave overwrite out

* Loading into a partitioned table is a special case
	* [partition (partcol1=val1, ...)]


1) Load a local file into a Hive table
	load data local inpath '/opt/datas/emp.txt' into table default.emp;
2) Load an HDFS file into a Hive table (here the path refers to a location on HDFS)
	load data inpath '/opt/datas/emp.txt' into table default.emp;
3) Load data, overwriting the data already in the table
	load data inpath '/opt/datas/emp.txt' overwrite into table default.emp;
4) Create the table, then load via insert
	create table default.emp_ci like emp;
	insert into table default.emp_ci select * from default.emp;
5) Specify a location when creating the table to load its data
	create EXTERNAL table emp(
		empno int,
		ename string,
		job string
	)
	row format delimited fields terminated by '\t'
	location '/user/hive/warehouse/emp'


********************************************************
Ways to export data from Hive:
	insert overwrite local directory '/opt/datas/hive_exp_emp'
	select * from default.emp;

Specifying the output format:
	insert overwrite local directory '/opt/datas/hive_exp_emp'
	row format delimited fields terminated by '\t'
	collection items terminated by '\n'
	select * from default.emp;

Third way:
bin/hive -e "select * from default.emp;" > /opt/datas/exp_res.txt

Fourth way (saved on HDFS):
insert overwrite directory '/opt/datas/hive_exp_emp'
	row format delimited fields terminated by '\t'
	collection items terminated by '\n'
	select * from default.emp;

Fifth way:
sqoop (see the sketch below)
	hdfs/hive -> rdbms
	rdbms -> hdfs/hive/hbase
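A rough sqoop sketch; the connection string, credentials, and target database/table are placeholders, not from the original notes:
	# Hive/HDFS -> RDBMS: push the emp data files into a MySQL table
	bin/sqoop export \
		--connect jdbc:mysql://hostname:3306/testdb \
		--username root --password 123456 \
		--table emp \
		--export-dir /user/hive/warehouse/emp \
		--input-fields-terminated-by '\t'
	# RDBMS -> HDFS: pull a MySQL table back out
	bin/sqoop import \
		--connect jdbc:mysql://hostname:3306/testdb \
		--username root --password 123456 \
		--table dept \
		--target-dir /user/datas/dept_import \
		--fields-terminated-by '\t' \
		-m 1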


********************************************************
Common queries in Hive:
	select * from emp

	select t.empno from emp t;

	select t.empno from emp t where t.sal between 800 and 1500;

	select count(*) cnt from emp;
	select max(sal) max_sal from emp;
	select sum(sal) from emp;
	select avg(sal) from emp;

=====================
group by / having
  grouping
emp table
* average salary per department
	select t.deptno, avg(t.sal) avg_sal from emp t group by t.deptno;

* highest salary for each job within each department
	select t.deptno, t.job, max(t.sal) max_sal from emp t group by t.deptno, t.job;

>>> having
* where filters individual rows
* having filters grouped results (usually combined with group by)

* departments whose average salary is greater than 2000 (a fuller where/having sketch follows)
	select deptno, avg(sal) avg_sal from emp group by deptno having avg_sal > 2000;
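To make the where/having distinction concrete, a combined sketch (the sal > 800 row filter is only illustrative):
	select deptno, avg(sal) avg_sal
	from emp
	where sal > 800              -- where filters individual rows before grouping
	group by deptno
	having avg_sal > 2000;       -- having filters the grouped result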

join
 joining two tables
	select e.empno,d.deptno from emp e join dept d on e.deptno = d.deptno;

Left join
	select e.empno,d.deptno from emp e left join dept d on e.deptno = d.deptno;

Right join
	select e.empno,d.deptno from emp e right join dept d on e.deptno = d.deptno;

Full join
	full join
	select e.empno,d.deptno from emp e full join dept d on e.deptno = d.deptno;



********************************************************
Export
	exports data from a Hive table to an external location
Import
	imports external data into a Hive table

EXPORT TABLE emp TO 'export_target_path'

export_target_path:
	refers to a path on HDFS

EXPORT TABLE default.emp TO '/user/hive/export/emp_exp'

create table db_hive.emp like default.emp;
import table db_hive.emp from '/user/hive/export/emp_exp';


********************************************************
>> order by
	global sort, done by a single reducer
	select * from emp order by empno desc;

>> sort by
	sorts within each reducer; the overall result is not globally sorted
	set mapreduce.job.reduces=3;
	select * from emp sort by empno asc;
	
>> distribute by
	partitioning
	similar to the partitioner in MapReduce: decides which reducer each row goes to, used together with sort by (see the sketch below)

>> cluster by
	a combination of the two
	when distribute by and sort by use the same column, cluster by can be used as a shorthand
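A minimal sketch of both against the emp table (the reducer count of 3 is only an example setting):
	set mapreduce.job.reduces=3;
	-- send rows to reducers by deptno, then sort by empno within each reducer
	select * from emp distribute by deptno sort by empno asc;
	-- when distributing and sorting on the same column, cluster by is the shorthand
	select * from emp cluster by empno;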


********************************************************
UDF programming
	User Defined Function

// requires hive-exec and hadoop-common on the classpath
import org.apache.hadoop.hive.ql.exec.UDF;
import org.apache.hadoop.io.Text;

public class LowerUDF extends UDF {
	public Text evaluate(Text str) {
		// return null for null input
		if (str == null) {
			return null;
		}
		return new Text(str.toString().toLowerCase());
	}
}
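Before the query below will work, the class has to be packaged into a jar and registered; a minimal sketch, assuming the jar was copied to /opt/datas/hiveudf.jar and the class has no package:
	add jar /opt/datas/hiveudf.jar;
	create temporary function my_lower as 'LowerUDF';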

select ename, my_lower(ename) lowername from emp limit 5;
Or, register it permanently:
create function self_lower as 'LowerUDF' using jar 'hdfs:///path/to/jar';


********************************************************
Hive data storage
* row-oriented storage
* column-oriented storage


TEXTFILE format (row-oriented):
create table page_views(
	track_time string,
	url string,
	ip string,
	city_id string
)
row format delimited fields terminated by '\t'
stored as textfile;

load data local inpath '/opt/datas/page_views.data' into table page_views;


ORC format (column-oriented):
create table page_views_orc(
	track_time string,
	url string,
	ip string,
	city_id string
)
row format delimited fields terminated by '\t'
stored as orc;

insert into table page_views_orc select * from page_views;


Parquet format (column-oriented):
create table page_views_parquet(
	track_time string,
	url string,
	ip string,
	city_id string
)
row format delimited fields terminated by '\t'
stored as parquet;

insert into table page_views_parquet select * from page_views;
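To compare the storage footprint of the three formats, one option is to check each table's warehouse directory (paths assume the default warehouse layout used elsewhere in these notes):
	dfs -du -h /user/hive/warehouse/page_views;
	dfs -du -h /user/hive/warehouse/page_views_orc;
	dfs -du -h /user/hive/warehouse/page_views_parquet;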


Data compression:
create table page_views_orc_snappy(
	track_time string,
	url string,
	ip string,
	city_id string
)
row format delimited fields terminated by '\t'
stored as orc tblproperties ("orc.compress"="SNAPPY");
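The notes don't show the load step for this table, but presumably it follows the same pattern as the other columnar tables:
	insert into table page_views_orc_snappy select * from page_views;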

Summary:
	In real project development, Hive table data typically uses
		* storage format
			orcfile / parquet
		* data compression
			snappy


********************************************************
Hive optimization in practice
	large tables: [split] into
		sub-tables
	external tables and partitioned tables
		used in combination
		multi-level partitions [usually two levels]
	data
		storage format [textfile / orcfile / parquet]
	data compression
		snappy
	SQL
		optimize the SQL statements
		join, filter
	MapReduce (see the settings sketch below)
		reduce number
		JVM reuse
		speculative execution
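A sketch of the corresponding session settings (the values are only illustrative, not recommendations from the original notes):
	set mapreduce.job.reduces=3;             -- number of reducers
	set mapreduce.job.jvm.numtasks=10;       -- JVM reuse: tasks run per JVM
	set mapreduce.map.speculative=true;      -- speculative execution for map tasks
	set mapreduce.reduce.speculative=true;   -- speculative execution for reduce tasks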

Reposted from: https://my.oschina.net/hehongbo/blog/1547419
