HIVE Study Notes: Day 3
-
Date functions
1.from_unixtime
2.to_date
3.year month day
-
datediff
-
date_add
-
date_sub
-
current_date
-
add_months
-
last_day
-
trunc
-
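A minimal usage sketch of the date functions listed above; the literal dates and the commented results are illustrative, not from the course data:
SELECT from_unixtime(1663200000,'yyyy-MM-dd HH:mm:ss') -- unix seconds -> formatted string
,to_date('2022-09-15 10:30:00')      -- 2022-09-15
,year('2022-09-15'), month('2022-09-15'), day('2022-09-15')
,datediff('2022-09-15','2022-09-01') -- 14
,date_add('2022-09-15',7)            -- 2022-09-22
,date_sub('2022-09-15',7)            -- 2022-09-08
,current_date
,add_months('2022-09-15',1)          -- 2022-10-15
,last_day('2022-09-15')              -- 2022-09-30
,trunc('2022-09-15','MM');           -- 2022-09-01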
-
String functions
length
concat
concat_ws -- can concatenate strings and arrays of strings; other data types must first be converted with cast
substr,substring
trim
regexp_replace
regexp_extract
get_json_object
-- $ : the root node
-- . : a child node
-- [] : take an element of an array
-- * : take all elements
select get_json_object('{"store":{"fruit":[{"weight":8,"type":"apple"},{"weight":9,"type":"pear"}], "bicycle":{"price":19.95,"color":"red"} }, "email":"amy@only_for_json_udf_test.net", "owner":"amy" }','$.store.fruit[0].type');
split
regexp
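A minimal usage sketch of the remaining string functions listed above; the literals are illustrative (note the cast used to feed a non-string value to concat_ws):
SELECT length('hello')                           -- 5
,concat('2022','-','09')                         -- 2022-09
,concat_ws('-','2022','09',cast(15 as string))   -- 2022-09-15
,substr('hello hive',7)                          -- hive
,trim('  hive  ')                                -- hive
,regexp_replace('2022/09/15','/','-')            -- 2022-09-15
,regexp_extract('id=100','id=(\\d+)',1)          -- 100
,split('a,b,c',',')[1]                           -- b
,'hello' regexp 'h.*o';                          -- true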
-
Conditional functions
-
if function
if(boolean testCondition, T valueTrue, T valueFalseOrNull)
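A minimal sketch on the emp table used elsewhere in these notes; the 3000 threshold is illustrative:
SELECT ename
,sal
,if(sal > 3000,'high','low') as sal_level
FROM emp;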
-
COALESCE
SELECT COALESCE(bonus,0) from emp;
-
CASE
CASE WHEN condition1 THEN value_returned_when_condition1_is_true
     WHEN condition2 THEN value_returned_when_condition2_is_true
     ELSE value_returned_when_nothing_matches END
Note: in CASE WHEN, put the condition that filters out the most data first; this can speed up the query.
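A minimal sketch on the emp table; the salary thresholds are illustrative:
SELECT ename
,sal
,CASE WHEN sal >= 3000 THEN 'A'
      WHEN sal >= 1500 THEN 'B'
      ELSE 'C' END as sal_grade
FROM emp;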
-
NVL
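NVL(value, default_value) returns default_value when value is NULL, otherwise value; a minimal sketch, parallel to the COALESCE example above:
SELECT ename
,NVL(bonus,0) as bonus_nvl
FROM emp;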
-
-
Collection functions
collect_list
-- Requirement: collect the employee names of each department together, forming one row per department
CREATE TABLE learn3.collect AS
SELECT
deptno,collect_list(ename) as coll
from emp
GROUP BY deptno
-- collect_list must be used together with GROUP BY
collect_set
-- collect_set deduplicates the values within each group
SELECT
deptno
,collect_set(sal) as coll1
,collect_list(sal) as coll2
from emp
GROUP BY deptno
Row/column transformations
-
Multiple rows to one row -- using the collection functions
Related functions:
CONCAT(string A/col, string B/col, ...): returns the concatenation of the input strings; accepts any number of inputs.
CONCAT_WS(separator, str1, str2, ...): a special form of CONCAT(). The first argument is the separator placed between the remaining arguments; it can be any string. If the separator is NULL, the result is NULL. The function skips any NULL or empty strings after the separator argument.
Note: the arguments of CONCAT_WS must be string or array<string>.
COLLECT_SET(col): accepts only primitive types; it deduplicates and aggregates the values of a column into an array.
-- Requirement: given the student table data
1500100001,施笑槐,22,女,文科六班
1500100002,吕金鹏,24,男,文科六班
1500100003,单乐蕊,22,女,理科六班
1500100004,葛德曜,24,男,理科三班
1500100005,宣谷芹,22,女,理科五班
1500100006,边昂雄,21,男,理科二班
1500100007,尚孤风,23,女,文科六班
1500100008,符半双,22,女,理科六班
-- produce results of the form:
文科六班|女    1500100005|宣谷芹*1500100001|施笑槐
-- Analysis:
-- ① concatenate class and gender
-- ② concatenate ID and name
-- ③ group by the class/gender string and aggregate the ID/name strings
-- ④ join the aggregated values with *
WITH res AS(
SELECT clazz_g
,collect_list(id_n) id_n_list
FROM (
SELECT concat(clazz,'|',gender) clazz_g
,concat(id,'|',name) id_n
FROM learn1.student6
) T1
GROUP BY clazz_g
)
SELECT clazz_g
,concat_ws('*',id_n_list)
FROM res
-
One row to multiple rows
-
Function notes:
EXPLODE(col): splits a complex array or map column in Hive into multiple rows.
LATERAL VIEW usage: LATERAL VIEW udtf(expression) tableAlias AS columnAlias
Explanation: used together with UDTFs such as split and explode; it expands one column into multiple rows, and the exploded data can then be aggregated.
-
create table wordcount(
word string
);
insert into table wordcount values('hello word'),('hello hive'),('hello hadoop'),('hello java');

WITH split_tb AS (
select split(word,' ') as split_word from wordcount
)
SELECT split_word
,count(*)
FROM(
SELECT split_word[0] AS split_word FROM split_tb
UNION ALL
SELECT split_word[1] AS split_word FROM split_tb
)T1
GROUP BY T1.split_word

-- explode
WITH split_tb AS (
select split(word,' ') as split_word from wordcount
)
SELECT split_word
,count(*)
FROM(
SELECT explode(split_word) as split_word FROM split_tb
)T1
GROUP BY T1.split_word
-- movie.txt data:
肖申克的救赎,犯罪/剧情
霸王别姬,剧情/爱情/同性
阿甘正传,剧情/爱情
泰坦尼克号,剧情/爱情/灾难
这个杀手不太冷,剧情/动作/犯罪
--
CREATE TABLE moive(
name string,
type string
) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';

load data local inpath "/usr/local/soft/data/movie.txt" into table moive;

--
SELECT explode(split(type,'/')) FROM moive

-- explode can only appear on its own in the SELECT list; the following query therefore fails:
SELECT name
,explode(split(type,'/'))
FROM moive

/*
explode summary:
What it does: turns a list (array) value into multiple rows.
How to use it: for string data, first turn the string into an array with split, then expand it with explode.
*/

-- LATERAL VIEW EXPLODE
/*
Analysis:
① EXPLODE(split(type,'/')) splits the data and turns the result into multiple rows
② LATERAL VIEW registers the EXPLODE result as a lateral (side) table tb1
③ split_res is the column holding the EXPLODE result
④ FROM moive joins the lateral table with the moive table
⑤ the SELECT reads name and the split_res column
*/
SELECT name
,split_res
FROM moive LATERAL VIEW EXPLODE(split(type,'/')) tb1 as split_res
Input row:   肖申克的救赎,犯罪/剧情
Output rows: 肖申克的救赎  犯罪
             肖申克的救赎  剧情
Dynamic partitioned tables and bucketed tables
-
Dynamic partitioned tables
CREATE TABLE learn3.student_partition(
id string,
name string,
age int,
gender string
)
PARTITIONED BY (clazz string COMMENT "partition column: the student's class")
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';

INSERT INTO TABLE learn3.student_partition partition(clazz)
SELECT id
,name
,age
,gender
,clazz
FROM learn2.student7
WHERE clazz = '理科六班'
/*
Error:
Dynamic partition strict mode requires at least one static partition column. To turn this off set hive.exec.dynamic.partition.mode=nonstrict
Reason:
For partitioned tables Hive runs in strict mode by default, so a fully dynamic insert by column mapping alone is rejected.
*/

--
INSERT INTO TABLE learn3.student_partition partition(clazz)
SELECT id
,name
,age
,gender
,clazz -- the partition column must be the last column in the SELECT; with multi-level partitions, all partition columns go last, in order
FROM learn2.student7
WHERE clazz != '理科六班'
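A minimal sketch of the session settings that allow the fully dynamic insert above; the nonstrict value comes straight from the error message, and enabling hive.exec.dynamic.partition is the usual companion setting:
set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;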
Window functions
Basic statistics
-- Count the number of students in each class
SELECT
clazz
,count(*) as clazz_num
FROM learn2.student7
GROUP BY clazz
-- Count the number of students in each class while also showing each student's details
SELECT
T1.*
,T2.clazz_num
FROM learn2.student7 T1 JOIN (
SELECT
clazz
,count(*) as clazz_num
FROM learn2.student7
GROUP BY clazz
) T2 ON T1.clazz = T2.clazz
-- Windows: over()
/*
If over() is empty, the window used for the aggregation is the entire table.
*/
SELECT
T1.*
,count(*) over() as num -- over()
FROM learn2.student7 T1
/*
1500100001,施笑槐,22,女,文科六班 1000
1500100002,吕金鹏,24,男,文科六班 1000
...
*/
/*
over(PARTITION BY clazz) means the window used for the aggregation is the data of the current row's class
*/
SELECT
T1.*
,count(*) over(PARTITION BY clazz) as clazz_num
FROM learn2.student7 T1
/*
Analysis:
① load the student table data
② take one row to compute
③ because there is a window function, determine the window size
④ the window has PARTITION BY clazz, so the whole windowed table is partitioned by class
⑤ using the partition key, the rows whose class matches the current row's class form the window (a 文科六班 row uses the 文科六班 partition)
⑥ the aggregation is then computed over that partitioned window
1500100001,施笑槐,22,女,文科六班
*/
Ranking --
-- The salary ranking problem
-- MySQL 5.7 approach (correlated subquery)
SELECT
T1.*
,(SELECT count(*) FROM emp T2 WHERE T1.deptno = T2.deptno AND T1.sal < T2.sal)+1
from emp T1
-- ROW_NUMBER numbers the rows within the window; the ORDER BY inside OVER controls the order used for the numbering
SELECT
T1.*
,ROW_NUMBER() OVER(ORDER BY SAL) AS rm
FROM emp T1
SELECT
T1.*
,ROW_NUMBER() OVER(PARTITION BY deptno ORDER BY SAL) AS rm
FROM emp T1
create table new_score(
id int
,score int
,clazz string
,department string
) row format delimited fields terminated by ",";
-- See the window function documentation for details
-- ORDER BY sorts ascending by default; add DESC for descending order
SELECT
department
,score
,row_number() OVER(PARTITION BY department ORDER BY score) as rm
,dense_rank() OVER(PARTITION BY department ORDER BY score)
,rank() OVER(PARTITION BY department ORDER BY score)
,percent_rank() OVER(PARTITION BY department ORDER BY score)
,cume_dist() OVER(PARTITION BY department ORDER BY score DESC)
FROM new_score
row_number: no tied ranks; strictly consecutive numbering
dense_rank: tied values share a rank, and the next rank increases by 1 (no gaps)
rank: tied values share a rank, and gaps follow the ties
percent_rank: (rank - 1) / (number of rows in the partition - 1)
cume_dist: number of rows with a value <= the current value x / total rows in the window or partition
NTILE(n): splits the partition into n groups and tags each row with its group number
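A small worked illustration with hypothetical scores 100, 90, 90, 80 (ordered descending), plus a sketch of NTILE, which does not appear in the query above:
-- score:      100  90  90  80
-- row_number:   1   2   3   4
-- dense_rank:   1   2   2   3
-- rank:         1   2   2   4
SELECT department
,score
,NTILE(2) OVER(PARTITION BY department ORDER BY score DESC) as score_half -- splits each department into 2 groups
FROM new_score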
Window aggregate functions
max、min、avg、count、sum
-- The biggest difference from GROUP BY: OVER keeps every row of the table, computes the aggregate over that row's window, and attaches it to the row.
-- In OVER(PARTITION BY department ORDER BY score), ORDER BY only orders the rows inside the window; it is omitted here because with ORDER BY the default frame becomes a running window up to the current row, while we want the aggregate over the whole partition.
SELECT department
,score
,max(score) OVER(PARTITION BY department)
,min(score) OVER(PARTITION BY department)
,avg(score) OVER(PARTITION BY department)
,count(*) OVER(PARTITION BY department)
,sum(score) OVER(PARTITION BY department)
FROM new_score
Window frames
Purpose: adjust the window size to the needs of the query.
Usage:
① ROWS bounds the window by row position
② RANGE bounds the window by the values of a column
③ PARTITION BY bounds the window by partition
-- Take the score table, order it by score, and for each row average the current row with the previous and next rows
SELECT id
,score
,clazz
FROM new_score
ORDER BY score desc
SELECT id
,score
,clazz
,avg(score) OVER(ORDER BY score DESC ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING ) as avg_score
FROM new_score
new_score data:
111,69,class1,department1
112,80,class1,department1
113,74,class1,department1
114,94,class1,department1
115,93,class1,department1
121,74,class2,department1
122,86,class2,department1
123,78,class2,department1
124,70,class2,department1
211,93,class1,department2
212,83,class1,department2
213,94,class1,department2
214,94,class1,department2
215,82,class1,department2
216,74,class1,department2
221,99,class2,department2
222,78,class2,department2
223,74,class2,department2
224,80,class2,department2
225,85,class2,department2

Table rows (ordered by score desc):
221 99 class2   <- row 1, avg : 96.5
214 94 class1   <- row 2, avg
114 94 class1
213 94 class1
115 93 class1
211 93 class1
122 86 class2
225 85 class2
212 83 class1
215 82 class1
224 80 class2
112 80 class1
123 78 class2
222 78 class2
216 74 class1
121 74 class2
223 74 class2
113 74 class1
124 70 class2
111 69 class1

Window data:
row 1: 221 99 class2 / 214 94 class1
row 2: 221 99 class2 / 214 94 class1 / 114 94 class1

-- Average of the score over the current row and all preceding rows
SELECT id
,score
,clazz
,avg(score) OVER(ORDER BY score DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as avg_score
FROM new_score
-- BETWEEN 1 PRECEDING AND 1 PRECEDING: when both frame bounds are PRECEDING, a row only gets a value if the preceding window actually contains data
SELECT id
,score
,clazz
,max(score) OVER(ORDER BY score DESC ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) as max_score
FROM new_score
-- RANGE
-- Requirement: count the rows whose score is within 10 points below or above the current row's score
SELECT id
,score
,clazz
,count(*) OVER(ORDER BY score DESC RANGE BETWEEN 10 PRECEDING AND 10 FOLLOWING) as count_score
FROM new_score
-- RANGE works on the values of the column given in ORDER BY
-- Without ORDER BY, the RANGE frame below fails with a NullPointerException
SELECT id
,score
,clazz
,count(*) OVER( RANGE BETWEEN 10 PRECEDING AND 10 FOLLOWING) as count_score
FROM new_score
SELECT id
,score
,clazz
,count(*) OVER(w) as count_score
FROM new_score
WINDOW w AS (ORDER BY score DESC RANGE BETWEEN 10 PRECEDING AND 10 FOLLOWING)
Window frame summary:
① ROWS selects rows by position
② RANGE selects by the value of the ORDER BY column, within the range given in BETWEEN
③ UNBOUNDED means no bound
④ [num] with ROWS means a number of rows; with RANGE it means a value range
⑤ PRECEDING extends the range backwards (before the current row)
⑥ CURRENT ROW is the current row
⑦ FOLLOWING extends the range forwards (after the current row)
⑧ WINDOW w AS (window spec) gives the window a name
Row offsets: LAG and LEAD
LAG: the value from the n-th row before the current row
Form 1: LAG(col, n)   Form 2: LAG(col, n, default_value)
-- Requirement: for each row, get the score of the previous row
SELECT
id
,score
,clazz
,LAG(score,1) OVER(ORDER BY score) as LAG_res
FROM new_score
--
SELECT
id
,score
,clazz
,LAG(score,1,score) OVER(PARTITION BY clazz ORDER BY score) as LAG_res
FROM new_score
LEAD: the value from the n-th row after the current row
LEAD works like LAG and takes the same forms (see the sketch below)
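A minimal sketch mirroring the LAG query above; the 0 default is illustrative:
SELECT
id
,score
,clazz
,LEAD(score,1,0) OVER(PARTITION BY clazz ORDER BY score) as LEAD_res
FROM new_score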
First and last values
FIRST_VALUE: after ordering within the partition, the first value up to the current row
LAST_VALUE: after ordering within the partition, the last value up to the current row; for tied values it takes the last one
SELECT
id
,score
,clazz
,FIRST_VALUE(score) OVER(PARTITION BY clazz ORDER BY score) as FIRST_res
FROM new_score
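A minimal sketch for LAST_VALUE, parallel to the FIRST_VALUE query above:
SELECT
id
,score
,clazz
,LAST_VALUE(score) OVER(PARTITION BY clazz ORDER BY score) as LAST_res
FROM new_score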
Checkpoint traffic analysis (veh_pass data)
Device ID, capture date, vehicle count
451000000000071113,2022-07-13,30
451000000000071113,2022-07-14,60
451000000000071113,2022-07-15,80
451000000000071113,2022-07-16,20
451000000000071114,2022-07-13,32
451000000000071114,2022-07-14,60
451000000000071114,2022-07-15,45
451000000000071114,2022-07-16,34
451000000000071115,2022-07-13,23
451000000000071115,2022-07-14,11
451000000000071116,2022-07-15,23
451000000000071117,2022-07-16,40
451000000000071116,2022-07-17,23
451000000000071117,2022-07-14,60
451000000000071113,2022-06-13,31
451000000000071113,2022-06-14,60
451000000000071113,2022-06-15,82
451000000000071113,2022-06-16,23
451000000000071114,2022-06-13,34
451000000000071114,2022-06-14,60
451000000000071114,2022-06-15,45
451000000000071114,2022-06-16,34
451000000000071115,2022-06-13,27
451000000000071115,2022-06-14,11
451000000000071116,2022-06-15,28
451000000000071117,2022-06-16,49
451000000000071116,2022-06-17,24
451000000000071117,2022-06-14,62
451000000000071117,2022-06-15,62
Create the table:
CREATE TABLE veh_pass(
id string
,data string
,num string
)ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';
1) Accumulate traffic from the first day up to the current day
select
v1.*
,sum(num) over(PARTITION BY v1.id ORDER BY v1.data rows BETWEEN unbounded PRECEDING AND CURRENT ROW) as sum_data
from veh_pass v1
2) Sum of yesterday's and the current day's traffic
select
v1.*
,sum(num) over(PARTITION BY v1.id ORDER BY v1.data rows BETWEEN 1 PRECEDING AND CURRENT ROW) as sum_data
from veh_pass v1
3) Sum of the previous day, the current day, and the next day
select
v1.*
,sum(num) over(PARTITION BY v1.id ORDER BY v1.data rows BETWEEN 1 PRECEDING AND 1 following) as sum_data
from veh_pass v1
4) Sum of the current day and the next day
select
v1.*
,sum(num) over(PARTITION BY v1.id ORDER BY v1.data rows BETWEEN current row AND 1 following) as sum_data
from veh_pass v1
5) Sum of the current day and all following days
select
v1.*
,sum(num) over(PARTITION BY v1.id ORDER BY v1.data rows BETWEEN current row AND unbounded following) as sum_data
from veh_pass v1
Checkpoint traffic analysis
Build the data table
CREATE TABLE veh_pass(
sbbh string COMMENT 'device id',
rq string COMMENT 'date',
pass_num int COMMENT 'vehicle count'
) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';
Requirement 1: for the current month, list each device and its total vehicle count
SELECT substr(rq,1,7) as year_month
,sbbh
,sum(pass_num)
FROM veh_pass
GROUP BY substr(rq,1,7),sbbh
HAVING substr(current_date,1,7) = year_month
Requirement 2: show every traffic record together with each device's monthly total
/*
Analysis:
① the detail rows are needed, so a window function is required
② a monthly total per device is needed, so group/partition by month (and device)
③ sum the vehicle counts
-- Note: in real business scenarios the requirement is often not perfectly clear;
--       think about how the final result data will actually be used.
*/
SELECT T1.*
,sum(pass_num) over(PARTITION BY sbbh,substr(rq,1,7)) as pass_sum
FROM veh_pass T1
Requirement 3: show the detail rows by device id and date
1) Accumulate traffic from the first day up to the current day
/*
Analysis:
① the detail rows and the aggregate must appear together, so a window function is needed
② "from the first day up to the current day" means the window frame must be bounded
③ partition by device
*/
/*
Wrong: judged only by the wording of the requirement, the SQL below looks fine,
but it ignores how the result data will actually be used.
*/
SELECT T1.*
,sum(pass_num) over(ORDER BY sbbh,rq ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as sum_res
FROM veh_pass T1

/*
Correct: the final result compares dates within a single device,
so the window must be partitioned by device.
*/
SELECT T1.*
,sum(pass_num) over(PARTITION BY sbbh ORDER BY rq ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as sum_res
FROM veh_pass T1
2) Sum of yesterday's and the current day's traffic
-- Dates are sorted ascending, so "yesterday" is 1 PRECEDING
SELECT T1.*
,sum(pass_num) over(PARTITION BY sbbh ORDER BY rq ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) as sum_res
FROM veh_pass T1
3) Sum of the previous day, the current day, and the next day
SELECT T1.*
,sum(pass_num) over(PARTITION BY sbbh ORDER BY rq ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) as sum_res
FROM veh_pass T1
4) Sum of the current day and the next day
SELECT T1.*
,sum(pass_num) over(PARTITION BY sbbh ORDER BY rq ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING) as sum_res
FROM veh_pass T1
5) Sum of the current day and all following days
SELECT T1.*
,sum(pass_num) over(PARTITION BY sbbh ORDER BY rq ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) as sum_res
FROM veh_pass T1
Requirement 4: for each device, find the previous date that has data and the next date that has data
/*
451000000000071113,2022-07-13,30    NULL         2022-07-14
451000000000071113,2022-07-14,60    2022-07-13   2022-07-15
451000000000071113,2022-07-15,80    2022-07-14   2022-07-16
451000000000071113,2022-07-16,20
451000000000071114,2022-07-13,32

Analysis:
For each device, order by date and take the previous row -> the previous date that has data.
For each device, order by date and take the next row -> the next date that has data.
A default for the first row could be the first day of the month: concat(substr(rq,1,7),'-01')
*/
SELECT T1.*
,LAG(rq,1,trunc(rq,'MM')) OVER(PARTITION BY sbbh ORDER BY rq) LAG_RQ
,LAG(pass_num,1,0) OVER(PARTITION BY sbbh ORDER BY rq) LAG_pass
,LEAD(rq,1,last_day(rq)) OVER(PARTITION BY sbbh ORDER BY rq) LEAD_RQ
,LEAD(pass_num,1,0) OVER(PARTITION BY sbbh ORDER BY rq) LEAD_pass
FROM veh_pass T1
User-defined functions
Steps:
① write a class that extends UDF / GenericUDTF / GenericUDF
② implement the evaluate method (the name must not change); for the Generic variants, also implement initialize
③ add the jar to Hive's classpath
④ create a temporary or a permanent function
⑤ call the function (a sketch of steps ③-⑤ follows this list)
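A minimal sketch of steps ③-⑤, reusing the jar path and class name from the MyUDF example below; calling it on emp.ename is an illustrative assumption:
add jar /usr/local/soft/jars/hiveCode-1.0.jar;
create temporary function myudf as 'com.shujia.MyUDF';
select myudf(ename) from emp;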
package com.shujia;
import org.apache.hadoop.hive.ql.exec.UDF;
/*
* Purpose: capitalize the first letter of the given string; if the whole string is upper case,
*          the letters after the first are turned into lower case
*
* // ③ add the jar to Hive's classpath
* add jar /usr/local/soft/jars/hiveCode-1.0.jar
*
* // ④ create a temporary function (only usable in the current Hive session; it disappears when the session ends)
* create temporary function myudf as 'com.shujia.MyUDF';
*
* // ④ create a permanent function
* create function my_db.my_lower as 'com.example.hive.udf.Lower';
*
* // ⑤ call the function
* */
// ① the custom class extends UDF
public class MyUDF extends UDF {
// ② implement the evaluate method; the method name must not change
// the parameter and return types can be Java types or Hadoop writable types
public String evaluate(String word){
String lower_word = word.toLowerCase();
String first = lower_word.substring(0,1).toUpperCase();
return first+lower_word.substring(1);
}
}
/**
* Remaining problems?
* With this approach there is no way to check the number or the types of the arguments and report a helpful error.
* It also cannot carry usage/description information for the function.
*/
package com.shujia;
import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaIntObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
/**
* Note:
* The steps are basically the same as for extending UDF, but GenericUDF requires implementing three methods.
*
* Requirement:
* Return the length of the input string.
*
* Usage:
* select mylength('AAA') => 3
*
* Note:
* When replacing the jar, open a new session first and then add it again with add jar.
*
* GenericUDF characteristics:
* 1. takes one row of arguments and returns one value per row
* 2. the number and types of the arguments can be validated
*
* add jar /usr/local/soft/jars/hiveCode-1.0.jar
*
* create temporary function mylength as 'com.shujia.NewUDF';
*
*/
/**
*
* desc function mylength;
* desc function extended mylength;
*
*/
@Description(
name = "mylength",
value = "mylength('str') return length(str)",
extended = "select mylength('AAA')"
)
public class NewUDF extends GenericUDF {
/**
* initialize: called when the UDF object is set up; the incoming arguments can be validated here.
* Two things can be checked:
* ① the argument types
* ② the number of arguments
*
* @param arguments ObjectInspector is an inspector for a class/object; arguments are the parameters passed in from Hive
* @return ObjectInspector the type of the value the function returns to Hive (the type of the 3 in the usage example)
* @throws UDFArgumentException
*/
@Override
public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
/**
* ObjectInspector.Category category = arguments[0].getCategory();
* public static enum Category {
*     PRIMITIVE, => an ordinary (primitive) Hive type
*     LIST       => a Hive Array type
*     MAP        => a Hive Map type
*     STRUCT     => a Hive Struct type
* }
*/
if (arguments.length != 1) {
throw new UDFArgumentLengthException("当前函数只能有一个参数值,并且该参数值类型是String类型");
}
if (!arguments[0].getCategory().equals(ObjectInspector.Category.PRIMITIVE)) {
// public UDFArgumentTypeException(int argumentId, String message)
throw new UDFArgumentTypeException(1, "当前函数传入值类型为String");
}
// return PrimitiveObjectInspectorFactory.writableIntObjectInspector;
return PrimitiveObjectInspectorFactory.javaIntObjectInspector;
}
/**
* evaluate: holds the actual logic of the UDF and produces its return value
*
* @param arguments the argument values passed in from Hive
* @return
* @throws HiveException
*/
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
String col1 = arguments[0].get().toString();
// System.out.println(col1);
return col1.length();
}
/**
* getDisplayString: describes what the function does (shown in explain plans and error messages)
*
* @param children
* @return
*/
@Override
public String getDisplayString(String[] children) {
return "nothing to do ...";
}
}
package com.shujia;
import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.io.Text;
import java.util.ArrayList;
import java.util.List;
/**
* hello world hello
* returns three rows:
* hello
* world
* hello
* <p>
* Example:
* select mysplit("hello world hello"," ")
* <p>
* add jar /usr/local/soft/jars/hiveCode-1.0.jar
* create temporary function mysplit as 'com.shujia.MyUDTF';
*/
public class MyUDTF extends GenericUDTF {
@Override
public StructObjectInspector initialize(StructObjectInspector argOIs) throws UDFArgumentLengthException, UDFArgumentTypeException {
// get all arguments via the StructObjectInspector; each StructField corresponds to one argument
List<? extends StructField> allStructFieldRefs = argOIs.getAllStructFieldRefs();
// check the number of arguments
if (allStructFieldRefs.size() != 2) {
throw new UDFArgumentLengthException("该函数参数为两个,其中一个为待分割的字符串,另一个为分隔符");
}
// check the argument types
for (int i = 0; i < allStructFieldRefs.size(); i++) {
StructField structField = allStructFieldRefs.get(i);
ObjectInspector objectInspector = structField.getFieldObjectInspector();
if (!objectInspector.getCategory().equals(ObjectInspector.Category.PRIMITIVE)) {
throw new UDFArgumentTypeException(i + 1, "wrong argument type; use an ordinary Hive type such as string");
}
}
// names of the output columns
ArrayList<String> fieldNames = new ArrayList<String>();
fieldNames.add("word");
// types of the output columns
ArrayList<ObjectInspector> fieldOIs = new ArrayList<ObjectInspector>();
// fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
// the Hadoop writable string type corresponds to Hadoop's Text
fieldOIs.add(PrimitiveObjectInspectorFactory.writableStringObjectInspector);
return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames,
fieldOIs);
}
@Override
public void process(Object[] args) throws HiveException {
String line_words = args[0].toString();
String split = args[1].toString();
String[] words = line_words.split(split);
// one output row has a single column; a String[] could be used instead if the output type were javaStringObjectInspector
// String[] returnRes = new String[1];
//
ArrayList<Text> returnRes = new ArrayList<>();
for (String word : words) {
// reuse the one-column row container: overwrite it with the current word, then emit the row with forward()
returnRes.clear();
returnRes.add(new Text(word));
forward(returnRes);
}
}
@Override
public void close() throws HiveException {
// TODO: 2022/9/15 nothing to do
}
}
package com.shujia;
import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.ArrayList;
import java.util.List;
/**
* Requirement:
* Given the string {"movie": [{"movie_name": "肖申克的救赎", "MovieType": "犯罪" }, {"movie_name": "肖申克的救赎", "MovieType": "剧情" }]}
* return the movie_name values as one column and MovieType as another column, with every JSON object in the array becoming one row.
*
* select get_json_object('{"movie": [{"movie_name": "肖申克的救赎", "MovieType": "犯罪" }, {"movie_name": "肖申克的救赎", "MovieType": "剧情" }]}','$.movie');
*
* Analysis:
* Use get_json_object to pull out the JSONArray under the movie key, pass that result into the UDTF, and have it return multiple rows.
*
* Parsing:
* Given [{"movie_name":"肖申克的救赎","MovieType":"犯罪"},{"movie_name":"肖申克的救赎","MovieType":"剧情"}]
* turn the string into a JSONArray, loop over it taking movie_name and MovieType, and write each row out with forward().
*
* Create table:
* CREATE TABLE movie_json(movie string);
*
* Load data:
* load data local inpath '/usr/local/soft/data/UDTF.txt' into table movie_json;
*
* Test the table:
* select get_json_object(movie,'$.movie') from movie_json;
*
* add jar /usr/local/soft/jars/hiveCode-1.0.jar
*
* create temporary function getjson as 'com.shujia.MyUDTF2';
*
* select getjson(get_json_object(movie,'$.movie')) from movie_json;
*
*/
public class MyUDTF2 extends GenericUDTF {
@Override
public StructObjectInspector initialize(StructObjectInspector argOIs) throws UDFArgumentLengthException, UDFArgumentTypeException {
// get all arguments via the StructObjectInspector; each StructField corresponds to one argument
List<? extends StructField> allStructFieldRefs = argOIs.getAllStructFieldRefs();
// check the number of arguments
if(allStructFieldRefs.size() != 1){
throw new UDFArgumentLengthException("该函数参数为1个JSONArray的字符串");
}
// check the argument type
StructField structField = allStructFieldRefs.get(0);
ObjectInspector objectInspector = structField.getFieldObjectInspector();
if(!objectInspector.getCategory().equals(ObjectInspector.Category.PRIMITIVE)){
throw new UDFArgumentTypeException(1,"参数的类型不正确,请使用HIVE中的普通数据类型String");
}
// names of the output columns
ArrayList<String> fieldNames = new ArrayList<String>();
fieldNames.add("movieName");
fieldNames.add("movieType");
// types of the output columns
ArrayList<ObjectInspector> fieldOIs = new ArrayList<ObjectInspector>();
// with multiple output columns, the order of the types must match the order of the field names
fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames,
fieldOIs);
}
/**
* Processes the JSON array string
* @param args [{"movie_name":"肖申克的救赎","MovieType":"犯罪"},{"movie_name":"肖申克的救赎","MovieType":"剧情"}]
* @throws HiveException
*/
@Override
public void process(Object[] args) throws HiveException {
String jsonStr = args[0].toString();
try {
// parse the string into a JSONArray
JSONArray jsonArray = new JSONArray(jsonStr);
// JSONObject
// JSONObject jsonObject1 = new JSONObject(jsonStr);
String[] line_data = new String[2];
// iterate over the array by its length
for (int i = 0; i < jsonArray.length(); i++) {
// getJSONObject returns each JSON object in the array: {"movie_name":"肖申克的救赎","MovieType":"犯罪"}
JSONObject jsonObject = jsonArray.getJSONObject(i);
String movie_name = jsonObject.getString("movie_name");
String movie_type = jsonObject.getString("MovieType");
// must follow the same order as the field names defined in initialize
line_data[0] = movie_name;
line_data[1] = movie_type;
// emit the row once its values are filled in
forward(line_data);
}
} catch (JSONException e) {
e.printStackTrace();
}
}
@Override
public void close() throws HiveException {
// TODO: 2022/9/15 nothing todo
}
}
/**
* Homework:
* Write a UDTF that takes a JSON string {"movie": [{"movie_name": "肖申克的救赎", "MovieType": "犯罪" }, {"movie_name": "肖申克的救赎", "MovieType": "剧情" }]}
* and parses movie_name and MovieType into a two-column, multi-row result.
*
*/
*/