一、Requirement Description
Use a regular expression to pull selected fields out of log files on HDFS and write the results to a Hive table.
Flink version: 1.12.7.
二、Implementation
1. Main connector dependencies
<!-- filesystem connector -->
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-connector-files</artifactId>
    <version>${flink.version}</version>
</dependency>
<!-- CSV format, used by the sink table below -->
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-csv</artifactId>
    <version>${flink.version}</version>
</dependency>
2. UDF: extracting field values with a regular expression
package com.gao.function;

import org.apache.flink.table.annotation.DataTypeHint;
import org.apache.flink.table.annotation.FunctionHint;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.types.Row;
import org.apache.flink.types.RowKind;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Table function that runs a regex against the input string and emits
 * all capture groups of the first match as one ARRAY<STRING> column named arr.
 */
public class RegexpExtractTupleUdf extends TableFunction<Row> {

    @FunctionHint(output = @DataTypeHint("ROW<arr ARRAY<STRING>>"))
    public void eval(String raw, String pattern) {
        if (raw == null || pattern == null) {
            return; // emit nothing for null input
        }
        // Note: the pattern is recompiled on every call; cache the compiled
        // Pattern if the expression is constant for the whole job.
        Matcher m = Pattern.compile(pattern).matcher(raw);
        List<String> list = new ArrayList<>();
        if (m.find()) {
            // Collect every capture group of the first match.
            for (int i = 1; i <= m.groupCount(); i++) {
                list.add(m.group(i));
            }
        }
        // RowKind.INSERT is equivalent to RowKind.fromByteValue((byte) 0).
        Row row = new Row(RowKind.INSERT, 1);
        row.setField(0, list.toArray(new String[0]));
        collect(row);
    }
}
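Once registered (see the SQL below), a table function like this is invoked through LATERAL TABLE rather than as a scalar call. A minimal sketch of such an invocation, reusing the gif pattern from section 3; note that Flink SQL arrays are 1-indexed, so capture group 2 of the pattern is arr[2]:

-- Hypothetical usage: one regex pass, all capture groups land in `arr`
SELECT
    s.raw_message,
    t.arr[2] AS gif  -- arr[1] holds the delimiter group (?, & or <)
FROM source AS s,
    LATERAL TABLE(regexp_extract_tuple(s.raw_message, '(\?|&|<)gif=([^& \">]*)')) AS t(arr);

The SQL below registers the function but ends up extracting fields with the built-in REGEXP_EXTRACT instead; the UDF remains useful when several capture groups are needed from a single pass over the pattern.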
3. SQL
CREATE TEMPORARY SYSTEM FUNCTION regexp_extract_tuple AS 'com.gao.function.RegexpExtractTupleUdf';

CREATE TABLE source
(
    `raw_message` STRING
) WITH (
    'connector' = 'filesystem',
    'path' = 'hdfs://xxx',
    'format' = 'raw'
);

CREATE TABLE sink
(
    `gif` STRING,
    `referer` STRING,
    `mid` STRING
) WITH (
    'connector' = 'filesystem',
    'path' = 'hdfs://xxx',
    'format' = 'csv',
    'csv.field-delimiter' = '\t',           -- field delimiter; \t so Hive can split the columns
    'csv.disable-quote-character' = 'true', -- do not wrap string values in double quotes
    'csv.null-literal' = 'null'             -- write missing values as the literal null
);

CREATE TEMPORARY VIEW source_1 AS
SELECT
    raw_message
    ,REGEXP_EXTRACT(raw_message, '(\?|&|<)gif=([^& \">]*)', 2) AS `gif` -- group 1 matches the delimiter (?, & or <); group 2 is the gif field value
    ,REGEXP_EXTRACT(raw_message, '(\?|&|<)_referer=([^& \">]*)', 2) AS `referer`
    ,REGEXP_EXTRACT(raw_message, '(\?|&|<)mid=([^& \">]*)', 2) AS `mid`
FROM source;
INSERT INTO sink
SELECT `gif`, `referer`, `mid`
FROM source_1;
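On the Hive side, the sink directory can be exposed as an external table whose row format matches the \t-delimited CSV files written above. A minimal sketch, with a hypothetical table name and the same (elided) HDFS path as the sink:

CREATE EXTERNAL TABLE log_fields (
    gif     STRING,
    referer STRING,
    mid     STRING
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
STORED AS TEXTFILE
LOCATION 'hdfs://xxx';  -- the directory the Flink sink writes to

One caveat: Hive's default text serde treats \N, not the literal null, as SQL NULL, so with 'csv.null-literal' = 'null' empty fields arrive in Hive as the string 'null' unless serialization.null.format is adjusted on the Hive table.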
This article showed how to parse log files on HDFS with Flink SQL, extract the key fields with regular expressions (the built-in REGEXP_EXTRACT, or a custom UDF when several capture groups are needed at once), and write the results out as \t-delimited files for consumption by a Hive table. The setup targets Flink 1.12.7 and relies on the filesystem connector together with the raw and csv formats.