base_province.json —— DataX 同步脚本:MySQL → HDFS。
参数 hadoopConfig 为 HDFS HA(高可用)配置,目的是在 NameNode 主备节点之间动态切换。
注意:以上说明文字不属于 JSON 内容,运行前必须删除,否则 DataX 无法解析该文件。
{
    "job": {
        "setting": {
            "speed": {
                "channel": 3
            },
            "errorLimit": {
                "record": 0,
                "percentage": 0.02
            }
        },
        "content": [
            {
                "reader": {
                    "name": "mysqlreader",
                    "parameter": {
                        "username": "root",
                        "password": "000000",
                        "column": [
                            "id",
                            "name",
                            "region_id",
                            "area_code",
                            "iso_code",
                            "iso_3166_2",
                            "create_time",
                            "operate_time"
                        ],
                        "splitPk": "",
                        "connection": [
                            {
                                "table": [
                                    "base_province"
                                ],
                                "jdbcUrl": [
                                    "jdbc:mysql://hadoop102:3306/tms01?useUnicode=true&allowPublicKeyRetrieval=true&characterEncoding=utf-8"
                                ]
                            }
                        ]
                    }
                },
                "writer": {
                    "name": "hdfswriter",
                    "parameter": {
                        "defaultFS": "hdfs://mycluster",
                        "hadoopConfig": {
                            "dfs.nameservices": "mycluster",
                            "dfs.ha.namenodes.mycluster": "namenode1,namenode2",
                            "dfs.namenode.rpc-address.mycluster.namenode1": "hadoop102:8020",
                            "dfs.namenode.rpc-address.mycluster.namenode2": "hadoop103:8020",
                            "dfs.client.failover.proxy.provider.mycluster": "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
                        },
                        "fileType": "text",
                        "path": "/base_province",
                        "fileName": "base_province",
                        "column": [
                            {
                                "name": "id",
                                "type": "bigint"
                            },
                            {
                                "name": "name",
                                "type": "string"
                            },
                            {
                                "name": "region_id",
                                "type": "string"
                            },
                            {
                                "name": "area_code",
                                "type": "string"
                            },
                            {
                                "name": "iso_code",
                                "type": "string"
                            },
                            {
                                "name": "iso_3166_2",
                                "type": "string"
                            },
                            {
                                "name": "create_time",
                                "type": "string"
                            },
                            {
                                "name": "operate_time",
                                "type": "string"
                            }
                        ],
                        "writeMode": "append",
                        "fieldDelimiter": "\t",
                        "compress": "gzip"
                    }
                }
            }
        ]
    }
}