Root cause
In the getConnection method of org/apache/seatunnel/connectors/seatunnel/jdbc/catalog/AbstractJdbcCatalog.java, the call DriverManager.getConnection(url, username, pwd) fails because no suitable JDBC driver can be found on the class path.
Solution
1. Find out where the driver jars are located.
2. Load those jars into the class loader at runtime.
3. Retry getting the Connection.
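The core of the workaround, shown here as a minimal sketch: if the first DriverManager.getConnection call fails, reflectively append the known driver jar URLs to the current class loader and try again. The helper class and its jarUrls parameter are made up for illustration, and the reflective URLClassLoader.addURL trick assumes a Java 8 style application class loader; the actual changes to the SeaTunnel 2.3.5 source follow in the next sections.

import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.List;

// Illustrative sketch only; these names do not exist in the SeaTunnel code base.
public class DriverReloadSketch {

    public static Connection getConnectionWithReload(
            String url, String user, String pwd, List<URL> jarUrls) throws Exception {
        try {
            // First attempt: the driver may already be visible.
            return DriverManager.getConnection(url, user, pwd);
        } catch (Exception firstAttemptFailed) {
            // Append every known driver jar to the current class loader
            // (only works when it is a URLClassLoader, i.e. on Java 8).
            ClassLoader classLoader = DriverReloadSketch.class.getClassLoader();
            Method addURL = URLClassLoader.class.getDeclaredMethod("addURL", URL.class);
            addURL.setAccessible(true);
            for (URL jar : jarUrls) {
                addURL.invoke(classLoader, jar);
            }
            // Second attempt, now that the jars are on the class path.
            return DriverManager.getConnection(url, user, pwd);
        }
    }
}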
How the jar locations are obtained
They are collected in org/apache/seatunnel/engine/core/job/AbstractJobEnvironment.java.
The modified file:
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.seatunnel.engine.core.job;
import org.apache.seatunnel.api.env.EnvCommonOptions;
import org.apache.seatunnel.common.config.Common;
import org.apache.seatunnel.common.utils.FileUtils;
import org.apache.seatunnel.engine.common.config.JobConfig;
import org.apache.seatunnel.engine.common.exception.SeaTunnelEngineException;
import org.apache.seatunnel.engine.common.utils.IdGenerator;
import org.apache.seatunnel.engine.core.dag.actions.Action;
import org.apache.seatunnel.engine.core.dag.logical.LogicalDag;
import org.apache.seatunnel.engine.core.dag.logical.LogicalDagGenerator;
import org.apache.seatunnel.engine.core.parse.MultipleTableJobConfigParser;
import com.hazelcast.logging.ILogger;
import com.hazelcast.logging.Logger;
import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
public abstract class AbstractJobEnvironment {
protected static ILogger LOGGER = null;
protected final boolean isStartWithSavePoint;
protected final List<Action> actions = new ArrayList<>();
protected final Set<URL> jarUrls = new HashSet<>();
protected final Set<ConnectorJarIdentifier> connectorJarIdentifiers = new HashSet<>();
protected final JobConfig jobConfig;
protected final IdGenerator idGenerator;
    // protected final List<URL> commonPluginJars = new ArrayList<>(); // original instance field
    // region modification
    // TODO: changed to static so that the jdbc module can read the list through the accessor below
    protected static final List<URL> commonPluginJars = new ArrayList<>();

    public static List<URL> getCommonPluginJars() {
        return commonPluginJars;
    }
    // endregion
public AbstractJobEnvironment(JobConfig jobConfig, boolean isStartWithSavePoint) {
LOGGER = Logger.getLogger(getClass().getName());
this.jobConfig = jobConfig;
this.isStartWithSavePoint = isStartWithSavePoint;
this.idGenerator = new IdGenerator();
this.commonPluginJars.addAll(searchPluginJars());
this.commonPluginJars.addAll(
new ArrayList<>(
Common.getThirdPartyJars(
jobConfig
.getEnvOptions()
.getOrDefault(EnvCommonOptions.JARS.key(), "")
.toString())
.stream()
.map(Path::toUri)
.map(
uri -> {
try {
return uri.toURL();
} catch (MalformedURLException e) {
throw new SeaTunnelEngineException(
"the uri of jar illegal:" + uri, e);
}
})
.collect(Collectors.toList())));
LOGGER.info("add common jar in plugins :" + commonPluginJars);
}
protected Set<URL> searchPluginJars() {
try {
if (Files.exists(Common.pluginRootDir())) {
return new HashSet<>(FileUtils.searchJarFiles(Common.pluginRootDir()));
}
} catch (IOException | SeaTunnelEngineException e) {
LOGGER.warning(
String.format("Can't search plugin jars in %s.", Common.pluginRootDir()), e);
}
return Collections.emptySet();
}
public static void addCommonPluginJarsToAction(
Action action,
Set<URL> commonPluginJars,
Set<ConnectorJarIdentifier> commonJarIdentifiers) {
action.getJarUrls().addAll(commonPluginJars);
action.getConnectorJarIdentifiers().addAll(commonJarIdentifiers);
if (!action.getUpstream().isEmpty()) {
action.getUpstream()
.forEach(
upstreamAction -> {
addCommonPluginJarsToAction(
upstreamAction, commonPluginJars, commonJarIdentifiers);
});
}
}
public static Set<URL> getJarUrlsFromIdentifiers(
Set<ConnectorJarIdentifier> connectorJarIdentifiers) {
Set<URL> jarUrls = new HashSet<>();
connectorJarIdentifiers.stream()
.map(
connectorJarIdentifier -> {
File storageFile = new File(connectorJarIdentifier.getStoragePath());
try {
return Optional.of(storageFile.toURI().toURL());
} catch (MalformedURLException e) {
LOGGER.warning(
String.format("Cannot get plugin URL: {%s}", storageFile));
return Optional.empty();
}
})
.collect(Collectors.toList())
.forEach(
optional -> {
if (optional.isPresent()) {
jarUrls.add((URL) optional.get());
}
});
return jarUrls;
}
protected abstract MultipleTableJobConfigParser getJobConfigParser();
protected LogicalDagGenerator getLogicalDagGenerator() {
return new LogicalDagGenerator(actions, jobConfig, idGenerator);
}
protected abstract LogicalDag getLogicalDag();
}
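Since commonPluginJars is now a static field, the collected jar URLs are shared JVM-wide and accumulate across job environments; any module that depends on seatunnel-engine-core can read them through the new accessor. A minimal, illustrative caller (the class name is made up):

import org.apache.seatunnel.engine.core.job.AbstractJobEnvironment;

import java.net.URL;
import java.util.List;

// Illustrative only: shows how another module reads the shared plugin jar list.
public class PluginJarLookupExample {
    public static void main(String[] args) {
        // Empty until a job environment has been constructed and has scanned the plugins dir.
        List<URL> jars = AbstractJobEnvironment.getCommonPluginJars();
        jars.forEach(jar -> System.out.println("plugin jar: " + jar));
    }
}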
Modify the pom to bring in AbstractJobEnvironment
Location: <your local SeaTunnel source directory>\seatunnel-2.3.5\seatunnel-connectors-v2\connector-jdbc\pom.xml
Code:
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.seatunnel</groupId>
<artifactId>seatunnel-connectors-v2</artifactId>
<version>2.3.5</version>
</parent>
<artifactId>connector-jdbc</artifactId>
<name>SeaTunnel : Connectors V2 : Jdbc</name>
<properties>
<mysql.version>8.0.27</mysql.version>
<postgresql.version>42.4.3</postgresql.version>
<dm-jdbc.version>8.1.2.141</dm-jdbc.version>
<sqlserver.version>9.2.1.jre8</sqlserver.version>
<phoenix.version>5.2.5-HBase-2.x</phoenix.version>
<oracle.version>12.2.0.1</oracle.version>
<sqlite.version>3.39.3.0</sqlite.version>
<db2.version>db2jcc4</db2.version>
<tablestore.version>5.13.9</tablestore.version>
<teradata.version>17.20.00.12</teradata.version>
<redshift.version>2.1.0.9</redshift.version>
<saphana.version>2.14.7</saphana.version>
<snowflake.version>3.13.29</snowflake.version>
<vertica.version>12.0.3-0</vertica.version>
<hikari.version>4.0.3</hikari.version>
<postgis.jdbc.version>2.5.1</postgis.jdbc.version>
<kingbase8.version>8.6.0</kingbase8.version>
<hive.jdbc.version>3.1.3</hive.jdbc.version>
<oceanbase.jdbc.version>2.4.3</oceanbase.jdbc.version>
<xugu.jdbc.version>12.2.0</xugu.jdbc.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>${hikari.version}</version>
</dependency>
<dependency>
<groupId>com.aliyun.phoenix</groupId>
<artifactId>ali-phoenix-shaded-thin-client</artifactId>
<version>${phoenix.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>net.postgis</groupId>
<artifactId>postgis-jdbc</artifactId>
<version>${postgis.jdbc.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.dameng</groupId>
<artifactId>DmJdbcDriver18</artifactId>
<version>${dm-jdbc.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${sqlserver.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.oracle.database.jdbc</groupId>
<artifactId>ojdbc8</artifactId>
<version>${oracle.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.oracle.database.xml</groupId>
<artifactId>xdb6</artifactId>
<version>${oracle.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.oracle.database.xml</groupId>
<artifactId>xmlparserv2</artifactId>
<version>${oracle.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.xerial</groupId>
<artifactId>sqlite-jdbc</artifactId>
<version>${sqlite.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.ibm.db2.jcc</groupId>
<artifactId>db2jcc</artifactId>
<version>${db2.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.aliyun.openservices</groupId>
<artifactId>tablestore-jdbc</artifactId>
<version>${tablestore.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.teradata.jdbc</groupId>
<artifactId>terajdbc4</artifactId>
<version>${teradata.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.amazon.redshift</groupId>
<artifactId>redshift-jdbc42</artifactId>
<version>${redshift.version}</version>
<scope>provided</scope>
</dependency>
<!-- https://mvnrepository.com/artifact/com.sap.cloud.db.jdbc/ngdbc -->
<dependency>
<groupId>com.sap.cloud.db.jdbc</groupId>
<artifactId>ngdbc</artifactId>
<version>${saphana.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>net.snowflake</groupId>
<artifactId>snowflake-jdbc</artifactId>
<version>${snowflake.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.vertica.jdbc</groupId>
<artifactId>vertica-jdbc</artifactId>
<version>${vertica.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>cn.com.kingbase</groupId>
<artifactId>kingbase8</artifactId>
<version>${kingbase8.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>jdk.tools</groupId>
<artifactId>jdk.tools</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.oceanbase</groupId>
<artifactId>oceanbase-client</artifactId>
<version>${oceanbase.jdbc.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.xugudb</groupId>
<artifactId>xugu-jdbc</artifactId>
<version>${xugu.jdbc.version}</version>
<scope>provided</scope>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>org.apache.seatunnel</groupId>
<artifactId>connector-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>com.zaxxer</groupId>
<artifactId>HikariCP</artifactId>
<version>${hikari.version}</version>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
</dependency>
<dependency>
<groupId>net.postgis</groupId>
<artifactId>postgis-jdbc</artifactId>
</dependency>
<dependency>
<groupId>com.dameng</groupId>
<artifactId>DmJdbcDriver18</artifactId>
</dependency>
<dependency>
<groupId>com.aliyun.phoenix</groupId>
<artifactId>ali-phoenix-shaded-thin-client</artifactId>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
</dependency>
<dependency>
<groupId>com.oracle.database.jdbc</groupId>
<artifactId>ojdbc8</artifactId>
</dependency>
<dependency>
<groupId>com.oracle.database.xml</groupId>
<artifactId>xdb6</artifactId>
</dependency>
<dependency>
<groupId>com.oracle.database.xml</groupId>
<artifactId>xmlparserv2</artifactId>
</dependency>
<dependency>
<groupId>org.xerial</groupId>
<artifactId>sqlite-jdbc</artifactId>
</dependency>
<dependency>
<groupId>com.ibm.db2.jcc</groupId>
<artifactId>db2jcc</artifactId>
</dependency>
<dependency>
<groupId>com.aliyun.openservices</groupId>
<artifactId>tablestore-jdbc</artifactId>
</dependency>
<dependency>
<groupId>com.teradata.jdbc</groupId>
<artifactId>terajdbc4</artifactId>
</dependency>
<dependency>
<groupId>com.amazon.redshift</groupId>
<artifactId>redshift-jdbc42</artifactId>
</dependency>
<dependency>
<groupId>com.sap.cloud.db.jdbc</groupId>
<artifactId>ngdbc</artifactId>
</dependency>
<dependency>
<groupId>net.snowflake</groupId>
<artifactId>snowflake-jdbc</artifactId>
</dependency>
<dependency>
<groupId>com.vertica.jdbc</groupId>
<artifactId>vertica-jdbc</artifactId>
</dependency>
<dependency>
<groupId>cn.com.kingbase</groupId>
<artifactId>kingbase8</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
</dependency>
<dependency>
<groupId>com.oceanbase</groupId>
<artifactId>oceanbase-client</artifactId>
</dependency>
        <!-- added for this fix: lets the jdbc connector call AbstractJobEnvironment.getCommonPluginJars() -->
        <dependency>
            <groupId>org.apache.seatunnel</groupId>
            <artifactId>seatunnel-engine-core</artifactId>
            <version>2.3.5</version>
            <scope>compile</scope>
        </dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>shade</goal>
</goals>
<phase>package</phase>
<configuration>
<createSourcesJar>false</createSourcesJar>
<shadeSourcesContent>true</shadeSourcesContent>
<shadedArtifactAttached>false</shadedArtifactAttached>
<createDependencyReducedPom>false</createDependencyReducedPom>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
<relocations>
<!-- rename hikari to avoid jar conflict from spark -->
<relocation>
<pattern>com.zaxxer.hikari</pattern>
<shadedPattern>${seatunnel.shade.package}.com.zaxxer.hikari</shadedPattern>
</relocation>
</relocations>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
Re-acquire the connection
Location: org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.AbstractJdbcCatalog
Code:
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.seatunnel.connectors.seatunnel.jdbc.catalog;
import org.apache.seatunnel.api.table.catalog.*;
import org.apache.seatunnel.api.table.catalog.exception.CatalogException;
import org.apache.seatunnel.api.table.catalog.exception.DatabaseAlreadyExistException;
import org.apache.seatunnel.api.table.catalog.exception.DatabaseNotExistException;
import org.apache.seatunnel.api.table.catalog.exception.TableAlreadyExistException;
import org.apache.seatunnel.api.table.catalog.exception.TableNotExistException;
import org.apache.seatunnel.api.table.type.BasicType;
import org.apache.seatunnel.api.table.type.SqlType;
import org.apache.seatunnel.common.exception.CommonError;
import org.apache.seatunnel.common.exception.CommonErrorCode;
import org.apache.seatunnel.common.exception.SeaTunnelRuntimeException;
import org.apache.seatunnel.common.utils.JdbcUrlUtil;
import org.apache.seatunnel.connectors.seatunnel.jdbc.catalog.utils.CatalogUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.seatunnel.engine.core.job.AbstractJobEnvironment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
public abstract class AbstractJdbcCatalog implements Catalog {
private static final Logger LOG = LoggerFactory.getLogger(AbstractJdbcCatalog.class);
protected static final Set<String> SYS_DATABASES = new HashSet<>();
protected final String catalogName;
protected final String defaultDatabase;
protected final String username;
protected final String pwd;
protected final String baseUrl;
protected final String suffix;
protected final String defaultUrl;
protected final Optional<String> defaultSchema;
protected final Map<String, Connection> connectionMap;
public AbstractJdbcCatalog(
String catalogName,
String username,
String pwd,
JdbcUrlUtil.UrlInfo urlInfo,
String defaultSchema) {
checkArgument(StringUtils.isNotBlank(username));
checkArgument(StringUtils.isNotBlank(urlInfo.getUrlWithoutDatabase()));
this.catalogName = catalogName;
this.defaultDatabase = urlInfo.getDefaultDatabase().orElse(null);
this.username = username;
this.pwd = pwd;
this.baseUrl = urlInfo.getUrlWithoutDatabase();
this.defaultUrl = urlInfo.getOrigin();
this.suffix = urlInfo.getSuffix();
this.defaultSchema = Optional.ofNullable(defaultSchema);
this.connectionMap = new ConcurrentHashMap<>();
}
@Override
public String name() {
return catalogName;
}
@Override
public String getDefaultDatabase() {
return defaultDatabase;
}
protected Connection getConnection(String url) {
if (connectionMap.containsKey(url)) {
return connectionMap.get(url);
}
try {
            Connection connection = null;
            // TODO (wxt): original code was `connection = DriverManager.getConnection(url, username, pwd);`
            // the block below reloads the driver jars and retries when that call fails
            // region modification
            try {
                connection = DriverManager.getConnection(url, username, pwd);
            } catch (Exception e) {
                // Load all JDBC driver jars collected by the job environment into the
                // current class loader (assumes a Java 8 style URLClassLoader), then retry.
                List<URL> commonPluginJars = AbstractJobEnvironment.getCommonPluginJars();
                ClassLoader classLoader = getClass().getClassLoader();
                for (URL commonPluginJar : commonPluginJars) {
                    try {
                        Method addURL =
                                URLClassLoader.class.getDeclaredMethod("addURL", URL.class);
                        addURL.setAccessible(true);
                        addURL.invoke(classLoader, commonPluginJar);
                    } catch (Exception ex) {
                        throw new RuntimeException(ex);
                    }
                }
                connection = DriverManager.getConnection(url, username, pwd);
            }
            // endregion
connectionMap.put(url, connection);
return connection;
} catch (Exception e) {
throw new CatalogException(String.format("Failed connecting to %s via JDBC.", url), e);
}
}
@Override
public void open() throws CatalogException {
getConnection(defaultUrl);
LOG.info("Catalog {} established connection to {}", catalogName, defaultUrl);
}
@Override
public void close() throws CatalogException {
for (Map.Entry<String, Connection> entry : connectionMap.entrySet()) {
try {
entry.getValue().close();
} catch (SQLException e) {
throw new CatalogException(
String.format("Failed to close %s via JDBC.", entry.getKey()), e);
}
}
connectionMap.clear();
LOG.info("Catalog {} closing", catalogName);
}
protected String getSelectColumnsSql(TablePath tablePath) {
throw new UnsupportedOperationException();
}
protected Column buildColumn(ResultSet resultSet) throws SQLException {
throw new UnsupportedOperationException();
}
protected TableIdentifier getTableIdentifier(TablePath tablePath) {
return TableIdentifier.of(
catalogName,
tablePath.getDatabaseName(),
tablePath.getSchemaName(),
tablePath.getTableName());
}
public CatalogTable getTable(TablePath tablePath)
throws CatalogException, TableNotExistException {
if (!tableExists(tablePath)) {
throw new TableNotExistException(catalogName, tablePath);
}
String dbUrl;
if (StringUtils.isNotBlank(tablePath.getDatabaseName())) {
dbUrl = getUrlFromDatabaseName(tablePath.getDatabaseName());
} else {
dbUrl = getUrlFromDatabaseName(defaultDatabase);
}
Connection conn = getConnection(dbUrl);
try {
DatabaseMetaData metaData = conn.getMetaData();
Optional<PrimaryKey> primaryKey = getPrimaryKey(metaData, tablePath);
List<ConstraintKey> constraintKeys = getConstraintKeys(metaData, tablePath);
try (PreparedStatement ps = conn.prepareStatement(getSelectColumnsSql(tablePath));
ResultSet resultSet = ps.executeQuery()) {
TableSchema.Builder builder = TableSchema.builder();
buildColumnsWithErrorCheck(tablePath, resultSet, builder);
// add primary key
primaryKey.ifPresent(builder::primaryKey);
// add constraint key
constraintKeys.forEach(builder::constraintKey);
TableIdentifier tableIdentifier = getTableIdentifier(tablePath);
return CatalogTable.of(
tableIdentifier,
builder.build(),
buildConnectorOptions(tablePath),
Collections.emptyList(),
"",
catalogName);
}
} catch (SeaTunnelRuntimeException e) {
throw e;
} catch (Exception e) {
throw new CatalogException(
String.format("Failed getting table %s", tablePath.getFullName()), e);
}
}
protected void buildColumnsWithErrorCheck(
TablePath tablePath, ResultSet resultSet, TableSchema.Builder builder)
throws SQLException {
Map<String, String> unsupported = new LinkedHashMap<>();
while (resultSet.next()) {
try {
builder.column(buildColumn(resultSet));
} catch (SeaTunnelRuntimeException e) {
if (e.getSeaTunnelErrorCode()
.equals(CommonErrorCode.CONVERT_TO_SEATUNNEL_TYPE_ERROR_SIMPLE)) {
unsupported.put(e.getParams().get("field"), e.getParams().get("dataType"));
} else {
throw e;
}
}
}
if (!unsupported.isEmpty()) {
throw CommonError.getCatalogTableWithUnsupportedType(
catalogName, tablePath.getFullName(), unsupported);
}
}
protected Optional<PrimaryKey> getPrimaryKey(DatabaseMetaData metaData, TablePath tablePath)
throws SQLException {
return getPrimaryKey(
metaData,
tablePath.getDatabaseName(),
tablePath.getSchemaName(),
tablePath.getTableName());
}
protected Optional<PrimaryKey> getPrimaryKey(
DatabaseMetaData metaData, String database, String schema, String table)
throws SQLException {
return CatalogUtils.getPrimaryKey(metaData, TablePath.of(database, schema, table));
}
protected List<ConstraintKey> getConstraintKeys(DatabaseMetaData metaData, TablePath tablePath)
throws SQLException {
return getConstraintKeys(
metaData,
tablePath.getDatabaseName(),
tablePath.getSchemaName(),
tablePath.getTableName());
}
protected List<ConstraintKey> getConstraintKeys(
DatabaseMetaData metaData, String database, String schema, String table)
throws SQLException {
return CatalogUtils.getConstraintKeys(metaData, TablePath.of(database, schema, table));
}
protected String getListDatabaseSql() {
throw new UnsupportedOperationException();
}
@Override
public List<String> listDatabases() throws CatalogException {
try {
return queryString(
defaultUrl,
getListDatabaseSql(),
rs -> {
String s = rs.getString(1);
return SYS_DATABASES.contains(s) ? null : s;
});
} catch (Exception e) {
throw new CatalogException(
String.format("Failed listing database in catalog %s", this.catalogName), e);
}
}
@Override
public boolean databaseExists(String databaseName) throws CatalogException {
checkArgument(StringUtils.isNotBlank(databaseName));
return listDatabases().contains(databaseName);
}
protected String getListTableSql(String databaseName) {
throw new UnsupportedOperationException();
}
protected String getTableName(ResultSet rs) throws SQLException {
String schemaName = rs.getString(1);
String tableName = rs.getString(2);
if (StringUtils.isNotBlank(schemaName) && !SYS_DATABASES.contains(schemaName)) {
return schemaName + "." + tableName;
}
return null;
}
protected String getTableName(TablePath tablePath) {
return tablePath.getSchemaAndTableName();
}
@Override
public List<String> listTables(String databaseName)
throws CatalogException, DatabaseNotExistException {
if (!databaseExists(databaseName)) {
throw new DatabaseNotExistException(this.catalogName, databaseName);
}
String dbUrl = getUrlFromDatabaseName(databaseName);
try {
return queryString(dbUrl, getListTableSql(databaseName), this::getTableName);
} catch (Exception e) {
throw new CatalogException(
String.format("Failed listing database in catalog %s", catalogName), e);
}
}
@Override
public boolean tableExists(TablePath tablePath) throws CatalogException {
try {
return databaseExists(tablePath.getDatabaseName())
&& listTables(tablePath.getDatabaseName()).contains(getTableName(tablePath));
} catch (DatabaseNotExistException e) {
return false;
}
}
@Override
public void createTable(TablePath tablePath, CatalogTable table, boolean ignoreIfExists)
throws TableAlreadyExistException, DatabaseNotExistException, CatalogException {
checkNotNull(tablePath, "Table path cannot be null");
if (!databaseExists(tablePath.getDatabaseName())) {
throw new DatabaseNotExistException(catalogName, tablePath.getDatabaseName());
}
if (defaultSchema.isPresent()) {
tablePath =
new TablePath(
tablePath.getDatabaseName(),
defaultSchema.get(),
tablePath.getTableName());
}
if (tableExists(tablePath)) {
if (ignoreIfExists) {
return;
}
throw new TableAlreadyExistException(catalogName, tablePath);
}
createTableInternal(tablePath, table);
}
protected String getCreateTableSql(TablePath tablePath, CatalogTable table) {
throw new UnsupportedOperationException();
}
protected void createTableInternal(TablePath tablePath, CatalogTable table)
throws CatalogException {
String dbUrl = getUrlFromDatabaseName(tablePath.getDatabaseName());
try {
List<Column> columns = table.getTableSchema().getColumns();
            // (leftover experiment, commented out in the original: manually appending
            // PhysicalColumn instances to `columns`, e.g. an INT column or a STRING column
            // with a default value and a comment; unrelated to this fix)
executeInternal(dbUrl, getCreateTableSql(tablePath, table));
} catch (Exception e) {
throw new CatalogException(
String.format("Failed creating table %s", tablePath.getFullName()), e);
}
}
@Override
public void dropTable(TablePath tablePath, boolean ignoreIfNotExists)
throws TableNotExistException, CatalogException {
checkNotNull(tablePath, "Table path cannot be null");
if (!tableExists(tablePath)) {
if (ignoreIfNotExists) {
return;
}
throw new TableNotExistException(catalogName, tablePath);
}
dropTableInternal(tablePath);
}
protected String getDropTableSql(TablePath tablePath) {
throw new UnsupportedOperationException();
}
protected void dropTableInternal(TablePath tablePath) throws CatalogException {
String dbUrl = getUrlFromDatabaseName(tablePath.getDatabaseName());
try {
// Will there exist concurrent drop for one table?
executeInternal(dbUrl, getDropTableSql(tablePath));
} catch (SQLException e) {
throw new CatalogException(
String.format("Failed dropping table %s", tablePath.getFullName()), e);
}
}
@Override
public void createDatabase(TablePath tablePath, boolean ignoreIfExists)
throws DatabaseAlreadyExistException, CatalogException {
checkNotNull(tablePath, "Table path cannot be null");
checkNotNull(tablePath.getDatabaseName(), "Database name cannot be null");
if (databaseExists(tablePath.getDatabaseName())) {
if (ignoreIfExists) {
return;
}
throw new DatabaseAlreadyExistException(catalogName, tablePath.getDatabaseName());
}
createDatabaseInternal(tablePath.getDatabaseName());
}
protected String getCreateDatabaseSql(String databaseName) {
throw new UnsupportedOperationException();
}
protected void createDatabaseInternal(String databaseName) {
try {
executeInternal(defaultUrl, getCreateDatabaseSql(databaseName));
} catch (Exception e) {
throw new CatalogException(
String.format(
"Failed creating database %s in catalog %s",
databaseName, this.catalogName),
e);
}
}
protected void closeDatabaseConnection(String databaseName) {
String dbUrl = getUrlFromDatabaseName(databaseName);
try {
Connection connection = connectionMap.remove(dbUrl);
if (connection != null) {
connection.close();
}
} catch (SQLException e) {
throw new CatalogException(String.format("Failed to close %s via JDBC.", dbUrl), e);
}
}
public void truncateTable(TablePath tablePath, boolean ignoreIfNotExists)
throws TableNotExistException, CatalogException {
checkNotNull(tablePath, "Table path cannot be null");
if (!databaseExists(tablePath.getDatabaseName())) {
if (ignoreIfNotExists) {
return;
}
throw new DatabaseNotExistException(catalogName, tablePath.getDatabaseName());
}
truncateTableInternal(tablePath);
}
@Override
public void dropDatabase(TablePath tablePath, boolean ignoreIfNotExists)
throws DatabaseNotExistException, CatalogException {
checkNotNull(tablePath, "Table path cannot be null");
checkNotNull(tablePath.getDatabaseName(), "Database name cannot be null");
if (!databaseExists(tablePath.getDatabaseName())) {
if (ignoreIfNotExists) {
return;
}
throw new DatabaseNotExistException(catalogName, tablePath.getDatabaseName());
}
dropDatabaseInternal(tablePath.getDatabaseName());
}
protected String getDropDatabaseSql(String databaseName) {
throw new UnsupportedOperationException();
}
protected void dropDatabaseInternal(String databaseName) throws CatalogException {
try {
executeInternal(defaultUrl, getDropDatabaseSql(databaseName));
} catch (Exception e) {
throw new CatalogException(
String.format(
"Failed dropping database %s in catalog %s",
databaseName, this.catalogName),
e);
}
}
protected String getUrlFromDatabaseName(String databaseName) {
String url = baseUrl.endsWith("/") ? baseUrl : baseUrl + "/";
return url + databaseName + suffix;
}
protected String getOptionTableName(TablePath tablePath) {
return tablePath.getFullName();
}
@SuppressWarnings("MagicNumber")
protected Map<String, String> buildConnectorOptions(TablePath tablePath) {
Map<String, String> options = new HashMap<>(8);
options.put("connector", "jdbc");
options.put("url", getUrlFromDatabaseName(tablePath.getDatabaseName()));
options.put("table-name", getOptionTableName(tablePath));
options.put("username", username);
options.put("password", pwd);
return options;
}
@FunctionalInterface
public interface ResultSetConsumer<T> {
T apply(ResultSet rs) throws SQLException;
}
protected List<String> queryString(String url, String sql, ResultSetConsumer<String> consumer)
throws SQLException {
try (PreparedStatement ps = getConnection(url).prepareStatement(sql)) {
List<String> result = new ArrayList<>();
ResultSet rs = ps.executeQuery();
while (rs.next()) {
String value = consumer.apply(rs);
if (value != null) {
result.add(value);
}
}
return result;
}
}
// If sql is DDL, the execute() method always returns false, so the return value
// should not be used to determine whether changes were made in database.
protected boolean executeInternal(String url, String sql) throws SQLException {
LOG.info("Execute sql : {}", sql);
try (PreparedStatement ps = getConnection(url).prepareStatement(sql)) {
return ps.execute();
}
}
public CatalogTable getTable(String sqlQuery) throws SQLException {
Connection defaultConnection = getConnection(defaultUrl);
return CatalogUtils.getCatalogTable(defaultConnection, sqlQuery);
}
protected void truncateTableInternal(TablePath tablePath) throws CatalogException {
try {
executeInternal(defaultUrl, getTruncateTableSql(tablePath));
} catch (Exception e) {
throw new CatalogException(
String.format(
"Failed truncate table %s in catalog %s",
tablePath.getFullName(), this.catalogName),
e);
}
}
protected String getTruncateTableSql(TablePath tablePath) {
throw new UnsupportedOperationException();
}
protected String getExistDataSql(TablePath tablePath) {
throw new UnsupportedOperationException();
}
public void executeSql(TablePath tablePath, String sql) {
String dbUrl = getUrlFromDatabaseName(tablePath.getDatabaseName());
Connection connection = getConnection(dbUrl);
try (PreparedStatement ps = connection.prepareStatement(sql)) {
// Will there exist concurrent drop for one table?
ps.execute();
} catch (SQLException e) {
throw new CatalogException(String.format("Failed executeSql error %s", sql), e);
}
}
public boolean isExistsData(TablePath tablePath) {
String dbUrl = getUrlFromDatabaseName(tablePath.getDatabaseName());
Connection connection = getConnection(dbUrl);
String sql = getExistDataSql(tablePath);
try (PreparedStatement ps = connection.prepareStatement(sql)) {
ResultSet resultSet = ps.executeQuery();
return resultSet.next();
} catch (SQLException e) {
throw new CatalogException(String.format("Failed executeSql error %s", sql), e);
}
}
@Override
public PreviewResult previewAction(
ActionType actionType, TablePath tablePath, Optional<CatalogTable> catalogTable) {
if (actionType == ActionType.CREATE_TABLE) {
checkArgument(catalogTable.isPresent(), "CatalogTable cannot be null");
return new SQLPreviewResult(getCreateTableSql(tablePath, catalogTable.get()));
} else if (actionType == ActionType.DROP_TABLE) {
return new SQLPreviewResult(getDropTableSql(tablePath));
} else if (actionType == ActionType.TRUNCATE_TABLE) {
return new SQLPreviewResult(getTruncateTableSql(tablePath));
} else if (actionType == ActionType.CREATE_DATABASE) {
return new SQLPreviewResult(getCreateDatabaseSql(tablePath.getDatabaseName()));
} else if (actionType == ActionType.DROP_DATABASE) {
return new SQLPreviewResult(getDropDatabaseSql(tablePath.getDatabaseName()));
} else {
throw new UnsupportedOperationException("Unsupported action type: " + actionType);
}
}
}
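One caveat on the retry: adding jar URLs to the class loader does not by itself register a driver with DriverManager, because DriverManager runs its ServiceLoader scan only when the class is first initialized, and drivers normally self-register in a static initializer when their class is loaded. If the second getConnection call still reports "No suitable driver", a possible fallback (sketched below, not part of the original fix; the driver class name is supplied by the caller and differs per database) is to load and register the driver explicitly before retrying:

import java.sql.Driver;
import java.sql.DriverManager;

// Hedged fallback sketch, not part of the SeaTunnel code: force-load a driver class so
// that it registers itself with DriverManager, and register it manually as a safety net.
public class ExplicitDriverRegistration {

    public static void register(String driverClassName, ClassLoader loader) throws Exception {
        // Initializing the class runs its static initializer, which normally calls
        // DriverManager.registerDriver(...) on its own.
        Class<?> driverClass = Class.forName(driverClassName, true, loader);
        // Extra defensive registration; duplicate registrations are harmless.
        DriverManager.registerDriver((Driver) driverClass.getDeclaredConstructor().newInstance());
    }
}

For MySQL, for example, the driver class name would be com.mysql.cj.jdbc.Driver.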
Rebuild the project:
After packaging, the rebuilt artifacts end up under the dist directory.
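For reference, a typical full rebuild from the project root uses the standard Maven options shown below (adjust to your environment; skipping tests saves time):

mvn clean package -DskipTests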