import java.util.Properties

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, SaveMode}

object SparkJDBC {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Simple Application").setMaster("local[*]")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    val url = "jdbc:mysql://localhost:3306/easyui"

    // Credentials and the driver class go into the connection properties,
    // so they do not need to be appended to the URL.
    val props = new Properties()
    props.put("user", "root")
    props.put("password", "")
    props.put("driver", "com.mysql.jdbc.Driver")

    // Source data: one JSON object per line (see users.txt below).
    val usersDf = sqlContext.read.json("i:/4/users.txt")

    // Partitioned read of table "zhao": the partition column must be numeric,
    // so partition on "age" (assuming it is numeric in MySQL) rather than
    // "name"; lowerBound = 1, upperBound = 3, numPartitions = 1.
    val datas = sqlContext.read.jdbc(url, "zhao", "age", 1, 3, 1, props)
    datas.foreach { v => println("name:" + v.get(0) + " age:" + v.get(1)) }

    // Append to the existing table. The default ErrorIfExists mode would fail
    // because "zhao" already exists, and "overwrite" is not a valid connection
    // property. (insertIntoJDBC, the pre-1.4 way to do this, is deprecated.)
    usersDf.write.mode(SaveMode.Append).jdbc(url, "zhao", props)

    sc.stop()
  }
}
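If the table has no numeric column to range-partition on, DataFrameReader also offers a jdbc overload that takes an array of WHERE-clause predicates, one per resulting partition. A minimal sketch against the same assumed zhao table (the split point 'm' is hypothetical):

import java.util.Properties
import org.apache.spark.sql.{DataFrame, SQLContext}

// Each predicate becomes one partition's WHERE clause, so no numeric
// partition column or lower/upper bounds are needed.
def readByPredicates(sqlContext: SQLContext, url: String, props: Properties): DataFrame = {
  val predicates = Array(
    "name <= 'm'", // hypothetical split point on the name column
    "name > 'm'"   // rows matching no predicate (e.g. NULL names) are skipped
  )
  sqlContext.read.jdbc(url, "zhao", predicates, props)
}

Choose predicates that jointly cover the whole table, otherwise unmatched rows are silently dropped from the result.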
users.txt:
{"name":"11","age":"12"}
This post shows how to use Apache Spark to read data from MySQL over JDBC and write data back. It walks through configuring the Spark application, setting connection properties, loading data from a JSON file, and performing the actual JDBC read and write operations.