Structured Streaming: using foreach to write data to MySQL (code example)

package com.itcast.sql
import java.sql.{Connection, DriverManager, Statement}
import org.apache.spark.sql.{ForeachWriter, Row, SparkSession}
object StructruedForeach {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("structured mysql")
      .master("local[6]")
      .getOrCreate()
    spark.sparkContext.setLogLevel("ERROR")
    import spark.implicits._
    // Read from Kafka; the source exposes key/value/topic/partition/offset columns,
    // so cast the binary value column to a string before parsing
    val source = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "hadoop100:9092")
      .option("subscribe", "structrued_streaming_1")
      .option("startingOffsets", "latest")
      .load()
      .selectExpr("CAST(value AS STRING) as value")
      .as[String]
    // Each record looks like "peter,20,10.2" -> (name, age, gpa)
    val resultDF = source.map(
      item => {
        val words = item.split(",")
        (words(0), words(1).toInt, words(2).toDouble)
      }
    ).toDF("name", "age", "gpa")
    resultDF.writeStream
      .foreach(new MySqlWriter)
      .start()
      .awaitTermination()
  }
  // ForeachWriter is instantiated on the driver but opened and used on executors,
  // so the JDBC connection must be created in open(), not in the constructor
  class MySqlWriter extends ForeachWriter[Row] {
    var connection: Connection = _
    var statement: Statement = _

    override def open(partitionId: Long, version: Long): Boolean = {
      Class.forName("com.mysql.jdbc.Driver")
      connection = DriverManager.getConnection("jdbc:mysql://hadoop100:3306/spark_test", "root", "root")
      statement = connection.createStatement()
      true
    }
    override def process(value: Row): Unit = {
      // The string column must be quoted in the SQL literal; a PreparedStatement
      // would be safer against quoting and injection issues
      statement.executeUpdate(s"insert into student values('${value.getString(0)}', ${value.getInt(1)}, ${value.getDouble(2)})")
    }
    override def close(errorOrNull: Throwable): Unit = {
      // Guard against a failed open() leaving the fields null
      if (statement != null) statement.close()
      if (connection != null) connection.close()
    }
  }
}
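As a safer variant, a PreparedStatement avoids the manual quoting in process() above. The sketch below (a hypothetical MySqlPreparedWriter, not part of the original example) assumes the same spark_test database and a student table shaped like (name varchar, age int, gpa double); swap it into writeStream.foreach(...) in place of MySqlWriter.

class MySqlPreparedWriter extends ForeachWriter[Row] {
  var connection: Connection = _
  var statement: java.sql.PreparedStatement = _

  override def open(partitionId: Long, version: Long): Boolean = {
    Class.forName("com.mysql.jdbc.Driver")
    connection = DriverManager.getConnection("jdbc:mysql://hadoop100:3306/spark_test", "root", "root")
    // Placeholders are filled per row, so no manual quoting is needed
    statement = connection.prepareStatement("insert into student values(?, ?, ?)")
    true
  }

  override def process(value: Row): Unit = {
    statement.setString(1, value.getString(0))
    statement.setInt(2, value.getInt(1))
    statement.setDouble(3, value.getDouble(2))
    statement.executeUpdate()
  }

  override def close(errorOrNull: Throwable): Unit = {
    if (statement != null) statement.close()
    if (connection != null) connection.close()
  }
}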
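On Spark 2.4 or later, foreachBatch with the built-in JDBC batch sink is a common alternative to hand-rolling a ForeachWriter. Below is a minimal sketch assuming the same connection settings as the example above; note that on Scala 2.12 the lambda overload of foreachBatch may need explicit parameter types, as written here.

resultDF.writeStream
  .foreachBatch { (batch: org.apache.spark.sql.DataFrame, batchId: Long) =>
    // Write each micro-batch with the regular batch JDBC writer
    batch.write
      .format("jdbc")
      .option("url", "jdbc:mysql://hadoop100:3306/spark_test")
      .option("dbtable", "student")
      .option("user", "root")
      .option("password", "root")
      .mode("append")
      .save()
  }
  .start()
  .awaitTermination()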