Spark---Datasource(JDBC)---Scala

This article walks through an example program that uses Apache Spark SQL to read from a MySQL database over JDBC. The program loads two tables, joins them, keeps only students whose score is above 80, and inserts the result into a third table.
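The example assumes three tables already exist in the MySQL test database. The setup sketch below is inferred from how the code later accesses columns by position; the column names and types are assumptions, not taken from the original article, so adapt them to your real schema.

import java.sql.DriverManager

// Hypothetical one-off setup: creates the tables the example expects.
// Host, credentials, column names and types are assumptions.
object CreateExampleTables {
  def main(args: Array[String]): Unit = {
    Class.forName("com.mysql.jdbc.Driver")
    val conn = DriverManager.getConnection("jdbc:mysql://spark001:3306/test", "root", "123123")
    val stat = conn.createStatement()
    try {
      stat.executeUpdate("CREATE TABLE IF NOT EXISTS student_infos (name VARCHAR(32), age INT)")
      stat.executeUpdate("CREATE TABLE IF NOT EXISTS student_scores (name VARCHAR(32), score INT)")
      stat.executeUpdate("CREATE TABLE IF NOT EXISTS good_student_infos (name VARCHAR(32), age INT, score INT)")
    } finally {
      stat.close()
      conn.close()
    }
  }
}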


package com.spark.sparksql.datasource.scala

import java.sql.DriverManager
import java.util

import org.apache.spark.sql.types.{StructType, IntegerType, StringType, StructField}
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkContext, SparkConf}

/**
  * Created by root on 2017/8/10.
  */
object JDBCDataSource {

  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("JDBCDataSource").setMaster("local")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // Connection options for the source MySQL database
    val options = new util.HashMap[String, String]()
    options.put("url", "jdbc:mysql://spark001:3306/test")
    options.put("user", "root")
    options.put("password", "123123")
    options.put("dbtable", "student_infos")
    val studentInfosDF = sqlContext.read.format("jdbc").options(options).load()

    options.put("dbtable","student_scores")
    val studentScoresDF = sqlContext.read.format("jdbc").options(options).load()

    // Convert the two DataFrames into pair RDDs keyed by name, then join them
    val rdd1 = studentInfosDF.map(x => (x.getString(0), x.getInt(1)))
    val rdd2 = studentScoresDF.map(x => (x.getString(0), x.getInt(1)))
    val studentsRDD = rdd1.join(rdd2)

    // Convert the joined pair RDD into RDD[Row] of (name, age, score)
    val studentsRowRDD = studentsRDD.map(x => Row(x._1, x._2._1, x._2._2))
    // Keep only students whose score is above 80
    val goodStudentsRDD = studentsRowRDD.filter(x => x.getInt(2) > 80)

    val structFields = Array(StructField("name", StringType, true)
                            ,StructField("age", IntegerType, true)
                            ,StructField("score", IntegerType, true))

    val studentsDF = sqlContext.createDataFrame(goodStudentsRDD, StructType(structFields))
    // Collect to the driver before printing; iterating the DataFrame directly would print on the executors
    studentsDF.collect().foreach(println)

    // Write each qualifying student back to MySQL.
    // Note: this opens a new connection per row, which is expensive; see the DataFrame-API sketch after this listing.
    studentsDF.foreach(row => {
      val sql = "insert into good_student_infos values('" + row.getString(0) + "'," + row.getInt(1) + "," + row.getInt(2) + ")"
      Class.forName("com.mysql.jdbc.Driver")
      val conn = DriverManager.getConnection("jdbc:mysql://spark001:3306/test", "root", "123123")
      val stat = conn.createStatement()
      try {
        stat.executeUpdate(sql)
      } finally {
        stat.close()
        conn.close()
      }
    })
  }
}
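The row-at-a-time insert above works for small result sets, but opening a connection per record does not scale. A shorter alternative is to stay in the DataFrame API and let Spark's JDBC writer handle the inserts. The sketch below assumes Spark 1.4+ (for read.jdbc / write.jdbc) and that the source tables expose columns named name, age and score; those names are assumptions, so adapt them to the real schema.

import java.util.Properties
import org.apache.spark.sql.SaveMode

// Assumed column names: student_infos(name, age), student_scores(name, score)
val props = new Properties()
props.setProperty("user", "root")
props.setProperty("password", "123123")

val infos  = sqlContext.read.jdbc("jdbc:mysql://spark001:3306/test", "student_infos", props)
val scores = sqlContext.read.jdbc("jdbc:mysql://spark001:3306/test", "student_scores", props)

// Join on the shared name column, keep scores above 80,
// and append the result to good_student_infos in one batched write
val good = infos.join(scores, "name").filter("score > 80")
good.write.mode(SaveMode.Append)
    .jdbc("jdbc:mysql://spark001:3306/test", "good_student_infos", props)

If the hand-written SQL is preferred, foreachPartition can keep the manual JDBC inserts while opening only one connection per partition instead of one per row.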