spark读取嵌套json代码测试示例

本文通过三个示例详细介绍了如何使用Spark读取和处理嵌套JSON数据,包括示例一的基础操作,示例二的进阶技巧,以及示例三的复杂场景应用。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

示例一

示例数据:
{"name":"zhangsan","age":20}
{"name":"lisi"}
{"name":"wangwu","age":18}
{"name":"a","age":21}
{"name":"zhangsan1","age":20}
{"name":"lisi1"}
{"name":"wangwu1","age":18}
{"name":"a1","age":21}
{"name":"zhangsan2","age":20}
{"name":"lisi2"}
{"name":"wangwu2","age":18}
{"name":"a2","age":21}
{"name":"zhangsan3","age":20}
{"name":"lisi3"}
{"name":"wangwu3","age":18}
{"name":"a3","age":21}
{"name":"zhangsan4","age":20}
{"name":"lisi4"}
{"name":"wangwu4","age":18}
{"name":"a4","age":21}
{"name":"zhangsan5","age":20}
{"name":"lisi5"}
{"name":"wangwu5","age":18}
{"name":"a5","age":21}

代码块:

package com.lw.scalaspark.sql.examples

import org.apache.spark.sql.types.IntegerType
import org.apache.spark.sql.{DataFrame, SparkSession}

object readJsonFile {
  /**
    * Demonstrates extracting fields from raw JSON strings held in a single
    * DataFrame column using get_json_object().
    *
    * Note: get_json_object() does not recognize JSON strings quoted with
    * single quotes, e.g. "{'name':'zhangsan','age':18}" yields null fields.
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("readJsonFile")
      .master("local")
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    // Each element is one complete JSON document; double quotes are
    // required for get_json_object() to parse the fields.
    val list = List[String](
      "{\"name\":\"zhangsan\",\"age\":20}",
      "{\"name\":\"lisi\",\"age\":21}",
      "{\"name\":\"wangwu\",\"age\":22}",
      "{\"name\":\"zhaoliu\",\"age\":23}"
    )

    // One-column DataFrame holding the raw JSON strings.
    // Alternatively, read the same shape of data from a file:
    //   val frame = spark.read.textFile("./data/json").toDF("infos")
    val frame: DataFrame = list.toDF("infos")
    frame.show(100, truncate = false)
    frame.printSchema()

    // Pull individual fields out of the JSON column with JSONPath
    // expressions; "age" arrives as a string and is cast to an integer.
    val result: DataFrame = frame.select(
      get_json_object($"infos", "$.name").as("name"),
      get_json_object($"infos", "$.age").cast(IntegerType).as("age")
    )
    result.show(100)
    result.printSchema()

    // Release Spark resources; the original example omitted this.
    spark.stop()
  }
}

示例二

示例数据:
{"name":"zhangsan","age":18,"scores":[{"yuwen":98,"shuxue":90,"yingyu":100},{"dili":98,"shengwu":78,"huaxue":100}]}
{"name":"lisi","age":19,"scores":[{"yuwen":58,"shuxue":50,"yingyu":78},{"dili":56,"shengwu":76,"huaxue":13}]}
{"name":"wangwu","age":17,"scores":[{"yuwen":18,"shuxue":90,"yingyu":45},{"dili":76,"shengwu":42,"huaxue":45}]}
{"name":"zhaoliu","age":20,"scores":[{"yuwen":68,"shuxue":23,"yingyu":63},{"dili":23,"shengwu":45,"huaxue":87}]}
{"name":"tianqi","age":22,"scores":[{"yuwen":88,"shuxue":91,"yingyu":41},{"dili":56,"shengwu":79,"huaxue":45}]}


代码块:

package com.bjsxt.scalaspark.sql.examples

import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Reads JSON records containing a nested JSON array, e.g.:
  *   {"name":"zhangsan","age":18,"scores":[{"yuwen":98,"shuxue":90,"yingyu":100},{"dili":98,"shengwu":78,"huaxue":100}]}
  *
  * explode() expands the array so that every element of the array
  * becomes a row of its own.
  */
object readJsonArray {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local")
      .appName("readJsonArray")
      .getOrCreate()

    // Spark infers the schema, including the array of structs in "scores".
    val frame: DataFrame = spark.read.json("./data/jsonArrayFile")
    // truncate = false: print full cell contents instead of folding them
    frame.show(false)
    frame.printSchema()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    // One output row per element of the "scores" array.
    val transDF: DataFrame = frame
      .select($"name", $"age", explode($"scores"))
      .toDF("name", "age", "allScores")
    transDF.show(100, truncate = false)
    transDF.printSchema()

    // Struct fields absent from a given array element come back as null.
    val result: DataFrame = transDF.select(
      $"name", $"age",
      $"allScores.yuwen".as("yuwen"),
      $"allScores.shuxue".as("shuxue"),
      $"allScores.yingyu".as("yingyu"),
      $"allScores.dili".as("dili"),
      $"allScores.shengwu".as("shengwu"),
      $"allScores.huaxue".as("huaxue")
    )
    result.show(100)

    // Release Spark resources; the original example omitted this.
    spark.stop()
  }
}

示例三

示例数据(注意:gender 的值用的是单引号,并非严格的 JSON;Spark 的 JSON 数据源默认开启 allowSingleQuotes 选项,因此仍能正常解析):
{"name":"zhangsan","score":100,"infos":{"age":20,"gender":'man'}}
{"name":"lisi","score":70,"infos":{"age":21,"gender":'femal'}}
{"name":"wangwu","score":80,"infos":{"age":23,"gender":'man'}}
{"name":"maliu","score":50,"infos":{"age":16,"gender":'femal'}}
{"name":"tianqi","score":90,"infos":{"age":19,"gender":'man'}}

代码块:

package com.bjsxt.scalaspark.sql.examples

import org.apache.spark.sql.{DataFrame, SparkSession}
/**
  * Reads a JSON file whose records contain a nested object, e.g.:
  *   {"name":"wangwu","score":80,"infos":{"age":23,"gender":'man'}}
  *
  * Nested fields can be addressed directly with dot notation,
  * e.g. infos.age — no explode/flatten step is needed for structs.
  */
object readNestJsonFile {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .master("local")
      .appName("readNestJsonFile")
      .getOrCreate()

    // Equivalent to spark.read.json("./data/NestJsonFile").
    val frame: DataFrame = spark.read.format("json").load("./data/NestJsonFile")
    frame.printSchema()
    frame.show(100)

    // Query nested struct fields with dot notation through SQL.
    frame.createOrReplaceTempView("infosView")
    spark.sql("select name,infos.age,score,infos.gender from infosView").show(100)

    // Release Spark resources; the original example omitted this.
    spark.stop()
  }
}


评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值