- Spark SQL can automatically infer the schema of a JSON file, load its data, and create a DataFrame. Use the SQLContext.read.json() method against either an RDD whose elements are Strings or a JSON file.
- Note, however, that the JSON files used here differ from conventional JSON files: each line must contain exactly one self-contained, valid JSON object. A single JSON object must not span multiple lines, or an error is raised.
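For reference, a plausible students-score.json in this one-object-per-line format (the actual file contents are not shown here; Leo's and Marry's records match the join output later in this section, and Jack is assumed to score below 80):
{"name":"Leo", "score":85}
{"name":"Marry", "score":99}
{"name":"Jack", "score":60}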
Comprehensive example: query the basic information and scores of students who scored 80 or above
Java version
package cn.spark.study.sql;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import scala.Tuple2;
import java.util.ArrayList;
import java.util.List;
/**
* JSON data source
*/
public class JsonDataSource_9 {
public static void main(String[] args) {
SparkConf conf = new SparkConf()
.setMaster("local")
.setAppName("JsonDataSource_9");
JavaSparkContext sc = new JavaSparkContext(conf);
SQLContext sqlContext = new SQLContext(sc);
//Part 1: create a DataFrame from a JSON file
DataFrame studentScoreDF = sqlContext.read().json("E:\\sparktext\\students-score.json");
//query students with a score of 80 or above
studentScoreDF.registerTempTable("student_score");
DataFrame goodsStudentScoreDF = sqlContext.sql("select name,score from student_score where score >= 80");
List<String> goodsStudentNames = goodsStudentScoreDF.javaRDD().map(new Function<Row, String>() {
@Override
public String call(Row row) throws Exception {
return row.getString(0);
}
}).collect();
//Part 2: create a DataFrame from a JavaRDD<String>
//each element of the RDD is a JSON string
List<String> studentInfosJson = new ArrayList<String>();
//{"name":"Leo", "score":85}
studentInfosJson.add("{\"name\":\"Leo\", \"age\":18}");
studentInfosJson.add("{\"name\":\"Marry\", \"age\":17}");
studentInfosJson.add("{\"name\":\"Jack\", \"age\":19}");
JavaRDD<String> studentInfoJsonRDD = sc.parallelize(studentInfosJson);
DataFrame studentInfoDF = sqlContext.read().json(studentInfoJsonRDD);
//register a temp table for the student info DataFrame, then query the basic info of students scoring 80 or above
studentInfoDF.registerTempTable("student_info");
//builds e.g.: select name,age from student_info where name in ('Leo','Marry')
String sql = "select name,age from student_info where name in (";
for (int i = 0;i<goodsStudentNames.size();i++){
sql += "'" + goodsStudentNames.get(i) +"'";
if(i < goodsStudentNames.size() -1){
sql += ",";
}
}
sql += ")";
DataFrame goodStudentInfoDF = sqlContext.sql(sql);
/**
* +-----+-----+-----+---+
* | name|score| name|age|
* +-----+-----+-----+---+
* | Leo| 85| Leo| 18|
* | Leo| 85|Marry| 17|
* |Marry| 99| Leo| 18|
* |Marry| 99|Marry| 17|
* +-----+-----+-----+---+
*/
//DataFrame join = goodsStudentScoreDF.join(goodStudentInfoDF);
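//join() with no join expression produces the Cartesian product shown above.
//A sketch of joining on the name column instead, using Spark 1.x's Column-based join:
//DataFrame joined = goodsStudentScoreDF.join(goodStudentInfoDF,
//        goodsStudentScoreDF.col("name").equalTo(goodStudentInfoDF.col("name")));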
//instead, convert both DataFrames to JavaPairRDDs (via javaRDD() and mapToPair) and join them
//(name,score)
JavaPairRDD<String, Integer> goodsStudentScoreTuple = goodsStudentScoreDF.javaRDD().mapToPair(new PairFunction<Row, String, Integer>() {
@Override
public Tuple2<String, Integer> call(Row row) throws Exception {
return new Tuple2<String, Integer>(row.getString(0),
(int) row.getLong(1));
}
});
//(name,age)
final JavaPairRDD<String, Integer> goodStudentInfoTuple = goodStudentInfoDF.javaRDD().mapToPair(new PairFunction<Row, String, Integer>() {
@Override
public Tuple2<String, Integer> call(Row row) throws Exception {
return new Tuple2<String, Integer>(row.getString(0),
(int) row.getLong(1));
}
});
JavaPairRDD<String, Tuple2<Integer, Integer>> join = goodsStudentScoreTuple.join(goodStudentInfoTuple);
//convert the joined good-student records into a JavaRDD<Row>
JavaRDD<Row> goodStudentRowRDD = join.map(new Function<Tuple2<String, Tuple2<Integer, Integer>>, Row>() {
@Override
public Row call(Tuple2<String, Tuple2<Integer, Integer>> tuple2) throws Exception {
return RowFactory.create(tuple2._1, tuple2._2._1, tuple2._2._2);
}
});
//create the schema (metadata) for converting the JavaRDD<Row> into a DataFrame
List<StructField> studentFields = new ArrayList<StructField>();
studentFields.add(DataTypes.createStructField("name", DataTypes.StringType, true));
studentFields.add(DataTypes.createStructField("score", DataTypes.IntegerType, true));
studentFields.add(DataTypes.createStructField("age", DataTypes.IntegerType, true));
StructType structType = DataTypes.createStructType(studentFields);
//convert the JavaRDD<Row> into a DataFrame
final DataFrame dataFrame = sqlContext.createDataFrame(goodStudentRowRDD, structType);
dataFrame.write().format("json").save("E:\\sparktext\\good_student_score_java");
// join.foreach(new VoidFunction<Tuple2<String, Tuple2<Integer, Integer>>>() {
// @Override
// public void call(Tuple2<String, Tuple2<Integer, Integer>> tuple) throws Exception {
// System.out.println("name:" + tuple._1);
// System.out.println("score:" + tuple._2._1+":"+ "age:" + tuple._2._2);
// }
// });
}
}
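With the sample input above, the directory saved by the Java job should contain JSON lines like the following (row order may vary across part files):
{"name":"Marry","score":99,"age":17}
{"name":"Leo","score":85,"age":18}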
Scala version
package cn.spark.study.sql
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
/**
* Two ways to create a DataFrame from JSON:
* 1. read a JSON file directly
* 2. load an RDD whose elements are JSON-formatted Strings
*/
object JsonDataSource_9 {
def main(args: Array[String]): Unit = {
val conf = new SparkConf()
.setMaster("local")
.setAppName("JsonDataSource_9")
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
//create the student score DataFrame
val studentScoreDF: DataFrame = sqlContext.read.json("E:\\sparktext\\students-score.json")
//query students with a score of 80 or above
val goodStudentScoreDF :DataFrame= studentScoreDF.filter(studentScoreDF.col("score") >= 80)
// goodStudentScoreDF.foreach(row => {
// println(row.getString(0)+","+row.getLong(1))
// })
val goodStudentNames: Array[String] = goodStudentScoreDF.map(row => row.getString(0)).collect()
//create the student basic info data
val studentInfosJSONs = Array("{\"name\":\"Leo\", \"age\":18}",
"{\"name\":\"Marry\", \"age\":17}",
"{\"name\":\"Jack\", \"age\":19}")
val studentInfoJsonRDD: RDD[String] = sc.parallelize(studentInfosJSONs,1)
val studentInfoDF: DataFrame = sqlContext.read.json(studentInfoJsonRDD)
//query the basic info of students scoring 80 or above
studentInfoDF.registerTempTable("student_info")
var sql = "select name,age from student_info where name in ("
for( i <- 0 until goodStudentNames.length){
sql += "'" +goodStudentNames(i)+"'"
if(i < goodStudentNames.length-1){
sql += ","
}
}
sql += ")"
val goodStudentInfoDF: DataFrame = sqlContext.sql(sql)
// goodStudentInfoDF.foreach(row => {
// println(row.getString(0)+","+row.getLong(1))
// })
val goodStudentRDD: RDD[(String, (Long, Long))] = goodStudentScoreDF.rdd.map(row => (row.getAs[String]("name"), row.getAs[Long]("score")))
.join(goodStudentInfoDF.rdd.map(row => (row.getAs[String]("name"), row.getAs[Long]("age"))))
val goodStudentRowRDD: RDD[Row] = goodStudentRDD.map(info => {
//note: these Int values must match the IntegerType fields in the schema defined below
Row(info._1, info._2._1.toInt, info._2._2.toInt)
})
val structType = StructType(Array(
StructField("name",StringType,true),
StructField("score",IntegerType,true),
StructField("age",IntegerType,true)))
val goodStudentsDF: DataFrame = sqlContext.createDataFrame(goodStudentRowRDD,structType)
goodStudentsDF.write.format("json").save("E:\\sparktext\\good_student_score_scala")
}
}
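As a quick sanity check, the saved result can be read back with the same JSON reader used above (a minimal sketch, run inside main after the save):
val savedDF = sqlContext.read.json("E:\\sparktext\\good_student_score_scala")
savedDF.show()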