- Reading a Parquet file
import org.apache.spark.sql.{DataFrame, SparkSession}

object CreateDataFrameFromParquet {
  def main(args: Array[String]): Unit = {
    // Create a SparkSession (a wrapper around and enhancement of SparkContext)
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()
    // Parquet files are self-describing, so no schema needs to be supplied
    val df: DataFrame = spark.read.parquet("src/main/scala/data/user.parquet")
    df.show(2)
    df.printSchema()
    spark.stop()
  }
}
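Because the loaded DataFrame already carries the schema stored in the Parquet file, it can be queried with SQL right away. A minimal sketch, assuming the same input file as above; the object name QueryParquetWithSql, the view name users, and the columns name/age/fv (taken from the schema in the write example below) are illustrative, not from the original code:

import org.apache.spark.sql.{DataFrame, SparkSession}

object QueryParquetWithSql {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()
    // The generic reader is equivalent to spark.read.parquet(...)
    val df: DataFrame = spark.read.format("parquet").load("src/main/scala/data/user.parquet")
    // Register a temporary view so the data can be queried with SQL
    df.createOrReplaceTempView("users")
    spark.sql("SELECT name, fv FROM users WHERE age >= 18").show()
    spark.stop()
  }
}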
- Writing to a Parquet file
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.{DoubleType, IntegerType, StringType, StructField, StructType}

object WriteToParquet {
  def main(args: Array[String]): Unit = {
    // Create a SparkSession
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .master("local[*]")
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext
    val lines: RDD[String] = sc.textFile("src/main/scala/data/user.txt")
    // A Row's fields carry neither names nor declared types yet
    val rdd1: RDD[Row] = lines.map(e => {
      val split = e.split(",")
      Row(split(0), split(1).toInt, split(2).toDouble)
    })
    // Define the schema (field name, field type, nullability)
    val schema: StructType = StructType(
      Array(
        StructField("name", StringType),
        StructField("age", IntegerType),
        StructField("fv", DoubleType)
      )
    )
    // Bind the Row RDD to the schema to build a DataFrame
    val df1: DataFrame = spark.createDataFrame(rdd1, schema)
    df1.write.parquet("src/main/scala/data/outpar")
    // Stopping the session also stops the underlying SparkContext
    spark.stop()
  }
}
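Note that df1.write.parquet(...) fails if the output directory already exists, because the default save mode is ErrorIfExists. A minimal sketch of controlling that behavior, assuming the df1 and output path from the example above (snappy is already Parquet's default codec in Spark; the option is shown only for illustration):

import org.apache.spark.sql.SaveMode

// Overwrite any existing output instead of failing (default mode is ErrorIfExists)
df1.write
  .mode(SaveMode.Overwrite)
  .option("compression", "snappy") // Parquet compression codec
  .parquet("src/main/scala/data/outpar")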