1, Hadoop 2.9.0
2, Spark 2.2.1 (Scala 2.11)
3, Java code:
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.SparkSession;

/**
 * Start the Spark program.
 */
public class App
{
    public static void main( String[] args )
    {
        // Standalone mode cannot read local files, only HDFS files,
        // unless every worker has an identical copy of the local file.
        String testFilePath = "hdfs://hdmaster:8020/user/root/sparktest.txt";
        SparkSession spark = SparkSession
            .builder()
            .appName("test spark connection")
            .master("spark://hdmaster:7077") // local[*] or spark://hdmaster:7077
            // Submitting from the IDE like this runs against the standalone cluster;
            // spark.jars tells the cluster where to find the application jar.
            .config("spark.jars", "target/asym-xdr-combine-1.0-SNAPSHOT.jar")
            .getOrCreate();
        JavaRDD<People> rdd = spark
            .read()
            .textFile(testFilePath)
            .javaRDD()
            // The map operator is shipped to the cluster for execution,
            // so any variables it captures must be serializable.
            .map(new Function<String, People>() {
                @Override
                public People call(String s) throws Exception {
                    String[] parts = s.split("\\|"); // the pipe must be escaped in the regex
                    return new People(parts[0], parts[1], parts[2]);
                }
            });
        spark.createDataFrame(rdd, People.class).createOrReplaceTempView("people");
        String sql = "select name from people";
        spark.sql(sql).show();
        spark.stop();
    }
}
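The People class is not shown in the original. For createDataFrame(rdd, People.class) to infer a schema, People must be a public, serializable JavaBean with getters. A minimal sketch under that assumption; only the name field is confirmed by the query, and the other two field names (age, address) are placeholders:

import java.io.Serializable;

public class People implements Serializable {
    private String name;    // confirmed by the "select name" query above
    private String age;     // placeholder name; the original column is unknown
    private String address; // placeholder name; the original column is unknown

    public People(String name, String age, String address) {
        this.name = name;
        this.age = age;
        this.address = address;
    }

    // createDataFrame infers the DataFrame schema from these bean getters.
    public String getName() { return name; }
    public String getAge() { return age; }
    public String getAddress() { return address; }
}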
4, Notes
io.netty conflict: check which netty version the spark-core dependency declares (look it up on Maven Central) and pin that version in your own pom.
Other conflicts: artifacts such as jackson and javax.servlet have to be excluded from the other dependencies in the pom, as shown below; a dependency-tree command for tracking conflicts down follows the pom.
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>www.xx.net.cn</groupId>
    <artifactId>asym-xdr-combine</artifactId>
    <version>1.0-SNAPSHOT</version>
    <name>asym-xdr-combine</name>
    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <!-- Spark 2.2 dropped Java 7 support, so compile for Java 8 -->
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <junit.version>4.12</junit.version>
        <hadoop.version>2.9.0</hadoop.version>
        <flume.version>1.8.0</flume.version>
        <scala.version>2.11</scala.version>
        <kafka.version>1.1.1</kafka.version>
        <!-- match the cluster version (Spark 2.2.1) -->
        <spark.version>2.2.1</spark.version>
        <quartz.version>2.3.0</quartz.version>
        <email.version>1.5</email.version>
        <hive.version>2.3.4</hive.version>
        <mysql.jdbc.version>5.1.46</mysql.jdbc.version>
        <netty.version>4.0.43.Final</netty.version>
        <easymock.version>3.6</easymock.version>
    </properties>
    <dependencies>
        <!-- hadoop base -->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>${hadoop.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>javax.servlet</groupId>
                    <artifactId>*</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>javax.servlet</groupId>
                    <artifactId>*</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>com.fasterxml.jackson.core</groupId>
                    <artifactId>*</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>io.netty</groupId>
                    <artifactId>*</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-jdbc</artifactId>
            <version>${hive.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flume</groupId>
            <artifactId>flume-ng-core</artifactId>
            <version>${flume.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_${scala.version}</artifactId>
            <version>${kafka.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>com.fasterxml.jackson.core</groupId>
                    <artifactId>*</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <!-- spark -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_${scala.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_${scala.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!-- netty, pinned to the version spark-core expects -->
        <dependency>
            <groupId>io.netty</groupId>
            <artifactId>netty-all</artifactId>
            <version>${netty.version}</version>
        </dependency>
        <!-- apache mail tools -->
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-email</artifactId>
            <version>${email.version}</version>
        </dependency>
        <!-- scheduled jobs -->
        <dependency>
            <groupId>org.quartz-scheduler</groupId>
            <artifactId>quartz</artifactId>
            <version>${quartz.version}</version>
        </dependency>
        <!-- test -->
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>${junit.version}</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.easymock</groupId>
            <artifactId>easymock</artifactId>
            <version>${easymock.version}</version>
            <scope>test</scope>
        </dependency>
        <!-- jdbc mysql -->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>${mysql.jdbc.version}</version>
        </dependency>
    </dependencies>
</project>
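To find which dependency is pulling in a conflicting artifact before writing an exclusion, Maven's dependency tree is the usual diagnostic; the io.netty filter here is just an example groupId:

mvn dependency:tree -Dincludes=io.netty

Each match shows the path from one of your direct dependencies down to the conflicting jar, which tells you where the exclusion belongs.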
5, Deployment modes explained (reposted):
Single machine:
local[n]: pseudo-distributed mode on one machine, where n threads act as the driver and the executors. Because the driver and executors share a single JVM, operators can freely access variables outside them; many beginners pick up bad habits here.
Cluster managers:
standalone: a cluster of Spark workers, Spark's built-in cluster mode. Suited to environments that do not depend much on Hadoop, or where the storage and compute clusters are separate.
yarn: runs on top of a Hadoop YARN cluster; job and resource scheduling are handled by YARN, which does this better than Spark standalone. Suited to co-located storage and compute, or to jobs that depend on MR, Hive, and so on.
Deploy modes:
client: the driver runs on the machine that executes the spark-submit script. That machine need not be a cluster node; you can run the driver on Windows while the executors run on a Linux cluster.
cluster: once the job is submitted, the driver runs on one of the cluster's nodes, and the cluster treats it much like an executor; in effect, a background process.
Both standalone and yarn (and mesos, which I have not used) support the client and cluster deploy modes. The cluster manager is chosen with the --master flag and the deploy mode with the --deploy-mode flag, as the example below shows.
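As a concrete illustration of those two flags, a sketch of the submit commands (the jar and main class are taken from the example above, and the commands assume a reachable standalone master and a configured YARN client):

# Standalone cluster, client deploy mode: the driver stays on the submitting machine.
spark-submit --master spark://hdmaster:7077 --deploy-mode client \
  --class App target/asym-xdr-combine-1.0-SNAPSHOT.jar

# YARN, cluster deploy mode: the driver runs on a node inside the cluster.
spark-submit --master yarn --deploy-mode cluster \
  --class App target/asym-xdr-combine-1.0-SNAPSHOT.jar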