1. 相关包导入&客户端配置&加载数据
# -*- coding: utf-8 -*-
#1.1 加上下两行代码,可以直接使用python aa.py 执行
import findspark
findspark.init()
import datetime
import os
import logging
import numpy as np
import pandas as pd
from pyhive import hive
from pyspark import SparkConf, SparkContext
from pyspark.context import SparkContext
from pyspark.sql import HiveContext, SparkSession
from pyspark.sql.functions import current_date,datediff,udf
from pyspark.sql.types import StructField, StringType, FloatType, StructType, IntegerType, LongType
from pyspark.mllib.feature import Normalizer,StandardScaler
from pyspark.mllib.linalg import SparseVector, DenseVector
from pyspark.ml.feature import VectorAssembler,StringIndexer,QuantileDiscretizer,RFormula
from pyspark.ml.feature import MaxAbsScaler,StandardScaler,VectorAssembler,ChiSqSelector,OneHotEncoder
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator, MulticlassClassificationEvaluator
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from pyspark.ml import Pipeline,PipelineModel
if __name__ == "__main__":
    # 1.2 Configure the Spark client: a local-mode session with Hive support.
    spark = SparkSession \
        .builder \
        .enableHiveSupport() \
        .master("local[*]") \
        .appName("test_lr") \
        .config('spark.driver.maxResultSize', '10g') \
        .config('spark.driver.memory', '4g') \
        .config('spark.executor.memory', '3g') \
        .getOrCreate()
    # BUGFIX: the key was misspelled 'spark.excutor.memory'. Spark silently
    # accepts unknown config keys, so the executor memory limit was never
    # actually applied; 'spark.executor.memory' is the documented key.
    sc = spark.sparkContext
    # Keep the console quiet except for errors.
    sc.setLogLevel("ERROR")

    # 1.3 Load the data: tab-delimited file with a header row; let Spark
    # infer column types. NOTE(review): path is machine-specific — confirm
    # 'file:///data/kouhj/test_pysprk/data.csv' exists on the target host.
    df = spark.read.options(inferSchema=True, header=True, delimiter='\t') \
        .csv('file:///data/kouhj/test_pysprk/data.csv')

最低0.47元/天 解锁文章
3271

被折叠的评论
为什么被折叠?



