最近的推荐系统项目中,使用 Spark 中的机器学习算法实现推荐,会把收到的用户行为数据转换为对应的评分数据(类似电影评分数据)。其中 Spark MLlib 中的 ALS 算法需要的数据格式是 uid itemid rating,uid、itemid 是 int 类型的,但是 server 收集到的数据是 string 类型的。就拿 uid 来说,可能是 cookie,也可能是 deviceid,这时候就要先将这些字段转成 int 类型才能使用 Spark MLlib 中的算法。本文将介绍一下这个转换过程。
首先,我们使用 Spark 中的 DataFrame 来处理用户行为数据,得到如下格式的数据:
dataframe 如下:
root |-- uid: string (nullable = true) |-- item_id: string (nullable = true) |-- action_type: string (nullable = true) |-- rating: float (nullable = true) +------+-------+------+ | uid|item_id|rating| +------+-------+------+ |user09| a6| 5.0| |user09| a6| 7.0| +------+-------+------+转换步骤如下:1. 取出所有的 uid 并去重,转换成 rdd,使用 rdd 的 zipWithIndex 给每个 uid 生成一个 index,再转换成 df1 并加上 schema 信息(uid, i_uid) 2. 将 df1 和 df 关联,用 i_uid 这一列的值替换 uid,然后去掉 i_uid 这一列 3. 将 uid 与 i_uid 的对应关系保存到 idIndexMapping,格式为 map(colName, dataFrame),key 为列名,value 为 dataframe(uid, i_uid)
转换成 index 之后的 df 在完成推荐后,推荐结果中的 uid、item 都是 index,这时候又需要将 index 转换回原有的 id(映射关系为 (id: String, index: Int))。
步骤如下:
1.将推荐结果df展开,转成 uid,itemid,rating这种格式的df
2. 从 idIndexMapping 中取出列名(uid)对应的 df1,将 df1 中的两列数据交换(直接对换列名更方便)
3. 将 df 和 df1 join,将 uid 这个字段替换成 i_uid 的值,删除 i_uid 这一列

具体实现代码如下:

package com.allyes.awise.eng.core

import com.allyes.awise.eng.core.util.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, SQLContext}

import scala.collection.mutable

/**
 * Converts string-typed id columns (user id / item id) into dense integer
 * indices so the data can be fed to Spark MLlib's ALS — which requires Int
 * user/item ids — and converts the indices back to the original string ids
 * once recommendation is done.
 *
 * Created by root on 5/26/17.
 */
class IDConverter extends Logging with Serializable {

  // column name -> DataFrame(colName, i_colName); kept so that indexToId can
  // later restore the original string ids from the generated indices.
  var idIndexMapping: mutable.Map[String, DataFrame] = mutable.Map[String, DataFrame]()

  /**
   * Replaces the string column `colName` of `df` with a dense integer index.
   *
   * Steps: distinct the ids, zip each with an index, join the mapping back
   * onto `df`, move the index into the original column, and drop the
   * temporary index column. The (id, index) mapping DataFrame is cached and
   * remembered in `idIndexMapping` for the reverse conversion.
   */
  def idToIndex(colName: String, df: DataFrame, sqlContext: SQLContext): DataFrame = {
    import sqlContext.implicits._

    val distinctIds = df.select(colName).distinct()
    // zipWithIndex produces Long indices, but MLlib ALS expects Int ids, so
    // narrow to Int here. NOTE: overflows past Int.MaxValue distinct ids.
    val idsWithIndex: RDD[(String, Int)] = distinctIds.rdd.zipWithIndex().map {
      case (row, index) => (row.get(0).toString, index.toInt)
    }

    val indexColName = s"i_$colName"
    // Cached because the mapping is reused by indexToId after recommendation.
    val idsWithIndexDF = idsWithIndex.toDF(colName, indexColName).cache()

    // Attach the index, overwrite the id column with it, drop the temp column.
    val data = df.join(idsWithIndexDF, colName)
      .withColumn(colName, col(indexColName))
      .drop(indexColName)

    idIndexMapping += (colName -> idsWithIndexDF)
    data
  }

  /** Indexes both the user-id and item-id columns of the action DataFrame. */
  def convertUserItemIdToIndex(df: DataFrame, sqlContext: SQLContext): DataFrame = {
    val withUserIndex = idToIndex(Constants.USER_ACTION_USERID, df, sqlContext)
    val resDF = idToIndex(Constants.USER_ACTION_ITMEID, withUserIndex, sqlContext)
    debugger.show(resDF, "IDConverter.resDF", false)
    resDF
  }

  /**
   * Reverse of idToIndex: replaces the integer index column `colName` of `df`
   * with the original string ids recorded in `idIndexMapping`.
   */
  def indexToId(colName: String, df: DataFrame): DataFrame = {
    debugger.print("idIndexMapping:" + idIndexMapping)
    val idIndexDF: DataFrame = idIndexMapping(colName)
    val indexColName = s"i_$colName"
    debugger.print("indexColName:" + indexColName)
    // After the swap, `colName` holds the index (the join key against df) and
    // `indexColName` holds the original string id to restore.
    swapTwoColums(idIndexDF, colName, indexColName)
      .join(df, colName)
      .withColumn(colName, col(indexColName))
      .drop(indexColName)
  }

  /**
   * Swaps the contents of two columns, e.g. (index, id) -> (id, index).
   * (Method name kept as-is for source compatibility, despite the typo.)
   */
  def swapTwoColums(df: DataFrame, col1: String, col2: String): DataFrame = {
    val temp = "temp_col_name"
    df.withColumn(temp, col(col1))
      .withColumn(col1, col(col2))
      .withColumn(col2, col(temp))
      .drop(temp)
  }
}