import java.sql.{Connection, DriverManager}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.kafka010.OffsetRange

import scala.collection.mutable
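// Helpers for a Spark Streaming word count that keeps both the running totals and
// the Kafka offsets in MySQL. Assumed (hypothetical) schema, inferred from the
// REPLACE INTO statements below; REPLACE INTO only upserts against a PRIMARY KEY
// or UNIQUE index, so both tables need one:
//   wordcount(word VARCHAR(255) PRIMARY KEY, count INT)
//   offsets(groupid VARCHAR(100), topic VARCHAR(100), `partition` INT, offset BIGINT,
//           PRIMARY KEY (groupid, topic, `partition`))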
object DataToMysql {
  /** Opens a new JDBC connection to the local MySQL `test` database. */
  def getConn(): Connection =
    DriverManager.getConnection(
      "jdbc:mysql://localhost:3306/test",
      "root",
      "1234"
    )
  /** Upserts per-word totals: each batch count is added to the total already stored in MySQL. */
  def saveToMysql(result: RDD[(String, Int)]): Unit = {
    result.foreachPartition(partition => {
      val conn = getConn()
      // Prepare the statement once per partition and reuse it for every record.
      val ps = conn.prepareStatement("replace into wordcount values(?,?)")
      partition.foreach { case (word, cnt) =>
        // Read-modify-write: not atomic across jobs, but fine for a single streaming query.
        ps.setString(1, word)
        ps.setInt(2, getword(word) + cnt)
        ps.executeUpdate()
      }
      ps.close()
      conn.close()
    })
  }
  /** Persists the end offset of every consumed range so the next run can resume from it. */
  def saveOffsets(groupid: String, ranges: Array[OffsetRange]): Unit = {
    val conn = getConn()
    val ps = conn.prepareStatement("replace into offsets values(?,?,?,?)")
    for (o <- ranges) {
      ps.setString(1, groupid)
      ps.setString(2, o.topic)
      ps.setInt(3, o.partition)
      ps.setLong(4, o.untilOffset)
      ps.executeUpdate()
    }
    ps.close()
    conn.close()
  }
  /** Reads the stored offsets for a group/topic, keyed by TopicPartition; empty on the first run. */
  def getOffset(groupid: String, topic: String): mutable.Map[TopicPartition, Long] = {
    val offset = mutable.Map[TopicPartition, Long]()
    val conn = getConn()
    val ps = conn.prepareStatement("select * from offsets where groupid=? and topic=?")
    ps.setString(1, groupid)
    ps.setString(2, topic)
    val resultSet = ps.executeQuery()
    while (resultSet.next()) {
      offset += new TopicPartition(resultSet.getString("topic"), resultSet.getInt("partition")) -> resultSet.getLong("offset")
    }
    resultSet.close()
    ps.close()
    conn.close()
    offset
  }
  /** Looks up the accumulated count for a word, or 0 if it has not been seen yet. */
  def getword(word: String): Int = {
    val conn = getConn()
    val ps = conn.prepareStatement("select * from wordcount where word=?")
    ps.setString(1, word)
    val resultSet = ps.executeQuery()
    // Capture the value before closing; the original version leaked this connection.
    val count = if (resultSet.next()) resultSet.getInt("count") else 0
    resultSet.close()
    ps.close()
    conn.close()
    count
  }
}
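
// A minimal driver sketch showing how the helpers above fit together in a Spark
// Streaming job. Everything below is illustrative: the object name, broker address,
// topic name, group id, and master setting are hypothetical, not part of the
// original code.
object WordCountDriver {
  import org.apache.kafka.common.serialization.StringDeserializer
  import org.apache.spark.SparkConf
  import org.apache.spark.streaming.kafka010._
  import org.apache.spark.streaming.{Seconds, StreamingContext}

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("wordcount").setMaster("local[2]") // hypothetical
    val ssc = new StreamingContext(conf, Seconds(5))
    val groupid = "wc-group"  // hypothetical consumer group
    val topic = "wordcount"   // hypothetical topic
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "localhost:9092", // hypothetical broker
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupid,
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean) // offsets are managed in MySQL instead
    )
    // Resume from the offsets stored in MySQL (an empty map on the first run).
    val fromOffsets = DataToMysql.getOffset(groupid, topic)
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](List(topic), kafkaParams, fromOffsets)
    )
    stream.foreachRDD { rdd =>
      // Capture the offset ranges before any transformation loses the KafkaRDD type.
      val ranges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      val result = rdd.map(_.value()).flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
      DataToMysql.saveToMysql(result)
      // Save offsets only after the results are written, so a crash replays the batch
      // rather than skipping it.
      DataToMysql.saveOffsets(groupid, ranges)
    }
    ssc.start()
    ssc.awaitTermination()
  }
}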