List Collections
Basic Functions
val bigData = List("Hadoop", "Spark")
val data = List(1, 2, 3)
val bigData_Core = "Hadoop" :: ("Spark" :: Nil)
val data_Int = 1 :: 2 :: 3 :: Nil
println(data.isEmpty)
println(data.head)
println(data.tail.head)
val List(a, b) = bigData
println("a : " + a + " === " + " b: " + b)
val x :: y :: rest = data
println("x : " + x + " === " + " y: " + y + " === " + rest)
val shuffledData = List(6,3,5,6,2,9,1)
println(sortList(shuffledData))
//Insertion sort: recursively sort the tail, then insert the head into it
def sortList(list: List[Int]): List[Int] = list match {
  case List() => List()
  case head :: tail => compute(head, sortList(tail))
}
//Insert data into the already-sorted dataSet, keeping it sorted
def compute(data: Int, dataSet: List[Int]): List[Int] = dataSet match {
  case List() => List(data)
  case head :: tail =>
    if (data <= head) data :: dataSet
    else head :: compute(data, tail)
}
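The standard library can sort without hand-written recursion; a minimal sketch (sorted, sortWith, and sortBy are standard List methods, and the name shuffled is ours):
val shuffled = List(6, 3, 5, 6, 2, 9, 1)
println(shuffled.sorted) //ascending: List(1, 2, 3, 5, 6, 6, 9)
println(shuffled.sortWith(_ > _)) //descending: List(9, 6, 6, 5, 3, 2, 1)
println(shuffled.sortBy(x => -x)) //same descending order, via a key function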
First-Order Functions
//Use :: to prepend an element to a List; use ::: to concatenate two Lists
println(List(1, 2, 3, 4) ::: List(4, 5, 6, 7, 8) ::: List(10, 11))
println(List(1, 2, 3, 4) ::: (List(4, 5, 6, 7, 8) ::: List(10, 11)))
println(List(1, 2, 3, 4).length)
val bigData = List("Hadoop", "Spark", "Kafka")
println(bigData.last) //last element
println(bigData.init) //all elements except the last
println(bigData.reverse) //reversed copy
println(bigData) //unchanged: every operation builds a new List
println(bigData take 2) //first 2 elements
println(bigData drop 1) //everything but the first element
println(bigData splitAt 2) //(first 2 elements, the rest)
println(bigData apply 2) //same as bigData(2)
println(bigData(2))
val data = List('a', 'b', 'c', 'd', 'e', 'f')
println(data.indices) //Range 0 until 6
println(data.indices zip data) //Vector((0,a), (1,b), ...)
println(data.zipWithIndex) //List((a,0), (b,1), ...): element first, index second
println(data.toString)
println(data.mkString("[", ",", "]")) //[a,b,c,d,e,f]
println(data.mkString("*")) //a*b*c*d*e*f
println(data.mkString)
val buffer = new StringBuilder
data.addString(buffer, "(", ";;", ")") //appends the formatted elements to the StringBuilder
println(buffer)
println(data) //data itself is unchanged
val array = data.toArray
println(array.toList)
val new_Array = new Array[Char](10) //an array of length 10 (Array[Char](10) would be a one-element array)
data.copyToArray(new_Array, 3) //copy data into new_Array starting at index 3
new_Array.foreach(print)
println()
val iterator = data.iterator
println(iterator.next())
println(iterator.next())
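splitAt can also be applied recursively; a minimal sketch (the helper name chunk is ours, not standard API) that breaks a list into fixed-size groups:
//Split a list into groups of at most n elements (assumes n > 0)
def chunk[T](list: List[T], n: Int): List[List[T]] =
  if (list.isEmpty) Nil
  else {
    val (group, rest) = list.splitAt(n)
    group :: chunk(rest, n)
  }
println(chunk(data, 2)) //List(List(a, b), List(c, d), List(e, f))
The standard library's grouped method gives the same result: data.grouped(2).toList.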
Sorting
def mergedsort[T](less: (T, T) => Boolean)(input: List[T]): List[T] = {
  //Merge two already-sorted lists into one sorted list
  def merge(xList: List[T], yList: List[T]): List[T] =
    (xList, yList) match {
      case (Nil, _) => yList
      case (_, Nil) => xList
      case (x :: xtail, y :: ytail) =>
        if (less(x, y)) x :: merge(xtail, yList)
        else y :: merge(xList, ytail)
    }
  val n = input.length / 2
  if (n == 0) input
  else {
    //Split the input list into two halves of (nearly) equal length
    val (x, y) = input splitAt n
    //Sort each half recursively, then merge the two sorted halves
    merge(mergedsort(less)(x), mergedsort(less)(y))
  }
}
println(mergedsort((x: Int, y: Int) => x < y)(List(3, 7, 9, 5)))
val reversed_mergedsort = mergedsort((x: Int, y: Int) => x > y) _
println(reversed_mergedsort(List(3, 7, 9, 5)))
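Because mergedsort takes its parameters in two lists, the comparison can be fixed once and reused for any element type; a minimal sketch (the name byLength is ours):
val byLength = mergedsort((x: String, y: String) => x.length < y.length) _
println(byLength(List("Spark", "Hadoop", "Go"))) //List(Go, Spark, Hadoop)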
Map / Filter Functions
println(List(1, 2, 3, 4, 6).map(_ + 1))
val data = List("Scala", "Hadoop", "Spark")
println(data.map(_.length))
println(data.map(_.toList.reverse.mkString))
println(data.map(_.toList))
println(data.flatMap(_.toList))
println(List.range(1, 10).flatMap(i => List.range(1, i).map(j => (i, j))))
var sum = 0
List(1, 2, 3, 4, 5).foreach(sum += _)
println("sum : " + sum)
println(List(1, 2, 3, 4, 6, 7, 8, 9, 10).filter(_ % 2 == 0))
println(data.filter(_.length == 5))
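The flatMap/map nesting above is exactly what a for comprehension desugars to; a minimal sketch of the equivalence (the name pairs is ours):
val pairs = for {
  i <- List.range(1, 10)
  j <- List.range(1, i)
} yield (i, j)
println(pairs) //same output as the flatMap version above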
Partition / Find / TakeWhile
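These methods split or search a list without explicit loops; a minimal sketch (partition, find, takeWhile, dropWhile, and span are all standard List methods, and the name nums is ours):
val nums = List(1, 2, 3, 4, 5)
println(nums.partition(_ % 2 == 0)) //(List(2, 4), List(1, 3, 5)): filter and its complement
println(nums.find(_ > 3)) //Some(4): the first match, as an Option
println(nums.find(_ > 9)) //None
println(nums.takeWhile(_ < 4)) //List(1, 2, 3): longest prefix satisfying the predicate
println(nums.dropWhile(_ < 4)) //List(4, 5)
println(nums.span(_ < 4)) //(List(1, 2, 3), List(4, 5)): takeWhile and dropWhile in one call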
FoldLeft / FoldRight
//foldLeft, also written /: , nests to the left: (..(((0 + 1) + 2) + 3)..)
println((1 to 100).foldLeft(0)(_ + _))
println((0 /: (1 to 100))(_ + _))
//foldRight, also written :\ , nests to the right: 1 - (2 - (3 - (4 - (5 - 100))))
println((1 to 5).foldRight(100)(_ - _))
println(((1 to 5) :\ 100)(_ - _))
println(List(1, -3, 4, 2, 6).sortWith(_ < _))
println(List(1, -3, 4, 2, 6).sortWith(_ > _))
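foldLeft can also carry a tuple as accumulator to compute several results in one pass; a minimal sketch (the names total and cnt are ours):
val (total, cnt) = List(3, 7, 9, 5).foldLeft((0, 0)) {
  case ((s, c), x) => (s + x, c + 1)
}
println(total.toDouble / cnt) //6.0, the average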
Apply / Make / Range
println(List.apply(1, 2, 3))
println(List.make(3, 5)) //List(5, 5, 5); removed in current Scala versions
println(List.range(1, 5))
println(List.range(9, 1, -3)) //List(9, 6, 3)
val zipped = "abcde".toList zip List(1, 2, 3, 4, 5)
println(zipped)
println(zipped.unzip)
//Flattening and concatenation
println(List(List('a', 'b'), List('c'), List('d', 'e')).flatten)
println(List.concat(List(), List('b'), List('c')))
println(List.map2(List(10, 20), List(10, 10))(_ * _)) //List(100, 200); also removed in current Scala versions
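Since List.make and List.map2 no longer exist in current Scala, here is a minimal sketch of replacements built only from methods that still do (fill, zip, map):
println(List.fill(3)(5)) //List(5, 5, 5), replaces List.make(3, 5)
println((List(10, 20) zip List(10, 10)).map { case (a, b) => a * b }) //List(100, 200), replaces List.map2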
Other Collections
Buffer
//ListBuffer
import scala.collection.mutable.ListBuffer
val listBuffer = new ListBuffer[Int]
listBuffer += 1
listBuffer += 2
println(listBuffer)
//ArrayBuffer
import scala.collection.mutable.ArrayBuffer
val arrayBuffer = new ArrayBuffer[Int]()
arrayBuffer += 1
arrayBuffer += 2
println(arrayBuffer)
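A common pattern is to build with a mutable buffer and then freeze the result into an immutable List; a minimal sketch (the name squares is ours):
import scala.collection.mutable.ListBuffer
val squares = new ListBuffer[Int]
for (i <- 1 to 5) squares += i * i //appending to a ListBuffer is constant time
println(squares.toList) //List(1, 4, 9, 16, 25)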
Queue
//Immutable Queue: every operation returns a new queue
import scala.collection.immutable.Queue
val empty = Queue[Int]()
val queue1 = empty.enqueue(1)
val queue2 = queue1.enqueue(List(2,3,4,5))
println(queue2)
val (element, left) = queue2.dequeue
println(element + " : " + left)
//Mutable Queue
import scala.collection.mutable.Queue
val queue = Queue[String]()
queue += "a"
queue ++= List("b", "c")
println(queue)
println(queue.dequeue) //removes and returns the head element, "a"
println(queue)
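A minimal sketch of draining a mutable queue in FIFO order (the name jobs and its contents are made up for illustration):
import scala.collection.mutable
val jobs = mutable.Queue("load", "transform", "save")
while (jobs.nonEmpty) println(jobs.dequeue()) //prints load, transform, save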
Stack
import scala.collection.mutable.Stack
val stack = new Stack[Int]
stack.push(1)
stack.push(2)
stack.push(3)
println(stack.top) //3; the stack is still Stack(3, 2, 1)
println(stack)
println(stack.pop) //3; the stack becomes Stack(2, 1)
println(stack)
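A classic stack application is matching parentheses; a minimal sketch (the helper name balanced is ours):
import scala.collection.mutable.Stack
//True if every ')' closes an earlier '('; other characters are ignored
def balanced(s: String): Boolean = {
  val st = new Stack[Char]
  for (c <- s) c match {
    case '(' => st.push(c)
    case ')' => if (st.isEmpty) return false else st.pop()
    case _ =>
  }
  st.isEmpty
}
println(balanced("(a(b)c)")) //true
println(balanced(")(")) //false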
Set / TreeSet
import scala.collection.mutable
val data = mutable.Set.empty[Int] //+= and ++= need a mutable Set
data ++= List(1, 2, 3)
data += 4
data --= List(2, 3)
println(data)
data += 1 //1 is already present: a Set stores each element at most once
println(data)
data.clear()
println(data)
//TreeSet keeps its elements in ascending order
import scala.collection.immutable.TreeSet
val treeSet = TreeSet(9, 3, 1, 8, 0, 2, 7, 4, 6, 5)
println(treeSet)
val treeSetForChar = TreeSet("Spark", "Scala", "Hadoop")
println(treeSetForChar) //lexicographic order: Hadoop, Scala, Spark
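Sets also support the usual set algebra; a minimal sketch (a and b are our own example values):
val a = Set(1, 2, 3)
val b = Set(3, 4, 5)
println(a union b) //all of 1, 2, 3, 4, 5 (print order is unspecified)
println(a intersect b) //Set(3)
println(a diff b) //Set(1, 2)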
Map / TreeMap
import scala.collection.mutable
val map = mutable.Map.empty[String, String] //map(key) = value needs a mutable Map
map("Java") = "Hadoop"
map("Scala") = "Spark"
println(map)
println(map("Scala"))
import scala.collection.immutable.TreeMap
val treeMap = TreeMap("Scala" -> "Spark", "Java" -> "Hadoop")
println(treeMap) //keys kept in ascending order: Java -> Hadoop, Scala -> Spark
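map(key) throws NoSuchElementException when the key is missing; a minimal sketch of the safe lookups (get and getOrElse are standard Map methods, and the example values are made up):
val versions = Map("Spark" -> "3.x", "Hadoop" -> "2.x")
println(versions.get("Spark")) //Some(3.x)
println(versions.get("Flink")) //None
println(versions.getOrElse("Flink", "n/a")) //n/a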