Spark Core Programming

Published: 2025-04-13

partitionBy

Repartitions the data according to the specified Partitioner. Spark's default partitioner is HashPartitioner.

val rdd: RDD[(Int, String)] =
 sc.makeRDD(Array((1,"aaa"),(2,"bbb"),(3,"ccc")),3)

val rdd2: RDD[(Int, String)] =
 rdd.partitionBy(new HashPartitioner(2))
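
To check where the records end up, one option (reusing rdd2 from above; the variable names here are illustrative) is to tag each element with its partition index via mapPartitionsWithIndex:

val layout = rdd2
  .mapPartitionsWithIndex((idx, iter) => iter.map(kv => (idx, kv)))
  .collect()
layout.foreach(println)
// With HashPartitioner(2) an Int key k goes to partition k.hashCode % 2,
// so key 2 lands in partition 0 while keys 1 and 3 land in partition 1.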

groupByKey

Groups the values of the data source by key.

val dataRDD1 =
 sc.makeRDD(List(("a",1),("b",2),("c",3),("a",4)))
val dataRDD2 = dataRDD1.groupByKey()
val dataRDD3 = dataRDD1.groupByKey(2)
val dataRDD4 = dataRDD1.groupByKey(new HashPartitioner(2))
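
groupByKey returns an RDD[(String, Iterable[Int])] here; a minimal sketch of consuming that result (reusing dataRDD2 from above):

val summed = dataRDD2.map { case (key, values) => (key, values.sum) }
summed.collect().foreach(println) // expected: (a,5), (b,2), (c,3) in some order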

reduceByKey

Aggregates the values that share the same key.

val dataRDD1 = sc.makeRDD(List(("a",1),("b",2),("c",3),("a",4)))
val dataRDD2 = dataRDD1.reduceByKey(_+_)
val dataRDD3 = dataRDD1.reduceByKey(_+_, 2)
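
Unlike groupByKey, reduceByKey combines values inside each partition before the shuffle, so less data moves across the network; a minimal word-count sketch (the input list is illustrative):

val words = sc.makeRDD(List("spark", "scala", "spark"))
val wordCounts = words.map(word => (word, 1)).reduceByKey(_ + _)
wordCounts.collect().foreach(println) // expected: (spark,2), (scala,1)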

aggregateByKey

Computes values within each partition and across partitions according to separate rules; the zero value is the initial value for the intra-partition computation.

val dataRDD1 =
 sc.makeRDD(List(("a",1),("b",2),("c",3),("a",4)))
val dataRDD2 =
 dataRDD1.aggregateByKey(0)(_+_,_+_)
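
The two function parameters only make a visible difference when they differ; a sketch that takes the per-key maximum inside each partition and then sums those maxima across partitions (2 partitions assumed for illustration):

val pairs = sc.makeRDD(List(("a", 1), ("a", 2), ("b", 3), ("a", 4)), 2)
// partition 0: ("a",1), ("a",2)   partition 1: ("b",3), ("a",4)
val maxThenSum = pairs.aggregateByKey(0)(math.max(_, _), _ + _)
maxThenSum.collect().foreach(println) // expected: (a,6) = 2 + 4, (b,3)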

foldByKey

When the intra-partition and inter-partition operations are the same, aggregateByKey can be simplified to foldByKey.

val dataRDD1 =
 sc.makeRDD(List(("a",1),("b",2),("c",3),("a",4)))
val dataRDD2 = dataRDD1.foldByKey(0)(_+_)
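
Under that reading, foldByKey(0)(_+_) and aggregateByKey(0)(_+_, _+_) should give the same result (a quick sanity-check sketch reusing dataRDD1 from above):

val viaFold = dataRDD1.foldByKey(0)(_ + _).collect().toMap
val viaAggregate = dataRDD1.aggregateByKey(0)(_ + _, _ + _).collect().toMap
println(viaFold == viaAggregate) // expected: true, both Map(a -> 5, b -> 2, c -> 3)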

sortByKey

Called on an RDD of (K, V) pairs where K has an implicit Ordering; returns the RDD sorted by key, ascending when true is passed and descending when false.

val dataRDD1 = sc.makeRDD(List(("a",1),("b",2),("c",3)))
val sortRDD1: RDD[(String, Int)] = dataRDD1.sortByKey(true)
val sortRDD2: RDD[(String, Int)] = dataRDD1.sortByKey(false)

join

Called on RDDs of type (K, V) and (K, W); returns an RDD of (K, (V, W)) with all pairs of elements that share a key.

val rdd: RDD[(Int, String)] = sc.makeRDD(Array((1, "a"), (2, "b"), (3, "c")))
val rdd1: RDD[(Int, Int)] = sc.makeRDD(Array((1, 4), (2, 5), (3, 6)))
rdd.join(rdd1).collect().foreach(println)
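// For the inputs above this prints (1,(a,4)), (2,(b,5)), (3,(c,6)), possibly in a different order.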

Accumulators

An accumulator aggregates information from the executors back to the driver: each task updates its own copy, and Spark merges those copies into the driver-side value.

val rdd = sparkContext.makeRDD(List(1,2,3,4,5))
// Declare the accumulator
val sum = sparkContext.longAccumulator("sum")
rdd.foreach(
  num => {
    // Add to the accumulator
    sum.add(num)
  }
)
// Read the accumulator's value on the driver
println("sum = " + sum.value)

Creating a custom accumulator:

import scala.collection.mutable
import org.apache.spark.util.AccumulatorV2

// A word-count accumulator: the IN type is String (a word), the OUT type is a mutable Map[word, count].
class WordCountAccumulator extends AccumulatorV2[String, mutable.Map[String, Long]] {
  var map: mutable.Map[String, Long] = mutable.Map()

  // The accumulator is "zero" when no word has been counted yet.
  override def isZero: Boolean = map.isEmpty

  override def copy(): AccumulatorV2[String, mutable.Map[String, Long]] = new WordCountAccumulator

  override def reset(): Unit = map.clear()

  // Add one occurrence of a word.
  override def add(v: String): Unit = {
    map(v) = map.getOrElse(v, 0L) + 1L
  }

  // Merge the per-task maps by summing the counts for each word.
  override def merge(other: AccumulatorV2[String, mutable.Map[String, Long]]): Unit = {
    val map1 = map
    val map2 = other.value
    map = map1.foldLeft(map2)(
      (innerMap, kv) => {
        innerMap(kv._1) = innerMap.getOrElse(kv._1, 0L) + kv._2
        innerMap
      }
    )
  }

  override def value: mutable.Map[String, Long] = map
}


Using the custom accumulator:

val rdd = sparkContext.makeRDD(
 List("spark","scala","spark hadoop","hadoop")
)
val acc = new WordCountAccumulator
sparkContext.register(acc)

rdd.flatMap(_.split(" ")).foreach(
 word=>acc.add(word)
)
println(acc.value)
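
For the input above, splitting on spaces yields spark, scala, spark, hadoop, hadoop, so the printed value should be Map(spark -> 2, scala -> 1, hadoop -> 2) (key order may vary).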

Broadcast variables

A broadcast variable ships a read-only value to the executors once per executor instead of once per task.

import org.apache.spark.broadcast.Broadcast

val rdd1 = sparkContext.makeRDD(List(("a", 1), ("b", 2), ("c", 3), ("d", 4)), 4)
val list = List(("a", 4), ("b", 5), ("c", 6), ("d", 7))

// Ship the list to every executor once, as a read-only broadcast value.
val broadcast: Broadcast[List[(String, Int)]] = sparkContext.broadcast(list)
val resultRDD: RDD[(String, (Int, Int))] = rdd1.map {
  case (key, num) => {
    var num2 = 0
    // Look up the matching value in the broadcast list.
    for ((k, v) <- broadcast.value) {
      if (k == key) {
        num2 = v
      }
    }
    (key, (num, num2))
  }
}
resultRDD.collect().foreach(println)
sparkContext.stop()
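
Scanning the broadcast list for every record is a linear lookup; broadcasting a Map instead gives the same result with constant-time lookups. A sketch using the same data, which would replace the lookup loop above (before sparkContext.stop()):

val broadcastMap: Broadcast[Map[String, Int]] = sparkContext.broadcast(list.toMap)
val resultRDD2: RDD[(String, (Int, Int))] = rdd1.map {
  case (key, num) => (key, (num, broadcastMap.value.getOrElse(key, 0)))
}
resultRDD2.collect().foreach(println) // expected: (a,(1,4)), (b,(2,5)), (c,(3,6)), (d,(4,7))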

reduce

Aggregates all elements of the RDD, first within each partition and then across partitions.

val rdd: RDD[Int] = sc.makeRDD(List(1,2,3,4))
val reduceResult: Int = rdd.reduce(_+_)
println(reduceResult)

foreach

Iterates over every element of the RDD in a distributed fashion. (The example below first collects the data to the driver and then uses Scala's foreach to print there.)

val rdd: RDD[Int] = sc.makeRDD(List(1,2,3,4))

rdd.collect().foreach(println)
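
The line above collects to the driver and prints there; the distributed action itself is rdd.foreach, which runs on the executors (a sketch; in local mode the output appears in the same console but in no guaranteed order):

rdd.foreach(println) // executed on the executors; ordering is not deterministic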

count

Returns the number of elements in the RDD.

val rdd: RDD[Int] = sc.makeRDD(List(1,2,3,4))

val countResult: Long = rdd.count()
println(countResult)

first

Returns the first element of the RDD.

val rdd: RDD[Int] = sc.makeRDD(List(1,2,3,4))

val firstResult: Int = rdd.first()
println(firstResult)

take

Returns an array of the first n elements of the RDD.

val rdd: RDD[Int] = sc.makeRDD(List(1,2,3,4))

val takeResult: Array[Int] = rdd.take(2)

takeResult.foreach(println)


aggregate

Aggregates the elements of each partition starting from a zero value, then combines the partition results with a second function. Note that the zero value participates both in the intra-partition computation and in the inter-partition merge (unlike aggregateByKey, where it is used only within partitions).

val rdd: RDD[Int] = sc.makeRDD(List(1,2,3,4),8)
// Sum all elements of the RDD
val result1: Int = rdd.aggregate(0)(_+_, _+_)
val result2: Int = rdd.aggregate(10)(_+_, _+_)

println(result1) // 10
println(result2) // 100: the zero value 10 is also applied in the inter-partition merge

fold

A simplified aggregate: the same function is used within and across partitions.

val rdd: RDD[Int] = sc.makeRDD(List(1, 2, 3, 4))
val foldResult: Int = rdd.fold(0)(_+_)
println(foldResult)

countByKey

Counts the number of elements for each key.

val rdd: RDD[(Int, String)] = sc.makeRDD(List((1, "a"), (1, "a"), (1, "a"), (2,"b"), (3, "c"), (3, "c")))

val result: collection.Map[Int, Long] = rdd.countByKey()
print(result)
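// expected: Map(1 -> 3, 2 -> 1, 3 -> 2)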


save operators

➢ Function signatures

def saveAsTextFile(path: String): Unit
def saveAsObjectFile(path: String): Unit
def saveAsSequenceFile(
  path: String,
  codec: Option[Class[_ <: CompressionCodec]] = None): Unit // for awareness only

➢ Function description

Saves the data to files in different formats.

val rdd: RDD[Int] = sc.makeRDD(List(1, 2, 3, 4))
// Save as a text file
rdd.saveAsTextFile("Spark-core/output/output")
// Serialize the elements and save them to an object file
rdd.saveAsObjectFile("Spark-core/output/output1")
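
saveAsSequenceFile is only defined for key-value RDDs (it writes Hadoop SequenceFiles), so it needs a pair RDD; a minimal sketch with an illustrative output path:

val pairRdd: RDD[(Int, String)] = sc.makeRDD(List((1, "a"), (2, "b")))
pairRdd.saveAsSequenceFile("Spark-core/output/output2") // path is illustrative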

