与 map 类似,遍历的单位是每个 partition 上的数据。相对于 map 算子,它是一个高性能的算子。
1. java
package transformations;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Demonstrates the mapPartitions transformation: the user function is
 * invoked once per partition with an Iterator over that partition's
 * elements, instead of once per element as with map.
 *
 * @Author yqq
 * @Date 2021/12/09 18:17
 * @Version 1.0
 */
public class MapPartitionsTest {
    public static void main(String[] args) {
        JavaSparkContext context = new JavaSparkContext(
                new SparkConf()
                        .setMaster("local")
                        .setAppName("mapPartitions")
        );
        context.setLogLevel("Error");
        try {
            JavaRDD<String> rdd = context.parallelize(
                    Arrays.asList("a", "b", "c", "e", "f", "g"), 2);
            // The output buffer MUST be created inside the partition function.
            // The original code captured a single ArrayList from the driver
            // closure; mutable state shared across partition tasks is unsafe
            // and can emit duplicated elements when tasks reuse the same
            // deserialized closure instance.
            JavaRDD<String> rdd2 = rdd.mapPartitions(it -> {
                List<String> out = new ArrayList<>();
                while (it.hasNext()) {
                    out.add(it.next() + "#");
                }
                return out.iterator();
            });
            rdd2.foreach(e -> System.out.print(e + "\t"));
        } finally {
            // Release local Spark resources even if the job fails.
            context.stop();
        }
    }
}
2. scala
package transformation
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.mutable.ListBuffer
/**
 * Demonstrates mapPartitions: the supplied function receives one
 * Iterator per partition rather than one element at a time.
 *
 * @Author yqq
 * @Date 2021/12/09 19:13
 * @Version 1.0
 */
object MapPartitionTest {
  def main(args: Array[String]): Unit = {
    val context = new SparkContext(
      new SparkConf()
        .setMaster("local")
        .setAppName("mapPartition")
    )
    context.setLogLevel("Error")
    try {
      val rdd = context.parallelize(List[String]("a", "b", "c", "e", "f", "g"), 2)
      // Transform the partition iterator lazily instead of materializing the
      // whole partition into a ListBuffer first: the output is identical, but
      // nothing is buffered per partition, so this also works for partitions
      // that do not fit in memory.
      val array: Array[String] = rdd
        .mapPartitions(iter => iter.map(_ + "# "))
        .collect()
      array.foreach(print)
    } finally {
      // Release local Spark resources even if the job fails.
      context.stop()
    }
  }
}