import math

from pyspark import SparkConf,SparkContext

#from pyspark.sql import SQlContext
from pyspark.sql import SQLContext

from random import random
# Configure the Spark application name and create the driver-side context.
conf=SparkConf().setAppName('IsPrime')

# SparkContext is the entry point for building RDDs below.
sc=SparkContext(conf=conf)
# NOTE(review): sqlCtx is created but never used in this script — presumably
# left over from experimentation; confirm before removing.
sqlCtx=SQLContext(sc)

# NOTE(review): mid-file import; conventionally this belongs with the imports at the top.
import time

#sqlCtx=SQLContext.read(sc)


def IsPrime(n):
    """Return True if n is a prime number, False otherwise.

    Args:
        n: the integer to test for primality.

    Returns:
        bool: True when n is prime, False for n < 2, even n > 2,
        and composite odd numbers.

    Raises:
        TypeError: if n is not an int.
    """
    if not isinstance(n, int):
        raise TypeError("argument passed to is_prime is not of 'int' type")
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        # Even numbers greater than 2 are never prime; ruling them out here
        # also lets the loop below step by 2, halving the work.
        return False
    # math.isqrt is exact for arbitrarily large ints, unlike
    # int(math.ceil(math.sqrt(n))), whose float rounding can misplace the
    # bound for very large n.  (Also avoids shadowing the builtin `max`.)
    limit = math.isqrt(n)
    # Only odd divisors need checking once 2 has been ruled out.
    for i in range(3, limit + 1, 2):
        if n % i == 0:
            return False
    return True


# --- Distributed count: filter the range through IsPrime on Spark workers ---
spark_start = time.time()
candidates = sc.parallelize(range(8 * 10**6))
prime_count_spark = candidates.filter(IsPrime).count()
print('=' * 30)
print(prime_count_spark)

print('spark time=', time.time() - spark_start)


# --- Serial baseline: same count in a single Python process for comparison ---
serial_start = time.time()

num = sum(1 for candidate in range(8 * 10**6) if IsPrime(candidate))
print(num)

print('python time', time.time() - serial_start)
# Sample run output:
# ==============================
# 539777
# spark time= 74.22103095054626
# 539777
# python time 181.73229908943176
#
# Note: Spark is roughly twice as fast as the serial Python computation here.