Back to home page

OSCL-LXR

 
 

    


0001 #
0002 # Licensed to the Apache Software Foundation (ASF) under one or more
0003 # contributor license agreements.  See the NOTICE file distributed with
0004 # this work for additional information regarding copyright ownership.
0005 # The ASF licenses this file to You under the Apache License, Version 2.0
0006 # (the "License"); you may not use this file except in compliance with
0007 # the License.  You may obtain a copy of the License at
0008 #
0009 #    http://www.apache.org/licenses/LICENSE-2.0
0010 #
0011 # Unless required by applicable law or agreed to in writing, software
0012 # distributed under the License is distributed on an "AS IS" BASIS,
0013 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0014 # See the License for the specific language governing permissions and
0015 # limitations under the License.
0016 #
0017 
0018 r"""
0019  Use DataFrames and SQL to count words in UTF8 encoded, '\n' delimited text received from the
0020  network every second.
0021 
0022  Usage: sql_network_wordcount.py <hostname> <port>
0023    <hostname> and <port> describe the TCP server that Spark Streaming would connect to receive data.
0024 
0025  To run this on your local machine, you need to first run a Netcat server
0026     `$ nc -lk 9999`
0027  and then run the example
0028     `$ bin/spark-submit examples/src/main/python/streaming/sql_network_wordcount.py localhost 9999`
0029 """
0030 from __future__ import print_function
0031 
import sys
import traceback

from pyspark import SparkContext
from pyspark.sql import Row, SparkSession
from pyspark.streaming import StreamingContext
0037 
0038 
def getSparkSessionInstance(sparkConf):
    """Return a lazily-created, process-wide singleton SparkSession.

    Worker processes re-execute this module, so the session is stashed in
    ``globals()`` under a well-known key rather than in a closure: the first
    call builds a SparkSession from ``sparkConf``, later calls reuse it.
    """
    session = globals().get("sparkSessionSingletonInstance")
    if session is None:
        session = (
            SparkSession.builder
            .config(conf=sparkConf)
            .getOrCreate()
        )
        globals()["sparkSessionSingletonInstance"] = session
    return session
0046 
0047 
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: sql_network_wordcount.py <hostname> <port> ", file=sys.stderr)
        sys.exit(-1)
    host, port = sys.argv[1:]
    sc = SparkContext(appName="PythonSqlNetworkWordCount")
    # 1-second micro-batch interval.
    ssc = StreamingContext(sc, 1)

    # Create a socket stream on target ip:port and count the
    # words in input stream of \n delimited text (eg. generated by 'nc')
    lines = ssc.socketTextStream(host, int(port))
    words = lines.flatMap(lambda line: line.split(" "))

    # Convert RDDs of the words DStream to DataFrame and run SQL query
    def process(time, rdd):
        """Count words in one micro-batch via SQL and print the result.

        ``time`` is the batch timestamp; ``rdd`` holds the batch's words.
        Runs on the driver for every batch, including empty ones.
        """
        print("========= %s =========" % str(time))

        try:
            # Get the singleton instance of SparkSession
            spark = getSparkSessionInstance(rdd.context.getConf())

            # Convert RDD[String] to RDD[Row] to DataFrame
            rowRdd = rdd.map(lambda w: Row(word=w))
            wordsDataFrame = spark.createDataFrame(rowRdd)

            # Creates a temporary view using the DataFrame.
            wordsDataFrame.createOrReplaceTempView("words")

            # Do word count on table using SQL and print it
            wordCountsDataFrame = \
                spark.sql("select word, count(*) as total from words group by word")
            wordCountsDataFrame.show()
        except Exception:
            # Empty batches can fail schema inference in createDataFrame;
            # keep the stream alive, but report the error instead of the
            # original bare `except: pass`, which also hid genuine bugs
            # (and would even have swallowed KeyboardInterrupt).
            traceback.print_exc(file=sys.stderr)

    words.foreachRDD(process)
    ssc.start()
    ssc.awaitTermination()