Back to home page

OSCL-LXR

 
 

    


#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
0017 
"""Latent Dirichlet Allocation (LDA) example using PySpark MLlib.

Reads a whitespace-separated word-count matrix, trains a 3-topic LDA
model, prints each topic as a distribution over the vocabulary, then
round-trips the model through save/load.
"""
from __future__ import print_function

from pyspark import SparkContext
# $example on$
from pyspark.mllib.clustering import LDA, LDAModel
from pyspark.mllib.linalg import Vectors
# $example off$

if __name__ == "__main__":
    sc = SparkContext(appName="LatentDirichletAllocationExample")  # SparkContext

    # $example on$
    # Load and parse the data: one document per line, space-separated word counts.
    data = sc.textFile("data/mllib/sample_lda_data.txt")
    parsedData = data.map(lambda line: Vectors.dense([float(x) for x in line.strip().split(' ')]))
    # Index documents with unique IDs; LDA.train expects an RDD of [docId, vector].
    corpus = parsedData.zipWithIndex().map(lambda x: [x[1], x[0]]).cache()

    # Number of topics — single source of truth for both training and the
    # printing loop below (previously duplicated as the literal 3).
    numTopics = 3

    # Cluster the documents into numTopics topics using LDA
    ldaModel = LDA.train(corpus, k=numTopics)

    # Output topics. Each is a distribution over words (matching word count vectors)
    vocabSize = ldaModel.vocabSize()  # hoisted out of the loop below
    print("Learned topics (as distributions over vocab of " + str(vocabSize)
          + " words):")
    topics = ldaModel.topicsMatrix()
    for topic in range(numTopics):
        print("Topic " + str(topic) + ":")
        for word in range(0, vocabSize):
            print(" " + str(topics[word][topic]))

    # Save and load model
    ldaModel.save(sc, "target/org/apache/spark/PythonLatentDirichletAllocationExample/LDAModel")
    sameModel = LDAModel\
        .load(sc, "target/org/apache/spark/PythonLatentDirichletAllocationExample/LDAModel")
    # $example off$

    sc.stop()