#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

# $example on$
from pyspark.ml.feature import Tokenizer, RegexTokenizer
from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType
# $example off$
from pyspark.sql import SparkSession

if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("TokenizerExample")\
        .getOrCreate()

    # $example on$
    sentenceDataFrame = spark.createDataFrame([
        (0, "Hi I heard about Spark"),
        (1, "I wish Java could use case classes"),
        (2, "Logistic,regression,models,are,neat")
    ], ["id", "sentence"])

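    # Tokenizer lowercases each sentence and splits it on whitespace;
    # RegexTokenizer splits on matches of the supplied regex pattern instead.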
    tokenizer = Tokenizer(inputCol="sentence", outputCol="words")

    regexTokenizer = RegexTokenizer(inputCol="sentence", outputCol="words", pattern="\\W")
    # alternatively, pattern="\\w+", gaps=False
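    # (with gaps=False the pattern matches the tokens themselves rather than
    # the separators between them, which yields the same tokens for this input)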

    # UDF that returns the number of tokens in each row's "words" array
    countTokens = udf(lambda words: len(words), IntegerType())

    tokenized = tokenizer.transform(sentenceDataFrame)
    tokenized.select("sentence", "words")\
        .withColumn("tokens", countTokens(col("words"))).show(truncate=False)

    regexTokenized = regexTokenizer.transform(sentenceDataFrame)
    regexTokenized.select("sentence", "words") \
        .withColumn("tokens", countTokens(col("words"))).show(truncate=False)
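    # In the output, the whitespace Tokenizer keeps "Logistic,regression,models,are,neat"
    # as a single token, while RegexTokenizer splits it into five tokens.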
    # $example off$

    spark.stop()