run_kafka.py
import os
import time

from pyspark.sql import SparkSession
from pyspark.sql import functions as f
from pyspark.sql.types import StructType, StructField, LongType, StringType, DoubleType

# Extra packages must be declared before the SparkSession is created, otherwise
# the setting has no effect. The trailing "pyspark-shell" is required when the
# script is launched with plain `python` rather than `spark-submit`.
os.environ['PYSPARK_SUBMIT_ARGS'] = \
    "--packages org.apache.spark:spark-sql-kafka-0-10_2.12:2.4.8," \
    "com.microsoft.azure:spark-mssql-connector:1.0.1 pyspark-shell"

# Schema of the JSON records published to the Kafka topic.
SCHEMA = StructType([StructField("Arrival_Time", LongType(), True),
                     StructField("Creation_Time", LongType(), True),
                     StructField("Device", StringType(), True),
                     StructField("Index", LongType(), True),
                     StructField("Model", StringType(), True),
                     StructField("User", StringType(), True),
                     StructField("gt", StringType(), True),
                     StructField("x", DoubleType(), True),
                     StructField("y", DoubleType(), True),
                     StructField("z", DoubleType(), True)])

spark = SparkSession.builder.appName('demo_app')\
    .config("spark.kryoserializer.buffer.max", "512m")\
    .getOrCreate()
kafka_server = 'dds2020s-kafka.eastus.cloudapp.azure.com:9092'
topic = "activities"

# Read the raw Kafka stream from the earliest offset, throttled to 432 offsets
# per micro-batch; decode each message's value from ASCII bytes, parse it as
# JSON with the schema above, and flatten the struct into top-level columns.
streaming = spark.readStream\
    .format("kafka")\
    .option("kafka.bootstrap.servers", kafka_server)\
    .option("subscribe", topic)\
    .option("startingOffsets", "earliest")\
    .option("failOnDataLoss", False)\
    .option("maxOffsetsPerTrigger", 432)\
    .load()\
    .select(f.from_json(f.decode("value", "US-ASCII"), schema=SCHEMA).alias("value"))\
    .select("value.*")
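# A debugging alternative (sketch, not in the original script): instead of the
# in-memory sink used below, each micro-batch can be printed straight to the
# console. Uncomment to try it; it starts a second streaming query.
# streaming.writeStream.format("console").outputMode("append").start()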
# Running count of rows per activity label ("gt").
activityCounts = streaming.groupBy("gt").count()

# Write the running counts to an in-memory table named "activity_counts",
# which can then be queried with plain SQL on the driver.
activityQuery = activityCounts\
    .writeStream\
    .queryName("activity_counts")\
    .format("memory")\
    .outputMode("complete")\
    .start()

# Give the stream some time to pull data from Kafka, then inspect the counts.
time.sleep(45)
spark.sql("SELECT * FROM activity_counts").show()
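
# Optional cleanup (a minimal sketch, not part of the original script):
# polling the in-memory table a few more times shows the counts growing as
# further offsets are consumed; afterwards the streaming query and the Spark
# session can be shut down explicitly rather than left running.
for _ in range(3):
    time.sleep(10)
    spark.sql("SELECT * FROM activity_counts").show()
activityQuery.stop()
spark.stop()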