convert array to struct pyspark - python

I'm quite new to pyspark and I have a dataframe that currently looks like below.
+---------------------------------+-------------------+
| col1                            | col2              |
+---------------------------------+-------------------+
| [(a, 0)], [(b,0)].....[(z,1)]   | [0, 0, ... 1]     |
| [(b, 0)], [(b,1)].....[(z,0)]   | [0, 1, ... 0]     |
| [(a, 0)], [(c, 1)].....[(z,0)]  | [0, 1, ... 0]     |
+---------------------------------+-------------------+
I extracted the values from col1.QueryNum into col2, and when I print the schema, col2 is an array containing the list of numbers from col1.QueryNum.
Ultimately my goal is to convert the list values in col2 into a struct format in PySpark (refer to the desired schema).
Current Schema
|-- col1: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- types: string (nullable = true)
| | |-- QueryNum: integer (nullable = true)
|-- col2: array (nullable = true)
| |-- element: integer (containsNull = true)
Desired Schema
|-- col2: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- val1: integer (nullable = true)
| | |-- val2: integer (nullable = true)
.
.
.
| | |-- val80: integer (nullable = true)
I tried using from_json and it's not really working.

If you have a fixed array size, you can create the struct using a list comprehension:
from pyspark.sql import functions as F
df1 = df.withColumn(
    "col2",
    F.array(
        F.struct(*[
            F.col("col1")[i]["QueryNum"].alias(f"val{i+1}") for i in range(2)
        ])
    )
)
df1.show()
#+----------------+--------+
#|col1 |col2 |
#+----------------+--------+
#|[[0, a], [0, b]]|[[0, 0]]|
#|[[0, b], [1, b]]|[[0, 1]]|
#|[[0, a], [1, c]]|[[0, 1]]|
#+----------------+--------+
df1.printSchema()
#root
#|-- col1: array (nullable = true)
#| |-- element: struct (containsNull = true)
#| | |-- QueryNum: long (nullable = true)
#| | |-- types: string (nullable = true)
#|-- col2: array (nullable = false)
#| |-- element: struct (containsNull = false)
#| | |-- val1: long (nullable = true)
#| | |-- val2: long (nullable = true)
Note, however, that there is no need to use array in this case, since you'll always have exactly one struct in that array. Just use a simple struct:
df1 = df.withColumn(
    "col2",
    F.struct(*[
        F.col("col1")[i]["QueryNum"].alias(f"val{i+1}") for i in range(2)
    ])
)
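Since the real data has up to 80 values per row, the range bound just needs to be larger; as a rough sketch (not part of the original answer), you can also derive the bound from the longest col1 array instead of hard-coding it, at the cost of an extra pass over the data:
# Sketch: derive the number of val fields from the longest col1 array;
# rows with shorter arrays simply get nulls for the trailing fields.
n = df.select(F.max(F.size("col1"))).first()[0]
df1 = df.withColumn(
    "col2",
    F.struct(*[
        F.col("col1")[i]["QueryNum"].alias(f"val{i+1}") for i in range(n)
    ])
)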
Or if you prefer a map type:
df1 = df.withColumn(
    "col2",
    F.map_from_entries(
        F.expr("transform(col1, (x, i) -> struct('val' || (i+1) as name, x.QueryNum as value))")
    )
)
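With the map variant, col2 becomes a single map keyed by val1, val2, and so on. A quick way to inspect the result (a sketch using the standard map_keys/map_values functions):
df1.printSchema()
df1.select(F.map_keys("col2").alias("keys"), F.map_values("col2").alias("values")).show(truncate=False)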

Related

Move deeply nested fields one level up in pyspark dataframe

I have a pyspark dataframe created from XML. Because of the way XML is structured I have an extra, unnecessary level of nesting in the schema of the dataframe.
The schema of my current dataframe:
root
|-- a: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- movies: struct (nullable = true)
| | | |-- movie: array (nullable = true)
| | | | |-- element: struct (containsNull = true)
| | | | | |-- b: string (nullable = true)
| | | | | |-- c: string (nullable = true)
| | | | | |-- d: integer (nullable = true)
| | | | | |-- e: string (nullable = true)
| | |-- f: string (nullable = true)
| | |-- g: string (nullable = true)
I'm trying to replace the movies struct with the movie array underneath it as follows:
root
|-- a: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- movies: array (nullable = true)
| | | |-- element: struct (containsNull = true)
| | | | |-- b: string (nullable = true)
| | | | |-- c: string (nullable = true)
| | | | |-- d: integer (nullable = true)
| | | | |-- e: string (nullable = true)
| | |-- f: string (nullable = true)
| | |-- g: string (nullable = true)
The closest I've gotten was using:
from pyspark.sql import functions as F
df.withColumn("a", F.transform('a', lambda x: x.withField("movies_new", F.col("a.movies.movie"))))
which results in the following schema:
root
|-- a: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- movies: struct (nullable = true)
| | | |-- movie: array (nullable = true)
| | | | |-- element: struct (containsNull = true)
| | | | | |-- b: string (nullable = true)
| | | | | |-- c: string (nullable = true)
| | | | | |-- d: integer (nullable = true)
| | | | | |-- e: string (nullable = true)
| | |-- f: string (nullable = true)
| | |-- g: string (nullable = true)
| | |-- movies_new: array (nullable = true)
| | | |-- element: array (containsNull = true)
| | | | |-- element: struct (containsNull = true)
| | | | | |-- b: string (nullable = true)
| | | | | |-- c: string (nullable = true)
| | | | | |-- d: integer (nullable = true)
| | | | | |-- e: string (nullable = true)
I understand why this is happening, but I thought that if I never extracted the nested array out of 'a', it might not become an array of arrays.
Any suggestions?
The logic is:
Explode array "a".
Recompute the new struct as (movies.movie, f, g).
Collect "a" back into an array.
df = df.withColumn("a", F.explode("a"))
df = df.withColumn("a", F.struct(
    df.a.movies.getField("movie").alias("movies"),
    df.a.f.alias("f"),
    df.a.g.alias("g")
))
df = df.select(F.collect_list("a").alias("a"))
The full working code:
import pyspark.sql.functions as F

df = spark.createDataFrame(data=[
    [[(([("b1", "c1", "d1", "e1")],), "f1", "g1")]]
], schema="a array<struct<movies struct<movie array<struct<b string, c string, d string, e string>>>, f string, g string>>")
df.printSchema()
# df.show(truncate=False)

df = df.withColumn("a", F.explode("a"))
df = df.withColumn("a", F.struct(
    df.a.movies.getField("movie").alias("movies"),
    df.a.f.alias("f"),
    df.a.g.alias("g")
))
df = df.select(F.collect_list("a").alias("a"))
df.printSchema()
# df.show(truncate=False)
Output schema before:
root
|-- a: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- movies: struct (nullable = true)
| | | |-- movie: array (nullable = true)
| | | | |-- element: struct (containsNull = true)
| | | | | |-- b: string (nullable = true)
| | | | | |-- c: string (nullable = true)
| | | | | |-- d: string (nullable = true)
| | | | | |-- e: string (nullable = true)
| | |-- f: string (nullable = true)
| | |-- g: string (nullable = true)
Output schema after:
root
|-- a: array (nullable = false)
| |-- element: struct (containsNull = false)
| | |-- movies: array (nullable = true)
| | | |-- element: struct (containsNull = true)
| | | | |-- b: string (nullable = true)
| | | | |-- c: string (nullable = true)
| | | | |-- d: string (nullable = true)
| | | | |-- e: string (nullable = true)
| | |-- f: string (nullable = true)
| | |-- g: string (nullable = true)
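For what it's worth, the transform-based attempt from the question can also be made to work by referencing the lambda element instead of the outer column, which avoids the explode/collect_list round trip. A sketch, assuming Spark 3.1+ for Column.withField:
from pyspark.sql import functions as F
# Sketch: rebuild each element of "a" in place, replacing the "movies" struct
# field with the "movie" array nested inside it. Referencing x (the current
# element) rather than F.col("a.movies.movie") is what avoids the array-of-arrays.
df_alt = df.withColumn(
    "a",
    F.transform("a", lambda x: x.withField("movies", x["movies"]["movie"]))
)
df_alt.printSchema()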

Flattening the nested JSON spark

The code below flattens the entire JSON, but the output is not what I want and needs modification.
from pyspark.sql.functions import col, explode

# reading the data from the data lake
df = spark.read.json(f'/mnt/bronze/categories/**/*.json')

# Function to flatten nested JSON: creates a column for each string field and
# for each field of every top-level struct column
def flatten_df(nested_df):
    flat_cols = [c[0] for c in nested_df.dtypes if c[1][:6] == 'string']
    nested_cols = [c[0] for c in nested_df.dtypes if c[1][:6] == 'struct']
    flat_df = nested_df.select(flat_cols +
                               [col(nc + '.' + c).alias(nc + '_' + c)
                                for nc in nested_cols
                                for c in nested_df.select(nc + '.*').columns])
    return flat_df

# Top-level hierarchy
df = df.select('_embedded.*')

# Reaching the lower level called "items"
df1 = df.select(explode(df.items).alias('required'))

# Creating the dataframe passed to flatten_df to flatten everything under the "items" hierarchy
df2 = df1.select('required.*')
final = flatten_df(df2)
display(final)
The structure of the file is:
root
|-- _embedded: struct (nullable = true)
| |-- items: array (nullable = true)
| | |-- element: struct (containsNull = true)
| | | |-- _links: struct (nullable = true)
| | | | |-- self: struct (nullable = true)
| | | | | |-- href: string (nullable = true)
| | | |-- code: string (nullable = true)
| | | |-- labels: struct (nullable = true)
| | | | |-- bg_BG: string (nullable = true)
| | | | |-- cs_CZ: string (nullable = true)
| | | | |-- da_DK: string (nullable = true)
| | | | |-- de_AT: string (nullable = true)
| | | | |-- zh_TW: string (nullable = true)
| | | |-- parent: string (nullable = true)
| | | |-- updated: string (nullable = true)
|-- _links: struct (nullable = true)
| |-- first: struct (nullable = true)
| | |-- href: string (nullable = true)
| |-- next: struct (nullable = true)
| | |-- href: string (nullable = true)
| |-- previous: struct (nullable = true)
| | |-- href: string (nullable = true)
| |-- self: struct (nullable = true)
| | |-- href: string (nullable = true)
|-- current_page: long (nullable = true)
Output: it takes each key of labels and turns it into a column header, with its values as the cell values.
The desired output should instead present the labels keys and values as two columns (with labels_key and labels_values as the column names).
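One way to get that shape (a rough sketch, not from the original post) is to unpivot the flattened labels_* columns with stack, assuming final has been produced by the flatten_df call above:
# Sketch: unpivot every labels_* column produced by flatten_df into
# labels_key / labels_values rows; these columns are all strings, so
# stack's same-type requirement is satisfied.
label_cols = [c for c in final.columns if c.startswith('labels_')]
other_cols = [c for c in final.columns if not c.startswith('labels_')]
stack_expr = (
    f"stack({len(label_cols)}, "
    + ", ".join(f"'{c[len('labels_'):]}', `{c}`" for c in label_cols)
    + ") as (labels_key, labels_values)"
)
unpivoted = final.selectExpr(*other_cols, stack_expr)
display(unpivoted)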

Modifying element in nested array of struct

I have a nested array of structs and I would like to rename one of the fields, as shown in the example below.
Source format
|-- HelloWorld: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- version: string (nullable = true)
| | |-- abc-version: string (nullable = true) -----> This part needs to be renamed
| | |-- again_something: array (nullable = true)
| | | |-- element: map (containsNull = true)
| | | | |-- key: string
| | | | |-- value: string (valueContainsNull = true)
Output format should look like below.
|-- HelloWorld: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- version: string (nullable = true)
| | |-- abc_version: string (nullable = true) -----> This part has changed
| | |-- again_something: array (nullable = true)
| | | |-- element: map (containsNull = true)
| | | | |-- key: string
| | | | |-- value: string (valueContainsNull = true)
I tried different withField and F.expr approaches to transform the column name, but they didn't really work well.
Please help.
I would recast it with the same dtype while changing the column name, rebuilding the struct with transform:
df3 = df.withColumn(
    "HelloWorld",
    F.expr("transform(HelloWorld, x -> struct(cast(x['abc-version'] as string) as abc_version, x.version, x.again_something))")
)
root
|-- HelloWorld: array (nullable = true)
| |-- element: struct (containsNull = false)
| | |-- abc_version: string (nullable = true)
| | |-- version: string (nullable = true)
| | |-- again_something: array (nullable = true)
| | | |-- element: map (containsNull = true)
| | | | |-- key: string
| | | | |-- value: string (valueContainsNull = true)
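An alternative that avoids re-listing every struct member is to rename the field in place with withField/dropFields (a sketch, assuming Spark 3.1+; the backticks are needed because of the dash in the field name):
from pyspark.sql import functions as F
# Sketch: copy the value into a new field and drop the old one; all other
# struct members are left untouched (the renamed field moves to the end).
df3 = df.withColumn(
    "HelloWorld",
    F.transform(
        "HelloWorld",
        lambda x: x.withField("abc_version", x["abc-version"]).dropFields("`abc-version`")
    )
)
df3.printSchema()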

Spark: How to transpose and explode columns with dynamic nested arrays

I applied the algorithm from the question Spark: How to transpose and explode columns with nested arrays to transpose and explode a nested Spark dataframe with dynamic arrays.
I have added the row """{"id":3,"c":[{"date":3,"val":3, "val_dynamic":3}]}}""" to the dataframe, with a new column c whose array has a new val_dynamic field that can appear on a random basis.
I'm looking for required output 2 (transpose and explode), but even an example of required output 1 (transpose) would be very useful.
Input df:
+------------------+--------+-----------+---+
| a| b| c| id|
+------------------+--------+-----------+---+
|[{1, 1}, {11, 11}]| null| null| 1|
| null|[{2, 2}]| null| 2|
| null| null|[{3, 3, 3}]| 3| !!! NOTE: Added `val_dynamic`
+------------------+--------+-----------+---+
root
|-- a: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- date: long (nullable = true)
| | |-- val: long (nullable = true)
|-- b: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- date: long (nullable = true)
| | |-- val: long (nullable = true)
|-- c: array (nullable = true)
| |-- element: struct (containsNull = true)
| | |-- date: long (nullable = true)
| | |-- val: long (nullable = true)
| | |-- val_dynamic: long (nullable = true) !!! NOTE: Added `val_dynamic`
|-- id: long (nullable = true)
Required output 1 (transpose_df):
+---+------+-------------------+
| id| cols | arrays |
+---+------+-------------------+
| 1| a | [{1, 1}, {11, 11}]|
| 2| b | [{2, 2}] |
| 3| c | [{3, 3, 3}] | !!! NOTE: Added `val_dynamic`
+---+------+-------------------+
Required output 2 (explode_df):
+---+----+----+---+-----------+
| id|cols|date|val|val_dynamic|
+---+----+----+---+-----------+
| 1| a| 1| 1| null |
| 1| a| 11| 11| null |
| 2| b| 2| 2| null |
| 3| c| 3| 3| 3 | !!! NOTE: Added `val_dynamic`
+---+----+----+---+-----------+
Current code:
import pyspark.sql.functions as f
df = spark.read.json(sc.parallelize([
    """{"id":1,"a":[{"date":1,"val":1},{"date":11,"val":11}]}""",
    """{"id":2,"b":[{"date":2,"val":2}]}}""",
    """{"id":3,"c":[{"date":3,"val":3, "val_dynamic":3}]}}"""
]))
df.show()
cols = [ 'a', 'b', 'c']
#expr = stack(2,'a',a,'b',b,'c',c )
expr = f"stack({len(cols)}," + \
",".join([f"'{c}',{c}" for c in cols]) + \
")"
transpose_df = df.selectExpr("id", expr) \
.withColumnRenamed("col0", "cols") \
.withColumnRenamed("col1", "arrays") \
.filter("not arrays is null")
transpose_df.show()
explode_df = transpose_df.selectExpr('id', 'cols', 'inline(arrays)')
explode_df.show()
Current outcome
AnalysisException: cannot resolve 'stack(3, 'a', `a`, 'b', `b`, 'c', `c`)' due to data type mismatch: Argument 2 (array<struct<date:bigint,val:bigint>>) != Argument 6 (array<struct<date:bigint,val:bigint,val_dynamic:bigint>>); line 1 pos 0;
'Project [id#2304L, unresolvedalias(stack(3, a, a#2301, b, b#2302, c, c#2303), Some(org.apache.spark.sql.Column$$Lambda$2580/0x00000008411d3040#4d9eefd0))]
+- LogicalRDD [a#2301, b#2302, c#2303, id#2304L], false
ref : Transpose column to row with Spark
stack requires that all stacked columns have the same type. The problem here is that the structs inside of the arrays have different members. One approach would be to add the missing members to all structs so that the approach of my previous answer works again.
cols = ['a', 'b', 'c']

# create a map containing all struct fields per column
existing_fields = {
    c: [field.name for field in df.schema.fields[i].dataType.elementType.fields]
    for i, c in enumerate(df.columns) if c in cols
}

# get a (unique) set of all fields that exist in any of the columns
all_fields = set(sum(existing_fields.values(), []))

# create a list of transform expressions to fill up the structs with null fields
transform_exprs = [f"transform({c}, e -> named_struct(" +
                   ",".join([f"'{f}', {('e.' + f) if f in existing_fields[c] else 'cast(null as long)'}" for f in all_fields]) +
                   f")) as {c}" for c in cols]

# create a df where all columns contain arrays with the same struct
full_struct_df = df.selectExpr("id", *transform_exprs)
full_struct_df now has the following schema:
root
|-- id: long (nullable = true)
|-- a: array (nullable = true)
| |-- element: struct (containsNull = false)
| | |-- val: long (nullable = true)
| | |-- val_dynamic: long (nullable = true)
| | |-- date: long (nullable = true)
|-- b: array (nullable = true)
| |-- element: struct (containsNull = false)
| | |-- val: long (nullable = true)
| | |-- val_dynamic: long (nullable = true)
| | |-- date: long (nullable = true)
|-- c: array (nullable = true)
| |-- element: struct (containsNull = false)
| | |-- val: long (nullable = true)
| | |-- val_dynamic: long (nullable = true)
| | |-- date: long (nullable = true)
From here the logic works as before:
stack_expr = f"stack({len(cols)}," + \
",".join([f"'{c}',{c}" for c in cols]) + \
")"
transpose_df = full_struct_df.selectExpr("id", stack_expr) \
.withColumnRenamed("col0", "cols") \
.withColumnRenamed("col1", "arrays") \
.filter("not arrays is null")
explode_df = transpose_df.selectExpr('id', 'cols', 'inline(arrays)')
The first part of this answer requires that:
each column mentioned in cols is an array of structs, and
all members of all structs are longs. The reason for this restriction is the cast(null as long) used when creating the transform expressions (see the sketch below for one way to relax it).
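A sketch of that relaxation (not part of the original answer): look up each field's data type in the schema and cast the null accordingly, reusing cols, existing_fields and all_fields from above.
# Sketch: map every field name to its data type string; this assumes a field
# that appears in several columns has the same type everywhere.
field_types = {
    field.name: field.dataType.simpleString()
    for c in cols
    for field in df.schema[c].dataType.elementType.fields
}
transform_exprs = [f"transform({c}, e -> named_struct(" +
                   ",".join([f"'{f}', {('e.' + f) if f in existing_fields[c] else f'cast(null as {field_types[f]})'}" for f in all_fields]) +
                   f")) as {c}" for c in cols]
full_struct_df = df.selectExpr("id", *transform_exprs)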

Co-filter two arrays in Pyspark struct based on Null values in one of the arrays

I would like to filter two ordered arrays in a struct that has the fields dates and values. An example DataFrame is below, followed by an explanation and an example of what I am trying to do.
from pyspark.sql import Row
import datetime
rows = [
    Row(
        id='1111',
        A=Row(
            dates=[datetime.datetime(2015, 7, 29, 14, 27), datetime.datetime(2015, 7, 31, 14, 27)],
            values=[20.0, 100.0]),
        B=Row(
            dates=[datetime.datetime(2015, 4, 18, 17, 52)],
            values=[12.58])
    ),
    Row(
        id='2222',
        A=Row(
            dates=[datetime.datetime(2011, 4, 28, 14, 27), datetime.datetime(2011, 4, 28, 14, 27)],
            values=[100.0, None]),
        B=Row(
            dates=[datetime.datetime(2011, 4, 18, 17, 52)],
            values=[None])
    ),
    Row(
        id='3333',
        A=None,
        B=None)
]

df = spark.createDataFrame(rows)
df.show(10, False)
+----+-----------------------------------------------------------+--------------------------------+
|id |A |B |
+----+-----------------------------------------------------------+--------------------------------+
|1111|[[2015-07-29 14:27:00, 2015-07-31 14:27:00], [20.0, 100.0]]|[[2015-04-18 17:52:00], [12.58]]|
|2222|[[2011-04-28 14:27:00, 2011-04-28 14:27:00], [100.0,]] |[[2011-04-18 17:52:00], []] |
|3333|null |null |
+----+-----------------------------------------------------------+--------------------------------+
df.printSchema()
root
|-- id: string (nullable = true)
|-- A: struct (nullable = true)
| |-- dates: array (nullable = true)
| | |-- element: timestamp (containsNull = true)
| |-- values: array (nullable = true)
| | |-- element: double (containsNull = true)
|-- B: struct (nullable = true)
| |-- dates: array (nullable = true)
| | |-- element: timestamp (containsNull = true)
| |-- values: array (nullable = true)
| | |-- element: double (containsNull = true)
Here dates and values are ordered such that each element of dates corresponds with each element of values. So the second value in values goes with the second date in dates.
I want to filter the df so that it removes the nulls and their corresponding dates, and if values contains only nulls it just returns null. Like:
new_df.show(10, False)
+----+-----------------------------------------------------------+--------------------------------+
|id |A |B |
+----+-----------------------------------------------------------+--------------------------------+
|1111|[[2015-07-29 14:27:00, 2015-07-31 14:27:00], [20.0, 100.0]]|[[2015-04-18 17:52:00], [12.58]]|
|2222|[[2011-04-28 14:27:00], [100.0]] |null |
|3333|null |null |
+----+-----------------------------------------------------------+--------------------------------+
You can use arrays_zip with filter to do the null filtering, then unzip the arrays back to their original states, while taking care of empty arrays or null columns:
import pyspark.sql.functions as F
df2 = df.withColumn(
    'A',
    F.expr("filter(arrays_zip(A.dates, A.values), x -> x.values is not null)")
).withColumn(
    'A',
    F.when(
        (F.size('A') != 0) & (F.col('A').isNotNull()),
        F.struct(F.col('A.0').alias('dates'), F.col('A.1').alias('values'))
    )
).withColumn(
    'B',
    F.expr("filter(arrays_zip(B.dates, B.values), x -> x.values is not null)")
).withColumn(
    'B',
    F.when(
        (F.size('B') != 0) & (F.col('B').isNotNull()),
        F.struct(F.col('B.0').alias('dates'), F.col('B.1').alias('values'))
    )
)
df2.show(truncate=False)
+----+-----------------------------------------------------------+--------------------------------+
|id |A |B |
+----+-----------------------------------------------------------+--------------------------------+
|1111|[[2015-07-29 13:27:00, 2015-07-31 13:27:00], [20.0, 100.0]]|[[2015-04-18 16:52:00], [12.58]]|
|2222|[[2011-04-28 13:27:00], [100.0]] |null |
|3333|null |null |
+----+-----------------------------------------------------------+--------------------------------+
df2.printSchema()
root
|-- id: string (nullable = true)
|-- A: struct (nullable = true)
| |-- dates: array (nullable = true)
| | |-- element: timestamp (containsNull = true)
| |-- values: array (nullable = true)
| | |-- element: double (containsNull = true)
|-- B: struct (nullable = true)
| |-- dates: array (nullable = true)
| | |-- element: timestamp (containsNull = true)
| |-- values: array (nullable = true)
| | |-- element: double (containsNull = true)
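Since the same two steps are repeated for A and B, they can be factored into a loop (a sketch of the same logic; on newer Spark versions arrays_zip may name the zipped fields dates/values instead of 0/1, in which case the field references below would change accordingly):
import pyspark.sql.functions as F
# Sketch: apply the filter-and-rebuild steps to every struct column that
# has the dates/values shape.
df2 = df
for c in ["A", "B"]:
    df2 = df2.withColumn(
        c,
        F.expr(f"filter(arrays_zip({c}.dates, {c}.values), x -> x.values is not null)")
    ).withColumn(
        c,
        F.when(
            (F.size(c) != 0) & (F.col(c).isNotNull()),
            F.struct(F.col(f"{c}.0").alias("dates"), F.col(f"{c}.1").alias("values"))
        )
    )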
