Join 2 JSON inputs linked by Primary Key - python

I am trying to merge two JSON inputs (in this example they come from files, but later they will come from a Google Pub/Sub input) from these:
orderID.json:
{"orderID":"test1","orderPacked":"Yes","orderSubmitted":"Yes","orderVerified":"Yes","stage":1}
combined.json:
{"barcode":"95590","name":"Ash","quantity":6,"orderID":"test1"}
{"barcode":"95591","name":"Beat","quantity":6,"orderID":"test1"}
{"barcode":"95592","name":"Cat","quantity":6,"orderID":"test1"}
{"barcode":"95593","name":"Dog","quantity":6,"orderID":"test2"}
{"barcode":"95594","name":"Scar","quantity":6,"orderID":"test2"}
To something like this (using orderID as the unique and primary key):
output.json:
{"orderID":"test1","orderPacked":"Yes","orderSubmitted":"Yes","orderVerified":"Yes","stage":1,"barcode":"95590","name":"Ash","quantity":6}
{"orderID":"test1","orderPacked":"Yes","orderSubmitted":"Yes","orderVerified":"Yes","stage":1,"barcode":"95591","name":"Beat","quantity":6}
{"orderID":"test1","orderPacked":"Yes","orderSubmitted":"Yes","orderVerified":"Yes","stage":1,"barcode":"95592","name":"Cat","quantity":6}
My code currently looks like this; it was adapted from "join two json in Google Cloud Platform with dataflow":
from __future__ import absolute_import
import argparse
import apache_beam as beam
import json
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from google.api_core import datetime_helpers
from google.api_core.exceptions import InternalServerError
from google.api_core.exceptions import ServiceUnavailable
from google.api_core.exceptions import TooManyRequests
from google.cloud import bigquery


def run(argv=None):
    """Build and run the pipeline."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--topic',
        type=str,
        help='Pub/Sub topic to read from')
    parser.add_argument(
        '--topic2',
        type=str,
        help='Pub/Sub topic to match with')
    parser.add_argument(
        '--output',
        help='Output local filename')
    args, pipeline_args = parser.parse_known_args(argv)
    options = PipelineOptions(pipeline_args)
    options.view_as(SetupOptions).save_main_session = True
    options.view_as(StandardOptions).streaming = True

    p = beam.Pipeline(options=options)

    orderID = (p | 'read from text1' >> beam.io.ReadFromText('orderID.json')
                 # 'Read from orderID PubSub' >> beam.io.ReadFromPubSub(topic=args.topic2)
                 | 'Parse JSON to Dict' >> beam.Map(lambda e: json.loads(e))
                 | 'key_orderID' >> beam.Map(lambda orders: (orders['orderID'], orders))
               )

    orders_si = beam.pvalue.AsDict(orderID)

    orderDetails = (p | 'read from text' >> beam.io.ReadFromText('combined.json')
                      | 'Parse JSON to Dict1' >> beam.Map(lambda e: json.loads(e)))
                      # 'Read from PubSub' >> beam.io.ReadFromPubSub(topic=args.topic)

    def join_orderID_orderDetails(order, order_dict):
        return order.update(order_dict[order['orderID']])

    joined_dicts = orderDetails | beam.Map(join_orderID_orderDetails, order_dict=orders_si)

    joined_dicts | beam.io.WriteToText('beam.output')

    p.run()
    # result.wait_until_finish()


if __name__ == '__main__':
    run()
But my output now in beam.output just shows:
None
None
None
Can someone point out what I am doing wrong here?
What makes this question different from the reported duplicate post is:
Why are my results "None"?
What am I doing wrong here?
I suspect the issues are these:
the "order" variable - is it correctly referenced in "join_orderID_orderDetails"?
"join_orderID_orderDetails" in "joined_dicts" - is that correctly referenced too?

Try the code below; I hope it helps a little.
Here I have used lists for your order and combined data instead of reading from files.
order = [{"orderID":"test1","orderPacked":"Yes","orderSubmitted":"Yes","orderVerified":"Yes","stage":1}]

combined = [
    {"barcode":"95590","name":"Ash","quantity":6,"orderID":"test1"},
    {"barcode":"95591","name":"Beat","quantity":6,"orderID":"test1"},
    {"barcode":"95592","name":"Cat","quantity":6,"orderID":"test1"},
    {"barcode":"95593","name":"Dog","quantity":6,"orderID":"test2"},
    {"barcode":"95594","name":"Scar","quantity":6,"orderID":"test2"}
]

def joinjson(repl, tobeCombined):
    newarr = []
    for data in tobeCombined:
        replData = getOrderData(repl, data['orderID'])
        if replData is not None:
            data.update(replData)
            newarr.append(data)
    return newarr

def getOrderData(order, orderID):
    for data in order:
        print("Data OrderID : ", data['orderID'])
        if data['orderID'] == orderID:
            return data

print(joinjson(order, combined))
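For completeness, the None results in the Beam version come from dict.update(), which mutates the dict in place and returns None, so the join function in the question returns None for every element. A minimal sketch of the fix, based on the question's own join function: update first, then return the dict itself.

def join_orderID_orderDetails(order, order_dict):
    # dict.update() returns None, so merge first and then return the merged dict
    order.update(order_dict[order['orderID']])
    return order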

Related

“IndexError: tuple index out of range” as part of data flow template job

Below is my Python code, which works absolutely fine.
from __future__ import absolute_import
import apache_beam as beam
import argparse
import pickle
import logging
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.io.gcp.internal.clients import bigquery
from datetime import date

today = date.today()
current_date = today.strftime("%Y%m%d")


def run(argv=None):
    parser = argparse.ArgumentParser()
    known_args, pipeline_args = parser.parse_known_args(argv)
    p = beam.Pipeline(options=PipelineOptions(pipeline_args))
    (p | 'ReadTable' >> beam.io.Read(beam.io.BigQuerySource(query='select DISTINCT(REPLACE(MOBILE,"+91 ","91")) from `whr-asia-datalake-nonprod.WHR_DATALAKE.C4C_CONSUMER_RAW`', use_standard_sql=True))
       | 'read values' >> beam.Map(lambda x: x.values())
       | 'CSV format' >> beam.Map(lambda row: '|'.join("WHIRLPOOL|WHR|" + str(column) + '|"' + "Hi, This msg is from Whirlpool DL" + '"' for column in row))
       | 'Write_to_GCS' >> beam.io.WriteToText('gs://whr-asia-datalake-dev-standard/outbound/Valuefirst/WHR_MOBILE_CNSNT_REQ' + '' + str(current_date), file_name_suffix='.csv', header='SENDER_ID|SHORTCODE|MOBILE_NUM|CONSENT_MSG'))
    p.run().wait_until_finish()


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    run()
I modified the above code to add a new requirement: create an empty ".done" file alongside every file created above. We added the function below to our job to create the empty file.
today = date.today()
current_date = today.strftime("%Y%m%d")


def create_done(argv=None):
    parser = argparse.ArgumentParser()
    known_args, pipeline_args = parser.parse_known_args(argv)
    p = beam.Pipeline(options=PipelineOptions(pipeline_args))
    (p | 'Create .done File' >> beam.io.WriteToText('gs://whr-asia-datalake-dev-standard/outbound/Valuefirst/Valuefirst' + '' + str(current_date), file_name_suffix='.done'))
    p.run().wait_until_finish()


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    create_done()
However, as soon as we add this new code for creating the empty .done file, the script fails with the error:
input_tag = transform_node.inputs[0].tag
IndexError: tuple index out of range
I am not able to paste the full traceback of the error. Please let me know if this helps.
WriteToText requires an input PCollection, but you are applying it directly to the Pipeline object. You need a data source, like ReadFromText or Create, in order to run your pipeline.
See https://beam.apache.org/documentation/programming-guide/ for more info about Pipelines and PCollections, as well as some examples of simple pipelines
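For example, here is a minimal sketch of the .done writer with such a source, using beam.Create (the GCS path is the one from the question; writing a single empty string simply produces a small marker file):

def create_done(argv=None):
    parser = argparse.ArgumentParser()
    known_args, pipeline_args = parser.parse_known_args(argv)
    p = beam.Pipeline(options=PipelineOptions(pipeline_args))
    (p
     | 'Start' >> beam.Create([''])  # one-element PCollection so WriteToText has an input
     | 'Create .done File' >> beam.io.WriteToText(
           'gs://whr-asia-datalake-dev-standard/outbound/Valuefirst/Valuefirst' + str(current_date),
           file_name_suffix='.done'))
    p.run().wait_until_finish()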

Read a csv file, clean it, then write out the result as a csv using Apache Beam dataflow

I would like to read a csv file, clean it, then write out the result as a csv using Apache Beam dataflow. The purpose is to make the file loadable into BigQuery. The cleaning rule is to simply escape a double quote with a double quote.
My cleaning rule works. I’m having trouble incorporating it into a pipeline. I am seeking advice on what my cleaning function should return and how to call it through the pipeline.
import apache_beam as beam
import csv
import logging
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.io import WriteToText

lines = p | ReadFromText(file_pattern="gs://dev/clean_input/input01.csv")


def parse_method(line):
    CSV_PARSING_KWARGS = {
        'doublequote': True,
        'escapechar': '\\',
        'quotechar': '"',
        'delimiter': ','
    }
    reader = csv.reader(csv_file, CSV_PARSING_KWARGS)
    for rec in reader:
        cw = csv.writer(out_file, escapechar='"', quoting=csv.QUOTE_MINIMAL)
        cw.writerow(rec)
        return rec


def run(region, project, bucket, temploc):
    argv = [
        # Passed in args
        '--region={}'.format(region),
        '--project={}'.format(project),
        '--temp_location={}'.format(temploc),
        # Constructs
        '--staging_location=gs://{}/clean_input/stg/'.format(bucket),
        # Mandatory constants
        '--job_name=cleammycsv',
        '--runner=DataflowRunner'
    ]
    options = PipelineOptions(flags=argv)
    pipeline = beam.Pipeline(options=options)

    clean_csv = (pipeline
        lines = lines | 'Read' >> beam.Map(parse_method)
        line = lines | 'Output to file' >> WriteToText(file_pattern="gs://dev/clean_output/output_file.csv")
    )
    pipeline.run()


if __name__ == '__main__':
    import argparse
    # Create the parser
    parser = argparse.ArgumentParser(description='Run the CSV cleaning pipeline')
    parser.add_argument('-r', '--region', help='Region ID where data flow job to run', default='australia-southeast1')
    parser.add_argument('-p', '--project', help='Unique project ID', required=True)
    parser.add_argument('-b', '--bucket', help='Bucket name', required=True)
    parser.add_argument('-t', '--temploc', help='Bucket name and folder', required=True)
    # Execute the parse_args() method
    args = vars(parser.parse_args())
    run(project=args['project'], bucket=args['bucket'], region=args['region'], temploc=args['temploc'])
I finally got something working which does the job.
import apache_beam as beam
import csv
import logging
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.io import WriteToText


def parse_file(element):
    for line in csv.reader([element], quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL):
        line = [s.replace('\"', '') for s in line]
        clean_line = '","'.join(line)
        final_line = '"' + clean_line + '"'
        return final_line


def run(region, project, bucket, temploc):
    argv = [
        # Passed in args
        '--region={}'.format(region),
        '--project={}'.format(project),
        '--temp_location={}'.format(temploc),
        # Constructs
        '--staging_location=gs://{}/clean_input/stg/'.format(bucket),
        # Mandatory constants
        '--job_name=cleammycsv',
        '--runner=DataflowRunner'
    ]
    filename_in = 'gs://{}/clean_input/IN_FILE.csv'.format(bucket)
    files_output = 'gs://{}/clean_output/OUT_FILE.csv'.format(bucket)

    options = PipelineOptions(flags=argv)
    pipeline = beam.Pipeline(options=options)

    clean_csv = (pipeline
        | 'Read input file' >> beam.io.ReadFromText(filename_in)
        | 'Parse file' >> beam.Map(parse_file)
        | 'writecsv' >> beam.io.WriteToText(files_output, num_shards=10)
    )
    pipeline.run()


if __name__ == '__main__':
    import argparse
    # Create the parser
    parser = argparse.ArgumentParser(description='Run the CSV cleaning pipeline')
    parser.add_argument('-r', '--region', help='Region ID where data flow job to run', required=True)
    parser.add_argument('-p', '--project', help='Unique project ID', required=True)
    parser.add_argument('-b', '--bucket', help='Bucket name', required=True)
    parser.add_argument('-t', '--temploc', help='Bucket name and folder', required=True)
    # Execute the parse_args() method
    args = vars(parser.parse_args())
    run(project=args['project'], bucket=args['bucket'], region=args['region'], temploc=args['temploc'])
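For a quick sanity check outside the pipeline, parse_file can be called directly on a sample line (the sample string below is made up for illustration):

sample = 'id,"name with ""quote""",value'
print(parse_file(sample))
# prints: "id","name with quote","value"
# i.e. embedded double quotes are stripped and every field is re-quoted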

Apache beam in Dataflow getting errors related to generator object is not subscriptable

I am trying to create my first pipeline in Dataflow. The same code runs when I execute it using the interactive Beam runner, but on Dataflow I get all sorts of errors, which are not making much sense to me.
I am getting JSON from Pub/Sub in the following format:
{"timestamp":1589992571906,"lastPageVisited":"https://kickassdataprojects.com/simple-and-complete-tutorial-on-simple-linear-regression/","pageUrl":"https://kickassdataprojects.com/","pageTitle":"Helping%20companies%20and%20developers%20create%20awesome%20data%20projects%20%7C%20Data%20Engineering/%20Data%20Science%20Blog","eventType":"Pageview","landingPage":0,"referrer":"direct","uiud":"31af5f22-4cc4-48e0-9478-49787dd5a19f","sessionId":322371}
Here is the code of my pipeline.
from __future__ import absolute_import
import apache_beam as beam
#from apache_beam.runners.interactive import interactive_runner
#import apache_beam.runners.interactive.interactive_beam as ib
import google.auth
from datetime import timedelta
import json
from datetime import datetime
from apache_beam import window
from apache_beam.transforms.trigger import AfterWatermark, AfterProcessingTime, AccumulationMode, AfterCount
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
import argparse
import logging
from time import mktime


def setTimestamp(elem):
    from apache_beam import window
    yield window.TimestampedValue(elem, elem['timestamp'])


def createTuples(elem):
    yield (elem["sessionId"], elem)


class WriteToBigQuery(beam.PTransform):
    """Generate, format, and write BigQuery table row information."""
    def __init__(self, table_name, dataset, schema, project):
        """Initializes the transform.
        Args:
          table_name: Name of the BigQuery table to use.
          dataset: Name of the dataset to use.
          schema: Dictionary in the format {'column_name': 'bigquery_type'}
          project: Name of the Cloud project containing BigQuery table.
        """
        # TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
        #super(WriteToBigQuery, self).__init__()
        beam.PTransform.__init__(self)
        self.table_name = table_name
        self.dataset = dataset
        self.schema = schema
        self.project = project

    def get_schema(self):
        """Build the output table schema."""
        return ', '.join('%s:%s' % (col, self.schema[col]) for col in self.schema)

    def expand(self, pcoll):
        return (
            pcoll
            | 'ConvertToRow' >> beam.Map(
                lambda elem: {col: elem[col] for col in self.schema})
            | beam.io.WriteToBigQuery(
                self.table_name, self.dataset, self.project, self.get_schema()))


class ParseSessionEventFn(beam.DoFn):
    """Parses the raw game event info into a Python dictionary.
    Each event line has the following format:
      username,teamname,score,timestamp_in_ms,readable_time
    e.g.:
      user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
    The human-readable time string is not used here.
    """
    def __init__(self):
        # TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
        #super(ParseSessionEventFn, self).__init__()
        beam.DoFn.__init__(self)

    def process(self, elem):
        #timestamp = mktime(datetime.strptime(elem["timestamp"], "%Y-%m-%d %H:%M:%S").utctimetuple())
        elem['sessionId'] = int(elem['sessionId'])
        elem['landingPage'] = int(elem['landingPage'])
        yield elem


class AnalyzeSessions(beam.DoFn):
    """Parses the raw game event info into a Python dictionary.
    Each event line has the following format:
      username,teamname,score,timestamp_in_ms,readable_time
    e.g.:
      user2_AsparagusPig,AsparagusPig,10,1445230923951,2015-11-02 09:09:28.224
    The human-readable time string is not used here.
    """
    def __init__(self):
        # TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
        #super(AnalyzeSessions, self).__init__()
        beam.DoFn.__init__(self)

    def process(self, elem, window=beam.DoFn.WindowParam):
        sessionId = elem[0]
        uiud = elem[1][0]["uiud"]
        count_of_events = 0
        pageUrl = []
        window_end = window.end.to_utc_datetime()
        window_start = window.start.to_utc_datetime()
        session_duration = window_end - window_start
        for rows in elem[1]:
            if rows["landingPage"] == 1:
                referrer = rows["refererr"]
            pageUrl.append(rows["pageUrl"])
        return {
            "pageUrl": pageUrl,
            "eventType": "pageview",
            "uiud": uiud,
            "sessionId": sessionId,
            "session_duration": session_duration,
            "window_start": window_start
        }


def run(argv=None, save_main_session=True):
    parser = argparse.ArgumentParser()
    parser.add_argument('--topic', type=str, help='Pub/Sub topic to read from')
    parser.add_argument(
        '--subscription', type=str, help='Pub/Sub subscription to read from')
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='BigQuery Dataset to write tables to. Must already exist.')
    parser.add_argument(
        '--table_name',
        type=str,
        default='game_stats',
        help='The BigQuery table name. Should not already exist.')
    parser.add_argument(
        '--fixed_window_duration',
        type=int,
        default=60,
        help='Numeric value of fixed window duration for user analysis, in minutes')
    parser.add_argument(
        '--session_gap',
        type=int,
        default=5,
        help='Numeric value of gap between user sessions, in minutes')
    parser.add_argument(
        '--user_activity_window_duration',
        type=int,
        default=30,
        help='Numeric value of fixed window for finding mean of user session duration, in minutes')
    args, pipeline_args = parser.parse_known_args(argv)
    session_gap = args.session_gap * 60

    options = PipelineOptions(pipeline_args)
    # Set the pipeline mode to stream the data from Pub/Sub.
    options.view_as(StandardOptions).streaming = True
    options.view_as(StandardOptions).runner = 'DataflowRunner'
    options.view_as(SetupOptions).save_main_session = save_main_session

    p = beam.Pipeline(options=options)

    lines = (p
        | beam.io.ReadFromPubSub(
            subscription="projects/phrasal-bond-274216/subscriptions/rrrr")
        | 'decode' >> beam.Map(lambda x: x.decode('utf-8'))
        | beam.Map(lambda x: json.loads(x))
        | beam.ParDo(ParseSessionEventFn())
    )

    next = (lines
        | 'AddEventTimestamps' >> beam.Map(setTimestamp)
        | 'Create Tuples' >> beam.Map(createTuples)
        | beam.Map(print)
        | 'Window' >> beam.WindowInto(window.Sessions(15))
        | 'group by key' >> beam.GroupByKey()
        | 'analyze sessions' >> beam.ParDo(AnalyzeSessions())
        | 'WriteTeamScoreSums' >> WriteToBigQuery(
            args.table_name,
            {
                "uiud": 'STRING',
                "session_duration": 'INTEGER',
                "window_start": 'TIMESTAMP'
            },
            options.view_as(GoogleCloudOptions).project)
    )

    next1 = (next
        | 'Create Tuples' >> beam.Map(createTuples)
        | beam.Map(print)
    )

    result = p.run()
    # result.wait_till_termination()


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    run()
In the code above, I get the error 'generator' object is not subscriptable when I try to create tuples in my pipeline. I get that using yield creates a generator object, but even return doesn't work; it just breaks my pipeline.
apache_beam.coders.coder_impl.SequenceCoderImpl.get_estimated_size_and_observables File "sessiontest1.py", line 23, in createTuples TypeError: 'generator' object is not subscriptable [while running 'generatedPtransform-148']
Here is the code I use to execute the pipeline.
python3 sessiontest1.py --project phrasal-bond-xxxxx --region us-central1 \
    --subscription projects/phrasal-bond-xxxxx/subscriptions/xxxxxx \
    --dataset sessions_beam --runner DataflowRunner \
    --temp_location gs://webevents/sessions \
    --service_account_email=xxxxxxxx-compute@developer.gserviceaccount.com
Any help on this would be appreciated. Thanks guys; this is my first time working on Dataflow, so I am not sure what I am missing here.
Other errors I was getting before, which are sorted now:
a) I got the error that window is not defined from the line beam.Map(lambda elem: window.TimestampedValue(elem, elem['timestamp'])).
If I use beam.window instead, it says beam is not defined; as I understand it, beam should be provided by Dataflow.
NameError: name 'window' is not defined [while running 'generatedPtransform-3820']
You just need to import the modules in the function itself.
Getting a 'generator' object is not subscriptable error on createTuples indicates that when you try to do elem["sessionID"], the elem is already a generator. The previous transform you do is setTimestamp, which is also using yield and therefore outputting a generator that gets passed as the element to createTuples.
The solution here is to implement setTimestamp and createTuples with return instead of yield. Return the element you want to receive in the following transform.
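For example, a minimal sketch of those two functions rewritten with return, using the same element fields as in the question:

def setTimestamp(elem):
    from apache_beam import window
    # return a single TimestampedValue instead of yielding a generator
    return window.TimestampedValue(elem, elem['timestamp'])

def createTuples(elem):
    # return the (key, value) tuple directly so the next transform
    # receives a tuple, not a generator
    return (elem["sessionId"], elem)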
You should set save_main_session = True in your code (try to uncomment that line in your code). See more about NameError here: https://cloud.google.com/dataflow/docs/resources/faq

Dataflow/apache beam - how to access current filename when passing in pattern?

I have seen this question answered before on Stack Overflow (https://stackoverflow.com/questions/29983621/how-to-get-filename-when-using-file-pattern-match-in-google-cloud-dataflow), but not since Apache Beam added splittable DoFn functionality for Python. How would I access the filename of the current file being processed when passing a file pattern for a GCS bucket?
I want to pass the filename into my transform function:
with beam.Pipeline(options=pipeline_options) as p:
    lines = p | ReadFromText('gs://url to file')
    data = (
        lines
        | 'Jsonify' >> beam.Map(jsonify)
        | 'Unnest' >> beam.FlatMap(unnest)
        | 'Write to BQ' >> beam.io.Write(beam.io.BigQuerySink(
            'project_id:dataset_id.table_name', schema=schema,
            create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
            write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))
    )
Ultimately, what I want to do is pass the filename into my transform function when I transform each row of the JSON (see this), and then use the filename to do a lookup in a different BQ table to get a value. I think once I manage to get the filename, I will be able to figure out the side input part in order to do the lookup in the BQ table and get the unique value.
I tried to implement a solution based on the previously cited case. There, as well as in other approaches such as this one, they also get a list of file names but load the whole file into a single element, which might not scale well with large files. Therefore, I looked into adding the filename to each record.
As input I used two csv files:
$ gsutil cat gs://$BUCKET/countries1.csv
id,country
1,sweden
2,spain
$ gsutil cat gs://$BUCKET/countries2.csv
id,country
3,italy
4,france
Using GCSFileSystem.match we can access metadata_list to retrieve FileMetadata containing the file path and size in bytes. In my example:
[FileMetadata(gs://BUCKET_NAME/countries1.csv, 29),
FileMetadata(gs://BUCKET_NAME/countries2.csv, 29)]
The code is:
result = [m.metadata_list for m in gcs.match(['gs://{}/countries*'.format(BUCKET)])]
We will read each of the matching files into a different PCollection. As we don't know the number of files a priori, we need to programmatically create a list of names for each PCollection (p0, p1, ..., pN-1) and ensure that we have unique labels for each step ('Read file 0', 'Read file 1', etc.):
variables = ['p{}'.format(i) for i in range(len(result))]
read_labels = ['Read file {}'.format(i) for i in range(len(result))]
add_filename_labels = ['Add filename {}'.format(i) for i in range(len(result))]
Then we proceed to read each different file into its corresponding PCollection with ReadFromText and then we call the AddFilenamesFn ParDo to associate each record with the filename.
for i in range(len(result)):
    globals()[variables[i]] = (p
        | read_labels[i] >> ReadFromText(result[i].path)
        | add_filename_labels[i] >> beam.ParDo(AddFilenamesFn(), result[i].path))
where AddFilenamesFn is:
class AddFilenamesFn(beam.DoFn):
    """ParDo to output a dict with filename and row"""
    def process(self, element, file_path):
        file_name = file_path.split("/")[-1]
        yield {'filename': file_name, 'row': element}
My first approach was using a Map function directly which results in simpler code. However, result[i].path was resolved at the end of the loop and each record was incorrectly mapped to the last file of the list:
globals()[variables[i]] = p | read_labels[i] >> ReadFromText(result[i].path) | add_filename_labels[i] >> beam.Map(lambda elem: (result[i].path, elem))
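If one wanted to keep the Map approach, a common Python workaround for that late-binding behaviour (not used in this solution) is to bind the path as a default argument so it is captured when the lambda is defined; a minimal sketch:

for i in range(len(result)):
    globals()[variables[i]] = (p
        | read_labels[i] >> ReadFromText(result[i].path)
        | add_filename_labels[i] >> beam.Map(
            lambda elem, path=result[i].path: (path, elem)))  # path bound per iteration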
Finally, we flatten all the PCollections into one:
merged = [globals()[variables[i]] for i in range(len(result))] | 'Flatten PCollections' >> beam.Flatten()
and we check the results by logging the elements:
INFO:root:{'filename': u'countries2.csv', 'row': u'id,country'}
INFO:root:{'filename': u'countries2.csv', 'row': u'3,italy'}
INFO:root:{'filename': u'countries2.csv', 'row': u'4,france'}
INFO:root:{'filename': u'countries1.csv', 'row': u'id,country'}
INFO:root:{'filename': u'countries1.csv', 'row': u'1,sweden'}
INFO:root:{'filename': u'countries1.csv', 'row': u'2,spain'}
I tested this with both DirectRunner and DataflowRunner for Python SDK 2.8.0.
I hope this addresses the main issue here and you can continue by integrating BigQuery into your full use case now. You might need to use the Python Client Library for that; I wrote a similar Java example.
Full code:
import argparse, logging
from operator import add

import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.io import ReadFromText
from apache_beam.io.filesystem import FileMetadata
from apache_beam.io.filesystem import FileSystem
from apache_beam.io.gcp.gcsfilesystem import GCSFileSystem


class GCSFileReader:
    """Helper class to read gcs files"""
    def __init__(self, gcs):
        self.gcs = gcs


class AddFilenamesFn(beam.DoFn):
    """ParDo to output a dict with filename and row"""
    def process(self, element, file_path):
        file_name = file_path.split("/")[-1]
        # yield (file_name, element) # use this to return a tuple instead
        yield {'filename': file_name, 'row': element}


# just logging output to visualize results
def write_res(element):
    logging.info(element)
    return element


def run(argv=None):
    parser = argparse.ArgumentParser()
    known_args, pipeline_args = parser.parse_known_args(argv)
    p = beam.Pipeline(options=PipelineOptions(pipeline_args))

    gcs = GCSFileSystem(PipelineOptions(pipeline_args))
    gcs_reader = GCSFileReader(gcs)

    # in my case I am looking for files that start with 'countries'
    BUCKET = 'BUCKET_NAME'
    result = [m.metadata_list for m in gcs.match(['gs://{}/countries*'.format(BUCKET)])]
    result = reduce(add, result)

    # create each input PCollection name and unique step labels
    variables = ['p{}'.format(i) for i in range(len(result))]
    read_labels = ['Read file {}'.format(i) for i in range(len(result))]
    add_filename_labels = ['Add filename {}'.format(i) for i in range(len(result))]

    # load each input file into a separate PCollection and add filename to each row
    for i in range(len(result)):
        # globals()[variables[i]] = p | read_labels[i] >> ReadFromText(result[i].path) | add_filename_labels[i] >> beam.Map(lambda elem: (result[i].path, elem))
        globals()[variables[i]] = p | read_labels[i] >> ReadFromText(result[i].path) | add_filename_labels[i] >> beam.ParDo(AddFilenamesFn(), result[i].path)

    # flatten all PCollections into a single one
    merged = [globals()[variables[i]] for i in range(len(result))] | 'Flatten PCollections' >> beam.Flatten() | 'Write results' >> beam.Map(write_res)

    p.run()


if __name__ == '__main__':
    run()
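On the follow-up BigQuery lookup mentioned above, here is a minimal, hypothetical sketch of doing the per-filename lookup inside a DoFn with the BigQuery Python client; the table and column names are made up for illustration. For more than a handful of files, reading the lookup table once and passing it as a side input (as the question suggests) would avoid issuing one query per element.

import apache_beam as beam
from google.cloud import bigquery

class LookupValueFn(beam.DoFn):
    """Hypothetical ParDo: enrich each {'filename', 'row'} dict with a value
    looked up in a (made-up) BigQuery table keyed by filename."""
    def start_bundle(self):
        # create the client on the worker, once per bundle
        self.client = bigquery.Client()

    def process(self, element):
        query = (
            "SELECT value FROM `my_project.my_dataset.file_lookup` "
            "WHERE filename = @fname")
        job_config = bigquery.QueryJobConfig(
            query_parameters=[bigquery.ScalarQueryParameter(
                "fname", "STRING", element['filename'])])
        rows = list(self.client.query(query, job_config=job_config).result())
        element['lookup_value'] = rows[0].value if rows else None
        yield element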
I had to read some metadata files and use the filename for further processing.
I struggled until I finally came across apache_beam.io.ReadFromTextWithFilename.
def run(argv=None, save_main_session=True):
    import typing
    import apache_beam as beam
    from apache_beam.options.pipeline_options import PipelineOptions
    from apache_beam.io import ReadFromTextWithFilename

    class ExtractMetaData(beam.DoFn):
        def process(self, element):
            filename, meta = element
            image_name = filename.split("/")[-2]
            labels = json.loads(meta)["labels"]
            image = {"image_name": image_name, "labels": labels}
            print(image)
            return image

    parser = argparse.ArgumentParser()
    known_args, pipeline_args = parser.parse_known_args(argv)
    pipeline_options = PipelineOptions(pipeline_args)

    with beam.Pipeline(options=pipeline_options) as pipeline:
        meta = (
            pipeline
            | "Read Metadata" >> ReadFromTextWithFilename(f'gs://{BUCKET}/dev-set/**/*metadata.json')
            | beam.ParDo(ExtractMetaData())
        )
    pipeline.run()

google dataflow read from spanner

I am trying to read a table from a Google Spanner database and write it to a text file to make a backup, using Google Dataflow with the Python SDK.
I have written the following script:
from __future__ import absolute_import

import argparse
import itertools
import logging
import re
import time
import datetime as dt
import logging

import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.io import WriteToText
from apache_beam.io.range_trackers import OffsetRangeTracker, UnsplittableRangeTracker
from apache_beam.metrics import Metrics
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions, SetupOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions

from google.cloud.spanner.client import Client
from google.cloud.spanner.keyset import KeySet

BUCKET_URL = 'gs://my_bucket'
OUTPUT = '%s/output/' % BUCKET_URL
PROJECT_ID = 'my_project'
INSTANCE_ID = 'my_instance'
DATABASE_ID = 'my_db'
JOB_NAME = 'spanner-backup'
TABLE = 'my_table'


class SpannerSource(iobase.BoundedSource):
    def __init__(self):
        logging.info('Enter __init__')
        self.spannerOptions = {
            "id": PROJECT_ID,
            "instance": INSTANCE_ID,
            "database": DATABASE_ID
        }
        self.SpannerClient = Client

    def estimate_size(self):
        logging.info('Enter estimate_size')
        return 1

    def get_range_tracker(self, start_position=None, stop_position=None):
        logging.info('Enter get_range_tracker')
        if start_position is None:
            start_position = 0
        if stop_position is None:
            stop_position = OffsetRangeTracker.OFFSET_INFINITY
        range_tracker = OffsetRangeTracker(start_position, stop_position)
        return UnsplittableRangeTracker(range_tracker)

    def read(self, range_tracker):  # This is not called when using the DataflowRunner!
        logging.info('Enter read')
        # instantiate spanner client
        spanner_client = self.SpannerClient(self.spannerOptions["id"])
        instance = spanner_client.instance(self.spannerOptions["instance"])
        database = instance.database(self.spannerOptions["database"])

        # read from table
        table_fields = database.execute_sql("SELECT t.column_name FROM information_schema.columns AS t WHERE t.table_name = '%s'" % TABLE)
        table_fields.consume_all()
        self.columns = [x[0] for x in table_fields]
        keyset = KeySet(all_=True)
        results = database.read(table=TABLE, columns=self.columns, keyset=keyset)

        # iterate over rows
        results.consume_all()
        for row in results:
            JSON_row = {
                self.columns[i]: row[i] for i in range(len(self.columns))
            }
            yield JSON_row

    def split(self, start_position=None, stop_position=None):
        # this should not be called since the source is unsplittable
        logging.info('Enter split')
        if start_position is None:
            start_position = 0
        if stop_position is None:
            stop_position = 1
        # Because the source is unsplittable (for now), only a single source is returned
        yield iobase.SourceBundle(
            weight=1,
            source=self,
            start_position=start_position,
            stop_position=stop_position)


def run(argv=None):
    """Main entry point"""
    pipeline_options = PipelineOptions()
    google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
    google_cloud_options.project = PROJECT_ID
    google_cloud_options.job_name = JOB_NAME
    google_cloud_options.staging_location = '%s/staging' % BUCKET_URL
    google_cloud_options.temp_location = '%s/tmp' % BUCKET_URL
    #pipeline_options.view_as(StandardOptions).runner = 'DirectRunner'
    pipeline_options.view_as(StandardOptions).runner = 'DataflowRunner'

    p = beam.Pipeline(options=pipeline_options)

    output = p | 'Get Rows from Spanner' >> beam.io.Read(SpannerSource())
    iso_datetime = dt.datetime.now().replace(microsecond=0).isoformat()
    output | 'Store in GCS' >> WriteToText(file_path_prefix=OUTPUT + iso_datetime + '-' + TABLE, file_name_suffix='')  # if this line is commented, job completes but does not do anything

    result = p.run()
    result.wait_until_finish()


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    run()
However, this script runs correctly only on the DirectRunner: when I run it on the DataflowRunner, it runs for a while without any output before exiting with the error:
"Executing failure step failure14 [...] Workflow failed. Causes: [...] The worker lost contact with the service."
Sometimes it just goes on forever without creating any output.
Moreover, if I comment out the 'output = ...' line, the job completes but does not actually read the data.
It also appears that the DataflowRunner calls the source's 'estimate_size' function, but not 'read' or 'get_range_tracker'.
Does anyone have any idea what may cause this?
I know there is a (more complete) Java SDK with an experimental Spanner source/sink available, but if possible I'd rather stick with Python.
Thanks
Google has now added support for backing up Spanner with Dataflow; you can choose the related template when creating a Dataflow job.
For more: https://cloud.google.com/blog/products/gcp/cloud-spanner-adds-import-export-functionality-to-ease-data-movement
I have reworked my code following the suggestion to simply use a ParDo instead of the BoundedSource class. As a reference, here is my solution; I am sure there are many ways to improve on it, and I would be happy to hear opinions.
In particular, I am surprised that I have to create a dummy PColl when starting the pipeline (if I don't, I get the error
AttributeError: 'PBegin' object has no attribute 'windowing'
which I could not work around). The dummy PColl feels a bit like a hack.
from __future__ import absolute_import

import datetime as dt
import logging

import apache_beam as beam
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions, SetupOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions

from google.cloud.spanner.client import Client
from google.cloud.spanner.keyset import KeySet

BUCKET_URL = 'gs://my_bucket'
OUTPUT = '%s/some_folder/' % BUCKET_URL
PROJECT_ID = 'my_project'
INSTANCE_ID = 'my_instance'
DATABASE_ID = 'my_database'
JOB_NAME = 'my_jobname'


class ReadTables(beam.DoFn):
    def __init__(self, project, instance, database):
        super(ReadTables, self).__init__()
        self._project = project
        self._instance = instance
        self._database = database

    def process(self, element):
        # get list of tables in the database
        table_names_row = Client(self._project).instance(self._instance).database(self._database).execute_sql('SELECT t.table_name FROM information_schema.tables AS t')
        for row in table_names_row:
            if row[0] in [u'COLUMNS', u'INDEXES', u'INDEX_COLUMNS', u'SCHEMATA', u'TABLES']:  # skip these
                continue
            yield row[0]


class ReadSpannerTable(beam.DoFn):
    def __init__(self, project, instance, database):
        super(ReadSpannerTable, self).__init__()
        self._project = project
        self._instance = instance
        self._database = database

    def process(self, element):
        # first read the columns present in the table
        table_fields = Client(self._project).instance(self._instance).database(self._database).execute_sql("SELECT t.column_name FROM information_schema.columns AS t WHERE t.table_name = '%s'" % element)
        columns = [x[0] for x in table_fields]

        # next, read the actual data in the table
        keyset = KeySet(all_=True)
        results_streamed_set = Client(self._project).instance(self._instance).database(self._database).read(table=element, columns=columns, keyset=keyset)

        for row in results_streamed_set:
            JSON_row = {columns[i]: row[i] for i in xrange(len(columns))}
            yield (element, JSON_row)  # output pairs of (table_name, data)


def run(argv=None):
    """Main entry point"""
    pipeline_options = PipelineOptions()
    pipeline_options.view_as(SetupOptions).save_main_session = True
    pipeline_options.view_as(SetupOptions).requirements_file = "requirements.txt"
    google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
    google_cloud_options.project = PROJECT_ID
    google_cloud_options.job_name = JOB_NAME
    google_cloud_options.staging_location = '%s/staging' % BUCKET_URL
    google_cloud_options.temp_location = '%s/tmp' % BUCKET_URL
    pipeline_options.view_as(StandardOptions).runner = 'DataflowRunner'

    p = beam.Pipeline(options=pipeline_options)

    init = p | 'Begin pipeline' >> beam.Create(["test"])  # have to create a dummy transform to initialize the pipeline, surely there is a better way?
    tables = init | 'Get tables from Spanner' >> beam.ParDo(ReadTables(PROJECT_ID, INSTANCE_ID, DATABASE_ID))  # read the tables in the db
    rows = (tables | 'Get rows from Spanner table' >> beam.ParDo(ReadSpannerTable(PROJECT_ID, INSTANCE_ID, DATABASE_ID))  # for each table, read the entries
                   | 'Group by table' >> beam.GroupByKey()
                   | 'Formatting' >> beam.Map(lambda (table_name, rows): (table_name, list(rows))))  # have to force to list here (DataflowRunner produces _UnwindowedValues)

    iso_datetime = dt.datetime.now().replace(microsecond=0).isoformat()
    rows | 'Store in GCS' >> WriteToText(file_path_prefix=OUTPUT + iso_datetime, file_name_suffix='')

    result = p.run()
    result.wait_until_finish()


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    run()
