Unable to run Python script in PowerBI - python

I need to connect to an API that requires a SHA-1 hash built from a token and a session id.
I call the API for the token and session id, but to make the authenticated call I need to run a Python script. I wanted to use the table returned by the authorization call as a parameter in the Python script:
let
api_key = "_api_key",
Source = Json.Document(Web.Contents("https://xxxxxxxxxxx/api/public/json/_Api/auth_call/_api_method/getToken?_api_auth=key", [ApiKeyName = "api_key"])),
#"Converted to Table" = Record.ToTable(Source),
Value = #"Converted to Table"{0}[Value],
#"Converted to Table1" = Record.ToTable(Value),
#"Transposed Table" = Table.Transpose(#"Converted to Table1"),
#"Promoted Headers" = Table.PromoteHeaders(#"Transposed Table", [PromoteAllScalars=true]),
#"Changed Type" = Table.TransformColumnTypes(#"Promoted Headers",{{"token", type text}, {"session_id", type text}}),
#"Run Python script" = Python.Execute("# 'dataset' holds the input data for this script#(lf)#(lf)import requests#(lf)import pandas as pd#(lf)from hashlib import sha1#(lf)#(lf)def LSGetData(sufix, limit=all, object_type='contact', additionallData ={}):#(lf) host = ""https://xxxxxxxxxxxxx/api/public/json/""#(lf) authHost = ""_Api/auth_call/_api_method/getToken""#(lf) api_key = 'xxxxxxxxxxxxx'#(lf) api_secret = 'xxxxxxxxxxxxx'#(lf)#(lf) data = {'_api_auth': ""key"", '_api_key': api_key}#(lf)#(lf) token = dataset['token']#(lf) session_id = dataset['session_id']#(lf)#(lf) api_sha = sha1((api_key + token + api_secret).encode('utf-8')).hexdigest()#(lf) data = {'_api_auth': ""key"", '_api_key': api_key, '_api_sha': api_sha, '_api_session': session_id, 'limit': limit}#(lf)#(lf) if sufix == 'kontakty':#(lf) data['type'] = 'contact'#(lf) elif sufix == 'firmy':#(lf) data['type'] = 'company'#(lf) elif sufix == 'przestrzenie':#(lf) data['type'] = 'space'#(lf) elif sufix == 'tablica':#(lf) data['object type'] = object_type # 'contact', 'company'. 'deal', 'space'#(lf)#(lf) for key, data in additionallData.keys():#(lf) data[key] = data#(lf)#(lf) lsq = requests.post(host + typeOfData[sufix], data=data)#(lf) return lsq.json()#(lf)#(lf)#(lf)def CreateDataFrame(lsdata):#(lf) if sufix == 'kontakty':#(lf) df = pd.DataFrame(lsdata['data']['contact'])#(lf) elif sufix == 'firmy':#(lf) df = pd.DataFrame(lsdata['data']['company'])#(lf) elif sufix == 'szanse':#(lf) df = pd.DataFrame(lsdata['data']['deal'])#(lf) elif sufix == 'tablica':#(lf) df = pd.DataFrame(lsdata['data']['items'])#(lf) elif sufix == 'zadania':#(lf) df = pd.DataFrame(lsdata['data']['todo'])#(lf) elif sufix == 'przestrzenie':#(lf) df = pd.DataFrame(lsdata['data']['space'])#(lf) return df#(lf)#(lf)#(lf)def DataFiler(dataFrame):#(lf) #filter dataFrame here#(lf) dataFrame = dataFrame[dataFrame.owner_name.notnull()]#(lf) return dataFrame#(lf)#(lf)typeOfData = {#(lf) 'kontakty': 'Contact/getAll',#(lf) 'firmy': 'Contact/getAll',#(lf) 'szanse': 'Deal/getAll',#(lf) 'przestrzenie': 'Space/getAll',#(lf) 'zadania': 'Todo/getTodoObjects',#(lf) 'tablica': 'Wall/getList'#(lf)}#(lf)#(lf)sufix = 'firmy'#(lf)#(lf)lsdata = LSGetData(sufix)#(lf)lsDataFrame = CreateDataFrame(lsdata)#(lf)fileredlsLSDataFrame = DataFiler(lsDataFrame)#(lf)#(lf)print(lsDataFrame)",[dataset=#"Changed Type"])
in
#"Run Python script"
Unfortunately this doesn't work.
How do I use "dataset" in this script to make it work?

Related

'timestamp' messages appeared on my terminal, but i didn't code print('timestamp')

I made a bot for coin trading, and I run it from an iTerm2 terminal. But there's a problem I don't understand.
I run the same function for several coins, changing only the parameters (symbol name, etc.).
On the terminal window for one coin, a bare 'timestamp' message appears. I can't tell whether it's an error, because there is no error message, and I didn't write print('timestamp') anywhere in my code. The terminal windows for the other coins show no such message. Sometimes the 'timestamp' message disappears, even though I didn't change anything.
I know my explanation may not be enough, but if you have any idea about this kind of situation, please tell me. I'm using Python. Thank you in advance!
def runBot(symbol, second=int):
    try:
        subtitle = 'B'
        fourth_interval = '30m'
        symbol = symbol
        path = path
        df_1 = callChart(exchange=exchange, symbol=symbol, interval='15m')
        df_2 = callChart(exchange=exchange, symbol=symbol, interval='5m')
        df_3 = callChart(exchange=exchange, symbol=symbol, interval='1m')
        df_4 = callChart(exchange=exchange, symbol=symbol, interval='30m')
        df_5 = callChart(exchange=exchange, symbol=symbol, interval='1h')
        df_6 = callChart(exchange=exchange, symbol=symbol, interval='4h')
        df_7 = callChart(exchange=exchange, symbol=symbol, interval='2h')
        n4hma2 = df_6.ma20[-1]
        n4hma6 = df_6.ma60[-1]
        n4hma8 = df_6.ma80[-1]
        p4h_rsi = df_6.rsi[-2]
        p4h_open = df_6.open[-2]
        p4h_close = df_6.close[-2]
        n1hma2 = df_5.ma20[-1]
        n1hma6 = df_5.ma60[-1]
        n1hma8 = df_5.ma80[-1]
        n1h_low = df_5.low[-1]
        n1h_high = df_5.high[-1]
        n1h_open = df_5.open[-1]
        p1h_rsi = df_5.rsi[-2]
        p1hma2 = df_5.ma20[-2]
        p1hma6 = df_5.ma60[-2]
        p1hma8 = df_5.ma80[-2]
        p1h_close = df_5.close[-2]
        n30ma2 = df_4.ma20[-1]
        n30ma6 = df_4.ma60[-1]
        n30ma8 = df_4.ma80[-1]
        n30_high = df_4.high[-1]
        n30_low = df_4.low[-1]
        n30_open = df_4.open[-1]
        p30_rsi = df_4.rsi[-2]
        p30_rsi = df_4.rsi[-2]
        p30_close = df_4.close[-2]
        pp30_close = df_4.close[-3]
        ppp30_close = df_4.close[-4]
        p30ma2 = df_4.ma20[-2]
        p30ma6 = df_4.ma60[-2]
        p30ma8 = df_4.ma80[-2]
        pp30ma2 = df_4.ma20[-4]
        pp30ma6 = df_4.ma60[-4]
        pp30ma8 = df_4.ma80[-4]
        ppp30ma2 = df_4.ma20[-4]
        ppp30ma6 = df_4.ma60[-4]
        ppp30ma8 = df_4.ma80[-4]
        p4_30ma2 = df_4.ma20[-5]
        p4_30ma6 = df_4.ma60[-5]
        p4_30ma8 = df_4.ma80[-5]
        n_price = df_1.close[-1]
        n15ma2 = df_1.ma20[-1]
        n15ma6 = df_1.ma60[-1]
        n15ma8 = df_1.ma80[-1]
        p15_close = df_1.close[-2]
        p15_open = df_1.open[-2]
        n15_high = df_1.high[-1]
        n15_low = df_1.low[-1]
        p15ma2 = df_1.ma20[-2]
        p15ma6 = df_1.ma60[-2]
        p15ma8 = df_1.ma80[-2]
        n5ma2 = df_2.ma20[-1]
        n5ma6 = df_2.ma60[-1]
        n5ma8 = df_2.ma80[-1]
        n5_high = df_2.high[-1]
        n5_low = df_2.low[-1]
        p5ma2 = df_2.ma20[-2]
        p5ma6 = df_2.ma60[-2]
        p5ma8 = df_2.ma80[-2]
        p5_close = df_2.close[-2]
        n1ma2 = df_3.ma20[-1]
        n1ma6 = df_3.ma60[-1]
        n1ma8 = df_3.ma80[-1]
        p1m_close = df_3.close[-2]
        p1ma2 = df_3.ma20[-2]
        p1ma6 = df_3.ma60[-2]
        p1ma8 = df_3.ma80[-2]
        n2hma2 = df_7.ma20[-1]
        n2hma6 = df_7.ma60[-1]
        n2hma8 = df_7.ma80[-1]
        now = datetime.datetime.utcnow() + datetime.timedelta(hours=9)
        now_date = datetime.date(now.year, now.month, now.day)
        now_time = datetime.time(now.hour, now.minute, now.second)
        what_day = now.weekday()
        timestamp = time.mktime(datetime.datetime.strptime(str(f'{now_date} {now_time}'), '%Y-%m-%d %H:%M:%S').timetuple())
        mt4 = datetime.datetime.utcnow() + datetime.timedelta(hours=3)
        mt4_date = datetime.date(mt4.year, mt4.month, mt4.day)
        mt4_time = datetime.time(mt4.hour, mt4.minute, mt4.second)
        total_balance = exchange.fetch_balance()['USDT']['total']
        total_balance2 = exchange2.fetch_balance()['USDT']['total']
        total_balance5 = exchange5.fetch_balance()['USDT']['total']
        print(colors.fg.blue, f'{subtitle}', colors.reset, colors.fg.green, f'{symbol}', colors.reset, f'{n_price}', colors.fg.yellow, f'{now_date} {now_time}', colors.reset)
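One guess, since nothing in the shown code prints 'timestamp': printing an exception object shows only its message, and for a KeyError that message is just the missing key in quotes. So if the caller of runBot has something like except Exception as e: print(e), a KeyError('timestamp') raised for that one coin (for example, from an empty or partial exchange response) would print exactly 'timestamp' and nothing else. A tiny reproduction:

# Printing a KeyError shows only the missing key, in quotes.
try:
    candle = {}                  # e.g. an empty/partial exchange response
    ts = candle['timestamp']     # raises KeyError: 'timestamp'
except Exception as e:
    print(e)                     # output: 'timestamp'

Printing repr(e) instead, or calling traceback.print_exc() in the except block, would show the real error.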

peewee always displays None, but the user is in the database

The screenshot shows that the user is in the database, but peewee thinks differently :c
[screenshot: user in database]
[screenshot: console input]
@bot.message_handler(func = lambda message: message.text in ['/start', '🏠 Главное меню 🏠'])
def start(message) -> None:
    try:
        check = users.get_or_none(users.user_id == message.from_user.id)
        print(check)
        if check == None:
            logger.info('NEW user!')
            short_id = random.randint(100000, 999999)
            try:
                refer_id = message.text.split()[1]
                refer_id = int(refer_id)
            except:
                refer_id = 0
            passw = generate_passw(len = 12)
            users.create(user_id = message.from_user.id, short_id = short_id, passw = passw, plan = 'net', refer_id = refer_id, balance = 0.0)
            bot.send_message(chat_id = message.from_user.id, text = welcome_new, parse_mode = 'Markdown', reply_markup = start_)
        else:
            bot.send_message(chat_id = message.from_user.id, text = welcome_back, parse_mode = 'Markdown', reply_markup = start_)
    except:
        logger.exception('uwuth')
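Two common causes worth ruling out, since get_or_none returning None only means the WHERE clause matched nothing: the bot may be connected to a different database file or instance than the one in the screenshot, or users.user_id may be a text column while message.from_user.id is an int, so the comparison never matches. Both are easy to check; the model and field names below are the ones from the question:

# Print the SQL peewee actually generates, to compare against the screenshot:
query = users.select().where(users.user_id == message.from_user.id)
print(query.sql())        # -> (sql_string, params)

# If user_id is stored as text, comparing against a string may be what matches:
check = users.get_or_none(users.user_id == str(message.from_user.id))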

"sqlalchemy.exc.InternalError: (psycopg2.errors.InFailedSqlTransaction) current transaction is aborted

I wrote an API using Flask-SQLAlchemy that sometimes throws the error "sqlalchemy.exc.InternalError: (psycopg2.errors.InFailedSqlTransaction) current transaction is aborted, commands ignored until end of transaction block". I'm using PostgreSQL in a Docker container, and Ass is the sample model.
Here is my code.
@APP.route('/get_ass_info/<int:ass_id>', methods=["GET"])
def get_ass_info(ass_id):
    item = Ass.query.filter_by(main_ass_id=ass_id).first()
    ag_list = []
    if item:
        if item.sponsor != 0:
            sp_obj = Ass.query.filter_by(main_ass_id=item.sponsor).first()
            sp_name = sp_obj.name
        else:
            sp_name = 'NA'
        dob_new = datetime.datetime.strptime(str(item.dob), "%Y-%m-%d")
        today = datetime.date.today()
        age = today.year - dob_new.year
        st_obj = State.query.filter_by(id=item.ag_state).first()
        if st_obj:
            state_name = st_obj.name
            state_id = st_obj.id
            old_state_id = st_obj.old_state_id
        else:
            state_name = ''
            state_id = 0
            old_state_id = 0
        dist_obj = District.query.filter_by(id=item.ag_district).first()
        if dist_obj:
            dist_name = dist_obj.name
            dist_id = dist_obj.id
            old_dist_id = dist_obj.old_district_id
        else:
            dist_name = ''
            dist_id = 0
            old_dist_id = 0
        ass_dict = {'ass_id': item.id, 'name': item.name,
                    'rank': item.asslevel.name if item.agentlevel_id != None else '',
                    'city': item.city if item.city != 'null' and item.city != None and item.city != 'None' else '',
                    'mobile': item.mobile_no, 'sponsor': item.sponsor, 'state': state_name,
                    'title': item.title, 'ag_state': old_state_id, 'ag_district': old_dist_id}
        ag_list.append(ass_dict)
        response = jsonify(ag_list)
        response.status_code = 200
        return response
    else:
        response = jsonify(ag_list)
        response.status_code = 200
        return response
If anyone knows the solution, please let me know.
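In general, psycopg2 raises InFailedSqlTransaction when an earlier statement in the same transaction already failed; every later statement is refused until the transaction is rolled back, so the real error is the first one, not this one. With Flask-SQLAlchemy the usual remedies are to find and fix that first failing query (the PostgreSQL log shows it) and to make sure the session is rolled back whenever a request errors, so the aborted transaction doesn't poison the pooled connection. A sketch of the rollback part, assuming the conventional db = SQLAlchemy(APP) object:

@APP.teardown_request
def rollback_on_error(exc):
    # If the request raised, discard the aborted transaction so the
    # connection returned to the pool is usable again.
    if exc is not None:
        db.session.rollback()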

Python 3.6 Lambda code error for os.environ variable

try :
    sf = Salesforce(username = sfdc_username,
                    password = sfdc_password,
                    security_token = sfdc_security_token,
                    instance_url = sfdc_salesforce_instance_url,
                    domain = sfdc_sandbox)
    print('salesforce login good')
except (SalesforceGeneralError,
        SalesforceMoreThanOneRecord,
        SalesforceMalformedRequest,
        SalesforceExpiredSession,
        SalesforceRefusedRequest,
        SalesforceResourceNotFound) as e :
    print(e.content[0]['message'])
    sys.exit(1)
This portion of the code fails on Lambda with the error:
a bytes-like object is required, not 'str': TypeError
Traceback (most recent call last):
File "/var/task/sfdc_etl/bin/sfdc_etl.py", line 80, in lambda_handler
domain = sfdc_sandbox)
File "/var/task/sfdc_etl/lib/python3.6/site-packages/simple_salesforce/api.py", line 146, in __init__
domain=self.domain)
File "/var/task/sfdc_etl/lib/python3.6/site-packages/simple_salesforce/login.py", line 80, in SalesforceLogin
username = escape(username)
File "/var/lang/lib/python3.6/html/__init__.py", line 19, in escape
s = s.replace("&", "&") # Must be done first!
TypeError: a bytes-like object is required, not 'str'
When I move this code to my test environment on an EC2 Amazon Linux instance and set sfdc_sandbox to 'test' inline, it works with no issues. I tried using os.environb["L_SFDC_SANDBOX"] and os.environ["L_SFDC_SANDBOX"].encode('utf8'), but neither helped; both gave the same error. How do I fix the TypeError when I pull in this variable in Lambda?
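For context, the traceback points at html.escape, which runs str-style replace calls on whatever it receives; calling replace on a bytes object with str arguments raises exactly this error, reproducible in any Python 3 shell:

>>> b"user".replace("&", "&amp;")
TypeError: a bytes-like object is required, not 'str'

So whatever reaches Salesforce(...) as bytes (here, the .encode('utf8') results and the KMS plaintexts) has to be passed as str instead, which is what the solution at the end does.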
Here is the entire script; maybe the error isn't caused by that specific piece of code, even though it seems like it is.
import os
import sys
# this adds the parent directory of bin so we can find the module
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parent_dir)
#This addes venv lib/python2.7/site-packages/ to the search path
mod_path = os.path.abspath(parent_dir+"/lib/python"+str(sys.version_info[0])+"."+str(sys.version_info[1])+"/site-packages/")
sys.path.append(mod_path)
from awsmgr.awsmgr import S3Helper
from base64 import b64decode
import boto3
from collections import OrderedDict
import datetime
from dateutil.parser import parse
import logging
import json
import math
import pandas as pd
from simple_salesforce import Salesforce, SalesforceLogin
from simple_salesforce.exceptions import SalesforceGeneralError, SalesforceMoreThanOneRecord, SalesforceMalformedRequest, SalesforceExpiredSession, SalesforceRefusedRequest, SalesforceResourceNotFound
from sqlalchemy import create_engine
from sqlalchemy import exc
current_path = os.path.dirname(os.path.realpath(__file__))
# Use this one for the parent directory
ENV_ROOT = os.path.abspath(os.path.join(current_path, os.path.pardir))
# Use this one for the current directory
#ENV_ROOT = os.path.abspath(os.path.join(current_path))
sys.path.append(ENV_ROOT)
def lambda_handler(event, context):
    ###############################
    # Global Variable Definitions #
    ###############################
    d_parse = parse
    TMP_PATH = '/tmp'
    igersUser = 'admin'
    igersPwd = boto3.client('kms').decrypt(CiphertextBlob=b64decode(os.environ["RS_PASSWORD"]))['Plaintext']
    igersHost = os.environ["RS_HOST"]
    igers = create_engine('postgres://{}:{}@{}/ibdrs'.format(igersUser, igersPwd, igersHost), encoding="utf-8")
    igersSchema = os.environ["RS_SCHEMA"]
    s3 = S3Helper(debug=os.environ["DEBUG"])
    nextObjFile = s3.get_s3_file('s3://test-sfdc-sds-team/sfdc-etl-jp-test/sfdc_etl/objects/next_object.txt',os.path.abspath(os.path.join(TMP_PATH,'next_object.txt')))
    s3Destination = 's3://test-sfdc-sds-team/sfdc-etl-jp-test/sfdc_etl/json/'
    s3Path = '{}_json'
    s3NextObjDestination = 's3://test-sfdc-sds-team/sfdc-etl-jp-test/sfdc_etl/objects/{}'
    fileCount = 1
    sfdc_username = os.environ["L_USERNAME"].encode('utf8')
    sfdc_salesforce_instance_url = os.environ["L_SALESFORCE_INSTANCE_URL"].encode('utf8')
    sfdc_password = boto3.client('kms').decrypt(CiphertextBlob=b64decode(os.environ["L_PASSWORD"]))['Plaintext']
    sfdc_security_token = boto3.client('kms').decrypt(CiphertextBlob=b64decode(os.environ["L_SECURITY_TOKEN"]))['Plaintext']
    sfdc_sandbox = os.environ["L_SFDC_SANDBOX"].encode('utf8')
    print(type(sfdc_username), type(sfdc_password), type(sfdc_security_token), type(sfdc_salesforce_instance_url), type(sfdc_sandbox))
    try :
        sf = Salesforce(username = sfdc_username,
                        password = sfdc_password,
                        security_token = sfdc_security_token,
                        instance_url = sfdc_salesforce_instance_url,
                        domain = sfdc_sandbox)
        print('salesforce login good')
    except (SalesforceGeneralError,
            SalesforceMoreThanOneRecord,
            SalesforceMalformedRequest,
            SalesforceExpiredSession,
            SalesforceRefusedRequest,
            SalesforceResourceNotFound) as e :
        print(e.content[0]['message'])
        sys.exit(1)
    # get nextobj from s3
    with open(nextObjFile, 'r') as f :
        nextObjItem = f.read().strip().lower()
    nextObj = nextObjItem.lower()
    print('Processing {}'.format(nextObj))
    ######################################################
    # get rs table group permissions, store in dataframe #
    ######################################################
    def rsGetGroupPerms(igers, nextObj) :
        global groupPerms
        groupPerms = {}
        existingGroupPerms = '''
            SELECT
                namespace, item, type, groname
            FROM
            (
                SELECT
                    use.usename AS subject,
                    nsp.nspname AS NAMESPACE,
                    cls.relname AS item,
                    cls.relkind AS TYPE,
                    use2.usename AS OWNER,
                    cls.relacl
                FROM
                    pg_user use
                    CROSS JOIN pg_class cls
                    LEFT JOIN pg_namespace nsp ON cls.relnamespace = nsp.oid
                    LEFT JOIN pg_user use2 ON cls.relowner = use2.usesysid
                WHERE
                    cls.relowner = use.usesysid
                    AND nsp.nspname NOT IN ( 'pg_catalog', 'pg_toast', 'information_schema' )
                    AND nsp.nspname IN ( 'salesforce' )
                    AND relacl IS NOT NULL
                ORDER BY
                    subject,
                    NAMESPACE,
                    item
            )
            JOIN pg_group pu ON array_to_string( relacl, '|' ) LIKE '%%' || pu.groname || '%%'
            WHERE item = '{}'
        '''.format(nextObj)
        groupPerms = pd.read_sql(existingGroupPerms, igers)
        print('got the group permissions')
        return groupPerms
    #####################################################
    # get rs table user permissions, store in dataframe #
    # NOT CURRENTLY IN USE                              #
    #####################################################
    #def rsGetUseerPerms(igers, nextObj) :
    #    existingUserPerms = '''
    #    SELECT *
    #    FROM
    #    (
    #        SELECT
    #            schemaname
    #            ,objectname
    #            ,usename
    #            ,HAS_TABLE_PRIVILEGE(usrs.usename, fullobj, 'select') AND has_schema_privilege(usrs.usename, schemaname, 'usage') AS sel
    #            ,HAS_TABLE_PRIVILEGE(usrs.usename, fullobj, 'insert') AND has_schema_privilege(usrs.usename, schemaname, 'usage') AS ins
    #            ,HAS_TABLE_PRIVILEGE(usrs.usename, fullobj, 'update') AND has_schema_privilege(usrs.usename, schemaname, 'usage') AS upd
    #            ,HAS_TABLE_PRIVILEGE(usrs.usename, fullobj, 'delete') AND has_schema_privilege(usrs.usename, schemaname, 'usage') AS del
    #            ,HAS_TABLE_PRIVILEGE(usrs.usename, fullobj, 'references') AND has_schema_privilege(usrs.usename, schemaname, 'usage') AS ref
    #        FROM
    #        (
    #            SELECT schemaname, 't' AS obj_type, tablename AS objectname, schemaname + '.' + tablename AS fullobj FROM pg_tables
    #            UNION
    #            SELECT schemaname, 'v' AS obj_type, viewname AS objectname, schemaname + '.' + viewname AS fullobj FROM pg_views
    #        ) AS objs
    #        ,(SELECT * FROM pg_user) AS usrs
    #        ORDER BY fullobj
    #    )
    #    WHERE (sel = true or ins = true or upd = true or del = true or ref = true)
    #    and objectname = '{}'
    #    '''.format(nextObj)
    #
    #    userPerms = pd.read_sql_query(existingUserPerms, igers)
    #    return userPerms
    ####################################################
    # Connect to Salesforce, Query JSON, and Copy to S3#
    ####################################################
    def sfToS3(fileCount, sf, nextObj) :
        # Initiate list for returned data
        pulls = []
        # Pull initial Query
        sfobject = sf.restful('sobjects/{}/describe/'.format(nextObj), params=None)
        fields_list = [record['name'] for record in sfobject['fields']]
        initialQuery = sf.query("SELECT {} FROM {}".format(','.join(fields_list),nextObj))
        #Send a single file or the first file to S3
        data = initialQuery['records']
        try :
            send_temp_jsonl_to_s3(data, nextObj, s3, s3Destination, fileCount, s3Path)
            # Append initial query data to pulls
            if 'nextRecordsUrl' in initialQuery :
                pulls.append(initialQuery['nextRecordsUrl'])
                nextChunk = initialQuery['nextRecordsUrl']
                nextQuery = sf.query_more(nextChunk,True)
                if 'nextRecordsUrl' in nextQuery :
                    pulls.append(nextQuery['nextRecordsUrl'])
                x = True
                fileCount = 2
                while x == True:
                    try:
                        # set up while loop to re-query salesforce until returned
                        # query does not have a 'nextRecordsUrl' return value
                        # Query new 'nextRecordsUrl'
                        nextQuery = sf.query_more(nextQuery['nextRecordsUrl'],True)
                        # append new query to pulls
                        pulls.append(nextQuery['nextRecordsUrl'])
                    except: # This triggers when nextQuery['nextRecordsUrl'] does not exist
                        # set x to False to end loop
                        x = False
            #if there was a follow on set of records, query it and add to S3
            if len(pulls) >= 1 :
                for i in range(len(pulls)) :
                    data = sf.query_more(str(pulls[i].split('/')[5]))['records']
                    send_temp_jsonl_to_s3(data, nextObj, s3, s3Destination, fileCount, s3Path)
                    fileCount += 1
            print('completed sending JSON files to S3')
        except :
            print('Salesforce Object Empty, ending execution')
            updateNextObj(nextObj, s3NextObjDestination)
            sys.exit(1)
    ####################
    #   JSONL to S3    #
    ####################
    def send_temp_jsonl_to_s3(data, nextObj, s3, s3Destination, fileCount, s3Path) :
        fileName = '{}_file{}.json'
        localFilePath ='/tmp/'
        for element in data :
            item = data.pop()
            item.pop('attributes', None)
            tempdict = OrderedDict({})
            for k,v in item.items() :
                if 'date' in k.lower() or 'stamp' in k.lower() :
                    if not v is None :
                        d = d_parse(v)
                        v = d.strftime('%Y-%m-%d %I:%M:%S')
                        tempdict[k.lower()] = v
                else :
                    tempdict[k.lower()] = v
            with open(localFilePath+fileName.format(nextObj,fileCount), 'a') as outfile :
                outfile.write(json.dumps(tempdict))
                outfile.write('\n')
        s3.put_s3_file_datedpath(localFilePath+fileName.format(nextObj,fileCount),s3Destination+s3Path.format(nextObj))
        os.remove(localFilePath+fileName.format(nextObj,fileCount))
    #################################################
    #   maps SFDC type to SQL type - used for ddl   #
    #################################################
    def map_data_type(sfdc_type, length):
        """
        Definition to map Salesforce datatype to Redshift datatype.
        """
        __MULTIPLIER = 1.3 # may not be Zero!
        if length == 0:
            length = 1092
        if length == 4095:
            length = 15000
        if length > 65535:
            length = 65534
        if sfdc_type == u'boolean':
            return u'varchar(5)'
        elif sfdc_type == u'date':
            return u'timestamp'
        elif sfdc_type == u'datetime':
            return u'timestamp'
        elif sfdc_type == u'currency':
            return u'decimal(38,6)'
        elif sfdc_type == u'double':
            return u'decimal(38,6)'
        elif sfdc_type == u'int':
            return u'numeric(10)'
        elif sfdc_type == u'picklist':
            return u'varchar({})'.format(length)
        elif sfdc_type == u'id':
            return u'varchar({})'.format(length)
        elif sfdc_type == u'reference':
            return u'varchar({})'.format(length)
        elif sfdc_type == u'textarea':
            if length >= (65535/length*__MULTIPLIER):
                return u'varchar({})'.format(65534)
            else:
                return u'varchar({})'.format(math.ceil(length*__MULTIPLIER))
        elif sfdc_type == u'email':
            return u'varchar({})'.format(length)
        elif sfdc_type == u'phone':
            return u'varchar({})'.format(length)
        elif sfdc_type == u'url':
            return u'varchar({})'.format(length)
        elif sfdc_type == u'multipicklist':
            return u'varchar({})'.format(length)
        elif sfdc_type == u'anyType':
            if length >= 65535:
                return u'varchar({})'.format(65534)
            else:
                return u'varchar({})'.format(math.ceil(length*__MULTIPLIER))
        elif sfdc_type == u'percent':
            return u'numeric(38,6)'
        elif sfdc_type == u'combobox':
            return u'varchar({})'.format(length)
        elif sfdc_type == u'base64':
            return u'varchar({})'.format(length)
        elif sfdc_type == u'time':
            return u'varchar(255)'
        elif sfdc_type == u'string':
            if length >= 65535:
                return u'varchar({})'.format(65534)
            else:
                return u'varchar({})'.format(math.ceil(length*__MULTIPLIER))
        else:
            return u'varchar(65535)'
    ####################################
    #   Turn SFDC metadata into SQL    #
    ####################################
    def get_ddl(sf, nextObj, igersSchema, col_remove=None):
        md = sf.restful("sobjects/{}/describe/".format(nextObj), params=None)
        target_table = nextObj
        total_field_count = 0
        global ddl_str
        ddl_str = ''
        ddl_str += 'CREATE TABLE '+ igersSchema+"."+target_table +' ('
        for x in md["fields"]:
            #print x["name"]
            if col_remove:
                if x["name"].lower() in [element.lower() for element in col_remove]:
                    print("Skipping: {}".format(x["name"]))
                    continue
            ddl_str += x["name"] + ' ' + map_data_type(x["type"],x["length"])
            if x["name"] == 'Id':
                ddl_str += ' NOT NULL DISTKEY'
            ddl_str += ","
            total_field_count = total_field_count + 1
        ddl_str = ddl_str[:-1]
        ddl_str += ')'
        logging.info('DDL Successfully created...')
        # print("Total Field Count: "+str(total_field_count))
        return ddl_str
    #########################################################################
    # Create Table from DDL, execute the copy query and update permissions  #
    #########################################################################
    def rs_operations(ddl_str, groupPerms, igersSchema, nextObj, s3Destination, s3Path, igers) :
        today = datetime.date.today()
        dated_path = today.strftime('%Y/%m/%d')
        perms_statement = ''
        drop_table = '''
            DROP TABLE IF EXISTS {}.{} CASCADE
        '''.format(igersSchema, nextObj)
        loadQuery = '''
            COPY {}.{}
            FROM '{}{}/{}/'
            iam_role 'arn:aws:iam::087024238921:role/LambdaFullAccessRole'
            TRUNCATECOLUMNS
            FORMAT AS JSON 'auto'
        '''.format(igersSchema, nextObj, s3Destination, s3Path.format(nextObj), dated_path)
        grantPerms = '''
            GRANT SELECT ON {}.{} TO GROUP {}
        '''
        with igers.connect() as conn:
            try :
                conn.execute(drop_table)
                print('completed drop table')
                conn.execute(ddl_str)
                print('completed create table')
                conn.execute(loadQuery)
                print('completed load query')
                for row in range(len(groupPerms)) :
                    perms_statement = grantPerms.format(groupPerms['namespace'].iloc[row],groupPerms['item'].iloc[row],groupPerms['groname'].iloc[row])
                    conn.execute(perms_statement)
                print('completed grant group permissions')
                conn.close()
            except exc.SQLAlchemyError as e :
                print(e)
    ######################################
    # Update Next Object and Write to S3 #
    ######################################
    def updateNextObj(nextObj, s3NextObjDestination) :
        objectsList = []
        objectsFile = s3.get_s3_file('s3://test-sfdc-sds-team/sfdc-etl-jp-test/sfdc_etl/objects/sfdc_etl_objects.txt',os.path.abspath(os.path.join(TMP_PATH,'sfdc_etl_objects.txt')))
        localNobjTempFile = os.path.abspath(os.path.join(TMP_PATH,'next_object.txt'))
        nextObjText = ''
        with open (objectsFile, 'r') as objs :
            for line in objs :
                objectsList.append(line.strip("\n"))
        for i in range(len(objectsList)-1) :
            if objectsList[i].lower() == nextObj :
                nextObjText = objectsList[i+1]
                print(nextObjText)
        with open (localNobjTempFile, 'w') as f :
            f.write(nextObjText)
        s3.put_s3_file(localNobjTempFile,s3NextObjDestination.format('next_object.txt'))
        print('completed Updating the next object')
    ################################################
    #   Test if the object exists and execute      #
    ################################################
    try :
        getattr(sf,nextObj).describe()
    except (SalesforceGeneralError,
            SalesforceMoreThanOneRecord,
            SalesforceMalformedRequest,
            SalesforceExpiredSession,
            SalesforceRefusedRequest,
            SalesforceResourceNotFound) as e :
        print(e.content[0]['message'] +', writing next object and ending')
        updateNextObj(nextObj, s3NextObjDestination)
        sys.exit(1)
    rsGetGroupPerms(igers, nextObj)
    sfToS3(fileCount, sf, nextObj)
    get_ddl(sf, nextObj, igersSchema, col_remove=None)
    rs_operations(ddl_str, groupPerms, igersSchema, nextObj, s3Destination, s3Path, igers)
    updateNextObj(nextObj, s3NextObjDestination)
The problem is that the following line
sfdc_password = boto3.client('kms').decrypt(CiphertextBlob=b64decode(os.environ["L_PASSWORD"]))['Plaintext']
is returning a bytes object, while the Salesforce class expects a string.
Solution:
try :
    sf = Salesforce(username=sfdc_username,
                    password=sfdc_password.decode("utf-8"),
                    security_token=sfdc_security_token,
                    instance_url=sfdc_salesforce_instance_url,
                    domain=sfdc_sandbox)
    print('salesforce login good')
Note that the sfdc_password variable is converted to a string using the bytes .decode method.
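By the same logic, the .encode('utf8') calls on the other variables can simply be dropped (os.environ already returns str in Python 3), and only the KMS-decrypted values, which boto3 returns as bytes under 'Plaintext', need decoding. A sketch:

kms = boto3.client('kms')
sfdc_username = os.environ["L_USERNAME"]        # already str, no .encode needed
sfdc_sandbox = os.environ["L_SFDC_SANDBOX"]     # already str
sfdc_password = kms.decrypt(
    CiphertextBlob=b64decode(os.environ["L_PASSWORD"]))['Plaintext'].decode('utf-8')
sfdc_security_token = kms.decrypt(
    CiphertextBlob=b64decode(os.environ["L_SECURITY_TOKEN"]))['Plaintext'].decode('utf-8')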

Why is my code looping at just one line in the while loop instead over the whole block?

Sorry for the unsophisticated question title, but I need help desperately:
My objective at work is to create a script that pulls all the records from the ExactTarget Salesforce Marketing Cloud API. I have successfully set up the API calls and successfully imported the data into DataFrames.
The problem I am running into is two-fold: I need to keep pulling records until "Results_Message" in my code stops reading "MoreDataAvailable", and I need to set up logic that lets me control the date either within the API call or by parsing the DataFrame.
My code is getting stuck at line 44, where "print Results_Message" keeps looping over the string "MoreDataAvailable".
Here is my code so far. On lines 94 and 95 you will see my attempt at parsing the date directly from the DataFrame, with no luck, and no luck on line 32 where I have specified the date:
import ET_Client
import pandas as pd

AggreateDF = pd.DataFrame()
Data_Aggregator = pd.DataFrame()
#Start_Date = "2016-02-20"
#End_Date = "2016-02-25"
#retrieveDate = '2016-07-25T13:00:00.000'
Export_Dir = 'C:/temp/'
try:
    debug = False
    stubObj = ET_Client.ET_Client(False, debug)
    print '>>>BounceEvents'
    getBounceEvent = ET_Client.ET_BounceEvent()
    getBounceEvent.auth_stub = stubObj
    getBounceEvent.search_filter = {'Property' : 'EventDate','SimpleOperator' : 'greaterThan','Value' : '2016-02-22T13:00:00.000'}
    getResponse1 = getBounceEvent.get()
    ResponseResultsBounces = getResponse1.results
    Results_Message = getResponse1.message
    print(Results_Message)
    #EventDate = "2016-05-09"
    print "This is orginial " + str(Results_Message)
    #print ResponseResultsBounces
    i = 1
    while (Results_Message == 'MoreDataAvailable'):
        #if i > 5: break
        print Results_Message
        results1 = getResponse1.results
        #print(results1)
        i = i + 1
        ClientIDBounces = []
        partner_keys1 = []
        created_dates1 = []
        modified_date1 = []
        ID1 = []
        ObjectID1 = []
        SendID1 = []
        SubscriberKey1 = []
        EventDate1 = []
        EventType1 = []
        TriggeredSendDefinitionObjectID1 = []
        BatchID1 = []
        SMTPCode = []
        BounceCategory = []
        SMTPReason = []
        BounceType = []
        for BounceEvent in ResponseResultsBounces:
            ClientIDBounces.append(str(BounceEvent['Client']['ID']))
            partner_keys1.append(BounceEvent['PartnerKey'])
            created_dates1.append(BounceEvent['CreatedDate'])
            modified_date1.append(BounceEvent['ModifiedDate'])
            ID1.append(BounceEvent['ID'])
            ObjectID1.append(BounceEvent['ObjectID'])
            SendID1.append(BounceEvent['SendID'])
            SubscriberKey1.append(BounceEvent['SubscriberKey'])
            EventDate1.append(BounceEvent['EventDate'])
            EventType1.append(BounceEvent['EventType'])
            TriggeredSendDefinitionObjectID1.append(BounceEvent['TriggeredSendDefinitionObjectID'])
            BatchID1.append(BounceEvent['BatchID'])
            SMTPCode.append(BounceEvent['SMTPCode'])
            BounceCategory.append(BounceEvent['BounceCategory'])
            SMTPReason.append(BounceEvent['SMTPReason'])
            BounceType.append(BounceEvent['BounceType'])
        df1 = pd.DataFrame({'ClientID': ClientIDBounces, 'PartnerKey': partner_keys1,
                            'CreatedDate' : created_dates1, 'ModifiedDate': modified_date1,
                            'ID':ID1, 'ObjectID': ObjectID1,'SendID':SendID1,'SubscriberKey':SubscriberKey1,
                            'EventDate':EventDate1,'EventType':EventType1,'TriggeredSendDefinitionObjectID':TriggeredSendDefinitionObjectID1,
                            'BatchID':BatchID1,'SMTPCode':SMTPCode,'BounceCategory':BounceCategory,'SMTPReason':SMTPReason,'BounceType':BounceType})
        #print df1
        #df1 = df1[(df1.EventDate > "2016-02-20") & (df1.EventDate < "2016-02-25")]
        #AggreateDF = AggreateDF[(AggreateDF.EventDate > Start_Date) and (AggreateDF.EventDate < End_Date)]
        print(df1['ID'].max())
        AggreateDF = AggreateDF.append(df1)
        print(AggreateDF.shape)
    #df1 = df1[(df1.EventDate > "2016-02-20") and (df1.EventDate < "2016-03-25")]
    #AggreateDF = AggreateDF[(AggreateDF.EventDate > Start_Date) and (AggreateDF.EventDate < End_Date)]
    print("Final Aggregate DF is: " + str(AggreateDF.shape))
    #EXPORT TO CSV
    AggreateDF.to_csv(Export_Dir +'DataTest1.csv')
    #with pd.option_context('display.max_rows',10000):
    #print (df_masked1.shape)
    #print df_masked1
except Exception as e:
    print 'Caught exception: ' + str(e.message)
    print e
Before my code parses the data, the original format I get is a SOAP response; this is what it looks like (below). Is it possible to parse records based on EventDate directly from the SOAP response?
}, (BounceEvent){
   Client =
      (ClientID){
         ID = 1111111
      }
   PartnerKey = None
   CreatedDate = 2016-05-12 07:32:20.000937
   ModifiedDate = 2016-05-12 07:32:20.000937
   ID = 1111111
   ObjectID = "1111111"
   SendID = 1111111
   SubscriberKey = "aaa@aaaa.com"
   EventDate = 2016-05-12 07:32:20.000937
   EventType = "HardBounce"
   TriggeredSendDefinitionObjectID = "aa111aaa"
   BatchID = 1111111
   SMTPCode = "1111111"
   BounceCategory = "Hard bounce - User Unknown"
   SMTPReason = "aaaa"
   BounceType = "immediate"
I hope this makes sense; this is my desperate plea for help.
Thank you in advance!
You don't seem to be updating Results_Message inside your loop, so it will always keep the value it was given on line 29 (Results_Message = getResponse1.message), unless there's code involved that you didn't share.
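A sketch of the fix, assuming the FuelSDK-style ET_Client, where a get object exposes getMoreResults() to fetch the next page of a previous get():

while Results_Message == 'MoreDataAvailable':
    # ... build df1 from ResponseResultsBounces and append it to AggreateDF ...
    getResponse1 = getBounceEvent.getMoreResults()   # fetch the next page
    ResponseResultsBounces = getResponse1.results
    Results_Message = getResponse1.message           # now the loop can end

On the date filtering: some of the commented-out attempts combine two Series with the Python and operator, which raises "The truth value of a Series is ambiguous" in pandas; element-wise masks need & with each comparison parenthesized, e.g. df1[(df1.EventDate > "2016-02-20") & (df1.EventDate < "2016-02-25")].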
