i have a python script which makes a call to Jira API for creating a Jira issue.
The API call itself works fine: if I test it with Postman I am able to create tickets, however the same URL does not work using Python. I do not understand why I am not able to create a secure connection to Jira.
Executed [createIssue] action via OEC[integrationId: f457bfd9-5fe0-4fc5-89a9-ee007e85cf1b integrationType: Jira] with errors. Reason: Err: exit status 1, Stderr: Traceback (most recent call last): File
"/home/opsgenie/oec_test/scripts/actionExecutor.py", line 279, in <module> main() File "/home/opsgenie/oec_test/scripts/actionExecutor.py", line 233, in main timeout=timeout) File
"/usr/lib/python2.7/dist-packages/requests/api.py", line 112, in post return request('post', url, data=data, json=json, **kwargs) File "/usr/lib/python2.7/dist-packages/requests/api.py", line 58, in request return session.request(method=method, url=url, **kwargs) File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 520, in
request resp = self.send(prep, **send_kwargs) File "/usr/lib/python2.7/dist-packages/requests/sessions.py", line 630, in send r = adapter.send(request, **kwargs) File
"/usr/lib/python2.7/dist-packages/requests/adapters.py", line 508, in send raise
ConnectionError(e, request=request) requests.exceptions.ConnectionError: HTTPSConnectionPool(host='jiratest.gk.gk-software.com', port=443): Max retries exceeded with url: /rest/api/2/issue
(Caused by NewConnectionError('<urllib3.connection.VerifiedHTTPSConnection object at 0x7ffa264aa1d0>: Failed to establish a new connection: [Errno -2] Name or service not known',))
The complete code looks like this:
import argparse
import json
import logging
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import re
import requests
from requests.auth import HTTPBasicAuth
# CLI arguments supplied by the Opsgenie Edge Connector (OEC) action runner.
parser = argparse.ArgumentParser()
parser.add_argument('-payload', '--queuePayload', help='Payload from queue', required=True)
parser.add_argument('-apiKey', '--apiKey', help='The apiKey of the integration', required=True)
parser.add_argument('-opsgenieUrl', '--opsgenieUrl', help='The url', required=True)
parser.add_argument('-logLevel', '--logLevel', help='Level of log', required=True)
# Jira connection settings; may also arrive inside the queue payload
# (see parse_field), hence not required here.
parser.add_argument('-username', '--username', help='Username', required=False)
parser.add_argument('-password', '--password', help='Password', required=False)
parser.add_argument('-url', '--url', help='URL', required=False)
parser.add_argument('-projectKey', '--projectKey', help='Project Key', required=False)
parser.add_argument('-issueTypeName', '--issueTypeName', help='Issue Type', required=False)
args = vars(parser.parse_args())
# Log to stdout at the level requested by the runner.
logging.basicConfig(stream=sys.stdout, level=args['logLevel'])
def parse_field(key, mandatory):
    """Fetch a config value: queue payload first, then CLI args.

    Relies on module globals ``queue_message``, ``args`` and ``LOG_PREFIX``
    (set in main()).

    Raises ValueError when a mandatory key is missing from both sources.
    """
    variable = queue_message.get(key)
    if not variable:
        variable = args.get(key)
    if mandatory and not variable:
        # Build the message once instead of duplicating the literal for
        # both the log call and the exception (the original repeated it).
        message = (LOG_PREFIX + " Skipping action, Mandatory conf item '" + key +
                   "' is missing. Check your configuration file.")
        logging.error(message)
        raise ValueError(message)
    return variable
def parse_timeout():
    """Return the configured 'http.timeout' as an int, defaulting to 30000.

    NOTE(review): the returned value is fed straight into requests'
    ``timeout=`` parameter, which expects seconds — 30000 looks like a
    milliseconds default; confirm the intended unit.
    """
    configured = args.get('http.timeout')
    return int(configured) if configured else 30000
def get_alert_details(alertId):
    """Fetch the alert record from the Opsgenie v2 alerts API.

    Uses the module-global ``args`` for the base URL and API key.
    Returns the "data" object of the JSON response.
    """
    request_headers = {
        "Content-Type": "application/json",
        "Accept-Language": "application/json",
        "Authorization": "GenieKey " + args['apiKey']
    }
    endpoint = args['opsgenieUrl'] + "/v2/alerts/" + alertId
    reply = requests.get(endpoint, headers=request_headers)
    return reply.json()["data"]
def get_transition_id(request_headers, jira_url, transition_name, token):
    """Look up the id of the Jira workflow transition whose target status
    name equals ``transition_name``.

    Relies on module globals ``timeout`` and ``LOG_PREFIX`` set in main().

    Returns the transition id as a string, or "" when it is not found or
    the response body is not valid JSON.  (The original implicitly
    returned None on invalid JSON, leaking None into the POST payload.)
    """
    transition_id = str()
    response = requests.get(jira_url, None, headers=request_headers, auth=token, timeout=timeout)
    try:
        body = response.json()
    except ValueError:
        logging.error("The response body is not a valid json object!")
        # BUG FIX: keep the return type consistent — empty string, not None.
        return transition_id
    if body and response.status_code < 299:
        for transition in body["transitions"]:
            # Match on the *target* status name of the transition.
            if transition_name == transition['to']['name']:
                transition_id = transition['id']
        logging.info(LOG_PREFIX + " Successfully executed at Jira")
        logging.debug(LOG_PREFIX + " Jira response: " + str(response.status_code) + " " + str(response.content))
    else:
        logging.error(
            LOG_PREFIX + " Could not execute at Jira; response: " + str(response.content) + " status code: " + str(
                response.status_code))
    if not transition_id:
        logging.debug(LOG_PREFIX + " Transition id is empty")
    return transition_id
def get_comp():
    """Extract the quoted ``topic`` value from the alert description for use
    as the Jira component name; fall back to "Dummy" when absent."""
    match = re.search(r"topic:\s\'(.*)[']", str(queue_message.get("description")))
    return match.group(1) if match else "Dummy"
def get_prio():
    """Map the severity embedded in the alert description to a Jira priority.

    Mapping: "critical" -> "Very High"; "Very High" -> "High"; any other
    matched severity is passed through unchanged; defaults to "High" when
    the description carries no severity tag.
    """
    severity = re.search(r"severity:\s\'(.*)[']", queue_message.get("description"))
    if not severity:
        return "High"
    jira_prio = severity.group(1)
    if jira_prio == "critical":
        return "Very High"
    if jira_prio == "Very High":
        return "High"
    # BUG FIX: the original fell off the end (returning None) for any other
    # matched severity, producing {"name": null} in the Jira payload.
    return jira_prio
def get_context():
    """Extract the quoted ``context`` tag from the alert description,
    returning "" when no tag is present."""
    match = re.search(r"context:\s\'(.*)[']", str(queue_message.get("description")))
    return match.group(1) if match else ""
def main():
    """Entry point: parse the queued Opsgenie payload and execute the mapped
    Jira action (create / comment / transition an issue).

    Sets the module globals consumed by the helper functions above.
    """
    global LOG_PREFIX
    global queue_message
    global timeout
    # NOTE(review): the three customfield globals below are declared but
    # never read elsewhere in this file — possibly leftovers; confirm.
    global to_customfield_20500; to_customfield_20500=[]
    global project_to_customfield_20500
    global cluster_to_customfield_20500
    queue_message_string = args['queuePayload']
    queue_message_string = queue_message_string.strip()
    queue_message = json.loads(queue_message_string)
    alert_id = queue_message["alertId"]
    mapped_action = queue_message["mappedActionV2"]["name"]
    # Fetched but not used below — kept for parity with the original flow.
    alert_details = get_alert_details(alert_id)
    LOG_PREFIX = "[" + mapped_action + "]"
    logging.info("Will execute " + mapped_action + " for alertId " + alert_id)
    # NOTE(review): parse_timeout() defaults to 30000 and this value is
    # passed to requests' timeout= (seconds) — looks like ms; confirm.
    timeout = parse_timeout()
    # Mandatory Jira connection settings; parse_field raises when missing.
    url = parse_field('url', True)
    username = parse_field('username', True)
    password = parse_field('password', True)
    project_key = parse_field('projectKey', False)
    issue_type_name = parse_field('issueTypeName', False)
    issue_key = queue_message.get("key")
    logging.debug("Url: " + str(url))
    logging.debug("Username: " + str(username))
    logging.debug("Project Key: " + str(project_key))
    logging.debug("Issue Type: " + str(issue_type_name))
    logging.debug("Issue Key: " + str(issue_key))
    content_params = dict()
    token = HTTPBasicAuth(username, password)
    headers = {
        "Content-Type": "application/json",
        "Accept-Language": "application/json",
    }
    result_url = url + "/rest/api/2/issue"
    if mapped_action == "addCommentToIssue":
        content_params = {
            "body": queue_message.get('body')
        }
        result_url += "/" + issue_key + "/comment"
    elif mapped_action == "createIssue":
        # Component, priority and context are scraped out of the alert
        # description by the regex helpers above.
        getcontext = get_context()
        getcomp = get_comp()
        priority = get_prio()
        content_params = {
            "fields": {
                "project": {"key": project_key},
                "issuetype": {"name": issue_type_name},
                "summary": queue_message.get("summary"),
                "description": queue_message.get("description"),
                "customfield_20500": [{"value": "DE - Germany"}],
                "customfield_13604": "tbd",
                "components": [{"name": getcomp}],
                "versions": [{"name": "tbd"}],
                "customfield_15000": [getcontext],
                "priority": {"name": priority},
                "assignee": {"name": "#cloudoperations"}
            }
        }
    elif mapped_action == "resolveIssue":
        # Transition endpoints need the numeric transition id, looked up
        # by the target status name.
        result_url += "/" + issue_key + "/transitions"
        content_params = {
            "transition": {
                "id": get_transition_id(headers, result_url, "Resolved", token)
            },
            "fields": {
                "resolution": {
                    "name": "Done"
                }
            }
        }
    elif mapped_action == "closeIssue":
        result_url += "/" + issue_key + "/transitions"
        content_params = {
            "transition": {
                "id": get_transition_id(headers, result_url, "Closed", token)
            },
            "fields": {
                "resolution": {
                    "name": "Done"
                }
            }
        }
    elif mapped_action == "issueDone":
        result_url += "/" + issue_key + "/transitions"
        content_params = {
            "transition": {
                "id": get_transition_id(headers, result_url, "Done", token)
            }
        }
    elif mapped_action == "inProgressIssue":
        result_url += "/" + issue_key + "/transitions"
        content_params = {
            "transition": {
                "id": get_transition_id(headers, result_url, "In Progress", token)
            }
        }
    logging.debug(str(content_params))
    response = requests.post(result_url, data=json.dumps(content_params), headers=headers, auth=token,
                             timeout=timeout)
    if response.status_code < 299:
        logging.info("Successfully executed at Jira")
        if mapped_action == "createIssue":
            # Report the newly created issue key back to the Opsgenie alert
            # as an alert detail so later actions can find the ticket.
            try:
                response_body = response.json()
                if response_body:
                    issue_key_from_response = response_body['key']
                    if issue_key_from_response:
                        alert_api_url = args['opsgenieUrl'] + "/v2/alerts/" + alert_id + "/details"
                        content = {
                            "details":
                                {
                                    "issueKey": issue_key_from_response
                                }
                        }
                        headers = {
                            "Content-Type": "application/json",
                            "Accept-Language": "application/json",
                            "Authorization": "GenieKey " + args['apiKey']
                        }
                        logging.debug(str(alert_api_url) + str(content) + str(headers))
                        alert_response = requests.post(alert_api_url,
                                                       data=json.dumps(content), headers=headers,
                                                       timeout=timeout)
                        if alert_response.status_code < 299:
                            logging.info(LOG_PREFIX + " Successfully sent to Opsgenie")
                            logging.debug(
                                LOG_PREFIX + " Jira response: " + str(alert_response.content) + " " + str(
                                    alert_response.status_code))
                        else:
                            logging.warning(
                                LOG_PREFIX + " Could not execute at Opsgenie; response: " + str(
                                    alert_response.content) + " status code: " + str(alert_response.status_code))
                else:
                    logging.warning(
                        LOG_PREFIX + " Jira response is empty")
            except ValueError:
                logging.error(ValueError)
    else:
        logging.warning(
            LOG_PREFIX + " Could not execute at Jira; response: " + str(response.content) + " status code: " + str(
                response.status_code))
# Script entry point (invoked by the OEC action executor).
if __name__ == '__main__':
    main()
I'm not sure either but I suggest using the Jira Python library, which in turn uses the requests library
i found out it was an connection issue as from the VM where py script is executed,the jira URL was not reachable
Related
This is code to send an invoice, but I cannot add a for loop to it to include each product's name, price, and quantity. How can I restructure it so that the products and other data are included? I tried to add a for loop, but it didn't work.
###########Send Payment###########
baseURL = "https://apitest.myfatoorah.com"  # MyFatoorah sandbox host
# Demo bearer token for the sandbox environment.
token = 'rLtt6JWvbUHDDhsZnfpAhpYk4dxYDQkbcPTyGaKp2TYqQgG7FGZ5Th_WD53Oq8Ebz6A53njUoo1w3pjU1D4vs_ZMqFiz_j0urb_BH9Oq9VZoKFoJEDAbRZepGcQanImyYrry7Kt6MnMdgfG5jn4HngWoRdKduNNyP4kzcp3mRv7x00ahkm9LAK7ZRieg7k1PDAnBIOG3EyVSJ5kK4WLMvYr7sCwHbHcu4A5WwelxYK0GMJy37bNAarSJDFQsJ2ZvJjvMDmfWwDVFEVe_5tOomfVNt6bOg9mexbGjMrnHBnKnZR1vQbBtQieDlQepzTZMuQrSuKn-t5XZM7V6fCW7oP-uXGX-sMOajeX65JOf6XVpk29DP6ro8WTAflCDANC193yof8-f5_EYY-3hXhJj7RBXmizDpneEQDSaSz5sFk0sV5qPcARJ9zGG73vuGFyenjPPmtDtXtpx35A-BVcOSBYVIWe9kndG3nclfefjKEuZ3m4jL9Gg1h2JBvmXSMYiZtp9MR5I6pvbvylU_PP5xJFSjVTIz7IQSjcVGO41npnwIxRXNRxFOdIUHn0tjQ-7LwvEcTXyPsHXcMD8WtgBh-wxR8aKX7WPSsT1O8d8reb2aR7K3rkV3K82K_0OgawImEpwSvp9MNKynEAJQS6ZHe_J_l77652xwPNxMRTMASk1ZsJL'


def send_payment():
    """POST a v2/SendPayment request with a hard-coded JSON payload string
    and print the raw response body.

    NOTE(review): the payload is a single hand-escaped JSON string, which is
    why inserting invoice items with a loop is awkward — building a dict and
    passing it via requests' json= parameter would be simpler.
    """
    url = baseURL + "/v2/SendPayment"
    payload = "{\"CustomerName\": \"Ahmed\",\"NotificationOption\": \"ALL\",\"MobileCountryCode\": \"+965\"," \
              "\"CustomerMobile\": \"12345678\",\"CustomerEmail\": \"xx#yy.com\",\"InvoiceValue\": 100," \
              "\"DisplayCurrencyIso\": \"KWD\",\"CallBackUrl\": \"https://google.com\",\"ErrorUrl\": " \
              "\"https://google.com\",\"Language\": \"en\",\"CustomerReference\": \"ref 1\",\"CustomerCivilId\": " \
              "12345678,\"UserDefinedField\": \"Custom field\",\"ExpireDate\": \"\",\"CustomerAddress\": {\"Block\": " \
              "\"\",\"Street\": \"\",\"HouseBuildingNo\": \"\",\"Address\": \"\",\"AddressInstructions\": \"\"}," \
              "\"InvoiceItems\": [{\"ItemName\": \"Product 01\",\"Quantity\": 1,\"UnitPrice\": 100}]} "
    headers = {'Content-Type': "application/json", 'Authorization': "Bearer " + token}
    response = requests.request("POST", url, data=payload, headers=headers)
    print("Send Payment Response:\n" + response.text)
I tried this and made a for loop, but it didn't work:
url = baseURL + "/v2/SendPayment"
# One invoice line item; append more dicts to this list (e.g. from a loop
# over your products) to send several items.
# BUG FIX: the original `sss={...},` had a trailing comma, which made sss a
# one-element *tuple* containing the dict, so InvoiceItems was malformed.
invoice_items = [
    {"ItemName": "product 01", "Quantity": 30, "UnitPrice": 10},
]
payload = {
    "CustomerName": "name",            # Mandatory Field ("string")
    "NotificationOption": "SMS",       # Mandatory Field ("LNK", "SMS", "EML", or "ALL")
    "InvoiceValue": 300,               # Mandatory Field (Number)
    # Optional Fields
    "MobileCountryCode": "+966",
    "CustomerMobile": "12345678",      # Mandatory if the NotificationOption = SMS or ALL
    "DisplayCurrencyIso": "kwd",
    "CallBackUrl": "https://yoursite.com/success",
    "ErrorUrl": "https://yoursite.com/error",
    "Language": "ar",
    "InvoiceItems": invoice_items,
}
print(f"this is payload: {payload}")
headers = {'Content-Type': "application/json", 'Authorization': "Bearer " + token}
# BUG FIX: `payload2` was never defined (NameError), and str(payload)
# produced single-quoted pseudo-JSON the API rejects.  Passing the dict via
# json= lets requests serialize it to valid JSON and set the body correctly.
response = requests.request("POST", url, json=payload, headers=headers)
print("Send Payment Response:\n" + response.text)
The data parameter in requests.request should be a dict; in other words, don't convert payload into a string — keep it as a dict (or pass it via the json= argument).
I am trying to validate facebook's webhook payload using the instruction they have given in their developer docs. The signature I am generating (expectedHash) is not matching the signature that I am receiving from Facebook (signatureHash). I think I am following what they are saying but I am doing something wrong which I cannot pinpoint yet.
Validating Payloads
We sign all Event Notification payloads with a SHA256 signature and
include the signature in the request's X-Hub-Signature-256 header,
preceded with sha256=. You don't have to validate the payload, but you
should.
To validate the payload:
Generate a SHA256 signature using the payload and your app's App Secret.
Compare your signature to the signature in the X-Hub-Signature-256 header (everything after sha256=).
If the signatures match, the payload is genuine.
Please note that we generate the signature using an escaped unicode
version of the payload, with lowercase hex digits. If you just
calculate against the decoded bytes, you will end up with a different
signature. For example, the string äöå should be escaped to
\u00e4\u00f6\u00e5.
Below is my code in lambda
def lambda_handler(event, context):
    """Validate a Facebook webhook payload against its X-Hub-Signature-256
    header.

    Returns {"status": 200, "body": <digest>} when the signature matches,
    otherwise {"status": 500, "body": "failed"}.
    """
    response = {
        "status": 500,
        "body": "failed"
    }
    print("event is")
    print(event)
    # .get() so a missing header takes the error branch instead of raising
    # KeyError before the check below.
    signature = event.get("headers", {}).get("X-Hub-Signature-256")
    if not signature:
        return (f"couldn't find {signature} in headers")
    elements = signature.split("=")
    print("elements is")
    print(elements)
    # Everything after "sha256=".
    signatureHash = elements[1]
    print("signature hash is " + str(signatureHash))
    app_secret = os.environ.get('APP_SECRET')
    # BUG FIX: the original called hmac.new() without msg=, so the digest
    # was computed over an *empty* message and could never match.  The
    # HMAC must cover the raw request body.
    payload = event.get("body", "")
    expectedHash = hmac.new(bytes(app_secret, 'utf-8'), msg=bytes(payload, 'utf-8'),
                            digestmod=hashlib.sha256).hexdigest()
    print("expected hash is " + expectedHash)
    # Constant-time comparison prevents timing attacks on the signature.
    if not hmac.compare_digest(signatureHash, expectedHash):
        return response
    response["status"] = 200
    response["body"] = expectedHash
    return response
response I am getting is:
{ "status": 500, "body": "failed" }
expected response:
{ "status": 200, "body": value of expectedHash }
Could you please help me with this?
Edit 1:
Figured out how to do it.
Apparently I was using a wrong content mapping in AWS API Gateway. I needed to use the $input.body to get the raw payload data in the event argument of AWS lambda handler function. My content mapping looks like this:
#set($allParams = $input.params())
{
"method": "$context.httpMethod",
"params" : {
#foreach($type in $allParams.keySet())
#set($params = $allParams.get($type))
"$type" : {
#foreach($paramName in $params.keySet())
"$paramName" : "$util.escapeJavaScript($params.get($paramName))"
#if($foreach.hasNext),#end
#end
}
#if($foreach.hasNext),#end
#end
},
"body" : $input.body
}
Below is my lambda handler function for validating payload:
def lambda_handler(event, context):
    """Validate Facebook's X-Hub-Signature-256 over the payload delivered by
    a custom API Gateway mapping template (headers under params.header,
    raw body mapped in via $input.body).

    NOTE(review): json.dumps(event['body']) re-serializes the already-parsed
    body, so the hashed bytes can differ from the raw bytes Facebook signed —
    the author reports this fails for the `messages` webhook field.
    """
    response = {
        "status": 500,
        "body": "failed"
    }
    print("event is")
    print(event)
    # Raises KeyError (rather than the branch below) when the header is
    # absent from the mapped event.
    signature = event["params"]["header"]["X-Hub-Signature-256"]
    if (not signature):
        return (f"couldn't find {signature} in headers")
    else:
        try:
            elements = signature.split("=")
            print("elements is")
            print(elements)
            # Everything after "sha256=".
            signatureHash = elements[1]
            #print("signature hash is " + str(signatureHash))
            app_secret = os.environ.get('APP_SECRET')
            key = bytes(app_secret, 'UTF-8')
            payload = event['body']
            json_string = json.dumps(payload)
            print("payload json_string is " + json_string)
            expectedHash = hmac.new(key, msg=json_string.encode(), digestmod=hashlib.sha256).hexdigest()
            print("expected hash is " + expectedHash)
            if (signatureHash != expectedHash):
                print(response)
                return response
            else:
                response["status"] = 200
                response["body"] = expectedHash
                print(response)
                return response
        except Exception as e:
            # NOTE(review): returns the exception object itself rather than
            # an HTTP-shaped response.
            return e
As of 12/14/2022, the above function works for all webhook fields except messages (which is the one I really need). Trying to figure it out.
This is your code but using Lambda Proxy Integration, so event keys are a bit different, event["body"], is a raw string, then you can parse it to get the elements you need from it, i think that is easier than all the mapping stuff without the lambda proxy:
import os
import json
import hmac
import hashlib
def lambda_handler(event, context):
    """Validate Facebook's X-Hub-Signature-256 over the raw request body.

    Expects the Lambda Proxy Integration event shape, where event["body"]
    is the raw payload string exactly as Facebook sent it.
    Returns a {'statusCode', 'body'} dict in all cases.
    """
    response = {
        'statusCode': '200',
        'body': "OK"
    }
    print("event is")
    print(event)
    # BUG FIX: .get() so a missing header reaches the branch below instead
    # of raising KeyError on the dict lookup.
    signature = event["headers"].get("X-Hub-Signature-256")
    if not signature:
        response["body"] = (f"couldn't find {signature} in headers")
        return response
    try:
        elements = signature.split("=")
        print("elements is")
        print(elements)
        # Everything after "sha256=".
        signatureHash = elements[1]
        #print("signature hash is " + str(signatureHash))
        app_secret = os.environ.get('APP_SECRET')
        key = bytes(app_secret, 'UTF-8')
        payload = event['body']
        expectedHash = hmac.new(key, msg=bytes(payload, 'UTF-8'), digestmod=hashlib.sha256).hexdigest()
        print("expected hash is " + expectedHash)
        # hmac.compare_digest is constant-time, preventing timing attacks
        # on the signature comparison.
        if not hmac.compare_digest(signatureHash, expectedHash):
            response["body"] = "eh " + expectedHash + " sh " + signatureHash
            print(response)
            return response
        response["statusCode"] = 200
        response["body"] = "Check ok"
        print(response)
        return response
    except Exception as err:
        response["body"] = f"Unexpected {err=}, {type(err)=}"
        return response
I am working with an API that doesn't have all the information I need in a single call, and I need to include the project code it came from in the call that I am making. Right now it appends the project data to the list, but I really need it to be part of the original call. Here is my output now:
[{"committer_email": "justin.m.boucher#example.com", "short_id": "981147b9", "title": "Added .gitignore", "author_email": "justin.m.boucher#example.com", "authored_date": "2017-08-29T08:31:11.000-07:00", "created_at": "2017-08-29T08:31:11.000-07:00", "author_name": "Justin Boucher", "parent_ids": [], "committed_date": "2017-08-29T08:31:11.000-07:00", "message": "Added .gitignore\n", "committer_name": "Justin Boucher", "id": "981147b905913a60796283ce10f915c53679df49"}, {"project_id": "2"}]
Here is the output I want to achieve:
[{"project_id": "2", "committer_email": "justin.m.boucher#example.com", "short_id": "981147b9", "title": "Added .gitignore", "author_email": "justin.m.boucher#example.com", "authored_date": "2017-08-29T08:31:11.000-07:00", "created_at": "2017-08-29T08:31:11.000-07:00", "author_name": "Justin Boucher", "parent_ids": [], "committed_date": "2017-08-29T08:31:11.000-07:00", "message": "Added .gitignore\n", "committer_name": "Justin Boucher", "id": "981147b905913a60796283ce10f915c53679df49"}]
Here is my code so far:
get_commits.py:
import gitlab
import json
gitlab = gitlab.Gitlab()
projects = gitlab.getProjectID()
for i in projects:
api_str = '/projects/' + str(i) + '/repository/commits'
connect = gitlab.connectAPI(apiCall=api_str)
data = json.dumps(connect)
# Append project id to json, since it isn't created
# in the commits from Gitlab
commit = json.loads(data)
commit.append({'project_id': str(i)})
# make it pretty again for Splunk to read
commit = json.dumps(commit)
print commit
gitlab.py
import os
import ConfigParser
import requests
import json
# Setup Splunk Environment
APPNAME = 'splunk_gitlab'   # Splunk app folder name
CONFIG = 'appconfig.conf'   # config file expected inside the app
# Raises KeyError when run outside a Splunk environment.
SPLUNK_HOME = os.environ['SPLUNK_HOME']
parser = ConfigParser.SafeConfigParser()
class Gitlab():
    """Thin GitLab v4 API client used by the Splunk ingestion script."""

    # # Load Settings
    # parser.read(SPLUNK_HOME + '/etc/apps/' + APPNAME + '/local/' + CONFIG)
    # if parser.has_section('Authentication'):
    #     pass
    # else:
    #     parser.read(SPLUNK_HOME + '/etc/apps/' + APPNAME + '/default/' + CONFIG)
    #
    # GITLAB_URL = parser.get('Authentication', 'GITLAB_URL')
    # API_KEY = parser.get('Authentication', 'API_KEY')
    # Used for testing only
    GITLAB_URL = 'http://<my_address>'   # base URL of the GitLab instance
    API_KEY = '<my_key>'                 # private token sent as PRIVATE-TOKEN
    API_SERVER = GITLAB_URL + '/api/v4'

    # Place api call to retrieve data
    def connectAPI(self, apiCall='/projects'):
        """GET ``apiCall`` from the API server and return the parsed JSON.

        Raises Exception with the server-reported error details on any
        non-200 status.
        """
        headers = {
            'PRIVATE-TOKEN': self.API_KEY
        }
        final_url = self.API_SERVER + apiCall
        resp = requests.get(final_url, headers=headers)
        status_code = resp.status_code
        resp = resp.json()
        if status_code == 200:
            return resp
        else:
            raise Exception("Something went wrong requesting (%s): %s" % (
                resp['errors'][0]['errorType'], resp['errors'][0]['message']))

    def getProjectID(self):
        """Return the list of project ids visible to the configured token."""
        connect = self.connectAPI(apiCall='/projects')
        # NOTE(review): dumps+loads is a round-trip no-op on already-parsed
        # JSON; kept as-is.
        data = json.dumps(connect)
        projects = json.loads(data)
        project_list = []
        for i in projects:
            project_list.append(i['id'])
        return project_list
If you want to add a new element to the first dictionary in the list instead of appending a new dictionary to the list, try using assignment instead of append.
commit[0]['project_id'] = str(i)
I have the following streamed response:
def reportgen_iterator(request, object_id):
    """Generator that streams progress updates while a report is produced by
    the remote DR service.

    Yields JSON chunks separated by DELIMITER; the final chunk carries the
    URL and extension of the generated file.

    NOTE(review): Python 2 era code — uses ``e.message`` and raises
    StopIteration inside a generator (a RuntimeError on Python 3.7+,
    PEP 479).
    """
    output_format = request.GET.get('output', 'pdf')
    response_data = {
        'progress': 'Retrieving data...',
        'error': False,
        'err_msg': None
    }
    # First chunk has no leading delimiter; every later chunk is prefixed
    # with DELIMITER so the client can split the accumulated text.
    yield json.dumps(response_data)
    try:
        vendor_id, dr_datasets = get_dr_datasets(
            object_id=object_id, ws_user=settings.WS_USER,
            include_vendor_id=True, request=request
        )
    except Exception as e:
        response_data.update({
            'error': True,
            'err_msg': "Unable to retrieve data for report generation. Exception message: {}".format(e.message)
        })
        yield "{}{}".format(DELIMITER, json.dumps(response_data))
        # Give the client time to read the error chunk before closing.
        time.sleep(BEFORE_STOP_ITERATION_SLEEP_SECS)
        raise StopIteration
    # data retrieved correctly, continue
    response_data['progress'] = 'Data retrieved.'
    yield "{}{}".format(DELIMITER, json.dumps(response_data))
    domain = settings.DR['API_DOMAIN']
    dr_id, delivery_opts = get_dr_ids(vendor_id=vendor_id)
    delivery_option_id = delivery_opts.get(output_format)
    run_idle_time = REST_RUN_IDLE_TIME_MS / 1000 or 1
    headers = settings.DR['AUTHORIZATION_HEADER']
    headers.update({
        'Content-Type': 'application/json', 'deliveryOptionId': delivery_option_id
    })
    # POST request
    response_data['progress'] = 'Generating document...'
    yield "{}{}".format(DELIMITER, json.dumps(response_data))
    # NOTE(review): ``ddp_id`` is not defined in this function — presumably
    # a module-level name, or a typo for ``dr_id``; confirm.
    post_url = 'https://{domain}{rel_url}/'.format(
        domain=domain,
        rel_url=settings.DR['API_ENDPOINTS']['create'](ddp_id)
    )
    header_img, footer_img = get_images_for_template(vendor_id=vendor_id, request=None)
    images = {
        'HeaderImg': header_img,
        'FooterImg': footer_img
    }
    data = OrderedDict(
        [('deliveryOptionId', delivery_option_id),
         ('clientId', 'MyClient'),
         ('data', dr_datasets),
         ('images', images)]
    )
    payload = json.dumps(data, indent=4).encode(ENCODING)
    req = requests.Request('POST', url=post_url, headers=headers, data=payload)
    prepared_request = req.prepare()
    session = requests.Session()
    post_response = session.send(prepared_request)
    if post_response.status_code != 200:
        response_data.update({
            'error': True,
            'err_msg': "Error: post response status code != 200, exit."
        })
        yield "{}{}".format(DELIMITER, json.dumps(response_data))
        time.sleep(BEFORE_STOP_ITERATION_SLEEP_SECS)
        raise StopIteration
    # Post response successful, continue.
    # RUN URL - periodic check
    post_response_dict = post_response.json()
    run_url = 'https://{domain}/{path}'.format(
        domain=domain,
        path=post_response_dict.get('links', {}).get('self', {}).get('href'),
        headers=headers
    )
    run_id = post_response_dict.get('runId', '')
    status = 'Running'
    attempt_counter = 0
    file_url = '{url}/files/'.format(url=run_url)
    # Poll the run endpoint until it completes, errors out, or we exhaust
    # ATTEMPTS_LIMIT, yielding a progress chunk per poll.
    while status == 'Running':
        attempt_counter += 1
        run_response = requests.get(url=run_url, headers=headers)
        runs_data = run_response.json()
        status = runs_data['status']
        message = runs_data['message']
        progress = runs_data['progress']
        response_data['progress'] = '{} - {}%'.format(status, int(progress * 100))
        yield "{}{}".format(DELIMITER, json.dumps(response_data))
        if status == 'Error':
            msg = '{sc} - run_id: {run_id} - error_id: [{error_id}]: {message}'.format(
                sc=run_response.status_code, run_id=run_id,
                error_id=runs_data.get('errorId', 'N/A'), message=message
            )
            response_data.update({
                'error': True,
                'err_msg': msg
            })
            yield "{}{}".format(DELIMITER, json.dumps(response_data))
            time.sleep(BEFORE_STOP_ITERATION_SLEEP_SECS)
            raise StopIteration
        if status == 'Complete':
            break
        if attempt_counter >= ATTEMPTS_LIMIT:
            msg = 'File failed to generate after {att_limit} retrieve attempts: ' \
                  '({progress}% progress) - {message}'.format(
                      att_limit=ATTEMPTS_LIMIT,
                      progress=int(progress * 100),
                      message=message
                  )
            response_data.update({
                'error': True,
                'err_msg': msg
            })
            yield "{}{}".format(DELIMITER, json.dumps(response_data))
            time.sleep(BEFORE_STOP_ITERATION_SLEEP_SECS)
            raise StopIteration
        time.sleep(run_idle_time)
    # GET GENERATED FILE
    file_url_response = requests.get(
        url=file_url,
        headers=headers,
        params={'userId': settings.DR_CREDS['userId']},
        stream=True,
    )
    if file_url_response.status_code != 200:
        response_data.update({
            'error': True,
            'err_msg': 'error in retrieving file\nurl: {url}\n'.format(url=file_url)
        })
        yield "{}{}".format(DELIMITER, json.dumps(response_data))
        time.sleep(BEFORE_STOP_ITERATION_SLEEP_SECS)
        raise StopIteration
    file_url_dict = file_url_response.json()
    retrieve_file_rel_url = file_url_dict['files'][0]['links']['file']['href']
    file_ext = DELIVERY_MAPPING.get(output_format, 'pdf')
    response_data.update({
        'progress': 'Generated.',
        'doc_url': retrieve_file_rel_url,
        'dest_file_ext': file_ext
    })
    yield "{}{}".format(DELIMITER, json.dumps(response_data))
class FullDownloadRosterStreamingView(View):
    """Django view that streams reportgen_iterator's progress chunks to the
    client as an application/octet-stream response."""

    def get(self, request, object_id):
        """Start report generation and return the streaming response."""
        stream = reportgen_iterator(request, object_id)
        try:
            response = StreamingHttpResponse(
                streaming_content=stream, status=200,
                content_type='application/octet-stream'
            )
            # Ask intermediaries not to buffer/cache the progress stream.
            response['Cache-Control'] = 'no-cache'
            return response
        except Exception as e:
            # NOTE(review): Python 2 idiom — ``e.message`` does not exist on
            # Python 3 exceptions.
            return HttpResponseServerError(e.message)
def get_file(request):
    """Proxy-download the generated document from the DR service and return
    it to the browser as an attachment named my_doc.<ext>."""
    domain = settings.DR['API_DOMAIN']
    retrieve_file_rel_url = request.GET.get('doc_url')
    file_ext = request.GET.get('file_ext')
    source_url = 'https://{domain}/{path}'.format(
        domain=domain,
        path=retrieve_file_rel_url
    )
    upstream = requests.get(
        url=source_url,
        headers=settings.DR['AUTHORIZATION_HEADER'],
        params={'userId': settings.DR_CREDS['userId']},
        stream=True,
    )
    if upstream.status_code != 200:
        return HttpResponseServerError(
            "Error while downloading file"
        )
    response = HttpResponse(content_type=CONTENT_TYPE_MAPPING.get(file_ext, 'pdf'))
    response['Content-Disposition'] = (
        'attachment; filename="my_doc.{}"'.format(file_ext)
    )
    response.write(upstream.content)
    return response
handled client side by this js code:
function getStreamedResponse(lo_id, output) {
    // GET the streaming endpoint and render progress as chunks arrive;
    // chunks are JSON objects separated by '|'.
    var xhr = new XMLHttpRequest(),
        method = 'GET';
    xhr.overrideMimeType("application/octet-stream");
    var url = window.amphr.baseUrl + '/dl/stream/' + lo_id + '/?output=' + output;
    url += "&" + (new Date()).getTime(); // added a timestamp to prevent xhr requests caching
    this.rspObj = null;
    xhr.onprogress = function (evt) {
        var _this = evt.currentTarget;
        if (_this.responseText.length == 0) return;
        var delimiter = '|';
        var responseTextChunks = _this.responseText.split(delimiter);
        if (responseTextChunks.length == 0) return;
        // Only the newest chunk matters for the progress display.
        _this.rspObj = JSON.parse(responseTextChunks.slice(-1)[0]);
        if (_this.rspObj.error === true) {
            _this.abort(evt);
        }
        updateProgressMessage(_this.rspObj.progress);
    };
    xhr.onload = function (evt) {
        toggleProgress(false);
        var _this = evt.currentTarget;
        var uri = window.amphr.baseUrl + "/dl/get_file/?doc_url=" + _this.rspObj.doc_url + "&file_ext=" + _this.rspObj.dest_file_ext;
        getFile(uri);
    };
    xhr.onerror = function (evt) {
        toggleProgress(false);
    };
    xhr.onabort = function (evt) {
        toggleProgress(false);
        // Deferred so the abort finishes before the confirm dialog blocks.
        setTimeout(function () {
            if (window.confirm("Error while generating document.\nDownload original?")) {
                getFile(window.amphr.originalDownloadUrl);
            }
        }, 100);
    };
    var getFile = function (uri) {
        // Trigger a browser download by clicking a transient <a> element.
        var link = document.createElement("a");
        link.href = uri;
        document.body.appendChild(link);
        link.click();
        document.body.removeChild(link);
        // BUG FIX: removed `delete link;` — applying delete to a declared
        // variable is a no-op and a SyntaxError in strict mode.
    };
    xhr.open(method, url, true);
    xhr.send();
}
function toggleProgress(show) {
    // Show or hide the overlay, spinner and progress message as one unit.
    var mode;
    if (show === true) {
        mode = 'block';
    } else {
        mode = 'none';
    }
    var overlay = document.getElementsByClassName('overlay')[0];
    var spinner = document.getElementsByClassName('loader')[0];
    var message = document.getElementById('progress-msg');
    if (show === true) overlay.style.display = mode;
    overlay.style.display = mode;
    spinner.style.display = mode;
    message.style.display = mode;
    if (show === false) {
        overlay.style.display = mode;
        message.innerHTML = ""; // clear any stale progress text on hide
    }
}
function updateProgressMessage(msg) {
var msgDiv = document.getElementById('progress-msg');
msgDiv.innerHTML = msg;
it works fine locally using the development server (runserver or runserver_plus), the response text comes in chunks.
However, on the dev environment (Apache/wsgi_module with HTTPS), the response is returned entirely at the end, not chunked.
Any hints about why this is happening?
thanks
# Python 2 / httplib: POST a multipart request, then split the multipart
# response into its MIME parts and dump any audio part to a PCM file.
h = httplib.HTTPSConnection(host, port)
h.set_debuglevel(0)
headers = {
    "Content-Type": "multipart/form-data; boundary=%s" % (boundary,),
    "Connection": "Keep-Alive",
}
h.request('POST', uri, body, headers)
res = h.getresponse()
#print res.read()
# Prepend a synthetic MIME header so email.message_from_string can walk the
# multipart body.  NOTE(review): the boundary here is hard-coded — it must
# match the boundary the server actually used; confirm.
data = """MIME-Version: 1.0
Content-Type: multipart/mixed; boundary=--Nuance_NMSP_vutc5w1XobDdefsYG3wq
""" + res.read()
msg = email.message_from_string(data)
#print msg
for index, part in enumerate(msg.walk(), start=1):
    content_type = part.get_content_type()
    #print content_type
    payload = part.get_payload()
    print res.getheaders()
    # Save the audio part as raw PCM.  NOTE(review): 'output.pcm'.format(index)
    # ignores index — every audio part overwrites the same file.
    if content_type == "audio/x-wav" and len(payload):
        with open('output.pcm'.format(index), 'wb') as f_pcm:
            print f_pcm.write(payload)
I am sending a request to the server and the server is sending a response back to the client as above in the form of .txt. The .txt contains an information header on the top and header at the bottom, which is of text format and the rest is binary.
How to write and parse the text and write it into a separate .txt file, and the binary into .pcm file?
The following kind of approach is recommended using Python's email library to try and decode the MIME:
import ssl
import os
import json
import email
import uuid
from io import BytesIO
import httplib
# Folder containing this script; generated files go under ./output.
input_folder = os.path.dirname(os.path.abspath(__file__))
output_folder = os.path.join(input_folder, 'output')
def get_filename(ext, base, sub_folder):
    """Build '<output_folder>/<sub_folder>/<base>.<ext>'."""
    name_with_ext = '{}.{}'.format(base, ext)
    return os.path.join(output_folder, sub_folder, name_with_ext)
def compare_files(file1, file2):
    """Byte-compare two files and print whether they are identical.

    Reads both files fully into memory (fine for small test outputs).
    """
    with open(file1, 'rb') as f_file1, open(file2, 'rb') as f_file2:
        if f_file1.read() == f_file2.read():
            print 'Same:\n {}\n {}'.format(file1, file2)
        else:
            print 'Different:\n {}\n {}'.format(file1, file2)
class Part(object):
    """Represent a part in a multipart message"""

    def __init__(self, name, contentType, data, paramName=None):
        super(Part, self).__init__()
        self.name = name              # form-data field name
        self.paramName = paramName    # optional paramName attribute on the disposition
        self.contentType = contentType
        self.data = data              # raw payload (Python 2 str)

    def encode(self):
        """Serialize this part's headers and data (boundary not included).

        NOTE(review): writes native str into BytesIO — Python 2 only.
        """
        body = BytesIO()
        if self.paramName:
            body.write('Content-Disposition: form-data; name="%s"; paramName="%s"\r\n' % (self.name, self.paramName))
        else:
            body.write('Content-Disposition: form-data; name="%s"\r\n' % (self.name,))
        body.write("Content-Type: %s\r\n" % (self.contentType,))
        # Blank line separates part headers from the part body.
        body.write("\r\n")
        body.write(self.data)
        return body.getvalue()
class Request(object):
    """Builds a multipart/form-data request body from Part objects."""

    def __init__(self):
        super(Request, self).__init__()
        self.parameters = []  # list of Part, encoded in insertion order

    def add_json_parameter(self, name, paramName, data):
        # JSON payloads are sent as UTF-8 text.
        self.parameters.append(Part(name=name, paramName=paramName, contentType="application/json; charset=utf-8", data=data))

    def add_audio_parameter(self, name, paramName, data):
        # Raw 16-bit, 16 kHz PCM audio.
        self.parameters.append(Part(name=name, paramName=paramName, contentType="audio/x-wav;codec=pcm;bit=16;rate=16000", data=data))

    def encode(self):
        """Return (body_bytes, boundary) for the multipart request.

        Bug fix: write bytes rather than str into the BytesIO so the code
        also runs on Python 3; on Python 2 these encoded ASCII strings are
        plain str, so behaviour is unchanged.
        """
        boundary = uuid.uuid4().hex  # random, so it cannot collide with payloads
        body = BytesIO()
        for parameter in self.parameters:
            body.write(('--%s\r\n' % (boundary,)).encode('ascii'))
            body.write(parameter.encode())
            body.write(b'\r\n')
        body.write(('--%s--\r\n' % (boundary,)).encode('ascii'))
        return body.getvalue(), boundary
def get_tts(required_text, LNG):
    """Send required_text to the Nuance TTS HTTPS service and write the
    returned audio/JSON parts to per-language files under the output folder.

    LNG selects the language profile: "ENG" or "GED".
    NOTE(review): any other LNG value leaves `parameters` unbound and raises
    NameError at the RequestData interpolation below -- confirm callers only
    ever pass these two codes.
    """
    required_text = required_text.strip()
    # Filesystem-safe base name built from the first 80 characters of the text.
    output_filename = "".join([x if x.isalnum() else "_" for x in required_text[:80]])
    host = "mtldev08.nuance.com"
    port = 443
    uri = "/NmspServlet/"
    if LNG == "ENG":
        parameters = {'lang' : 'eng_GBR', 'location' : '47.4925, 19.0513'}
    if LNG == "GED":
        parameters = {'lang' : 'deu-DEU', 'location' : '48.396231, 9.972909'}
    # Vendor-specific session/request metadata; %(lang)s and %(location)s are
    # substituted from `parameters` chosen above.
    RequestData = """{
"appKey": "9c9fa7201e90d3d96718bc3f36ce4cfe1781f2e82f4e5792996623b3b474fee2c77699eb5354f2136063e1ff19c378f0f6dd984471a38ca5c393801bffb062d6",
"appId": "NMDPTRIAL_AutomotiveTesting_NCS61HTTP",
"uId": "Alexander",
"inCodec": "PCM_16_8K",
"outCodec": "PCM_16_8K",
"cmdName": "NVC_TTS_CMD",
"appName": "Python",
"appVersion": "1",
"language": "%(lang)s",
"carrier": "carrier",
"deviceModel": "deviceModel",
"cmdDict": {
"tts_voice": "Serena",
"tts_language": "%(lang)s",
"locale": "canada",
"application_name": "Testing Python Script",
"organization_id": "NUANCE",
"phone_OS": "4.0",
"phone_network": "wifi",
"audio_source": "SpeakerAndMicrophone",
"location": "%(location)s",
"application_session_id": "1234567890",
"utterance_number": "5",
"ui_langugage": "en",
"phone_submodel": "nmPhone2,1",
"application_state_id": "45"
}
}""" % (parameters)
    TEXT_TO_READ = """{
"tts_type": "text"
}"""
    # Round-trip through json to inject the text to be spoken.
    TEXT_TO_READ = json.loads(TEXT_TO_READ)
    TEXT_TO_READ["tts_input"] = required_text
    TEXT_TO_READ = json.dumps(TEXT_TO_READ)
    request = Request()
    request.add_json_parameter("RequestData", None, RequestData)
    request.add_json_parameter("TtsParameter", "TEXT_TO_READ", TEXT_TO_READ)
    #ssl._create_default_https_context = ssl._create_unverified_context
    body, boundary = request.encode()
    h = httplib.HTTPSConnection(host, port)
    #h.set_debuglevel(1)
    headers = {
        "Content-Type": "multipart/form-data; boundary=%s" % (boundary,),
        "Connection": "Keep-Alive",
    }
    h.request('POST', uri, body, headers)
    res = h.getresponse()
    # The raw response lacks top-level MIME headers; prepend them (with the
    # server's fixed boundary) so the email module can parse the multipart.
    data = """MIME-Version: 1.0
Content-Type: multipart/mixed; boundary=--Nuance_NMSP_vutc5w1XobDdefsYG3wq
""" + res.read()
    msg = email.message_from_string(data)
    for part in msg.walk():
        content_type = part.get_content_type()
        payload = part.get_payload()
        if content_type == "audio/x-wav" and len(payload):
            # The first run stores a reference file; later runs are compared
            # against it to spot changes in the synthesized audio.
            ref_filename = get_filename('pcm', output_filename + '_ref', LNG)
            if not os.path.exists(ref_filename):
                with open(ref_filename, 'wb') as f_pcm:
                    f_pcm.write(payload)
            cur_filename = get_filename('pcm', output_filename, LNG)
            with open(cur_filename, 'wb') as f_pcm:
                f_pcm.write(payload)
            compare_files(ref_filename, cur_filename)
        elif content_type == "application/json":
            # Non-audio parts hold the JSON status/metadata reply.
            with open(get_filename('json', output_filename, LNG), 'w') as f_json:
                f_json.write(payload)
# Driver: each line of input.txt reads "<LNG>|<text>".
input_path = r'input.txt'
with open(input_path) as f_input:
    for line in f_input:
        LNG, text = line.strip().split('|')
        print("Getting {}: {}".format(LNG, text))
        get_tts(text, LNG)
This assumes your input.txt file has the following format:
ENG|I am tired
GED|Ich gehe nach hause
This will produce an output pcm and json file per line of text. It works with multiple files/languages.
The following sample should work for you.
# Split Output.txt: the first 4 lines and the final 12 lines are text
# header/footer; everything in between is raw binary audio.
filecontent = []
linecount = 0  # Bug fix: stays defined (no NameError) when Output.txt is empty.
with open("Output.txt", "rb") as inputfile:
    for linenr, line in enumerate(inputfile):
        filecontent.append(line)
        linecount = linenr + 1
with open("AsciiOut.txt", "wb") as outputfile, open("BinOut.pcm", "wb") as binoutputfile:
    for linenr, line in enumerate(filecontent):
        if linenr < 4:
            # Leading text header.
            outputfile.write(line)
        elif linenr < linecount - 12:
            # Binary body.
            binoutputfile.write(line)
        else:
            # Trailing text footer.
            outputfile.write(line)