I am creating my vCard with the vobject module using the following:
def _create_vcard(assigned_address, write_to_disk=False):
    vCard = vobject.vCard()
    vCard.add('N').value = vobject.vcard.Name(given='EXAMPLE')
    vCard.add('FN').value = "EXAMPLE"
    vCard.add('EMAIL')
    vCard.email.value = 'info@EX.com'
    vCard.email.type_param = 'SUPPORT_EMAIL'
    vCard.add('TEL')
    vCard.tel.value = assigned_address
    vCard.tel.type_param = 'SYSTEM_NUMBER'
    vCard.add('URL')
    vCard.url.value = 'https://X.com/'
    vCard.url.type_param = 'WEBSITE'
    vCard.add('PHOTO;ENCODING=b;TYPE=image/jpeg').value = _b64_image('assets/example.jpg')  # This line
    vCard_data = vCard.serialize()
Then I send the serialized data to a public S3 bucket:
def _write_vcard_to_s3_bucket(vCard_data, number):
    file_name = number[1:-1] + ".vcf"
    client = boto3.client('s3')
    response = client.put_object(
        Bucket='EX-vcards',
        Body=vCard_data,
        Key=file_name,
        ContentType='text/x-vcard'
    )
    bucket_url = f'https://EX.s3.amazonaws.com/{file_name}'
    return bucket_url
Twilio Code
def twilio_send_test_message(number, vcard_url):
    client = get_twilio_client()
    message = client.messages.create(
        body=f'example message with vCard',
        from_=number,
        media_url=[vcard_url],
        to='+1USPHONENUMBER'
    )
    return message
_b64_image function
def _b64_image(filename):
    with open(filename, 'rb') as f:
        b64 = base64.b64encode(f.read())
        final = b64.decode('utf-8')
        return final
When I comment the photo line out, the vCard sends just fine, but with it, it does not work.
I have also tested it by writing it out to a .vcf locally, and it works fine with the image when saved locally.
Am I doing something clearly wrong?
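For debugging, here is a sketch I can run against the returned bucket URL (the helper below is new, not part of the pipeline): it checks that the vCard is publicly reachable the way Twilio's media fetcher would see it, and logs its size, since base64-embedded photos inflate the payload considerably and oversized or inaccessible media can cause an MMS to fail.

import requests

def _check_vcard_url(vcard_url):
    # Fetch the uploaded vCard over plain HTTP GET and report status,
    # content type, and size; this separates S3/permissions problems
    # from problems with the vCard payload itself.
    resp = requests.get(vcard_url)
    size_kb = len(resp.content) / 1024.0
    print(f'status={resp.status_code} '
          f'content-type={resp.headers.get("Content-Type")} '
          f'size={size_kb:.1f} KB')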
I am using py_zipkin in my code, and I can see the tracing results in the Zipkin UI, but I don't know how to output the tracing results to a file in a specified format, like a log file.
Here is an example of my code:
def fun1(self, param):
    with zipkin_span(
        service_name='my_code',
        span_name='fun1',
        transport_handler=http_transport,
        port=8080,
        sample_rate=100,
    ) as zipkin_context:
        run_some_func(param)
        zipkin_context.update_binary_annotations(param)
def http_transport(encoded_span):
    zipkin_url = 'http://127.0.0.1:9411/api/spans'
    requests.post(
        zipkin_url,
        data=encoded_span,
        headers={'Content-Type': 'application/x-thrift'}
    )
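Since transport_handler is just a callable that py_zipkin invokes with the already-encoded span payload, one option is a handler that appends the spans to a local file instead of (or in addition to) POSTing them to the collector. A minimal sketch, assuming a raw append-only log is acceptable (the file name is arbitrary):

def file_transport(encoded_span):
    # py_zipkin passes the encoded span bytes; append them to a log file
    # instead of sending them to the Zipkin collector.
    with open('zipkin_spans.log', 'ab') as f:
        f.write(encoded_span)
        f.write(b'\n')

Passing transport_handler=file_transport to zipkin_span then routes every span through this function.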
I am having issues with my API request to Flickr below. My function takes as input a list of 10 photo ids. However, when I print the data from my function, I only get information for one photo ID. Looking at the function below, any ideas on what may be causing only one photo ID's contents to print? Any help would be great.
for item in get_flickr_data(word)["photos"]["photo"]:
    photo_ids = item["id"].encode('utf-8')
    lst_photo_ids.append(photo_ids)
print lst_photo_ids
lst_photo_ids = ['34117701526', '33347528313', '34158745075', '33315997274', '33315996984', '34028007021', '33315995844', '33347512113', '33315784134', '34024299271']
def get_photo_data(lst_photo_ids):
    baseurl = "https://api.flickr.com/services/rest/"
    params_d = {}
    params_d["method"] = "flickr.photos.getInfo"
    params_d["format"] = "json"
    params_d["photo_id"] = photo_ids
    params_d["api_key"] = FLICKR_KEY
    unique_identifier = params_unique_combination(baseurl, params_d)
    if unique_identifier in CACHE_DICTION:
        flickr_data_diction = CACHE_DICTION[unique_identifier]
    else:
        resp = requests.get(baseurl, params_d)
        json_result_text = resp.text[14:-1]
        flickr_data_diction = json.loads(json_result_text)
        CACHE_DICTION[unique_identifier] = flickr_data_diction
        fileref = open(CACHE_FNAME, "w")
        fileref.write(json.dumps(CACHE_DICTION))
        fileref.close()
    return flickr_data_diction

print get_photo_data(photo_ids)
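For reference, here is a sketch that requests each id in the list in turn; note that get_photo_data as written fills params_d["photo_id"] from the module-level photo_ids (the last id assigned in the loop above) rather than from the lst_photo_ids argument it receives. The caching is omitted for brevity, and nojsoncallback=1 asks Flickr for plain JSON so the [14:-1] slicing is unnecessary:

def get_photo_data_all(lst_photo_ids):
    baseurl = "https://api.flickr.com/services/rest/"
    results = []
    for photo_id in lst_photo_ids:
        params_d = {
            "method": "flickr.photos.getInfo",
            "format": "json",
            "nojsoncallback": 1,  # plain JSON, no jsonFlickrApi(...) wrapper
            "photo_id": photo_id,
            "api_key": FLICKR_KEY,
        }
        resp = requests.get(baseurl, params_d)
        results.append(json.loads(resp.text))
    return results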
I'm trying to upload an image into an S3 bucket using boto. After the image has successfully uploaded, I want to perform a certain operation using the file URL of the image in the S3 bucket. The problem is that sometimes the image doesn't upload fast enough, and I end up with a server error when I perform the operation that depends on the file URL of the image.
This is my source code. I'm using Python Flask.
def search_test(consumer_id):
    consumer = session.query(Consumer).filter_by(consumer_id=consumer_id).one()
    products = session.query(Product).all()
    product_dictionary = {'Products': [p.serialize for p in products]}
    if request.method == 'POST':
        p_product_image_url = request.files['product_upload_url']
        s3 = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        bucket = s3.get_bucket(AWS_BUCKET_NAME)
        k = Key(bucket)
        if p_product_image_url and allowed_file(p_product_image_url.filename):
            # Read the contents of the file
            file_content = p_product_image_url.read()
            # Use Boto to upload the file to S3
            k.set_metadata('Content-Type', mimetypes.guess_type(p_product_image_url.filename))
            k.key = secure_filename(p_product_image_url.filename)
            k.set_contents_from_string(file_content)
            print ('consumer search upload successful')
            new_upload = Uploads(picture_upload_url=k.key.replace(' ', '+'), consumer=consumer)
            session.add(new_upload)
            session.commit()
            new_result = jsonify(Result=perform_actual_search(amazon_s3_base_url + k.key.replace(' ', '+'),
                                                              product_dictionary))
            return new_result
    else:
        return render_template('upload_demo.html', consumer_id=consumer_id)
The jsonify call needs a valid image URL to perform the operation. It works sometimes and sometimes it doesn't; I suspect this is because the image has not finished uploading by the time that line of code executes.
The perform_actual_search method is as follows:
def get_image_search_results(image_url):
    global description
    url = ('http://style.vsapi01.com/api-search/by-url/?apikey=%s&url=%s' % (just_visual_api_key, image_url))
    h = httplib2.Http()
    response, content = h.request(url, 'GET')  # alternatively: content = h.request(url, 'GET')[1]
    result = json.loads(content)
    result_dictionary = []
    for i in range(0, 10):
        if result:
            try:
                if result['errorMessage']:
                    result_dictionary = []
            except:
                pass
            if result['images'][i]:
                images = result['images'][i]
                jv_img_url = images['imageUrl']
                title = images['title']
                try:
                    if images['description']:
                        description = images['description']
                    else:
                        description = "no description"
                except:
                    pass
                # print("\njv_img_url: %s,\ntitle: %s,\ndescription: %s\n\n" % (
                #     jv_img_url, title, description))
                image_info = {
                    'image_url': jv_img_url,
                    'title': title,
                    'description': description,
                }
                result_dictionary.append(image_info)
    if result_dictionary != []:
        # for i in range(len(result_dictionary)):
        #     print (result_dictionary[i])
        #     print("\n\n")
        return result_dictionary
    else:
        return []
def performSearch(jv_input_dictionary, imagernce_products_dict):
    print jv_input_dictionary
    print imagernce_products_dict
    global common_desc_ratio
    global isReady
    image_search_results = []
    if jv_input_dictionary != []:
        for i in range(len(jv_input_dictionary)):
            print jv_input_dictionary[i]
            for key in jv_input_dictionary[i]:
                if key == 'description':
                    input_description = jv_input_dictionary[i][key]
                    s1w = re.findall('\w+', input_description.lower())
                    s1count = Counter(s1w)
                    print input_description
                    for j in imagernce_products_dict:
                        if j == 'Products':
                            for q in range(len(imagernce_products_dict['Products'])):
                                for key2 in imagernce_products_dict['Products'][q]:
                                    if key2 == 'description':
                                        search_description = imagernce_products_dict['Products'][q]['description']
                                        print search_description
                                        s2w = re.findall('\w+', search_description.lower())
                                        s2count = Counter(s2w)
                                        # Commonality magic
                                        common_desc_ratio = difflib.SequenceMatcher(None, s1w, s2w).ratio()
                                        print('Common ratio is: %.2f' % common_desc_ratio)
                                        if common_desc_ratio > 0.09:
                                            image_search_results.append(imagernce_products_dict['Products'][q])
    if image_search_results:
        print image_search_results
        return image_search_results
    else:
        return {'404': 'No retailers registered with us currently own this product.'}

def perform_actual_search(image_url, imagernce_product_dictionary):
    return performSearch(get_image_search_results(image_url), imagernce_product_dictionary)
Any help solving this would be greatly appreciated.
I would configure S3 to generate notifications on events such as s3:ObjectCreated:*.
Notifications can be posted to an SNS topic, an SQS queue, or can directly trigger a Lambda function.
More details about S3 notifications: http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
You should rewrite your code to separate the upload part from the image-processing part. The latter can be implemented as a Lambda function in Python.
Working asynchronously is key here; writing blocking code is usually not scalable.
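As a rough illustration (the handler and helper names here are hypothetical, not from the question), a Python Lambda subscribed to that event can recover the bucket and key from the event record, at which point the object is guaranteed to exist:

def lambda_handler(event, context):
    # Invoked by S3 on s3:ObjectCreated:*; one event can carry several records.
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        key = record['s3']['object']['key']
        image_url = 'https://%s.s3.amazonaws.com/%s' % (bucket, key)
        process_uploaded_image(image_url)  # hypothetical: the search/processing step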
You can compare the bytes written to S3 with the file size. Let's say you use the following method to write to S3:
bytes_written = key.set_contents_from_file(file_binary, rewind=True)
In your case it's set_contents_from_string.
Then I would compare bytes_written with p_product_image_url.seek(0, os.SEEK_END); if they match, the whole file has been uploaded to S3.
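Adapted to the set_contents_from_string call in the question (assuming it returns the byte count the way set_contents_from_file does), a sketch of that check; comparing against the length of the bytes already read avoids seeking the upload stream:

file_content = p_product_image_url.read()
bytes_written = k.set_contents_from_string(file_content)
if bytes_written == len(file_content):
    # The whole object is on S3; safe to build the URL and run the search.
    new_result = jsonify(Result=perform_actual_search(
        amazon_s3_base_url + k.key.replace(' ', '+'), product_dictionary))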
When the user enters http://example2.com:5500/?param=x the code below generates a data.csv file and serves it to the browser. It works perfectly like this.
However, I have deployed it behind an API proxy, so that the user makes a call to http://example1.com/?param=x which is internally transformed into http://example2.com:5500/?param=x.
As a result, instead of serving data.csv to the browser as before, the browser displays all of the data.csv content. The view-source feature shows exactly what data.csv should contain, without any HTML headers, just the data.csv content, but it is not being served as an attachment. Any ideas?
from flask import make_response

@app.route('/', methods=['GET'])
def get_file():
    alldata = []
    new_data = api.timeline()  # initial page; the original loop read new_data before assigning it
    while len(new_data) > 0:
        alldata.extend(new_data)
        oldest = alldata[-1].id - 1
        new_data = api.timeline(max_id=oldest)
    outdata = ""
    for data in alldata:
        outdata += ",".join(data) + "\n"
    response = make_response(outdata)
    response.headers["Content-Disposition"] = "attachment; filename=data.csv"
    return response

if __name__ == '__main__':
    app.run(host=app.config['HOST'], port=app.config['PORT'])
EDIT: Included the mapping code that transforms a request to example1.com into one to example2.com (secret_url).
# This is example1.com
@app.route("/api/<projectTitle>/<path:urlSuffix>", methods=['GET'])
def projectTitlePage(projectTitle, urlSuffix):
    projectId = databaseFunctions.getTitleProjectId(projectTitle)
    projectInfo = databaseFunctions.getProjectInfo(projectId)
    redirectionQueryString = re.sub('apikey=[^&]+&?', '', request.query_string).rstrip('&')
    redirectionUrl = projectInfo['secretUrl'].rstrip('/')
    if urlSuffix is not None:
        redirectionUrl += '/' + urlSuffix.rstrip('/')
    redirectionUrl += '/?' + redirectionQueryString
    redirectionHeaders = request.headers
    print request.args.to_dict(flat=False)
    try:
        r = requests.get(redirectionUrl, data=request.args.to_dict(flat=False), headers=redirectionHeaders)
    except Exception, e:
        return '/error=Error: bad secret url: ' + projectInfo.get('secretUrl')
    return r.text
Your homegrown proxy is not returning headers back to the application. Try this:
#app.route("/api/<projectTitle>/<path:urlSuffix>", methods=['GET'])
def projectTitlePage(projectTitle, urlSuffix):
# ...
return r.text, r.status_code, r.headers
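One caveat to hedge: r.headers can include hop-by-hop and encoding headers (Content-Encoding, Transfer-Encoding, Content-Length) that no longer match the body once requests has decoded it. A sketch that strips those before returning:

EXCLUDED_HEADERS = {'content-encoding', 'transfer-encoding', 'content-length', 'connection'}

@app.route("/api/<projectTitle>/<path:urlSuffix>", methods=['GET'])
def projectTitlePage(projectTitle, urlSuffix):
    # ...
    headers = {k: v for k, v in r.headers.items()
               if k.lower() not in EXCLUDED_HEADERS}
    return r.text, r.status_code, headers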
I'm using the csv module in Python to create a download from one of the datastore tables in Google App Engine. The download works alright, but you have to manually add an extension so that you can open it in Excel. I can't figure out how to modify the response so that the downloaded file has a .csv extension. I could leave it like this; however, this web app is meant for a broad audience, so I wanted to make it as easy as possible for them to use.
class fuCheckUp(webapp2.RequestHandler):
    def get(self):
        schedule_query = emailSchedule.all()
        follow_up_num = schedule_query[0].follow_up_num
        email_job_query = emailJobs.all()
        email_job_query.order('consent_date')
        header_tuple = ('last_modified', 'trigger_id', 'recipient_id', 'test_data', 'unsubscribe', 'start_date_local', 'consent_date', 'fu_period', 'last_fu_sent')
        data_tuples = ()
        variable_list = []
        for i in range(1, follow_up_num + 1):
            i = str(i)
            fu_due = 'fu' + i
            fu_sent = 'fu' + i + '_email_sent'
            variable_list.append(fu_due)
            variable_list.append(fu_sent)
            data_tuples = data_tuples + (fu_due, fu_sent)
        final_data_tuple = header_tuple + data_tuples
        data = [final_data_tuple]
        for part in email_job_query:
            last_modified = str(part.last_modified)
            trigger_id = str(part.trigger_id)
            recipient_id = str(part.recipient_id)
            test_data = str(part.test_data)
            unsubscribed = str(part.unsubscribed)
            start_date_local = str(part.start_date_local)
            consent_date = str(part.consent_date)
            fu_period = str(part.fu_period)
            last_fu_sent = str(part.last_fu_sent)
            var_list = []
            for var in variable_list:
                fu_var = getattr(part, var)
                var_list.append(str(fu_var))
            var_tuple = tuple(var_list)
            fixed_tuple = (last_modified, trigger_id, recipient_id, test_data, unsubscribed, start_date_local, consent_date, fu_period, last_fu_sent)
            csv_tuple = fixed_tuple + var_tuple
            data.append(csv_tuple)
        self.response.headers['Content-Type'] = 'application/csv'
        writer = csv.writer(self.response.out)
        for item in data:
            writer.writerow(item)
Add another response header, like this:
Content-Disposition: attachment; filename=example.csv
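In the webapp2 handler from the question that looks like this (text/csv is the more standard MIME type, though application/csv also works in practice):

self.response.headers['Content-Type'] = 'text/csv'
self.response.headers['Content-Disposition'] = 'attachment; filename=example.csv'
writer = csv.writer(self.response.out)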