running parallel process on page submit in django - python

I want to create a simple website for PNR checks. The code works, but it only performs one task at a time: it either renders the result to another page or sends the mail. When I use a thread to send the mail in the background, the page keeps loading until the mail is sent.
Any suggestions would be appreciated. I also want to run this on Google App Engine, so I haven't tried Celery.
from django.http import HttpResponse
from bs4 import BeautifulSoup
import re
import requests
from django.shortcuts import render
from functools import partial, wraps
from django.core.mail import send_mail
import time
import thread
def checkpnr(request):
    return render(request, 'checkpnr.html')
def check(string, pnr, email, sleeptime, lock, *args):
    # Background worker: polls the PNR page, then mails the status.
    while 1:
        # entering critical section
        lock.acquire()
        # time1 = request.get_all("notif")
        url_pnr = "pnr url"
        r = requests.get(url_pnr)
        data = r.text
        soup = BeautifulSoup(data)
        train = str(soup.find("ul", attrs={"class": "train_info"}))
        train_number = soup.find("li", attrs={"class": "first"}).text
        source = str(soup.find("travellers"))
        route = str(soup.findAll("li")[1]).replace(
            '<li>', '').replace('</li>', '')
        # head, sep, tail = route.partition(' -')
        travel_date = str(soup.findAll("li")[2].text)
        date, sep, total = travel_date.partition('|')
        rows = soup.findAll("td", attrs={"class": "pax"})
        rowlength = len(rows)
        chart_status = str(soup.findAll("tr")[rowlength + 1].findAll("td")[0]).replace(
            '<td colspan="3"><strong>', '').replace('</strong>', '').replace('</td>', '')
        passengers = []
        status = []
        coach = []
        tot = []
        w = ''
        i = 1
        j = 1
        while i <= rowlength:
            j = str(soup.findAll("tr")[i].findAll(
                "td")[0].text).replace(':', '')
            passengers.append(j)
            s = str(soup.findAll("tr")[i].findAll("td")[1].text)
            w = w + ',' + s
            status.append(s)
            c = str(soup.findAll("tr")[i].findAll("td")[2].text)
            coach.append(c)
            tot.append(i)
            i += 1
        time.sleep(sleeptime)
        emailMsg = ', '.join(status)  # send_mail needs a string, not a list
        subject = pnr + '-' + w
        send_mail(
            subject, emailMsg, 'email-from',
            [email], fail_silently=False)
        lock.release()
        if (status[rowlength - 1] == "CONFIRMED"):
            time.sleep(sleeptime)
        else:
            time.sleep(1000000000000000000000000)
def fetch(request):
    pnr = request.POST['pnr']
    if len(pnr) != 10:
        msg = "PNR must be of 10 digits ..."
        return render(request, 'checkpnr.html', {'msg': msg})
    email = request.POST['email']
    e = request.POST['ntime']
    if (e != ''):
        n_time = int(e)
        n = request.POST['notify']
        if (n != ''):
            notify = int(n)
            sleeptim = notify * n_time
    sleeptime = 10
    # time1 = request.get_all("notif")
    url_pnr = "pnr url"
    try:
        r = requests.get(url_pnr)
        data = r.text
        soup = BeautifulSoup(data)
        train = str(soup.find("ul", attrs={"class": "train_info"}))
        train_number = soup.find("li", attrs={"class": "first"}).text
        source = str(soup.find("travellers"))
        route = str(soup.findAll("li")[1]).replace(
            '<li>', '').replace('</li>', '')
        # head, sep, tail = route.partition(' -')
        travel_date = str(soup.findAll("li")[2].text)
        date, sep, total = travel_date.partition('|')
        rows = soup.findAll("td", attrs={"class": "pax"})
        rowlength = len(rows)
        chart_status = str(soup.findAll("tr")[rowlength + 1].findAll("td")[0]).replace(
            '<td colspan="3"><strong>', '').replace('</strong>', '').replace('</td>', '')
        passengers = []
        status = []
        coach = []
        tot = []
        w = ''
        i = 1
        j = 1
        while i <= rowlength:
            j = str(soup.findAll("tr")[i].findAll(
                "td")[0].text).replace(':', '')
            passengers.append(j)
            s = str(soup.findAll("tr")[i].findAll("td")[1].text)
            w = w + ',' + s
            status.append(s)
            c = str(soup.findAll("tr")[i].findAll("td")[2].text)
            coach.append(c)
            tot.append(i)
            i += 1
        msg = "Mail not Sent"
        msg1 = ''
        if (email != ''):
            emailMsg = ', '.join(status)  # send_mail needs a string, not a list
            subject = pnr + '-' + w
            send_mail(
                subject, emailMsg, 'ashutosh8nitjsr@gmail.com',
                [email], fail_silently=False)
            msg = "mail sent.."
        # note: under Django __name__ is the module path, so this block
        # never runs inside the view
        if __name__ == "__main__":
            lock = thread.allocate_lock()
            thread.start_new_thread(
                check, ("Thread No:1", pnr, email, sleeptime, lock))
            msg1 = "thread created"
            time.sleep(sleeptime)
            while 1:
                pass
        detail2 = {
            'train_number': train_number, 'route': route, 'date': date,
            'chart_status': chart_status, 'tot': tot,
            'passengers': passengers, 'status': status, 'coach': coach,
            'msg': msg}
        return render(request, 'status.html', detail2)
    except:
        msg = "there was error. please try again..."
        return render(request, 'checkpnr.html', {'msg': msg})

You can try using Task Queues for this purpose on App Engine.
Task Queue API
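For a concrete picture, here is a minimal sketch of that approach on the first-generation App Engine Python runtime: the view enqueues the slow mail work and returns immediately, and a separate worker handler does the mailing outside the user-facing request. The /send-pnr-mail URL and the send_pnr_mail view are made up for illustration; taskqueue.add and the Django pieces are real APIs.

# views.py (sketch) - enqueue the slow work, render the page at once
from django.http import HttpResponse
from django.shortcuts import render
from django.core.mail import send_mail
from google.appengine.api import taskqueue  # first-gen GAE runtime

def fetch(request):
    pnr = request.POST['pnr']
    email = request.POST['email']
    # Hand the job to a task queue; this request returns without waiting.
    taskqueue.add(url='/send-pnr-mail',  # hypothetical worker URL in urls.py
                  params={'pnr': pnr, 'email': email})
    return render(request, 'status.html', {'msg': 'Status will be mailed.'})

def send_pnr_mail(request):
    # Worker view mapped to /send-pnr-mail; App Engine POSTs the task here,
    # so the scraping and mailing from check() above would live in this view.
    pnr = request.POST['pnr']
    email = request.POST['email']
    status = 'fetch and parse the PNR page here, as in check() above'
    send_mail('PNR ' + pnr, status, 'from@example.com', [email],
              fail_silently=False)
    return HttpResponse('ok')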

Related

python script to run splunk query and get output as text

When I run the code below it executes, but I do not get the correct value; I'm expecting a single value like 492. I also tried the Splunk library but was unable to use it. Any help is appreciated.
import urllib
import httplib2  # import library
import json
import pprint
import time
import re
from xml.dom import minidom

searchquery = 'search index="movable_in" sourcetype="movable:in:assets" | stats avg(exposure_score)'
myhttp = httplib2.Http()
baseurl = 'https://xxxx.splunkxxx.com:8089'
usernamesp = 'xxxx'
passwordsp = 'xxxx'

def get_splunk_result(searchquery):
    # Step 1: Get a session key
    servercontent = myhttp.request(f'{baseurl}/services/auth/login', 'POST', headers={},
                                   body=urllib.parse.urlencode({'username': usernamesp, 'password': passwordsp}))[1]
    sessionkey = minidom.parseString(servercontent).getElementsByTagName('sessionKey')[0].childNodes[0].nodeValue
    # print("====>sessionkey: %s <====" % sessionkey)
    sid = ''
    # ------------------
    if not searchquery.startswith('search'):
        searchquery = f'search {searchquery}'
    # Step 2: Get a sid with the search query
    i = 0
    while True:
        time.sleep(1)
        try:
            searchjob = myhttp.request(f'{baseurl}/services/search/jobs', 'POST',
                                       headers={'Authorization': 'Splunk %s' % sessionkey},
                                       body=urllib.parse.urlencode({'search': searchquery}))[1]
            sid = minidom.parseString(searchjob).getElementsByTagName('sid')[0].childNodes[0].nodeValue
            break
        except:
            i = i + 1
            # print(i)
            if i > 30:
                break
    # print("====>SID: %s <====" % sid)
    # Step 3: Get search status
    myhttp.add_credentials(usernamesp, passwordsp)
    servicessearchstatusstr = '/services/search/jobs/%s/' % sid
    isnotdone = True
    while isnotdone:
        searchstatus = myhttp.request(f'{baseurl}{servicessearchstatusstr}', 'GET')[1]
        isdonestatus = re.compile('isDone">(0|1)')
        strstatus = str(searchstatus)
        isdonestatus = isdonestatus.search(strstatus).groups()[0]
        if isdonestatus == '1':
            isnotdone = False
    # Step 4: Get the search result
    services_search_results_str = '/services/search/jobs/%s/results?output_mode=json_rows&count=0' % sid
    searchresults = myhttp.request(f'{baseurl}{services_search_results_str}', 'GET')[1]
    searchresults = json.loads(searchresults)
    # searchresults = splunk_result(searchresults)
    return searchresults

output = get_splunk_result(searchquery)
print(output)
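If the job does complete, output_mode=json_rows should hand back a dict with "fields" and "rows" keys, so a single-cell stats result has to be dug out of rows rather than printed whole. A small sketch of that last step, assuming the response shape described above:

# output is the parsed json_rows payload: {"fields": [...], "rows": [[...]]}
output = get_splunk_result(searchquery)
rows = output.get('rows', [])
if rows:
    # stats avg(exposure_score) yields one row with one cell, e.g. '492.0'
    print(float(rows[0][0]))
else:
    # empty rows usually means the sid was never set or the job failed
    print('no rows returned:', output.get('messages'))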

JSONDecodeError: Extra data: line 1 column 8 (char 7)

I've followed a tutorial to scrape a Facebook profile and I keep getting this error:
JSONDecodeError: Extra data: line 1 column 8 (char 7)
Does anyone know what the problem might be?
Here is my Python script:
import json
import logging
import time
from collections import OrderedDict

import requests
from bs4 import BeautifulSoup

def get_bs(session, url):
    # Makes a GET request using the given Session object and returns a BeautifulSoup object.
    r = None
    while True:
        r = session.get(url)
        if r.ok:
            break
    return BeautifulSoup(r.text, 'lxml')
# To login
def make_login(session, base_url, credentials):
    # Returns a Session object logged in with credentials.
    login_form_url = '/login/device-based/regular/login/?refsrc=https%3A'\
        '%2F%2Fmobile.facebook.com%2Flogin%2Fdevice-based%2Fedit-user%2F&lwv=100'
    params = {'email': credentials['email'], 'pass': credentials['pass']}
    while True:
        time.sleep(3)
        logged_request = session.post(base_url+login_form_url, data=params)
        if logged_request.ok:
            logging.info('[*] Logged in.')
            break

# Crawling FB
def crawl_profile(session, base_url, profile_url, post_limit):
    # Goes to profile URL, crawls it and extracts posts URLs.
    profile_bs = get_bs(session, profile_url)
    n_scraped_posts = 0
    scraped_posts = list()
    posts_id = None
    while n_scraped_posts < post_limit:
        try:
            posts_id = 'recent'
            posts = profile_bs.find('div', id=posts_id).div.div.contents
        except Exception:
            posts_id = 'structured_composer_async_container'
            posts = profile_bs.find('div', id=posts_id).div.div.contents
        posts_urls = [a['href'] for a in profile_bs.find_all('a', text='Full Story')]
        for post_url in posts_urls:
            # print(post_url)
            try:
                post_data = scrape_post(session, base_url, post_url)
                scraped_posts.append(post_data)
            except Exception as e:
                logging.info('Error: {}'.format(e))
            n_scraped_posts += 1
            if posts_completed(scraped_posts, post_limit):
                break
        show_more_posts_url = None
        if not posts_completed(scraped_posts, post_limit):
            show_more_posts_url = profile_bs.find('div', id=posts_id).next_sibling.a['href']
            profile_bs = get_bs(session, base_url+show_more_posts_url)
            time.sleep(3)
        else:
            break
    return scraped_posts
def get_bs(session, url):
    # Makes a GET request using the given Session object and returns a BeautifulSoup object.
    r = None
    while True:
        r = session.get(url)
        time.sleep(3)
        if r.ok:
            break
    return BeautifulSoup(r.text, 'lxml')

# Scraping FB
def scrape_post(session, base_url, post_url):
    # Goes to post URL and extracts post data.
    post_data = OrderedDict()
    post_bs = get_bs(session, base_url+post_url)
    time.sleep(5)
    # Here we populate the OrderedDict object
    post_data['url'] = post_url
    # Find post main element
    try:
        post_text_element = post_bs.find('div', id='u_0_0').div
        string_groups = [p.strings for p in post_text_element.find_all('p')]
        strings = [repr(string) for group in string_groups for string in group]
        post_data['text'] = strings
    except Exception:
        post_data['text'] = []
    # Extract post media URL
    try:
        post_data['media_url'] = post_bs.find('div', id='u_0_0').find('a')['href']
    except Exception:
        post_data['media_url'] = ''
    # Extract remaining data
    try:
        post_data['comments'] = extract_comments(session, base_url, post_bs, post_url)
    except Exception:
        post_data['comments'] = []
    return dict(post_data)
# Functions for the profile URLs and credentials for FB
def json_to_obj(filename):
    # Extracts data from a JSON file and loads it into a Python object
    obj = None
    with open(filename) as json_file:
        obj = json.loads(json_file.read())
    return obj

def save_data(data):
    # Converts data to JSON.
    with open('profile_posts_data.json', 'w') as json_file:
        json.dump(data, json_file, indent=4)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    base_url = 'https://mobile.facebook.com'
    session = requests.session()
    # Extract credentials for the login and all of the profile URLs to scrape
    credentials = json_to_obj(r"C:\Users\E7450\Desktop\GIS702\FBScrapping\credentials.json")
    profiles_urls = json_to_obj(r"C:\Users\E7450\Desktop\GIS702\FBScrapping\profiles_urls.json")
    make_login(session, base_url, credentials)
    posts_data = None
    for profile_url in profiles_urls:
        posts_data = crawl_profile(session, base_url, profile_url, 25)
    logging.info('[!] Scraping finished. Total: {}'.format(len(posts_data)))
    logging.info('[!] Saving.')
    save_data(posts_data)
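"Extra data" from json.loads means a complete JSON value was parsed and text was left over after it, which points at json_to_obj reading credentials.json or profiles_urls.json rather than at the scraping code itself. A small diagnostic sketch using the standard library's raw_decode, which reports where the first value ends (diagnose_json_file is a made-up helper name):

import json

def diagnose_json_file(filename):
    # raw_decode parses one JSON value and returns (value, end_index),
    # so anything after end_index is the "extra data" json.loads rejects.
    with open(filename) as f:
        text = f.read().lstrip()  # raw_decode does not skip leading whitespace
    value, end = json.JSONDecoder().raw_decode(text)
    leftover = text[end:].strip()
    if leftover:
        print('Extra data starts at char {}: {!r}'.format(end, leftover[:40]))
    else:
        print('File contains a single valid JSON value.')

diagnose_json_file('credentials.json')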

How do I get my Telegram bot to acknowledge previously seen messages?

Problem:
My Telegram bot doesn't recognise messages it has already seen, and it keeps responding to the latest message until I send "quit" or press Ctrl-C on the command line.
I'm completely new to Python, so there may be a flaw in my programming logic.
In 'for last_update_id in updates["result"]' I tried to add 1 to the last_update_id variable after each loop, but the variable doesn't seem to update.
# chatbot.py not included. It trains NN model.
import json
import requests
import time
import urllib
import telegram

TOKEN = "xxx"
URL = "https://api.telegram.org/bot{}/".format(TOKEN)

def get_url(url):
    response = requests.get(url)
    content = response.content.decode("utf8")
    return content

def get_json_from_url(url):
    content = get_url(url)
    js = json.loads(content)
    return js

def get_updates(offset):  # gets json file from URL
    url = URL + "getUpdates"
    if offset:
        url += "?offset={}".format(offset)
    js = get_json_from_url(url)
    return js

def get_last_update_id(updates):
    update_ids = []
    for update in updates["result"]:
        update_ids.append(int(update["update_id"]))
    return max(update_ids)

def get_last_chat_text(updates):
    num_updates = len(updates["result"])
    last_update = num_updates - 1
    text = updates["result"][last_update]["message"]["text"]  # text input
    return text

def get_last_chat_id(updates):
    chat_id = updates["result"][-1]["message"]["chat"]["id"]
    return chat_id

def send_message(output, chat_id):
    bot = telegram.Bot(token=TOKEN)
    bot.sendMessage(chat_id=chat_id, text=output)

def main():
    input_text = get_last_chat_text(updates)
    return input_text

print("Let's chat! (type 'quit' to exit)")
last_update_id = None
while True:
    updates = get_updates(last_update_id)  # returns json file
    last_update_id = get_last_update_id(updates)  # returns max_update_id
    for last_update_id in updates["result"]:
        main()
        input_text = main()
        if input_text == "quit":
            break
        input_text = tokenize(input_text)
        X = bag_of_words(input_text, all_words)
        X = X.reshape(1, X.shape[0])
        X = torch.from_numpy(X).to(device)
        output = model(X)
        _, predicted = torch.max(output, dim=1)
        tag = tags[predicted.item()]
        probs = torch.softmax(output, dim=1)
        prob = probs[0][predicted.item()]
        if prob.item() > 0.75:
            for intent in intents['intents']:
                if tag == intent["tag"]:
                    output = f"{random.choice(intent['responses'])}"
        else:
            output = f"{bot_name}: I do not understand..."
        print(output)
        chat_id = get_last_chat_id(updates)
        print(chat_id)
        send_message(output, chat_id)
        time.sleep(0.1)
        last_update_id =+ 1  # returns max_id in the json file and adds 1
        continue
I managed to fix this problem by adding a break in the inner loop so control returns to the outer while loop. Below is the edited code:
# chatbot.py module imported above this line not included. It trains NN model.
import json
import requests
import time
import urllib
import telegram

TOKEN = "XXX"
URL = "https://api.telegram.org/bot{}/".format(TOKEN)

def get_url(url):
    response = requests.get(url)
    content = response.content.decode("utf8")
    return content

def get_json_from_url(url):
    content = get_url(url)
    js = json.loads(content)
    return js

def get_updates(offset):  # gets json file from URL
    url = URL + "getUpdates"
    if offset:
        url += "?offset={}".format(offset)
    js = get_json_from_url(url)
    return js

def get_last_update_id(updates):
    update_ids = []
    for update in updates["result"]:
        update_ids.append(update["update_id"])
    return max(update_ids, default=last_update_id)

def get_last_chat_text(updates):
    # num_updates = len(updates["result"])
    # last_update = num_updates - 1
    text = updates["result"][-1]["message"]["text"]  # text input
    return text

def get_last_chat_id(updates):
    chat_id = updates["result"][-1]["message"]["chat"]["id"]
    return chat_id

def send_message(output, chat_id):
    bot = telegram.Bot(token=TOKEN)
    bot.sendMessage(chat_id=chat_id, text=output)

def main():
    input_text = get_last_chat_text(updates)
    return input_text

bot_name = "XXX"
print("Let's chat! (type 'quit' to exit)")
last_update_id = 0
while True:
    updates = get_updates(last_update_id)  # returns json file
    for last_update_id in updates["result"]:
        main()
        input_text = main()
        if input_text == "quit":
            break
        input_text = tokenize(input_text)
        X = bag_of_words(input_text, all_words)
        X = X.reshape(1, X.shape[0])
        X = torch.from_numpy(X).to(device)
        output = model(X)
        _, predicted = torch.max(output, dim=1)
        tag = tags[predicted.item()]
        probs = torch.softmax(output, dim=1)
        prob = probs[0][predicted.item()]
        if prob.item() > 0.75:
            for intent in intents['intents']:
                if tag == intent["tag"]:
                    output = f"{random.choice(intent['responses'])}"
        else:
            output = f"{bot_name}: I do not understand..."
        print(output)
        chat_id = get_last_chat_id(updates)
        print(chat_id)
        send_message(output, chat_id)
        time.sleep(0.1)
        break
    last_update_id = get_last_update_id(updates) + 1  # max update_id in the json plus 1
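For comparison, the way getUpdates is usually driven is to advance the offset to the last seen update_id + 1 on every call; Telegram then treats everything up to that id as acknowledged and stops resending it. A minimal sketch of that loop, reusing get_updates and send_message from above (handle_text is a hypothetical stand-in for the model code):

last_update_id = None
while True:
    updates = get_updates(last_update_id)  # long-poll for new messages
    for update in updates["result"]:
        # Advancing the offset past this update acknowledges it, so it
        # will not be returned again on the next getUpdates call.
        last_update_id = update["update_id"] + 1
        message = update.get("message")
        if message and "text" in message:
            reply = handle_text(message["text"])  # hypothetical NN wrapper
            send_message(reply, message["chat"]["id"])
    time.sleep(0.5)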

python tinder bot won't send messages - networking error

I'm putting together a Python Tinder bot that auto-swipes right, and I also want it to send messages. Everything works properly except that when it matches, it should send a message but doesn't. I've tried every possible combination of code I can think of, but to no avail. Any help with figuring this out would be greatly appreciated by me and all my future Tinder matches (if I even get any).
The code calls a function called talk(user_id) that should perform a POST curl request.
Code:
# encoding: utf8
import argparse
from datetime import datetime
import json
from random import randint
import requests
import sys
from time import sleep

headers = {
    'app_version': '519',
    'platform': 'ios',
}
fb_id = 'XXXXXXXXXXXXX'
fb_auth_token = 'XXXXXXXXXXXX'

class User(object):
    def __init__(self, data_dict):
        self.d = data_dict

    @property
    def user_id(self):
        return self.d['_id']

    @property
    def name(self):
        return self.d['name']

    @property
    def ago(self):
        raw = self.d.get('ping_time')
        if raw:
            d = datetime.strptime(raw, '%Y-%m-%dT%H:%M:%S.%fZ')
            secs_ago = int(datetime.now().strftime("%s")) - int(d.strftime("%s"))
            if secs_ago > 86400:
                return u'{days} days ago'.format(days=secs_ago / 86400)
            elif secs_ago < 3600:
                return u'{mins} mins ago'.format(mins=secs_ago / 60)
            else:
                return u'{hours} hours ago'.format(hours=secs_ago / 3600)
        return '[unknown]'

    @property
    def bio(self):
        try:
            x = self.d['bio'].encode('ascii', 'ignore').replace('\n', '')[:50].strip()
        except (UnicodeError, UnicodeEncodeError, UnicodeDecodeError):
            return '[garbled]'
        else:
            return x

    @property
    def age(self):
        raw = self.d.get('birth_date')
        if raw:
            d = datetime.strptime(raw, '%Y-%m-%dT%H:%M:%S.%fZ')
            return datetime.now().year - int(d.strftime('%Y'))
        return 0

    def __unicode__(self):
        return u'{name} ({age}), {distance}km, {ago}'.format(
            name=self.d['name'],
            age=self.age,
            distance=self.d['distance_mi'],
            ago=self.ago
        )

def auth_token(fb_auth_token, fb_user_id):
    h = headers
    h.update({'content-type': 'application/json'})
    req = requests.post(
        'https://api.gotinder.com/auth',
        headers=h,
        data=json.dumps({'facebook_token': fb_auth_token, 'facebook_id': fb_user_id})
    )
    try:
        return req.json()['token']
    except:
        return None

def recommendations(auth_token):
    h = headers
    h.update({'X-Auth-Token': auth_token})
    r = requests.get('https://api.gotinder.com/user/recs', headers=h)
    if r.status_code == 401 or r.status_code == 504:
        raise Exception('Invalid code')
    print r.content
    if 'results' not in r.json():
        print r.json()
    for result in r.json()['results']:
        yield User(result)

def like(user_id):
    try:
        u = 'https://api.gotinder.com/like/%s' % user_id
        d = requests.get(u, headers=headers, timeout=0.7).json()
    except KeyError:
        raise
    else:
        return d['match']

def nope(user_id):
    try:
        u = 'https://api.gotinder.com/pass/%s' % user_id
        requests.get(u, headers=headers, timeout=0.7).json()
    except KeyError:
        raise

def like_or_nope():
    return 'nope' if randint(1, 100) == 31 else 'like'

def talk(user_id):
    try:
        u = 'https://api.gotinder.com/user/matches/%s' % user_id
        requests.post(
            u,
            data=json.dumps({'message': 'Hey! How are you?'})
        )
    except KeyError:
        raise

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Tinder automated bot')
    parser.add_argument('-l', '--log', type=str, default='activity.log', help='Log file destination')
    args = parser.parse_args()
    print 'Tinder bot'
    print '----------'
    matches = 0
    liked = 0
    nopes = 0
    while True:
        token = auth_token(fb_auth_token, fb_id)
        if not token:
            print 'could not get token'
            sys.exit(0)
        for user in recommendations(token):
            if not user:
                break
            print unicode(user)
            if user.name == 'Tinder Team':
                print('Out of swipes, pausing one hour...')
                sleep(3601)
            else:
                try:
                    action = like_or_nope()
                    if action == 'like':
                        print ' -> Like'
                        match = like(user.user_id)
                        if match:
                            print ' -> Match!'
                            conversation = talk(user.user_id)
                            if conversation:
                                print ' -> Message Sent!'
                            with open('./matched.txt', 'a') as m:
                                m.write(user.user_id + u'\n')
                        with open('./liked.txt', 'a') as f:
                            f.write(user.user_id + u'\n')
                    else:
                        print ' -> random nope :('
                        nope(user.user_id)
                except:
                    print 'networking error %s' % user.user_id
        s = float(randint(10000, 20000) / 1000)
        sleep(s)
Check whether the code below helps. I used the pynder package for dissecting the APIs, but I guess the logic remains the same.
session._post('/user/matches/' + match['id'], {"message": "Hey! How are you?"})
where pynder's post function works like this:
def _post(self, url, data={}):
    return self._request("post", url, data=data)
Try passing the data dict directly rather than serialising it yourself.
It works for me, perfectly fine.
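Translating that back to plain requests for the script above: note that talk() posts with no headers at all, while every other authenticated call sends X-Auth-Token. A hedged sketch of a version that authenticates the same way recommendations() does (whether the endpoint still accepts this payload is untested):

def talk(user_id, token):
    # Same match-message POST, but authenticated like /user/recs above.
    h = dict(headers)
    h.update({'X-Auth-Token': token, 'content-type': 'application/json'})
    u = 'https://api.gotinder.com/user/matches/%s' % user_id
    r = requests.post(u, headers=h,
                      data=json.dumps({'message': 'Hey! How are you?'}))
    return r.status_code == 200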

HTTP requests on localhost via python urllib and urllib2 are very slow

I have written a simple class using urllib and urllib2 to send HTTP requests and get the response. However, any request to localhost using its IP address is very slow.
The IP address of localhost here is 192.168.158.27.
import urllib2, urllib, re, datetime, time

class HTTPRequest():
    def __init__(self, **kargs):
        self._response = None
        self._buffer = None
        self._conn = urllib2.build_opener(urllib2.HTTPCookieProcessor())
        urllib2.install_opener(self._conn)

    def _encode_url(self, **kargs):
        try:
            params = urllib.urlencode(kargs)
        except:
            raise HTTPError("Failed to encode URL parameters..")
        return str(params)

    def _request(self, url=None, params=None):
        try:
            self._buffer = self._conn.open(url, params)
            self._response = self._buffer.read()
        except ValueError:
            raise HTTPError("Invalid URL %s" % url)
        except:
            raise HTTPError("Failed to send HTTP(s) Request")
        return str(self._response)

class HTTPError(Exception):
    pass

PARAM_PASSWORD = 'password'
PARAM_USER = 'userName'
PARAM_ACTION = 'a'
PARAM_RID = 'rid'
PARAM_XO = 'xo'
PARAM_START_TIME = 't1'
PARAM_END_TIME = 't2'
PARAM_PATH = 'path'
BOOLEAN_TRUE = 'true'
BOOLEAN_FALSE = 'false'
ACTION_SIGNIN = 'signIn'
ACTION_SEARCH = 'search'
ACTION_GET_NEXT_RESULTS = 'getNextResults'
STATUS_SUCCEEDED = 'succeeded'
DEFAULT_WAIT = 5

host = "192.168.158.27"
user = "admin"
password = "admin"
protocol = "https"
port = 8443
query = "vm[=name rx (?i) *]&[#cpuUsage rx b .+][#cpuUsagemhz rx b .+]"
start_time = "10/05/2013 16:16:00"
end_time = "10/05/2013 17:16:00"

base_url = "%s://%s:%d" % (protocol, host, port)
login_url = "%s/user" % base_url

http = HTTPRequest()
attributes = {PARAM_PASSWORD: password,
              PARAM_USER: user,
              PARAM_ACTION: ACTION_SIGNIN,
              PARAM_RID: 1000,
              PARAM_XO: BOOLEAN_TRUE}
params = http._encode_url(**attributes)
if not http._request(login_url, params):
    print "Login Failed.."
else:
    print "Login Successful.. \n"

rid = 1000
search_url = "%s/Search" % base_url
status = STATUS_SUCCEEDED
hasMoreData = BOOLEAN_TRUE
completed = BOOLEAN_FALSE
total = 0
processed = 1
responseContent = ""
xml_dict = {}
_response = ""
attributes = {PARAM_START_TIME: start_time,
              PARAM_END_TIME: end_time,
              PARAM_ACTION: ACTION_SEARCH,
              PARAM_RID: rid,
              PARAM_PATH: query}
print "URL PARAMETERS :"
print "\tBase url = %s" % base_url
for param in attributes:
    print "\t%s = %s" % (param, attributes[param])

# Query Execution Start Time
start = datetime.datetime.now()
while True:
    params = http._encode_url(**attributes)
    if hasMoreData == BOOLEAN_TRUE:
        # Delay 10ms (note: 10/1000 is integer division in Python 2, i.e. 0)
        time.sleep(10 / 1000.0)
        # Send HTTP Request
        response = http._request(search_url, params)
        pattern = re.match(".*?hasMoreData=\"(.*?)\".*?", response)
        if pattern:
            hasMoreData = pattern.group(1)
        pattern = re.match(".*?status=\"(.*?)\".*?", response)
        if pattern:
            status = pattern.group(1)
        pattern = re.match(".*?completed=\"(.*?)\".*?", response)
        if pattern:
            completed = pattern.group(1)
        pattern = re.match(".*?processed=\"(.*?)\".*?", response)
        if pattern:
            processed = pattern.group(1)
        pattern = re.match(".*?total=\"(.*?)\".*?", response)
        if pattern:
            total = pattern.group(1)
        pattern = re.match(".*?matched=\"(.*?)\".*?", response)
        if pattern:
            matched = pattern.group(1)
        attributes = {PARAM_ACTION: ACTION_GET_NEXT_RESULTS,
                      PARAM_RID: rid}
        if matched != "0":
            response = re.sub(r'\n', "", response)
            matchObj = re.search(r'(<Resource.*</Resource>)', response)
            resp_match = ""
            if matchObj:
                resp_match = matchObj.group(1)
            responseContent = str(responseContent) + str(resp_match)
    else:
        # Query Execution Completed
        # Query Execution End Time
        end = datetime.datetime.now()
        print "RESULTS : "
        print "\tStatus = %s" % status
        print "\tHas More Data = %s" % hasMoreData
        print "\tCompleted = %s" % completed
        print "\tProcessed = %s" % processed
        print "\tTotal = %s" % total
        print "\tMatched = %s" % matched
        print "\nQuery Execution Started : %s" % start
        print "Query Execution Ended : %s\n" % end
        if total != processed:
            err = "The number of records processed did not match"
            err += " the number of records completed."
            print err
        if not status == STATUS_SUCCEEDED:
            err = "The response status is not 'succeeded'"
            print err
        if completed == BOOLEAN_FALSE:
            err = "The response is completed. "
            err += "However, the flag is set to 'False'"
            print err
        break
Instead of your local network IP, try using 127.0.0.1.
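If switching the address is not an option, another thing worth ruling out is a proxy picked up from the environment (http_proxy/https_proxy), which urllib2 will route LAN requests through by default. A small sketch, assuming that is the cause: build the opener with an empty ProxyHandler so no proxy is ever consulted:

import urllib2

# An empty ProxyHandler disables proxy lookup entirely; with it, requests
# to 192.168.x.x go straight to the host instead of through any proxy
# configured in the environment.
opener = urllib2.build_opener(
    urllib2.ProxyHandler({}),
    urllib2.HTTPCookieProcessor(),
)
urllib2.install_opener(opener)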
