Python exception handling with selenium - python

I am new to Python and I am trying to write a nagios script which uses selenium to log into a webapp and print out some information. As of now the script works as expected but I would like it to alert the system if it fails to retrieve the website. Here is what I have
#!/usr/bin/env python
import sys
from selenium import webdriver
url = '<main web site>'
systemInformation = '<sys information site>'
logout = '<log out link>'
browser = webdriver.PhantomJS('<path to phantomjs for headless operation>')
login_username = '<username>'
login_password = '<password>'
try:
browser.get(url)
username = browser.find_element_by_name("username")
password = browser.find_element_by_name("password")
username.send_keys(login_username)
password.send_keys(login_password)
link = browser.find_element_by_name('loginbutton')
link.click()
browser.get(systemInformation)
print "OK: Web Application is Running"
for element in browser.find_elements_by_name('SystemReportsForm'):
print element.text
browser.get(logout)
browser.quit()
sys.exit(0)
except:
print "WARNING: Web Application is Down!"
sys.exit(2)
I would expect if that first section fails it would then go to the except section, however the script is printing out both the try and except even though there is an exit. I'm sure it's something simple I am missing.
Thanks in advance
Update
This is how I ended up resolving this issue, thanks for the help
#!/usr/bin/env python
import sys, urllib2
from selenium import webdriver
url = '<log in url>'
systemInformation = '<sys info url>'
logout = '<logout url>'
browser = webdriver.PhantomJS('<phantomjs location for headless browser>')
login_username = '<user>'
login_password = '<password>'
def login(login_url,status_url):
browser.get(login_url)
username = browser.find_element_by_name("username")
password = browser.find_element_by_name("password")
username.send_keys(login_username)
password.send_keys(login_password)
link = browser.find_element_by_name('loginbutton')
link.click()
browser.get(status_url)
if browser.title == 'Log In':
print "WARNING: Site up but Failed to login!"
browser.get(logout)
browser.quit()
sys.exit(1)
else:
print "OK: Everything Looks Good"
for element in browser.find_elements_by_name('SystemReportsForm'):
print element.text
browser.get(logout)
browser.quit()
sys.exit(0)
req = urllib2.Request(url)
try:
urllib2.urlopen(req)
login(url,systemInformation)
except urllib2.HTTPError as e:
print('CRITICAL: Site Appears to be Down!')
browser.get(logout)
browser.quit()
sys.exit(2)

sys.exit([status]) raises a SystemExit(status) exception; that's why the except clause is executed.
Exit the interpreter by raising SystemExit(status). If the status is
omitted or None, it defaults to zero (i.e., success). If the status is
an integer, it will be used as the system exit status. If it is
another kind of object, it will be printed and the system exit status
will be one (i.e., failure).
Remove sys.exit(0) inside try
(if you shown the complete version of the script)

Related

How to end a session in Selenium and start a new one?

I'm trying to quit the browser session and start a new one when I hit an exception. Normally I wouldn't do this, but in this specific case it seems to make sense.
# Question code: after an exception this quits the one shared `browser`
# and recurses, but never creates a new driver, so the recursive call
# talks to a dead session (hence http.client.RemoteDisconnected).
def get_info(url):
browser.get(url)
try:
#get page data
# NOTE(review): '#id' is invalid XPath as written -- presumably '@id'
# was garbled in transcription.
business_type_x = '//*[#id="page-desc"]/div[2]/div'
business_type = browser.find_element_by_xpath(business_type_x).text
print(business_type)
except Exception as e:
print(e)
#new session
browser.quit()
return get_info(url)
This results in this error: http.client.RemoteDisconnected: Remote end closed connection without response
I expected it to open a new browser window with a new session. Any tips are appreciated. Thanks!
You need to create the driver object again once you quit it. Initiate the driver in the get_info method again.
You can replace webdriver.Firefox() with whatever driver you are using.
def get_info(url, retries=10):
    """Scrape the business type from `url`, recreating the driver on failure.

    `retries` bounds the recursion so a permanently broken page cannot
    recurse forever (the original retried without any limit). The default
    keeps the call `get_info(url)` backward compatible.
    """
    browser = webdriver.Firefox()
    browser.get(url)
    try:
        # get page data ('@id' restored -- '#id' is invalid XPath)
        business_type_x = '//*[@id="page-desc"]/div[2]/div'
        business_type = browser.find_element_by_xpath(business_type_x).text
        print(business_type)
    except Exception as e:
        print(e)
        # new session: quit the dead driver; the recursive call builds
        # a fresh one at the top of the function.
        browser.quit()
        if retries > 0:
            return get_info(url, retries - 1)
You can also use close method instead of quit. So that you do not have to recreate the browser object.
# Answer's variant using close() instead of quit(), so the driver object
# itself is not destroyed.
# NOTE(review): with a single window, close() ends the last window; whether
# the recursive browser.get() can then reuse the session is not shown here
# -- confirm before relying on this variant.
def get_info(url):
browser.get(url)
try:
#get page data
# NOTE(review): '#id' should presumably be '@id' (transcription garble).
business_type_x = '//*[#id="page-desc"]/div[2]/div'
business_type = browser.find_element_by_xpath(business_type_x).text
print(business_type)
except Exception as e:
print(e)
#new session
browser.close()
return get_info(url)
difference between quit and close can be found in the documentation as well.
quit
close
This error message...
http.client.RemoteDisconnected: Remote end closed connection without response
...implies that the WebDriver instance i.e. browser was unable to communicate with the Browsing Context i.e. the Web Browsing session.
If your usecase is to keep on trying to invoke the same url in a loop till the desired element is getting located you can use the following solution:
def get_info(url):
    """Keep reloading `url` until the business-type element is located."""
    # NoSuchElementException is not imported by the snippet as posted;
    # import it locally so the except clause can resolve it.
    from selenium.common.exceptions import NoSuchElementException
    while True:
        browser.get(url)
        try:
            # get page data ('@id' restored -- '#id' is invalid XPath)
            business_type_x = '//*[@id="page-desc"]/div[2]/div'
            business_type = browser.find_element_by_xpath(business_type_x).text
            print(business_type)
            break
        except NoSuchElementException as e:
            # Element not rendered yet -- report and retry the page load.
            print(e)
            continue

Autodownload for firefox in selenium using python [duplicate]

My purpose it to download a zip file from https://www.shareinvestor.com/prices/price_download_zip_file.zip?type=history_all&market=bursa
It is a link in this webpage https://www.shareinvestor.com/prices/price_download.html#/?type=price_download_all_stocks_bursa. Then save it into this directory "/home/vinvin/shKLSE/ (I am using pythonaywhere). Then unzip it and the csv file extract in the directory.
The code run until the end with no error but it does not downloaded.
The zip file is automatically downloaded when click on https://www.shareinvestor.com/prices/price_download_zip_file.zip?type=history_all&market=bursa manually.
My code with a working username and password is used. The real username and password is used so that it is easier to understand the problem.
#!/usr/bin/python
print "hello from python 2"
import urllib2
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from pyvirtualdisplay import Display
import requests, zipfile, os
display = Display(visible=0, size=(800, 600))
display.start()
profile = webdriver.FirefoxProfile()
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir', "/home/vinvin/shKLSE/")
profile.set_preference('browser.helperApps.neverAsk.saveToDisk', '/zip')
for retry in range(5):
try:
browser = webdriver.Firefox(profile)
print "firefox"
break
except:
time.sleep(3)
time.sleep(1)
browser.get("https://www.shareinvestor.com/my")
time.sleep(10)
login_main = browser.find_element_by_xpath("//*[#href='/user/login.html']").click()
print browser.current_url
username = browser.find_element_by_id("sic_login_header_username")
password = browser.find_element_by_id("sic_login_header_password")
print "find id done"
username.send_keys("bkcollection")
password.send_keys("123456")
print "log in done"
login_attempt = browser.find_element_by_xpath("//*[#type='submit']")
login_attempt.submit()
browser.get("https://www.shareinvestor.com/prices/price_download.html#/?type=price_download_all_stocks_bursa")
print browser.current_url
time.sleep(20)
dl = browser.find_element_by_xpath("//*[#href='/prices/price_download_zip_file.zip?type=history_all&market=bursa']").click()
time.sleep(30)
browser.close()
browser.quit()
display.stop()
zip_ref = zipfile.ZipFile(/home/vinvin/sh/KLSE, 'r')
zip_ref.extractall(/home/vinvin/sh/KLSE)
zip_ref.close()
os.remove(zip_ref)
HTML snippet:
<li>All Historical Data <span>About 220 MB</span></li>
Note that &amp;amp; is shown when I copy the snippet. It was hidden from view source, so I guess it is written in JavaScript.
Observation I found
The directory /home/vinvin/shKLSE does not get created even though I run the code with no error
I try to download a much smaller zip file which can be completed in a second but still do not download after a wait of 30s. dl = browser.find_element_by_xpath("//*[#href='/prices/price_download_zip_file.zip?type=history_daily&date=20170519&market=bursa']").click()
I don't see any major drawback in your code block as such. But here are a few recommendations through this Solution & the execution of this Automated Test Script:
This code works perfect in Off Market Hours. During Market Hours a lot of JavaScript & Ajax Calls are in play and handling those are beyond the scope of this Question.
You may consider checking for the intended download directory first and, if it is not available, creating a new one. The code block for this functionality is in Windows style and works perfectly on the Windows platform.
Once you click on "Login" induce some wait for the HTML DOM to render properly.
When you want to see off the downloading process, you need to set certain more preferences in the FirefoxProfile as mentioned in my code below.
Always consider maximizing the browser window through browser.maximize_window()
When you start downloading you need to wait for sufficient amount of time to get the file completely downloaded.
If you are using browser.quit() at the end you don't need to use browser.close()
You may consider to replace all the time.sleep() with either of ImplicitlyWait or ExplicitWait or FluentWait.
Here is your own code block with some simple tweaks in it:
#!/usr/bin/python
print "hello from python 2"
import urllib2
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from pyvirtualdisplay import Display
import requests, zipfile, os
display = Display(visible=0, size=(800, 600))
display.start()
newpath = 'C:\\home\\vivvin\\shKLSE'
if not os.path.exists(newpath):
os.makedirs(newpath)
profile = webdriver.FirefoxProfile()
profile.set_preference("browser.download.dir",newpath);
profile.set_preference("browser.download.folderList",2);
profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/zip");
profile.set_preference("browser.download.manager.showWhenStarting",False);
profile.set_preference("browser.helperApps.neverAsk.openFile","application/zip");
profile.set_preference("browser.helperApps.alwaysAsk.force", False);
profile.set_preference("browser.download.manager.useWindow", False);
profile.set_preference("browser.download.manager.focusWhenStarting", False);
profile.set_preference("browser.helperApps.neverAsk.openFile", "");
profile.set_preference("browser.download.manager.alertOnEXEOpen", False);
profile.set_preference("browser.download.manager.showAlertOnComplete", False);
profile.set_preference("browser.download.manager.closeWhenDone", True);
profile.set_preference("pdfjs.disabled", True);
for retry in range(5):
try:
browser = webdriver.Firefox(profile)
print "firefox"
break
except:
time.sleep(3)
time.sleep(1)
browser.maximize_window()
browser.get("https://www.shareinvestor.com/my")
time.sleep(10)
login_main = browser.find_element_by_xpath("//*[#href='/user/login.html']").click()
time.sleep(10)
print browser.current_url
username = browser.find_element_by_id("sic_login_header_username")
password = browser.find_element_by_id("sic_login_header_password")
print "find id done"
username.send_keys("bkcollection")
password.send_keys("123456")
print "log in done"
login_attempt = browser.find_element_by_xpath("//*[#type='submit']")
login_attempt.submit()
browser.get("https://www.shareinvestor.com/prices/price_download.html#/?type=price_download_all_stocks_bursa")
print browser.current_url
time.sleep(20)
dl = browser.find_element_by_xpath("//*[#href='/prices/price_download_zip_file.zip?type=history_all&market=bursa']").click()
time.sleep(900)
browser.close()
browser.quit()
display.stop()
zip_ref = zipfile.ZipFile(/home/vinvin/sh/KLSE, 'r')
zip_ref.extractall(/home/vinvin/sh/KLSE)
zip_ref.close()
os.remove(zip_ref)
Let me know if this Answers your Question.
I rewrote your script, with comments explaining why I made the changes I made. I think your main problem might have been a bad mimetype, however, your script had a log of systemic issues that would have made it unreliable at best. This rewrite uses explicit waits, which completely removes the need to use time.sleep(), allowing it to run as fast as possible, while also eliminating errors that arise from network congestion.
You will need do the following to make sure all modules are installed:
pip install requests explicit selenium retry pyvirtualdisplay
The script:
#!/usr/bin/python
"""Headless download of the shareinvestor.com historical-price zip,
rewritten around explicit waits (no time.sleep() calls needed)."""
from __future__ import print_function  # Makes your code portable
import os
import glob
import zipfile
from contextlib import contextmanager

import requests
from retry import retry
from explicit import waiter, XPATH, ID
from selenium import webdriver
from pyvirtualdisplay import Display
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait

DOWNLOAD_DIR = "/tmp/shKLSE/"


def build_profile():
    """Firefox profile that silently saves zip/xls downloads to DOWNLOAD_DIR."""
    profile = webdriver.FirefoxProfile()
    profile.set_preference('browser.download.folderList', 2)
    profile.set_preference('browser.download.manager.showWhenStarting', False)
    profile.set_preference('browser.download.dir', DOWNLOAD_DIR)
    # I think your `/zip` mime type was incorrect. This works for me
    profile.set_preference('browser.helperApps.neverAsk.saveToDisk',
                           'application/vnd.ms-excel,application/zip')
    return profile


# Retry is an elegant way to retry the browser creation
# Though you should narrow the scope to whatever the actual exception is you
# are retrying on. ('@' restored below: the decorators were garbled to
# '#retry'/'#contextmanager' -- i.e. comments -- in the transcription.)
@retry(Exception, tries=5, delay=3)
@contextmanager  # This turns get_browser into a context manager
def get_browser():
    """Yield a Firefox driver inside a virtual display; always clean up."""
    # Use a context manager with Display, so it will be closed even if an
    # exception is thrown
    profile = build_profile()
    with Display(visible=0, size=(800, 600)):
        browser = webdriver.Firefox(profile)
        print("firefox")
        try:
            yield browser
        finally:
            # Let a try/finally block manage closing the browser, even if an
            # exception is raised
            browser.quit()


def main():
    print("hello from python 2")
    with get_browser() as browser:
        browser.get("https://www.shareinvestor.com/my")

        # Click the login button
        # waiter is a helper function that makes it easy to use explicit
        # waits; with it you don't need time.sleep() calls at all
        login_xpath = '//*/div[@class="sic_logIn-bg"]/a'
        waiter.find_element(browser, login_xpath, XPATH).click()
        print(browser.current_url)

        # Log in
        username = "bkcollection"
        username_id = "sic_login_header_username"
        password = "123456"
        password_id = "sic_login_header_password"
        waiter.find_write(browser, username_id, username, by=ID)
        waiter.find_write(browser, password_id, password, by=ID, send_enter=True)

        # Wait for the login process to finish by locating an element only
        # present after logging in, like the Logged In Nav
        nav_id = 'sic_loggedInNav'
        waiter.find_element(browser, nav_id, ID)
        print("log in done")

        # Load the target page
        target_url = ("https://www.shareinvestor.com/prices/price_download.html#/?"
                      "type=price_download_all_stocks_bursa")
        browser.get(target_url)
        print(browser.current_url)

        # Click the download button
        all_data_xpath = ("//*[@href='/prices/price_download_zip_file.zip?"
                          "type=history_all&market=bursa']")
        waiter.find_element(browser, all_data_xpath, XPATH).click()

        # Waiting for the download to complete is a bit challenging: the file
        # is 220 MB. This waits until at least one file appears in the dir,
        # then until no filename ends in `.part` (Firefox's partial suffix).
        # Note this is problematic if the target dir already holds a file;
        # consider the tempfile module for a unique download dir per run.
        print("Waiting for download to complete")
        at_least_1 = lambda x: len(x("{0}/*.zip*".format(DOWNLOAD_DIR))) > 0
        WebDriverWait(glob.glob, 300).until(at_least_1)

        no_parts = lambda x: len(x("{0}/*.part".format(DOWNLOAD_DIR))) == 0
        WebDriverWait(glob.glob, 300).until(no_parts)
        print("Download Done")

        # Now do whatever it is you need to do with the zip file
        # zip_ref = zipfile.ZipFile(DOWNLOAD_DIR, 'r')
        # zip_ref.extractall(DOWNLOAD_DIR)
        # zip_ref.close()
        # os.remove(zip_ref)
        print("Done!")


if __name__ == "__main__":
    main()
Full disclosure: I maintain the explicit module. It is designed to make using explicit waits much easier, for exactly situations like this, where websites slowly load in dynamic content based on user interactions. You could replace all of the waiter.XXX calls above with direct explicit waits.
Take it out side the scope of the selenium. Change the preference settings so that when the link is clicked (First check if link is valid) it gives you a pop up asking to save , now use sikuli http://www.sikuli.org/ to click on the popup.
Mime types does not always work, and there is no black and white answer why it is not working.
The reason is due to the webpage is loading slowly. I added a wait of 20 seconds after open the webpage link
# Fragment from the asker's accepted fix: give the JavaScript-heavy
# download page 20 seconds to finish loading before locating the link.
login_attempt.submit()
browser.get("https://www.shareinvestor.com/prices/price_download.html#/?type=price_download_all_stocks_bursa")
print browser.current_url
time.sleep(20)
dl = browser.find_element_by_xpath("//*[#href='/prices/price_download_zip_file.zip?type=history_all&market=bursa']").click()
It returns no error.
Additional,
/zip is incorrect MIME type. Change to profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/zip')
The final correction :
#!/usr/bin/python
print "hello from python 2"
import urllib2
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from pyvirtualdisplay import Display
import requests, zipfile, os
display = Display(visible=0, size=(800, 600))
display.start()
profile = webdriver.FirefoxProfile()
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir', "/home/vinvin/shKLSE/")
# application/zip not /zip
profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/zip')
for retry in range(5):
try:
browser = webdriver.Firefox(profile)
print "firefox"
break
except:
time.sleep(3)
time.sleep(1)
browser.get("https://www.shareinvestor.com/my")
time.sleep(10)
login_main = browser.find_element_by_xpath("//*[#href='/user/login.html']").click()
print browser.current_url
username = browser.find_element_by_id("sic_login_header_username")
password = browser.find_element_by_id("sic_login_header_password")
print "find id done"
username.send_keys("bkcollection")
password.send_keys("123456")
print "log in done"
login_attempt = browser.find_element_by_xpath("//*[#type='submit']")
login_attempt.submit()
browser.get("https://www.shareinvestor.com/prices/price_download.html#/?type=price_download_all_stocks_bursa")
print browser.current_url
time.sleep(20)
dl = browser.find_element_by_xpath("//*[#href='/prices/price_download_zip_file.zip?type=history_all&market=bursa']").click()
time.sleep(30)
browser.close()
browser.quit()
display.stop()
zip_ref = zipfile.ZipFile('/home/vinvin/shKLSE/file.zip', 'r')
zip_ref.extractall('/home/vinvin/shKLSE')
zip_ref.close()
# remove with correct path
os.remove('/home/vinvin/shKLSE/file.zip')
I haven't tried on the site you mentioned, however following code works perfectly and downloads the ZIP. if you are not able to download the zip, Mime type could be different. you can use chrome browser and network inspection to check the mime type of the file you are trying to download.
# Minimal working example: the exact profile preferences that make Firefox
# save 'application/zip' downloads to disk without any prompt.
profile = webdriver.FirefoxProfile()
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir', "/home/vinvin/shKLSE/")
profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/zip')
browser = webdriver.Firefox(profile)
browser.get("http://www.colorado.edu/conflict/peace/download/peace.zip")

python selenium scrape not printing data

I'm trying to run a selenium web scrape but it is not printing the data I'm trying to get. The page will load but nothing prints in terminal. Terminal just says "DevTools listening on ws..."
import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
import time
url = "https://www.tdameritrade.com/home.page"
username = "XXXXX"
password = "XXXXX"
browser = webdriver.Chrome()
if __name__ == "__main__":
browser.get(url)
uname = browser.find_element_by_name("tbUsername")
uname.send_keys(username)
passw = browser.find_element_by_name("tbPassword")
passw.send_keys(password)
submit_button = browser.find_element_by_xpath("//div[#class='main-header-login-fields']//button[#class='main-header-login-submit main-header-login-label btn btn-green-solid']").click()
time.sleep(10)#delay to answer extra login/security questions
browser.get("https://invest.ameritrade.com/grid/p/site#r=jPage/https://research.ameritrade.com/grid/wwws/research/Markets/SectorsIndustries/Industry?symbol=220992436&c_name=invest_VENDOR")
cell = browser.find_element_by_xpath("""//*[#id="module-detail"]/div/div/div[1]/div/div[3]/div[2]/span""")
print cell.text
EDIT:
I'm not sure where it is failing. I changed the last few lines to the code below, but terminal still just stays on "DevTools listening..." The page loads but does not print an error. Only when I close the browser will it reach and print "finished".
browser.get("https://invest.ameritrade.com/grid/p/site#r=jPage/https://research.ameritrade.com/grid/wwws/research/Markets/SectorsIndustries/Industry?symbol=220992436&c_name=invest_VENDOR")
try:
browser.switch_to.window(browser.current_window_handle)
cell = browser.find_element_by_xpath("""//*[#id="module-detail"]/div/div/div[1]/div/div[3]/div[2]/span""")
except Exception as e:
print e
pass
print "finished"
Try this:
Before the:
cell = browser.find_element_by_xpath("""//*[#id="module-detail"]/div/div/div[1]/div/div[3]/div[2]/span""")
Line add
browser.switch_to.window(browser.current_window_handle)
Then it try again. Hope this will helps you! :)

How to handle TimeoutException in selenium, python

First of all, I created several functions to use them instead of default "find_element_by_..." and login() function to create "browser". This is how I use it:
# Truncated question code: login() owns driver creation so other scripts
# can import it; find_element_by_id_u wraps WebDriverWait. The `try:`
# below has no matching except clause here because the question elides
# the rest of the function body at the "#########" marker.
def login():
browser = webdriver.Firefox()
return browser
def find_element_by_id_u(browser, element):
try:
obj = WebDriverWait(browser, 10).until(
lambda browser : browser.find_element_by_id(element)
)
return obj
#########
driver = login()
find_element_by_link_text_u(driver, 'the_id')
Now I use such tests through jenkins(and launch them on a virtual machine). And in case I got TimeoutException, browser session will not be killed, and I have to manually go to VM and kill the process of Firefox. And jenkins will not stop it's job while web browser process is active.
So I faced the problem, and I expect it may be resolved through exception handling.
I tried to add this to my custom functions, but it's not clear where exactly the exception occurred. Even if I got a line number, it takes me to my custom function, but not the place where it was called:
# Question's attempt: handle TimeoutException inside the helper itself.
# NOTE(review): the traceback printed here points at the lambda inside
# WebDriverWait rather than the call site -- exactly the asker's complaint.
def find_element_by_id_u(browser, element):
try:
obj = WebDriverWait(browser, 1).until(
lambda browser : browser.find_element_by_id(element)
)
return obj
# Python 2-only except syntax; Python 3 requires 'except ... as err'.
except TimeoutException, err:
print "Timeout Exception for element '{elem}' using find_element_by_id\n".format(elem = element)
print traceback.format_exc()
# NOTE(review): close() only closes the current window; quit() is what
# ends the Firefox process that Jenkins sees left running on the VM.
browser.close()
sys.exit(1)
#########
driver = login()
driver .get(host)
# NOTE(review): called without the browser argument -- as written this
# raises TypeError before any element lookup happens.
find_element_by_id_u('jj_username').send_keys('login' + Keys.TAB + 'passwd' + Keys.RETURN)
Can you please share your experience with me.
PS: I divided my program for few scripts, one of them contains only selenium part, that's why I need login() function, to call it from another script and use returned object in it.
Well, spending some time in my mind, I've found a proper solution.
def login():
browser = webdriver.Firefox()
return browser
def find_element_by_id_u(browser, element):
try:
obj = WebDriverWait(browser, 10).until(
lambda browser : browser.find_element_by_id(element)
)
return obj
#########
try:
driver = login()
find_element_by_id_u(driver, 'the_id')
except TimeoutException:
print traceback.format_exc()
browser.close()
sys.exit(1)
It was so obvious, that I missed it :(

Scrape Data Point Using Python

I am looking to scrape a data point using Python off of the url http://www.cavirtex.com/orderbook .
The data point I am looking to scrape is the lowest bid offer, which at the current moment looks like this:
<tr>
<td><b>Jan. 19, 2014, 2:37 a.m.</b></td>
<td><b>0.0775/0.1146</b></td>
<td><b>860.00000</b></td>
<td><b>66.65 CAD</b></td>
</tr>
The relevant point being the 860.00 . I am looking to build this into a script which can send me an email to alert me of certain price differentials compared to other exchanges.
I'm quite noobie so if in your explanations you could offer your thought process on why you've done certain things it would be very much appreciated.
This is what I have so far which will return me the name of the title correctly, I'm having trouble grabbing the table data though.
# Fetch the order-book page with a browser-like User-Agent (sites often
# reject urllib2's default agent) and parse it with BeautifulSoup.
import urllib2, sys
from bs4 import BeautifulSoup
site= "http://cavirtex.com/orderbook"
hdr = {'User-Agent': 'Mozilla/5.0'}
req = urllib2.Request(site,headers=hdr)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page)
print soup.title
Here is the code for scraping the lowest bid from the 'Buying BTC' table:
from selenium import webdriver
fp = webdriver.FirefoxProfile()
browser = webdriver.Firefox(firefox_profile=fp)
browser.get('http://www.cavirtex.com/orderbook')
lowest_bid = float('inf')
elements = browser.find_elements_by_xpath('//div[#id="orderbook_buy"]/table/tbody/tr/td')
for element in elements:
text = element.get_attribute('innerHTML').strip('<b>|</b>')
try:
bid = float(text)
if lowest_bid > bid:
lowest_bid = bid
except:
pass
browser.quit()
print lowest_bid
In order to install Selenium for Python on your Windows-PC, run from a command line:
pip install selenium (or pip install selenium --upgrade if you already have it).
If you want the 'Selling BTC' table instead, then change "orderbook_buy" to "orderbook_sell".
If you want the 'Last Trades' table instead, then change "orderbook_buy" to "orderbook_trades".
Note:
If you consider performance critical, then you can implement the data-scraping via URL-Connection instead of Selenium, and have your program running much faster. However, your code will probably end up being a lot "messier", due to the tedious XML parsing that you'll be obliged to apply...
Here is the code for sending the previous output in an email from yourself to yourself:
import smtplib,ssl
def SendMail(username,password,contents):
server = Connect(username)
try:
server.login(username,password)
server.sendmail(username,username,contents)
except smtplib.SMTPException,error:
Print(error)
Disconnect(server)
def Connect(username):
serverName = username[username.index("#")+1:username.index(".")]
while True:
try:
server = smtplib.SMTP(serverDict[serverName])
except smtplib.SMTPException,error:
Print(error)
continue
try:
server.ehlo()
if server.has_extn("starttls"):
server.starttls()
server.ehlo()
except (smtplib.SMTPException,ssl.SSLError),error:
Print(error)
Disconnect(server)
continue
break
return server
def Disconnect(server):
try:
server.quit()
except smtplib.SMTPException,error:
Print(error)
serverDict = {
"gmail" :"smtp.gmail.com",
"hotmail":"smtp.live.com",
"yahoo" :"smtp.mail.yahoo.com"
}
SendMail("your_username#your_provider.com","your_password",str(lowest_bid))
The above code should work if your email provider is either gmail or hotmail or yahoo.
Please note that depending on your firewall configuration, it may ask your permission upon the first time you try it...

Categories

Resources