Python 3.5 logger.info in @classmethod - python

New to Python, never used @classmethod before.
The problem: for some reason the logger methods are not executed within the shutdown_webdriver function.
import time
import logging
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.proxy import Proxy

class WebBrowserSettings(object):
    logger = logging.getLogger(__name__)

    def __init__(self, proxy):
        self.proxy = proxy

    def setup_remote_chromedriver(self):
        **irrelevant code**
        return browser

    @classmethod
    def shutdown_webdriver(cls, browser):
        print('here')
        cls.logger.info("Shutting down 1")
        for index in range(0, 20):
            error_check = 0
            try:
                time.sleep(5)
                browser.quit()
            except Exception:
                error_check = 1
            if error_check == 0:
                break
        cls.logger.info("Browser is down")
So I see only the print('here') message in my console output.
P.S. The logging config is set up and stable, and works in other classes.

I see no problem with the code shown here. I suspect that you will see the logging output if you replace cls.logger.info with cls.logger.error. If so, something is going wrong with the configuration of the logging system, which is the part not shown here, so I can't tell exactly what.
In any case, you have to make sure that the logging system is configured to display level INFO before the logger instance is created.
Your logger gets instantiated at the time the code is parsed, probably when some other code imports this module.
This in turn means that you have to configure the logging system before you import this module.
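For example, a minimal sketch of that ordering, assuming the class above lives in a module named web_browser_settings.py (a made-up name):

import logging

# Configure the root logger *before* importing the module, so that records
# from the class-level logger are emitted at INFO level.
logging.basicConfig(level=logging.INFO)

from web_browser_settings import WebBrowserSettings  # hypothetical module name

browser = WebBrowserSettings(proxy=None).setup_remote_chromedriver()
WebBrowserSettings.shutdown_webdriver(browser)   # now logs "Shutting down 1" / "Browser is down"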

Related

Python logging with multiple module imports

I'm trying to establish logging in all the modules I'm using. My project structure is:
# driver.py
import logging

logger = logging.getLogger(__name__)

class driver:
    ....

# driver_wrapper.py
from driver import driver

device = driver(...)

def driver_func():
    logging.info("...")
    ....

# main.py
import sys
import logging
import driver_wrapper

logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
driver_wrapper.driver_func()
My problem now is that I still get INFO level messages, and the output is also 'INFO:root'. But I would expect the module name instead of root.
Is there a way to set the logging level in main.py for all modules, or is the way I do it already correct? There are a lot of posts about this problem, but the solutions don't seem to work for me.
All your modules that use logging should have the logger = logging.getLogger(__name__) line, and thereafter you always log to e.g. logger.info(...) and never call e.g. logging.info(...). The latter is equivalent to logging to the root logger, not the module's logger. That "all your modules" includes driver_wrapper.py in your example.
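For example, driver_wrapper.py would then look roughly like this (a minimal sketch based on the snippet above):

# driver_wrapper.py
import logging
from driver import driver

logger = logging.getLogger(__name__)   # module-level logger, named "driver_wrapper"

device = driver(...)

def driver_func():
    logger.info("...")                 # goes through the module logger, not the root logger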

How do I import selenium webdriver tests from one Python file to another?

I'm incredibly new to separating modules. I have this long Python script that I want to separate into different files by class and run collectively in the same browser instance/window. The reason for this is that all the tests rely on being logged into the same session. I'd like to do a universal setUp, then log in, and then pull in the different tests one after another.
Folder structure is:
ContentCreator
- main.py
- __init__.py
- Features
  - login.py
  - pytest.py
  - __init__.py
Here is my code:
login.py
import unittest
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import time
import json

driver = webdriver.Chrome()

class logIn(unittest.TestCase):
    @classmethod
    def test_login(self):
        """Login"""
        driver.get("sign_in_url")
        # load username and pw through a json file
        with open('path/to/file.json', 'r') as f:
            config = json.load(f)
        # login
        driver.find_element_by_id("email").click()
        driver.find_element_by_id("email").clear()
        driver.find_element_by_id("email").send_keys(config['user']['name'])
        driver.find_element_by_id("password").click()
        driver.find_element_by_id("password").clear()
        driver.find_element_by_id("password").send_keys(config['user']['password'])
        driver.find_element_by_id("submit").click()
        time.sleep(3)
        print("You are Logged In!")
pytest.py
import time
import unittest
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from displays import DISPLAY_TYPES, DISPLAY_NAMES

driver = webdriver.Chrome()
# driver.get("url")

class createContent(unittest.TestCase):
    @classmethod
    def test_add_required(self):
        """Test adding all the required fields across all sites:"""
        for i in range(1):
            """This is the number of each type of article that will be created."""
            for i in range(1):
                """This is how many different article types that will be created."""
                print("create new content")
                time.sleep(1)
                driver.find_element_by_link_text("Content").click()
                time.sleep(1)
                driver.find_element_by_link_text("Create New").click()
                print("select a display type:")
                display = DISPLAY_TYPES
                display_type = driver.find_element_by_id(display[i])
                display_type.click()
                names = (DISPLAY_NAMES[i])
                print(names), (" created and saved successfully!")

    @classmethod
    def tearDownClass(cls):
        # close the browser window
        driver.quit()

    def is_element_present(self, how, what):
        """
        Helper method to confirm the presence of an element on page
        :params how: By locator type
        :params what: locator value
        """
        try:
            driver.find_element(by=how, value=what)
        except NoSuchElementException:
            return False
        return True
main.py
import unittest
from HtmlTestRunner import HTMLTestRunner
from features.login import logIn
from features.pytest import createContent

login_script = unittest.TestLoader().loadTestsFromTestCase(logIn)
add_pytest = unittest.TestLoader().loadTestsFromTestCase(createContent)
# create a test suite combining all tests
test_suite = unittest.TestSuite([login_script, add_pytest])
# create output
runner = HTMLTestRunner(output='Test Results')
# run the suite
runner.run(test_suite)
When running the above code it opens two browser sessions, and only the login script gets executed. The test fails due to not finding the elements outlined in the next script.
EDIT:
Alfonso Jimenez or anyone else, here's what I have so far...
Folder structure:
- Files
  - singleton.py
  - singleton2.py
New Singleton code...
singleton.py:
from robot.api import logger
from robot.utils import asserts
from selenium import webdriver

class Singleton(object):
    instance = None

    def __new__(cls, base_url, browser='chrome'):
        if cls.instance is None:
            i = object.__new__(cls)
            cls.instance = i
            cls.base_url = base_url
            cls.browser = browser
            if browser == "chrome":
                # Create a new instance of the Chrome driver
                cls.driver = webdriver.Chrome()
            else:
                # Sorry, we can't help you right now.
                asserts.fail("Support for Chrome only!")
        else:
            i = cls.instance
        return i
singleton2.py:
import time
import json
from datetime import datetime

from singleton import Singleton

driver = Singleton('base_url')

def teardown_module(module):
    driver.quit()

class logIn(object):
    def test_login(self):
        """Login"""
        driver.get("url.com")
        # load username and pw through a json file
        with open('file.json', 'r') as f:
            config = json.load(f)
        # login
        driver.find_element_by_id("email").click()
        driver.find_element_by_id("email").clear()
        driver.find_element_by_id("email").send_keys(config['user']['name'])
        driver.find_element_by_id("password").click()
        driver.find_element_by_id("password").clear()
        driver.find_element_by_id("password").send_keys(config['user']['password'])
        driver.find_element_by_id("submit").click()
        time.sleep(3)
        print("You are Logged In!")
        # take screenshot
        driver.save_screenshot('path/screenshot_{}.png'.format(datetime.now()))
The result is that an instance of Chrome kicks off, but nothing happens. The base_url (or any other URL defined in my test) doesn't come up. The blank window is all I get. Any insights on what I'm doing wrong?
You're instantiating the Selenium driver twice.
If you want to keep the same session open you should pass the same object to both scripts, or import it, which could work, but it would be a dirtier solution.
The best thing to do is create a singleton class to initiate the driver. Once you have done this, every time you create an object from this class you will get the same unique webdriver object.
You can get an example from this answer.
You can also read more about singleton instances; they're very common and very useful. You can check here.
I don't understand what you mean by robot, perhaps the testing framework?
You can write the singleton class wherever you want to. You will have to import the class from that place and then instantiate the object. Ex:
lib/singleton_web_driver.py
from robot.api import logger
from robot.utils import asserts
from selenium import webdriver

class Singleton(object):
    instance = None

    def __new__(cls, base_url, browser='firefox'):
        if cls.instance is None:
            i = object.__new__(cls)
            cls.instance = i
            cls.base_url = base_url
            cls.browser = browser
            if browser == "firefox":
                # Create a new instance of the Firefox driver
                cls.driver = webdriver.Firefox()
            elif browser == "remote":
                # Create a new instance of the Remote driver
                cls.driver = webdriver.Remote("http://localhost:4444/wd/hub",
                                              webdriver.DesiredCapabilities.HTMLUNITWITHJS)
            else:
                # Sorry, we can't help you right now.
                asserts.fail("Support for Firefox or Remote only!")
        else:
            i = cls.instance
        return i
And then in every script where you're going to need the webdriver:
test_script_file.py
from lib.singleton_web_driver import Singleton
driver = Singleton('base_url')
This is just dummy code, I didn't test it. The important point is to create the class with the __new__ method, where you can check whether the class has already been instantiated. The import is just like any other class import: you write the class in a module and then import it in the scripts you're going to use.
I had a similar problem. My solution was to initiate the driver in the main file and then use this driver within the imported files and functions. In this example that means changing createContent(unittest.TestCase) so that it receives the shared driver instead of creating its own (see the sketch below).
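A minimal sketch of that idea, assuming the test classes are changed to read a driver class attribute (set from main.py) instead of each creating their own webdriver.Chrome() at import time; class and module names follow the question:

# main.py (sketch)
import unittest
from selenium import webdriver
from features.login import logIn
from features.pytest import createContent

driver = webdriver.Chrome()          # single shared browser session

# hand the shared driver to each test class as a class attribute
logIn.driver = driver
createContent.driver = driver

suite = unittest.TestSuite([
    unittest.TestLoader().loadTestsFromTestCase(logIn),
    unittest.TestLoader().loadTestsFromTestCase(createContent),
])
unittest.TextTestRunner().run(suite)

driver.quit()                        # close the shared session at the end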

Python logging in multiple modules

I want to write a logger which I can use in multiple modules. I must be able to enable and disable it from one place. And it must be reusable.
Following is the scenario.
switch_module.py
class Brocade(object):
    def __init__(self, ip, username, password):
        ...

    def connect(self):
        ...

    def disconnect(self):
        ...

    def switch_show(self):
        ...
switch_module_library.py
import switch_module

class Keyword_Mapper(object):
    def __init__(self, keyword_to_execute):
        self._brocade_object = switch_module.Brocade(ip, username, password)
        ...

    def map_keyword_to_command(self):
        ...
application_gui.py
class GUI:
    # I can open a file containing keywords for the brocade switch
    # in this GUI in a tab and tree widget (it uses PyQt, which I don't know).
    # Each tab is a QThread and there could be multiple tabs.
    # Each tab is accompanied by an execute button.
    # On pressing execute it will read the strings/keywords from the file,
    # create an object of the Keyword_Mapper class and call the
    # map_keyword_to_command method, execute the command on the brocade
    # switch and log the results. Currently I am logging the result
    # only from the Keyword_Mapper class.
The problem I have is how to write a logger which can be enabled and disabled at will, and which logs to one file as well as to the console from all three modules.
I tried writing a global logger in __init__.py and then importing it in all three modules, and had to give it a common name so that they log to the same file, but then I ran into trouble because of the multi-threading; later I created a logger that logs to a file with the thread id in its name, so that I can have one log per thread.
What if I am required to log to a different file rather than the same file?
I have gone through the Python logging documentation but am unable to get a clear picture of how to write a proper, reusable logging system.
I have gone through this link
Is it better to use root logger or named logger in Python
but because the GUI was created by someone other than me using PyQt, and because of the multi-threading, I am unable to get my head around logging here.
In my project I only use a root logger (I don't have the time to create named loggers, even if it would be nice). So if you don't want to use a named logger, here is a quick solution:
I created a function to set up the logger quickly:
import logging

def initLogger(level=logging.DEBUG):
    if level == logging.DEBUG:
        # Display more stuff when in debug mode
        logging.basicConfig(
            format='%(levelname)s-%(module)s:%(lineno)d-%(funcName)s: %(message)s',
            level=level)
    else:
        # Display less stuff for info mode
        logging.basicConfig(format='%(levelname)s: %(message)s', level=level)
I created a package for it so that I can import it anywhere.
Then, in my top level I have:
import logging

import LoggingTools

if __name__ == '__main__':
    # Configure logger
    LoggingTools.initLogger(logging.DEBUG)
    # LoggingTools.initLogger(logging.INFO)
Depending on whether I am debugging or not, I use the corresponding statement.
Then in each of the other files, I just use logging:
import logging

class MyClass():
    def __init__(self):
        logging.debug("Debug message")
        logging.info("Info message")

Python: Making PhantomJS browser shut down if test terminated

I'm working on a Django app. I'm using Selenium together with PhantomJS for testing.
I found today that every time I terminate the test (which I do a lot when debugging), the PhantomJS process is still alive. This means that after a debugging session I could be left with 200 zombie PhantomJS processes!
How do I get these PhantomJS processes to terminate when I terminate the Python debug process? If there's a time delay, that works too. (i.e. have them terminate if not used for 2 minutes, that would solve my problem.)
The usual setup is to quit the PhantomJS browser in the teardown method of the class. For example:
from django.conf import settings
from django.test import LiveServerTestCase
from selenium.webdriver.phantomjs.webdriver import WebDriver

PHANTOMJS = (settings.BASE_DIR +
             '/node_modules/phantomjs/bin/phantomjs')

class PhantomJSTestCase(LiveServerTestCase):
    @classmethod
    def setUpClass(cls):
        cls.web = WebDriver(PHANTOMJS)
        cls.web.set_window_size(1280, 1024)
        super(PhantomJSTestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        screenshot_file = getattr(settings, 'E2E_SCREENSHOT_FILE', None)
        if screenshot_file:
            cls.web.get_screenshot_as_file(screenshot_file)
        cls.web.quit()
        super(PhantomJSTestCase, cls).tearDownClass()
If you do not use unittest test cases, you'll have to use the quit method yourself. You can use the atexit module to run code when the Python process terminates, for example:
import atexit
web = WebDriver(PHANTOMJS)
atexit.register(web.quit)

Python logging: reverse effects of disable()

The logging docs say that calling the logging.disable(lvl) method can "temporarily throttle logging output down across the whole application," but I'm having trouble finding the "temporarily." Take, for example, the following script:
import logging
logging.disable(logging.CRITICAL)
logging.warning("test")
# Something here
logging.warning("test")
So far, I haven't been able to find the Something here that will re-enable the logging system as a whole and allow the second warning to get through. Is there a reverse to disable()?
logging.disable(logging.NOTSET)
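So the script from the question becomes, as a minimal example:

import logging

logging.disable(logging.CRITICAL)
logging.warning("test")            # suppressed

logging.disable(logging.NOTSET)    # re-enables logging across the whole application
logging.warning("test")            # printed: WARNING:root:test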
Based on the answer by @unutbu, I created a context manager:
import logging

log = logging.getLogger(__name__)

class SuppressLogging:
    """
    Context handler class that suppresses logging for some controlled code.
    """
    def __init__(self, loglevel):
        logging.disable(loglevel)
        return

    def __enter__(self):
        return

    def __exit__(self, exctype, excval, exctraceback):
        logging.disable(logging.NOTSET)
        return False

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    log.info("log this")
    with SuppressLogging(logging.WARNING):
        log.info("don't log this")
        log.warning("don't log this warning")
        log.error("log this error while up to WARNING suppressed")
    log.info("log this again")
