I am using radish BDD with Selenium to test my Django app, but sometimes Django asks to delete the test database because it already exists. Here's my terrain.py:
import os
import django
from django.test.runner import DiscoverRunner
from django.test import LiveServerTestCase
from radish import before, after
from selenium import webdriver
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tangorblog.settings.features')
BASE_URL = os.environ.get('BASE_URL', 'http://localhost:8000')
@before.each_scenario
def setup_django_test(scenario):
django.setup()
scenario.context.test_runner = DiscoverRunner()
scenario.context.test_runner.setup_test_environment()
scenario.context.old_db_config =\
scenario.context.test_runner.setup_databases()
scenario.context.base_url = BASE_URL
scenario.context.test_case = LiveServerTestCase()
scenario.context.test_case.setUpClass()
scenario.context.browser = webdriver.Chrome()
@after.each_scenario
def teardown_django(scenario):
scenario.context.browser.quit()
scenario.context.test_case.tearDownClass()
del scenario.context.test_case
scenario.context.test_runner.teardown_databases(
scenario.context.old_db_config)
scenario.context.test_runner.teardown_test_environment()
I think I could somehow alter this part:
scenario.context.old_db_config =\
scenario.context.test_runner.setup_databases()
But I don't know how. Any help?
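One possibility is to pass keepdb/interactive to the runner so Django reuses the test database or drops it without prompting. This is only a sketch (assuming Django 1.8+, where DiscoverRunner accepts these options), not a verified fix:
# Sketch: reuse the test database between runs (keepdb=True), or let Django
# drop an existing one without asking (interactive=False).
scenario.context.test_runner = DiscoverRunner(keepdb=True, interactive=False)
scenario.context.test_runner.setup_test_environment()
scenario.context.old_db_config = scenario.context.test_runner.setup_databases()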
It seems to me that recreating the database for every scenario would end up being highly inefficient (and super slow). It should only be necessary to create the database once per test run and then drop it at the end.
I've come up with a solution I think integrates better with Django. It allows you to run the tests with manage.py test, only creates/drops the database once per test run, and clears the database tables after every feature is tested.
Note that this runs both the Django unit tests and radish tests by default. To run just the radish tests, you can do RADISH_ONLY=1 manage.py test. Also, for the live server/Selenium tests to work, you have to run manage.py collectstatic first.
# package/settings.py
TEST_RUNNER = 'package.test.runner.RadishTestRunner'
# package/test/runner.py
import os
from django.test.runner import DiscoverRunner
import radish.main
class RadishTestRunner(DiscoverRunner):
def run_suite(self, suite, **kwargs):
# Run unit tests
if os.getenv('RADISH_ONLY') == '1':
result = None
else:
result = super().run_suite(suite, **kwargs)
# Run radish behavioral tests
self._radish_result = radish.main.main(['features'])
return result
def suite_result(self, suite, result, **kwargs):
if result is not None:
# Django unit tests were run
result = super().suite_result(suite, result, **kwargs)
else:
result = 0
result += self._radish_result
return result
# radish/world.py
from django.db import connections
from django.test.testcases import LiveServerThread, _StaticFilesHandler
from django.test.utils import modify_settings
from radish import pick
from selenium import webdriver
@pick
def get_browser():
return webdriver.Chrome()
@pick
def get_live_server():
live_server = LiveServer()
live_server.start()
return live_server
class LiveServer:
host = 'localhost'
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
def __init__(self):
connections_override = {}
for conn in connections.all():
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
self.modified_settings = modify_settings(ALLOWED_HOSTS={'append': self.host})
self.server_thread = self.server_thread_class(
self.host,
self.static_handler,
connections_override=connections_override,
port=self.port,
)
self.server_thread.daemon = True
    @property
def url(self):
self.server_thread.is_ready.wait()
return 'http://{self.host}:{self.server_thread.port}'.format(self=self)
def start(self):
self.modified_settings.enable()
self.server_thread.start()
self.server_thread.is_ready.wait()
if self.server_thread.error:
self.stop()
raise self.server_thread.error
def stop(self):
if hasattr(self, 'server_thread'):
self.server_thread.terminate()
for conn in connections.all():
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
conn.allow_thread_sharing = False
self.modified_settings.disable()
# radish/terrain.py
from django.db import connections, transaction
from radish import world, before, after
@before.all
def set_up(features, marker):
world.get_live_server()
@after.all
def tear_down(features, marker):
browser = world.get_browser()
live_server = world.get_live_server()
browser.quit()
live_server.stop()
@before.each_scenario
def set_up_scenario(scenario):
live_server = world.get_live_server()
scenario.context.base_url = live_server.url
scenario.context.browser = world.get_browser()
# XXX: Only works with the default database
# XXX: Assumes the default database supports transactions
scenario.context.transaction = transaction.atomic(using='default')
scenario.context.transaction.__enter__()
@after.each_scenario
def tear_down_scenario(scenario):
transaction.set_rollback(True, using='default')
scenario.context.transaction.__exit__(None, None, None)
for connection in connections.all():
connection.close()
@Wyatt, again I'm just going to modify your answer. I tried running your solution, but it didn't manage to make each scenario independent; I even got an IntegrityError when I tried to create a model object inside a scenario. Regardless, I still use your solution (especially RadishTestRunner, as the idea comes from you). I modified it so I could run the Django unit tests separately from radish. I use LiveServerTestCase directly and removed LiveServer, since I noticed the similarity between the two, except that LiveServerTestCase inherits from TransactionTestCase and has LiveServerThread and _StaticFilesHandler built in. Here's how it is:
# package/test/runner.py
import os
from django.test.runner import DiscoverRunner
import radish.main
class RadishTestRunner(DiscoverRunner):
radish_features = ['features']
def run_suite(self, suite, **kwargs):
# run radish test
return radish.main.main(self.radish_features)
def suite_result(self, suite, result, **kwargs):
return result
def set_radish_features(self, features):
self.radish_features = features
# radish/world.py
from django.test import LiveServerTestCase
from radish import pick
from selenium import webdriver
@pick
def get_browser():
return webdriver.Chrome()
@pick
def get_live_server():
live_server = LiveServerTestCase
live_server.setUpClass()
return live_server
# radish/terrain.py
from radish import world, before, after
from selenium import webdriver
@before.all
def set_up(features, marker):
world.get_live_server()
@after.all
def tear_down(features, marker):
live_server = world.get_live_server()
live_server.tearDownClass()
@before.each_scenario
def set_up_scenario(scenario):
live_server = world.get_live_server()
scenario.context.browser = webdriver.Chrome()
scenario.context.base_url = live_server.live_server_url
scenario.context.test_case = live_server()
scenario.context.test_case._pre_setup()
@after.each_scenario
def tear_down_scenario(scenario):
scenario.context.test_case._post_teardown()
scenario.context.browser.quit()
That's it. This also fixes the PostgreSQL problem from my other question that you pointed out. I also open and quit the browser in each scenario, as it gives me more control over the browser inside a scenario. Thank you so much for your effort in pointing me in the right direction.
Finally, I returned to PostgreSQL.
PostgreSQL seems to be faster than MySQL here; it greatly reduced the time it takes to run the tests.
And oh yes, I need to run ./manage.py collectstatic first, after specifying STATIC_ROOT in the Django settings file.
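For completeness, a minimal STATIC_ROOT sketch (the path is illustrative, not taken from my project):
# settings (illustrative)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')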
I also modified RadishTestRunner so that, instead of running with RADISH_ONLY=1, I can run it with python manage.py radish /path/to/features/file. Here's my radish command:
# package/management/commands/radish.py
from __future__ import absolute_import
import sys
from django.core.management.base import BaseCommand, CommandError
from package.test.runner import RadishTestRunner
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('features', nargs='+', type=str)
def handle(self, *args, **options):
test_runner = RadishTestRunner(interactive=False)
if options['features']:
test_runner.set_radish_features(options['features'])
result = test_runner.run_suite(None)
if result:
sys.exit(result)
By using radish through a Django management command, we have control over which feature files we want to run.
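For example (the feature file names are illustrative):
python manage.py radish features/blog.feature features/comments.feature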
I've been struggling with this for a couple of months now and have tried a lot of different things to alleviate it, but I'm not sure what to do anymore. All the examples I see are different from what I need, and in my case they just don't work.
To preface the problem: I have processor applications that get spawned by a manager as Docker containers. The processor is a single class that runs in a forever while loop, processing the same list of items over and over and running a function on them. The code I'm working with is quite large, so I created a smaller version of the problem below.
This is how I create my engine:
db.py
from os import getpid
from pymongo import MongoClient
_mongo_client = None
_mongo_client_pid = None
def get_mongodb_uri(host='localhost', port=27017) -> str:
    # defaults are illustrative (originally taken from MONGO_DB_HOST / MONGO_DB_PORT)
    return 'mongodb://{}:{}/{}'.format(host, port, 'taskprocessor')
def get_db_engine():
global _mongo_client, _mongo_client_pid
curr_pid = getpid()
if curr_pid != _mongo_client_pid:
_mongo_client = MongoClient(get_mongodb_uri(), connect=False)
_mongo_client_pid = curr_pid
return _mongo_client
def get_db(name):
return get_db_engine()['taskprocessor'][name]
These are my DB models
processor.py
from uuid import uuid4
from taskprocessor.db import get_db
class ProcessorModel():
db = get_db("processors")
def __init__(self, **kwargs):
self.uid = kwargs.get('uid', str(uuid4()))
self.exceptions = kwargs.get('exceptions', [])
self.to_process = kwargs.get('to_process', [])
self.functions = kwargs.get('functions', ["int", "round"])
def save(self):
return self.db.insert_one(self.__dict__).inserted_id is not None
    @classmethod
def get(cls, uid):
res = cls.db.find_one(dict(uid=uid))
return ProcessorModel(**res)
result.py
from uuid import uuid4
from taskprocessor.db import get_db
class ResultModel():
db = get_db("results")
def __init__(self, **kwargs):
self.uid = kwargs.get('uid', str(uuid4()))
self.res = kwargs.get('res', dict())
def save(self):
return self.db.insert_one(self.__dict__).inserted_id is not None
And my main.py, which gets started as a Docker container to run a forever loop:
import os
from time import sleep
from taskprocessor.db.processor import ProcessorModel
from taskprocessor.db.result import ResultModel
from multiprocessing import Pool
class Processor:
def __init__(self):
self.id = os.getenv("PROCESSOR_ID")
self.db_model = ProcessorModel.get(self.id)
self.to_process = self.db_model.to_process # list of floats [1.23, 1.535, 1.33499, 242.2352, 352.232]
self.functions = self.db_model.functions # list i.e ["round", "int"]
def run(self):
while True:
try:
pool = Pool(2)
res = list(pool.map(self.analyse, self.to_process))
print(res)
sleep(100)
except Exception as e:
self.db_model = ProcessorModel.get(os.getenv("PROCESSOR_ID"))
self.db_model.exceptions.append(f"exception {e}")
self.db_model.save()
print("Exception")
def analyse(self, item):
res = {}
for func in self.functions:
if func == "round":
res['round'] = round(item)
if func == "int":
res['int'] = int(item)
ResultModel(res=res).save()
return res
if __name__ == "__main__":
p = Processor()
p.run()
I've tried setting connect=False, and even tried closing the connection after the configuration, but then I end up with connection-closed errors. I also tried a scheme of recognizing the PID and handing out a different client per process, but that still did not help.
Almost all the examples I see are cases where DB access is not needed before the multiprocessing fork. In my case the initial configuration is heavy and would be inefficient to repeat on every iteration of the processing loop. Furthermore, the items to process themselves depend on data from the DB.
I can live with not being able to save the exceptions to the DB object from the main PID.
I'm seeing the error logs around fork safety, as well as hitting "connection pool paused" errors, as symptoms of this issue.
If anybody sees this: I was using pymongo 4.0.2, upgraded to 4.3.3, and am no longer seeing the errors I was previously seeing.
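For anyone who can't upgrade: the general fork-safety pattern PyMongo's documentation recommends is to create the MongoClient after the fork, for example in a Pool initializer, so each worker process builds its own client. A minimal sketch (the names and URI are illustrative, not taken from the code above):
# Sketch: give each worker process its own MongoClient via a Pool initializer.
from multiprocessing import Pool
from pymongo import MongoClient

worker_client = None  # set once per worker process

def init_worker():
    global worker_client
    worker_client = MongoClient('mongodb://localhost:27017/taskprocessor')

def analyse(item):
    # uses the per-process client created in init_worker()
    worker_client.taskprocessor.results.insert_one({'res': {'round': round(item)}})
    return round(item)

if __name__ == '__main__':
    with Pool(2, initializer=init_worker) as pool:
        print(pool.map(analyse, [1.23, 1.535, 242.2352]))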
So, just like the title suggests: I followed the instructions.
This is the code.
The command doesn't work, not even from the command line. I have zero idea why.
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Django command that pauses execution until the database is available."""

    def handle(self, *args, **options):
        self.stdout.write('Waiting for database...')
        db_conn = None
        while not db_conn:
            try:
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database available'))
The call is in a unit test, just a line with call_command('wait_for_db') (the name of the file, of course).
This is the test unit, and this is the wait_for_db command.
Thanks, y'all.
Management commands for your app should be in a directory named <app>/management/commands/. You have named the management directory "management.py"; it should be "management".
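For reference, the expected layout looks like this (assuming the app is named core and the command file is wait_for_db.py; note the empty __init__.py files):
core/
    management/
        __init__.py
        commands/
            __init__.py
            wait_for_db.py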
I am performing mobile application automation using Appium with Python. I also need to create HTML reports, and I want to create multiple test suites too. All of this works, except for one problem.
My problem is that the application closes and re-opens in every test case. How can I fix this? Thanks in advance.
(Please note that this is a sample code I'm putting on here.)
from adb.client import Client as AdbClient
import HtmlTestRunner
import datetime
import os, sys
import glob
import unittest
from appium import webdriver
from time import sleep
from appium.webdriver.common.touch_action import TouchAction
PLATFORM_VERSION = '8.1.0'
class Q_suite1_01(unittest.TestCase):
def setUp(self):
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '8.1.0'
desired_caps['deviceName'] = 'Samsung Galaxy J7 Max'
devices = AdbClient(host= "127.0.0.1", port= 5037).devices()
for device in devices:
desired_caps['udid'] = device.serial
desired_caps['appPackage'] = 'com.testapp'
desired_caps['appActivity'] = 'com.testapp.MainActivity'
url = "http://localhost:{}/wd/hub".format(4723)
self.driver = webdriver.Remote(url, desired_caps)
def install(self):
        print('ABDC!')
    def run_app(self):
        try:
            x = self.driver.is_app_installed('com.quallogi')
            if x is True:
                print('App is already installed.')
            else:
                print('App is not installed.')
        except Exception:
            print('App not installed')
    def signin(self):
        sleep(5)
        self.driver.find_element_by_xpath('//*[contains(@text,"Login") and contains(@class, "android.widget.TextView")]').click()
        print('Sign')
def testcase_Install_app(self):
self.install()
def testcase_Run_app(self):
self.run_app()
def testcase_SignIn(self):
self.signin()
# def testcase_Install_app(self):
# self.install()
# self.run_app()
# self.signin()
#
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
result = []
suite1= unittest.TestLoader().loadTestsFromTestCase(Q_suite1_01)
result.append(HtmlTestRunner.HTMLTestRunner(output='./HTML Reports/'
+ str(datetime.date.today())).run(suite1))
print(result)
First of all, I recommend that you look at the Appium capability "noReset": "Don't reset app state before this session" (true, false).
If I understand your question correctly: what do you mean by "the application closes and re-opens in every test case"? Can you describe it in more detail?
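If the reinstall/reset between sessions is the issue, here is a sketch of how the capability could be set in the setUp above (noReset is a standard Appium capability; whether it fully solves your case would need to be verified):
desired_caps['noReset'] = True  # keep app data/state between sessions
self.driver = webdriver.Remote(url, desired_caps)
Note that because the driver is created in setUp and quit in tearDown, a new session still starts for every test method; moving driver creation into setUpClass/tearDownClass is another option if you want one session for the whole suite.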
I'm incredibly new to separating modules. I have a long Python script that I want to split into different files by class and run collectively in the same browser instance/window. The reason is that all the tests rely on being logged into the same session. I'd like to do a universal setUp, then log in, and then pull in the different tests one after another.
Folder structure is:
ContentCreator
- main.py
- __init__.py
- Features
- login.py
- pytest.py
- __init__.py
Here is my code:
login.py
import unittest
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import time
import json
driver = webdriver.Chrome()
class logIn(unittest.TestCase):
    @classmethod
def test_login(self):
"""Login"""
driver.get("sign_in_url")
# load username and pw through a json file
with open('path/to/file.json', 'r') as f:
config = json.load(f)
# login
driver.find_element_by_id("email").click()
driver.find_element_by_id("email").clear()
driver.find_element_by_id("email").send_keys(config['user']['name'])
driver.find_element_by_id("password").click()
driver.find_element_by_id("password").clear()
driver.find_element_by_id("password").send_keys(config['user']['password'])
driver.find_element_by_id("submit").click()
time.sleep(3)
print("You are Logged In!")
pytest.py
import time
import unittest
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from displays import DISPLAY_TYPES, DISPLAY_NAMES
driver = webdriver.Chrome()
#driver.get("url")
class createContent(unittest.TestCase):
    @classmethod
def test_add_required(self):
"""Test adding all the required fields across all sites:"""
for i in range(1):
"""This is the number of each type of article that will be created."""
for i in range(1):
"""This is how many different article types that will be created."""
print("create new content")
time.sleep(1)
driver.find_element_by_link_text("Content").click()
time.sleep(1)
driver.find_element_by_link_text("Create New").click()
print("select a display type:")
display = DISPLAY_TYPES
display_type = driver.find_element_by_id(display[i])
display_type.click()
names = (DISPLAY_NAMES[i])
                print(names, " created and saved successfully!")
    @classmethod
def tearDownClass(cls):
# close the browser window
driver.quit()
def is_element_present(self, how, what):
"""
Helper method to confirm the presence of an element on page
:params how: By locator type
:params what: locator value
"""
try:
driver.find_element(by=how, value=what)
except NoSuchElementException:
return False
return True
main.py
import unittest
from HtmlTestRunner import HTMLTestRunner
from features.login import logIn
from features.pytest import createContent
login_script = unittest.TestLoader().loadTestsFromTestCase(logIn)
add_pytest = unittest.TestLoader().loadTestsFromTestCase(createContent)
# create a test suite combining all tests
test_suite = unittest.TestSuite([login_script, add_pytest])
# create output
runner = HTMLTestRunner(output='Test Results')
# run the suite
runner.run(test_suite)
When running the above code, it opens two browser sessions, and only the login script gets executed. The test fails due to not finding the elements outlined in the next script.
EDIT:
Alfonso Jimenez or anyone else, here's what I have so far...
Folder structure:
- Files
- singleton.py
- singleton2.py
New Singleton code...
singleton.py:
from robot.api import logger
from robot.utils import asserts
from selenium import webdriver
class Singleton(object):
instance = None
def __new__(cls, base_url, browser='chrome'):
if cls.instance is None:
i = object.__new__(cls)
cls.instance = i
cls.base_url = base_url
cls.browser = browser
if browser == "chrome":
# Create a new instance of the Chrome driver
cls.driver = webdriver.Chrome()
else:
# Sorry, we can't help you right now.
asserts.fail("Support for Chrome only!")
else:
i = cls.instance
return i
singleton2.py:
import time
import json
from datetime import datetime
from singleton import Singleton
driver = Singleton('base_url')
def teardown_module(module):
driver.quit()
class logIn(object):
def test_login(self):
"""Login"""
driver.get("url.com")
# load username and pw through a json file
with open('file.json', 'r') as f:
config = json.load(f)
# login
driver.find_element_by_id("email").click()
driver.find_element_by_id("email").clear()
driver.find_element_by_id("email").send_keys(config['user']['name'])
driver.find_element_by_id("password").click()
driver.find_element_by_id("password").clear()
driver.find_element_by_id("password").send_keys(config['user']['password'])
driver.find_element_by_id("submit").click()
time.sleep(3)
print("You are Logged In!")
# take screenshot
driver.save_screenshot('path/screenshot_{}.png'.format(datetime.now()))
The result is that an instance of Chrome kicks off, but nothing happens. The base_url (or any other URL defined in my test) doesn't come up. The blank window is all I get. Any insights on what I'm doing wrong?
You're instantiating the Selenium driver twice.
If you want to keep the same session open, you should pass the same object to both scripts, or import it, which could work, but it would be a dirtier solution.
The best thing to do is create a singleton class to initialize the driver. Once you have done that, every time you create an object from this class you will get the same unique webdriver object.
You can get an example from this answer.
You can also read more about singleton instances; they're very common and very useful. You can check here.
I don't understand what you mean by robot; perhaps the testing framework?
You can write the singleton class wherever you want. You will have to import the class from that place and then instantiate the object. For example:
lib/singleton_web_driver.py
from robot.api import logger
from robot.utils import asserts
from selenium import webdriver
class Singleton(object):
instance = None
def __new__(cls, base_url, browser='firefox'):
if cls.instance is None:
i = object.__new__(cls)
cls.instance = i
cls.base_url = base_url
cls.browser = browser
if browser == "firefox":
# Create a new instance of the Firefox driver
cls.driver = webdriver.Firefox()
elif browser == "remote":
# Create a new instance of the Chrome driver
cls.driver = webdriver.Remote("http://localhost:4444/wd/hub", webdriver.DesiredCapabilities.HTMLUNITWITHJS)
else:
# Sorry, we can't help you right now.
asserts.fail("Support for Firefox or Remote only!")
else:
i = cls.instance
return i
And then in every script where you're going to need the webdriver:
test_script_file.py
from lib.singleton_web_driver import Singleton
driver = Singleton('base_url')
This is just dummy code; I didn't test it. The important point is to create the class with the __new__ method, where you can check whether the class has already been instantiated. The import is just like any other class import: you write the class in a module and then import it in the scripts where you're going to use it.
I had a similar problem. My solution was just to initialize the driver in the main file and then use that driver within the imported files and functions, like in this example, changing createContent(unittest.TestCase) to createContent(unittest.TestCase, driver).
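A minimal sketch of that idea, assuming the module-level webdriver.Chrome() calls are removed from login.py and pytest.py and the test methods use self.driver instead (the wiring below is illustrative, not the exact code from the question):
# main.py (sketch): create one driver and share it with every imported TestCase
import unittest
from selenium import webdriver
from features.login import logIn
from features.pytest import createContent

shared_driver = webdriver.Chrome()

# hand the same driver to each test class instead of letting each module
# build its own webdriver.Chrome() at import time
logIn.driver = shared_driver
createContent.driver = shared_driver

test_suite = unittest.TestSuite([
    unittest.TestLoader().loadTestsFromTestCase(logIn),
    unittest.TestLoader().loadTestsFromTestCase(createContent),
])
unittest.TextTestRunner().run(test_suite)
shared_driver.quit()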
I don't understand how to filter for structured properties
I keep getting this error:
BadFilterError: invalid filter: Cannot query for unindexed property author.email.
I'm attempting to create an entity in my test setUp.
I'm using the code from the GAE tutorial.
Here is the model:
class Author(ndb.Model):
"""Sub model for representing an author."""
identity = ndb.StringProperty(indexed=False)
email = ndb.StringProperty(indexed=True)
class Greeting(ndb.Model):
"""A main model for representing an individual Guestbook entry."""
author = ndb.StructuredProperty(Author)
content = ndb.StringProperty(indexed=False)
date = ndb.DateTimeProperty(auto_now_add=True)
Here is the simple query:
Author.query(Author.email == "bryan@mail.com").get()
Here is my testing code in functional_tests.py:
import sys, os, subprocess, time, unittest, shlex, logging
sys.path.append("/usr/local/google_appengine")
sys.path.append("/usr/local/google_appengine/lib/yaml/lib")
sys.path.append("/usr/local/google_appengine/lib/webapp2-2.5.2")
sys.path.append("/usr/local/google_appengine/lib/django-1.5")
sys.path.append("/usr/local/google_appengine/lib/cherrypy")
sys.path.append("/usr/local/google_appengine/lib/concurrent")
sys.path.append("/usr/local/google_appengine/lib/docker")
sys.path.append("/usr/local/google_appengine/lib/requests")
sys.path.append("/usr/local/google_appengine/lib/websocket")
sys.path.append("/usr/local/google_appengine/lib/fancy_urllib")
sys.path.append("/usr/local/google_appengine/lib/antlr3")
from selenium import webdriver
from google.appengine.api import memcache, apiproxy_stub, apiproxy_stub_map
from google.appengine.ext import db
from google.appengine.ext import testbed
from google.appengine.datastore import datastore_stub_util
from google.appengine.tools.devappserver2 import devappserver2
class NewVisitorTest(unittest.TestCase):
def setUp(self):
# Start the dev server
cmd = "/usr/local/bin/dev_appserver.py /Users/Bryan/work/GoogleAppEngine/guestbook/app.yaml --port 8080 --storage_path /tmp/datastore --clear_datastore --skip_sdk_update_check"
self.dev_appserver = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE)
time.sleep(2) # Important, let dev_appserver start up
self.testbed = testbed.Testbed()
self.testbed.setup_env(app_id='dermal')
self.testbed.activate()
self.testbed.init_user_stub()
# Create a consistency policy that will simulate the High Replication consistency model.
# with a probability of 1, the datastore should be available.
self.policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
# Initialize the datastore stub with this policy.
self.testbed.init_datastore_v3_stub(datastore_file="/tmp/datastore/datastore.db", use_sqlite=True, consistency_policy=self.policy)
self.testbed.init_memcache_stub()
self.datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
# setup the dev_appserver
APP_CONFIGS = ['app.yaml']
# setup client to make sure
from guestbook import Author, Greeting
        if not Author.query(Author.email == "bryan@mail.com").get():
            logging.info("create Admin")
            client = Author(
                email="bryan@mail.com",
            ).put()
        assert Author.query(Author.email == "bryan@mail.com").get()
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
self.testbed.deactivate()
self.dev_appserver.terminate()
def test_submit_anon_greeting(self):
self.browser.get('http://localhost:8080')
self.browser.find_element_by_name('content').send_keys('Anonymous test post')
self.browser.find_element_by_name('submit').submit()
        self.assertIn('Anonymous test post', self.browser.page_source)
You're querying the Author model. But in your structure, Authors only exist as part of a Greeting. So you should be creating a Greeting and querying that model.
Greeting(
    author=Author(email="bryan@mail.com"),
    content="Hello there!"
).put()

Greeting.query(Greeting.author.email == "bryan@mail.com").get()