call_command not working, did everything like the tutorial - python

So, just like the title suggests, I followed the instructions.
This is the code.
The command doesn't work — not even from the command line — and I have zero idea why.
import time

from django.db import connections
from django.db.utils import OperationalError
# BUG in original: BaseCommand was imported from "core.management.base";
# it lives in django.core.management.base.
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    """Django management command that pauses execution until the database
    is available (e.g. while waiting for a DB container to come up)."""

    def handle(self, *args, **options):
        # BUG in original: ``self.std`` does not exist on BaseCommand;
        # the output stream is ``self.stdout``.
        self.stdout.write('waiting for database...')
        db_conn = None
        while not db_conn:
            try:
                # BUG in original: ``connections[default]`` used an undefined
                # name; the connection alias is the string 'default'.
                db_conn = connections['default']
            except OperationalError:  # was a bare ``except:`` — far too broad
                self.stdout.write('Database unavailable, wait 1 sec')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database available'))
The call is in a unit test: just a line with call_command('wait_for_db') (matching the name of the file, of course).
That is the unit test, and the code above is the wait-for-db command.
Thanks, all.

Management commands for your app should be in a directory named <app>/management/commands/. You have named the management directory "management.py", it should be "management"

Related

How to run a crontab job on macos Monterey with a pytest(python) file?

I have a simple pytest test file that calls a Page class and outputs a CSV file; the test runs fine in the terminal.
from generic_framework_code.teststat import TestStatus
import unittest
import pytest
from generic_framework_code.random_scripts.new_script import Newscript
import generic_framework_code.utilities.custom_logger as cl
import logging
# NOTE(review): the paste lost the "@" on decorators (shown as "#"); restored.
@pytest.mark.usefixtures("NewSetup", "setup")
class NewTest(unittest.TestCase):
    """Sample test that drives Newscript through a shared driver fixture."""

    log = cl.testLogger(logging.DEBUG)

    @pytest.fixture(autouse=True)
    def classSetup(self, NewSetup):
        # ``self.driver`` is presumably injected by the NewSetup fixture —
        # TODO confirm against the fixture definition.
        self.ts = TestStatus(self.driver)
        self.ns = Newscript(self.driver)

    @pytest.mark.run(order=1)
    def test_ns_subtest(self):
        self.ns.print_csv()
Here is the page class below:
from generic_framework_code.base.basepage import BasePage
import generic_framework_code.utilities.custom_logger as cl
import logging
class Newscript(BasePage):
    """Page object wrapping a Selenium WebDriver instance."""

    log = cl.customLogger(logging.INFO)

    def __init__(self, driver):
        super().__init__(driver)
        self.driver = driver

    def print_content(self):
        """Does the job (body elided in the original post)."""
        # NOTE(review): the original snippet only says "does the job" here;
        # the real implementation was not included in the post.
These scripts run fine on my local machine with the usual pytest /user..../test_file.py command.
I want to set a cronjob on my macOS to run every day at 9am, but nothing seems to happen
I went with crontab -e and added the following command
0 09 * * * /Users/abc/PycharmProjects/fullpath_to_file/tests/test_file.py
also tried this https://towardsdatascience.com/how-to-easily-automate-your-python-scripts-on-mac-and-windows-459388c9cc94
any help is appreciated.
Update
I did some reading on stack and created another python file and invoked the pytest from there
import pytest
pytest.main(['/Users/full_path/tests/test_file.py'])
when I run this file in the terminal with python3 /Users/full_path/tests/test_file.py it runs fine but inserting in crontab and checking the log says
"Exceptions:
- WebDriverException : Raised either when it can't start the service
or when it can't connect to the service

Standalone python script to retrieve django models

I have been trying to figure out how to run a script in the background, so that I can process an object without making the user wait for the server to respond.
Views.py
import subprocess
import sys


def allocate_request(request, event_id):
    """Kick off background processing of a MeetingEvent and respond at once.

    Spawns ``sorting.py`` in a child interpreter so the user does not have
    to wait for the processing to finish before getting a response.
    """
    # 404 early if the event does not exist.
    event = get_object_or_404(MeetingEvent, id=event_id)
    # sys.executable keeps the child on the same Python interpreter.
    # NOTE(review): the child's stdout/stderr are piped but never read; a
    # very chatty child could block on a full pipe buffer — consider DEVNULL.
    subprocess.Popen(
        [sys.executable, 'sorting.py', str(event_id)],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    return HttpResponse("Request Submitted.")
and in the same directory, Sorting.py
import sys


class Organize:
    # BUG in original: the guard was ``if len(sys.argv) <= 1`` — that runs
    # the body exactly when NO event id was passed, then crashes on
    # ``sys.argv[1]``. The condition must be inverted.
    if len(sys.argv) > 1:
        event_id = int(sys.argv[1])
        event_i = MeetingEvent.objects.get(id=event_id)
Problem is...
the script itself can't be run alone,
from .models import MeetingEvent
ModuleNotFoundError: No module named '__main__.models'; '__main__' is not a package
Would appreciate any help :)

Set up Django DiscoverRunner to always recreate database on testing with radish

I am using radish BDD with Selenium to test my Django app; however, sometimes Django asks to delete the database because it already exists. Here's my terrain.py:
import os
import django
from django.test.runner import DiscoverRunner
from django.test import LiveServerTestCase
from radish import before, after
from selenium import webdriver
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tangorblog.settings.features')
BASE_URL = os.environ.get('BASE_URL', 'http://localhost:8000')
@before.each_scenario
def setup_django_test(scenario):
    """Create a fresh test environment, database and live server per scenario.

    NOTE(review): recreating the databases for every scenario is very slow;
    see the accepted answer for a once-per-run alternative.
    """
    django.setup()
    scenario.context.test_runner = DiscoverRunner()
    scenario.context.test_runner.setup_test_environment()
    scenario.context.old_db_config = \
        scenario.context.test_runner.setup_databases()
    scenario.context.base_url = BASE_URL
    scenario.context.test_case = LiveServerTestCase()
    scenario.context.test_case.setUpClass()
    scenario.context.browser = webdriver.Chrome()
@after.each_scenario
def teardown_django(scenario):
    """Tear down, in reverse order of setup: browser, test case, databases."""
    scenario.context.browser.quit()
    scenario.context.test_case.tearDownClass()
    del scenario.context.test_case
    scenario.context.test_runner.teardown_databases(
        scenario.context.old_db_config)
    scenario.context.test_runner.teardown_test_environment()
I think that I could somehow alter this part:
scenario.context.old_db_config =\
scenario.context.test_runner.setup_databases()
But I don't know how. Any help?
It seems to me that recreating the database for every scenario would end up being highly inefficient (and super slow). It should only be necessary to create the database once per test run and then drop it at the end.
I've come up with a solution I think integrates better with Django. It allows you to run the tests with manage.py test, only creates/drops the database once per test run, and clears the database tables after every feature is tested.
Note that this runs both the Django unit tests and radish tests by default. To run just the radish tests, you can do RADISH_ONLY=1 manage.py test. Also, for the live server/Selenium tests to work, you have to run manage.py collectstatic first.
# package/settings.py
TEST_RUNNER = 'package.test.runner.RadishTestRunner'
# package/test/runner
import os
from django.test.runner import DiscoverRunner
import radish.main
class RadishTestRunner(DiscoverRunner):
    """Django test runner that also executes radish behavioral tests.

    Set RADISH_ONLY=1 to skip the Django unit tests and run only radish.
    """

    def run_suite(self, suite, **kwargs):
        # Run unit tests unless explicitly disabled.
        if os.getenv('RADISH_ONLY') == '1':
            result = None
        else:
            result = super().run_suite(suite, **kwargs)
        # Run radish behavioral tests; radish returns an exit-code-like int.
        self._radish_result = radish.main.main(['features'])
        return result

    def suite_result(self, suite, result, **kwargs):
        if result is not None:
            # Django unit tests were run
            result = super().suite_result(suite, result, **kwargs)
        else:
            result = 0
        # Fold the radish exit code into the overall result.
        result += self._radish_result
        return result
# radish/world.py
from django.db import connections
from django.test.testcases import LiveServerThread, _StaticFilesHandler
from django.test.utils import modify_settings
from radish import pick
from selenium import webdriver
@pick
def get_browser():
    """Memoized Chrome WebDriver shared across the radish run."""
    return webdriver.Chrome()
@pick
def get_live_server():
    """Memoized LiveServer instance, started on first use."""
    live_server = LiveServer()
    live_server.start()
    return live_server
class LiveServer:
    """Minimal stand-in for Django's live-server test infrastructure.

    Starts a LiveServerThread on an OS-assigned port, sharing any in-memory
    SQLite connections with the server thread.
    """

    host = 'localhost'
    port = 0  # 0 = let the OS pick a free port
    server_thread_class = LiveServerThread
    static_handler = _StaticFilesHandler

    def __init__(self):
        connections_override = {}
        for conn in connections.all():
            # In-memory SQLite databases must share their connection with
            # the server thread, or it would see an empty database.
            if conn.vendor == 'sqlite' and conn.is_in_memory_db():
                conn.allow_thread_sharing = True
                connections_override[conn.alias] = conn
        self.modified_settings = modify_settings(
            ALLOWED_HOSTS={'append': self.host})
        self.server_thread = self.server_thread_class(
            self.host,
            self.static_handler,
            connections_override=connections_override,
            port=self.port,
        )
        self.server_thread.daemon = True

    # NOTE(review): the paste showed "#property"; restored the decorator.
    @property
    def url(self):
        # Wait until the server thread has bound its port.
        self.server_thread.is_ready.wait()
        return 'http://{self.host}:{self.server_thread.port}'.format(self=self)

    def start(self):
        self.modified_settings.enable()
        self.server_thread.start()
        self.server_thread.is_ready.wait()
        if self.server_thread.error:
            self.stop()
            raise self.server_thread.error

    def stop(self):
        if hasattr(self, 'server_thread'):
            self.server_thread.terminate()
        # Undo the thread-sharing flag set in __init__.
        for conn in connections.all():
            if conn.vendor == 'sqlite' and conn.is_in_memory_db():
                conn.allow_thread_sharing = False
        self.modified_settings.disable()
# radish/terrain.py
from django.db import connections, transaction
from radish import world, before, after
@before.all
def set_up(features, marker):
    # Warm up the memoized live server before any scenario runs.
    world.get_live_server()
@after.all
def tear_down(features, marker):
    """Quit the shared browser and stop the live server once per run."""
    browser = world.get_browser()
    live_server = world.get_live_server()
    browser.quit()
    live_server.stop()
@before.each_scenario
def set_up_scenario(scenario):
    """Open a transaction per scenario; rolled back in tear_down_scenario."""
    live_server = world.get_live_server()
    scenario.context.base_url = live_server.url
    scenario.context.browser = world.get_browser()
    # XXX: Only works with the default database
    # XXX: Assumes the default database supports transactions
    scenario.context.transaction = transaction.atomic(using='default')
    scenario.context.transaction.__enter__()
@after.each_scenario
def tear_down_scenario(scenario):
    # Roll back everything the scenario wrote, then close the connections.
    transaction.set_rollback(True, using='default')
    scenario.context.transaction.__exit__(None, None, None)
    for connection in connections.all():
        connection.close()
@Wyatt, again I'm just going to modify your answer. I have tried running your solution; however, it didn't manage to make each scenario independent — I even encountered an IntegrityError when I tried to create a model object inside a scenario. Regardless, I still use your solution (especially RadishTestRunner, as the idea comes from you). I modified it so I could run the Django unit tests separately from radish. I use LiveServerTestCase directly and removed LiveServer, as I noticed the similarity between the two, except that LiveServerTestCase inherits from TransactionTestCase and also has LiveServerThread and _StaticFilesHandler built in. Here's how it is:
# package/test/runner.py
import os
from django.test.runner import DiscoverRunner
import radish.main
class RadishTestRunner(DiscoverRunner):
    """Test runner that runs ONLY radish behavioral tests (unit tests are
    left to the standard runner)."""

    radish_features = ['features']  # default feature directory

    def run_suite(self, suite, **kwargs):
        # Delegate entirely to radish; the Django suite argument is ignored.
        return radish.main.main(self.radish_features)

    def suite_result(self, suite, result, **kwargs):
        # radish already returns a process-style exit code; pass it through.
        return result

    def set_radish_features(self, features):
        self.radish_features = features
# radish/world.py
from django.test import LiveServerTestCase
from radish import pick
from selenium import webdriver
@pick
def get_browser():
    """Memoized Chrome WebDriver for the radish run."""
    return webdriver.Chrome()
@pick
def get_live_server():
    """Memoized LiveServerTestCase *class* with its server started.

    Note: returns the class itself (setUpClass starts the live server);
    scenarios instantiate it per scenario for _pre_setup/_post_teardown.
    """
    live_server = LiveServerTestCase
    live_server.setUpClass()
    return live_server
# radish/terrain.py
from radish import world, before, after
from selenium import webdriver
@before.all
def set_up(features, marker):
    # Start the memoized live server once before any feature runs.
    world.get_live_server()
@after.all
def tear_down(features, marker):
    # Stop the live server started in set_up.
    live_server = world.get_live_server()
    live_server.tearDownClass()
@before.each_scenario
def set_up_scenario(scenario):
    """Fresh browser and a _pre_setup'd TransactionTestCase per scenario."""
    live_server = world.get_live_server()
    scenario.context.browser = webdriver.Chrome()
    scenario.context.base_url = live_server.live_server_url
    # get_live_server() returns the class; instantiate per scenario.
    scenario.context.test_case = live_server()
    scenario.context.test_case._pre_setup()
@after.each_scenario
def tear_down_scenario(scenario):
    # Flush/rollback scenario data, then quit this scenario's browser.
    scenario.context.test_case._post_teardown()
    scenario.context.browser.quit()
That's it. This also fix the problem with PostgreSQL on my other question that you point out. I also open and quit browser on each scenario, as it gives me more control over the browser inside scenario. Thank you so much for you effort to point me at the right direction.
Finally, I returned to PostgreSQL.
PostgreSQL seems to be faster than MySQL; it greatly reduced the time needed to run the tests.
And oh ya, I need to run ./manage.py collectstatic first after specify STATIC_ROOT in django settings file.
I also modify RadishTestRunner, so instead of running with RADISH_ONLY=1, I could run it with python manage.py radish /path/to/features/file. Here's my radish command:
# package.management.commands.radish
from __future__ import absolute_import
import sys
from django.core.management.base import BaseCommand, CommandError
from package.test.runner import RadishTestRunner
class Command(BaseCommand):
    """Run radish feature files through RadishTestRunner.

    Usage: ``python manage.py radish /path/to/features [...]``
    """

    def add_arguments(self, parser):
        parser.add_argument('features', nargs='+', type=str)

    def handle(self, *args, **options):
        test_runner = RadishTestRunner(interactive=False)
        if options['features']:
            test_runner.set_radish_features(options['features'])
        result = test_runner.run_suite(None)
        if result:
            # Non-zero radish result — propagate as the process exit code.
            sys.exit(result)
By using radish with django management command, we have control over which feature file we want to run.

Unit Tests for Python: Mock Patch

I am trying to write Unit Tests for Cassandra but am not able to get it work. Here is the code:
CassandraLoggingModel.py:
import uuid
from cassandra.cqlengine import columns
from datetime import datetime
from cassandra.cqlengine.models import Model
class CassandraRunLog(Model):
    """Cassandra table holding one log record of a pipeline task run."""

    pipeline_id = columns.Text(partition_key=True, max_length=180)
    task_id = columns.Text(partition_key=True, max_length=180)
    execution_date = columns.DateTime(partition_key=True)
    # BUG in original: ``default=datetime.now()`` called now() ONCE at class
    # definition time, stamping every row with the import timestamp. Pass
    # the callable so it is evaluated per insert.
    created_at = columns.DateTime(primary_key=True, default=datetime.now)
    host = columns.Text(max_length=1000)
    run_as_unixname = columns.Text(max_length=1000)
    logger = columns.Text(max_length=128)
    level = columns.Text(max_length=16)
    trace = columns.Text(max_length=10000)
    msg = columns.Text(max_length=64000)
CassandraLogging.py
import sys
import logging
import traceback
import uuid
from datetime import datetime
from CassandraLoggingModel import CassandraRunLog
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
import cassandra
class CassandraHandler(logging.Handler):
    """Logging handler that persists each record to Cassandra."""

    def __init__(self, user, *args, **kwargs):
        self.user = user
        super(CassandraHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        print("emit called")
        trace = "None"
        exc = record.__dict__['exc_info']
        if exc:
            # BUG in original: ``traceback.format_exc(exc)`` — format_exc
            # takes a *limit*, not an exc_info tuple. Format the tuple.
            trace = ''.join(traceback.format_exception(*exc))
        if hasattr(record, 'message'):
            log_msg = record.message
        else:
            log_msg = self.format(record)
        self.host = 'localhost'
        self.keyspace = 'logging'
        try:
            # NOTE(review): opening a new connection on every emit() is
            # expensive; consider moving the setup to __init__.
            auth_provider = PlainTextAuthProvider(username='some', password='some')
            connection.setup([self.host], self.keyspace, auth_provider=auth_provider)
            model = CassandraRunLog(host=self.user, created_at=datetime.now(),
                                    trace=trace, msg=log_msg)
            model.save()
        except Exception as e:
            # Best-effort: a logging handler must never raise into the app.
            print(str(e))
test.py
import datetime
import logging
import mock
from CassandraLogging import CassandraHandler
# Patch the name where it is *used*: CassandraLogging did
# ``from CassandraLoggingModel import CassandraRunLog``, so patching
# 'CassandraLoggingModel.CassandraRunLog' never intercepts the call.
# Also stub out ``connection`` so emit() does not need a real database —
# otherwise connection.setup() raises and the except-block swallows the
# test path, which is why save() was "Called 0 times".
@mock.patch('CassandraLogging.connection')
@mock.patch('CassandraLogging.CassandraRunLog')
def test_formatting(MockClassRunLog, mock_connection):
    run_log = MockClassRunLog.return_value
    # construct our logging handler
    handler = CassandraHandler('name')
    # Log an unformated message.
    record = logging.LogRecord(name='pytest',
                               level=logging.INFO,
                               pathname='something',
                               lineno=0,
                               msg='something',
                               args=(),
                               exc_info=None,
                               func='test_formatting')
    handler.emit(record)
    # we should have a record added to the DB
    run_log.save.assert_called_once_with()
I am trying to add a logging handler in python that stores the log message to a cassandra database. I am trying to test if model's save method is called. save method is implemented in Cassandra Model and CassandraRunLog inherits from that.
When I am running the test using command:
py.test test.py
I am getting the following error:
E AssertionError: Expected to be called once. Called 0 times.
Can someone please help ?
Never mind. I figured it out. The test was not able to connect to the database, so control was getting passed to the except block every time.

Processing some file before starting app and reacting for every change

I have a file containing some data — data.txt (located at a known path). I would like the Django app to process this file before starting and to react to every change in it (without a restart). What is the best way to do this?
For startup you can write middleware that does what you want in init and afterwards raise django.core.exceptions.MiddlewareNotUsed from the init, so the django will not use it for any request processing. docs
And middleware init will be called at startup, not at the first request.
As for react to file changes you can use https://github.com/gorakhargosh/watchdog ( an example of usage can be found here).
So you can either start it somewhere in middleware too, or if its only db updates you can create a separate script(or django management command) that will be run via supervisor or something like this and will monitor this file and update the db.
An option could be pyinotify, which monitors the filesystem for changes; it works only on Linux, though.
Otherwise look at the code of runserver command that seems to do the same thing (the code of the autoreload module is here).
To run a command before starting an app I suppose you can write some code in the settings module.
maybe you could put a object in the settings which will lookup to the file for every change. ...
ie :
make a class who will load the file and reload this one if he is modified
class ExtraConfigWatcher(object):
    """Read-only dict-like view over a config file, reloaded on change."""

    def __init__(self, file):
        self.file = file
        self.cached = dict()            # last parsed contents
        self.last_date_modified = None  # None = never loaded

    def update_config(self):
        """
        update the config by reloading the file
        """
        # NOTE(review): has_been_modified() and get_dict_with_file() are
        # pseudo-code helpers from the answer, not defined in this snippet.
        if has_been_modified(self.file, self.last_date_modified):
            # regenerate the config with te file.
            self.cached = get_dict_with_file(self.file)
            self.last_date_modified = time.time()

    def __getitem__(self, *args, **kwargs):
        # Lazily refresh before every read.
        self.update_config()
        return self.cached.__getitem__(*args, **kwargs)

    def __setitem__(self, *args, **kwargs):
        # BUG in original: ``raise NotImplemented(...)`` — NotImplemented is
        # a sentinel value, not callable; that line raises TypeError instead.
        raise NotImplementedError("you can't set config into this")
in settings.py: initialize this object
EXTRA_CONFIG = ExtraConfigWatcher("path/to/the/file.dat")
in myapps/views.py: import settings and use EXTRA_CONFIG
from django.conf import settings
def dosomthing(request):
if settings.EXTRA_CONFIG["the_data_from_the_file"] == "foo":
# bouhh
A while ago I was trying to find a mechanism to "hot-swap" Python modules. While that is not exactly what you need, maybe you can use the implementation I proposed, and monitor your configuration file for modifications and act accordingly.
The code I proposed is the following (I did not use inotify because I am working in an NFS file system):
import imp
import time
import hashlib
import threading
import logging
logger = logging.getLogger("")
class MonitorThread(threading.Thread):
    """Daemon thread that polls a source file and notifies the engine
    whenever the file's SHA-1 fingerprint changes."""

    def __init__(self, engine, frequency=1):
        super(MonitorThread, self).__init__()
        self.engine = engine
        self.frequency = frequency  # poll interval, seconds
        # daemonize the thread so that it ends with the master program
        self.daemon = True

    def run(self):
        while True:
            with open(self.engine.source, "rb") as src:
                digest = hashlib.sha1(src.read()).hexdigest()
            # Only notify when the hash actually differs.
            if digest != self.engine.fingerprint:
                self.engine.notify(digest)
            time.sleep(self.frequency)
class Engine(object):
    """Hot-swappable module wrapper.

    Loads a Python source file as a module, fingerprints the file, and
    reloads it when the monitor thread reports a change. Attribute access
    is delegated to the currently loaded module.
    """

    def __init__(self, source):
        # store the path to the engine source
        self.source = source
        # load the module for the first time and create a fingerprint
        # for the file
        self.mod = imp.load_source("source", self.source)
        with open(self.source, "rb") as src:
            self.fingerprint = hashlib.sha1(src.read()).hexdigest()
        # turn on monitoring thread
        watcher = MonitorThread(self)
        watcher.start()

    def notify(self, fingerprint):
        """Called by MonitorThread when the file's hash changes: reload."""
        logger.info("received notification of fingerprint change ({0})".\
            format(fingerprint))
        self.fingerprint = fingerprint
        self.mod = imp.load_source("source", self.source)

    def __getattr__(self, attr):
        # Delegate unknown attributes to the freshly loaded module.
        return getattr(self.mod, attr)
def main():
    """Demo driver: call engine functions in a loop while the monitor
    thread hot-reloads engine.py in the background."""
    logging.basicConfig(level=logging.INFO,
                        filename="hotswap.log")
    engine = Engine("engine.py")
    # this silly loop is a sample of how the program can be running in
    # one thread and the monitoring is performed in another.
    while True:
        engine.f1()
        engine.f2()
        time.sleep(1)


if __name__ == "__main__":
    main()

Categories

Resources