Testing django with mongoengine - python

I have a Django project. By default, Django's test framework only works with SQL databases, but I need it to work with MongoDB through mongoengine.
I use Django 1.9 and mongoengine 0.9, because that version still supports Django.
I followed the docs here https://mongoengine.readthedocs.io/en/v0.9.0/django.html
and the Django testing docs https://docs.djangoproject.com/en/1.8/topics/testing/tools/
The problem is how to configure the test file to tell it I want to use the MongoDB database. Without any setup, the test file looks like this:
import unittest
from django.test import Client
from .models import User

class UserTests(unittest.TestCase):
    def setUp(self):
        self.client = Client()

    def test_create_user(self):
        self.client.post('/users/', {'first_name': 'aaa', 'last_name': 'bbb',
                                     'username': 'xxx', 'email': 'abc@gmail.com'})
        ...
The error when running python manage.py test is:
raise ImproperlyConfigured("settings.DATABASES is improperly configured. "
ImproperlyConfigured: settings.DATABASES is improperly configured. Please supply the ENGINE value. Check settings documentation for more details.
In settings.py:
from mongoengine import connect

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.dummy',
    },
}

connect(
    host='mongodb://localhost/book'
)

1. Define a custom DiscoverRunner for NoSQL tests.
For example, in yourapp/tests.py:
from django.test.runner import DiscoverRunner

class NoSQLTestRunner(DiscoverRunner):
    def setup_databases(self, **kwargs):
        pass

    def teardown_databases(self, old_config, **kwargs):
        pass
2. Define a custom TestCase class for NoSQL tests.
For example, in yourapp/tests.py:
from django.test import TestCase

class NoSQLTestCase(TestCase):
    def _fixture_setup(self):
        pass

    def _fixture_teardown(self):
        pass
3. Change the default TEST_RUNNER in your settings.py:
TEST_RUNNER = 'yourapp.tests.NoSQLTestRunner'
4. Write tests
Tests that do not require the database:
class YourTest(NoSQLTestCase):
    def test_foo(self):
        to_compare = 'foo'
        assumed = 'foo'
        self.assertEqual(to_compare, assumed)
For tests that require the database, use mocking:
https://docs.mongoengine.org/guide/mongomock.html
Step by step
Install mongomock: pip install mongomock
Write the test:
from mongoengine import connect, disconnect, Document, StringField

class Foo(Document):
    content = StringField()

class TestFoo(NoSQLTestCase):
    @classmethod
    def setUpClass(cls):
        # Use the default connection alias so Foo documents hit the mock database.
        connect('mongoenginetest', host='mongomock://localhost')

    @classmethod
    def tearDownClass(cls):
        disconnect()

    def test_thing(self):
        foo = Foo(content='bar')
        foo.save()
        fresh_foo = Foo.objects().first()
        assert fresh_foo.content == 'bar'

Testing Django while using MongoDB can be done by creating a custom test case whose setUp connects to MongoDB through mongoengine and whose tearDown drops the test database and disconnects.
from django.conf import settings
from django.test import TestCase
import mongoengine

class MongoTestCase(TestCase):
    def setUp(self):
        mongoengine.connection.disconnect()
        mongoengine.connect(
            host=settings.MONGO['host'],
            port=settings.MONGO['port'],
            db=settings.MONGO['db'],
            username=settings.MONGO['username'],
            password=settings.MONGO['password']
        )
        super().setUp()

    def tearDown(self):
        from mongoengine.connection import get_connection, disconnect
        connection = get_connection()
        connection.drop_database(settings.MONGO['db'])
        disconnect()
        super().tearDown()
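For reference, this MongoTestCase reads its connection details from a MONGO dict in settings.py. That dict is not shown in the original snippet; a minimal placeholder version might look like this:
# settings.py -- placeholder values for a local test instance
MONGO = {
    'host': 'localhost',
    'port': 27017,
    'db': 'test_db',
    'username': None,  # set these if your MongoDB instance requires auth
    'password': None,
}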
Then you can use this test case in your tests. For example, assuming we have a model named App:
from models import App

class AppCreationTest(MongoTestCase):
    def test(self):
        app = App(name="first_app")
        app.save()
        assert App.objects.first().name == app.name
You can run these tests with python manage.py test.
Here is a small gist for your reference

Related

Test Pydantic settings in FastAPI

Suppose my main.py is like this (this is a simplified example, in my app I use an actual database and I have two different database URIs for development and testing):
from fastapi import FastAPI
from pydantic import BaseSettings

app = FastAPI()

class Settings(BaseSettings):
    ENVIRONMENT: str

    class Config:
        env_file = ".env"
        case_sensitive = True

settings = Settings()

databases = {
    "dev": "Development",
    "test": "Testing"
}

database = databases[settings.ENVIRONMENT]

@app.get("/")
def read_root():
    return {"Environment": database}
while the .env is
ENVIRONMENT=dev
Suppose I want to test my code and I want to set ENVIRONMENT=test to use a testing database. What should I do? In FastAPI documentation (https://fastapi.tiangolo.com/advanced/settings/#settings-and-testing) there is a good example but it is about dependencies, so it is a different case as far as I know.
My idea was the following (test.py):
import pytest
from fastapi.testclient import TestClient
from main import app

@pytest.fixture(scope="session", autouse=True)
def test_config(monkeypatch):
    monkeypatch.setenv("ENVIRONMENT", "test")

@pytest.fixture(scope="session")
def client():
    return TestClient(app)

def test_root(client):
    response = client.get("/")
    assert response.status_code == 200
    assert response.json() == {"Environment": "Testing"}
but it doesn't work.
Furthermore I get this error:
ScopeMismatch: You tried to access the 'function' scoped fixture 'monkeypatch' with a 'session' scoped request object, involved factories
test.py:7: def test_config(monkeypatch)
env\lib\site-packages\_pytest\monkeypatch.py:16: def monkeypatch()
whereas according to the official pytest documentation it should work (https://docs.pytest.org/en/3.0.1/monkeypatch.html#example-setting-an-environment-variable-for-the-test-session). I have the latest version of pytest installed.
I tried to use specific test environment variables because of this: https://pydantic-docs.helpmanual.io/usage/settings/#field-value-priority.
To be honest I'm lost, my only real aim is to have a different test configuration (in the same way Flask works: https://flask.palletsprojects.com/en/1.1.x/tutorial/tests/#setup-and-fixtures). Am I approaching the problem the wrong way?
Pydantic settings objects are mutable, so you can simply override them in your test.py:
from main import settings
settings.ENVIRONMENT = 'test'
This is a simple way that works for me. Consider that you have a configuration file named APPNAME.cfg with the following settings:
DEV_DSN='DSN=my_dev_dsn; UID=my_dev_user_id; PWD=my_dev_password'
PROD_DSN='DSN=my_prod_dsn; UID=my_prod_user_id; PWD=my_prod_password'
Set the environment variable according to your OS or Docker setup. For Linux you could enter:
export MY_ENVIRONMENT=DEV
Now consider the following settings.py:
from pydantic import BaseSettings
import os

class Settings(BaseSettings):
    DSN: str

    class Config:
        env_prefix = f"{os.environ['MY_ENVIRONMENT']}_"
        env_file = "APPNAME.cfg"
Your app would simply need to do the following:
import pyodbc

from settings import Settings

s = Settings()
db = pyodbc.connect(s.DSN)
Bumping an old thread because I found a solution that was a bit cleaner for my use case. I was having trouble getting test-specific dotenv files to load only while tests were running, since I also had a local development dotenv in the project dir.
You can do something like the below, where test.environment is a special dotenv file that is NOT an env_file path in the settings class Config. Because environment variables take precedence over dotenv files for BaseSettings, this will override any settings from a local .env, as long as it runs in conftest.py before your settings class is imported. It also guarantees that your test environment is only active while tests are being run.
# conftest.py
from dotenv import load_dotenv

load_dotenv("tests/fixtures/test.environment", override=True)

from app import settings  # singleton instance of the BaseSettings class
It's really tricky to mock the environment with pydantic involved.
I only achieved the desired behaviour with dependency injection in FastAPI and a get_settings function, which itself seems to be good practice, since even the documentation says to do so.
Suppose you have
...

class Settings(BaseSettings):
    ENVIRONMENT: str

    class Config:
        env_file = ".env"
        case_sensitive = True

def get_settings() -> Settings:
    return Settings()

databases = {
    "dev": "Development",
    "test": "Testing"
}

database = databases[get_settings().ENVIRONMENT]

@app.get("/")
def read_root():
    return {"Environment": database}
And in your tests you would write:
import pytest
from main import app, get_settings, Settings

def get_settings_override() -> Settings:
    return Settings(ENVIRONMENT="dev")

@pytest.fixture(autouse=True)
def override_settings() -> None:
    app.dependency_overrides[get_settings] = get_settings_override
You can use scope session if you'd like.
This would override your ENVIRONMENT variable and wouldn't touch the rest of the configuration variables.
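For the override to actually reach the route, FastAPI has to resolve the settings through its dependency system, so the endpoint needs to request them via Depends rather than reading a module-level variable. A minimal sketch of main.py written that way (it reuses the Settings class and get_settings() from above; the rest is assumed):
from fastapi import Depends, FastAPI
from pydantic import BaseSettings

app = FastAPI()

class Settings(BaseSettings):
    ENVIRONMENT: str

    class Config:
        env_file = ".env"
        case_sensitive = True

def get_settings() -> Settings:
    return Settings()

databases = {
    "dev": "Development",
    "test": "Testing",
}

@app.get("/")
def read_root(settings: Settings = Depends(get_settings)):
    # Settings are resolved per request, so app.dependency_overrides[get_settings]
    # takes effect in tests.
    return {"Environment": databases[settings.ENVIRONMENT]}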

Cannot login in Django tests

Trying to write tests for a legacy Django 1.11 project. The project is using unittest package (I don't know if that's standard or a choice).
I'm trying to use django.test.Client.force_login to test a view with LoginRequiredMixin but it does nothing. I have added print(self.request.user) to the mixin's dispatch method. It outputs AnonymousUser whether I use force_login or not. Why is it not working?
The test (simplified for readability's sake):
class CheckoutTest(TestCase):
    def test_successful_payment(self):
        user = UserAccount.objects.create(email='foo@bar.com', is_owner=True, is_customer=True)
        self.client.force_login(user)
        self.client.post('/subscription/buy/' + str(package.id) + '/', {
            ... redacted ...
        })
The view:
class BuyPackageView(LoginRequiredMixin, View):
    def post(self, request, *args, **kwargs):
        ... redacted ...
My test code snippet looks like this (I am getting the user from a table related to the allauth app):
from django.contrib.auth import get_user_model
from django.test import TestCase
from rest_framework.test import APIClient
from mainapp.tests.test_helper import TestsHelper

User = get_user_model()  # the user table used by the allauth app

class ProjectViewSetTestCase(TestCase):
    def setUp(self):
        self._client = APIClient(enforce_csrf_checks=False)
        self._test_helper = TestsHelper()
        self._test_helper.prepare_data()

    def tearDown(self):
        # do some custom clean-up
        pass

    def testCreateProjectWithoutKeyWords(self):
        user_id = self._test_helper.user1_id
        user = User.objects.get(id=user_id)
        # prepare current test data ...
        self._client.force_authenticate(user=user)
        # interact with the REST API; if you need to switch users, use logout
        self._client.logout()
        self._client = None

database mocking Python

I am writing test cases for a file called api.py; app.py instantiates my app.
app.py
from d import db

def create():
    d = db()
    # here I create a db instance and pass it to my application
    api = API(d)

app = create()
api.py
from app import app

class API1:
    pass

class API2:
    pass
I want to patch my database.
Below is my test file; here I set up my application once in setUp.
# from app.app import app -- doesn't work here
class TestCase(testing):
    @patch("/path/db")
    def setUp(self):
        # when I import my app here I am able to patch db, but it doesn't work
        # when the import is at file level
        from app.app import app
        self.app = app.create()
I am trying to patch the whole db layer to avoid setting up a database for each test case.
How do I patch it when the import is at file level?
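For what it's worth, the usual unittest.mock rule is to patch a name before the code that looks it up runs. Here app.py binds db at import time (from d import db) and calls create() at module level, so the patch has to be active before app.app is imported for the first time. A rough sketch under those assumptions (the module paths d and app.app are taken from the snippets above and may differ in the real project):
import unittest
from unittest.mock import patch

class TestAPI(unittest.TestCase):
    def setUp(self):
        # Replace d.db before app.app is imported, so that app.py's
        # "from d import db" and its module-level create() pick up the mock.
        self.db_patcher = patch('d.db')
        self.mock_db = self.db_patcher.start()
        self.addCleanup(self.db_patcher.stop)

        # Import only now, with the patch active. If app.app was already
        # imported elsewhere without the patch, this import is a no-op and
        # the real db would have been used.
        from app.app import app
        self.app = app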

flask_sqlalchemy create_all() fails silently in unit testing

I'm writing unit tests for a REST API written in Flask with the flask_sqlalchemy extension. Because I have a number of model classes, I wrote a TestCase subclass to do the standard setUp/cleanUp of the test database. All my test classes inherit from this. Each test succeeds when run alone, but when I run more than one test in a single class, the second setUp() fails on the self.db.session.commit() (I'm trying to add an entry to the User table) because self.db.create_all() has (silently) failed to create any tables.
Here is my base test class, in the __init__.py of the test package:
import unittest
from .test_client import TestClient
from .. import create_app
from pdb import set_trace as DBG

class ApiTest(unittest.TestCase):
    default_username = 'fred'
    default_password = 'bloggs'
    db = None

    def setUp(self):
        try:
            self.app = create_app('testing')
            self.addCleanup(self.cleanUp)
            self.ctx = self.app.app_context()
            self.ctx.push()
            from .. import db
            self.db = db
            self.db.session.commit()
            self.db.drop_all(app=self.app)
            from ..models import User, Player, Team, Match, Game
            # self.app.logger.debug('drop_all()')
            self.db.create_all(app=self.app)
            # self.app.logger.debug('create_all()')
            user = User(user_name=self.default_username)
            user.password = self.default_password
            self.db.session.add(user)
            self.db.session.commit()
            self.client = TestClient(self.app, user.generate_auth_token(), '')
        except Exception as ex:
            self.app.logger.error("Error during setUp: %s" % ex)
            raise

    def cleanUp(self):
        try:
            self.db.session.commit()
            self.db.session.remove()
            self.db.drop_all(app=self.app)
            # self.app.logger.debug('drop_all()')
            self.ctx.pop()
        except Exception as ex:
            self.app.logger.error("Error during cleanUp: %s" % ex)
            raise
Can anyone tell me what's wrong here please?
EDIT: Added the code for create_app() as requested.
# chessleague/__init__.py
import os
from flask import Flask, g
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from . import config

app = None
db = None  # The database, initialised in create_app()

def create_app(config_name):
    app = Flask(__name__)
    app.config.update(config.get_config(config_name))
    # if app.config['USE_TOKEN_AUTH']:
    #     from api.token import token as token_blueprint
    #     app.register_blueprint(token_blueprint, url_prefix='/auth')
    import logging
    from logging.handlers import SysLogHandler
    syslog_handler = SysLogHandler()
    syslog_handler.setLevel(logging.WARNING)
    app.logger.addHandler(syslog_handler)
    login_manager = LoginManager()
    login_manager.login_view = 'auth.login'
    login_manager.init_app(app)
    global db
    db = SQLAlchemy(app)
    db.init_app(app)
    from .models import User, Player, Game, Match, Team, Post
    db.create_all()
    from .api import api as api_blueprint
    app.register_blueprint(api_blueprint, url_prefix='/chessleague')
    return app
create_all() applies to the metadata that is discovered by importing the modules containing your models. In your case, the models' metadata is bound to the db from your models.py, but you are calling create_all() on the db created in chessleague/__init__.py inside create_app(), which is a different object as far as SQLAlchemy is concerned. You can fix that by using the db from models.py:
from .models import User, Player, Game, Match, Team, Post, db as models_db
models_db.create_all()
Here's the initialisation sequence that worked for me - comments welcome!
1. My test class setUp() calls create_app(config_name) from the main app package.
2. The main app package (__init__.py) creates the app instance at module level, i.e. app = Flask(my_app_package_name).
3. My create_app(config_name) function then loads the right config into app.config (including the right SQLALCHEMY_DATABASE_URI) and imports the model classes and db (as model_db) from models.py.
4. This import creates the symbol db at module level in models.py, followed by the model class definitions:
# models.py
from flask_sqlalchemy import SQLAlchemy
from . import app

db = SQLAlchemy(app)
...

class User(db.Model):
    ...
# etc.
Now everything is set up properly: the symbol 'db' can be imported anywhere from models.py, and I can call db.create_all() successfully from my test setUp().
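A compressed sketch of a test setUp() following that sequence (create_app and models.py as described above; details elided in the original are assumed here):
# tests/__init__.py (sketch)
import unittest
from .. import create_app
from ..models import db, User  # the same db object the models are bound to

class ApiTest(unittest.TestCase):
    def setUp(self):
        self.app = create_app('testing')
        self.ctx = self.app.app_context()
        self.ctx.push()
        db.drop_all()
        db.create_all()  # creates tables for all models bound to this db

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.ctx.pop()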

Flask test setup with Flask-Babel

I'd like to write a setUp with the unittest module.
My Flask app is created with a factory (create_app) and uses Flask-Babel for i18n.
def create_app(config=None, app_name=None, blueprints=None):
    # Create Flask App instance
    app_name = app_name or __name__
    app = Flask(app_name)
    app.config.from_pyfile(config)

    configure_hook(app)
    configure_blueprints(app, blueprints)
    configure_extensions(app)
    configure_jinja_filters(app)
    configure_logging(app)
    configure_error_handlers(app)
    configure_cli(app)

    return app
The create_app function calls configure_extensions(app), which is as follows:
def configure_extensions(app):
    """Initialize Flask Extensions."""
    db.init_app(app)
    babel.init_app(app)
    csrf.init_app(app)

    @babel.localeselector
    def get_locale():
        # If logged in, load user locale settings.
        user = getattr(g, 'user', None)
        if user is not None:
            return user.locale
        # Otherwise, choose the language from the user's browser.
        return request.accept_languages.best_match(
            app.config['BABEL_LANGUAGES'].keys())

    @babel.timezoneselector
    def get_timezone():
        user = getattr(g, 'user', None)
        if user is not None:
            return user.timezone
It works fine when I run the app, but I can't create a unit test properly because it raises an assertion error like this:
File "C:\projects\rabiang\venv\lib\site-packages\flask_babel\__init__.py", line 127, in localeselector
'a localeselector function is already registered'
AssertionError: a localeselector function is already registered
Because of the message "a localeselector function is already registered", I suspected the problem was that my unittest setUp method runs before every test method. So I switched to a @classmethod setUpClass like this:
# -*- coding: utf-8 -*-
import unittest

from app import create_app, db
from app.blueprints.auth import auth
from app.blueprints.forum import forum
from app.blueprints.main import main
from app.blueprints.page import page

class BasicsTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        blueprints = [main, page, auth, forum]
        app = create_app(config='../test.cfg', blueprints=blueprints)
        cls.app = app.test_client()
        db.create_all()

    @classmethod
    def tearDownClass(cls):
        db.session.remove()
        db.drop_all()

    def test_app_exists(self):
        self.assertFalse(BasicsTestCase.app is None)

if __name__ == '__main__':
    unittest.main()
However, the @babel.localeselector and @babel.timezoneselector decorators still don't work.
I fixed it by setting up the app only once, using the setUpClass function from unittest.
See also the answer Run setUp only once
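For context, the assertion is raised because configure_extensions registers a selector on the same module-level babel object every time create_app runs; setUpClass avoids the second call. If per-test app creation were ever needed, one possible workaround (assuming a flask_babel version that exposes locale_selector_func/timezone_selector_func, as the traceback above suggests) would be to guard the registration:
def configure_extensions(app):
    """Initialize Flask extensions, registering Babel selectors only once."""
    db.init_app(app)
    babel.init_app(app)
    csrf.init_app(app)

    # The module-level `babel` object keeps its selectors across create_app()
    # calls, so skip re-registration to avoid the AssertionError.
    if babel.locale_selector_func is None:
        @babel.localeselector
        def get_locale():
            user = getattr(g, 'user', None)
            if user is not None:
                return user.locale
            return request.accept_languages.best_match(
                app.config['BABEL_LANGUAGES'].keys())

    if babel.timezone_selector_func is None:
        @babel.timezoneselector
        def get_timezone():
            user = getattr(g, 'user', None)
            if user is not None:
                return user.timezone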
