Pytest - Writing to a real database while the test script runs in a test client created with @pytest.fixture - python

I have a test client which is implemented using a @pytest.fixture named client. In the test client I have all my database tables/models. Inside the test script block, I want to be able to write log records to the real database tables, not to the test client tables. The test script works fine, however there is no change in the real database tables.
Here is the simplified example:
@pytest.fixture
def client():
    """Flask test client backed by freshly created Peewee tables.

    Drops and recreates the tables before the test, yields the test
    client, then drops the tables again on teardown.
    """
    tables = [
        User,
        Profile,
    ]
    # Rebuild the schema inside a transaction so setup is all-or-nothing.
    with db.atomic():
        db.drop_tables(tables)
        db.create_tables(tables)
    with app.test_client() as client:
        yield client
    # Teardown: remove the tables created for this test.
    with db.atomic():
        db.drop_tables(tables)
def test_script(client):
    """Create one User row through the fixture-managed database.

    The assertion is a placeholder; the point of the test is the
    side effect of the User.create call.
    """
    with db.atomic():
        User.create(
            name="example",
            surname="example",
        )
    assert True  # placeholder assertion
Additionally, I don't know whether it matters, but I am using Peewee as the ORM and SQLite for storage.

Related

How to mock a PostgreSQL database with a pytest fixture

I have a file with two functions that I must test that look like this:
def create_conn(config):
conn = psycopg2.connect(dbname = config['dbname'], ...)
return conn
def use_conn():
    """Run a query on a fresh connection, closing it even on error.

    The original closed the connection only on the success path, which
    leaked the connection when cursor.execute raised; try/finally fixes
    that while preserving the success-path behavior.
    """
    conn = create_conn(CONSTANT_CONFIG)
    try:
        with conn.cursor() as cursor:
            cursor.execute("some sql query")
    finally:
        conn.close()
I've been able to create pytest fixtures for testing functions that use DynamoDB and S3 with a mock database using moto that essentially overrides any call to boto3 like below:
# in conftest.py
@pytest.fixture()
def s3_client(aws_credentials):
    """Yield a boto3 S3 client backed by moto's in-memory S3 mock."""
    # The missing ':' after mock_s3() in the original was a transcription error.
    with moto.mock_s3():
        client = boto3.client('s3')
        yield client
# in test file
def test_func(s3_client):
    """Exercise func() while the mocked S3 client fixture is active."""
    func()  # a function that uses boto3
But I can't find any examples that do something similar for PostgreSQL databases. Is there any way to essentially override psycopg2.connect() to return a connection to a mock database? I think pytest-postgresql might help, but I couldn't find any code examples.

Why does opening the same database with SQLAlchemy give different results, and how can I fix it?

I write some tests with pytest, I want to test create user and email with post method.
With some debugging, I know the issue is that I open two sessions on the in-memory database, even though both come from the same SessionLocal().
So how can I fix this, I try db.flush(), but it doesn't work.
this is the post method code
@router.post("/", response_model=schemas.User)
def create_user(
    *,
    db: Session = Depends(deps.get_db),  # the get_db is SessionLocal()
    user_in: schemas.UserCreate,
    current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
    """
    Create new user.
    """
    # Reject duplicate e-mail addresses before creating.
    user = crud.user.get_by_email(db, email=user_in.email)
    if user:
        raise HTTPException(
            status_code=400,
            detail="The user with this username already exists in the system.",
        )
    user = crud.user.create(db, obj_in=user_in)
    # Debug output from the question, kept verbatim.
    print("====post====")
    print(db.query(models.User).count())
    print(db)
    if settings.EMAILS_ENABLED and user_in.email:
        send_new_account_email(
            email_to=user_in.email, username=user_in.email, password=user_in.password
        )
    return user
and the test code is:
def test_create_user_new_email(
    client: TestClient, superuser_token_headers: dict, db: Session  # db is SessionLocal()
) -> None:
    """POST a new user through the API, then look it up via the db fixture."""
    username = random_email()
    password = random_lower_string()
    data = {"email": username, "password": password}
    r = client.post(
        f"{settings.API_V1_STR}/users/", headers=superuser_token_headers, json=data,
    )
    assert 200 <= r.status_code < 300
    created_user = r.json()
    # Debug output from the question, kept verbatim.
    print("====test====")
    print(db.query(User).count())
    print(db)
    user = crud.user.get_by_email(db, email=username)
    assert user
    assert user.email == created_user["email"]
and the test result is
> assert user
E assert None
====post====
320
<sqlalchemy.orm.session.Session object at 0x7f0a9f660910>
====test====
319
<sqlalchemy.orm.session.Session object at 0x7f0aa09c4d60>
Your code does not provide enough information to help you, the key issues are probably in what is hidden and explained by your comments.
And it seems like you are confusing sqlalchemy session and databases. If you are not familiar with these concepts, I highly recommend you to have a look at SQLAlchemy documentation.
But, looking at your code structure, it seems like you are using FastAPI.
Then, if you want to test SQLAlchemy with pytest, I recommend you to use pytest fixture with SQL transactions.
Here is my suggestion on how to implement such a test. I'll suppose that you want to run the test on your actual database and not create a new database especially for the tests. This implementation is heavily based on this github gist (the author made a "feel free to use statement", so I suppose he is ok with me copying his code here):
# test.py
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from fastapi.testclient import TestClient
from myapp.models import BaseModel
from myapp.main import app # import your fastapi app
from myapp.database import get_db # import the dependency
client = TestClient(app)
# scope="session" means the engine will last for the whole test session
@pytest.fixture(scope="session")
def engine():
    """Session-scoped SQLAlchemy engine pointed at the test database."""
    return create_engine("postgresql://localhost/test_database")
# At the end of the test session, drop the created metadata (fixture with yield).
@pytest.fixture(scope="session")
def tables(engine):
    """Create all model tables once per session; drop them on teardown."""
    BaseModel.metadata.create_all(engine)
    yield
    BaseModel.metadata.drop_all(engine)
# here scope="function" (the default) so each time a test finishes, the database is cleaned
@pytest.fixture
def dbsession(engine, tables):
    """Returns an sqlalchemy session, and after the test tears down everything properly."""
    connection = engine.connect()
    # begin the nested transaction
    transaction = connection.begin()
    # use the connection with the already started transaction
    session = Session(bind=connection)
    yield session
    session.close()
    # roll back the broader transaction
    transaction.rollback()
    # put back the connection to the connection pool
    connection.close()
## end of the gist.github code
@pytest.fixture
def db_fastapi(dbsession):
    """Override FastAPI's get_db dependency so routes use the test session."""
    def override_get_db():
        db = dbsession
        try:
            yield db
        finally:
            db.close()
    client.app.dependency_overrides[get_db] = override_get_db
    # Bug fix: the original yielded `db`, which is only bound inside
    # override_get_db and is a NameError at this scope.
    yield dbsession
# Now you can run your test
def test_create_user_new_email(db_fastapi):
    """Example test using the overridden-dependency fixture (body elided)."""
    username = random_email()
    # ...

Test fixture 'postgres' not found

I'm setting up unittest to test a postgres connection for an airflow operator. I have a setup function to spin up a postgres container and then a function to test some queries against the container. I'm relatively new to this so no doubt it's my logic not right.
# Question code (indentation was flattened by the paste; shown as-is).
# NOTE(review): `postgres` is a local variable inside setUp, not a pytest
# fixture, so pytest fails with "fixture 'postgres' not found" when it is
# requested as a test argument below — see the answer that follows.
class TestOperator:
def setUp(self):
#pytest postresql container patches
postgres_image = fetch(repository="postgres:11.1-alpine")
postgres = container(
# NOTE(review): "{postgres_image.id}" is a plain string, not an f-string —
# presumably an f-prefix was intended; confirm against the original post.
image="{postgres_image.id}",
environment={"POSTGRES_USER": "testuser", "POSTGRES_PASSWORD": "testpass"},
ports={"5432/tcp": None},
volumes={
os.path.join(os.path.dirname(__file__), "postgres-init.sql"): {
"bind": "/docker-entrypoint-initdb.d/postgres-init.sql"
}
}
)
"""
Using Pytest Mocker to create a Postgresql container to connect test.
"""
# Patches PostgresHook.get_connection to point at the container's mapped port.
def test_postgres_operator(self, mocker, postgres):
mocker.patch.object(
PostgresHook,
"get_connection",
return_value=Connection(
conn_id="postgres",
conn_type="postgres",
host="localhost",
login="testuser",
password="testpass",
port=postgres.ports["5432/tcp"][0],
),
)
#target Postgres Container for
task = PostGresOperator(
task_id="PostgresOperator",
postgres_conn_id="postgres_id",
)
pg_hook = PostgresHook()
row_count = pg_hook.get_first("select * from test")[0]
assert row_count >1
I then get the error
fixture 'postgres' not found
I'm sure my logic is wrong.
pytest thinks postgres is a fixture and fails looking for it.
Instead of having postgres passed in as an argument, you could set it as an instance field:
# Answer sketch: store the container on self instead of taking it as a fixture argument.
def setUp(self):
postgres_image = fetch(repository="postgres:11.1-alpine")
self.postgres = container(...)
def test_postgres_operator(self, mocker):
# use self.postgres instead of postgres
Or, you could define postgres as a proper pytest fixture to promote better reusability.
Alternatively, you could look into pytest-postgresql plugin which may make it easier to mock out and test postgresql related code.

DB changes made in fixture don't seem to persist to test

I'm writing some Pytest code using a sqlite db, to test some logic. I setup a root level fixture to instantiate a db engine:
class SqliteEngine:
    """Wrap an in-memory SQLite engine with foreign-key enforcement enabled."""

    def __init__(self):
        self._conn_engine = create_engine("sqlite://")
        # SQLite disables FK constraints by default; turn them on.
        self._conn_engine.execute("pragma foreign_keys=ON")

    def get_engine(self):
        """Return the underlying SQLAlchemy engine."""
        return self._conn_engine

    def get_session(self):
        """Return a NEW session bound to the engine (a fresh one per call)."""
        Session = sessionmaker(bind=self._conn_engine, autoflush=True)
        return Session()
@pytest.fixture(scope="session")
def sqlite_engine():
    """Session-scoped SqliteEngine shared by all tests."""
    return SqliteEngine()
Then in my test class, I have
# Question code (indentation was flattened by the paste; shown as-is).
class TestRbac:
#pytest.fixture(scope="class")
def setup_rbac_tables(self, sqlite_engine):
conn_engine = sqlite_engine.get_engine()
conn_engine.execute("attach ':memory:' as rbac")
Application.__table__.create(conn_engine)
Client.__table__.create(conn_engine)
Role.__table__.create(conn_engine)
session = sqlite_engine.get_session()
application = Application(id=1, name="test-application")
session.add(application)
session.flush()
client = Client(id=0, name="Test", email_pattern="")
session.add(client)
session.flush()
# NOTE(review): this session is flushed but never committed and is discarded
# when the fixture returns, so its rows are not visible to the fresh session
# the test obtains later — this is the issue the answer below explains.
Finally in the test in that class, I tried
def test_query_config_data_default(self, sqlite_engine, setup_rbac_tables, rbac):
conn_engine = sqlite_engine.get_engine()
session = sqlite_engine.get_session()
client = Client(id=1, name=factory.Faker("name").generate(), email_pattern="")
session.add(client)
session.flush()
# NOTE(review): get_session() builds a brand-new session on every call, so
# this query runs in a different session from the one that flushed above.
clients = sqlite_engine.get_session().query(Client).all()
for client in clients:
print(client.id, client.name)
However, only one client prints (and if I try for Application, none print), and I can't figure out why. Is this a problem with the fixture scopes? Or the engine? Or how sqlite works in pytest?
I'm not an expert on this but I think you need to define the fixture in such a way that the session is shared unless you plan to commit in each fixture. In setup_rbac_tables the session is destroyed with the function scope. And when get_session is called again a new session is created.
In my pytest sqlalchemy tests I do something like this, where the db fixture is a db session that is reused between fixtures and in the test:
@pytest.fixture
def customer_user(db):
    """Add a fresh user to the 'customer' group, creating the group if needed."""
    from ..model.user import User
    from ..model.auth import Group
    group = db.query(Group).filter(
        Group.name == 'customer').first()
    if not group:
        group = Group(name='customer', label='customer')
    user = User(email=test_email_fmt.format(uuid4().hex), group=group)
    db.add(user)
    return user

mySQL UPDATE doesn't work for GAE when called from browser

I am writing a verify email address python file for Google App Engine. (ya I know django has stuff, but I wanted to write my own because that is how I learn)
Below is the python code. The code returns "Email Account Verified" which seems to me that the queries worked. However when I look at the "active" column in the database, it is still 0.
If I run the query string that logging.info("%s",db_query) in the database itself, it works and is updated to 1.
All my other python code (with UPDATES) works fine, the only difference is those python files are called from my ios app and this one is called from a browser.
#Make the libs folder with 3rd party libraries and common methods
import sys
sys.path.insert(0, 'libs')
#Imports
import logging
import webapp2
from django.utils.html import strip_tags
import common
import MySQLdb
import json
VERIFIED_HTML = """\
<html>
<body>
<h1>Email Account Verified</h1>
</body>
</html>
"""
ERROR_HTML = """\
<html>
<body>
<h1>ERROR</h1>
</body>
</html>
"""
# Question code (indentation was flattened by the paste; shown as-is).
class VerifyEmail(webapp2.RequestHandler):
# GET handler: verifies an e-mail/activation-hash pair and flips `active` to 1.
def get(self):
user_email = strip_tags(self.request.get('user_email').lower().strip())
user_activation_hash = strip_tags(self.request.get('user_activation_hash').strip())
logging.info("User Email = %s", user_email)
logging.info("User Activation Hash = %s", user_activation_hash)
#Insert the information into the users table
#Get the database connection to Google Cloud SQL
db = common.connect_to_google_cloud_sql()
db_cursor = db.cursor(MySQLdb.cursors.DictCursor)
#Check to see if user already exists
#Query for user
# SECURITY(review): SQL is built by %-interpolating request parameters —
# vulnerable to SQL injection; use parameterized execute(sql, args) instead.
db_query = """SELECT \
email, activation_hash \
FROM users WHERE email='%s' AND activation_hash='%s'""" % (user_email, user_activation_hash)
db_cursor.execute(db_query)
#If there is one record containing the username check password
if(db_cursor.rowcount == 1):
db_query = """UPDATE users SET active=%s WHERE email='%s';""" % (1, user_email)
logging.info("%s" % db_query)
# NOTE(review): no db.commit() follows this UPDATE, so the change is rolled
# back when the request ends — this is the bug the answer below explains.
if(db_cursor.execute(db_query)):
self.response.write(VERIFIED_HTML)
else:
self.response.write(ERROR_HTML)
else: #either no user, or activation_hash doesn't match
self.response.write(ERROR_HTML)
Connect to Google Cloud SQL
def connect_to_google_cloud_sql():
    """Return a MySQLdb connection to Cloud SQL.

    Uses the Cloud SQL unix socket when running on App Engine, and a
    TCP host/port connection for local testing. Relies on the
    module-level _DATABASE_HOSTNAME constant (defined elsewhere).
    """
    #hostname = DEV_DB_HOSTNAME
    #hostname = PROD_DB_HOSTNAME
    db_username = 'dummy_user'  # not real
    db_password = 'dummypassword'  # not real
    # If PROD or deployed testing, use the unix socket.
    if os.getenv('SERVER_SOFTWARE') and os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/'):
        db = MySQLdb.connect(unix_socket='/cloudsql/' + _DATABASE_HOSTNAME, db='dummydbname', user=db_username, passwd=db_password)
    else:  # Local testing uses host
        db = MySQLdb.connect(host=_DATABASE_HOSTNAME, port=3306, db='dummydbname', user=db_username, passwd=db_password)
    logging.info("Got DB Connection")
    return db
Any suggestions? Is it a GAE Cloud SQL privileges issue?
Maybe because I was using my browser with the local app engine running on my local ip???
You need to call .commit() on the MySQLdb connection after executing queries. This is why your update is failing: it updates inside the transaction, but when your code ends without committing the transaction, the changes to the DB are rolled back, despite having told the user the update succeeded.
You can also enable autocommit on the connection to avoid explicit commits: db.autocommit(True).

Categories

Resources