DB migrations are not created in the Docker container - Python

Can anyone explain why my migrations are not created in the Docker container? Locally I have a working project, but in the container there are no tables.
Locally my tables are created, but when I run docker-compose up there are no migrations in the console logs.
My Dockerfile:
# Build stage: compile the Go binary on Alpine.
FROM golang:1.17-alpine AS build-stage
# WORKDIR creates /app if it does not exist, so no separate mkdir is needed.
WORKDIR /app

# Copy the module manifests first so the dependency download layer is
# cached independently of source-code changes.
COPY go.mod go.sum ./
RUN go mod download

# Now copy the sources and build.
COPY . .
RUN go build -o crypto main.go

# Runtime stage: minimal image containing only the compiled binary.
FROM alpine:latest
WORKDIR /
COPY --from=build-stage /app/crypto .
EXPOSE 9999
ENTRYPOINT [ "/crypto" ]
docker-compose.yml
version: "3"

volumes:
  crypto_postgres_data: {}

services:
  crypto:
    build:
      context: .
      dockerfile: ./Dockerfile
    image: crypto_app
    platform: linux/amd64
    env_file:
      - ./.env
    depends_on:
      - postgres
    ports:
      - "9999:9999"

  postgres:
    image: postgres:14.2
    healthcheck:
      # $POSTGRES_USER / $POSTGRES_DB are interpolated by docker-compose
      # from the project's .env file at compose time.
      test: [ "CMD", "pg_isready", "-U", "$POSTGRES_USER", "-d", "$POSTGRES_DB" ]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    restart: unless-stopped
    env_file:
      - ./.env
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 4G
    volumes:
      - crypto_postgres_data:/var/lib/postgresql/data:Z

  # NOTE: as posted in the question, this service has no command,
  # which is exactly why no migrations run (see the answer below).
  migrate:
    image: migrate/migrate
    volumes:
      - .:/migrations
My output in container:
[gomigrate] 2022/06/22 14:02:11 Migrations path: migrations/
[gomigrate] 2022/06/22 14:02:11 Migrations table not found
[gomigrate] 2022/06/22 14:02:11 Created migrations table: gomigrate
[gomigrate] 2022/06/22 14:02:11 Migrations file pairs found: 0
Thanks in advance

Your migrate service does not have any command so it cannot migrate data in the migrations folder. You should change it like this:
version: "3"

volumes:
  crypto_postgres_data: {}

services:
  crypto:
    build:
      context: .
      dockerfile: ./Dockerfile
    image: crypto_app
    platform: linux/amd64
    env_file:
      - ./.env
    depends_on:
      - postgres
    ports:
      - "9999:9999"
    networks:
      - crypto

  postgres:
    image: postgres:14.2
    healthcheck:
      test: [ "CMD", "pg_isready", "-U", "$POSTGRES_USER", "-d", "$POSTGRES_DB" ]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    restart: unless-stopped
    env_file:
      - ./.env
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 4G
    volumes:
      - crypto_postgres_data:/var/lib/postgresql/data:Z
    networks:
      - crypto

  migrate:
    image: migrate/migrate
    restart: on-failure:5
    env_file:
      - ./.env
    volumes:
      - ./migrations:/migrations
    # The database URL uses '@' between credentials and host
    # (postgres://user:pass@host:port/db), not '#'.
    command: ["-path", "/migrations", "-database", "postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB}?sslmode=disable", "up"]
    depends_on:
      # The database service in this file is named 'postgres', not 'db'.
      - postgres
    networks:
      - crypto

networks:
  crypto:
    driver: bridge
Reference: How to run golang-migrate with docker-compose?

Related

Is it possible to add new packages and run docker non-dev file?

I have been working on Superset (cloned the code from the Superset GitHub repo). I created a new file under the folder superset/superset/ named db_access.py.
I have imported this file in the existing app.py file. Now I have the following imports in the newly created file:
db_access.py
import boto3
import sys
import hvac
import pymysql
import psycopg2
import os
import io
import logging
from datetime import datetime, timedelta
import traceback
from timestream_reader import Timestream_reader
import json
from pathlib import Path
from zenpy import Zenpy
app.py
import logging
import os
import json
from flask import Flask
from flask import request,jsonify
from superset.initialization import SupersetAppInitializer
import sys
from superset.db_access import Db_access
I have included the hvac in requirements.txt and when I am composing docker dev file, it works with out any issue.
But when I try to compose the non-dev docker file with the above code, it shows an error like "hvac module not found" (I tried pip install hvac and included that in the requirements file too). Are there any other places that I need to customize based on new imports for non-dev?
docker-compose-non-dev.yml
x-superset-image: &superset-image apache/superset:${TAG:-latest-dev}
x-superset-depends-on: &superset-depends-on
- db
- redis
x-superset-volumes: &superset-volumes
# /app/pythonpath_docker will be appended to the PYTHONPATH in the final container
- ./docker:/app/docker
- superset_home:/app/superset_home
- ./superset:/app/superset
- ./superset-frontend:/app/superset-frontend
version: "3.7"
services:
redis:
image: redis:latest
container_name: superset_cache
restart: unless-stopped
volumes:
- redis:/data
db:
env_file: docker/.env-non-dev
image: postgres:10
container_name: superset_db
restart: unless-stopped
volumes:
- db_home:/var/lib/postgresql/data
superset:
env_file: docker/.env-non-dev
image: *superset-image
container_name: superset_app
command: ["/app/docker/docker-bootstrap.sh", "app-gunicorn"]
user: "root"
restart: unless-stopped
ports:
- 8088:8088
depends_on: *superset-depends-on
volumes: *superset-volumes
superset-init:
image: *superset-image
container_name: superset_init
command: ["/app/docker/docker-init.sh"]
env_file: docker/.env-non-dev
depends_on: *superset-depends-on
user: "root"
volumes: *superset-volumes
healthcheck:
disable: true
superset-worker:
image: *superset-image
container_name: superset_worker
command: ["/app/docker/docker-bootstrap.sh", "worker"]
env_file: docker/.env-non-dev
restart: unless-stopped
depends_on: *superset-depends-on
user: "root"
volumes: *superset-volumes
healthcheck:
test: ["CMD-SHELL", "celery inspect ping -A superset.tasks.celery_app:app -d celery#$$HOSTNAME"]
superset-worker-beat:
image: *superset-image
container_name: superset_worker_beat
command: ["/app/docker/docker-bootstrap.sh", "beat"]
env_file: docker/.env-non-dev
restart: unless-stopped
depends_on: *superset-depends-on
user: "root"
volumes: *superset-volumes
healthcheck:
disable: true
volumes:
superset_home:
external: false
db_home:
external: false
redis:
external: false
docker-compose.yml (dev)
x-superset-image: &superset-image apache/superset:${TAG:-latest-dev}
x-superset-user: &superset-user root
x-superset-depends-on: &superset-depends-on
- db
- redis
x-superset-volumes: &superset-volumes
# /app/pythonpath_docker will be appended to the PYTHONPATH in the final container
- ./docker:/app/docker
- ./superset:/app/superset
- ./superset-frontend:/app/superset-frontend
- superset_home:/app/superset_home
- ./tests:/app/tests
version: "3.7"
services:
redis:
image: redis:latest
container_name: superset_cache
restart: unless-stopped
ports:
- "127.0.0.1:6379:6379"
volumes:
- redis:/data
db:
env_file: docker/.env
image: postgres:14
container_name: superset_db
restart: unless-stopped
ports:
- "127.0.0.1:5432:5432"
volumes:
- db_home:/var/lib/postgresql/data
superset:
env_file: docker/.env
image: *superset-image
container_name: superset_app
command: ["/app/docker/docker-bootstrap.sh", "app"]
restart: unless-stopped
ports:
- 8088:8088
user: *superset-user
depends_on: *superset-depends-on
volumes: *superset-volumes
environment:
CYPRESS_CONFIG: "${CYPRESS_CONFIG}"
superset-websocket:
container_name: superset_websocket
build: ./superset-websocket
image: superset-websocket
ports:
- 8080:8080
depends_on:
- redis
# Mount everything in superset-websocket into container and
# then exclude node_modules and dist with bogus volume mount.
# This is necessary because host and container need to have
# their own, separate versions of these files. .dockerignore
# does not seem to work when starting the service through
# docker-compose.
#
# For example, node_modules may contain libs with native bindings.
# Those bindings need to be compiled for each OS and the container
# OS is not necessarily the same as host OS.
volumes:
- ./superset-websocket:/home/superset-websocket
- /home/superset-websocket/node_modules
- /home/superset-websocket/dist
environment:
- PORT=8080
- REDIS_HOST=redis
- REDIS_PORT=6379
- REDIS_SSL=false
superset-init:
image: *superset-image
container_name: superset_init
command: ["/app/docker/docker-init.sh"]
env_file: docker/.env
depends_on: *superset-depends-on
user: *superset-user
volumes: *superset-volumes
environment:
CYPRESS_CONFIG: "${CYPRESS_CONFIG}"
healthcheck:
disable: true
superset-node:
image: node:16
container_name: superset_node
command: ["/app/docker/docker-frontend.sh"]
env_file: docker/.env
depends_on: *superset-depends-on
volumes: *superset-volumes
superset-worker:
image: *superset-image
container_name: superset_worker
command: ["/app/docker/docker-bootstrap.sh", "worker"]
env_file: docker/.env
restart: unless-stopped
depends_on: *superset-depends-on
user: *superset-user
volumes: *superset-volumes
healthcheck:
test: ["CMD-SHELL", "celery inspect ping -A superset.tasks.celery_app:app -d celery#$$HOSTNAME"]
# Bump memory limit if processing selenium / thumbnails on superset-worker
# mem_limit: 2038m
# mem_reservation: 128M
superset-worker-beat:
image: *superset-image
container_name: superset_worker_beat
command: ["/app/docker/docker-bootstrap.sh", "beat"]
env_file: docker/.env
restart: unless-stopped
depends_on: *superset-depends-on
user: *superset-user
volumes: *superset-volumes
healthcheck:
disable: true
superset-tests-worker:
image: *superset-image
container_name: superset_tests_worker
command: ["/app/docker/docker-bootstrap.sh", "worker"]
env_file: docker/.env
environment:
DATABASE_HOST: localhost
DATABASE_DB: test
REDIS_CELERY_DB: 2
REDIS_RESULTS_DB: 3
REDIS_HOST: localhost
network_mode: host
depends_on: *superset-depends-on
user: *superset-user
volumes: *superset-volumes
healthcheck:
test: ["CMD-SHELL", "celery inspect ping -A superset.tasks.celery_app:app -d celery#$$HOSTNAME"]
volumes:
superset_home:
external: false
db_home:
external: false
redis:
external: false

Airflow folder structure on how to incorporate container with custom code to avoid bloat

Airflow beginner here. I have a question on how to install a custom utility package in a docker container that will be used in docker compose for airflow. The reason why I want to do this is because this package has a lot of reusable code and I don't want to constantly copy the code into new projects directories.
The custom utility package that I would use would only be needed for the Webserver container.
Since I am aiming not to copy the utility code into my docker compose directory, would I need to install it in a separate container then reference (through extending?) that container somewhere in my airflow directory? I hope I'm not overcomplicating.
My current airflow setup is as follows:
Airflow_ETL
--/airflow
--/scripts
-/data
-/resource
-pull_data.py
--docker-compose.yml
--Dockerfile
--env.list
--requirements.txt
My docker file looks likes this:
FROM puckel/docker-airflow:1.10.9
COPY airflow/airflow.cfg ${AIRFLOW_HOME}/airflow.cfg
RUN pip install --upgrade pip
RUN pip install SQLAlchemy==1.3.15
WORKDIR /usr/src/app
COPY requirements.txt /requirements.txt
RUN pip install -r /requirements.txt
Docker Compose uses this Docker File to build the rest of the containers.
version: '3.7'
services:
postgres:
image: postgres:9.6
environment:
- POSTGRES_USER=airflow
- POSTGRES_PASSWORD=airflow
- POSTGRES_DB=airflow
redis:
image: redis:5.0.5
flower:
image: flower:latest
build:
context: .
restart: always
depends_on:
- redis
environment:
- EXECUTOR=Celery
ports:
- "5555:5555"
command: flower
webserver:
image: webserver:latest
build:
context: .
restart: always
depends_on:
- postgres
- redis
environment:
- LOAD_EX=n
- FERNET_KEY=46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=
- EXECUTOR=Celery
- PYTHONPATH=/usr/local/airflow
env_file:
- env.list
volumes:
- ./airflow/dags:/usr/local/airflow/dags
- ./scripts:/usr/local/airflow/scripts
ports:
- "8080:8080"
command: webserver
healthcheck:
test: ["CMD-SHELL", "[ -f /usr/local/airflow/airflow-webserver.pid ]"]
interval: 30s
timeout: 30s
retries: 3
scheduler:
image: scheduler:latest
build:
context: .
restart: always
depends_on:
- webserver
volumes:
- ./airflow/dags:/usr/local/airflow/dags
- ./scripts:/usr/local/airflow/scripts
environment:
- LOAD_EX=n
- FERNET_KEY=46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=
- EXECUTOR=Celery
- PYTHONPATH=/usr/local/airflow
command: scheduler
env_file:
- env.list
worker1:
image: worker1:latest
build:
context: .
restart: always
depends_on:
- scheduler
volumes:
- ./airflow/dags:/usr/local/airflow/dags
- ./scripts:/usr/local/airflow/scripts
environment:
- FERNET_KEY=46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=
- EXECUTOR=Celery
- PYTHONPATH=/usr/local/airflow
command: worker
env_file:
- env.list
worker2:
image: worker2:latest
build:
context: .
restart: always
depends_on:
- scheduler
volumes:
- ./airflow/dags:/usr/local/airflow/dags
- ./scripts:/usr/local/airflow/scripts
environment:
- FERNET_KEY=46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=
- EXECUTOR=Celery
- PYTHONPATH=/usr/local/airflow
command: worker
env_file:
- env.list
Thank you for your time.

Cannot connect to redis container

I am trying to connect redis container to python app container using environment variable. I passed password as an environment variable but it is not connecting, if I don't use an environment variable and hard code the password it works fine otherwise it gives redis.exceptions.ConnectionError
version: "3.7"
services:
nginx_app:
image: nginx:latest
depends_on:
- flask_app
volumes:
- ./default.conf:/etc/nginx/conf.d/default.conf
ports:
- 8090:80
networks:
- my_project_network
flask_app:
build:
context: .
dockerfile: Dockerfile
expose:
- 5000
environment:
- PASSWORD=pass123a
depends_on:
- redis_app
networks:
- my_project_network
redis_app:
image: redis:latest
command: redis-server --requirepass ${PASSWORD} --appendonly yes
environment:
- PASSWORD=pass123a
volumes:
- ./redis-vol:/data
expose:
- 6379
networks:
- my_project_network
networks:
my_project_network:
index.py
from flask import Flask
from redis import Redis
import os

app = Flask(__name__)
# The password is read from the PASSWORD environment variable that
# docker-compose injects into the flask_app container.
redis = Redis(host='redis_app', port=6379, password=os.getenv('PASSWORD'))

# Restored decorator: the scraped page showed '#app.route' but Flask
# routes are registered with the '@' decorator syntax.
@app.route('/')
def hello():
    redis.incr('hits')
    return 'Hello World! I have been seen %s times.' % redis.get('hits')

if __name__ == "__main__":
    app.run(host="0.0.0.0", debug=True)
Update your docker-compose.yaml
the environment is a list of strings:
docker-compose interpolates ${ENV}, where the value of ENV is loaded from the .env file
Use:
command: redis-server --requirepass $PASSWORD --appendonly yes
Instead of:
command: redis-server --requirepass ${PASSWORD} --appendonly yes
You can verify environment variable inside ur container by:
docker-compose run --rm flask_app printenv | grep PASSWORD
That should return:
PASSWORD=pass123a
docker-compose example for environment variables: Here
Looks like you have missed passing the environment variable to your Redis container.
Try This:
version: "3.7"
services:
nginx_app:
image: nginx:latest
#LOCAL IMAGE
depends_on:
- flask_app
volumes:
- ./default.conf:/etc/nginx/conf.d/default.conf
ports:
- 8082:80
networks:
- my_project_network
flask_app:
build:
context: .
dockerfile: Dockerfile
expose:
- 5000
environment:
- PASSWORD=pass123a
depends_on:
- redis_app
networks:
- my_project_network
redis_app:
image: redis:latest
command: redis-server --requirepass ${PASSWORD} --appendonly yes
environment:
- PASSWORD=pass123a
volumes:
- ./redis-vol:/data
expose:
- 6379
networks:
- my_project_network
networks:
my_project_network:

Celery not running at times in docker

I have been going bonkers over this one, the celery service in my docker-compose.yml just does not pick up tasks (sometimes). It works at times though
Dockerfile:
FROM python:3.6
RUN apt-get update
RUN mkdir /web_back
WORKDIR /web_back
COPY web/requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
COPY web/ .
docker-compose.yml
(Taken out a few services for the sake of understanding)
version: '3'
services:
web_serv:
restart: always
build: .
container_name: web_back_01
env_file:
- ./envs/web_back_01.env
volumes:
- ./web/:/web_back
depends_on:
- web_postgres
expose:
- 8282
extra_hosts:
- "dockerhost:104.10.4.11"
command: bash -c "./initiate.sh"
service_A:
restart: always
build: ../../web-service-A/A/
container_name: web_back_service_a_01
volumes:
- ../../web-service-A/A.:/web-service-A
depends_on:
- web
ports:
- '5100:5100'
command: bash -c "python server.py"
service_B:
restart: always
build: ../../web-service-B/B/
container_name: web_back_service_b_01
volumes:
- ../../web-service-B/B.:/web-service-B
depends_on:
- web
ports:
- '5200:5200'
command: bash -c "python server.py"
web_postgres:
restart: always
build: ./postgres
container_name: web_postgres_01
# restart: unless-stopped
ports:
- "5433:5432"
environment: # will be used by the init script
LC_ALL: C.UTF-8
POSTGRES_USER: web
POSTGRES_PASSWORD: web
POSTGRES_DB: web
volumes:
- pgdata:/var/lib/postgresql/data/
nginx:
restart: always
build: ./nginx/
container_name: web_nginx_01
volumes:
- ./nginx/:/etc/nginx/conf.d
- ./logs/:/code/logs
- ./web/static/:/static_cdn/
- ./web/media/:/media_cdn/
ports:
- "80:80"
links:
- web_serv
redis:
restart: always
container_name: web_redis_01
ports:
- "6379:6379"
links:
- web_serv
image: redis
celery:
build: .
volumes:
- ./web/:/web_back
container_name: web_celery_01
command: celery -A web worker -l info
links:
- redis
depends_on:
- redis
volumes:
pgdata:
media:
static:
settings.py
CELERY_BROKER_URL = 'redis://redis:6379'
CELERY_RESULT_BACKEND = 'redis://redis:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_SERIALIZER = 'json'
Notice the service_A and service_B, those are the two servies that at times do not get fired up.
Any help in understanding the odd behavior would be very helpful! Thanks
So, I think I ran into a similar problem. I was pulling my hair out because I was updating my worker.py and then not only would the autoload not reflect any changes, but, the when I'd rerun my docker-compose up my changes would still not be reflected.
Sometimes when I'd run docker-compose up --build --force-recreate my changes would be reflected, but not reliably.
I was able to resolve this problem by doing two things:
Remove the __pycache__ in my worker's directory.
Run $ find . -name "*.pyc" -exec rm {} \; before doing docker-compose up --build --force-recreate when caching behavior persists.
I'm not 100% sure what's going on myself, but its clear that Celery + Docker w/o autoload means that Docker has a tendency to use a cached version of the compiled task. I see a bit of chatter regarding ways to setup autoload with Celery + Docker with things like webdog or modd, but, I have yet to set that up for my project.

Setting up docker-compose.yml to run celery worker and celery beat for a django project with redis as broker

I have setup django project using django cookiecutter. The project scaffolding is excellent. I also opted to use docker along with it. Now I am struggling with getting celery v4.0.x working in the whole setup.
This is my docker-compose.yml
version: '2'
volumes:
postgres_data_dev: {}
postgres_backup_dev: {}
services:
postgres:
build: ./compose/postgres
volumes:
- postgres_data_dev:/var/lib/postgresql/data
- postgres_backup_dev:/backups
environment:
- POSTGRES_USER=application
django:
build:
context: .
dockerfile: ./compose/django/development/Dockerfile
depends_on:
- postgres
environment:
- POSTGRES_USER=application
- USE_DOCKER=yes
volumes:
- .:/app
- /tmp/
links:
- postgres
- redis
expose:
- "8000"
env_file:
- ./dev.env
restart:
- "on-failure"
nginx:
build:
context: .
dockerfile: ./compose/nginx/development/Dockerfile
depends_on:
- django
ports:
- "0.0.0.0:80:80"
links:
- django
volumes_from:
- django
redis:
image: redis:latest
hostname: redis
celeryworker:
build:
context: .
dockerfile: ./compose/django/development/Dockerfile
env_file: ./dev.env
depends_on:
- postgres
- redis
command: celery -A application.taskapp worker -l INFO
restart: "on-failure"
celerybeat:
build:
context: .
dockerfile: ./compose/django/development/Dockerfile
env_file: ./dev.env
depends_on:
- postgres
- redis
command: celery -A application.taskapp beat -l INFO
Quite honestly I feel there seems to be some tiny issue with config for celerybeat/celeryworker service. It would be nice if someone can point it out.
Update:
When I execute the command to run the containers, I get an error saying that application could not be found
Update
This is the new compose file which ironed out few errors in my compose. Somewhere along the way of getting it all working I also came across thread where someone had mentioned that ordering of the services mattered as well. So in the new version, django is placed first.
version: '2'
volumes:
postgres_data_dev: {}
postgres_backup_dev: {}
services:
django: &django
build:
context: .
dockerfile: ./compose/django/development/Dockerfile
depends_on:
- postgres
volumes:
- .:/app
- /tmp/
links:
- postgres
- redis
environment:
- POSTGRES_USER=application
- USE_DOCKER=yes
expose:
- "8000"
env_file:
- ./dev.env
postgres:
build: ./compose/postgres
volumes:
- postgres_data_dev:/var/lib/postgresql/data
- postgres_backup_dev:/backups
environment:
- POSTGRES_USER=application
ports:
- "5432:5432"
redis:
image: redis:latest
hostname: redis
ports:
- "0.0.0.0:6379:6379"
env_file:
- ./dev.env
nginx:
build:
context: .
dockerfile: ./compose/nginx/development/Dockerfile
depends_on:
- django
ports:
- "0.0.0.0:80:80"
links:
- django
volumes_from:
- django
celeryworker:
<<: *django
depends_on:
- redis
- postgres
command: "celery -A application.taskapp worker --loglevel INFO --uid taskmaster"
I am using the same tech stack . This works fine for me.
docker-compose.yml
redis:
image: redis
container_name: redis
command: ["redis-server", "--port", "${REDIS_PORT}", "--appendonly", "yes","--maxmemory", "1gb", "--maxmemory-policy", "allkeys-lru"]
ports:
- "${REDIS_PORT}:${REDIS_PORT}"
volumes:
- .:/redis.conf
networks:
- pipeline-net
celery-worker:
build:
context: ./app
container_name: celery-worker
entrypoint: celery
command: -A celery_app.celery worker --loglevel=info
volumes:
- .:/var/www/app/worker
links:
- redis
depends_on:
- redis
networks:
- pipeline-net
celery-beat:
build:
context: ./app
container_name: celery-beat
entrypoint: celery
command: -A celery_app.celery beat --loglevel=info
volumes:
- .:/var/www/app/beat
links:
- celery-worker
- redis
depends_on:
- celery-worker
- redis
networks:
- pipeline-net
flower:
image: mher/flower
container_name: flower
environment:
- CELERY_BROKER_URL=redis://redis:6379
- FLOWER_PORT=8888
ports:
- 8888:8888
links:
- redis
- celery-worker
- celery-beat
depends_on:
- redis
- celery-worker
- celery-beat
networks:
- pipeline-net

Categories

Resources