I'm trying to use the database configuration from my Django settings files to make a database dump with Fabric.
There's more than one settings file, so I'd like to be able to do so based on the environment I choose.
Right now, my task looks like this:
import time

from fabric.api import cd, env, prefix, run
from fabric.contrib import django

def dump_database():
    with cd('~/project_folder'), prefix(WORKON_VIRTUALENV):
        django.settings_module(env.settings)
        from django.conf import settings
        dbname = settings.DATABASES['default']['NAME']
        dbuser = settings.DATABASES['default']['USER']
        dbpassword = settings.DATABASES['default']['PASSWORD']
        fname = '/tmp/{0}-backup-{1}.sql.gz'.format(
            dbname,
            time.strftime('%Y%m%d%H%M%S')
        )
        run('mysqldump -u %s -p%s %s | gzip -9 > %s' % (
            dbuser,
            dbpassword,
            dbname,
            fname))
But I'm getting an ImportError:
ImportError: Could not import settings 'project.settings.production'
I've tried to use shell_env() to set the DJANGO_SETTINGS_MODULE instead of django.settings_module(env.settings), with the same result.
I use a task to change the environment based on an environments dict:
def environment(name):
    env.update(environments[name])
    env.environment = name
This way, I want to be able to create a dump from multiple hosts like:
fab environment:live dump_database
fab environment:otherhost dump_database
Without having to reproduce the database settings for every host in the fabfile.
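For reference, the environments dict that environment() reads from is shaped roughly like this (the host names and the second settings module below are illustrative placeholders, not my real values):
environments = {
    'live': {
        'hosts': ['live.example.com'],             # placeholder host
        'settings': 'project.settings.production',
    },
    'otherhost': {
        'hosts': ['otherhost.example.com'],        # placeholder host
        'settings': 'project.settings.otherhost',  # placeholder settings module
    },
}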
Importing your Django settings file from Fabric is explained here:
http://fabric.readthedocs.org/en/1.3.3/api/contrib/django.html
Quoting from the above link:
from fabric.api import run
from fabric.contrib import django

django.settings_module('myproject.settings')
from django.conf import settings

def dump_production_database():
    run('mysqldump -u %s -p=%s %s > /tmp/prod-db.sql' % (
        settings.DATABASE_USER,
        settings.DATABASE_PASSWORD,
        settings.DATABASE_NAME
    ))
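One thing worth noting, and a likely cause of the ImportError above: settings_module() only sets DJANGO_SETTINGS_MODULE for the local process running fab, and the subsequent from django.conf import settings is also executed locally, so project.settings.production has to be importable on the machine where fab runs. Here is a sketch (untested, with a hypothetical local path) that makes the local checkout importable and defers the settings import until after env.settings has been set by the environment task:
import os
import sys
import time

from fabric.api import cd, env, prefix, run
from fabric.contrib import django

# Hypothetical path to the local checkout of the Django project; adjust as needed.
sys.path.insert(0, os.path.expanduser('~/work/project_folder'))

def dump_database():
    # Assumes env.settings was populated first, e.g. `fab environment:live dump_database`,
    # and that WORKON_VIRTUALENV is defined elsewhere in the fabfile (as in the question).
    django.settings_module(env.settings)   # e.g. 'project.settings.production'
    from django.conf import settings       # imported only after the module is set
    db = settings.DATABASES['default']
    fname = '/tmp/{0}-backup-{1}.sql.gz'.format(db['NAME'], time.strftime('%Y%m%d%H%M%S'))
    with cd('~/project_folder'), prefix(WORKON_VIRTUALENV):
        run('mysqldump -u %s -p%s %s | gzip -9 > %s' % (
            db['USER'], db['PASSWORD'], db['NAME'], fname))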
NOTE: this doesn't answer the question directly, but offers a different solution.
I had the same problem, so I wrote a custom .py script like this:
I created a file named dump_db.py (placed next to fabfile.py, for example) on the remote machine:
import os
import sys
from datetime import datetime

from django.conf import settings

def dump_mysql():
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", SETTINGS_MODULE)
    DB_NAME = settings.DATABASES['default']['NAME']
    DB_USER = settings.DATABASES['default']['USER']
    DB_PASSWORD = settings.DATABASES['default']['PASSWORD']
    dump_file_name = '{time}_{db_name}.sql'.format(
        time=datetime.now().strftime('%Y_%m_%d'),
        db_name=DB_NAME,
    )
    os.system('mysqldump -u {db_user} -p{db_password} {db_name} > {to_file}'.format(
        db_user=DB_USER,
        db_password=DB_PASSWORD,
        db_name=DB_NAME,
        to_file=dump_file_name,
    ))
    return dump_file_name

if __name__ == '__main__':
    try:
        SETTINGS_MODULE = sys.argv[1:].pop()
    except IndexError:
        SETTINGS_MODULE = 'project_name.settings'
    print(dump_mysql())
As you can see, sys.argv[1:].pop() tries to take an optional argument (the settings module in this case).
So in my fabfile:
import os
from fabric.api import env, local, run, prefix, cd

.....

def dump():
    current_dir = os.getcwd()
    with prefix('source {}bin/activate'.format(env.venv)), cd('{}'.format(env.home)):
        dumped_file = run('python dump_db.py {}'.format(env.environment))  # the optional argument given
    file_path = os.path.join(env.home, dumped_file)
    copy_to = os.path.join(current_dir, dumped_file)
    scp(file_path, copy_to)

def scp(file_path, copy_to):
    local('scp {}:{} {}'.format(env.host, file_path, copy_to))
where env.environment = 'project_name.settings.env_module'
And this is how I dump my DB and copy it back to my machine.
Hope it comes in handy for someone! :)
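For context, the dump() task above relies on a few env values; here is a minimal sketch of how they might be set (the host and paths are placeholders, not from the original answer):
from fabric.api import env

def live():
    env.hosts = ['user@example.com']       # placeholder host; Fabric fills env.host from this at run time
    env.home = '/srv/project_name/'        # remote directory containing dump_db.py
    env.venv = '/srv/project_name/venv/'   # used as '{}bin/activate', so keep the trailing slash
    env.environment = 'project_name.settings.production'
With something like that in place, the whole flow runs as fab live dump.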
Related
I'm trying to run a standalone script that uses the Django models for accessing the database.
The script is very simple, see below:
import sys

from manager.models import Playlist
from manager.utils import clean_up_playlist, add_record_to_playlist

def main(playlist_id, username):
    playlist = Playlist.objects.get(playlists=playlist_id)
    # the script does other stuff

if __name__ == "__main__":
    playlist_id = sys.argv[1]
    username = sys.argv[2]
    import os
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SpotifyPlaylistManager.settings')
    import django
    django.setup()
    main(playlist_id, username)
The script is in the top-level folder of the Django project:
SpotifyPlaylistManager/
|-SpotifyPlaylistManager/
| |-settings.py
|-venv
|-manage.py
|-my_script.py
For some reason, if I try to run it with the command below, I get this error:
raise ImproperlyConfigured(
django.core.exceptions.ImproperlyConfigured: Requested setting INSTALLED_APPS, but settings are not configured. You must either define the environment variable DJANGO_SETTINGS_MODULE or call settings.configure() before accessing settings.
The actual command I need to launch is:
source /home/nicola/PycharmProjects/SpotifyPlaylistManager/venv/bin/activate && python /home/nicola/PycharmProjects/SpotifyPlaylistManager/scheduler.py 6tIMeXF1Q9bB7KDywBhG2P nicoc && deactivate
I can't find the issue
Moving the Django-dependent imports inside the main block worked. The module-level imports of manager.models and manager.utils were running before DJANGO_SETTINGS_MODULE was set and django.setup() was called, which is what triggered the ImproperlyConfigured error:
if __name__ == "__main__":
    playlist_id = sys.argv[1]
    username = sys.argv[2]
    import os
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SpotifyPlaylistManager.settings')
    import django
    django.setup()
    from manager.models import Playlist
    from manager.utils import clean_up_playlist, add_record_to_playlist
    main(playlist_id, username)
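An equivalent arrangement (a sketch, not from the original answer) is to do the environment setup at the very top of the script, before any import that touches Django models, so main() and the rest of the module stay free of setup code:
# my_script.py -- sketch: configure Django before any app imports
import os
import sys

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SpotifyPlaylistManager.settings')

import django
django.setup()

# Safe to import app code only after django.setup() has run.
from manager.models import Playlist
from manager.utils import clean_up_playlist, add_record_to_playlist

def main(playlist_id, username):
    playlist = Playlist.objects.get(playlists=playlist_id)
    # the script does other stuff

if __name__ == "__main__":
    main(sys.argv[1], sys.argv[2])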
I have a Flask app that gives me a 500 error when I import a file using sys.path.append('path/to/file.py').
Here is my file, located at /var/www/html/ip.py, which Flask calls:
import sys

sys.path.append('/auto/conf/')
from config import config

server_username = config['server_username']
server_password = config['server_prod_password']

def check_ip(ip_addr):
    return "test"
Here is my /auto/conf/config.py:
import os
import ConfigParser
# Define names of dir and file
CRED_DIR = 'cred'
FILE_NAME = 'cred.conf'
# Path to cred file
my_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
cred_dir = os.path.join(my_dir, CRED_DIR)
file_path = os.path.join(cred_dir, FILE_NAME)
# Load the conf file with ConfigParser
config = ConfigParser.SafeConfigParser()
config.read(file_path)
# Build a config dictionary
config = {
    'server_username': config.get('server', 'username'),
    'server_password': config.get('server', 'password'),
}
and cred.conf is located in /auto/cred/cred.conf and contains server info.
Now, here is the issue. If I run python ip.py, it runs fine. I added print statements and it was fetching the proper server username and password. But when I run it via Flask, it gives me a 500 error.
Here are some of the things I tried:
-- If I comment out "from config import config" in ip.py, Flask returns "test", meaning it worked. It won't get the server username and password, but at least it doesn't return a 500.
-- If I move cred.conf and config.py to the same directory as ip.py, comment out "sys.path.append('/auto/conf/')", and uncomment "from config import config", Flask works.
Any ideas why it's happening? I'm thinking Flask doesn't like sys.path.append(). Is there an alternative I can use so Flask works?
Edit:
I changed ip.py to this:
import sys
sys.path.append('/auto/conf/')
import config
and removed all the code in config.py, and it is still giving me the error. If I comment out "import config", Flask works. It definitely doesn't like importing in this fashion.
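There is no answer recorded here, but one way to see what is actually failing, rather than a bare 500, is to catch and print the exception around the import so the traceback shows up in the server's error log (a sketch; the paths mirror the code above):
# ip.py -- sketch for surfacing the real exception behind the 500
import sys
import traceback

sys.path.append('/auto/conf/')

try:
    from config import config
    server_username = config['server_username']
    server_password = config['server_prod_password']
except Exception:
    # Print the full traceback (permissions problem, missing file, bad key, ...)
    # to stderr, which mod_wsgi/uwsgi forward to the web server's error log.
    traceback.print_exc(file=sys.stderr)
    raise

def check_ip(ip_addr):
    return "test"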
I'm struggling to figure out why the session I get after Pyramid's bootstrap refuses to execute queries, raising a transaction.interfaces.NoTransaction exception.
I'm trying to create a script that uses the Pyramid configuration but runs as a background task. I'm using the bootstrap function to get the environment in place. One of the approaches I tried was:
import sys

from pyramid.paster import bootstrap

with bootstrap(sys.argv[1]) as env:
    dbsession = env['request'].dbsession
    with dbsession.begin_nested():
        res = dbsession.execute('''SELECT ....''')
        ...
That creates a SessionTransaction as expected, but still raises a NoTransaction.
How can I initialise the connection, so I can access it as I normally do in the views?
As described in https://github.com/Pylons/pyramid/issues/3219 the transaction is not initialised by default. It can be done using:
with bootstrap(sys.argv[1]) as env:
    with env['request'].tm:
        dbsession = env['request'].dbsession
        dbsession.execute(...)
I've never used pyramid.paster.bootstrap. However, you could use the same template as the script that is auto-generated when you create a new project using the alchemy template.
pcreate -t alchemy myproject
The script looks like this:
import os
import sys

import transaction

from pyramid.paster import (
    get_appsettings,
    setup_logging,
)
from pyramid.scripts.common import parse_vars

from ..models.meta import Base
from ..models import (
    get_engine,
    get_session_factory,
    get_tm_session,
)
from ..models import MyModel


def usage(argv):
    cmd = os.path.basename(argv[0])
    print('usage: %s <config_uri> [var=value]\n'
          '(example: "%s development.ini")' % (cmd, cmd))
    sys.exit(1)


def main(argv=sys.argv):
    if len(argv) < 2:
        usage(argv)
    config_uri = argv[1]
    options = parse_vars(argv[2:])
    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)

    engine = get_engine(settings)
    Base.metadata.create_all(engine)

    session_factory = get_session_factory(engine)

    with transaction.manager:
        dbsession = get_tm_session(session_factory, transaction.manager)

        model = MyModel(name='one', value=1)
        dbsession.add(model)
And the entry_points in setup.py look like this:
entry_points="""\
[paste.app_factory]
main = myproject:main
[console_scripts]
initialize_myproject_db = myproject.scripts.initializedb:main
""",
I'm trying to develop my Scrapy application using multiple configurations depending on my environment (e.g. development, production). My problem is that there are some settings that I'm not sure how to set. For example, if I have to set up my database, in development it should be "localhost" and in production it has to be another host.
How can I specify these settings when I'm doing scrapy deploy? Can I set them with a variable on the command line?
You should set the deploy options in your scrapy.cfg file. For example:
[deploy:dev]
url = http://dev_url/
[deploy:production]
url = http://production_url/
With that, you could do:
scrapyd-deploy dev
or
scrapyd-deploy production
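As for the command-line part of the question: individual Scrapy settings can also be overridden per run with the -s option (a sketch; DB_HOST is a hypothetical setting name that a pipeline or spider would read from the crawler settings):
scrapy crawl myspider -s DB_HOST=localhost
scrapy crawl myspider -s DB_HOST=db.production.example.com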
You can refer to the answer at the following link:
https://alanbuxton.wordpress.com/2018/10/09/using-local-settings-in-a-scrapy-project/
I copy it here for quick reference:
Edit the settings.py file so it reads from additional settings files depending on a SCRAPY_ENV environment variable.
Move all the settings files to a separate config directory (and change scrapy.cfg so it knows where to look).
The magic happens at the end of settings.py:
from importlib import import_module
from scrapy.utils.log import configure_logging
import logging
import os

SCRAPY_ENV = os.environ.get('SCRAPY_ENV', None)
if SCRAPY_ENV is None:
    raise ValueError("Must set SCRAPY_ENV environment var")
logger = logging.getLogger(__name__)
configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s'})

# Load if file exists; incorporate any names started with an
# uppercase letter into globals()
def load_extra_settings(fname):
    if not os.path.isfile("config/%s.py" % fname):
        logger.warning("Couldn't find %s, skipping" % fname)
        return
    mdl = import_module("config.%s" % fname)
    names = [x for x in mdl.__dict__ if x[0].isupper()]
    globals().update({k: getattr(mdl, k) for k in names})

load_extra_settings("secrets")
load_extra_settings("secrets_%s" % SCRAPY_ENV)
load_extra_settings("settings_%s" % SCRAPY_ENV)
Then, in the Python file where you want to get the variables defined in the settings, use the following code:
from scrapy.utils.project import get_project_settings
settings = get_project_settings()
env_variable = settings.get('ENV_VARIABLE')
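A usage sketch for the setup above, assuming config/settings_development.py and config/settings_production.py exist alongside the secrets files:
SCRAPY_ENV=development scrapy crawl myspider
SCRAPY_ENV=production scrapy crawl myspider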
Is there a way for me to configure PyCharm to run shell_plus instead of the default shell?
I've tried putting the text of the manage command in the 'Starting script' but then I get the following:
django_manage_shell.run("/Users/cmason/counsyl/code/website/counsyl/product")
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
    # The new Django 1.4 default manage.py wants "from django..." before
    # importing settings, but we usually tinker with sys.path in
    # settings_local.py, which is called from settings.py. Importing
    # settings.py works but does mean some double importing. Luckily that
    # module does very little work.
    import settings
    # appease pyflakes; don't ever do this in
    # non-super-meta-namespace-trickery code
    settings
    from django.core.management import execute_from_command_line
    execute_from_command_line("shellplus")
and it hasn't really run shell_plus.
It seems like the 'Starting script' happens in addition to rather than instead of the default.
Shell_plus automatically imports all Django model classes, among other things.
I got the model objects auto-loading by hooking into the shell_plus code. I appended this to the default startup script in Preferences > Build, Execution, Deployment > Console > Django Console:
from django_extensions.management import shells
from django.core.management.color import color_style

imported_items = shells.import_objects({}, color_style())
for k, v in imported_items.items():
    globals()[k] = v
This was on PyCharm 2018.3.3 Pro
For completeness, this was the full content of starting script:
import sys; print('Python %s on %s' % (sys.version, sys.platform))
import django; print('Django %s' % django.get_version())
sys.path.extend([WORKING_DIR_AND_PYTHON_PATHS])
if 'setup' in dir(django): django.setup()
import django_manage_shell; django_manage_shell.run(PROJECT_ROOT)
from django_extensions.management import shells
from django.core.management.color import color_style
imported_items = shells.import_objects({}, color_style())
for k, v in imported_items.items():
    globals()[k] = v
I've been looking for a solution to the same problem, and I ended up here. I tried solutions proposed by others, but none of those appeared to solve this issue. So I decided to find another solution. This is what I came up with:
The code block below is the original Django Console starting script of PyCharm 2019.2:
import sys, django
print('Python %s on %s' % (sys.version, sys.platform))
print('Django %s' % django.get_version())
sys.path.extend([WORKING_DIR_AND_PYTHON_PATHS])
if 'setup' in dir(django):
    django.setup()
import django_manage_shell
django_manage_shell.run(PROJECT_ROOT)
Installing IPython and changing the last two lines as below gets it done in the most proper way:
from IPython.core.getipython import get_ipython
ipython = get_ipython()
from django_extensions.management.notebook_extension import load_ipython_extension
load_ipython_extension(ipython)
To make it work: open the PyCharm settings (Ctrl+Alt+S) and head to the Django Console section. Then make the changes in the Starting script window and apply. Finally, start a new Python Console instance.
I looked at the source code of shell_plus and noticed you could use a method on its Command class named get_imported_objects({}).
In PyCharm, go to: Build, Execution, Deployment > Console > Django Console > Starting script
Add this to the existing code in that box:
from django_extensions.management.commands.shell_plus import Command
globals().update(Command().get_imported_objects({}))
Note: you may have to restart PyCharm to see the effect.
One way to solve this is to create a new Python run configuration. Set the target to module, and select the manage.py file for the project. Then put shell_plus in the Parameters field. Set the Working Directory to the project directory. Then lastly, set the Execution to Run with Python Console. Apply the changes, then run the new configuration.
This isn't a complete answer, but I found this script that at least loads up all the app models. Put this in Settings > Console > Django Console > Starting script:
import sys
import logging
import logging.config

logging.basicConfig(format="%(levelname)-8s %(asctime)s %(name)s %(message)s", datefmt='%m/%d/%y %H:%M:%S', stream=sys.stdout)
log = logging.getLogger("root")

from django.db.models import get_models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned

logging.config.dictConfig(settings.LOGGING)
log.debug("Logging has been initialized at DEBUG")
log.setLevel(logging.DEBUG)
log.disabled = False

for _class in get_models():
    if _class.__name__.startswith("Historical"):
        continue
    log.debug("Registering model {}".format(_class.__name__))
    globals()[_class.__name__] = _class

def debug_sql():
    from debug_toolbar.management.commands import debugsqlshell
    return
I also submitted this as a feature request to JetBrains.
In Django 1.7, the following script can be used as a workaround with PyCharm 3.4:
File -> Settings -> Console -> Django Console and manage.py options
In Starting script, put:
import sys
import django

django.setup()

from django.db.models import get_models

for _class in get_models():
    globals()[_class.__name__] = _class
This configuration works for me
As django.db.models.get_models no longer exists, here's an updated version that will accomplish the same as Christopher Mason's version.
import sys; print('Python %s on %s' % (sys.version, sys.platform))
import django; print('Django %s' % django.get_version())
import logging
import logging.config

logging.basicConfig(format="%(levelname)-8s %(asctime)s %(name)s %(message)s", datefmt='%m/%d/%y %H:%M:%S', stream=sys.stdout)
log = logging.getLogger("root")

from django.apps import apps
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned

logging.config.dictConfig(settings.LOGGING)
log.debug("Logging has been initialized at DEBUG")
log.setLevel(logging.DEBUG)
log.disabled = False

for _configs in apps.get_app_configs():
    for _class in _configs.get_models():
        if _class.__name__.startswith("Historical"):
            continue
        log.debug("Registering model {}".format(_class.__name__))
        globals()[_class.__name__] = apps.get_model(_configs.label, _class.__name__)

def debug_sql():
    from debug_toolbar.management.commands import debugsqlshell
    return