Using the ftputil package for FTP backup from Odoo - Python

I'm trying to get a database backup from Odoo ERP using ftputil. This is the code:
# -*- coding: utf-8 -*-
from odoo import models, fields, api, tools, _
from odoo.exceptions import Warning
import odoo
from odoo.http import content_disposition
import logging
_logger = logging.getLogger(__name__)
import os
import datetime
try:
from xmlrpc import client as xmlrpclib
except ImportError:
import xmlrpclib
import time
import base64
import socket
try:
import ftputil
except ImportError:
raise ImportError(
'This module needs ftputil to automatically write backups to the FTP server. Please install ftputil on your system. (sudo pip3 install ftputil)')
def execute(connector, method, *args):
res = False
try:
res = getattr(connector, method)(*args)
except socket.error as error:
_logger.critical('Error while executing the method "execute". Error: ' + str(error))
raise error
return res
class db_backup(models.Model):
_name = 'db.backup'
@api.multi
def get_db_list(self, host, port, context={}):
uri = 'http://' + host + ':' + port
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
db_list = execute(conn, 'list')
return db_list
@api.multi
def _get_db_name(self):
dbName = self._cr.dbname
return dbName
# Columns for local server configuration
host = fields.Char('Host', required=True, default='localhost')
port = fields.Char('Port', required=True, default=8069)
name = fields.Char('Database', required=True, help='Database you want to schedule backups for',
default=_get_db_name)
folder = fields.Char('Backup Directory', help='Absolute path for storing the backups', required=True,
default='/odoo/backups')
backup_type = fields.Selection([('zip', 'Zip'), ('dump', 'Dump')], 'Backup Type', required=True, default='zip')
autoremove = fields.Boolean('Auto. Remove Backups',
help='If you check this option you can choose to automatically remove the backup after xx days')
days_to_keep = fields.Integer('Remove after x days',
help="Choose after how many days the backup should be deleted. For example:\nIf you fill in 5 the backups will be removed after 5 days.",
required=True)
# Columns for external server (SFTP)
sftp_write = fields.Boolean('Write to external server with sftp',
help="If you check this option you can specify the details needed to write to a remote server with SFTP.")
sftp_path = fields.Char('Path external server',
help='The location to the folder where the dumps should be written to. For example /odoo/backups/.\nFiles will then be written to /odoo/backups/ on your remote server.')
sftp_host = fields.Char('IP Address SFTP Server',
help='The IP address from your remote server. For example 192.168.0.1')
sftp_port = fields.Integer('SFTP Port', help='The port on the FTP server that accepts SSH/SFTP calls.', default=22)
sftp_user = fields.Char('Username SFTP Server',
help='The username where the SFTP connection should be made with. This is the user on the external server.')
sftp_password = fields.Char('Password User SFTP Server',
help='The password from the user where the SFTP connection should be made with. This is the password from the user on the external server.')
days_to_keep_sftp = fields.Integer('Remove SFTP after x days',
help='Choose after how many days the backup should be deleted from the FTP server. For example:\nIf you fill in 5 the backups will be removed after 5 days from the FTP server.',
default=30)
send_mail_sftp_fail = fields.Boolean('Auto. E-mail on backup fail',
help='If you check this option you can choose to automatically get e-mailed when the backup to the external server failed.')
email_to_notify = fields.Char('E-mail to notify',
help='Fill in the e-mail where you want to be notified that the backup failed on the FTP.')
@api.multi
def _check_db_exist(self):
self.ensure_one()
db_list = self.get_db_list(self.host, self.port)
if self.name in db_list:
return True
return False
_constraints = [(_check_db_exist, _('Error ! No such database exists!'), [])]
@api.multi
def test_sftp_connection(self, context=None):
self.ensure_one()
# Check if there is a success or fail and write messages
messageTitle = ""
messageContent = ""
error = ""
has_failed = False
for rec in self:
db_list = self.get_db_list(rec.host, rec.port)
pathToWriteTo = rec.sftp_path
ipHost = rec.sftp_host
portHost = rec.sftp_port
usernameLogin = rec.sftp_user
passwordLogin = rec.sftp_password
# Connect with external server over SFTP, so we know sure that everything works.
try:
with ftputil.FTPHost(ipHost, usernameLogin, passwordLogin) as s:
messageTitle = _("Connection Test Succeeded!\nEverything seems properly set up for FTP back-ups!")
except Exception as e:
_logger.critical('There was a problem connecting to the remote ftp: ' + str(e))
error += str(e)
has_failed = True
messageTitle = _("Connection Test Failed!")
if len(rec.sftp_host) < 8:
messageContent += "\nYour IP address seems to be too short.\n"
messageContent += _("Here is what we got instead:\n")
if has_failed:
raise Warning(messageTitle + '\n\n' + messageContent + "%s" % str(error))
else:
raise Warning(messageTitle + '\n\n' + messageContent)
@api.model
def schedule_backup(self):
conf_ids = self.search([])
for rec in conf_ids:
db_list = self.get_db_list(rec.host, rec.port)
if rec.name in db_list:
try:
if not os.path.isdir(rec.folder):
os.makedirs(rec.folder)
except:
raise
# Create name for dumpfile.
bkp_file = '%s_%s.%s' % (time.strftime('%Y_%m_%d_%H_%M_%S'), rec.name, rec.backup_type)
file_path = os.path.join(rec.folder, bkp_file)
uri = 'http://' + rec.host + ':' + rec.port
conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
bkp = ''
try:
# try to backup database and write it away
fp = open(file_path, 'wb')
odoo.service.db.dump_db(rec.name, fp, rec.backup_type)
fp.close()
except Exception as error:
_logger.debug(
"Couldn't backup database %s. Bad database administrator password for server running at http://%s:%s" % (
rec.name, rec.host, rec.port))
_logger.debug("Exact error from the exception: " + str(error))
continue
else:
_logger.debug("database %s doesn't exist on http://%s:%s" % (rec.name, rec.host, rec.port))
# Check if user wants to write to SFTP or not.
if rec.sftp_write is True:
try:
# Store all values in variables
dir = rec.folder
pathToWriteTo = rec.sftp_path
ipHost = rec.sftp_host
portHost = rec.sftp_port
usernameLogin = rec.sftp_user
passwordLogin = rec.sftp_password
_logger.debug('sftp remote path: %s' % pathToWriteTo)
try:
with ftputil.FTPHost(ipHost, usernameLogin, passwordLogin) as sftp:
pass
except Exception as error:
_logger.critical('Error connecting to remote server! Error: ' + str(error))
try:
sftp.chdir(pathToWriteTo)
except IOError:
# Create directory and subdirs if they do not exist.
currentDir = ''
for dirElement in pathToWriteTo.split('/'):
currentDir += dirElement + '/'
try:
sftp.chdir(currentDir)
except:
_logger.info('(Part of the) path didn\'t exist. Creating it now at ' + currentDir)
# Make directory and then navigate into it
sftp.mkdir(currentDir, 777)
sftp.chdir(currentDir)
pass
sftp.chdir(pathToWriteTo)
# Loop over all files in the directory.
for f in os.listdir(dir):
if rec.name in f:
fullpath = os.path.join(dir, f)
if os.path.isfile(fullpath):
try:
sftp.StatResult(os.path.join(pathToWriteTo, f))
_logger.debug(
'File %s already exists on the remote FTP Server ------ skipped' % fullpath)
# This means the file does not exist (remote) yet!
except IOError:
try:
# sftp.put(fullpath, pathToWriteTo)
sftp.upload(fullpath, os.path.join(pathToWriteTo, f))
_logger.info('Copying File % s------ success' % fullpath)
except Exception as err:
_logger.critical(
'We couldn\'t write the file to the remote server. Error: ' + str(err))
# Navigate in to the correct folder.
sftp.chdir(pathToWriteTo)
# Loop over all files in the directory from the back-ups.
# We will check the creation date of every back-up.
for file in sftp.listdir(pathToWriteTo):
if rec.name in file:
# Get the full path
fullpath = os.path.join(pathToWriteTo, file)
# Get the timestamp from the file on the external server
timestamp = sftp.StatResult(fullpath).st_atime
createtime = datetime.datetime.fromtimestamp(timestamp)
now = datetime.datetime.now()
delta = now - createtime
# If the file is older than days_to_keep_sftp (the number of days to keep that the user filled in on the Odoo form), it will be removed.
if delta.days >= rec.days_to_keep_sftp:
# Only delete files, no directories!
if sftp.isfile(fullpath) and (".dump" in file or '.zip' in file):
_logger.info("Delete too old file from SFTP servers: " + file)
sftp.unlink(file)
# Close the SFTP session.
sftp.close()
except Exception as e:
_logger.debug('Exception! We couldn\'t back up to the FTP server..')
# At this point the SFTP backup failed. We will now check if the user wants
# an e-mail notification about this.
if rec.send_mail_sftp_fail:
try:
ir_mail_server = self.env['ir.mail_server']
message = "Dear,\n\nThe backup for the server " + rec.host + " (IP: " + rec.sftp_host + ") failed.Please check the following details:\n\nIP address SFTP server: " + rec.sftp_host + "\nUsername: " + rec.sftp_user + "\nPassword: " + rec.sftp_password + "\n\nError details: " + tools.ustr(
e) + "\n\nWith kind regards"
msg = ir_mail_server.build_email("auto_backup#" + rec.name + ".com", [rec.email_to_notify],
"Backup from " + rec.host + "(" + rec.sftp_host + ") failed",
message)
ir_mail_server.send_email(self._cr, self._uid, msg)
except Exception:
pass
"""
Remove all old files (on local server) in case this is configured..
"""
if rec.autoremove:
dir = rec.folder
# Loop over all files in the directory.
for f in os.listdir(dir):
fullpath = os.path.join(dir, f)
# Only delete the ones which are from the current database
# (Makes it possible to save different databases in the same folder)
if rec.name in fullpath:
timestamp = os.stat(fullpath).st_ctime
createtime = datetime.datetime.fromtimestamp(timestamp)
now = datetime.datetime.now()
delta = now - createtime
if delta.days >= rec.days_to_keep:
# Only delete files (which are .dump and .zip), no directories.
if os.path.isfile(fullpath) and (".dump" in f or '.zip' in f):
_logger.info("Delete local out-of-date file: " + fullpath)
os.remove(fullpath)
I can't get past this logger call:
_logger.critical('We couldn\'t write the file to the remote server. Error: ' + str(err))
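A note for anyone hitting the same wall: ftputil speaks plain FTP, not SFTP, despite the sftp_* field names, and in the code above the "with ftputil.FTPHost(...) as sftp: pass" block closes the session as soon as it exits, so every later sftp call (including the upload that ends up in that logger) runs against a closed connection. Also, StatResult is the object ftputil returns from stat(), not a method you can call. Below is a minimal sketch of the upload loop with the session kept open, using the same variables as the code above; treat it as a starting point, not a drop-in replacement:
with ftputil.FTPHost(ipHost, usernameLogin, passwordLogin) as remote:
    # Create the remote path (and any missing parents) once, up front
    if not remote.path.isdir(pathToWriteTo):
        remote.makedirs(pathToWriteTo)
    for f in os.listdir(rec.folder):
        if rec.name not in f:
            continue
        fullpath = os.path.join(rec.folder, f)
        target = remote.path.join(pathToWriteTo, f)
        # Upload only regular files that are not on the server yet
        if os.path.isfile(fullpath) and not remote.path.isfile(target):
            remote.upload(fullpath, target)  # everything stays inside the with block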

Related

Python SIEM log collector hangs randomly

I am trying to troubleshoot a script for Mimecast's API. The script runs fine for the most part, but a few times, I have noticed that it stops pulling logs and generally appears to be a hung process. After restarting the script and manually pushing logs to the syslog server, it starts working again without issue. I am not able to reproduce this issue at will.
The script is supposed to do the following:
Authenticate against Mimecast's API
Sign responses
Download, extract and save log files to the log dir
Utilize a tokenized header to determine which file was downloaded in the last request; it should save the token ID within a file in the checkpoint directory
Push files to remote syslog server
Output any errors and info to console
Below is the sample code from Mimecast.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging.handlers
import json
import os
import requests
import base64
import uuid
import datetime
import hashlib
import shutil
import hmac
import time
from zipfile import ZipFile
import io
# Set up variables
APP_ID = "YOUR DEVELOPER APPLICATION ID"
APP_KEY = "YOUR DEVELOPER APPLICATION KEY"
URI = "/api/audit/get-siem-logs"
EMAIL_ADDRESS = 'EMAIL ADDRESS OF YOUR ADMINISTRATOR'
ACCESS_KEY = 'ACCESS KEY FOR YOUR ADMINISTRATOR'
SECRET_KEY = 'SECRET KEY FOR YOUR ADMINISTRATOR'
LOG_FILE_PATH = "FULLY QUALIFIED PATH TO FOLDER TO WRITE LOGS"
CHK_POINT_DIR = 'FULLY QUALIFIED PATH TO FOLDER TO WRITE PAGE TOKEN'
# Set True to output to syslog, false to only save to file
syslog_output = False
# Enter the IP address or hostname of your syslog server
syslog_server = 'localhost'
# Change this to override default port
syslog_port = 514
# delete files after fetching
delete_files = True
# Set threshold in number of files in log file directory
log_file_threshold = 10000
# Set up logging (in this case to terminal)
log = logging.getLogger(__name__)
log.root.setLevel(logging.DEBUG)
log_formatter = logging.Formatter('%(levelname)s %(message)s')
log_handler = logging.StreamHandler()
log_handler.setFormatter(log_formatter)
log.addHandler(log_handler)
# Set up syslog output
syslog_handler = logging.handlers.SysLogHandler(address=(syslog_server, syslog_port))
syslog_formatter = logging.Formatter('%(message)s')
syslog_handler.setFormatter(syslog_formatter)
syslogger = logging.getLogger(__name__)
syslogger = logging.getLogger('SysLogger')
syslogger.addHandler(syslog_handler)
# Supporting methods
def get_hdr_date():
return datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S UTC")
def read_file(file_name):
try:
with open(file_name, 'r') as f:
data = f.read()
return data
except Exception as e:
log.error('Error reading file ' + file_name + '. Cannot continue. Exception: ' + str(e))
quit()
def write_file(file_name, data_to_write):
if '.zip' in file_name:
try:
byte_content = io.BytesIO(data_to_write)
zip_file = ZipFile(byte_content)
zip_file.extractall(LOG_FILE_PATH)
except Exception as e:
log.error('Error writing file ' + file_name + '. Cannot continue. Exception: ' + str(e))
quit()
else:
try:
with open(file_name, 'w') as f:
f.write(data_to_write)
except Exception as e:
log.error('Error writing file ' + file_name + '. Cannot continue. Exception: ' + str(e))
quit()
def get_base_url(email_address):
# Create post body for request
post_body = dict()
post_body['data'] = [{}]
post_body['data'][0]['emailAddress'] = email_address
# Create variables required for request headers
request_id = str(uuid.uuid4())
request_date = get_hdr_date()
headers = {'x-mc-app-id': APP_ID, 'x-mc-req-id': request_id, 'x-mc-date': request_date}
# Send request to API
log.debug('Sending request to https://api.mimecast.com/api/discover-authentication with request Id: ' +
request_id)
try:
r = requests.post(url='https://api.mimecast.com/api/login/discover-authentication',
data=json.dumps(post_body), headers=headers)
# Handle Rate Limiting
if r.status_code == 429:
log.warning('Rate limit hit. sleeping for ' + str(r.headers['X-RateLimit-Reset'] * 1000))
time.sleep(r.headers['X-RateLimit-Reset'] * 1000)
except Exception as e:
log.error('Unexpected error getting base url. Cannot continue.' + str(e))
quit()
# Handle error from API
if r.status_code != 200:
log.error('Request returned with status code: ' + str(r.status_code) + ', response body: ' +
r.text + '. Cannot continue.')
quit()
# Load response body as JSON
resp_data = json.loads(r.text)
# Look for api key in region region object to get base url
if 'region' in resp_data["data"][0]:
base_url = resp_data["data"][0]["region"]["api"].split('//')
base_url = base_url[1]
else:
# Handle no region found, likely the email address was entered incorrectly
log.error(
'No region information returned from API, please check the email address.'
'Cannot continue')
quit()
return base_url
def post_request(base_url, uri, post_body, access_key, secret_key):
# Create variables required for request headers
request_id = str(uuid.uuid4())
request_date = get_hdr_date()
unsigned_auth_header = '{date}:{req_id}:{uri}:{app_key}'.format(
date=request_date,
req_id=request_id,
uri=uri,
app_key=APP_KEY
)
hmac_sha1 = hmac.new(
base64.b64decode(secret_key),
unsigned_auth_header.encode(),
digestmod=hashlib.sha1).digest()
sig = base64.encodebytes(hmac_sha1).rstrip()
headers = {
'Authorization': 'MC ' + access_key + ':' + sig.decode(),
'x-mc-app-id': APP_ID,
'x-mc-date': request_date,
'x-mc-req-id': request_id,
'Content-Type': 'application/json'
}
try:
# Send request to API
log.debug('Sending request to https://' + base_url + uri + ' with request Id: ' + request_id)
r = requests.post(url='https://' + base_url + uri, data=json.dumps(post_body), headers=headers)
# Handle Rate Limiting
if r.status_code == 429:
log.warning('Rate limit hit. sleeping for ' + str(r.headers['X-RateLimit-Reset'] * 1000))
time.sleep(r.headers['X-RateLimit-Reset'] * 1000)
r = requests.post(url='https://' + base_url + uri, data=json.dumps(post_body), headers=headers)
# Handle errors
except Exception as e:
log.error('Unexpected error connecting to API. Exception: ' + str(e))
return 'error'
# Handle errors from API
if r.status_code != 200:
log.error('Request to ' + uri + ' with , request id: ' + request_id + ' returned with status code: ' +
str(r.status_code) + ', response body: ' + r.text)
return 'error'
# Return response body and response headers
return r.content, r.headers
def get_mta_siem_logs(checkpoint_dir, base_url, access_key, secret_key):
uri = "/api/audit/get-siem-logs"
# Set checkpoint file name to store page token
checkpoint_filename = os.path.join(checkpoint_dir, 'get_mta_siem_logs_checkpoint')
# Build post body for request
post_body = dict()
post_body['data'] = [{}]
post_body['data'][0]['type'] = 'MTA'
post_body['data'][0]['compress'] = True
if os.path.exists(checkpoint_filename):
post_body['data'][0]['token'] = read_file(checkpoint_filename)
# Send request to API
resp = post_request(base_url, uri, post_body, access_key, secret_key)
now = datetime.datetime.now().strftime("%a %b %d %H:%M:%S %Y")
# Process response
if resp != 'error':
resp_body = resp[0]
resp_headers = resp[1]
content_type = resp_headers['Content-Type']
# End if response is JSON as there is no log file to download
if content_type == 'application/json':
log.info('No more logs available')
return False
# Process log file
elif content_type == 'application/octet-stream':
file_name = resp_headers['Content-Disposition'].split('=\"')
file_name = file_name[1][:-1]
# Save files to LOG_FILE_PATH
write_file(os.path.join(LOG_FILE_PATH, file_name), resp_body)
# Save mc-siem-token page token to check point directory
write_file(checkpoint_filename, resp_headers['mc-siem-token'])
try:
if syslog_output is True:
for filename in os.listdir(LOG_FILE_PATH):
file_creation_time = time.ctime(os.path.getctime(LOG_FILE_PATH + "/" + filename))
if now < file_creation_time or now == file_creation_time:
log.info('Loading file: ' + filename + ' to output to ' + syslog_server + ':' + str(syslog_port))
with open(file=os.path.join(LOG_FILE_PATH, filename), mode='r', encoding='utf-8') as log_file:
lines = log_file.read().splitlines()
for line in lines:
syslogger.info(line)
log.info('Syslog output completed for file ' + filename)
except Exception as e:
log.error('Unexpected error writing to syslog. Exception: ' + str(e))
# return true to continue loop
return True
else:
# Handle errors
log.error('Unexpected response')
for header in resp_headers:
log.error(header)
return False
def run_script():
# discover base URL
try:
base_url = get_base_url(email_address=EMAIL_ADDRESS)
except Exception as e:
log.error('Error discovering base url for ' + EMAIL_ADDRESS + ' . Exception: ' + str(e))
quit()
# Request log data in a loop until there are no more logs to collect
try:
log.info('Getting MTA log data')
while get_mta_siem_logs(checkpoint_dir=CHK_POINT_DIR, base_url=base_url, access_key=ACCESS_KEY,
secret_key=SECRET_KEY) is True:
log.info('Getting more MTA log files')
except Exception as e:
log.error('Unexpected error getting MTA logs ' + (str(e)))
file_number = len([name for name in os.listdir(LOG_FILE_PATH) if os.path.isfile(name)])
if delete_files or file_number >= log_file_threshold:
for filename in os.listdir(LOG_FILE_PATH):
file_path = os.path.join(LOG_FILE_PATH, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
quit()
# Run script
run_script()
It seems like it may be a race condition, but I am not sure how to confirm that since I can't reproduce it. I notice that SumoLogic has a modified version of this script as well, with a different methodology for managing the files/paths. If that script works better than the main sample script above, could anybody explain why? I haven't had any issues with it yet.
https://github.com/SumoLogic/sumologic-content/blob/master/MimeCast/SumoLogic-Mimecast-Data-Collection/siem_collection.py
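One concrete thing to check before suspecting a race condition: none of the requests.post() calls pass a timeout, and requests waits indefinitely by default, so a server that stops responding leaves the process hung exactly as described. There is also a bug in the rate-limit branches: r.headers['X-RateLimit-Reset'] is a string, so multiplying it by 1000 repeats the text a thousand times and time.sleep() then raises. A hedged sketch of the POST with both fixed, using the same names as the script above (confirm against Mimecast's docs whether the header is in seconds or milliseconds):
try:
    r = requests.post(url='https://' + base_url + uri,
                      data=json.dumps(post_body), headers=headers,
                      timeout=(10, 120))  # (connect, read) seconds; a stall now raises
    if r.status_code == 429:
        wait = float(r.headers.get('X-RateLimit-Reset', 60))  # convert from string first
        log.warning('Rate limit hit. sleeping for ' + str(wait))
        time.sleep(wait)
except requests.exceptions.RequestException as e:
    log.error('Request failed or timed out: ' + str(e))
Separately, os.path.isfile(name) in run_script() tests bare filenames against the current working directory, so file_number is almost always 0; os.path.isfile(os.path.join(LOG_FILE_PATH, name)) is probably what was meant.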

psycopg2.OperationalError: FATAL: role does not exist

I'm trying to update my Heroku DB from a Python script I have on my computer. I set up my app on Heroku with NodeJS (because I just like Javascript for that sort of thing), and I'm not sure I can add in a Python script to manage everything. I was able to fill out the DB once, with the script, and it had no hangups. When I try to update it, I get the following statement in my console:
Traceback (most recent call last):
File "/home/alan/dev/python/smog_usage_stats/scripts/DBManager.py", line 17, in <module>
CONN = pg2.connect(
File "/home/alan/dev/python/smog_usage_stats/venv/lib/python3.8/site-packages/psycopg2/__init__.py", line 127, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
psycopg2.OperationalError: FATAL: role "alan" does not exist
and this is my script:
#DBManager.py
import os
import zipfile
import psycopg2 as pg2
from os.path import join, dirname
from dotenv import load_dotenv
# -------------------------------
# Connection variables
# -------------------------------
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
# -------------------------------
# Connection to database
# -------------------------------
# Server connection
CONN = pg2.connect(
database = os.environ.get('PG_DATABASE'),
user = os.environ.get('PG_USER'),
password = os.environ.get('PG_PASSWORD'),
host = os.environ.get('PG_HOST'),
port = os.environ.get('PG_PORT')
)
# Local connection
# CONN = pg2.connect(
# database = os.environ.get('LOCAL_DATABASE'),
# user = os.environ.get('LOCAL_USER'),
# password = os.environ.get('LOCAL_PASSWORD'),
# host = os.environ.get('LOCAL_HOST'),
# port = os.environ.get('LOCAL_PORT')
# )
print("Connected to POSTGRES!")
global CUR
CUR = CONN.cursor()
# -------------------------------
# Database manager class
# -------------------------------
class DB_Manager:
def __init__(self):
self.table_name = "smogon_usage_stats"
try:
self.__FILE = os.path.join(
os.getcwd(),
"data/statsmaster.csv"
)
except:
print('you haven\'t downloaded any stats')
# -------------------------------
# Create the tables for the database
# -------------------------------
def construct_tables(self):
master_file = open(self.__FILE)
columns = master_file.readline().strip().split(",")
sql_cmd = "DROP TABLE IF EXISTS " + self.table_name + ";\n"
sql_cmd += "CREATE TABLE " + self.table_name + " (\n"
sql_cmd += (
"id_ SERIAL PRIMARY KEY,\n"
+ columns[0] + " INTEGER,\n"
+ columns[1] + " VARCHAR(50),\n"
+ columns[2] + " FLOAT,\n"
+ columns[3] + " INTEGER,\n"
+ columns[4] + " FLOAT,\n"
+ columns[5] + " INTEGER,\n"
+ columns[6] + " FLOAT,\n"
+ columns[7] + " INTEGER,\n"
+ columns[8] + " VARCHAR(10),\n"
+ columns[9] + " VARCHAR(50));"
)
CUR.execute(sql_cmd)
CONN.commit()
# -------------------------------
# Copy data from CSV files created in smogon_pull.py into database
# -------------------------------.
def fill_tables(self):
master_file = open(self.__FILE, "r")
columns = tuple(master_file.readline().strip().split(","))
CUR.copy_from(
master_file,
self.table_name,
columns=columns,
sep=","
)
CONN.commit()
# -------------------------------
# Disconnect from database.
# -------------------------------
def close_db(self):
CUR.close()
print("Cursor closed.")
CONN.close()
print("Connection to server closed.")
if __name__ == "__main__":
manager = DB_Manager()
print("connected")
manager.construct_tables()
print("table made")
manager.fill_tables()
print("filled")
As I said, everything worked fine, but now I'm getting this unexpected error and I'm not sure how to trace it back. The name "alan" is not in any of my credentials, which is confusing me.
I'm not running it via the CLI, but through my text editor (in this case VS Code).
So the reason this didn't work is that I was pointing to the wrong directory for my .env file. dotenv_path = join(dirname(__file__), '.env') needs to "walk" up one more level to find my .env. Changing it to dotenv_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.env')) made it work. Just in case someone else has a similar issue, that might be something to check!
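For context on why the role was "alan": when load_dotenv() finds no file, every os.environ.get('PG_*') call returns None, psycopg2 drops the None parameters, and libpq falls back to its defaults, which include your operating-system login name as the role. A small guard, sketched with the same variable names as above, makes that failure loud instead of silent:
import os
from os.path import abspath, dirname, join
from dotenv import load_dotenv

dotenv_path = abspath(join(dirname(__file__), '..', '.env'))
load_dotenv(dotenv_path)

# Fail fast if the .env was not found or is incomplete
missing = [k for k in ('PG_DATABASE', 'PG_USER', 'PG_PASSWORD', 'PG_HOST', 'PG_PORT')
           if not os.environ.get(k)]
if missing:
    raise RuntimeError('Missing settings %s; check %s' % (missing, dotenv_path))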
Might be unrelated, but double-check your ports if you are running multiple instances: I also got psycopg2.OperationalError: FATAL: role "myUser" does not exist when I tried to log in to a PostgreSQL database running on the default port 5432 with credentials I had set up in another instance running on port 5433.

downloading file using ftp

I am writing a Python script to log in using FTP and download a file. But whenever I run this script, it says I have provided a wrong user name or password. I am inputting the right password but am still unable to run the script. My code is:
import os,getpass
from urllib.request import urlopen
filename='68544.jpg'
password=getpass.getpass('??')
The script fails at the line below, although whenever I enter the same address in a browser it works fine.
remoteaddr = 'ftp://Kamal:%s@localhost/%s;type=i' % (password, filename)
remotefile=urlopen(remoteaddr)
localfile=open(filename,'wb')
localfile.write(remotefile.read())
localfile.close()
remotefile.close()
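If the URL form keeps failing, keep in mind that any special characters in the password have to be percent-encoded inside a URL (urllib.parse.quote), which a getpass-typed password won't be. Here is a sketch of the same download using ftplib, which takes the credentials as-is; the host and username are taken from the snippet above:
from ftplib import FTP
import getpass

filename = '68544.jpg'
ftp = FTP('localhost')
ftp.login(user='Kamal', passwd=getpass.getpass('??'))
with open(filename, 'wb') as localfile:
    # type=i in the URL meant binary mode; RETR via retrbinary does the same
    ftp.retrbinary('RETR ' + filename, localfile.write)
ftp.quit()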
from ftplib import FTP
from time import strftime

def debug(txt):
    print txt

def ftp_connect(path):
    link = FTP(host='example.com', timeout=5)  # Keep a low timeout
    link.login(passwd='ftppass', user='ftpuser')
    debug("%s - Connected to FTP" % strftime("%d-%m-%Y %H.%M"))
    link.cwd(path)
    return link

downloaded = open('/local/path/to/file.tgz', 'wb')
link = ftp_connect(path)
file_size = link.size(filename)
max_attempts = 5  # I don't want death loops.
while file_size != downloaded.tell():
    try:
        debug("%s while > try, run retrbinary\n" % strftime("%d-%m-%Y %H.%M"))
        if downloaded.tell() != 0:
            # Resume the transfer from where the local file left off
            link.retrbinary('RETR ' + filename, downloaded.write, downloaded.tell())
        else:
            link.retrbinary('RETR ' + filename, downloaded.write)
    except Exception as myerror:
        if max_attempts != 0:
            debug("%s while > except, something going wrong: %s\n\tfile length is: %i > %i\n"
                  % (strftime("%d-%m-%Y %H.%M"), myerror, file_size, downloaded.tell()))
            link = ftp_connect(path)
            max_attempts -= 1
        else:
            break
debug("Done with file, attempt to download md5sum")
[...]
Use the Paramiko library:
import paramiko
paramiko.util.log_to_file('/tmp/paramiko.log')
# Open a transport
host = "example.com"
port = 22
transport = paramiko.Transport((host, port))
# Auth
password = "foo"
username = "bar"
transport.connect(username = username, password = password)
# Go!
sftp = paramiko.SFTPClient.from_transport(transport)
# Download
### It is relative path from the folder path on which this sftp user has default rights.
filepath = 'folder/file1.txt'
localpath = '/opt/backup/file.txt'
sftp.get(filepath, localpath)
# Close
sftp.close()
transport.close()

Check server status on Twisted

While I was writing a simple message-based fileserver and client, I got the idea of checking the fileserver's status, but I don't know how to realize this: just try to connect and disconnect from the server (and how do I disconnect immediately when the server is not running, if I use this approach)? Or maybe Twisted/Autobahn has some facilities that help get the server status without creating a "full connection"?
a) fileserver.py
import os
import sys
import json
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS
CONFIG_TEMPLATE = ''
CONFIG_DATA = {}
class MessageBasedServerProtocol(WebSocketServerProtocol):
"""
Message-based WebSockets server
Template contains some parts as string:
[USER_ID:OPERATION_NAME:FILE_ID] - 15 symbols for USER_ID,
10 symbols for OPERATION_NAME,
25 symbols for FILE_ID
other - some data
"""
def __init__(self):
path = CONFIG_DATA['path']
base_dir = CONFIG_DATA['base_dir']
# prepare to working with files...
if os.path.exists(path) and os.path.isdir(path):
os.chdir(path)
if not os.path.exists(base_dir) or not os.path.isdir(base_dir):
os.mkdir(base_dir)
os.chdir(base_dir)
else:
os.makedirs(path)
os.chdir(path)
os.mkdir(base_dir)
os.chdir(base_dir)
# init some things
self.fullpath = path + '/' + base_dir
def __checkUserCatalog(self, user_id):
# prepare to working with files...
os.chdir(self.fullpath)
if not os.path.exists(user_id) or not os.path.isdir(user_id):
os.mkdir(user_id)
os.chdir(user_id)
else:
os.chdir(self.fullpath + '/' + user_id)
def onOpen(self):
print "[USER] User with %s connected" % (self.transport.getPeer())
def connectionLost(self, reason):
print '[USER] Lost connection from %s' % (self.transport.getPeer())
def onMessage(self, payload, isBinary):
"""
Processing request from user and send response
"""
user_id, cmd, file_id = payload[:54].replace('[', '').replace(']','').split(':')
data = payload[54:]
operation = "UNK" # WRT - Write, REA -> Read, DEL -> Delete, UNK -> Unknown
status = "C" # C -> Complete, E -> Error in operation
commentary = 'Successful!'
# write file into user storage
if cmd == 'WRITE_FILE':
self.__checkUserCatalog(user_id)
operation = "WRT"
try:
f = open(file_id, "wb")
f.write(data)
except IOError, argument:
status = "E"
commentary = argument
except Exception, argument:
status = "E"
commentary = argument
raise Exception(argument)
finally:
f.close()
# read some file
elif cmd == 'READU_FILE':
self.__checkUserCatalog(user_id)
operation = "REA"
try:
f = open(file_id, "rb")
commentary = f.read()
except IOError, argument:
status = "E"
commentary = argument
except Exception, argument:
status = "E"
commentary = argument
raise Exception(argument)
finally:
f.close()
# delete file from storage (and in main server, in parallel delete from DB)
elif cmd == 'DELET_FILE':
self.__checkUserCatalog(user_id)
operation = "DEL"
try:
os.remove(file_id)
except IOError, argument:
status = "E"
commentary = argument
except Exception, argument:
status = "E"
commentary = argument
raise Exception(argument)
self.sendMessage('[%s][%s]%s' % (operation, status, commentary), isBinary=True)
if __name__ == '__main__':
if len(sys.argv) < 2:
print "using python fileserver_client.py [PATH_TO_config.json_FILE]"
else:
# read config file
CONFIG_TEMPLATE = sys.argv[1]
with open(CONFIG_TEMPLATE, "r") as f:
CONFIG_DATA = json.load(f)
# create server
factory = WebSocketServerFactory("ws://localhost:9000")
factory.protocol = MessageBasedServerProtocol
listenWS(factory)
reactor.run()
b) client.py
import json
import sys
import commands
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketClientFactory, WebSocketClientProtocol, connectWS
CONFIG_TEMPLATE = ''
CONFIG_DATA = {}
class MessageBasedClientProtocol(WebSocketClientProtocol):
"""
Message-based WebSockets client
Template contains some parts as string:
[USER_ID:OPERATION_NAME:FILE_ID] - 15 symbols for USER_ID,
10 symbols for OPERATION_NAME,
25 symbols for FILE_ID
other - some data
"""
def onOpen(self):
user_id = CONFIG_DATA['user']
operation_name = CONFIG_DATA['cmd']
file_id = CONFIG_DATA['file_id']
src_file = CONFIG_DATA['src_file']
data = '[' + str(user_id) + ':' + str(operation_name) + ':' + str(file_id) + ']'
if operation_name == 'WRITE_FILE':
with open(src_file, "r") as f:
info = f.read()
data += str(info)
self.sendMessage(data, isBinary=True)
def onMessage(self, payload, isBinary):
cmd = payload[1:4]
result_cmd = payload[6]
if cmd in ('WRT', 'DEL'):
print payload
elif cmd == 'REA':
if result_cmd == 'C':
try:
data = payload[8:]
f = open(CONFIG_DATA['src_file'], "wb")
f.write(data)
except IOError, e:
print e
except Exception, e:
raise Exception(e)
finally:
print payload[:8] + "Successfully!"
f.close()
else:
print payload
reactor.stop()
if __name__ == '__main__':
if len(sys.argv) < 2:
print "using python fileserver_client.py [PATH_TO_config.json_FILE]"
else:
# read config file
CONFIG_TEMPLATE = sys.argv[1]
with open(CONFIG_TEMPLATE, "r") as f:
CONFIG_DATA = json.load(f)
# connection to server
factory = WebSocketClientFactory("ws://localhost:9000")
factory.protocol = MessageBasedClientProtocol
connectWS(factory)
reactor.run()
I found a solution to this issue: use callLater or deferLater to disconnect from the server if we can't connect; when everything is OK, just take the server status that it reports.
import sys
from twisted.internet.task import deferLater
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketClientFactory, WebSocketClientProtocol, connectWS
CONFIG_IP = ''
CONFIG_PORT = 9000
def isOffline(status):
print status
class StatusCheckerProtocol(WebSocketClientProtocol):
def __init__(self):
self.operation_name = "STATUS_SRV"
self.user_id = 'u00000000000000'
self.file_id = "000000000000000000000.log"
def onOpen(self):
data = '[' + str(self.user_id) + ':' + str(self.operation_name) + ':' + str(self.file_id) + ']'
self.sendMessage(data, isBinary=True)
def onMessage(self, payload, isBinary):
cmd = payload[1:4]
result_cmd = payload[6]
data = payload[8:]
print data
reactor.stop()
if __name__ == '__main__':
if len(sys.argv) < 3:
print "using python statuschecker.py [IP] [PORT]"
else:
# read preferences
CONFIG_IP = sys.argv[1]
CONFIG_PORT = int(sys.argv[2])
server_addr = "ws://%s:%d" % (CONFIG_IP, CONFIG_PORT)
# connection to server
factory = WebSocketClientFactory(server_addr)
factory.protocol = StatusCheckerProtocol
connectWS(factory)
# create a special Deferred which disconnects us from the server if we can't connect within 3 seconds
d = deferLater(reactor, 3, isOffline, 'OFFLINE')
d.addCallback(lambda ignored: reactor.stop())
# run all system...
reactor.run()
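A side note on this approach: the three-second deferLater fires even when the TCP connection was refused outright, although Twisted can report that immediately. A sketch (Python 2, assuming the same Twisted/Autobahn versions as the snippets above) that overrides clientConnectionFailed on the factory instead:
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketClientFactory, connectWS

class StatusCheckerFactory(WebSocketClientFactory):
    def clientConnectionFailed(self, connector, reason):
        # Called as soon as the connection is refused or unreachable
        print 'OFFLINE (%s)' % reason.getErrorMessage()
        reactor.stop()

factory = StatusCheckerFactory("ws://localhost:9000")
factory.protocol = StatusCheckerProtocol  # the protocol class defined above
connectWS(factory)
reactor.run()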

Upload folders from local system to FTP using Python script

I have to automatically upload folders to an FTP server using a Python script. I am able to upload a single file, but not folders with subfolders and files in them. I did a lot of searching, but failed. Could someone help me out here? Thanks in advance.
#! /usr/bin/python
import ftplib
s = ftplib.FTP('serverip','usrname','password')
file = '/home/rock/test.txt'
ftppath = '/IT'
filename = "rak"
s.cwd(ftppath)
f = open(file,'rb')
s.storbinary('STOR ' + filename, f)
f.close()
s.quit()
I recently ran into this problem and figured out a recursive function to solve it.
import ftplib
import os
server = 'localhost'
username = 'generic_user'
password = 'password'
myFTP = ftplib.FTP(server, username, password)
myPath = r'c:\temp'
def uploadThis(path):
    files = os.listdir(path)
    os.chdir(path)
    for f in files:
        if os.path.isfile(path + r'\{}'.format(f)):
            fh = open(f, 'rb')
            myFTP.storbinary('STOR %s' % f, fh)
            fh.close()
        elif os.path.isdir(path + r'\{}'.format(f)):
            myFTP.mkd(f)
            myFTP.cwd(f)
            uploadThis(path + r'\{}'.format(f))
    myFTP.cwd('..')
    os.chdir('..')
uploadThis(myPath) # now call the recursive function
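Note that the snippet above hard-codes Windows-style backslashes and keeps changing the local working directory. A portable variant (my adaptation, not part of the original answer) builds paths with os.path.join instead:
import ftplib
import os

def upload_tree(ftp, local_dir):
    # Recursively upload the contents of local_dir into the FTP server's
    # current directory, mirroring the folder structure.
    for name in os.listdir(local_dir):
        local_path = os.path.join(local_dir, name)
        if os.path.isfile(local_path):
            with open(local_path, 'rb') as fh:
                ftp.storbinary('STOR %s' % name, fh)
        elif os.path.isdir(local_path):
            ftp.mkd(name)
            ftp.cwd(name)
            upload_tree(ftp, local_path)
            ftp.cwd('..')

upload_tree(ftplib.FTP('localhost', 'generic_user', 'password'), r'c:\temp')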
You basically need to use os.walk() to grab those files and transfer them.
Here's a script I wrote for myself to do much of what you're asking.
I wrote it a long time ago, so I'd probably do it differently if I wrote it again, but I get a lot of use out of it.
It imports psftplib, which is a wrapper I wrote for PuTTY's psftp.
Feel free to remove these references, or grab the lib at:
http://code.google.com/p/psftplib/source/browse/trunk/psftplib.py
# -*- coding: utf8 -*-
'''This tool will ftp all the files in a given directory to a given location
if the file ftpallcfg.py exists in the directory it will be loaded and the values within it used,
with the current directory used as the source directory.
ftpallcfg.py file contains the following variables.
===========================
server = <server to ftp to>
username = <Username for access to given server>
remote_dir = <remote server directory>
encrypt= True/False
monitor = True/False
walk = True/False
===========================
'''
import ftplib
import os
import getpass
import sys
import time
import socket
import psftplib
__revision__ = 1.11
SLEEP_SECONDS = 1
class FtpAddOns():
PATH_CACHE = []
def __init__(self, ftp_h):
self.ftp_h = ftp_h
def ftp_exists(self, path):
'''path exists check function for ftp handler'''
exists = None
if path not in self.PATH_CACHE:
try:
self.ftp_h.cwd(path)
exists = True
self.PATH_CACHE.append(path)
except ftplib.error_perm, e:
if str(e.args).count('550'):
exists = False
else:
exists = True
return exists
def ftp_mkdirs(self, path, sep='/'):
'''mkdirs function for ftp handler'''
split_path = path.split(sep)
new_dir = ''
for server_dir in split_path:
if server_dir:
new_dir += sep + server_dir
if not self.ftp_exists(new_dir):
try:
print 'Attempting to create directory (%s) ...' % (new_dir),
self.ftp_h.mkd(new_dir)
print 'Done!'
except Exception, e:
print 'ERROR -- %s' % (str(e.args))
def _get_local_files(local_dir, walk=False):
'''Retrieve local files list
result_list == a list of dictionaries with path and mtime keys. ex: {'path':<filepath>,'mtime':<file last modified time>}
ignore_dirs == a list of directories to ignore, should not include the base_dir.
ignore_files == a list of files to ignore.
ignore_file_ext == a list of extentions to ignore.
'''
result_list = []
ignore_dirs = ['CVS', '.svn']
ignore_files = ['.project', '.pydevproject']
ignore_file_ext = ['.pyc']
base_dir = os.path.abspath(local_dir)
for current_dir, dirs, files in os.walk(base_dir):
for this_dir in ignore_dirs:
if this_dir in dirs:
dirs.remove(this_dir)
sub_dir = current_dir.replace(base_dir, '')
if not walk and sub_dir:
break
for this_file in files:
if this_file not in ignore_files and os.path.splitext(this_file)[-1].lower() not in ignore_file_ext:
filepath = os.path.join(current_dir, this_file)
file_monitor_dict = {
'path': filepath,
'mtime': os.path.getmtime(filepath)
}
result_list.append(file_monitor_dict)
return result_list
def monitor_and_ftp(server,
username,
password,
local_dir,
remote_dir,
encrypt=False,
walk=False):
'''Monitor local files and when an update is found connect and upload'''
print 'Monitoring changes in (%s).' % (os.path.abspath(local_dir))
print '(Use ctrl-c to exit)'
last_files_list = _get_local_files(local_dir)
while True:
try:
time.sleep(SLEEP_SECONDS)
latest_files_list = _get_local_files(local_dir)
files_to_update = []
for idx in xrange(len(latest_files_list)):
if idx < len(last_files_list):
# compare last modified times
if latest_files_list[idx]['mtime'] > last_files_list[idx]['mtime']:
files_to_update.append(latest_files_list[idx])
else:
# add the file to the list (new file)
files_to_update.append(latest_files_list[idx])
if files_to_update:
print
print 'Detected NEW or CHANGED file(s), attempting to send ...'
print
is_success = upload_all(server,
username,
password,
local_dir,
remote_dir,
files_to_update,
encrypt,
walk)
if not is_success:
break
else:
print '.',
last_files_list = latest_files_list[:] # copy the list to hold
except KeyboardInterrupt:
print
print 'Exiting.'
break
def upload_all(server,
username,
password,
base_local_dir,
base_remote_dir,
files_to_update=None,
encrypt=False,
walk=False):
'''Upload all files in a given directory to the given remote directory'''
continue_on = False
login_ok = False
server_connect_ok = False
base_local_dir = os.path.abspath(base_local_dir)
base_remote_dir = os.path.normpath(base_remote_dir)
if files_to_update:
local_files = files_to_update
else:
local_files = _get_local_files(base_local_dir, walk)
if local_files:
if not encrypt: # Use standard FTP
ftp_h = ftplib.FTP()
else: # Use sftp
ftp_h = psftplib.SFTP()
try:
ftp_h.connect(server)
server_connect_ok = True
except socket.gaierror, e:
print 'ERROR -- Could not connect to (%s): %s' % (server, str(e.args))
except IOError, e:
print 'ERROR -- File not found: %s' % (str(e.args))
except socket.error, e:
print 'ERROR -- Could not connect to (%s): %s' % (server, str(e.args))
ftp_path_tools = FtpAddOns(ftp_h)
if server_connect_ok:
try:
ftp_h.login(username,password)
print 'Logged into (%s) as (%s)' % (server, username)
login_ok = True
except ftplib.error_perm, e:
print 'ERROR -- Check Username/Password: %s' % (str(e.args))
except psftplib.ProcessTimeout, e:
print 'ERROR -- Check Username/Password (timeout): %s' % (str(e.args))
if login_ok:
for file_info in local_files:
filepath = file_info['path']
path, filename = os.path.split(filepath)
remote_sub_path = path.replace(base_local_dir, '')
remote_path = path.replace(base_local_dir, base_remote_dir)
remote_path = remote_path.replace('\\', '/') # Convert to unix style
if not ftp_path_tools.ftp_exists(remote_path):
ftp_path_tools.ftp_mkdirs(remote_path)
# Change to directory
try:
ftp_h.cwd(remote_path)
continue_on = True
except ftplib.error_perm, e:
print 'ERROR -- %s' % (str(e.args))
except psftplib.PsFtpInvalidCommand, e:
print 'ERROR -- %s' % (str(e.args))
if continue_on:
if os.path.exists(filepath):
f_h = open(filepath,'rb')
filename = os.path.split(f_h.name)[-1]
display_filename = os.path.join(remote_sub_path, filename)
display_filename = display_filename.replace('\\', '/')
print 'Sending (%s) ...' % (display_filename),
send_cmd = 'STOR %s' % (filename)
try:
ftp_h.storbinary(send_cmd, f_h)
f_h.close()
print 'Done!'
except Exception, e:
print 'ERROR!'
print str(e.args)
print
else:
print "WARNING -- File no longer exists, (%s)!" % (filepath)
ftp_h.quit()
print 'Closing Connection'
else:
print 'ERROR -- No files found in (%s)' % (base_local_dir)
return continue_on
if __name__ == '__main__':
import optparse
default_config_file = u'ftpallcfg.py'
# Create parser, and configure command line options to parse
parser = optparse.OptionParser()
parser.add_option("-l", "--local_dir",
dest="local_dir",
help="Local Directory (Defaults to CWD)",
default='.')
parser.add_option("-r", "--remote_dir",
dest="remote_dir",
help="[REQUIRED] Target Remote directory",
default=None)
parser.add_option("-u", "--username",
dest="username",
help="[REQUIRED] username",
default=None)
parser.add_option("-s","--server",
dest="server",
help="[REQUIRED] Server Address",
default=None)
parser.add_option("-e", "--encrypt",
action="store_true",
dest="encrypt",
help="Use sftp",
default=False)
parser.add_option("-m",
action="store_true",
dest="monitor",
help="Keep process open and monitor changes",
default=False)
parser.add_option("-w",
action="store_true",
dest="walkdir",
help="Walk sub directories of the given directory to find files to send.",
default=False)
(options,args) = parser.parse_args()
if (options.username and options.server and options.remote_dir) or \
os.path.exists(default_config_file):
local_dir = options.local_dir
if os.path.exists(default_config_file):
sys.path.append('.')
import ftpallcfg
try:
server = ftpallcfg.server
username = ftpallcfg.username
remote_dir = ftpallcfg.remote_dir
encrypt = ftpallcfg.encrypt
monitor = ftpallcfg.monitor
walk = ftpallcfg.walk
except AttributeError, e:
print "ERROR --", str(e.args)
print
print 'Value(s) missing in %s file! The following values MUST be included:' % (default_config_file)
print '================================'
print 'server = <server to ftp to>'
print 'username = <Username for access to given server>'
print 'remote_dir = <remote server directory>'
print 'encrypt= True/False'
print 'monitor = True/False'
print 'walk == True/False'
print '================================'
sys.exit()
else:
server = options.server
username = options.username
remote_dir = options.remote_dir
encrypt = options.encrypt
monitor = options.monitor
walk = options.walkdir
# get the user password
prompt = 'Password (%s@%s): ' % (username, server)
if os.isatty(sys.stdin.fileno()):
p = getpass.getpass(prompt)
else:
#p = sys.stdin.readline().rstrip()
p = raw_input(prompt).rstrip()
if options.encrypt:
print '>> Using sftp for secure transfers <<'
print
if monitor:
try:
monitor_and_ftp(server,username,p,local_dir, remote_dir, encrypt, walk)
except KeyboardInterrupt:
print 'Exiting...'
else:
try:
upload_all(server, username, p, local_dir, remote_dir, [], encrypt, walk)
except KeyboardInterrupt:
print 'Exiting...'
else:
print 'ERROR -- Required option not given!'
print __revision__
print __doc__
print
parser.print_help()
EDIT 20/12/2017: I have written a project on GitHub for this purpose. Click for details!
There are good answers above, but I also want to add one using the ftputil package. If you need to upload files from a local directory to an FTP directory, you can use this recursive function:
def upload_dir(localDir, ftpDir):
    list = os.listdir(localDir)
    for fname in list:
        if os.path.isdir(localDir + fname):
            if ftp_host.path.exists(ftpDir + fname) != True:
                ftp_host.mkdir(ftpDir + fname)
                print(ftpDir + fname + " is created.")
            upload_dir(localDir + fname + "/", ftpDir + fname + "/")
        else:
            if ftp_host.upload_if_newer(localDir + fname, ftpDir + fname):
                print(ftpDir + fname + " is uploaded.")
            else:
                print(localDir + fname + " has already been uploaded.")
If you decide to use this function, you have to connect to the FTP host using the ftputil package. For this, you can use this snippet:
with ftputil.FTPHost("ftp_host", "ftp_username", "ftp_password") as ftp_host:
So, we're almost done. The last thing is the usage of the function, for beginners like me:
local_dir = "D:/Projects/.../"
ftp_dir = "/.../../"
upload_dir(local_dir, ftp_dir)
The most important thing is the "/" character at the end of the paths; you need to put it at the end of both. Finally, I want to share the entire code:
with ftputil.FTPHost("ftp_host", "ftp_username", "ftp_password") as ftp_host:
def upload_dir(localDir, ftpDir):
list = os.listdir(localDir)
for fname in list:
if os.path.isdir(localDir + fname):
if(ftp_host.path.exists(ftpDir + fname) != True):
ftp_host.mkdir(ftpDir + fname)
print(ftpDir + fname + " is created.")
upload_dir(localDir + fname + "/", ftpDir + fname + "/")
else:
if(ftp_host.upload_if_newer(localDir + fname, ftpDir + fname)):
print(ftpDir + fname + " is uploaded.")
else:
print(localDir + fname + " has already been uploaded.")
local_dir = "D:/Projects/.../"
ftp_dir = "/.../../"
upload_dir(local_dir, ftp_dir)
Maybe try ftpsync.py. If that doesn't help, try a Google search on "python ftpsync" and you will get a lot of answers.
using ftputil:
import os
import ftputil
import ftputil.session

def upload_dir(root):
    root = unicode(root, 'utf-8')
    for dir_name, _, dir_files in os.walk(root):
        local = os.path.join(os.curdir, dir_name)
        remote = ftp_host.path.join(ftp_host.curdir, dir_name)
        if not ftp_host.path.exists(remote):
            print 'mkdir:', local, '->', remote
            ftp_host.mkdir(remote)
        for f in dir_files:
            local_f = os.path.join(local, f)
            remote_f = ftp_host.path.join(remote, f)
            print 'upload:', local_f, '->', remote_f
            ftp_host.upload(local_f, remote_f)

sf = ftputil.session.session_factory(use_passive_mode=True)
with ftputil.FTPHost('HOST', 'USER', 'PASS', session_factory=sf) as ftp_host:
    upload_dir('DIR')
It is easy to use lftp to upload folders to an FTP server. I use this in my Python script to move folders to the FTP:
Python script:
#! /usr/bin/python
import subprocess
subprocess.call(["bash", ftp_script, username, password, ftp, folder_to_move, src,folder_name_in_destination])
ftp_script:
lftp -u $1,$2 $3 <<EOF
mkdir $4
lcd $5
cd $6
mirror --reverse
EOF
Yet another answer using ftputil, plus pathlib for figuring out the local path. I've compiled and improved some of the above answers, so thank you for the inspiration!
import os
import ftplib
import ftputil
from pathlib import Path
def ftp_upload(ftp_host, src_path, dst_path=None):
    if dst_path is None:
        dst_path = ftp_host.path.join(os.curdir)
    # Make sure that local path is a directory
    if src_path.is_dir():
        # Make sure that the root path exists
        if ftp_host.path.exists(dst_path) is False:
            # Inform user
            print(f'mkdir: {src_path} -> {dst_path}')
            # Create folder
            ftp_host.mkdir(dst_path)
        # Parse content of the root folder
        for src_child in src_path.iterdir():
            if src_child.is_dir():
                # Use recursion for sub folders/files
                dst_child = ftp_host.path.join(dst_path, src_child.stem)
                ftp_upload(ftp_host, src_child, dst_child)
            else:
                # Copy sub files
                dst_child = ftp_host.path.join(dst_path, src_child.name)
                # Inform user
                print(f'upload: {src_child} -> {dst_child}')
                # Perform copy (upload)
                ftp_host.upload(src_child, dst_child)
                # ftp_host.upload_if_newer(src_child, dst_child)
    else:
        # Inform user
        print(f"src_path: '{src_path}' must be an existing directory!")

if __name__ == '__main__':
    # FTP settings
    user = "ftp_user"
    password = "ftp_password"
    host = "localhost"
    port = 2121
    # Path settings
    src_path = Path(__file__).parent.resolve() / "upload_dir"
    dst_dir = "upload_dir"
    # Inform user
    print(f"Establishing FTP session as '{user}'")
    # Establish FTP session
    ftplib.FTP.port = port
    with ftputil.FTPHost(host, user, password) as ftp_host:
        # Perform recursive FTP upload
        dst_path = ftp_host.path.join(os.curdir, dst_dir)
        ftp_upload(ftp_host, src_path, dst_path)
