We are using AngularJS for the frontend and Django on the backend. Whenever a request comes from the front end, the passenger_wsgi file receives it, processes it, and returns the expected response.
However, passenger_wsgi keeps calling itself recursively, in a loop that spawns endless processes and overloads the server.
I would appreciate your comments and a fix. Below is the current passenger_wsgi.
import os
import sys

import django.core.handlers.wsgi
from django.core.wsgi import get_wsgi_application

# Set up paths and environment variables
sys.path.append(os.getcwd())
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'

# Set script name for the PATH_INFO fix below
SCRIPT_NAME = os.getcwd()


class PassengerPathInfoFix(object):
    """
    Sets PATH_INFO from REQUEST_URI because Passenger doesn't provide it.
    """
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        from urllib.parse import unquote
        environ['SCRIPT_NAME'] = SCRIPT_NAME

        request_uri = unquote(environ['REQUEST_URI'])
        script_name = unquote(environ.get('SCRIPT_NAME', ''))
        offset = request_uri.startswith(script_name) and len(environ['SCRIPT_NAME']) or 0
        environ['PATH_INFO'] = request_uri[offset:].split('?', 1)[0]

        return self.app(environ, start_response)


# Set the application
application = get_wsgi_application()
application = PassengerPathInfoFix(application)
I have tried changing the last two lines. In that case passenger_wsgi is not able to serve the request and the frontend receives a 500 Server Error.
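For comparison, a stripped-down passenger_wsgi.py without the PATH_INFO wrapper would look roughly like the sketch below. It is not our exact setup; it assumes passenger_wsgi.py sits in the directory that contains the mysite package, and it bases the path on __file__ rather than os.getcwd(), since the working directory under Passenger is not guaranteed to be the application directory:

import os
import sys

# Assumption: this file lives next to the "mysite" package.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if BASE_DIR not in sys.path:
    sys.path.insert(0, BASE_DIR)

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')

from django.core.wsgi import get_wsgi_application

# Plain Django WSGI application, no extra middleware wrapper.
application = get_wsgi_application()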
Related
If I run the Flask app directly, I get my environment variables; when I run it through WSGI, I do not get the system variables.
wsgi.py
from myproject import app
if __name__ == "__main__":
    app.run()
myproject.ini
[uwsgi]
module = wsgi:app
master = true
processes = 5
#protocol = httpa
protocol = http
socket = 0.0.0.0:8443
#shared-socket = 0.0.0.0:8443
buffer-size=32768
#chmod-socket = 660
#vacuum = true
#https = =0,foobar.crt,foobar.key,HIGH
die-on-term = true
enable-threads = true
vacuum = true
req-logger = file:/var/log/uwsgi/app/cart-req.log
logger = file:/var/log/uwsgi/app/cart-err.log
myproject.py
import logging, re, subprocess, json, hashlib, os, jwt, mysql.connector, datetime
from flask import Flask, request, jsonify
from flask import current_app
from flask_restful import Resource, Api
import socket, logging, sys, os, pkgutil, importlib, inspect, logging.handlers
from importlib import import_module
from logging.config import dictConfig


class GetInput(Resource):
    def get(self):
        output = os.environ.get("MYSYSVAR")
        if output is None:
            return {'message': 'System environment variable not found'}, 404
        return {'user': output}, 200


cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None

app = Flask(__name__)
api = Api(app)
api.add_resource(GetInput, '/getinput')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8443, debug=True)
This is Linux (CentOS). I set the environment variable like below.
Added an entry in /etc/profile:
export MYSYSVAR=4
I also tried adding an entry in the file below:
/etc/environment
MYSYSVAR=4
source /etc/environment
I am not sure why it does not work when I run through the WSGI gateway, while it works if I run myproject.py directly.
Note: built-in system variables like USER work.
Can anyone help me fix this?
You need to set the environment variable in the virtual host, because variables exported in shell profiles are not passed to the WSGI process. For example:
<VirtualHost *:443>
...
SetEnv MYSYSVAR 4
...
</VirtualHost>
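Since the app in the question is served through uWSGI (myproject.ini above), another option is to set the variable directly in the uWSGI ini file with the env option, which injects it into every worker. A sketch based on the ini from the question (the value 4 mirrors the export shown above):

[uwsgi]
module = wsgi:app
master = true
processes = 5
protocol = http
socket = 0.0.0.0:8443
; pass the variable to all workers; adjust the name/value as needed
env = MYSYSVAR=4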
I have an app that converts an audio file to text, using Flask and Flask-SocketIO. It works perfectly when I run it with "python run.py", but when I run it with "gunicorn -k eventlet -b 0.0.0.0:5000 run:app" it stops at the point where audio.py calls the Google Speech-to-Text API.
Here is the current code.
run.py:
from ats import socketio, app, db
if __name__ == '__main__':
    db.create_all()
    socketio.run(app, host='0.0.0.0', port=5001, debug=True)
__init__.py
import logging, json
from flask import Flask, jsonify, render_template, request
from flask_socketio import SocketIO, emit, send
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow

app = Flask(__name__, instance_relative_config=True, static_folder="templates/static", template_folder="templates")

# Create db instance
db = SQLAlchemy(app)
ma = Marshmallow(app)


@app.route('/')
def index():
    return render_template('index.html')


# import models
from ats import models

# set up CORS
CORS(app)

socketio = SocketIO(app, cors_allowed_origins='*', async_mode='eventlet')

# import blueprints
from ats.product.product import product_blueprint

# register blueprints
app.register_blueprint(product_blueprint, url_prefix='/api/product')

from ats import error_handlers
product.py
import os
import math
import eventlet
from os.path import join
from flask import Blueprint, request, jsonify, abort
from flask_socketio import emit
from ats.utils import audio as AUDIO
# Note: FILE (upload helpers), Audio (the model) and duration are defined
# elsewhere in the project and are not shown here.

product_blueprint = Blueprint('product', __name__)


@product_blueprint.route('/add', methods=['post'])
def addProduct():
    try:
        data = request.form
        foldername = data['name']
        scriptFile = request.files['script']
        audioFile = request.files['audio']

        # save the script and audio file to uploads folder
        FILE.createFolder(foldername)
        FILE.save(foldername, scriptFile)
        FILE.save(foldername, audioFile)

        # list the files in the uploads
        audioFiles = FILE.getAudioFileList(foldername)
        fileCount = len(audioFiles)
        currentFile = 1

        # ============ speech to text =============
        for file in audioFiles:
            recognizedText = AUDIO.convert(foldername, file)

            # save to database
            newAudio = {
                'name': file,
                'recognizedText': recognizedText,
                'length': duration,
            }
            Audio.add(newAudio)

            # emit event to update the client about the progress
            percent = math.floor((currentFile / float(fileCount)) * 100)
            emit('upload_progress', {'data': percent}, room=data['sid'], namespace='/')
            eventlet.sleep()

            currentFile += 1

        # Delete the files in uploads folder
        FILE.delete(foldername)

        return jsonify({'data': None, 'message': 'Product was added.', 'success': True}), 200
    except Exception as e:
        abort(500, str(e))
audio.py
import io
import os

from ats import app
# Imports the Google Cloud client library
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types

# Instantiates a client
client = speech.SpeechClient()


def convert(foldername, filename):
    try:
        file = os.path.join(app.config['UPLOAD_FOLDER'], foldername, filename)

        # Loads the audio into memory
        with io.open(file, 'rb') as audio_file:
            content = audio_file.read()

        audio = types.RecognitionAudio(content=content)
        config = types.RecognitionConfig(
            encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
            sample_rate_hertz=48000,
            language_code='ja-JP')

        # Call speech recognition on the audio file
        response = client.recognize(config, audio)  # The code stops here, which results in a worker timeout in gunicorn
        return response
    except Exception as e:
        raise e
I've been searching for a solution for almost a week but I still couldn't find one. Thank you for your help, guys.
When you run your application directly with python run.py, no timeout is applied: the application takes whatever time it needs to process. When you run it under Gunicorn, however, the default timeout is 30 seconds, so you get a timeout error whenever the application does not respond within 30 seconds. To avoid this, increase Gunicorn's default timeout by adding --timeout <time-in-seconds>.
The following command sets the timeout to 10 mins
gunicorn -k eventlet -b 0.0.0.0:5000 --timeout 600 run:app
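If you prefer not to lengthen the command line, the same settings can go in a Gunicorn configuration file and be passed with -c (a sketch; the values simply mirror the command above):

# gunicorn.conf.py
bind = "0.0.0.0:5000"
worker_class = "eventlet"
timeout = 600  # seconds before a silent worker is killed and restarted

Then start the server with: gunicorn -c gunicorn.conf.py run:app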
It's working now, by running it with uWSGI instead of Gunicorn. Here are the uWSGI config, the systemd service, and the nginx configuration.
ats.ini
[uwsgi]
module = wsgi:app
master = true
processes = 1
socket = ats.sock
chmod-socket = 660
vacuum = true
die-on-term = true
/etc/systemd/system/ats.service
[Unit]
Description=uWSGI instance to serve ats
After=network.target
[Service]
User=ubuntu
Group=www-data
WorkingDirectory=/home/user/ats
Environment="PATH=/home/user/ats/env/bin"
ExecStart=/home/user/ats/env/bin/uwsgi --ini ats.ini --gevent 100
[Install]
WantedBy=multi-user.target
nginx
server {
    listen 80;
    server_name <ip_address or domain>;

    access_log /var/log/nginx/access.log;

    location / {
        include uwsgi_params;
        uwsgi_pass unix:/home/user/ats/ats.sock;
        proxy_set_header Connection "Upgrade";
        client_max_body_size 200M;
    }

    location /socket.io {
        include uwsgi_params;
        uwsgi_pass unix:/home/user/ats/ats.sock;
        proxy_set_header Connection "Upgrade";
    }
}
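The ats.ini above points at module = wsgi:app, and that wsgi.py is not shown; a minimal version for this layout might look roughly like the following (a sketch only, assuming the ats package from the question):

# Hypothetical wsgi.py matching "module = wsgi:app" in ats.ini.
from ats import app, socketio  # importing ats also wires Flask-SocketIO to app

if __name__ == "__main__":
    # Only used when run directly, not under uWSGI.
    socketio.run(app)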
Thank you guys
The Google Cloud Python libraries had a conflict with gevent. I found out from this thread that in order for them to work together, you need to add the following at the beginning of __init__.py:
from gevent import monkey
monkey.patch_all()
import grpc.experimental.gevent as grpc_gevent
grpc_gevent.init_gevent()
I met this problem today too, and finally found that the bug was caused by a proxy setting. At first I had set my proxy to an empty string:
os.environ['http_proxy'] = ""
os.environ['https_proxy'] = ""
and I got a timeout error in the request. After I commented out that code, it worked:
# os.environ['http_proxy'] = ""
# os.environ['https_proxy'] = ""
I think this is not about the Gunicorn default timeout setting; it is about the system proxy setting.
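If the proxy variables need to be cleared for the process, a safer approach than assigning empty strings is to remove them entirely (a small sketch, not from the original answer):

import os

# Remove proxy settings for this process if present; pop() is a no-op otherwise.
os.environ.pop('http_proxy', None)
os.environ.pop('https_proxy', None)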
I developed a Flask application on localhost (running Python 3). It works, but when transferred to my shared hosting account (running Python 2), it doesn't. I fixed all the issues related to the Python versions, but the session is not working: its value does not persist between requests.
I tried to recreate the problem with simpler code (test.py); the commented-out part is how the session is configured in my application:
import sys
sys.path.insert(0, '/home/user_name/public_html')

from flask import Flask, request, session
from flask.ext.session import Session
from tempfile import mkdtemp
from cs50 import SQL
from constants import *

app = Flask(__name__)

#app.config["SESSION_TYPE"] = "filesystem"
#app.config["SESSION_PERMANENT"] = False
#app.config["SESSION_FILE_DIR"] = mkdtemp()
Session(app)


@app.route('/set/')
def set():
    session['key'] = 'value'
    return 'ok'


@app.route('/get/')
def get():
    return "{}".format(session.get('key'))
If you go to /set/, you will see ok. But on /get/ you will see None.
And here's my CGI file (which I need only for shared hosting, not for localhost):
#!/home/user_name/public_html/cgi-bin/flask/bin/python

import sys
sys.path.insert(0, '/home/user_name/public_html')

# Enable CGI error reporting
import cgitb
cgitb.enable()

import os
from wsgiref.handlers import CGIHandler

app = None
try:
    import test
    app = test.app
except Exception, e:
    print "Content-type: text/html"
    print
    cgitb.handler()
    exit()

#os.environ['SERVER_PORT'] = '80'
#os.environ['REQUEST_METHOD'] = 'POST'


class ScriptNameStripper(object):
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        environ['SCRIPT_NAME'] = ''
        return self.app(environ, start_response)


app = ScriptNameStripper(app)

try:
    CGIHandler().run(app)
except Exception, e:
    print "Content-type: text/html"
    print
    cgitb.handler()
    exit()
In case .htaccess file is needed:
RewriteEngine On
RewriteCond %{REQUEST_FILENAME} !-f
RewriteRule ^(.*)$ /cgi-bin/mas.cgi/$1 [L]
Googling didn't help, and other similar questions on Stack Overflow don't fix it. Any solution/help is welcome. Thanks!
I am still not sure whether the session was being written or not (as @Danila Ganchar pointed out), but commenting out only the third configuration line solved the issue.
So the changes made in test.py are:
app.config["SESSION_TYPE"] = "filesystem"
app.config["SESSION_PERMANENT"] = False
#app.config["SESSION_FILE_DIR"] = mkdtemp()
I guess mkdtemp() wasn't working the way it did on localhost: under CGI a new process (and therefore a new temporary directory) is created for every request, so the session files written by one request are never seen by the next.
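An alternative that should also work is pointing SESSION_FILE_DIR at a fixed, writable directory instead of mkdtemp(), so every request sees the same session files. The path below is just an example and must exist and be writable by the web server user:

app.config["SESSION_TYPE"] = "filesystem"
app.config["SESSION_PERMANENT"] = False
# Hypothetical fixed path instead of mkdtemp()
app.config["SESSION_FILE_DIR"] = "/home/user_name/flask_session"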
Hi, I am trying to implement the behave test framework in my Django Python app. However, I am not sure what the problem is, and I keep getting connection refused.
Following is the content of features/environment.py:
import os
import django
import urlparse

os.environ['DJANGO_SETTINGS_MODULE'] = 'tilesproj.settings'
django.setup()


def before_all(context):
    from django.test.runner import DiscoverRunner
    context.runner = DiscoverRunner()

    import wsgi_intercept
    from django.core.handlers.wsgi import WSGIHandler
    host = context.host = 'localhost'
    port = context.port = 8000

    from django.core.wsgi import get_wsgi_application
    from wsgiref.simple_server import make_server
    application = get_wsgi_application()
    wsgi_intercept.add_wsgi_intercept(host, port, lambda: application)

    import mechanize
    context.browser = mechanize.Browser()

    def browser_url(url):
        return urlparse.urljoin('http://%s:%d/' % (host, port), url)
    context.browser_url = browser_url

    from BeautifulSoup import BeautifulSoup

    def parse_soup():
        r = context.browser.response()
        html = r.read()
        r.seek(0)
        return BeautifulSoup(html)
    context.parse_soup = parse_soup


def before_scenario(context, scenario):
    context.runner.setup_test_environment()
    context.old_db_config = context.runner.setup_databases()


def after_scenario(context, scenario):
    context.runner.teardown_databases(context.old_db_config)
    context.runner.teardown_test_environment()
I am trying to figure out how to bootstrap my Django app when the behave script is run so I can test my web app.
You should try the example Django/Behave configuration from the website. Essentially you use a fake browser to perform requests that never go through a real server; your connections are refused because there is no real server to connect to.
So you should get the mechanize browser from wsgi_intercept instead of from mechanize directly:
### Set up the Mechanize browser.
from wsgi_intercept import mechanize_intercept
# MAGIC: All requests made by this monkeypatched browser to the magic
# host and port will be intercepted by wsgi_intercept via a
# fake socket and routed to Django's WSGI interface.
browser = context.browser = mechanize_intercept.Browser()
I'm trying to create tests for a Tornado code base I'm picking up. I get the project to run fine but the first test I've written is getting a connection refused error.
Here's the code:
import unittest, os, os.path, sys, urllib
import tornado.options
from tornado.options import options
from tornado.testing import AsyncHTTPTestCase

APP_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(APP_ROOT, '..'))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))

from main import Application

app = Application()


def clear_db(app=None):
    os.system("mysql -u user --password=pw --database=testdb < %s" % (os.path.join(APP_ROOT, 'db', 'schema.sql')))


class TestHandlerBase(AsyncHTTPTestCase):
    def setUp(self):
        clear_db()
        super(TestHandlerBase, self).setUp()

    def get_app(self):
        return app

    def get_http_port(self):
        return 5000


class TestRootHandler(TestHandlerBase):
    def test_redirect(self):
        response = self.fetch(
            '/',
            method='GET',
            follow_redirects=False)
        print response
        self.assertTrue(response.headers['Location'].endswith('/login'))
This is the response I get:
HTTPResponse(_body=None, buffer=None, code=599,
effective_url='http://localhost:5000/',
error=HTTPError('HTTP 599: [Errno 61] Connection refused',),
headers={}, reason='Unknown',
request=<tornado.httpclient.HTTPRequest object at 0x10c363510>,
request_time=0.01304006576538086, time_info={})
Any idea on what might be causing the error? Is there a step I'm missing to get everything running for the test? Thanks!!!
Don't override get_http_port. A new HTTP server with a new port is set up for each test, so it won't be 5000 every time, even if that's what you have in your settings.
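Applied to the test code above, that means dropping the get_http_port override and letting self.fetch() talk to whatever port the test case binds. A sketch of the adjusted base class:

class TestHandlerBase(AsyncHTTPTestCase):
    def setUp(self):
        clear_db()
        super(TestHandlerBase, self).setUp()

    def get_app(self):
        # Return the Application() instance created above;
        # AsyncHTTPTestCase starts it on a free port chosen per test run.
        return app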
I agree with the answer by Cole Maclean
If you need to use a custom base URL, override the following method of AsyncHTTPTestCase:

def get_url(self, path):
    url = 'http://localhost:8080' + path
    return url

With this override, the tests will use http://localhost:8080 as the base URL.