Python wrapper to execute two commands

I am trying to run two commands using Python on a remote server. Although the
function main() executes, only the first command runs; the second never does. Can somebody help me with this?
#!/usr/bin/python
import sys
import shlex, subprocess

sut = sys.argv[1]
pp = sys.argv[2]
sut_adapter = sys.argv[3]
pp_adapter = sys.argv[4]
switch_ip = sys.argv[5]
switch = sys.argv[6]
node_type = sys.argv[7]
hmc_name = sys.argv[8]
filename = sys.argv[1] + "_" + sys.argv[2] + ".p.config"

args_str = "/home/raymond/config_gen.pl {} {} {} {} {} {} {} {}".format(sut, pp, sut_adapter, pp_adapter, switch_ip, switch, node_type, hmc_name)
args_str2 = "/framework/scripts/ts//ts /home/NDD/fvndd_dedicated.p -t standard_frames_dedicated -c 1 -g /Jenkins/config/{}".format(filename)

def test_run():
    args2 = shlex.split(args_str2)
    pipe = subprocess.Popen(args2, bufsize=-1, stdin=subprocess.PIPE)

def main():
    args = shlex.split(args_str)
    pipe = subprocess.Popen(args, bufsize=-1, stdin=subprocess.PIPE)
    test_run()

if __name__ == "__main__":
    main()
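One likely cause: subprocess.Popen returns immediately without waiting for the command to finish, so the second command is launched before config_gen.pl has (presumably) written the config file it needs. A minimal sketch of serializing the two commands with wait(), reusing args_str and args_str2 from the script above; the dependency between the two commands is an assumption from the question:

import shlex
import subprocess

def run_sequential(cmd1, cmd2):
    # Popen only starts the process; wait() blocks until it exits.
    first = subprocess.Popen(shlex.split(cmd1))
    if first.wait() != 0:
        raise SystemExit("first command failed with exit code %d" % first.returncode)
    second = subprocess.Popen(shlex.split(cmd2))
    return second.wait()

# e.g. run_sequential(args_str, args_str2) using the strings built above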

Related

How can I run the sphinxtrain script?

I'm using Sphinx to build my own model for voice recognition. I've followed the tutorial step by step and everything works fine until the point where I should run the sphinxtrain Python script (whose role is to execute a set of Perl files through the terminal in the normal case). For me, though, the program only opens the files one by one in the chosen editor without executing them! (Watching other tutorial videos, the code below works normally.)
The code of the trainer:
#!/usr/bin/python
from __future__ import print_function
import getopt, sys, os

training_basedir = ""
sphinxbinpath = ""
sphinxpath = ""

def find_paths():
    global training_basedir
    global sphinxbinpath
    global sphinxpath
    # Find the location of the files, it can be libexec or lib or lib64
    currentpath = os.path.dirname(os.path.realpath(__file__))
    sphinxbinpath = os.path.realpath(currentpath + "/../libexec/sphinxtrain")
    if os.path.exists(currentpath + "/../lib/sphinxtrain/bw"):
        sphinxbinpath = os.path.realpath(currentpath + "/../lib/sphinxtrain/bw")
    if os.path.exists(currentpath + "/../bin/Release/Win32"):
        sphinxbinpath = os.path.realpath(currentpath + "/../bin/Release/Win32")
    # Find the location for the libraries
    sphinxpath = os.path.realpath(currentpath + "/../lib/sphinxtrain")
    if os.path.exists(currentpath + "/../lib64/sphinxtrain/scripts/00.verify"):
        sphinxpath = os.path.realpath(currentpath + "/../lib64/sphinxtrain")
    if os.path.exists(currentpath + "/../scripts/00.verify"):
        sphinxpath = os.path.realpath(currentpath + "/..")
    if not (os.path.exists(sphinxbinpath + "/bw") or os.path.exists(sphinxbinpath + "/bw.exe")):
        print("Failed to find sphinxtrain binaries. Check your installation")
        exit(1)
    # Perl scripts want forward slashes
    training_basedir = os.getcwd().replace('\\', '/')
    sphinxpath = sphinxpath.replace('\\', '/')
    sphinxbinpath = sphinxbinpath.replace('\\', '/')
    print("Sphinxtrain path:", sphinxpath)
    print("Sphinxtrain binaries path:", sphinxbinpath)

def setup(task):
    if not os.path.exists("etc"):
        os.mkdir("etc")
    print("Setting up the database " + task)
    out_cfg = open("./etc/sphinx_train.cfg", "w")
    for line in open(sphinxpath + "/etc/sphinx_train.cfg", "r"):
        line = line.replace("___DB_NAME___", task)
        line = line.replace("___BASE_DIR___", training_basedir)
        line = line.replace("___SPHINXTRAIN_DIR___", sphinxpath)
        line = line.replace("___SPHINXTRAIN_BIN_DIR___", sphinxbinpath)
        out_cfg.write(line)
    out_cfg.close()
    out_cfg = open("etc/feat.params", "w")
    for line in open(sphinxpath + "/etc/feat.params", "r"):
        out_cfg.write(line)
    out_cfg.close()

steps = [
    "000.comp_feat/slave_feat.pl",
    "00.verify/verify_all.pl",
    "0000.g2p_train/g2p_train.pl",
    "01.lda_train/slave_lda.pl",
    "02.mllt_train/slave_mllt.pl",
    "05.vector_quantize/slave.VQ.pl",
    "10.falign_ci_hmm/slave_convg.pl",
    "11.force_align/slave_align.pl",
    "12.vtln_align/slave_align.pl",
    "20.ci_hmm/slave_convg.pl",
    "30.cd_hmm_untied/slave_convg.pl",
    "40.buildtrees/slave.treebuilder.pl",
    "45.prunetree/slave.state-tying.pl",
    "50.cd_hmm_tied/slave_convg.pl",
    "60.lattice_generation/slave_genlat.pl",
    "61.lattice_pruning/slave_prune.pl",
    "62.lattice_conversion/slave_conv.pl",
    "65.mmie_train/slave_convg.pl",
    "90.deleted_interpolation/deleted_interpolation.pl",
    "decode/slave.pl",
]

def run_stages(stages):
    for stage in stages.split(","):
        for step in steps:
            name = step.split("/")[0].split(".")[-1]
            if name == stage:
                ret = os.system(sphinxpath + "/scripts/" + step)
                if ret != 0:
                    exit(ret)

def run_from(stage):
    found = False
    for step in steps:
        name = step.split("/")[0].split(".")[-1]
        if name == stage or found:
            found = True
            ret = os.system(sphinxpath + "/scripts/" + step)
            if ret != 0:
                exit(ret)

def run():
    print("Running the training")
    for step in steps:
        ret = os.system(sphinxpath + "/scripts/" + step)
        if ret != 0:
            exit(ret)

def usage():
    print("")
    print("Sphinxtrain processes the audio files and creates an acoustic model ")
    print("for CMUSphinx toolkit. The data needs to have a certain layout ")
    print("See the tutorial http://cmusphinx.sourceforge.net/wiki/tutorialam ")
    print("for details")
    print("")
    print("Usage: sphinxtrain [options] <command>")
    print("")
    print("Commands:")
    print("     -t <task> setup - copy configuration into database")
    print("     [-s <stage1,stage2,stage3>] [-f <stage>] run - run the training or just selected stages")

def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ht:s:f:", ["help", "task", "stages", "from"])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(-1)

    task = None
    stages = None
    from_stage = None
    for o, a in opts:
        if o in ("-t", "--task"):
            task = a
        if o in ("-f", "--from"):
            from_stage = a
        if o in ("-s", "--stages"):
            stages = a
        if o in ("-h", "--help"):
            usage()

    if len(args) == 0:
        usage()
        sys.exit(-1)

    command = args[0]

    find_paths()

    if command == "setup":
        if task == None:
            print("No task name defined")
            sys.exit(-1)
        setup(task)
    elif command == "run":
        if stages != None:
            run_stages(stages)
        elif from_stage != None:
            run_from(from_stage)
        else:
            run()
    else:
        run()

if __name__ == "__main__":
    main()
Another way to solve this is to be explicit about calling the Perl interpreter in your os.system call, i.e.:
ret = os.system('perl ' + sphinxpath + "/scripts/" + step)
Solved by associating Perl files (with the .pl extension) with perl.exe.
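On the same theme, a hedged sketch of making the interpreter explicit with subprocess instead of os.system; sphinxpath and step are assumed to be the same values used in run_stages() above:

import subprocess

def run_step(sphinxpath, step):
    # Call the Perl interpreter explicitly so execution does not depend
    # on the Windows file association for .pl files.
    ret = subprocess.call(["perl", sphinxpath + "/scripts/" + step])
    if ret != 0:
        exit(ret)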

How to print variables after using subprocess.call?

In my program, I use subprocess.call to run Apache Drill automatically.
After that, I make some queries and I would like to print the result.
Before automating the Apache Drill startup, I ran it manually and could print the results, but now I cannot.
My last try was to write to a file, but the behavior is the same: nothing is written.
My code is below.
import subprocess
from pydrill.client import PyDrill
import sys

writer = open('resultado.txt', 'w')
cmdmsg = subprocess.check_output("cd C:\\Users\\Tito\\Downloads\\apache-drill-1.14.0\\bin & sqlline -u \"jdbc:drill:zk=local\"", shell = True)
writer.write("teste de msg: " + str(cmdmsg))

drill = PyDrill(host='localhost', port=8047)
if drill.is_active:
    result = drill.query('''SELECT * FROM cp.`employee.json` LIMIT 3''')
    result2 = drill.query('''SELECT * FROM dfs.`/Users/Tito/Desktop/banco_gal.csv` LIMIT 5''')
    for tuple in result2:
        writer.write(tuple)
writer.close
I could solve this problem.
Three things are important here:
a) the Java virtual machine and the shell should be killed after Apache Drill has been started;
b) the Windows buffer is very short, so the result was never printed;
c) the Popen method is better than call for this task (a minimal illustration follows, and then the full script).
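As a minimal illustration of point (c), assuming the same sqlline.bat location used in the full script below, Drill can be started with Popen without blocking the Python script and without letting unread output pile up:

import os
import subprocess

# Start Drill's sqlline without blocking the Python script.
# check_output would wait for sqlline to exit (which it never does), and an
# unread PIPE could fill up, so the launcher output is discarded instead.
devnull = open(os.devnull, "w")
proc = subprocess.Popen('sqlline.bat -u "jdbc:drill:zk=local"',
                        cwd='C:/Users/Tito/Downloads/apache-drill-1.14.0/bin',
                        stdout=devnull, stderr=subprocess.STDOUT, shell=True)
print("Started Drill launcher with PID %d" % proc.pid)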
import os
import re
import subprocess
import traceback
from os import path
from pydrill.client import PyDrill

DRILL_HOST = 'localhost'
DRILL_PORT = 8047
JAVA_HOME = ''
JAVA_HOME = JAVA_HOME or ("JAVA_HOME" in os.environ and os.environ["JAVA_HOME"])
JPS_EXE_PATH = path.join(JAVA_HOME, 'bin', 'jps.exe')
KILL_DRILL_JVM = True

def getJvmPID(className):
    pid = None
    print('Running JPS cmd: %s' % JPS_EXE_PATH)
    jpsOutput = subprocess.check_output(JPS_EXE_PATH)
    jpsOutput = str(jpsOutput)
    se = re.search(r'([0-9]*\s*)' + className, jpsOutput)
    if se:
        pid = se.group(1)
    return pid

def killProcessByPID(pid):
    killCmd = ['taskkill', '/f', '/pid', str(pid)]
    print('Running taskkill cmd: %s' % killCmd)
    killCmdOuput = subprocess.check_output(killCmd, stderr=subprocess.STDOUT)
    print(str(killCmdOuput))

def killJvm(className):
    pid = getJvmPID(className)
    killProcessByPID(pid)

drillBinDir = 'C:/Users/Tito/Downloads/apache-drill-1.14.0/bin'
sqlinePath = path.join(drillBinDir, 'sqlline.bat')
drillCmdList = [sqlinePath, '-u', '"jdbc:drill:zk=local"']
drillCmdStr = " ".join(drillCmdList)
drillConAttempts = 2

while drillConAttempts > 0:
    drillConAttempts -= 1
    print("Connecting to drill on %s:%d..." % (DRILL_HOST, DRILL_PORT))
    try:
        drill = PyDrill(host=DRILL_HOST, port=DRILL_PORT)
    except:
        print("Exception when creating object")
        traceback.print_exc()
    print("Checking Drill connection...")
    try:
        if drill.is_active():
            print("Connected.")
            break
        elif drillConAttempts > 0:
            print("Could not connect to Drill. Trying to start Drill...")
            print("Running cmd '%s > %s'" % (drillCmdStr, os.devnull))
            devNull = open(os.devnull, "w")
            cmdProc = subprocess.Popen(drillCmdStr, cwd=drillBinDir, stdout=devNull, stderr=subprocess.STDOUT, shell=True)
            print("Started CMD process with PID %d" % (cmdProc.pid))
    except:
        print("Exception when checking connection")
        traceback.print_exc()

if drill.is_active():
    result = drill.query('''SELECT * FROM cp.`employee.json` LIMIT 3''')
    for resultTuple in result:
        print(resultTuple)

if KILL_DRILL_JVM:
    print('Killing Drill process...')
    killJvm('SqlLine')

How to run PSCP cmd window step in my Python script

I am running Hadoop MapReduce and other SSH commands from a Python script using the paramiko module (code can be seen here). Once the MapReduce job is complete, I run the getmerge step to get the output into a text file.
The problem is, I then have to open a cmd window and run PSCP to download the output.txt file from the HDFS environment to my computer. For example:
pscp xxxx#xx.xx.xx.xx:/nfs_home/appers/cnielsen/MROutput_121815_0.txt C:\Users\cnielsen\Desktop\MR_Test
How can I incorporate this pscp step into my script so that I don't have to open a cmd window to run this after my MapReduce and getmerge tasks are complete? I would like my script to be able to run the MR task, getmerge task, and then automatically save the MR output to my computer.
Here is my code.
I have solved this problem with the following code. The trick was to use the scp module and import SCPClient. See the scp_download(ssh) function below.
When the MapReduce job completes, the getmerge command is run, followed by the scp_download step.
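Distilled to its essentials, the SCP step amounts to the sketch below (the host, credentials, and paths are placeholders); the complete script follows.

import paramiko
from scp import SCPClient

ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('xx.xx.xx.xx', 22, 'username', 'password')

# Reuse the existing SSH transport for the file copy.
scp = SCPClient(ssh.get_transport())
scp.get('/nfs_home/appers/cnielsen/output.txt',
        r'C:\Users\cnielsen\Desktop\MR_Test')
scp.close()
ssh.close()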
import paramiko
from scp import SCPClient
import time

# Define connection info
host_ip = 'xx.xx.xx.xx'
user = 'xxxxxxxx'
pw = 'xxxxxxxx'
port = 22

# Paths
input_loc = '/nfs_home/appers/extracts/*/*.xml'
output_loc = '/user/lcmsprod/output/cnielsen/'
python_path = "/usr/lib/python_2.7.3/bin/python"
hdfs_home = '/nfs_home/appers/cnielsen/'
output_log = r'C:\Users\cnielsen\Desktop\MR_Test\MRtest011316_0.txt'

# File names
xml_lookup_file = 'product_lookups.xml'
mapper = 'Mapper.py'
reducer = 'Reducer.py'
helper_script = 'Process.py'
product_name = 'test1'
output_ref = 'test65'
target_file = 'test_011416_3.txt'

# ----------------------------------------------------
def createSSHClient(host_ip, port, user, pw):
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host_ip, port, user, pw)
    return client

# ----------------------------------------------------
def buildMRcommand(product_name):
    space = " "
    mr_command_list = ['hadoop', 'jar', '/share/hadoop/tools/lib/hadoop-streaming.jar',
                       '-files', hdfs_home+xml_lookup_file,
                       '-file', hdfs_home+mapper,
                       '-file', hdfs_home+reducer,
                       '-mapper', "'"+python_path, mapper, product_name+"'",
                       '-file', hdfs_home+helper_script,
                       '-reducer', "'"+python_path, reducer+"'",
                       '-input', input_loc,
                       '-output', output_loc+output_ref]
    MR_command = space.join(mr_command_list)
    print MR_command
    return MR_command

# ----------------------------------------------------
def unbuffered_lines(f):
    line_buf = ""
    while not f.channel.exit_status_ready():
        line_buf += f.read(1)
        if line_buf.endswith('\n'):
            yield line_buf
            line_buf = ""

# ----------------------------------------------------
def stream_output(stdin, stdout, stderr):
    writer = open(output_log, 'w')
    # Using line_buffer function
    for l in unbuffered_lines(stderr):
        e = '[stderr] ' + l
        print '[stderr] ' + l.strip('\n')
        writer.write(e)
    # gives full listing..
    for line in stdout:
        r = '[stdout]' + line
        print '[stdout]' + line.strip('\n')
        writer.write(r)
    writer.close()

# ----------------------------------------------------
def run_MapReduce(ssh):
    stdin, stdout, stderr = ssh.exec_command(buildMRcommand(product_name))
    stream_output(stdin, stdout, stderr)
    return 1

# ----------------------------------------------------
def run_list_dir(ssh):
    list_dir = "ls "+hdfs_home+" -l"
    stdin, stdout, stderr = ssh.exec_command(list_dir)
    stream_output(stdin, stdout, stderr)

# ----------------------------------------------------
def run_getmerge(ssh):
    getmerge = "hadoop fs -getmerge "+output_loc+output_ref+" "+hdfs_home+target_file
    print getmerge
    stdin, stdout, stderr = ssh.exec_command(getmerge)
    for line in stdout:
        print '[stdout]' + line.strip('\n')
    time.sleep(1.5)
    return 1

# ----------------------------------------------------
def scp_download(ssh):
    scp = SCPClient(ssh.get_transport())
    print "Fetching SCP data.."
    scp.get(hdfs_home+target_file, local_dir)
    print "File download complete."

# ----------------------------------------------------
def main():
    # Get the ssh connection
    global ssh
    ssh = createSSHClient(host_ip, port, user, pw)
    print "Executing command..."

    # Command list
    ##run_list_dir(ssh)
    ##run_getmerge(ssh)
    ##scp_download(ssh)

    # Run MapReduce
    MR_status = 0
    MR_status = run_MapReduce(ssh)
    if MR_status == 1:
        gs = 0
        gs = run_getmerge(ssh)
        if gs == 1:
            scp_download(ssh)

    # Close ssh connection
    ssh.close()

if __name__ == '__main__':
    main()
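If the scp module were not an option, an alternative (not the approach used above) would be to call pscp itself from the script via subprocess; a rough sketch, assuming pscp.exe is on the PATH and using the paths from the question:

import subprocess

# Hypothetical wrapper around the manual PSCP step from the question.
def pscp_download(remote_spec, local_dir, password):
    cmd = ['pscp', '-pw', password, remote_spec, local_dir]
    ret = subprocess.call(cmd)
    if ret != 0:
        raise RuntimeError('pscp failed with exit code %d' % ret)

# pscp_download('xxxx@xx.xx.xx.xx:/nfs_home/appers/cnielsen/MROutput_121815_0.txt',
#               r'C:\Users\cnielsen\Desktop\MR_Test', pw)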

Run a Python webserver as a Windows service

I have server and console scripts which keep listening on a port for console and server requests.
In a UNIX environment I made both the server and console scripts continuously running daemons, which keeps them listening on their ports.
Is there any way in Windows to keep them running like UNIX daemons? I also want them to come up on reboot (they should be auto-started on reboot).
I read about Windows services and followed the code written here, but I am getting a 404 error on my web page.
__version__ = "0.4"
__all__ = ["RequestHandler"]
import atexit
import BaseHTTPServer
import CGIHTTPServer
import copy
import os
import select
import SimpleHTTPServer
import sys
import time
import threading
import urllib
from signal import SIGTERM
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
pass
class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
have_fork = hasattr(os, 'fork')
have_popen2 = hasattr(os, 'popen2')
have_popen3 = hasattr(os, 'popen3')
rbufsize = 0
def do_POST(self):
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
splitpath = _url_collapse_path_split(self.path)
if splitpath[0] in self.cgi_directories:
self.cgi_info = splitpath
return True
return False
cgi_directories = ['/cgi-bin', '/htbin']
def is_executable(self, path):
return executable(path)
def is_python(self, path):
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
path = self.path
dir, rest = self.cgi_info
i = path.find('/', len(dir) + 1)
while i >= 0:
nextdir = path[:i]
nextrest = path[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
i = path.find('/', len(dir) + 1)
else:
break
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%r)" % scriptname)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%r)" %
scriptname)
return
ispy = self.is_python(scriptname)
if not ispy:
if not (self.have_fork or self.have_popen2 or self.have_popen3):
self.send_error(403, "CGI script is not a Python script (%r)" %
scriptname)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = {}
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.getheader("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = base64.decodestring(authorization[1])
except binascii.Error:
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.getheader('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
env.setdefault(k, "")
os.environ.update(env)
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, os.environ)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
else:
# Non Unix - use subprocess
import subprocess
cmdline = [scriptfile]
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = [interp, '-u'] + cmdline
if '=' not in query:
cmdline.append(query)
self.log_message("command: %s", subprocess.list2cmdline(cmdline))
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
p = subprocess.Popen(cmdline,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE
)
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
else:
data = None
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
stdout, stderr = p.communicate(data)
self.wfile.write(stdout)
if stderr:
self.log_error('%s', stderr)
status = p.returncode
if status:
self.log_error("CGI script exit status %#x", status)
else:
self.log_message("CGI script exited OK")
def _url_collapse_path_split(path):
path_parts = []
for part in path.split('/'):
if part == '.':
path_parts.append('')
else:
path_parts.append(part)
# Filter out blank non trailing parts before consuming the '..'.
path_parts = [part for part in path_parts[:-1] if part] + path_parts[-1:]
if path_parts:
tail_part = path_parts.pop()
else:
tail_part = ''
head_parts = []
for part in path_parts:
if part == '..':
head_parts.pop()
else:
head_parts.append(part)
if tail_part and tail_part == '..':
head_parts.pop()
tail_part = ''
return ('/' + '/'.join(head_parts), tail_part)
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return False
return st.st_mode & 0111 != 0
Handler = RequestHandler
PORT = 7998
ADDRESS = "0.0.0.0"
httpd = ThreadedHTTPServer((ADDRESS, PORT), Handler)
print "serving at %s:%s" % (ADDRESS, PORT)
import os
import SocketServer
import BaseHTTPServer
import SimpleHTTPServer
import xmlrpclib
import SimpleXMLRPCServer
import socket
import httplib
import inspect

import win32service
import win32serviceutil
import win32api
import win32con
import win32event
import win32evtlogutil

class XMLRPCServerService(win32serviceutil.ServiceFramework):
    _svc_name_ = "XMLRPCServerService"
    _svc_display_name_ = "XMLRPCServerService"
    _svc_description_ = "Tests Python service framework by receiving and echoing messages over a named pipe"

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)

    def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)
        win32event.SetEvent(self.hWaitStop)

    def SvcDoRun(self):
        import servicemanager
        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE, servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ''))
        self.timeout = 100
        while 1:
            rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
            if rc == win32event.WAIT_OBJECT_0:
                servicemanager.LogInfoMsg("XMLRPCServerService - STOPPED")
                break
            else:
                httpd.serve_forever()
        servicemanager.LogInfoMsg("XMLRPCServerService - is alive and well")

def ctrlHandler(ctrlType):
    return True

if __name__ == '__main__':
    win32api.SetConsoleCtrlHandler(ctrlHandler, True)
    win32serviceutil.HandleCommandLine(XMLRPCServerService)
Any clues where I am going wrong? Or a good way to implement it (maybe without using a service)?
Strict note:
The solution must be in Python 2.6 (project requirement).
Updates:
I saw something weird in the log when running python service.py debug:
127.0.0.1 - - [04/Apr/2014 09:41:04] command: C:\Python27\Lib\site-packages\win32\pythonservice.exe -u C:\CONSOLE-CGI\cgi-bin\login.py ""
Why is it executing the CGI script using pythonservice.exe?
What am I missing here?
More updates:
Code snippet from the daemon process Python script:
# Non Unix - use subprocess
import subprocess
cmdline = [scriptfile]
if self.is_python(scriptfile):
    # interp = sys.executable   # here it returns pythonservice.exe
    interp = "python.exe"       # if I hardcode it to python.exe all goes fine
    if interp.lower().endswith("w.exe"):
        # On Windows, use python.exe, not pythonw.exe
        interp = interp[:-5] + interp[-4:]
    cmdline = [interp, '-u'] + cmdline
Any clues why that is so?
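A hedged explanation: when the handler runs inside a pywin32 service, the hosting executable is pythonservice.exe, so sys.executable points at it instead of python.exe. A sketch of locating the interpreter without hardcoding the name (it assumes a standard Python installation layout):

import os
import sys

def find_python_exe():
    # Inside a pywin32 service sys.executable is pythonservice.exe,
    # so fall back to python.exe in the interpreter's install directory.
    exe = sys.executable
    if os.path.basename(exe).lower() != "python.exe":
        candidate = os.path.join(sys.exec_prefix, "python.exe")
        if os.path.exists(candidate):
            exe = candidate
    return exe

# interp = find_python_exe()
# cmdline = [interp, '-u'] + cmdline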
You may need to redirect all the output, since the Windows scheduler has some issues doing this in the pythonw case. The process does start properly, but nothing happens and the server does not respond unless stdout and stderr are redirected.
import http.server
import socketserver
import sys

PORT = 1234
Handler = http.server.SimpleHTTPRequestHandler

if __name__ == '__main__':
    sys.stdout = open('out.txt', 'w')
    sys.stderr = open('err.txt', 'w')
    with socketserver.TCPServer(("", PORT), Handler) as httpd:
        print("serving at port %d" % PORT, flush=True)
        httpd.serve_forever()

Why won't my simple CGI server handle AJAX POST requests?

I have created my own CGI Python server script (that serves on port 8000) by following a tutorial. The server works beautifully if I want to generate web pages from Python scripts or serve a native HTML page, BUT it doesn't work when I make an AJAX POST request.
If I make an AJAX request to the Python file aaa.py (using the JavaScript below), my server prints out the following error text:
Code 501, message Can only POST to CGI scripts
"POST /aaa.py HTTP/1.1" 501 -
What do you think I need to do to allow my Python CGI server to handle AJAX requests?
My CGI server:
__version__ = "0.4"
__all__ = ["CGIHTTPRequestHandler"]
import os
import sys
import urllib
import BaseHTTPServer
import SimpleHTTPServer
import select
class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Complete HTTP server with GET, HEAD and POST commands.
GET and HEAD also support running CGI scripts.
The POST command is *only* implemented for CGI scripts.
"""
# Determine platform specifics
have_fork = hasattr(os, 'fork')
have_popen2 = hasattr(os, 'popen2')
have_popen3 = hasattr(os, 'popen3')
# pretend we don't have these to force execution in process
have_fork = 0
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
"""Serve a POST request.
This is only implemented for CGI scripts.
"""
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
"""Version of send_head that support CGI scripts"""
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script.
Return a tuple (dir, rest) if self.path requires running a
CGI script, None if not. Note that rest begins with a
slash if it is not empty.
The default implementation tests whether the path
begins with one of the strings in the list
self.cgi_directories (and the next character is a '/'
or the end of the string).
"""
path = self.path
for x in self.cgi_directories:
i = len(x)
if path[:i] == x and (not path[i:] or path[i] == '/'):
self.cgi_info = path[:i], path[i+1:]
return True
return False
cgi_directories = ['/cgi-bin', '/htbin']
def is_executable(self, path):
"""Test whether argument path is an executable file."""
return executable(path)
def is_python(self, path):
"""Test whether argument path is a Python script."""
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
"""Execute a CGI script."""
dir, rest = self.cgi_info
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
# i = rest.find('/')
# if i >= 0:
# script, rest = rest[:i], rest[i:]
# else:
# script, rest = rest, ''
script = rest
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%s)" % `scriptname`)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%s)" %
`scriptname`)
return
ispy = self.is_python(scriptname)
if not ispy:
if not (self.have_fork or self.have_popen2 or self.have_popen3):
self.send_error(403, "CGI script is not a Python script (%s)" %
`scriptname`)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%s)" %
`scriptname`)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = {}
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
# XXX AUTH_TYPE
# XXX REMOTE_USER
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
if not self.have_fork:
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT'):
###, 'HTTP_COOKIE' -- removed by S.
env.setdefault(k, "")
# for key in env.keys():
# print key + " '" + env[key] + "'"
os.environ.update(env)
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, os.environ)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
elif self.have_popen2 or self.have_popen3:
# Windows -- use popen2 or popen3 to create a subprocess
import shutil
if self.have_popen3:
popenx = os.popen3
else:
popenx = os.popen2
cmdline = scriptfile
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = "%s -u \"%s\"" % (interp, cmdline)
if '=' not in query and '"' not in query:
cmdline = '%s "%s"' % (cmdline, query)
self.log_message("command: %s", cmdline)
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
files = popenx(cmdline, 'b')
fi = files[0]
fo = files[1]
if self.have_popen3:
fe = files[2]
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
fi.write(data)
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
fi.close()
shutil.copyfileobj(fo, self.wfile)
if self.have_popen3:
errors = fe.read()
fe.close()
if errors:
self.log_error('%s', errors)
sts = fo.close()
if sts:
self.log_error("CGI script exit status %#x", sts)
else:
self.log_message("CGI script exited OK")
else:
# Other O.S. -- execute script in this process
save_argv = sys.argv
save_stdin = sys.stdin
save_stdout = sys.stdout
save_stderr = sys.stderr
try:
try:
sys.argv = [scriptfile]
if '=' not in decoded_query:
sys.argv.append(decoded_query)
sys.stdout = self.wfile
sys.stdin = self.rfile
execfile(scriptfile, {"__name__": "__main__"})
finally:
sys.argv = save_argv
sys.stdin = save_stdin
sys.stdout = save_stdout
sys.stderr = save_stderr
except SystemExit, sts:
self.log_error("CGI script exit status %s", str(sts))
else:
self.log_message("CGI script exited OK")
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return False
return st.st_mode & 0111 != 0
def test(HandlerClass = CGIHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
SimpleHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
Code in aaa.py:
#!/usr/bin/env python
import cgitb; cgitb.enable()
import cgi
import os
print "Content-Type: text/html\n"
input_data = cgi.FieldStorage()
print "hello"
My AJAX/ Javascript:
function onTest( dest, params )
{
    var xmlhttp;
    if (window.XMLHttpRequest)
    {   // code for IE7+, Firefox, Chrome, Opera, Safari
        xmlhttp = new XMLHttpRequest();
    }
    else
    {   // code for IE6, IE5
        xmlhttp = new ActiveXObject("Microsoft.XMLHTTP");
    }
    xmlhttp.onreadystatechange = function()
    {
        if (xmlhttp.readyState == 4 && xmlhttp.status == 200)
        {
            document.getElementById( "bb" ).innerHTML = xmlhttp.responseText;
        }
    }
    xmlhttp.open("POST", dest, true);
    xmlhttp.setRequestHeader("Content-type", "application/x-www-form-urlencoded");
    xmlhttp.send( params );
}
When you use the Python HTTP CGI server, CGI scripts have to be under the subdirectory /cgi-bin (relative to your server script).
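Concretely, the request URL has to start with one of the entries in cgi_directories for is_cgi() to return True. A sketch of the layout and server setup, using the stock Python 2 CGIHTTPServer rather than the custom handler above; the file names follow the question:

# Directory layout (assumption):
#   server.py
#   cgi-bin/
#       aaa.py        <- the AJAX target moves here
#
# and the JavaScript call becomes onTest("/cgi-bin/aaa.py", params).

import BaseHTTPServer
import CGIHTTPServer

handler = CGIHTTPServer.CGIHTTPRequestHandler
handler.cgi_directories = ['/cgi-bin']   # the default also includes '/htbin'
httpd = BaseHTTPServer.HTTPServer(("", 8000), handler)
httpd.serve_forever()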
