I want to execute a Python script that closes all the JIRA tickets for a branch once that branch is merged into master. Can anyone please help me solve this problem?
from __future__ import with_statement
from jira import JIRA, JIRAError
from requests.exceptions import ConnectionError
import cProfile
import logging
import sys
import os
import shutil
import logging.handlers
jiraEnabled = True
dashes = "---------------------------------------------------------------------"
import contextlib
import subprocess
import re
import collections
import getpass
import traceback
import pprint
import pdb
import stat
import cookielib
import urllib2
import ConfigParser
import string
def main():
global username, password, loglevel, jiraCheckEnabled, url, allowed_states, check_assignee, check_state, disabled_on_branches
configure_logging(loglevel)
config_file = get_config_file("config.ini")
error_code = handle_pre_receive()
if error_code != 0:
logging.error("Hook failed please try later\n")
return error_code
# Performs the git "pre-receive" hook
def handle_pre_receive():
line = sys.stdin.read()
try:
(old_commit_id, new_commit_id, ref) = line.strip().split()
except ValueError:
logging.error("\n%s", dashes)
return -1
if new_commit_id == "0000000000000000000000000000000000000000":
logging.debug("Branch was deleted, going to skip commit")
return 0
if disabled_on_branch(git_get_branchname_from_ref(ref)):
return 0
commit_id_array = git_get_array_of_commit_ids(old_commit_id, new_commit_id)
if commit_id_array == None or len(commit_id_array)==0:
if old_commit_id == "0000000000000000000000000000000000000000":
logging.debug("Branch was created, going to skip commit processing")
return 0
logging.error("No new commits found!")
return -1
if jiraEnabled:
try:
jira = JIRA(url,basic_auth=(username,password))
except ConnectionError, e:
logging.error("Failed to connect to JIRA")
return 0
except JIRAError, e:
logging.error("JIRA has rejected connection” )
return 0;
else:
jira = None
def get_shell_cmd_output(cmd):
try:
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
return proc.stdout.read().rstrip('\n')
except KeyboardInterrupt:
logging.info("... interrupted")
except Exception, e:
logging.error("Failed trying to execute '%s'", cmd)
def disabled_on_branch(current_branchname):
logging.debug("Test if '%s' is disabled...", current_branchname)
if disabled_on_branches == None or string.strip(disabled_on_branches) == "":
logging.debug("All branches enabled")
return False
branchlist = string.split(disabled_on_branches, ',')
for branch in branchlist:
branch = string.strip(branch)
if current_branchname == branch:
logging.debug("Current branch '%s' is disabled", current_branchname)
return True
logging.debug("Current branch '%s' is enabled", current_branchname)
return False
def git_get_curr_branchname():
buf = get_shell_cmd_output("git branch --no-color")
# buf is a multiline output, each line containing a branch name
# the line that starts with a "*" contains the current branch name
m = re.search("^\* .*$", buf, re.MULTILINE)
if m == None:
return None
return buf[m.start()+2 : m.end()]
def git_get_branchname_from_ref(ref):
# "refs/heads/<branchname>"
if string.find(ref, "refs/heads") != 0:
logging.error("Invalid ref '%s'", ref)
sys.exit(-1)
return string.strip(ref[len("refs/heads/"):])
def git_get_commit_msg(commit_id):
return get_shell_cmd_output("git rev-list --pretty --max-count=1 " + commit_id)
#----------------------------------------------------------------------------
# python script entry point. Dispatches main()
if __name__ == "__main__":
cProfile.run('main()')
exit(0)
The handle_pre_receive method checks whether the branch is still enabled. If the branch is disabled, we have to close all the JIRA tickets related to that branch.
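For context, this is roughly what I expect the closing step to look like, assuming the JIRA issue keys can be parsed out of the commit messages; the key pattern and the transition names below are assumptions on my part and have not been verified against our JIRA workflow:
import re
ISSUE_KEY_RE = re.compile(r"[A-Z][A-Z0-9]+-\d+")  # e.g. PROJ-123 (assumed key format)
def close_issues_for_commits(jira, commit_id_array):
    # Close every JIRA issue referenced in the given commits (sketch).
    for commit_id in commit_id_array:
        commit_msg = git_get_commit_msg(commit_id)
        for issue_key in set(ISSUE_KEY_RE.findall(commit_msg)):
            issue = jira.issue(issue_key)
            # Pick the workflow transition that closes the issue; the exact
            # transition name depends on the JIRA workflow in use.
            for transition in jira.transitions(issue):
                if transition["name"].lower() in ("close issue", "close", "done"):
                    jira.transition_issue(issue, transition["id"])
                    logging.info("Closed %s for commit %s", issue_key, commit_id)
                    break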
I'm working on a project with IoT devices that are connected to a dotnet server hosted in the Azure cloud. I'm currently using for loops to read real-time data, but I want to read some real-time stats from the Redis database using Pandas. Can someone explain how I should get started?
I'm using the script below to read the stats, but I want to start using pandas.
import os
import re
import json
import traceback
from collections import Counter
import time
import datetime as dt
import redis
from tqdm import tqdm # taqadum (تقدّم) == progress
from jsonpointer import resolve_pointer as j_get
from jsonpointer import JsonPointerException
import pandas as pd
os.system("color 0c") # change console color to red
if False:
# x Redis
r = redis.Redis(host="****.redis.cache.windows.net",
port=***,
password="***",
ssl=True,)
else:
# y Redis
r = redis.Redis(host="***.redis.cache.windows.net",
port=****,
password="*****",
ssl=True,)
print(r.info())
print("Server started at: ", end="")
print(dt.datetime.now() - dt.timedelta(seconds=r.info()['uptime_in_seconds']))
print("Building pipe")
pipe = r.pipeline()
# for key in tqdm(r.scan_iter("MC:SessionInfo*")):
for key in tqdm(r.scan_iter("MC:SessionInfo*", count=2500)):
pipe.hgetall(key)
print("Executing pipe")
responses = pipe.execute()
print("Processing effluvia")
q = {}
k = {}
vac = {}  # voltage readings per serial (was missing and raised a NameError below)
first = True
last_contact = {}
for data in tqdm(responses):
try:
j = json.loads(data[b'LastStatusBody'])
serial = j['System']['Serial'].lower()
q[serial] = j
last_contact[serial] = time.time() - int(data[b'LastContact'])
# TODO: json searching sensibly!
vac[serial] = j['LiveA']['Unit']['Volatge_Vac']
except:
if first:
traceback.print_exc()
first = False
else:
pass
for key,value in fw_versions.items():
if value.split(',')[0]=="xx v1.0.0.0":
x_paired.append(key)
print(x_paired)
print("Total paired :", len(x_paired))`
Instead of the above procedure, I want to use Pandas to read the data more easily and build some charts for daily updates to the team.
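Roughly, this is the direction I have in mind, as a minimal sketch built from the parsed LastStatusBody payloads collected above; the column names are my own and not part of the existing script:
import pandas as pd

# q maps serial -> parsed LastStatusBody JSON and last_contact maps serial -> seconds,
# both built in the loop above.
records = []
for serial, j in q.items():
    records.append({
        "serial": serial,
        "last_contact_s": last_contact.get(serial),
        "voltage_vac": j.get("LiveA", {}).get("Unit", {}).get("Volatge_Vac"),
    })

df = pd.DataFrame.from_records(records).set_index("serial")
print(df.describe())                    # quick daily stats for the team update
# df["voltage_vac"].plot(kind="hist")   # example chart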
I serialize/deserialize to pyarrow or pickle and then use an additional key as metadata. This works across local, GCloud, AWS EB and Azure.
import pandas as pd
import pyarrow as pa
import redis, json, os, pickle
import ebutils
from logenv import logenv
from pandas.core.frame import DataFrame
from redis.client import Redis
from typing import (Union, Optional)
class mycache():
__redisClient:Redis
CONFIGKEY = "cacheconfig"
def __init__(self) -> None:
try:
ep = os.environ["REDIS_HOST"]
except KeyError:
if os.environ["HOST_ENV"] == "GCLOUD":
os.environ["REDIS_HOST"] = "redis://10.0.0.3"
elif os.environ["HOST_ENV"] == "EB":
os.environ["REDIS_HOST"] = "redis://" + ebutils.get_redis_endpoint()
elif os.environ["HOST_ENV"] == "AZURE":
#os.environ["REDIS_HOST"] = "redis://ignore:password#redis-sensorvenv.redis.cache.windows.net"
pass # should be set in azure env variable
elif os.environ["HOST_ENV"] == "LOCAL":
os.environ["REDIS_HOST"] = "redis://127.0.0.1"
else:
raise "could not initialise redis"
return # no known redis setup
#self.__redisClient = redis.Redis(host=os.environ["REDIS_HOST"])
self.__redisClient = redis.Redis.from_url(os.environ["REDIS_HOST"])
self.__redisClient.ping()
# get config as well...
self.config = self.get(self.CONFIGKEY)
if self.config is None:
self.config = {"pyarrow":True, "pickle":False}
self.set(self.CONFIGKEY, self.config)
self.alog = logenv.alog()
def redis(self) -> Redis:
return self.__redisClient
def exists(self, key:str) -> bool:
if self.__redisClient is None:
return False
return self.__redisClient.exists(key) == 1
def get(self, key:str) -> Union[DataFrame, str]:
keytype = "{k}.type".format(k=key)
valuetype = self.__redisClient.get(keytype)
if valuetype is None:
if (key.split(".")[-1] == "pickle"):
return pickle.loads(self.redis().get(key))
else:
ret = self.redis().get(key)
if ret is None:
return ret
else:
return ret.decode()
elif valuetype.decode() == str(pd.DataFrame):
# fallback to pickle serialized form if pyarrow fails
# https://issues.apache.org/jira/browse/ARROW-7961
try:
return pa.deserialize(self.__redisClient.get(key))
except pa.lib.ArrowIOError as err:
self.alog.warning("using pickle from cache %s - %s - %s", key, pa.__version__, str(err))
return pickle.loads(self.redis().get(f"{key}.pickle"))
except OSError as err:
if "Expected IPC" in str(err):
self.alog.warning("using pickle from cache %s - %s - %s", key, pa.__version__, str(err))
return pickle.loads(self.redis().get(f"{key}.pickle"))
else:
raise err
elif valuetype.decode() == str(type({})):
return json.loads(self.__redisClient.get(key).decode())
else:
return self.__redisClient.get(key).decode() # type: ignore
def set(self, key:str, value:Union[DataFrame, str]) -> None:
if self.__redisClient is None:
return
keytype = "{k}.type".format(k=key)
if str(type(value)) == str(pd.DataFrame):
self.__redisClient.set(key, pa.serialize(value).to_buffer().to_pybytes())
if self.config["pickle"]:
self.redis().set(f"{key}.pickle", pickle.dumps(value))
# issue should be transient through an upgrade....
# once switched off data can go away
self.redis().expire(f"{key}.pickle", 60*60*24)
elif str(type(value)) == str(type({})):
self.__redisClient.set(key, json.dumps(value))
else:
self.__redisClient.set(key, value)
self.__redisClient.set(keytype, str(type(value)))
if __name__ == '__main__':
os.environ["HOST_ENV"] = "LOCAL"
r = mycache()
rr = r.redis()
for k in rr.keys("cache*"):
print(k.decode(), rr.ttl(k))
print(rr.get(k.decode()))
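A minimal usage sketch of the cache above, assuming a local Redis instance is running; the key name and sample data are made up:
import os
import pandas as pd

os.environ["HOST_ENV"] = "LOCAL"   # select the local Redis branch of mycache
cache = mycache()

# Store a DataFrame; mycache serializes it with pyarrow (plus an optional pickle fallback).
df = pd.DataFrame({"serial": ["a1", "b2"], "voltage_vac": [230.1, 229.7]})
cache.set("session_stats", df)

# Read it back as a DataFrame and carry on with normal pandas operations.
restored = cache.get("session_stats")
print(restored.dtypes)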
I need a script that works like a cPanel checker, with more than one URL, where the URLs are stored in a txt file.
usage : python script.py list.txt
format in file list.txt : https://demo.cpanel.net:2083|democom|DemoCoA5620
This is my code, but it doesn't work. Can someone help me? Thanks.
import requests, sys
from multiprocessing.dummy import Pool as ThreadPool
from timeit import default_timer as timer  # timer() is used in Main() below
try:
with open(sys.argv[1], 'r') as f:
list_data = [line.strip() for line in f if line.strip()]
except IOError:
pass
def cpanel(url):
try:
data = {'user':'democom', 'pass':'DemoCoA5620'}
r = requests.post(url, data=data)
if r.status_code==200:
print "login success"
else:
print "login failed"
except:
pass
def chekers(url):
try:
cpanel(url)
except:
pass
def Main():
try:
start = timer()
pp = ThreadPool(25)
pr = pp.map(chekers, list_data)
print('Time: ' + str(timer() - start) + ' seconds')
except:
pass
if __name__ == '__main__':
Main()
I fixed your code so that it returns an actual list of booleans indicating, for each URL, whether the cpanel function succeeded.
from __future__ import print_function
import requests
from multiprocessing.pool import ThreadPool
try:
list_data = ["https://demo.cpanel.net:2083|democom|DemoCoA5620",
"https://demo.cpanel.net:2083|UserDoesNotExist|WRONGPASSWORD",
]
except IOError:
pass
def cpanel(url):
try:
# try to split that url to get username / password
try:
url, username, password = url.split('|')
except Exception as e:
print("Url {} seems to have wrong format. Concrete error: {}".format(url, e))
return False
# build the correct url
url += '/login/?login_only=1'
# build post parameters
params = {'user': username,
'pass': password}
# make request
r = requests.post(url, params)
if r.status_code==200:
print("login for user {} success".format(username))
return True
else:
print("login for user {} failed due to Status Code {} and message \"{}\"".format(username, r.status_code, r.reason))
return False
except Exception as e:
print("Error occured for url {} ".format(e))
return False
def chekers(url):
return cpanel(url)
def Main():
try:
# start = timer()
pp = ThreadPool(1)
pr = pp.map(chekers, list_data)
print(pr)
# print('Time: ' + str(timer() - start) + ' seconds')
except:
pass
if __name__ == '__main__':
Main()
Output:
login for user democom success
login for user UserDoesNotExist failed due to Status Code 401 and message "Access Denied"
[True, False]
Be aware that I replaced your file read operation by some fixed urls.
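If you want the file-based input back, a sketch along these lines (assuming the same url|username|password format per line) could replace the hard-coded list:
import sys

def load_list(path):
    # Read "url|username|password" lines, skipping blanks.
    with open(path) as f:
        return [line.strip() for line in f if line.strip()]

if __name__ == '__main__':
    list_data = load_list(sys.argv[1])   # e.g. python script.py list.txt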
Since you use requests.post, I guess you actually want to POST something to those URLs. Your original code does not do that. If you just want to send a request, use the requests.get method.
See the official documentation for the requests package for more details: https://2.python-requests.org/en/master/user/quickstart/#make-a-request
Also note that
"but it doesn't work"
is NOT a question.
I'm trying to create config.py in my libraries because right now I have the code below in every script:
config = ""
try:
config = configparser.ConfigParser()
if (configFile is None) or (configFile == ""):
configReadArrayLength = len(config.read(pathABSofScript + 'config.ini'))
else:
configReadArrayLength = len(config.read(configFile))
if configReadArrayLength == 0:
print("Script could not find configuration file.")
exit(1)
except Exception as ex:
print("An exception of typ {0} occurred in config parsing function. Arguments:\n{1!r} exiting."
.format(type(ex).__name__, ex.args))
exit(1)
def get_config_value(value):
try:
return config['environment'][value]
except KeyError:
return None
except Exception as config_value_exception:
standard_logger_error(type(config_value_exception).__name__, config_value_exception.args)
And I don't want to duplicate code. So these are the steps I have followed:
0) Structure looks like this:
/
    scripts/
        somescript.py
    lib/
        config.py
1) In my somescript.py I have put:
sys.path.insert(0, '/home/user/some/path/lib')
import config
2) config.py content:
import configparser
config = ""
def create_config_instance(path, file_name):
try:
global config
config = configparser.ConfigParser()
configReadArrayLength = len(config.read(path + file_name))
if configReadArrayLength == 0:
raise IOError("Cannot read config file")
except Exception as ex:
raise Exception("An exception of typ {0} occurred in config parsing function. Arguments:\n{1!r} exiting.".format(type(ex).__name__, ex.args))
def get_config_value(value):
try:
global config
section = config['DEFAULT']['section_to_use']
# return config[section][value]
return "ELO"
except KeyError:
raise KeyError
except Exception as config_value_exception:
# return str(type(config_value_exception).__name__, config_value_exception.args)
raise Exception(config_value_exception)
3) Content of somescript.py
#!/usr/bin/python3.6
import unittest
import os
import sys
import platform
sys.path.insert(0, '/scripts/lib')
import logger
import config
pathABSofScript = ""
if platform.system() == "Linux":
pathABSofScript = str(os.path.realpath(__file__).rsplit('/', 1)[0]) + "/"
else:
print("Unknown linux")
exit(1)
# I'm creating an instance of the config.py object and passing it the file location and file name
config_instance = config.create_config_instance(pathABSofScript, "config.ini")
# I'm trying to get value from config.ini
pathDBfile = config_instance.get_config_value("pathDBfile")
print(pathDBfile)
class TestDatabase(unittest.TestCase):
def test_upper(self):
self.assertEqual('foo'.upper(), 'FOO')
if __name__ == '__main__':
unittest.main()
And I'm facing this error:
File "./somescript.py", line 21, in <module>
pathDBfile = config_instance.get_config_value("pathDBfile")
AttributeError: 'NoneType' object has no attribute 'get_config_value'
Can someone give me a tip on how to resolve this problem?
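One workaround I'm considering (just a sketch, not verified): since create_config_instance does not return anything, call the functions on the config module itself instead of on its return value:
import config

# Initialise the module-level parser, then query it through the module.
config.create_config_instance(pathABSofScript, "config.ini")
pathDBfile = config.get_config_value("pathDBfile")
print(pathDBfile)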
I have server and console scripts which keep listening on a port for console and server requests.
In a UNIX environment I made both the server and console scripts continuously running daemons, which keeps them listening on the port.
Is there any way in Windows to keep them running like daemons in UNIX? I also want them to come up on reboot (they should be auto-started on reboot).
I read about Windows services and followed the code written here, but I am getting a 404 error on my webpage.
__version__ = "0.4"
__all__ = ["RequestHandler"]
import atexit
import BaseHTTPServer
import CGIHTTPServer
import copy
import os
import select
import SimpleHTTPServer
import sys
import time
import threading
import urllib
from signal import SIGTERM
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
pass
class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
have_fork = hasattr(os, 'fork')
have_popen2 = hasattr(os, 'popen2')
have_popen3 = hasattr(os, 'popen3')
rbufsize = 0
def do_POST(self):
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
splitpath = _url_collapse_path_split(self.path)
if splitpath[0] in self.cgi_directories:
self.cgi_info = splitpath
return True
return False
cgi_directories = ['/cgi-bin', '/htbin']
def is_executable(self, path):
return executable(path)
def is_python(self, path):
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
path = self.path
dir, rest = self.cgi_info
i = path.find('/', len(dir) + 1)
while i >= 0:
nextdir = path[:i]
nextrest = path[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
i = path.find('/', len(dir) + 1)
else:
break
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%r)" % scriptname)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%r)" %
scriptname)
return
ispy = self.is_python(scriptname)
if not ispy:
if not (self.have_fork or self.have_popen2 or self.have_popen3):
self.send_error(403, "CGI script is not a Python script (%r)" %
scriptname)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = {}
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.getheader("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = base64.decodestring(authorization[1])
except binascii.Error:
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.getheader('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
env.setdefault(k, "")
os.environ.update(env)
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, os.environ)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
else:
# Non Unix - use subprocess
import subprocess
cmdline = [scriptfile]
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = [interp, '-u'] + cmdline
if '=' not in query:
cmdline.append(query)
self.log_message("command: %s", subprocess.list2cmdline(cmdline))
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
p = subprocess.Popen(cmdline,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE
)
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
else:
data = None
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
stdout, stderr = p.communicate(data)
self.wfile.write(stdout)
if stderr:
self.log_error('%s', stderr)
status = p.returncode
if status:
self.log_error("CGI script exit status %#x", status)
else:
self.log_message("CGI script exited OK")
def _url_collapse_path_split(path):
path_parts = []
for part in path.split('/'):
if part == '.':
path_parts.append('')
else:
path_parts.append(part)
# Filter out blank non trailing parts before consuming the '..'.
path_parts = [part for part in path_parts[:-1] if part] + path_parts[-1:]
if path_parts:
tail_part = path_parts.pop()
else:
tail_part = ''
head_parts = []
for part in path_parts:
if part == '..':
head_parts.pop()
else:
head_parts.append(part)
if tail_part and tail_part == '..':
head_parts.pop()
tail_part = ''
return ('/' + '/'.join(head_parts), tail_part)
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return False
return st.st_mode & 0111 != 0
Handler = RequestHandler
PORT = 7998
ADDRESS = "0.0.0.0"
httpd = ThreadedHTTPServer((ADDRESS, PORT), Handler)
print "serving at %s:%s" % (ADDRESS, PORT)
import os
import SocketServer
import BaseHTTPServer
import SimpleHTTPServer
import xmlrpclib
import SimpleXMLRPCServer
import socket
import httplib
import inspect
import win32service
import win32serviceutil
import win32api
import win32con
import win32event
import win32evtlogutil
class XMLRPCServerService(win32serviceutil.ServiceFramework):
_svc_name_ = "XMLRPCServerService"
_svc_display_name_ = "XMLRPCServerService"
_svc_description_ = "Tests Python service framework by receiving and echoing messages over a named pipe"
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcStop(self):
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
import servicemanager
servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,servicemanager.PYS_SERVICE_STARTED,(self._svc_name_, ''))
self.timeout = 100
while 1:
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
if rc == win32event.WAIT_OBJECT_0:
servicemanager.LogInfoMsg("XMLRPCServerService - STOPPED")
break
else:
httpd.serve_forever()
servicemanager.LogInfoMsg("XMLRPCServerService - is alive and well")
def ctrlHandler(ctrlType):
return True
if __name__ == '__main__':
win32api.SetConsoleCtrlHandler(ctrlHandler, True)
win32serviceutil.HandleCommandLine(XMLRPCServerService)
Any clues where I am going wrong? Or a good way to implement it (maybe without using a service)?
Strict Note:
Solution must be in Python 2.6 (Project requirements).
Updates:
I saw something weird in the log of python service.py debug:
127.0.0.1 - - [04/Apr/2014 09:41:04] command: C:\Python27\Lib\site-packages\win32\pythonservice.exe -u C:\CONSOLE-CGI\cgi-bin\login.py ""
Why is the CGI script being executed using pythonservice.exe?
What am I missing here?
More updates:
Code snippet from daemon process python script
#Non Unix - use subprocess
import subprocess
cmdline = [scriptfile]
if self.is_python(scriptfile):
# interp = sys.executable  # here it returns pythonservice.exe
interp = "python.exe"  # if I hardcode it to python.exe, everything works fine
if interp.lower().endswith("w.exe"):  # On Windows, use python.exe, not pythonw.exe
interp = interp[: -5] + interp[-4: ]
cmdline = [interp, '-u'] + cmdline
Any clues why this is so?
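As a sketch of what I am considering instead of hard-coding python.exe (assuming the service runs from a normal Python installation, so sys.exec_prefix points at the directory that contains python.exe; this is my assumption, not tested code):
import os
import sys

def find_python_interpreter():
    # Under a pywin32 service, sys.executable points at pythonservice.exe,
    # which cannot run arbitrary scripts.  Fall back to python.exe from the
    # same Python installation instead of hard-coding the name.
    exe = sys.executable
    if os.path.basename(exe).lower() not in ("python.exe", "pythonw.exe"):
        exe = os.path.join(sys.exec_prefix, "python.exe")
    if exe.lower().endswith("w.exe"):
        # Prefer python.exe over pythonw.exe so the CGI child keeps std streams.
        exe = exe[:-5] + exe[-4:]
    return exe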
You may need to redirect all the output, since the Windows scheduler has some issues doing this in the pythonw case. The process does start properly, but no action is taken and the server does not respond without redirecting stdout and stderr.
import http.server
import socketserver
import sys
PORT = 1234
Handler = http.server.SimpleHTTPRequestHandler
if __name__ == '__main__':
sys.stdout = open('out.txt', 'w')
sys.stderr = open('err.txt', 'w')
with socketserver.TCPServer(("", PORT), Handler) as httpd:
print("serving at port %d" % PORT, flush=True)
httpd.serve_forever()
I am running a simple Python script to log the accessed URL using a squid url_rewriter_program.
However, every time it runs, the rewriter crashes with a broken pipe error at sys.stdout.flush().
Please suggest a specific solution.
The Python code is:
import sys
import os
import io
line = sys.stdin.readline()
fo=open("/home/linux/Desktop/foo1.txt","a")
fo.write(line)
fo.close()
sys.stdout.write("\n")
sys.stdout.flush()
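For reference, my understanding is that squid expects a url_rewrite_program helper to keep looping over stdin for its whole lifetime and to write one (possibly rewritten) URL line back per request, so exiting after reading a single line may be what breaks the pipe. A minimal sketch of that loop, reusing the log path above (the rest is my assumption):
import sys

log_path = "/home/linux/Desktop/foo1.txt"

for line in sys.stdin:                  # keep serving requests until squid closes the pipe
    with open(log_path, "a") as fo:
        fo.write(line)
    sys.stdout.write("\n")              # blank answer tells squid to keep the original URL
    sys.stdout.flush()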
This is a squid redirector, written in Python; you can take it or compare it with your script:
Redirector:
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#-----------------------------------------------------------------------------
# Name: redirector_master.py
# Purpose: SiSCont checker cuote
#
# Author: Ernesto Licea Martin
#
# Created: 2011/11/24
# RCS-ID: $Id: redirector_master.py $
# Copyright: (c) 2011
# Licence: GPL
#-----------------------------------------------------------------------------
import sys, string, libSiSCont
__name__= "redirector_master"
query="SELECT accounts_proxyquotatype.name, accounts_proxyaccount.proxy_quota, accounts_proxyaccount.proxy_quota_extra, accounts_proxyaccount.proxy_active, accounts_proxyaccounttype.name FROM accounts_proxyaccount INNER JOIN accounts_proxyaccounttype INNER JOIN accounts_proxyquotatype ON (accounts_proxyaccount.proxy_quota_type_id = accounts_proxyquotatype.id) AND (accounts_proxyaccount.proxy_account_type_id = accounts_proxyaccounttype.id) WHERE accounts_proxyaccount.proxy_username=%s"
class RedirMaster:
def __init__(self):
obj = libSiSCont.ParceConf()
obj.parcecfgfile()
self.__listModules = obj.getModList()
self.__redirDicc = obj.getRedirectURL()
self.__penalURL = obj.getPenalizedURL()
self.__confDicc = obj.getConfParam()
self.__dbDicc = obj.getDBDicc()
self.__proxyDicc = obj.getProxyCacheParam()
self.__dbParam = []
def getProxyTypes(self):
db=libSiSCont.connectDB(dbDicc=self.__dbDicc)
c=db.cursor()
c.execute("SELECT accounts_proxy")
def run(self):
modules=[]
for mod in self.__listModules:
try:
m=__import__(mod)
modules.append(m)
except Exception, e:
libSiSCont.reportLogs("%s.run" %__name__, 'Unable to load redirector module %s; the error was: %s' % (mod,str(e)))
if len(modules) == 0:
libSiSCont.reportLogs("%s.run" %__name__, 'No usable redirector module found; switching to trasparent behavour')
while 1:
try:
data_redir=raw_input()
data_redir=data_redir.split()
url,ip_host,user,method,urlgroup = data_redir[0:5]
ip=ip_host.split("/")[0]
host_name=ip_host.split("/")[1]
uri = url
mode=""
#Don't check cache access
if string.find(url,"cache_object") == 0:
sys.stdout.write("%s%s\n" %(mode,uri))
sys.stdout.flush()
continue
db=libSiSCont.connectDB(dbDicc=self.__dbDicc)
c=db.cursor()
c.execute(query,user)
cuote_type,cuote, ext_cuote, active, acc_type = c.fetchall()[0]
self.__dbParam=[cuote_type,int(cuote), int(ext_cuote), active, acc_type]
for module in modules:
try:
uri = module.redir(url = url, ip = ip, host_name = host_name, user = user, method = method, urlgroup = urlgroup, redirDicc = self.__redirDicc, penalURL = self.__penalURL, confDicc = self.__confDicc, proxyDicc = self.__proxyDicc, dbParam = self.__dbParam)
except Exception, e:
libSiSCont.reportLogs("%s.run" %__name__, 'Error while running module: %s -- The error was: %s' % (module,str(e)))
if uri != url:
mode = "301:"
break
sys.stdout.write("%s%s\n" %(mode,uri))
sys.stdout.flush()
except Exception, e:
if not string.find('%s' % e,'EOF') >= 0:
sys.stdout.write('%s\n' % uri)
sys.stdout.flush()
libSiSCont.reportLogs("%s.run" %__name__, '%s: data received from parent: %s' % (str(e),string.join(data_redir)))
else:
sys.exit()
obj=RedirMaster()
obj.run()
Helper:
#!/usr/bin/env python
import sys, syslog, libSiSCont, string,crypt
__name__ = "Helper"
query = "SELECT accounts_proxyaccount.proxy_username FROM accounts_proxyaccount WHERE accounts_proxyaccount.proxy_username=%s AND accounts_proxyaccount.proxy_password=%s"
class BasicAuth:
def __init__(self):
obj = libSiSCont.ParceConf()
obj.parcecfgfile()
self.__dbDicc = obj.getDBDicc()
def run(self):
while 1:
try:
user_pass = string.split(raw_input())
user = user_pass[0].strip("\n")
passwd = user_pass[1].strip("\n")
crypt_passwd = crypt.crypt(passwd,user)
db = libSiSCont.connectDB(self.__dbDicc)
c = db.cursor()
c.execute(query,(user,crypt_passwd))
if c.fetchone() == None:
libSiSCont.reportLogs('%s.run' %__name__,'User Authentication Fail, user = %s password= %s, Access Denied' %(user,passwd) )
sys.stdout.write("ERR\n")
sys.stdout.flush()
else:
libSiSCont.reportLogs('%s.run' %__name__, 'User Authentication Success, user = %s, Access Granted' %user)
sys.stdout.write("OK\n")
sys.stdout.flush()
except Exception, e:
if not string.find("%s" %e, "EOF") >= 0:
sys.stdout.write("ERR\n")
sys.stdout.flush()
libSiSCont.reportLogs('%s.run' %__name__, 'Authenticator error, user will navigate without authentication: %s' %str(e))
else:
sys.exit()
obj = BasicAuth()
obj.run()
I hope this helps you ;-)