How do I kill SimpleHTTPServer from within a Python script? - python

I am trying to use http.server to test all the links in a Python project. I can get my script to work if I start the server before running my script, and then the server stops when I close the terminal window. But I'd really like the script itself to start and stop the server.
I made a test script to simply start the server, get a page and prove the server is running, and then stop the server. I can't seem to get the pid of the server. When I try to kill the pid that this script reports after the script runs, I get a message that there is no such process; but the server is still running.
How do I get the correct pid for the server, or more generally how do I stop the server from the script?
import os
import signal
import subprocess
import requests
from time import sleep

# Start the server in the background with Popen so we hold its real pid.
# (The original used os.system('... &') and then 'echo $$', which prints the
# pid of a freshly spawned shell -- not the server -- hence "no such process".)
print("Starting server...")
server = subprocess.Popen(
    ['python', '-m', 'http.server'],
    preexec_fn=os.setsid)  # give the server its own process group
# Make sure the server has a chance to bind its port before the request.
sleep(1)
print("Server pid: ", server.pid)
url = 'http://localhost:8000/index.html'
print("Testing request: ", url)
r = requests.get(url)
print("Status code: ", r.status_code)
# Stop the server by signalling its whole process group.
os.killpg(os.getpgid(server.pid), signal.SIGTERM)

Here is what I am doing:
import threading

try:
    # Python 3
    from http.server import HTTPServer, SimpleHTTPRequestHandler
except ImportError:
    # Python 2: HTTPServer lives in the BaseHTTPServer module, the
    # handler in SimpleHTTPServer (the original import path was wrong).
    from BaseHTTPServer import HTTPServer
    from SimpleHTTPServer import SimpleHTTPRequestHandler

# Port 0 asks the OS for any free port; read it back via server_port.
server = HTTPServer(('localhost', 0), SimpleHTTPRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()

def fin():
    """Stop the server loop and release its listening socket."""
    server.shutdown()
    server.server_close()  # the original leaked the socket

print('server running on port {}'.format(server.server_port))
# here is your program
If you call fin in your program, then the server shuts down.

A slight modification to User's code above:
import threading

try:
    # Python 3
    from http.server import HTTPServer, SimpleHTTPRequestHandler
except ImportError:
    # Python 2 equivalents of the same two names.
    from BaseHTTPServer import HTTPServer
    from SimpleHTTPServer import SimpleHTTPRequestHandler

# Use SimpleHTTPRequestHandler in BOTH branches: the original's Python 3
# branch used BaseHTTPRequestHandler, which answers 501 to every request,
# while its Python 2 branch served files -- inconsistent behavior.
server = HTTPServer(('localhost', 0), SimpleHTTPRequestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True  # fixed: 'thread.deamon' was a typo, so the flag never took effect

def up():
    """Start serving in the background."""
    thread.start()
    print('starting server on port {}'.format(server.server_port))

def down():
    """Stop serving and release the socket."""
    server.shutdown()
    server.server_close()
    print('stopping server on port {}'.format(server.server_port))

This is a closure solution to the problem. Works on python 3.
import os
import threading
import webbrowser
from http.server import HTTPServer, SimpleHTTPRequestHandler


def simple_http_server(host='localhost', port=4001, path='.'):
    """Build a file server for `path` and return (start, stop) closures.

    start(): chdir into `path`, serve from a background thread, and open
             a browser tab pointing at the server.
    stop():  restore the previous working directory and shut the server down.
    """
    server = HTTPServer((host, port), SimpleHTTPRequestHandler)
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True  # fixed: original wrote 'deamon', so the flag was never set
    cwd = os.getcwd()

    def start():
        # NOTE: os.chdir is process-global; it affects every thread.
        os.chdir(path)
        thread.start()
        webbrowser.open_new_tab('http://{}:{}'.format(host, port))
        print('starting server on port {}'.format(server.server_port))

    def stop():
        os.chdir(cwd)
        server.shutdown()
        server.socket.close()
        print('stopping server on port {}'.format(server.server_port))

    return start, stop
simple_http_server which will return start and stop functions
>>> start, stop = simple_http_server(port=4005, path='/path/to/folder')
which you can use as
>>> start()
starting server on port 4005
127.0.0.1 - - [14/Aug/2016 17:49:31] "GET / HTTP/1.1" 200 -
>>> stop()
stopping server on port 4005

Here's a context-flavored version which I prefer because it cleans up automatically and you can specify the directory to serve:
from contextlib import contextmanager
from functools import partial
from http.server import SimpleHTTPRequestHandler, ThreadingHTTPServer
from threading import Thread
@contextmanager
def http_server(host: str, port: int, directory: str):
    """Serve `directory` over HTTP on (host, port) for the with-block's duration.

    Fixed: the decorator had been garbled to '#contextmanager' (a comment),
    which silently left this as a plain generator function, so the
    `with http_server(...)` usage below would fail.
    """
    server = ThreadingHTTPServer(
        (host, port), partial(SimpleHTTPRequestHandler, directory=directory)
    )
    server_thread = Thread(target=server.serve_forever, name="http_server")
    server_thread.start()
    try:
        yield
    finally:
        # Always stop the server, even if the with-body raised.
        server.shutdown()
        server_thread.join()
def usage_example():
    """Example: keep a local file server alive for 100 seconds."""
    from time import sleep
    with http_server("127.0.0.1", 8087, "."):
        # While inside this block, http://127.0.0.1:8087 serves ".".
        sleep(100)

I got this to run, but I'm curious to hear how this compares to User's answer above. I came up with this after looking at the accepted answer here.
import subprocess
import requests
import os
import signal
from time import sleep

# Python 3 version (the original used Python 2 print statements and the
# Python-2-only SimpleHTTPServer module).
print("Starting server...")
cmd = 'python -m http.server'
# setsid puts the server in its own process group so the whole group
# (shell + python) can be killed together at the end.
pro = subprocess.Popen(cmd, shell=True, preexec_fn=os.setsid)
# Make sure server has a chance to start before making request.
sleep(1)
url = 'http://localhost:8000/index.html'
print("Testing request: ", url)
r = requests.get(url)
print("Status code: ", r.status_code)
# killpg wants the process GROUP id; getpgid makes that explicit
# (with setsid the pid happens to equal the pgid, but don't rely on it).
os.killpg(os.getpgid(pro.pid), signal.SIGTERM)

My solution with browser opening:
File: http.py
import SimpleHTTPServer
import SocketServer
import threading
import webbrowser
import platform
from socket import SOL_SOCKET,SO_REUSEADDR
class HTTPServer():
    """Serve the current directory and open Chrome (or the default browser) on start()."""

    def __init__(self, port=8000, url='http://localhost'):
        self.port = port
        self.thread = None
        self.httpd = None
        self.run = False
        self.url = url
        system = platform.system()  # renamed: 'os' shadowed the os module name
        if system == 'Linux':
            self.browser_path = "/usr/bin/google-chrome %s"
        elif system == 'Windows':
            self.browser_path = "C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s"
        else:
            # Fall back to the default browser instead of leaving the attribute
            # unset (the original raised AttributeError inside start() here).
            self.browser_path = None
            print("Chrome not found!")

    def start(self):
        self.run = True
        # Bind manually so SO_REUSEADDR is set BEFORE bind(): TCPServer's
        # constructor normally binds immediately, so setting the option
        # afterwards (as the original did) had no effect.
        self.httpd = SocketServer.TCPServer(
            ("", self.port),
            SimpleHTTPServer.SimpleHTTPRequestHandler,
            bind_and_activate=False)
        self.httpd.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.httpd.server_bind()
        self.httpd.server_activate()
        self.thread = threading.Thread(target=self._serve)
        self.thread.start()
        target_url = self.url + ":" + str(self.port) + "/"
        if self.browser_path:
            webbrowser.get(str(self.browser_path)).open(target_url)
        else:
            webbrowser.open(target_url)

    def _serve(self):
        # handle_request() blocks until a request arrives, so stop() only
        # takes effect once the NEXT request comes in.
        while self.run:
            self.httpd.handle_request()

    def stop(self):
        self.run = False
        self.httpd.server_close()
After, just run:
# Usage of the HTTPServer class defined in the local http.py module.
# NOTE(review): naming the module "http.py" shadows the standard-library
# `http` package on Python 3 -- this only works as-is under Python 2.
from http import HTTPServer

server = HTTPServer()
server.start()
raw_input("Enter to close")  # Python 2; blocks until the user presses Enter
server.stop()

In my opinion, what I did is:
go to activity monitor
then go to network
search for word 'python'
(only if you ran the cmd/terminal command in python, because I think the way to stop it in cmd/terminal is ctrl+c or just quit the cmd/terminal)
note: the pid is also there
quit the process 'python' (if it doesn't, use force quit process)
if this works in cmd/terminal, then you are done! enjoii~
Hope this helps :D!!

Using Python 3.8
import http.server
import socketserver
import threading

PORT = 8000

# SimpleHTTPRequestHandler serves files from the current directory.
Handler = http.server.SimpleHTTPRequestHandler
server = socketserver.TCPServer(("", PORT), Handler)

# daemon=True lets the interpreter exit even while the server is running;
# passing it to the constructor replaces the separate attribute assignment.
thread = threading.Thread(target=server.serve_forever, daemon=True)
thread.start()

Related

In python is the .start() command blocking for the time it takes the thread start to complete?

Hypothesis:
thread.start() blocks until the start call completes.
Question:
Is hypothesis True or False?
Start http web server then open browser has the following code.
import sys
import time
import threading
import webbrowser
from http.server import HTTPServer, SimpleHTTPRequestHandler

# Address the file server listens on.
ip = "127.0.0.1"
port = 3600
url = f"http://{ip}:{port}"

def start_server():
    """Serve the current directory forever on (ip, port)."""
    server_address = (ip, port)
    httpd = HTTPServer(server_address, SimpleHTTPRequestHandler)
    httpd.serve_forever()  # blocks this thread until shutdown() is called

# Thread.start() returns immediately; serve_forever runs on the new thread.
threading.Thread(target=start_server).start()
webbrowser.open_new(url)

# Keep the main thread alive; Ctrl-C raises KeyboardInterrupt and exits.
while True: # make a blocker to prevent the application finish of execute:
    try:
        time.sleep(1)
    except KeyboardInterrupt:
        sys.exit(0)
This works fine. However, the following also works.
import sys
import time
import threading
import webbrowser
from http.server import HTTPServer, SimpleHTTPRequestHandler

ip = "127.0.0.1"
port = 3600
url = f"http://{ip}:{port}"

def start_server():
    """Serve the current directory forever on (ip, port)."""
    server_address = (ip, port)
    httpd = HTTPServer(server_address, SimpleHTTPRequestHandler)
    httpd.serve_forever()  # blocks its own thread, not the main one

# This thread is non-daemon, so it alone keeps the process alive --
# which is why the busy-wait loop of the first version is not needed.
threading.Thread(target=start_server).start()
webbrowser.open_new(url)
Hypothesis:
thread.start() actually blocks until start completes. So, webbrowser.open_new(url) does not execute until start completes, making the following unnecessary.
# Keep the main thread alive; Ctrl-C (KeyboardInterrupt) exits cleanly.
while True: # make a blocker to prevent the application finish of execute:
    try:
        time.sleep(1)
    except KeyboardInterrupt:
        sys.exit(0)
I have not been able to prove or disprove the Hypothesis after extensive searching.
There is no blocking when calling Thread.start() in the way you suggest.
The call is blocking in the sense that a call is placed that initializes the new thread's internal state, and a system call is made to start the actual OS thread - but that should take less than 1 ms. The function that is the target of the thread is only called on the new thread, and the main thread will continue to run, regardless of what takes place inside that function.
If you want your program not to end, there is no need to resort to a complicated pausing loop like the one you set up - just call join() on the thread object instead. This will block until that thread finishes running, and only then will the code that called join() proceed.

Python HTTPServer and periodic tasks

I'm using HTTPServer to listen for incoming POST requests and serve them. All is working fine with that.
I need to add some periodic tasks in the script (every X seconds: do something). As the HTTP server takes full command after
def run(server_class=HTTPServer, handler_class=S, port=9999):
    """Create the HTTP server and serve requests forever.

    NOTE(review): `ethernetIP` and `S` are defined elsewhere in the post.
    """
    server_address = (ethernetIP, port)
    httpd = server_class(server_address, handler_class)
    httpd.serve_forever()  # blocks; periodic work must happen on another thread
I wonder if there's any way to include a check for time.time() as part of:
class S(BaseHTTPRequestHandler):
    """Request handler for the POST-listening server."""

    def _set_response(self):
        # Shared helper: reply 200 OK with an HTML content type.
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_GET(self):
        self._set_response()
        self.wfile.write("GET request for {}".format(self.path).encode('utf-8'))

    def do_POST(self):
        # my stuff here
        # NOTE(review): the body was elided in the post; as quoted this
        # method has no statements and is not valid Python.
Any ideas are welcome. Thanks!
Thanks to @rdas for pointing me to the separate-thread solution. I tried schedule, but it didn't work with the HTTP server, because I can't tell the script to run the pending jobs.
I tried threading, running my periodic task as a daemon... and it worked! Here's the code structure:
import time
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
import sys  # used by run(); missing from the snippet's imports

polTime = 60           # how often we touch the file (seconds)
polFile = "myfile.abc"

# this is the daemon thread
def polUpdate():
    """Touch polFile every polTime seconds as a proof of life."""
    while True:
        thisSecond = int(time.time())
        if thisSecond % polTime == 0:  # every X seconds
            # 'with' guarantees the handle is closed even on error.
            with open(polFile, "w"):
                pass  # touch and close
        time.sleep(1)  # avoid touching twice within the same second
    return "should never come this way"

# here's the http server starter
def run(server_class=HTTPServer, handler_class=S, port=9999):
    """Serve until Ctrl-C, then close the socket and exit."""
    server_address = (ethernetIP, port)
    httpd = server_class(server_address, handler_class)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    sys.exit(1)

# init the thread as daemon: it dies with the main program.
# (daemon=True in the constructor replaces the deprecated setDaemon().)
d = threading.Thread(target=polUpdate, name='Daemon', daemon=True)
d.start()

# runs the HTTP server (conf_port, ethernetIP and S are defined elsewhere)
run(port=conf_port)
The HTTP server doesn't block the thread, so it works great.
By the way, I'm using the file touching as a proof of life for the process.

How can I set a timeout in Python HTTPServer

I have a Python (2.7.13) HTTP Server running in Debian, I want to stop any GET request that takes longer than 10 seconds, but can't find a solution anywhere.
I already tried all the snippets posted in the following question: How to implement Timeout in BaseHTTPServer.BaseHTTPRequestHandler Python
#!/usr/bin/env python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer  # Python 2 module names
import os

class handlr(BaseHTTPRequestHandler):
    """Demo handler whose GET deliberately takes ~20 s (the thing to time out)."""
    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type','text-html')  # NOTE(review): should be 'text/html'
        self.end_headers()
        # os.popen blocks until the shell pipeline finishes (about 20 seconds).
        self.wfile.write(os.popen('sleep 20 & echo "this took 20 seconds"').read())

def run():
    """Bind to 127.0.0.1:8080 and serve forever."""
    server_address = ('127.0.0.1', 8080)
    httpd = HTTPServer(server_address, handlr)
    httpd.serve_forever()

if __name__ == '__main__':
    run()
As a test, I'm running a shell command that takes 20 seconds to execute, so I need the server stop before that.
Put your operation on a background thread, and then wait for your background thread to finish. There isn't a general-purpose safe way to abort threads, so this implementation unfortunately leaves the function running in the background even though it had already given up.
If you can, you might consider putting a proxy server (like say nginx) in front of your server and let it handle timeouts for you, or perhaps use a more robust HTTP server implementation that allows this as a configuration option. But the answer below should basically cover it.
#!/usr/bin/env python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import os
import threading
class handlr(BaseHTTPRequestHandler):
    """Handler that runs the slow command with a 3-second timeout."""

    def do_GET(self):
        # Run the slow pipeline on a background thread; give up after 3 s.
        result, succeeded = run_with_timeout(
            lambda: os.popen('sleep 20 & echo "this took 20 seconds"').read(),
            timeout=3)
        if succeeded:
            self.send_response(200)
            self.send_header('Content-type', 'text/html')  # fixed 'text-html' typo
            self.end_headers()
            # Write the value run_with_timeout already computed. The original
            # re-ran the 20-second os.popen command here, blocking the request
            # all over again and defeating the timeout entirely.
            self.wfile.write(result)
        else:
            self.send_response(500)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            self.wfile.write('<html><head></head><body>Sad panda</body></html>')
        self.wfile.close()
def run():
    """Bind the timeout-aware handler to 127.0.0.1:8080 and serve forever."""
    address = ('127.0.0.1', 8080)
    server = HTTPServer(address, handlr)
    server.serve_forever()
def run_with_timeout(fn, timeout):
    """Run fn() on a daemon thread, waiting at most `timeout` seconds.

    Returns (result, succeeded). On timeout, succeeded is False and the
    thread is simply abandoned -- there is no safe way to abort it, so fn
    keeps running in the background.
    """
    lock = threading.Lock()
    state = [None, False]  # [return value, completed?]

    def worker():
        value = fn()
        with lock:
            state[0] = value
            state[1] = True

    runner = threading.Thread(target=worker)
    runner.daemon = True
    runner.start()
    runner.join(timeout)
    with lock:
        return tuple(state)
# Run the server only when executed as a script (not on import).
if __name__ == '__main__':
    run()

RPyC serve_all blocking threads access to connection

When in a multithreaded application i call rpyc.Connection.serve_all() from a thread other threads are unable to immediately use the connection.
I think that serve_all is blocking the connection and the other threads are able to access it only when there is some timeout.
This code should reproduce the issue
Server:
#!/usr/bin/env python3
import rpyc
import rpyc.utils.server
import threading
class Service(rpyc.Service):
    """Minimal RPyC service exposing a single status string."""

    def on_connect(self):
        # Called by RPyC when a client connects (rpyc 3.x signature).
        print("New connection")

    def on_disconnect(self):
        print("Connection closed")

    def exposed_get_status(self):
        # The 'exposed_' prefix makes this callable from the client
        # as con.root.get_status().
        return "Test string"

server = rpyc.utils.server.ThreadedServer(Service, port = 12345)
t = threading.Thread(target = server.start)
t.daemon = True  # moot in practice: the join() below blocks forever anyway
t.start()
t.join()  # keeps the process alive while the server runs
Client:
#!/usr/bin/env python3
import rpyc
import threading
import datetime
# Single shared connection used by BOTH the timer thread and serve_all().
con = rpyc.connect('localhost',12345)

def delayed():
    """Timer callback: measure how long one remote call takes."""
    print("Time is up")
    now = datetime.datetime.now()
    # NOTE(review): this call competes with serve_all() for the same
    # connection object, which is why it appears to stall (see the
    # self-answer below: do I/O on a connection from one thread only).
    print(con.root.get_status())
    print(str(datetime.datetime.now() - now))

timer = threading.Timer(10,delayed)
print("Starting timer")
timer.start()
print("serving all")
con.serve_all()  # blocks, serving incoming events on this connection
Sample output (from the client):
$ python3 testrpyc.py
Starting timer
serving all
Time is up
Test string
0:01:30.064087
I'm using RPyC 3.4.3 on Python 3.5.4rc1 (debian sid) installed with pip.
I think I'm misusing serve_all, but I can't find anything in the docs.
(Answering myself)
I opened a issue on github and it seems that this is normal. The solution is to use perform IO only on a single thread for any resource.

Python multi threading HTTP server not working

I am trying to create multi threaded web server in python, but the requests are handled one by one. After searching few hours, I found this link but the approved answer seems to be incorrect as the request over there is also handled one by one.
Here is the code:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler  # Python 2 module names
from SocketServer import ThreadingMixIn
import threading
from time import sleep

class Handler(BaseHTTPRequestHandler):
    """Replies with the name of the thread that served the request."""
    def do_GET(self):
        self.send_response(200)
        self.end_headers()
        sleep(5)  # artificial delay, to observe whether requests overlap
        message = threading.currentThread().getName()
        self.wfile.write(message)
        self.wfile.write('\n')
        return

class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""

if __name__ == '__main__':
    server = ThreadedHTTPServer(('localhost', 8080), Handler)
    print 'Starting server, use <Ctrl-C> to stop'
    server.serve_forever()
I added "sleep(5)" for 5 second delay to handle the request. After that I send multiple requests but all the requests are handled one by one and each request took 5 seconds. I am unable to find the reason. Help me.
The key requirement here is to be able to have a 5-second delay between the send_response and the data returned. That means you need streaming; you can't use ThreadingMixIn, gunicorn, or any other such hack.
You need something like this:
import time, socket, threading

# Raw listening socket shared by all Listener threads (Python 2 code).
sock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 8000
sock.bind((host, port))
sock.listen(1)

# Minimal HTTP response header; no Content-Length because the body never ends.
HTTP = "HTTP/1.1 200 OK\nContent-Type: text/html; charset=UTF-8\n\n"

class Listener(threading.Thread):
    """Thread that accepts ONE connection and streams numbers to it forever."""
    def __init__(self):
        threading.Thread.__init__(self)
        self.daemon = True # stop Python from biting ctrl-C
        # NOTE(review): starting a thread from inside __init__ is fragile --
        # run() may execute before the constructor returns.
        self.start()
    def run(self):
        conn, addr = sock.accept()  # each Listener serves exactly one client
        conn.send(HTTP)
        # serve up an infinite stream
        i = 0
        while True:
            conn.send("%i " % i)  # Python 2: str is bytes; Python 3 would need .encode()
            time.sleep(0.1)
            i += 1

# Pre-spawn 100 acceptor threads, then sleep (effectively) forever.
[Listener() for i in range(100)]
time.sleep(9e9)

Categories

Resources