I've created a gRPC server using proto3 and Python to do basic health checking on a long-running daemon. When I start my application, though, it doesn't actually start the gRPC server. I was wondering if anyone could help identify why it doesn't start and serve the gRPC API.
Proto Definition: health.proto
syntax = "proto3";
option java_multiple_files = true;
option java_package = "com.redacted.example.worker";
option java_outer_classname = "ExampleWorker";
option objc_class_prefix = "DSW";
package exampleworker;
service Worker {
rpc Health (Ping) returns (Pong) {}
}
// The request message containing PING
message Ping {
string message = 1;
}
// The response message containing PONG
message Pong {
string message = 1;
}
Then I generated the Python code using:
python -m grpc_tools.protoc -I=../protos --python_out=. --grpc_python_out=. ../protos/health.proto
This generated the health_pb2.py and health_pb2_grpc.py files. I next created a server file:
u"""Health server is used to create a new health monitoring GRPC server."""
from concurrent import futures
import logging
import grpc
import health_pb2
import health_pb2_grpc
# grpc related variables
grpc_host = u'[::]'
grpc_port = u'50001'
grpc_address = u'{host}:{port}'.format(host=grpc_host, port=grpc_port)
# logging related variables
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class WorkerServicer(health_pb2_grpc.WorkerServicer):
    u"""Provides methods that implement functionality of health server."""

    def Health(self, request, context):
        u"""Return PONG to say the Worker is alive."""
        return health_pb2.Pong(message='PONG')


def serve_health_api():
    u"""Create and start the GRPC server."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    health_pb2_grpc.add_WorkerServicer_to_server(WorkerServicer(), server)
    logging.info(u'adding port {grpc_address}'.format(
        grpc_address=grpc_address))
    server.add_insecure_port(grpc_address)
    server.start()
Then in my main run.py file:
#!mac/bin/python
"""Run is the local (non-wsgi) entrypoint to the flask application."""
from subscriber.worker import Worker
from redis import StrictRedis
from examplegrpc.health_server import serve_health_api
import logging
redis_host = 'localhost'
redis_port = 6379
redis_db = 0
redis_chan = 'deployment'
if __name__ == "__main__":
    FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
    logging.basicConfig(format=FORMAT)
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    logger.debug('Creating redis client')
    client = StrictRedis(host=redis_host, port=redis_port, db=redis_db)
    w = Worker(client, [redis_chan])
    try:
        logger.info('Starting Health gRPC API...')
        serve_health_api()
        logger.info('Starting worker...')
        w.run()
    except KeyboardInterrupt:
        logger.info('Exiting...')
The w.run() call starts correctly and executes work off of a Redis channel, but the gRPC server does not start; trying to access it with
channel = grpc.insecure_channel('localhost:{port}'.format(port=grpc_port))
stub = WorkerStub(channel)
ping = examplegrpc.health_pb2.Ping(message='PING')
health = stub.Health(ping)
fails with
_Rendezvous: <_Rendezvous of RPC that terminated with (StatusCode.UNAVAILABLE, Connect Failed)>
It looks to me like what's happening is that your server is only assigned to a local variable in your serve_health_api function, so when that function returns (immediately after having started the server) the server is garbage-collected and shut down.
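A minimal sketch of one way to fix it, under that assumption: return the server so the caller holds a reference for the life of the process (newer grpcio releases also offer server.wait_for_termination(), but here the worker loop already blocks):

def serve_health_api():
    u"""Create, start, and return the GRPC server."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    health_pb2_grpc.add_WorkerServicer_to_server(WorkerServicer(), server)
    server.add_insecure_port(grpc_address)
    server.start()
    return server  # the caller must keep this reference alive

Then in run.py, keep the returned object in scope:

grpc_server = serve_health_api()  # holding the reference prevents garbage collection
w.run()  # blocks; the gRPC server keeps serving on its own threads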
I'm seeing some weird behaviour with an API I am currently developing in PyCharm.
I have already developed many REST APIs with Python FastAPI successfully, but this time I am facing some very weird behaviour in my application.
To test locally, I added the following lines to the main.py script, which work fine for every other application I have ever developed:
if __name__ == "__main__":
    logger.info("starting server with dev configuration")
    uvicorn.run(app, host="127.0.0.1", port=5000)
The code runs when I set a breakpoint on the uvicorn.run(...) line and then resume from there.
The code also runs when I build the Docker image and run it in Docker with the following Dockerfile:
FROM python:3.10-slim
...
EXPOSE 80
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80", "--workers", "1"]
But when I run it normally, uvicorn does not seem to boot, as none of the messages I would expect are logged:
INFO: Started server process [6828]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:5000 (Press CTRL+C to quit)
The problem appeared after I added the Azure Application Insights handler.
Configuration of application insights:
import logging
from fastapi import Request
from opencensus.ext.azure.trace_exporter import AzureExporter
from opencensus.trace import attributes_helper, samplers
from opencensus.trace.span import SpanKind
from opencensus.trace.tracer import Tracer
from starlette.types import ASGIApp
HTTP_HOST = attributes_helper.COMMON_ATTRIBUTES["HTTP_HOST"]
HTTP_METHOD = attributes_helper.COMMON_ATTRIBUTES["HTTP_METHOD"]
HTTP_PATH = attributes_helper.COMMON_ATTRIBUTES["HTTP_PATH"]
HTTP_ROUTE = attributes_helper.COMMON_ATTRIBUTES["HTTP_ROUTE"]
HTTP_URL = attributes_helper.COMMON_ATTRIBUTES["HTTP_URL"]
HTTP_STATUS_CODE = attributes_helper.COMMON_ATTRIBUTES["HTTP_STATUS_CODE"]
STACKTRACE = attributes_helper.COMMON_ATTRIBUTES["STACKTRACE"]
_logger = logging.getLogger(__name__)
class AzureApplicationInsightsMiddleware:
    def __init__(self, app: ASGIApp, azure_exporter: AzureExporter) -> None:
        self._app = app
        self._sampler = samplers.AlwaysOnSampler()
        self._exporter = azure_exporter

    async def __call__(self, request: Request, call_next):
        tracer = Tracer(exporter=self._exporter, sampler=self._sampler)
        with tracer.span("main") as span:
            span.span_kind = SpanKind.SERVER

            response = await call_next(request)  # does not seem to return a response

            span.name = "[{}]{}".format(request.method, request.url)
            tracer.add_attribute_to_current_span(HTTP_HOST, request.url.hostname)
            tracer.add_attribute_to_current_span(HTTP_METHOD, request.method)
            tracer.add_attribute_to_current_span(HTTP_PATH, request.url.path)
            tracer.add_attribute_to_current_span(HTTP_URL, str(request.url))
            try:
                tracer.add_attribute_to_current_span(HTTP_STATUS_CODE, response.status_code)
            except Exception:
                tracer.add_attribute_to_current_span(HTTP_STATUS_CODE, "500")
        return response
Added as middleware:
from fastapi import FastAPI
from opencensus.ext.azure.trace_exporter import AzureExporter
from app.startup.middleware.application_insights import AzureApplicationInsightsMiddleware
from app.startup.middleware.cors import add_cors
from config import app_config
def create_middleware(app: FastAPI) -> FastAPI:
    if app_config.appinsights_instrumentation_key is not None:
        azure_exporter = AzureExporter(connection_string=app_config.appinsights_instrumentation_key)
        app.middleware("http")(AzureApplicationInsightsMiddleware(app=app, azure_exporter=azure_exporter))
    app = add_cors(app)
    return app
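As an aside, the more conventional way to write a class-based HTTP middleware in Starlette/FastAPI is to subclass BaseHTTPMiddleware and register it with add_middleware. A minimal sketch of the equivalent wiring (the tracing body is elided; this is an alternative shape, not necessarily a fix for the boot problem):

from starlette.middleware.base import BaseHTTPMiddleware

class AzureApplicationInsightsMiddleware(BaseHTTPMiddleware):
    def __init__(self, app, azure_exporter):
        super().__init__(app)
        self._exporter = azure_exporter
        self._sampler = samplers.AlwaysOnSampler()

    async def dispatch(self, request, call_next):
        # same tracing logic as in the __call__ version above
        return await call_next(request)

app.add_middleware(AzureApplicationInsightsMiddleware, azure_exporter=azure_exporter)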
Added handler to logger:
import logging
import sys
from logging import StreamHandler
from opencensus.ext.azure.log_exporter import AzureLogHandler
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(fmt="%(asctime)s %(levelname)-8s %(name)-15s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
handler = StreamHandler(stream=sys.stdout)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
handler = AzureLogHandler(connection_string=connection_string)  # connection_string is defined elsewhere in the module
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
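With this in place, any module-level logger propagates to both handlers, so a quick sanity check is, for example:

import logging
logging.getLogger(__name__).info("goes to stdout and to Application Insights")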
I use the standard template, which works for all my other applications, but this time it obviously does not. I wonder whether there might be an issue with some of the installed modules in my requirements.txt:
fastapi
uvicorn
azure-servicebus
pymongo[srv]
pandas
numpy
jinja2
svgwrite
matplotlib
Has anyone faced the same problem yet?
Update: I changed the host to localhost --> the uvicorn server is still not booting.
I am very new to WSDL, ASMX, clients, zeep, and other topics related to SOAP APIs, and I have to fix a system of REST and SOAP API services written in Python by others.
Currently I am working only on my local machine. This is the main context:
In my project main folder myservice there are:
a service made with Tornado, myservice > myserviceSrv.py, and
a set of APIs whose logic is split into two scripts:
myservice > handlers > myservice_api.py: containing classes inheriting from tornado.web.RequestHandler, whose POST methods call the SOAP API wrappers in the externalcompany_myservice_api.py script.
myservice > handlers > externalcompany_myservice_api.py: containing a single class whose methods call the externalcompany service's SOAP APIs.
One of these methods is ping, and my aim is to make it work; that is, I want to get the correct response when I launch a POST with Postman.
In particular, the POST request from Postman should trigger the ping method of my service, which should call my ping SOAP wrapper, which in turn should call the corresponding SOAP API of the externalcompany service, which should answer back and ultimately give me the POST response.
A module containing some configuration, myservice > config.py: mainly the options bound to tornado.options, the WSDL file paths, and the API URLs.
This is the problem:
I start my server(/service) and then I launch the POST with Postman to http://localhost:30205/service/ping (with no inputs), but this is the log and traceback that I get:
[I 2021-11-15 10:37:20.964 myserviceSrv:76] **************************************************
[I 2021-11-15 10:37:20.964 myserviceSrv:77] Starting externalcompany service 2021-11-15 10:37:20 on port 30205 for destination externalcompany
[I 2021-11-15 10:37:20.964 myserviceSrv:78] **************************************************
[I 2021-11-15 10:33:21.354 myserver_api:154] S - post Ping Handler
[I 2021-11-15 10:33:21.354 myserver_api:158] Destination Ping Handler: externalcompany
[I 2021-11-15 10:33:21.354 externalcompany_myserver_api:23] S - initialise wsdl
[W 2021-11-15 10:33:22.833 soap:218] Forcing soap:address location to HTTPS
[W 2021-11-15 10:33:22.833 soap:218] Forcing soap:address location to HTTPS
[I 2021-11-15 10:33:22.834 externalcompany_myserver_api:26] Created wsdl externalcompany connection
[I 2021-11-15 10:33:22.834 externalcompany_myserver_api:27] E - initialise wsdl
[E 2021-11-15 10:35:33.348 externalcompany_myserver_api:216] Exception error ping: HTTPSConnectionPool(host='10.90.XXX.YY', port=8080): Max retries exceeded with url: /WebServices/Api/SalesApi.asmx (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7f3a5dfc4cd0>: Failed to establish a new connection: [Errno 110] Connection timed out'))
[I 2021-11-15 10:35:33.348 web:2239] 200 POST /service/ping (::1) 131994.51ms
and here are my questions:
I don't understand where that IP address 10.90.XXX.YY comes from. I can't find a reference to it anywhere in my project folder.
It looks like the code manages to initialize the wsdl, but then it cannot establish a connection. But why? What am I doing wrong, and how can I fix it?
Here is the content of:
myservice_api.py:
import tornado.web
from tornado import gen
import json
import logging
import config as config
class Ping(tornado.web.RequestHandler):

    def initialize(self, **kwargs):
        self.destination = kwargs["destination"]

    @gen.coroutine
    def post(self):
        logging.info('S - post Ping Handler')
        response = {}
        # Data received
        logging.info("Destination Ping Handler: {}".format(self.destination))
        # Init module with correct destination - start specific method ping()
        try:
            # calls the class associated to key "destination" -> the class ("externalcompanymyserviceApi") is initialized
            # init_wsdl is passed the wsdl sales file for settings
            destination_object_init = config.destination_schema[self.destination]()
            response = yield destination_object_init.ping()
        except Exception as e:
            logging.error("Error Ping Handler: {}".format(e))
        raise gen.Return(self.write(response))
externalcompany_myservice_api.py:
import logging
import config as config
import json
import os
from tornado import gen
from zeep import Client, Settings, helpers
from zeep.exceptions import Fault, Error
from utils import utilities as utils
class externalcompanymyserviceApi:

    def __init__(self):
        # Init wsdl object for all methods in class externalcompany - utilities object
        self.wsdl_object_sales = self.init_wsdl(config.WSDL_SALES)

    # wsdl object
    @staticmethod
    def init_wsdl(type_wsdl):
        logging.info("S - initialise wsdl")
        settings = Settings(strict=False, xml_huge_tree=True)
        wsdl_externalcompany = Client(wsdl=type_wsdl, settings=settings)
        logging.info("Created wsdl externalcompany connection")
        logging.info("E - initialise wsdl")
        return wsdl_externalcompany
config.py:
from tornado.options import define, parse_command_line
import logging
from handlers.externalcompany_myservice_api import externalcompanymyserviceApi
# LOGGING
logging.basicConfig(format='[%(levelname)1.1s %(asctime)s.%(msecs)03d %(module)s:%(lineno)d] %(message)s',
datefmt='%F %T')
# OPTIONS
# default: ip address of externalcompany (alias of externalcompany)
define("destination", default="externalcompany", help="destination of service", type=str)
# default: port of cash service intermediating with externalcompany
define("http_port", default=30205, help="run on the given port", type=int)
parse_command_line()
# SERVICE SETTINGS
DEBUG = True
######
# WSDL
######
# links to externalcompany test server
WSDL_PRODUCTS = "https://externalcompanyapi.vendorcompany.com/webservices/productservice_v1.asmx?WSDL"
WSDL_SALES = "https://externalcompanyapi.vendorcompany.com/WebServices/Api/SalesApi.asmx?WSDL"
# HANDLERS
PING = r"/service/ping"
# ...
destination_schema = {
    "externalcompany": externalcompanymyserviceApi,
    "John": "init class John"
}
myserviceSrv.py:
import config as cf
from config import PING
import logging
import tornado.web
from datetime import datetime
from tornado.log import enable_pretty_logging
from tornado.options import options
from tornado.ioloop import IOLoop
from tornado import httpserver
from handlers.myservice_api import Ping
#...
enable_pretty_logging()
class Destination_Service_Srv:

    def __init__(self):
        app = self.make_app()
        self.http_server = self.make_server(app)

    @staticmethod
    def make_app():
        settings = {
            "debug": cf.DEBUG
        }
        return tornado.web.Application([
            # ...
            (PING, Ping, {"destination": options.destination})
        ], **settings)

    @staticmethod
    def make_server(app):
        http_server = httpserver.HTTPServer(app, decompress_request=True)
        http_server.listen(options.http_port)
        return http_server

    def start(self):
        io_loop = IOLoop.current()
        io_loop.start()

    def stop(self):
        io_loop = IOLoop.current()
        io_loop.stop()


if __name__ == "__main__":
    today = datetime.today().strftime("%Y-%m-%d %H:%M:%S")
    myservice_server = Destination_Service_Srv()
    try:
        logging.info('**************************************************')
        logging.info('Starting myservice service {} on port {} for destination {}'.format(today, options.http_port,
                                                                                          options.destination))
        logging.info('**************************************************')
        myservice_server.start()
    except KeyboardInterrupt:
        myservice_server.stop()
SOLVED
It turns out that the URLs stored in the variables WSDL_PRODUCTS and WSDL_SALES were wrong; I couldn't access the corresponding WSDL files from my browser.
I found out that the correct URLs use http:// and not https://:
WSDL_PRODUCTS = "http://externalcompanyapi.vendorcompany.com/webservices/productservice_v1.asmx?WSDL"
WSDL_SALES = "http://externalcompanyapi.vendorcompany.com/WebServices/Api/SalesApi.asmx?WSDL"
Now my POST gets the correct response:
{"ping": {"MessageNo": 1001, "MessageText": "Ping Response", "MessageInfo": "Response from 'START' on '16/11/2021
10:20:31'", "IsSuccess": true, "Content": null}}
So, to answer my own questions:
The IP address 10.90.XXX.YY comes from the wsdl file whose URL is saved in WSDL_SALES.
If I understand it correctly, zeep initialized the WSDL, but it forced the soap:address locations to HTTPS, as the log indicated:
[W 2021-11-15 10:33:22.833 soap:218] Forcing soap:address location to HTTPS
[W 2021-11-15 10:33:22.833 soap:218] Forcing soap:address location to HTTPS
so the connection was configured against an invalid service address.
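For what it's worth, when a WSDL's soap:address points at an unreachable host, zeep also lets you override the endpoint explicitly with create_service. A sketch, where the binding name is a hypothetical placeholder (find the real one by inspecting client.wsdl.bindings):

from zeep import Client, Settings

settings = Settings(strict=False, xml_huge_tree=True)
client = Client(wsdl=WSDL_SALES, settings=settings)
# '{http://tempuri.org/}SalesApiSoap' is a hypothetical binding name;
# list client.wsdl.bindings to find the real one
service = client.create_service(
    '{http://tempuri.org/}SalesApiSoap',
    'http://externalcompanyapi.vendorcompany.com/WebServices/Api/SalesApi.asmx')
response = service.Ping()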
This was the failing line:
zeep_response = helpers.serialize_object(self.wsdl_object_sales.service.Ping())
It failed because the POST could not get a response, so the zeep serializer could not process the empty result.
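The ping method itself is not shown above, but based on that line and the handler code it presumably looks something like this sketch (the error message is taken from the log; everything else is assumed):

@gen.coroutine
def ping(self):
    response = {}
    try:
        zeep_response = helpers.serialize_object(self.wsdl_object_sales.service.Ping())
        response = {"ping": zeep_response}
    except Exception as e:
        logging.error("Exception error ping: {}".format(e))
    raise gen.Return(response)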
Hello fellow developers,
I'm trying to create a small webapp that would allow me to monitor multiple Binance accounts from a dashboard, and maybe in the future perform some small automatic trading actions.
My frontend is implemented with Vue + Quasar, and my backend server is based on Python Flask for the REST API.
What I would like to do is start a background process dynamically when a specific endpoint of my server is called. Once this process is started on the server, I would like it to communicate via websocket with my Vue client.
Right now I can spawn the worker and create the websocket communication, but somehow I can't figure out how to make all the threads in my worker work together. Let me get a bit more specific:
Once my worker is started, I'm trying to create at least two threads: one is the infinite loop allowing me to automate some small actions, and the other is the flask-socketio server that will handle the socket connections. Here is the code of that worker:
customWorker.py
import os
import time
from flask import Flask
from flask_socketio import SocketIO, send, emit
import threading
import json
import eventlet

# custom class allowing me to communicate with my MongoDB
from db_wrap import DbWrap

from binance.client import Client
from binance.exceptions import BinanceAPIException, BinanceWithdrawException, BinanceRequestException
from binance.websockets import BinanceSocketManager
def process_message(msg):
    print('got a websocket message')
    print(msg)


class customWorker:

    def __init__(self, workerId, sleepTime, dbWrap):
        self.workerId = workerId
        self.sleepTime = sleepTime
        self.socketio = None
        self.dbWrap = DbWrap()
        # this retrieves worker configuration from database
        self.config = json.loads(self.dbWrap.get_worker(workerId))
        keys = self.dbWrap.get_worker_keys(workerId)
        self.binanceClient = Client(keys['apiKey'], keys['apiSecret'])

    def handle_message(self, data):
        print('My PID is {} and I received {}'.format(os.getpid(), data))
        send(os.getpid())

    def init_websocket_server(self):
        app = Flask(__name__)
        socketio = SocketIO(app, async_mode='eventlet', logger=True, engineio_logger=True, cors_allowed_origins="*")
        eventlet.monkey_patch()
        socketio.on_event('message', self.handle_message)
        self.socketio = socketio
        self.app = app

    def launch_main_thread(self):
        while True:
            print('My PID is {} and workerId {}'
                  .format(os.getpid(), self.workerId))
            if self.socketio is not None:
                info = self.binanceClient.get_account()
                self.socketio.emit('my_account', info, namespace='/')

    def launch_worker(self):
        self.init_websocket_server()
        self.socketio.start_background_task(self.launch_main_thread)
        self.socketio.run(self.app, host="127.0.0.1", port=8001, debug=True, use_reloader=False)
Once the REST endpoint is called, the worker is spawned by calling the birth_worker() method of a "Broker" object available within my server:
from multiprocessing import Process

from custom_worker import customWorker

# ... (methods of the "Broker" object)

def create_worker(self, workerid, sleepTime, dbWrap):
    worker = customWorker(workerid, sleepTime, dbWrap)
    worker.launch_worker()

def birth_worker(self, workerid, sleepTime, dbWrap):
    p = Process(target=self.create_worker, args=(workerid, sleepTime, dbWrap))
    p.start()
So when this is done, the worker is launched in a separate process that successfully creates the threads and listens for socket connections. But my problem is that I can't use my binanceClient in my main thread. I think it uses threads internally, and the fact that I use eventlet, and in particular the monkey_patch() function, breaks it. When I try to call the binanceClient.get_account() method I get the error AttributeError: module 'select' has no attribute 'poll'.
I'm pretty sure it comes from monkey_patch, because if I call get_account() in the __init__() method of my worker (before patching) it works and I can get the account info. So I guess there is a conflict here that I've been trying to resolve unsuccessfully.
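One thing worth trying, given eventlet's documented requirement that monkey_patch() run as early as possible, before any module that captures references to the standard socket/select modules is imported: patch at the very top of customWorker.py instead of inside init_websocket_server(). A sketch, assuming late patching really is the source of the select.poll error:

# customWorker.py - patch first, before python-binance (or anything else)
# captures the unpatched socket/select modules
import eventlet
eventlet.monkey_patch()

import os
import json
from flask import Flask
from flask_socketio import SocketIO, send
from binance.client import Client  # now sees the patched stdlib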
I've tried using only the thread mode for my socket.io app with async_mode='threading', but then my flask-socketio app won't start and listen for sockets, as the line self.socketio.run(self.app, host="127.0.0.1", port=8001, debug=True, use_reloader=False) blocks everything.
I'm pretty sure I have an architecture problem here and that I shouldn't start my app by calling socketio.run. I've been unable to start it with gunicorn, for example, because I need it to be dynamic and callable from my Python scripts. I've been struggling to find the proper way to do this, and that's why I'm here today.
Could someone please give me a hint on how this is supposed to be achieved? How can I dynamically spawn a subprocess that will manage a socket server thread, an infinite loop thread, and connections with binanceClient? I've been roaming Stack Overflow without success; every piece of advice is welcome, even an architecture reforge.
Here is my environment:
Manjaro Linux 21.0.1
pip-chill:
eventlet==0.30.2
flask-cors==3.0.10
flask-socketio==5.0.1
pillow==8.2.0
pymongo==3.11.3
python-binance==0.7.11
websockets==8.1
I've run into a problem: my bot doesn't work unless I disable the threaded argument. Does anyone have a solution?
Server: Linux Ubuntu 18.04 / CentOS 8 (neither works); the first runs plain uwsgi, the second uwsgi + nginx. The bot works with threading disabled, but not otherwise.
I want my bot to work in threaded mode. I get no errors or anything else when I start my bot without the threaded parameter (it defaults to True):
bot = telebot.TeleBot('my_token')
The line I demonstrate below (I mark it <----):
@app.route(WEBHOOK_URL_PATH, methods=['POST'])
def webhook():
    if flask.request.headers.get('content-type') == 'application/json':
        json_string = flask.request.get_data().decode('utf-8')
        update = telebot.types.Update.de_json(json_string)
        bot.process_new_updates([update,])  # <---- THIS LINE
        bot.send_message(update.message.from_user.id, update.message.text)
        return ''
    else:
        flask.abort(403)
Doesn't work, and because of that none of my other code works, since this method is what dispatches incoming updates to all my other handlers. That does not happen while threaded is True; if I set this parameter to False it works, but not well, and I would like help solving this problem.
I found a description (in a GitHub discussion) of why it doesn't work; my host is VDSINA and I likewise have only one thread, so maybe that's the problem?
My uwsgi configuration:
[uwsgi]
socket = 127.0.0.1:9090
#shared-socket = :9090
#https = =0,webhook_cert.pem,webhook_pkey.pem
wsgi-file = foobar.py
callable = app
master = true
processes = 4
threads = 2
enable-threads = true
single-interpreter = true
But no matter how, or how many times, I change this configuration, I don't reach a solution.
The full code of my bot:
from flask import Flask
import flask
from time import sleep
import logging
import telebot
app = Flask(__name__)


@app.route('/')
def index():
    print("simple output")
    return "<span style='color:red'>I am app 1</span>"


bot = telebot.TeleBot('my_tok')

logger = telebot.logger
telebot.logger.setLevel(logging.INFO)

API_TOKEN = bot.token

WEBHOOK_HOST = 'ip_my_serve'
# nginx # WEBHOOK_PORT = 8443 # 443, 80, 88 or 8443 (port need to be 'open')
WEBHOOK_LISTEN = '0.0.0.0'  # In some VPS you may need to put here the IP addr

WEBHOOK_SSL_CERT = '/etc/ssl/certs/nginx-selfsigned.crt'  # Path to the ssl certificate
WEBHOOK_SSL_PRIV = '/etc/ssl/private/nginx-selfsigned.key'  # Path to the ssl private key

WEBHOOK_URL_BASE = "https://%s" % (WEBHOOK_HOST)
WEBHOOK_URL_PATH = "/%s/" % (API_TOKEN)

bot.remove_webhook()
sleep(1)
bot.set_webhook(url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH,
                certificate=open(WEBHOOK_SSL_CERT, 'r'))


@app.route(WEBHOOK_URL_PATH, methods=['POST'])
def webhook():
    if flask.request.headers.get('content-type') == 'application/json':
        json_string = flask.request.get_data().decode('utf-8')
        update = telebot.types.Update.de_json(json_string)
        bot.process_new_updates([update,])
        bot.send_message(update.message.from_user.id, update.message.text)
        return ''
    else:
        flask.abort(403)


@bot.message_handler(func=lambda message: True, content_types=['text'])
def echo_message(message):
    bot.reply_to(message, message.text)
The solution for me was to switch from uwsgi to CherryPy.
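For completeness, a minimal sketch of serving the same Flask app with CherryPy instead of uwsgi (the module name foobar matches the wsgi-file from the uwsgi config above):

import cherrypy
from foobar import app  # the Flask app defined above

cherrypy.tree.graft(app, '/')  # mount the WSGI app at the root
cherrypy.config.update({
    'server.socket_host': '127.0.0.1',
    'server.socket_port': 9090,
    'server.thread_pool': 8,  # CherryPy's own worker threads
})
cherrypy.engine.start()
cherrypy.engine.block()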
I'm working on my first iOS app that uses push notifications. I have a Python script that lets me send push notifications from my machine, but I'm unable to get this working with the Google App Engine Launcher.
When I run this on GAE I get nothing: no errors and no push notifications. What am I doing wrong? I know the code for sending the actual notification works properly, but I'm not able to duplicate this on Google's servers.
Here is the script I'm trying to run with GAE Launcher:
import os
import cgi
import webapp2
from google.appengine.ext.webapp.util import run_wsgi_app
import ssl
import json
import socket
import struct
import binascii
TOKEN = 'my_app_token'
PAYLOAD = {'aps': {'alert':'Push!','sound':'default'}}
class APNStest(webapp2.RequestHandler):

    def send_push(token, payload):
        # Your certificate file
        cert = 'ck.pem'

        # APNS development server
        apns_address = ('gateway.sandbox.push.apple.com', 2195)

        # Use a socket to connect to APNS over SSL
        s = socket.socket()
        sock = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_SSLv3, certfile=cert)
        sock.connect(apns_address)

        # Generate a notification packet
        token = binascii.unhexlify(token)
        fmt = '!cH32sH{0:d}s'.format(len(payload))
        cmd = '\x00'
        message = struct.pack(fmt, cmd, len(token), token, len(payload), payload)
        sock.write(message)
        sock.close()

    # runs at class-definition time, i.e. once at module import
    send_push(TOKEN, json.dumps(PAYLOAD))


application = webapp2.WSGIApplication([
    ('/apns', APNStest)
], debug=True)


def main():
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
So the solution was very simple, as I expected. I had enabled billing for the project at cloud.google.com, but needed to have billing enabled at appengine.google.com as well; as I understand it, App Engine's outbound sockets support is only available to billing-enabled applications, which is why the code failed silently. A stupid mistake that set me back 2 days.