Python: How to pass header and data into gRPC API code

Hi folks, I am new to gRPC. I am currently building a client that can send requests to the server. I have hit a bit of a roadblock and can't figure out how to continue. I need to translate the following grpcurl call into Python:
grpcurl -H "Authorization: Bearer aAbcdEefgHh123poIjuYHtgbNmCxxx" -H "x-team-mesh: TEST" -d '{"context":{"project_id":"this-is-my-project-id"}}' api.app.co.com:443 team.app.infrastructure.v3.App.envs_pb2_grpc.ListEnvs ../protobuf/app-api/team/app/infrastructure/v3/envs.proto
I have looked at examples such as these, but they are not quite the same structure as my code. I cannot figure out how to pass in the header and data body when instantiating a channel on the client side.
How and where do I place the -H (header) and -d (body?) parts of the gRPCurl call?
import grpc
import team.app.infrastructure.v3.branches_pb2 as pb2
import team.app.infrastructure.v3.branches_pb2_grpc as pb2_grpc
import google.api
import sys
from okta.token_auth import OktaToken


class AppAuth(grpc.AuthMetadataPlugin):
    def __init__(self, key):
        self._key = key

    def __call__(self, context, callback):
        callback((('rpc-auth-header', self._key),), None)


class AppClient(object):
    def __init__(self):
        self.server = 'api.app.co.com:443'
        with open('auth/certs/DigiCertClientRSA4096RootG5.crt.pem', 'rb') as fh:
            rootCert = fh.read()
        new_token = OktaToken()
        metadata = []  # built here but never attached to a call
        metadata.append(('context', 'project_id', 'this-is-my-project-id'))
        metadata.append(('x-team-mesh', 'TEST'))
        metadata.append(('authorization', 'Bearer ' + new_token.okta_token_request()))
        self.channel = grpc.secure_channel(
            self.server,
            grpc.composite_channel_credentials(
                # one channel credential (TLS with the root cert) plus call credentials
                grpc.ssl_channel_credentials(rootCert),
                grpc.metadata_call_credentials(
                    AppAuth(new_token.okta_token_request())
                )
            )
        )
        try:
            grpc.channel_ready_future(self.channel).result(timeout=3)
        except grpc.FutureTimeoutError:
            sys.exit('Error connecting to server')
        else:
            self.stub = pb2_grpc.EnvsStub(self.channel)

    def get_envs(self):
        request = pb2.ListEnvsRequest()
        response = self.stub.ListEnvs(request)
        print(f'{response}')
        return response


if __name__ == '__main__':
    client = AppClient()
    result = client.get_envs()
I suspect that this line may be where to make the necessary changes based on what I have read so far, but all my attempts to add metadata and a payload have been unsuccessful: response = self.stub.ListEnvs(request).
Since the curl call works fine and my Python code does not, I'm pretty sure I am not passing the right pieces in the gRPC API call:
custom error:
debug_error_string = "UNKNOWN:Error received from peer ipv4:127.0.0.1:443 {grpc_message:"RBAC: access denied", grpc_status:7, created_time:"2022-09-12T17:59:22.654641-07:00"}"
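(For reference: gRPC Python stubs accept a per-call metadata keyword argument, and the -d JSON corresponds to fields on the request message rather than metadata. A minimal sketch — the Context message name and its project_id field are assumptions read off the -d payload, so check envs_pb2 for the real names:)

    metadata = [
        ('authorization', 'Bearer ' + new_token.okta_token_request()),
        ('x-team-mesh', 'TEST'),
    ]
    # -d '{"context":{"project_id":...}}' maps to request message fields
    request = pb2.ListEnvsRequest(
        context=pb2.Context(project_id='this-is-my-project-id'),
    )
    response = self.stub.ListEnvs(request, metadata=metadata)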
Help is greatly appreciated

Related

Trying to send Python HTTPConnection content after accepting 100-continue header

I've been trying to debug a Python script I've inherited. It's trying to POST a CSV to a website via HTTPLib. The problem, as far as I can tell, is that HTTPLib doesn't handle receiving a 100-continue response, as per python http client stuck on 100 continue. Similarly to that post, this "Just Works" via Curl, but for various reasons we need this to run from a Python script.
I've tried to employ the work-around as detailed in an answer on that post, but I can't find a way to use that to submit the CSV after accepting the 100-continue response.
The general flow needs to be like this:
-> establish connection
-> send data including "expect: 100-continue" header, but not including the JSON body yet
<- receive "100-continue"
-> using the same connection, send the JSON body of the request
<- receive the 200 OK message, in a JSON response with other information
Here's the code in its current state, with the 10+ commented-out remnants of my other attempted workarounds removed:
#!/usr/bin/env python
import os
import ssl
import http.client
import binascii
import logging
import json


# classes taken from https://stackoverflow.com/questions/38084993/python-http-client-stuck-on-100-continue
class ContinueHTTPResponse(http.client.HTTPResponse):
    def _read_status(self, *args, **kwargs):
        version, status, reason = super()._read_status(*args, **kwargs)
        if status == 100:
            status = 199
        return version, status, reason

    def begin(self, *args, **kwargs):
        super().begin(*args, **kwargs)
        if self.status == 199:
            self.status = 100

    def _check_close(self, *args, **kwargs):
        return super()._check_close(*args, **kwargs) and self.status != 100


class ContinueHTTPSConnection(http.client.HTTPSConnection):
    response_class = ContinueHTTPResponse

    def getresponse(self, *args, **kwargs):
        logging.debug('running getresponse')
        response = super().getresponse(*args, **kwargs)
        if response.status == 100:
            setattr(self, '_HTTPConnection__state', http.client._CS_REQ_SENT)
            setattr(self, '_HTTPConnection__response', None)
        return response


def uploadTradeIngest(ingestFile, certFile, certPass, host, port, url):
    boundary = binascii.hexlify(os.urandom(16)).decode("ascii")
    headers = {
        "accept": "application/json",
        "Content-Type": "multipart/form-data; boundary=%s" % boundary,
        "Expect": "100-continue",
    }
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.load_cert_chain(certfile=certFile, password=certPass)
    connection = ContinueHTTPSConnection(
        host, port=port, context=context)
    with open(ingestFile, "r") as fh:
        ingest = fh.read()
    ## Create form-data boundary
    ingest = "--%s\r\nContent-Disposition: form-data; " % boundary + \
        "name=\"file\"; filename=\"%s\"" % os.path.basename(ingestFile) + \
        "\r\n\r\n%s\r\n--%s--\r\n" % (ingest, boundary)
    print("pre-request")
    connection.request(
        method="POST", url=url, headers=headers)
    print("post-request")
    #resp = connection.getresponse()
    resp = connection.getresponse()
    if resp.status == http.client.CONTINUE:
        resp.read()
        print("pre-send ingest")
        ingest = json.dumps(ingest)
        ingest = ingest.encode()
        print(ingest)
        connection.send(ingest)
        print("post-send ingest")
        resp = connection.getresponse()
    print("response1")
    print(resp)
    print("response2")
    print(resp.read())
    print("response3")
    return resp.read()
But this simply returns a 400 "Bad Request" response. The problem, I think, lies with the formatting and type of the ingest variable. If I don't run it through json.dumps() and encode(), the HTTPConnection.send() method rejects it:
ERROR: Got error: memoryview: a bytes-like object is required, not 'str'
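(For what it's worth, that error just means send() needs bytes rather than str; encoding the multipart body directly, and declaring its length up front, avoids the json.dumps() double-wrapping — a sketch:)

    body = ingest.encode("utf-8")                 # raw bytes; no JSON wrapping
    headers["Content-Length"] = str(len(body))    # declare the length before the 100-continue
    connection.request(method="POST", url=url, headers=headers)
    resp = connection.getresponse()
    if resp.status == http.client.CONTINUE:
        resp.read()
        connection.send(body)                     # send the multipart payload as-is
        resp = connection.getresponse()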
I had a look at using the Requests library instead, but I couldn't get it to use my local certificate bundle to accept the site's certificate. I have a full chain with an encrypted key, which I did decrypt, but I still ran into constant SSL_VERIFY errors from Requests. If you have a suggestion to solve my current problem with Requests, I'm happy to go down that path too.
How can I use HTTPLib or Requests (or any other library) to achieve what I need?
In case anyone comes across this problem in the future: I ended up working around it with a bit of a kludge. HTTPLib, Requests, and urllib3 are all known not to handle the 100-continue header, so I just wrote a Python wrapper around curl via the subprocess.run() function, like this:
def sendReq(upFile):
    sendFile = f"file=@{upFile}"  # '@' tells curl -F to read the file's contents
    completed = subprocess.run([
        curlPath,
        '--cert',
        args.cert,
        '--key',
        args.key,
        targetHost,
        '-H',
        'accept: application/json',
        '-H',
        'Content-Type: multipart/form-data',
        '-H',
        'Expect: 100-continue',
        '-F',
        sendFile,
        '-s'
    ], stdout=subprocess.PIPE, universal_newlines=True)
    return completed.stdout
The only issue I had with this was that it fails if Curl was built against the NSS libraries, which I resolved by including a statically-built Curl binary with the package, the path to which is contained in the curlPath variable in the code. I obtained this binary from this Github repo.
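(In that snippet, curlPath, args.cert, args.key, and targetHost are defined elsewhere; a minimal sketch of the surrounding setup, with the paths and host as placeholder assumptions:)

    import argparse
    import subprocess

    curlPath = './bin/curl-static'  # statically-built curl shipped with the package (placeholder path)
    targetHost = 'https://upload.example.com/ingest'  # placeholder endpoint

    parser = argparse.ArgumentParser()
    parser.add_argument('--cert', required=True)  # client certificate (full chain)
    parser.add_argument('--key', required=True)   # decrypted private key
    args = parser.parse_args()

    print(sendReq('/path/to/trades.csv'))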

How to make each client get their state if there is class instance in grpc-python server side?

I want to use grpc-python in the following scenario, but I don't know how to realize it.
The scenario is that, in the Python server, a class instance calculates and updates its state, then sends that state to the corresponding client; on the client side, more than one client needs to communicate with the server, each getting its own result without interference from the others.
Specifically, suppose there is a class with initial value self.i = 0; each time a client calls the class's update function, it does self.i = self.i + 1 and returns self.i. Two clients may call the update function simultaneously, e.g. client1 calls update for the third time while client2 calls it for the first time.
I think this may be solved by creating a thread for each client to avoid conflicts: if a new client calls, a new thread is created; if an existing client calls, the existing thread is used. But I don't know how to realize it.
Hope you can help me. Thanks in advance.
I think I solved this problem by myself. If you have any better solutions, you can post them here.
I edited the helloworld example from the grpc-python introduction to explain my aim.
For helloworld.proto:
syntax = "proto3";

option java_multiple_files = true;
option java_package = "io.grpc.examples.helloworld";
option java_outer_classname = "HelloWorldProto";
option objc_class_prefix = "HLW";

package helloworld;

// The greeting service definition.
service Greeter {
  // Sends a greeting
  rpc SayHello (HelloRequest) returns (HelloReply) {}
  rpc Unsubscribe (HelloRequest) returns (HelloReply) {}
}

// The request message containing the user's name.
message HelloRequest {
  string name = 1;
}

// The response message containing the greetings
message HelloReply {
  string message = 1;
}
I added the Unsubscribe function to allow one specific client to disconnect from the server.
In hello_server.py
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
import threading
from threading import RLock
import time
from concurrent import futures
import logging


class Calcuate:
    def __init__(self):
        self.i = 0

    def add(self):
        self.i += 1
        return self.i


class PeerSet(object):
    def __init__(self):
        self._peers_lock = RLock()
        self._peers = {}
        self.instances = {}

    def connect(self, peer):
        with self._peers_lock:
            if peer not in self._peers:
                print("Peer {} connecting".format(peer))
                self._peers[peer] = 1
                a = Calcuate()
                self.instances[peer] = a
                output = a.add()
                return output
            else:
                self._peers[peer] += 1
                a = self.instances[peer]
                output = a.add()
                return output

    def disconnect(self, peer):
        print("Peer {} disconnecting".format(peer))
        with self._peers_lock:
            if peer not in self._peers:
                raise RuntimeError("Tried to disconnect peer '{}' but it was never connected.".format(peer))
            del self._peers[peer]
            del self.instances[peer]

    def peers(self):
        with self._peers_lock:
            return self._peers.keys()


class Greeter(helloworld_pb2_grpc.GreeterServicer):
    def __init__(self):
        self._peer_set = PeerSet()

    def _record_peer(self, context):
        return self._peer_set.connect(context.peer())

    def SayHello(self, request, context):
        output = self._record_peer(context)
        print("[thread {}] Peers: {}, output: {}".format(threading.currentThread().ident, self._peer_set.peers(), output))
        time.sleep(1)
        return helloworld_pb2.HelloReply(message='Hello, {}, {}!'.format(request.name, output))

    def Unsubscribe(self, request, context):
        self._peer_set.disconnect(context.peer())
        return helloworld_pb2.HelloReply(message='{} disconnected!'.format(context.peer()))


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()


if __name__ == '__main__':
    logging.basicConfig()
    serve()
The use of context.peer() is adapted from Richard Belleville's answer in this post. You can change the add() function to any other function that updates the instance's state.
In hello_client.py
from __future__ import print_function
import logging

import grpc
import helloworld_pb2
import helloworld_pb2_grpc


def run():
    # NOTE(gRPC Python Team): .close() is possible on a channel and should be
    # used in circumstances in which the with statement does not fit the needs
    # of the code.
    with grpc.insecure_channel('localhost:50051') as channel:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
        print("Greeter client received: " + response.message)
        response = stub.SayHello(helloworld_pb2.HelloRequest(name='Tom'))
        print("Greeter client received: " + response.message)
        response = stub.SayHello(helloworld_pb2.HelloRequest(name='Jerry'))
        print("Greeter client received: " + response.message)
        stub.Unsubscribe(helloworld_pb2.HelloRequest(name="end"))


if __name__ == '__main__':
    logging.basicConfig()
    run()
If we run several hello_client.py processes simultaneously, the server can distinguish the different clients and send the correct corresponding info to each of them.
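(For illustration, each client process should see its own counter advance independently of the others, e.g.:)

    Greeter client received: Hello, you, 1!
    Greeter client received: Hello, Tom, 2!
    Greeter client received: Hello, Jerry, 3!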

requests - Gateway Timeout

This is a test script to request data from the Rovi API, provided by the API itself.
test.py
import requests
import time
import hashlib
import urllib


class AllMusicGuide(object):
    api_url = 'http://api.rovicorp.com/data/v1.1/descriptor/musicmoods'
    key = 'my key'
    secret = 'secret'

    def _sig(self):
        timestamp = int(time.time())
        m = hashlib.md5()
        m.update(self.key)
        m.update(self.secret)
        m.update(str(timestamp))
        return m.hexdigest()

    def get(self, resource, params=None):
        """Take a dict of params, and return what we get from the api"""
        if not params:
            params = {}
        params = urllib.urlencode(params)
        sig = self._sig()
        url = "%s/%s?apikey=%s&sig=%s&%s" % (self.api_url, resource, self.key, sig, params)
        resp = requests.get(url)
        if resp.status_code != 200:
            # THROW APPROPRIATE ERROR
            print ('unknown err')
        return resp.content
From another script I import the module:
from roviclient.test import AllMusicGuide
and create an instance of the class inside a mood function:
def mood():
    test = AllMusicGuide()
    print (test.get('[moodids=moodids]'))
According to the documentation, the following is the request syntax:
descriptor/musicmoods?apikey=apikey&sig=sig [&moodids=moodids] [&format=format] [&country=country] [&language=language]
but running the script I get the following error:
unknown err
<h1>Gateway Timeout</h1>:
what is wrong?
"504, try once more. 502, it went through."
Your code is fine; this is a network issue. "Gateway Timeout" is a 504. The intermediate host handling your request was unable to complete it: it made its own request to another server on your behalf in order to handle yours, but that request took too long and timed out. Usually this is because of network congestion in the backend; if you try a few more times, does it sometimes work?
In any case, I would talk to your network administrator. There could be any number of reasons for this and they should be able to help fix it for you.
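(If retries do help, a simple workaround is to retry transient gateway errors with a short backoff — a sketch using requests:)

    import time
    import requests

    def get_with_retries(url, attempts=3, backoff=2):
        """Retry 502/504 gateway errors a few times before giving up."""
        for attempt in range(attempts):
            resp = requests.get(url)
            if resp.status_code not in (502, 504):
                return resp
            time.sleep(backoff * (attempt + 1))  # linear backoff between tries
        return resp  # still failing; return the last response for inspection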

Twisted: Advice on using the txredisapi library required

Below I have provided a code example which simply responds to an HTTP GET request with data from Redis:
Request: http://example.com:8888/?auth=zefDWDd5mS7mcbfoDbDDf4eVAKb1nlDmzLwcmhDOeUc
Response: get: u'"True"'
The purpose of this code is to serve as a REST server (that's why I'm using lazyConnectionPool) responding to requests and using data from Redis (read/write).
What I need to do:
Run multiple requests to Redis inside render_GET of the IndexHandler (like GET, HMGET, SET, etc)
Run multiple requests in a transaction inside render_GET of the IndexHandler
I've tried multiple ways to do this (including the examples from the txredisapi library), but due to lack of experience I have failed. Could you please advise on questions 1) and 2)?
Thanks in advance.
import txredisapi as redis

from twisted.application import internet
from twisted.application import service
from twisted.web import server
from twisted.web.resource import Resource


class Root(Resource):
    isLeaf = False


class BaseHandler(object):
    isLeaf = True

    def __init__(self, db):
        self.db = db
        Resource.__init__(self)


class IndexHandler(BaseHandler, Resource):
    def _success(self, value, request, message):
        request.write(message % repr(value))
        request.finish()

    def _failure(self, error, request, message):
        request.write(message % str(error))
        request.finish()

    def render_GET(self, request):
        try:
            auth = request.args["auth"][0]
        except:
            request.setResponseCode(404, "not found")
            return ""
        d = self.db.hget(auth, 'user_add')
        d.addCallback(self._success, request, "get: %s\n")
        d.addErrback(self._failure, request, "get failed: %s\n")
        return server.NOT_DONE_YET


# Redis connection parameters
REDIS_HOST = '10.10.0.110'
REDIS_PORT = 6379
REDIS_DB = 1
REDIS_POOL_SIZE = 1
REDIS_RECONNECT = True

# redis connection
_db = redis.lazyConnectionPool(REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_POOL_SIZE)

# http resources
root = Root()
root.putChild("", IndexHandler(_db))

application = service.Application("web")
srv = internet.TCPServer(8888, server.Site(root), interface="127.0.0.1")
srv.setServiceParent(application)
Regarding the first question:
There are a few ways to generalize to making multiple database requests in a single HTTP request.
For example, you can issue several requests at once:
d1 = self.db.hget(auth, 'user_add')
d2 = self.db.get('foo')
Then you can get a callback to trigger when all of these simultaneous requests are finished (see twisted.internet.defer.DeferredList).
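(A sketch of that pattern, inside render_GET — DeferredList fires once every deferred in the list has a result:)

    from twisted.internet import defer

    d1 = self.db.hget(auth, 'user_add')
    d2 = self.db.get('foo')

    def _both_done(results):
        # results is a list of (success, value) pairs, in the same order as the deferreds
        (ok1, user_add), (ok2, foo) = results
        request.write("get: %s, %s\n" % (repr(user_add), repr(foo)))
        request.finish()

    defer.DeferredList([d1, d2], consumeErrors=True).addCallback(_both_done)
    return server.NOT_DONE_YET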
Or you can use inlineCallbacks if you need sequential requests. For example:
@defer.inlineCallbacks
def do_redis(self):
    foo = yield self.db.get('somekey')
    bar = yield self.db.hget(foo, 'bar')  # Get 'bar' field of hash foo
But you will need to read more about combining inlineCallbacks with twisted.web (there are SO questions on that topic you should look up).
Regarding question 2:
Transactions are really ugly to do without using inlineCallbacks. There is an example on the txredisapi homepage that shows a transaction using inlineCallbacks.
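(A sketch of what that pattern looks like; the multi()/commit() names follow the transaction example in the txredisapi README, so treat them as assumptions and check them against the version you have installed:)

    from twisted.internet import defer

    @defer.inlineCallbacks
    def update_user(db, auth):
        # Queue commands in a MULTI/EXEC transaction; commit() executes them atomically.
        t = yield db.multi()
        yield t.hset(auth, 'user_add', 'True')
        yield t.hget(auth, 'user_add')
        results = yield t.commit()  # one reply per queued command
        defer.returnValue(results)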

Python: Twitter Streaming and pycurl problems

I am having problems with pycurl in conjunction with Twitter's Streaming API filter stream.
When I run the code below, it seems to barf on the perform call. I know this because I placed print statements before and after the perform call. I am using Python 2.6.1 and I am on a Mac, if that matters.
#!/usr/bin/python
print "Content-type: text/html"
print

import pycurl, json, urllib
import traceback  # needed for traceback.print_exc() below

STREAM_URL = "http://stream.twitter.com/1/statuses/filter.json?follow=1&count=100"
USER = "user"
PASS = "password"

print "<html><head></head><body>"


class Client:
    def __init__(self):
        self.buffer = ""
        self.conn = pycurl.Curl()
        self.conn.setopt(pycurl.POST, 1)
        self.conn.setopt(pycurl.USERPWD, "%s:%s" % (USER, PASS))
        self.conn.setopt(pycurl.URL, STREAM_URL)
        self.conn.setopt(pycurl.WRITEFUNCTION, self.on_receive)
        try:
            self.conn.perform()
            self.conn.close()
        except BaseException:
            traceback.print_exc()

    def on_receive(self, data):
        self.buffer += data
        if data.endswith("\r\n") and self.buffer.strip():
            content = json.loads(self.buffer)
            self.buffer = ""
            print content
            if "text" in content:
                print u"{0[user][name]}: {0[text]}".format(content)


client = Client()
print "</body></html>"
First, try turning on verbosity to help debug:
self.conn.setopt(pycurl.VERBOSE ,1)
It looks like you aren't setting the basic auth mode:
self.conn.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
Also, according to the documentation, you need to POST the parameters to the API rather than passing them as GET query parameters:
data = dict( track='stack overflow' )
self.conn.setopt(pycurl.POSTFIELDS,urlencode(data))
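(Putting the three fixes together, the constructor setup might look like this — a sketch; note that POSTFIELDS carries the follow/count parameters, so they come off the URL:)

    from urllib import urlencode  # Python 2; urllib.parse.urlencode on Python 3

    STREAM_URL = "http://stream.twitter.com/1/statuses/filter.json"

    self.conn = pycurl.Curl()
    self.conn.setopt(pycurl.VERBOSE, 1)                       # debug output
    self.conn.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)  # enable basic auth
    self.conn.setopt(pycurl.USERPWD, "%s:%s" % (USER, PASS))
    self.conn.setopt(pycurl.URL, STREAM_URL)
    self.conn.setopt(pycurl.POSTFIELDS, urlencode(dict(follow="1", count="100")))
    self.conn.setopt(pycurl.WRITEFUNCTION, self.on_receive)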
You are trying to use basic authentication.
"Basic Authentication sends user credentials in the header of the HTTP request. This makes it easy to use, but insecure. OAuth is the Twitter preferred method of authentication moving forward - come August 2010, we'll be turning off Basic Auth from the API." --Authentication, Twitter
