I'm trying to create roles in an automated way in Google Kubernetes Engine (GKE).
For that, I use the Python client library, but I don't want any dependency on kubectl, kubeconfig, or gcloud.
I use a service account (with a JSON key file from GCP) which has the permissions to create roles in namespaces (it is a cluster admin). When I use the access token given by these commands:
gcloud auth activate-service-account --key-file=credentials.json
gcloud auth print-access-token
It works.
But when I try to generate the token myself, I can create namespaces and other standard resources, but I get this error when it comes to roles:
E kubernetes.client.rest.ApiException: (403)
E Reason: Forbidden
E HTTP response headers: HTTPHeaderDict({'Audit-Id': 'b89b0fc2-9350-456e-9eca-730e7ad2cea1', 'Content-Type': 'application/json', 'Date': 'Tue, 26 Feb 2019 20:35:20 GMT', 'Content-Length': '1346'})
E HTTP response body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"roles.rbac.authorization.k8s.io \"developers\" is forbidden: attempt to grant extra privileges: [{[*] [apps] [statefulsets] [] []} {[*] [apps] [deployments] [] []} {[*] [autoscaling] [horizontalpodautoscalers] [] []} {[*] [] [pods] [] []} {[*] [] [pods/log] [] []} {[*] [] [pods/portforward] [] []} {[*] [] [serviceaccounts] [] []} {[*] [] [containers] [] []} {[*] [] [services] [] []} {[*] [] [secrets] [] []} {[*] [] [configmaps] [] []} {[*] [extensions] [ingressroutes] [] []} {[*] [networking.istio.io] [virtualservices] [] []}] user=\u0026{100701357824788592239 [system:authenticated] map[user-assertion.cloud.google.com:[AKUJVp+KNvF6jw9II+AjCdqjbC0vz[...]hzgs0JWXOyk7oxWHkaXQ==]]} ownerrules=[{[create] [authorization.k8s.io] [selfsubjectaccessreviews selfsubjectrulesreviews] [] []} {[get] [] [] [] [/api /api/* /apis /apis/* /healthz /openapi /openapi/* /swagger-2.0.0.pb-v1 /swagger.json /swaggerapi /swaggerapi/* /version /version/]}] ruleResolutionErrors=[]","reason":"Forbidden","details":{"name":"developers","group":"rbac.authorization.k8s.io","kind":"roles"},"code":403}
I'm using the same service account, so I guess gcloud is doing something more than my script does.
Here is the Python code I use to generate the token:
# Imports used by these methods (added for completeness):
import json
import time
from urllib.parse import urlencode

import jwt  # PyJWT
import requests
from kubernetes import client

def _get_token(self) -> str:
    # See documentation here
    # https://developers.google.com/identity/protocols/OAuth2ServiceAccount
    epoch_time = int(time.time())
    # Generate a claim from the service account file.
    claim = {
        "iss": self._service_account_key["client_email"],
        "scope": "https://www.googleapis.com/auth/cloud-platform",
        "aud": "https://www.googleapis.com/oauth2/v4/token",
        "exp": epoch_time + 3600,
        "iat": epoch_time
    }
    # Sign the claim with JWT.
    assertion = jwt.encode(
        claim,
        self._service_account_key["private_key"],
        algorithm='RS256'
    ).decode()
    # Create the payload for the token API.
    data = urlencode({
        "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
        "assertion": assertion
    })
    # Request the access token.
    result = requests.post(
        url="https://www.googleapis.com/oauth2/v4/token",
        headers={
            "Content-Type": "application/x-www-form-urlencoded"
        },
        data=data
    )
    result.raise_for_status()
    return json.loads(result.text)["access_token"]

def _get_api_client(self) -> client.ApiClient:
    configuration = client.Configuration()
    configuration.host = self._api_url
    configuration.verify_ssl = self._tls_verify
    configuration.api_key = {
        "authorization": f"Bearer {self._get_token()}"
    }
    return client.ApiClient(configuration)
And the function to create the role (which generates the 403 error):
def _create_role(self, namespace: str, body: str):
    api_client = self._get_api_client()
    rbac = client.RbacAuthorizationV1Api(api_client)
    rbac.create_namespaced_role(
        namespace,
        body
    )
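For context, the body passed to create_namespaced_role is typically a V1Role object rather than a plain string; a hypothetical example built with the official client's models (the rule contents here are illustrative only):

role = client.V1Role(
    metadata=client.V1ObjectMeta(name="developers"),
    rules=[
        client.V1PolicyRule(
            api_groups=["apps"],
            resources=["deployments", "statefulsets"],
            verbs=["*"],
        )
    ],
)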
If I short-circuit the _get_token method with the token extracted from gcloud, it works.
I guess it has something to do with the way I create my token (missing scope?), but I can't find any documentation about it.
ANSWER:
Adding a scope does the job! Thanks a lot:
# Generate a claim from the service account file.
claim = {
    "iss": self._service_account_key["client_email"],
    "scope": " ".join([
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/userinfo.email"
    ]),
    "aud": "https://www.googleapis.com/oauth2/v4/token",
    "exp": epoch_time + 3600,
    "iat": epoch_time
}
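For comparison, Google's google-auth library can mint the same scoped token without hand-rolling the JWT exchange; a minimal sketch, assuming the same credentials.json key file:

from google.oauth2 import service_account
import google.auth.transport.requests

# Build scoped credentials from the key file; the library handles the
# JWT signing and the token exchange against the OAuth endpoint.
credentials = service_account.Credentials.from_service_account_file(
    "credentials.json",
    scopes=[
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/userinfo.email",
    ],
)
credentials.refresh(google.auth.transport.requests.Request())
print(credentials.token)  # the bearer access token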
So if you look at the code here for print-access-token you can see that the access token is generally printed without a scope. You see:
try:
    creds = client.GoogleCredentials.get_application_default()
except client.ApplicationDefaultCredentialsError as e:
    log.debug(e, exc_info=True)
    raise c_exc.ToolException(str(e))
if creds.create_scoped_required():
    ...
and then in this file you see:
def create_scoped_required(self):
    """Whether this Credentials object is scopeless.

    create_scoped(scopes) method needs to be called in order to create
    a Credentials object for API calls.
    """
    return False
Apparently, in your code, you are getting the token with the https://www.googleapis.com/auth/cloud-platform scope. You could try removing it or try with the USER_EMAIL_SCOPE since you are specifying: "iss": self._service_account_key["client_email"].
You can always check what gcloud auth activate-service-account --key-file=credentials.json stores under ~/.config, so you know what gcloud auth print-access-token uses. Note that as per this and this, it looks like the store is in SQLite format.
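If you want to poke at that store directly, a rough sketch; the path and table schema here are assumptions based on recent gcloud versions and may differ on your machine:

import os
import sqlite3

# Assumed location of gcloud's credential store (varies by version/platform).
db = os.path.expanduser("~/.config/gcloud/credentials.db")
con = sqlite3.connect(db)
# Assumed schema: a "credentials" table with account_id and value columns,
# where value holds the serialized credential JSON.
for account_id, value in con.execute("SELECT account_id, value FROM credentials"):
    print(account_id)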
Recently I've been having some trouble with the X-Flashbots-Signature header when sending a request to the flashbots goerli endpoint.
My Python code looks like this:
import requests
import json
import secrets
from eth_account import Account, messages
from web3 import Web3
from math import ceil
rpcUrl = GOERLI_RPC_NODE_PROVIDER
web3 = Web3(Web3.HTTPProvider(rpcUrl))
publicKey = ETH_PUBLIC_KEY
privateKey = ETH_PRIVATE_KEY
contractAddress = GOERLI_TEST_CONTRACT # Goerli test contract
data = CONTRACT_DATA # Contract data to execute
signed = []
for _ in range(2):
    nonce = web3.eth.getTransactionCount(publicKey, 'pending')
    checksumAddress = Web3.toChecksumAddress(contractAddress)
    checksumPublic = Web3.toChecksumAddress(publicKey)
    tx = {
        'nonce': nonce,
        'to': checksumAddress,
        'from': checksumPublic,
        'value': 0,
        'gasPrice': web3.toWei(200, 'gwei'),
        'data': data
    }
    gas = web3.eth.estimateGas(tx)
    tx['gas'] = ceil(gas + gas * .1)
    signed_tx = web3.eth.account.signTransaction(tx, privateKey)
    signed.append(Web3.toHex(signed_tx.rawTransaction))

dt = {
    'jsonrpc': '2.0',
    'method': 'eth_sendBundle',
    'params': [
        {
            'txs': [
                signed[0], signed[1]  # Signed txs with web3.eth.account.signTransaction
            ],
            'blockNumber': web3.eth.block_number + 1,
            'minTimestamp': '0x0',
            'maxTimestamp': '0x0',
            'revertingTxHashes': []
        }
    ],
    'id': 1337
}

pvk = secrets.token_hex(32)
pbk = Account.from_key(pvk).address

body = json.dumps(dt)
message = messages.encode_defunct(text=Web3.keccak(text=body).hex())
signature = pbk + ':' + Account.sign_message(message, pvk).signature.hex()

hd = {
    'Content-Type': 'application/json',
    'X-Flashbots-Signature': signature,
}
res = requests.post('https://relay-goerli.flashbots.net/', headers=hd, data=body)
print(res.text)
This code is a modified version of code taken straight from the flashbots docs: https://docs.flashbots.net/flashbots-auction/searchers/advanced/rpc-endpoint/#authentication
Upon running this code I get an internal server error response. At first, I thought the problem might be fixed by replacing text=Web3.keccak(text=body).hex() with hexstr=Web3.keccak(text=body).hex() or primitive=Web3.keccak(text=body), as per the definition of messages.encode_defunct: https://eth-account.readthedocs.io/en/stable/eth_account.html#eth_account.messages.encode_defunct. But after making this replacement, I got the error signer address does not equal expected. This is very confusing, especially because I have recovered the address from the message and signature myself and the public key does match. But whenever I send it to the flashbots endpoint, I am left with this error.
Any ideas would be greatly appreciated.
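For reference, a minimal sketch of the local recovery check described above (same body, pvk, and pbk variables as the snippet; this only verifies the signature client-side and doesn't explain the relay's rejection):

from eth_account import Account, messages
from web3 import Web3

# Recover the address from the signed message and compare it with the
# address derived from the throwaway key.
message = messages.encode_defunct(text=Web3.keccak(text=body).hex())
sig = Account.sign_message(message, pvk).signature.hex()
assert Account.recover_message(message, signature=sig) == pbk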
I created a bot to monitor a post's comments: if there is a new comment, it should automatically send a private reply. But instead I got a Request [400] error.
def monitor_comment():
    print("Bot is monitoring comments")
    time.sleep(5)
    comment_data = graph.get_connections(COMBINED_POST_ID_TO_MONITOR, "comments", order='reverse_chronological')
    commends = []
    for comment in comment_data['data'][:10]:
        commends.append(comment)
    data = commends[0]['id']
    data_converted = str(data)
    #time.sleep(5)
    print(data)
    return data_converted
def private_reply(comment_ids):
    url = "https://graph.facebook.com/v12.0/me/messages?"
    access = {"access_token": Page_Token}
    params = {
        "recipient": {
            "comment_id": comment_ids
        },
        "message": {
            "text": "Testing Private_Replies"
        }
    }
    request = requests.post(url=url, files=access, json=params)
    print(request)
These are the logs:
{"error":{"message":"An active access token must be used to query information about the current user.","type":"OAuthException","code":2500,"fbtrace_id":"AMCiqy1Aw8CyODPlUBE1b98"}}
This is my code to extract player data from an endpoint containing basketball data, for a data science project. NOTE: I changed the name of the actual API key I was given, since it's a subscription, and I changed the username/password for privacy purposes. Using the correct credentials, I don't receive a syntax error, but the status code always returns 401. Since it wasn't accepting the API key, I added my account username, password, and the HTTP authentication header as well, but the status code still returns 401.
In case this is relevant, this is the website's recommendation in the developer portal: **The API key can be passed either as a query parameter or using the following HTTP request header.
Ocp-Apim-Subscription-Key: {key}**
Please let me know what changes I can make to my code. Any help is appreciated.
PS: My code got fragmented while posting this, but it is all in one function.
def getData():
    user_name = "name#gmail.com"
    api_endpoint = "https://api.sportsdata.io/v3/nba/stats/json/PlayerGameStatsByDate/2020-FEB7"
    api_key = "a45;lkf"
    password = "ksaljd"
    header = "Ocp-Apim-Subscription-Key"
    PARAMS = {'user': user_name, 'pass': password, 'header': header, 'key': api_key}
    response = requests.get(url=api_endpoint, data=PARAMS)
    print(response.status_code)
    file = open("Data.csv", "w")
    file.write(response.text)
    file.close()
def _get_auth_headers() -> dict:
    return {
        'Content-Type': 'application/json',
        'Ocp-Apim-Subscription-Key': "`Insert key here`"
    }

api_endpoint = "https://api.sportsdata.io/v3/nba/stats/json/PlayerGameStatsByDate/2020-FEB7"

PARAMS = {
    # Your params here
}

response = requests.get(
    api_endpoint,
    headers=_get_auth_headers(),
    params=PARAMS
)
Instead of just a string, you need to pass a dict in the headers parameter; an auth parameter also exists, so you can use it as follows:
def getData():
    [...]
    header = {
        "Ocp-Apim-Subscription-Key": api_key
    }
    [...]
    response = requests.get(url=api_endpoint, data=PARAMS, headers=header, auth=(user_name, password))
According to the API documentation, you don't need to provide email and password. You only need to add your API key to the header:
import requests
r = requests.get(url='https://api.sportsdata.io/v3/nba/stats/json/PlayerGameStatsByDate/2020-FEB7', headers={'Ocp-Apim-Subscription-Key': 'API_KEY'})
print(r.json())
Output:
[{
    'StatID': 768904,
    'TeamID': 25,
    'PlayerID': 20000788,
    'SeasonType': 1,
    'Season': 2020,
    'Name': 'Tim Hardaway Jr.',
    'Team': 'DAL',
    'Position': 'SF',
    'Started': 1,
    'FanDuelSalary': 7183,
    'DraftKingsSalary': 7623,
    'FantasyDataSalary': 7623,
    ...
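Per the portal quote in the question, the key can also travel as a query parameter instead of the header; a sketch assuming the parameter is literally named key (check the developer portal for the exact name):

import requests

url = "https://api.sportsdata.io/v3/nba/stats/json/PlayerGameStatsByDate/2020-FEB7"
# The parameter name "key" is an assumption based on the portal's wording;
# verify it in the developer portal before relying on it.
r = requests.get(url, params={"key": "API_KEY"})
print(r.status_code)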
I am trying to get a Python script to say whether a Twitch channel is live, but haven't been able to do it; any and all help would be appreciated.
Here are the docs I've been able to find:
https://dev.twitch.tv/docs/api/guide
This is what I have at the moment, but I keep getting "'set' object has no attribute 'items'". This is modified code from "Is There Any Way To Check if a Twitch Stream Is Live Using Python?"; however, it is now outdated because of the new API.
import requests

def checkUser():
    API_HEADERS = {
        'Client-ID : [client id here from dev portal]',
        'Accept : application/vnd.twitchtv.v5+json',
    }
    url = "https://api.twitch.tv/helix/streams/[streamer here]"
    req = requests.Session().get(url, headers=API_HEADERS)
    jsondata = req.json()
    print(jsondata)

checkUser()
The answer to your problem of "'set' object has no attribute 'items'" is just a simple typo. It should be
API_HEADERS = {
    'Client-ID': '[client id here from dev portal]',
    'Accept': 'application/vnd.twitchtv.v5+json'
}
Notice how the colons aren't part of the strings now.
And to answer your overarching question of how to tell if a channel is online you can look at this sample code I made.
import requests

URL = 'https://api.twitch.tv/helix/streams?user_login=[Channel_Name_Here]'
authURL = 'https://id.twitch.tv/oauth2/token'
Client_ID = [Your_client_ID]
Secret = [Your Client_Secret]

AutParams = {'client_id': Client_ID,
             'client_secret': Secret,
             'grant_type': 'client_credentials'
             }

def Check():
    AutCall = requests.post(url=authURL, params=AutParams)
    access_token = AutCall.json()['access_token']
    head = {
        'Client-ID': Client_ID,
        'Authorization': "Bearer " + access_token
    }
    r = requests.get(URL, headers=head).json()['data']
    if r:
        r = r[0]
        if r['type'] == 'live':
            return True
        else:
            return False
    else:
        return False

print(Check())
I'm using Python to access an Elasticsearch cluster. Now I want to back up my index by using a snapshot.
The most difficult thing is that the python-elasticsearch docs just give an API description; there is no example showing how to create a snapshot. I tried some parameters, but failed. Can anyone give an Elasticsearch snapshot example using Python? The following is my code:
from elasticsearch import Elasticsearch

es = Elasticsearch()

snapshot_body = {
    "type": "url",
    "settings": {
        "url": "http://download.elasticsearch.org/definitiveguide/sigterms_demo/"
    }
}
body = {"snapshot": snapshot_body}
es.snapshot.create_repository(repository='test', body=body)
Your repository creation is almost correct; you don't need the line body = {"snapshot": snapshot_body}. Simply create your repository like this:
es.snapshot.create_repository(repository='test', body=snapshot_body)
Now in order to create a snapshot, all you have to do is this:
es.snapshot.create(repository='test', snapshot='my_snapshot')
If you want to store only a few indices and not all, you can also provide a body like this:
index_body = {
    "indices": "index_1,index_2"
}
es.snapshot.create(repository='test', snapshot='my_snapshot', body=index_body)
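If you want the call to block until the snapshot finishes so you can check its state, a small sketch using the same es client as above (wait_for_completion is a standard query parameter of the create-snapshot API):

# Block until the snapshot completes, then inspect its state.
res = es.snapshot.create(
    repository='test',
    snapshot='my_snapshot',
    body=index_body,
    wait_for_completion=True
)
print(res['snapshot']['state'])  # e.g. "SUCCESS"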
Save the following sample Python code as a Python file, such as register-repo.py. The client requires the AWS SDK for Python (Boto3) and the requests and requests-aws4auth packages. The client contains commented-out examples for other snapshot operations.
import boto3
import requests
from requests_aws4auth import AWS4Auth
host = '' # include https:// and trailing /
region = '' # e.g. us-west-1
service = 'es'
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
# Register repository
path = '_snapshot/my-snapshot-repo-name' # the Elasticsearch API endpoint
url = host + path
payload = {
    "type": "s3",
    "settings": {
        "bucket": "s3-bucket-name",
        # "endpoint": "s3.amazonaws.com", # for us-east-1
        "region": "us-west-1", # for all other regions
        "role_arn": "arn:aws:iam::123456789012:role/TheSnapshotRole"
    }
}
headers = {"Content-Type": "application/json"}
r = requests.put(url, auth=awsauth, json=payload, headers=headers)
print(r.status_code)
print(r.text)
# # Take snapshot
#
# path = '_snapshot/my-snapshot-repo/my-snapshot'
# url = host + path
#
# r = requests.put(url, auth=awsauth)
#
# print(r.text)
#
# # Delete index
#
# path = 'my-index'
# url = host + path
#
# r = requests.delete(url, auth=awsauth)
#
# print(r.text)
#
# # Restore snapshot (all indices except Kibana and fine-grained access control)
#
# path = '_snapshot/my-snapshot-repo/my-snapshot/_restore'
# url = host + path
#
# payload = {
#     "indices": "-.kibana*,-.opendistro_security",
#     "include_global_state": False  # Python bool, not JSON "false"
# }
#
# headers = {"Content-Type": "application/json"}
#
# r = requests.post(url, auth=awsauth, json=payload, headers=headers)
#
# # Restore snapshot (one index)
#
# path = '_snapshot/my-snapshot-repo/my-snapshot/_restore'
# url = host + path
#
# payload = {"indices": "my-index"}
#
# headers = {"Content-Type": "application/json"}
#
# r = requests.post(url, auth=awsauth, json=payload, headers=headers)
#
# print(r.text)
Important: don't use this sample as-is in us-east-1. If the S3 bucket is in the us-east-1 region, you must use "endpoint": "s3.amazonaws.com" instead of "region": "us-east-1".
To enable server-side encryption with S3-managed keys for the snapshot repository, add "server_side_encryption": true to the "settings" JSON.
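For example, the repository-registration payload from above with that flag added (same assumed bucket and role names):

payload = {
    "type": "s3",
    "settings": {
        "bucket": "s3-bucket-name",
        "region": "us-west-1",
        "role_arn": "arn:aws:iam::123456789012:role/TheSnapshotRole",
        "server_side_encryption": True  # S3-managed keys (SSE-S3)
    }
}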
https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-managedomains-snapshots.html#es-managedomains-snapshot-registerdirectory