Related
I'm trying to read data from MySQL database (located in GCP) and writing the same in GCP Bucket. I want to use Python SDK for the same. Below is the code I've written.
from __future__ import generators
import apache_beam as beam
import time
import jaydebeapi
import os
import argparse
from google.cloud import bigquery
import logging
import sys
from string import lower
from google.cloud import storage as gstorage
import pandas as pd
from oauth2client.client import GoogleCredentials
print("Import Successful")
class setenv(beam.DoFn):
def process(self,context):
os.system('gsutil cp gs://<MY_BUCKET>/mysql-connector-java-5.1.45.jar /tmp/')
logging.info('Enviornment Variable set.')
class readfromdatabase(beam.DoFn):
def process(self, context):
logging.info('inside readfromdatabase')
database_user='root'
database_password='<DB_PASSWORD>'
database_host='<DB_HOST>'
database_port='<DB_PORT>'
database_db='<DB_NAME>'
logging.info('reached readfromdatabase 1')
jclassname = "com.mysql.jdbc.Driver"
url = ("jdbc:mysql://{0}:{1}/{2}?user={3}&password={4}".format(database_host,database_port, database_db,database_user,database_password))
jars = ["/tmp/mysql-connector-java-5.1.45.jar"]
libs = None
cnx = jaydebeapi.connect(jclassname, url, jars=jars,
libs=libs)
logging.info('Connection Successful..')
cursor = cnx.cursor()
logging.info('Reading Sql Query from the file...')
query = 'select * from employees.employee_details'
logging.info('Query is %s',query)
logging.info('Query submitted to Database..')
for chunk in pd.read_sql(query, cnx, coerce_float=True, params=None, parse_dates=None, columns=None,chunksize=500000):
chunk.apply(lambda x: x.replace(u'\r', u' ').replace(u'\n', u' ') if isinstance(x, str) or isinstance(x, unicode) else x).WriteToText('gs://first-bucket-arpan/output2/')
logging.info("Load completed...")
return list("1")
def run():
try:
pcoll = beam.Pipeline()
dummy= pcoll | 'Initializing..' >> beam.Create(['1'])
logging.info('inside run 1')
dummy_env = dummy | 'Setting up Instance..' >> beam.ParDo(setenv())
logging.info('inside run 2')
readrecords=(dummy_env | 'Processing' >> beam.ParDo(readfromdatabase()))
logging.info('inside run 3')
p=pcoll.run()
logging.info('inside run 4')
p.wait_until_finish()
except:
logging.exception('Failed to launch datapipeline')
raise
def main():
logging.getLogger().setLevel(logging.INFO)
GOOGLE_APPLICATION_CREDENTIALS="gs://<MY_BUCKET>/My First Project-800a97e1fe65.json"
run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main()
I'm running using below command:
python /home/aarpan_roy/readfromdatabase.py --region $REGION --runner DataflowRunner --project $PROJECT --temp_location gs://$BUCKET/tmp
While running, betting below output and no Dataflow job is being created:
INFO:root:inside run 2
INFO:root:inside run 3
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function annotate_downstream_side_inputs at 0x7fc1ea8a27d0> ====================
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function fix_side_input_pcoll_coders at 0x7fc1ea8a28c0> ====================
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function lift_combiners at 0x7fc1ea8a2938> ====================
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function expand_sdf at 0x7fc1ea8a29b0> ====================
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function expand_gbk at 0x7fc1ea8a2a28> ====================
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function sink_flattens at 0x7fc1ea8a2b18> ====================
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function greedily_fuse at 0x7fc1ea8a2b90> ====================
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function read_to_impulse at 0x7fc1ea8a2c08> ====================
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function impulse_to_input at 0x7fc1ea8a2c80> ====================
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function sort_stages at 0x7fc1ea8a2e60> ====================
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function setup_timer_mapping at 0x7fc1ea8a2de8> ====================
INFO:apache_beam.runners.portability.fn_api_runner.translations:==================== <function populate_data_channel_coders at 0x7fc1ea8a2ed8> ====================
INFO:apache_beam.runners.worker.statecache:Creating state cache with size 100
INFO:apache_beam.runners.portability.fn_api_runner.worker_handlers:Created Worker handler <apache_beam.runners.portability.fn_api_runner.worker_handlers.EmbeddedWorkerHandler object at 0x7fc1ea356ad0> for environment ref_Environment_default_environment_1 (beam:env:embedded_python:v1, '')
INFO:apache_beam.runners.portability.fn_api_runner.fn_runner:Running (ref_AppliedPTransform_Initializing../Impulse_3)+((ref_AppliedPTransform_Initializing../FlatMap(<lambda at core.py:2632>)_4)+((ref_AppliedPTransform_Initializing../Map(decode)_6)+((ref_AppliedPTransform_Setting up Instance.._7)+(ref_AppliedPTransform_Processing_8))))
Copying gs://<MY_BUCKET>/mysql-connector-java-5.1.45.jar...
- [1 files][976.4 KiB/976.4 KiB]
Operation completed over 1 objects/976.4 KiB.
INFO:root:Enviornment Variable set.
INFO:root:inside run 4
Kindly help me resolving the issue and also guide how to extract data from MySql to GCP Bucket using Apache Beam Python API.
Thanks in advance.
=================================================================================
Hello All,
I changed the code a bit and running it from a shell script after exporting GOOGLE_APPLICATION_CREDENTIALS. But, when it is executing, getting below error:
WARNING:apache_beam.internal.gcp.auth:Unable to find default credentials to use: File /home/aarpan_roy/script/dataflowerviceaccount.json (pointed by GOOGLE_APPLICATION_CREDENTIALS environment variable) does not exist!
Connecting anonymously.
WARNING:apache_beam.options.pipeline_options:Discarding unparseable args: ['True', '--service_account_name', 'dataflowserviceaccount', '--service_account_key_file', '/home/aarpan_roy/script/dataflowserviceaccount.json']
WARNING:apache_beam.options.pipeline_options:Discarding unparseable args: ['True', '--service_account_name', 'dataflowserviceaccount', '--service_account_key_file', '/home/aarpan_roy/script/dataflowserviceaccount.json']
ERROR:root:Failed to launch datapipeline"
Below is the total log file:
aarpan_roy#my-dataproc-cluster-m:~/script/util$ sh -x dataflow_runner.sh
+ export GOOGLE_APPLICATION_CREDENTIALS=gs://<MY_BUCKET>/My First Project-800a97e1fe65.json
+ python /home/aarpan_roy/script/util/loadfromdatabase.py --config config.properties --productconfig cts.properties --env dev --sourcetable employee_details --sqlquery /home/aarpan_roy/script/sql/employee_details.sql --connectionprefix d3 --incrementaldate 1900-01-01
/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/__init__.py:82: UserWarning: You are using Apache Beam with Python 2. New releases of Apache Beam will soon support Python 3 only.
'You are using Apache Beam with Python 2. '
/home/aarpan_roy/script/util/
INFO:root:Job Run Id is 22152
INFO:root:Job Name is load-employee-details-20200820
SELECT * FROM EMPLOYEE_DETAILS
WARNING:apache_beam.options.pipeline_options_validator:Option --zone is deprecated. Please use --worker_zone instead.
INFO:apache_beam.runners.portability.stager:Downloading source distribution of the SDK from PyPi
INFO:apache_beam.runners.portability.stager:Executing command: ['/usr/bin/python', '-m', 'pip', 'download', '--dest', '/tmp/tmpEarTQ0', 'apache-beam==2.23.0', '--no-deps', '--no-binary', ':all:']
INFO:apache_beam.runners.portability.stager:Staging SDK sources from PyPI: dataflow_python_sdk.tar
INFO:apache_beam.runners.portability.stager:Downloading binary distribution of the SDK from PyPi
INFO:apache_beam.runners.portability.stager:Executing command: ['/usr/bin/python', '-m', 'pip', 'download', '--dest', '/tmp/tmpEarTQ0', 'apache-beam==2.23.0', '--no-deps', '--only-binary', ':all:', '--python-version', '27', '--implementation', 'cp', '--abi', 'cp27mu', '--platform', 'manylinux1_x86_64']
INFO:apache_beam.runners.portability.stager:Staging binary distribution of the SDK from PyPI: apache_beam-2.23.0-cp27-cp27mu-manylinux1_x86_64.whl
WARNING:root:Make sure that locally built Python SDK docker image has Python 2.7 interpreter.
INFO:root:Using Python SDK docker image: apache/beam_python2.7_sdk:2.23.0. If the image is not available at local, we will try to pull from hub.docker.com
INFO:apache_beam.internal.gcp.auth:Setting socket default timeout to 60 seconds.
INFO:apache_beam.internal.gcp.auth:socket default timeout is 60.0 seconds.
WARNING:apache_beam.internal.gcp.auth:Unable to find default credentials to use: File /home/aarpan_roy/script/dataflowerviceaccount.json (pointed by GOOGLE_APPLICATION_CREDENTIALS environment variable) does not exist!
Connecting anonymously.
INFO:apache_beam.runners.dataflow.internal.apiclient:Starting GCS upload to gs://<MY_BUCKET>/load-employee-details-20200820.1597916566.568419/pipeline.pb...
INFO:oauth2client.transport:Attempting refresh to obtain initial access_token
INFO:oauth2client.transport:Attempting refresh to obtain initial access_token
INFO:apache_beam.runners.dataflow.internal.apiclient:Completed GCS upload to gs://<MY_BUCKET>/load-employee-details-20200820.1597916566.568419/pipeline.pb in 0 seconds.
INFO:apache_beam.runners.dataflow.internal.apiclient:Starting GCS upload to gs://<MY_BUCKET>/load-employee-details-20200820.1597916566.568419/pickled_main_session...
INFO:apache_beam.runners.dataflow.internal.apiclient:Completed GCS upload to gs://<MY_BUCKET>/load-employee-details-20200820.1597916566.568419/pickled_main_session in 0 seconds.
INFO:apache_beam.runners.dataflow.internal.apiclient:Starting GCS upload to gs://<MY_BUCKET>/load-employee-details-20200820.1597916566.568419/dataflow_python_sdk.tar...
INFO:apache_beam.runners.dataflow.internal.apiclient:Completed GCS upload to gs://<MY_BUCKET>/load-employee-details-20200820.1597916566.568419/dataflow_python_sdk.tar in 0 seconds.
INFO:apache_beam.runners.dataflow.internal.apiclient:Starting GCS upload to gs://<MY_BUCKET>/load-employee-details-20200820.1597916566.568419/apache_beam-2.23.0-cp27-cp27mu-manylinux1_x86_64.whl...
INFO:apache_beam.runners.dataflow.internal.apiclient:Completed GCS upload to gs://<MY_BUCKET>/load-employee-details-20200820.1597916566.568419/apache_beam-2.23.0-cp27-cp27mu-manylinux1_x86_64.whl in 0 seconds.
WARNING:apache_beam.options.pipeline_options:Discarding unparseable args: ['True', '--service_account_name', 'dataflowserviceaccount', '--service_account_key_file', '/home/aarpan_roy/script/dataflowserviceaccount.json']
WARNING:apache_beam.options.pipeline_options:Discarding unparseable args: ['True', '--service_account_name', 'dataflowserviceaccount', '--service_account_key_file', '/home/aarpan_roy/script/dataflowserviceaccount.json']
ERROR:root:Failed to launch datapipeline
Traceback (most recent call last)
File "/home/aarpan_roy/script/util/loadfromdatabase.py", line 105, in run
p=pcoll.run()
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/pipeline.py", line 521, in run
allow_proto_holders=True).run(False)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/pipeline.py", line 534, in run
return self.runner.run_pipeline(self, self._options)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/runners/dataflow/dataflow_runner.py", line 586, in run_pipeline
self.dataflow_client.create_job(self.job), self)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/utils/retry.py", line 236, in wrapper
return fun(*args, **kwargs)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/runners/dataflow/internal/apiclient.py", line 681, in create_job
return self.submit_job_description(job)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/utils/retry.py", line 236, in wrapper
return fun(*args, **kwargs)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/runners/dataflow/internal/apiclient.py", line 748, in submit_job_description
response = self._client.projects_locations_jobs.Create(request)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/runners/dataflow/internal/clients/dataflow/dataflow_v1b3_client.py", line 667, in Create
config, request, global_params=global_params)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apitools/base/py/base_api.py", line 731, in _RunMethod
return self.ProcessHttpResponse(method_config, http_response, request)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apitools/base/py/base_api.py", line 737, in ProcessHttpResponse
self.__ProcessHttpResponse(method_config, http_response, request))
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apitools/base/py/base_api.py", line 604, in __ProcessHttpResponse
http_response, method_config=method_config, request=request)
HttpForbiddenError: HttpError accessing <https://dataflow.googleapis.com/v1b3/projects/turing-thought-277215/locations/asia-southeast1/jobs?alt=json>: response: <{'status': '403', 'content-length': '138', 'x-xss-protection': '0', 'x-content-type-options': 'nosniff', 'transfer-encoding': 'chunked', 'vary': 'Origin, X-Origin, Referer', 'server': 'ESF', '-content-encoding': 'gzip', 'cache-control': 'private', 'date': 'Thu, 20 Aug 2020 09:42:47 GMT', 'x-frame-options': 'SAMEORIGIN', 'content-type': 'application/json; charset=UTF-8', 'www-authenticate': 'Bearer realm="https://accounts.google.com/", error="insufficient_scope", scope="https://www.googleapis.com/auth/compute.readonly https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/userinfo.email email https://www.googleapis.com/auth/userinfo#email"'}>, content <{
"error": {
"code": 403,
"message": "Request had insufficient authentication scopes.",
"status": "PERMISSION_DENIED"
}
}
>
Traceback (most recent call last):
File "/home/aarpan_roy/script/util/loadfromdatabase.py", line 194, in <module>
args.sourcetable)
File "/home/aarpan_roy/script/util/loadfromdatabase.py", line 142, in main
run()
File "/home/aarpan_roy/script/util/loadfromdatabase.py", line 105, in run
p=pcoll.run()
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/pipeline.py", line 521, in run
allow_proto_holders=True).run(False)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/pipeline.py", line 534, in run
return self.runner.run_pipeline(self, self._options)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/runners/dataflow/dataflow_runner.py", line 586, in run_pipeline
self.dataflow_client.create_job(self.job), self)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/utils/retry.py", line 236, in wrapper
return fun(*args, **kwargs)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/runners/dataflow/internal/apiclient.py", line 681, in create_job
return self.submit_job_description(job)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/utils/retry.py", line 236, in wrapper
return fun(*args, **kwargs)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/runners/dataflow/internal/apiclient.py", line 748, in submit_job_description
response = self._client.projects_locations_jobs.Create(request)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apache_beam/runners/dataflow/internal/clients/dataflow/dataflow_v1b3_client.py", line 667, in Create
config, request, global_params=global_params)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apitools/base/py/base_api.py", line 731, in _RunMethod
return self.ProcessHttpResponse(method_config, http_response, request)
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apitools/base/py/base_api.py", line 737, in ProcessHttpResponse
self.__ProcessHttpResponse(method_config, http_response, request))
File "/home/aarpan_roy/.local/lib/python2.7/site-packages/apitools/base/py/base_api.py", line 604, in __ProcessHttpResponse
http_response, method_config=method_config, request=request)
apitools.base.py.exceptions.HttpForbiddenError: HttpError accessing <https://dataflow.googleapis.com/v1b3/projects/turing-thought-277215/locations/asia-southeast1/jobs?alt=json>: response: <{'status': '403', 'content-length': '138', 'x-xss-protection': '0', 'x-content-type-options': 'nosniff', 'transfer-encoding': 'chunked', 'vary': 'Origin, X-Origin, Referer', 'server': 'ESF', '-content-encoding': 'gzip', 'cache-control': 'private', 'date': 'Thu, 20 Aug 2020 09:42:47 GMT', 'x-frame-options': 'SAMEORIGIN', 'content-type': 'application/json; charset=UTF-8', 'www-authenticate': 'Bearer realm="https://accounts.google.com/", error="insufficient_scope", scope="https://www.googleapis.com/auth/compute.readonly https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/userinfo.email email https://www.googleapis.com/auth/userinfo#email"'}>, content <{
"error": {
"code": 403,
"message": "Request had insufficient authentication scopes.",
"status": "PERMISSION_DENIED"
}
}
>
+ echo 1
1
I'm not able to understand where I'm making the mistake. Kindly help me to resolve the issue.
Thanks in advance.
I think that there might be a spelling mistake in your account credentials
/home/aarpan_roy/script/dataflowerviceaccount.json should be /home/aarpan_roy/script/dataflowserviceaccount.json. Can you check this ?
There seem to be two issues here:
On the one hand, the authentication issue, please perform the test provided by #Jayadeep to verify if it was an issue with the name of the credentials.
In the same way, there is probably an issue with the handling of credentials because you are running your code in a Dataproc instance, so I recommend you test your code in the Cloud Shell
On the other hand, I found another post where it is mentioned that the connection to Cloud SQL in python seems to be not as transparent as it is with Java (using jdbcIO).
At the same time, I found this another post where is mentioned a workaround to connect to cloud SQL but using psycopg2 instead of jaydebeapi. I recommend you try it.
import psycopg2
connection = psycopg2.connect(
host = host,
hostaddr = hostaddr,
dbname = dbname,
user = user,
password = password,
sslmode=sslmode,
sslrootcert = sslrootcert,
sslcert = sslcert,
sslkey = sslkey
)
I am trying to use the Docusign Rest API and attempted to follow the sample code on the Python SDK on Github here: https://github.com/docusign/docusign-python-client. I've replaced the necessary values with those obtained from Docusign (keys, urls etc) but the example seems to break down at the following line:
api_client.configure_jwt_authorization_flow(private_key_filename, oauth_base_url, integrator_key, user_id, 3600)
Before then, the code takes the given information and creates the url to authenticate as I understand it. I've manually accessed this link and it seems to work; it asks me for permission to allow the program to act on my behalf and then redirects me to the redirect uri with a code added to the uri. My problem is that when it executes the next line (the one I posted above), it results in this:
Traceback (most recent call last):
File "<ipython-input-40-84847e506c37>", line 1, in <module>
runfile('C:/Users/santi/Desktop/docusign/untitled0.py', wdir='C:/Users/santi/Desktop/docusign')
File "C:\Users\santi\Anaconda3\lib\site-packages\spyder\utils\site\sitecustomize.py", line 705, in runfile
execfile(filename, namespace)
File "C:\Users\santi\Anaconda3\lib\site-packages\spyder\utils\site\sitecustomize.py", line 102, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/santi/Desktop/docusign/untitled0.py", line 501, in <module>
api_client.configure_jwt_authorization_flow(private_key_filename, oauth_base_url, integrator_key, user_id, 3600)
File "C:\Users\santi\Anaconda3\lib\site-packages\docusign_esign\api_client.py", line 126, in configure_jwt_authorization_flow
post_params=self.sanitize_for_serialization({"assertion": assertion, "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer"}))
File "C:\Users\santi\Anaconda3\lib\site-packages\docusign_esign\api_client.py", line 430, in request
body=body)
File "C:\Users\santi\Anaconda3\lib\site-packages\docusign_esign\rest.py", line 244, in POST
body=body)
File "C:\Users\santi\Anaconda3\lib\site-packages\docusign_esign\rest.py", line 200, in request
raise ApiException(http_resp=r)
ApiException: (400)
Reason: Bad Request
HTTP response headers: HTTPHeaderDict({'Cache-Control': 'private', 'Content-Type': 'text/html', 'X-AspNetMvc-Version': '5.2', 'X-DocuSign-TraceToken': '15fb83c2-9054-4522-b5ed-b77646fe1c4b', 'X-DocuSign-Node': 'DA2DFE4', 'Date': 'Sun, 05 Aug 2018 19:35:12 GMT', 'Content-Length': '11'})
HTTP response body: b'Bad Request'
I've googled the problem and found some similar errors on here but none that helped me out much. Following the traceback takes me into the api_client.py and rest.py files but I wasn't able to decipher much. In the end the origin in the error seems to be in line 200 of rest.py where an exception is raised because a value unknown to me is not within the proper range. I'm not sure what is causing the bad request on my end. Has anyone had a similar experience or might know what the issue is? I'd appreciate it a lot, thanks.
I was getting exactly the same error at the same line.
The account specific inputs to api_client.configure_jwt_authorization_flow() are:
the private_key_filename (which is processed correctly to get a JWT token)
the oauth_base_url (which is hard coded in the example as
"account-d.docusign.com")
the integrator_key (copy-pasted from my account)
the user_id
It turns out that the user_id value should the API Username code. That fixed the problem. I found the answer here:
DocuSign Python SDK - API Exception 400 - 'Bad Request'
Hope it solves your problem too!
I have been trying to play around with creating secrets for Kubernetes cluster using the python client. I keep getting an error that says
Traceback (most recent call last):
File "create_secrets.py", line 19, in <module>
api_response = v1.create_namespaced_secret(namespace, body)
File "/usr/local/lib/python3.6/site-packages/kubernetes/client/apis/core_v1_api.py", line 7271, in create_namespaced_secret
(data) = self.create_namespaced_secret_with_http_info(namespace, body, **kwargs)
File "/usr/local/lib/python3.6/site-packages/kubernetes/client/apis/core_v1_api.py", line 7361, in create_namespaced_secret_with_http_info
collection_formats=collection_formats)
File "/usr/local/lib/python3.6/site-packages/kubernetes/client/api_client.py", line 335, in call_api
_preload_content, _request_timeout)
File "/usr/local/lib/python3.6/site-packages/kubernetes/client/api_client.py", line 148, in __call_api
_request_timeout=_request_timeout)
File "/usr/local/lib/python3.6/site-packages/kubernetes/client/api_client.py", line 393, in request
body=body)
File "/usr/local/lib/python3.6/site-packages/kubernetes/client/rest.py", line 287, in POST
body=body)
File "/usr/local/lib/python3.6/site-packages/kubernetes/client/rest.py", line 240, in request
raise ApiException(http_resp=r)
kubernetes.client.rest.ApiException: (400)
Reason: Bad Request
HTTP response headers: HTTPHeaderDict({'Content-Type': 'application/json', 'Date': 'Mon, 16 Oct 2017 04:17:35 GMT', 'Content-Length': '234'})
HTTP response body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"none in version \"v1\" cannot be handled as a Secret: no kind \"none\" is registered for version \"v1\"","reason":"BadRequest","code":400}
This is my code that I am trying to execute to create a secret.
from __future__ import print_function
import time
import kubernetes.client
from pprint import pprint
from kubernetes import client, config
config.load_kube_config()
v1 = client.CoreV1Api()
namespace = 'kube-system'
metadata = {'name': 'pk-test-tls', 'namespace': 'kube-system'}
data= {'tls.crt': '###BASE64 encoded crt###', 'tls.key': '###BASE64 encoded Key###'}
api_version = 'v1'
kind = 'none'
body = kubernetes.client.V1Secret(api_version, data , kind, metadata,
type='kubernetes.io/tls')
api_response = v1.create_namespaced_secret(namespace, body)
pprint(api_response)
What am I missing here?
Almost everything that you have written is alright but pay attention to the message received from kube-apiserver:
HTTP response body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"none in version "v1" cannot be handled as a Secret: no kind "none" is registered for version "v1"","reason":"BadRequest","code":400}
Especially no kind "none". Is it just typo or do you have something on your mind here?
You have list of kinds here https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#types-kinds
If you change kind to "Secret" then everything will be working fine.
I have a Python Cloud Dataflow job that works fine on smaller subsets, but seems to be failing for no obvious reasons on the complete dataset.
The only error I get in the Dataflow interface is the standard error message:
A work item was attempted 4 times without success. Each time the worker eventually lost contact with the service.
Analysing the Stackdriver logs only shows this error:
Exception in worker loop: Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/dataflow_worker/batchworker.py", line 736, in run deferred_exception_details=deferred_exception_details) File "/usr/local/lib/python2.7/dist-packages/dataflow_worker/batchworker.py", line 590, in do_work exception_details=exception_details) File "/usr/local/lib/python2.7/dist-packages/apache_beam/utils/retry.py", line 167, in wrapper return fun(*args, **kwargs) File "/usr/local/lib/python2.7/dist-packages/dataflow_worker/batchworker.py", line 454, in report_completion_status exception_details=exception_details) File "/usr/local/lib/python2.7/dist-packages/dataflow_worker/batchworker.py", line 266, in report_status work_executor=self._work_executor) File "/usr/local/lib/python2.7/dist-packages/dataflow_worker/workerapiclient.py", line 364, in report_status response = self._client.projects_jobs_workItems.ReportStatus(request) File "/usr/local/lib/python2.7/dist-packages/apache_beam/internal/clients/dataflow/dataflow_v1b3_client.py", line 210, in ReportStatus config, request, global_params=global_params) File "/usr/local/lib/python2.7/dist-packages/apitools/base/py/base_api.py", line 723, in _RunMethod return self.ProcessHttpResponse(method_config, http_response, request) File "/usr/local/lib/python2.7/dist-packages/apitools/base/py/base_api.py", line 729, in ProcessHttpResponse self.__ProcessHttpResponse(method_config, http_response, request)) File "/usr/local/lib/python2.7/dist-packages/apitools/base/py/base_api.py", line 599, in __ProcessHttpResponse http_response.request_url, method_config, request) HttpError: HttpError accessing https://dataflow.googleapis.com/v1b3/projects//jobs/2017-05-03_03_33_40-3860129055041750274/workItems:reportStatus?alt=json>: response: <{'status': '400', 'content-length': '360', 'x-xss-protection': '1; mode=block', 'x-content-type-options': 'nosniff', 'transfer-encoding': 'chunked', 'vary': 'Origin, X-Origin, Referer', 'server': 'ESF', '-content-encoding': 'gzip', 'cache-control': 'private', 'date': 'Wed, 03 May 2017 16:46:11 GMT', 'x-frame-options': 'SAMEORIGIN', 'content-type': 'application/json; charset=UTF-8'}>, content <{ "error": { "code": 400, "message": "(2a7b20b33659c46e): Failed to publish the result of the work update. Causes: (2a7b20b33659c523): Failed to update work status. Causes: (8a8b13f5c3a944ba): Failed to update work status., (8a8b13f5c3a945d9): Work \"4047499437681669251\" not leased (or the lease was lost).", "status": "INVALID_ARGUMENT" } } >
I assume this Failed to update work status error is related to the Cloud Runner? But since I didn't find any information on this error online, I was wondering if somebody else encountered it and does have a better explanation?
I am using Google Cloud Dataflow SDK for Python 0.5.5.
One major cause of lease expirations is related to memory pressure on the VM. You may try running your job on machines with higher memory. Particularly, a highmem machine type should do the trick.
For more info on machine types, please check out the GCE Documentation
The next Dataflow release (2.0.0) should be able to handle these cases better.
I'm trying to make a Boto (version 2.38) S3 connection to my Openstack (Juno) cloud and list the containers (buckets).
This is with Python 2.7 and swift3 middleware (https://github.com/stackforge/swift3).
I'm using the admin account, which is also the account I made them with, so it shouldn't be a permissions issue.
I can make an ec2 connection and list all the images just fine.
I can't get an individual bucket either. The one attempted to be retrieved has 1 object in at that I can see in the Openstack dashboard and also list with the python-swiftclient.
Any suggestions would be greatly appreciated!
import boto
import boto.s3.connection
from boto.s3.connection import OrdinaryCallingFormat
import logging
logging.basicConfig(filename="boto.log", level=logging.DEBUG)
A_KEY = '<access_key>'
S_KEY = '<secret_key>'
s3_conn = boto.connect_s3(
aws_access_key_id=A_KEY,
aws_secret_access_key=S_KEY,
port=8080,
host='<host>',
is_secure=False,
path='/swift/v1',
validate_certs=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat())
s3_conn.get_all_buckets()
###############################################
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/macleanal/Development/cl2/lib/python2.7/site-packages/boto/s3/connection.py", line 445, in get_all_buckets
xml.sax.parseString(body, h)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/xml/sax/__init__.py", line 49, in parseString
parser.parse(inpsrc)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/xml/sax/expatreader.py", line 110, in parse
xmlreader.IncrementalParser.parse(self, source)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/xml/sax/xmlreader.py", line 125, in parse
self.close()
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/xml/sax/expatreader.py", line 225, in close
self.feed("", isFinal = 1)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/xml/sax/expatreader.py", line 217, in feed
self._err_handler.fatalError(exc)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/xml/sax/handler.py", line 38, in fatalError
raise exception
xml.sax._exceptions.SAXParseException: <unknown>:1:0: no element found
#################################################
b = s3_conn.get_bucket('Test')
#################################################
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/macleanal/Development/cl2/lib/python2.7/site-packages/boto/s3/connection.py", line 502, in get_bucket
return self.head_bucket(bucket_name, headers=headers)
File "/Users/macleanal/Development/cl2/lib/python2.7/site-packages/boto/s3/connection.py", line 549, in head_bucket
response.status, response.reason, body)
boto.exception.S3ResponseError: S3ResponseError: 204 No Content
And the log:
Thu, 01 Oct 2015 13:43:48 GMT
/swift/v1/
DEBUG:boto:Signature:
AWS <access_key>
DEBUG:boto:Final headers: {'Date': 'Wed, 30 Sep 2015 20:46:39 GMT', 'Content-Length': '0', 'Authorization': u'AWS <access_key>', 'User-Agent': 'Boto/2.38.0 Python/2.7.10 Darwin/14.5.0'}
DEBUG:boto:Response headers: [('date', 'Wed, 30 Sep 2015 20:45:32 GMT'), ('content-type', 'text/plain; charset=utf-8'), ('server', 'Apache/2.4.7 (Ubuntu)')]
/swift/v1/Test/
DEBUG:boto:Signature:
AWS <access_key>
DEBUG:boto:Final headers: {'Date': 'Thu, 01 Oct 2015 13:43:48 GMT', 'Content-Length': '0', 'Authorization': u'AWS <access_key>', 'User-Agent': 'Boto/2.38.0 Python/2.7.10 Darwin/14.5.0'}
DEBUG:boto:Response headers: [('x-container-object-count', '1'), ('server', 'Apache/2.4.7 (Ubuntu)'), ('x-container-bytes-used-actual', '4096'), ('x-container-bytes-used', '1652'), ('x-container-read', '.r:*'), ('date', 'Thu, 01 Oct 2015 13:42:39 GMT'), ('content-type', 'text/plain; charset=utf-8')]
By redeploying our dev cloud with Swift object storage this is no longer a problem. Ceph for object storage was not compatible enough with the Boto S3 API.