I basically want to run this command: argo submit -n argo workflows/workflow.yaml -f params.json through the official python SDK.
This example covers how to submit a workflow manifest, but I don't know where to add the input parameter file.
import os
from pprint import pprint
import yaml
from pathlib import Path
import argo_workflows
from argo_workflows.api import workflow_service_api
from argo_workflows.model.io_argoproj_workflow_v1alpha1_workflow_create_request import \
    IoArgoprojWorkflowV1alpha1WorkflowCreateRequest

# Talk to the local Argo server; its self-signed certificate is not verified.
configuration = argo_workflows.Configuration(host="https://localhost:2746")
configuration.verify_ssl = False

# Parse the workflow manifest from YAML into a plain dict.
workflow_manifest = yaml.safe_load(Path("workflows/workflow.yaml").read_text())

client = argo_workflows.ApiClient(configuration)
service = workflow_service_api.WorkflowServiceApi(client)

# Equivalent of `argo submit -n argo workflows/workflow.yaml`.
result = service.create_workflow(
    namespace="argo",
    body=IoArgoprojWorkflowV1alpha1WorkflowCreateRequest(
        workflow=workflow_manifest, _check_type=False
    ),
    _check_return_type=False,
)
pprint(result)
Where to pass in the params.json file?
I found this snippet in the docs of WorkflowServiceApi.md (which was apparently too big to render as markdown):
import time
import argo_workflows
from argo_workflows.api import workflow_service_api
from argo_workflows.model.grpc_gateway_runtime_error import GrpcGatewayRuntimeError
from argo_workflows.model.io_argoproj_workflow_v1alpha1_workflow_submit_request import IoArgoprojWorkflowV1alpha1WorkflowSubmitRequest
from argo_workflows.model.io_argoproj_workflow_v1alpha1_workflow import IoArgoprojWorkflowV1alpha1Workflow
# FIX: these two models are referenced below but were missing from the original
# docs snippet, which made it fail with NameError before any request was sent.
from argo_workflows.model.io_argoproj_workflow_v1alpha1_submit_opts import IoArgoprojWorkflowV1alpha1SubmitOpts
from argo_workflows.model.owner_reference import OwnerReference
from pprint import pprint

# Defining the host is optional and defaults to http://localhost:2746
# See configuration.py for a list of all supported configuration parameters.
configuration = argo_workflows.Configuration(
    host="http://localhost:2746"
)

# Enter a context with an instance of the API client.
# FIX: pass `configuration` to ApiClient — the original snippet built the client
# without it, so the custom host above was silently ignored.
with argo_workflows.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = workflow_service_api.WorkflowServiceApi(api_client)
    namespace = "namespace_example"  # str |
    body = IoArgoprojWorkflowV1alpha1WorkflowSubmitRequest(
        namespace="namespace_example",
        resource_kind="resource_kind_example",
        resource_name="resource_name_example",
        submit_options=IoArgoprojWorkflowV1alpha1SubmitOpts(
            annotations="annotations_example",
            dry_run=True,
            entry_point="entry_point_example",
            generate_name="generate_name_example",
            labels="labels_example",
            name="name_example",
            owner_reference=OwnerReference(
                api_version="api_version_example",
                block_owner_deletion=True,
                controller=True,
                kind="kind_example",
                name="name_example",
                uid="uid_example",
            ),
            # parameter_file is the SDK equivalent of `argo submit -f params.json`.
            parameter_file="parameter_file_example",
            parameters=[
                "parameters_example",
            ],
            pod_priority_class_name="pod_priority_class_name_example",
            priority=1,
            server_dry_run=True,
            service_account="service_account_example",
        ),
    )  # IoArgoprojWorkflowV1alpha1WorkflowSubmitRequest |
    # example passing only required values which don't have defaults set
    try:
        api_response = api_instance.submit_workflow(namespace, body)
        pprint(api_response)
    except argo_workflows.ApiException as e:
        print("Exception when calling WorkflowServiceApi->submit_workflow: %s\n" % e)
Have you tried using a IoArgoprojWorkflowV1alpha1WorkflowSubmitRequest? Looks like it has submit_options of type IoArgoprojWorkflowV1alpha1SubmitOpts which has a parameter_file param.
Related
A follow up to this question. I am using Kedro v0.18.2. I am trying use the TemplateConfig so I have created a globals.yml under conf/base, which looks like this:
# Global template values for TemplatedConfigLoader; referenced from
# catalog.yml entries as ${paths.base_path}, ${datasets.png}, etc.
paths:
  base_path: s3://my_project

# Shorthand names for dataset implementation classes.
datasets:
  pdf: base.PDFDataSet
  png: pillow.ImageDataSet
  csv: pandas.CSVDataSet
  excel: pandas.ExcelDataSet

# Kedro data-engineering convention layer folders.
data_folders:
  raw: 01_raw
  intermediate: 02_intermediate
  primary: 03_primary
  feature: 04_feature
  model_input: 05_model_input
  models: 06_models
  model_output: 07_model_output
  reporting: 08_reporting
I have followed the documentation and I have uncommented some of the settings.py as such:
"""Project settings. There is no need to edit this file unless you want to change values
from the Kedro defaults. For further information, including these default values, see
https://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html."""
# Instantiated project hooks.
# from certifai.hooks import ProjectHooks
# HOOKS = (ProjectHooks(),)
# Installed plugins for which to disable hook auto-registration.
# DISABLE_HOOKS_FOR_PLUGINS = ("kedro-viz",)
# Class that manages storing KedroSession data.
# from kedro.framework.session.store import ShelveStore
# SESSION_STORE_CLASS = ShelveStore
# Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.
# SESSION_STORE_ARGS = {
# "path": "./sessions"
# }
# Class that manages Kedro's library components.
# from kedro.framework.context import KedroContext
# CONTEXT_CLASS = KedroContext
# Directory that holds configuration.
# CONF_SOURCE = "conf"
# Class that manages how configuration is loaded.
from kedro.config import TemplatedConfigLoader
CONFIG_LOADER_CLASS = TemplatedConfigLoader
CONFIG_LOADER_ARGS = {
"globals_pattern": "*globals.yml",
}
# Class that manages the Data Catalog.
# from kedro.io import DataCatalog
# DATA_CATALOG_CLASS = DataCatalog
catalog.yml looks like this:
# Shared base definition for the partitioned label-image datasets below.
_label_images: &label_images
  type: PartitionedDataSet
  path: ${paths.base_path}/data/${data_folders.raw}/label_images
  # FIX: when given as a mapping, `dataset` must carry a `type` key naming the
  # dataset class; the original flat `dataset: ${datasets.png}` entry is the
  # bug identified in the answer below.
  dataset:
    type: ${datasets.png}

label_images_png:
  <<: *label_images
  filename_suffix: .png

label_images_jpg:
  <<: *label_images
  filename_suffix: .jpg

label_images_jpeg:
  <<: *label_images
  filename_suffix: .jpeg

label_images_pdf:
  <<: *label_images
  # Override the inherited image dataset with the PDF reader.
  dataset:
    type: base.PDFDataSet
  filename_suffix: .pdf

my_project_label_extracts:
  type: PartitionedDataSet
  path: s3://my_project/data/01_raw/label_extracts
  dataset: pandas.ExcelDataSet
My testing script looks like this:
from kedro.config import ConfigLoader, TemplatedConfigLoader
from kedro.framework.project import settings
from pathlib import Path
from kedro.extras.datasets import pillow

# Project root is three directory levels above this file.
project_path = Path(__file__).parent.parent.parent
conf_path = str(project_path / settings.CONF_SOURCE)

# FIX: the plain ConfigLoader does not substitute ${...} template placeholders,
# so entries like ${paths.base_path} were passed through literally and the
# catalog could not be built. TemplatedConfigLoader resolves them against the
# values collected from globals.yml.
conf_loader = TemplatedConfigLoader(conf_source=conf_path, env="base", globals_pattern="*globals.yml")
conf_catalog = conf_loader.get("catalog*", "catalog*/**")

images_dataset = pillow.ImageDataSet.from_config("label_images_png", conf_catalog["label_images_png"])
images_loader = images_dataset.load()
# Partitions load lazily: calling the partition's loader returns a PIL image.
images_loader["00337180800086"]().show()
With hard-coded values inside the catalog.yml, the script runs and outputs an image. However, with the templated config it does not work. Am I missing something?
P.S. Apologies if the question is duplicated.
The first bug I've noticed is in your catalog for the entry:
# Entry quoted verbatim from the question — this is the buggy version:
# `dataset` is given a bare templated value instead of a mapping with a
# `type` key (see the corrected entry below).
_label_images: &label_images
  type: PartitionedDataSet
  path: ${paths.base_path}/data/${data_folders.raw}/label_images
  dataset: ${datasets.png}
You've missed the type key for the dataset. The correct entry should be:
# Corrected entry: `dataset` is now a mapping whose `type` key names the
# dataset class to instantiate for each partition.
_label_images: &label_images
  type: PartitionedDataSet
  path: ${paths.base_path}/data/${data_folders.raw}/label_images
  dataset:
    type: ${datasets.png}
If you now run the script with the TemplatedConfigLoader you should hopefully not receive the mentioned errors anymore:
from kedro.config import ConfigLoader, TemplatedConfigLoader
from kedro.framework.project import settings
from pathlib import Path
from kedro.extras.datasets import pillow

# Locate the project's conf/ directory relative to this file.
root_dir = Path(__file__).parent.parent.parent
config_dir = str(root_dir / settings.CONF_SOURCE)

# Load the catalog with ${...} substitution driven by globals.yml.
loader = TemplatedConfigLoader(conf_source=config_dir, env="base", globals_pattern="*globals.yml")
catalog_config = loader.get("catalog*", "catalog*/**")

# Build the PNG partitioned dataset and display a single partition.
png_dataset = pillow.ImageDataSet.from_config("label_images_png", catalog_config["label_images_png"])
partitions = png_dataset.load()
partitions["00337180800086"]().show()
For ease of communication you might want to join the Kedro Discord channel so we can respond to you in real time: https://discord.gg/akJDeVaxnB
I am trying to connect to Oanda REST API using juypter notebook with the following code:
#!/usr/bin/env python
import sys
import select
import argparse
import common.config
from .account import Account
def main():
    """
    Create an API context, and use it to fetch an Account state and then
    continually poll for changes to it.

    The configuration for the context and Account to fetch is parsed from the
    config file provided as an argument.
    """
    parser = argparse.ArgumentParser()

    #
    # The config object is initialized by the argument parser, and contains
    # the REST APID host, port, accountID, etc.
    #
    common.config.add_argument(parser)

    parser.add_argument(
        "--poll-interval",
        type=int,
        default=5,
        help="The number of seconds between polls for Account changes"
    )

    args = parser.parse_args()

    account_id = args.config.active_account

    #
    # The v20 config object creates the v20.Context for us based on the
    # contents of the config file.
    #
    api = args.config.create_context()

    #
    # Fetch the details of the Account found in the config file
    #
    response = api.account.get(account_id)

    #
    # Extract the Account representation from the response and use
    # it to create an Account wrapper
    #
    account = Account(
        response.get("account", "200")
    )

    def dump():
        # Print the current in-memory state of the Account and prompt for a
        # manual refresh via <ENTER>.
        account.dump()
        print("Press <ENTER> to see current state for Account {}".format(
            account.details.id
        ))

    dump()

    while True:
        # Block until stdin becomes readable or the poll interval elapses.
        i, _, _ = select.select([sys.stdin], [], [], args.poll_interval)

        if i:
            # Consume the pending input line and re-display the state.
            sys.stdin.readline()
            dump()

        #
        # Poll for all changes to the account since the last
        # Account Transaction ID that was seen
        #
        response = api.account.changes(
            account_id,
            sinceTransactionID=account.details.lastTransactionID
        )

        # Apply the deltas, then the authoritative state snapshot, and record
        # the newest transaction ID so the next poll is incremental.
        account.apply_changes(
            response.get(
                "changes",
                "200"
            )
        )
        account.apply_state(
            response.get(
                "state",
                "200"
            )
        )
        account.details.lastTransactionID = response.get(
            "lastTransactionID",
            "200"
        )


if __name__ == "__main__":
    main()
It is showing this error:
ModuleNotFoundError Traceback (most recent call last)
in ----> 1 import common.view
2 from position.view import print_positions_map 3 from order.view
import print_orders_map 4 from trade.view import print_trades_map 5
ModuleNotFoundError: No module named 'common.view'
I added these two lines at the top of the code and then it ran correctly. I think the error was caused by an incorrect module search path.
# NOTE(review): appending the samples' src directory to sys.path makes the
# 'common', 'position', 'order', and 'trade' packages importable; adjust the
# absolute path to wherever v20-python-samples is checked out on your machine.
import sys
sys.path.append('/Users/apple/Documents/code/PythonX86/OandaAPI/example/v20-python-samples/src')
Trying to connect to an smb share via pysmb and getting error...
smb.smb_structs.OperationFailure: Failed to list on \\\\H021BSBD20\\shared_folder: Unable to connect to shared device
The code I am using looks like...
from smb.SMBConnection import SMBConnection
import json
import pprint
import warnings

pp = pprint.PrettyPrinter(indent=4)

PROJECT_HOME = "/path/to/my/project/"

# load configs
CONF = json.load(open(f"{PROJECT_HOME}/configs/configs.json"))
pp.pprint(CONF)

# list all files in storage smb dir
# https://pysmb.readthedocs.io/en/latest/api/smb_SMBConnection.html#smb.SMBConnection.SMBConnection.listPath
IS_DIRECT_TCP = False
# NetBIOS session service uses port 139; direct TCP hosting uses 445.
CNXN_PORT = 139 if not IS_DIRECT_TCP else 445
LOCAL_IP = "172.18.4.69"
REMOTE_NAME = "H021BSBD20"  # exact name shown as Device Name in System Settings
# FIX: pysmb's service_name is just the bare share name, NOT an smbclient-style
# UNC path like "\\\\H021BSBD20\\shared_folder" — the UNC form is what caused
# "OperationFailure: ... Unable to connect to shared device".
SERVICE_NAME = "shared_folder"
REMOTE_IP = "172.18.7.102"

try:
    conn = SMBConnection(CONF['smb_creds']['username'], CONF['smb_creds']['password'],
                         my_name=LOCAL_IP, remote_name=REMOTE_NAME,
                         use_ntlm_v2=True,
                         is_direct_tcp=IS_DIRECT_TCP)
    conn.connect(REMOTE_IP, CNXN_PORT)
except Exception:
    # Some servers only accept the other NTLM dialect; retry once without v2.
    warnings.warn("\n\nFailed to initially connect, attempting again with param use_ntlm_v2=False\n\n")
    conn = SMBConnection(CONF['smb_creds']['username'], CONF['smb_creds']['password'],
                         my_name=LOCAL_IP, remote_name=REMOTE_NAME,
                         use_ntlm_v2=False,
                         is_direct_tcp=IS_DIRECT_TCP)
    conn.connect(REMOTE_IP, CNXN_PORT)

files = conn.listPath(f'{SERVICE_NAME}', '\\')
pp.pprint(files)
Using smbclient on my machine, I can successfully connect to the share by doing...
[root#airflowetl etl]# smbclient -U my_user \\\\H021BSBD20\\shared_folder
The amount of backslashes I use in the python code is so that I can create the same string that works when using this smbclient (have tried with less backslashes in the code and that has not helped).
Note that the user that I am using to access the shared folder in the python code and with smbclient is not able to access / log on to the actual machine that the share is hosted on (they are only allowed to access that particular shared folder, as shown above).
Does anyone know what could be happening here? Any other debugging steps that could be done?
After asking on the github repo Issues section (https://github.com/miketeo/pysmb/issues/169), I was able to fix the problem. It was just due to the arg I was using for the conn.listPath() servicename param.
When looking closer at the docs for that function (https://pysmb.readthedocs.io/en/latest/api/smb_SMBConnection.html), I saw...
service_name (string/unicode) – the name of the shared folder for the path
Originally, I was only looking at the function signature, which said service_name, so I assumed it would be the same as with the smbclient command-line tool (which I have been entering the servicename param as \\\\devicename\\sharename (unlike with pysmb which we can see from the docstring wants just the share as the service_name)).
So rather than
files = conn.listPath("\\\\H021BSBD20\\shared_folder", '\\')
I do
files = conn.listPath("shared_folder", '\\')
The full refactored snippet is shown below, just for reference.
import argparse
import json
import os
import pprint
import socket
import sys
import traceback
import warnings
from smb.SMBConnection import SMBConnection
def parseArguments():
    """Build the CLI parser and return the parsed arguments namespace."""
    cli = argparse.ArgumentParser()

    # Two required positional arguments identify the project and the share's device.
    cli.add_argument("project_home", help="project home path", type=str)
    cli.add_argument("device_name", help="device (eg. NetBIOS) name in configs of share to process", type=str)

    # (An optional --data_file_dir flag existed here once; it stays out of the
    # interface until it is needed again.)

    return cli.parse_args()
# Parse CLI args and echo them for traceability.
args = parseArguments()
for a in args.__dict__:
    print(str(a) + ": " + str(args.__dict__[a]))

pp = pprint.PrettyPrinter(indent=4)

PROJECT_HOME = args.project_home
REMOTE_NAME = args.device_name

# load configs (connection details and credentials are kept in separate files)
CONF = json.load(open(f"{PROJECT_HOME}/configs/configs.json"))
CREDS = json.load(open(f"{PROJECT_HOME}/configs/creds.json"))
pp.pprint(CONF)

# Select the config entry for the requested device; next() raises
# StopIteration if no entry matches the given device_name.
SMB_CONFS = next(info for info in CONF["smb_server_configs"] if info["device_name"] == args.device_name)
print("\nUsing details for device:")
pp.pprint(SMB_CONFS)

# list all files in storage smb dir
# https://pysmb.readthedocs.io/en/latest/api/smb_SMBConnection.html#smb.SMBConnection.SMBConnection.listPath
IS_DIRECT_TCP = False
# NetBIOS session service uses port 139; direct-TCP hosting uses 445.
CNXN_PORT = 139 if IS_DIRECT_TCP is False else 445
LOCAL_IP = socket.gethostname()  # "172.18.4.69"
REMOTE_NAME = SMB_CONFS["device_name"]
SHARE_FOLDER = SMB_CONFS["share_folder"]
REMOTE_IP = socket.gethostbyname(REMOTE_NAME)  # "172.18.7.102"
print(LOCAL_IP)
print(REMOTE_NAME)

try:
    conn = SMBConnection(CREDS['smb_creds']['username'], CREDS['smb_creds']['password'],
                         my_name=LOCAL_IP, remote_name=REMOTE_NAME,
                         use_ntlm_v2=False,
                         is_direct_tcp=IS_DIRECT_TCP)
    conn.connect(REMOTE_IP, CNXN_PORT)
except Exception:
    traceback.print_exc()
    # Retry once with the other NTLM dialect before giving up.
    warnings.warn("\n\nFailed to initially connect, attempting again with param use_ntlm_v2=True\n\n")
    conn = SMBConnection(CREDS['smb_creds']['username'], CREDS['smb_creds']['password'],
                         my_name=LOCAL_IP, remote_name=REMOTE_NAME,
                         use_ntlm_v2=True,
                         is_direct_tcp=IS_DIRECT_TCP)
    conn.connect(REMOTE_IP, CNXN_PORT)

# listPath's service_name is just the bare share name (e.g. "shared_folder").
files = conn.listPath(SHARE_FOLDER, '\\')
if len(files) > 0:
    print("Found listed files")
    for f in files:
        print(f.filename)
else:
    print("No files to list, this likely indicates a problem. Exiting...")
    exit(255)
I am trying to use the Strava API in a Flask project. I have seen the following Stack Overflow post
and installed swagger_client
swagger-codegen generate -i https://developers.strava.com/swagger/swagger.json -l python -o ./StravaPythonClient
as per their instructions. However, when I run the app I still get the following error on the `import swagger_client` line:
ModuleNotFoundError: No module named 'swagger_client'
My code is here
import swagger_client
from swagger_client.rest import ApiException
from pprint import pprint

# Configure OAuth2 access token for authorization: strava_oauth
swagger_client.configuration.access_token = 'fe931c21b503a46b61b1000000000000000000000'

# create an instance of the API class
api_instance = swagger_client.StreamsApi()

id = 2284367626  # Long | The identifier of the activity.
# FIX: `keys` was commented out yet still passed to the call below, which
# raised NameError. It must name the desired stream types.
keys = ["distance", "time"]  # array[String] | Desired stream types.
# FIX: Python's boolean literal is `True`; bare `true` is a NameError.
keyByType = True  # Boolean | Must be true. (default to true)

try:
    # Get Activity Streams
    api_response = api_instance.getActivityStreams(id, keys, keyByType)
    pprint(api_response)
except ApiException as e:
    print("Exception when calling StreamsApi->getActivityStreams: %s\n" % e)
not sure what packages i should be installing to get this working now.
First install swagger-codegen and check that it's working, this example is for linux. Easier with mac where you can use homebrew.
wget https://repo1.maven.org/maven2/io/swagger/swagger-codegen-cli/2.4.13/swagger-codegen-cli-2.4.13.jar -O swagger-codegen-cli.jar
java -jar swagger-codegen-cli.jar help
After that go in your project and generate the swagger-client. The code below tells that it's for python and should be stored in a folder within the project called generated
java -jar swagger-codegen-cli.jar generate -i https://developers.strava.com/swagger/swagger.json -l python -o generated
Go into the generated folder and install the requirements
cd generated && python setup.py install --user && cd ..
Change your import statements to refer to the generated folder.
from generated import swagger_client
from generated.swagger_client.rest import ApiException
from pprint import pprint

# Configure OAuth2 access token for authorization: strava_oauth
# (note the capital C in Configuration)
swagger_client.Configuration.access_token = 'fe931c21b503a46b61b1000000000000000000000'

# create an instance of the API class
api_instance = swagger_client.StreamsApi()

id = 2284367626  # Long | The identifier of the activity.
# FIX: define `keys` — it was commented out but still passed below (NameError).
keys = ["distance", "time"]  # array[String] | Desired stream types.
# FIX: `true` is not a Python literal; the boolean is `True`.
keyByType = True  # Boolean | Must be true. (default to true)

try:
    # Get Activity Streams
    api_response = api_instance.getActivityStreams(id, keys, keyByType)
    pprint(api_response)
except ApiException as e:
    print("Exception when calling StreamsApi->getActivityStreams: %s\n" % e)
Now you can run the file. P.S. When you set the access token, `Configuration` needs to be written with an upper-case C.
I am getting the following import error on Mac:
ImportError: No module named Conf_Reader
Here are the few initial lines of my Python code:
import dotenv
import os
import testrail
import Conf_Reader

#setup the testrail client and connect to the testrail instance
def get_testrail_client():
    # Conf_Reader is a local helper module (see the answer below), not a
    # pip-installable package — it reads keys out of testrail.env via dotenv.
    testrail_file = os.path.join(os.path.dirname(__file__),'testrail.env')
    testrail_url = Conf_Reader.get_value(testrail_file,'TESTRAIL_URL')
    client = testrail.APIClient(testrail_url)
..
..
..
So far I have tried pip, but I was not able to find any package source for installing it.
I have the same problem on Mac.
To avoid other dependencies you can skip using the env file and pass the values as variables instead:
# create a credential.py
TESTRAIL_URL = 'https://testrail.com/testrail'
TESTRAIL_USER = 'xxxxx'
TESTRAIL_PASSWORD = 'xxxxx'

# on your update_testrail.py
# FIX: the original multi-line import had no parentheses or backslashes on the
# continuation lines, which is a SyntaxError.
from credential import (
    TESTRAIL_URL,
    TESTRAIL_USER,
    TESTRAIL_PASSWORD,
)

testrail_url = TESTRAIL_URL
client = testrail.APIClient(testrail_url)

# Get and set the TestRail User and Password
client.user = TESTRAIL_USER
client.password = TESTRAIL_PASSWORD
They should have linked to https://bangladroid.wordpress.com/2016/08/20/create-separate-credential-files-for-selenium-python/
where it explains that you make your own 'Conf_Reader.py' file as below:
"""
A simple conf reader.
For now, we just use dotenv and return a key.
"""
import dotenv,os
def get_value(conf,key):
# type: (object, object) -> object
"Return the value in conf for a given key"
value = None
try:
dotenv.load_dotenv(conf)
value = os.environ[key]
except Exception,e:
print 'Exception in get_value'
print 'file: ',conf
print 'key: ',key
return value