ModuleNotFoundError: No module named 'common.config' (Oanda API, Python)

I am trying to connect to the Oanda REST API from a Jupyter notebook with the following code:
#!/usr/bin/env python

import sys
import select
import argparse
import common.config

from .account import Account


def main():
    """
    Create an API context, and use it to fetch an Account state and then
    continually poll for changes to it.

    The configuration for the context and Account to fetch is parsed from the
    config file provided as an argument.
    """

    parser = argparse.ArgumentParser()

    #
    # The config object is initialized by the argument parser, and contains
    # the REST API host, port, accountID, etc.
    #
    common.config.add_argument(parser)

    parser.add_argument(
        "--poll-interval",
        type=int,
        default=5,
        help="The number of seconds between polls for Account changes"
    )

    args = parser.parse_args()

    account_id = args.config.active_account

    #
    # The v20 config object creates the v20.Context for us based on the
    # contents of the config file.
    #
    api = args.config.create_context()

    #
    # Fetch the details of the Account found in the config file
    #
    response = api.account.get(account_id)

    #
    # Extract the Account representation from the response and use
    # it to create an Account wrapper
    #
    account = Account(
        response.get("account", "200")
    )

    def dump():
        account.dump()
        print("Press <ENTER> to see current state for Account {}".format(
            account.details.id
        ))

    dump()

    while True:
        i, _, _ = select.select([sys.stdin], [], [], args.poll_interval)

        if i:
            sys.stdin.readline()
            dump()

        #
        # Poll for all changes to the account since the last
        # Account Transaction ID that was seen
        #
        response = api.account.changes(
            account_id,
            sinceTransactionID=account.details.lastTransactionID
        )

        account.apply_changes(
            response.get(
                "changes",
                "200"
            )
        )

        account.apply_state(
            response.get(
                "state",
                "200"
            )
        )

        account.details.lastTransactionID = response.get(
            "lastTransactionID",
            "200"
        )


if __name__ == "__main__":
    main()
It is showing this error:
ModuleNotFoundError                       Traceback (most recent call last)
in
----> 1 import common.view
      2 from position.view import print_positions_map
      3 from order.view import print_orders_map
      4 from trade.view import print_trades_map
      5

ModuleNotFoundError: No module named 'common.view'

I added the following two lines at the top of the code and then it ran correctly. I think the original error was caused by the module search path.
import sys
sys.path.append('/Users/apple/Documents/code/PythonX86/OandaAPI/example/v20-python-samples/src')
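That workaround points Python at the samples' src directory so the common package can be found. If the hard-coded absolute string is a concern, a minimal sketch of the same idea (assuming the repo lives in that same location under the home directory) builds the path with pathlib instead:

import sys
from pathlib import Path

# Hypothetical location of the v20-python-samples checkout; adjust as needed.
REPO_SRC = Path.home() / "Documents/code/PythonX86/OandaAPI/example/v20-python-samples/src"
sys.path.insert(0, str(REPO_SRC))

import common.config  # should now resolve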

Related

Argo - submit workflow from python with input parameter file

I basically want to run this command: argo submit -n argo workflows/workflow.yaml -f params.json through the official python SDK.
This example covers how to submit a workflow manifest, but I don't know where to add the input parameter file.
import os
from pprint import pprint

import yaml
from pathlib import Path

import argo_workflows
from argo_workflows.api import workflow_service_api
from argo_workflows.model.io_argoproj_workflow_v1alpha1_workflow_create_request import \
    IoArgoprojWorkflowV1alpha1WorkflowCreateRequest

configuration = argo_workflows.Configuration(host="https://localhost:2746")
configuration.verify_ssl = False

with open("workflows/workflow.yaml", "r") as f:
    manifest = yaml.safe_load(f)

api_client = argo_workflows.ApiClient(configuration)
api_instance = workflow_service_api.WorkflowServiceApi(api_client)

api_response = api_instance.create_workflow(
    namespace="argo",
    body=IoArgoprojWorkflowV1alpha1WorkflowCreateRequest(workflow=manifest, _check_type=False),
    _check_return_type=False)
pprint(api_response)
Where to pass in the params.json file?
I found this snippet in the docs of WorkflowServiceApi.md (which was apparently too big to render as markdown):
import time
import argo_workflows
from argo_workflows.api import workflow_service_api
from argo_workflows.model.grpc_gateway_runtime_error import GrpcGatewayRuntimeError
from argo_workflows.model.io_argoproj_workflow_v1alpha1_workflow_submit_request import IoArgoprojWorkflowV1alpha1WorkflowSubmitRequest
from argo_workflows.model.io_argoproj_workflow_v1alpha1_workflow import IoArgoprojWorkflowV1alpha1Workflow
from pprint import pprint

# Defining the host is optional and defaults to http://localhost:2746
# See configuration.py for a list of all supported configuration parameters.
configuration = argo_workflows.Configuration(
    host = "http://localhost:2746"
)

# Enter a context with an instance of the API client
with argo_workflows.ApiClient() as api_client:
    # Create an instance of the API class
    api_instance = workflow_service_api.WorkflowServiceApi(api_client)
    namespace = "namespace_example"  # str |
    body = IoArgoprojWorkflowV1alpha1WorkflowSubmitRequest(
        namespace="namespace_example",
        resource_kind="resource_kind_example",
        resource_name="resource_name_example",
        submit_options=IoArgoprojWorkflowV1alpha1SubmitOpts(
            annotations="annotations_example",
            dry_run=True,
            entry_point="entry_point_example",
            generate_name="generate_name_example",
            labels="labels_example",
            name="name_example",
            owner_reference=OwnerReference(
                api_version="api_version_example",
                block_owner_deletion=True,
                controller=True,
                kind="kind_example",
                name="name_example",
                uid="uid_example",
            ),
            parameter_file="parameter_file_example",
            parameters=[
                "parameters_example",
            ],
            pod_priority_class_name="pod_priority_class_name_example",
            priority=1,
            server_dry_run=True,
            service_account="service_account_example",
        ),
    )  # IoArgoprojWorkflowV1alpha1WorkflowSubmitRequest |

    # example passing only required values which don't have defaults set
    try:
        api_response = api_instance.submit_workflow(namespace, body)
        pprint(api_response)
    except argo_workflows.ApiException as e:
        print("Exception when calling WorkflowServiceApi->submit_workflow: %s\n" % e)
Have you tried using an IoArgoprojWorkflowV1alpha1WorkflowSubmitRequest? It looks like it has a submit_options field of type IoArgoprojWorkflowV1alpha1SubmitOpts, which has a parameter_file param.
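If the workflow is submitted from a raw manifest (as in the question) rather than from a registered WorkflowTemplate, another option is to do what argo submit -f params.json does client-side: read the parameter file yourself and merge it into the manifest's spec.arguments.parameters before calling create_workflow. This is a sketch under that assumption; the file paths and parameter names are illustrative.

import json
import yaml

import argo_workflows
from argo_workflows.api import workflow_service_api
from argo_workflows.model.io_argoproj_workflow_v1alpha1_workflow_create_request import \
    IoArgoprojWorkflowV1alpha1WorkflowCreateRequest

# Load the manifest and the parameter file (paths are assumptions).
with open("workflows/workflow.yaml") as f:
    manifest = yaml.safe_load(f)
with open("params.json") as f:
    params = json.load(f)  # e.g. {"my-param": "my-value"}

# Merge the parameters into the workflow spec, mirroring `argo submit -f params.json`.
manifest.setdefault("spec", {}).setdefault("arguments", {})["parameters"] = [
    {"name": k, "value": str(v)} for k, v in params.items()
]

configuration = argo_workflows.Configuration(host="https://localhost:2746")
configuration.verify_ssl = False

api_client = argo_workflows.ApiClient(configuration)
api_instance = workflow_service_api.WorkflowServiceApi(api_client)
api_instance.create_workflow(
    namespace="argo",
    body=IoArgoprojWorkflowV1alpha1WorkflowCreateRequest(workflow=manifest, _check_type=False),
    _check_return_type=False,
)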

pysmb from linux to Windows, Unable to connect to shared device

Trying to connect to an smb share via pysmb and getting error...
smb.smb_structs.OperationFailure: Failed to list on \\\\H021BSBD20\\shared_folder: Unable to connect to shared device
The code I am using looks like...
from smb.SMBConnection import SMBConnection
import json
import pprint
import warnings

pp = pprint.PrettyPrinter(indent=4)

PROJECT_HOME = "/path/to/my/project/"

# load configs
CONF = json.load(open(f"{PROJECT_HOME}/configs/configs.json"))
pp.pprint(CONF)

# list all files in storage smb dir
# https://pysmb.readthedocs.io/en/latest/api/smb_SMBConnection.html#smb.SMBConnection.SMBConnection.listPath
IS_DIRECT_TCP = False
CNXN_PORT = 139 if not IS_DIRECT_TCP else 445
LOCAL_IP = "172.18.4.69"
REMOTE_NAME = "H021BSBD20"  # exact name shown as Device Name in System Settings
SERVICE_NAME = "\\\\H021BSBD20\\shared_folder"
REMOTE_IP = "172.18.7.102"

try:
    conn = SMBConnection(CONF['smb_creds']['username'], CONF['smb_creds']['password'],
                         my_name=LOCAL_IP, remote_name=REMOTE_NAME,
                         use_ntlm_v2=True,
                         is_direct_tcp=IS_DIRECT_TCP)
    conn.connect(REMOTE_IP, CNXN_PORT)
except Exception:
    warnings.warn("\n\nFailed to initially connect, attempting again with param use_ntlm_v2=False\n\n")
    conn = SMBConnection(CONF['smb_creds']['username'], CONF['smb_creds']['password'],
                         my_name=LOCAL_IP, remote_name=REMOTE_NAME,
                         use_ntlm_v2=False,
                         is_direct_tcp=IS_DIRECT_TCP)
    conn.connect(REMOTE_IP, CNXN_PORT)

files = conn.listPath(f'{SERVICE_NAME}', '\\')
pp.pprint(files)
Using smbclient on my machine, I can successfully connect to the share by doing...
[root@airflowetl etl]# smbclient -U my_user \\\\H021BSBD20\\shared_folder
The number of backslashes I use in the Python code is so that I can create the same string that works with smbclient (I have tried with fewer backslashes in the code and that has not helped).
Note that the user I am using to access the shared folder in the Python code and with smbclient is not able to access / log on to the actual machine that hosts the share (they are only allowed to access that particular shared folder as shown above).
Does anyone know what could be happening here? Any other debugging steps that could be done?
After asking in the GitHub repo's Issues section (https://github.com/miketeo/pysmb/issues/169), I was able to fix the problem. It was just due to the arg I was passing for the conn.listPath() service_name param.
When looking closer at the docs for that function (https://pysmb.readthedocs.io/en/latest/api/smb_SMBConnection.html), I saw...
service_name (string/unicode) – the name of the shared folder for the path
Originally, I was only looking at the function signature, which said service_name, so I assumed it would be the same as with the smbclient command-line tool, where I had been entering the service name as \\\\devicename\\sharename (whereas pysmb, as the docstring shows, wants just the share name as the service_name).
So rather than
files = conn.listPath("\\\\H021BSBD20\\shared_folder", '\\')
I do
files = conn.listPath("shared_folder", '\\')
The full refactored snippet is shown below, just for reference.
import argparse
import json
import os
import pprint
import socket
import sys
import traceback
import warnings

from smb.SMBConnection import SMBConnection


def parseArguments():
    # Create argument parser
    parser = argparse.ArgumentParser()

    # Positional mandatory arguments
    parser.add_argument("project_home", help="project home path", type=str)
    parser.add_argument("device_name", help="device (eg. NetBIOS) name in configs of share to process", type=str)

    # Optional arguments
    # parser.add_argument("-dfd", "--data_file_dir",
    #                     help="path to data files dir to be pushed to sink, else source columns based on form_type",
    #                     type=str, default=None)

    # Parse arguments
    args = parser.parse_args()
    return args


args = parseArguments()
for a in args.__dict__:
    print(str(a) + ": " + str(args.__dict__[a]))

pp = pprint.PrettyPrinter(indent=4)

PROJECT_HOME = args.project_home
REMOTE_NAME = args.device_name

# load configs
CONF = json.load(open(f"{PROJECT_HOME}/configs/configs.json"))
CREDS = json.load(open(f"{PROJECT_HOME}/configs/creds.json"))
pp.pprint(CONF)

SMB_CONFS = next(info for info in CONF["smb_server_configs"] if info["device_name"] == args.device_name)
print("\nUsing details for device:")
pp.pprint(SMB_CONFS)

# list all files in storage smb dir
# https://pysmb.readthedocs.io/en/latest/api/smb_SMBConnection.html#smb.SMBConnection.SMBConnection.listPath
IS_DIRECT_TCP = False
CNXN_PORT = 139 if IS_DIRECT_TCP is False else 445
LOCAL_IP = socket.gethostname()  # "172.18.4.69"
REMOTE_NAME = SMB_CONFS["device_name"]
SHARE_FOLDER = SMB_CONFS["share_folder"]
REMOTE_IP = socket.gethostbyname(REMOTE_NAME)  # "172.18.7.102"
print(LOCAL_IP)
print(REMOTE_NAME)

try:
    conn = SMBConnection(CREDS['smb_creds']['username'], CREDS['smb_creds']['password'],
                         my_name=LOCAL_IP, remote_name=REMOTE_NAME,
                         use_ntlm_v2=False,
                         is_direct_tcp=IS_DIRECT_TCP)
    conn.connect(REMOTE_IP, CNXN_PORT)
except Exception:
    traceback.print_exc()
    warnings.warn("\n\nFailed to initially connect, attempting again with param use_ntlm_v2=True\n\n")
    conn = SMBConnection(CREDS['smb_creds']['username'], CREDS['smb_creds']['password'],
                         my_name=LOCAL_IP, remote_name=REMOTE_NAME,
                         use_ntlm_v2=True,
                         is_direct_tcp=IS_DIRECT_TCP)
    conn.connect(REMOTE_IP, CNXN_PORT)

files = conn.listPath(SHARE_FOLDER, '\\')
if len(files) > 0:
    print("Found listed files")
    for f in files:
        print(f.filename)
else:
    print("No files to list, this likely indicates a problem. Exiting...")
    exit(255)

Passing in 'date' as a runtime argument in Google Dataflow Template

I'm currently trying to generate a Google Dataflow custom template, that will call an API when run, and write the results to a BigQuery table.
However the issue I'm encountering is that the API requires a date parameter 'YYYY-MM-DD' to be passed in for it to work.
Unfortunately it seems that when constructing a template Dataflow requires that you use ValueProvider (as described here) for any variables that are relative to when the job is being run (i.e. today's date). Otherwise it'll just carry on using the same date that was generated when the template was originally created. (i.e. with dt.date.today() etc - h/t to this post)
Therefore, with the code I've got, is there any way to generate the template so that it uses today's date correctly as an argument at runtime, rather than baking in the same static date indefinitely (or, as is currently the case, failing to convert to a template at all)?
from __future__ import print_function, absolute_import
import argparse
import logging
import sys

import apache_beam as beam
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.metrics.metric import Metrics
from apache_beam.options.pipeline_options import PipelineOptions, GoogleCloudOptions, StandardOptions, SetupOptions
from apache_beam.options.value_provider import ValueProvider

import datetime as dt
from datetime import timedelta, date
import time
import re

logging.getLogger().setLevel(logging.INFO)


class GetAPI():
    def __init__(self, data={}, date=None):
        self.num_api_errors = Metrics.counter(self.__class__, 'num_api_errors')
        self.data = data
        self.date = date

    def get_job(self):
        import requests
        endpoint = f'https://www.rankranger.com/api/v2/?rank_stats&key={self.data.api_key}&date={self.date}'\
                   f'&campaign_id={self.data.campaign}&se_id={self.data.se}&domain={self.data.domain}&output=json'
        logging.info("Endpoint: {}".format(str(endpoint)))
        try:
            res = requests.get(endpoint)
            if res.status_code == 200:
                # logging.info("Reponse: {}".format(str(res.text)))
                json_data = res.json()

                ## Store the API response
                if 'result' in json_data:
                    response = json_data.get('result')
                    return response
        except Exception as e:
            self.num_api_errors.inc()
            logging.error(f'Exception: {e}')
            logging.error(f'Extract error on "%s"', 'Rank API')


def format_dates(api):
    api['date'] = dt.datetime.strptime(api['date'], "%m/%d/%Y").strftime("%Y-%m-%d")
    return api


# Class to pass in date generated at runtime to template
class UserOptions(PipelineOptions):
    @classmethod
    def _add_argparse_args(cls, parser):
        ## Special runtime argument e.g. date
        parser.add_value_provider_argument('--date',
                                           type=str,
                                           default=(dt.date.today()).strftime("%Y-%m-%d"),
                                           help='Run date in YYYY-MM-DD format.')


def run(argv=None):
    """
    Main entry point; defines the static arguments to be passed in.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--api_key',
                        type=str,
                        default=API_KEY,
                        help='API key for Rank API.')
    parser.add_argument('--campaign',
                        type=str,
                        default=CAMPAIGN,
                        help='Campaign ID for Rank API')
    parser.add_argument('--se',
                        type=str,
                        default=SE,
                        help='Search Engine ID for Rank API')
    parser.add_argument('--domain',
                        type=str,
                        default=DOMAIN,
                        help='Domain for Rank API')
    parser.add_argument('--dataset',
                        type=str,
                        default=DATASET,
                        help='BigQuery Dataset to write tables to. Must already exist.')
    parser.add_argument('--table_name',
                        type=str,
                        default=TABLE_NAME,
                        help='The BigQuery table name. Should not already exist.')
    parser.add_argument('--project',
                        type=str,
                        default=PROJECT,
                        help='Your GCS project.')
    parser.add_argument('--runner',
                        type=str,
                        default="DataflowRunner",
                        help='Type of DataFlow runner.')
    args, pipeline_args = parser.parse_known_args(argv)

    # Create and set your PipelineOptions.
    options = PipelineOptions(pipeline_args)
    user_options = options.view_as(UserOptions)

    pipeline = beam.Pipeline(options=options)

    # Gets data from Rank Ranger API
    api = (
        pipeline
        | 'create' >> beam.Create(GetAPI(data=args, date=user_options.date).get_job())
        | 'format dates' >> beam.Map(format_dates)
    )

    # Write to bigquery based on specified schema
    BQ = (api | "WriteToBigQuery" >> beam.io.WriteToBigQuery(args.table_name, args.dataset, SCHEMA))

    pipeline.run()


if __name__ == '__main__':
    run()
As you can see from the error message below, rather than passing in a neatly formatted 'YYYY-MM-DD' parameter, it's passing in the full ValueProvider object, which stops the API call from working and produces the NoneType error.
(Apache) C:\Users\user.name\Documents\Alchemy\Dataflow\production_pipeline\templates>python main.py --runner DataflowRunner --project <PROJECT> --staging_location gs://<STORAGE-BUCKET>/staging --temp_location gs://<STORAGE-BUCKET>/temp --template_location gs://<STORAGE-BUCKET>/template/<TEMPLATE> --region europe-west2
INFO:root:Endpoint: https://www.rankranger.com/api/v2/?rank_stats&key=<API_KEY>&date=RuntimeValueProvider(option: date, type: str, default_value: '2020-08-25')&campaign_id=<CAMPAIGN>&se_id=<SE>&domain=<DOMAIN>&output=json
Traceback (most recent call last):
  File "main.py", line 267, in <module>
    run()
  File "main.py", line 257, in run
    | 'format dates' >> beam.Map(format_dates)
  File "C:\Users\user.name\Anaconda3\envs\Apache\lib\site-packages\apache_beam\transforms\core.py", line 2590, in __init__
    self.values = tuple(values)
TypeError: 'NoneType' object is not iterable
Any help would be hugely appreciated!
You are correct in your diagnosis. You should consider migrating to Flex Templates, which solve this (and other) issues and provide much more flexibility.
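For completeness, the usual workaround with classic templates (a sketch, not a drop-in fix for the exact code above) is to defer both resolving the ValueProvider and calling the API to pipeline execution time, by calling user_options.date.get() inside a DoFn instead of at graph-construction time:

import apache_beam as beam


class FetchFromAPI(beam.DoFn):
    """Calls the API at runtime, when the ValueProvider can be resolved."""

    def __init__(self, data, date_vp):
        self.data = data
        self.date_vp = date_vp  # a ValueProvider, not a plain string

    def process(self, _):
        # .get() is only legal at execution time, which is exactly what we want:
        # each run of the template resolves the date passed for that run.
        date = self.date_vp.get()
        for row in GetAPI(data=self.data, date=date).get_job() or []:
            yield row


# In run(), replace the beam.Create(...) source with a single-element seed
# followed by the runtime fetch (names reuse the question's objects):
api = (
    pipeline
    | 'seed' >> beam.Create([None])
    | 'fetch from API' >> beam.ParDo(FetchFromAPI(data=args, date_vp=user_options.date))
    | 'format dates' >> beam.Map(format_dates)
)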

How to fix AttributeError: 'module' object has no attribute 'Client' when running python in Google Cloud Interactive Shell

I'm trying to run a Python script in my Google Cloud Shell that simulates traffic sensors sending data in real time to Pub/Sub. I'm getting this error:
Traceback (most recent call last):
  File "./send_sensor_data.py", line 87, in <module>
    psclient = pubsub.Client()
AttributeError: 'module' object has no attribute 'Client'
Tried running google.cloud.pubsub.__file__, no duplicates exist.
I've been searching everywhere, and the popular consensus is to install the pubsub package into a virtual environment, which I've tried to no avail.
What I've tried so far:
Set VM to clean state
Uninstalled and reinstalled all gcloud components
Updated all gcloud components to the latest version
Uninstalled and reinstalled the Python pubsub library
Installed pubsub inside a virtualenv
Tried from a different project
Tried from a different GCP account
This is my script:
import time
import gzip
import logging
import argparse
import datetime
from google.cloud import pubsub

TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
TOPIC = 'sandiego'
INPUT = 'sensor_obs2008.csv.gz'


def publish(topic, events):
    numobs = len(events)
    if numobs > 0:
        with topic.batch() as batch:
            logging.info('Publishing {} events from {}'.
                         format(numobs, get_timestamp(events[0])))
            for event_data in events:
                batch.publish(event_data)


def get_timestamp(line):
    # look at first field of row
    timestamp = line.split(',')[0]
    return datetime.datetime.strptime(timestamp, TIME_FORMAT)


def simulate(topic, ifp, firstObsTime, programStart, speedFactor):
    # sleep computation
    def compute_sleep_secs(obs_time):
        time_elapsed = (datetime.datetime.utcnow() - programStart).seconds
        sim_time_elapsed = (obs_time - firstObsTime).seconds / speedFactor
        to_sleep_secs = sim_time_elapsed - time_elapsed
        return to_sleep_secs

    topublish = list()

    for line in ifp:
        event_data = line  # entire line of input CSV is the message
        obs_time = get_timestamp(line)  # from first column

        # how much time should we sleep?
        if compute_sleep_secs(obs_time) > 1:
            # notify the accumulated topublish
            publish(topic, topublish)  # notify accumulated messages
            topublish = list()  # empty out list

            # recompute sleep, since notification takes a while
            to_sleep_secs = compute_sleep_secs(obs_time)
            if to_sleep_secs > 0:
                logging.info('Sleeping {} seconds'.format(to_sleep_secs))
                time.sleep(to_sleep_secs)

        topublish.append(event_data)

    # left-over records; notify again
    publish(topic, topublish)


def peek_timestamp(ifp):
    # peek ahead to next line, get timestamp and go back
    pos = ifp.tell()
    line = ifp.readline()
    ifp.seek(pos)
    return get_timestamp(line)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Send sensor data to Cloud Pub/Sub in small groups, simulating real-time behavior')
    parser.add_argument('--speedFactor', help='Example: 60 implies 1 hour of data sent to Cloud Pub/Sub in 1 minute', required=True, type=float)
    args = parser.parse_args()

    # create Pub/Sub notification topic
    logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
    psclient = pubsub.Client()
    topic = psclient.topic(TOPIC)
    if not topic.exists():
        logging.info('Creating pub/sub topic {}'.format(TOPIC))
        topic.create()
    else:
        logging.info('Reusing pub/sub topic {}'.format(TOPIC))

    # notify about each line in the input file
    programStartTime = datetime.datetime.utcnow()
    with gzip.open(INPUT, 'rb') as ifp:
        header = ifp.readline()  # skip header
        firstObsTime = peek_timestamp(ifp)
        logging.info('Sending sensor data from {}'.format(firstObsTime))
        simulate(topic, ifp, firstObsTime, programStartTime, args.speedFactor)
The pubsub.Client class only exists up to version 0.27.0 of the google-cloud-pubsub Python package, so I just created a virtual environment and installed that version into it.
Here are the commands:
virtualenv venv
source venv/bin/activate
pip install google-cloud-pubsub==0.27.0
The solution for Google Cloud Platform is:
Modify the send_sensor_data.py file as follows:
a. Comment out the original import statement for pubsub and use the _v1 version:
#from google.cloud import pubsub
from google.cloud import pubsub_v1
b. Find this code and replace it as follows:
#publisher = pubsub.PublisherClient()
publisher = pubsub_v1.PublisherClient()
Then execute your send_sensor_data.py as follows:
./send_sensor_data.py --speedFactor=60 --project=YOUR-PROJECT-NAME
There's no pubsub.Client class. You need to choose either a PublisherClient or a SubscriberClient; see https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/pubsub/google/cloud/pubsub.py
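For reference, a minimal sketch of publishing with the newer client (the project and topic names below are placeholders): the old topic.batch() pattern is replaced by publisher.publish() against a fully qualified topic path, which returns a future.

from google.cloud import pubsub_v1

# Assumed project/topic names for illustration.
PROJECT_ID = "your-gcp-project"
TOPIC = "sandiego"

publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(PROJECT_ID, TOPIC)

# Messages must be bytes; publish() returns a future that resolves to the message ID.
future = publisher.publish(topic_path, b"2008-01-01 00:00:00,32,...")
print(future.result())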

Trying to use Python TestRail API on Mac - ImportError: No module named Conf_Reader

I am getting the following import error on Mac:
ImportError: No module named Conf_Reader
Here are the first few lines of my Python code:
import dotenv
import os
import testrail
import Conf_Reader

# setup the testrail client and connect to the testrail instance
def get_testrail_client():
    testrail_file = os.path.join(os.path.dirname(__file__), 'testrail.env')
    testrail_url = Conf_Reader.get_value(testrail_file, 'TESTRAIL_URL')
    client = testrail.APIClient(testrail_url)
    ..
    ..
    ..
So far I have tried with pip and have not been able to find any source for installing it.
I have the same problem on Mac.
To avoid the extra dependency you can skip using an env file and pass the values as variables instead:
# create a credential.py
TESTRAIL_URL = 'https://testrail.com/testrail'
TESTRAIL_USER = 'xxxxx'
TESTRAIL_PASSWORD = 'xxxxx'

# on your update_testrail.py
from credential import (TESTRAIL_URL,
                        TESTRAIL_USER,
                        TESTRAIL_PASSWORD)

testrail_url = TESTRAIL_URL
client = testrail.APIClient(testrail_url)

# Get and set the TestRail User and Password
client.user = TESTRAIL_USER
client.password = TESTRAIL_PASSWORD
They should have linked to https://bangladroid.wordpress.com/2016/08/20/create-separate-credential-files-for-selenium-python/
where it explains that you make your own 'Conf_Reader.py' file as below:
"""
A simple conf reader.
For now, we just use dotenv and return a key.
"""
import dotenv,os
def get_value(conf,key):
# type: (object, object) -> object
"Return the value in conf for a given key"
value = None
try:
dotenv.load_dotenv(conf)
value = os.environ[key]
except Exception,e:
print 'Exception in get_value'
print 'file: ',conf
print 'key: ',key
return value
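With that in place, Conf_Reader.get_value simply loads keys from a dotenv file, so a testrail.env next to the script holds plain KEY=value lines. A minimal usage sketch (the values shown are placeholders):

# testrail.env (placeholder values), loaded by dotenv inside get_value:
#   TESTRAIL_URL=https://yourcompany.testrail.io
#   TESTRAIL_USER=you@example.com
#   TESTRAIL_PASSWORD=your-api-key
import os
import Conf_Reader

testrail_file = os.path.join(os.path.dirname(__file__), 'testrail.env')
print(Conf_Reader.get_value(testrail_file, 'TESTRAIL_URL'))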
