I'm trying to change the playback speed in the Azure Media Player (AMP).
The following is the URL generated from the Azure APIs: https://ampdemo.azureedge.net/?url=https://testingmedia-usea.streaming.media.azure.net/bbd51d47-cc1a-4515-bac8-4053040f8c58/ignite.ism/manifest(format=mpd-time-cmaf,filter=filter1)&heuristicprofile=lowlatency
If you check that link, there is no playback speed control.
I saw the link below, but I don't know where to apply it in my Python code:
https://amp.azure.net/libs/amp/latest/docs/index.html#amp.player.options.playbackspeed
Below is my code:
from dotenv import load_dotenv
from azure.identity import DefaultAzureCredential
from azure.mgmt.media import AzureMediaServices
from azure.storage.blob import BlobServiceClient
from azure.mgmt.media.models import (
Asset,
Transform,
TransformOutput,
BuiltInStandardEncoderPreset,
Job,
JobInputAsset,
JobOutputAsset,
OnErrorType,
Priority,
StreamingLocator,
AssetFilter,
PresentationTimeRange,
)
import os
import random
#Timer for checking job progress
import time
import requests
#Get environment variables
load_dotenv()
default_credential = DefaultAzureCredential(exclude_shared_token_cache_credential=True)
# Get the environment variables SUBSCRIPTIONID, RESOURCEGROUP and ACCOUNTNAME
subscription_id = os.getenv('SUBSCRIPTIONID')
resource_group = os.getenv('RESOURCEGROUP')
account_name = os.getenv('ACCOUNTNAME')
# The file you want to upload. In this example it is copied from a source URL rather than a local folder.
# The file ignite.mp4 has been provided for you.
source_file = "https://testingmedia.blob.core.windows.net/data/ignite.mp4"
#url = requests.get(source_file)
# This is a random string that will be added to the naming of things so that you don't have to keep doing this during testing
uniqueness = "streamAssetFilters-" + str(random.randint(0,9999))
# Change this to your specific streaming endpoint name if not using "default"
streaming_endpoint_name = "default"
# Set the attributes of the input Asset using the random number
in_asset_name = 'inputassetName' + uniqueness
in_alternate_id = 'inputALTid' + uniqueness
in_description = 'inputdescription' + uniqueness
# Create an Asset object
# The asset_id will be used for the container parameter for the storage SDK after the asset is created by the AMS client.
in_asset = Asset(alternate_id=in_alternate_id, description=in_description)
# Set the attributes of the output Asset using the random number
out_asset_name = 'outputassetName' + uniqueness
out_alternate_id = 'outputALTid' + uniqueness
out_description = 'outputdescription' + uniqueness
# Create an output asset object
out_asset = Asset(alternate_id=out_alternate_id, description=out_description)
# The AMS Client
print("Creating AMS Client")
client = AzureMediaServices(default_credential, subscription_id)
# Create an input Asset
print(f"Creating input asset {in_asset_name}")
input_asset = client.assets.create_or_update(resource_group, account_name, in_asset_name, in_asset)
# An AMS asset is a container with a specific id that has "asset-" prepended to the GUID.
# So, you need to create the asset id to identify it as the container
# where Storage is to upload the video (as a block blob)
in_container = 'asset-' + input_asset.asset_id
# create an output Asset
print(f"Creating output asset {out_asset_name}")
output_asset = client.assets.create_or_update(resource_group, account_name, out_asset_name, out_asset)
### Use the Storage SDK to upload the video ###
print(f"Uploading the file {source_file}")
blob_service_client = BlobServiceClient.from_connection_string(os.getenv('STORAGEACCOUNTCONNECTION'))
blob_client = blob_service_client.get_blob_client(in_container, "ignite.mp4")
# working_dir = os.getcwd() + "\Media"
# print(working_dir)
# print(f"Current working directory: {working_dir}")
# upload_file_path = os.path.join(working_dir, source_file)
# print(upload_file_path,"####")
# WARNING: Depending on where you are launching the sample from, the path here could be off, and not include the BasicEncoding folder.
# Adjust the path as needed depending on how you are launching this python sample file.
# Upload the video to storage as a block blob
#with open(url, "rb") as data:
blob_client.upload_blob_from_url(source_file)
transform_name = 'ContentAwareEncodingAssetFilters'
# Create a new Standard encoding Transform for Built-in Copy Codec
print(f"Creating Encoding transform named: {transform_name}")
# For this snippet, we are using 'BuiltInStandardEncoderPreset'
transform_output = TransformOutput(
preset=BuiltInStandardEncoderPreset(
preset_name="ContentAwareEncoding"
),
# What should we do with the job if there is an error?
on_error=OnErrorType.STOP_PROCESSING_JOB,
# What is the relative priority of this job to others? Normal, high or low?
relative_priority=Priority.NORMAL
)
print("Creating encoding transform...")
# Adding transform details
my_transform = Transform()
my_transform.description="Transform with Asset filters"
my_transform.outputs = [transform_output]
print(f"Creating transform {transform_name}")
transform = client.transforms.create_or_update(
resource_group_name=resource_group,
account_name=account_name,
transform_name=transform_name,
parameters=my_transform)
print(f"{transform_name} created (or updated if it existed already). ")
job_name = 'ContentAwareEncodingAssetFilters'+ uniqueness
print(f"Creating custom encoding job {job_name}")
# Create Job Input and Output Assets
job_input = JobInputAsset(asset_name=in_asset_name)
outputs = JobOutputAsset(asset_name=out_asset_name)
# Create the job object and then create the transform job
the_job = Job(input=job_input, outputs=[outputs])
job: Job = client.jobs.create(resource_group, account_name, transform_name, job_name, parameters=the_job)
# Check job state
job_state = client.jobs.get(resource_group, account_name, transform_name, job_name)
# First check
print("First job check")
print(job_state.state)
# Check the state of the job every 10 seconds. Adjust time_in_seconds = <how often you want to check for job state>
def countdown(t):
while t:
mins, secs = divmod(t, 60)
timer = '{:02d}:{:02d}'.format(mins, secs)
print(timer, end="\r")
time.sleep(1)
t -= 1
job_current = client.jobs.get(resource_group, account_name, transform_name, job_name)
if(job_current.state == "Finished"):
print(job_current.state)
# TODO: Download the output file using blob storage SDK
return
if(job_current.state == "Error"):
print(job_current.state)
# TODO: Provide Error details from Job through API
return
else:
print(job_current.state)
countdown(int(time_in_seconds))
time_in_seconds = 10
countdown(int(time_in_seconds))
print(f"Creating locator for streaming...")
# Publish the output asset for streaming via HLS or DASH
locator_name = f"locator-{uniqueness}"
# Create the Asset filters
print("Creating an asset filter...")
asset_filter_name = 'filter1'
# Create the asset filter
asset_filter = client.asset_filters.create_or_update(
resource_group_name=resource_group,
account_name=account_name,
asset_name=out_asset_name,
filter_name=asset_filter_name,
parameters=AssetFilter(
# In this sample, we are going to filter the manifest by the time range of the presentation using the default timescale.
        # You can adjust these settings for your own needs. Note that you can also control output tracks and quality levels with a filter.
tracks=[],
# start_timestamp = 100000000 and end_timestamp = 300000000 using the default timescale will generate
# a play-list that contains fragments from between 10 seconds and 30 seconds of the VoD presentation.
# If a fragment straddles the boundary, the entire fragment will be included in the manifest.
presentation_time_range=PresentationTimeRange(start_timestamp=100000000, end_timestamp=300000000)
)
)
if asset_filter:
print(f"The asset filter ({asset_filter_name}) was successfully created.")
print()
else:
raise ValueError("There was an issue creating the asset filter.")
if output_asset:
    streaming_locator = StreamingLocator(asset_name=out_asset_name, streaming_policy_name="Predefined_DownloadAndClearStreaming", filters=[asset_filter_name])
locator = client.streaming_locators.create(
resource_group_name=resource_group,
account_name=account_name,
streaming_locator_name=locator_name,
parameters=streaming_locator
)
if locator:
print(f"The streaming locator {locator_name} was successfully created!")
else:
raise Exception(f"Error while creating streaming locator {locator_name}")
if locator.name:
hls_format = "format=m3u8-cmaf"
dash_format = "format=mpd-time-cmaf"
# Get the default streaming endpoint on the account
streaming_endpoint = client.streaming_endpoints.get(
resource_group_name=resource_group,
account_name=account_name,
streaming_endpoint_name=streaming_endpoint_name
)
if streaming_endpoint.resource_state != "Running":
print(f"Streaming endpoint is stopped. Starting endpoint named {streaming_endpoint_name}")
client.streaming_endpoints.begin_start(resource_group, account_name, streaming_endpoint_name)
basename_tup = os.path.splitext(source_file) # Extracting the filename and extension
path_extension = basename_tup[1] # Setting extension of the path
manifest_name = os.path.basename(source_file).replace(path_extension, "")
print(f"The manifest name is: {manifest_name}")
manifest_base = f"https://{streaming_endpoint.host_name}/{locator.streaming_locator_id}/{manifest_name}.ism/manifest"
hls_manifest = ""
if asset_filter_name is None:
hls_manifest = f'{manifest_base}({hls_format})'
else:
hls_manifest = f'{manifest_base}({hls_format},filter={asset_filter_name})'
print(f"The HLS (MP4) manifest URL is: {hls_manifest}")
print("Open the following URL to playback the live stream in an HLS compliant player (HLS.js, Shaka, ExoPlayer) or directly in an iOS device")
print({hls_manifest})
print()
dash_manifest = ""
if asset_filter_name is None:
dash_manifest = f'{manifest_base}({dash_format})'
else:
dash_manifest = f'{manifest_base}({dash_format},filter={asset_filter_name})'
print(f"The DASH manifest URL is: {dash_manifest}")
print("Open the following URL to playback the live stream from the LiveOutput in the Azure Media Player")
print(f"https://ampdemo.azureedge.net/?url={dash_manifest}&heuristicprofile=lowlatency")
print()
else:
raise ValueError("Locator was not created or Locator name is undefined.")
There's an example at https://amp.azure.net/libs/amp/latest/samples/dynamic_playback_speed.html showing how to use playback speed. It is also available at https://github.com/Azure-Samples/azure-media-player-samples/blob/master/html/dynamic_playback_speed.html. Note that playbackSpeed is a client-side option of the Azure Media Player itself, configured in the player's JavaScript options per the docs you linked; it is not something you can set from the Python Media Services SDK or through the streaming URL, so nothing needs to change in your Python code.
I'm writing test automation for an API in BDD behave. I need a switcher between environments. Is there any way to change one value in one place without adding this value to every function? Example:
I've tried doing it by adding the value to every function, but it makes the whole project very complicated.
headers = {
'Content-Type': 'application/json',
'country': 'fi'
}
What I want is to switch only the country value in the headers, e.g. from 'fi' to 'es',
and then all functions should switch themselves to the es environment, e.g.
def sending_post_request(endpoint, user):
url = fi_api_endpoints.api_endpoints_list.get(endpoint)
personalId = {'personalId': user}
json_post = requests.post(url,
headers=headers,
data=json.dumps(personalId)
)
endpoint_message = json_post.text
server_status = json_post.status_code
def phone_number(phone_number_status):
if phone_number_status == 'wrong':
cursor = functions_concerning_SQL_conection.choosen_db('fi_sql_identity')
cursor.execute("SELECT TOP 1 PersonalId from Registrations where PhoneNumber is NULL")
result = cursor.fetchone()
user_with_no_phone_number = result[0]
return user_with_no_phone_number
else:
cursor = functions_concerning_SQL_conection.choosen_db('fi_sql_identity')
cursor.execute("SELECT TOP 1 PersonalId from Registrations where PhoneNumber is not NULL")
result = cursor.fetchone()
user_with_phone_number = result[0]
return user_with_phone_number
And when I change from 'fi' to 'es' in the headers, I want:
fi_sql_identity to change to es_sql_identity, and
url = fi_api_endpoints.api_endpoints_list.get(endpoint) to change to
url = es_api_endpoints.api_endpoints_list.get(endpoint).
Thanks, and please help.
With respect to your original question, a solution for this case is a closure:
def f(x):
def long_calculation(y):
return x * y
return long_calculation
# create different functions without dispatching multiple times
g = f(val_1)
h = f(val_2)
g(val_3)
h(val_3)
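Applied to your case, the closure can bind the region once so every call uses the matching headers and endpoints. A minimal sketch, assuming a placeholder endpoint table (API_ENDPOINTS and make_post_sender are hypothetical names; swap in your real fi_api_endpoints / es_api_endpoints data):
import json
import requests
# Hypothetical endpoint tables, one per region (placeholder URLs).
API_ENDPOINTS = {
    'fi': {'register': 'https://fi.example.com/api/register'},
    'es': {'register': 'https://es.example.com/api/register'},
}
def make_post_sender(region):
    # The returned function closes over the region-specific headers and endpoints.
    headers = {'Content-Type': 'application/json', 'country': region}
    endpoints = API_ENDPOINTS[region]
    def send_post_request(endpoint, user):
        url = endpoints[endpoint]
        return requests.post(url, headers=headers, data=json.dumps({'personalId': user}))
    return send_post_request
# One switch point: build the sender once, then reuse it everywhere.
send_request = make_post_sender('fi')
response = send_request('register', '12345')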
Well, the problem is that you hardcode everything. With the update, you can simplify your function as:
def phone_number(phone_number_status, db_name='fi_sql_identity'):
cursor = functions_concerning_SQL_conection.choosen_db(db_name)
if phone_number_status == 'wrong':
sql = "SELECT TOP 1 PersonalId from Registrations where PhoneNumber is NULL"
else:
sql = "SELECT TOP 1 PersonalId from Registrations where PhoneNumber is not NULL"
cursor.execute(sql)
result = cursor.fetchone()
return result[0]
Also please don't write like:
# WRONG
fi_db_conn.send_data()
But use a parameter:
region = 'fi' # or "es"
db_conn = initialize_conn(region)
db_conn.send_data()
And use a config file to store your endpoints with respect to your region, e.g. consider YAML:
# config.yml
es:
db_name: es_sql_identity
fi:
db_name: fi_sql_identity
Then use them in Python:
import yaml
with open('config.yml') as f:
config = yaml.safe_load(f)
region = 'fi'
db_name = config[region]['db_name'] # "fi_sql_identity"
# status = ...
result = phone_number(status, db_name)
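Extending the same idea, the endpoints can live in the config next to the database names. A minimal sketch (the YAML is inlined as a string only to keep the example self-contained, and the URLs are placeholders):
import yaml
# In practice this lives in config.yml; inlined here so the sketch runs as-is.
CONFIG_TEXT = """
fi:
  db_name: fi_sql_identity
  endpoints:
    register: https://fi.example.com/api/register
es:
  db_name: es_sql_identity
  endpoints:
    register: https://es.example.com/api/register
"""
config = yaml.safe_load(CONFIG_TEXT)
region = 'es'  # the single switch point
db_name = config[region]['db_name']            # "es_sql_identity"
url = config[region]['endpoints']['register']  # region-specific endpoint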
See this additional useful link for using YAML.
First, provide an encapsulation of how to access the resources of a region, parameterized by the region. It may also be a good idea to provide this functionality as a behave fixture.
CASE 1: The region parameter needs to vary between features/scenarios
For example, this means that SCENARIO_1 needs region="fi" and SCENARIO_2 needs region="es".
Use a fixture and fixture-tag with a region parameter.
In this case you need to write separate scenarios for each region (bad test reuse),
or use a ScenarioOutline as a template to let behave generate the tests for you (for example, by using a fixture-tag with a region parameter value).
CASE 2: The region parameter is constant for all features/scenarios (during the test run)
You can support multiple test runs with different region parameters by using a userdata parameter.
Look at behave's userdata concept (see the sketch below).
This allows you to run behave -D region=fi ... and behave -D region=es ...
This case provides better reuse of the test suite, meaning a large part of it is a common test suite that is applied to all regions.
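A minimal sketch of reading that userdata option once in environment.py (the default region "fi" here is just an assumption):
# environment.py
def before_all(context):
    # Run as: behave -D region=es ...  (falls back to "fi" when not given)
    context.region = context.config.userdata.get("region", "fi")
Every step can then read context.region and pick the matching endpoints and database.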
HINT: Your code examples are too specific ("fi"-based), which is a bad smell.
Background:
So, thanks to the help of the user Theodox, I was able to figure out how to create nodes in the Node Editor with name prefixes based on the joint you load into the selection field.
However, I want to take it a step further: not only should the nodes be created with joint-name prefixes, but the translates of the created nodes should also be connected via connectAttr.
The problem?
I currently lack the knowledge to make this work and I can't find anything online, so any assistance would be most appreciated.
Code:
I've tried lines of code like:
cmds.connectAttr( sel[0] + '.rotate', sel[1] + '.rotate' )
or
cmds.connectAttr( n=text_value +'_firstGuy', n=text_value +'_secondGuy' )
I know I can create separate text fields and buttons to load the nodes and connect them that way, but with the shortcut I'm coding, all the nodes I've created would be too many to load, so it would be easier if I could just create the nodes with their connections. I'll post the code below for anyone willing to take a crack at it:
import maya.cmds as cmds
if cmds.window(window, exists =True):
cmds.deleteUI(window)
window = cmds.window(title='DS Node Connector demo')
column = cmds.columnLayout(adj=True)
def set_textfield(_):
sel = cmds.ls(selection=True)
cmds.textField(sld_textFld, edit=True, text=sel[0])
def nodebuilder(_):
text_value = cmds.textField(sld_textFld, q = True, text=True)
if text_value:
print "created:", cmds.createNode( 'transform', n=text_value +'_firstGuy' )
print "created:", cmds.createNode( 'transform', n=text_value +'_secondGuy' )
# Connect the translation of two nodes together
print "connected:", cmds.connectAttr (sel[0] +'.t', sel[1] + '.t')
#print "connected:", cmds.connectAttr( '_firstGuy.t', '_secondGuy.translate' )
# Connect the rotation of one node to the override colour
# of a second node.
#print "connected:", cmds.connectAttr( '_firstGuy.rotate', '_secondGuy.overrideColor' )
else:
cmds.warning("select an object and add it to the window first!")
sld_textFld = cmds.textField('sld_surfaceTextHJ', width =240)
load_button = cmds.button( label='Load Helper Joint', c = set_textfield)
node_button = cmds.button( label='Make Node', c = nodebuilder)
cmds.showWindow(window)
My expected result:
Upon hitting "Make Node" (after loading a joint via "Load Helper Joint"), once "_firstGuy" and "_secondGuy" are created with the joint's name prefix, their translates should be connected. It helps to have the Node Editor open to test this.
Okay, you want to connect the translate attributes of the two newly created nodes.
Usually connecting attributes works like this:
connectAttr(<attributeA>, <attributeB>)
Where attributeA is something like "NodeA.translate". So what you need is the name of your first node and the attribute name, in your case:
nodeNameA = text_value + "_firstGuy"
nodeNameB = text_value + "_secondGuy"
The attribute is the well known "translate", so the full attribute name would be:
attributeNameA = nodeNameA + ".translate"
attributeNameB = nodeNameB + ".translate"
And the full command is now:
connectAttr(attributeNameA, attributeNameB)
The only problem here is that Maya automatically renames objects if there is already one with the same name, so a safer way is to use the name that createNode returns:
firstGuyNode = cmds.createNode( 'transform', n=text_value +'_firstGuy' )
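Putting it together, the relevant part of nodebuilder could look like this sketch (text_value stands in for the value read from the text field, as in the original code):
import maya.cmds as cmds
text_value = 'joint1'  # in the real tool this comes from the text field
# Use the names returned by createNode, since Maya may have renamed the nodes.
first_guy = cmds.createNode('transform', n=text_value + '_firstGuy')
second_guy = cmds.createNode('transform', n=text_value + '_secondGuy')
# Drive the second node's translate with the first node's translate.
cmds.connectAttr(first_guy + '.translate', second_guy + '.translate')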
So I am trying my best to navigate my way through the Facebook API. I need to create a script that will download my business' campaign information daily as a CSV file, so I can use another script to upload the information to our database easily.
I finally have code that works to print the information to the log, but I am reaching the user request limit because I have to call get_insights() for every single campaign individually. I am wondering if anyone knows how to help me make it so I don't have to call the Facebook API as often.
What I would like to do is find a field where I can get the daily spend, so I don't have to call the API in every iteration of my campaign loop, but I cannot for the life of me find a way to do so.
#Import all the facebook mumbo jumbo
from facebookads.api import FacebookAdsApi
from facebookads.adobjects.adset import AdSet
from facebookads.adobjects.campaign import Campaign
from facebookads.adobjects.adsinsights import AdsInsights
from facebookads.adobjects.adreportrun import AdReportRun
from facebookads.adobjects.adaccount import AdAccount
from facebookads.adobjects.business import Business
import time
#Set the login info
my_app_id = '****'
my_app_secret = '****'
my_access_token = '****'
#Start the connection to the facebook API
FacebookAdsApi.init(my_app_id, my_app_secret, my_access_token)
business = Business('****')
#Get all ad accounts on the business account
accounts = business.get_owned_ad_accounts(fields=[AdAccount.Field.id])
#iterate through all accounts in the business account
for account in accounts:
tempaccount = AdAccount(account[AdAccount.Field.id])
#get all campaigns in the adaccount
campaigns = tempaccount.get_campaigns(fields=[Campaign.Field.name,Campaign.Field])
#iterate trough all the campaigns in the adaccount
for campaign in campaigns:
print(campaign[Campaign.Field.name])
#get the insight info (spend) from each campaign
campaignsights = campaign.get_insights(params={'date_preset':'yesterday'},fields=[AdsInsights.Field.spend])
print (campaignsights)
It took a while of digging through the API and guessing, but I got it! Here is my final script:
# This program downloads all relevent Facebook traffic info as a csv file
# This program requires info from the Facebook Ads API: https://github.com/facebook/facebook-python-ads-sdk
# Import all the facebook mumbo jumbo
from facebookads.api import FacebookAdsApi
from facebookads.adobjects.adsinsights import AdsInsights
from facebookads.adobjects.adaccount import AdAccount
from facebookads.adobjects.business import Business
# Import the csv writer and the date/time functions
import datetime
import csv
# Set the info to get connected to the API. Do NOT share this info
my_app_id = '****'
my_app_secret = '****'
my_access_token = '****'
# Start the connection to the facebook API
FacebookAdsApi.init(my_app_id, my_app_secret, my_access_token)
# Create a business object for the business account
business = Business('****')
# Get yesterday's date for the filename, and the csv data
yesterdaybad = datetime.datetime.now() - datetime.timedelta(days=1)
yesterdayslash = yesterdaybad.strftime('%m/%d/%Y')
yesterdayhyphen = yesterdaybad.strftime('%m-%d-%Y')
# Define the destination filename
filename = yesterdayhyphen + '_fb.csv'
filelocation = "/cron/downloads/"+ filename
# Get all ad accounts on the business account
accounts = business.get_owned_ad_accounts(fields=[AdAccount.Field.id])
# Open or create new file
try:
csvfile = open(filelocation , 'w+', 0777)
except:
print ("Cannot open file.")
# To keep track of rows added to file
rows = 0
try:
# Create file writer
filewriter = csv.writer(csvfile, delimiter=',')
except Exception as err:
print(err)
# Iterate through the adaccounts
for account in accounts:
    # Create an AdAccount object from the ad account id to make it possible to get insights
tempaccount = AdAccount(account[AdAccount.Field.id])
# Grab insight info for all ads in the adaccount
ads = tempaccount.get_insights(params={'date_preset':'yesterday',
'level':'ad'
},
fields=[AdsInsights.Field.account_id,
AdsInsights.Field.account_name,
AdsInsights.Field.ad_id,
AdsInsights.Field.ad_name,
AdsInsights.Field.adset_id,
AdsInsights.Field.adset_name,
AdsInsights.Field.campaign_id,
AdsInsights.Field.campaign_name,
AdsInsights.Field.cost_per_outbound_click,
AdsInsights.Field.outbound_clicks,
AdsInsights.Field.spend
]
    )
    # Iterate through all ads returned for the ad account
for ad in ads:
# Set default values in case the insight info is empty
date = yesterdayslash
accountid = ad[AdsInsights.Field.account_id]
accountname = ""
adid = ""
adname = ""
adsetid = ""
adsetname = ""
campaignid = ""
campaignname = ""
costperoutboundclick = ""
outboundclicks = ""
spend = ""
# Set values from insight data
if ('account_id' in ad) :
accountid = ad[AdsInsights.Field.account_id]
if ('account_name' in ad) :
accountname = ad[AdsInsights.Field.account_name]
if ('ad_id' in ad) :
adid = ad[AdsInsights.Field.ad_id]
if ('ad_name' in ad) :
adname = ad[AdsInsights.Field.ad_name]
if ('adset_id' in ad) :
adsetid = ad[AdsInsights.Field.adset_id]
if ('adset_name' in ad) :
adsetname = ad[AdsInsights.Field.adset_name]
if ('campaign_id' in ad) :
campaignid = ad[AdsInsights.Field.campaign_id]
if ('campaign_name' in ad) :
campaignname = ad[AdsInsights.Field.campaign_name]
if ('cost_per_outbound_click' in ad) : # This is stored strangely, takes a few steps to break through the layers
costperoutboundclicklist = ad[AdsInsights.Field.cost_per_outbound_click]
costperoutboundclickdict = costperoutboundclicklist[0]
costperoutboundclick = costperoutboundclickdict.get('value')
if ('outbound_clicks' in ad) : # This is stored strangely, takes a few steps to break through the layers
outboundclickslist = ad[AdsInsights.Field.outbound_clicks]
outboundclicksdict = outboundclickslist[0]
outboundclicks = outboundclicksdict.get('value')
if ('spend' in ad) :
spend = ad[AdsInsights.Field.spend]
# Write all ad info to the file, and increment the number of rows that will display
filewriter.writerow([date, accountid, accountname, adid, adname, adsetid, adsetname, campaignid, campaignname, costperoutboundclick, outboundclicks, spend])
rows += 1
csvfile.close()
# Print report
print (str(rows) + " rows added to the file " + filename)
I then have a PHP script that takes the CSV file and uploads it to my database. The key is pulling all the insight data in one big yank. You can then break it up however you want, because each ad has information about its ad set, ad account, and campaign.
Adding a couple of small functions to improve on LucyTurtle's answer, as it is still susceptible to Facebook's rate limiting.
import logging
import time
import requests as rq
#Function to find the string between two strings or characters
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
#Function to check how close you are to the FB rate limit
# (account_number and my_access_token are defined in the main script above)
def check_limit():
check=rq.get('https://graph.facebook.com/v3.3/act_'+account_number+'/insights?access_token='+my_access_token)
call=float(find_between(check.headers['x-business-use-case-usage'],'call_count":','}'))
cpu=float(find_between(check.headers['x-business-use-case-usage'],'total_cputime":','}'))
total=float(find_between(check.headers['x-business-use-case-usage'],'total_time":',','))
usage=max(call,cpu,total)
return usage
#Check if you have reached 75% of the limit; if yes, back off for 5 minutes (put this chunk in your 'for ad in ads' loop, every 100-200 iterations)
if (check_limit()>75):
print('75% Rate Limit Reached. Cooling Time 5 Minutes.')
logging.debug('75% Rate Limit Reached. Cooling Time 5 Minutes.')
time.sleep(300)
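For example, dropped into the 'for ad in ads' loop from the script above, the check could look like this sketch (ads, check_limit, and time all come from the surrounding scripts):
for i, ad in enumerate(ads):
    # Every ~150 ads, back off for 5 minutes if usage is above 75% of the limit.
    if i % 150 == 0 and check_limit() > 75:
        print('75% Rate Limit Reached. Cooling Time 5 Minutes.')
        time.sleep(300)
    # ... process the ad and write the CSV row as in the main script ...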
I'd just like to say
Thank you.
As Marks Andre said, you made my day!
The FB SDK documentation is exhaustive, but it completely lacks practical implementation examples for day-to-day tasks like this one. Bookmark set; this page will be revisited soon.
So the only thing I can actually contribute for fellow sufferers: it seems that with the newer facebook_business SDK you can simply replace "facebookads" in the import statements with "facebook_business".
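For example, assuming that comment holds, the imports at the top of the script would become:
from facebook_business.api import FacebookAdsApi
from facebook_business.adobjects.adsinsights import AdsInsights
from facebook_business.adobjects.adaccount import AdAccount
from facebook_business.adobjects.business import Business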
This script is supposed to select features within a distance in two layers; based on some of their characteristics, each feature gets a score (for example, water pipes crossing naturally sensitive areas like rivers, where the type of the river and its permanency matter in the scoring). Each type is selected in turn and then used in a Select Layer By Location call to give the water pipes within a distance their score.
This is the error I get when I run the code:
Executing: SelectLayerByLocation water_mains WITHIN_A_DISTANCE Just_selected "2.5 Meters" NEW_SELECTION
Start Time: Thu Sep 25 15:21:09 2014
ERROR 999999: Error executing function.
A column was specified that does not exist.
A column was specified that does not exist.
Failed to execute (SelectLayerByLocation).
The Select Layer By Location call is in a script that is called by another script (the main script).
The main script:
def main():
try:
import arcpy
from arcpy import env
# pathing to avoid retyping
env.workspace = "C:/Users/abusheikan/Desktop/prev_files/RiskAnalysisModel"
dataPath = 'C:\\Users\\abusheikan\\Desktop\\prev_files\\RiskAnalysisModel\\ToolData2'
arcpy.env.overwriteOutput = True
import imp
##Defines INPUT variables
        #some variables won't be used but are there for future use; I'm starting off as simple as possible
creekLayer = dataPath + "\\ENVIRONMENTAL\\OHN_WaterCourse.shp"
PipeLayer=dataPath + "\\SERVICES\\water_mains.shp"
nameField = 'ROW_1'
scoreField = 'ROW_SCORE1'
crossingField = 'CROSS_ROW1'
ROWfield = 'ROW_TRUE1'
diaField='INTERNAL_D'
rangeVal= 416
Field = 'WARTERCOURS'
Field2='PERMANENCY'
arcpy.MakeFeatureLayer_management(PipeLayer,"water_mains")
inFeatures = "water_mains"
        #The following lists contain creek classes. Format is (a, b, c, d) such that a is the creek class name,
        #b is an average permanency of flow, c is the width, and d is the score to be given.
        #Lower value of c means lower criticality.
creeks = [('Vertual Flow','intermittent',10,1),
('Vertual Connector','intermittent', 10,2),
('Vertual Flow','Permanent', 10,1),
('Vertual Connector', 'Permanent', 10,2),
('Ditch','Intermittent',5,3),
('Ditch','Permanent',5,4),
('Stream','Intermittent',5,3),
('Stream','Intermittent',5,4)]
        ## the following isn't used yet
creeks2 = [('Vertual Flow','intermittent',10,1),
('Vertual Connector','intermittent', 10,2),
('Vertual Flow','Permanent', 10,1),
('Vertual Connector', 'Permanent', 10,2),
('Ditch','Intermittent',5,3),
('Ditch','Permanent',5,4),
('Stream','Intermittent',5,3),
('Stream','Intermittent',5,4)]
        ## This codeblock isn't utilized yet and will always return row_score. It is supposed to adjust the value of ROW_SCORE
        ##based on whether the water main crosses a creek, by looking up the value in the CROSS_ROW1 field that is obtained later on
expression = "crossing(!CROSS_ROW1!,!ROW_SCORE1!)"
codeblock = """def crossing(crosses, row_score):
if crosses != 0:
return 5
else:
return row_score"""
except:
arcpy.AddError("Definitions failed")
arcpy.AddMessage(arcpy.GetMessages())
try:
        ## pathing to a function to be called
fpath = 'C:\\Users\\abusheikan\\Desktop\\prev_files\\RiskAnalysisModel\\Scripts\\'
        ## defining the function path; retyped anyway for debugging purposes.
functions = imp.load_source('functions', 'C:\\Users\\abusheikan\\Desktop\\prev_files\\RiskAnalysisModel\\Scripts\\functions_creeks.py')
## check check :-p
arcpy.AddMessage("Funtions Loaded")
except:
arcpy.AddMessage("Functions not loaded")
try:
        ##Clear all selections, because otherwise commands will be applied only to selected features. Why? I don't know; please explain where this is
        ## supposed to be used and where not to. Thanks!
arcpy.SelectLayerByAttribute_management(inFeatures, "CLEAR_SELECTION")
arcpy.AddMessage("Selected")
        ##This new field will show the creek overlying the pipe. A default value of "No Creek" will be assigned.
arcpy.AddField_management(inFeatures, nameField, "TEXT")
arcpy.CalculateField_management(inFeatures, nameField, '"No Creek"')
##This field will contain a score for the highest creek class over the pipe.
## Default of 0 means no creeks
arcpy.AddField_management(inFeatures, scoreField, "SHORT")
arcpy.CalculateField_management(inFeatures, scoreField, 1)
arcpy.AddField_management(inFeatures, crossingField, "SHORT")
## arcpy.AddField_management(mainRoadLayer, ROWfield, "FLOAT",3,1)
## arcpy.CalculateField_management("t_Pavement_Line", ROWfield, expressionROW, "PYTHON_9.3", codeblockROW)
except:
#Report error
arcpy.AddError("Could not create new fields")
#Report error messages
arcpy.AddMessage(arcpy.GetMessages())
try:
## functions.roadclass_loop is a function that loops through all creek classes in
## a list, selects the water mains within a distance of each one, and assigns the
## appropriate score. Full script is in the called function.
        ## the following is a failed test, so never mind that commented-out line; it may come in handy so I left it in there
## arcpy.MakeFeatureLayer_management(PipeLayer,
## "JUST_MADE",str(dialField) + " <= "+ str(rangeVal))
        ## calls the creek_loop function(). I think here is where the error is created; please check the inputs, they may be where the problem is! But I can't see anything wrong with them.
functions.roadclass_loop(creeks, creekLayer, Field, inFeatures, "WITHIN_A_DISTANCE",
nameField, scoreField)
arcpy.AddMessage("small pipes")
        ## same as before but with the second tuple list.
functions.roadclass_loop(creeks2, creekLayer, Field, inFeatures, "WITHIN_A_DISTANCE",
nameField, scoreField)
arcpy.AddMessage("BIG PIPES")
## functions.roadclass_loop(provincial, provincialLayer, Field3, inFeatures, "INTERSECT",
## "", crossingField)
## If the CROSS_ROW field has a nonzero value (i.e. if the water main crosses a large road)
## the road class score will be increased to 5(the maximum).
## inserts the scores into the
arcpy.CalculateField_management(inFeatures, scoreField, expression, "PYTHON_9.3", codeblock)
except:
arcpy.AddMessage("Could not run")
arcpy.AddMessage(arcpy.GetMessages())
if __name__== "__main__":
main()
The called function is:
def test():
## import arcpy
arcpy.AddMessage("This function works")
##def roadclass_loop(listOfClassTuples, sourceLayer, fieldName, targetLayer,
## outputField1, outputField2):
def roadclass_loop(listOfClassTuples, sourceLayer, fieldName, targetLayer, crossingType,
outputField1, outputField2):
import arcpy
from arcpy import env
env.workspace = "C:/data/"
##try:
for creekclass in listOfClassTuples:
(classname, Permanency, creekWidth, score) = creekclass
bufferDistance = creekWidth*0.5
try:
if crossingType == "INTERSECT":
stringBuffer = ""
else:
stringBuffer = "%s Meters" % str(bufferDistance)
except:
arcpy.AddMessage("its here")
arcpy.MakeFeatureLayer_management(sourceLayer, "Just_selected",
fieldName + "= '"+ classname + "'")
#arcpy.MakeFeatureLayer_management("Just_Selected", "JUST_SELECTED", FieldName2+" = '"+ Permanency + "'")
arcpy.SelectLayerByLocation_management(targetLayer, crossingType,
"Just_selected", stringBuffer, "NEW_SELECTION")
classname = classname.lower()
if outputField1!= "":
arcpy.CalculateField_management(targetLayer, outputField1, classname )
arcpy.CalculateField_management(targetLayer, outputField2, score )
arcpy.Delete_management("Just_selected")
arcpy.SelectLayerByAttribute_management(targetLayer, "CLEAR_SELECTION")
##except:
# arcpy.AddMessage("Function failed")
#arcpy.AddMessage(arcpy.GetMessages())
See this question on the GIS StackExchange: Points in Polygon Count: Error with arcpy.selectLayerByLocation_management. They made a mistake when calling MakeFeatureLayer_management, but the error was thrown by SelectLayerByLocation_management. You may have a similar situation.
In your case, are you confident that the feature class stored in dataPath + "\\ENVIRONMENTAL\\OHN_WaterCourse.shp" has a field called WARTERCOURS? Is there maybe a typo there? (The word WARTERCOURS caught my attention; Google says you're the first person on the Internet to use it.)
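If you want to check, a short sketch that prints the fields the shapefile actually has (path taken from the question):
import arcpy
shp = r"C:\Users\abusheikan\Desktop\prev_files\RiskAnalysisModel\ToolData2\ENVIRONMENTAL\OHN_WaterCourse.shp"
# Print every real field name, to spot typos like WARTERCOURS vs. the actual name.
for field in arcpy.ListFields(shp):
    print(field.name)
Note that shapefile attribute names are limited to 10 characters, so an 11-character name like WARTERCOURS would have been truncated when the field was created.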
Is your listOfClassTuples, which is being fed by your creeks variable, supposed to be a set of fields within your creekLayer (dataPath + "\\ENVIRONMENTAL\\OHN_WaterCourse.shp")?