I'm trying to import data from the IG API. Fetching historical prices with 'num_points' works fine, but when I try to use a date range instead, it returns an error.
from datetime import timedelta

import requests_cache
from trading_ig import IGService

session = requests_cache.CachedSession(cache_name='cache', backend='sqlite', expire_after=timedelta(hours=1))

class config(object):
    username = "xxx"
    password = "xxx"
    api_key = "xxx"
    acc_type = "Demo-cfd"
    acc_number = "xxx"

ig_service = IGService(config.username, config.password, config.api_key)
ig_service.create_session()

epic = 'CS.D.EURUSD.MINI.IP'
resolution = 'D'
num_points = 10
response = ig_service.fetch_historical_prices_by_epic_and_num_points(epic, resolution, num_points)
# works fine
df_ask = response['prices']['ask']
print("ask prices:\n%s" % df_ask)
But if I replace the num_points parameter with a date range and change the function, I'm getting an error:
epic = 'CS.D.EURUSD.MINI.IP'
resolution = 'D'
(start_date, end_date) = ('2015-09-15', '2015-09-28')
response = ig_service.fetch_historical_prices_by_epic_and_date_range(epic, resolution, start_date, end_date)
df_ask = response['prices']['ask']
print("ask prices:\n%s" % df_ask)
Error:
File "/Users/xxx/untitled5.py", line 53, in <module>
response = ig_service.fetch_historical_prices_by_epic_and_date_range(epic, resolution, start_date, end_date)
File "/opt/anaconda3/lib/python3.9/site-packages/trading_ig/rest.py", line 1618, in fetch_historical_prices_by_epic_and_date_range
data = self.parse_response(response.text)
File "/opt/anaconda3/lib/python3.9/site-packages/trading_ig/rest.py", line 342, in parse_response
raise (Exception(response["errorCode"]))
Exception: error.malformed.date
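A hedged guess rather than a confirmed fix: error.malformed.date suggests the request was built with date strings the API did not like, and the IG price-history endpoints generally want full datetimes rather than bare dates. A minimal sketch, assuming your trading_ig version accepts 'YYYY-MM-DD HH:MM:SS' strings (worth checking against the library's docs, since older releases used a different date format):
epic = 'CS.D.EURUSD.MINI.IP'
resolution = 'D'
# assumption: full datetimes instead of bare dates
(start_date, end_date) = ('2015-09-15 00:00:00', '2015-09-28 00:00:00')
response = ig_service.fetch_historical_prices_by_epic_and_date_range(epic, resolution, start_date, end_date)
df_ask = response['prices']['ask']
print("ask prices:\n%s" % df_ask)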
I'm new to AWS Lambda so please take it easy on me :)
I'm getting a lot of errors in my code and I'm not sure of the best way to troubleshoot other than looking at the CloudWatch console and adjusting things as necessary. If anyone has any tips for troubleshooting, I'd appreciate it!
Here's my plan for what I want to do; please let me know if this makes sense:
1. upload file to S3 bucket -> 2. upload triggers a Lambda to run (inside this Lambda is a Python script that modifies the data source; the source data is a messy file) -> 3. store the output in the same S3 bucket in a separate folder -> 4. (future state) perform analysis on the new JSON file.
I have my S3 bucket created and I have set up the Lambda to trigger when a new file is added. That part is working! I have added my Python script (which works on my local drive) to the Lambda function within the code section of Lambda.
The errors I am getting say that my 6 global variables (df_a1 through df_q1) are not defined. If I move them out of the functions then that part works; however, when I get to the merge portion of my code I get an error that says "cannot merge a series without a name". I gave them a name using the name= argument and I'm still getting this issue.
Here's my code that is in my aws lambda:
try:
    import json
    import boto3
    import pandas as pd
    import time
    import io
    print("All Modules are ok ...")
except Exception as e:
    print("Error in Imports ")
s3_client = boto3.client('s3')
#df_a1 = pd.Series(dtype='object', name='test1')
#df_g1 = pd.Series(dtype='object', name='test2')
#df_j1 = pd.Series(dtype='object', name='test3')
#df_p1 = pd.Series(dtype='object', name='test4')
#df_r1 = pd.Series(dtype='object', name='test5')
#df_q1 = pd.Series(dtype='object', name='test6')
def Add_A1(xyz, RC, string):
    #DATA TO GRAB FROM STRING
    global df_a1
    IMG = boolCodeReturn(string[68:69].strip())
    roa = string[71:73].strip()
    #xyzName = string[71:73].strip()
    #ADD RECORD TO DATAFRAME
    series = pd.Series(data=[xyz, IMG, roa], index=['XYZ', 'IMG', 'Roa'])
    df_a1 = df_a1.append(series, ignore_index=True)

def Add_G1(xyz, RC, string):
    global df_g1
    #DATA TO GRAB FROM STRING
    gcode = string[16:30].strip()
    ggname = string[35:95].strip()
    #ADD RECORD TO DATAFRAME
    series = pd.Series(data=[xyz, gcode, ggname], index=['XYZ', 'Gcode', 'Ggname'])
    df_g1 = df_g1.append(series, ignore_index=True)

def Add_J1(xyz, RC, string):
    #DATA TO GRAB FROM STRING
    global df_j1
    xyzName = string[56:81].strip()
    #ADD RECORD TO DATAFRAME
    series = pd.Series(data=[xyz, xyzName], index=['XYZ', 'XYZName'])
    df_j1 = df_j1.append(series, ignore_index=True)

def Add_P01(xyz, RC, string):
    global df_p1
    #DATA TO GRAB FROM STRING
    giname = string[50:90].strip()
    #ADD RECORD TO DATAFRAME
    series = pd.Series(data=[xyz, giname], index=['XYZ', 'Giname'])
    df_p1 = df_p1.append(series, ignore_index=True)

def Add_R01(xyz, RC, string):
    global df_r1
    #DATA TO GRAB FROM STRING
    Awperr = boolCodeReturn(string[16:17].strip())
    #PPP= string[17:27].lstrip("0")
    AUPO = int(string[27:40].lstrip("0"))
    AUPO = AUPO / 100000
    AupoED = string[40:48]
    #ADD RECORD TO DATAFRAME
    series = pd.Series(data=[xyz, AUPO, Awperr, AupoED], index=['XYZ', 'AUPO', 'Awperr', 'AupoED'])
    df_r1 = df_r1.append(series, ignore_index=True)

def Add_Q01(xyz, RC, string):
    global df_q1
    #DATA TO GRAB FROM STRING
    #PPP= string[17:27].lstrip("0")
    UPPWA = int(string[27:40].lstrip("0"))
    UPPWA = UPPWA / 100000
    EDWAPPP = string[40:48]
    #ADD RECORD TO DATAFRAME
    series = pd.Series(data=[xyz, UPPWA, EDWAPPP], index=['XYZ', 'UPPWA', 'EDWAPPPer'])
    df_q1 = df_q1.append(series, ignore_index=True)

def boolCodeReturn(code):
    if code == "X":
        return 1
    else:
        return 0

def errorHandler(xyz, RC, string):
    pass
def lambda_handler(event, context):
    print(event)
    #Get Bucket Name
    bucket = event['Records'][0]['s3']['bucket']['name']
    #get the file/key name
    key = event['Records'][0]['s3']['object']['key']
    response = s3_client.get_object(Bucket=bucket, Key=key)
    print("Got Bucket! - pass")
    print("Got Name! - pass ")
    data = response['Body'].read().decode('utf-8')
    print('reading data')
    buf = io.StringIO(data)
    print(buf.readline())
    #data is the file uploaded
    fileRow = buf.readline()
    print('reading_row')
    while fileRow:
        currentString = fileRow
        xyz = currentString[0:11].strip()
        RC = currentString[12:15].strip() #this grabs the code that indicates what the data type is
        #controls which function to run based on the code
        switcher = {
            "A1": Add_A1,
            "J1": Add_J1,
            "G1": Add_G1,
            "P01": Add_P01,
            "R01": Add_R01,
            "Q01": Add_Q01
        }
        runfunc = switcher.get(RC, errorHandler)
        runfunc(xyz, RC, currentString)
        fileRow = buf.readline()
    print(type(df_a1), "A1 FILE")
    print(type(df_g1), 'G1 FILE')
    buf.close()
    ##########STEP 3: JOIN THE DATA TOGETHER##########
    df_merge = pd.merge(df_a1, df_g1, how="left", on="XYZ")
    df_merge = pd.merge(df_merge, df_j1, how="left", on="XYZ")
    df_merge = pd.merge(df_merge, df_p1, how="left", on="XYZ")
    df_merge = pd.merge(df_merge, df_q1, how="left", on="XYZ")
    df_merge = pd.merge(df_merge, df_r1, how="left", on="XYZ")
    ##########STEP 4: SAVE THE DATASET TO A JSON FILE##########
    filename = 'Export-Records.json'
    json_buffer = io.StringIO()
    df_merge.to_json(json_buffer)
    s3_client.put_object(Bucket='file-etl', Key=filename, Body=json_buffer.getvalue())
    t = time.localtime()
    current_time = time.strftime("%H:%M:%S", t)
    print("Finished processing at " + current_time)
    response = {
        "statusCode": 200,
        'body': json.dumps("Code worked!")
    }
    return response
Here are some of the error messages:
[ERROR] NameError: name 'df_a1' is not defined
Traceback (most recent call last):
File "/var/task/lambda_function.py", line 145, in lambda_handler
runfunc (ndc, recordcode, currentString)
File "/var/task/lambda_function.py", line 26, in Add_A1
df_a1 = df_a1.append(series, ignore_index=True)
[ERROR] NameError: name 'df_g1' is not defined
Traceback (most recent call last):
File "/var/task/lambda_function.py", line 152, in lambda_handler
runfunc (ndc, recordcode, currentString)
File "/var/task/lambda_function.py", line 38, in Add_G1
df_g1 = df_g1.append(series, ignore_index=True)
[ERROR] ValueError: Cannot merge a Series without a name
Traceback (most recent call last):
File "/var/task/lambda_function.py", line 160, in lambda_handler
df_merge = pd.merge(df_a1, df_g1, how="left", on="NDC")
File "/opt/python/pandas/core/reshape/merge.py", line 111, in merge
op = _MergeOperation(
File "/opt/python/pandas/core/reshape/merge.py", line 645, in __init__
_left = _validate_operand(left)
File "/opt/python/pandas/core/reshape/merge.py", line 2425, in _validate_operand
raise ValueError("Cannot merge a Series without a name")
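For what it's worth, one way to sidestep both the NameError and the "Cannot merge a Series without a name" error is to avoid module-level pandas Series entirely: collect plain dicts per record type and build DataFrames right before the merge, so every frame has a real 'XYZ' column to join on. A rough sketch of that shape, reusing the question's names and its boolCodeReturn helper (pd.Series.append is also deprecated in recent pandas, which is one more reason to avoid it):
import pandas as pd

rows_a1, rows_g1 = [], []   # one list of plain dicts per record type

def Add_A1(xyz, RC, string):
    # appending to a module-level list mutates it in place, so no global/NameError issues
    rows_a1.append({'XYZ': xyz,
                    'IMG': boolCodeReturn(string[68:69].strip()),
                    'Roa': string[71:73].strip()})

def Add_G1(xyz, RC, string):
    rows_g1.append({'XYZ': xyz,
                    'Gcode': string[16:30].strip(),
                    'Ggname': string[35:95].strip()})

# ...same pattern for the J1/P01/R01/Q01 handlers...

def build_and_merge():
    # build DataFrames (not Series) so pd.merge has a named 'XYZ' column to join on
    df_a1 = pd.DataFrame(rows_a1, columns=['XYZ', 'IMG', 'Roa'])
    df_g1 = pd.DataFrame(rows_g1, columns=['XYZ', 'Gcode', 'Ggname'])
    return pd.merge(df_a1, df_g1, how="left", on="XYZ")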
I am running all the SQL scripts under the scripts path in a for loop, copying the data into the #priya_stage area in Snowflake, and then using the GET command to unload the data from the stage to my Unix path in CSV format. But I am getting an error.
Note: this same code works on my Mac but not on the Unix server.
import logging
import os
import snowflake.connector
from snowflake.connector import DictCursor as dict
from os import walk
try:
    conn = snowflake.connector.connect(
        account = 'xxx' ,
        user = 'xxx' ,
        password = 'xxx' ,
        database = 'xxx' ,
        schema = 'xxx' ,
        warehouse = 'xxx' ,
        role = 'xxx' ,
    )
    conn.cursor().execute('USE WAREHOUSE xxx')
    conn.cursor().execute('USE DATABASE xxx')
    conn.cursor().execute('USE SCHEMA xxx')
    take = []
    scripts = '/xxx/apps/xxx/xxx/scripts/snow/scripts/'
    os.chdir('/xxx/apps/xxx/xxx/scripts/snow/scripts/')
    for root , dirs , files in walk(scripts):
        for file in files:
            inbound = file[0:-4]
            sql = open(file , 'r').read()
            # file_number = 0
            # file_number += 1
            file_prefix = 'bridg_' + inbound
            file_name = file_prefix
            result_query = conn.cursor(dict).execute(sql)
            query_id = result_query.sfqid
            sql_copy_into = f'''
            copy into #priya_stage/{file_name}
            from (SELECT * FROM TABLE(RESULT_SCAN('{query_id}')))
            DETAILED_OUTPUT = TRUE
            HEADER = TRUE
            SINGLE = FALSE
            OVERWRITE = TRUE
            max_file_size=4900000000'''
            rs_copy_into = conn.cursor(dict).execute(sql_copy_into)
            for row_copy in rs_copy_into:
                file_name_in_stage = row_copy["FILE_NAME"]
                sql_get_to_local = f"""
                GET #priya_stage/{file_name_in_stage} file:///xxx/apps/xxx/xxx/inbound/zip_files/{inbound}/"""
                rs_get_to_local = conn.cursor(dict).execute(sql_get_to_local)
except snowflake.connector.errors.ProgrammingError as e:
    print('Error {0} ({1}): {2} ({3})'.format(e.errno , e.sqlstate , e.msg , e.sfqid))
finally:
    conn.cursor().close()
    conn.close()
Error
Traceback (most recent call last):
File "Generic_local.py", line 52, in <module>
rs_get_to_local = conn.cursor(dict).execute(sql_get_to_local)
File "/usr/local/lib64/python3.6/site-packages/snowflake/connector/cursor.py", line
746, in execute
sf_file_transfer_agent.execute()
File "/usr/local/lib64/python3.6/site-
packages/snowflake/connector/file_transfer_agent.py", line 379, in execute
self._transfer_accelerate_config()
File "/usr/local/lib64/python3.6/site-
packages/snowflake/connector/file_transfer_agent.py", line 671, in
_transfer_accelerate_config
self._use_accelerate_endpoint = client.transfer_accelerate_config()
File "/usr/local/lib64/python3.6/site-
packages/snowflake/connector/s3_storage_client.py", line 572, in
transfer_accelerate_config
url=url, verb="GET", retry_id=retry_id, query_parts=dict(query_parts)
File "/usr/local/lib64/python3.6/site-
packages/snowflake/connector/s3_storage_client.py", line 353, in _.
send_request_with_authentication_and_retry
verb, generate_authenticated_url_and_args_v4, retry_id
File "/usr/local/lib64/python3.6/site-
packages/snowflake/connector/storage_client.py", line 313, in
_send_request_with_retry
f"{verb} with url {url} failed for exceeding maximum retries."
snowflake.connector.errors.RequestExceedMaxRetryError: GET with url b'https://xxx-
xxxxx-xxx-x-customer-stage.xx.amazonaws.com/https://xxx-xxxxx-xxx-x-customer-
stage.xx.amazonaws.com/?accelerate' failed for exceeding maximum retries.
This link redirects me to an error message:
https://xxx-xxxxx-xxx-x-customer-stage.xx.amazonaws.com/https://xxx-xxxxx-xxx-x-customer-stage.xx.amazonaws.com/?accelerate
Access Denied error:
<Error>
<Code>AccessDenied</Code>
<Message>Access Denied</Message>
<RequestId>1X1Z8G0BTX8BAHXK</RequestId>
<HostId>QqdCqaSK7ogAEq3sNWaQVZVXUGaqZnPv78FiflvVzkF6nSYXTSKu3iSiYlUOU0ka+0IMzErwGC4=</HostId>
</Error>
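The stack trace shows the connector failing while it probes the S3 transfer-acceleration configuration endpoint (the trailing ?accelerate request), and RequestExceedMaxRetryError means those HTTP calls never got a usable answer. As a hedged diagnostic only, not a fix, it may be worth checking whether the Unix host can reach that endpoint at all; a sketch along these lines (the URL is the redacted one from the traceback):
import requests

# hypothetical connectivity check from the Unix host; even an AccessDenied
# response would show the endpoint is reachable, while a timeout or proxy
# error would point at the network path rather than the script
url = "https://xxx-xxxxx-xxx-x-customer-stage.xx.amazonaws.com/?accelerate"
try:
    r = requests.get(url, timeout=10)
    print(r.status_code, r.text[:200])
except requests.exceptions.RequestException as exc:
    print("Request failed:", exc)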
I have been working with the Alpha Vantage Python API for a while now, but I have only needed to pull daily and intraday time series data. I am trying to pull extended intraday data, but am not having any luck getting it to work. Trying to run the following code:
from alpha_vantage.timeseries import TimeSeries
apiKey = 'MY API KEY'
ts = TimeSeries(key = apiKey, output_format = 'pandas')
totalData, _ = ts.get_intraday_extended(symbol = 'NIO', interval = '15min', slice = 'year1month1')
print(totalData)
gives me the following error:
Traceback (most recent call last):
File "/home/pi/Desktop/test.py", line 9, in <module>
totalData, _ = ts.get_intraday_extended(symbol = 'NIO', interval = '15min', slice = 'year1month1')
File "/home/pi/.local/lib/python3.7/site-packages/alpha_vantage/alphavantage.py", line 219, in _format_wrapper
self, *args, **kwargs)
File "/home/pi/.local/lib/python3.7/site-packages/alpha_vantage/alphavantage.py", line 160, in _call_wrapper
return self._handle_api_call(url), data_key, meta_data_key
File "/home/pi/.local/lib/python3.7/site-packages/alpha_vantage/alphavantage.py", line 354, in _handle_api_call
json_response = response.json()
File "/usr/lib/python3/dist-packages/requests/models.py", line 889, in json
self.content.decode(encoding), **kwargs
File "/usr/lib/python3/dist-packages/simplejson/__init__.py", line 518, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3/dist-packages/simplejson/decoder.py", line 370, in decode
obj, end = self.raw_decode(s)
File "/usr/lib/python3/dist-packages/simplejson/decoder.py", line 400, in raw_decode
return self.scan_once(s, idx=_w(s, idx).end())
simplejson.errors.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
What is interesting is that if you look at the TimeSeries class, it states that extended intraday is returned as a "time series in one csv_reader object" whereas everything else, which works for me, is returned as "two json objects". I am 99% sure this has something to do with the issue, but I'm not entirely sure because I would think that calling intraday extended function would at least return SOMETHING (despite it being in a different format), but instead just gives me an error.
Another interesting little note is that the function refuses to take "adjusted = True" (or False) as an input despite it being in the documentation... likely unrelated, but maybe it will help diagnose the issue.
Seems like TIME_SERIES_INTRADAY_EXTENDED can return only CSV format, but the alpha_vantage wrapper applies JSON methods, which results in the error.
My workaround:
from alpha_vantage.timeseries import TimeSeries
import pandas as pd
apiKey = 'MY API KEY'
ts = TimeSeries(key = apiKey, output_format = 'csv')
#download the csv
totalData = ts.get_intraday_extended(symbol = 'NIO', interval = '15min', slice = 'year1month1')
#csv --> dataframe
df = pd.DataFrame(list(totalData[0]))
#setup of column and index
header_row=0
df.columns = df.iloc[header_row]
df = df.drop(header_row)
df.set_index('time', inplace=True)
#show output
print(df)
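One caveat with this workaround: the csv reader hands back every field as a string, so if you want numeric prices and a datetime index you may also need a conversion step along these lines (column names assumed to be the standard open/high/low/close/volume headers of the extended-intraday CSV):
#convert string columns to proper dtypes
df.index = pd.to_datetime(df.index)
for col in ['open', 'high', 'low', 'close', 'volume']:
    df[col] = pd.to_numeric(df[col])
print(df.dtypes)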
This is an easy way to do it.
import pandas as pd

ticker = 'IBM'
date= 'year1month2'
apiKey = 'MY API KEY'
df = pd.read_csv('https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol='+ticker+'&interval=15min&slice='+date+'&apikey='+apiKey+'&datatype=csv&outputsize=full')
#Show output
print(df)
import pandas as pd
symbol = 'AAPL'
interval = '15min'
slice = 'year1month1'
api_key = ''
adjusted = '&adjusted=true&'
csv_url = 'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol='+symbol+'&interval='+interval+'&slice='+slice+adjusted+'&apikey='+api_key
data = pd.read_csv(csv_url)
print(data.head())
I am trying to build a Kinesis consumer script using Python 3.4; below is an example of my code. I want the records to be saved to a local file that I can later push to S3:
from boto import kinesis
import time
import json
# AWS Connection Credentials
aws_access_key = 'your_key'
aws_access_secret = 'your_secret key'
# Selected Kinesis Stream
stream = 'TwitterTesting'
# Aws Authentication
auth = {"aws_access_key_id": aws_access_key, "aws_secret_access_key": aws_access_secret}
conn = kinesis.connect_to_region('us-east-1',**auth)
# Targeted file to be pushed to S3 bucket
fileName = "KinesisDataTest2.txt"
file = open("C:\\Users\\csanders\\PycharmProjects\\untitled\\KinesisDataTest.txt", "a")
# Describe stream and get shards
tries = 0
while tries < 10:
    tries += 1
    time.sleep(1)
    response = conn.describe_stream(stream)
    if response['StreamDescription']['StreamStatus'] == 'ACTIVE':
        break
else:
    raise TimeoutError('Stream is still not active, aborting...')
# Get Shard Iterator and get records from stream
shard_ids = []
stream_name = None
if response and 'StreamDescription' in response:
    stream_name = response['StreamDescription']['StreamName']
    for shard_id in response['StreamDescription']['Shards']:
        shard_id = shard_id['ShardId']
        shard_iterator = conn.get_shard_iterator(stream, shard_id, 'TRIM_HORIZON')
        shard_ids.append({'shard_id': shard_id, 'shard_iterator': shard_iterator['ShardIterator']})
tries = 0
result = []
while tries < 100:
    tries += 1
    response = conn.get_records(shard_iterator, 100)
    shard_iterator = response['NextShardIterator']
    if len(response['Records']) > 0:
        for res in response['Records']:
            result.append(res['Data'])
    print(result, shard_iterator)
For some reason when I run this script I get the following error each time:
Traceback (most recent call last):
File "C:/Users/csanders/PycharmProjects/untitled/Get_records_Kinesis.py", line 57, in <module>
response = json.load(conn.get_records(shard_ids, 100))
File "C:\Python34\lib\site-packages\boto-2.38.0-py3.4.egg\boto\kinesis\layer1.py", line 327, in get_records
body=json.dumps(params))
File "C:\Python34\lib\site-packages\boto-2.38.0- py3.4.egg\boto\kinesis\layer1.py", line 874, in make_request
body=json_body)
boto.exception.JSONResponseError: JSONResponseError: 400 Bad Request
{'Message': 'Start of list found where not expected', '__type': 'SerializationException'}
My end goal is to eventually kick this data into an S3 bucket; I just need to get these records to return and print first. The data going into the stream is JSON-dumped Twitter data sent with the put_record function. I can post that code too if needed.
Updated that one line from response = json.load(conn.get_records(shard_ids, 100)) to response = conn.get_records(shard_iterator, 100)
response = json.load(conn.get_records(shard_ids, 100))
get_records expects a single shard iterator, not an array of shards, so when it tries to get records with that list the request fails (you see the 400 from Kinesis saying that the request is bad).
http://boto.readthedocs.org/en/latest/ref/kinesis.html?highlight=get_records#boto.kinesis.layer1.KinesisConnection.get_records
If you replace the following (set the "while" loop bound according to how many batches of records you want to collect; you can make it run forever by using "while tries == 0:" and removing "tries += 1"):
shard_iterator = conn.get_shard_iterator(stream, shard_id, 'TRIM_HORIZON')
shard_ids.append({'shard_id': shard_id, 'shard_iterator': shard_iterator['ShardIterator']})
with the following, it will work:
shard_iterator = conn.get_shard_iterator(stream, shard_id, "LATEST")["ShardIterator"]
Also, to write to a file, change ("\n" starts a new line):
print(result, shard_iterator)
to:
file.write(str(result) + "\n")
Hope it helps.
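Putting those pieces together, a minimal sketch of the whole read loop might look like the following. It reuses conn, stream, response, fileName, and the time import from the question's code, reads only the first shard, and treats the loop bound and sleep as tunable guesses rather than recommendations:
shard_id = response['StreamDescription']['Shards'][0]['ShardId']
shard_iterator = conn.get_shard_iterator(stream, shard_id, "LATEST")["ShardIterator"]

with open(fileName, "a") as out_file:
    tries = 0
    while tries < 100:                      # raise the bound (or loop forever) as needed
        tries += 1
        response = conn.get_records(shard_iterator, 100)
        shard_iterator = response['NextShardIterator']   # always advance the iterator
        for rec in response['Records']:
            out_file.write(str(rec['Data']) + "\n")      # one record per line
        time.sleep(1)                       # avoid hammering an empty shard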
I am doing a location search in Google App Engine and I want my search to be sorted based on proximity. I am getting the following error on the deployed version (production):
Search error:
Traceback (most recent call last):
File "/base/data/home/apps/s~sound-helper-87921/1.385231928987755902/application/search_handler.py", line 68, in doProductSearch
search_results = docs.Product.getIndex().search(search_query)
File "/base/data/home/runtimes/python27/python27_lib/versions/1/google/appengine/datastore/datastore_rpc.py", line 105, in positional_wrapper
return wrapped(*args, **kwds)
File "/base/data/home/runtimes/python27/python27_lib/versions/1/google/appengine/api/search/search.py", line 3676, in search
return self.search_async(query, deadline=deadline, **kwargs).get_result()
File "/base/data/home/runtimes/python27/python27_lib/versions/1/google/appengine/api/search/search.py", line 262, in get_result
return self._get_result_hook()
File "/base/data/home/runtimes/python27/python27_lib/versions/1/google/appengine/api/search/search.py", line 3690, in hook
_CheckStatus(response.status())
File "/base/data/home/runtimes/python27/python27_lib/versions/1/google/appengine/api/search/search.py", line 517, in _CheckStatus
raise _ERROR_MAP[status.code()](status.error_detail())
InvalidRequest: Failed to parse search request "distance(location, geopoint(30.008164999999998,-95.52959159999999)) < 2000"; Default text value is not appropriate for sort expression 'distance(location, geopoint(30.008165,-95.529592))'
The following is my code, which is pretty much copied from Google's tutorial:
def _buildQueryString(self, params):
    userstr = string = params.get('querystr')
    userprice = params.get('price')
    userdist = params.get('less_than_distance')
    loc = params.get('cur_location')
    lat = loc.split(',')[0].split()[0]
    lng = loc.split(',')[1].split()[0]
    if userstr:
        string = userstr
    if userprice:
        string = string + ' price < %s' % userprice
    if userdist:
        if not os.environ.get('SERVER_SOFTWARE','').startswith('Development'):
            string = string + ' distance(%s, geopoint(%s,%s)) < %s' % (
                docs.Product.LOCATION, lat, lng, userdist)
    return string
def _buildQuery(self, params):
    """Build and return a search query object."""
    user_query = self._buildQueryString(params)
    doc_limit = self._getDocLimit()
    try:
        offsetval = int(params.get('offset', 0))
    except ValueError:
        offsetval = 0
    loc = params.get('cur_location')
    lat = loc.split(',')[0].split()[0]
    lng = loc.split(',')[1].split()[0]
    expr = 'distance(%s, geopoint(%f,%f))' % (docs.Product.LOCATION, float(lat), float(lng))
    computed_expr_distance = search.FieldExpression(name='actual_distance',
                                                    expression=expr)
    computed_expr_score = search.FieldExpression(name='actual_score',
                                                 expression='score')
    returned_fields = [docs.Product.PID]
    expr_list = []
    expr_list.append(search.SortExpression(
        expression=expr,
        direction=search.SortExpression.ASCENDING,
        default_value='2001'))
    sortopts = search.SortOptions(expressions=expr_list, limit=doc_limit)
    search_query = search.Query(
        query_string=user_query.strip(),
        options=search.QueryOptions(
            limit=doc_limit,
            offset=offsetval,
            sort_options=sortopts,
            returned_expressions=[computed_expr_distance],
            returned_fields=returned_fields
        )
    )
    return search_query
def doProductSearch(self, params):
    """Perform a product search and display the results."""
    try:
        search_query = self._buildQuery(params)
        search_results = docs.Product.getIndex().search(search_query)
        returned_count = len(search_results.results)
    except search.Error:
        logging.exception("Search error:")
        msg = 'There was a search error (see logs).'
        url = '/'
        print('%s' % msg)
        return [], []
    psearch_response = []
    distances = []
    # For each document returned from the search
    for doc in search_results:
        pdoc = docs.Product(doc)
        for expr in doc.expressions:
            if expr.name == 'actual_distance':
                distances.append(expr.value)
        pid = pdoc.getPID()
        psearch_response.append(long(pid))
    logging.debug('Distances: ' + str(distances))
    return psearch_response, distances
Why is the Search API not recognizing my search query?
The problem was in my default_value. I modified the SortExpression to have an integer default_value instead of a string:
expr_list.append(search.SortExpression(
    expression=expr,
    direction=search.SortExpression.ASCENDING,
    default_value=500000))
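For anyone hitting the same message: distance() evaluates to a number of meters, so the sort expression is numeric and default_value has to be numeric as well; the string '2001' is what triggered "Default text value is not appropriate for sort expression". Any numeric sentinel should work, for example one just beyond the 2000 m radius used in the question's query:
expr_list.append(search.SortExpression(
    expression=expr,
    direction=search.SortExpression.ASCENDING,
    default_value=2001))  # numeric default (meters), just past the 2000 m filter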