AttributeError: 'MultiArrayLayout' object has no attribute 'x' - python

#!/usr/bin/env python
import rospy
import json
from custom_msg_json.msg import json_param
#from std_msgs.msg import String

def json_file_reader():
    f = open('/home/arsalan97/ros/sensor_calibration_data.json')
    data = json.load(f)
    for i in data:
        print(i)
    f.close()
    rospy.init_node("json_publisher_node", anonymous=True)
    pub = rospy.Publisher("json_publisher_", json_param, queue_size=10)
    #rospy.init_node("json_publisher_node", anonymous=True)
    rate = rospy.Rate(10)
    msg = 'Publishing now'
    my_param = json_param()
    #my_param.Translation.x = data['Translation'][0]
    my_param.Translation.x = 10
    my_param.Translation.y = 20
    my_param.Translation.z = 30
    my_param.Rotation.x = [1,2,3]
    my_param.Rotation.y = [2,3,4]
    my_param.Rotation.z = [4,5,6]
This is the error I got after I updated the .msg file: I replaced the MultiArray type with Vector3 for the Rotation field, but I don't understand why Rotation is still being treated as a MultiArray type.
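The generated Python message classes are only refreshed when the package is rebuilt (for example with catkin_make) and the workspace is re-sourced, so one way to confirm which definition Python is actually importing is to inspect the generated class. A minimal diagnostic sketch, assuming the package and message names from the question:
from custom_msg_json.msg import json_param

msg = json_param()
print(json_param._full_text)   # the .msg definition this Python class was generated from
print(type(msg.Rotation))      # should report Vector3 after a successful rebuild and re-source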


TF2 transform can't find an actually existing frame

In a global planner node that I wrote, I have the following init code
#!/usr/bin/env python
import rospy
import copy
import tf2_ros
import time
import numpy as np
import math
import tf
from math import sqrt, pow
from geometry_msgs.msg import Vector3, Point
from std_msgs.msg import Int32MultiArray
from std_msgs.msg import Bool
from nav_msgs.msg import OccupancyGrid, Path
from geometry_msgs.msg import PoseStamped, PointStamped
from tf2_geometry_msgs import do_transform_point
from Queue import PriorityQueue

class GlobalPlanner():
    def __init__(self):
        print("init global planner")
        self.tfBuffer = tf2_ros.Buffer()
        self.listener = tf2_ros.TransformListener(self.tfBuffer)
        self.drone_position_sub = rospy.Subscriber('uav/sensors/gps', PoseStamped, self.get_drone_position)
        self.drone_position = []
        self.drone_map_position = []
        self.map_sub = rospy.Subscriber("/map", OccupancyGrid, self.get_map)
        self.goal_sub = rospy.Subscriber("/cell_tower/position", Point, self.getTransformedGoal)
        self.goal_position = []
        self.goal = Point()
        self.goal_map_position = []
        self.occupancy_grid = OccupancyGrid()
        self.map = []
        self.p_path = Int32MultiArray()
        self.position_pub = rospy.Publisher("/uav/input/position", Vector3, queue_size=1)
        #next_movement in
        self.next_movement = Vector3
        self.next_movement.z = 3
        self.path_pub = rospy.Publisher('/uav/path', Int32MultiArray, queue_size=1)
        self.width = rospy.get_param('global_planner_node/map_width')
        self.height = rospy.get_param('global_planner_node/map_height')
        #Check whether there is a path plan
        self.have_plan = False
        self.path = []
        self.euc_distance_drone_goal = 100
        self.twod_distance_drone_goal = []
        self.map_distance_drone_goal = []
        self.mainLoop()
And there is a callback function called getTransformedGoal, which transforms the goal position from the "cell_tower" frame to the "world" frame. It looks like this:
def getTransformedGoal(self, msg):
    self.goal = msg
    try:
        #Lookup the tower to world transform
        transform = self.tfBuffer.lookup_transform('cell_tower', 'world', rospy.Time())
        #transform = self.tfBuffer.lookup_transform('world','cell-tower' rospy.Time())
        #Convert the goal to a PointStamped
        goal_pointStamped = PointStamped()
        goal_pointStamped.point.x = self.goal.x
        goal_pointStamped.point.y = self.goal.y
        goal_pointStamped.point.z = self.goal.z
        #Use the do_transform_point function to convert the point using the transform
        new_point = do_transform_point(goal_pointStamped, transform)
        #Convert the point back into a vector message containing integers
        transform_point = [new_point.point.x, new_point.point.y]
        #Publish the vector
        self.goal_position = transform_point
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
        print(e)
        print('global_planner tf2 exception, continuing')
The error message said that
"cell_tower" passed to lookupTransform argument target_frame does not exist.
I checked the rqt graph for both active and all nodes; when only active nodes are shown, the /tf topic is not being subscribed to by the global planner node (the original question attached one screenshot for the active view and another for all nodes, including inactive ones).
But I have actually set up the listener, and I have another node called local planner that uses the same strategy; it works for that node, but not for the global planner.
I'm not sure why this is.
Try adding a timeout to your lookup_transform() function call, as your transformation may not be available when you need it:
transform = self.tfBuffer.lookup_transform('cell_tower', 'world',rospy.Time.now(), rospy.Duration(1.0))
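As a further option (not from the original answer), the buffer can be polled explicitly before the lookup, using the latest available stamp; a minimal sketch:
# rospy.Time(0) asks for the latest transform available in the buffer
if self.tfBuffer.can_transform('cell_tower', 'world', rospy.Time(0), rospy.Duration(1.0)):
    transform = self.tfBuffer.lookup_transform('cell_tower', 'world', rospy.Time(0))
else:
    rospy.logwarn('cell_tower -> world transform not yet available')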

Running python script with multiple values of command line arguments

I have a Python script for pre-processing audio, and it takes frame length, frame step and FFT length as command-line arguments. I am able to run the code with single values of these arguments. Is there a way to run the script with multiple values of an argument? For example, getting the output for FFT lengths of 128, 256 and 512 instead of just one value.
The code for pre-processing is as follows:
import numpy as np
import pandas as pd
import tensorflow as tf
from scipy.io import wavfile
import os
import time
import pickle
import random
import argparse
import configlib
from configlib import config as C
import mfccwithpaddingandcmd
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.preprocessing import MultiLabelBinarizer
from tensorflow import keras
from tensorflow.python.keras import Sequential
from tensorflow.keras.layers import Dense,Conv2D,MaxPooling2D,Flatten,Dropout,BatchNormalization,LSTM,Lambda,Reshape,Bidirectional,GRU
from tensorflow.keras.callbacks import TensorBoard

start = time.time()
classes = ['blinds','fan','light','music','tv']
#dire = r"/mnt/beegfs/home/gehani/test_speech_command/"
parser = configlib.add_parser("Preprocessing config")
parser.add_argument("-dir","--dire", metavar="", help="Directory for the audio files")

def pp():
    data_list = []  # To save paths of all the audio files: data_list --> folder --> files in folder
    for index,label in enumerate(classes):
        class_list = []
        if label == 'silence':  # creating silence folder and storing 1 sec noise audio files
            silence_path = os.path.join(C["dire"],'silence')
            if not os.path.exists(silence_path):
                os.mkdir(silence_path)
            silence_stride = 2000
            #sample_rate = 16000
            folder = os.path.join(C["dire"],'_background_noise_')  # all silence is kept in the _background_noise_ folder
            for file_ in os.listdir(folder):
                if '.wav' in file_:
                    load_path = os.path.join(folder,file_)
                    sample_rate,y = wavfile.read(load_path)
                    for i in range(0,len(y)-sample_rate,silence_stride):
                        file_path = "silence/{}_{}.wav".format(file_[:-4],i)
                        y_slice = y[i:i+sample_rate]
                        wavfile.write(os.path.join(C["dire"],file_path),sample_rate,y_slice)
                        class_list.append(file_path)
        else:
            folder = os.path.join(C["dire"],label)
            for file_ in os.listdir(folder):
                file_path = '{}/{}'.format(label,file_)  # Ex: up/c9b653a0_nohash_2.wav
                class_list.append(file_path)
        random.shuffle(class_list)  # To shuffle files
        data_list.append(class_list)  # if not a silence file then just append to the data_list

    X = []
    Y = []
    preemphasis = 0.985
    print("Feature Extraction Started")
    for i,class_list in enumerate(data_list):  # data_list = all files, class_list = one folder in data_list, samples = path to the audio file in that class_list
        for j,samples in enumerate(class_list):  # samples are of the form class_name/audio_file
            if samples.endswith('.wav'):
                sample_rate,audio = wavfile.read(os.path.join(C["dire"],samples))
                if audio.size < sample_rate:
                    audio = np.pad(audio,(sample_rate-audio.size,0),mode="constant")
                #print(sample_rate)
                #print(preemphasis)
                #print(audio.shape)
                coeff = mfccwithpaddingandcmd.mfcc(audio,sample_rate,preemphasis)  # 0.985 = preemphasis
                #print(coeff)
                X.append(coeff)
                if samples.split('/')[0] in classes:
                    Y.append(samples.split('/')[0])
                elif samples.split('/')[0] == '_background_noise_':
                    Y.append('silence')
    #print(len(X))
    #print(len(Y))

    # X = coefficient array and Y = name of the class
    A = np.zeros((len(X),X[0].shape[0],X[0][0].shape[0]),dtype='object')
    for i in range(0,len(X)):
        A[i] = np.array(X[i])  # Converting list X into array A
    end1 = time.time()
    print("Time taken for feature extraction:{}sec".format(end1-start))

    MLB = MultiLabelBinarizer()  # one-hot encoding for converting labels into binary form
    MLB.fit(pd.Series(Y).fillna("missing").str.split(', '))
    Y_MLB = MLB.transform(pd.Series(Y).fillna("missing").str.split(', '))
    MLB.classes_  # Same as the classes array
    print(Y_MLB.shape)

    pickle_out = open("A_all.pickle","wb")  # Writes array A to the file A_all.pickle
    pickle.dump(A, pickle_out)  # pickle file containing the extracted features
    pickle_out.close()
    pickle_out = open("Y_all.pickle","wb")
    pickle.dump(Y_MLB, pickle_out)
    pickle_out.close()
    pickle_in = open("Y_all.pickle","rb")
    Y = pickle.load(pickle_in)

    X = tf.keras.utils.normalize(X)
    X_train,X_valtest,Y_train,Y_valtest = train_test_split(X,Y,test_size=0.2,random_state=37)
    X_val,X_test,Y_val,Y_test = train_test_split(X_valtest,Y_valtest,test_size=0.5,random_state=37)
    print(X_train.shape,X_val.shape,X_test.shape,Y_train.shape,Y_val.shape,Y_test.shape)

if __name__ == "__main__":
    configlib.parse(save_fname="last_arguments.txt")
    print("Running with configuration:")
    configlib.print_config()
    pp()
The code for MFCC is as follows:
import tensorflow as tf
import scipy.io.wavfile as wav
import numpy as np
import matplotlib.pyplot as plt
import pickle
import argparse
import configlib
from configlib import config as C

# Configuration arguments
parser = configlib.add_parser("MFCC config")
parser.add_argument("-fl","--frame_length", type=int, default=400, metavar="", help="Frame Length")
parser.add_argument("-fs","--frame_step", type=int, default=160, metavar="", help="Frame Step")
parser.add_argument("-fft","--fft_length", type=int, default=512, metavar="", help="FFT length")
#args = parser.parse_args()

def Preemphasis(signal,pre_emp):
    return np.append(signal[0],signal[1:]-pre_emp*signal[:-1])

def Paddinggg(framelength,framestep,samplerate):
    frameStart = np.arange(0,samplerate,framestep)
    frameEnd = frameStart + framelength
    padding = min(frameEnd[(frameEnd > samplerate)]) - samplerate
    return padding

def mfcc(audio,sample_rate,pre_emp):
    audio = np.pad(audio,(Paddinggg(C["frame_length"],C["frame_step"],sample_rate),0),mode='reflect')
    audio = audio.astype('float32')
    # Normalization
    audio = tf.keras.utils.normalize(audio)
    # Preemphasis
    audio = Preemphasis(audio,pre_emp)
    stfts = tf.signal.stft(audio,C["frame_length"],C["frame_step"],C["fft_length"],window_fn=tf.signal.hann_window)
    spectrograms = tf.abs(stfts)
    num_spectrogram_bins = stfts.shape[-1]
    lower_edge_hertz, upper_edge_hertz, num_mel_bins = 0.0, sample_rate/2.0, 32
    linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz, upper_edge_hertz)
    mel_spectrograms = tf.tensordot(spectrograms, linear_to_mel_weight_matrix, 1)
    mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))
    # Compute a stabilized log to get log-magnitude mel-scale spectrograms.
    log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6)
    # Compute MFCCs from log_mel_spectrograms and take the first 13.
    return log_mel_spectrograms

print("End")
And the code for configlib is as follows:
from typing import Dict, Any
import logging
import pprint
import sys
import argparse

# Logging for config library
logger = logging.getLogger(__name__)

# Our global parser that we will collect arguments into
parser = argparse.ArgumentParser(description=__doc__, fromfile_prefix_chars="#")

# Global configuration dictionary that will contain parsed arguments
# It is also this variable that modules use to access parsed arguments
config: Dict[str, Any] = {}

def add_parser(title: str, description: str = ""):
    """Create a new context for arguments and return a handle."""
    return parser.add_argument_group(title, description)

def parse(save_fname: str = "") -> Dict[str, Any]:
    """Parse given arguments."""
    config.update(vars(parser.parse_args()))
    logging.info("Parsed %i arguments.", len(config))
    # Optionally save passed arguments
    if save_fname:
        with open(save_fname, "w") as fout:
            fout.write("\n".join(sys.argv[1:]))
        logging.info("Saving arguments to %s.", save_fname)
    return config

def print_config():
    """Print the current config to stdout."""
    pprint.pprint(config)
I use the following command to run my python file:
python3.7 preprocessingwithpaddingandcmd.py -fl 1103 -fs 88 -fft 512 -dir /mnt/beegfs/home/gehani/appliances_audio_one_channel
Should I write a shell script, or does Python have an option for this?
EDIT 1
I tried using
parser.add_argument('-fft', '--fft_length', type=int, default=[], nargs=3)
for getting fft length from the command line and used the command
run preprocessingwithpaddingandcmd -dir filepath -fl 1765 -fs 1102 -fft 512 218 64
to run it. But, it gives me this error: ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
Can anyone please help?
I found you can do it with an existing MFCC feature-extraction library.
You can create your own MFCC feature extraction, or simply limit the window lengths and the number of cepstra, which is enough for simple tasks, unless you need logarithmic scales, where you can use a target matrix (convolution) or similar.
The logarithm appears when you use the FFT or an alternative derivation, but MFCC itself is only the extraction step; a picture of the sample output was attached to the original answer.
[ Sample ]:
from python_speech_features import mfcc
from python_speech_features import logfbank
import scipy.io.wavfile as wav
import tensorflow as tf
import matplotlib.pyplot as plt
(rate,sig) = wav.read("F:\\temp\\Python\\Speech\\temple_of_love-sisters_of_mercy.wav")
mfcc_feat = mfcc(signal=sig, samplerate=rate, winlen=0.025, winstep=0.01, numcep=13, nfilt=26, nfft=512, lowfreq=0, highfreq=None, preemph=0.97, ceplifter=22, appendEnergy=True)
fbank_feat = logfbank(sig,rate)
plt.plot( mfcc_feat[50:42000,0] )
plt.xlabel("sample")
plt.show()
plt.close()
input('...')
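Regarding the original question of running the script with several FFT lengths: a shell loop works, but it can also be done from Python by accepting a list of values and launching one run per value. A minimal sketch (hypothetical, not from the thread), reusing the flag names and script name from the question:
import argparse
import subprocess
import sys

parser = argparse.ArgumentParser()
parser.add_argument("-fft", "--fft_length", type=int, nargs="+", default=[512])
parser.add_argument("-fl", "--frame_length", type=int, default=400)
parser.add_argument("-fs", "--frame_step", type=int, default=160)
parser.add_argument("-dir", "--dire", default=".")
args = parser.parse_args()

for fft in args.fft_length:
    # Each run receives a single FFT value, so the downstream code can keep
    # treating C["fft_length"] as a scalar.
    subprocess.run([sys.executable, "preprocessingwithpaddingandcmd.py",
                    "-fl", str(args.frame_length),
                    "-fs", str(args.frame_step),
                    "-fft", str(fft),
                    "-dir", args.dire],
                   check=True)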

module 'robin_stocks' has no attribute 'get_current_positions'

This is my first time using the Robinhood API and I am trying out their documentation: https://readthedocs.org/projects/robin-stocks/downloads/pdf/latest/,
but one of its key functions is not working. When I call robin_stocks.get_current_positions() I get the error
AttributeError: module 'robin_stocks' has no attribute 'get_current_positions'
Here is my code:
import robin_stocks, json
from robin_stocks import *
import robin_stocks as r
import sys
import time
import requests

content = open('config.json').read()
config = json.loads(content)
login = r.login(config['username'], config['password'], store_session=True)
my_stocks = robin_stocks.build_holdings()
for key, value in my_stocks.items():
    mystocks = key, value
    print(mystocks)

WEIbalance = mystocks[1]['equity']
WEI = mystocks[0]
print('YY', WEI)
positions_data = robin_stocks.get_current_positions()
print('my equity', WEIbalance)
print(positions_data)
Is this an error on my part?
As of this commit, get_current_positions was renamed to get_open_stock_positions(); the corresponding issue can be found here.
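A minimal sketch of the renamed call (an assumption: depending on the installed robin_stocks version, the function may instead need to be imported from robin_stocks.robinhood):
import robin_stocks as r

# assumes r.login(...) has already been called as in the question
positions_data = r.get_open_stock_positions()
print(positions_data)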

'NoneType' object is not iterable

I am trying to loop through a list of symbols to get rates for various currencies via MetaTrader5 (mt5). I use the code below, but I get a TypeError:
d[i] = [y.close for y in rates1]
TypeError: 'NoneType' object is not iterable
I can't see where I'm going wrong. I would like to use this structure to loop through, create multiple dataframes, and then build a big multi-index of all pairs and times using the same kind of loop. I've not been coding long.
sym = ['GBPUSD','USDJPY','USDCHF','AUDUSD','GBPJPY']
# Copying data to dataframe
d = pd.DataFrame()
for i in sym:
    rates1 = mt5.copy_rates_from(i, mt5.TIMEFRAME_M1, 5)
    d[i] = [y.close for y in rates1]
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 18:38:11 2020
#author: DanPc
"""
import pytz
import pandas as pd
import MetaTrader5 as mt5
import time
from datetime import datetime
from threading import Timer
import talib
import numpy as np
import matplotlib as plt
from multiprocessing import Process
import sys

# ENTER DETAILS HERE
server_name = ""
server_num =
password = ""

#------------------------------------------------------------------------------
def actualtime():
    # datetime object containing current date and time
    now = datetime.now()
    dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
    #print("date and time =", dt_string)
    return str(dt_string)

#------------------------------------------------------------------------------
def sync_60sec(op):
    info_time_new = datetime.strptime(str(actualtime()), '%d/%m/%Y %H:%M:%S')
    waiting_time = 60 - info_time_new.second
    t = Timer(waiting_time, op)
    t.start()
    print(actualtime)

#------------------------------------------------------------------------------
def program(symbol):
    if not mt5.initialize(login=server_num, server=server_name, password=password):
        print("initialize() failed, error code =", mt5.last_error())
        quit()
    timezone = pytz.timezone("Etc/UTC")
    utc_from = datetime.now()
    ######### Change here the timeframe 525600
    # Create currency watchlist for which correlation matrix is to be plotted
    sym = ['GBPUSD','USDJPY','USDCHF','AUDUSD','GBPJPY']
    # Copying data to dataframe
    d = pd.DataFrame()
    for i in sym:
        rates1 = mt5.copy_rates_from(i, mt5.TIMEFRAME_M1, 5)
        d[i] = [y.close for y in rates1]
        print(rates1)
    mt5.shutdown()

if not mt5.initialize():
    print("initialize() failed, error code =", mt5.last_error())
    quit()

# starting mt5
if not mt5.initialize(login=server_num, server=server_name, password=password):
    print("initialize() failed, error code =", mt5.last_error())
    quit()

#------------------------------------------------------------------------------
# S T A R T I N G   M T 5
#------------------------------------------------------------------------------
authorized = mt5.login(server_num, password=password)
if authorized:
    account_info = mt5.account_info()
    if account_info != None:
        account_info_dict = mt5.account_info()._asdict()
        df = pd.DataFrame(list(account_info_dict.items()), columns=['property','value'])
        print("account_info() as dataframe:")
        print(df)
else:
    print(mt5.last_error)
    mt5.shutdown()

#------------------------------------------------------------------------------
def trading_bot():
    symbol_1 = 'EURUSD'
    symbol_2 = 'EURCAD'
    while True:
        program(symbol_1)
        program(symbol_2)
        time.sleep(59.8)  # it depends on your computer and ping

sync_60sec(trading_bot)
copy_rates_from returns None if there is an error. The documentation suggests calling last_error() to find out what that error is.
(And no, I don't know why copy_rates_from doesn't just raise an exception to indicate the error. Apparently, the module is a thin wrapper around a C library.)
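A minimal sketch of that check inside the loop (an assumption: mt5 is already initialized, and a date_from value such as utc_from is supplied, as the documented signature expects):
for i in sym:
    rates1 = mt5.copy_rates_from(i, mt5.TIMEFRAME_M1, utc_from, 5)
    if rates1 is None:
        print("copy_rates_from failed for", i, ":", mt5.last_error())
        continue
    d[i] = [y['close'] for y in rates1]  # rates come back as a structured array, so index by field name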
I came to this solution that creates a dictionary of dataframes.
sym = ["GBPUSD","USDJPY","USDCHF","AUDUSD","GBPJPY"]
# Copying data to dataframe
utc_from = datetime.now()
for i in sym:
rates = {i:pd.DataFrame(mt5.copy_rates_from(i, mt5.TIMEFRAME_M1, utc_from , 60),
columns=['time', 'open', 'low', 'high', 'close', 'tick_volume', 'spread', 'real_volume']) for i in sym}

Exception on using ctypes with tesseract-ocr TessPageIteratorBoundingBox

import ctypes
import os
os.putenv("PATH", r'C:\Program Files\Tesseract-OCR')
os.environ["TESSDATA_PREFIX"] = r'C:\Program Files\Tesseract-OCR\tessdata'
liblept = ctypes.cdll.LoadLibrary('liblept-5.dll')
pix = liblept.pixRead('test.png'.encode())
print(pix)
tesseractLib = ctypes.cdll.LoadLibrary('libtesseract-5.dll')
tesseractHandle = tesseractLib.TessBaseAPICreate()
tesseractLib.TessBaseAPIInit3(tesseractHandle, '.', 'eng')
tesseractLib.TessBaseAPISetImage2(tesseractHandle, pix)
# text_out = tesseractLib.TessBaseAPIGetUTF8Text(tesseractHandle)
# print(ctypes.string_at(text_out))
tessPageIterator = tesseractLib.TessResultIteratorGetPageIterator(tesseractHandle)
iteratorLevel = 3 # RIL_BLOCK, RIL_PARA, RIL_TEXTLINE, RIL_WORD, RIL_SYMBOL
tesseractLib.TessPageIteratorBoundingBox(tessPageIterator, iteratorLevel, ctypes.c_int(0), ctypes.c_int(0), ctypes.c_int(0), ctypes.c_int(0))
I got exceptions :
Traceback (most recent call last):
File "D:\BaiduYunDownload\programming\Python\CtypesOCR.py", line 25, in <module>
tesseractLib.TessPageIteratorBoundingBox(tessPageIterator, iteratorLevel, ctypes.c_int(0), ctypes.c_int(0), ctypes.c_int(0), ctypes.c_int(0))
OSError: exception: access violation reading 0x00000018
So what's wrong?
The aim of this program is to get the bounding rectangle of each word. I know about projects like tesserocr and PyOCR.
P.S. Specifying the required argument types (function prototypes) for the DLL functions doesn't matter here. One could uncomment the commented lines and comment out the last three lines to test it. I posted the question before, and it was closed for this reason.
I solved it myself:
import ctypes
import os
import io

os.putenv("PATH", r'C:\Program Files\Tesseract-OCR')
os.environ["TESSDATA_PREFIX"] = r'C:\Program Files\Tesseract-OCR\tessdata'
liblept = ctypes.cdll.LoadLibrary('liblept-5.dll')
pix = liblept.pixRead(b'test.png')  # the path must be passed as bytes
print(pix)
tesseractLib = ctypes.cdll.LoadLibrary('libtesseract-5.dll')
tesseractHandle = tesseractLib.TessBaseAPICreate()
tesseractLib.TessBaseAPIInit3(tesseractHandle, b'.', b'eng')  # (TessBaseAPI* handle, const char* datapath, const char* language)
# from PIL import Image
# pixmap = Image.open("test.png")
# image = io.BytesIO()
# pixmap.save(image, 'png')  # the format here is arbitrary; for images created by the library itself (via a factory function, or by running a method on an existing image), this attribute is set to None.
# image.seek(0)  # must seek back to the start, otherwise later reads start from the end and find no data
tesseractLib.TessBaseAPISetImage2(tesseractHandle, pix)  # pixmap.tobytes("raw", "RGB")
# text_out = tesseractLib.TessBaseAPIGetUTF8Text(tesseractHandle)
# print(ctypes.string_at(text_out))
tesseractLib.TessBaseAPIRecognize(tesseractHandle, None)  # required, otherwise the calls below fail
tessResultIterator = tesseractLib.TessBaseAPIGetIterator(tesseractHandle)  # needed for TessResultIteratorGetPageIterator
tessPageIterator = tesseractLib.TessResultIteratorGetPageIterator(tessResultIterator)
wordLevel = 3  # RIL_BLOCK, RIL_PARA, RIL_TEXTLINE, RIL_WORD, RIL_SYMBOL
left = ctypes.c_int(0)  # these are output parameters, so they must be writable ctypes instances; byref() argument must be a ctypes instance, not 'int'
top = ctypes.c_int(0)
right = ctypes.c_int(0)
bottom = ctypes.c_int(0)
while True:
    r = tesseractLib.TessPageIteratorBoundingBox(
        tessPageIterator,
        wordLevel,
        ctypes.byref(left),  # byref behaves similarly to pointer(obj), but the construction is a lot faster
        ctypes.byref(top),
        ctypes.byref(right),
        ctypes.byref(bottom)
    )
    text_out = tesseractLib.TessResultIteratorGetUTF8Text(tessPageIterator, wordLevel)
    print(ctypes.string_at(text_out), left.value, top.value, right.value, bottom.value)
    if not tesseractLib.TessPageIteratorNext(tessPageIterator, wordLevel):
        break
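As a side note (not part of the original answer), declaring prototypes for the functions that return pointers is still worthwhile on 64-bit Python, where the default int return type can truncate addresses; a minimal sketch for the calls used above:
# Hypothetical prototype declarations; ctypes otherwise assumes every function returns a C int,
# which can truncate 64-bit pointers.
tesseractLib.TessResultIteratorGetUTF8Text.restype = ctypes.c_char_p  # returns Python bytes directly
tesseractLib.TessPageIteratorBoundingBox.argtypes = [
    ctypes.c_void_p,                # TessPageIterator*
    ctypes.c_int,                   # PageIteratorLevel
    ctypes.POINTER(ctypes.c_int),   # left (output)
    ctypes.POINTER(ctypes.c_int),   # top (output)
    ctypes.POINTER(ctypes.c_int),   # right (output)
    ctypes.POINTER(ctypes.c_int),   # bottom (output)
]
tesseractLib.TessPageIteratorBoundingBox.restype = ctypes.c_int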
