Small unpacking buffer for wave.struct in Python

I am building an SSTV program that can convert a 180x136 picture into sound using its RGB components. But when I try to read data from the microphone, it gives me
struct.error: unpack requires a buffer of 400 bytes
even though I only pass 200 bytes.
I expected the output to be the decoded microphone input (in short: microphone -> decode signal -> image).
I tried using chunk*2, but it gave me the same error, just with doubled sizes:
struct.error: unpack requires a buffer of 800 bytes
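A minimal sketch of the usual fix, assuming a 16-bit stream (the helper name unpack_chunk is mine, not from the code below): derive the unpack count from the number of bytes actually returned, with integer division, and build the window to the same length so the multiplication broadcasts. PyAudio's read() returns chunk frames, so a stereo stream doubles the byte count relative to chunk * swidth.
import struct
import numpy as np

def unpack_chunk(data, swidth):
    # Number of 16-bit samples actually present in the buffer;
    # integer division, not /, so the struct format matches len(data).
    count = len(data) // swidth
    samples = struct.unpack("%dh" % count, data)
    # Window built to the real sample count, not the nominal chunk size.
    return np.array(samples) * np.blackman(count)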
Here is the code for the decoder:
import numpy as np
import pyaudio
import wave
from tkinter import *
root = Tk()
c = Canvas(root, width=90*8, height=68*8, bg='white')
c.pack()
def pix(x, y, rr, rg, rb):  # r + (r/g/b) is raw rgb because we are decoding frequency
    if rr < 0:
        r = 0
    if rg < 0:
        g = 0
    if rb < 0:
        b = 0
    try:
        r = min(255, round(rr * (255 / 1124)))
    except:
        r = 0
    try:
        g = min(255, round(rg * (255 / 1124)))
    except:
        g = 0
    try:
        b = min(255, round(rb * (255 / 1124)))
    except:
        b = 0
    if r < 0:
        r = 0
    if g < 0:
        g = 0
    if b < 0:
        b = 0
    print(r, g, b)
    n = hex(r).replace("0x", "")
    rh = (2 - len(n)) * "0" + n
    n = hex(g).replace("0x", "")
    gh = (2 - len(n)) * "0" + n
    n = hex(b).replace("0x", "")
    bh = (2 - len(n)) * "0" + n
    c.create_line(x, y, x + 4, y, fill=f"#{rh}{gh}{bh}")
    c.create_line(x, y + 1, x + 4, y + 1, fill=f"#{rh}{gh}{bh}")
    c.create_line(x, y + 2, x + 4, y + 2, fill=f"#{rh}{gh}{bh}")
    c.create_line(x, y + 3, x + 4, y + 3, fill=f"#{rh}{gh}{bh}")
chunk = 100
# open up a wave
wf = wave.open('input.wav', 'rb')
swidth = wf.getsampwidth()
RATE = wf.getframerate()
# use a Blackman window
window = np.blackman(chunk)
# open stream
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=RATE,
                output=True)
microphone = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=RATE,
                    input=True)
# read some data
data = wf.readframes(chunk)
#print(len(data))
#print(chunk*swidth)
pc = 0
x = 0
e = [255,252]
totaly = 0
rrgb = [255, 255, 255] # raw rgb
# play stream and find the frequency of each chunk
while True:
    data = microphone.read(chunk)
    # write data out to the audio stream
    stream.write(data)
    indata = np.array(wave.struct.unpack("%dh" % (len(data) / swidth), data)) * window  # "%dh" % ((len(data) - 1) / swidth)
    # Take the fft and square each value
    fftData = abs(np.fft.rfft(indata)) ** 2
    # find the maximum
    which = fftData[1:].argmax() + 1
    # use quadratic interpolation around the max
    if which != len(fftData) - 1:
        y0, y1, y2 = np.log(fftData[which - 1:which + 2:])
        x1 = (y2 - y0) * .5 / (2 * y1 - y2 - y0)
        # find the frequency and output it
        thefreq = (which + x1) * RATE / chunk
        if thefreq > 3030 or totaly == 540:
            # print("PING!", f"The freq is %f Hz. {pc}" % (thefreq))
            if pc == 0:
                totaly = x
            pc += 4
            x = 0
            root.update()
        else:
            # print(f"The freq is %f Hz. min {min(e)} max {max(e)}, {x//4}" % (thefreq), end=" ")
            if x // 4 % 3 == 0:
                pix(x / 3, pc, rrgb[0], rrgb[1], rrgb[2])  # /0.77039274924
                rrgb[0] = thefreq
                # print("R")
            elif x // 4 % 3 == 1:
                rrgb[1] = thefreq
                # print("G")
            elif x // 4 % 3 == 2:
                rrgb[2] = thefreq
                # print("B")
            # e.append(thefreq)
            # pix()
            x += 4  # print("The freq is %f Hz." % (thefreq))
    else:
        thefreq = which * RATE / chunk
        # print("The freq is %f Hz." % (thefreq))
    # read some more data
    data = microphone.read(chunk)
    if data:
        stream.write(data)
stream.close()
p.terminate()
root.mainloop()
Here is the code for the encoder:
import numpy as np
# Importing Image from PIL package
from PIL import Image
# creating an image object
im = Image.open(r"original.bmp")
px = im.load()
arx = []
art = []
pix = ()
pixels = list(im.getdata())
width, height = im.size
pixels = [pixels[i * width:(i + 1) * width] for i in range(height)]
print(pixels)
for ypos in range(0, 136):
    for xpos in range(0, 180):
        pix = pixels[ypos][xpos]
        # gray = ((pix[0]) + (pix[1]) + (pix[2])) / 3
        rgb = (pix[0], pix[1], pix[2])
        arx.append(rgb)
    arx.append(406)
    art.append(arx)
rate = 44100 # samples per second
T = 3 # sample duration (seconds)
slowdown = int(input("Speed in Hz:"))
f = 100.0/slowdown # sound frequency (Hz)
print("speed multiplier is",slowdown)
encoding = "ansi"
total = []
xpos2 = 0
c = 0
for ypos in art:
    for xpos in ypos:
        # print(xpos)
        t = np.linspace(0, 0.01, (slowdown), endpoint=False)
        if xpos == 406:
            sig = np.sin(2 * np.pi * f * 500 * t)
            total.extend(sig)
            sig = np.sin(2 * np.pi * f * 700 * t)
            total.extend(sig)
            continue
        sig = np.sin(2 * np.pi * f * xpos[0] * t)
        total.extend(sig)
        sig = np.sin(2 * np.pi * f * xpos[1] * t)
        total.extend(sig)
        sig = np.sin(2 * np.pi * f * xpos[2] * t)
        total.extend(sig)
        print(xpos)
    sig = np.sin(2 * np.pi * f * 300 * t)
    break
import wavio
wavio.write("input.wav", total, rate, sampwidth=2)
input("done, saved as \'input.wav\'")

Related

Non-responsive 2D scatter plot in Python 3.0.9

I'm trying to run Python code for an AWR1642BOOST board to make a short-range radar. Since I don't have a DCA1000 EVM data-capture board, I'm trying to obtain the detected data points using the detObj dictionary of the provided Python code. On running the code, the 2D scatter window appears blank and remains unresponsive. Can someone please point out what I might be doing wrong? I have also attached a screenshot of the error window and the Python shell: [1]
import time
import numpy as np
import pyqtgraph as pg
import serial  # pyserial; needed for serial.Serial below
from pyqtgraph.Qt import QtGui
# Change the configuration file name
configFileName = '1642config.cfg'
CLIport = {}
Dataport = {}
byteBuffer = np.zeros(2**15, dtype='uint8')
byteBufferLength = 0
# ------------------------------------------------------------------
# Function to configure the serial ports and send the data from
# the configuration file to the radar
def serialConfig(configFileName):
    global CLIport
    global Dataport
    # Open the serial ports for the configuration and the data ports
    # Raspberry Pi
    # CLIport = serial.Serial('/dev/ttyACM0', 115200)
    # Dataport = serial.Serial('/dev/ttyACM1', 921600)
    # Windows
    CLIport = serial.Serial('COM4', 115200)
    Dataport = serial.Serial('COM5', 921600)
    # Read the configuration file and send it to the board
    config = [line.rstrip('\r\n') for line in open(configFileName)]
    for i in config:
        CLIport.write((i + '\n').encode())
        print(i)
        time.sleep(0.01)
    return CLIport, Dataport
# ------------------------------------------------------------------
# Function to parse the data inside the configuration file
def parseConfigFile(configFileName):
    configParameters = {}  # Initialize an empty dictionary to store the configuration parameters
    # Read the configuration file and send it to the board
    config = [line.rstrip('\r\n') for line in open(configFileName)]
    for i in config:
        # Split the line
        splitWords = i.split(" ")
        # Hard code the number of antennas, change if other configuration is used
        numRxAnt = 4
        numTxAnt = 2
        # Get the information about the profile configuration
        if "profileCfg" in splitWords[0]:
            startFreq = int(float(splitWords[2]))
            idleTime = int(splitWords[3])
            rampEndTime = float(splitWords[5])
            freqSlopeConst = float(splitWords[8])
            numAdcSamples = int(splitWords[10])
            numAdcSamplesRoundTo2 = 1
            while numAdcSamples > numAdcSamplesRoundTo2:
                numAdcSamplesRoundTo2 = numAdcSamplesRoundTo2 * 2
            digOutSampleRate = int(splitWords[11])
        # Get the information about the frame configuration
        elif "frameCfg" in splitWords[0]:
            chirpStartIdx = int(splitWords[1])
            chirpEndIdx = int(splitWords[2])
            numLoops = int(splitWords[3])
            numFrames = int(splitWords[4])
            framePeriodicity = int(splitWords[5])
    # Combine the read data to obtain the configuration parameters
    numChirpsPerFrame = (chirpEndIdx - chirpStartIdx + 1) * numLoops
    configParameters["numDopplerBins"] = numChirpsPerFrame / numTxAnt
    configParameters["numRangeBins"] = numAdcSamplesRoundTo2
    configParameters["rangeResolutionMeters"] = (3e8 * digOutSampleRate * 1e3) / (2 * freqSlopeConst * 1e12 * numAdcSamples)
    configParameters["rangeIdxToMeters"] = (3e8 * digOutSampleRate * 1e3) / (2 * freqSlopeConst * 1e12 * configParameters["numRangeBins"])
    configParameters["dopplerResolutionMps"] = 3e8 / (2 * startFreq * 1e9 * (idleTime + rampEndTime) * 1e-6 * configParameters["numDopplerBins"] * numTxAnt)
    configParameters["maxRange"] = (300 * 0.9 * digOutSampleRate) / (2 * freqSlopeConst * 1e3)
    configParameters["maxVelocity"] = 3e8 / (4 * startFreq * 1e9 * (idleTime + rampEndTime) * 1e-6 * numTxAnt)
    return configParameters
# ------------------------------------------------------------------
# Function to read and parse the incoming data
def readAndParseData16xx(Dataport, configParameters):
    global byteBuffer, byteBufferLength
    # Constants
    OBJ_STRUCT_SIZE_BYTES = 12
    BYTE_VEC_ACC_MAX_SIZE = 2**15
    MMWDEMO_UART_MSG_DETECTED_POINTS = 1
    MMWDEMO_UART_MSG_RANGE_PROFILE = 2
    maxBufferSize = 2**15
    magicWord = [2, 1, 4, 3, 6, 5, 8, 7]
    # Initialize variables
    magicOK = 0  # Checks if magic number has been read
    dataOK = 0  # Checks if the data has been read correctly
    frameNumber = 0
    detObj = {}
    tlv_type = 0
    readBuffer = Dataport.read(Dataport.in_waiting)
    byteVec = np.frombuffer(readBuffer, dtype='uint8')
    byteCount = len(byteVec)
    # Check that the buffer is not full, and then add the data to the buffer
    if (byteBufferLength + byteCount) < maxBufferSize:
        byteBuffer[byteBufferLength:byteBufferLength + byteCount] = byteVec[:byteCount]
        byteBufferLength = byteBufferLength + byteCount
    # Check that the buffer has some data
    if byteBufferLength > 16:
        # Check for all possible locations of the magic word
        possibleLocs = np.where(byteBuffer == magicWord[0])[0]
        # Confirm that is the beginning of the magic word and store the index in startIdx
        startIdx = []
        for loc in possibleLocs:
            check = byteBuffer[loc:loc + 8]
            if np.all(check == magicWord):
                startIdx.append(loc)
        # Check that startIdx is not empty
        if startIdx:
            # Remove the data before the first start index
            if startIdx[0] > 0 and startIdx[0] < byteBufferLength:
                byteBuffer[:byteBufferLength - startIdx[0]] = byteBuffer[startIdx[0]:byteBufferLength]
                byteBuffer[byteBufferLength - startIdx[0]:] = np.zeros(len(byteBuffer[byteBufferLength - startIdx[0]:]), dtype='uint8')
                byteBufferLength = byteBufferLength - startIdx[0]
            # Check that there are no errors with the byte buffer length
            if byteBufferLength < 0:
                byteBufferLength = 0
            # word array to convert 4 bytes to a 32 bit number
            word = [1, 2**8, 2**16, 2**24]
            # Read the total packet length
            totalPacketLen = np.matmul(byteBuffer[12:12 + 4], word)
            # Check that all the packet has been read
            if (byteBufferLength >= totalPacketLen) and (byteBufferLength != 0):
                magicOK = 1
    # If magicOK is equal to 1 then process the message
    if magicOK:
        # word array to convert 4 bytes to a 32 bit number
        word = [1, 2**8, 2**16, 2**24]
        # Initialize the pointer index
        idX = 0
        # Read the header
        magicNumber = byteBuffer[idX:idX + 8]
        idX += 8
        version = format(np.matmul(byteBuffer[idX:idX + 4], word), 'x')
        idX += 4
        totalPacketLen = np.matmul(byteBuffer[idX:idX + 4], word)
        idX += 4
        platform = format(np.matmul(byteBuffer[idX:idX + 4], word), 'x')
        idX += 4
        frameNumber = np.matmul(byteBuffer[idX:idX + 4], word)
        idX += 4
        timeCpuCycles = np.matmul(byteBuffer[idX:idX + 4], word)
        idX += 4
        numDetectedObj = np.matmul(byteBuffer[idX:idX + 4], word)
        idX += 4
        numTLVs = np.matmul(byteBuffer[idX:idX + 4], word)
        idX += 4
        subFrameNumber = np.matmul(byteBuffer[idX:idX + 4], word)
        idX += 4
        # Read the TLV messages
        for tlvIdx in range(numTLVs):
            # word array to convert 4 bytes to a 32 bit number
            word = [1, 2**8, 2**16, 2**24]
            # Check the header of the TLV message
            try:
                tlv_type = np.matmul(byteBuffer[idX:idX + 4], word)
                idX += 4
                tlv_length = np.matmul(byteBuffer[idX:idX + 4], word)
                idX += 4
            except:
                pass
            # Read the data depending on the TLV message
            if tlv_type == MMWDEMO_UART_MSG_DETECTED_POINTS:
                # word array to convert 2 bytes to a 16 bit number
                word = [1, 2**8]
                tlv_numObj = np.matmul(byteBuffer[idX:idX + 2], word)
                idX += 2
                tlv_xyzQFormat = 2**np.matmul(byteBuffer[idX:idX + 2], word)
                idX += 2
                # Initialize the arrays
                rangeIdx = np.zeros(tlv_numObj, dtype='int16')
                dopplerIdx = np.zeros(tlv_numObj, dtype='int16')
                peakVal = np.zeros(tlv_numObj, dtype='int16')
                x = np.zeros(tlv_numObj, dtype='int16')
                y = np.zeros(tlv_numObj, dtype='int16')
                z = np.zeros(tlv_numObj, dtype='int16')
                for objectNum in range(tlv_numObj):
                    # Read the data for each object
                    rangeIdx[objectNum] = np.matmul(byteBuffer[idX:idX + 2], word)
                    idX += 2
                    dopplerIdx[objectNum] = np.matmul(byteBuffer[idX:idX + 2], word)
                    idX += 2
                    peakVal[objectNum] = np.matmul(byteBuffer[idX:idX + 2], word)
                    idX += 2
                    x[objectNum] = np.matmul(byteBuffer[idX:idX + 2], word)
                    idX += 2
                    y[objectNum] = np.matmul(byteBuffer[idX:idX + 2], word)
                    idX += 2
                    z[objectNum] = np.matmul(byteBuffer[idX:idX + 2], word)
                    idX += 2
                # Make the necessary corrections and calculate the rest of the data
                rangeVal = rangeIdx * configParameters["rangeIdxToMeters"]
                dopplerIdx[dopplerIdx > (configParameters["numDopplerBins"] / 2 - 1)] = dopplerIdx[dopplerIdx > (configParameters["numDopplerBins"] / 2 - 1)] - 65535
                dopplerVal = dopplerIdx * configParameters["dopplerResolutionMps"]
                # x[x > 32767] = x[x > 32767] - 65536
                # y[y > 32767] = y[y > 32767] - 65536
                # z[z > 32767] = z[z > 32767] - 65536
                x = x / tlv_xyzQFormat
                y = y / tlv_xyzQFormat
                z = z / tlv_xyzQFormat
                # Store the data in the detObj dictionary
                detObj = {"numObj": tlv_numObj, "rangeIdx": rangeIdx, "range": rangeVal, "dopplerIdx": dopplerIdx,
                          "doppler": dopplerVal, "peakVal": peakVal, "x": x, "y": y, "z": z}
                dataOK = 1
        # Remove already processed data
        if idX > 0 and byteBufferLength > idX:
            shiftSize = totalPacketLen
            byteBuffer[:byteBufferLength - shiftSize] = byteBuffer[shiftSize:byteBufferLength]
            byteBuffer[byteBufferLength - shiftSize:] = np.zeros(len(byteBuffer[byteBufferLength - shiftSize:]), dtype='uint8')
            byteBufferLength = byteBufferLength - shiftSize
            # Check that there are no errors with the buffer length
            if byteBufferLength < 0:
                byteBufferLength = 0
    return dataOK, frameNumber, detObj
# ------------------------------------------------------------------
# Function to update the data and display it in the plot
def update():
    dataOk = 0
    global detObj
    x = []
    y = []
    # Read and parse the received data
    dataOk, frameNumber, detObj = readAndParseData16xx(Dataport, configParameters)
    if dataOk and len(detObj["x"]) > 0:
        # print(detObj)
        x = -detObj["x"]
        y = detObj["y"]
        s.setData(x, y)
        QtGui.QApplication.processEvents()
    return dataOk
# ------------------------- MAIN -----------------------------------------
# Configure the serial port
CLIport, Dataport = serialConfig(configFileName)
# Get the configuration parameters from the configuration file
configParameters = parseConfigFile(configFileName)
# START QtAPPfor the plot
app = QtGui.QApplication([])
# Set the plot
pg.setConfigOption('background','w')
win = pg.GraphicsLayoutWidget(title="2D scatter plot")
p = win.addPlot()
p.setXRange(-0.5,0.5)
p.setYRange(0,1.5)
p.setLabel('left',text = 'Y position (m)')
p.setLabel('bottom', text= 'X position (m)')
s = p.plot([],[],pen=None,symbol='o')
win.show()
# Main loop
detObj = {}
frameData = {}
currentIndex = 0
while True:
    try:
        # Update the data and check if the data is okay
        dataOk = update()
        if dataOk:
            # Store the current frame into frameData
            frameData[currentIndex] = detObj
            currentIndex += 1
        time.sleep(0.03)  # Sampling frequency of 30 Hz
    # Stop the program and close everything if Ctrl + C is pressed
    except KeyboardInterrupt:
        CLIport.write(('sensorStop\n').encode())
        CLIport.close()
        Dataport.close()
        win.close()
        break
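A hedged guess at the unresponsiveness, since the screenshot text is not reproduced here: update() only calls QtGui.QApplication.processEvents() when a frame parses successfully, so if no data arrives the Qt window never services its event queue and freezes (note also the import serial added at the top, which the original listing was missing). A minimal variant of the main loop that pumps events on every pass, using the same names as the code above:
while True:
    try:
        dataOk = update()
        if dataOk:
            frameData[currentIndex] = detObj
            currentIndex += 1
        # Service the GUI even when no frame was parsed this pass.
        QtGui.QApplication.processEvents()
        time.sleep(0.03)  # Sampling frequency of 30 Hz
    except KeyboardInterrupt:
        CLIport.write(('sensorStop\n').encode())
        CLIport.close()
        Dataport.close()
        win.close()
        break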
[1]: https://i.stack.imgur.com/BYLnW.png

Convert depth map (base64) of Google Street View to image

I wonder if someone knows how to convert the depth map of Google Street View (encoded as base64) into an image or a 2D matrix. I'm using Python and was able to get the base64 string, decode it, and save it to a .png file. However, the exported png file cannot be opened by any graphic viewer... I guess the base64 code wasn't decoded correctly.
Here is a post that mentioned the base64 string in GSV.
Here is my code:
import requests
import base64
import zlib

# URL of the json file of a GSV depth map
url_depthmap = 'http://maps.google.com/cbk?output=json&cb_client=maps_sv&v=4&dm=1&pm=1&ph=1&hl=en&panoid=lcptgwtxfJ6DccSzyWp0zA'
# get the json file
r = requests.get(url_depthmap)
# parse it
depth_json = r.json()
# get the base64 string of the depth map
data = depth_json['model']['depth_map']
# fix the 'Incorrect padding' error: the length of the string needs to be divisible by 4
data += "=" * ((4 - len(data) % 4) % 4)
# convert the URL-safe format to the regular format
data = data.replace('-', '+').replace('_', '/')
data = base64.decodestring(data)  # decode the string (deprecated; base64.b64decode in Python 3)
data = zlib.decompress(data)  # decompress the data
# write it to a png file
image_result = open('downloads/deer_decode.png', 'wb')
image_result.write(data)
Looking at https://github.com/proog128/GSVPanoDepth.js/blob/master/src/GSVPanoDepth.js and starting with your piece of code, here is my overall processing:
import base64
import zlib
import numpy as np
import struct
import matplotlib.pyplot as plt
def parse(b64_string):
    # fix the 'Incorrect padding' error: the length of the string needs to be divisible by 4
    b64_string += "=" * ((4 - len(b64_string) % 4) % 4)
    # convert the URL-safe format to the regular format
    data = b64_string.replace("-", "+").replace("_", "/")
    data = base64.b64decode(data)  # decode the string
    data = zlib.decompress(data)  # decompress the data
    return np.array([d for d in data])
def parseHeader(depthMap):
    return {
        "headerSize": depthMap[0],
        "numberOfPlanes": getUInt16(depthMap, 1),
        "width": getUInt16(depthMap, 3),
        "height": getUInt16(depthMap, 5),
        "offset": getUInt16(depthMap, 7),
    }

def get_bin(a):
    ba = bin(a)[2:]
    return "0" * (8 - len(ba)) + ba

def getUInt16(arr, ind):
    a = arr[ind]
    b = arr[ind + 1]
    return int(get_bin(b) + get_bin(a), 2)

def getFloat32(arr, ind):
    return bin_to_float("".join(get_bin(i) for i in arr[ind : ind + 4][::-1]))

def bin_to_float(binary):
    return struct.unpack("!f", struct.pack("!I", int(binary, 2)))[0]
def parsePlanes(header, depthMap):
    indices = []
    planes = []
    n = [0, 0, 0]
    for i in range(header["width"] * header["height"]):
        indices.append(depthMap[header["offset"] + i])
    for i in range(header["numberOfPlanes"]):
        byteOffset = header["offset"] + header["width"] * header["height"] + i * 4 * 4
        n = [0, 0, 0]
        n[0] = getFloat32(depthMap, byteOffset)
        n[1] = getFloat32(depthMap, byteOffset + 4)
        n[2] = getFloat32(depthMap, byteOffset + 8)
        d = getFloat32(depthMap, byteOffset + 12)
        planes.append({"n": n, "d": d})
    return {"planes": planes, "indices": indices}
def computeDepthMap(header, indices, planes):
    v = [0, 0, 0]
    w = header["width"]
    h = header["height"]
    depthMap = np.empty(w * h)
    sin_theta = np.empty(h)
    cos_theta = np.empty(h)
    sin_phi = np.empty(w)
    cos_phi = np.empty(w)
    for y in range(h):
        theta = (h - y - 0.5) / h * np.pi
        sin_theta[y] = np.sin(theta)
        cos_theta[y] = np.cos(theta)
    for x in range(w):
        phi = (w - x - 0.5) / w * 2 * np.pi + np.pi / 2
        sin_phi[x] = np.sin(phi)
        cos_phi[x] = np.cos(phi)
    for y in range(h):
        for x in range(w):
            planeIdx = indices[y * w + x]
            v[0] = sin_theta[y] * cos_phi[x]
            v[1] = sin_theta[y] * sin_phi[x]
            v[2] = cos_theta[y]
            if planeIdx > 0:
                plane = planes[planeIdx]
                t = np.abs(
                    plane["d"]
                    / (
                        v[0] * plane["n"][0]
                        + v[1] * plane["n"][1]
                        + v[2] * plane["n"][2]
                    )
                )
                depthMap[y * w + (w - x - 1)] = t
            else:
                depthMap[y * w + (w - x - 1)] = 9999999999999999999.0
    return {"width": w, "height": h, "depthMap": depthMap}
# see https://stackoverflow.com/questions/56242758/python-equivalent-for-javascripts-dataview
# for bytes-parsing reference
# see https://github.com/proog128/GSVPanoDepth.js/blob/master/src/GSVPanoDepth.js
# for overall processing reference
# Base64 string from request to:
# https://maps.google.com/cbk?output=xml&ll=45.508457,-73.532738&dm=1
# edit long and lat
s = "eJzt2gt0FNUdx_FsQtgEFJIQiAlvQglBILzktTOZ2YitloKKD6qAFjy2KD5ABTlqMxUsqICcVqqC1VoUqS34TkMhydjaorYCStXSKkeEihQoICpUFOzuJruZO4_dmdmZe2fY3_cc4RCS-L_z-c_sguaNy8rKzgrkZSGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBBCCCGEEEIIIYQQQgghhBCbgsFgB9YzIFYFo_wdsAAZWbC5DliATCwYVPhjATKsYNC0f5s2baiNhagUDFrzb47aeMjVgkF7_liC06JgWv7YAX-n1rfjjx3wbVp-m_5YAV_mpH-bPGpjI4dy1h8L4Lcc9scC-Cyn_bEA_sq6vzF_zB8b4Ktc8McC-Cg3_LEAvkmH3wF_LIBfcskfC-CTHH37r_DHAvgj1_yxAL7IPX8sgB9y0R8L4IPc9McCeD9X_bEAns9dfyyA13PZHwvg8Rz11_LnFVI7SVZWO2UU_71-7jTybwd_67nuT28B4G8j-Gd2LvsXFtJbAPjbyLq_MT_8_RcFf1oLAH8bwd9fFRQUOPr9aPhTWoDT378glqPfEv4-qSCRo98W_v6owM_-dBYA_jaCvz_ytz-VBYC_jeDvsfprP1SeRd0_yVfA39W0_uV0_R393z_gbzX42w3-1rP-8g9_d9P4D4C_yeBvOfhTr7Q06W_D33Z-8Y-l-ABBrvYf4B9_LX_Cf6iTQxsFf8vBn3qp_FULAH_T-cq_dQHgr1v37t0tfgX8LUfJf6hp_-7KLB4G_paj5W96IL_5d9Yp-VeY9j839qPX_I35k_zxT9d_2LDmn3v06NH6wdPBP_kCkP4jR44s7T8y1pCWRkeL_2JI1L9Xr14K_16tpT0-e_8ePUj_jq2d3v6juUhRxNL-vZIV8Q-Hwwr_0YO02R2fhf-w-M8a_45kp4l_GVncv8xs5eWRHxT-0Y_1S6T8TOvjwz_NdP1LkhXDN_zdLjqVR_5R-Ot9ijZz48PffmdF0_XvZjtDTl3_Ir3OUhcZtahrJL0z0PIfWl1dPSqWrv-YWB70P6O1Tt9qqahoYLwuXXT9E5e-p8V0QWMp_I2cI3VqLqv5M7oqa9tWcZb48QJG_n2jFTdX0ru1qip7_pWVY6OFolVWVtacE68y5h_5UKUgCGr_wZr68Txf0jpQ_BwjonOS_n365ChqTxbIVpZzpiLlZYoVCASIX8eucGIXdP31bmA9LnVdOnXS0YwX91d9WCT2RGUejTwLmZF_RUVFbl9FxfFK7PkrX-IqiWL-Le9lVP6xz-5NVBVpRLRiorMj5eYS_sb47SPnJv1ziA04k7xmigun3YNOhLvunTtQ8bRIUSfDRN1vrSVvqyiZeyr_6GWraC43Wusm2PJXvcnppyjm3wxbpfKvimsXG9TMHq1Pczk66ehr_VUroLmAqquoswjKWl8p3MjI2wK7Kf9EFcpNsOOv8z63Kn5Hx_xbPFX-KdUT8HruSfR1_bUPgRRLYGIR9NfC9F4kdTZUN8Ge0j83d7je1YwsgQ1_o9tXk7E_YZ7ihk_ysp9I199gA1IugY1V0C0lcnr3ujX_WDpLYN1f9Q7CvD8pbsU9hb6hf47ey4ClLdDZBbPLQIncir_OElj11zFsKXFvJz6i8rfJnlo_mb_xQ8DyFljcCPe1tRn4a7ESS2DRn3jvoLsAirtc7W8d3QC_vfrcyfxzkj4E0toCz2Xi9lctgSX_FErq76_ytwOvp69z7hT-KR8CBlvguzWw5h_Ngn9Kp-bv2PprZ_xT65vwzzHzEPD_08DAP3JR0vW3I-eEvxl9c_5mHwJ-fh4Y-xttgCl-W3IO-JvDD5j1t7MCvtqE_Pwk_vorkNrfJn6stPxN61vwz7H0OmB2GbyyDfn5mg0g_HU2IIW_7l8bWci2vwX8gDV_-w8BK1HxVhf1z49cAWN_zQYk9Vd-4nCbq2DL35q-VX8aK0BBW1vcP1bCP1tdUn-jN4o6mVsJy_5W8QM2_F1fAbepdSP8m5dAq0-sgFXyVCXbADv6Jg9ux9_VFXCV2TCtv3EOoqfyzzHtbws_YNvfvRVwzzhZVvzdWQGjy2zG38ZzP559f_UKOLQDbgknz6K_GyuQZAGs6Fs8eFr-mh3IIH_HV8D4EifzTws_4IC_egXS3QHnbc1ky9_ZFbBx4dO0j-aEv2YHMsffwRVIx94mfsA5f8d2wEFUC6Xh79QK2MZP6-BO-mt2wMYSOORptfT8HdkBW_bp4Qec99cuQYb4p70D1vGdOLgr_tolML0FTpzJRs74p7cDVuwdO7h7_va2wLGDWcs5f_s7QJs-ltv-eluQbA2cPZ3pnPW3twS06WPR8TfcBM0quHBEM7ngb3kJktC7d3Da_qly76RJc8vfyha0XgQK7vHIQdm5x3P_xLq5ym9yD3JoeGuCfzQ6_im2gcnJ4R-Nhb8mJicnJmCtD3_qEROw1oc_9YgJWOvDn3rEBKz14U89YgLW-vCnHjEBa334U4-YgLU-_KlHTMBaH_7UIyZgrc_K392__jcbk6MTE7DWhz_1iAlY68OfesQErPXhTz1iAtb68KceMQFrffhTj5iAtT78qUdMwFof_tQjJmCtD3_qEROw1oc_9YgJWOvDn3rEBKz14U89YgLW-vCnHjEBa334U48cgTU_Q3825srYnJ0YgTU__KlHjMCaH_7UI0ZgzQ9_6hEjsOaHP_WIEVjzw596xAis-eFPPWIE1vxs_L3xx3_4w59BxAis-eFPPWIE1vzwpx4xAmt--FOPGIE1P_ypR4zAmh_-1CNGYM0Pf-oRI7Dmhz_1yBky1p8NORGTs3vsAcDkCsA_EfzZxeTs8Ie_okzkh39rmejvkbf_8Ic_i4gZ4M8uJoeHP_wVwZ9dTA4P_0z399ICMDm_R_jhD38mEUPAn1lMDh_IeH-vvPzDH_5MIoaAP7NYHD4aMQT8mcXi8NHIKTLSnw24KhaHj0VMAX9WsTh8LGKKTPP3zOMf_vBnEzEF_FnF4PDNkWNkoD8bb3UMDt8SMQb8GcXg8C0RY2SWv3ce__CHP6PIOTLOnw23JvqHT0TMAX820T98ImKOTPL30OPfM_4MF4D6wT3Ez9LfKw8A2sf20u0Pf_izi5gko_wZaWujfXgiYpJM8ffU7c_W3xsLQPfI3uJn7O-JBaB7Ym_xw5-yv8duf9b-XlgAmsf1Gj9zfw8sAMXDeo6fvT_7BaB3VO_xe8Cf-QZQO6f39L3hz3gBKB3Sgzd_tkf8mW4AnQN6U98z_gz_ayCNw3kUP9tD_gFWK-D6sbyLn-0t_2jkdL7397R9NFdPbztiRH_ye_UFX5Vbx3coYlY_-PuEvTWHz-9q5OQe4fcbuKr0L4CnUp0uTX6f25qKCos3Y33pvRNrCfqxvuIeKUvVBQ0jQy8dv5C7_YQk140qE4-83eepF-Z-wT24W5KXc2XigpsnbdxVvzm0-ZQkDy_sJRZ1v1n4a93vmy5ecTs3ZG
_X8M79k0PrF3UNzftGkis79BVnnXw-9P6Yu7l_H5Pk54RuYtuCvsLaBePkg8d3b1pQf1CcW1Ar5F57lN8-OJc__itZvGPKkL6XjNvCn6qrlee8Wyqurb5OXpW_vemSPuWNt71WUBN8f0moYO_chuD_JPny1eXi2ZMWhW6pq-BrX5fknPMGidftnh5aOvylhkOHJXluTkfxH5-WC7vHjxM6S8P5rhUrxQtH1cqzO-1r_OxnbRqrpn5P3DbuDuHXt11affmCjvzkSavDnz8_T5i0eHH1-Y915jeWXCRW1LXhpq96gbvlkCQLh7qIQnZJaOs5O7hv9knyd0b1jcw_ivt0wmBueGSeV7qVicOO3yEf2XF-de23X2zo0P7l8DfTa7i3nsjj978nyVPaDxWv--JYqG7UWu6uI5J84vVssfc7S7gfHdjDf_fqWrlnzzXiwH43yBc98Jdq4doZDbMW7ArnPXkF94fupfz1b0ry2qfGiOUFU7mTg5bxA6-S5Jo3ponbP-rIHdh4Fv_xNknu-dYocdn6WfKkvZub_jOxd2P9_iPhpaHb5SfvW9j0xtbixp-ce1icemxE6Muc5dyrX0jyhyfyxPlPZ_GLd57kBr0iySsmni3-MHsPd2LgyYZ7XpPkZy6bL55x_ZpN0z7N4St2SnL_2lzxq9_dKry54ZdN78jLuKpFi8Rrjrbnp67b1rhpca28_IKnxaG_HTj2uaPjG_ZH9mdhzUcCt6eSG_O3eu6rA5HzF58SpoXncdMmzGl8dZEk3_f0LeJD22bLby1ZX716xIKGrEVbw48-_CB39d7_chv-KcmPbn1PuC1vjsBNXNc0uHAh9_gnPcWmLl0aTn18H79wpiSPLXpC-NOPV4cOX5zVUHVSklcW7xBKtrfj31-6heeX18qXzd0nHp5ZyAtlsxpXXC_JM2pfFIuXLOS4wjHcVZ9J8rEzy8Qb7_6aO1BXyq9_UZLnyJeLj7w0R9jz0Nrq-9_syn__vZlC0Q2z5WObn21a1K174_3zDof3DPhNw_qartWTv75LLlp-hfBo2XNjz7t3PP_1s5K8ZcXjQocr53NjpD3c0A8j86z5l9D5gUF8Xtk6vtfOWnnSlQPCo_94KdfuyM9Db38pyatu3iB-_uS73Auz2_HhP0vyzG4jxKcmb-Rq9q0MTYrsW1Hdw-K-D-7hz19X3zj-xlp5SoUY3lA_ketdsJM7GLkfHzv3A-Guv-fzB7aM5JetjJxv-kLx6ISXN065cwI_a70kZxVdKVx7sIo_OKy0cfUvJPnWxmvCcu-e_FUzahse3yzJ9bPXiHN2_GTTK5_Mb_xBxOPolEeqZxQIPBd4g__kzlo5f9op8YwdS7mbJr7LleyS5HvXfSnsWjVHvrXgmeqbLvxpw6G9H4n_Bz8_xLw"
# decode string + decompress zip
depthMapData = parse(s)
# parse first bytes to describe data
header = parseHeader(depthMapData)
# parse bytes into planes of float values
data = parsePlanes(header, depthMapData)
# compute position and values of pixels
depthMap = computeDepthMap(header, data["indices"], data["planes"])
# process float 1D array into int 2D array with 255 values
im = depthMap["depthMap"]
im[np.where(im == max(im))[0]] = 255
if min(im) < 0:
    im[np.where(im < 0)[0]] = 0
im = im.reshape((depthMap["height"], depthMap["width"])).astype(int)
# display image
plt.imshow(im)
plt.show()
(and save with plt.imsave)
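For completeness, a one-liner to save the decoded map, using the matplotlib import already in place (the filename is arbitrary):
plt.imsave('gsv_depth_decoded.png', im)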

Cannot add scalar and a vector error in VPython (GlowScript)

I'm implementing a solar system with VPython in GlowScript. I get this error when running: Error: cannot add scalar and a vector. I think I've done everything correctly. Do I have to change something with the pos?
Here is the code:
GlowScript 2.7 VPython
from visual import *
scene = display(width = 800, height = 800, center = vec(0,0.5,0))
#sun
sonne = sphere(pos = vec (0,0,0), radius=8, color = color.orange, shininess=1)
#earth
erde = sphere(pos = vec (50,0,0), radius=1.5, color = color.blue, make_trail=True)
erdeV = vector(0,0,5)
#masses
erdeM = 5.97*10**24
sonneM = 1.989*10**30
#Grav-constant
G = 6.67259*10**-11
for i in range(1000):
    rate(1000)
    erde.pos = erde.pos + erdeV
    # distance
    entfernung = sqrt(erde.pos.y**2 + erde.pos.z**2)
    # Gravitational law F = G * m * M / r*r --> G*s*e/AE*AE ae=Astr. Einheit
    Fgrav = G * (erdeM * sonneM) / (entfernung * entfernung)
    erdeV = erdeV + Fgrav
    erde.pos += erdeV
    if entfernung <= sonne.radius: break
Problem lines:
Fgrav = G *( erdeM * sonneM) / (entfernung*entfernung)
erdeV = erdeV + Fgrav
Fgrav here is a scalar (the strength of the gravitational force), whereas erdeV is a vector. To remedy this, include the direction of the force:
Fgrav = (-G * (erdeM * sonneM) / (entfernung ** 3)) * erde.pos
erdeV = erdeV + Fgrav
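A minimal sketch of the loop with the fix folded in. It keeps the question's conventions: the "force" is added straight to the velocity with no time step or mass division, so the constants would still need rescaling for a visible orbit; mag() is the VPython built-in, and the distance now includes the x component:
for i in range(1000):
    rate(1000)
    entfernung = mag(erde.pos)  # distance to the sun at the origin
    # vector form: magnitude G*m*M/r^2, direction -erde.pos/r
    Fgrav = (-G * erdeM * sonneM / entfernung**3) * erde.pos
    erdeV = erdeV + Fgrav        # vector + vector, no type error
    erde.pos = erde.pos + erdeV  # single position update per step
    if entfernung <= sonne.radius:
        break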

Cannot run the sklearn machine learning model

I am trying to use sklearn to build a machine learning model for the Lunar Lander. I use grid search to tune the model and joblib to persist it.
Here is the code:
from sklearn.externals import joblib
joblib.dump(my_tuned_model, 'player_state.pkl')
and then I copy player_state.pkl into the folder of the lunar lander. Below is the code of lunar_lander:
import sys, math
import csv  # needed for csv.writer at the bottom
import numpy as np
from sklearn.externals import joblib
import cv2
# MOD Extra imports for image handling
from PIL import Image
import os
import time
import datetime
import keras
import Box2D
from Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)
import gym
from gym import spaces
from gym.utils import seeding
# Rocket trajectory optimization is a classic topic in Optimal Control.
#
# According to Pontryagin's maximum principle it's optimal to fire engine full throttle or
# turn it off. That's the reason this environment is OK to have discreet actions (engine on or off).
#
# Landing pad is always at coordinates (0,0). Coordinates are the first two numbers in state vector.
# Reward for moving from the top of the screen to landing pad and zero speed is about 100..140 points.
# If lander moves away from landing pad it loses reward back. Episode finishes if the lander crashes or
# comes to rest, receiving additional -100 or +100 points. Each leg ground contact is +10. Firing main
# engine is -0.3 points each frame. Solved is 200 points.
#
# Landing outside landing pad is possible. Fuel is infinite, so an agent can learn to fly and then land
# on its first attempt. Please see source code for details.
#
# Too see heuristic landing, run:
#
# python gym/envs/box2d/lunar_lander_mod.py
#
# To play yourself, run:
#
# python examples/agents/keyboard_agent.py LunarLander-v0
#
# Created by Oleg Klimov. Licensed on the same terms as the rest of OpenAI Gym.
FPS = 50
SCALE = 30.0 # affects how fast-paced the game is, forces should be adjusted as well
MAIN_ENGINE_POWER = 13.0
SIDE_ENGINE_POWER = 0.6
INITIAL_RANDOM = 1000.0 # Set 1500 to make game harder
LANDER_POLY = [
(-14, +17), (-17, 0), (-17, -10),
(+17, -10), (+17, 0), (+14, +17)
]
LEG_AWAY = 20
LEG_DOWN = 18
LEG_W, LEG_H = 2, 8
LEG_SPRING_TORQUE = 40
SIDE_ENGINE_HEIGHT = 14.0
SIDE_ENGINE_AWAY = 12.0
VIEWPORT_W = 600
VIEWPORT_H = 400
class ContactDetector(contactListener):
    def __init__(self, env):
        contactListener.__init__(self)
        self.env = env

    def BeginContact(self, contact):
        if self.env.lander == contact.fixtureA.body or self.env.lander == contact.fixtureB.body:
            self.env.game_over = True
        for i in range(2):
            if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:
                self.env.legs[i].ground_contact = True

    def EndContact(self, contact):
        for i in range(2):
            if self.env.legs[i] in [contact.fixtureA.body, contact.fixtureB.body]:
                self.env.legs[i].ground_contact = False
class LunarLander(gym.Env):
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': FPS
    }

    continuous = False

    def __init__(self):
        self.seed()
        self.viewer = None
        self.world = Box2D.b2World()
        self.moon = None
        self.lander = None
        self.particles = []
        self.prev_reward = None
        high = np.array([np.inf] * 8)  # useful range is -1 .. +1, but spikes can be higher
        self.observation_space = spaces.Box(-high, high)
        if self.continuous:
            # Action is two floats [main engine, left-right engines].
            # Main engine: -1..0 off, 0..+1 throttle from 50% to 100% power. Engine can't work with less than 50% power.
            # Left-right: -1.0..-0.5 fire left engine, +0.5..+1.0 fire right engine, -0.5..0.5 off
            self.action_space = spaces.Box(-1, +1, (2,))
        else:
            # Nop, fire left engine, main engine, right engine
            self.action_space = spaces.Discrete(4)
        self.reset()

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _destroy(self):
        if not self.moon: return
        self.world.contactListener = None
        self._clean_particles(True)
        self.world.DestroyBody(self.moon)
        self.moon = None
        self.world.DestroyBody(self.lander)
        self.lander = None
        self.world.DestroyBody(self.legs[0])
        self.world.DestroyBody(self.legs[1])
    def reset(self):
        self._destroy()
        self.world.contactListener_keepref = ContactDetector(self)
        self.world.contactListener = self.world.contactListener_keepref
        self.game_over = False
        self.prev_shaping = None
        W = VIEWPORT_W / SCALE
        H = VIEWPORT_H / SCALE
        # terrain
        CHUNKS = 11
        height = self.np_random.uniform(0, H / 2, size=(CHUNKS + 1,))
        chunk_x = [W / (CHUNKS - 1) * i for i in range(CHUNKS)]
        self.helipad_x1 = chunk_x[CHUNKS // 2 - 1]
        self.helipad_x2 = chunk_x[CHUNKS // 2 + 1]
        self.helipad_y = H / 4
        height[CHUNKS // 2 - 2] = self.helipad_y
        height[CHUNKS // 2 - 1] = self.helipad_y
        height[CHUNKS // 2 + 0] = self.helipad_y
        height[CHUNKS // 2 + 1] = self.helipad_y
        height[CHUNKS // 2 + 2] = self.helipad_y
        smooth_y = [0.33 * (height[i - 1] + height[i + 0] + height[i + 1]) for i in range(CHUNKS)]
        self.moon = self.world.CreateStaticBody(shapes=edgeShape(vertices=[(0, 0), (W, 0)]))
        self.sky_polys = []
        for i in range(CHUNKS - 1):
            p1 = (chunk_x[i], smooth_y[i])
            p2 = (chunk_x[i + 1], smooth_y[i + 1])
            self.moon.CreateEdgeFixture(
                vertices=[p1, p2],
                density=0,
                friction=0.1)
            self.sky_polys.append([p1, p2, (p2[0], H), (p1[0], H)])
        self.moon.color1 = (0.0, 0.0, 0.0)
        self.moon.color2 = (0.0, 0.0, 0.0)
        initial_y = VIEWPORT_H / SCALE
        self.lander = self.world.CreateDynamicBody(
            position=(VIEWPORT_W / SCALE / 2, initial_y),
            angle=0.0,
            fixtures=fixtureDef(
                shape=polygonShape(vertices=[(x / SCALE, y / SCALE) for x, y in LANDER_POLY]),
                density=5.0,
                friction=0.1,
                categoryBits=0x0010,
                maskBits=0x001,  # collide only with ground
                restitution=0.0)  # 0.99 bouncy
        )
        self.lander.color1 = (0.5, 0.4, 0.9)
        self.lander.color2 = (0.3, 0.3, 0.5)
        self.lander.ApplyForceToCenter((
            self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM),
            self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM)
        ), True)
        self.legs = []
        for i in [-1, +1]:
            leg = self.world.CreateDynamicBody(
                position=(VIEWPORT_W / SCALE / 2 - i * LEG_AWAY / SCALE, initial_y),
                angle=(i * 0.05),
                fixtures=fixtureDef(
                    shape=polygonShape(box=(LEG_W / SCALE, LEG_H / SCALE)),
                    density=1.0,
                    restitution=0.0,
                    categoryBits=0x0020,
                    maskBits=0x001)
            )
            leg.ground_contact = False
            leg.color1 = (0.5, 0.4, 0.9)
            leg.color2 = (0.3, 0.3, 0.5)
            rjd = revoluteJointDef(
                bodyA=self.lander,
                bodyB=leg,
                localAnchorA=(0, 0),
                localAnchorB=(i * LEG_AWAY / SCALE, LEG_DOWN / SCALE),
                enableMotor=True,
                enableLimit=True,
                maxMotorTorque=LEG_SPRING_TORQUE,
                motorSpeed=+0.3 * i  # low enough not to jump back into the sky
            )
            if i == -1:
                rjd.lowerAngle = +0.9 - 0.5  # Yes, the most esoteric numbers here, angles legs have freedom to travel within
                rjd.upperAngle = +0.9
            else:
                rjd.lowerAngle = -0.9
                rjd.upperAngle = -0.9 + 0.5
            leg.joint = self.world.CreateJoint(rjd)
            self.legs.append(leg)
        self.drawlist = [self.lander] + self.legs
        return self.step(np.array([0, 0]) if self.continuous else 0)[0]
    def _create_particle(self, mass, x, y, ttl):
        p = self.world.CreateDynamicBody(
            position=(x, y),
            angle=0.0,
            fixtures=fixtureDef(
                shape=circleShape(radius=2 / SCALE, pos=(0, 0)),
                density=mass,
                friction=0.1,
                categoryBits=0x0100,
                maskBits=0x001,  # collide only with ground
                restitution=0.3)
        )
        p.ttl = ttl
        self.particles.append(p)
        self._clean_particles(False)
        return p

    def _clean_particles(self, all):
        while self.particles and (all or self.particles[0].ttl < 0):
            self.world.DestroyBody(self.particles.pop(0))
    def step(self, action):
        assert self.action_space.contains(action), "%r (%s) invalid " % (action, type(action))
        # Engines
        tip = (math.sin(self.lander.angle), math.cos(self.lander.angle))
        side = (-tip[1], tip[0])
        dispersion = [self.np_random.uniform(-1.0, +1.0) / SCALE for _ in range(2)]
        m_power = 0.0
        if (self.continuous and action[0] > 0.0) or (not self.continuous and action == 2):
            # Main engine
            if self.continuous:
                m_power = (np.clip(action[0], 0.0, 1.0) + 1.0) * 0.5  # 0.5..1.0
                assert m_power >= 0.5 and m_power <= 1.0
            else:
                m_power = 1.0
            ox = tip[0] * (4 / SCALE + 2 * dispersion[0]) + side[0] * dispersion[1]  # 4 is move a bit downwards, +-2 for randomness
            oy = -tip[1] * (4 / SCALE + 2 * dispersion[0]) - side[1] * dispersion[1]
            impulse_pos = (self.lander.position[0] + ox, self.lander.position[1] + oy)
            p = self._create_particle(3.5, impulse_pos[0], impulse_pos[1],
                                      m_power)  # particles are just a decoration, 3.5 is here to make particle speed adequate
            p.ApplyLinearImpulse((ox * MAIN_ENGINE_POWER * m_power, oy * MAIN_ENGINE_POWER * m_power), impulse_pos,
                                 True)
            self.lander.ApplyLinearImpulse((-ox * MAIN_ENGINE_POWER * m_power, -oy * MAIN_ENGINE_POWER * m_power),
                                           impulse_pos, True)
        s_power = 0.0
        if (self.continuous and np.abs(action[1]) > 0.5) or (not self.continuous and action in [1, 3]):
            # Orientation engines
            if self.continuous:
                direction = np.sign(action[1])
                s_power = np.clip(np.abs(action[1]), 0.5, 1.0)
                assert s_power >= 0.5 and s_power <= 1.0
            else:
                direction = action - 2
                s_power = 1.0
            ox = tip[0] * dispersion[0] + side[0] * (3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE)
            oy = -tip[1] * dispersion[0] - side[1] * (3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE)
            impulse_pos = (self.lander.position[0] + ox - tip[0] * 17 / SCALE,
                           self.lander.position[1] + oy + tip[1] * SIDE_ENGINE_HEIGHT / SCALE)
            p = self._create_particle(0.7, impulse_pos[0], impulse_pos[1], s_power)
            p.ApplyLinearImpulse((ox * SIDE_ENGINE_POWER * s_power, oy * SIDE_ENGINE_POWER * s_power), impulse_pos,
                                 True)
            self.lander.ApplyLinearImpulse((-ox * SIDE_ENGINE_POWER * s_power, -oy * SIDE_ENGINE_POWER * s_power),
                                           impulse_pos, True)
        self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
        pos = self.lander.position
        vel = self.lander.linearVelocity
        state = [
            (pos.x - VIEWPORT_W / SCALE / 2) / (VIEWPORT_W / SCALE / 2),
            (pos.y - (self.helipad_y + LEG_DOWN / SCALE)) / (VIEWPORT_W / SCALE / 2),
            vel.x * (VIEWPORT_W / SCALE / 2) / FPS,
            vel.y * (VIEWPORT_H / SCALE / 2) / FPS,
            self.lander.angle,
            20.0 * self.lander.angularVelocity / FPS,
            1.0 if self.legs[0].ground_contact else 0.0,
            1.0 if self.legs[1].ground_contact else 0.0
        ]
        assert len(state) == 8
        reward = 0
        shaping = \
            - 100 * np.sqrt(state[0] * state[0] + state[1] * state[1]) \
            - 100 * np.sqrt(state[2] * state[2] + state[3] * state[3]) \
            - 100 * abs(state[4]) + 10 * state[6] + 10 * state[7]
        # And ten points for legs contact; the idea is that if you
        # lose contact again after landing, you get negative reward
        if self.prev_shaping is not None:
            reward = shaping - self.prev_shaping
        self.prev_shaping = shaping
        reward -= m_power * 0.30  # less fuel spent is better, about -30 for heuristic landing
        reward -= s_power * 0.03
        done = False
        if self.game_over or abs(state[0]) >= 1.0:
            done = True
            reward = -100
        if not self.lander.awake:
            done = True
            reward = +100
        return np.array(state), reward, done, {}
    def render(self, mode='human'):
        from gym.envs.classic_control import rendering
        if self.viewer is None:
            self.viewer = rendering.Viewer(VIEWPORT_W, VIEWPORT_H)
            self.viewer.set_bounds(0, VIEWPORT_W / SCALE, 0, VIEWPORT_H / SCALE)
        for obj in self.particles:
            obj.ttl -= 0.15
            obj.color1 = (max(0.2, 0.2 + obj.ttl), max(0.2, 0.5 * obj.ttl), max(0.2, 0.5 * obj.ttl))
            obj.color2 = (max(0.2, 0.2 + obj.ttl), max(0.2, 0.5 * obj.ttl), max(0.2, 0.5 * obj.ttl))
        self._clean_particles(False)
        for p in self.sky_polys:
            self.viewer.draw_polygon(p, color=(0, 0, 0))
        for obj in self.particles + self.drawlist:
            for f in obj.fixtures:
                trans = f.body.transform
                if type(f.shape) is circleShape:
                    t = rendering.Transform(translation=trans * f.shape.pos)
                    self.viewer.draw_circle(f.shape.radius, 20, color=obj.color1).add_attr(t)
                    self.viewer.draw_circle(f.shape.radius, 20, color=obj.color2, filled=False, linewidth=2).add_attr(t)
                else:
                    path = [trans * v for v in f.shape.vertices]
                    self.viewer.draw_polygon(path, color=obj.color1)
                    path.append(path[0])
                    self.viewer.draw_polyline(path, color=obj.color2, linewidth=2)
        for x in [self.helipad_x1, self.helipad_x2]:
            flagy1 = self.helipad_y
            flagy2 = flagy1 + 50 / SCALE
            self.viewer.draw_polyline([(x, flagy1), (x, flagy2)], color=(1, 1, 1))
            self.viewer.draw_polygon([(x, flagy2), (x, flagy2 - 10 / SCALE), (x + 25 / SCALE, flagy2 - 5 / SCALE)],
                                     color=(0.8, 0.8, 0))
        return self.viewer.render(return_rgb_array=mode == 'rgb_array')

    def close(self):
        if self.viewer is not None:
            self.viewer.close()
            self.viewer = None

class LunarLanderContinuous(LunarLander):
    continuous = True
if __name__ == "__main__":
# Load the Lunar Lander environment
env = LunarLander()
total_rewards = list()
for i in range(0, 10):
s = env.reset()
# Load and initialise the contrll model
ROWS = 64
COLS = 64
CHANNELS = 1
model = joblib.load('player_state.pkl')
# Run the game loop
total_reward = 0
steps = 0
while True:
# Get the model to make a prediction
a = model.predict_classes(s)
a = a[0]
# Step on the game
s, r, done, info = env.step(a)
env.render()
total_reward += r
if steps % 20 == 0 or done:
print(["{:+0.2f}".format(x) for x in s])
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
steps += 1
if done:
total_rewards.append(total_reward)
break
print("total rewards", total_rewards)
print("average total reward", np.mean(total_rewards))
# Write total rewards to file
f = open("lunarlander_ml_states_rewards.csv", 'w')
wr = csv.writer(f)
for r in total_rewards:
wr.writerow([r, ])
f.close()
When I run the code, this error occurs:
Traceback (most recent call last):
File "/Users/leejoonsung/PycharmProjects/lunar_lander/lunar_lander_ml_states_player.py", line 406, in <module>
a = model.predict_classes(s)
AttributeError: 'GridSearchCV' object has no attribute 'predict_classes'
Could anyone help me solve this problem?
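For reference: predict_classes is a Keras Sequential-model method; a fitted GridSearchCV (like the scikit-learn estimators it wraps) exposes predict instead. A minimal sketch of the fix inside the game loop above, assuming the pickled object is the fitted GridSearchCV and that it was trained on the 8-value state vector:
# s is a 1D state of 8 floats; scikit-learn expects a 2D (n_samples, n_features) array
a = model.predict(np.array(s).reshape(1, -1))[0]  # instead of model.predict_classes(s)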

How to draw Buddhabrot fractal?

I'm trying to implement the Buddhabrot fractal in Python. I read a lot of articles and posts, but I think I misunderstood something (just see the image). Can someone write pseudocode?
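For reference, a pseudocode sketch of the standard procedure. The key point is that the whole orbit is buffered and committed to the histogram only if the point eventually escapes, which is the reverse of the first attempt below (it keeps the histogram only when i == maxiter, i.e. when the orbit never escapes):
# buddhabrot(width, height, points, maxiter):
#     histogram = zeros(width, height)
#     repeat points times:
#         c = random complex sample from the region of interest
#         orbit = []              # remember every z the iteration visits
#         z = 0
#         for i in 1..maxiter:
#             z = z*z + c
#             if |z| > 2:         # escaped: commit the whole orbit
#                 for w in orbit: histogram[pixel(w)] += 1
#                 break
#             orbit.append(z)
#     plot histogram           # non-escaping orbits contribute nothing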
My code is this:
from multiprocessing import Pool
from random import randrange
import matplotlib.pyplot as plt
import numpy as np
from math import ceil
maxiter = 1000
points = 1000
xmin, xmax = -2, 1
ymin, ymax = -2, 1
cores = 4
width, height = 200, 200
maxn = width * height
incrx, incry = abs(xmax - xmin) / width, abs(ymax - ymin) / height
def randomComplexGenerator():
    for i in range(points):
        n = randrange(maxn)
        yield complex(n // height * incrx, n % width * incry)

def buddhabrot(c):
    m, z, i = np.zeros((width, height)), c, 0
    while i < maxiter and abs(z) < 2:
        x, y = ceil(z.real / incrx), ceil(z.imag / incry)
        m[x, y] += 1
        z = z ** 2 + c
        i += 1
    return m if i == maxiter else 0
if __name__ == '__main__':
    a = np.linspace(xmin, xmax, width)
    b = np.linspace(ymin, ymax, height)
    with Pool(cores) as p:
        ms = p.map(buddhabrot, (c for c in randomComplexGenerator()))
    res = 0
    for m in ms:
        res += m
    plt.axis('off')
    plt.imshow(res)
    plt.show()
The image generated with my code is this (lel):
After days, this is the code that I created, which seems to generate the fractal appropriately. Any performance suggestion is welcome.
from multiprocessing import Pool
from random import randrange
import matplotlib.pyplot as plt
import numpy as np
cores = 4
maxiter = 10000
points = 1000000
width, height = 200, 200
rdom, idom = (-2, 2), (-2, 2)
xdom, ydom = (0, width - 1), (0, height - 1)
def randomComplex():
    r = np.interp(randrange(xdom[0], xdom[1]), xdom, rdom)
    i = np.interp(randrange(ydom[0], ydom[1]), ydom, idom)
    return (r, i)

def complex2pixel(c):
    x = int(np.interp(c[0], rdom, xdom))
    y = int(np.interp(c[1], idom, ydom))
    return (x, y)

def escapedPixels(c):
    pixels, z = {}, c
    for i in range(maxiter):
        z2 = (z[0] * z[0], z[1] * z[1])
        if z2[0] + z2[1] > 4: break
        p = complex2pixel(z)
        try:
            pixels[p] += 1
        except:
            pixels[p] = 1
        z = (z2[0] - z2[1] + c[0], 2 * z[0] * z[1] + c[1])
    return pixels if i < maxiter - 1 else {}
if __name__ == '__main__':
    with Pool(cores) as p:
        ds = p.map(escapedPixels, (randomComplex() for i in range(points)))
    m = np.zeros((width, height))
    for d in ds:
        for p in d:
            m[p] += d[p]
    plt.axis('off')
    plt.imshow(m)
    plt.show()
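One hedged performance idea for the accumulation step (the per-point dict plus try/except): collect each orbit as coordinate lists and let NumPy batch the histogram update. The helper name commit_orbit is illustrative, not from the code above; np.add.at counts repeated pixels correctly, unlike plain fancy-index assignment.
import numpy as np

def commit_orbit(m, xs, ys):
    # Equivalent to incrementing m[x, y] once per orbit visit,
    # including duplicate pixels within the same orbit.
    np.add.at(m, (np.asarray(xs), np.asarray(ys)), 1)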
