Import "lab_utils_uni could not be resolved - python

I need help with this problem: Import "lab_utils_uni" could not be resolved. I installed numpy and matplotlib, but lab_utils_uni didn't work. I am working in Visual Studio Code, by the way.
import numpy as np
import matplotlib.pyplot as plt
from lab_utils_uni import plt_intuition, plt_stationary, plt_update_onclick, soup_bowl

x_train = np.array([1.0, 2.0])
y_train = np.array([300.0, 500.0])

def compute_cost(x, y, w, b):
    # number of training examples
    m = x.shape[0]
    cost_sum = 0
    for i in range(m):
        f_wb = w * x[i] + b
        cost = (f_wb - y[i]) ** 2
        cost_sum = cost_sum + cost
    total_cost = (1 / (2 * m)) * cost_sum
    return total_cost

plt_intuition(x_train, y_train)

It's a "reportMissingImports" warning.
lab_utils_uni is local drawing routines.
You need a lab_utils_uni.py file in your workspace.

That's simple: copy the lab_utils_uni.py file to the same directory your code is saved in. You can download the Python file from the "Files" tab in the Coursera online lab.
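If copying the file next to your script is not an option, a minimal sketch of a runtime workaround (assuming a hypothetical ~/coursera_labs directory holding the downloaded file) is to put its location on sys.path before importing:

import os
import sys

# hypothetical location of the downloaded lab_utils_uni.py;
# adjust to wherever you actually saved the file
labs_dir = os.path.expanduser("~/coursera_labs")
sys.path.append(labs_dir)

from lab_utils_uni import plt_intuition  # now resolvable at runtime

Note that this only fixes the import at runtime; to silence the Pylance warning itself in VS Code, the directory also needs to be visible to the language server (for example via its extra-paths analysis setting), or you simply keep the file next to your script as suggested above.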

This code is from the Coursera - DeepLearning.AI course.
lab_utils_uni is a common utility library used to run the code in the online lab on Coursera, but it doesn't show up in the lab's Files section.
To look at the contents of the file, try this:
import inspect, os

path = os.path.abspath(inspect.getfile(plt_stationary))
print(path)
with open(path, 'r') as f:
    print(f.read())

Related

Plotting a decaying exponential in Pycharm from a CSV file

I am trying to plot this data as a decaying exponential of the form y = a*(-1)*exp(-x/t). All of the data has the same x values; just the y values differ.
I am not getting the correct chart when it runs (the CSV file was linked in the original post, and the image showed the type of curve I am looking for). I need to plot all of the data in the CSV (preferably on the same plot) in PyCharm. I am relatively new to PyCharm, so I am starting from scratch (Excel just wouldn't behave for this data). I am willing to start fresh as well if there is a simpler way of writing the code; I pieced this together with some help from the internet.
import scipy.signal as scp
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os

def decaying_exponential(x, a, t, c):
    return a * (-1) * np.exp(-1 * x / t) + c

for f in os.listdir("/Users/flyar/My Python Stuff/"):
    print(f)

df = np.transpose(pd.read_csv("D:/Grad Lab/NMR/Data/T1 Data/mineral oil/F0009CH1.CSV", names=['a', 'b', 'c', 'd']).to_numpy())
temp = scp.find_peaks(df[2], height=0)
df_subset = [(df[1][n], df[2][n]) for n in temp[0]]
print(df_subset)

plt.scatter([df[2][n] for n in temp[0]], [df[1][n] for n in temp[0]])
y = np.linspace(min(df[2]), max(df[2]), 1000)
params, covs = curve_fit(decaying_exponential, [df[1][n] for n in temp[0][2::]],
                         [df[2][n] for n in temp[0][2::]], maxfev=10000)
print(params)
plt.plot(y, [decaying_exponential(l, 5, params[1], params[2]) for l in y])
plt.show()
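For reference, a minimal sketch of fitting and plotting one decaying exponential with curve_fit (the synthetic x and y below are illustrative stand-ins for one CSV trace, not the question's data):

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

def decaying_exponential(x, a, t, c):
    # same model as above: y = -a * exp(-x / t) + c
    return -a * np.exp(-x / t) + c

# illustrative synthetic data standing in for one CSV trace
x = np.linspace(0, 10, 200)
y = decaying_exponential(x, 2.0, 3.0, 1.0) + np.random.normal(0, 0.05, x.size)

# p0 gives curve_fit a reasonable starting point for a, t, c
params, cov = curve_fit(decaying_exponential, x, y, p0=(1.0, 1.0, 0.0), maxfev=10000)

xs = np.linspace(x.min(), x.max(), 1000)
plt.scatter(x, y, s=5)
plt.plot(xs, decaying_exponential(xs, *params), color="red")
plt.show()

The key points are that the x array passed to curve_fit must be the independent variable (not the y values), and that the fitted parameters returned in params should all be used when plotting the fitted curve.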

Store variables on a new dimension in a nested for loop python

I am new to Python, trying to move from Matlab.
I am reading multiple .nc files using a for loop and am trying to save them along a new record dimension. A section of the code is below. j and j1 each have a size of 4x30x30, and I am trying to store the data of j in the variable appn for the three values of e along the 0th dimension, so that appn is a variable with size 3x4x30x30.
It is simple in Matlab, but I could not figure out a way in Python.
import os, sys
import netCDF4
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
from cdo import *
import xarray as xr

cdo = Cdo()
indices = ["T"]
models = ["MJ", "MK", "ML"]
seasons = ["JJA", "DJF"]
period = ["base", "proj"]
exp = ["ssp1", "ssp2", "ssp3"]
odir = "/outs_pytry/season/"
top = "/outs_pytry/monmean_1/"
os.makedirs(odir, exist_ok=True)
#appn=[]
appnx = []
#pr_arr = np.zeros([models,nlat,nlon], dtype='f4')
#pr_arr = np.zeros([], dtype='f4')
j = []
for m in models:
    folder = "%s" % (top)
    if m in ["ML"]:
        run = "r1i1p1f2"
    else:
        run = "r1i1p1f1"
    for i in indices:
        for e in exp:
            origfi1 = '%s%s_%s_%s_%s_base.nc' % (folder, i, m, e, run)
            origfi2 = '%s%s_%s_%s_%s_proj.nc' % (folder, i, m, e, run)
            k = cdo.timselmean(3, 11, 9, input="%s" % origfi1, output="%s%s_%s_%s_%s_DJF_proj.nc" % (odir, i, m, e, run), returnCdf=True)
            k1 = cdo.timselmean(3, 5, 9, input="%s" % origfi1, output="%s%s_%s_%s_%s_JJA_proj.nc" % (odir, i, m, e, run), returnCdf=True)
            j = k.variables["T"][:]
            j1 = k1.variables["T"][:]
            lat = k.variables["lat"]
            lon = k.variables["lon"]
            #appn=np.zeros([3,4,lon,lat], dtype='f4')
            datain = np.array(j)
            # Confused about how to store data in appn so that it has a fourth dimension of size 'e'?
            # appn(e,:,:,:) = datain   (Matlab-style indexing, not valid Python)
            appn.append(datain)
In Python you can append dictionaries or lists to a list. If you want to append to appn a dictionary with all of datain's variables, plus "e", you can do it like this: {"e": e, **datain}.
If you only want to append an array of the values to appn, use [e, *datain.values()] to create a list and then append it. I put an example below.
import os, sys
import netCDF4
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
from cdo import *
import xarray as xr

cdo = Cdo()
indices = ["T"]
models = ["MJ", "MK", "ML"]
seasons = ["JJA", "DJF"]
period = ["base", "proj"]
exp = ["ssp1", "ssp2", "ssp3"]
odir = "/outs_pytry/season/"
top = "/outs_pytry/monmean_1/"
os.makedirs(odir, exist_ok=True)
#appn=[]
appnx = []
#pr_arr = np.zeros([models,nlat,nlon], dtype='f4')
#pr_arr = np.zeros([], dtype='f4')
j = []
j1 = []
appn = []
for m in models:
    folder = "%s" % (top)
    if m in ["ML"]:
        run = "r1i1p1f2"
    else:
        run = "r1i1p1f1"
    for i in indices:
        for e in exp:
            origfi1 = '%s%s_%s_%s_%s_base.nc' % (folder, i, m, e, run)
            origfi2 = '%s%s_%s_%s_%s_proj.nc' % (folder, i, m, e, run)
            k = cdo.timselmean(3, 11, 9, input="%s" % origfi1, output="%s%s_%s_%s_%s_DJF_proj.nc" % (odir, i, m, e, run), returnCdf=True)
            k1 = cdo.timselmean(3, 5, 9, input="%s" % origfi1, output="%s%s_%s_%s_%s_JJA_proj.nc" % (odir, i, m, e, run), returnCdf=True)
            j = k.variables["T"][:]
            j1 = k1.variables["T"][:]
            lat = k.variables["lat"]
            lon = k.variables["lon"]
            #appn=np.zeros([3,4,lon,lat], dtype='f4')
            datain = np.array(j)
            appn.append({"e": e, **datain})  # or: appn.append([e, *datain.values()])

Multiprocessing and scipy (dblquad)

I am trying to speed up the following code in python:
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy import integrate
import camb
from tqdm import tqdm
import os

# Reading a PS
dir = os.getcwd()
data = np.loadtxt(dir + "/ps1-peacock.txt")
kh = data[:, 0]
p_lin = data[:, 1]
p_nlin = data[:, 2]
p_linear = interpolate.interp1d(kh, p_lin)

# Integrand of P22
def upper_mu(x):
    return min(1.0, (kk**2 + np.exp(2*x)) / (2*kk*np.exp(x)))

def lower_mu(x):
    return max(-1.0, -(kk**2 + np.exp(x)) / (2*kk*np.exp(x)))

def mulow(x):
    return max(-1.0, (kh[-1]**2.0 - kk**2.0 - np.exp(x)**2.0) / (-2.0*kk*np.exp(x)))

def muhigh(x):
    return min(1.0, (kh[0]**2.0 - kk**2.0 - np.exp(x)**2.0) / (-2.0*kk*np.exp(x)))

def f22(mu, q, k):
    r = np.exp(q) / k
    F = (7.0*mu + (3.0 - 10.0*mu**2)*r) / (14.0*r*(r**2 - 2.0*mu*r + 1.0))
    psik = (k**2 + np.exp(2*q) - 2.0*k*mu*np.exp(q))**0.5
    if psik > kh[0] and psik < kh[-1]:
        return 1.0/2.0/np.pi**2.0 * np.exp(3*q) * p_linear(np.exp(q)) * p_linear(psik) * F**2
    else:
        return 0

P22 = np.zeros_like(kh)
error = np.zeros_like(kh)
for i in tqdm(range(0, np.shape(kh)[0])):
    kk = kh[i]
    P22[i], error[i] = integrate.dblquad(f22, np.log(kh[0]), np.log(kh[-1]), mulow, muhigh, args=(kh[i],), epsrel=1e-3, epsabs=50)[:2]
The integral itself was shown as an image in the original post for clarity; it is omitted here.
I would like to use multiprocessing to improve the performance of dblquad(). Does anyone know how I can implement it in this specific case?
Multiprocessing won't help here: you cannot split the work of a single dblquad call between Python processes.
If you have several integrals to compute, then yes, you can split the integrals between processes. Whether this is worth it depends strongly on the amount of work there is for each process.
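Since the loop above computes one independent integral per entry of kh, that split can look like the following sketch (assuming f22, kh, np and integrate are defined at module level as in the question; the bound functions are rewritten to close over their own k instead of the global kk):

from multiprocessing import Pool

def compute_P22_at(kk):
    # integration bounds for this particular k value
    def mulow(x):
        return max(-1.0, (kh[-1]**2 - kk**2 - np.exp(x)**2) / (-2.0 * kk * np.exp(x)))
    def muhigh(x):
        return min(1.0, (kh[0]**2 - kk**2 - np.exp(x)**2) / (-2.0 * kk * np.exp(x)))
    return integrate.dblquad(f22, np.log(kh[0]), np.log(kh[-1]),
                             mulow, muhigh, args=(kk,),
                             epsrel=1e-3, epsabs=50)

if __name__ == "__main__":
    # one worker per CPU core; each integral runs independently
    with Pool() as pool:
        results = pool.map(compute_P22_at, kh)
    P22, error = (np.array(v) for v in zip(*results))

The worker function must be defined at module level so it can be pickled, and the __main__ guard is required on platforms that spawn rather than fork.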

Truncated image using matplotlib.pyplot with Python 3 running on Windows 10 and Jupyter NB

I was trying to get a feel for the basic syntax structure of the code for the Mandelbrot set in Python, and came across this online resource. The code calls (optionally) for the package numba, which I don't have installed, although I doubt this is the problem.
The issue is that the output image (omitted here) is not the expected picture: it truly looks like a very zoomed-in, low-resolution segment of it.
The code that I ran (from the YouTube video) is:
import numpy
import matplotlib.pyplot as plt

def mandelbrot(Re, Im, max_iter):
    C = complex(Re, Im)
    z = 0.0j
    for i in range(max_iter):
        z = z*z + C
        if (z.real * z.real + z.imag * z.imag) >= 4:
            return i
    return max_iter

columns = 2000
rows = 2000
result = numpy.zeros([rows, columns])
for row_index, Re in enumerate(numpy.linspace(-2, 1, num=rows)):
    for column_index, Im in enumerate(numpy.linspace(-1, 1, num=columns)):
        result[row_index, column_index] = mandelbrot(Re, Im, 100)

plt.figure(dpi=100)
plt.imshow(result.T, cmap='hot', interpolation='bilinear', extent=[-2, 1, -1, 1])
plt.xlabel('Re')
plt.ylabel('Im')
plt.show()
Is the problem in the plotting parameters? If so, any suggestions?
After the accepted answer, here is the corrected code, ready to copy, paste and run:
import numpy
import matplotlib.pyplot as plt

def mandelbrot(Re, Im, max_iter):
    C = complex(Re, Im)
    z = 0.0j
    for i in range(max_iter):
        z = z*z + C
        if (z.real * z.real + z.imag * z.imag) >= 4:
            return i
    return max_iter

columns = 2000
rows = columns
re1 = 0.0070
re2 = 0.0085
im1 = -.74770
im2 = -.74600
result = numpy.zeros([rows, columns])
for row_index, Re in enumerate(numpy.linspace(re1, re2, num=rows)):
    for column_index, Im in enumerate(numpy.linspace(im1, im2, num=columns)):
        result[row_index, column_index] = mandelbrot(Re, Im, 200)

plt.figure(dpi=150)
plt.imshow(result.T, cmap='plasma', interpolation='bilinear', extent=[re1, re2, im1, im2])
plt.xlabel('Re')
plt.ylabel('Im')
plt.show()
Screenshots comparing the code from the video and the code from the question were included in the original answer; they are omitted here.
As I have commented earlier, indentation matters!
In general, a better tutorial on this matter might be https://www.ibm.com/developerworks/community/blogs/jfp/entry/How_To_Compute_Mandelbrodt_Set_Quickly?lang=en, which has nice explanations of efficiency optimizations.
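As a taste of those optimizations, here is a sketch (not the video's code) of a vectorized NumPy version that iterates whole grids of points at once instead of looping in Python:

import numpy as np
import matplotlib.pyplot as plt

def mandelbrot_grid(re_min, re_max, im_min, im_max, n, max_iter):
    # build the full grid of starting points C in one shot
    re = np.linspace(re_min, re_max, n)
    im = np.linspace(im_min, im_max, n)
    C = re[np.newaxis, :] + 1j * im[:, np.newaxis]
    z = np.zeros_like(C)
    counts = np.full(C.shape, max_iter)
    for i in range(max_iter):
        z = z*z + C
        # record the iteration at which each point first escapes
        escaped = (np.abs(z) >= 2) & (counts == max_iter)
        counts[escaped] = i
        z[np.abs(z) >= 2] = 2  # clamp escaped points to avoid overflow
    return counts

plt.imshow(mandelbrot_grid(-2, 1, -1, 1, 1000, 100), cmap='hot', extent=[-2, 1, -1, 1])
plt.show()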

Create a TensorFlow Dataset for a CNN from a local dataset

I have a big dataset of B/W images with two classes, where the name of the directory is the name of the class:
the directory SELECTION contains all images with label = selection;
the directory NEUTRAL contains all images with label = neutral.
I need to load all these images into a TensorFlow dataset to replace the MNIST dataset used in this tutorial.
I've tried to follow this guide and it looks good, but there are some problems that I don't know how to fix. Following the guide, I've arrived here:
from __future__ import absolute_import, division, print_function
import os
import pathlib
import IPython.display as display
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

np.set_printoptions(threshold=np.nan)
tf.enable_eager_execution()
tf.__version__
os.system('clear')

#### some tries for the SELECTION dataset ####
data_root = pathlib.Path('/Users/matteo/Desktop/DATASET_X/SELECTION/TRAIN_IMG')
all_image_paths = []
all_image_labels = []
for item in data_root.iterdir():
    item_tmp = str(item)
    if 'selection.png' in item_tmp:
        all_image_paths.append(str(item))
        all_image_labels.append(0)

image_count = len(all_image_paths)
label_names = ['selection', 'neutral']
label_to_index = dict((name, index) for index, name in enumerate(label_names))

img_path = all_image_paths[0]
img_raw = tf.read_file(img_path)
img_tensor = tf.image.decode_png(
    contents=img_raw,
    channels=1
)
print(img_tensor.numpy().min())
print(img_tensor.numpy().max())

#### it works fine till here ####
#### trying to make a function ####
#### problems from here ####
def load_and_decode_image(path):
    print('[LOG:load_and_decode_image]: ' + str(path))
    image = tf.read_file(path)
    image = tf.image.decode_png(
        contents=image,
        channels=3
    )
    return image

image_path = all_image_paths[0]
label = all_image_labels[0]
image = load_and_decode_image(image_path)
print('[LOG:image.shape]: ' + str(image.shape))

path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
print('shape: ', repr(path_ds.output_shapes))
print('type: ', path_ds.output_types)
print()
print('[LOG:path_ds]:' + str(path_ds))
If I load only one item it works, but when I try to do:
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)
and print path_ds.shape, it returns shape: TensorShape([]), so it seems that it doesn't work. If I try to continue following the tutorial with this block:
AUTOTUNE = tf.data.experimental.AUTOTUNE  # defined earlier in the tutorial
image_ds = path_ds.map(load_and_decode_image, num_parallel_calls=AUTOTUNE)
plt.figure(figsize=(8, 8))
for n, image in enumerate(image_ds.take(4)):
    print('[LOG:n, image]: ' + str(n) + ', ' + str(image))
    plt.subplot(2, 2, n+1)
    plt.imshow(image)
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.xlabel(' selection'.encode('utf-8'))
    plt.title(label_names[label].title())
plt.show()
it gives me the following error:
It's not possible to open '<string>': The file was not found (file:///Users/matteo/Documents/GitHub/Cnn_Genetic/cnn_genetic/<string>).
But the problem is that I don't know what this file is and why it goes looking for it. I don't need to plot my images, but I want to understand why it doesn't work. If I copy/paste the tutorial code I have the same problem, so I think there's a problem with the new tf version.
So... if anyone can tell me where I'm going wrong, I'd be very grateful.
Thanks for your time.
Your issue is that path_ds should be the image paths as strings, but you try to convert them to a list of tensors.
So to get the tensors you only need:
image_ds = all_image_paths.map(load_and_decode_image, num_parallel_calls=AUTOTUNE)
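For comparison, the pattern the TensorFlow loading-images tutorial itself uses (a sketch, assuming eager execution and the all_image_paths list and load_and_decode_image function from the question) builds the dataset from the path strings and then maps the decode function over it:

AUTOTUNE = tf.data.experimental.AUTOTUNE

# a dataset of path strings...
path_ds = tf.data.Dataset.from_tensor_slices(all_image_paths)

# ...mapped lazily to a dataset of decoded image tensors
image_ds = path_ds.map(load_and_decode_image, num_parallel_calls=AUTOTUNE)

for image in image_ds.take(1):
    print(image.shape)

Note that a Dataset printing TensorShape([]) for its element shape is expected here: each element is a scalar string tensor, not an empty dataset.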
