I'm a beginner in Python and found some code that I wanted to test, since nothing seems to work for me:
import numpy as np
import laspy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# reading las file and copy points
input_las = laspy.read("topography.las")
point_records = input_las.points.copy()
# getting scaling and offset parameters
las_scaleX = input_las.header.scale[0]
las_offsetX = input_las.header.offset[0]
las_scaleY = input_las.header.scale[1]
las_offsetY = input_las.header.offset[1]
las_scaleZ = input_las.header.scale[2]
las_offsetZ = input_las.header.offset[2]
# calculating coordinates
p_X = np.array((point_records['point']['X'] * las_scaleX) + las_offsetX)
p_Y = np.array((point_records['point']['Y'] * las_scaleY) + las_offsetY)
p_Z = np.array((point_records['point']['Z'] * las_scaleZ) + las_offsetZ)
# plotting points
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(p_X, p_Y, p_Z, "marker=o")
plt.show()
For the most part my IDE is not throwing any errors, but it says it is missing documentation for .copy, .points and so on.
Also when I run the code I get:
Traceback (most recent call last):
line 19, in <module>
p_X = np.array((point_records['point']['X'] * las_scaleX) + las_offsetX)
and:
line 185, in __getitem__
return self.array[item]
ValueError: no field of name point
What am I doing wrong?
The code I was trying to adapt: https://gis.stackexchange.com/questions/277317/visualizing-las-with-matplotlib
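If the installed laspy is version 2.x (an assumption, but laspy.read only exists there), the point records no longer expose a 'point' sub-field, which would explain the ValueError. A minimal sketch that reads the already-scaled coordinates directly:

# Minimal sketch, assuming laspy 2.x: las.x / las.y / las.z already apply the
# header scale and offset, so the manual arithmetic is not needed.
import laspy
import numpy as np
import matplotlib.pyplot as plt

las = laspy.read("topography.las")
p_X = np.asarray(las.x)
p_Y = np.asarray(las.y)
p_Z = np.asarray(las.z)

fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")   # requires matplotlib >= 3.2
ax.scatter(p_X, p_Y, p_Z, marker="o")        # marker is passed as a keyword argument
plt.show()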
I am trying to make a climate map in Python, which I am not used to, but I want to see whether it is handier than plotting in R. I am using the example from http://joehamman.com/2013/10/12/plotting-netCDF-data-with-Python/ with my data.
from netCDF4 import Dataset
import numpy as np
myfil = "xxxxx"
fh = Dataset(myfil, mode='r')
lons = fh.variables['lon'][:]
lats = fh.variables['lat'][:]
tmean = fh.variables['Tmean_ANN'][:1]
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
# Get some parameters for the Stereographic Projection
lon0 = lons.mean()
lats0 = lats.mean()
m = Basemap(width=5000000,height=3500000,
resolution='l',projection='stere',
lat_ts=60,lat_0=lats0,lon_0=lon0)
lon, lat = np.meshgrid(lons, lats, sparse=True)
xi, yi = m(lon, lat)
# Plot Data
print(xi.shape)
print(yi.shape)
print(tmean.shape)
results
(1, 1142761)
(1142761, 1)
(1, 1069, 1069)
Trying to run this line
cs = m.contour(xi,yi, np.squeeze(tmean))
I got the error
cs = m.contour(xi,yi, np.squeeze(tmean))
Traceback (most recent call last):
File "<ipython-input-37-8be9f03a0e45>", line 1, in <module>
cs = m.contour(xi,yi, np.squeeze(tmean))
File "C:\ProgramData\Anaconda3\lib\site-packages\mpl_toolkits\basemap\__init__.py", line 546, in with_transform
return plotfunc(self,x,y,data,*args,**kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\mpl_toolkits\basemap\__init__.py", line 3566, in contour
np.logical_or(np.greater(x,self.xmax+epsx),np.greater(y,self.ymax+epsy))
MemoryError
Any idea why I get this? My hypothesis is that the dimensions of xi and yi are not compatible with tmean, and that this is what raises the error. The np.squeeze call works on the tmean data outside the m.contour function, but I have not been able to solve this for a while.
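One thing the printed shapes suggest (1142761 = 1069 * 1069) is that lons and lats already contain one value per grid cell rather than the 1069 axis values, so np.meshgrid builds a 1142761 x 1142761 grid when the arrays broadcast, which alone would explain the MemoryError. A sketch under that assumption, reshaping instead of meshgridding (row-major order is also an assumption):

import numpy as np

# assumption: lons/lats are flat arrays with one entry per cell of the 1069 x 1069 grid
ny, nx = np.squeeze(tmean).shape
lon2d = np.asarray(lons).reshape(ny, nx)
lat2d = np.asarray(lats).reshape(ny, nx)

xi, yi = m(lon2d, lat2d)          # all three arrays are now (1069, 1069)
cs = m.contour(xi, yi, np.squeeze(tmean))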
I have to plot a two-variable function to check model degeneracy. My code, following a tutorial, looks like this:
from numpy import exp,arange
from pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show
import math
# the function that I'm going to plot
def z_func(x1,x2):
    L = exp(-(1-x1)**2 - 100((x2-x1**2)**2))
    return L
x1 = arange(-5.0,5.0,0.1)
x2 = arange(-5.0,5.0,0.1)
X1,X2 = meshgrid(x1, x2) # grid of point
Z = z_func(X1, X2) # evaluation of the function on the grid
im = imshow(Z,cmap=cm.RdBu) # drawing the function
# adding the Contour lines with labels
cset = contour(Z,arange(-1,1.5,0.2),linewidths=2,cmap=cm.Set2)
clabel(cset,inline=True,fmt='%1.1f',fontsize=10)
colorbar(im) # adding the colobar on the right
# latex fashion title
title('$z=exp(-(1-x1)^2 - 100(x2-x1^2)^2)$')
show()
and I am getting the following error:
File "multi.py", line 14, in <module>
Z = z_func(X1, X2) # evaluation of the function on the grid
File "multi.py", line 8, in z_func
L = exp(-(1-x1)**2 - 100((x2-x1**2)**2))
TypeError: 'int' object is not callable
I think there is a problem with how I define my function. How do I fix this error and plot the function?
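The traceback points at 100((x2-x1**2)**2): Python parses 100(...) as calling the integer 100, hence the TypeError. A sketch of the function with the multiplication written out explicitly:

from numpy import exp

def z_func(x1, x2):
    # 100*(...) instead of 100(...): the missing '*' is what raised the TypeError
    return exp(-(1 - x1)**2 - 100*((x2 - x1**2)**2))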
I want to plot the two solutions of a quadratic equation as a function of a parameter (the function coeff(t)). I am using numpy.roots (I am sure that all roots are real in this case), and I am trying to invoke it from within pyplot.plot as below:
import numpy as np
import matplotlib.pyplot as plt
r = 3.74
def coeff(pp):
    return np.array([pp-1, r+1-0.5*pp, -r])
def sroot(t):
    return np.roots(coeff(t))
a = np.linspace(0,0.9,100)
fig = plt.figure()
plt.plot(a,sroot(a)[0,:])
plt.plot(a,sroot(a)[1,:])
plt.show()
I get error message:
File "quest.py", line 18, in <module>
plt.plot(a,sroot(a)[0,:])
File "quest.py", line 10, in sroot
return np.roots(coeff(t))
File "/usr/lib64/python2.7/site-packages/numpy/lib/polynomial.py", line 218, in roots
p = p.astype(float)
I understand that numpy.roots takes only a single list of coefficients and cannot handle a row of a 3 x len(a) array. Is there a way to do this in one line, preferably inside pyplot.plot? I would like to avoid using a loop.
This is because you build all of your coefficients at once and then call the numpy roots solver on all of them. np.roots only accepts a 1-D array and solves a single polynomial. Here is a script that does what you want:
import numpy as np
import matplotlib.pyplot as plt
# Parameters
r = 3.74
T = np.linspace(0.0,0.9,100)
# Coefficients
C = np.zeros((len(T),3))
C[:,0] = T-1
C[:,1] = r + 1 - 0.5*T
C[:,2] = -r   # constant term is -r, to match coeff(pp) in the question
# Roots
R = np.zeros((len(T),2))
for i in range(len(T)):
    R[i] = np.roots(C[i])
# Plot
fig = plt.figure()
plt.plot(T,R[:,0])
plt.plot(T,R[:,1])
plt.show()
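If you really want to avoid the explicit loop, here is a vectorized sketch using the quadratic formula directly (valid here because the leading coefficient T-1 never vanishes on [0, 0.9] and the question states that the roots are real):

import numpy as np
import matplotlib.pyplot as plt

r = 3.74
T = np.linspace(0.0, 0.9, 100)
a, b, c = T - 1, r + 1 - 0.5*T, -r    # coefficient arrays (c broadcasts as a scalar)
disc = np.sqrt(b**2 - 4*a*c)          # real by assumption
plt.plot(T, (-b + disc) / (2*a))
plt.plot(T, (-b - disc) / (2*a))
plt.show()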
I'm trying to initialize a 2D surface in PyCUDA and fill it with values from a NumPy 2D array. The idea, as I understand it, is to:
open drv.ArrayDescriptor,
create drv.Array using this descriptor,
copy data from NumPy array with drv.Memcpy2D,
do set_array for SurfaceReference.
But I still get a pycuda._driver.LogicError at the last step. A minimal example of what I'm doing:
import numpy as np
import pycuda.driver as drv
import pycuda.autoinit
from pycuda.compiler import SourceModule
mod = SourceModule("surface<void, cudaSurfaceType2D> fld_srf;")
def numpy2d_to_array(np_array):
    h, w = np_array.shape
    descr = drv.ArrayDescriptor()
    descr.width = w
    descr.height = h
    descr.format = drv.dtype_to_array_format(np_array.dtype)
    descr.num_channels = 1
    descr.flags = 0
    device_array = drv.Array(descr)
    copy = drv.Memcpy2D()
    copy.set_src_host(np_array)
    copy.set_dst_array(device_array)
    copy.width_in_bytes = copy.src_pitch = np_array.strides[0]
    copy.src_height = copy.height = h
    copy(aligned=True)
    return device_array
fld = np.random.random_integers(-30, 30, (1920, 1080)).astype(np.int32)
srf = mod.get_surfref('fld_srf')
srf_arr = numpy2d_to_array(fld.copy())
srf.set_array(srf_arr)
The code above throws the following exception:
Traceback (most recent call last):
File "./testsurface.py", line 30, in <module>
srf.set_array(srf_arr)
pycuda._driver.LogicError: cuSurfRefSetArray failed: invalid value
Any ideas how to do this correctly? Or at least why this error appears?
It might have something to do with the flags: in the 3D case, you have to set
descr.flags = drv.array3d_flags.SURFACE_LDST
to allow binding to the surface, according to this.
I can't find the 2D equivalent for pycuda, though.
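Following that hint, here is an untested sketch of a possible 2D workaround: the surface load/store flag is only exposed on the 3D descriptor, so create the array through drv.ArrayDescriptor3D with depth=0 and SURFACE_LDST, keeping the rest of the question's copy logic unchanged (the attribute names are assumed to match pycuda.driver):

import pycuda.driver as drv

def numpy2d_to_surface_array(np_array):
    h, w = np_array.shape
    descr = drv.ArrayDescriptor3D()
    descr.width = w
    descr.height = h
    descr.depth = 0                       # depth 0 = a 2D array in the driver API
    descr.format = drv.dtype_to_array_format(np_array.dtype)
    descr.num_channels = 1
    descr.flags = drv.array3d_flags.SURFACE_LDST   # allow surface binding
    device_array = drv.Array(descr)
    copy = drv.Memcpy2D()
    copy.set_src_host(np_array)
    copy.set_dst_array(device_array)
    copy.width_in_bytes = copy.src_pitch = np_array.strides[0]
    copy.src_height = copy.height = h
    copy(aligned=True)
    return device_array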
I ran the Python code below, which is the "Plotting Maps: Visualizing Haiti Earthquake Crisis Data" example from the book Python for Data Analysis, pages 242-246.
The code is supposed to create a plot map of Haiti, but I got the error below:
Traceback (most recent call last):
File "Haiti.py", line 74, in <module>
x, y = m(cat_data.LONGITUDE, cat_data.LATITUDE)
File "/usr/local/lib/python2.7/site-packages/mpl_toolkits/basemap/__init__.py", line 1148, in __call__
xout,yout = self.projtran(x,y,inverse=inverse)
File "/usr/local/lib/python2.7/site-packages/mpl_toolkits/basemap/proj.py", line 286, in __call__
outx,outy = self._proj4(x, y, inverse=inverse)
File "/usr/local/lib/python2.7/site-packages/mpl_toolkits/basemap/pyproj.py", line 388, in __call__
_proj.Proj._fwd(self, inx, iny, radians=radians, errcheck=errcheck)
File "_proj.pyx", line 122, in _proj.Proj._fwd (src/_proj.c:1571)
RuntimeError
I checked whether the mpl_toolkits.basemap and proj modules were installed correctly on my machine. Basemap was installed from source as instructed and proj was installed with Homebrew, and they look fine to me.
If you have basemap and proj installed, does this code run successfully? If not, do you think it is a module installation issue, the code itself, or something else?
Haiti.csv file can be downloaded from https://github.com/pydata/pydata-book/raw/master/ch08/Haiti.csv
import pandas as pd
import numpy as np
from pandas import DataFrame
data = pd.read_csv('Haiti.csv')
data = data[(data.LATITUDE > 18) & (data.LATITUDE < 20) &
(data.LONGITUDE > -75) & (data.LONGITUDE < -70)
& data.CATEGORY.notnull()]
def to_cat_list(catstr):
    stripped = (x.strip() for x in catstr.split(','))
    return [x for x in stripped if x]
def get_all_categories(cat_series):
    cat_sets = (set(to_cat_list(x)) for x in cat_series)
    return sorted(set.union(*cat_sets))
def get_english(cat):
    code, names = cat.split('.')
    if '|' in names:
        names = names.split(' | ')[1]
    return code, names.strip()
all_cats = get_all_categories(data.CATEGORY)
english_mapping = dict(get_english(x) for x in all_cats)
def get_code(seq):
    return [x.split('.')[0] for x in seq if x]
all_codes = get_code(all_cats)
code_index = pd.Index(np.unique(all_codes))
dummy_frame = DataFrame(np.zeros((len(data), len(code_index))),
index=data.index, columns=code_index)
for row, cat in zip(data.index, data.CATEGORY):
    codes = get_code(to_cat_list(cat))
    dummy_frame.ix[row, codes] = 1
data = data.join(dummy_frame.add_prefix('category_'))
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
def basic_haiti_map(ax=None, lllat=17.25, urlat=20.25, lllon=-75, urlon=-71):
    # create polar stereographic Basemap instance.
    m = Basemap(ax=ax, projection='stere',
                lon_0=(urlon + lllon) / 2,
                lat_0=(urlat + lllat) / 2,
                llcrnrlat=lllat, urcrnrlat=urlat,
                llcrnrlon=lllon, urcrnrlon=urlon,
                resolution='f')
    # draw coastlines, state and country boundaries, edge of map.
    m.drawcoastlines()
    m.drawstates()
    m.drawcountries()
    return m
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 10))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
to_plot = ['2a', '1', '3c', '7a']
lllat=17.25; urlat=20.25; lllon=-75; urlon=-71
for code, ax in zip(to_plot, axes.flat):
    m = basic_haiti_map(ax, lllat=lllat, urlat=urlat,
                        lllon=lllon, urlon=urlon)
    cat_data = data[data['category_%s' % code] == 1]
    # compute map proj coordinates.
    print cat_data.LONGITUDE, cat_data.LATITUDE
    x, y = m(cat_data.LONGITUDE, cat_data.LATITUDE)
    m.plot(x, y, 'k.', alpha=0.5)
    ax.set_title('%s: %s' % (code, english_mapping[code]))
This is resolved by changing m(cat_data.LONGITUDE, cat_data.LATITUDE) to m(cat_data.LONGITUDE.values, cat_data.LATITUDE.values), thanks to Alex Messina's finding.
After a little further study, I found that since pandas v0.13.0 (released on 31 Dec 2013), Series data from a DataFrame (which now derives from NDFrame) must be passed via .values to a Cython/C function such as basemap/proj, as described below.
Quote from github commit log of pandas:
+.. warning::
+
+ In 0.13.0 since ``Series`` has internaly been refactored to no longer sub-class ``ndarray``
+ but instead subclass ``NDFrame``, you can **not pass** a ``Series`` directly as a ``ndarray`` typed parameter
+ to a cython function. Instead pass the actual ``ndarray`` using the ``.values`` attribute of the Series.
+
+ Prior to 0.13.0
+
+ .. code-block:: python
+
+ apply_integrate_f(df['a'], df['b'], df['N'])
+
+ Use ``.values`` to get the underlying ``ndarray``
+
+ .. code-block:: python
+
+ apply_integrate_f(df['a'].values, df['b'].values, df['N'].values)
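For reference, the changed line in the plotting loop above, directly illustrating the fix described: pass the underlying NumPy arrays rather than the pandas Series.

x, y = m(cat_data.LONGITUDE.values, cat_data.LATITUDE.values)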
You can find the corrected version of the example code here.