I am working with cgkit in my project, and I have run into a situation where I must use only render.py and change the background of the picture.
I know how to do this with viewer.py, but setting the background in Globals does not change the background when rendering with render.py.
Can anyone help me out?
# Use the previously rendered texture map
from time import sleep
import Image
from cgkit.cgtypes import vec3, mat4
from cgkit.cmds import load, worldObject
from cgkit.glmaterial import GLMaterial, GLTexture
from cgkit.scene import getScene
from cgkit.sceneglobals import Globals
from OpenGL.GL import glReadPixels
from pyglet.gl import GL_RGBA, GL_UNSIGNED_BYTE, GL_DECAL
Globals(
    resolution = (512, 512),
    up = (0, 1, 0),
    background = (1, 1, 1, 1),
    output = "kishoreGoodBoy.png",
)

load("singleSofa.obj")
model = worldObject("small_sofa_dark_grey")

mat = GLMaterial(
    diffuse = (0, 1, 0),
    texture = GLTexture(
        "final1.png",
        mode = GL_DECAL,
        transform = mat4().scaling(vec3(1, -1, 1))
    )
)
model.setMaterial(mat)
As per the code, I have a texture file, so I am directing the output to a .png file and it saves, but it comes out with a black background. I need to change that background colour.
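If render.py simply ignores the Globals background, one fallback I could use (just a sketch with PIL, not a cgkit feature, assuming the rendered PNG keeps an alpha channel where the background is) would be to composite the output onto a white canvas afterwards:
import Image  # classic PIL, as imported above; newer installs use "from PIL import Image"

# Assumption: the rendered file keeps transparency where the background is.
img = Image.open("kishoreGoodBoy.png").convert("RGBA")
bg = Image.new("RGBA", img.size, (255, 255, 255, 255))  # solid white canvas
bg.paste(img, mask=img.split()[3])  # use the alpha channel as the paste mask
bg.convert("RGB").save("kishoreGoodBoy_white.png")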
This is my first Python project, and a piece of it involves electronic signatures. I've been trying for a while now to find a way to save whatever image is drawn as a file. The problem is that I can't seem to get any usable data out of the canvas to even start manipulating it in Visual Studio Code.
import streamlit as st
from streamlit_drawable_canvas import st_canvas

def signaturefunk():
    st.write("Signature")
    canvas_result = st_canvas(
        fill_color="#eee",
        stroke_width=5,
        stroke_color="black",
        background_color="",
        update_streamlit=False,
        height=200,
        width=700,
        drawing_mode="freedraw",
    )
    im = canvas_result.image_data
    print(im)
I always get back None in the terminal no matter what I draw on the canvas. Maybe canvas_result.image_data just doesn't work how I expect it to. However, if that's the case I don't even know where to go from here.
First of all, canvas_result.image_data does return the image on the canvas; you can test this with this simple code:
import streamlit as st
from streamlit_drawable_canvas import st_canvas

st.title("Signature")

def signaturefunk():
    st.write("Canvas")
    canvas_result = st_canvas(
        fill_color="#eee",
        stroke_width=5,
        stroke_color="black",
        background_color="white",
        update_streamlit=False,
        height=200,
        width=700,
        drawing_mode="freedraw",
    )
    st.write("Image of the canvas")
    if canvas_result.image_data is not None:
        st.image(canvas_result.image_data)

signaturefunk()
Now, in order to save the image, you can use OpenCV:
import streamlit as st
from streamlit_drawable_canvas import st_canvas
import cv2

st.title("Signature")

def signaturefunk():
    canvas_result = st_canvas(
        fill_color="#eee",
        stroke_width=5,
        stroke_color="black",
        background_color="white",
        update_streamlit=False,
        height=200,
        width=700,
        drawing_mode="freedraw",
    )
    # save image
    if canvas_result.image_data is not None:
        cv2.imwrite("img.jpg", canvas_result.image_data)
    else:
        st.write("no image to save")

signaturefunk()
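One caveat worth adding: image_data arrives in RGB(A) channel order, while cv2.imwrite expects BGR, so the saved colours can come out swapped. Assuming image_data is an RGBA NumPy array, a sketch of the conversion:
import cv2
import numpy as np

if canvas_result.image_data is not None:
    rgba = canvas_result.image_data.astype(np.uint8)  # assumed RGBA array
    bgr = cv2.cvtColor(rgba, cv2.COLOR_RGBA2BGR)      # reorder channels for OpenCV
    cv2.imwrite("img.jpg", bgr)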
I am trying to plot live data that is streamed from a CMOS camera in python. As a little side note: I want to use this to stabilize the pointing of a laser in the lab.
I am using ImageItem from the pyqtgraph module and my script is basically a modification of the ImageItem example (see below).
However, when I execute the script I immediately get a delay of about 3 seconds, and the RAM usage keeps increasing until the process gets killed. The resolution of the camera is 3672x5496 pixels, which is quite high, and it seems like the pictures are queuing up until the process runs out of memory (my interpretation of the problem).
I tried clearing the ImageItem every time it is updated, but that does not change anything.
What am I doing wrong? Is it even possible to get this working live, without running out of memory, at this resolution? I would already be happy with about 1-2 frames per second.
## Add path to library (just for examples; you do not need this)
#import initExample
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph as pg
import pyqtgraph.ptime as ptime
import gxipy as gx
import sys
## Check for connected devices
device_manager = gx.DeviceManager()
dev_num, dev_info_list = device_manager.update_device_list()
if dev_num == 0:
    sys.exit(1)
## Connect to the camera
strSN = dev_info_list[0].get("sn")
cam = device_manager.open_device_by_sn(strSN)
## start the data stream
cam.stream_on()
app = QtGui.QApplication([])
## Create window with GraphicsView widget
win = pg.GraphicsLayoutWidget()
win.show() ## show widget alone in its own window
win.setWindowTitle('pyqtgraph example: ImageItem')
view = win.addViewBox()
## lock the aspect ratio so pixels are always square
view.setAspectLocked(True)
## Create image item
img = pg.ImageItem(border='w')
view.addItem(img)
## Set initial view bounds
view.setRange(QtCore.QRectF(0, 0, 3672, 5496))
## Keep track of the last update time
updateTime = ptime.time()

def updateData():
    global img, updateTime
    ## Get a new image from the camera
    raw_image = cam.data_stream[0].get_image()
    data = raw_image.get_numpy_array()
    if data is None:
        sys.exit(1)
    ## Display the image
    img.setImage(data)
    QtCore.QTimer.singleShot(1, updateData)

updateData()
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
    cam.stream_off()
    cam.close_device()
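For reference, two mitigations I am considering, in line with my queuing interpretation above (just a sketch reusing the names cam, img, and QtCore from my script): poll at a fixed, slower rate with a repeating QTimer instead of singleShot(1, ...), and downsample the frame before handing it to ImageItem.
## Sketch only: poll at a fixed, slower rate and downsample the frame
timer = QtCore.QTimer()

def updateDownsampled():
    raw_image = cam.data_stream[0].get_image()
    if raw_image is None:
        return
    data = raw_image.get_numpy_array()
    if data is None:
        return
    ## Show every 4th pixel; slicing is a cheap NumPy view, not a copy
    img.setImage(data[::4, ::4])

timer.timeout.connect(updateDownsampled)
timer.start(500)  # roughly 2 frames per second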
I have trouble viewing color images from R200 realsense camera using the python-opencv interface.
The window is blank when I run this script.
When I comment out 'cv2.namedWindow("Image window", 1)', it shows the first image.
import roslib
import sys
import rospy
import cv2
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
from rospy.numpy_msg import numpy_msg
#from rospy_tutorials.msg import Floats
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import math
#import matplotlib.pyplot as plt
#import caffe
import socket
#from sklearn import datasets
import subprocess
import message_filters
import time
#####################
import os.path
class image_converter:
    # Initializing variables, publishers and subscribers
    def __init__(self):
        print 'show window'
        cv2.namedWindow("Image window", 1)
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/camera/rgb/image_color", Image, self.callback)

    # The main callback that processes the color and depth data.
    def callback(self, color):
        start = time.time()
        # acquiring color and depth data (from ROS-python tutorial)
        try:
            frame = self.bridge.imgmsg_to_cv2(color, "bgr8")
        except CvBridgeError, e:
            print e
        frame = np.array(frame, dtype=np.uint8)
        cv2.imshow("Image window", frame)
        print 'test'
        cv2.waitKey(0)

def main(args):
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main(sys.argv)
Are you sure about the namedWindow flags value? I see you are using a plain int there, but in the docs you can find the correct flags for both C++ and Python.
I usually use OpenCV in C++, so I haven't tested this, but it should work like this:
cv2.namedWindow("Display window", cv2.WINDOW_AUTOSIZE)
You could also define a show function like this one, if it is convenient for your needs:
def show(image, name="Image"):
    '''
    Routine to display the image.
    '''
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.imshow(name, image)
    cv2.waitKey(0)
The function is very trivial, but I didn't code it myself, so this is the source of this second code snippet.
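On a side note (my own observation, not tested with your setup): the cv2.waitKey(0) inside your callback blocks until a key is pressed, so the window cannot refresh with new frames. The usual pattern inside an image callback looks like this sketch:
def callback(self, color):
    frame = self.bridge.imgmsg_to_cv2(color, "bgr8")
    cv2.imshow("Image window", frame)
    # waitKey(1) pumps the HighGUI event loop for a millisecond and
    # returns, instead of blocking the callback like waitKey(0) does
    cv2.waitKey(1)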
I'm working on writing a script that runs through data and creates a graph. That part is easy and done. Unfortunately, the graphing modules I am using only create the graphs in PDF format, but I would like to have the graphs displayed in an interactive window.
Is there any way to either add a graph created with PyX into a Tkinter window, or load the PDF into a frame or something?
You need to convert the PyX output into a bitmap to include it in your Tkinter application. While there is no convenience method to get the PyX output as a PIL image directly, you can use the pipeGS method to prepare the bitmap and load it using PIL. Here is a rather minimal example:
import tempfile, os
from pyx import *
import Tkinter
import Image, ImageTk
# first we create some pyx graphics
c = canvas.canvas()
c.text(0, 0, "Hello, world!")
c.stroke(path.line(0, 0, 2, 0))
# now we use pipeGS (ghostscript) to create a bitmap graphics
fd, fname = tempfile.mkstemp()
f = os.fdopen(fd, "wb")
f.close()
c.pipeGS(fname, device="pngalpha", resolution=100)
# and load with PIL
i = Image.open(fname)
i.load()
# now we can already remove the temporary file
os.unlink(fname)
# finally we can use this image in Tkinter
root = Tkinter.Tk()
root.geometry('%dx%d' % (i.size[0],i.size[1]))
tkpi = ImageTk.PhotoImage(i)
label_image = Tkinter.Label(root, image=tkpi)
label_image.place(x=0,y=0,width=i.size[0],height=i.size[1])
root.mainloop()
In GTK, how can I scale an image? Right now I load images with PIL and scale them beforehand, but is there a way to do it with GTK?
Load the image from a file using gtk.gdk.Pixbuf:
import gtk
pixbuf = gtk.gdk.pixbuf_new_from_file('/path/to/the/image.png')
then scale it:
pixbuf = pixbuf.scale_simple(width, height, gtk.gdk.INTERP_BILINEAR)
Then, if you want to use it in a gtk.Image, create the widget and set the image from the pixbuf.
image = gtk.Image()
image.set_from_pixbuf(pixbuf)
Or maybe in a direct way:
image = gtk.image_new_from_pixbuf(pixbuf)
It might be more effective to simply scale the images while loading them. I especially think so since I use these functions to load 96x96 thumbnails from sometimes very large JPEGs, and it is still very fast:
gtk.gdk.pixbuf_new_from_file_at_scale(..)
gtk.gdk.pixbuf_new_from_file_at_size(..)
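For example (the path is a placeholder; the final True in the _at_scale variant preserves the aspect ratio):
import gtk

# Fit inside 96x96 while keeping the aspect ratio
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size('/path/to/image.jpg', 96, 96)

# Or request an exact size; the final True preserves the aspect ratio here too
pixbuf = gtk.gdk.pixbuf_new_from_file_at_scale('/path/to/image.jpg', 96, 96, True)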
Scaling an image from a URL:
import pygtk
pygtk.require('2.0')
import gtk
import urllib2

class MainWin:
    def destroy(self, widget, data=None):
        print "destroy signal occurred"
        gtk.main_quit()

    def __init__(self):
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.connect("destroy", self.destroy)
        self.window.set_border_width(10)
        self.image = gtk.Image()
        self.response = urllib2.urlopen(
            'http://192.168.1.11/video/1024x768.jpeg')
        self.loader = gtk.gdk.PixbufLoader()
        self.loader.set_size(200, 100)
        #### works but throwing: glib.GError: Unrecognized image file format
        self.loader.write(self.response.read())
        self.loader.close()
        self.image.set_from_pixbuf(self.loader.get_pixbuf())
        self.window.add(self.image)
        self.image.show()
        self.window.show()

    def main(self):
        gtk.main()

if __name__ == "__main__":
    MainWin().main()
EDIT (working fix):
try:
    self.loader = gtk.gdk.PixbufLoader()
    self.loader.set_size(200, 100)
    # ignore this:
    # glib.GError: Unrecognized image file format
    self.loader.write(self.response.read())
    self.loader.close()
    self.image.set_from_pixbuf(self.loader.get_pixbuf())
except Exception, err:
    print err
Just FYI, here is a solution in GJS (JavaScript) that scales the image based on window size (implying you are implementing this in a class that extends GtkWindow).
let [width, height] = this.get_size(); // Get size of GtkWindow
this._image = new GtkImage();
let pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(filePath,width,height,true);
this._image.set_from_pixbuf(pixbuf);
For anyone doing this in C, this is how it's done:
//Assuming you already loaded the file and saved the filename
//GTK_IMAGE(image) is the container used to display the image
GdkPixbuf *pb;
pb = gdk_pixbuf_new_from_file(file_name, NULL);
pb = gdk_pixbuf_scale_simple(pb,700,700,GDK_INTERP_BILINEAR);
gtk_image_set_from_pixbuf(GTK_IMAGE(image), pb);
Actually, when used with a timer event, gdk_pixbuf_scale_simple(pb, 700, 700, GDK_INTERP_BILINEAR) causes a memory leak: if we monitor the task manager, the memory usage keeps increasing until the process is killed. How can that be solved?