public void updatePixels() { // ignore updatePixels(0, 0, pixelWidth, pixelHeight); }
// Build a 7x5 black-and-white sprite pixel by pixel, then draw it
// enlarged 10x with hard (non-interpolated) pixel edges.
PImage sprite = new PImage(7, 5, RGB);
sprite.pixels = new int[] {
  color(255), color(0),   color(255), color(255), color(255), color(0),   color(255),
  color(0),   color(255), color(0),   color(255), color(0),   color(255), color(0),
  color(255), color(255), color(255), color(255), color(255), color(255), color(255),
  color(0),   color(255), color(0),   color(255), color(0),   color(255), color(0),
  color(255), color(0),   color(0),   color(0),   color(0),   color(0),   color(255)
};
sprite.updatePixels();
noSmooth();                   // keep the pixels blocky when scaled up
image(sprite, 0, 0, 70, 50);  // 10x enlargement of the 7x5 sprite
// Silently ignore coordinates outside the pixel buffer.
if (x < 0 || y < 0 || x >= pixelWidth || y >= pixelHeight) {
  return;
}
pixels[y * pixelWidth + x] = c;
updatePixels(x, y, 1, 1);  // slow... flags a 1x1 dirty region per call
/** * Internal function to actually handle setting a block of pixels that * has already been properly cropped from the image to a valid region. */ protected void setImpl(PImage sourceImage, int sourceX, int sourceY, int sourceWidth, int sourceHeight, int targetX, int targetY) { int sourceOffset = sourceY * sourceImage.pixelWidth + sourceX; int targetOffset = targetY * pixelWidth + targetX; for (int y = sourceY; y < sourceY + sourceHeight; y++) { System.arraycopy(sourceImage.pixels, sourceOffset, pixels, targetOffset, sourceWidth); sourceOffset += sourceImage.pixelWidth; targetOffset += pixelWidth; } //updatePixelsImpl(targetX, targetY, sourceWidth, sourceHeight); updatePixels(targetX, targetY, sourceWidth, sourceHeight); }
/** * Resize this image to a new width and height. * Use 0 for wide or high to make that dimension scale proportionally. */ public void resize(int w, int h) { // ignore if (w <= 0 && h <= 0) { throw new IllegalArgumentException("width or height must be > 0 for resize"); } if (w == 0) { // Use height to determine relative size float diff = (float) h / (float) height; w = (int) (width * diff); } else if (h == 0) { // Use the width to determine relative size float diff = (float) w / (float) width; h = (int) (height * diff); } bitmap = Bitmap.createScaledBitmap(bitmap, w, h, true); this.width = w; this.height = h; // Mark the pixels array as altered updatePixels(); }
// Commit the changes made to myUserImage.pixels back to the image.
myUserImage.updatePixels();
PImage img;

void setup() {
  colorMode(HSB, 100);
  img = loadImage("img.png");
  size(img.width, img.height);
  // NOTE(review): `sat` holds a full packed color value, not a 0-100
  // saturation component — confirm this is intentional before reusing.
  color sat = color(0, 0, 0);
  img.loadPixels();
  // Rewrite every pixel keeping its hue and brightness but forcing
  // the saturation value above.
  for (int i = 0; i < width * height; i++) {
    img.pixels[i] = color(hue(img.pixels[i]), sat, brightness(img.pixels[i]));
  }
  img.updatePixels();
  image(img, 0, 0);
}
/**
 * On 's'/'S', rasterizes every note into a posX-by-specSize image
 * (one colored pixel per note) and saves it as outputImage.png.
 */
void keyPressed() {
  if (key == 's' || key == 'S') {
    PImage img = createImage(posX, specSize, RGB);
    // Fix: loadPixels() must be called before writing to img.pixels —
    // Processing only guarantees the array reflects the image after this call.
    img.loadPixels();
    for (int i = 0; i < notes.size(); i++) {
      int x = notes.get(i).getX();
      int y = notes.get(i).getY();
      int loc = x + y * posX;  // row-major index into the pixel array
      // Guard against notes that fall outside the image bounds instead of
      // crashing the sketch with an ArrayIndexOutOfBoundsException.
      if (loc >= 0 && loc < img.pixels.length) {
        img.pixels[loc] = color(notes.get(i).getR(), notes.get(i).getG(), notes.get(i).getB());
      }
    }
    img.updatePixels();
    img.save("outputImage.png");
  }
}
PImage img; void setup() { // this is run once. size(600, 400); img=loadImage("http://c.tadst.com/gfx/600x400/int-mountain-day.jpg?1"); img.loadPixels(); int dimension = (img.width*img.height); for (int i=0; i < dimension; i+=2) { img.pixels[i] = color(0, 0, 0); } img.updatePixels(); } void draw() { // this is run repeatedly. It only draws the image again and again. image(img, 0, 0); }
/** * @param maskArray array of integers used as the alpha channel, needs to be * the same length as the image's pixel array. */ public void mask(int maskArray[]) { // ignore loadPixels(); // don't execute if mask image is different size if (maskArray.length != pixels.length) { throw new IllegalArgumentException("mask() can only be used with an image that's the same size."); } for (int i = 0; i < pixels.length; i++) { pixels[i] = ((maskArray[i] & 0xff) << 24) | (pixels[i] & 0xffffff); } format = ARGB; updatePixels(); }
/** * Set alpha channel for an image. Black colors in the source * image will make the destination image completely transparent, * and white will make things fully opaque. Gray values will * be in-between steps. * <P> * Strictly speaking the "blue" value from the source image is * used as the alpha color. For a fully grayscale image, this * is correct, but for a color image it's not 100% accurate. * For a more accurate conversion, first use filter(GRAY) * which will make the image into a "correct" grayscake by * performing a proper luminance-based conversion. */ public void mask(int alpha[]) { loadPixels(); // don't execute if mask image is different size if (alpha.length != pixels.length) { throw new RuntimeException("The PImage used with mask() must be " + "the same size as the applet."); } for (int i = 0; i < pixels.length; i++) { pixels[i] = ((alpha[i] & 0xff) << 24) | (pixels[i] & 0xffffff); } format = ARGB; updatePixels(); }
scene.updatePixels(); // we've used the pixels array, so update the image at the end
import dLibs.freenect.toolbox.*;
import dLibs.freenect.constants.*;
import dLibs.freenect.interfaces.*;
import dLibs.freenect.*;

Kinect kinect;                 // main kinect-object
KinectFrameDepth kinectDepth;  // depth frame source
PImage depthFrame;

void setup() {
  size(640, 480);
  kinect = new Kinect(0);
  kinectDepth = new KinectFrameDepth(DEPTH_FORMAT._11BIT_);  // create a depth instance
  kinectDepth.connect(kinect);  // connect the created depth instance to the main kinect
  depthFrame = createImage(DEPTH_FORMAT._11BIT_.getWidth(),
                           DEPTH_FORMAT._11BIT_.getHeight(), RGB);
}

void draw() {
  assignPixels(depthFrame, kinectDepth);
  image(depthFrame, 0, 0);
}

// Pulls the device's current frame into img.
// NOTE(review): this swaps img.pixels for the device's own array instead of
// copying values — confirm the library hands back a fresh buffer each frame.
void assignPixels(PImage img, Pixelable kinectDev) {
  img.loadPixels();
  img.pixels = kinectDev.getPixels();
  img.updatePixels();
}

void dispose() {
  Kinect.shutDown();
  super.dispose();
}
/** * * Deprecated */ // static public void PImageToIplImage(PImage src, IplImage dst) { // dst.copyFrom((BufferedImage) src.getImage()); // } public static void PImageToIplImage2(opencv_core.IplImage img, boolean RGB, PImage ret) { ByteBuffer buff = img.getByteBuffer(); ret.loadPixels(); if (RGB) { for (int i = 0; i < img.width() * img.height(); i++) { int offset = i * 3; ret.pixels[i] = (buff.get(offset) & 255) << 16 | (buff.get(offset + 1) & 255) << 8 | (buff.get(offset + 2) & 255); } } else { for (int i = 0; i < img.width() * img.height(); i++) { int offset = i * 3; ret.pixels[i] = (buff.get(offset + 2) & 255) << 16 | (buff.get(offset + 1) & 255) << 8 | (buff.get(offset) & 255); } } ret.updatePixels(); }
public static void createAnaglyph(PImage imgL, PImage imgR, PImage imgOut) { imgL.loadPixels(); imgR.loadPixels(); imgOut.loadPixels(); int[] pL = imgL.pixels; int[] pR = imgR.pixels; int[] pO = imgOut.pixels; for (int i = 0; i < pL.length; i++) { pO[i] = (pR[i] >> 16) << 16 | (pL[i] >> 8) & 255 << 8 | pL[i] & 255; // pO[i] = pL[i]; } imgOut.updatePixels(); // imgL.updatePixels(); }
// Flag mapImg's pixel array as modified so subsequent draws use the new data.
mapImg.updatePixels();
// NOTE(review): this line is garbled extraction residue — two fragments fused
// together, with the inline comment swallowing `who.setModified();` and an
// unbalanced `(!tint && cash.tinted)) {` condition. Recover the original
// statements from version control before editing; do not treat as-is as code.
who.updatePixels(); // mark the whole thing for update who.setModified(); (!tint && cash.tinted)) { who.updatePixels();
/** * Simple visualization * * @param depth * @param color * @param skip * @return */ public PImage update(IplImage depth, IplImage color, int skip) { updateRawDepth(depth); if (color != null) { updateRawColor(color); } depthData.clear(); depthData.timeStamp = papplet.millis(); validPointsPImage.loadPixels(); // set a default color. Arrays.fill(validPointsPImage.pixels, papplet.color(0, 0, 0)); // TODO: get the color with Kinect2... if (this.colorCamera.getPixelFormat() == Camera.PixelFormat.RGB) { computeDepthAndDo(skip, new SetImageDataRGB()); } if (this.colorCamera.getPixelFormat() == Camera.PixelFormat.BGR) { computeDepthAndDo(skip, new SetImageData()); } validPointsPImage.updatePixels(); return validPointsPImage; }