  /**
   * Encode the uncompressed source image stored in <code>srcImage</code> and output a YUV planar
   * image to the given destination buffer.  See {@link #encodeYUV(byte[], int)} for more detail.
   *
   * @param srcImage a <code>BufferedImage</code> instance containing RGB or grayscale pixels to
   * be encoded
   * @param dstBuf buffer that will receive the YUV planar image.  Use {@link TJ#bufSizeYUV} to
   * determine the appropriate size for this buffer based on the image width, height, and level
   * of chrominance subsampling.
   * @param flags the bitwise OR of one or more of {@link TJ TJ.FLAG_*}
   */
  public void encodeYUV(BufferedImage srcImage, byte[] dstBuf, int flags) throws Exception {
    if (srcImage == null || dstBuf == null || flags < 0)
      throw new Exception("Invalid argument in encodeYUV()");
    int width = srcImage.getWidth();
    int height = srcImage.getHeight();
    int pixelFormat;
    boolean intPixels = false;
    if (byteOrder == null)
      byteOrder = ByteOrder.nativeOrder();
    switch (srcImage.getType()) {
      case BufferedImage.TYPE_3BYTE_BGR:
        pixelFormat = TJ.PF_BGR;  break;
      case BufferedImage.TYPE_4BYTE_ABGR:
      case BufferedImage.TYPE_4BYTE_ABGR_PRE:
        pixelFormat = TJ.PF_XBGR;  break;
      case BufferedImage.TYPE_BYTE_GRAY:
        pixelFormat = TJ.PF_GRAY;  break;
      case BufferedImage.TYPE_INT_BGR:
        if (byteOrder == ByteOrder.BIG_ENDIAN)
          pixelFormat = TJ.PF_XBGR;
        else
          pixelFormat = TJ.PF_RGBX;
        intPixels = true;  break;
      case BufferedImage.TYPE_INT_RGB:
      case BufferedImage.TYPE_INT_ARGB:
      case BufferedImage.TYPE_INT_ARGB_PRE:
        if (byteOrder == ByteOrder.BIG_ENDIAN)
          pixelFormat = TJ.PF_XRGB;
        else
          pixelFormat = TJ.PF_BGRX;
        intPixels = true;  break;
      default:
        throw new Exception("Unsupported BufferedImage format");
    }
    WritableRaster wr = srcImage.getRaster();
    if (subsamp < 0)
      throw new Exception("Subsampling level not set");
    if (intPixels) {
      SinglePixelPackedSampleModel sm =
        (SinglePixelPackedSampleModel)srcImage.getSampleModel();
      int pitch = sm.getScanlineStride();
      DataBufferInt db = (DataBufferInt)wr.getDataBuffer();
      int[] buf = db.getData();
      encodeYUV(buf, width, pitch, height, pixelFormat, dstBuf, subsamp, flags);
    } else {
      ComponentSampleModel sm = (ComponentSampleModel)srcImage.getSampleModel();
      int pixelSize = sm.getPixelStride();
      if (pixelSize != TJ.getPixelSize(pixelFormat))
        throw new Exception("Inconsistency between pixel format and pixel size in BufferedImage");
      int pitch = sm.getScanlineStride();
      DataBufferByte db = (DataBufferByte)wr.getDataBuffer();
      byte[] buf = db.getData();
      encodeYUV(buf, width, pitch, height, pixelFormat, dstBuf, subsamp, flags);
    }
    compressedSize = TJ.bufSizeYUV(width, height, subsamp);
  }
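  // Illustrative usage sketch (not part of the TurboJPEG API): encode a BufferedImage into a
  // caller-allocated YUV planar buffer using 4:2:0 subsampling.  The method name
  // exampleEncodeImageYUV and the choice of subsampling level are assumptions for the sake of the
  // example; the image is assumed to use one of the BufferedImage types handled by the switch
  // statement above.
  private static byte[] exampleEncodeImageYUV(BufferedImage image) throws Exception {
    TJCompressor tjc = new TJCompressor();
    tjc.setSubsamp(TJ.SAMP_420);
    // Size the destination buffer with TJ.bufSizeYUV(), as the Javadoc above recommends.
    byte[] yuvBuf = new byte[TJ.bufSizeYUV(image.getWidth(), image.getHeight(), TJ.SAMP_420)];
    tjc.encodeYUV(image, yuvBuf, 0);
    tjc.close();
    return yuvBuf;
  }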
  /**
   * Encode the uncompressed source image associated with this compressor instance and output a
   * YUV planar image to the given destination buffer.  This method uses the accelerated color
   * conversion routines in TurboJPEG's underlying codec to produce a planar YUV image that is
   * suitable for direct video display.  Specifically, if the chrominance components are
   * subsampled along the horizontal dimension, then the width of the luminance plane is padded
   * to the nearest multiple of 2 in the output image (same goes for the height of the luminance
   * plane, if the chrominance components are subsampled along the vertical dimension.)  Also,
   * each line of each plane in the output image is padded to the nearest multiple of 4 bytes.
   * Although this will work with any subsampling option, it is really only useful in combination
   * with {@link TJ#SAMP_420}, which produces an image compatible with the I420 (AKA "YUV420P")
   * format.
   *
   * @param dstBuf buffer that will receive the YUV planar image.  Use {@link TJ#bufSizeYUV} to
   * determine the appropriate size for this buffer based on the image width, height, and level
   * of chrominance subsampling.
   * @param flags the bitwise OR of one or more of {@link TJ TJ.FLAG_*}
   */
  public void encodeYUV(byte[] dstBuf, int flags) throws Exception {
    if (dstBuf == null || flags < 0)
      throw new Exception("Invalid argument in encodeYUV()");
    if (srcBuf == null)
      throw new Exception(NO_ASSOC_ERROR);
    if (subsamp < 0)
      throw new Exception("Subsampling level not set");
    encodeYUV(srcBuf, srcWidth, srcPitch, srcHeight, srcPixelFormat, dstBuf, subsamp, flags);
    compressedSize = TJ.bufSizeYUV(srcWidth, srcHeight, subsamp);
  }
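  // Illustrative usage sketch (not part of the TurboJPEG API): produce an I420 (YUV420P) frame
  // from a tightly packed BGR pixel buffer.  The method name exampleEncodeI420 and the BGR input
  // format are assumptions for the sake of the example.
  private static byte[] exampleEncodeI420(byte[] bgrPixels, int width, int height)
                                          throws Exception {
    TJCompressor tjc = new TJCompressor();
    // A pitch of 0 means the rows are tightly packed (width * TJ.getPixelSize(TJ.PF_BGR) bytes).
    tjc.setSourceImage(bgrPixels, width, 0, height, TJ.PF_BGR);
    // 4:2:0 subsampling makes the output compatible with the I420 format, per the Javadoc above.
    tjc.setSubsamp(TJ.SAMP_420);
    byte[] yuvBuf = new byte[TJ.bufSizeYUV(width, height, TJ.SAMP_420)];
    tjc.encodeYUV(yuvBuf, 0);
    tjc.close();
    return yuvBuf;
  }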
  /**
   * Encode the uncompressed source image associated with this compressor instance and return a
   * buffer containing a YUV planar image.  See {@link #encodeYUV(byte[], int)} for more detail.
   *
   * @param flags the bitwise OR of one or more of {@link TJ TJ.FLAG_*}
   *
   * @return a buffer containing a YUV planar image
   */
  public byte[] encodeYUV(int flags) throws Exception {
    if (srcWidth < 1 || srcHeight < 1)
      throw new Exception(NO_ASSOC_ERROR);
    if (subsamp < 0)
      throw new Exception("Subsampling level not set");
    byte[] buf = new byte[TJ.bufSizeYUV(srcWidth, srcHeight, subsamp)];
    encodeYUV(buf, flags);
    return buf;
  }
  /**
   * Encode the uncompressed source image stored in <code>srcImage</code> and return a buffer
   * containing a YUV planar image.  See {@link #encodeYUV(byte[], int)} for more detail.
   *
   * @param srcImage a <code>BufferedImage</code> instance containing RGB or grayscale pixels to
   * be encoded
   * @param flags the bitwise OR of one or more of {@link TJ TJ.FLAG_*}
   *
   * @return a buffer containing a YUV planar image
   */
  public byte[] encodeYUV(BufferedImage srcImage, int flags) throws Exception {
    if (subsamp < 0)
      throw new Exception("Subsampling level not set");
    int width = srcImage.getWidth();
    int height = srcImage.getHeight();
    byte[] buf = new byte[TJ.bufSizeYUV(width, height, subsamp)];
    encodeYUV(srcImage, buf, flags);
    return buf;
  }
  /**
   * Compress (or, in YUV mode, encode) a test image with the given pixel format, subsampling
   * level, quality, and flags, write the result to disk, and return its size in bytes.
   */
  private static int compTest(TJCompressor tjc, byte[] dstBuf, int w, int h, int pf,
                              String baseName, int subsamp, int jpegQual, int flags)
                              throws Exception {
    String tempstr;
    byte[] srcBuf = null;
    BufferedImage img = null;
    String pfStr;
    double t;
    int size = 0, ps, imgType = pf;

    if (bi) {
      pf = biTypePF(imgType);
      pfStr = biTypeStr(imgType);
    } else
      pfStr = pixFormatStr[pf];
    ps = TJ.getPixelSize(pf);

    System.out.print(pfStr + " ");
    if (bi)
      System.out.print("(" + pixFormatStr[pf] + ") ");
    if ((flags & TJ.FLAG_BOTTOMUP) != 0)
      System.out.print("Bottom-Up");
    else
      System.out.print("Top-Down ");
    System.out.print(" -> " + subNameLong[subsamp] + " ");
    if (yuv == YUVENCODE)
      System.out.print("YUV ... ");
    else
      System.out.print("Q" + jpegQual + " ... ");

    // Generate the source image, either as a BufferedImage or as a packed pixel buffer.
    if (bi) {
      img = new BufferedImage(w, h, imgType);
      initImg(img, pf, flags);
      tempstr = baseName + "_enc_" + pfStr + "_" +
                (((flags & TJ.FLAG_BOTTOMUP) != 0) ? "BU" : "TD") + "_" +
                subName[subsamp] + "_Q" + jpegQual + ".png";
      File file = new File(tempstr);
      ImageIO.write(img, "png", file);
    } else {
      srcBuf = new byte[w * h * ps + 1];
      initBuf(srcBuf, w, w * ps, h, pf, flags);
    }
    Arrays.fill(dstBuf, (byte)0);

    // Perform the compression or YUV encoding and time it.
    t = getTime();
    tjc.setSubsamp(subsamp);
    tjc.setJPEGQuality(jpegQual);
    if (bi) {
      if (yuv == YUVENCODE)
        tjc.encodeYUV(img, dstBuf, flags);
      else
        tjc.compress(img, dstBuf, flags);
    } else {
      tjc.setSourceImage(srcBuf, w, 0, h, pf);
      if (yuv == YUVENCODE)
        tjc.encodeYUV(dstBuf, flags);
      else
        tjc.compress(dstBuf, flags);
    }
    size = tjc.getCompressedSize();
    t = getTime() - t;

    // Write the output image and verify the YUV buffer, if applicable.
    if (yuv == YUVENCODE)
      tempstr = baseName + "_enc_" + pfStr + "_" +
                (((flags & TJ.FLAG_BOTTOMUP) != 0) ? "BU" : "TD") + "_" +
                subName[subsamp] + ".yuv";
    else
      tempstr = baseName + "_enc_" + pfStr + "_" +
                (((flags & TJ.FLAG_BOTTOMUP) != 0) ? "BU" : "TD") + "_" +
                subName[subsamp] + "_Q" + jpegQual + ".jpg";
    writeJPEG(dstBuf, size, tempstr);
    if (yuv == YUVENCODE) {
      if (checkBufYUV(dstBuf, size, w, h, subsamp) == 1)
        System.out.print("Passed.");
      else {
        System.out.print("FAILED!");
        exitStatus = -1;
      }
    } else
      System.out.print("Done.");
    System.out.format(" %.6f ms\n", t * 1000.);
    System.out.println(" Result in " + tempstr);
    return size;
  }