处理视频流byte数据
来源:互联网 发布:网络优化大师.apk 编辑:程序博客网 时间:2024/06/16 17:34
android端代码:
// Grab the camera's current preview size, decode one NV21 preview frame
// (`data`, as delivered by the preview callback) into ARGB pixels, wrap the
// result in a Bitmap and persist it via saveImage().
Parameters parameters = cam.getParameters();
// use primitive int — the original boxed these as Integer for no reason,
// adding autoboxing on every use
int width = parameters.getPreviewSize().width;
int height = parameters.getPreviewSize().height;
Log.i("preview size: ", width + "x" + height);
int[] mIntArray = new int[width * height];
// Decode Yuv data to integer array
decodeYUV420SP(mIntArray, data, width, height);
// Initialize the bitmap, with the replaced color
Bitmap bmp = Bitmap.createBitmap(mIntArray, width, height, Bitmap.Config.ARGB_8888);
saveImage(bmp);
这是decodeYUV方法:
static public void decodeYUV420SP(int[] rgba, byte[] yuv420sp, int width, int height) { final int frameSize = width * height; for (int j = 0, yp = 0; j < height; j++) { int uvp = frameSize + (j >> 1) * width, u = 0, v = 0; for (int i = 0; i < width; i++, yp++) { int y = (0xff & ((int) yuv420sp[yp])) - 16; if (y < 0) y = 0; if ((i & 1) == 0) { v = (0xff & yuv420sp[uvp++]) - 128; u = (0xff & yuv420sp[uvp++]) - 128; } int y1192 = 1192 * y; int r = (y1192 + 1634 * v); int g = (y1192 - 833 * v - 400 * u); int b = (y1192 + 2066 * u); if (r < 0) r = 0; else if (r > 262143) r = 262143; if (g < 0) g = 0; else if (g > 262143) g = 262143; if (b < 0) b = 0; else if (b > 262143) b = 262143; // rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & // 0xff00) | ((b >> 10) & 0xff); // rgba, divide 2^10 ( >> 10) rgba[yp] = ((r << 14) & 0xff000000) | ((g << 6) & 0xff0000) | ((b >> 2) | 0xff00); } } }
方法二:
// ENCODER (RGB -> YUV420SP), "method 2" from the post.
// NOTE(review): this snippet is truncated as published — the method's closing
// brace is missing, and the U/V arrays filled here are never packed into the
// chroma plane of yuv420sp. Do not use as-is.
// NOTE(review): `>> 8 + 16` parses as `>> (8 + 16)`, i.e. `>> 24`, because
// `+` binds tighter than `>>`; the intended expression is `((...) >> 8) + 16`
// (and `+ 128` for u and v), so y/u/v are computed incorrectly here.
// NOTE(review): `index` is incremented both when writing yuv420sp and when
// writing V, so it advances by 2 per pixel and U/V land at wrong offsets.
static public void encodeYUV420SP_original(byte[] yuv420sp, int[] rgba, int width, int height) {
    final int frameSize = width * height;
    int[] U, V;
    U = new int[frameSize];
    V = new int[frameSize];
    final int uvwidth = width / 2; // half-resolution chroma width (unused in the visible part)
    int r, g, b, y, u, v;
    for (int j = 0; j < height; j++) {
        int index = width * j;
        for (int i = 0; i < width; i++) {
            // NOTE(review): rgba is read as 0xRRGGBBAA (alpha in the low byte);
            // the signed `>>` keeps sign bits on the red extraction — `>>> 24`
            // would be the safe form.
            r = (rgba[index] & 0xff000000) >> 24;
            g = (rgba[index] & 0xff0000) >> 16;
            b = (rgba[index] & 0xff00) >> 8;
            // rgb to yuv
            y = (66 * r + 129 * g + 25 * b + 128) >> 8 + 16;
            u = (-38 * r - 74 * g + 112 * b + 128) >> 8 + 128;
            v = (112 * r - 94 * g - 18 * b + 128) >> 8 + 128;
            // clip y
            yuv420sp[index++] = (byte) ((y < 0) ? 0 : ((y > 255) ? 255 : y));
            U[index] = u;
            V[index++] = v;
        }
    }
方法三:
static public void decodeYUV420SP(int[] rgba, byte[] yuv420sp, int width, int height) { final int frameSize = width * height; int r, g, b, y1192, y, i, uvp, u, v; for (int j = 0, yp = 0; j < height; j++) { uvp = frameSize + (j >> 1) * width; u = 0; v = 0; for (i = 0; i < width; i++, yp++) { y = (0xff & ((int) yuv420sp[yp])) - 16; if (y < 0) y = 0; if ((i & 1) == 0) { // above answer is wrong at the following lines. just swap ***u*** and ***v*** u = (0xff & yuv420sp[uvp++]) - 128; v = (0xff & yuv420sp[uvp++]) - 128; } y1192 = 1192 * y; r = (y1192 + 1634 * v); g = (y1192 - 833 * v - 400 * u); b = (y1192 + 2066 * u); r = Math.max(0, Math.min(r, 262143)); g = Math.max(0, Math.min(g, 262143)); b = Math.max(0, Math.min(b, 262143)); // combine ARGB rgba[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) | 0xff); } }}
方法三有人评论说:
最后一行似乎有问题:((g >> 2) & 0xff00) | ((b >> 10) | 0xff) 不应该是 ((g >> 2) & 0xff00) | ((b >> 10) & 0xff) 吗?
方法四:
// decode Y, U, and V values on the YUV 420 buffer described as YCbCr_422_SP by Android // David Manpearl 081201 public void decodeYUV(int[] out, byte[] fg, int width, int height) throws NullPointerException, IllegalArgumentException { int sz = width * height; if (out == null) throw new NullPointerException("buffer out is null"); if (out.length < sz) throw new IllegalArgumentException("buffer out size " + out.length + " < minimum " + sz); if (fg == null) throw new NullPointerException("buffer 'fg' is null"); if (fg.length < sz) throw new IllegalArgumentException("buffer fg size " + fg.length + " < minimum " + sz * 3 / 2); int i, j; int Y, Cr = 0, Cb = 0; for (j = 0; j < height; j++) { int pixPtr = j * width; final int jDiv2 = j >> 1; for (i = 0; i < width; i++) { Y = fg[pixPtr]; if (Y < 0) Y += 255; if ((i & 0x1) != 1) { final int cOff = sz + jDiv2 * width + (i >> 1) * 2; Cb = fg[cOff]; if (Cb < 0) Cb += 127; else Cb -= 128; Cr = fg[cOff + 1]; if (Cr < 0) Cr += 127; else Cr -= 128; } int R = Y + Cr + (Cr >> 2) + (Cr >> 3) + (Cr >> 5); if (R < 0) R = 0; else if (R > 255) R = 255; int G = Y - (Cb >> 2) + (Cb >> 4) + (Cb >> 5) - (Cr >> 1) + (Cr >> 3) + (Cr >> 4) + (Cr >> 5); if (G < 0) G = 0; else if (G > 255) G = 255; int B = Y + Cb + (Cb >> 1) + (Cb >> 2) + (Cb >> 6); if (B < 0) B = 0; else if (B > 255) B = 255; out[pixPtr++] = 0xff000000 + (B << 16) + (G << 8) + R; } }}
方法五:(发布的人说转换的颜色挺正)
/**
 * Decodes one NV21 (YUV420SP) camera frame into packed ARGB_8888 pixels
 * (0xAARRGGBB, alpha forced to 0xff). Uses fixed-point ITU-R BT.601 math
 * scaled by 2^10, clipping each channel to [0, 262143] before packing.
 *
 * @param rgb      output buffer, at least width*height ints, one per pixel
 * @param yuv420sp input NV21 buffer: width*height luma bytes followed by
 *                 interleaved V/U pairs (one pair per 2x2 luma block)
 * @param width    frame width in pixels
 * @param height   frame height in pixels
 */
static public void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
    final int lumaCount = width * height;
    int outIdx = 0;
    for (int row = 0; row < height; row++) {
        // each chroma row is shared by two luma rows
        int chromaIdx = lumaCount + (row >> 1) * width;
        int cr = 0, cb = 0;
        for (int col = 0; col < width; col++, outIdx++) {
            int luma = Math.max((yuv420sp[outIdx] & 0xff) - 16, 0);
            if ((col & 1) == 0) { // reload the V/U pair on even columns
                cr = (yuv420sp[chromaIdx++] & 0xff) - 128;
                cb = (yuv420sp[chromaIdx++] & 0xff) - 128;
            }
            int scaledY = 1192 * luma;
            int red = Math.min(Math.max(scaledY + 1634 * cr, 0), 262143);
            int green = Math.min(Math.max(scaledY - 833 * cr - 400 * cb, 0), 262143);
            int blue = Math.min(Math.max(scaledY + 2066 * cb, 0), 262143);
            rgb[outIdx] = 0xff000000
                    | ((red << 6) & 0xff0000)
                    | ((green >> 2) & 0xff00)
                    | ((blue >> 10) & 0xff);
        }
    }
}
方法六:将YUV图像转换为RGB的图像,在ndk中操作(作者说处理480*320需要0~25ms)
/*
 * "Method 6": NDK-side YUV -> RGB565 converter, as published (author claims
 * 0-25 ms for 480x320).
 *
 * NOTE(review): this snippet was corrupted by machine translation and HTML
 * escaping and DOES NOT COMPILE as-is: every `if` keyword was replaced by the
 * Chinese word `如果`, both `for` loop headers lost their condition/increment
 * clauses (`for(y = 0; y> 1;` — the `<` comparisons were eaten by the HTML
 * escaper), and at least one comparison reads `如果(R 0)` with the operator
 * missing entirely. Recover the original from its upstream source before
 * using; it is kept verbatim below only for reference.
 */
void toRGB565(unsigned short * yuvs,int widthIn,int heightIn,unsigned int * rgbs,int widthOut,int heightOut){ int half_widthIn = widthIn >> 1; //亮度数据的结束 int lumEnd =(widthIn * heightIn)>> 1; //指向下一个亮度值对 int lumPtr = 0; //指向下一个色度值对 int chrPtr = lumEnd; //当前亮度扫描线的结束 int lineEnd = half_widthIn; int x,y; for(y = 0; y> 1; for(x = 0; x> 8)&0xff; Y1 = Y1&0xff; int Cr = yuvs [chrPtr ++]; int Cb =((Cr >> 8)&0xff) - 128; Cr =(Cr&0xff) - 128; int R,G,B; //生成第一个RGB组件 B = Y1 +((454 * Cb)>> 8); 如果(B <0)B = 0; 如果(B> 255)B = 255; G = Y1 - ((88 * Cb + 183 * Cr)>> 8); 如果(G <0)G = 0; 如果(G> 255)G = 255; R = Y1 +((359 * Cr)>> 8); 如果(R 0)R = 0; 如果(R> 255)R = 255; int val =((R&0xf8)<< 8)| ((G&0xfc)<< 3)| (B >> 3); //生成第二个RGB组件 B = Y1 +((454 * Cb)>> 8); 如果(B <0)B = 0; 如果(B> 255)B = 255; G = Y1 - ((88 * Cb + 183 * Cr)>> 8); 如果(G <0)G = 0; 如果(G> 255)G = 255; R = Y1 +((359 * Cr)>> 8); 如果(R 0)R = 0; 如果(R> 255)R = 255; rgbs [yPosOut + x] = val | (((R&0xf8)<< 8)|((G&0xfc)<< 3)|(B >> 3))<< 16); } //必要时跳回到色度值的开头 chrPtr = lumEnd +((lumPtr >> 1)/ half_widthIn)* half_widthIn; lineEnd + = half_widthIn; }}
/*
 * JNI bridge for the toRGB565 converter above: converts the input image from
 * YUV to an RGB 5_6_5 image; the output (direct) buffer must be at least as
 * large as the input image.
 *
 * NOTE(review): also corrupted by the scrape and NOT COMPILABLE as-is — the
 * `->` operator appears as `(* env) - > Get...` with embedded spaces, the
 * Javadoc-style header was mangled (`/ ** *`), and the last parameter was
 * machine-translated to `jint过滤器` ("filter"); restore `jint filter` and
 * the `->` arrows before compiling. The visible flow is: pin the input array
 * (GetByteArrayElements), get the output pointer via GetDirectBufferAddress,
 * call toRGB565, then release the input with JNI_ABORT (no copy-back).
 */
/ ** *将输入图像从YUV转换为RGB 5_6_5图像。 *输出缓冲区的大小必须至少为输入图像的大小。 * /JNIEXPORT void JNICALL Java_de_offis_magic_core_NativeWrapper_image2TextureColor (JNIEnv * env,jclass clazz, jbyteArray imageIn,jint widthIn,jint heightIn, jobject imageOut,jint widthOut,jint heightOut, jint过滤器){jbyte * cImageIn =(* env) - > GetByteArrayElements(env,imageIn,NULL);jbyte * cImageOut =(jbyte *)(* env) - > GetDirectBufferAddress(env,imageOut);toRGB565((unsigned short *)cImageIn,widthIn,heightIn,(unsigned int *)cImageOut,widthOut,heightOut);(* env) - > ReleaseByteArrayElements(env,imageIn,cImageIn,JNI_ABORT);}
这下面是一个jni接口函数
#include <stdlib.h> /* malloc/free — required by the heap-allocation fix below */

/*
 * JNI entry point: decodes one NV21 (YUV420SP) frame into a freshly
 * allocated jintArray of packed ARGB pixels (0xAARRGGBB). The per-channel
 * math is replaced by lookup tables (y_table, r_yv_table, g_v_table,
 * g_u_table, b_yu_table) built elsewhere by initTable(); presumably the
 * tables pre-clamp r and b, since only g is clamped here — TODO confirm
 * against the table builder.
 */
jintArray Java_com_spore_ImageUtilEngine_decodeYUV420SP(JNIEnv * env, jobject thiz,
		jbyteArray buf, jint width, jint height)
{
	jbyte *yuv420sp = (*env)->GetByteArrayElements(env, buf, 0);
	int frameSize = width * height;
	/* BUG FIX: the original used a VLA `jint rgb[frameSize]` on the stack;
	 * a single 640x480 frame needs ~1.2 MB, overflowing typical native
	 * thread stacks. Allocate the pixel buffer on the heap instead. */
	jint *rgb = (jint *) malloc((size_t) frameSize * sizeof(jint));
	int i, j, yp, uvp, u, v;

	if (yuv420sp == NULL || rgb == NULL) {
		free(rgb);
		if (yuv420sp != NULL)
			(*env)->ReleaseByteArrayElements(env, buf, yuv420sp, JNI_ABORT);
		return NULL;
	}
	initTable();
	for (j = 0, yp = 0; j < height; j++) {
		uvp = frameSize + (j >> 1) * width; /* chroma row for this luma row */
		u = 0;
		v = 0;
		for (i = 0; i < width; i++, yp++) {
			/* unsigned luma value; the dead `if (y < 0)` check was removed
			 * since the 0xff mask already guarantees y >= 0 */
			int y = 0xff & (int) yuv420sp[yp];
			if ((i & 1) == 0) { /* reload the V/U pair on even columns */
				v = 0xff & yuv420sp[uvp++];
				u = 0xff & yuv420sp[uvp++];
			}
			int y1192 = y_table[y];
			int r = r_yv_table[y][v];
			int g = y1192 - g_v_table[v] - g_u_table[u];
			int b = b_yu_table[y][u];
			if (g < 0) g = 0; else if (g > 262143) g = 262143;
			rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00)
					| ((b >> 10) & 0xff);
		}
	}
	jintArray result = (*env)->NewIntArray(env, frameSize);
	if (result != NULL)
		(*env)->SetIntArrayRegion(env, result, 0, frameSize, rgb);
	/* input was never modified, so JNI_ABORT skips the useless copy-back
	 * (the original passed mode 0) */
	(*env)->ReleaseByteArrayElements(env, buf, yuv420sp, JNI_ABORT);
	free(rgb);
	return result;
}
这里附上本人修改的代码,用于从byte数据中直接裁剪出所需要的区域。
注意:代码中width和height分别代表原图的宽和高
//====方法五:竖直方向裁剪int outGrey[width*800];jint *bb=outGrey;int frameSize = width * height;for (int j = 200, yp = 200*width,cp=0; j < 1000; j++) {int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;for (int i = 0; i < width; i++, yp++, cp++) {int y = (0xff & ((int) yuv420sp[yp])) - 16; if (y < 0) y = 0; if ((i & 1) == 0) { v = (0xff & yuv420sp[uvp++]) - 128; u = (0xff & yuv420sp[uvp++]) - 128; } int y1192 = 1192 * y; int r = (y1192 + 1634 * v); int g = (y1192 - 833 * v - 400 * u); int b = (y1192 + 2066 * u); if (r < 0) r = 0; else if (r > 262143) r = 262143; if (g < 0) g = 0; else if (g > 262143) g = 262143; if (b < 0) b = 0; else if (b > 262143) b = 262143; outGrey[cp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff); } }
竖直裁剪的效果图
//====方法五: 直接裁剪byte数据int cutW = width / 6; //裁剪宽度int cutX = width / 2 - width / 12; //裁剪宽度起始点int outGrey[cutW * height];jint *bb = outGrey;int frameSize = width * height;for (int j = 0, yp = cutX, cp = 0; j < height; j++) {int uvp = frameSize + (j >> 1) * width+ cutX, u = 0, v = 0;for (int i = cutX; i < cutX+ cutW; i++, yp++, cp++) {int y = (0xff & ((int)yuv420sp[yp])) - 16;if (y < 0) y = 0;if ((i & 1) == 0) {v = (0xff & yuv420sp[uvp++]) - 128;u = (0xff & yuv420sp[uvp++]) - 128;}int y1192 = 1192 * y;int r = (y1192 + 1634 * v);int g = (y1192 - 833 * v - 400 * u);int b = (y1192 + 2066 * u);if (r < 0) r = 0; else if (r > 262143) r = 262143;if (g < 0) g = 0; else if (g > 262143) g = 262143;if (b < 0) b = 0; else if (b > 262143) b = 262143;outGrey[cp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);}yp += (width- cutW);}
阅读全文
0 0
- 处理视频流byte数据
- 获取视频大小Byte
- Directshow采集到的视频BYTE数据转成IplImage数据并保存出去
- AVFoundation 视频流处理
- 基于流模式的字节byte缓存区 bytebuffer.cs 用于tcp/udp的网络数据高效处理
- IPCAM视频数据解码并处理
- 视频采集与处理-YUV数据
- 音视频处理 各层数据 规范
- AnyChat视频回调RGB24 byte[]数据转换成Bitmap图像
- 视频流读取与视频帧处理
- java 图片处理byte[]
- ffmpeg笔记:音频数据和视频数据的处理
- 处理视频流的代码
- Java -- 保存byte[] 数据
- byte[]数据写成文件
- byte类型数据
- java io: 目前总结到 byte(字节流)的处理
- java 16进制数据格式化处理工具类,16进制byte数组转String
- Mining Your Own Business 点双连通分量
- 案例一:集群推送消息
- vSAN架构细节-分布式RAID
- 解决Ubuntu16.04系统耳机无声音问题
- XML文档--Schema
- 处理视频流byte数据
- MVCC机制
- Android 面试
- 程序员是该选择大公司,还是小公司?你会如何抉择呢?
- 成为 Google 认证机器学习工程师,零基础也只需要 2 步!
- Android Studio详细安装流程和配置、主题
- 用Node.js创建自签名的HTTPS服务器
- 你知道比程序员最讨厌的四件事,更严重的问题是什么吗?
- 如果每一种语言都对应一种女生,你会喜欢哪一个?