自定义相机主要通过 GLSurfaceView 视图和 Camera 相机实现。其中 GLSurfaceView 负责视图的渲染，Camera 负责拍照及回调处理。
首先添加需要的权限，否则会抛出无法打开相机服务的异常；需要动态权限的请自行添加。
然后自定义相机 View：
/**
 * Camera preview rendered through OpenGL ES 2.0: the Camera feeds a
 * SurfaceTexture (external OES texture), and each frame is drawn with one of
 * two fragment shaders (front camera mirrored, back camera straight).
 */
public class CameraPreview extends GLSurfaceView implements GLSurfaceView.Renderer, SurfaceTexture.OnFrameAvailableListener {
// Vertex shader. Note gl_Position swaps (x, y) -> (y, x), i.e. the quad is
// rotated 90 degrees — presumably to compensate for sensor orientation on
// portrait devices; TODO confirm against the device orientation handling.
public String vss =
"attribute vec2 vPosition;\n" +
"attribute vec2 vTexCoord;\n" +
"varying vec2 texCoord;\n" +
"void main() {\n" +
" texCoord = vTexCoord;\n" +
" gl_Position = vec4 ( vPosition.y, vPosition.x, 0.0, 1.0 );\n" +
"}";
// Fragment shader for the front camera: samples the external OES texture as-is.
private final String fss_front =
"#extension GL_OES_EGL_image_external : require\n" +
"precision mediump float;\n" +
"uniform samplerExternalOES sTexture;\n" +
"varying vec2 texCoord;\n" +
"void main() {\n" +
" gl_FragColor = texture2D(sTexture,texCoord);\n" +
"}";
// Fragment shader for the back camera: flips the texture horizontally
// (1.0 - texCoord.x) before sampling.
private final String fss_back =
"#extension GL_OES_EGL_image_external : require\n" +
"precision mediump float;\n" +
"uniform samplerExternalOES sTexture;\n" +
"varying vec2 texCoord;\n" +
"void main() {\n" +
" vec2 newText = vec2(1.0-texCoord.x, texCoord.y);\n" +
" gl_FragColor = texture2D(sTexture,newText);\n" +
"}";
private static String LOG_TAG = CameraPreview.class.getName();
private List<Camera.Size> mSupportedPreviewSizes;
// GL texture name holding the camera's external OES texture.
private int[] mTextureId;
// Framebuffer object used for off-screen rendering / pixel readback.
private int[] fbo = new int[]{0};
// Receives camera frames; onFrameAvailable triggers a redraw.
private SurfaceTexture mSurfaceTexture;
private Camera mCamera;
// Full-screen quad vertex positions (8 floats = 4 x vec2).
private FloatBuffer pVertex;
// Texture coordinates matching pVertex.
private FloatBuffer pTexCoord;
// RGBA readback buffer sized bufferWidth * bufferHeight * 4 bytes.
private ByteBuffer pixelBuffer;
private MediaRecorder mMediarecorder;
private File mOutputFile;
public Boolean isPreviewing = false;
private int nCameraFrontBack; //0: front 1:back
// Handle of the linked GL shader program.
private int hProgram;
// Off-screen buffer dimensions; also the initial window size.
private int bufferWidth = 1040;
private int bufferHeight = 1848;
private int mWindowWidth = 1040;
private int mWindowHeight = 1848;
// Preview aspect ratio (width / height); overwritten in initView() from the
// camera's actual preview size. NOTE(review): static but set per-instance.
private static float mPreviewRatio = 640 / 480.0f;
public static final int CameraFront = 0;
public static final int CameraBack = 1;
private OnTakePicCallBack mOnTakePicCallBack;
private OnDrawFrameCallback mOnDrawFrameCallback;
/**
 * Callback delivering a rendered frame as raw RGBA bytes.
 * Marked {@link FunctionalInterface} so it can be implemented with a lambda
 * and the compiler enforces the single abstract method.
 */
@FunctionalInterface
public interface OnDrawFrameCallback {
    /**
     * @param rgba pixel data, 4 bytes per pixel (RGBA)
     * @param w    frame width in pixels
     * @param h    frame height in pixels
     */
    void call(byte[] rgba, int w, int h);
}
/**
 * Callback delivering the captured picture's encoded bytes.
 * Marked {@link FunctionalInterface} so it can be implemented with a lambda
 * and the compiler enforces the single abstract method.
 */
@FunctionalInterface
public interface OnTakePicCallBack {
    /**
     * @param data captured image bytes
     */
    void onPictureTaken(byte[] data);
}
/**
 * Programmatic constructor. nCameraFrontBack keeps its default of 0, so this
 * path opens the front camera (see initView()).
 *
 * @param context the hosting context
 */
public CameraPreview(Context context) {
super(context);
initView();
}
/**
 * XML-inflation constructor. Reads the custom {@code cameraPosition}
 * attribute (0 = front, 1 = back; defaults to front) before initializing
 * the GL surface and opening the camera.
 *
 * @param context the hosting context
 * @param attrs   inflated XML attributes
 */
public CameraPreview(Context context, AttributeSet attrs) {
    super(context, attrs);
    TypedArray typedArray = context.obtainStyledAttributes(attrs, R.styleable.youtuattrs);
    try {
        nCameraFrontBack = typedArray.getInteger(R.styleable.youtuattrs_cameraPosition, 0);
    } finally {
        // TypedArray instances are pooled by the framework; failing to
        // recycle() leaks the pool entry (the original omitted this).
        typedArray.recycle();
    }
    initView();
}
/**
 * Initializes geometry buffers, configures the EGL surface for RGBA8888
 * GLES2 rendering, opens the selected camera, and derives the preview
 * aspect ratio from the camera's actual preview size.
 */
private void initView() {
    Log.d(LOG_TAG, "initView");
    // Full-screen quad in triangle-strip order; the vertex shader swaps
    // (x, y), so these coordinates come out rotated on screen.
    float[] vtmp = {1.0f, -1.0f, -1.0f, -1.0f, 1.0f, 1.0f, -1.0f, 1.0f};
    float[] ttmp = {1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f};
    // 8 floats * 4 bytes, native byte order as required by GLES.
    pVertex = ByteBuffer.allocateDirect(8 * 4).order(ByteOrder.nativeOrder()).asFloatBuffer();
    pVertex.put(vtmp);
    pVertex.position(0);
    pTexCoord = ByteBuffer.allocateDirect(8 * 4).order(ByteOrder.nativeOrder()).asFloatBuffer();
    pTexCoord.put(ttmp);
    pTexCoord.position(0);
    // RGBA readback buffer: 4 bytes per pixel.
    pixelBuffer = ByteBuffer.allocateDirect(bufferWidth * bufferHeight * 4).order(ByteOrder.nativeOrder());
    setEGLContextClientVersion(2);
    setEGLConfigChooser(8, 8, 8, 8, 0, 0);// fixed:
    setRenderer(this);
    // Only redraw when a new camera frame arrives (onFrameAvailable).
    setRenderMode(GLSurfaceView.RENDERMODE_WHEN_DIRTY);
    getHolder().setFormat(PixelFormat.RGBA_8888);
    if (nCameraFrontBack == CameraFront) {
        openFrontCamera();
    } else {
        openBackCamera();
    }
    // Opening the camera can fail (permission denied, camera in use);
    // the original dereferenced mCamera unconditionally and would NPE here.
    if (mCamera != null) {
        Camera.Size size = mCamera.getParameters().getPreviewSize();
        mPreviewRatio = (float) size.width / size.height;
    }
}
@Override
public void onResume() {
    Log.d(LOG_TAG, "onResume");
    // GLSurfaceView.onResume() recreates the EGL context / restarts the
    // render thread after onPause(); the original omitted this call, which
    // freezes rendering after the activity is resumed.
    super.onResume();
}
@Override