
android - OpenCV camera orientation problem




OK, I found this as a solution:

First I went into the JavaCameraView.java class in the OpenCV Library - 2.4.5,

and then, in the initializeCamera function, right before mCamera.startPreview();, I added these two calls:

setDisplayOrientation(mCamera, 90);
mCamera.setPreviewDisplay(getHolder());

and implemented the first function like this:

protected void setDisplayOrientation(Camera camera, int angle) {
    Method downPolymorphic;
    try {
        downPolymorphic = camera.getClass().getMethod("setDisplayOrientation",
                new Class[] { int.class });
        if (downPolymorphic != null)
            downPolymorphic.invoke(camera, new Object[] { angle });
    } catch (Exception e1) {
    }
}
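
Note that the reflection is only needed for very old devices: Camera.setDisplayOrientation(int) has been part of the public API since API level 8, so a minimal sketch of the same call, assuming minSdkVersion 8 or higher, is simply:

// Camera.setDisplayOrientation(int) exists since API 8 (Froyo),
// so on modern devices the reflection wrapper can be skipped:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.FROYO) {
    mCamera.setDisplayOrientation(90); // rotate the preview to portrait
}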

Just remember that I'm working with OpenCV 2.4.5.

I hope this helps someone.

I have a simple project that just shows the camera using org.opencv.android.JavaCameraView.

My problem is that by default the camera is in landscape mode and I can't change this, because I have to define a CameraBridgeViewBase instead of a normal camera.

This is part of my code:

XML code:

<LinearLayout
    android:layout_width="match_parent"
    android:layout_height="match_parent" >

    <org.opencv.android.JavaCameraView
        android:layout_width="fill_parent"
        android:layout_height="300dp"
        android:visibility="gone"
        android:id="@+id/HelloOpenCvView"
        opencv:show_fps="true"
        opencv:camera_id="1" />
</LinearLayout>

<LinearLayout
    android:layout_width="match_parent"
    android:layout_height="wrap_content" >

    <Button
        android:id="@+id/BtnVideo"
        android:layout_marginLeft="2dp"
        android:layout_marginRight="2dp"
        android:layout_width="0dp"
        style="@style/button"
        android:layout_height="wrap_content"
        android:layout_weight="1.00"
        android:text="@string/videoBtn"
        android:textSize="18dip" />
</LinearLayout>

Java code:

CameraBridgeViewBase mOpenCvCameraView;
Button VideoButton;

protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    overridePendingTransition(0, 0);
    VideoButton = (Button) this.findViewById(R.id.BtnVideo);
    VideoButton.setOnClickListener(onClickListener);
    mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.HelloOpenCvView);
    mOpenCvCameraView.setVisibility(SurfaceView.INVISIBLE);
}

private OnClickListener onClickListener = new OnClickListener() {
    @Override
    public void onClick(View v) {
        switch (v.getId()) {
            case R.id.BtnVideo:
                if (mOpenCvCameraView.getVisibility() == SurfaceView.VISIBLE) {
                    mOpenCvCameraView.setVisibility(SurfaceView.INVISIBLE);
                } else {
                    mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
                }
                break;
            default:
                break;
        }
    }
};

public void onResume() {
    super.onResume();
    overridePendingTransition(0, 0);
    OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
}

public void onPause() {
    super.onPause();
    if (mOpenCvCameraView != null)
        mOpenCvCameraView.disableView();
}

public void onDestroy() {
    super.onDestroy();
    if (mOpenCvCameraView != null)
        mOpenCvCameraView.disableView();
}

public void onCameraViewStarted(int width, int height) {
}

public void onCameraViewStopped() {
}

public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    return inputFrame.rgba();
}

private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
    @Override
    public void onManagerConnected(int status) {
        switch (status) {
            case LoaderCallbackInterface.SUCCESS: {
                //Log.i(TAG, "OpenCV loaded successfully");
                mOpenCvCameraView.enableView();
            }
            break;
            default: {
                super.onManagerConnected(status);
            }
            break;
        }
    }
};

So, how can I change the default orientation?

Thanks!


The android:screenOrientation value in AndroidManifest.xml should help:

android:screenOrientation="portrait"
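
For example, the activity entry in AndroidManifest.xml could look like this (the activity name below is just a placeholder for your own):

<activity
    android:name=".MainActivity"
    android:screenOrientation="portrait" />

Keep in mind that this only locks the activity to portrait; depending on the device, the frames delivered by JavaCameraView may still arrive rotated, which is what the other answers address.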


I'm using OpenCV 3.1. I fixed it by applying a transformation when drawing the bitmap in the deliverAndDrawFrame method of the CameraBridgeViewBase class. I hope it's useful:

package org.opencv.android; import android.app.Activity; import android.app.AlertDialog; import android.content.Context; import android.content.DialogInterface; import android.content.res.TypedArray; import android.graphics.Bitmap; import android.graphics.Canvas; import android.graphics.Matrix; import android.graphics.Paint; import android.graphics.Rect; import android.hardware.Camera; import android.util.AttributeSet; import android.util.Log; import android.view.SurfaceHolder; import android.view.SurfaceView; import org.opencv.R; import org.opencv.core.Mat; import org.opencv.core.Size; import java.util.List; /** * This is a basic class, implementing the interaction with Camera and OpenCV library. * The main responsibility of it - is to control when camera can be enabled, process the frame, * call external listener to make any adjustments to the frame and then draw the resulting * frame to the screen. * The clients shall implement CvCameraViewListener. */ public abstract class CameraBridgeViewBase extends SurfaceView implements SurfaceHolder.Callback { private static final String TAG = "CameraBridge"; private static final int MAX_UNSPECIFIED = -1; private static final int STOPPED = 0; private static final int STARTED = 1; private int mState = STOPPED; private Bitmap mCacheBitmap; private CvCameraViewListener2 mListener; private boolean mSurfaceExist; private Object mSyncObject = new Object(); protected int mFrameWidth; protected int mFrameHeight; protected int mMaxHeight; protected int mMaxWidth; protected float mScale = 0; protected int mPreviewFormat = RGBA; protected int mCameraIndex = CAMERA_ID_ANY; protected boolean mEnabled; protected FpsMeter mFpsMeter = null; private final Paint mPaint; private final Matrix mMatrix; public static final int CAMERA_ID_ANY = -1; public static final int CAMERA_ID_BACK = 99; public static final int CAMERA_ID_FRONT = 98; public static final int RGBA = 1; public static final int GRAY = 2; public CameraBridgeViewBase(Context context, int cameraId) { super(context); mPaint = new Paint(); mMatrix = new Matrix(); this.setCameraIndex(cameraId); getHolder().addCallback(this); mMaxWidth = MAX_UNSPECIFIED; mMaxHeight = MAX_UNSPECIFIED; } public CameraBridgeViewBase(Context context, AttributeSet attrs) { super(context, attrs); int count = attrs.getAttributeCount(); Log.d(TAG, "Attr count: " + Integer.valueOf(count)); TypedArray styledAttrs = getContext().obtainStyledAttributes(attrs, R.styleable.CameraBridgeViewBase); if (styledAttrs.getBoolean(R.styleable.CameraBridgeViewBase_show_fps, false)) enableFpsMeter(); mPaint = new Paint(); mMatrix = new Matrix(); this.setCameraIndex(styledAttrs.getInt(R.styleable.CameraBridgeViewBase_camera_id, -1)); getHolder().addCallback(this); mMaxWidth = MAX_UNSPECIFIED; mMaxHeight = MAX_UNSPECIFIED; styledAttrs.recycle(); } /** * Sets the camera index * * @param cameraIndex new camera index */ public void setCameraIndex(int cameraIndex) { this.mCameraIndex = cameraIndex; updateMatrix(); } @Override public void layout(int l, int t, int r, int b) { super.layout(l, t, r, b); updateMatrix(); } @Override protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) { super.onMeasure(widthMeasureSpec, heightMeasureSpec); updateMatrix(); } private void updateMatrix() { //synchronized (mMatrix) { float hw = this.getWidth() / 2.0f; float hh = this.getHeight() / 2.0f; boolean isFrontCamera = Camera.CameraInfo.CAMERA_FACING_FRONT == mCameraIndex; mMatrix.reset(); if (isFrontCamera) { //Flip Horizoltal mMatrix.preScale(-1, 1, hw, hh); 
} mMatrix.preTranslate(hw, hh); if (isFrontCamera) mMatrix.preRotate(270); else mMatrix.preRotate(90); mMatrix.preTranslate(-hw, -hh); //} } public interface CvCameraViewListener { /** * This method is invoked when camera preview has started. After this method is invoked * the frames will start to be delivered to client via the onCameraFrame() callback. * * @param width - the width of the frames that will be delivered * @param height - the height of the frames that will be delivered */ public void onCameraViewStarted(int width, int height); /** * This method is invoked when camera preview has been stopped for some reason. * No frames will be delivered via onCameraFrame() callback after this method is called. */ public void onCameraViewStopped(); /** * This method is invoked when delivery of the frame needs to be done. * The returned values - is a modified frame which needs to be displayed on the screen. * TODO: pass the parameters specifying the format of the frame (BPP, YUV or RGB and etc) */ public Mat onCameraFrame(Mat inputFrame); } public interface CvCameraViewListener2 { /** * This method is invoked when camera preview has started. After this method is invoked * the frames will start to be delivered to client via the onCameraFrame() callback. * * @param width - the width of the frames that will be delivered * @param height - the height of the frames that will be delivered */ public void onCameraViewStarted(int width, int height); /** * This method is invoked when camera preview has been stopped for some reason. * No frames will be delivered via onCameraFrame() callback after this method is called. */ public void onCameraViewStopped(); /** * This method is invoked when delivery of the frame needs to be done. * The returned values - is a modified frame which needs to be displayed on the screen. * TODO: pass the parameters specifying the format of the frame (BPP, YUV or RGB and etc) */ public Mat onCameraFrame(CvCameraViewFrame inputFrame); } ; protected class CvCameraViewListenerAdapter implements CvCameraViewListener2 { public CvCameraViewListenerAdapter(CvCameraViewListener oldStypeListener) { mOldStyleListener = oldStypeListener; } public void onCameraViewStarted(int width, int height) { mOldStyleListener.onCameraViewStarted(width, height); } public void onCameraViewStopped() { mOldStyleListener.onCameraViewStopped(); } public Mat onCameraFrame(CvCameraViewFrame inputFrame) { Mat result = null; switch (mPreviewFormat) { case RGBA: result = mOldStyleListener.onCameraFrame(inputFrame.rgba()); break; case GRAY: result = mOldStyleListener.onCameraFrame(inputFrame.gray()); break; default: Log.e(TAG, "Invalid frame format! Only RGBA and Gray Scale are supported!"); } ; return result; } public void setFrameFormat(int format) { mPreviewFormat = format; } private int mPreviewFormat = RGBA; private CvCameraViewListener mOldStyleListener; } ; /** * This class interface is abstract representation of single frame from camera for onCameraFrame callback * Attention: Do not use objects, that represents this interface out of onCameraFrame callback! */ public interface CvCameraViewFrame { /** * This method returns RGBA Mat with frame */ public Mat rgba(); /** * This method returns single channel gray scale Mat with frame */ public Mat gray(); } ; public void surfaceChanged(SurfaceHolder arg0, int arg1, int arg2, int arg3) { Log.d(TAG, "call surfaceChanged event"); synchronized (mSyncObject) { if (!mSurfaceExist) { mSurfaceExist = true; checkCurrentState(); } else { /** Surface changed. 
We need to stop camera and restart with new parameters */ /* Pretend that old surface has been destroyed */ mSurfaceExist = false; checkCurrentState(); /* Now use new surface. Say we have it now */ mSurfaceExist = true; checkCurrentState(); } } } public void surfaceCreated(SurfaceHolder holder) { /* Do nothing. Wait until surfaceChanged delivered */ } public void surfaceDestroyed(SurfaceHolder holder) { synchronized (mSyncObject) { mSurfaceExist = false; checkCurrentState(); } } /** * This method is provided for clients, so they can enable the camera connection. * The actual onCameraViewStarted callback will be delivered only after both this method is called and surface is available */ public void enableView() { synchronized (mSyncObject) { mEnabled = true; checkCurrentState(); } } /** * This method is provided for clients, so they can disable camera connection and stop * the delivery of frames even though the surface view itself is not destroyed and still stays on the scren */ public void disableView() { synchronized (mSyncObject) { mEnabled = false; checkCurrentState(); } } /** * This method enables label with fps value on the screen */ public void enableFpsMeter() { if (mFpsMeter == null) { mFpsMeter = new FpsMeter(); mFpsMeter.setResolution(mFrameWidth, mFrameHeight); } } public void disableFpsMeter() { mFpsMeter = null; } /** * @param listener */ public void setCvCameraViewListener(CvCameraViewListener2 listener) { mListener = listener; } public void setCvCameraViewListener(CvCameraViewListener listener) { CvCameraViewListenerAdapter adapter = new CvCameraViewListenerAdapter(listener); adapter.setFrameFormat(mPreviewFormat); mListener = adapter; } /** * This method sets the maximum size that camera frame is allowed to be. When selecting * size - the biggest size which less or equal the size set will be selected. * As an example - we set setMaxFrameSize(200,200) and we have 176x152 and 320x240 sizes. The * preview frame will be selected with 176x152 size. * This method is useful when need to restrict the size of preview frame for some reason (for example for video recording) * * @param maxWidth - the maximum width allowed for camera frame. * @param maxHeight - the maximum height allowed for camera frame */ public void setMaxFrameSize(int maxWidth, int maxHeight) { mMaxWidth = maxWidth; mMaxHeight = maxHeight; } public void SetCaptureFormat(int format) { mPreviewFormat = format; if (mListener instanceof CvCameraViewListenerAdapter) { CvCameraViewListenerAdapter adapter = (CvCameraViewListenerAdapter) mListener; adapter.setFrameFormat(mPreviewFormat); } } /** * Called when mSyncObject lock is held */ private void checkCurrentState() { Log.d(TAG, "call checkCurrentState"); int targetState; if (mEnabled && mSurfaceExist && getVisibility() == VISIBLE) { targetState = STARTED; } else { targetState = STOPPED; } if (targetState != mState) { /* The state change detected. 
Need to exit the current state and enter target state */ processExitState(mState); mState = targetState; processEnterState(mState); } } private void processEnterState(int state) { Log.d(TAG, "call processEnterState: " + state); switch (state) { case STARTED: onEnterStartedState(); if (mListener != null) { mListener.onCameraViewStarted(mFrameWidth, mFrameHeight); } break; case STOPPED: onEnterStoppedState(); if (mListener != null) { mListener.onCameraViewStopped(); } break; } ; } private void processExitState(int state) { Log.d(TAG, "call processExitState: " + state); switch (state) { case STARTED: onExitStartedState(); break; case STOPPED: onExitStoppedState(); break; } ; } private void onEnterStoppedState() { /* nothing to do */ } private void onExitStoppedState() { /* nothing to do */ } // NOTE: The order of bitmap constructor and camera connection is important for android 4.1.x // Bitmap must be constructed before surface private void onEnterStartedState() { Log.d(TAG, "call onEnterStartedState"); /* Connect camera */ if (!connectCamera(getWidth(), getHeight())) { AlertDialog ad = new AlertDialog.Builder(getContext()).create(); ad.setCancelable(false); // This blocks the ''BACK'' button ad.setMessage("It seems that you device does not support camera (or it is locked). Application will be closed."); ad.setButton(DialogInterface.BUTTON_NEUTRAL, "OK", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { dialog.dismiss(); ((Activity) getContext()).finish(); } }); ad.show(); } } private void onExitStartedState() { disconnectCamera(); if (mCacheBitmap != null) { mCacheBitmap.recycle(); } } /** * This method shall be called by the subclasses when they have valid * object and want it to be delivered to external client (via callback) and * then displayed on the screen. 
* * @param frame - the current frame to be delivered */ protected void deliverAndDrawFrame(CvCameraViewFrame frame) { Mat modified; if (mListener != null) { modified = mListener.onCameraFrame(frame); } else { modified = frame.rgba(); } boolean bmpValid = true; if (modified != null) { try { Utils.matToBitmap(modified, mCacheBitmap); } catch (Exception e) { Log.e(TAG, "Mat type: " + modified); Log.e(TAG, "Bitmap type: " + mCacheBitmap.getWidth() + "*" + mCacheBitmap.getHeight()); Log.e(TAG, "Utils.matToBitmap() throws an exception: " + e.getMessage()); bmpValid = false; } } if (bmpValid && mCacheBitmap != null) { Canvas canvas = getHolder().lockCanvas(); if (canvas != null) { int saveCount = canvas.save(); //synchronized (mMatrix) { canvas.setMatrix(mMatrix); //} /* No longer use, we use matrix to improve performance boolean isFrontCamera = mCameraIndex == Camera.CameraInfo.CAMERA_FACING_FRONT; if(isFrontCamera) { canvas.scale(-1, 1, canvas.getWidth() / 2, canvas.getHeight() / 2); } canvas.translate(canvas.getWidth() / 2, canvas.getHeight() / 2); if(isFrontCamera) canvas.rotate(270); else canvas.rotate(90); canvas.translate(-canvas.getWidth() / 2, -canvas.getHeight() / 2); */ mScale = Math.max((float) canvas.getHeight() / mCacheBitmap.getWidth(), (float) canvas.getWidth() / mCacheBitmap.getHeight()); canvas.drawColor(0, android.graphics.PorterDuff.Mode.CLEAR); Log.d(TAG, "mStretch value: " + mScale); if (mScale != 0) { mPaint.setFilterBitmap(true); mPaint.setAntiAlias(true); mPaint.setDither(true); canvas.drawBitmap(mCacheBitmap, new Rect(0, 0, mCacheBitmap.getWidth(), mCacheBitmap.getHeight()), new Rect((int) ((canvas.getWidth() - mScale * mCacheBitmap.getWidth()) / 2), (int) ((canvas.getHeight() - mScale * mCacheBitmap.getHeight()) / 2), (int) ((canvas.getWidth() - mScale * mCacheBitmap.getWidth()) / 2 + mScale * mCacheBitmap.getWidth()), (int) ((canvas.getHeight() - mScale * mCacheBitmap.getHeight()) / 2 + mScale * mCacheBitmap.getHeight())), mPaint); } else { canvas.drawBitmap(mCacheBitmap, new Rect(0, 0, mCacheBitmap.getWidth(), mCacheBitmap.getHeight()), new Rect((canvas.getWidth() - mCacheBitmap.getWidth()) / 2, (canvas.getHeight() - mCacheBitmap.getHeight()) / 2, (canvas.getWidth() - mCacheBitmap.getWidth()) / 2 + mCacheBitmap.getWidth(), (canvas.getHeight() - mCacheBitmap.getHeight()) / 2 + mCacheBitmap.getHeight()), null); } canvas.restoreToCount(saveCount); if (mFpsMeter != null) { mFpsMeter.measure(); mFpsMeter.draw(canvas, 20, 30); } getHolder().unlockCanvasAndPost(canvas); } } } /** * This method is invoked shall perform concrete operation to initialize the camera. * CONTRACT: as a result of this method variables mFrameWidth and mFrameHeight MUST be * initialized with the size of the Camera frames that will be delivered to external processor. * * @param width - the width of this SurfaceView * @param height - the height of this SurfaceView */ protected abstract boolean connectCamera(int width, int height); /** * Disconnects and release the particular camera object being connected to this surface view. * Called when syncObject lock is held */ protected abstract void disconnectCamera(); // NOTE: On Android 4.1.x the function must be called before SurfaceTexture constructor! 
protected void AllocateCache() { mCacheBitmap = Bitmap.createBitmap(mFrameWidth, mFrameHeight, Bitmap.Config.ARGB_8888); } public interface ListItemAccessor { public int getWidth(Object obj); public int getHeight(Object obj); } ; /** * This helper method can be called by subclasses to select camera preview size. * It goes over the list of the supported preview sizes and selects the maximum one which * fits both values set via setMaxFrameSize() and surface frame allocated for this view * * @param supportedSizes * @param surfaceWidth * @param surfaceHeight * @return optimal frame size */ protected Size calculateCameraFrameSize(List<?> supportedSizes, ListItemAccessor accessor, int surfaceWidth, int surfaceHeight) { int calcWidth = 0; int calcHeight = 0; int maxAllowedWidth = (mMaxWidth != MAX_UNSPECIFIED && mMaxWidth < surfaceWidth) ? mMaxWidth : surfaceWidth; int maxAllowedHeight = (mMaxHeight != MAX_UNSPECIFIED && mMaxHeight < surfaceHeight) ? mMaxHeight : surfaceHeight; for (Object size : supportedSizes) { int width = accessor.getWidth(size); int height = accessor.getHeight(size); if (width <= maxAllowedWidth && height <= maxAllowedHeight) { if (width >= calcWidth && height >= calcHeight) { calcWidth = (int) width; calcHeight = (int) height; } } } return new Size(calcWidth, calcHeight); } }


The problem is that the drawing code does not take the camera parameters into account. The Mat is drawn onto the surface view in the "deliverAndDrawFrame" function of the "CameraBridgeViewBase" class.

With a very simple modification to the CameraBridgeViewBase class, we can add a function that rotates the way the bitmap is drawn:

int userRotation = 0;

public void setUserRotation(int userRotation) {
    this.userRotation = userRotation;
}

/**
 * This method shall be called by the subclasses when they have valid
 * object and want it to be delivered to external client (via callback) and
 * then displayed on the screen.
 * @param frame - the current frame to be delivered
 */
protected void deliverAndDrawFrame(CvCameraViewFrame frame) {
    Mat modified;

    if (mListener != null) {
        modified = mListener.onCameraFrame(frame);
    } else {
        modified = frame.rgba();
    }

    boolean bmpValid = true;
    if (modified != null) {
        try {
            Utils.matToBitmap(modified, mCacheBitmap);
        } catch (Exception e) {
            Log.e(TAG, "Mat type: " + modified);
            Log.e(TAG, "Bitmap type: " + mCacheBitmap.getWidth() + "*" + mCacheBitmap.getHeight());
            Log.e(TAG, "Utils.matToBitmap() throws an exception: " + e.getMessage());
            bmpValid = false;
        }
    }

    if (bmpValid && mCacheBitmap != null) {
        Canvas canvas = getHolder().lockCanvas();
        if (canvas != null) {
            canvas.drawColor(Color.parseColor("#8BC34A"), PorterDuff.Mode.SRC_IN);

            // this is the rotation part
            canvas.save();
            canvas.rotate(userRotation, (canvas.getWidth() / 2), (canvas.getHeight() / 2));

            if (mScale != 0) {
                canvas.drawBitmap(mCacheBitmap,
                        new Rect(0, 0, mCacheBitmap.getWidth(), mCacheBitmap.getHeight()),
                        new Rect((int) ((canvas.getWidth() - mScale * mCacheBitmap.getWidth()) / 2),
                                (int) ((canvas.getHeight() - mScale * mCacheBitmap.getHeight()) / 2),
                                (int) ((canvas.getWidth() - mScale * mCacheBitmap.getWidth()) / 2 + mScale * mCacheBitmap.getWidth()),
                                (int) ((canvas.getHeight() - mScale * mCacheBitmap.getHeight()) / 2 + mScale * mCacheBitmap.getHeight())),
                        null);
            } else {
                canvas.drawBitmap(mCacheBitmap,
                        new Rect(0, 0, mCacheBitmap.getWidth(), mCacheBitmap.getHeight()),
                        new Rect((canvas.getWidth() - mCacheBitmap.getWidth()) / 2,
                                (canvas.getHeight() - mCacheBitmap.getHeight()) / 2,
                                (canvas.getWidth() - mCacheBitmap.getWidth()) / 2 + mCacheBitmap.getWidth(),
                                (canvas.getHeight() - mCacheBitmap.getHeight()) / 2 + mCacheBitmap.getHeight()),
                        null);
            }

            if (mFpsMeter != null) {
                mFpsMeter.measure();
                mFpsMeter.draw(canvas, 20, 30);
            }

            // remember to restore the canvas
            canvas.restore();
            getHolder().unlockCanvasAndPost(canvas);
        }
    }
}
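
For example, from the activity you could then set the rotation once the view has been resolved (a sketch that assumes the modified class above; the exact angle depends on the device and layout):

mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.HelloOpenCvView);
// rotate the drawn preview by 90 degrees clockwise
mOpenCvCameraView.setUserRotation(90);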

I tried the most common solution, which uses Core.flip to rotate the Mat, but it consumes a lot of resources. This solution does not affect detection and does not hurt performance; it only changes the way the image is drawn on the canvas.

I hope this helps.


Try this in your onCameraFrame:

mRgba = inputFrame.rgba();
// transpose + horizontal flip = rotate the frame 90 degrees clockwise
// (flip mRgbaT in place instead of transposing a second time)
Mat mRgbaT = mRgba.t();
Core.flip(mRgbaT, mRgbaT, 1);
Imgproc.resize(mRgbaT, mRgbaT, mRgba.size());
return mRgbaT;
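
Transpose plus a horizontal flip (flip code 1) rotates the frame 90 degrees clockwise, which is usually what the back camera needs. For the front camera you typically want the opposite direction, matching the preRotate(270) used for the front camera in the matrix-based answer above; a sketch, assuming the same onCameraFrame:

mRgba = inputFrame.rgba();
// front camera: transpose + vertical flip = rotate 90 degrees counter-clockwise
Mat mRgbaT = mRgba.t();
Core.flip(mRgbaT, mRgbaT, 0);
Imgproc.resize(mRgbaT, mRgbaT, mRgba.size());
return mRgbaT;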