此代码在 Android 上遇到障碍,知道为什么吗?

This code hits a roadblock on Android, any idea why?

下面的方法 运行 在 ios Iphone 5、Mac、PC 和 Web 上表现很好,但在 Android 上速度很慢。这是在 unity 5 c# 中。

using UnityEngine;
using UnityEngine.UI;
using System.Collections;
using OpenCVForUnity;

public class MouthDetect : MonoBehaviour
{

    private WebCamTexture _webCamTexture;
    private Color32[] _colors;
    public bool isFrontFacing = true;
    public int webCamTextureWidth = 640;
    public int webCamTextureHeight = 480;
    public int webCamTextureFPS = 10;
    private string _firstCam; // For debuging with no front facing camera.
    private Mat _rgbaMat;
    private Mat _grayMat;
    private Mat _mouthMATGray;
    private Mat _mouthMATRGBA;
    private Texture2D _texture;
    private CascadeClassifier _cascadeFace;
    private CascadeClassifier _cascadeMouth;
    private MatOfRect _faces;
    private MatOfRect _mouths;
    private bool _initDone = false;
    private OpenCVForUnity.Rect _mouthROI = new OpenCVForUnity.Rect();

    // Chew detection
    private UnityEngine.Rect _rectMouth;
    private int _mouthWidth;  // 
    private int _mouthHeight;
    private double _totalBright = new double();  // Total brightness inside the mouth
    private double _lastBright = 0; // The last extreme value of open or closed.
    private Texture2D _textMouth;
    private bool _mouthIsOpenBoolean = false;  // Is the mouth open? boolean
    public double mouthChangeDiff = 0.2;  // The difference between brightness before a mout is opened or closed.  Smaller detects more chews but can be prone to false chews.
    public int mouthDetectPixelCount = 10;  // How many pixels should we use for detection?
    private bool _mouthFound = false;
    private bool _faceFound = false;

    // Debug logic
    private GameObject _previewToggle;
    private GameObject _previewScreen;
    private Image _previewImage;
    private FaceDetPreview _previewScript;
    private bool _debugPreviewOn = false;
    private Sprite _webCamSprite;
    private string _cameraInf = "";
    private GameObject _cameraList;
    private DebugCameras _cameraListScript;
    private string _selectedCamera = "";

    // Create an array of OpenCV rectangles from the array of faces. Andy
    OpenCVForUnity.Rect[] rects;
    // Create an array of OpenCV rectangles from the array of mouths.
    OpenCVForUnity.Rect[] rectsM;

    // Detection logic
    public int logicSolution = 1; // Which solution are we using?  0 = brightness, 1 = find/lose mouth.
    private bool _capturingFace = false;

    public bool mouthIsOpenBoolean {
        get { return _mouthIsOpenBoolean; }
        set { 
            // Make sure we've changed.
            if (_mouthIsOpenBoolean==value) 
                return;

            _mouthIsOpenBoolean = value; 

            // Add a chew if we've changed to false
            if (value == false) {
                GameObject chewText = GameObject.Find ("Chews");
                Chewing chewing = chewText.GetComponent<Chewing>();
                chewing.AddChew();
            }
        }
    }


    // Use this for initialization
    void Start ()
    {
        // Get the debug preview toggle and image
        _previewToggle = GameObject.Find ("FacePreviewToggle");
        _previewScript = _previewToggle.GetComponent<FaceDetPreview> ();
        _previewScreen =  GameObject.Find ("FacePreview");
        _previewImage = _previewScreen.GetComponent<Image> ();
        _cameraList = GameObject.Find ("Cameras");
        if(_cameraList!=null)
            _cameraListScript = _cameraList.GetComponent<DebugCameras>();

        // Setup camera and texture
        StartCoroutine (init ());
    }

    private IEnumerator init ()
    {
        // Have we already initialised?  Restart if we have.
        if (_webCamTexture != null) {
            _webCamTexture.Stop ();
            _initDone = false;

            _rgbaMat.Dispose ();
            _grayMat.Dispose ();
        }

        // Checks how many and which cameras are available on the device
        _cameraInf = "Cameras=" + WebCamTexture.devices.Length.ToString ();
        for (int cameraIndex = 0; cameraIndex < WebCamTexture.devices.Length; cameraIndex++) {
            // Debug information
            AddToDebug( "Camera " + cameraIndex.ToString() + " - " + WebCamTexture.devices [cameraIndex].name + 
                       " - Front:" + WebCamTexture.devices [cameraIndex].isFrontFacing);

            // We want to be using the correct camera, make sure it's front facing
            if (WebCamTexture.devices [cameraIndex].isFrontFacing == isFrontFacing) {
                // Tell us which camera we're using
                Debug.Log (cameraIndex + " name " + WebCamTexture.devices [cameraIndex].name + " isFrontFacing " + WebCamTexture.devices [cameraIndex].isFrontFacing);
                Debug.Log("Here");

                // Set the camera texture
                _webCamTexture = new WebCamTexture (WebCamTexture.devices [cameraIndex].name, webCamTextureWidth, webCamTextureHeight);
                break;
            }
            else {

                // We only want the first one
                if (_firstCam=="") {
                    _firstCam=WebCamTexture.devices [cameraIndex].name;

                }
            }
        }

        // Did we find a camera?
        if (_webCamTexture == null) {
            // If we're in unity this is ok...
            #if UNITY_EDITOR
            // Just create a new camera
            _webCamTexture = new WebCamTexture (_firstCam, webCamTextureWidth, webCamTextureHeight);
            #endif

            // ****** We need a stop here!!!!!! ***************************************************************************
            // TODO: The device has no forward facing camera
            // We can't chew without a front facing camera!!!
        }

        // Debug...
        AddToDebug ("Selected Camera: " + _webCamTexture.deviceName);
        Debug.Log(_webCamTexture.deviceName);

        // How many fps do we want??  We need to control the performance.  Although, this request is usually ignored! :/
        _webCamTexture.requestedFPS = webCamTextureFPS;

        // Start the camera
        _webCamTexture.Play ();

        while (true) {
            //If you want to use webcamTexture.width and webcamTexture.height on iOS, you have to wait until webcamTexture.didUpdateThisFrame == 1, otherwise these two values will be equal to 16. (http://forum.unity3d.com/threads/webcamtexture-and-error-0x0502.123922/)
            if (_webCamTexture.width > 16 && _webCamTexture.height > 16) {
                // Create a new colour and matrices
                _colors = new Color32[_webCamTexture.width * _webCamTexture.height];
                _rgbaMat = new Mat (_webCamTexture.height, _webCamTexture.width, CvType.CV_8UC4);
                _grayMat = new Mat (_webCamTexture.height, _webCamTexture.width, CvType.CV_8UC1);
                _mouthMATRGBA = new Mat (_webCamTexture.height, _webCamTexture.width, CvType.CV_8UC4);
                _mouthMATGray = new Mat (_webCamTexture.height, _webCamTexture.width, CvType.CV_8UC1);
                _texture = new Texture2D (_webCamTexture.width, _webCamTexture.height, TextureFormat.RGBA32, false);
                _textMouth = new Texture2D (_webCamTexture.width, _webCamTexture.height, TextureFormat.RGBA32, false);

                // Set the rotation
                //              gameObject.transform.eulerAngles = new Vector3 (0, 0, 0);

                // For Android and iPhone the rotation needs to change to correct the z axis
                /*              #if (UNITY_ANDROID || UNITY_IPHONE) && !UNITY_EDITOR
                    gameObject.transform.eulerAngles = new Vector3 (0, 0, -90);
                #endif
*/
                // I'm not sure why this was removed in the sample, I'll leave it out until I figure out what it's ueseful for,  
                // It looks like the developer is trying to dynamically correct rotated webcams.
                //  gameObject.transform.rotation = gameObject.transform.rotation * Quaternion.AngleAxis (_webCamTexture.videoRotationAngle, Vector3.back);

                // It looks like we were dynamically fixing scale????  Why is the dev removing this???  I may re-introduce it.
                /*  bool videoVerticallyMirrored = _webCamTexture.videoVerticallyMirrored;
                    float scaleX = 1;
                    float scaleY = videoVerticallyMirrored ? -1.0f : 1.0f;
                    if (_webCamTexture.videoRotationAngle == 270)
                        scaleY = -1.0f;
                    gameObject.transform.localScale = new Vector3 (scaleX * gameObject.transform.localScale.x, scaleY * gameObject.transform.localScale.y, 1);
                */

                // Create the new classifiers - TODO we have some work to do with this path to make it work with Android and iPhone...
                _cascadeFace = new CascadeClassifier (Utils.getFilePath ("haarcascade_frontalface_alt.xml")); 
                _cascadeMouth = new CascadeClassifier (Utils.getFilePath ("haarcascade_mouth.xml"));  


                // Create a matrix of faces (we'll load it at some point???....)
                _faces = new MatOfRect ();
                _mouths = new MatOfRect ();

                // Set the camera ortho size, don't this this will work with the frog camera.
                Camera.main.orthographicSize = _webCamTexture.width / 2;

                // We're initilised
                _initDone = true;

                break;
            } 
            else {
                // Come back and try again, we're not ready yet.
                yield return 0;
            }
        }
    }

    // Update is called once per frame
    void Update ()
    {
        // Make sure we've initialised.
        if ((_initDone) && (!_capturingFace)) {
            StartCoroutine ("CaptureFace");
        }
    }

    private IEnumerator CaptureFace() {
        _capturingFace = true;

        // Make sure that the texture has been correctly formated, if not we'll have to come back later.
        if (_webCamTexture.width > 16 && _webCamTexture.height > 16) {

            // Pass the web cam texture to a OpenCV matrix
            Utils.webCamTextureToMat (_webCamTexture, _rgbaMat, _colors);

            // iPhones buggering about with mirroring again...
            #if UNITY_IPHONE && !UNITY_EDITOR
            // Flip if neccessary
            if (_webCamTexture.videoVerticallyMirrored){
                if(isFrontFacing){
                    Core.flip (_rgbaMat, _rgbaMat, 1);
                }else{
                    Core.flip (_rgbaMat, _rgbaMat, 0);
                }
            }else{
                if(isFrontFacing){
                    Core.flip (_rgbaMat, _rgbaMat, -1);
                }
            }
            #endif

            // Convert the rgb web texture matrix to gray
            Imgproc.cvtColor (_rgbaMat, _grayMat, Imgproc.COLOR_RGBA2GRAY);

            // Adjust the contrast - this can impact performance, try without for faster performance
            Imgproc.equalizeHist (_grayMat, _grayMat);

            // Set the cascade to detect different sized targets
            if (_cascadeFace != null)
                _cascadeFace.detectMultiScale (_grayMat, _faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                                               new Size (_webCamTexture.width * 0.15, _webCamTexture.width * 0.15), new Size ());

            // Create an array of OpenCV rectangles from the array of faces.
            rects = _faces.toArray ();

            // Find a mouth in each face each face.
            _faceFound = false;
            for (int i = 0; i < rects.Length; i++) {

                // We found a face
                _faceFound = true;

                // For debugging, show us where the face is.
                Core.rectangle (
                    _rgbaMat, 
                    new Point (rects [i].x ,rects [i].y), 
                    new Point (rects [i].x + rects [i].width, rects [i].y + rects [i].height), 
                    new Scalar (0, 255, 0, 255), 
                    2);

                // Create a rectangle around the region of the face we're interested in
                // Keep it inside the face box, otherwise we get errors when we're at the edge of the screen
                /*              OpenCVForUnity.Rect _mouthROI = new OpenCVForUnity.Rect(
                    (int)rects [i].x,
                    (int)rects [i].y + (rects [i].height / 2),
                    (int)rects [i].width,
                    (int)(rects [i].height- (rects [i].height / 2)));
*/              OpenCVForUnity.Rect _mouthROI = new OpenCVForUnity.Rect(            // IMPROVED SENSITIVITY!!!!
                                                                             (int)rects [i].x,
                                                                             (int)rects [i].y + ((rects [i].height / 3) * 2),
                                                                             (int)rects [i].width,
                                                                             (int)(rects [i].height - ((rects [i].height / 3) * 2)));

                // Create a new matrix using the ROI 
                _mouthMATGray = _grayMat.submat(_mouthROI);
                _mouthMATRGBA = _rgbaMat.submat(_mouthROI);

                // Detect the mouth (we're only going to use the first mouth captured
                // Set the cascade to detect different sized targets
                if (_cascadeMouth != null)
                    _cascadeMouth.detectMultiScale (_mouthMATGray, _mouths, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                                                    new Size (_webCamTexture.width * 0.04, _webCamTexture.width * 0.04), new Size ());
                // Create an array of OpenCV rectangles from the array of mouths.
                rectsM = _mouths.toArray ();

                // Put a rectangle around the first mouth on each face
                _mouthFound = false;
                for (int j = 0; j < rectsM.Length; j++) {
                    // Set the mouth box and make sure that the x and y are correct (remember, these co-ords are inside the ROI)
                    _rectMouth = new UnityEngine.Rect(
                        rectsM[j].x + _mouthROI.x,
                        _webCamTexture.height - _mouthROI.y - rectsM[j].y - rectsM[j].height,
                        rectsM[j].width,
                        rectsM[j].height
                        );

                    // We've found a mouth! 
                    _mouthFound = true;

                    Core.rectangle (
                        _rgbaMat, 
                        new Point (rectsM [j].x + _mouthROI.x,rectsM [j].y + _mouthROI.y), 
                        new Point (rectsM [j].x + rectsM [j].width + _mouthROI.x, rectsM [j].y + rectsM [j].height + _mouthROI.y), 
                        new Scalar (0, 0, 255, 255), 
                        2);
                    break;
                }
            }

            // Add the output to the asssigned texture!
            Utils.matToTexture2D (_rgbaMat, _texture, _colors);

            // Are we debugging?
            _debugPreviewOn = _previewScript.isToggled;
            if (_debugPreviewOn) {
                _webCamSprite = Sprite.Create(
                    _texture, 
                    new UnityEngine.Rect(
                    0,
                    0,
                    _texture.width,
                    _texture.height), 
                    new Vector2(0,0));
                _previewImage.sprite = _webCamSprite;
            }

            //Debug.Log("Face:" + _faceFound.ToString() + " Mouth:" + _mouthFound.ToString());



            // Which version are we using??  The fact that we have a mouth with satisfy the "find/lose mouth" method
            if (logicSolution==1) {
                // We're setting the oposite of mouth found (ie, if we've found the mouth it's closed
                mouthIsOpenBoolean = !_mouthFound;
            }

            // Only continue for solution 0 and we have a mouth.
            if ((logicSolution==0) && (_mouthFound==true)) {
                Debug.Log("We shouldn't see this");

                // How high and wide are the mouth
                _mouthWidth = (int)_rectMouth.width;
                _mouthHeight = (int)_rectMouth.height;

                // Create the mouth texture
                Color[] pixels = _texture.GetPixels(
                    (int)_rectMouth.x,
                    (int)_rectMouth.y,
                    (int)_rectMouth.width,
                    (int)_rectMouth.height);
                _textMouth.Resize(_mouthWidth, _mouthHeight);
                _textMouth.Apply();
                _textMouth.SetPixels(pixels);
                _textMouth.Apply();

                // Work out detection pixels
                int mouthCentre = _mouthWidth / 2;
                _totalBright = 0;
                int mouthGap = _mouthHeight / mouthDetectPixelCount;
                UnityEngine.Rect rectShapePreview = 
                    new UnityEngine.Rect((float)mouthCentre - 1, (float)mouthGap,3, (float)mouthGap*(float)mouthDetectPixelCount);

                // Work out the gap between the pixels
                for (int i = 1; i <= mouthDetectPixelCount; i++)
                {
                    // Work out the total brightness
                    int pixelY = mouthGap * i;
                    _totalBright += Brightness(_textMouth.GetPixel(mouthCentre, pixelY));
                }

                // Have we opened or closed.
                //
                // Rules will be (for boolean):
                //  1. The initial change will be 2 tenths in eather direction (double mouthChangeDiff) to change open or closed.  
                //      Smaller = Open, Bigger = Closed.
                //  2. Record the current number into _lastBright (double) and start closed (bool - false) in mouthIsOpenBoolean
                //  3. If the number gets bigger keep updating the _lastBright variable to make note of the "close" brightness.
                //  4. If the number gets smaller measure it, if it's more then 2/10s (or what ever the variable is we're now open:
                //      change the bool to true and record the number.
                //      If it's less just leave it, this will help us if we capture the mouth when it's mid open or mid close.
                //  5. If we've moved to closed do stops to to 4 but in the oposite direction
                //
                // Rules will be (for integer)
                //  1. Use boolean logic but the change diff maximimum is 100 (open - oposite to boolean :S) and minimum is 0 (closed);
                //  2. Use the increment to tie the setting to the nearest increment.

                // Record the current number into _lastBright if this is the first capture
                if (_lastBright == 0) {
                    _lastBright = _totalBright;
                } 
                else {
                    //Get the difference
                    double diff = new double();
                    diff = _lastBright - _totalBright;

                    // We need a positive diff too
                    double posDiff = diff;
                    if (posDiff < 0) {posDiff = -posDiff;}

                    // Have we gone more open or more closed?
                    if (((_lastBright > _totalBright) && (mouthIsOpenBoolean)) ||
                        ((_lastBright < _totalBright) && (!mouthIsOpenBoolean))) {

                        // We're going further in the same direction, log the last brighness
                        _lastBright = _totalBright;
                    }
                    else {
                        // We've changed direction.  If it is more than the diff level set???
                        if (posDiff > mouthChangeDiff) {
                            // We've changed direction!
                            if (mouthIsOpenBoolean) {
                                mouthIsOpenBoolean = false;
                            }
                            else {
                                mouthIsOpenBoolean = true;
                            }

                            // Set the new last brightness
                            _lastBright = _totalBright;
                        }   
                    }
                }
            }
        }

        yield return new WaitForSeconds (0.1f);     // Massive performance boost.
        _capturingFace = false;
        yield return null;
    }

    void OnDisable ()
    {
        // Stop the webcam.
        _webCamTexture.Stop ();
    }

    void OnGUI ()
    {
        float screenScale = Screen.width / 240.0f;
        Matrix4x4 scaledMatrix = Matrix4x4.Scale (new Vector3 (screenScale, screenScale, screenScale));
        GUI.matrix = scaledMatrix;

        // How big is our GUI Box?
        //      UnityEngine.Rect devRect = new UnityEngine.Rect (1, 1, (_texture.width/10)+2, (_texture.height/10)+90);

    }

    // Return the brighness of the colour of a pixel
    private double Brightness(Color c) 
    {
        return Mathf.Sqrt (
            c.r * c.r * .241f + 
            c.g * c.g * .691f + 
            c.b * c.b * .068f);
    }

    public void AddToDebug(string newLine) {

        // Do we need to add a carraige return
        if (_cameraInf != "") 
            _cameraInf += "\n";

        // Add the line and update us
        _cameraInf += newLine;
        if (_cameraListScript!=null)
            _cameraListScript.cameraList = _cameraInf;

    }

}

特别是在 update() Ienumerator captureface() 中,根据分析器 运行 需要 150 毫秒以上。

因为您使用了 CPU 重函数,特别是

_textMouth.SetPixels(pixels);
_textMouth.Apply();

而且这不是平台相关的问题。