Newbie trying and failing at using resource drawable in image detection.

Richard

Apr 16, 2012, 1:41:46 PM
to android...@googlegroups.com
I'll start with my terrible newbie code for reference.

package com.objectdetectionpoc;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.opencv.android.Utils;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.CvType;
import org.opencv.highgui.Highgui;
import org.opencv.imgproc.Imgproc;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.Features2d;
import org.opencv.features2d.KeyPoint;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.DMatch;

import android.content.Context;
import android.graphics.Bitmap;
import android.view.SurfaceHolder;
import android.util.Log;

class Sample1View extends SampleViewBase {
    private Mat mYuv;
    private Mat mRgba;
    private Mat mGraySubmat;
    private Mat mIntermediateMat;
    private Mat mIntermediateMat2;

    private Mat img1;
    private Mat descriptors;
    private List<KeyPoint> keypoints;
    private FeatureDetector detector;
    private DescriptorExtractor descriptor;
    private DescriptorMatcher matcher;

    private static final double THETA = Math.PI / 180;
    private static final Scalar GREEN = new Scalar(0, 255, 0);
    private static final Scalar RED = new Scalar(255, 0, 0);
    private static final Scalar BLUE = new Scalar(0, 0, 255);
    private static final List<Byte> MATCH_MASK = new ArrayList<Byte>();

    public Sample1View(Context context) {
        super(context);

        try {
            img1 = Utils.loadResource(getContext(), R.drawable.wings);
        } catch (IOException e) {
            // TODO Auto-generated catch block
            Log.w("Activity::LoadResource", "Unable to load resource R.drawable.wings");
            e.printStackTrace();
        }
        descriptors = new Mat();
        keypoints = new ArrayList<KeyPoint>();
        detector = FeatureDetector.create(FeatureDetector.FAST);
        detector.detect(img1, keypoints);
        descriptor = DescriptorExtractor.create(DescriptorExtractor.ORB);
        descriptor.compute(img1, keypoints, descriptors);
        matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
    }

    @Override
    public void surfaceChanged(SurfaceHolder _holder, int format, int width, int height) {
        super.surfaceChanged(_holder, format, width, height);

        synchronized (this) {
            // initialize Mats before usage
            mYuv = new Mat(getFrameHeight() + getFrameHeight() / 2, getFrameWidth(), CvType.CV_8UC1);
            mGraySubmat = mYuv.submat(0, getFrameHeight(), 0, getFrameWidth());

            mRgba = new Mat();
            mIntermediateMat = new Mat();
            mIntermediateMat2 = new Mat();
        }
    }

    @Override
    protected Bitmap processFrame(byte[] data) {
        mYuv.put(0, 0, data);

        switch (AndroidObjectDetectionPOCActivity.viewMode) {
        case AndroidObjectDetectionPOCActivity.VIEW_MODE_GRAY:
            Imgproc.cvtColor(mGraySubmat, mRgba, Imgproc.COLOR_GRAY2RGBA, 4);
            break;
        case AndroidObjectDetectionPOCActivity.VIEW_MODE_RGBA:
            Imgproc.cvtColor(mYuv, mRgba, Imgproc.COLOR_YUV420sp2RGB, 4);
            Core.putText(mRgba, "OpenCV + Android", new Point(10, 100), 3 /* CV_FONT_HERSHEY_COMPLEX */, 2, new Scalar(255, 0, 0, 255), 3);
            break;
        case AndroidObjectDetectionPOCActivity.VIEW_MODE_CANNY:
            Imgproc.Canny(mGraySubmat, mIntermediateMat, 80, 100);
            Imgproc.cvtColor(mIntermediateMat, mRgba, Imgproc.COLOR_GRAY2BGRA, 4);
            break;
        case AndroidObjectDetectionPOCActivity.VIEW_MODE_CIRCLE:
            Imgproc.Canny(mGraySubmat, mIntermediateMat, 80, 100);
            Imgproc.HoughCircles(mIntermediateMat, mIntermediateMat2, Imgproc.CV_HOUGH_GRADIENT, 16, 300);
            Imgproc.cvtColor(mIntermediateMat, mRgba, Imgproc.COLOR_GRAY2BGRA, 4);
            Log.i("CircleDraw::mIntermediateMat", mIntermediateMat2.cols() + " " + mIntermediateMat2.rows());
            for (int i = 0; i < mIntermediateMat2.cols(); i++) {
                double vCircle[] = mIntermediateMat2.get(0, i);
                Log.i("CircleDraw::vCircle0", vCircle[0] + "");
                Log.i("CircleDraw::vCircle1", vCircle[1] + "");
                Log.i("CircleDraw::vCircle2", vCircle[2] + "");
                int radius = (int) vCircle[2];
                Core.circle(mRgba, new Point(vCircle[0], vCircle[1]), radius, GREEN);
            }
            break;
        case AndroidObjectDetectionPOCActivity.VIEW_MODE_LINE:
            Imgproc.Canny(mGraySubmat, mIntermediateMat, 80, 100, 3);
            Imgproc.HoughLinesP(mIntermediateMat, mIntermediateMat2, 1, THETA, 100, 500, 10);
            Imgproc.cvtColor(mIntermediateMat, mRgba, Imgproc.COLOR_GRAY2BGRA, 4);
            Log.i("LineDraw::mIntermediateMat", mIntermediateMat2.cols() + " " + mIntermediateMat2.rows());
            for (int i = 0; i < mIntermediateMat2.cols(); i++) {
                double vec[] = mIntermediateMat2.get(0, i);
                Log.i("LineDraw::vec0", vec[0] + "");
                Log.i("LineDraw::vec1", vec[1] + "");
                Log.i("LineDraw::vec2", vec[2] + "");
                Log.i("LineDraw::vec3", vec[3] + "");
                Point start = new Point(vec[0], vec[1]);
                Point end = new Point(vec[2], vec[3]);
                Core.line(mRgba, start, end, RED, 3);
            }
            break;
        case AndroidObjectDetectionPOCActivity.VIEW_MODE_DETECT:
            // List<KeyPoint> mKeyPoints = new ArrayList<KeyPoint>();
            // List<DMatch> matches = new ArrayList<DMatch>();
            // detector.detect(mGraySubmat, mKeyPoints);
            // descriptor.compute(mGraySubmat, mKeyPoints, mIntermediateMat);
            // matcher.match(mIntermediateMat, descriptors, matches);
            // Imgproc.cvtColor(mGraySubmat, mRgba, Imgproc.COLOR_GRAY2BGRA, 4);
            // Features2d.drawMatches(img1, keypoints, mGraySubmat, mKeyPoints, matches, mRgba, GREEN, RED, MATCH_MASK, 0);
            Bitmap bm1 = Bitmap.createBitmap(img1.cols(), img1.rows(), Bitmap.Config.RGB_565);
            Utils.matToBitmap(img1, bm1);
            Bitmap bm2 = bm1.copy(Bitmap.Config.ARGB_8888, false);
            mRgba = Utils.bitmapToMat(bm2);
            // Imgproc.resize(img1, mRgba, mRgba.size());
            break;
        }

        Bitmap bmp = Bitmap.createBitmap(getFrameWidth(), getFrameHeight(), Bitmap.Config.ARGB_8888);

        if (Utils.matToBitmap(mRgba, bmp))
            return bmp;

        bmp.recycle();
        return null;
    }

    @Override
    public void run() {
        super.run();

        synchronized (this) {
            // Explicitly deallocate Mats
            if (mYuv != null)
                mYuv.release();
            if (mRgba != null)
                mRgba.release();
            if (mGraySubmat != null)
                mGraySubmat.release();
            if (mIntermediateMat != null)
                mIntermediateMat.release();

            mYuv = null;
            mRgba = null;
            mGraySubmat = null;
            mIntermediateMat = null;
        }
    }
}


So I'm playing with the tutorial files to try and get image detection working (eventually), and where I'm running into problems is that the PNG file (wings.png) I have in my drawable folders gets scrambled when I put it through this code. Specifically...

Imgproc.resize(img1, mRgba, mRgba.size());

Produces the attached img1.jpg

Bitmap bm1 = Bitmap.createBitmap(img1.cols(), img1.rows(), Bitmap.Config.RGB_565);
Utils.matToBitmap(img1, bm1);
Bitmap bm2 = bm1.copy(Bitmap.Config.ARGB_8888, false);
mRgba = Utils.bitmapToMat(bm2);

Produces the attached img2.jpg

Both are followed immediately by the app terminating with no error output in the log. I've tried everything with no luck, either making errors or ...

I think it has to be something to do with the color format of the image I'm including in the drawable folder, but I can't find any conversion that doesn't throw an error.
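
(For what it's worth, a quick untested check that could go right after the loadResource call, just to see what format the Mat actually comes back in; the "Debug::img1" tag is made up for this snippet.)

// Untested debugging snippet: log what Utils.loadResource actually returned.
Log.i("Debug::img1", "channels=" + img1.channels()
        + " type=" + CvType.typeToString(img1.type())
        + " size=" + img1.cols() + "x" + img1.rows());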

Can any wiser, older Android OpenCV developers offer a solution to this, or point out the noob mistake I've probably made somewhere?

P.S. The commented-out detector code does almost the same thing as img1.jpg.

(Attachments: img1.jpg, img2.jpg)

Richard

Apr 16, 2012, 3:30:52 PM
to android...@googlegroups.com
OK, I think it has something to do with the alpha channel of the image. I saved the image with the alpha channel enabled and got something closer to what I expected, but the colors are all messed up. So "the PNG must have an alpha channel" is the moral of the story so far. Let's see if I can't fix this strange color issue. :)
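
(Untested idea along those lines, my own sketch rather than something I've run: make the channel count explicit after loading, instead of relying on how the PNG was saved.)

// Untested sketch: force a 4-channel RGBA Mat regardless of whether the PNG had alpha.
if (img1.channels() == 3) {
    Imgproc.cvtColor(img1, img1, Imgproc.COLOR_RGB2RGBA);
}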

Richard

Apr 16, 2012, 4:18:38 PM
to android...@googlegroups.com
Fixed the color with Imgproc.cvtColor(img1, mIntermediateMat, Imgproc.COLOR_RGBA2BGRA); so RGBA -> BGRA. However, the alpha channel is still messed up and displays strange pixels all around the edge. They also seem to be animating, which confuses me.

Richard

Apr 18, 2012, 9:57:49 AM
to android...@googlegroups.com
Success! So here's how I fixed it.

Removed transparency from the resource drawable. (More of a workaround than a fix.)

I needed to convert the resource drawable out of RGBA:
Imgproc.cvtColor(img1, img1, Imgproc.COLOR_RGBA2GRAY);

Then I had to make a canvas that could fit the result:
private Size resultSize = new Size();

// inside the constructor, added:
resultSize.width = img1.cols() + getFrameWidth() + 100;
resultSize.height = img1.rows() + getFrameHeight() + 100;

Uncommented my detector code with a few modifications, added a resize so I don't get that weird no-error crash, and a cvtColor to move the result into BGRA color format:
case AndroidObjectDetectionPOCActivity.VIEW_MODE_DETECT:
    List<KeyPoint> mKeyPoints = new ArrayList<KeyPoint>();
    List<DMatch> matches = new ArrayList<DMatch>();
    detector.detect(mGraySubmat, mKeyPoints);
    descriptor.compute(mGraySubmat, mKeyPoints, mIntermediateMat);
    matcher.match(mIntermediateMat, descriptors, matches);
    mIntermediateMat2.create(resultSize, CvType.CV_8UC1);
    Features2d.drawMatches(img1, keypoints, mGraySubmat, mKeyPoints, matches, mIntermediateMat2,
            GREEN, RED, MATCH_MASK, Features2d.NOT_DRAW_SINGLE_POINTS);
    Imgproc.resize(mIntermediateMat2, mIntermediateMat2, mRgba.size());
    Imgproc.cvtColor(mIntermediateMat2, mRgba, Imgproc.COLOR_RGBA2BGRA, 4);
    break;

And that's a bingo! I have the beginnings of image detection, with a poor 1 fps frame rate on a Galaxy Tab!

Rui Marques

Apr 18, 2012, 11:57:24 AM
to android...@googlegroups.com
Have the following in mind:

- OpenCV functions treat matrices as if they were BGR; that's why you should convert the frame that comes from the camera from RGB to BGR, and after all the processing, BGR -> RGB.

- When you use transparency (RGBA or BGRA) you have to specify colours with 4 parameters:

- Scalar(r, g, b, 255) for zero transparency

- Scalar(r, g, b, 0) for full transparency

- If you use Scalar(r, g, b) with RGBA matrices, I think the result is full transparency (small example below).

(This is from my experience, everybody feel free to correct.)
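
For illustration, a minimal untested sketch of those last points (my own example, not from the post above): drawing on an RGBA frame like mRgba with the alpha value made explicit.

// Hypothetical illustration: explicit alpha when drawing on an RGBA Mat.
Scalar opaqueGreen      = new Scalar(0, 255, 0, 255); // alpha 255 -> zero transparency
Scalar transparentGreen = new Scalar(0, 255, 0, 0);   // alpha 0   -> full transparency
Core.circle(mRgba, new Point(100, 100), 30, opaqueGreen, 2);      // visible
Core.circle(mRgba, new Point(200, 100), 30, transparentGreen, 2); // likely not visible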