HTML5   发布时间:2022-04-27  发布网站:大佬教程  code.js-code.com
大佬教程收集整理的这篇文章主要介绍了iOS:从背景图像中检索矩形图像大佬教程大佬觉得挺不错的,现在分享给大家,也给大家做个参考。
我正在开发一个实现,我在大背景图像中有一个矩形图像.我试图以编程方式从大图像中检索矩形图像,并从该特定矩形图像中检索文本信息.我正在尝试使用Open-CV第三方框架,但无法从大背景图像中检索矩形图像.有人可以指导我,我怎么能做到这一点?

更新:

我发现Link使用OpenCV找出方形.我可以修改它以找到矩形形状吗?有人可以指导我吗?

更新的最新消息:

我终于得到了代码,下面是它.

/// Converts a UIImage into an OpenCV matrix (CV_8UC4, RGBA with alpha skipped)
/// by redrawing the image into a bitmap context backed by the cv::Mat's buffer.
/// @param image source image; its CGImage color space is reused for the context
/// @return a mat whose pixel data the caller owns (cv::Mat is refcounted)
- (cv::Mat)cvMatWithImage:(UIImage *)image
{
    // Not owned (Get rule) — must not be released here.
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;

    cv::Mat cvMat(rows, cols, CV_8UC4); // 8 bits per component, 4 channels

    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,     // Pointer to backing data
                                                    cols,           // Width of bitmap
                                                    rows,           // Height of bitmap
                                                    8,              // Bits per component
                                                    cvMat.step[0],  // Bytes per row
                                                    colorSpace,     // Colorspace
                                                    kCGImageAlphaNoneSkipLast |
                                                    kCGBitmapByteOrderDefault); // Bitmap info flags

    // Fix: CGRectMake takes (x, y, width, height); the transcription dropped
    // two arguments. Draw over the full extent of the mat.
    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);

    return cvMat;
}
/// Converts an OpenCV matrix back into a UIImage.
/// Grayscale (1-byte element) mats get a gray color space, everything else RGB.
/// @param cvMat matrix to convert; pixel data is copied into an NSData backing store
/// @return an autoreleased UIImage wrapping the mat's pixels
- (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
    // total() was garbled in the transcription; elemSize()*total() is the
    // full byte length of the pixel buffer.
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];
    CGColorSpaceRef colorSpace;
    if ( cvMat.elemSize() == 1 ) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    }
    else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }

    // __bridge cast (no ownership transfer) is required under ARC.
    CGDataProviderRef provider = CGDataProviderCreateWithCFData( (__bridge CFDataRef)data );
    CGImageRef imageRef = CGImageCreate( cvMat.cols,               // width
                                         cvMat.rows,               // height
                                         8,                        // bits per component
                                         8 * cvMat.elemSize(),     // bits per pixel
                                         cvMat.step[0],            // bytes per row
                                         colorSpace,
                                         kCGImageAlphaNone | kCGBitmapByteOrderDefault,
                                         provider,
                                         NULL,                     // decode array
                                         false,                    // no interpolation
                                         kCGRenderingIntentDefault );
    UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
    CGImageRelease( imageRef );
    CGDataProviderRelease( provider );
    CGColorSpaceRelease( colorSpace );
    return finalImage;
}
/// Loads the source image, runs square detection on it, and shows the
/// annotated result in the view controller's image view.
-(void)forOpenCV
{
    // Fix: the selector is imageNamed: — it was lowercased by the scraper.
    imageView = [UIImage imageNamed:@"myimage.jpg"];
    if( imageView != nil )
    {
        cv::Mat tempMat = [imageView CVMat];

        cv::Mat greymat = [self cvMatWithImage:imageView];
        cv::vector<cv::vector<cv::Point> > squares;

        // NOTE(review): squares is passed by value into debugSquares, so the
        // detections pushed there never propagate back to this vector.
        cv::Mat img = [self debugSquares: squares: greymat];

        imageView = [self UIImageFromCVMat: img];

        self.imageView.image = imageView;
    }
}

// Cosine of the angle at vertex pt0 between the rays pt0->pt1 and pt0->pt2.
// The 1e-10 term guards the square root against a zero-length ray.
double angle( cv::Point pt1,cv::Point pt2,cv::Point pt0 ) {
    double ux = pt1.x - pt0.x;
    double uy = pt1.y - pt0.y;
    double vx = pt2.x - pt0.x;
    double vy = pt2.y - pt0.y;
    double dot = ux*vx + uy*vy;
    double normProduct = (ux*ux + uy*uy) * (vx*vx + vy*vy);
    return dot / sqrt(normProduct + 1e-10);
}

/// Finds square-like contours in `image` (classic OpenCV squares.cpp approach:
/// per-channel Canny/threshold + contour polygon approximation), then draws the
/// last detected rectangle onto the image and returns it.
/// NOTE(review): `squares` is taken by value, so detections collected here are
/// local; consider passing by reference if the caller needs them.
- (cv::Mat) debugSquares: (std::vector<std::vector<cv::Point> >) squares : (cv::Mat &)image
{
    NSLog(@"%lu",squares.size());

    // Blurring enhances edge detection.
    cv::Mat blurred = image.clone();
    medianBlur(image, blurred, 9);

    cv::Mat gray0(image.size(), CV_8U), gray;
    std::vector<std::vector<cv::Point> > contours;

    // Find squares in every color plane of the image.
    for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        // Fix: the transcription dropped the `ndsts` argument and extracted the
        // channel from the un-blurred image (leaving `blurred` unused).
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);

        // Try several threshold levels.
        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
            if (l == 0)
            {
                // Use Canny instead of a zero threshold level:
                // Canny helps to catch squares with gradient shading.
                Canny(gray0, gray, 10, 20, 3);

                // Dilation helps to remove potential holes between edge segments.
                dilate(gray, gray, cv::Mat(), cv::Point(-1,-1));
            }
            else
            {
                gray = gray0 >= (l+1) * 255 / threshold_level;
            }

            // Find contours and store them in a list.
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            // Test contours.
            std::vector<cv::Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
                // Approximate the contour with accuracy proportional
                // to the contour perimeter.
                approxPolyDP(cv::Mat(contours[i]), approx,
                             arcLength(cv::Mat(contours[i]), true)*0.02, true);

                // Note: the absolute value of the area is used because
                // the sign depends on the contour orientation.
                if (approx.size() == 4 &&
                    fabs(contourArea(cv::Mat(approx))) > 1000 &&
                    isContourConvex(cv::Mat(approx)))
                {
                    double maxCosine = 0;

                    // Check that every corner is close to a right angle.
                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    if (maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }
    }

    NSLog(@"squares.size(): %lu",squares.size());


    for( size_t i = 0; i < squares.size(); i++ )
    {
        cv::Rect rectangle = boundingRect(cv::Mat(squares[i]));
        NSLog(@"rectangle.x: %d",rectangle.x);
        NSLog(@"rectangle.y: %d",rectangle.y);

        if(i == squares.size()-1) // draw only the last detected rectangle
        {
            const cv::Point* p = &squares[i][0];

            int n = (int)squares[i].size();

            NSLog(@"%d",n);

            // NOTE(review): hard-coded coordinates from the author's test image;
            // the Scalar arguments were garbled — assumed red — TODO confirm.
            line(image, cv::Point(507,418), cv::Point(507+1776,418+1372),
                 cv::Scalar(255,0,0), 2, 8);

            // Fix: polylines needs the contour count (1) between npts and isClosed.
            polylines(image, &p, &n, 1, true, cv::Scalar(255), 5, CV_AA);

            int fx1 = rectangle.x;
                NSLog(@"X: %d",fx1);
            int fy1 = rectangle.y;
                NSLog(@"Y: %d",fy1);
            int fx2 = rectangle.x + rectangle.width;
                NSLog(@"Width: %d",fx2);
            int fy2 = rectangle.y + rectangle.height;
                NSLog(@"Height: %d",fy2);

            // Diagonal of the bounding rect; Scalar args garbled — assumed green.
            line(image, cv::Point(fx1,fy1), cv::Point(fx2,fy2),
                 cv::Scalar(0,255,0), 2, 8);

        }

    }

    return image;
}

谢谢.

解决方法

下面是一个完整的答案,使用一个小的包装类将C++代码与Objective-C代码分开.

我不得不raise another question on stackoverflow来弥补我糟糕的C++知识 – 但是我已经找到了我们需要的所有东西,以OpenCV自带的squares.cpp示例代码为例,使C++代码与Objective-C代码干净地交互.目的是尽可能保持原始C++代码的原样,并将OpenCV的大部分工作保留在纯C++文件中以实现(不)可移植性.

我已将原来的答案留在原处,因为这似乎超出了编辑范围. The complete demo project is on github

CVViewController.h / CVViewController.m

>纯Objective-C
>通过包装类(wrapper)与OpenCV的C++代码通信……它既不知道也不关心包装器背后是C++在处理这些方法调用.

CVWrapper.h / CVWrapper.mm

>Objective-C++

做得尽可能少,真的只有两件事……

>调用UIImage的Objective-C分类(category),在UIImage与cv::Mat两种图像格式之间相互转换
>介于CVViewController的Obj-C方法与CVSquares的C++(类)函数调用之间

CVSquares.h / CVSquares.cpp

>纯C++
>CVSquares.cpp在类定义中声明公共函数(在本例中为一个静态函数).
这取代了原始文件中main{}的工作.
>我们尽量保持CVSquares.cpp尽可能接近C++原件以便于移植.

CVViewController.m

//remove 'magic numbers' from original C++ source so we can manipulate them from Obj-C
#define TOLERANCE 0.01
#define THRESHOLD 50
#define LEVELS 9

// The controller hands the whole job to the wrapper class and never
// touches C++ directly; it only deals in UIImage.
UIImage* image =
        [CVSquaresWrapper detectedSquaresInImage:self.image
                                       tolerance:TOLERANCE
                                       threshold:THRESHOLD
                                          levels:LEVELS];

CVSquaresWrapper.h

//  CVSquaresWrapper.h

#import <Foundation/Foundation.h>

@interface CVSquaresWrapper : NSObject

/// Runs square detection on `image` and returns the annotated result.
/// @param tolerance forwarded to the C++ code's `tolerance` — presumably the
///        polygon-approximation tolerance; confirm in CVSquares.cpp
/// @param threshold forwarded to the C++ code's `thresh`
/// @param levels forwarded to the C++ code's `N` (threshold levels)
+ (UIImage*) detectedSquaresInImage:(UIImage*)image
                          tolerance:(CGFloat)tolerance
                          threshold:(NSInteger)threshold
                             levels:(NSInteger)levels;

@end

CVSquaresWrapper.mm

//  CVSquaresWrapper.mm
//  wrapper that talks to c++ and to Obj-C classes

#import "CVSquaresWrapper.h"
#import "CVSquares.h"
#import "UIImage+OpenCV.h"

@implementation CVSquaresWrapper

/// Thin bridge: UIImage -> cv::Mat, call the C++ detector, cv::Mat -> UIImage.
+ (UIImage*) detectedSquaresInImage:(UIImage*) image
                          tolerance:(CGFloat)tolerance
                          threshold:(NSInteger)threshold
                             levels:(NSInteger)levels
{
    UIImage* result = nil;

        //convert from UIImage to cv::Mat openCV image format
        //this is a category on UIImage
    cv::Mat matImage = [image CVMat]; 


        //call the C++ class static member function
        //we want this function signature to exactly 
        //mirror the form of the calling method 
    matImage = CVSquares::detectedSquaresInImage (matImage,tolerance,threshold,levels);


        //convert back from cv::Mat openCV image format
        //to UIImage image format (category on UIImage)
    result = [UIImage imageFromCVMat:matImage]; 

    return result;
}

@end

CVSquares.h

//  CVSquares.h

#ifndef __OpenCVClient__CVSquares__
#define __OpenCVClient__CVSquares__

    //class definition
    //in this example we do not need a class 
    //as we have no instance variables and just one static function. 
    //We could instead just declare the function but this form seems clearer

class CVSquares
{
public:
    // Detects squares in `image`, draws them onto it, and returns the mat.
    // Replaces main{} of the original squares.cpp sample.
    static cv::Mat detectedSquaresInImage (cv::Mat image,float tol,int threshold,int levels);
};

#endif /* defined(__OpenCVClient__CVSquares__) */

CVSquares.cpp

//  CVSquares.cpp

#include "CVSquares.h"

using namespace std;
using namespace cv;

    //file-scope defaults; overwritten by detectedSquaresInImage's arguments
static int thresh = 50,N = 11;
static float tolerance = 0.01;

    //declarations added so that we can move our 
    //public function to the top of the file
static void findSquares(  const Mat& image,vector<vector<Point> >& squares );
static void drawSquares( Mat& image,vector<vector<Point> >& squares );

    //this public function performs the role of 
    //main{} in the original file (main{} is deleted)
cv::Mat CVSquares::detectedSquaresInImage (cv::Mat image,int levels)
{
    vector<vector<Point> > squares;

    if( image.empty() )
        {
        cout << "Couldn't load " << endl;
        }

    tolerance = tol;
    thresh = threshold;
    N = levels;
    findSquares(image,squares);
    drawSquares(image,squares);

    return image;
}


// the rest of this file is identical to the original squares.cpp except:
// main{} is removed
// this line is removed from drawSquares: 
// imshow(wndname,imagE); 
// (Obj-C will do the drawing)

UIImage+OpenCV.h

UIImage上的一个Objective-C分类(category)文件,包含在UIImage和cv::Mat图像格式之间转换的代码.这是你放置那两个方法的地方: - (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat 和 - (cv::Mat)cvMatWithImage:(UIImage *)image
  

//UIImage+OpenCV.h

#import <UIKit/UIKit.h>

/// Category holding the UIImage <-> cv::Mat conversion code; the two
/// conversion methods from the question move here (cvMatWithImage: becomes
/// the instance method CVMat, UIImageFromCVMat: becomes imageFromCVMat:).
@interface UIImage (UIImage_OpenCV)

    //cv::Mat to UIImage
+ (UIImage *)imageFromCVMat:(cv::Mat&)cvMat;

    //UIImage to cv::Mat
- (cv::Mat)CVMat;


@end

这里的方法实现与你的代码没有变化(我们没有传递UIImage进行转换,而是我们引用self)

大佬总结

以上是大佬教程为你收集整理的iOS:从背景图像中检索矩形图像全部内容,希望文章能够帮你解决iOS:从背景图像中检索矩形图像所遇到的程序开发问题。

如果觉得大佬教程网站内容还不错,欢迎将大佬教程推荐给程序员好友。

本图文内容来源于网友网络收集整理提供,作为学习参考使用,版权属于原作者。
如您有任何意见或建议可联系处理。小编QQ:384754419,请注明来意。