// Segmentation code
#include "stdafx.h" //stdafx.h要自己建一个这样的库 // meanshift_segmentation.cpp : 定义控制台应用程序的入口点。 // #include"opencv2/core/core.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "iostream" #pragma comment(lib,"opencv_highgui2413d.lib") #pragma comment(lib,"opencv_core2413d.lib") #pragma comment(lib,"opencv_imgproc2413d.lib") using namespace cv; using namespace std; Mat src, dst; int spatialRad, colorRad, maxPryLevel; int main(int argc, uchar* argv[]) { int maxPyrLevel = 3; //金字塔层数 Mat res; double duration = static_cast<double>(getTickCount()); Mat img = imread("E:/Codes/CprimerPlus/chapter13/分割/31.jpg"); //读图路径 //路径要加引号,一般程序出错最有可能是路径问题,有绝对路径和相对路径之分,绝对路径:/ //相对路径:可直接引用程序下的图片 int spatialRad = 4; //值越大时间会越长 int colorRad = 30; //值越大图像会分割的区域数越小 pyrMeanShiftFiltering(img, res, spatialRad, colorRad, maxPyrLevel); imshow("res", res); RNG rng = theRNG(); Mat mask(res.rows + 2, res.cols + 2, CV_8UC1, Scalar::all(0)); //掩模 for (int y = 0; y < res.rows; y++) { for (int x = 0; x < res.cols; x++) { if (mask.at<uchar>(y + 1, x + 1) == 0) //非0处即为1,表示已经经过填充,不再处理 { //Scalar newVal(rng(256), rng(256), rng(256)); //floodFill(res, mask, Point(x, y), newVal, 0, Scalar::all(5), Scalar::all(5)); //执行漫水填充 } } } imshow("meanShift图像分割", res); imwrite("result.jpg", res); duration = ((double)getTickCount() - duration) / getTickFrequency(); cout << "运行时间" << duration << "秒" << endl; waitKey(); return 0; } Surf部分没什么好写的,基本是OPENCV3书上原装程序,要说所有程序完全是我自己写的只有得到分割后将它扣出来,其他都是主程序,自己修修小数据 #include "stdafx.h" #include "opencv2/core/core.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/opencv.hpp" #include "iostream" using namespace cv; using namespace std; int main(int argc, char** argv) { Mat binary; Mat result = imread("2.jpg", 0);//这里0表示读取图像并转为灰度图即[0-255] Mat tongji; threshold(result, binary, 50, 255, CV_THRESH_BINARY);//阀值化,固定阀值吧,不是自适应的。 //里面的参数分别代表;输入图像;输出图像;最低阀值,往上为1,下为0;这个图像里像素值的最大///值;二值化的表示,也有其他表示方法 Mat element = getStructuringElement(MORPH_RECT, Size(50, 50));//膨胀操作的一个自定义核 Mat out; double maxArea = 0; vector<cv::Point> maxContour; dilate(binary, out, element);//膨胀。一般写前面三个参数,后面有默认的 vector<vector<cv::Point>> contours;//vector是结构体或者容器 findContours(out, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);//寻找轮廓 //里面参数:输入;输出;检测轮廓类别,这个是检测最外围轮廓(还有所有轮廓并放list中,提取所有轮廓//并组织为双层结构,最后一个是提取轮廓建立网状结构);获取轮廓的每个像素。 for (int i = 0; i < contours.size(); i++) { double area = contourArea(contours[i]);//轮廓面积 if (area > maxArea) { maxArea = area; maxContour = contours[i]; } } Rect maxRect = boundingRect(maxContour);//求最大轮廓面积,并返回轮廓的最大矩形 Mat original = imread("1.jpg");//原图 Mat gray = result; Mat hole(gray.size(), CV_8U, Scalar(0));//建立模版方便抠图所用的转换,与原图大小一样 rectangle(hole, Rect(maxRect.x, maxRect.y, maxRect.width, maxRect.height), Scalar(255, 255, 255), -1, 1, 0);//刚刚矩形的位置,在模版相同的矩形位置变白色 double a = maxRect.x; double b = maxRect.y; double c = maxRect.width; double d = maxRect.height; printf("左上点x:%f\n", a); printf("左上点y:%f\n", b); printf("宽:%f\n", c); printf("高:%f\n", d);//显示矩形的位置数据 namedWindow("My hole"); imshow("My hole", hole); Mat crop(original.rows, original.cols, CV_8UC3); original.copyTo(crop, hole);//将原图像拷贝进遮罩图层 namedWindow("My warpPerspective"); imshow("My warpPerspective", crop); imwrite("result.jpg", crop); imshow("image", result); waitKey(); return 0; }