diff --git a/Negcut b/Negcut
new file mode 100644
index 0000000..ebf6ea5
--- /dev/null
+++ b/Negcut
@@ -0,0 +1,52 @@
+import os
+import cv2
+import time
+
+NegPath = 'C:\\detectProject\\Neg\\Negdata'
+
+for root, dirs, files in os.walk(NegPath):
+    for negData in files:
+        print negData
+        myfile = open(NegPath + "\\" + negData)
+        allWords = []
+        line = myfile.readline()
+        i = 0
+        c = 0
+        while line:
+            if c > 1480: break
+            rec = line.split(',')
+            c = c + 1
+            if c % 3 == 0:
+                for i in range(0, len(rec) - 1):
+                    data = rec[i].split(' ')
+                    xmin = data[0]
+                    ymin = int(data[1]) + 140
+                    xmax = data[2]
+                    ymax = int(data[3]) + 140
+                    model = data[4].split('_')
+                    if model[0] == "small":  size = 'S'
+                    if model[0] == "middle": size = 'M'
+                    if model[0] == "large":  size = 'L'
+                    if model[1] == "front": Model = 'front'
+                    if model[1] == "side":  Model = 'side'
+                    if model[1] == "ride":  Model = 'ride'
+                    print xmin, xmax, ymin, ymax
+                    path = "C:/detectProject/Neg/frame/" + str(c - 1) + ".jpg"
+                    img = cv2.imread(path)
+                    # cv2.waitKey(0)
+                    if img is not None:
+                        img2 = img[int(ymin):int(ymax), int(xmin):int(xmax)]
+                        if size == 'S':
+                            img2 = cv2.resize(img2, (50, 100))
+                        if size == 'M':
+                            img2 = cv2.resize(img2, (100, 200))
+                        if size == 'L':
+                            img2 = cv2.resize(img2, (200, 400))
+                        outPath = "C:/detectProject/Neg/Negcut/" + Model + "/" + size + str(c - 1) + 'FILE' + str(i) + time.strftime('-%m-%d-%H-%M', time.localtime(time.time())) + '.jpg'
+                        # cv2.imwrite returns True/False (not None), so test its boolean result directly
+                        if cv2.imwrite(outPath, img2):
+                            print outPath
+            # if word[-1] == ',':
+            #     #allWords.append(word[:-1])
+            # else:
+            #     allWords.append(word)
+            line = myfile.readline()
+        myfile.close()
diff --git a/ShowHOG.cpp b/ShowHOG.cpp
new file mode 100644
index 0000000..b83b853
--- /dev/null
+++ b/ShowHOG.cpp
@@ -0,0 +1,520 @@
+/********************************************************************************************************
+Implementation of the HOG feature descriptor
+Algorithm outline:
+1) Load the image into memory and convert it to a grayscale image grayImg with cvtColor.
+2) Use the first-order Sobel operator to compute the X-direction and Y-direction gradient images of grayImg.
+3) From these two gradient images, use cartToPolar to compute the corresponding angle matrix angleMat and
+   gradient-magnitude matrix magnMat.
+4) Quantize the values in angleMat into the 9 ranges of [0,9); each range corresponds to one HOG bin.
+5) Using the angle as an index, split the magnitude matrix magnMat into 9 magnitude images, one per
+   gradient orientation.
+6) For each of the 9 orientations, compute the integral image of its magnitude image with OpenCV's
+   integral function.
+========= At this point the 9 magnitude integral images, one per gradient orientation, are available =========
+7) Compute the HOG of the whole image: this requires the HOG of every Block, which in turn requires the
+   HOG of every Cell.
+8) Compute a single Cell's HOG: with the 9 integral images already available this reduces to a few
+   additions and subtractions; see cacHOGinCell.
+9) Compute a single Block's HOG: concatenate the HOGs of its 4 Cells.
+10) Compute the whole image's HOG: concatenate the HOG vectors of all Blocks head to tail into one
+    high-dimensional feature vector. This vector is the image's HOG feature and can also be fed to an SVM
+    for classification.
+Tricky points:
+1) The integral image: many blog posts about it are inaccurate; the best reference is the OpenCV
+   documentation of the integral function, read together with other material.
+2) Conversion between Cartesian and polar coordinates (the key is understanding its preconditions).
+3) L1 vs. L2 norm: when calling normalize, check whether CV_L2 means the L2 norm of a vector or of a
+   matrix; derive the formula yourself if in doubt.
+4) The original HOG paper does not use integral images; they are used here to speed up the computation.
+   Computing gradients first and then the orientations and magnitudes region by region would be much more
+   expensive and would hurt performance.
+5) Each Cell here is 20x20 pixels and each Block consists of 4 Cells; for pedestrian detection other
+   layouts such as 3x3 or 5x5 cells per block can also be used.
+*********************************************************************************************************/
+// The original header names were lost during extraction; the list below is a reconstruction that covers
+// what this file uses (OpenCV, streams, containers, math, and the Win32 file-search API).
+#include <opencv2/core/core.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/opencv.hpp>
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include <string>
+#include <vector>
+#include <cmath>
+#include <cstring>
+#include <windows.h>
+
+using namespace cv;
+using namespace std;
+
+#define NBINS 9
+#define THETA 180 / NBINS
+#define CELLSIZE 4
+#define BLOCKSIZE 2
+#define R (CELLSIZE * (BLOCKSIZE) * 0.5)
+#define IMAGELENGTH 400
+#define IMAGEWIDTH 200
+//static string ImageName
= "CCHN_0018_229730_46_130718_0907_00216_Front1040.xml0"; +//static string ImageName = "imageCCHN_0018_229730_46_130716_2338_00171_Front.mp4460.xml0"; +string ImageName = "imageCCHN_15503_229730_46_130220_0856_00069_Front.mp4205200"; +//string ImageName = "L1firstimageFILE0185190"; +string ModelName = "SVM_Lw"; +string ModelPath = "C:\\detectProject\\data\\sourceData\\MODEL\\\\"; + +/******************************************************************************************************** +函数功能: +计算积分图像 +参数说明: +Mat& srcMat-----------------------存储每个cellHOG特征的行特征向量 +2)cv::Rect roi--------------------单个cell的矩形位置 +3)std::vector& integrals-----存储的9幅积分图像,每一幅积分图像代表一个角度范围或者一个bins +*********************************************************************************************************/ +// 计算积分图 +std::vector CalculateIntegralHOG(Mat& srcMat) +{ + //【1】计算一阶微分的梯度图像 + cv::Mat sobelMatX; + cv::Mat sobelMatY; + + cv::Sobel(srcMat, sobelMatX, CV_32F, 1, 0); + cv::Sobel(srcMat, sobelMatY, CV_32F, 0, 1); + + std::vector bins(NBINS); + for (int i = 0; i < NBINS; i++) + { + bins[i] = Mat::zeros(srcMat.size(), CV_32F); + } + cv::Mat magnMat; + cv::Mat angleMat; + //【2】坐标转换,根据每一个点X方向和Y方向上的梯度,实现笛卡尔坐标和极坐标的转换 + cartToPolar(sobelMatX, sobelMatY, magnMat, angleMat, true); + //【3】下面这这两行代码起始是做安全处理的,因为在将笛卡尔坐标转换为极坐标之后,角度的范围在[0,360] + // 下面这两行代码让所有的角度收缩在[0,180]这个返回 + add(angleMat, Scalar(180), angleMat, angleMat<0); //如果angleMat<0,则加180 + add(angleMat, Scalar(-180), angleMat, angleMat >= 180); //如果angleMat>=180,则减180 + //【4】下面这行代码将角度矩阵转换为一个灰度值范围在[0,9]之间的图像 + angleMat /= THETA; + //【5】下面这个循环,其实是将图像的梯度幅值矩阵按九个不同方向的梯度角度,将每个角度范围内相应点的梯度幅值 + // 存储在相应的矩阵图像之上,其实就是将梯度幅值矩阵图像按照不同的梯度幅值角度分为9幅梯度幅值的图像 + for (int y = 0; y < srcMat.rows; y++) + { + for (int x = 0; x < srcMat.cols; x++) + { + int ind = angleMat.at(y, x); + bins[ind].at(y, x) += magnMat.at(y, x); + } + } + //【6】根据上面生成的9张不同角度的梯度幅值矩阵生成9张不同的梯度幅值的积分图像,至此以后, + // 积分图像的每一点就代表,这一点左上角,所有梯度幅值之和;生成的9幅积分图也就是9个 + // bins,不同bins上的HOG强度 + std::vector integrals(NBINS); + for (int i = 0; i < NBINS; i++) + { + integral(bins[i], integrals[i]); + } + return integrals; +} +/******************************************************************************************************** +函数功能: +计算单个cell HOG特征 +参数说明: +1)cv::Mat& HOGCellMat-------------存储每个cellHOG特征的行特征向量 +2)cv::Rect roi--------------------单个cell的矩形位置 +3)std::vector& integrals-----存储的9幅积分图像,每一幅积分图像代表一个角度范围或者一个bins +*********************************************************************************************************/ +void cacHOGinCell(cv::Mat& HOGCellMat, cv::Rect roi, std::vector& integrals) +{ + //【1】通过9幅积分图像快速实现HOG的计算,HOG这个直方图有9个bins,每个bins就对应一张积分图像 + int x0 = roi.x; //确定单个矩形cell的左上角点坐标 + int y0 = roi.y; + int x1 = x0 + roi.width; + int y1 = y0 + roi.height; //确定单个矩形cell的右下角点坐标 + + for (int i = 0; i (y0, x0); + float b = integral.at(y1, x1); + float c = integral.at(y0, x1); + float d = integral.at(y1, x0); + + HOGCellMat.at(0, i) = b - c - d + a;//每循环一次,计算一个梯度方向上的HOG特征,其实就是 + //每循环一次,就计算梯度方向直方图上的一个bins + } +} +/******************************************************************************************************** +函数功能: +获取当前窗口的HOG直方图----此块其实就是在计算单个Block的HOG梯度方向直方图 +参数说明: +1)cv::Point pt--------------------单个Block的中心点坐标 +2)std::vector& integrals-----存储的9幅积分图像,每一幅积分图像代表一个角度范围或者一个bins +*********************************************************************************************************/ +cv::Mat getHog(cv::Point pt, std::vector& integrals) +{ + if (pt.x - R<0 || pt.y - R<0 || pt.x + R >= integrals[0].cols || pt.y + R >= 
integrals[0].rows) + { + return cv::Mat(); + } + //【1】BLOCK的HOG直方图---具体的来说,BLOCKSIZE*BLOCKSIZE即4个cell的HOG特征直方图特征向量 + // 组成一个BLOCK的HOG特征直方图的特征向量 + cv::Mat hist(cv::Size(NBINS*BLOCKSIZE*BLOCKSIZE, 1), CV_32F); + cv::Point t1(0, pt.y - R); + int c = 0; + //【2】遍历块:通过下面这两个循环,就遍历了4个cell,并且将4个cell的HOG特征向量组成了一个 + // 维数比较大的BLOCK的HOG特征向量 + for (int i = 0; i cacHOGFeature(cv::Mat srcImage) +{ + cv::Mat grayImage; + std::vector HOGMatVector; + cv::cvtColor(srcImage, grayImage, CV_RGB2GRAY); + grayImage.convertTo(grayImage, CV_8UC1); + //【1】9个不同梯度方向上的9张梯度幅值的积分图像的生成 + std::vector integrals = CalculateIntegralHOG(grayImage); + Mat image = grayImage.clone(); + image *= 0.5; + //【2】变量全图像,计算最终的梯度方向直方图HOG + int count = 0; + for (int y = 0; y < grayImage.rows; y += CELLSIZE) + { + for (int x = 0; x < grayImage.cols; x += CELLSIZE) + { + //cout << "x=" << x << " y=" << y << endl; + cv::Mat HOGBlockMat(Size(NBINS, 1), CV_32F); + cv::Mat LHOGBlockMat(cv::Size(NBINS*BLOCKSIZE*BLOCKSIZE, 1), CV_32F); + //【3】获取当前窗口HOG,其实当前的窗口就是一个Block,每个Block由四个cell组成,每个Cell为20*20 + // 此块,计算的就是单个Block的梯度方向直方图HOG + cv::Mat hist = getHog(Point(x, y), integrals); + LHOGBlockMat = hist; + if (hist.empty())continue; + HOGBlockMat = Scalar(0); + for (int i = 0; i < NBINS; i++) + { + for (int j = 0; j < BLOCKSIZE; j++) + { + HOGBlockMat.at(0, i) += hist.at(0, i + j*NBINS); + } + } + //【4】L2范数归一化:对其得到的每个Block的的矩阵进行L2范数归一化,使其转变为一个Block的HOG特征向量 + normalize(HOGBlockMat, HOGBlockMat, 1, 0, CV_L2); + //【5】最后,每得到一个Block的HOG特征向量就存入HOGMatVector,这个HOGMatVector其实就是整个图像的HOG特征向量, + // 当然,现在这个HOGMatVector还是个二维数组的形式,如果想要利用SVM对其进行分类的话,还需要将其拉伸为一 + // 维特征向量 + HOGMatVector.push_back(LHOGBlockMat); + count++; + //cout << count << endl; + Point center(x, y); + //【6】绘制HOG特征图 + //在每一个block的中心画线 线段的颜色深度表示直方图数据 + for (int i = 0; i < NBINS; i++) + { + double theta = (i * THETA) * CV_PI / 180.0; + Point rd(CELLSIZE*0.5*cos(theta), CELLSIZE*0.5*sin(theta)); + Point rp = center - rd; + Point lp = center + rd; + line(image, rp, lp, Scalar(255 * HOGBlockMat.at(0, i), 255, 255)); + cout << HOGBlockMat.at(0, i)< cacAvgHOGFeature(std::vector& ToatlMatVector, cv::Mat srcImage) +{ + cv::Mat grayImage; + std::vector ImageVector; + cv::cvtColor(srcImage, grayImage, CV_RGB2GRAY); + grayImage.convertTo(grayImage, CV_8UC1); + //【1】9个不同梯度方向上的9张梯度幅值的积分图像的生成 + std::vector integrals = CalculateIntegralHOG(grayImage); + Mat image = grayImage.clone(); + image *= 0.5; + //【2】变量全图像,计算最终的梯度方向直方图HOG + int count = 0; + for (int y = 0; y < grayImage.rows; y += CELLSIZE) + { + for (int x = 0; x < grayImage.cols; x += CELLSIZE) + { + //cout << "x=" << x << " y=" << y << endl; + cv::Mat HOGBlockMat(Size(NBINS, 1), CV_32F); + //【3】获取当前窗口HOG,其实当前的窗口就是一个Block,每个Block由四个cell组成,每个Cell为20*20 + // 此块,计算的就是单个Block的梯度方向直方图HOG + cv::Mat hist = getHog(Point(x, y), integrals); + if (hist.empty())continue; + HOGBlockMat = Scalar(0); + for (int i = 0; i < NBINS; i++) + { + for (int j = 0; j < BLOCKSIZE; j++) + { + HOGBlockMat.at(0, i) += hist.at(0, i + j*NBINS); + } + } + //【4】L2范数归一化:对其得到的每个Block的的矩阵进行L2范数归一化,使其转变为一个Block的HOG特征向量 + normalize(HOGBlockMat, HOGBlockMat, 1, 0, CV_L2); + //【5】最后,每得到一个Block的HOG特征向量就存入HOGMatVector,这个HOGMatVector其实就是整个图像的HOG特征向量, + // 当然,现在这个HOGMatVector还是个二维数组的形式,如果想要利用SVM对其进行分类的话,还需要将其拉伸为一 + // 维特征向量 + ImageVector.push_back(HOGBlockMat);//ImageVector存储blockMat数据55x9 + for (int i = 0; i < NBINS; i++) + { + ToatlMatVector[count].at(0, i) += HOGBlockMat.at(0, i); + } + count++; + } + } + //imshow("out", image); + //imwrite("C:\\detectProject\\data\\sourceData\\MODEL\\" + ImageName 
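+	// The cell histograms above are read directly from the 9 orientation integral images: for an
+	// integral image I, the bin mass inside a cell with corners (x0,y0)-(x1,y1) is
+	// I(y1,x1) - I(y0,x1) - I(y1,x0) + I(y0,x0), which is the "b - c - d + a" expression in cacHOGinCell.
+	// normalize(HOGBlockMat, HOGBlockMat, 1, 0, CV_L2) then rescales each 9-bin block histogram v to
+	// unit L2 norm, i.e. v_i <- v_i / sqrt(sum_j v_j^2).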
+ "HOG.jpg", image); + return ToatlMatVector; +} +std::vector Draw(std::vector TotalMat) +{ + int count = 0; + cv::Mat image = cv::imread(ModelPath + ModelName + ".jpg"); + for (int y = CELLSIZE; y < IMAGELENGTH-CELLSIZE; y += CELLSIZE) + { + for (int x = CELLSIZE; x < IMAGEWIDTH-CELLSIZE; x += CELLSIZE) + { + cout << "x=" << x << " y=" << y << endl; + cv::Mat blockMat= TotalMat[count]; + + //【3】获取当前窗口HOG,其实当前的窗口就是一个Block,每个Block由四个cell组成,每个Cell为20*20 + // 此块,计算的就是单个Block的梯度方向直方图HOG + + Point center(x, y); + //【6】绘制HOG特征图 + //在每一个block的中心画线 线段的颜色深度表示直方图数据 + for (int i = 0; i < NBINS; i++) + { + double theta = (i * THETA) * CV_PI / 180.0; + Point rd(CELLSIZE*0.5*cos(theta), CELLSIZE*0.5*sin(theta)); + Point rp = center - rd; + Point lp = center + rd; + if (blockMat.at(0, i) > 0.6) + line(image, rp, lp, Scalar(255 * blockMat.at(0, i), 255, 255)); + } + count++; + cout << count << endl; + } + } + //imshow("out", image); + imwrite(ModelPath + ModelName + "HOG.jpg", image); + return TotalMat; +} + + +int writeMatVector(ofstream& outfile, std::vector& mat, int imageNumber) +{ + float data; + int count = 0; + for (int i = 0; i < mat.size(); i++) + { + for (int j = 0; j < mat[i].cols; j++) + { + data = mat[i].at(0, j)/imageNumber; + outfile << data << "\t"; + count++; + } + //cout << mat[i] << endl; + + //cout << "r(c) = " << "\n" << format(mat, "C") << " , " << endl << endl; + } + cout << count; + outfile.close(); + return 0; +} + + +#define MAX_PATH 1024 //最长路径长度 + +static char* ImagePath = "C:\\NDS\\traindata\\man_front\\man_front_L\\"; +/*---------------------------- +* 功能 : 递归遍历文件夹,找到其中包含的所有文件 +*---------------------------- +* 函数 : find +* 访问 : public +* +* 参数 : lpPath [in] 需遍历的文件夹目录 +* 参数 : fileList [in] 以文件名称的形式存储遍历后的文件 +*/ +void find(char* lpPath, std::vector &fileList) +{ + char szFind[MAX_PATH]; + WIN32_FIND_DATA FindFileData; + string impath = lpPath; + impath = impath.substr(0, impath.size() - 1); + const char* path= impath.c_str(); + + strcpy(szFind, path); + strcat(szFind, "\\*.*"); + + HANDLE hFind = ::FindFirstFile(szFind, &FindFileData); + if (INVALID_HANDLE_VALUE == hFind) return; + + while (true) + { + if (FindFileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) + { + if (FindFileData.cFileName[0] != '.') + { + char szFile[MAX_PATH]; + strcpy(szFile, path); + strcat(szFile, "\\"); + strcat(szFile, (char*)(FindFileData.cFileName)); + find(szFile, fileList); + } + } + else + { + //std::cout << FindFileData.cFileName << std::endl; + fileList.push_back(FindFileData.cFileName); + } + if (!FindNextFile(hFind, &FindFileData)) break; + } + FindClose(hFind); +} + + +int AverageImageHOG(char* ImPath) +{ + std::vector fileList;//定义一个存放结果文件名称的链表 + + //遍历一次结果的所有文件,获取文件名列表 + find(ImPath, fileList);//之后可对文件列表中的文件进行相应的操作 + + std::vector ToatlMatVector; + for (int s = 0; s < (IMAGEWIDTH/CELLSIZE-1)*(IMAGELENGTH/CELLSIZE-1); s++) + { + cv::Mat tempMat(cv::Size(NBINS, 1), CV_32F); + ToatlMatVector.push_back(tempMat); + } + for (int i = 0; i < (IMAGEWIDTH / CELLSIZE - 1)*(IMAGELENGTH / CELLSIZE - 1); i++) + { + for (int j = 0; j < NBINS; j++) + { + ToatlMatVector[i].at(0, j) = 0.0; + } + } + //输出文件夹下所有文件的名称 + int fileNumber = 0; + for (; fileNumber < fileList.size(); fileNumber++) + { + string str_pic_name = ImPath + fileList[fileNumber]; + cv::Mat srcImage = cv::imread(str_pic_name); + if (srcImage.empty()) + return -1; + cacAvgHOGFeature(ToatlMatVector,srcImage); + cout << fileNumber<<"out "<< str_pic_name << endl; + } + string outfileName = ModelPath + ModelName + ".txt"; + ofstream 
outFile(outfileName, ios_base::out); //按新建或覆盖方式写入 + if (!outFile.is_open()) return -1; + writeMatVector(outFile, ToatlMatVector, fileNumber); + Draw(ToatlMatVector); + system("pause"); + return 0; +} + + void sharpenImage1(const cv::Mat &image, cv::Mat &result) + { + //创建并初始化滤波模板 + cv::Mat kernel(3, 3, CV_32F, cv::Scalar(0)); + kernel.at(1, 1) = 5.0; + kernel.at(0, 1) = -1.0; + kernel.at(1, 0) = -1.0; + kernel.at(1, 2) = -1.0; + kernel.at(2, 1) = -1.0; + result.create(image.size(), image.type()); + + //对图像进行滤波 + cv::filter2D(image, result, image.depth(), kernel); + } +int AverageImage(char* ImPath) +{ + std::vector fileList;//定义一个存放结果文件名称的链表 + find(ImPath, fileList);//之后可对文件列表中的文件进行相应的操作 + //输出文件夹下所有文件的名称 + int fileNumber = 0; + cv::Mat avgImage; + cv::Mat dis; + float alpha = 0.95; + float beta = (1.0 - alpha); + cv::Mat grayImage; + for (; fileNumber < fileList.size(); fileNumber++) + { + + string str_pic_name = ImPath + fileList[fileNumber]; + cv::Mat srcImage = cv::imread(str_pic_name); + if (srcImage.empty()) + return -1; + cv::cvtColor(srcImage, grayImage, CV_RGB2GRAY); + grayImage.convertTo(grayImage, CV_8UC1); + if (fileNumber == 0) + avgImage = grayImage; + else + avgImage =dis; + cout << fileNumber << "out " << str_pic_name << endl; + if (fileNumber == 208) + int i = 0; + addWeighted(avgImage, alpha, grayImage, beta, 0.0, dis); + + } + //avgImage /= fileNumber; + //string outfileName = "C:\\detectProject\\data\\sourceData\\MODEL\\" + ModelName + ".txt"; + //ofstream outFile(outfileName, ios_base::out); //按新建或覆盖方式写入 + //if (!outFile.is_open()) return -1; + //writeMatVector(outFile, ToatlMatVector); + //Draw(ToatlMatVector, fileNumber); + imwrite("C:\\detectProject\\data\\sourceData\\MODEL\\HOGavg_side_L.jpg", dis); + system("pause"); + return 0; +} + +/******************************************************************************************************** +模块功能: +控制台应用程序的入口:Main函数 +*********************************************************************************************************/ + +int main() +{ + cv::Mat srcImage = cv::imread(ModelPath+ImageName+".jpg"); + //imwrite("C:\\detectProject\\data\\sourceData\\MODEL\\" + ImageName + ".jpg", srcImage); + if (srcImage.empty()) + return -1; + cv::imshow("srcImage ", srcImage); + std::vector HOGFeatureMat=cacHOGFeature(srcImage); + string outfileName = ModelPath + ImageName + ".txt"; + ofstream outFile(outfileName, ios_base::out); //按新建或覆盖方式写入 + if (!outFile.is_open()) return -1; + //writeMatVector(outFile,HOGFeatureMat); + + //AverageImageHOG(ImagePath); + cv::waitKey(0); + return 0; +} \ No newline at end of file diff --git a/getDataList.py b/getDataList.py new file mode 100644 index 0000000..4194f2e --- /dev/null +++ b/getDataList.py @@ -0,0 +1,263 @@ +import os +#coding=utf-8 + +trainDataPath1 = 'C:\\NDS\\traindata\\man_front' +trainDataPath2 = 'C:\\NDS\\traindata\\man_side' +trainDataPath3 = 'C:\\NDS\\traindata\\man_ride' +backgroundDataPath1 = 'C:\\NDS\\traindata\\background_front' +backgroundDataPath2 = 'C:\\NDS\\traindata\\background_side' +backgroundDataPath3 = 'C:\\NDS\\traindata\\background_ride' +hardDataPath1 = 'C:\\NDS\\traindata\\hard_front' +hardDataPath2 = 'C:\\NDS\\traindata\\hard_side' +hardDataPath3 = 'C:\\NDS\\traindata\\hard_ride' +file_objectNeg1s = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\SmallNegData1.txt', 'w') +file_objectNeg1m = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\MiddleNegData1.txt', 'w') +file_objectNeg1l = 
open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\LargeNegData1.txt', 'w') +file_objectTrain1s = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\SmallTrainData1.txt', 'w') +file_objectTrain1m = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\MiddleTrainData1.txt', 'w') +file_objectTrain1l = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\LargeTrainData1.txt', 'w') +file_objectHard1s = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\SmallHard1.txt', 'w') +file_objectHard1m = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\MiddleHard1.txt', 'w') +file_objectHard1l = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\LargeHard1.txt', 'w') + +file_objectNeg2s = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\SmallNegData2.txt', 'w') +file_objectNeg2m = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\MiddleNegData2.txt', 'w') +file_objectNeg2l = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\LargeNegData2.txt', 'w') +file_objectTrain2s = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\SmallTrainData2.txt', 'w') +file_objectTrain2m = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\MiddleTrainData2.txt', 'w') +file_objectTrain2l = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\LargeTrainData2.txt', 'w') +file_objectHard2s = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\SmallHard2.txt', 'w') +file_objectHard2m = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\MiddleHard2.txt', 'w') +file_objectHard2l = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\LargeHard2.txt', 'w') + +file_objectNeg3s = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\SmallNegData3.txt', 'w') +file_objectNeg3m = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\MiddleNegData3.txt', 'w') +file_objectNeg3l = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\LargeNegData3.txt', 'w') +file_objectTrain3s = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\SmallTrainData3.txt', 'w') +file_objectTrain3m = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\MiddleTrainData3.txt', 'w') +file_objectTrain3l = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\LargeTrainData3.txt', 'w') +file_objectHard3s = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\SmallHard3.txt', 'w') +file_objectHard3m = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\MiddleHard3.txt', 'w') +file_objectHard3l = open('C:\\detectProject\\data\\sourceData\\TRAINDATA\\LargeHard3.txt', 'w') +smalln1, middlen1, largen1, smallt1, middlet1, larget1, smallh1, middleh1, largeh1 = 0, 0, 0, 0, 0, 0, 0, 0, 0 +smalln2, middlen2, largen2, smallt2, middlet2, larget2, smallh2, middleh2, largeh2 = 0, 0, 0, 0, 0, 0, 0, 0, 0 +smalln3, middlen3, largen3, smallt3, middlet3, larget3, smallh3, middleh3, largeh3 = 0, 0, 0, 0, 0, 0, 0, 0, 0 + +for root, dirs, files in os.walk(backgroundDataPath1): + for Data in files: + Path = str(Data) + if str(Data)[0] == 'S': + file_objectNeg1s.write(backgroundDataPath1 + "\\" + Path + '\n') + smalln1 += 1 + elif str(Data)[0] == 'M': + file_objectNeg1m.write(backgroundDataPath1 + "\\" + Path + '\n') + middlen1 += 1 + elif str(Data)[0] == 'L': + file_objectNeg1l.write(backgroundDataPath1 + "\\" + Path + '\n') + largen1 += 1 +for root, dirs, files in os.walk(backgroundDataPath2): + for Data in files: + Path = str(Data) + if str(Data)[0] == 'S': + file_objectNeg2s.write(backgroundDataPath2 + "\\" + Path + '\n') + smalln2 += 1 + elif str(Data)[0] == 'M': + file_objectNeg2m.write(backgroundDataPath2 + "\\" + Path + '\n') + 
middlen2 += 1 + elif str(Data)[0] == 'L': + file_objectNeg2l.write(backgroundDataPath2 + "\\" + Path + '\n') + largen2 += 1 +for root, dirs, files in os.walk(backgroundDataPath3): + for Data in files: + Path = str(Data) + if str(Data)[0] == 'S': + file_objectNeg3s.write(backgroundDataPath3 + "\\" + Path + '\n') + smalln3 += 1 + elif str(Data)[0] == 'M': + file_objectNeg3m.write(backgroundDataPath3 + "\\" + Path + '\n') + middlen3 += 1 + elif str(Data)[0] == 'L': + file_objectNeg3l.write(backgroundDataPath3 + "\\" + Path + '\n') + largen3 += 1 + +for root, dirs, files in os.walk(trainDataPath1): + for Data in files: + Path = str(Data) + if str(Data)[0] == 'S': + file_objectTrain1s.write(trainDataPath1+"\\"+Path + '\n') + smallt1 += 1 + elif str(Data)[0] == 'M': + file_objectTrain1m.write(trainDataPath1+"\\"+Path + '\n') + middlet1 += 1 + elif str(Data)[0] == 'L': + file_objectTrain1l.write(trainDataPath1+"\\"+Path + '\n') + larget1 += 1 +for root, dirs, files in os.walk(trainDataPath2): + for Data in files: + Path = str(Data) + if str(Data)[0] == 'S': + file_objectTrain2s.write(trainDataPath2+"\\"+Path + '\n') + smallt2 += 1 + elif str(Data)[0] == 'M': + file_objectTrain2m.write(trainDataPath2+"\\"+Path + '\n') + middlet2 += 1 + elif str(Data)[0] == 'L': + file_objectTrain2l.write(trainDataPath2+"\\"+Path + '\n') + larget2 += 1 +for root, dirs, files in os.walk(trainDataPath3): + for Data in files: + Path = str(Data) + if str(Data)[0] == 'S': + file_objectTrain3s.write(trainDataPath3+"\\"+Path + '\n') + smallt3 += 1 + elif str(Data)[0] == 'M': + file_objectTrain3m.write(trainDataPath3+"\\"+Path + '\n') + middlet3 += 1 + elif str(Data)[0] == 'L': + file_objectTrain3l.write(trainDataPath3+"\\"+Path + '\n') + larget3 += 1 + +for root, dirs, files in os.walk(hardDataPath1): + for Data in files: + Path = str(Data) + if str(Data)[0] == 'S': + file_objectHard1s.write(hardDataPath1+"\\"+Path + '\n') + smallh1 += 1 + elif str(Data)[0] == 'M': + file_objectHard1m.write(hardDataPath1+"\\"+Path + '\n') + middleh1 += 1 + elif str(Data)[0] == 'L': + file_objectHard1l.write(hardDataPath1+"\\"+Path + '\n') + largeh1 += 1 +for root, dirs, files in os.walk(hardDataPath2): + for Data in files: + Path = str(Data) + if str(Data)[0] == 'S': + file_objectHard2s.write(hardDataPath2 + "\\" + Path + '\n') + smallh2 += 1 + elif str(Data)[0] == 'M': + file_objectHard2m.write(hardDataPath2 + "\\" + Path + '\n') + middleh2 += 1 + elif str(Data)[0] == 'L': + file_objectHard2l.write(hardDataPath2 + "\\" + Path + '\n') + largeh2 += 1 +for root, dirs, files in os.walk(hardDataPath3): + for Data in files: + Path = str(Data) + if str(Data)[0] == 'S': + file_objectHard3s.write(hardDataPath3 + "\\" + Path + '\n') + smallh3 += 1 + elif str(Data)[0] == 'M': + file_objectHard3m.write(hardDataPath3 + "\\" + Path + '\n') + middleh3 += 1 + elif str(Data)[0] == 'L': + file_objectHard3l.write(hardDataPath3 + "\\" + Path + '\n') + largeh3 += 1 + +file_objectTrain1s.close() +file_objectTrain2s.close() +file_objectTrain3s.close() +file_objectTrain1m.close() +file_objectTrain2m.close() +file_objectTrain3m.close() +file_objectTrain1l.close() +file_objectTrain2l.close() +file_objectTrain3l.close() +file_objectNeg1s.close() +file_objectNeg2s.close() +file_objectNeg3s.close() +file_objectNeg1m.close() +file_objectNeg2m.close() +file_objectNeg3m.close() +file_objectNeg1l.close() +file_objectNeg2l.close() +file_objectNeg3l.close() +file_objectHard1s.close() +file_objectHard2s.close() +file_objectHard3s.close() +file_objectHard1m.close() 
+file_objectHard2m.close() +file_objectHard3m.close() +file_objectHard1l.close() +file_objectHard2l.close() +file_objectHard3l.close() + +lines=[] +f=open('D:\\projects\\pedestrianDec\\pedestrianDec\\ndsconfig.txt','r') +for line in f: + lines.append(line) +f.close() +lines.__delslice__(15,24) +lines.insert(15,str(largeh1)+"\n") +lines.insert(15,str(largen1)+"\n") +lines.insert(15,str(larget1)+"\n") +lines.insert(15,str(middleh1)+"\n") +lines.insert(15,str(middlen1)+"\n") +lines.insert(15,str(middlet1)+"\n") +lines.insert(15,str(smallh1)+"\n") +lines.insert(15,str(smalln1)+"\n") +lines.insert(15,str(smallt1)+"\n") +lines.__delslice__(39,48) +lines.insert(39,str(largeh2)+"\n") +lines.insert(39,str(largen2)+"\n") +lines.insert(39,str(larget2)+"\n") +lines.insert(39,str(middleh2)+"\n") +lines.insert(39,str(middlen2)+"\n") +lines.insert(39,str(middlet2)+"\n") +lines.insert(39,str(smallh2)+"\n") +lines.insert(39,str(smalln2)+"\n") +lines.insert(39,str(smallt2)+"\n") +lines.__delslice__(63,72) +lines.insert(63,str(largeh3)+"\n") +lines.insert(63,str(largen3)+"\n") +lines.insert(63,str(larget3)+"\n") +lines.insert(63,str(middleh3)+"\n") +lines.insert(63,str(middlen3)+"\n") +lines.insert(63,str(middlet3)+"\n") +lines.insert(63,str(smallh3)+"\n") +lines.insert(63,str(smalln3)+"\n") +lines.insert(63,str(smallt3)+"\n") +s=''.join(lines) +f=open('D:\\projects\\pedestrianDec\\pedestrianDec\\ndsconfig.txt','w+') +f.write(s) +f.close() + +print 'front:' +print 'Train:' +print 'small: ' + str(smallt1) +print 'middle: ' + str(middlet1) +print 'large: ' + str(larget1) +print 'Neg:' +print 'small: ' + str(smalln1) +print 'middle: ' + str(middlen1) +print 'large: ' + str(largen1) +print 'Hard:' +print 'small: ' + str(smallh1) +print 'middle: ' + str(middleh1) +print 'large: ' + str(largeh1) +print 'side:' +print 'Train:' +print 'small: ' + str(smallt2) +print 'middle: ' + str(middlet2) +print 'large: ' + str(larget2) +print 'Neg:' +print 'small: ' + str(smalln2) +print 'middle: ' + str(middlen2) +print 'large: ' + str(largen2) +print 'Hard:' +print 'small: ' + str(smallh2) +print 'middle: ' + str(middleh2) +print 'large: ' + str(largeh2) +print 'ride:' +print 'Train:' +print 'small: ' + str(smallt3) +print 'middle: ' + str(middlet3) +print 'large: ' + str(larget3) +print 'Neg:' +print 'small: ' + str(smalln3) +print 'middle: ' + str(middlen3) +print 'large: ' + str(largen3) +print 'Hard:' +print 'small: ' + str(smallh3) +print 'middle: ' + str(middleh3) +print 'large: ' + str(largeh3) diff --git a/main.cpp b/main.cpp index f36d775..d518392 100644 --- a/main.cpp +++ b/main.cpp @@ -1,890 +1,1228 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -using namespace std; -using namespace cv; - -bool TRAIN = false; //是否进行训练,true表示重新训练,false表示读取xml文件中的SVM模型 -bool CENTRAL_CROP = false; //true:训练时,对96*160的INRIA正样本图片剪裁出中间的64*128大小人体 -//int TRAINTYPE = 0; - - -//继承自CvSVM的类,因为生成setSVMDetector()中用到的检测子参数时,需要用到训练好的SVM的decision_func参数, -//但通过查看CvSVM源码可知decision_func参数是protected类型变量,无法直接访问到,只能继承之后通过函数访问 -class MySVM : public CvSVM -{ -public: - //获得SVM的决策函数中的alpha数组 - double * get_alpha_vector() - { - return this->decision_func->alpha; - } - - //获得SVM的决策函数中的rho参数,即偏移量 - float get_rho() - { - return this->decision_func->rho; - } -}; - -class myRect -{ -public: - string group; - double w; - Rect rect; -}; -void generateDescriptors(ifstream& imagePath, HOGDescriptor& hog, vector& descriptors, int& descriptorDim, - Mat& sampleFeatureMat, Mat& sampleLabelMat, int 
trainClass,int PosSamNO,int NegSamNO,int HardExampleNO) { - string imgName; - int numLimit; - if (0 == trainClass) - { - numLimit = PosSamNO; - } - else if (1 == trainClass) - { - numLimit = NegSamNO; - } - else if (2 == trainClass) - { - numLimit = HardExampleNO; - } - for (int num = 0; num < numLimit && getline(imagePath, imgName); num++) - { - //cout << imgName << endl; - Mat src = imread(imgName);//读取图片 - - if (CENTRAL_CROP) - resize(src, src, hog.winSize); - //src = src(rectCrop);//将96*160的INRIA正样本图片剪裁为64*128,即剪去上下左右各16个像素 - /* imshow("....", src); - waitKey(6000); */ //resize(src,src,Size(64,128)); - hog.compute(src, descriptors, hog.blockStride);//计算HOG描述子,检测窗口移动步长(8,8) - //处理第一个样本时初始化特征向量矩阵和类别矩阵,因为只有知道了特征向量的维数才能初始化特征向量矩阵 - //将计算好的HOG描述子复制到样本特征矩阵sampleFeatureMat - if (0 == trainClass) - { - if (0 == num) - { - descriptorDim = descriptors.size(); //HOG描述子的维数 - //初始化所有训练样本的特征向量组成的矩阵,行数等于所有样本的个数,列数等于HOG描述子维数sampleFeatureMat - sampleFeatureMat = Mat::zeros(PosSamNO + NegSamNO + HardExampleNO, descriptorDim, CV_32FC1); - //初始化训练样本的类别向量,行数等于所有样本的个数,列数等于1;1表示有人,0表示无人 - sampleLabelMat = Mat::zeros(PosSamNO + NegSamNO + HardExampleNO, 1, CV_32FC1); - } - for (int i = 0; i < descriptorDim; i++) - sampleFeatureMat.at(num, i) = descriptors[i];//第num个样本的特征向量中的第i个元素 - sampleLabelMat.at(num, 0) = 1;//正样本类别为1,有人 - } - else if (1 == trainClass) { - if (0 == num) - descriptorDim = sampleFeatureMat.cols; - for (int i = 0; i < descriptorDim; i++) - sampleFeatureMat.at(num + PosSamNO, i) = descriptors[i];//第num个样本的特征向量中的第i个元素 - sampleLabelMat.at(num + PosSamNO, 0) = -1;//正样本类别为1,有人 - } - else if (2 == trainClass) - { - if (0 == num) - descriptorDim = sampleFeatureMat.cols; - for (int i = 0; i < descriptorDim; i++) - sampleFeatureMat.at(num + PosSamNO + NegSamNO, i) = descriptors[i];//第num个样本的特征向量中的第i个元素 - sampleLabelMat.at(num + PosSamNO + NegSamNO, 0) = -1;//正样本类别为1,有人 - } - - } - descriptors.clear(); - return; -} - -void trainSVM(string posPath,string negPath, string hardPath, HOGDescriptor& hog, string modelPath, vector& descriptors, int PosSamNO, int NegSamNO, int HardExampleNO) { - - ifstream finPos(posPath.data()); - ifstream finNeg(negPath.data()); - ifstream finHard(hardPath.data()); - int DescriptorDim;//HOG描述子的维数,由图片大小、检测窗口大小、块大小、细胞单元中直方图bin个数决定 - MySVM svm;//SVM分类器 - //HOG描述子向量 - string ImgName;//图片名(绝对路径) - Mat sampleFeatureMat;//所有训练样本的特征向量组成的矩阵,行数等于所有样本的个数,列数等于HOG描述子维数 - Mat sampleLabelMat;//训练样本的类别向量,行数等于所有样本的个数,列数等于1;1表示有人,-1表示无人 - - cout << "开始计算正样本检测子" << endl; - generateDescriptors(finPos, hog, descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 0, PosSamNO, NegSamNO, HardExampleNO); - cout << "计算完成" << endl; - cout << "开始计算负样本检测子" << endl; - generateDescriptors(finNeg, hog, descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 1, PosSamNO, NegSamNO, HardExampleNO); - cout << "计算完成" << endl; - if (HardExampleNO > 0) - //依次读取HardExample负样本图片,生成HOG描述子 - generateDescriptors(finHard, hog, descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 2, PosSamNO, NegSamNO, HardExampleNO); - - CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, FLT_EPSILON); - //SVM参数:SVM类型为C_SVC;线性核函数;松弛因子C=0.01 - CvSVMParams param(CvSVM::C_SVC, CvSVM::LINEAR, 0, 1, 0, 0.01, 0, 0, 0, criteria); - cout << "开始训练SVM分类器" << endl; - svm.train(sampleFeatureMat, sampleLabelMat, Mat(), Mat(), param);//训练分类器 - cout << "训练完成" << endl; - svm.save(modelPath.data());//将训练好的SVM模型保存为xml文件 - descriptors.clear(); - finPos.close(); - finNeg.close(); - finHard.close(); - 
return; -} - /******************************************************************************************************************* - 线性SVM训练完成后得到的XML文件里面,有一个数组,叫做support vector,还有一个数组,叫做alpha,有一个浮点数,叫做rho; - 将alpha矩阵同support vector相乘,注意,alpha*supportVector,将得到一个列向量。之后,再该列向量的最后添加一个元素rho。 - 如此,变得到了一个分类器,利用该分类器,直接替换opencv中行人检测默认的那个分类器(cv::HOGDescriptor::setSVMDetector()) - ,就可以利用你的训练样本训练出来的分类器进行行人检测了。 - ********************************************************************************************************************/ -void setDetector(MySVM& svm, vector& myDetector, string detectorPath){ - int DescriptorDim = svm.get_var_count();//特征向量的维数,即HOG描述子的维数 - int supportVectorNum = svm.get_support_vector_count();//支持向量的个数 - //cout << "支持向量个数:" << supportVectorNum << endl; - - Mat alphaMat = Mat::zeros(1, supportVectorNum, CV_32FC1);//alpha向量,长度等于支持向量个数 - Mat supportVectorMat = Mat::zeros(supportVectorNum, DescriptorDim, CV_32FC1);//支持向量矩阵 - Mat resultMat = Mat::zeros(1, DescriptorDim, CV_32FC1);//alpha向量乘以支持向量矩阵的结果 - - //将支持向量的数据复制到supportVectorMat矩阵中 - for (int i = 0; i < supportVectorNum; i++) - { - const float * pSVData = svm.get_support_vector(i);//返回第i个支持向量的数据指针 - for (int j = 0; j < DescriptorDim; j++) - { - //cout<(i, j) = pSVData[j]; - } - } - - //将alpha向量的数据复制到alphaMat中 - double * pAlphaData = svm.get_alpha_vector();//返回SVM的决策函数中的alpha向量 - for (int i = 0; i < supportVectorNum; i++) - { - alphaMat.at(0, i) = pAlphaData[i]; - } - - //计算-(alphaMat * supportVectorMat),结果放到resultMat中 - //gemm(alphaMat, supportVectorMat, -1, 0, 1, resultMat);//不知道为什么加负号? - resultMat = -1 * alphaMat * supportVectorMat; - - //将resultMat中的数据复制到数组myDetector中 - for (int i = 0; i < DescriptorDim; i++) - { - myDetector.push_back(resultMat.at(0, i)); - } - //最后添加偏移量rho,得到检测子 - myDetector.push_back(svm.get_rho()); - cout << "检测子维数:" << myDetector.size() << endl; - - //保存检测子参数到文件 - ofstream fout(detectorPath.data()); - for (int i = 0; i < myDetector.size(); i++) - fout << myDetector[i] << endl; - fout.close(); - - return; -} - -void DetectAndDraw(Mat& src, Mat &trtd, HOGDescriptor& hog1, HOGDescriptor& hog2, HOGDescriptor& hog3, vector& found, - vector& found_tmp, vector& found_filtered, vector& weight){ - //string path - //对图片进行多尺度行人检测 - string dirPath = "D:\\detectProject\\testdata\\"; - Rect r; - myRect mr; - hog1.detectMultiScale(src(Range(140, 300), Range(0, 480)), found_tmp, weight, 0.1, hog1.blockStride, Size(0, 0),1.05,2,false); - //0.05~~0.1 - for (int i = found_tmp.size() - 1; i >= 0; i--) - { - r = found_tmp[i]; - r.x += cvRound(r.width*0.1); - r.width = cvRound(r.width*0.8); - r.y += 140; - r.y += cvRound(r.height*0.07); - r.height = cvRound(r.height*0.8); - if (r.tl().y <= 190 && r.br().y >= 200) - { - mr.rect = found_tmp[i]; - mr.w = weight[i]; - mr.group = "small"; - found.push_back(mr); - //found_tmp.erase(found_tmp.begin() + i); - } - } - //found.insert(found.end(),found_tmp.begin(),found_tmp.end()); - weight.clear(); - found_tmp.clear(); - hog2.detectMultiScale(src(Range(140, 300), Range(0, 480)), found_tmp, weight, 0.12, hog2.blockStride, Size(0, 0), 1.05, 2); - //0.1~~0.15 - for (int i = found_tmp.size() - 1; i >= 0; i--) - { - r = found_tmp[i]; - r.x += cvRound(r.width*0.1); - r.width = cvRound(r.width*0.8); - r.y += 140; - r.y += cvRound(r.height*0.07); - r.height = cvRound(r.height*0.8); - if (r.tl().y <= 190 && r.br().y >= 215) - { - mr.rect = found_tmp[i]; - mr.w = weight[i]; - mr.group = "middle"; - found.push_back(mr); - //found_tmp.erase(found_tmp.begin() + i); - } - } - 
////found.insert(found.end(), found_tmp.begin(), found_tmp.end()); - weight.clear(); - found_tmp.clear(); - hog3.detectMultiScale(src(Range(140, 300), Range(0, 480)), found_tmp, weight, 0.2, hog3.blockStride, Size(0, 0), 1.05, 2); - //0.2~0.25 - for (int i = found_tmp.size() - 1; i >= 0; i--) - { - r = found_tmp[i]; - r.x += cvRound(r.width*0.1); - r.width = cvRound(r.width*0.8); - r.y += 140; - r.y += cvRound(r.height*0.07); - r.height = cvRound(r.height*0.8); - if (r.tl().y <= 190 && r.br().y >= 230) - { - mr.rect = found_tmp[i]; - mr.w = weight[i]; - mr.group = "large"; - found.push_back(mr); - //found_tmp.erase(found_tmp.begin() + i); - } - } - //found.insert(found.end(), found_tmp.begin(), found_tmp.end()); - weight.clear(); - found_tmp.clear(); - //!!!!!!!!!!!!!!!!!!!!!!!!!!!边界确定注意(Range(300, 570), Range(0, 1280)) - - //找出所有没有嵌套的矩形框r,并放入found_filtered中,如果有嵌套的话,则取外面最大的那个矩形框放入found_filtered中 - int x1, x2, y1, y2; - for (int i = 0; i < found.size(); i++) - { - mr = found[i]; - int j = 0; - //for (; j < found.size(); j++) - // if (j != i && (r & found[j]) == r) - // break; - for(; j 480) { - // x1 = 480 - winSize.width; - // x2 = 480; - //} - //if (y2 > 356){ - // y1 = 356 - winSize.height; - // y2 = 356; - //} - /* if (TRAINTYPE == 1) - { - headString = "she_"; - } - else if(TRAINTYPE == 2) - { - headString = "mhe_"; - } - else if(TRAINTYPE == 3) - { - headString = "bhe_"; - }*/ - - //ss.str(""); - //ss << i; - //hePath = dirPath + headString + num + "_" + ss.str() + ".jpg"; - //imwrite(hePath, src(Range(y1, y2), Range(x1, x2))); - - mr.rect.x += cvRound(mr.rect.width*0.1); - mr.rect.width = cvRound(mr.rect.width*0.8); - mr.rect.y += 140; - mr.rect.y += cvRound(mr.rect.height*0.07); - mr.rect.height = cvRound(mr.rect.height*0.8); - ////!!!!与上面的边界对应 - //rectangle(trtd, Rect(0, 120, 480, 180), Scalar(0, 255, 0), 1);//上下界 - //rectangle(trtd, Rect(0, 190, 480, 1), Scalar(255, 255, 255), 1);//视平线 - //rectangle(trtd, Rect(0, 205, 480, 1), Scalar(255, 255, 0), 1);//30m线 - //rectangle(trtd, Rect(0, 220, 480, 1), Scalar(255, 0, 255), 1);//15m线 - //rectangle(trtd, Rect(0, 235, 480, 1), Scalar(0, 0, 255), 1);//10m线 - - - //if(r.tl().y <190 && r.br().y>190) - rectangle(trtd, mr.rect.tl(), mr.rect.br(), Scalar(0, 255, 0), 1); - } - return; -} - -void processedImgToVideo(string dirPath,char * videoPath,int tolFrame) { - IplImage* img; - string imgPath; - char const *fimgPath; - CvVideoWriter* writer = cvCreateVideoWriter(videoPath, CV_FOURCC('X', 'V', 'I', 'D'), 14, Size(480, 356)); - stringstream ss; - for (int i = 0; i < tolFrame; i++) - { - ss.str(""); - ss << i; - imgPath = dirPath + "pimage" + ss.str() + ".jpg"; - fimgPath = imgPath.c_str(); - img = cvLoadImage(fimgPath); - cvWriteFrame(writer, img); - cvReleaseImage(&img); - cout << imgPath << endl; - } - cvReleaseVideoWriter(&writer); -} - -//int main() -//{ -// bool bbbb = true; -// if (bbbb == false) -// { -// int a = 1; -// cout << a << endl; -// } -// stringstream ss; -// int a = 100l; -// int b = 2002; -// ss << a; -// cout << ss.str() << endl; -// cout << "hhe" << endl; -// ss.str(""); -// ss << b; -// cout << ss.str() << endl; -// system("pause"); - //cout<< CV_VERSION< descriptors; - if (TRAIN == true) - { - trainSVM(posPath1, negPath1, hardPath1, hog1, modelPath1, descriptors, PosSamNO1, NegSamNO1, HardExampleNO1); - trainSVM(posPath2, negPath2, hardPath2, hog2, modelPath2, descriptors, PosSamNO2, NegSamNO2, HardExampleNO2); - trainSVM(posPath3, negPath3, hardPath3, hog3, modelPath3, descriptors, PosSamNO3, NegSamNO3, HardExampleNO3); 
- } - MySVM svm1, svm2, svm3; - vector myDetector; - svm1.load(modelPath1.data()); - setDetector(svm1, myDetector, detectorPath1); - hog1.setSVMDetector(myDetector); - myDetector.clear(); - svm2.load(modelPath2.data()); - setDetector(svm2, myDetector, detectorPath2); - hog2.setSVMDetector(myDetector); - myDetector.clear(); - svm3.load(modelPath3.data()); - setDetector(svm3, myDetector, detectorPath3); - hog3.setSVMDetector(myDetector); - myDetector.clear(); - - /**************读入图片进行HOG行人检测******************/ - cout << "Start Detecting..." << endl; - vector found_tmp;//矩形框数组 - vector found_filtered, found; - vector weight; - ifstream finDetect(detectDataPath.data()); - string detectData, videoPath, rectFilePath; - Mat src,trtd; - IplImage* iplimage; - string imgPath; - stringstream ss; - VideoCapture cap; - CvVideoWriter* writer; - double totalFrame; - - while(getline(finDetect, detectData)) - { - cout << "Detecting "<(num, i) = descriptors[i];//第num个样本的特征向量中的第i个元素 -// sampleLabelMat.at(num, 0) = 1;//正样本类别为1,有人 -// descriptors.clear(); -//} - -////依次读取负样本图片,生成HOG描述子 -//for (int num = 0; num < NegSamNO && getline(finNeg, ImgName); num++) -//{ -// //cout << "处理:" << ImgName << num << endl; -// ImgName = "D:\\detectProject\\negativedata\\" + ImgName;//加上负样本的路径名 -// Mat src = imread(ImgName);//读取图片 -// //resize(src,img,Size(64,128)); -// //imshow("....", src); -// //waitKey(6000); -// hog.compute(src, descriptors, Size(8, 8));//计算HOG描述子,检测窗口移动步长(8,8) -// //cout<<"描述子维数:"<(num + PosSamNO, i) = descriptors[i];//第PosSamNO+num个样本的特征向量中的第i个元素 -// sampleLabelMat.at(num + PosSamNO, 0) = -1;//负样本类别为-1,无人 -// descriptors.clear(); -//} - -//for (int num = 0; num < HardExampleNO && getline(finHardExample, ImgName); num++) -//{ -// cout << "处理:" << ImgName << endl; -// ImgName = "D:\\DataSet\\HardExample_2400PosINRIA_12000Neg\\" + ImgName;//加上HardExample负样本的路径名 -// Mat src = imread(ImgName);//读取图片 -// //resize(src,img,Size(64,128)); -// hog.compute(src, descriptors, Size(8, 8));//计算HOG描述子,检测窗口移动步长(8,8) -// //cout<<"描述子维数:"<(num + PosSamNO + NegSamNO, i) = descriptors[i];//第PosSamNO+num个样本的特征向量中的第i个元素 -// sampleLabelMat.at(num + PosSamNO + NegSamNO, 0) = -1;//负样本类别为-1,无人 -// descriptors.clear(); -//} - - -////检测窗口(64,128),块尺寸(16,16),块步长(8,8),cell尺寸(8,8),直方图bin个数9 -//HOGDescriptor hog(winSize, blockSize, blockStride, cellSize, 9);//HOG检测器,用来计算HOG描述子的 -//int DescriptorDim;//HOG描述子的维数,由图片大小、检测窗口大小、块大小、细胞单元中直方图bin个数决定 -//MySVM svm;//SVM分类器 -//vector descriptors;//HOG描述子向量 -////namedWindow("~.~"); -// //若TRAIN为true,重新训练分类器 -//if (TRAIN) -//{ -// string ImgName;//图片名(绝对路径) -// ifstream finPos("D:\\detectProject\\LargeTrainData.txt");//正样本图片的文件名列表 -// ifstream finNeg("D:\\detectProject\\NegativeData3.txt");//负样本图片的文件名列表 - -// Mat sampleFeatureMat;//所有训练样本的特征向量组成的矩阵,行数等于所有样本的个数,列数等于HOG描述子维数 -// Mat sampleLabelMat;//训练样本的类别向量,行数等于所有样本的个数,列数等于1;1表示有人,-1表示无人 - -// string trainPath = "D:\\detectProject\\traindata\\"; -// string bgPath = "D:\\detectProject\\negativedata\\"; -// //依次读取正样本图片,生成HOG描述子 -// generateDescriptors(finPos, hog, descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 0, trainPath); -// //依次读取负样本图片,生成HOG描述子 -// generateDescriptors(finNeg, hog, descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 1, bgPath); -// -// //处理HardExample负样本 -// if (HardExampleNO > 0) -// { -// ifstream finHardExample("HardExample_2400PosINRIA_12000NegList.txt");//HardExample负样本图片的文件名列表 -// string hardPath = "D:\\DataSet\\HardExample_2400PosINRIA_12000Neg\\"; -// generateDescriptors(finHardExample, hog, 
descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 2, hardPath); //依次读取HardExample负样本图片,生成HOG描述子 -// } - -// ////输出样本的HOG特征向量矩阵到文件 -// /*ofstream fout("D:\\detectProject\\SampleFeatureMat.txt"); -// for(int i=0; i(i,j)<<" "; -// fout<(i, j) = pSVData[j]; -// } -//} - -////将alpha向量的数据复制到alphaMat中 -//double * pAlphaData = svm.get_alpha_vector();//返回SVM的决策函数中的alpha向量 -//for (int i = 0; i < supportVectorNum; i++) -//{ -// alphaMat.at(0, i) = pAlphaData[i]; -//} - -////计算-(alphaMat * supportVectorMat),结果放到resultMat中 -////gemm(alphaMat, supportVectorMat, -1, 0, 1, resultMat);//不知道为什么加负号? -//resultMat = -1 * alphaMat * supportVectorMat; - -////得到最终的setSVMDetector(const vector& detector)参数中可用的检测子 -//vector myDetector; -////将resultMat中的数据复制到数组myDetector中 -//for (int i = 0; i < DescriptorDim; i++) -//{ -// myDetector.push_back(resultMat.at(0, i)); -//} -////最后添加偏移量rho,得到检测子 -//myDetector.push_back(svm.get_rho()); -//cout << "检测子维数:" << myDetector.size() << endl; -////设置HOGDescriptor的检测子 -//hog.setSVMDetector(myDetector); -////myHOG.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector()); - -////保存检测子参数到文件 -//ofstream fout("D:\\detectProject\\HOGDetectorForOpenCV.txt"); -//for (int i = 0; i < myDetector.size(); i++) -//{ -// fout << myDetector[i] << endl; -//} - -/******************读入单个64*128的测试图并对其HOG描述子进行分类*********************/ -////读取测试图片(64*128大小),并计算其HOG描述子 -////Mat testImg = imread("person014142.jpg"); -//Mat testImg = imread("noperson000026.jpg"); -//vector descriptor; -//hog.compute(testImg,descriptor,Size(8,8));//计算HOG描述子,检测窗口移动步长(8,8) -//Mat testFeatureMat = Mat::zeros(1,3780,CV_32FC1);//测试样本的特征向量矩阵 -////将计算好的HOG描述子复制到testFeatureMat矩阵中 -//for(int i=0; i(0,i) = descriptor[i]; - -////用训练好的SVM分类器对测试图片的特征向量进行分类 -//int result = svm.predict(testFeatureMat);//返回类标 -//cout<<"分类结果:"< +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +using namespace std; +using namespace cv; + +bool TRAIN = false; //是否进行训练,true表示重新训练,false表示读取xml文件中的SVM模型 +bool CENTRAL_CROP = false; //true:训练时,对96*160的INRIA正样本图片剪裁出中间的64*128大小人体 + //int TRAINTYPE = 0; + + + //继承自CvSVM的类,因为生成setSVMDetector()中用到的检测子参数时,需要用到训练好的SVM的decision_func参数, + //但通过查看CvSVM源码可知decision_func参数是protected类型变量,无法直接访问到,只能继承之后通过函数访问 +class MySVM : public CvSVM +{ +public: + //获得SVM的决策函数中的alpha数组 + double * get_alpha_vector() + { + return this->decision_func->alpha; + } + + //获得SVM的决策函数中的rho参数,即偏移量 + float get_rho() + { + return this->decision_func->rho; + } +}; + +class myRect +{ +public: + string group; + double w; + Rect rect; +}; +void generateDescriptors(ifstream& imagePath, HOGDescriptor& hog, vector& descriptors, int& descriptorDim, + Mat& sampleFeatureMat, Mat& sampleLabelMat, int trainClass, int PosSamNO, int NegSamNO, int HardExampleNO) { + string imgName; + int numLimit; + if (0 == trainClass) //训练类型是正类型 + { + numLimit = PosSamNO; //positiveSampleNumber + } + else if (1 == trainClass) //训练类型是负类型 + { + numLimit = NegSamNO; + } + else if (2 == trainClass) //训练类型是困难(负)类型 + { + numLimit = HardExampleNO; + } + for (int num = 0; num < numLimit && getline(imagePath, imgName); num++) + { + //cout << imgName << endl; + cv::Mat src = imread(imgName);//读取图片 + if (src.empty()) + cout<<" -1"; + namedWindow("yuanshitu", CV_WINDOW_AUTOSIZE); + imshow("n", src); + waitKey(30); + cv::Mat newsrc = imread(imgName);//读取图片 + //CENTRAL_CROP = false; + if (CENTRAL_CROP) + resize(src, newsrc, hog.winSize); + //src = 
src(rectCrop);//将96*160的INRIA正样本图片剪裁为64*128,即剪去上下左右各16个像素 + /* imshow("....", src); + waitKey(6000); */ //resize(src,src,Size(64,128)); + if (cv::imwrite("C:\\detectProject\\data\\sourceData\\SAMPLE\\" + imgName + "HOG.jpg", newsrc)) + cout << "success"; + imshow("new", newsrc); + waitKey(30); + hog.compute(newsrc, descriptors, hog.blockStride);//计算HOG描述子,检测窗口移动步长(8,8) + //处理第一个样本时初始化特征向量矩阵和类别矩阵,因为只有知道了特征向量的维数才能初始化特征向量矩阵 + //将计算好的HOG描述子复制到样本特征矩阵sampleFeatureMat + if (0 == trainClass) + { + if (0 == num) + { + descriptorDim = descriptors.size(); //HOG描述子的维数 + //初始化所有训练样本的特征向量组成的矩阵,行数等于所有样本的个数,列数等于HOG描述子维数sampleFeatureMat + sampleFeatureMat = Mat::zeros(PosSamNO + NegSamNO + HardExampleNO, descriptorDim, CV_32FC1); + //初始化训练样本的类别向量,行数等于所有样本的个数,列数等于1;1表示有人,0表示无人 + sampleLabelMat = Mat::zeros(PosSamNO + NegSamNO + HardExampleNO, 1, CV_32FC1); + } + for (int i = 0; i < descriptorDim; i++) + sampleFeatureMat.at(num, i) = descriptors[i];//第num个样本的特征向量中的第i个元素 + sampleLabelMat.at(num, 0) = 1;//正样本类别为1,有人 + } + else if (1 == trainClass) { + if (0 == num) + descriptorDim = sampleFeatureMat.cols; + for (int i = 0; i < descriptorDim; i++) + sampleFeatureMat.at(num + PosSamNO, i) = descriptors[i];//第num个样本的特征向量中的第i个元素 + sampleLabelMat.at(num + PosSamNO, 0) = -1;//负样本类别为-1,没人 + } + else if (2 == trainClass) + { + if (0 == num) + descriptorDim = sampleFeatureMat.cols; + for (int i = 0; i < descriptorDim; i++) + sampleFeatureMat.at(num + PosSamNO + NegSamNO, i) = descriptors[i];//第num个样本的特征向量中的第i个元素 + sampleLabelMat.at(num + PosSamNO + NegSamNO, 0) = -1;//困难样本类别为-1,没人 + } + + } + descriptors.clear(); + return; +} + +void trainSVM(string posPath, string negPath, string hardPath, HOGDescriptor& hog, string modelPath, vector& descriptors, int PosSamNO, int NegSamNO, int HardExampleNO) { + + ifstream finPos(posPath.data()); + ifstream finNeg(negPath.data()); + ifstream finHard(hardPath.data()); + int DescriptorDim;//HOG描述子的维数,由图片大小、检测窗口大小、块大小、细胞单元中直方图bin个数决定 + MySVM svm;//SVM分类器 + //HOG描述子向量 + string ImgName;//绝对路径的图片名 + Mat sampleFeatureMat;//所有训练样本的特征向量组成的矩阵,行数等于所有样本的个数,列数等于HOG描述子维数 + Mat sampleLabelMat;//训练样本的类别向量,行数等于所有样本的个数,列数等于1;1表示有人,-1表示无人 + + cout << "开始计算正样本检测子" << endl; + generateDescriptors(finPos, hog, descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 0, PosSamNO, NegSamNO, HardExampleNO); + cout << "计算完成" << endl; + cout << "开始计算负样本检测子" << endl; + generateDescriptors(finNeg, hog, descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 1, PosSamNO, NegSamNO, HardExampleNO); + cout << "计算完成" << endl; + if (HardExampleNO > 0) + //依次读取HardExample困难负样本图片,生成HOG描述子 + generateDescriptors(finHard, hog, descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 2, PosSamNO, NegSamNO, HardExampleNO); + + CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, FLT_EPSILON); + //SVM参数:SVM类型为C_SVC;线性核函数;松弛因子C=0.01 + CvSVMParams param(CvSVM::C_SVC, CvSVM::LINEAR, 0, 1, 0, 0.01, 0, 0, 0, criteria); + cout << "开始训练SVM分类器" << endl; + svm.train(sampleFeatureMat, sampleLabelMat, Mat(), Mat(), param);//训练分类器 + cout << "训练完成" << endl; + svm.save(modelPath.data());//将训练好的SVM模型保存为xml文件 + cout << "SVMmodel:" << modelPath << endl; + descriptors.clear(); + finPos.close(); + finNeg.close(); + finHard.close(); + return; +} +/******************************************************************************************************************* +线性SVM训练完成后得到的XML文件里面,有一个数组,叫做support vector(支持向量,列向量),还有一个数组,叫做alpha,有一个浮点数,叫做rho; +将alpha矩阵同support 
vector相乘,注意,alpha*supportVector,将得到一个列向量。之后,在该列向量的最后添加一个元素rho。 +如此,便得到了一个分类器,利用该分类器,直接替换opencv中行人检测默认的那个分类器(cv::HOGDescriptor::setSVMDetector()) +,就可以利用你的训练样本训练出来的分类器进行行人检测了。 +********************************************************************************************************************/ +void setDetector(MySVM& svm, vector& myDetector, string detectorPath) { + int DescriptorDim = svm.get_var_count();//特征向量的维数,即HOG描述子的维数 + int supportVectorNum = svm.get_support_vector_count();//支持向量的个数 + //cout << "支持向量个数:" << supportVectorNum << endl; + // 创建需要的多维矩阵 + Mat alphaMat = Mat::zeros(1, supportVectorNum, CV_32FC1);//初始化alphaMat,值全为0,行数为1,列数等于支持向量的维度 + Mat supportVectorMat = Mat::zeros(supportVectorNum, DescriptorDim, CV_32FC1);//初始化支持向量矩阵 + Mat resultMat = Mat::zeros(1, DescriptorDim, CV_32FC1);//初始化行向量,用来存储迭代alpha向量乘以支持向量矩阵的结果 + + //将支持向量的数据复制到supportVectorMat矩阵中 + for (int i = 0; i < supportVectorNum; i++) + { + const float * pSVData = svm.get_support_vector(i);//返回第i个支持向量的数据指针 + for (int j = 0; j < DescriptorDim; j++) + { + //cout<(i, j) = pSVData[j]; + } + } + + //将alpha向量的数据复制到alphaMat中 + double * pAlphaData = svm.get_alpha_vector();//返回SVM的决策函数中的alpha向量 + for (int i = 0; i < supportVectorNum; i++) + { + alphaMat.at(0, i) = pAlphaData[i]; //alphaMat只一行,是一个行向量 + } + + //计算-(alphaMat * supportVectorMat),结果放到resultMat中 + //gemm(alphaMat, supportVectorMat, -1, 0, 1, resultMat);//不知道为什么加负号? + resultMat = -1 * alphaMat * supportVectorMat; + + //将resultMat中的数据复制到数组myDetector中 + for (int i = 0; i < DescriptorDim; i++) + { + myDetector.push_back(resultMat.at(0, i)); + } + //最后添加偏移量rho,得到检测子 + myDetector.push_back(svm.get_rho()); //在vector定义下的myDetector一维数组末尾添加一项rho + cout << "检测子维数:" << myDetector.size() << endl; + + //保存检测子参数到文件 + ofstream fout(detectorPath.data()); + for (int i = 0; i < myDetector.size(); i++) + fout << myDetector[i] << endl; + fout.close(); + + return; +} + +double deteUpLimitS1, deteUpLimitS2, deteUpLimitM1, deteUpLimitM2, deteUpLimitL1, deteUpLimitL2; +double deteLowLimitS, deteLowLimitM, deteLowLimitL; +void DetectAndDraw(Mat& src, Mat &trtd, HOGDescriptor& hog1s, HOGDescriptor& hog1m, HOGDescriptor& hog1l, HOGDescriptor& hog2s, HOGDescriptor& hog2m, HOGDescriptor& hog2l, + HOGDescriptor& hog3s, HOGDescriptor& hog3m, HOGDescriptor& hog3l, vector& found_tmp, vector& found, vector& found_filtered, vector& weight,int plate_number_int) +{ + //string path + //对视频转帧形成的图片进行多尺度行人检测 + //string dirPath = "C:\\detectProject\\testdata\\"; + Rect r; + myRect mr; + //cout << "here:" << plate_number << endl; + int deteUpLimit = 140; int deteLowLimit = 300; + + switch(plate_number_int){ + // 不同车牌对应不同的检测上下限以及SLM的不同存在范围 + case 229726: + { + deteUpLimitS1 = 176.24; deteUpLimitS2 = 189.04; deteLowLimitS = 227.24; + deteUpLimitM1 = 174.36; deteUpLimitM2 = 188.49; deteLowLimitM = 243.36; + deteUpLimitL1 = 166.7; deteUpLimitL2 = 194.61; deteLowLimitL = 283.71; + } + case 229727: + { + deteUpLimitS1 = 177.24; deteUpLimitS2 = 190.04; deteLowLimitS = 228.24; + deteUpLimitM1 = 175.36; deteUpLimitM2 = 189.49; deteLowLimitM = 243.36; + deteUpLimitL1 = 191; deteUpLimitL2 = 218.9; deteLowLimitL = 308; + } + case 229728: + { + deteUpLimitS1 = 150.24; deteUpLimitS2 = 164.37; deteLowLimitS = 219.24; + deteUpLimitM1 = 166.36; deteUpLimitM2 = 180.49; deteLowLimitM = 235.36; + deteUpLimitL1 = 166.7; deteUpLimitL2 = 194.61; deteLowLimitL = 283.71; + } + case 229729: + { + deteUpLimitS1 = 150.24; deteUpLimitS2 = 164.37; deteLowLimitS = 219.24; + deteUpLimitM1 = 166.36; deteUpLimitM2 = 180.49; deteLowLimitM = 
235.36; + deteUpLimitL1 = 166.7; deteUpLimitL2 = 194.61; deteLowLimitL = 283.71; + } + case 229730: + { + deteUpLimitS1 = 155.24; deteUpLimitS2 = 168.04; deteLowLimitS = 206.24; + deteUpLimitM1 = 153.36; deteUpLimitM2 = 167.49; deteLowLimitM = 222.36; + deteUpLimitL1 = 166.7; deteUpLimitL2 = 194.61; deteLowLimitL = 283.71; + } + } + + hog1s.detectMultiScale(src(Range(deteUpLimitS1, deteLowLimitS), Range(0, 480)), found_tmp, weight, 0.15, hog1s.blockStride, Size(0, 0), 1.08, 2, false); + //0.05~~0.1 + for (int i = found_tmp.size() - 1; i >= 0; i--) + { + r = found_tmp[i]; + // the HOG detector returns slightly larger rectangles than the real objects. + // so we slightly shrink the rectangles to get a nicer output. + // cvRound() : 返回四舍五入整数值 + r.x += cvRound(r.width*0.1); + r.width = cvRound(r.width*0.8); + r.y += 140; + r.y += cvRound(r.height*0.07); + r.height = cvRound(r.height*0.8); + //if (r.tl().y <= 190 && r.br().y >= 200) // small小行人模型 + if (deteUpLimitS1 <= r.tl().y <= deteUpLimitS2 && r.br().y <= deteLowLimitS) + { + mr.rect = found_tmp[i]; + mr.w = weight[i]; + mr.group = "small_front"; + found.push_back(mr); + //found_tmp.erase(found_tmp.begin() + i); + } + } + //found.insert(found.end(),found_tmp.begin(),found_tmp.end()); + weight.clear(); + found_tmp.clear(); + hog1m.detectMultiScale(src(Range(deteUpLimitM1, deteLowLimitM), Range(0, 480)), found_tmp, weight, 0.15, hog1m.blockStride, Size(0, 0), 1.05, 2); + // 针对不同远近大小模型的参数有所不同 + //0.1~~0.15 + for (int i = found_tmp.size() - 1; i >= 0; i--) + { + r = found_tmp[i]; + r.x += cvRound(r.width*0.1); + r.width = cvRound(r.width*0.8); + r.y += 140; + r.y += cvRound(r.height*0.07); + r.height = cvRound(r.height*0.8); + //if (r.tl().y <= 190 && r.br().y >= 215) //中行人模型 + if (deteUpLimitM1 <= r.tl().y <= deteUpLimitM2 && r.br().y <= deteLowLimitM) + { + mr.rect = found_tmp[i]; + mr.w = weight[i]; + mr.group = "middle_front"; + found.push_back(mr); + //found_tmp.erase(found_tmp.begin() + i); + } + } + ////found.insert(found.end(), found_tmp.begin(), found_tmp.end()); + weight.clear(); + found_tmp.clear(); + hog1l.detectMultiScale(src(Range(deteUpLimitL1, deteLowLimitL), Range(0, 480)), found_tmp, weight, 0.15, hog1l.blockStride, Size(0, 0), 1.01, 2); + //0.2~0.25 + for (int i = found_tmp.size() - 1; i >= 0; i--) + { + r = found_tmp[i]; + r.x += cvRound(r.width*0.1); + r.width = cvRound(r.width*0.8); + r.y += 140; + r.y += cvRound(r.height*0.07); + r.height = cvRound(r.height*0.8); + //if (r.tl().y <= 190 && r.br().y >= 230) //大行人模型 + if (deteUpLimitL1 <= r.tl().y <= deteUpLimitL2 && r.br().y <= deteLowLimitL) + { + mr.rect = found_tmp[i]; + mr.w = weight[i]; + mr.group = "large_front"; + found.push_back(mr); + //found_tmp.erase(found_tmp.begin() + i); + } + } + //found.insert(found.end(), found_tmp.begin(), found_tmp.end()); + weight.clear(); + found_tmp.clear(); + + hog2s.detectMultiScale(src(Range(deteUpLimitS1, deteLowLimitS), Range(0, 480)), found_tmp, weight, 0.15, hog2s.blockStride, Size(0, 0), 1.08, 2, false); + //0.05~~0.1 + for (int i = found_tmp.size() - 1; i >= 0; i--) + { + r = found_tmp[i]; + // the HOG detector returns slightly larger rectangles than the real objects. + // so we slightly shrink the rectangles to get a nicer output. 
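+			// In the detectMultiScale calls of this function (the OpenCV 2.x overload that also returns
+			// per-detection weights), the positional arguments after the ROI and the two output vectors are:
+			//   hitThreshold   = 0.15  (minimum SVM score for a window to count as a hit)
+			//   winStride      = blockStride, padding = Size(0, 0)
+			//   scale          = 1.08 / 1.05 / 1.01 for the small / middle / large models respectively
+			//   finalThreshold = 2     (grouping threshold for merged rectangles)
+			//   useMeanshiftGrouping = false where given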
+ // cvRound() : 返回四舍五入整数值 + r.x += cvRound(r.width*0.1); + r.width = cvRound(r.width*0.8); + r.y += 140; + r.y += cvRound(r.height*0.07); + r.height = cvRound(r.height*0.8); + //if (r.tl().y <= 190 && r.br().y >= 200) // small小行人模型 + if (deteUpLimitS1 <= r.tl().y <= deteUpLimitS2 && r.br().y <= deteLowLimitS) + { + mr.rect = found_tmp[i]; + mr.w = weight[i]; + mr.group = "small_side"; + found.push_back(mr); + //found_tmp.erase(found_tmp.begin() + i); + } + } + //found.insert(found.end(),found_tmp.begin(),found_tmp.end()); + weight.clear(); + found_tmp.clear(); + hog2m.detectMultiScale(src(Range(deteUpLimitM1, deteLowLimitM), Range(0, 480)), found_tmp, weight, 0.15, hog2m.blockStride, Size(0, 0), 1.05, 2); + // 针对不同远近大小模型的参数有所不同 + //0.1~~0.15 + for (int i = found_tmp.size() - 1; i >= 0; i--) + { + r = found_tmp[i]; + r.x += cvRound(r.width*0.1); + r.width = cvRound(r.width*0.8); + r.y += 140; + r.y += cvRound(r.height*0.07); + r.height = cvRound(r.height*0.8); + //if (r.tl().y <= 190 && r.br().y >= 215) //中行人模型 + if (deteUpLimitM1 <= r.tl().y <= deteUpLimitM2 && r.br().y <= deteLowLimitM) + { + mr.rect = found_tmp[i]; + mr.w = weight[i]; + mr.group = "middle_side"; + found.push_back(mr); + //found_tmp.erase(found_tmp.begin() + i); + } + } + ////found.insert(found.end(), found_tmp.begin(), found_tmp.end()); + weight.clear(); + found_tmp.clear(); + hog2l.detectMultiScale(src(Range(deteUpLimitL1, deteLowLimitL), Range(0, 480)), found_tmp, weight, 0.15, hog2l.blockStride, Size(0, 0), 1.01, 2); + //0.2~0.25 + for (int i = found_tmp.size() - 1; i >= 0; i--) + { + r = found_tmp[i]; + r.x += cvRound(r.width*0.1); + r.width = cvRound(r.width*0.8); + r.y += 140; + r.y += cvRound(r.height*0.07); + r.height = cvRound(r.height*0.8); + //if (r.tl().y <= 190 && r.br().y >= 230) //大行人模型 + if (deteUpLimitL1 <= r.tl().y <= deteUpLimitL2 && r.br().y <= deteLowLimitL) + { + mr.rect = found_tmp[i]; + mr.w = weight[i]; + mr.group = "large_side"; + found.push_back(mr); + //found_tmp.erase(found_tmp.begin() + i); + } + } + //found.insert(found.end(), found_tmp.begin(), found_tmp.end()); + weight.clear(); + found_tmp.clear(); + + hog3s.detectMultiScale(src(Range(deteUpLimitS1, deteLowLimitS), Range(0, 480)), found_tmp, weight, 0.15, hog3s.blockStride, Size(0, 0), 1.08, 2, false); + //0.05~~0.1 + for (int i = found_tmp.size() - 1; i >= 0; i--) + { + r = found_tmp[i]; + // the HOG detector returns slightly larger rectangles than the real objects. + // so we slightly shrink the rectangles to get a nicer output. 
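+			// Note on the range checks in these loops: a chained comparison such as
+			//   deteUpLimitS1 <= r.tl().y <= deteUpLimitS2
+			// is parsed by C++ as (deteUpLimitS1 <= r.tl().y) <= deteUpLimitS2, so the boolean result
+			// (0 or 1) is compared with the upper limit; since the limits are far greater than 1, the
+			// clause is always true and neither bound on r.tl().y is actually enforced. The intended band
+			// test is presumably spelled out explicitly, e.g.:
+			//   if (deteUpLimitS1 <= r.tl().y && r.tl().y <= deteUpLimitS2 && r.br().y <= deteLowLimitS)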
+    // cvRound(): rounds to the nearest integer
+    r.x += cvRound(r.width*0.1);
+    r.width = cvRound(r.width*0.8);
+    r.y += 140;
+    r.y += cvRound(r.height*0.07);
+    r.height = cvRound(r.height*0.8);
+    //if (r.tl().y <= 190 && r.br().y >= 200) // small pedestrian model
+    if (deteUpLimitS1 <= r.tl().y && r.tl().y <= deteUpLimitS2 && r.br().y <= deteLowLimitS)
+    {
+      mr.rect = found_tmp[i];
+      mr.w = weight[i];
+      mr.group = "small_ride";
+      found.push_back(mr);
+      //found_tmp.erase(found_tmp.begin() + i);
+    }
+  }
+  //found.insert(found.end(),found_tmp.begin(),found_tmp.end());
+  weight.clear();
+  found_tmp.clear();
+  hog3m.detectMultiScale(src(Range(deteUpLimitM1, deteLowLimitM), Range(0, 480)), found_tmp, weight, 0.15, hog3m.blockStride, Size(0, 0), 1.05, 2);
+  // the parameters differ between the near/middle/far (size) models
+  //0.1~~0.15
+  for (int i = found_tmp.size() - 1; i >= 0; i--)
+  {
+    r = found_tmp[i];
+    r.x += cvRound(r.width*0.1);
+    r.width = cvRound(r.width*0.8);
+    r.y += 140;
+    r.y += cvRound(r.height*0.07);
+    r.height = cvRound(r.height*0.8);
+    //if (r.tl().y <= 190 && r.br().y >= 215) // middle pedestrian model
+    if (deteUpLimitM1 <= r.tl().y && r.tl().y <= deteUpLimitM2 && r.br().y <= deteLowLimitM)
+    {
+      mr.rect = found_tmp[i];
+      mr.w = weight[i];
+      mr.group = "middle_ride";
+      found.push_back(mr);
+      //found_tmp.erase(found_tmp.begin() + i);
+    }
+  }
+  ////found.insert(found.end(), found_tmp.begin(), found_tmp.end());
+  weight.clear();
+  found_tmp.clear();
+  hog3l.detectMultiScale(src(Range(deteUpLimitL1, deteLowLimitL), Range(0, 480)), found_tmp, weight, 0.15, hog3l.blockStride, Size(0, 0), 1.01, 2);
+  //0.2~0.25
+  for (int i = found_tmp.size() - 1; i >= 0; i--)
+  {
+    r = found_tmp[i];
+    r.x += cvRound(r.width*0.1);
+    r.width = cvRound(r.width*0.8);
+    r.y += 140;
+    r.y += cvRound(r.height*0.07);
+    r.height = cvRound(r.height*0.8);
+    //if (r.tl().y <= 190 && r.br().y >= 230) // large pedestrian model
+    if (deteUpLimitL1 <= r.tl().y && r.tl().y <= deteUpLimitL2 && r.br().y <= deteLowLimitL)
+    {
+      mr.rect = found_tmp[i];
+      mr.w = weight[i];
+      mr.group = "large_ride";
+      found.push_back(mr);
+      //found_tmp.erase(found_tmp.begin() + i);
+    }
+  }
+  //found.insert(found.end(), found_tmp.begin(), found_tmp.end());
+  weight.clear();
+  found_tmp.clear();
+
+
+  //!!! NOTE: check how the boundaries were chosen (Range(300, 570), Range(0, 1280))
+
+  // Find all rectangles that are not nested inside another one and keep them; when rectangles
+  // are nested, only the outermost (largest) one is kept.
+  int x1, x2, y1, y2;
+  for (int i = 0; i < found.size(); i++)
+  {
+    mr = found[i];
+    int j = 0;
+    //for (; j < found.size(); j++)
+    //	if (j != i && (r & found[j]) == r)
+    //		break;
+    for (; j < found.size(); j++)
+      if (j != i && (mr.rect & found[j].rect) == mr.rect) // (a & b) == a  <=>  a lies entirely inside b
+        break;
+    if (j < found.size())
+      continue; // skip nested rectangles; only the outermost ones are drawn below
+    //if (x2 > 480) {
+    //	x1 = 480 - winSize.width;
+    //	x2 = 480;
+    //}
+    //if (y2 > 356){
+    //	y1 = 356 - winSize.height;
+    //	y2 = 356;
+    //}
+    /* if (TRAINTYPE == 1)
+    {
+    headString = "she_";
+    }
+    else if(TRAINTYPE == 2)
+    {
+    headString = "mhe_";
+    }
+    else if(TRAINTYPE == 3)
+    {
+    headString = "bhe_";
+    }*/
+
+    //ss.str("");
+    //ss << i;
+    //hePath = dirPath + headString + num + "_" + ss.str() + ".jpg";
+    //imwrite(hePath, src(Range(y1, y2), Range(x1, x2)));
+
+    // Draw the rectangles; the boxes returned by the HOG detector are slightly larger than the
+    // real person, so we slightly shrink the rectangles to get a nicer output.
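+    // Note on the colour constants used in the drawing below (illustrative only): on the 8-bit
+    // 3-channel frames used here, rectangle()/putText() interpret Scalar as (B, G, R), so e.g.
+    // Scalar(255, 220, 215) is a pale, blue-leaning tone, and each of the nine model/view groups
+    // gets its own shade. A pure blue box would be, for example,
+    //   rectangle(trtd, mr.rect.tl(), mr.rect.br(), Scalar(255, 0, 0), 1);   // BGR: blue only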
+ mr.rect.x += cvRound(mr.rect.width*0.1); + mr.rect.width = cvRound(mr.rect.width*0.8); + mr.rect.y += 140; + mr.rect.y += cvRound(mr.rect.height*0.07); + mr.rect.height = cvRound(mr.rect.height*0.8); + ////!!!!与上面的边界对应 + //rectangle(trtd, Rect(0, 120, 480, 180), Scalar(0, 255, 0), 1);//上下界 + //rectangle(trtd, Rect(0, 190, 480, 1), Scalar(255, 255, 255), 1);//视平线 + //rectangle(trtd, Rect(0, 205, 480, 1), Scalar(255, 255, 0), 1);//30m线 + //rectangle(trtd, Rect(0, 220, 480, 1), Scalar(255, 0, 255), 1);//15m线 + //rectangle(trtd, Rect(0, 235, 480, 1), Scalar(0, 0, 255), 1);//10m线 + + + //if(r.tl().y <190 && r.br().y>190) + if (mr.group == "small_front") + rectangle(trtd, mr.rect.tl(), mr.rect.br(), Scalar(255, 220, 215), 1); + if (mr.group == "middle_front") + rectangle(trtd, mr.rect.tl(), mr.rect.br(), Scalar(255, 220, 210), 1); + if (mr.group == "large_front") + rectangle(trtd, mr.rect.tl(), mr.rect.br(), Scalar(255, 220, 205), 1); + if (mr.group == "small_side") + rectangle(trtd, mr.rect.tl(), mr.rect.br(), Scalar(0, 245, 255), 1); + if (mr.group == "middle_side") + rectangle(trtd, mr.rect.tl(), mr.rect.br(), Scalar(0, 235, 250), 1); + if (mr.group == "large_side") + rectangle(trtd, mr.rect.tl(), mr.rect.br(), Scalar(0, 225, 245), 1); + if (mr.group == "small_ride") + rectangle(trtd, mr.rect.tl(), mr.rect.br(), Scalar(65, 200, 125), 1); + if (mr.group == "middle_ride") + rectangle(trtd, mr.rect.tl(), mr.rect.br(), Scalar(65, 200, 135), 1); + if (mr.group == "large_ride") + rectangle(trtd, mr.rect.tl(), mr.rect.br(), Scalar(65, 200, 145), 1); + if(mr.rect.tl().y>5) + putText(trtd, mr.group.data(), cvPoint(mr.rect.tl().x, mr.rect.tl().y - 5),FONT_HERSHEY_PLAIN, 0.3, Scalar(65, 255, 255),0.05,4); + + } + return; +} + +// 图片转avi视频的函数,不过好像没用到 +//void processedImgToVideo(string dirPath, char * videoPath, int tolFrame) { +// IplImage* img; +// string imgPath; +// char const *fimgPath; +// CvVideoWriter* writer = cvCreateVideoWriter(videoPath, CV_FOURCC('X', 'V', 'I', 'D'), 14, Size(480, 356)); +// stringstream ss; +// for (int i = 0; i < tolFrame; i++) +// { +// ss.str(""); +// ss << i; +// imgPath = dirPath + "pimage" + ss.str() + ".jpg"; +// fimgPath = imgPath.c_str(); +// img = cvLoadImage(fimgPath); +// cvWriteFrame(writer, img); +// cvReleaseImage(&img); +// cout << imgPath << endl; +// } +// cvReleaseVideoWriter(&writer); +//} + +//int main() +//{ +// bool bbbb = true; +// if (bbbb == false) +// { +// int a = 1; +// cout << a << endl; +// } +// stringstream ss; +// int a = 100l; +// int b = 2002; +// ss << a; +// cout << ss.str() << endl; +// cout << "hhe" << endl; +// ss.str(""); +// ss << b; +// cout << ss.str() << endl; +// system("pause"); +//cout<< CV_VERSION< descriptors; + if (TRAIN == true) + { + trainSVM(posPath1s, negPath1s, hardPath1s, hog1s, modelPath1s, descriptors, PosSamNO1s, NegSamNO1s, HardExampleNO1s); + trainSVM(posPath1m, negPath1m, hardPath1m, hog1m, modelPath1m, descriptors, PosSamNO1m, NegSamNO1m, HardExampleNO1m); + trainSVM(posPath1l, negPath1l, hardPath1l, hog1l, modelPath1l, descriptors, PosSamNO1l, NegSamNO1l, HardExampleNO1l); + trainSVM(posPath2s, negPath2s, hardPath2s, hog2s, modelPath2s, descriptors, PosSamNO2s, NegSamNO2s, HardExampleNO2s); + trainSVM(posPath2m, negPath2m, hardPath2m, hog2m, modelPath2m, descriptors, PosSamNO2m, NegSamNO2m, HardExampleNO2m); + trainSVM(posPath2l, negPath2l, hardPath2l, hog2l, modelPath2l, descriptors, PosSamNO2l, NegSamNO2l, HardExampleNO2l); + trainSVM(posPath3s, negPath3s, hardPath3s, hog3s, modelPath3s, descriptors, 
PosSamNO3s, NegSamNO3s, HardExampleNO3s);
+    trainSVM(posPath3m, negPath3m, hardPath3m, hog3m, modelPath3m, descriptors, PosSamNO3m, NegSamNO3m, HardExampleNO3m);
+    trainSVM(posPath3l, negPath3l, hardPath3l, hog3l, modelPath3l, descriptors, PosSamNO3l, NegSamNO3l, HardExampleNO3l);
+  }
+
+  MySVM svm1s, svm1m, svm1l, svm2s, svm2m, svm2l, svm3s, svm3m, svm3l;
+  vector<float> myDetector;
+
+  svm1s.load(modelPath1s.data());
+  setDetector(svm1s, myDetector, detectorPath1s);
+  hog1s.setSVMDetector(myDetector);
+  myDetector.clear();
+
+  svm1m.load(modelPath1m.data());
+  setDetector(svm1m, myDetector, detectorPath1m);
+  hog1m.setSVMDetector(myDetector);
+  myDetector.clear();
+
+  svm1l.load(modelPath1l.data());
+  setDetector(svm1l, myDetector, detectorPath1l);
+  hog1l.setSVMDetector(myDetector);
+  myDetector.clear();
+
+  svm2s.load(modelPath2s.data());
+  setDetector(svm2s, myDetector, detectorPath2s);
+  hog2s.setSVMDetector(myDetector);
+  myDetector.clear();
+
+  svm2m.load(modelPath2m.data());
+  setDetector(svm2m, myDetector, detectorPath2m);
+  hog2m.setSVMDetector(myDetector);
+  myDetector.clear();
+
+  svm2l.load(modelPath2l.data());
+  setDetector(svm2l, myDetector, detectorPath2l);
+  hog2l.setSVMDetector(myDetector);
+  myDetector.clear();
+
+  svm3s.load(modelPath3s.data());
+  setDetector(svm3s, myDetector, detectorPath3s);
+  hog3s.setSVMDetector(myDetector);
+  myDetector.clear();
+
+  svm3m.load(modelPath3m.data());
+  setDetector(svm3m, myDetector, detectorPath3m);
+  hog3m.setSVMDetector(myDetector);
+  myDetector.clear();
+
+  svm3l.load(modelPath3l.data());
+  setDetector(svm3l, myDetector, detectorPath3l);
+  hog3l.setSVMDetector(myDetector);
+  myDetector.clear();
+
+  /************** Read the test videos and run HOG pedestrian detection **************/
+  cout << "Start Detecting..." << endl;
+  vector<Rect> found_tmp; // raw detection rectangles
+  vector found_filtered, found;
+  vector<double> weight;
+  ifstream finDetect(detectDataPath.data());
+  //cout << detectDataPath; //.../videoLists.txt
+  string detectData, videoPath, rectFilePath;
+  Mat src, trtd;
+  IplImage* iplimage; // used when writing images to video
+  string imgPath;
+  stringstream ss;
+  VideoCapture cap;
+  CvVideoWriter* writer;
+  double totalFrame;
+  vector<string> splitString1;
+  vector<string> splitString2;
+
+  while (getline(finDetect, detectData))
+  {
+    cout << "Detecting " << detectData << endl;
+    videoPath = detectData;
+    splitString1.clear(); // drop the tokens left over from the previous video
+    splitString2.clear();
+    cap.open(videoPath.data()); // open one of the test videos
+    if (!cap.isOpened()) {
+      cout << "Cannot open the video whose path is " << videoPath << endl;
+      continue;
+    }
+    // string ---> char *
+    char *path_video = (char *)videoPath.c_str();
+    // split the path on '\'
+    const char *split = "\\";
+    char *p = strtok(path_video, split);
+    while (p != NULL) {
+      splitString1.push_back(p);
+      //nums[i] = p;
+      p = strtok(NULL, split);
+    }
+    string videoname = splitString1[splitString1.size() - 1]; // the last '\'-separated token is the full video file name
+    cout << videoname << endl;
+    char *name_video = (char *)videoname.c_str();
+    // split the file name on '_'
+    const char *split2 = "_";
+    char *p2 = strtok(name_video, split2);
+    while (p2 != NULL) {
+      splitString2.push_back(p2);
+      //nums[i] = p;
+      p2 = strtok(NULL, split2);
+    }
+    string plate_number = splitString2[2]; // licence-plate number parsed
+    int plate_number_int = atoi(plate_number.c_str());
+    //cout << plate_number_int << endl;
+    totalFrame = cap.get(CV_CAP_PROP_FRAME_COUNT); // total number of frames
+    //*******
+    videoPath = detectData.substr(0, detectData.length() - 4) + "p.avi";
+    // path of the annotated result video: same folder and base name, with .mp4 replaced by p.avi
+    rectFilePath = detectData.substr(0, detectData.length() - 4) + "r.txt"; // path of the detection-result text file
+    ofstream fout(rectFilePath.data());
+    writer =
cvCreateVideoWriter(videoPath.data(), CV_FOURCC('X', 'V', 'I', 'D'), 14, Size(480, 356)); //写结果视频操作相关 + for (int num = 0; num(num, i) = descriptors[i];//第num个样本的特征向量中的第i个元素 +// sampleLabelMat.at(num, 0) = 1;//正样本类别为1,有人 +// descriptors.clear(); +//} + +////依次读取负样本图片,生成HOG描述子 +//for (int num = 0; num < NegSamNO && getline(finNeg, ImgName); num++) +//{ +// //cout << "处理:" << ImgName << num << endl; +// ImgName = "D:\\detectProject\\negativedata\\" + ImgName;//加上负样本的路径名 +// Mat src = imread(ImgName);//读取图片 +// //resize(src,img,Size(64,128)); +// //imshow("....", src); +// //waitKey(6000); +// hog.compute(src, descriptors, Size(8, 8));//计算HOG描述子,检测窗口移动步长(8,8) +// //cout<<"描述子维数:"<(num + PosSamNO, i) = descriptors[i];//第PosSamNO+num个样本的特征向量中的第i个元素 +// sampleLabelMat.at(num + PosSamNO, 0) = -1;//负样本类别为-1,无人 +// descriptors.clear(); +//} + +//for (int num = 0; num < HardExampleNO && getline(finHardExample, ImgName); num++) +//{ +// cout << "处理:" << ImgName << endl; +// ImgName = "D:\\DataSet\\HardExample_2400PosINRIA_12000Neg\\" + ImgName;//加上HardExample负样本的路径名 +// Mat src = imread(ImgName);//读取图片 +// //resize(src,img,Size(64,128)); +// hog.compute(src, descriptors, Size(8, 8));//计算HOG描述子,检测窗口移动步长(8,8) +// //cout<<"描述子维数:"<(num + PosSamNO + NegSamNO, i) = descriptors[i];//第PosSamNO+num个样本的特征向量中的第i个元素 +// sampleLabelMat.at(num + PosSamNO + NegSamNO, 0) = -1;//负样本类别为-1,无人 +// descriptors.clear(); +//} + + +////检测窗口(64,128),块尺寸(16,16),块步长(8,8),cell尺寸(8,8),直方图bin个数9 +//HOGDescriptor hog(winSize, blockSize, blockStride, cellSize, 9);//HOG检测器,用来计算HOG描述子的 +//int DescriptorDim;//HOG描述子的维数,由图片大小、检测窗口大小、块大小、细胞单元中直方图bin个数决定 +//MySVM svm;//SVM分类器 +//vector descriptors;//HOG描述子向量 +////namedWindow("~.~"); +// //若TRAIN为true,重新训练分类器 +//if (TRAIN) +//{ +// string ImgName;//图片名(绝对路径) +// ifstream finPos("D:\\detectProject\\LargeTrainData.txt");//正样本图片的文件名列表 +// ifstream finNeg("D:\\detectProject\\NegativeData3.txt");//负样本图片的文件名列表 + +// Mat sampleFeatureMat;//所有训练样本的特征向量组成的矩阵,行数等于所有样本的个数,列数等于HOG描述子维数 +// Mat sampleLabelMat;//训练样本的类别向量,行数等于所有样本的个数,列数等于1;1表示有人,-1表示无人 + +// string trainPath = "D:\\detectProject\\traindata\\"; +// string bgPath = "D:\\detectProject\\negativedata\\"; +// //依次读取正样本图片,生成HOG描述子 +// generateDescriptors(finPos, hog, descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 0, trainPath); +// //依次读取负样本图片,生成HOG描述子 +// generateDescriptors(finNeg, hog, descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 1, bgPath); +// +// //处理HardExample负样本 +// if (HardExampleNO > 0) +// { +// ifstream finHardExample("HardExample_2400PosINRIA_12000NegList.txt");//HardExample负样本图片的文件名列表 +// string hardPath = "D:\\DataSet\\HardExample_2400PosINRIA_12000Neg\\"; +// generateDescriptors(finHardExample, hog, descriptors, DescriptorDim, sampleFeatureMat, sampleLabelMat, 2, hardPath); //依次读取HardExample负样本图片,生成HOG描述子 +// } + +// ////输出样本的HOG特征向量矩阵到文件 +// /*ofstream fout("D:\\detectProject\\SampleFeatureMat.txt"); +// for(int i=0; i(i,j)<<" "; +// fout<(i, j) = pSVData[j]; +// } +//} + +////将alpha向量的数据复制到alphaMat中 +//double * pAlphaData = svm.get_alpha_vector();//返回SVM的决策函数中的alpha向量 +//for (int i = 0; i < supportVectorNum; i++) +//{ +// alphaMat.at(0, i) = pAlphaData[i]; +//} + +////计算-(alphaMat * supportVectorMat),结果放到resultMat中 +////gemm(alphaMat, supportVectorMat, -1, 0, 1, resultMat);//不知道为什么加负号? 
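+// Why the minus sign (a sketch of the reasoning, assuming OpenCV 2.4's CvSVM conventions and the
+// +1 = person / -1 = background labels used for training above):
+//   the stored decision function is  f(x) = sum_i alpha_i * <sv_i, x> - rho,  and with this label
+//   ordering it comes out positive for the background class, i.e. a window contains a person
+//   exactly when f(x) < 0, which is the same as  (-sum_i alpha_i * sv_i) . x + rho > 0.
+//   setSVMDetector() expects precisely that form, a weight vector with the bias appended and a
+//   larger score meaning "person"; hence the negated alphaMat * supportVectorMat below and the
+//   later myDetector.push_back(svm.get_rho()).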
+//resultMat = -1 * alphaMat * supportVectorMat;
+
+//// build the detector that can be passed to setSVMDetector(const vector<float>& detector)
+//vector<float> myDetector;
+//// copy the data in resultMat into myDetector
+//for (int i = 0; i < DescriptorDim; i++)
+//{
+//	myDetector.push_back(resultMat.at<float>(0, i));
+//}
+//// finally append the offset rho to complete the detector
+//myDetector.push_back(svm.get_rho());
+//cout << "detector dimension: " << myDetector.size() << endl;
+//// set the HOGDescriptor's detector
+//hog.setSVMDetector(myDetector);
+////myHOG.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
+
+//// save the detector parameters to a file
+//ofstream fout("D:\\detectProject\\HOGDetectorForOpenCV.txt");
+//for (int i = 0; i < myDetector.size(); i++)
+//{
+//	fout << myDetector[i] << endl;
+//}
+
+/****************** Read a single 64*128 test image and classify its HOG descriptor ******************/
+//// read the test image (64*128) and compute its HOG descriptor
+////Mat testImg = imread("person014142.jpg");
+//Mat testImg = imread("noperson000026.jpg");
+//vector<float> descriptor;
+//hog.compute(testImg,descriptor,Size(8,8)); // compute the HOG descriptor with window stride (8,8)
+//Mat testFeatureMat = Mat::zeros(1,3780,CV_32FC1); // feature-vector matrix of the test sample
+//// copy the computed HOG descriptor into testFeatureMat
+//for(int i=0; i<descriptor.size(); i++)
+//	testFeatureMat.at<float>(0,i) = descriptor[i];
+
+//// classify the test image's feature vector with the trained SVM
+//int result = svm.predict(testFeatureMat); // returns the class label
+//cout << "classification result: " << result << endl;
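+// Where the 3780 above comes from (standard HOG arithmetic for the 64x128 window with 16x16 blocks,
+// 8x8 block stride, 8x8 cells and 9 bins, as listed in the commented-out setup above):
+//   blocks per window  = ((64-16)/8 + 1) * ((128-16)/8 + 1) = 7 * 15 = 105
+//   values per block   = (2*2 cells) * 9 bins               = 36
+//   descriptor length  = 105 * 36                           = 3780
+// A different detection-window size gives a different descriptor length by the same formula.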