OpenCV background subtraction with an improved OTSU threshold for denoising
/*
* 1) The library files for the cvaux.h header must be added to the linker: cvaux210d.lib and
*    cvaux210.lib are the debug and release versions, respectively (see the #pragma sketch after
*    the includes). Otherwise some functions fail with error:: LINK 2101.
* 2) For cvAdaptiveThreshold and cvThreshold, the src and dst images must be single-channel images
*    of the same type.
* 3) Important: after a function returns, its automatic variables are destroyed along with the stack frame.
*    To keep the result of the previous call, declare those variables static inside the function,
*    or declare them in the caller first.
* 4) cvAdaptiveThreshold() gives poor results here: it tries to find the contours of every object,
*    so in background subtraction it picks up objects we do not want.
* 5) When there is no foreground object, the OTSU algorithm brings the road surface out as foreground,
*    because the threshold adapts automatically. The fix is to filter the threshold against a lower bound.
* 6) VedioControl() is not implemented.
*    Wrapping cvWaitKey() inside VedioControl() means it never returns unless a key is pressed,
*    which prevents automatic playback. (A minimal non-blocking sketch follows the empty definition below.)
*
* Date: 2012/4/6
* Author: Rocky Chen
*/
#include "stdafx.h"
#include <stdio.h>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <iostream>
#include "cvaux.h"
#include "cxmisc.h"
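//One way to satisfy note 1 without editing the project's linker settings is an MSVC
//#pragma comment directive; the library names are the OpenCV 2.1 ones cited in the note above.
#ifdef _MSC_VER
#ifdef _DEBUG
#pragma comment(lib, "cvaux210d.lib")
#else
#pragma comment(lib, "cvaux210.lib")
#endif
#endif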
using namespace std;
void BackgroundDiff(IplImage* SrcImg, IplImage* FroundImg, IplImage* BackgroundImg,
int nFrmNum, int threshold_method);
void cvOtsu(IplImage *src, int *thresholdValue);
void PrintVedioInfo(CvCapture* pCapture, IplImage* img);
void VedioControl(); //not implemented
void Labeling(IplImage *src, IplImage *dst); //defined below; not called in main()
//Global playback-control flag:
// 's' pauses the display
// 'q' quits playback
// 'p' prints the threshold found by the OTSU routine
char ctrl = 0;
int main( int argc, char** argv )
{
//Declare IplImage pointers
IplImage* pFrame = NULL;
IplImage* pFroundImg = NULL;
IplImage* pBackgroundImg = NULL;
IplImage* pFroundImg_c = NULL;
IplImage* pBackgroundImg_c = NULL;
//Background modeling on the gate video works best
// CvCapture* pCapture = cvCreateFileCapture("D:\\C++ Projects\\OpenCV_project\\test_video\\gate_11ms_00-30s.avi");
// CvCapture* pCapture = cvCreateFileCapture("D:\\C++ Projects\\OpenCV_project\\img_video\\video.short.mjpg.avi");
CvCapture* pCapture = cvCreateFileCapture("D:\\C++ Projects\\OpenCV_project\\img_video\\video.long.mjpg.avi");
int nFrmNum = 0;
//Create the display windows
cvNamedWindow("video", 1);
cvNamedWindow("background", 1);
cvNamedWindow("OTSU foreground", 1);
cvNamedWindow("Improved OTSU foreground", 1);
//Arrange the windows in order
cvMoveWindow("video", 30, 0);
cvMoveWindow("background", 360, 0);
cvMoveWindow("OTSU foreground", 690, 0);
cvMoveWindow("Improved OTSU foreground", 690, 320);
//Read the video frame by frame
while(pFrame = cvQueryFrame( pCapture ))
{
nFrmNum++;
//Playback control
if( (ctrl = cvWaitKey(1000/180)) == 's' ) cvWaitKey();
else if( ctrl == 'p' ) cout << "Current Frame = " << nFrmNum << endl;
else if( ctrl == 'q' )
break;
if(nFrmNum == 1)
{
pBackgroundImg = cvCreateImage(cvGetSize(pFrame), 8,1);
pFroundImg = cvCreateImage(cvGetSize(pFrame), 8,1);
pBackgroundImg_c = cvCreateImage(cvGetSize(pFrame), 8,1); //images for the comparison algorithm
pFroundImg_c = cvCreateImage(cvGetSize(pFrame), 8,1);
}
BackgroundDiff(pFrame, pFroundImg, pBackgroundImg, nFrmNum, CV_THRESH_OTSU); //plain OTSU
BackgroundDiff(pFrame, pFroundImg_c, pBackgroundImg_c, nFrmNum, CV_THRESH_BINARY); //OTSU after threshold filtering
//Print video information and handle the display
PrintVedioInfo(pCapture, pFroundImg);
//Show the images
cvShowImage("video", pFrame);
cvShowImage("background", pBackgroundImg);
cvShowImage("OTSU foreground", pFroundImg);
cvShowImage("Improved OTSU foreground", pFroundImg_c);
} //while
//Destroy all windows
cvDestroyAllWindows();
//Release the images and the capture
cvReleaseImage(&pFroundImg);
cvReleaseImage(&pBackgroundImg);
cvReleaseImage(&pFroundImg_c);
cvReleaseImage(&pBackgroundImg_c);
cvReleaseCapture(&pCapture);
return 0;
}
void VedioControl()
{
}
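//Regarding note 6 above: VedioControl() stays empty because wrapping a bare cvWaitKey() in it
//blocks until a key is pressed. A minimal non-blocking sketch is given below; the name
//VedioControlSketch, the bool return value, and the nFrmNum parameter are illustrative additions,
//not part of the original program. It relies on cvWaitKey() returning -1 when no key is pressed
//within the timeout, so playback continues automatically.
bool VedioControlSketch(int nFrmNum)
{
ctrl = cvWaitKey(1000/180); //returns -1 if no key was pressed within the timeout
if( ctrl == 's' ) cvWaitKey(); //pause until any key is pressed
else if( ctrl == 'p' ) cout << "Current Frame = " << nFrmNum << endl;
else if( ctrl == 'q' ) return false; //ask the caller to stop playback
return true;
}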
/*
* Draw text onto the image
*/
void PrintVedioInfo(CvCapture* pCapture, IplImage* img)
{
assert( pCapture != NULL);
double frames = cvGetCaptureProperty(pCapture, CV_CAP_PROP_POS_FRAMES); //current frame index of the video
double fps = cvGetCaptureProperty(pCapture, CV_CAP_PROP_FPS); //frames per second of the video
char str[255];
sprintf(str, "%4.2f FPS %4.2f frames", fps, frames); //format the numbers into a string
CvPoint location = cvPoint(20,20); //where the string is drawn
CvScalar color = cvScalar(255,255,255);
CvFont font; //font variable
cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1.0, 1.0); //font settings
cvPutText(img, str, location, &font, color); //draw the text onto the image
}
/********
* Background subtraction: extract the foreground objects.
* Important: after the function returns, its automatic variables are destroyed along with the stack frame.
* To keep the result of the previous call, the variables are declared static inside the function
* (alternatively they could be declared in the caller).
********/
void BackgroundDiff(IplImage* SrcImg, IplImage* FroundImg, IplImage* BackgroundImg,
int nFrmNum, int threshold_method = CV_THRESH_OTSU)
{
static IplImage* SrcImg_gray = NULL; //grayscale version of the source image
static IplImage* SrcImg_grayf = NULL; //single-channel float image used for background modeling
static IplImage* FroundImgf = NULL;
static IplImage* BackgroundImgf = NULL;
static IplImage* FroundImg_temp = NULL;
if(nFrmNum == 1)
{
SrcImg_gray = cvCreateImage(cvGetSize(SrcImg), 8,1);
FroundImg_temp = cvCreateImage(cvGetSize(SrcImg), 8,1);
BackgroundImgf = cvCreateImage(cvGetSize(SrcImg), 32,1); //float images
FroundImgf = cvCreateImage(cvGetSize(SrcImg), 32,1);
SrcImg_grayf = cvCreateImage(cvGetSize(SrcImg), 32,1);
//Convert the RGB frame to an 8-bit single-channel image first, then to float.
cvCvtColor(SrcImg, BackgroundImg, CV_BGR2GRAY);
cvCvtColor(SrcImg, FroundImg, CV_BGR2GRAY);
cvConvert(BackgroundImg, BackgroundImgf);
cvConvert(FroundImg, FroundImgf);
}
else
{
cvCvtColor(SrcImg, SrcImg_gray, CV_BGR2GRAY); //SrcImg_gray is static, so it was not reclaimed when the previous call returned
cvConvert(SrcImg_gray, SrcImg_grayf);
//Subtract the background from the current frame
cvAbsDiff(SrcImg_grayf, BackgroundImgf, FroundImgf);
cvConvert(FroundImgf, FroundImg_temp); //convert float back to 8-bit
//Binarize the foreground image
int threshold_otsu = 0;
cvOtsu(FroundImg_temp, &threshold_otsu);
if(threshold_method == CV_THRESH_OTSU)
{
cvThreshold(FroundImg_temp, FroundImg, 0, 255.0, CV_THRESH_OTSU); //plain OTSU thresholding, for comparison
// cvAdaptiveThreshold(FroundImg_temp, FroundImg, 255.0, 0, 0, 51); //src and dst must both be 8-bit or both be float images
}
else
{
cvThreshold(FroundImg_temp, FroundImg, threshold_otsu, 255.0, CV_THRESH_BINARY);
}
cvSegmentFGMask( FroundImg ); //connected-component segmentation of the foreground mask
//Update the background
cvRunningAvg(SrcImg_grayf, BackgroundImgf, 0.003, 0); //must be float images, because the increments are fractional
cvConvert(BackgroundImgf, BackgroundImg);
}
}
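//For reference, cvRunningAvg(img, acc, alpha) above implements an exponential running average,
//acc = (1 - alpha)*acc + alpha*img, so with alpha = 0.003 the background absorbs about 0.3% of each
//new frame. That is also why BackgroundImgf must be a 32-bit float image: the per-frame increments
//are fractional and would be truncated in an 8-bit image. A rough equivalent written out with
//cvAddWeighted, shown only to illustrate the update formula (not a replacement for the call above):
//
//    const double alpha = 0.003;
//    cvAddWeighted(SrcImg_grayf, alpha, BackgroundImgf, 1.0 - alpha, 0.0, BackgroundImgf);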
/********
* OTSU (Otsu's method)
* thresholdValue receives the threshold that maximizes the between-class variance.
* If the threshold found is below a correction value, that correction value is returned instead,
* to prevent the background from being segmented out when there is no foreground object.
********/
void cvOtsu(IplImage *src, int *thresholdValue)
{
int deltaT = 0; //illumination adjustment parameter
uchar grayflag = 1;
IplImage* gray = NULL;
if(src->nChannels != 1) //check whether the source is already a grayscale image
{
gray = cvCreateImage(cvGetSize(src), 8, 1);
cvCvtColor(src, gray, CV_BGR2GRAY);
grayflag = 0;
}
else gray = src;
uchar* ImgData = (uchar*)(gray->imageData);
int thresholdValue_temp = 1;
int ihist[256]; //image histogram, 256 bins
int i, imgsize; //loop variable, image size
int n, n1, n2; //n: total pixel count, n1: foreground pixel count, n2: background pixel count
double m1, m2, sum, csum, fmax, sb; //m1: foreground mean gray level, m2: background mean gray level
//Zero the histogram
memset(ihist, 0, sizeof(ihist));
//Build the histogram
imgsize = (gray->widthStep)*(gray->height); //total number of image bytes
for (i = 0; i < imgsize; i++)
{
ihist[((int)(*ImgData))&255]++; //gray-level count; '&255' guards against out-of-range values
ImgData++; //walk over the pixels
}
// set up everything
sum = csum = 0.0;
n = 0;
for (i = 0; i < 256; i++)
{
sum += (double)i * (double)ihist[i]; //x*f(x), the first moment
n += ihist[i]; //f(x), the total pixel count
}
deltaT = (int)(sum/imgsize); //mean gray level of the image
deltaT = deltaT >> 1; //threshold correction: deltaT = v*mean with v = 0.5 (computed but not used below)
if (!n)
{ //the image is completely black; print a warning
fprintf(stderr, "NOT NORMAL thresholdValue=160\n");
}
// Otsu's method
fmax = -1.0;
n1 = 0;
for (i = 0; i < 255; i++)
{
n1 += ihist[i];
if (n1 == 0) {continue;}
n2 = n - n1;
if (n2 == 0) {break;}
csum += (double)i * ihist[i];
m1 = csum/n1;
m2 = (sum-csum)/n2;
sb = (double)n1*(double)n2*(m1-m2)*(m1-m2); //between-class variance (formula simplified, see note below)
if (sb > fmax)
{
fmax = sb;
thresholdValue_temp = i; //gray level i that maximizes the between-class variance
}
}
if(thresholdValue_temp < 20)
*thresholdValue = 20; //threshold filtering
else *thresholdValue = thresholdValue_temp;
if( ctrl == 'p' ) //ctrl = cvWaitKey(...); it is a global variable
{
cout << "OTSU thresholdValue = " << thresholdValue_temp <<
", Returned thresholdValue = " << *thresholdValue << '\n' << endl;
}
if(!grayflag) cvReleaseImage(&gray);
}
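A note on the "formula simplified" comment in the loop above: Otsu's between-class variance is usually written as sigma_b^2(t) = w1(t)*w2(t)*(m1(t) - m2(t))^2, where w1 = n1/N and w2 = n2/N are the class probabilities and N is the total pixel count. Since N is the same for every candidate threshold t, maximizing n1*n2*(m1 - m2)^2 selects the same threshold, so the code drops the constant 1/N^2 factor and works directly with the raw pixel counts.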
/***********
* Contour extraction
************/
void Labeling(IplImage *src, IplImage *dst)
{
CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0); //allocate storage with the default block size
CvSeq* contour = 0;
cvCopy(src, dst, 0);
cvFindContours( dst, storage, &contour, sizeof(CvContour),
CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE ); //external contours only
int num = 0;
for( ; contour != 0; contour = contour->h_next)
{
CvRect rect;
rect = cvBoundingRect(contour, 0); //bounding rectangle of the object
num++;
if((rect.height + rect.width) >= 16)
cvRectangle(src, cvPoint(rect.x, rect.y), cvPoint(rect.x+rect.width, rect.y+rect.height),
CV_RGB(255, 255, 255), 1, 8); //draw the bounding rectangle of the object
// cvRectangle(dst, cvPoint(rect.x, rect.y), cvPoint(rect.x+rect.width, rect.y+rect.height),
// CV_RGB(255, 255, 255), 1, 8); //draw the bounding rectangle of the object
}
cvReleaseMemStorage(&storage); //release the contour storage to avoid leaking it on every call
}
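//Labeling() is defined here but never called from main() above. A minimal hookup sketch follows,
//assuming the prototype declared near the top of the file and an extra IplImage* pContourTmp
//scratch pointer (an illustrative name) declared and released alongside the other images in main().
//cvFindContours() modifies its input, which is why Labeling() copies src into dst and searches the
//copy, leaving the bounding rectangles drawn into src.
//
//    //Inside the while loop of main(), after the BackgroundDiff() calls and before
//    //cvShowImage("Improved OTSU foreground", pFroundImg_c):
//    if(nFrmNum == 1)
//        pContourTmp = cvCreateImage(cvGetSize(pFrame), 8, 1); //scratch image consumed by cvFindContours()
//    Labeling(pFroundImg_c, pContourTmp); //bounding boxes end up drawn into pFroundImg_c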