使用卡尔曼滤波进行多目标跟踪

4

我是OpenCV的新手。我一直希望学习新的图像处理技术/编程知识。我已经看了一些关于目标检测、跟踪、计数等方面的教程。我希望能够学习这些知识,并尝试制作自己类似的项目。

在互联网和论文中进行了大量搜索后,我最终了解到了用于目标跟踪的卡尔曼滤波器。在下面的代码中,我采用了以下步骤:

  1. 背景减法
  2. 平滑、模糊等滤波器。
  3. 查找轮廓
  4. 绘制矩形并查找质心。
  5. 应用卡尔曼滤波器

现在,我可以使用我的代码跟踪一个对象。我想要跟踪多个对象(例如在道路上奔跑的人、行驶的车辆等)。

如果有人能够指导我或提供示例代码供我尝试,我将不胜感激。

期待您的积极回复。 Shan

using namespace std;
using namespace cv;

// Draw an "X" marker of half-size d centered at `center` on `img`.
// NOTE(review): multi-statement macro without a do { } while(0) guard —
// only safe where a full statement is expected (true for current call sites).
#define drawCross( img, center, color, d )\
line(img, Point(center.x - d, center.y - d), Point(center.x + d, center.y + d), color, 2, CV_AA, 0);\
line(img, Point(center.x + d, center.y - d), Point(center.x - d, center.y + d), color, 2, CV_AA, 0 )\

// Trail of Kalman predictions (mousev); kalmanv is cleared but never filled here.
vector<Point> mousev,kalmanv;


// Single global Kalman filter shared by initKalman/kalmanPredict/kalmanCorrect.
cv::KalmanFilter KF;
// 2x1 measurement vector: the observed (x, y) centroid of the tracked blob.
cv::Mat_<float> measurement(2,1); 
Mat_<float> state(4, 1); // (x, y, Vx, Vy) — declared but unused below
// Line-crossing counter; shadowed by a local `incr` inside main().
int incr=0;


// Initialize the global Kalman filter at position (x, y).
//
// The filter has 4 dynamic parameters and 2 measurement parameters:
//   measurement = 2D location (x, y) of the object,
//   state       = 2D location plus 2D velocity (x, y, Vx, Vy).
void initKalman(float x, float y)
{
    KF.init(4, 2, 0);

    // Seed the measurement vector with the initial observation.
    // BUGFIX: the original wrote y into row 0 as well, clobbering x;
    // y belongs in row 1.
    measurement = Mat_<float>::zeros(2, 1);
    measurement.at<float>(0, 0) = x;
    measurement.at<float>(1, 0) = y;

    // Start both the a-priori and a-posteriori state at the initial
    // position with zero velocity.
    KF.statePre.setTo(0);
    KF.statePre.at<float>(0, 0) = x;
    KF.statePre.at<float>(1, 0) = y;

    KF.statePost.setTo(0);
    KF.statePost.at<float>(0, 0) = x;
    KF.statePost.at<float>(1, 0) = y;

    // Identity transition/measurement model (constant-position).
    setIdentity(KF.transitionMatrix);
    setIdentity(KF.measurementMatrix);
    // Increase processNoiseCov for faster convergence at the cost of a
    // noisier estimate.
    setIdentity(KF.processNoiseCov, Scalar::all(.005));
    setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
    setIdentity(KF.errorCovPost, Scalar::all(.1));
}

// Run one prediction step of the global filter and return the predicted
// (x, y) position.
Point kalmanPredict() 
{
    const Mat forecast = KF.predict();

    // Copy the a-priori state/covariance back onto the a-posteriori
    // slots so that repeated predict() calls (frames where no
    // measurement arrives) keep advancing from the latest prediction.
    KF.statePre.copyTo(KF.statePost);
    KF.errorCovPre.copyTo(KF.errorCovPost);

    return Point(forecast.at<float>(0), forecast.at<float>(1));
}

// Feed the observed centroid (x, y) into the filter and return the
// corrected (a-posteriori) position estimate.
Point kalmanCorrect(float x, float y)
{
    measurement(0) = x;
    measurement(1) = y;

    const Mat corrected = KF.correct(measurement);
    return Point(corrected.at<float>(0), corrected.at<float>(1));
}

// Track moving blobs in a video: MOG2 background subtraction ->
// morphological cleanup -> contour detection -> Kalman-smoothed centroid,
// counting objects whose smoothed track crosses the line y = 130.
int main()
{
  Mat frame, thresh_frame;
  vector<Mat> channels;
  VideoCapture capture;
  vector<Vec4i> hierarchy;
  vector<vector<Point> > contours;

  cv::Mat back;   // current background model image
  cv::Mat fore;   // foreground mask
  cv::BackgroundSubtractorMOG2 bg;

  bg.nmixtures = 3;            // fewer Gaussians per pixel -> faster adaptation
  bg.bShadowDetection = false; // shadows would produce spurious blobs

  // NOTE: shadows the global `incr`; the counter printed below uses this one.
  int incr = 0;

  capture.open("E:/demo1.avi");

  if(!capture.isOpened())
    {
      cerr << "Problem opening video source" << endl;
      // BUGFIX: bail out with an error code instead of falling through
      // to the processing loop with a dead capture.
      return -1;
    }

  mousev.clear();
  kalmanv.clear();

  initKalman(0, 0);

  while((char)waitKey(1) != 'q' && capture.grab())
    {
      Point s, p;

      capture.retrieve(frame);

      // Foreground segmentation, then morphological cleanup: two
      // erosions remove speckle noise, the seven dilations merge the
      // surviving fragments into solid blobs.
      bg.operator ()(frame, fore);
      bg.getBackgroundImage(back);
      erode(fore, fore, Mat());
      erode(fore, fore, Mat());
      dilate(fore, fore, Mat());
      dilate(fore, fore, Mat());
      dilate(fore, fore, Mat());
      dilate(fore, fore, Mat());
      dilate(fore, fore, Mat());
      dilate(fore, fore, Mat());
      dilate(fore, fore, Mat());

      cv::normalize(fore, fore, 0, 1., cv::NORM_MINMAX);
      cv::threshold(fore, fore, .5, 1., CV_THRESH_BINARY);

      // Red-channel emphasis, only used for the "Red" debug window.
      split(frame, channels);
      add(channels[0], channels[1], channels[1]);
      subtract(channels[2], channels[1], channels[2]);
      threshold(channels[2], thresh_frame, 50, 255, CV_THRESH_BINARY);
      medianBlur(thresh_frame, thresh_frame, 5);

      // First pass: keep contours larger than 500 px^2.
      findContours(fore, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
      vector<vector<Point> > contours_poly(contours.size());
      vector<Rect> boundRect(contours.size());

      Mat drawing = Mat::zeros(thresh_frame.size(), CV_8UC1);
      for(size_t i = 0; i < contours.size(); i++)
        {
          if(contourArea(contours[i]) > 500)
            drawContours(drawing, contours, i, Scalar::all(255), CV_FILLED, 8, vector<Vec4i>(), 0, Point());
        }
      thresh_frame = drawing;

      // Second pass: re-detect and keep only contours above 3000 px^2
      // for the "Binary" debug window.
      findContours(fore, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

      drawing = Mat::zeros(thresh_frame.size(), CV_8UC1);
      for(size_t i = 0; i < contours.size(); i++)
        {
          if(contourArea(contours[i]) > 3000)
            drawContours(drawing, contours, i, Scalar::all(255), CV_FILLED, 8, vector<Vec4i>(), 0, Point());
        }
      thresh_frame = drawing;

      // Image moments -> mass centers (computed but unused below; the
      // tracker uses bounding-box centers instead).
      vector<Moments> mu(contours.size());
      for(size_t i = 0; i < contours.size(); i++)
        mu[i] = moments(contours[i], false);

      vector<Point2f> mc(contours.size());
      for(size_t i = 0; i < contours.size(); i++)
        mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);

      // Bounding boxes from polygon-approximated contours.
      for(size_t i = 0; i < contours.size(); i++)
        {
          approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
          boundRect[i] = boundingRect(Mat(contours_poly[i]));
        }

      p = kalmanPredict();
      mousev.push_back(p);

      for(size_t i = 0; i < contours.size(); i++)
        {
          if(contourArea(contours[i]) > 1000)
            {
              rectangle(frame, boundRect[i].tl(), boundRect[i].br(), Scalar(0, 255, 0), 2, 8, 0);
              Point center = Point(boundRect[i].x + (boundRect[i].width / 2), boundRect[i].y + (boundRect[i].height / 2));
              cv::circle(frame, center, 8, Scalar(0, 0, 255), -1, 1, 0);

              // Correct the filter with this blob's center and draw the
              // smoothed estimate.
              s = kalmanCorrect(center.x, center.y);
              drawCross(frame, s, Scalar(255, 255, 255), 5);

              // Count when the smoothed track crosses y = 130 upward.
              if(s.y <= 130 && p.y > 130 && s.x > 15)
                {
                  incr++;
                  cout << "Counter " << incr << endl;
                }

              // Draw the last (up to) 20 trail segments.
              // BUGFIX: the original started at mousev.size() - 20,
              // which underflows in size_t arithmetic when fewer than
              // 20 points exist (out-of-bounds access); clamp to 0.
              // Also renamed the loop variable, which shadowed the
              // outer contour index `i`.
              int trail_start = (int)mousev.size() - 20;
              if(trail_start < 0)
                trail_start = 0;
              for(int j = trail_start; j + 1 < (int)mousev.size(); j++)
                line(frame, mousev[j], mousev[j + 1], Scalar(0, 255, 0), 1);
            }
        }

      imshow("Video", frame);
      imshow("Red", channels[2]);
      imshow("Binary", thresh_frame);
    }
  return 0;
}

你需要使用匈牙利算法进行数据关联。卡尔曼只是一种平滑技术。看看这个视频http://www.youtube.com/watch?v=lWXq12qCtkY。我记得测试过一个基于那个教程的俄罗斯人的C++代码。应该不难找到。它工作得非常好。祝好运。 - Zaw Lin
1个回答

1
如果您正在跟踪多个不相关的对象,您可以为每个目标复制您的恒定速度卡尔曼滤波器。这将是最有效的方法。
如果您认为您的目标在某种程度上相关(例如,测量误差具有共同组成部分,或者一个目标的行为会影响您对另一个目标行为的预测),那么您可以将它们全部组合在一起。您将采取现有的4x4矩阵(x,y,vx,vy)并以块对角线方式组合它们以形成更大的矩阵。较大的协方差矩阵现在将具有指示一个测量的不确定性如何与另一个测量相关的区域。假设您认为相机可能会晃动。然后您将期望所有对象可能具有相同的角度噪声。您可以在R矩阵中包含该想法,并使用非对角线项将所有测量绑定在一起。这种形式在计算上将更加昂贵(因为矩阵运算的非线性成本),并且在添加或删除目标时也会增加复杂性。

网页内容由 Stack Overflow 提供。点击下方的原文链接可以查看英文原文。
原文链接