Add Java and Python code for cascade classifier and HDR tutorials.
@@ -2,7 +2,7 @@
 #include "opencv2/highgui.hpp"
 #include "opencv2/imgproc.hpp"
 
-#include <stdio.h>
+#include <iostream>
 
 using namespace std;
 using namespace cv;
@@ -11,48 +11,63 @@ using namespace cv;
 void detectAndDisplay( Mat frame );
 
 /** Global variables */
-String face_cascade_name, eyes_cascade_name;
 CascadeClassifier face_cascade;
 CascadeClassifier eyes_cascade;
-String window_name = "Capture - Face detection";
 
 /** @function main */
 int main( int argc, const char** argv )
 {
     CommandLineParser parser(argc, argv,
-        "{help h||}"
-        "{face_cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}"
-        "{eyes_cascade|../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}");
+        "{help h||}"
+        "{face_cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|Path to face cascade.}"
+        "{eyes_cascade|../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|Path to eyes cascade.}"
+        "{camera|0|Camera device number.}");
 
     parser.about( "\nThis program demonstrates using the cv::CascadeClassifier class to detect objects (Face + eyes) in a video stream.\n"
                   "You can use Haar or LBP features.\n\n" );
     parser.printMessage();
 
-    face_cascade_name = parser.get<String>("face_cascade");
-    eyes_cascade_name = parser.get<String>("eyes_cascade");
-    VideoCapture capture;
-    Mat frame;
+    String face_cascade_name = parser.get<String>("face_cascade");
+    String eyes_cascade_name = parser.get<String>("eyes_cascade");
 
     //-- 1. Load the cascades
-    if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
-    if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
+    if( !face_cascade.load( face_cascade_name ) )
+    {
+        cout << "--(!)Error loading face cascade\n";
+        return -1;
+    };
+    if( !eyes_cascade.load( eyes_cascade_name ) )
+    {
+        cout << "--(!)Error loading eyes cascade\n";
+        return -1;
+    };
 
+    int camera_device = parser.get<int>("camera");
+    VideoCapture capture;
     //-- 2. Read the video stream
-    capture.open( 0 );
-    if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
+    capture.open( camera_device );
+    if ( ! capture.isOpened() )
+    {
+        cout << "--(!)Error opening video capture\n";
+        return -1;
+    }
 
+    Mat frame;
     while ( capture.read(frame) )
     {
         if( frame.empty() )
         {
-            printf(" --(!) No captured frame -- Break!");
+            cout << "--(!) No captured frame -- Break!\n";
             break;
         }
 
         //-- 3. Apply the classifier to the frame
         detectAndDisplay( frame );
 
-        if( waitKey(10) == 27 ) { break; } // escape
+        if( waitKey(10) == 27 )
+        {
+            break; // escape
+        }
     }
     return 0;
 }
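The hunk above makes the camera index a command-line option instead of the hard-coded device 0 and replaces printf with cout. The Python and Java tutorial files this commit adds are not reproduced on this page; as a rough sketch of the equivalent capture loop in Python with the public cv2 bindings (argument handling and names are the editor's assumptions, not the exact tutorial code):

import argparse
import cv2 as cv

parser = argparse.ArgumentParser(description='Cascade classifier demo (sketch).')
parser.add_argument('--camera', help='Camera device number.', type=int, default=0)
args = parser.parse_args()

capture = cv.VideoCapture(args.camera)   # open the requested camera device
if not capture.isOpened():
    print('--(!)Error opening video capture')
    exit(1)

while True:
    ret, frame = capture.read()
    if not ret or frame is None:
        print('--(!) No captured frame -- Break!')
        break
    detect_and_display(frame)             # see the detection sketch after the next hunk
    if cv.waitKey(10) == 27:              # Esc key stops the loop
        break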
@@ -60,33 +75,33 @@ int main( int argc, const char** argv )
 /** @function detectAndDisplay */
 void detectAndDisplay( Mat frame )
 {
-    std::vector<Rect> faces;
     Mat frame_gray;
 
     cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
     equalizeHist( frame_gray, frame_gray );
 
     //-- Detect faces
-    face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(60, 60) );
+    std::vector<Rect> faces;
+    face_cascade.detectMultiScale( frame_gray, faces );
 
     for ( size_t i = 0; i < faces.size(); i++ )
     {
         Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
-        ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
+        ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4 );
 
         Mat faceROI = frame_gray( faces[i] );
-        std::vector<Rect> eyes;
 
         //-- In each face, detect eyes
-        eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
+        std::vector<Rect> eyes;
+        eyes_cascade.detectMultiScale( faceROI, eyes );
 
         for ( size_t j = 0; j < eyes.size(); j++ )
         {
             Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
             int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
-            circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
+            circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4 );
         }
     }
 
     //-- Show what you got
-    imshow( window_name, frame );
+    imshow( "Capture - Face detection", frame );
 }
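The updated detectAndDisplay drops the tuned detectMultiScale arguments (scale factor, minimum neighbours, minimum size) in favour of the defaults and passes the window title straight to imshow. A minimal Python counterpart, again a sketch against the public cv2 API rather than the exact file added by this commit (cascade paths below are placeholders):

import cv2 as cv

face_cascade = cv.CascadeClassifier()
eyes_cascade = cv.CascadeClassifier()
# placeholder paths; the tutorial takes them from command-line arguments
face_cascade.load('haarcascade_frontalface_alt.xml')
eyes_cascade.load('haarcascade_eye_tree_eyeglasses.xml')

def detect_and_display(frame):
    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    frame_gray = cv.equalizeHist(frame_gray)
    faces = face_cascade.detectMultiScale(frame_gray)   # default parameters, as in the C++ change
    for (x, y, w, h) in faces:
        center = (x + w // 2, y + h // 2)
        frame = cv.ellipse(frame, center, (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 4)
        face_roi = frame_gray[y:y + h, x:x + w]
        eyes = eyes_cascade.detectMultiScale(face_roi)
        for (ex, ey, ew, eh) in eyes:
            eye_center = (x + ex + ew // 2, y + ey + eh // 2)
            radius = int(round((ew + eh) * 0.25))
            frame = cv.circle(frame, eye_center, radius, (255, 0, 0), 4)
    cv.imshow('Capture - Face detection', frame)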
@@ -1,6 +1,7 @@
-#include <opencv2/photo.hpp>
+#include "opencv2/photo.hpp"
+#include "opencv2/imgcodecs.hpp"
-#include <opencv2/highgui.hpp>
+#include "opencv2/highgui.hpp"
 
 #include <vector>
 #include <iostream>
 #include <fstream>
@@ -10,38 +11,52 @@ using namespace std;
 
 void loadExposureSeq(String, vector<Mat>&, vector<float>&);
 
-int main(int, char**argv)
+int main(int argc, char**argv)
 {
+    CommandLineParser parser( argc, argv, "{@input | | Input directory that contains images and exposure times. }" );
+
     //! [Load images and exposure times]
     vector<Mat> images;
     vector<float> times;
-    loadExposureSeq(argv[1], images, times);
+    loadExposureSeq(parser.get<String>( "@input" ), images, times);
     //! [Load images and exposure times]
 
     //! [Estimate camera response]
     Mat response;
     Ptr<CalibrateDebevec> calibrate = createCalibrateDebevec();
     calibrate->process(images, response, times);
     //! [Estimate camera response]
 
     //! [Make HDR image]
     Mat hdr;
     Ptr<MergeDebevec> merge_debevec = createMergeDebevec();
     merge_debevec->process(images, hdr, times, response);
     //! [Make HDR image]
 
     //! [Tonemap HDR image]
     Mat ldr;
     Ptr<TonemapDurand> tonemap = createTonemapDurand(2.2f);
     tonemap->process(hdr, ldr);
     //! [Tonemap HDR image]
 
     //! [Perform exposure fusion]
     Mat fusion;
     Ptr<MergeMertens> merge_mertens = createMergeMertens();
     merge_mertens->process(images, fusion);
     //! [Perform exposure fusion]
 
     //! [Write results]
     imwrite("fusion.png", fusion * 255);
     imwrite("ldr.png", ldr * 255);
     imwrite("hdr.hdr", hdr);
     //! [Write results]
 
     return 0;
 }
 
 void loadExposureSeq(String path, vector<Mat>& images, vector<float>& times)
 {
-    path = path + std::string("/");
+    path = path + "/";
     ifstream list_file((path + "list.txt").c_str());
     string name;
     float val;
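This second file is the HDR imaging tutorial: the input directory now comes from a CommandLineParser positional argument instead of a bare argv[1], and the new imgcodecs include supports writing the Radiance .hdr output. The Python version added by the commit is likewise not shown on this page; a sketch of the same pipeline with the cv2 bindings might look like the following. Note the C++ code uses TonemapDurand, which in newer OpenCV builds lives in opencv_contrib, so the generic Tonemap is substituted here; that swap and the function name are the editor's assumptions.

import cv2 as cv
import numpy as np

def hdr_pipeline(images, times):
    # images: list of aligned 8-bit exposures, times: float32 array of exposure times
    calibrate = cv.createCalibrateDebevec()
    response = calibrate.process(images, times)           # estimate the camera response curve
    merge_debevec = cv.createMergeDebevec()
    hdr = merge_debevec.process(images, times, response)  # merge exposures into a linear HDR image
    tonemap = cv.createTonemap(2.2)
    ldr = tonemap.process(hdr)                            # compress to a displayable range
    merge_mertens = cv.createMergeMertens()
    fusion = merge_mertens.process(images)                # exposure fusion, no HDR intermediate
    cv.imwrite('ldr.png', np.clip(ldr * 255, 0, 255).astype('uint8'))
    cv.imwrite('fusion.png', np.clip(fusion * 255, 0, 255).astype('uint8'))
    cv.imwrite('hdr.hdr', hdr)                            # .hdr output needs the imgcodecs module
    return hdr, ldr, fusion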