relaxrate: new contour synthesis
Added an rficv folder where we can modify the ofxOpenCv classes (starting with a copy of ofxCvContourFinder) as needed.
This commit is contained in:
parent 25a423f4ea
commit 3b9bdde619
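In outline, the new synthesis path loads each saved analysis frame, converts it to grayscale, thresholds it with the configured treshold value, runs the new rfiCvContourFinder over it, and keeps one finder per frame so the contours can be overlaid at display time. Below is a minimal sketch of that flow, assuming the openFrameworks/ofxOpenCv types used in the class; the helper name synthesiseContoursSketch and its parameter list are hypothetical and not part of this commit.

#include "ofMain.h"
#include "ofxOpenCv.h"
#include "rfiCvContourFinder.h"

// Sketch only: mirrors the loop added to RelaxRateAnalysis::synthesise() below.
// One contour finder is heap-allocated per saved frame and kept for display.
void synthesiseContoursSketch(const vector<string>& savedFrames,
                              int camWidth, int camHeight, int threshold,
                              vector<rfiCvContourFinder*>& finders)
{
    ofImage frame;
    ofxCvColorImage color;
    ofxCvGrayscaleImage gray;
    color.allocate(camWidth, camHeight);
    gray.allocate(camWidth, camHeight);

    finders.clear();
    for (size_t i = 1; i < savedFrames.size(); i++) {
        if (!frame.loadImage(savedFrames[i])) continue;   // skip unreadable frames

        color.setFromPixels(frame.getPixels(), frame.width, frame.height);
        gray = color;                    // colour -> grayscale conversion
        gray.threshold(threshold);       // binarise with the configured "treshold"

        // same parameters as in the diff: minArea 20, maxArea w*h/4, 25 blobs, find holes
        rfiCvContourFinder* cf = new rfiCvContourFinder();
        cf->findContours(gray, 20, (frame.width * frame.height) / 4, 25, true);
        finders.push_back(cf);           // drawn later, scaled to the window
    }
}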
@@ -36,4 +36,9 @@
    <acquiretime_colormulti> 40 </acquiretime_colormulti>
    <acquiretime_diffnoise> 30 </acquiretime_diffnoise>
</analysis_time>

<relaxrate>
    <treshold>51</treshold>
</relaxrate>

</config>
@@ -12,6 +12,7 @@ using Poco::Thread;

#define NUMBER_RUNS 1
#define ACQUIRE_TIME 20
#define TRESHOLD 80

void RelaxRateAnalysis::setup(int camWidth, int camHeight)
{
@@ -23,6 +24,9 @@ void RelaxRateAnalysis::setup(int camWidth, int camHeight)
acq_run_time = RefractiveIndex::XML.getValue("config:analysis_time:acquiretime_relaxrate", ACQUIRE_TIME);
cout << "ACQUIRE_TIME RelaxRateAnalysis " << acq_run_time << endl;

_treshold = RefractiveIndex::XML.getValue("config:relaxrate:treshold", TRESHOLD);
cout << "TRESHOLD RelaxRateAnalysis " << _treshold << endl;

//int acq_run_time = 20; // 20 seconds of acquiring per run

DELTA_T_SAVE = 2*(10*acq_run_time/2); // for 20 seconds, we want this to be around 200 files
@@ -124,63 +128,26 @@ void RelaxRateAnalysis::synthesise()
//cout << "IResponseAnalysis::saving synthesis...\n";
if(_state == STATE_STOP) return;

for(float i=1;i<_saved_filenames_analysis.size()-1;i++){

//cout << "IResponseAnalysis::synthesis FOR LOOP...\n";
//cout << "_saved_filenames_analysis[i]" << _saved_filenames_analysis[i] << endl;

cvContourFinderVect.clear();

for(float i=1;i<_saved_filenames_analysis.size();i++){

if(_state == STATE_STOP) return;

if(!image1.loadImage(_saved_filenames_analysis[i])){
//couldn't load image
cout << "didn't load image" << endl;
}

if(image1.loadImage(_saved_filenames_analysis[i])){
//cout << "LOADED image1!!!" << endl;
if(image5.loadImage(_saved_filenames_analysis[i+1])){

///////////////////////// PROCESS THE SAVED CAMERA IMAGES OF SHIT TO THE IMAGES //////////////////////////

cvColorImage1.setFromPixels(image1.getPixels(), image1.width, image1.height);
cvColorImage2.setFromPixels(image5.getPixels(), image5.width, image5.height);

cvGrayImage1 = cvColorImage1;
cvGrayImage2 = cvColorImage2;

cvGrayDiff1.absDiff(cvGrayImage2, cvGrayImage1);
cvGrayDiff1.threshold(80);

cvContourFinder1.findContours(cvGrayDiff1, 20, (image1.width * image1.height) / 4, 25, true);

/////////////////////////////////// SAVE TO DISK IN THE SYNTHESIS FOLDER ////////////////////////////////
string file_name;

file_name = ofToString(_synth_save_cnt, 2)+"_RelaxRateAnalysis_"+ofToString(_run_cnt,2)+".jpg";

//<---- THE OLD WAY OF SAVING - works on OSX but generates BLACK FRAMES on WINDOWS ---->
// ofSaveImage(cvGrayImage1.getPixelsRef(),_whole_file_path_synthesis+"/"+file_name, OF_IMAGE_QUALITY_BEST);

//<---- NEW SAVING - seems to fix WINDOWS saving out BLACK FRAMES PROBLEM ---->
//ofImage image;
//image.allocate(cvGrayDiff1.width, cvGrayDiff1.height, OF_IMAGE_GRAYSCALE);

//*** This needs to be here for OSX of we get a BAD ACCESS ERROR. DOES IT BREAK WINDOWS? ***//
//image.setUseTexture(false);

//image.setFromPixels(cvGrayDiff1.getPixels(), cvGrayDiff1.width, cvGrayDiff1.height, OF_IMAGE_GRAYSCALE);
//image.saveImage(_whole_file_path_synthesis+"/"+file_name);

//_saved_filenames_synthesis.push_back(_whole_file_path_synthesis+"/"+file_name);

// <--- REALLY NEW SAVING METHOD --- 26 feb 2012 --- consolidated the save function into Abstract Analysis> ///
saveImageSynthesis(file_name, &cvGrayDiff1, OF_IMAGE_GRAYSCALE);
_synth_save_cnt++;
}
///////////////////////// PROCESS THE SAVED CAMERA IMAGES OF SHIT TO THE IMAGES //////////////////////////

cvColorImage1.setFromPixels(image1.getPixels(), image1.width, image1.height);
cvGrayDiff1 = cvColorImage1;
cvGrayDiff1.threshold(_treshold);

rfiCvContourFinder* cf = new rfiCvContourFinder();

cf->findContours(cvGrayDiff1, 20, (image1.width * image1.height) / 4, 25, true);

cvContourFinderVect.push_back(cf);

}
}
@@ -192,31 +159,27 @@ void RelaxRateAnalysis::synthesise()

void RelaxRateAnalysis::displayresults()
{

for(float i=1;i<_saved_filenames_synthesis.size();i++){

if(_state == STATE_STOP) return;
cvContourFinderVectDisplay.clear();

for(int i=1;i<cvContourFinderVect.size();i++){

if(_state == STATE_STOP) return;

//cout << "_saved_filenames_analysis[i] - " << _saved_filenames_synthesis[i] << endl;

while(!_image_shown){
Thread::sleep(2);
if(_state == STATE_STOP) return;
//cout << "!_image_shown" << endl;
}

if(!image3.loadImage(_saved_filenames_synthesis[i])){
//couldn't load image
// cout << "didn't load image" << endl;
}
cvContourFinderVectDisplay.push_back(cvContourFinderVect[i]);
_show_image = true;
_image_shown = false;

if(image3.loadImage(_saved_filenames_synthesis[i])){
image3.loadImage(_saved_filenames_synthesis[i]);
//cout << "_show_image = true;" << endl;
_show_image = true;
_image_shown = false;
}
}

}

}
@@ -228,9 +191,6 @@ void RelaxRateAnalysis::draw()
switch (_state) {
case STATE_ACQUIRING:
{
/// *** TODO *** ///
// still need to deal with latency frames here - i.e.: there are frames
/// *** TODO *** ///

if (_frame_cnt < _frame_cnt_max)
{
@@ -361,15 +321,10 @@ void RelaxRateAnalysis::draw()

if (_show_image)
{
//cout << "_show_image...\n" << endl;

ofEnableAlphaBlending();

ofSetColor(255, 255, 255);
image2.setFromPixels(image3.getPixels(),image3.width,image3.height, OF_IMAGE_GRAYSCALE);
image2.draw(0,0, ofGetWidth(), ofGetHeight());

ofDisableAlphaBlending();
for(int i=0;i<cvContourFinderVectDisplay.size();i++){
cvContourFinderVectDisplay[i]->draw(0,0, ofGetWidth(), ofGetHeight());
}

}

// display results of the synthesis
@@ -5,8 +5,12 @@

#include "Poco/Timer.h"
#include "rfiCvContourFinder.h"
#include "ofxOpenCv.h"


class RelaxRateAnalysis : public AbstractAnalysis
{
public:
@@ -31,6 +35,8 @@ protected:
int _run_cnt, _save_cnt, _synth_save_cnt, _anim_cnt;
float c, _frame_cnt, _frame_cnt_max, _anim_cnt_max;

int _treshold;

bool _show_image, _image_shown;
ofImage image1;
ofImage image2;
@@ -54,6 +60,7 @@ protected:
ofxCvGrayscaleImage cvGrayImage3;
ofxCvGrayscaleImage cvGrayImage4;

ofxCvContourFinder cvContourFinder1;
vector<rfiCvContourFinder*> cvContourFinderVect;
vector<rfiCvContourFinder*> cvContourFinderVectDisplay;

};
211 src/rficv/rfiCvContourFinder.cpp Normal file
@@ -0,0 +1,211 @@
#include "rfiCvContourFinder.h"


//--------------------------------------------------------------------------------
static bool sort_carea_compare( const CvSeq* a, const CvSeq* b) {
    // use opencv to calc size, then sort based on size
    float areaa = fabs(cvContourArea(a, CV_WHOLE_SEQ));
    float areab = fabs(cvContourArea(b, CV_WHOLE_SEQ));

    //return 0;
    return (areaa > areab);
}


//--------------------------------------------------------------------------------
rfiCvContourFinder::rfiCvContourFinder() {
    _width = 0;
    _height = 0;
    myMoments = (CvMoments*)malloc( sizeof(CvMoments) );
    reset();
}

//--------------------------------------------------------------------------------
rfiCvContourFinder::~rfiCvContourFinder() {
    free( myMoments );
}

//--------------------------------------------------------------------------------
void rfiCvContourFinder::reset() {
    cvSeqBlobs.clear();
    blobs.clear();
    nBlobs = 0;
}

//--------------------------------------------------------------------------------
int rfiCvContourFinder::findContours( ofxCvGrayscaleImage& input,
                                      int minArea,
                                      int maxArea,
                                      int nConsidered,
                                      bool bFindHoles,
                                      bool bUseApproximation) {

    // get width/height disregarding ROI
    IplImage* ipltemp = input.getCvImage();
    _width = ipltemp->width;
    _height = ipltemp->height;

    reset();

    // opencv will clober the image it detects contours on, so we want to
    // copy it into a copy before we detect contours. That copy is allocated
    // if necessary (necessary = (a) not allocated or (b) wrong size)
    // so be careful if you pass in different sized images to "findContours"
    // there is a performance penalty, but we think there is not a memory leak
    // to worry about better to create mutiple contour finders for different
    // sizes, ie, if you are finding contours in a 640x480 image but also a
    // 320x240 image better to make two rfiCvContourFinder objects then to use
    // one, because you will get penalized less.

    if( inputCopy.getWidth() == 0 ) {
        inputCopy.setUseTexture(false);
        inputCopy.allocate( _width, _height );
    } else if( inputCopy.getWidth() != _width || inputCopy.getHeight() != _height ) {
        // reallocate to new size
        inputCopy.clear();
        inputCopy.setUseTexture(false);
        inputCopy.allocate( _width, _height );
    }

    inputCopy.setROI( input.getROI() );
    inputCopy = input;

    CvSeq* contour_list = NULL;
    contour_storage = cvCreateMemStorage( 1000 );
    storage = cvCreateMemStorage( 1000 );

    CvContourRetrievalMode retrieve_mode
        = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
    cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list,
                    sizeof(CvContour), retrieve_mode, bUseApproximation ? CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
    CvSeq* contour_ptr = contour_list;

    // put the contours from the linked list, into an array for sorting
    while( (contour_ptr != NULL) ) {
        float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
        if( (area > minArea) && (area < maxArea) ) {
            cvSeqBlobs.push_back(contour_ptr);
        }
        contour_ptr = contour_ptr->h_next;
    }

    // sort the pointers based on size
    if( cvSeqBlobs.size() > 1 ) {
        sort( cvSeqBlobs.begin(), cvSeqBlobs.end(), sort_carea_compare );
    }

    // now, we have cvSeqBlobs.size() contours, sorted by size in the array
    // cvSeqBlobs let's get the data out and into our structures that we like
    for( int i = 0; i < MIN(nConsidered, (int)cvSeqBlobs.size()); i++ ) {
        blobs.push_back( ofxCvBlob() );
        float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ );
        CvRect rect = cvBoundingRect( cvSeqBlobs[i], 0 );
        cvMoments( cvSeqBlobs[i], myMoments );

        blobs[i].area = fabs(area);
        blobs[i].hole = area < 0 ? true : false;
        blobs[i].length = cvArcLength(cvSeqBlobs[i]);
        blobs[i].boundingRect.x = rect.x;
        blobs[i].boundingRect.y = rect.y;
        blobs[i].boundingRect.width = rect.width;
        blobs[i].boundingRect.height = rect.height;
        blobs[i].centroid.x = (myMoments->m10 / myMoments->m00);
        blobs[i].centroid.y = (myMoments->m01 / myMoments->m00);

        // get the points for the blob:
        CvPoint pt;
        CvSeqReader reader;
        cvStartReadSeq( cvSeqBlobs[i], &reader, 0 );

        for( int j=0; j < cvSeqBlobs[i]->total; j++ ) {
            CV_READ_SEQ_ELEM( pt, reader );
            blobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
        }
        blobs[i].nPts = blobs[i].pts.size();

    }

    nBlobs = blobs.size();

    // Free the storage memory.
    // Warning: do this inside this function otherwise a strange memory leak
    if( contour_storage != NULL ) { cvReleaseMemStorage(&contour_storage); }
    if( storage != NULL ) { cvReleaseMemStorage(&storage); }

    return nBlobs;

}

//--------------------------------------------------------------------------------
void rfiCvContourFinder::draw( float x, float y, float w, float h ) {

    float scalex = 0.0f;
    float scaley = 0.0f;
    if( _width != 0 ) { scalex = w/_width; } else { scalex = 1.0f; }
    if( _height != 0 ) { scaley = h/_height; } else { scaley = 1.0f; }

    if(bAnchorIsPct){
        x -= anchor.x * w;
        y -= anchor.y * h;
    }else{
        x -= anchor.x;
        y -= anchor.y;
    }

    ofPushStyle();
    glPushMatrix();
    glTranslatef( x, y, 0.0 );
    glScalef( scalex, scaley, 0.0 );
    ofSetHexColor(0xFFFFFF);

    for( int i=0; i<(int)blobs.size(); i++ ) {
        ofNoFill();
        ofBeginShape();
        for( int j=0; j<blobs[i].nPts; j++ ) {
            ofVertex( blobs[i].pts[j].x, blobs[i].pts[j].y );
        }
        ofEndShape();

    }
    glPopMatrix();
    ofPopStyle();
}


//----------------------------------------------------------
void rfiCvContourFinder::draw(const ofPoint & point){
    draw(point.x, point.y);
}

//----------------------------------------------------------
void rfiCvContourFinder::draw(const ofRectangle & rect){
    draw(rect.x, rect.y, rect.width, rect.height);
}

//--------------------------------------------------------------------------------
void rfiCvContourFinder::setAnchorPercent( float xPct, float yPct ){
    anchor.x = xPct;
    anchor.y = yPct;
    bAnchorIsPct = true;
}

//--------------------------------------------------------------------------------
void rfiCvContourFinder::setAnchorPoint( int x, int y ){
    anchor.x = x;
    anchor.y = y;
    bAnchorIsPct = false;
}

//--------------------------------------------------------------------------------
void rfiCvContourFinder::resetAnchor(){
    anchor.set(0,0);
    bAnchorIsPct = false;
}
73 src/rficv/rfiCvContourFinder.h Normal file
@@ -0,0 +1,73 @@
/*
 * rfiCvContourFinder.h
 *
 * Finds white blobs in binary images and identifies
 * centroid, bounding box, area, length and polygonal contour
 * The result is placed in a vector of ofxCvBlob objects.
 *
 */

#ifndef RFI_CV_CONTOUR_FINDER
#define RFI_CV_CONTOUR_FINDER


#include "ofxCvConstants.h"
#include "ofxCvBlob.h"
#include "ofxCvGrayscaleImage.h"
#include <algorithm>

class rfiCvContourFinder : public ofBaseDraws {

  public:

    vector<ofxCvBlob> blobs;
    int nBlobs; // DEPRECATED: use blobs.size() instead


    rfiCvContourFinder();
    virtual ~rfiCvContourFinder();

    virtual float getWidth() { return _width; };    // set after first findContours call
    virtual float getHeight() { return _height; };  // set after first findContours call

    virtual int findContours( ofxCvGrayscaleImage& input,
                              int minArea, int maxArea,
                              int nConsidered, bool bFindHoles,
                              bool bUseApproximation = true);
                              // approximation = don't do points for all points
                              // of the contour, if the contour runs
                              // along a straight line, for example...

    virtual void draw() { draw(0,0, _width, _height); };
    virtual void draw( float x, float y ) { draw(x,y, _width, _height); };
    virtual void draw( float x, float y, float w, float h );
    virtual void draw(const ofPoint & point);
    virtual void draw(const ofRectangle & rect);
    virtual void setAnchorPercent(float xPct, float yPct);
    virtual void setAnchorPoint(int x, int y);
    virtual void resetAnchor();
    //virtual ofxCvBlob getBlob(int num);


  protected:

    int _width;
    int _height;
    ofxCvGrayscaleImage inputCopy;
    CvMemStorage* contour_storage;
    CvMemStorage* storage;
    CvMoments* myMoments;
    vector<CvSeq*> cvSeqBlobs; // these will become blobs

    ofPoint anchor;
    bool bAnchorIsPct;

    virtual void reset();

};


#endif
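For reference, a minimal, hypothetical usage sketch of the new class on its own; the function name drawBlobsExample and the 640x480 target size are assumptions, and the input is expected to be an already thresholded (binary) grayscale image.

#include "ofMain.h"
#include "ofxOpenCv.h"
#include "rfiCvContourFinder.h"

// Hypothetical example: find bright blobs in a binary grayscale image
// and draw their outlines scaled into a 640x480 area.
void drawBlobsExample(ofxCvGrayscaleImage& binaryImage)
{
    rfiCvContourFinder finder;

    // minArea 20 px, maxArea one quarter of the image, keep up to 10 blobs,
    // include holes, use the default point approximation.
    int n = finder.findContours(binaryImage, 20,
                                (binaryImage.width * binaryImage.height) / 4,
                                10, true);

    cout << n << " blobs found" << endl;
    finder.draw(0, 0, 640, 480);   // contours scaled from source size to 640x480
}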