From 3b9bdde619458c5d22d3dfd81786a7786778bb8a Mon Sep 17 00:00:00 2001
From: dviid
Date: Mon, 27 Feb 2012 18:14:36 +0100
Subject: [PATCH] relaxrate new contour synthesis

added a rficv folder where we can change ofxcv as needed
---
 config.refindx                   |   5 +
 src/RelaxRateAnalysis.cpp        | 117 ++++++-----
 src/RelaxRateAnalysis.h          |   9 +-
 src/rficv/rfiCvContourFinder.cpp | 211 +++++++++++++++++++++++++++++++
 src/rficv/rfiCvContourFinder.h   |  73 +++++++++++
 5 files changed, 333 insertions(+), 82 deletions(-)
 create mode 100644 src/rficv/rfiCvContourFinder.cpp
 create mode 100644 src/rficv/rfiCvContourFinder.h

diff --git a/config.refindx b/config.refindx
index 9dd552e..392ceb3 100644
--- a/config.refindx
+++ b/config.refindx
@@ -36,4 +36,9 @@
     40
     30
+
+
+    51
+
+
\ No newline at end of file
diff --git a/src/RelaxRateAnalysis.cpp b/src/RelaxRateAnalysis.cpp
index 2de1a55..f636b9e 100755
--- a/src/RelaxRateAnalysis.cpp
+++ b/src/RelaxRateAnalysis.cpp
@@ -12,6 +12,7 @@ using Poco::Thread;
 
 #define NUMBER_RUNS 1
 #define ACQUIRE_TIME 20
+#define TRESHOLD 80
 
 void RelaxRateAnalysis::setup(int camWidth, int camHeight)
 {
@@ -23,6 +24,9 @@
     acq_run_time = RefractiveIndex::XML.getValue("config:analysis_time:acquiretime_relaxrate", ACQUIRE_TIME);
     cout << "ACQUIRE_TIME RelaxRateAnalysis " << acq_run_time << endl;
+    _treshold = RefractiveIndex::XML.getValue("config:relaxrate:treshold", TRESHOLD);
+    cout << "TRESHOLD RelaxRateAnalysis " << _treshold << endl;
+
     //int acq_run_time = 20;   // 20 seconds of acquiring per run
 
     DELTA_T_SAVE = 2*(10*acq_run_time/2); // for 20 seconds, we want this to be around 200 files
@@ -124,63 +128,26 @@ void RelaxRateAnalysis::synthesise()
     //cout << "IResponseAnalysis::saving synthesis...\n";
 
     if(_state == STATE_STOP) return;
 
-    for(float i=1;i<_saved_filenames_analysis.size()-1;i++){
-
-        //cout << "IResponseAnalysis::synthesis FOR LOOP...\n";
-
-        //cout << "_saved_filenames_analysis[i]" << _saved_filenames_analysis[i] << endl;
-
+    cvContourFinderVect.clear();
+
+    for(float i=1;i<_saved_filenames_analysis.size();i++){
+
         if(_state == STATE_STOP) return;
-
-        if(!image1.loadImage(_saved_filenames_analysis[i])){
-            //couldn't load image
-            cout << "didn't load image" << endl;
-        }
-
+        if(image1.loadImage(_saved_filenames_analysis[i])){
-            //cout << "LOADED image1!!!" << endl;
-            if(image5.loadImage(_saved_filenames_analysis[i+1])){
-                ///////////////////////// PROCESS THE SAVED CAMERA IMAGES OF SHIT TO THE IMAGES //////////////////////////
-
-                cvColorImage1.setFromPixels(image1.getPixels(), image1.width, image1.height);
-                cvColorImage2.setFromPixels(image5.getPixels(), image5.width, image5.height);
-
-                cvGrayImage1 = cvColorImage1;
-                cvGrayImage2 = cvColorImage2;
-
-                cvGrayDiff1.absDiff(cvGrayImage2, cvGrayImage1);
-                cvGrayDiff1.threshold(80);
-
-                cvContourFinder1.findContours(cvGrayDiff1, 20, (image1.width * image1.height) / 4, 25, true);
-
-
-                /////////////////////////////////// SAVE TO DISK IN THE SYNTHESIS FOLDER ////////////////////////////////
-                string file_name;
-
-                file_name = ofToString(_synth_save_cnt, 2)+"_RelaxRateAnalysis_"+ofToString(_run_cnt,2)+".jpg";
-
-
-                //<---- THE OLD WAY OF SAVING - works on OSX but generates BLACK FRAMES on WINDOWS ---->
-                // ofSaveImage(cvGrayImage1.getPixelsRef(),_whole_file_path_synthesis+"/"+file_name, OF_IMAGE_QUALITY_BEST);
-
-
-                //<---- NEW SAVING - seems to fix WINDOWS saving out BLACK FRAMES PROBLEM ---->
-                //ofImage image;
-                //image.allocate(cvGrayDiff1.width, cvGrayDiff1.height, OF_IMAGE_GRAYSCALE);
-
-                //*** This needs to be here for OSX of we get a BAD ACCESS ERROR. DOES IT BREAK WINDOWS? ***//
-                //image.setUseTexture(false);
-
-                //image.setFromPixels(cvGrayDiff1.getPixels(), cvGrayDiff1.width, cvGrayDiff1.height, OF_IMAGE_GRAYSCALE);
-                //image.saveImage(_whole_file_path_synthesis+"/"+file_name);
-
-                //_saved_filenames_synthesis.push_back(_whole_file_path_synthesis+"/"+file_name);
-
-                // <--- REALLY NEW SAVING METHOD --- 26 feb 2012 --- consolidated the save function into Abstract Analysis> ///
-                saveImageSynthesis(file_name, &cvGrayDiff1, OF_IMAGE_GRAYSCALE);
-                _synth_save_cnt++;
-            }
+            ///////////////////////// PROCESS THE SAVED CAMERA IMAGES OF SHIT TO THE IMAGES //////////////////////////
+
+            cvColorImage1.setFromPixels(image1.getPixels(), image1.width, image1.height);
+            cvGrayDiff1 = cvColorImage1;
+            cvGrayDiff1.threshold(_treshold);
+
+            rfiCvContourFinder* cf = new rfiCvContourFinder();
+
+            cf->findContours(cvGrayDiff1, 20, (image1.width * image1.height) / 4, 25, true);
+
+            cvContourFinderVect.push_back(cf);
+        }
     }
@@ -192,31 +159,27 @@ void RelaxRateAnalysis::displayresults()
 {
-
-    for(float i=1;i<_saved_filenames_synthesis.size();i++){
-        if(_state == STATE_STOP) return;
+    cvContourFinderVectDisplay.clear();
+
+    for(int i=1;i<cvContourFinderVect.size();i++){
+        cvContourFinderVect[i]->draw(0,0, ofGetWidth(), ofGetHeight());
+    }
+}
 
 // display results of the synthesis
diff --git a/src/RelaxRateAnalysis.h b/src/RelaxRateAnalysis.h
index a929304..2b75cef 100755
--- a/src/RelaxRateAnalysis.h
+++ b/src/RelaxRateAnalysis.h
@@ -5,8 +5,12 @@
 
 #include "Poco/Timer.h"
+#include "rfiCvContourFinder.h"
+
 #include "ofxOpenCv.h"
 
+
+
 class RelaxRateAnalysis : public AbstractAnalysis
 {
 public:
@@ -31,6 +35,8 @@ protected:
     int _run_cnt, _save_cnt, _synth_save_cnt, _anim_cnt;
     float c, _frame_cnt, _frame_cnt_max, _anim_cnt_max;
+    int _treshold;
+
     bool _show_image, _image_shown;
     ofImage image1;
     ofImage image2;
@@ -54,6 +60,7 @@ protected:
     ofxCvGrayscaleImage cvGrayImage3;
     ofxCvGrayscaleImage cvGrayImage4;
 
-    ofxCvContourFinder cvContourFinder1;
+    vector<rfiCvContourFinder*> cvContourFinderVect;
+    vector<rfiCvContourFinder*> cvContourFinderVectDisplay;
 
 };
diff --git a/src/rficv/rfiCvContourFinder.cpp b/src/rficv/rfiCvContourFinder.cpp
new file mode 100644
index 0000000..41ccd5c
--- /dev/null
+++ b/src/rficv/rfiCvContourFinder.cpp
@@ -0,0 +1,211 @@
+
+#include "rfiCvContourFinder.h"
+
+
+
+//--------------------------------------------------------------------------------
+static bool sort_carea_compare( const CvSeq* a, const CvSeq* b) {
+    // use opencv to calc size, then sort based on size
+    float areaa = fabs(cvContourArea(a, CV_WHOLE_SEQ));
+    float areab = fabs(cvContourArea(b, CV_WHOLE_SEQ));
+
+    //return 0;
+    return (areaa > areab);
+}
+
+
+
+
+//--------------------------------------------------------------------------------
+rfiCvContourFinder::rfiCvContourFinder() {
+    _width = 0;
+    _height = 0;
+    myMoments = (CvMoments*)malloc( sizeof(CvMoments) );
+    reset();
+}
+
+//--------------------------------------------------------------------------------
+rfiCvContourFinder::~rfiCvContourFinder() {
+    free( myMoments );
+}
+
+//--------------------------------------------------------------------------------
+void rfiCvContourFinder::reset() {
+    cvSeqBlobs.clear();
+    blobs.clear();
+    nBlobs = 0;
+}
+
+//--------------------------------------------------------------------------------
+int rfiCvContourFinder::findContours( ofxCvGrayscaleImage& input,
+                                      int minArea,
+                                      int maxArea,
+                                      int nConsidered,
+                                      bool bFindHoles,
+                                      bool bUseApproximation) {
+
+    // get width/height disregarding ROI
+    IplImage* ipltemp = input.getCvImage();
+    _width = ipltemp->width;
+    _height = ipltemp->height;
+
+    reset();
+
+    // opencv will clober the image it detects contours on, so we want to
+    // copy it into a copy before we detect contours. That copy is allocated
+    // if necessary (necessary = (a) not allocated or (b) wrong size)
+    // so be careful if you pass in different sized images to "findContours"
+    // there is a performance penalty, but we think there is not a memory leak
+    // to worry about better to create mutiple contour finders for different
+    // sizes, ie, if you are finding contours in a 640x480 image but also a
+    // 320x240 image better to make two rfiCvContourFinder objects then to use
+    // one, because you will get penalized less.
+
+    if( inputCopy.getWidth() == 0 ) {
+        inputCopy.setUseTexture(false);
+        inputCopy.allocate( _width, _height );
+    } else if( inputCopy.getWidth() != _width || inputCopy.getHeight() != _height ) {
+        // reallocate to new size
+        inputCopy.clear();
+        inputCopy.setUseTexture(false);
+        inputCopy.allocate( _width, _height );
+    }
+
+    inputCopy.setROI( input.getROI() );
+    inputCopy = input;
+
+    CvSeq* contour_list = NULL;
+    contour_storage = cvCreateMemStorage( 1000 );
+    storage = cvCreateMemStorage( 1000 );
+
+    CvContourRetrievalMode retrieve_mode
+        = (bFindHoles) ? CV_RETR_LIST : CV_RETR_EXTERNAL;
+    cvFindContours( inputCopy.getCvImage(), contour_storage, &contour_list,
+                    sizeof(CvContour), retrieve_mode, bUseApproximation ?
+                    CV_CHAIN_APPROX_SIMPLE : CV_CHAIN_APPROX_NONE );
+    CvSeq* contour_ptr = contour_list;
+
+    // put the contours from the linked list, into an array for sorting
+    while( (contour_ptr != NULL) ) {
+        float area = fabs( cvContourArea(contour_ptr, CV_WHOLE_SEQ) );
+        if( (area > minArea) && (area < maxArea) ) {
+            cvSeqBlobs.push_back(contour_ptr);
+        }
+        contour_ptr = contour_ptr->h_next;
+    }
+
+
+    // sort the pointers based on size
+    if( cvSeqBlobs.size() > 1 ) {
+        sort( cvSeqBlobs.begin(), cvSeqBlobs.end(), sort_carea_compare );
+    }
+
+
+    // now, we have cvSeqBlobs.size() contours, sorted by size in the array
+    // cvSeqBlobs let's get the data out and into our structures that we like
+    for( int i = 0; i < MIN(nConsidered, (int)cvSeqBlobs.size()); i++ ) {
+        blobs.push_back( ofxCvBlob() );
+        float area = cvContourArea( cvSeqBlobs[i], CV_WHOLE_SEQ );
+        CvRect rect = cvBoundingRect( cvSeqBlobs[i], 0 );
+        cvMoments( cvSeqBlobs[i], myMoments );
+
+        blobs[i].area = fabs(area);
+        blobs[i].hole = area < 0 ? true : false;
+        blobs[i].length = cvArcLength(cvSeqBlobs[i]);
+        blobs[i].boundingRect.x = rect.x;
+        blobs[i].boundingRect.y = rect.y;
+        blobs[i].boundingRect.width = rect.width;
+        blobs[i].boundingRect.height = rect.height;
+        blobs[i].centroid.x = (myMoments->m10 / myMoments->m00);
+        blobs[i].centroid.y = (myMoments->m01 / myMoments->m00);
+
+        // get the points for the blob:
+        CvPoint pt;
+        CvSeqReader reader;
+        cvStartReadSeq( cvSeqBlobs[i], &reader, 0 );
+
+        for( int j=0; j < cvSeqBlobs[i]->total; j++ ) {
+            CV_READ_SEQ_ELEM( pt, reader );
+            blobs[i].pts.push_back( ofPoint((float)pt.x, (float)pt.y) );
+        }
+        blobs[i].nPts = blobs[i].pts.size();
+
+    }
+
+    nBlobs = blobs.size();
+
+    // Free the storage memory.
+    // Warning: do this inside this function otherwise a strange memory leak
+    if( contour_storage != NULL ) { cvReleaseMemStorage(&contour_storage); }
+    if( storage != NULL ) { cvReleaseMemStorage(&storage); }
+
+    return nBlobs;
+
+}
+
+//--------------------------------------------------------------------------------
+void rfiCvContourFinder::draw( float x, float y, float w, float h ) {
+
+    float scalex = 0.0f;
+    float scaley = 0.0f;
+    if( _width != 0 ) { scalex = w/_width; } else { scalex = 1.0f; }
+    if( _height != 0 ) { scaley = h/_height; } else { scaley = 1.0f; }
+
+    if(bAnchorIsPct){
+        x -= anchor.x * w;
+        y -= anchor.y * h;
+    }else{
+        x -= anchor.x;
+        y -= anchor.y;
+    }
+
+    ofPushStyle();
+    glPushMatrix();
+    glTranslatef( x, y, 0.0 );
+    glScalef( scalex, scaley, 0.0 );
+    ofSetHexColor(0xFFFFFF);
+
+    for( int i=0; i<(int)blobs.size(); i++ ) {
+        ofNoFill();
+        ofBeginShape();
+        for( int j=0; j<(int)blobs[i].pts.size(); j++ ) {
+            ofVertex( blobs[i].pts[j].x, blobs[i].pts[j].y );
+        }
+        ofEndShape();
+    }
+
+    glPopMatrix();
+    ofPopStyle();
+}
+
+//--------------------------------------------------------------------------------
+void rfiCvContourFinder::draw( const ofPoint & point ) {
+    draw( point.x, point.y, _width, _height );
+}
+
+//--------------------------------------------------------------------------------
+void rfiCvContourFinder::draw( const ofRectangle & rect ) {
+    draw( rect.x, rect.y, rect.width, rect.height );
+}
+
+//--------------------------------------------------------------------------------
+void rfiCvContourFinder::setAnchorPercent( float xPct, float yPct ) {
+    anchor.x = xPct;
+    anchor.y = yPct;
+    bAnchorIsPct = true;
+}
+
+//--------------------------------------------------------------------------------
+void rfiCvContourFinder::setAnchorPoint( int x, int y ) {
+    anchor.x = x;
+    anchor.y = y;
+    bAnchorIsPct = false;
+}
+
+//--------------------------------------------------------------------------------
+void rfiCvContourFinder::resetAnchor() {
+    anchor.set(0,0);
+    bAnchorIsPct = false;
+}
diff --git a/src/rficv/rfiCvContourFinder.h b/src/rficv/rfiCvContourFinder.h
new file mode 100644
--- /dev/null
+++ b/src/rficv/rfiCvContourFinder.h
@@ -0,0 +1,73 @@
+#ifndef RFI_CV_CONTOUR_FINDER
+#define RFI_CV_CONTOUR_FINDER
+
+#include "ofxCvConstants.h"
+#include "ofxCvBlob.h"
+#include "ofxCvGrayscaleImage.h"
+#include <algorithm>
+
+class rfiCvContourFinder : public ofBaseDraws {
+
+  public:
+
+    vector<ofxCvBlob> blobs;
+    int nBlobs;    // DEPRECATED: use blobs.size() instead
+
+
+    rfiCvContourFinder();
+    virtual ~rfiCvContourFinder();
+
+    virtual float getWidth() { return _width; };    //set after first findContours call
+    virtual float getHeight() { return _height; };  //set after first findContours call
+
+    virtual int findContours( ofxCvGrayscaleImage& input,
+                              int minArea, int maxArea,
+                              int nConsidered, bool bFindHoles,
+                              bool bUseApproximation = true);
+                              // approximation = don't do points for all points
+                              // of the contour, if the contour runs
+                              // along a straight line, for example...
+
+    virtual void draw() { draw(0,0, _width, _height); };
+    virtual void draw( float x, float y ) { draw(x,y, _width, _height); };
+    virtual void draw( float x, float y, float w, float h );
+    virtual void draw(const ofPoint & point);
+    virtual void draw(const ofRectangle & rect);
+    virtual void setAnchorPercent(float xPct, float yPct);
+    virtual void setAnchorPoint(int x, int y);
+    virtual void resetAnchor();
+    //virtual ofxCvBlob getBlob(int num);
+
+
+
+  protected:
+
+    int _width;
+    int _height;
+    ofxCvGrayscaleImage inputCopy;
+    CvMemStorage* contour_storage;
+    CvMemStorage* storage;
+    CvMoments* myMoments;
+    vector<CvSeq*> cvSeqBlobs;  //these will become blobs
+
+    ofPoint anchor;
+    bool bAnchorIsPct;
+
+    virtual void reset();
+
+};
+
+
+
+#endif
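
Reviewer note (not part of the patch): the sketch below shows how the pieces this commit introduces are meant to fit together -- one rfiCvContourFinder per saved analysis frame, built from a thresholded grayscale copy and kept around so it can be drawn later, mirroring synthesise() and displayresults() above. It is a minimal sketch against the same openFrameworks/ofxOpenCv API the diff already uses; the function name buildContourFinders and its parameters are illustrative and do not exist in the repository.

    #include "ofMain.h"
    #include "ofxOpenCv.h"
    #include "rfiCvContourFinder.h"

    // Build one contour finder per saved frame: load the frame, convert it to
    // grayscale, threshold it, find contours, and keep the finder for later drawing.
    vector<rfiCvContourFinder*> buildContourFinders(const vector<string>& files, int threshold) {

        vector<rfiCvContourFinder*> finders;

        ofImage frame;
        ofxCvColorImage color;
        ofxCvGrayscaleImage gray;

        for (int i = 0; i < (int)files.size(); i++) {

            if (!frame.loadImage(files[i])) continue;   // skip frames that fail to load

            // (re)allocate the working images on the first frame or when the size changes
            if ((int)color.getWidth() != (int)frame.width || (int)color.getHeight() != (int)frame.height) {
                color.allocate(frame.width, frame.height);
                gray.allocate(frame.width, frame.height);
            }

            color.setFromPixels(frame.getPixels(), frame.width, frame.height);
            gray = color;                 // RGB -> grayscale, as cvGrayDiff1 = cvColorImage1 above
            gray.threshold(threshold);    // same role as _treshold in the patch

            rfiCvContourFinder* cf = new rfiCvContourFinder();
            cf->findContours(gray, 20, (frame.width * frame.height) / 4, 25, true);
            finders.push_back(cf);
        }
        return finders;
    }

    // Later, e.g. inside a draw() callback, the stored finders can be replayed the
    // way displayresults() does:
    //
    //     for (int i = 0; i < (int)finders.size(); i++) {
    //         finders[i]->draw(0, 0, ofGetWidth(), ofGetHeight());
    //     }

One design point worth flagging: synthesise() clears cvContourFinderVect and refills it with new'd finders, and nothing in this diff appears to delete them, so a cleanup pass (or smart pointers) may be needed when the vectors are cleared or the analysis is re-run.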