- Timestamp: Oct 17, 2019, 2:49:35 PM (5 years ago)
- Location: trunk
- Files: 2 deleted, 32 edited, 2 copied
trunk
- Property svn:mergeinfo set to /branches/sanscv (merged, eligible)
trunk/cmake-modules/FlairUseFile.cmake
r302 → r338

  SET(FLAIR_INCLUDE_DIR
- # ${LIBXML2_INCLUDE_DIR}
- ${CMAKE_SYSROOT}/usr/include/opencv1
  $ENV{FLAIR_ROOT}/flair-install/include/FlairCore
  )
...
  SET(FLAIR_LIBRARIES
  ${LIBXML2_LIBRARIES}
- udt pthread cv cxcore highgui FileLib rt z
+ udt pthread FileLib rt z
  )
trunk/demos/OpticalFlow/uav/src/DemoOpticalFlow.cpp
r314 → r338

  DemoOpticalFlow::DemoOpticalFlow(TargetController *controller): UavStateMachine(controller) {
- Uav* uav=GetUav();
+ Uav* uav=GetUav();
  (whitespace-only change)
  if (uav->GetVerticalCamera() == NULL) {
  Err("no vertical camera found\n");
...
  greyCameraImage=new CvtColor(uav->GetVerticalCamera(),"gray",CvtColor::Conversion_t::ToGray);
- uav->GetVerticalCamera()->UseDefaultPlot(greyCameraImage->Output()); // can the camera's defaultPlot display anything at all?
+ uav->GetVerticalCamera()->UseDefaultPlot(greyCameraImage->Output());
  //optical flow stack
...
  DataPlot1D* xVelocityPlot=new DataPlot1D(opticalFlowTab->NewRow(),"x speed (px/s)",-250,250);
  DataPlot1D* yVelocityPlot=new DataPlot1D(opticalFlowTab->LastRowLastCol(),"y speed (px/s)",-250,250);
- DataPlot1D* xFirstPointPlot=new DataPlot1D(opticalFlowTab->NewRow(),"x movement first point",-25,25);
- DataPlot1D* yFirstPointPlot=new DataPlot1D(opticalFlowTab->LastRowLastCol(),"y movement first point",-25,25);
- // DataPlot1D* xAccelerationPlot=new DataPlot1D(opticalFlowTab->NewRow(),"x_acceleration",-250,250);
- // DataPlot1D* yAccelerationPlot=new DataPlot1D(opticalFlowTab->LastRowLastCol(),"y_acceleration",-250,250);

  xVelocityPlot->AddCurve(opticalFlowSpeedRaw->Output()->Element(0,0));
  xVelocityPlot->AddCurve(opticalFlowSpeed->GetMatrix()->Element(0,0),DataPlot::Blue);
  yVelocityPlot->AddCurve(opticalFlowSpeedRaw->Output()->Element(1,0));
  yVelocityPlot->AddCurve(opticalFlowSpeed->GetMatrix()->Element(1,0),DataPlot::Blue);
- xFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(0,0));
- xFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(1,0),DataPlot::Blue);
- xFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(2,0),DataPlot::Green);
- yFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(0,1));
- yFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(1,1),DataPlot::Blue);
- yFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(2,1),DataPlot::Green);
- // xAccelerationPlot->AddCurve(opticalFlowAccelerationRaw->Matrix()->Element(0,0));
- // xAccelerationPlot->AddCurve(opticalFlowAcceleration->Matrix()->Element(0,0),DataPlot::Blue);
- // yAccelerationPlot->AddCurve(opticalFlowAccelerationRaw->Matrix()->Element(1,0));
- // yAccelerationPlot->AddCurve(opticalFlowAcceleration->Matrix()->Element(1,0),DataPlot::Blue);

  u_x=new Pid(setupLawTab->At(1,0),"u_x");
  u_x->UseDefaultPlot(graphLawTab->NewRow());
...
  void DemoOpticalFlow::SignalEvent(Event_t event) {
+ UavStateMachine::SignalEvent(event);
  switch(event) {
  case Event_t::EnteringControlLoop:
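The one functional fix here is easy to miss: the overridden SignalEvent() now forwards the event to the base class before handling it locally, so the state machine's own bookkeeping still runs. A minimal sketch of the pattern (class and event names follow this demo; the bodies are illustrative, not the actual framework code):

    enum class Event_t { EnteringControlLoop };

    class UavStateMachine {
    public:
        virtual ~UavStateMachine() {}
        virtual void SignalEvent(Event_t event) { /* framework bookkeeping */ }
    };

    class DemoOpticalFlow : public UavStateMachine {
    public:
        void SignalEvent(Event_t event) override {
            UavStateMachine::SignalEvent(event); // keep the base behavior
            switch (event) {
                case Event_t::EnteringControlLoop: /* demo-specific work */ break;
            }
        }
    };

Forgetting that base call is a classic source of silently broken state machines, since the override otherwise swallows every event.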
trunk/demos/OpticalFlow/uav/src/DemoOpticalFlow.h
r214 → r338

  flair::filter::OpticalFlowSpeed *opticalFlowSpeedRaw;
  flair::filter::EulerDerivative *opticalFlowAccelerationRaw;
- flair::gui::PushButton *startOpticalflow,*stopOpticalflow;
- void StartOpticalFlow(void);
+ flair::gui::PushButton *startOpticalflow,*stopOpticalflow;
+ void StartOpticalFlow(void);
  (whitespace-only change)
  flair::filter::LowPassFilter* opticalFlowSpeed;
  flair::filter::LowPassFilter* opticalFlowAcceleration;
trunk/lib/FlairCore/src/Picture.cpp
r330 → r338

  #include "Picture.h"
- #include "cvimage.h"
+ #include "Image.h"
  #include "Layout.h"
  #include "LayoutPosition.h"
...
  Picture::Picture(const LayoutPosition *position, string name,
-                  const cvimage *image)
+                  const Image *image)
      : SendData(position, name, "Picture", 200) {
    this->image = image;
trunk/lib/FlairCore/src/Picture.h
r330 → r338

  namespace flair {
  namespace core {
- class cvimage;
+ class Image;
  }
...
  Picture(const LayoutPosition *position, std::string name,
-         const core::cvimage *image);
+         const core::Image *image);
...
  void ExtraXmlEvent(void){};

- const core::cvimage *image;
+ const core::Image *image;
  };
trunk/lib/FlairSensorActuator/CMakeLists.txt
r302 → r338

  ${CMAKE_CURRENT_SOURCE_DIR}/../FlairCore/src
  ${CMAKE_CURRENT_SOURCE_DIR}/../FlairVisionFilter/src
- ${CMAKE_SYSROOT}/usr/include/opencv1
  ${CMAKE_SYSROOT}/usr/include/vrpn
  )
trunk/lib/FlairSensorActuator/src/Camera.cpp
r330 → r338

  #include <DataPlot1D.h>
  #include <Picture.h>
- #include <VisionFilter.h>
- #include <highgui.h>
  #include <fstream>
...
  Camera::Camera(string name, uint16_t width,
-                uint16_t height, cvimage::Type::Format format)
+                uint16_t height, Image::Type::Format format)
      : IODevice(getFrameworkManager(), name) {
    plot_tab = NULL;
...
  // do not allocate imagedata, allocation is done by the camera
- output = new cvimage((IODevice *)this, width, height, format, "out", false);
+ output = new Image((IODevice *)this, width, height, format, "out", false);
...
  case LogFormat::RAW:
    AddDataToLog(output);
-   Warn("raw log of cvimage is not yet implemented\n");
+   Warn("raw log of Image is not yet implemented\n");
    break;
  case LogFormat::JPG:
-   Warn("logging cvimage to jpeg\n");
+   Warn("logging Image to jpeg\n");
    Warn("jpeg are not included in classical dbt file, as dbt does not handle variable length\n");
    break;
...
  GridLayout *Camera::GetLayout(void) const { return setup_layout; }

- void Camera::UseDefaultPlot(const core::cvimage *image) {
+ void Camera::UseDefaultPlot(const core::Image *image) {
    if (tab == NULL) {
      Err("not applicable for simulation part.\n");
...
- core::cvimage *Camera::Output(void) { return output; }
+ core::Image *Camera::Output(void) { return output; }

  void Camera::ProcessUpdate(core::io_data* data) {
...
    if(logFormat==LogFormat::JPG) {
      data->GetMutex();
-     //IplImage *img=((cvimage*)data)->img;
-     const cvimage* input = dynamic_cast<const cvimage*>(data);
+     //IplImage *img=((Image*)data)->img;
+     const Image* input = dynamic_cast<const Image*>(data);
      if (!input) {
-       Warn("casting %s to cvimage failed\n",data->ObjectName().c_str());
+       Warn("casting %s to Image failed\n",data->ObjectName().c_str());
        return;
      }
...
      string filename=getFrameworkManager()->GetLogPath()+"/"+ObjectName()+"_"+std::to_string(data->DataTime())+".jpg";
-     switch(((cvimage*)data)->GetDataType().GetFormat()) {
-       case cvimage::Type::Format::Gray:
+     switch(((Image*)data)->GetDataType().GetFormat()) {
+       case Image::Type::Format::Gray:
          saveToJpeg(img,filename,PictureFormat_t::Gray,PictureFormat_t::Gray);
          break;
-       case cvimage::Type::Format::BGR:
+       case Image::Type::Format::BGR:
          saveToJpeg(img,filename,PictureFormat_t::RGB,PictureFormat_t::RGB);
          break;
-       case cvimage::Type::Format::UYVY:
+       case Image::Type::Format::UYVY:
          saveToJpeg(img,filename,PictureFormat_t::YUV_422ile,PictureFormat_t::YUV_422p);
          break;
...
    output->GetMutex();
    if(extension=="jpg") {
-     if(output->GetDataType().GetFormat()==cvimage::Type::Format::Gray) saveToJpeg(output->img,filename,PictureFormat_t::Gray,PictureFormat_t::Gray);
-     if(output->GetDataType().GetFormat()==cvimage::Type::Format::BGR) saveToJpeg(output->img,filename,PictureFormat_t::RGB,PictureFormat_t::RGB);
-     if(output->GetDataType().GetFormat()==cvimage::Type::Format::UYVY) saveToJpeg(output->img,filename,PictureFormat_t::YUV_422ile,PictureFormat_t::YUV_422p);
+     if(output->GetDataType().GetFormat()==Image::Type::Format::Gray) saveToJpeg(output->img,filename,PictureFormat_t::Gray,PictureFormat_t::Gray);
+     if(output->GetDataType().GetFormat()==Image::Type::Format::BGR) saveToJpeg(output->img,filename,PictureFormat_t::RGB,PictureFormat_t::RGB);
+     if(output->GetDataType().GetFormat()==Image::Type::Format::UYVY) saveToJpeg(output->img,filename,PictureFormat_t::YUV_422ile,PictureFormat_t::YUV_422p);
    } else {
      cvSaveImage(filename.c_str(),output->img);
trunk/lib/FlairSensorActuator/src/Camera.h
r137 → r338

  #include <IODevice.h>
  #include <stdint.h>
- #include <cvimage.h>
+ #include <Image.h>
...
  Camera(std::string name, uint16_t width,
-        uint16_t height, core::cvimage::Type::Format format);
+        uint16_t height, core::Image::Type::Format format);
...
  * \param image image to display
  */
- void UseDefaultPlot(const core::cvimage *image);
+ void UseDefaultPlot(const core::Image *image);
...
  * \return the output matrix
  */
- core::cvimage *Output(void);
+ core::Image *Output(void);

  core::DataType const &GetOutputDataType() const;
...
  gui::GroupBox *GetGroupBox(void) const;

- core::cvimage *output;
+ core::Image *output;
trunk/lib/FlairSensorActuator/src/Ps3Eye.cpp
r268 → r338

                 uint8_t priority)
      : V4LCamera( name, camera_index, 320, 240,
-                  cvimage::Type::Format::YUYV, priority) {
+                  Image::Type::Format::YUYV, priority) {
    SetIsReady(true);
  }
trunk/lib/FlairSensorActuator/src/SimulatedCamera.cpp
r330 → r338

                  uint32_t modelId,uint32_t deviceId, uint8_t priority)
      : Thread(getFrameworkManager(), name, priority),
-       Camera(name, width, height, cvimage::Type::Format::BGR) {
+       Camera(name, width, height, Image::Type::Format::BGR) {

    buf_size = width * height * channels+sizeof(Time);
trunk/lib/FlairSensorActuator/src/V4LCamera.cpp
r330 → r338

  #include <CheckBox.h>
  #include <Label.h>
- #include <cvimage.h>
+ #include <Image.h>
  #include <FrameworkManager.h>
  #include <fcntl.h>
  #include <linux/videodev2.h>
-
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <cstring>
  #include <sys/mman.h>
  #include <VisionFilter.h>

  #define DEFAULT_V4L_BUFFERS 4
...
  V4LCamera::V4LCamera(string name,
                       uint8_t camera_index, uint16_t width, uint16_t height,
-                      cvimage::Type::Format format, uint8_t priority)
+                      Image::Type::Format format, uint8_t priority)
      : Thread(getFrameworkManager(), name, priority),
        Camera(name, width, height, format) {

- string deviceName="/dev/video"+std::to_string(camera_index);
- device = open(deviceName.c_str(), O_RDWR | O_NONBLOCK);
- if (device == -1) {
-     Thread::Err("Cannot open %s\n");
- }
+ string deviceName="/dev/video"+std::to_string(camera_index);
+ device = open(deviceName.c_str(), O_RDWR | O_NONBLOCK);
+ if (device == -1) {
+     Thread::Err("Cannot open %s\n",deviceName.c_str());
+ } else {
+     Printf("V4LCamera %s, opened %s\n",name.c_str(),deviceName.c_str());
+ }
+
+ struct v4l2_capability cap;
+ memset(&cap, 0, sizeof (v4l2_capability));
+ if (xioctl (device, VIDIOC_QUERYCAP, &cap)==-1) {
+     Thread::Err("VIDIOC_QUERYCAP xioctl\n");
+ }
+ if ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
+     Thread::Err("device is unable to capture video memory.\n");
+ }
+
+ //get v4l2_format
+ struct v4l2_format form;
+ form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ if(xioctl (device, VIDIOC_G_FMT,&form)==-1) {
+     Thread::Err("VIDIOC_G_FMT xioctl\n");
+ }
+
+ //set width, height and format
+ if (format == Image::Type::Format::UYVY) {
+     form.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
+ } else if (format == Image::Type::Format::YUYV) {
+     form.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
+ } else {
+     Thread::Err("format not supported\n");
+ }
+
+ form.fmt.pix.width = width;
+ form.fmt.pix.height = height;
+ form.fmt.win.chromakey = 0;
+ form.fmt.win.field = V4L2_FIELD_ANY;
+ form.fmt.win.clips = 0;
+ form.fmt.win.clipcount = 0;
+ form.fmt.pix.field = V4L2_FIELD_ANY;
+ if(xioctl (device, VIDIOC_S_FMT,&form)==-1) {
+     Thread::Err("VIDIOC_S_FMT xioctl\n");
+ }
+
+ //alloc and queue bufs
+ AllocBuffers();
+ for (int bufferIndex = 0; bufferIndex < nbBuffers;++bufferIndex) {
+     QueueBuffer(bufferIndex);
+ }
- //get v4l2_format
- struct v4l2_format form;
- form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- xioctl (device, VIDIOC_G_FMT,&form);
- ... (old version: unchecked ioctls, format selection on cvimage::Type::Format, unchecked VIDIOC_S_FMT)
-
- /* This is just a technicality, but all buffers must be filled up before any
-    staggered SYNC is applied. So, fill them up. (see V4L HowTo) */
- AllocBuffers();
- for (int bufferIndex = 0; bufferIndex < ((int)requestbuffers.count);++bufferIndex) {
-     struct v4l2_buffer buf;
-     memset(&buf, 0, sizeof (v4l2_buffer));
-     buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
-     buf.memory = V4L2_MEMORY_MMAP;
-     buf.index = (unsigned long)bufferIndex;
-     if (-1 == xioctl (device, VIDIOC_QBUF, &buf)) {
-         Thread::Err("VIDIOC_QBUF xioctl\n");
-         break;
-     }
- }

  // enable the streaming
  v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- if (-1 == xioctl (device, VIDIOC_STREAMON,&type)) {
+ if (xioctl (device, VIDIOC_STREAMON,&type)==-1) {
      Thread::Err("VIDIOC_STREAMON xioctl\n");
  }

- // skip first frame. it is often bad -- this is unnoticed in traditional apps,
- // but could be fatal if bad jpeg is enabled
- GrabFrame();
+ // skip first frame. it is often bad -- this is unnoticed in traditional apps,
+ // but could be fatal if bad jpeg is enabled
+ bufferIndex=-1;
+ GrabFrame();

- // station sol
+ // ground station
  gain = new DoubleSpinBox(GetGroupBox()->NewRow(), "gain:", 0, 1, 0.1);
  exposure = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "exposure:", 0,1, 0.1);
  bright = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "bright:", 0, 1, 0.1);
  contrast = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "contrast:", 0,1, 0.1);
  hue = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "hue:", 0, 1, 0.1);
  sharpness = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "sharpness:", 0, 1, 0.1);
  sat = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "saturation:", 0, 1,0.1);
  autogain = new CheckBox(GetGroupBox()->NewRow(), "autogain:");
  autoexposure = new CheckBox(GetGroupBox()->LastRowLastCol(), "autoexposure:");
  awb = new CheckBox(GetGroupBox()->LastRowLastCol(), "awb:");
  fps = new Label(GetGroupBox()->NewRow(), "fps");

  hasProblems=false;
  }

  V4LCamera::~V4LCamera() {
+ for (int n_buffers = 0; n_buffers < nbBuffers; n_buffers++) {
+     FreeFunction((char*)buffers[n_buffers].start);
+ }
  SafeStop();
  Join();
  }

  void V4LCamera::Run(void) {
  Time cam_time, new_time, fpsNow, fpsPrev;
- char* buffer; // raw image
  int fpsCounter = 0;
...
  fpsCounter++;
  if (GetTime() > (fpsPrev + 5 * (Time)1000000000)) {
-     // toutes les 5 secondes
+     // every 5 seconds
      fpsNow = GetTime();
      fps->SetText("fps: %.1f",
...
  SetProperty(V4L2_CID_AUTO_WHITE_BALANCE, awb->Value());

- // cam pictures
- buffer = RetrieveRawFrame();
+ // get picture
  GrabFrame();
  new_time = GetTime();
...
  output->GetMutex();
- output->buffer = buffer;
+ output->buffer=(char*)buffers[bufferIndex].start;
  output->ReleaseMutex();
...
- void V4LCamera::GrabFrame(void) {
-     ... (old version: per-frame select() loop with a 2 s timeout, delegating to read_frame_v4l2())
- }
-
- int V4LCamera::read_frame_v4l2(void) {
-     ... (old version: VIDIOC_DQBUF/VIDIOC_QBUF on the driver's MMAP buffers; removed with the MMAP path)
- }
+ int V4LCamera::QueueBuffer(int index) {
+     struct v4l2_buffer buf;
+     if(index>=0 && index<nbBuffers) {
+         memset(&buf, 0, sizeof (v4l2_buffer));
+         buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+         buf.memory = V4L2_MEMORY_USERPTR;//V4L2_MEMORY_MMAP;
+         buf.index = (unsigned long)index;
+         buf.m.userptr=(unsigned long)(buffers[index].start);
+         buf.length=buffers[index].length;
+
+         int ret=xioctl (device, VIDIOC_QBUF, &buf);
+         if (ret==-1) {
+             Thread::Err("VIDIOC_QBUF xioctl %s\n",strerror(-ret));
+             return -1;
+         }
+     }
+     return 0;
+ }
+
+ int V4LCamera::GrabFrame(void) {
+     //queue previous buffer
+     if(QueueBuffer(bufferIndex)<0) return -1;
+
+     fd_set fds;
+     struct timeval tv;
+     FD_ZERO (&fds);
+     FD_SET (device, &fds);
+
+     tv.tv_sec = 0;
+     tv.tv_usec = 100000;
+
+     int r = select (device+1, &fds, NULL, NULL, &tv);
+
+     if (-1 == r) {
+         char errorMsg[256];
+         Thread::Err("select (%s)\n", strerror_r(-r, errorMsg, sizeof(errorMsg)));
+         return -1;
+     }
+
+     if (0 == r) {
+         Thread::Err("select timeout\n");
+         return -1;
+     }
+
+     struct v4l2_buffer buf;
+     memset(&buf, 0, sizeof (v4l2_buffer));
+     buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+     buf.memory = V4L2_MEMORY_USERPTR;//V4L2_MEMORY_MMAP;
+
+     //get last captured image
+     int prevDQbuf=-1;
+     for(int i=0;i<4;i++) {
+         if (xioctl (device, VIDIOC_DQBUF, &buf)==-1) {
+             if (errno==EAGAIN) {
+                 break;
+             } else {
+                 Thread::Err("VIDIOC_DQBUF xioctl\n");
+                 return -1;
+             }
+         } else {
+             if(prevDQbuf!=-1) {
+                 QueueBuffer(prevDQbuf);
+             }
+             for (int n_buffers = 0; n_buffers < nbBuffers; n_buffers++) {
+                 if((void*)(buf.m.userptr)==buffers[n_buffers].start) {
+                     prevDQbuf=n_buffers;
+                     bufferIndex=n_buffers;
+                     break;
+                 }
+             }
+         }
+     }
+
+     return 1;
+ }

  int V4LCamera::AllocBuffers(void) {
+ struct v4l2_requestbuffers requestbuffers;
  memset(&requestbuffers, 0, sizeof (v4l2_requestbuffers));
...
  requestbuffers.count = buffer_number;
  requestbuffers.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- requestbuffers.memory = V4L2_MEMORY_MMAP;
-
- if (-1 == xioctl (device, VIDIOC_REQBUFS, &requestbuffers)) {
-     if (EINVAL == errno) {
-         Thread::Err("memory mapping not supported\n");
+ requestbuffers.memory = V4L2_MEMORY_USERPTR;//V4L2_MEMORY_MMAP;
+
+ if (xioctl (device, VIDIOC_REQBUFS, &requestbuffers)==-1) {
+     if (errno==EINVAL) {
+         Thread::Err("VIDIOC_REQBUFS user memory not supported\n");
      } else {
          Thread::Err ("VIDIOC_REQBUFS xioctl\n");
...
      return -1;
  }

- ... (old version: retry with fewer buffers on shortage, then VIDIOC_QUERYBUF and mmap() of each
-      driver buffer, plus a todo about double buffering and allocating the output image in the constructor)
+ nbBuffers=DEFAULT_V4L_BUFFERS;
+ for (int n_buffers = 0; n_buffers < nbBuffers; n_buffers++) {
+     buffers[n_buffers].length = output->GetDataType().GetSize();
+     buffers[n_buffers].start =AllocFunction(output->GetDataType().GetSize());
+ }

  return 1;
  };

- char *V4LCamera::RetrieveRawFrame(void) {
-     ... (old version: VIDIOCSYNC on the mmap'ed frame, then memcpy into the output buffer; removed with the MMAP path)
- }
-
  bool V4LCamera::HasProblems(void) {
      return hasProblems;
...
  void V4LCamera::SetAutoExposure(bool value) {
-     Thread::Warn("not implemented in opencv\n");
+     Thread::Warn("not implemented\n");
  }
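The heart of this rework is the move from driver-owned MMAP buffers to user-pointer streaming: the application now allocates the frame buffers itself (here via AllocFunction, so they can live in DSP-shared memory) and lends them to the driver. A minimal, self-contained sketch of that V4L2 capture pattern, assuming /dev/video0, four buffers and a 320x240 YUYV format, with error handling trimmed:

    // Minimal V4L2 user-pointer capture sketch (assumptions: device path,
    // resolution and format; a real driver may also require aligned buffers).
    #include <fcntl.h>
    #include <cstdlib>
    #include <sys/ioctl.h>
    #include <sys/select.h>
    #include <linux/videodev2.h>
    #include <unistd.h>

    int main() {
        int fd = open("/dev/video0", O_RDWR | O_NONBLOCK);
        if (fd < 0) return 1;

        v4l2_requestbuffers req{};
        req.count = 4;
        req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        req.memory = V4L2_MEMORY_USERPTR;        // app allocates, driver borrows
        if (ioctl(fd, VIDIOC_REQBUFS, &req) == -1) return 1;

        size_t len = 320 * 240 * 2;              // assumed YUYV 320x240
        void *bufs[4];
        for (int i = 0; i < 4; i++) {
            bufs[i] = malloc(len);
            v4l2_buffer buf{};
            buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            buf.memory = V4L2_MEMORY_USERPTR;
            buf.index = i;
            buf.m.userptr = (unsigned long)bufs[i];
            buf.length = len;
            ioctl(fd, VIDIOC_QBUF, &buf);        // hand the buffer to the driver
        }

        v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        ioctl(fd, VIDIOC_STREAMON, &type);

        for (int frame = 0; frame < 100; frame++) {
            fd_set fds; FD_ZERO(&fds); FD_SET(fd, &fds);
            timeval tv{0, 100000};               // 100 ms timeout, as in r338
            if (select(fd + 1, &fds, nullptr, nullptr, &tv) <= 0) continue;
            v4l2_buffer buf{};
            buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            buf.memory = V4L2_MEMORY_USERPTR;
            if (ioctl(fd, VIDIOC_DQBUF, &buf) == -1) continue;
            // buf.m.userptr now points at a filled frame; process it here,
            // then give the buffer back to the driver:
            ioctl(fd, VIDIOC_QBUF, &buf);
        }
        close(fd);
        return 0;
    }

The r338 GrabFrame() additionally drains the queue (up to four DQBUFs) to keep only the most recent frame, and defers re-queueing the displayed buffer until the next call, so output->buffer always points at a frame the driver is not writing to.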
trunk/lib/FlairSensorActuator/src/V4LCamera.h
r330 → r338

  namespace flair {
  namespace core {
- class cvimage;
+ class Image;
  }
  namespace gui {
...
  V4LCamera(std::string name,
            uint8_t camera_index, uint16_t width, uint16_t height,
-           core::cvimage::Type::Format format, uint8_t priority);
+           core::Image::Type::Format format, uint8_t priority);
...
  void SetProperty(int property,float value);
  float GetProperty(int property);
- void GrabFrame(void);
- int read_frame_v4l2(void);
- char *RetrieveRawFrame(void);
+ int GrabFrame(void);
  int AllocBuffers(void);
+ int QueueBuffer(int index);
  struct video_mmap *mmaps;
  int bufferIndex;
- struct v4l2_requestbuffers requestbuffers;
  struct buffer {
    void * start;
    size_t length;
  };
- buffer buffers[MAX_V4L_BUFFERS + 1];
+ buffer buffers[MAX_V4L_BUFFERS];
  char* frame;
+ int nbBuffers;
  };
  } // end namespace sensor
trunk/lib/FlairVisionFilter/CMakeLists.txt
r302 → r338

  ${CMAKE_CURRENT_SOURCE_DIR}/../FlairCore/src
  ${CMAKE_CURRENT_SOURCE_DIR}/../FlairFilter/src
- ${CMAKE_SYSROOT}/usr/include/opencv1
  )
trunk/lib/FlairVisionFilter/src/CvtColor.cpp
r330 → r338

  #include "CvtColor.h"
- #include <cvimage.h>
+ #include "VisionFilter.h"
+ #include <Image.h>
  #include <typeinfo>
...
  using namespace flair::core;

+ class CvtColor_impl {
+ public:
+     CvtColor_impl(flair::filter::CvtColor *self,flair::filter::CvtColor::Conversion_t conversion):output(0) {
+         this->conversion=conversion;
+         this->self=self;
+
+         switch(conversion) {
+             case flair::filter::CvtColor::Conversion_t::ToGray:
+                 try{
+                     Image::Type const &imageType=dynamic_cast<Image::Type const &>(((IODevice*)(self->Parent()))->GetOutputDataType());
+                     output=new Image(self,imageType.GetWidth(),imageType.GetHeight(),Image::Type::Format::Gray,"conversion",true,2);
+                     //inIplImage = (IplImage*)AllocFunction(sizeof(IplImage));
+                     //outIplImage = (IplImage*)AllocFunction(sizeof(IplImage));
+                 } catch(std::bad_cast& bc) {
+                     self->Err("io type mismatch\n");
+                 }
+                 break;
+             default:
+                 self->Err("conversion not supported\n");
+         }
+     }
+
+     ~CvtColor_impl() {
+         //FreeFunction((char*)(inIplImage));
+         //FreeFunction((char*)(outIplImage));
+     }
+
+     void UpdateFrom(const io_data *data){
+         Image *img=(Image*)data;
+
+         if(output!=NULL) {
+             data->GetMutex();
+             output->GetMutex();
+             /* (IplImage width/height/buffer plumbing commented out, pending the OpenCV-free DSP port) */
+             switch(conversion) {
+                 case flair::filter::CvtColor::Conversion_t::ToGray:
+                     switch(((Image*)data)->GetDataType().GetFormat()) {
+                         case Image::Type::Format::YUYV:
+                             //dspCvtColor(inIplImage,outIplImage,DSP_YUYV2GRAY);
+                             break;
+                         case Image::Type::Format::UYVY:
+                             //dspCvtColor(inIplImage,outIplImage,DSP_UYVY2GRAY);
+                             break;
+                         case Image::Type::Format::BGR:
+                             //dspCvtColor(inIplImage,outIplImage,DSP_BGR2GRAY);
+                             break;
+                         default:
+                             self->Err("input format not supported\n");
+                     }
+                     break;
+                 default:
+                     self->Err("conversion not supported\n");
+             }
+
+             output->ReleaseMutex();
+             data->ReleaseMutex();
+
+             output->SetDataTime(data->DataTime());
+         }
+     };
+
+     Image *output;
+
+ private:
+     flair::filter::CvtColor::Conversion_t conversion;
+     //IplImage *inIplImage,*outIplImage;
+     flair::filter::CvtColor *self;
+ };
+
  namespace flair { namespace filter {

- CvtColor::CvtColor(const core::IODevice* parent,std::string name,Conversion_t conversion) : IODevice(parent,name),output(0) {
-     ... (old inline implementation: same ToGray switch and output allocation, then SetIsReady(true))
- }
+ CvtColor::CvtColor(const core::IODevice* parent,std::string name,Conversion_t conversion) : IODevice(parent,name) {
+     pimpl_=new CvtColor_impl(this,conversion);
+ }

- CvtColor::~CvtColor(void) {}
+ CvtColor::~CvtColor(void) {
+     delete pimpl_;
+ }

- cvimage* CvtColor::Output(void) {
-     return output;
+ Image* CvtColor::Output(void) {
+     return pimpl_->output;
  }

  DataType const &CvtColor::GetOutputDataType() const {
-     if(output!=NULL) {
-         return output->GetDataType();
+     if(pimpl_->output!=NULL) {
+         return pimpl_->output->GetDataType();
      } else {
          return dummyType;
...
  void CvtColor::UpdateFrom(const io_data *data) {
-     ... (old inline body: mutexes, ToGray switch over the input format with commented-out dspCvtColor calls, SetDataTime, ProcessUpdate(output))
+     pimpl_->UpdateFrom(data);
+     if(pimpl_->output!=NULL) ProcessUpdate(pimpl_->output);
  }
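CvtColor is the first of several filters in this changeset (ImgThreshold, OpticalFlow, HoughLines, OpticalFlowCompensated follow the same template) refactored to the pimpl idiom: the public header keeps only a forward-declared implementation pointer, so OpenCV/DSP types no longer leak into the installed API. A minimal sketch of the pattern as applied here (names are generic; bodies are illustrative, not the framework code):

    // header: Filter.h -- no implementation types leak out
    class io_data;                          // opaque, used only via pointer
    class Filter_impl;                      // forward declaration only

    class Filter {
    public:
        Filter();
        ~Filter();
        void UpdateFrom(const io_data *data);
    private:
        Filter_impl *pimpl_;                // all private state lives behind this
    };

    // source: Filter.cpp -- details stay in one translation unit
    class Filter_impl {
    public:
        void UpdateFrom(const io_data *data) { /* real work, private members */ }
    };

    Filter::Filter() : pimpl_(new Filter_impl()) {}
    Filter::~Filter() { delete pimpl_; }
    void Filter::UpdateFrom(const io_data *data) { pimpl_->UpdateFrom(data); }

The payoff is ABI stability: private members can change (as they do throughout this revision) without forcing a recompile of every client that includes the header.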
trunk/lib/FlairVisionFilter/src/CvtColor.h
r124 → r338

  #include <IODevice.h>

- namespace flair
- {
- namespace core
- {
-     class cvimage;
+ namespace flair {
+     namespace core {
+         class Image;
      }
  }

+ class CvtColor_impl;
+
- namespace flair
- {
+ namespace flair {
  namespace filter
  {
...
  * \return the output image
  */
- core::cvimage* Output(void);
+ core::Image* Output(void);

  core::DataType const &GetOutputDataType() const;
...
  void UpdateFrom(const core::io_data *data);

- core::cvimage *output;
- Conversion_t conversion;
+ class CvtColor_impl *pimpl_;
  };
  } // end namespace filter
trunk/lib/FlairVisionFilter/src/HoughLines.cpp
r318 → r338

  #include "HoughLines.h"
- #include <cvimage.h>
+ #include "VisionFilter.h"
+ #include <Image.h>
+ #include <OneAxisRotation.h>
  #include <Matrix.h>
  #include <Layout.h>
...
  #include <DoubleSpinBox.h>
  #include <typeinfo>
+ #include <math.h>

  #define MAX_LINES 100
...
  using namespace flair::gui;

+ class HoughLines_impl {
+ public:
+     HoughLines_impl(flair::filter::HoughLines *self,const LayoutPosition* position,string name,const Vector2Df *inPtRefGlobal,float inThetaRefGlobal) {
+         this->self=self;
+         GroupBox* reglages_groupbox=new GroupBox(position,name);
+         rotation=new OneAxisRotation(reglages_groupbox->NewRow(),"pre rotation",OneAxisRotation::PostRotation);
+         fullRhoStep=new SpinBox(reglages_groupbox->NewRow(),"full rho step:","pixels",0,255,1,1);
+         fullThetaStep=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"full theta step:","degrees",0,90,1,1);
+         trackingRhoStep=new SpinBox(reglages_groupbox->NewRow(),"tracking rho step:","pixels",0,255,1,1);
+         trackingThetaStep=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"tracking theta step:","degrees",0,90,1,1);
+         trackingDeltaTheta=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"tracking delta theta:","degrees",0,90,1,1);
+         nbPoints=new SpinBox(reglages_groupbox->NewRow(),"nb points:",0,10000,10,100);
+
+         isTracking=false;
+
+         //init output matrix of same size as init
+         MatrixDescriptor* desc=new MatrixDescriptor(4,1);
+         desc->SetElementName(0,0,"distance");
+         desc->SetElementName(1,0,"orientation rad");
+         desc->SetElementName(2,0,"orientation deg");
+         desc->SetElementName(3,0,"line_detected");
+         output=new Matrix(self,desc,floatType,name);
+         delete desc;
+
+         try{
+             Image::Type const &imageType=dynamic_cast<Image::Type const &>(((IODevice*)(self->Parent()))->GetOutputDataType());
+             if(imageType.GetFormat()!=Image::Type::Format::Gray) {
+                 self->Err("input image is not gray\n");
+             }
+         } catch(std::bad_cast& bc) {
+             self->Err("io type mismatch\n");
+         }
+
+         thetaRefGlobal=inThetaRefGlobal;
+         if (inPtRefGlobal==NULL) {
+             ptRefGlobal=NULL;
+         } else { //rotation from global coordinates to hough space
+             ptRefGlobal=new Vector2Df(inPtRefGlobal->x,inPtRefGlobal->y);
+         }
+     }
+
+     ~HoughLines_impl() {
+         if(ptRefGlobal!=NULL) delete ptRefGlobal;
+     }
+
+     void UpdateFrom(const io_data *data){
+         Image *img=(Image*)data;
+         /* (whole body currently commented out, pending the OpenCV-free DSP port; it computes the
+            reference point and angle in Hough space -- a vertical line has orientation 0 in the
+            global frame but +/-pi/2 in Hough space -- runs dspHoughLines2 or dspHoughLinesTracking,
+            clamps nbLines to MAX_LINES, selects the best line, then:
+
+         //line equation is ax+by+c=0 with a=cos(theta), b=sin(theta) and c=-rho
+         //distance from point xRef,yRef to the line is (a.xRef+b.yRef+c)/sqrt(a*a+b*b)
+         distance=-(cosf(theta)*ptRef.x+sinf(theta)*ptRef.y-rho);
+
+         orientation=theta-thetaRef;
+         if (orientation<-CV_PI/2) { orientation+=CV_PI; distance=-distance; }
+         if (orientation>CV_PI/2)  { orientation-=CV_PI; distance=-distance; }
+
+         and finally fills output: distance, orientation in rad and deg, line_detected flag) */
+         output->SetDataTime(data->DataTime());
+     };
+
+     Matrix *output;
+
+ private:
+     /* SelectBestLine() (also commented out): returns false if no line was found; otherwise
+        averages the detected lines around the strongest one, unwrapping theta by pi and
+        flipping rho's sign as needed, so (rho,theta) stays in a consistent half-plane. */
+     flair::filter::HoughLines *self;
+     OneAxisRotation* rotation;
+     SpinBox *fullRhoStep,*trackingRhoStep,*nbPoints;
+     DoubleSpinBox *fullThetaStep,*trackingThetaStep,*trackingDeltaTheta;
+     bool isTracking;
+     float theta;
+     float distance,orientation;
+     Vector2Df* ptRefGlobal;
+     float thetaRefGlobal;
+ };
+
  namespace flair { namespace filter {

- HoughLines::HoughLines(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name) {
-     ... (old inline implementation: same widgets without the rotation, cvCreateMat linesStorage,
-          output matrix, gray-format check, SetIsReady(true))
- }
+ HoughLines::HoughLines(const IODevice* parent,const LayoutPosition* position,string name,const Vector2Df *inPtRefGlobal,float inThetaRefGlobal) : IODevice(parent,name) {
+     pimpl_=new HoughLines_impl(this,position,name,inPtRefGlobal,inThetaRefGlobal);
+ }

  HoughLines::~HoughLines(void) {
- cvReleaseMat(&linesStorage);
+ delete pimpl_;
  }

  void HoughLines::UpdateFrom(const io_data *data) {
+ pimpl_->UpdateFrom(data);
+ ProcessUpdate(pimpl_->output);
  }

  bool HoughLines::isLineDetected() const {
- if(output->Value(3,0)==1) {
+ if(pimpl_->output->Value(3,0)==1) {
      return true;
  } else {
...
  float HoughLines::GetOrientation(void) const {
- return output->Value(1,0);
+ return pimpl_->output->Value(1,0);
  }

  float HoughLines::GetDistance(void) const {
- return output->Value(0,0);
+ return pimpl_->output->Value(0,0);
  }

  Matrix *HoughLines::Output(void) const {
- return output;
+ return pimpl_->output;
  }
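The geometry buried in the commented-out UpdateFrom is worth spelling out. A Hough line (rho, theta) satisfies x*cos(theta) + y*sin(theta) = rho, i.e. a*x + b*y + c = 0 with a = cos(theta), b = sin(theta), c = -rho; since a*a + b*b = 1, the signed distance from a point to the line needs no normalization. A small self-checking sketch (the numeric values are made up for illustration):

    #include <cmath>
    #include <cstdio>

    int main() {
        // Hough line: x*cos(theta) + y*sin(theta) = rho.
        float rho = 120.0f, theta = 1.0f;        // made-up line
        float xRef = 160.0f, yRef = 120.0f;      // e.g. image center

        // Signed distance, negated as in the r338 code so the sign
        // matches the chosen image convention.
        float distance = -(std::cos(theta)*xRef + std::sin(theta)*yRef - rho);

        // Orientation relative to a reference angle, wrapped into
        // [-pi/2, pi/2]; shifting theta by pi flips the side of the
        // line, hence the accompanying sign change on the distance.
        float thetaRef = float(M_PI)/2;          // vertical line, global frame
        float orientation = theta - thetaRef;
        if (orientation < -float(M_PI)/2) { orientation += float(M_PI); distance = -distance; }
        if (orientation >  float(M_PI)/2) { orientation -= float(M_PI); distance = -distance; }

        std::printf("distance=%f orientation=%f rad\n", distance, orientation);
        return 0;
    }

The wrap-plus-sign-flip is what lets the new ptRefGlobal/thetaRefGlobal constructor parameters report a stable distance and an orientation confined to [-pi/2, pi/2] even as the detected (rho, theta) jumps between equivalent representations.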
trunk/lib/FlairVisionFilter/src/HoughLines.h
r214 → r338

  #include <IODevice.h>
- #include <cv.h>
+ #include <Vector2D.h>

  namespace flair {
      namespace core {
-         class cvimage;
          class Matrix;
      }
      namespace gui {
          class LayoutPosition;
-         class SpinBox;
-         class DoubleSpinBox;
      }
  }

+ class HoughLines_impl;
...
  * \param position position
  * \param name name
+ * \param ptRefGlobal reference point used to compute distance to the line
+ * \param thetaRefGlobal reference angle used to compute orientation in range [-pi/2,pi/2]. Default value is 0 which defines a vertical line
  */
- HoughLines(const core::IODevice* parent,const gui::LayoutPosition* position,std::string name);
+ HoughLines(const core::IODevice* parent,const gui::LayoutPosition* position,std::string name,const core::Vector2Df *ptRefGlobal=NULL,float thetaRefGlobal=0);
...
  private:
  void UpdateFrom(const core::io_data *data);
- gui::SpinBox *fullRhoStep,*trackingRhoStep,*nbPoints;
- gui::DoubleSpinBox *fullThetaStep,*trackingThetaStep,*trackingDeltaTheta;
- bool isTracking,lostLine,initLine,noLine;
- int nbLines;
- float yawMean,prevYawMean;
- float prevRhoMean,rhoMean;
- float prevThetaMean,thetaMean;
- float distance,orientation;
- CvMat* linesStorage;
- core::Matrix *output;
+ class HoughLines_impl *pimpl_;
  };
  } // end namespace filter
trunk/lib/FlairVisionFilter/src/ImgThreshold.cpp
r330 → r338

  #include "ImgThreshold.h"
- #include <cvimage.h>
+ #include "VisionFilter.h"
+ #include <Image.h>
  #include <Layout.h>
  #include <GroupBox.h>
...
  using namespace flair::gui;

+ class ImgThreshold_impl {
+ public:
+     ImgThreshold_impl(flair::filter::ImgThreshold *self,const LayoutPosition* position,string name):output(0) {
+         this->self=self;
+
+         GroupBox* reglages_groupbox=new GroupBox(position,name);
+         threshold=new SpinBox(reglages_groupbox->NewRow(),"threshold:",0,255,1,127);
+
+         Printf("todo: pouvoir reutiliser la meme image en sortie\n");
+         try{
+             Image::Type const &imageType=dynamic_cast<Image::Type const &>(((IODevice*)(self->Parent()))->GetOutputDataType());
+             if(imageType.GetFormat()==Image::Type::Format::Gray) {
+                 output=new Image(self,imageType.GetWidth(),imageType.GetHeight(),imageType.GetFormat(),"threshold");
+                 //inIplImage = (IplImage*)AllocFunction(sizeof(IplImage));
+                 //outIplImage = (IplImage*)AllocFunction(sizeof(IplImage));
+             } else {
+                 self->Err("input image is not gray\n");
+             }
+         } catch(std::bad_cast& bc) {
+             self->Err("io type mismatch\n");
+         }
+     }
+
+     ~ImgThreshold_impl() {
+         //FreeFunction((char*)(inIplImage));
+         //FreeFunction((char*)(outIplImage));
+     }
+
+     void UpdateFrom(const io_data *data){
+         Image *img=(Image*)data;
+         /* (thresholding currently commented out, pending the OpenCV-free DSP port:
+            mutexes, IplImage plumbing, then
+         dspThreshold(inIplImage,outIplImage, threshold->Value(), 255, CV_THRESH_BINARY); ) */
+         output->SetDataTime(data->DataTime());
+     };
+
+     Image *output;
+
+ private:
+     flair::filter::ImgThreshold *self;
+     SpinBox *threshold;
+     //IplImage *inIplImage,*outIplImage;
+ };
+
  namespace flair { namespace filter {

- ImgThreshold::ImgThreshold(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name),output(0) {
-     ... (old inline implementation: same widget and gray-format check, then SetIsReady(true))
- }
+ ImgThreshold::ImgThreshold(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name) {
+     pimpl_=new ImgThreshold_impl(this,position,name);
+ }

  ImgThreshold::~ImgThreshold(void) {
+ delete pimpl_;
  }

- cvimage* ImgThreshold::Output(void) {
-     return output;
+ Image* ImgThreshold::Output(void) {
+     return pimpl_->output;
  }

  void ImgThreshold::UpdateFrom(const io_data *data) {
-     ... (old body: commented-out dspThreshold call, SetDataTime, ProcessUpdate(output))
+     pimpl_->UpdateFrom(data);
+     ProcessUpdate(pimpl_->output);
  }

  DataType const &ImgThreshold::GetOutputDataType() const {
- if(output!=NULL) {
-     return output->GetDataType();
+ if(pimpl_->output!=NULL) {
+     return pimpl_->output->GetDataType();
  } else {
      return dummyType;
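The stubbed-out dspThreshold call performs a plain binary threshold on the gray input. For reference, the operation itself is just the following (a minimal sketch assuming 8-bit pixels; the buffer names are illustrative, not the framework's):

    #include <cstdint>
    #include <cstddef>

    // Binary threshold: out[i] = 255 if in[i] > threshold, else 0
    // (the CV_THRESH_BINARY behavior the commented call requests).
    void binaryThreshold(const uint8_t *in, uint8_t *out,
                         size_t size, uint8_t threshold) {
        for (size_t i = 0; i < size; i++)
            out[i] = (in[i] > threshold) ? 255 : 0;
    }

Until the DSP port lands, the filter therefore forwards an output image whose timestamp advances but whose pixels are never written.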
trunk/lib/FlairVisionFilter/src/ImgThreshold.h
r122 → r338

  #include <IODevice.h>
- #include <cv.h>

  namespace flair {
      namespace core {
-         class cvimage;
+         class Image;
      }
      namespace gui {
          class LayoutPosition;
-         class SpinBox;
      }
  }

+ class ImgThreshold_impl;
...
  * \return the output image
  */
- core::cvimage* Output(void);
+ core::Image* Output(void);

  core::DataType const &GetOutputDataType() const;

  private:
  void UpdateFrom(const core::io_data *data);
- core::cvimage *output;
- gui::SpinBox *threshold;
+ class ImgThreshold_impl *pimpl_;
  };
  } // end namespace filter
trunk/lib/FlairVisionFilter/src/OpticalFlow.cpp
r330 → r338

  #include "OpticalFlow.h"
  #include "OpticalFlowData.h"
- #include <cvimage.h>
+ #include "VisionFilter.h"
+ #include <Image.h>
  #include <Layout.h>
  #include <GroupBox.h>
  #include <SpinBox.h>
- //#include <algorithm>
  #include <OneAxisRotation.h>
- #include <Vector3D.h>
  #include <typeinfo>
...
  using namespace flair::gui;

+ class OpticalFlow_impl {
+ public:
+     OpticalFlow_impl(flair::filter::OpticalFlow *self,const LayoutPosition* position,string name) {
+         this->self=self;
+         is_init=false;
+
+         GroupBox* reglages_groupbox=new GroupBox(position,name);
+         rotation=new OneAxisRotation(reglages_groupbox->NewRow(),"post rotation",OneAxisRotation::PostRotation);
+         max_features=new SpinBox(reglages_groupbox->NewRow(),"max features:",1,65535,1,1);
+
+         try{
+             Image::Type const &imageType=dynamic_cast<Image::Type const &>(((IODevice*)(self->Parent()))->GetOutputDataType());
+             pyr=new Image(self,imageType.GetWidth(),imageType.GetHeight(),Image::Type::Format::Gray);
+             pyr_old=new Image(self,imageType.GetWidth(),imageType.GetHeight(),Image::Type::Format::Gray);
+             output=new flair::filter::OpticalFlowData(self->Parent(),max_features->Value());
+             /* (point/result arrays and IplImage headers via AllocFunction, all commented out
+                pending the OpenCV-free DSP port) */
+         } catch(std::bad_cast& bc) {
+             self->Err("io type mismatch\n");
+             pyr=NULL;
+             pyr_old=NULL;
+         }
+     }
+
+     ~OpticalFlow_impl() {
+         /* (matching FreeFunction calls, commented out) */
+     }
+
+     void UpdateFrom(const io_data *data){
+         Image *img=(Image*)data;
+         Image *img_old=((Image*)data->Prev(1));
+         /* (body currently commented out, pending the OpenCV-free DSP port; per frame it:
+             - reallocates the point/result arrays and resizes output if max_features changed,
+             - on the first pass only builds the initial pyramid (dspPyrDown) and returns,
+             - selects features on the previous image (dspGoodFeaturesToTrack),
+             - builds the current pyramid (dspPyrDown) and tracks the features with
+               dspCalcOpticalFlowPyrLK over LEVEL_PYR levels,
+             - applies the configured rotation to both point sets,
+             - stores pointsA/pointsB, found_feature, feature_error and the count in output,
+             - swaps pyr and pyr_old) */
+         output->SetDataTime(data->DataTime());
+     };
+
+     flair::filter::OpticalFlowData *output;
+
+ private:
+     flair::filter::OpticalFlow *self;
+     SpinBox *max_features;
+     OneAxisRotation* rotation;
+     char *found_feature;
+     unsigned int *feature_error;
+     Image *pyr,*pyr_old;
+     bool is_init;
+ };
+
  namespace flair
  {
...
- OpticalFlow::OpticalFlow(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name)
- {
+ OpticalFlow::OpticalFlow(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name) {
  Printf("optical flow: voir pour faire du multiple output\n"); //for points A and B, found and error
- ... (old inline implementation: cvCreateImage pyramids, cvAlloc'd point arrays, SetIsReady(true))
+ pimpl_=new OpticalFlow_impl(this,position,name);
  }

- OpticalFlow::~OpticalFlow(void)
- {
-     cvReleaseImage(&pyr);
-     cvReleaseImage(&pyr_old);
-     cvFree(&pointsA);
-     cvFree(&pointsB);
-     cvFree(&found_feature);
-     cvFree(&feature_error);
+ OpticalFlow::~OpticalFlow(void) {
+     delete pimpl_;
  }

- void OpticalFlow::UpdateFrom(const io_data *data)
- {
-     ... (old inline UpdateFrom: same pipeline, already fully commented out except SetDataTime and ProcessUpdate)
+ void OpticalFlow::UpdateFrom(const io_data *data) {
+     pimpl_->UpdateFrom(data);
+     ProcessUpdate(pimpl_->output);
+ }
+
+ OpticalFlowData* OpticalFlow::Output(void) {
+     return pimpl_->output;
+ }
+
+ DataType const &OpticalFlow::GetOutputDataType() const {
+     if(pimpl_->output!=NULL) {
+         return pimpl_->output->GetDataType();
+     } else {
+         return dummyType;
+     }
  }
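The commented-out body documents the intended per-frame pipeline: pick features on the previous image, track them into the current one through an image pyramid, then reuse the current pyramid as the next frame's "previous" one. That last double-buffering step is the structural trick worth noting; a minimal sketch (the dsp* calls named in the comments belong to the DSP library and are not reimplemented here):

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Double-buffered pyramids: after each frame the current pyramid becomes
    // the previous one by swapping pointers, so no pixel data is copied.
    struct Pyramid {
        std::vector<unsigned char> data; // downsampled levels, contiguous
    };

    int main() {
        Pyramid a, b;
        Pyramid *pyr = &a, *pyrOld = &b;
        bool isInit = false;
        for (int frame = 0; frame < 3; frame++) {
            if (!isInit) {
                // first pass: only build the initial "previous" pyramid
                // (dspPyrDown(img, pyrOld, LEVEL_PYR) in the changeset)
                isInit = true;
                continue;
            }
            // 1. pick features on the previous image (dspGoodFeaturesToTrack)
            // 2. build the current image's pyramid into *pyr (dspPyrDown)
            // 3. track features between *pyrOld and *pyr (dspCalcOpticalFlowPyrLK)
            std::swap(pyr, pyrOld); // 4. current pyramid becomes the previous one
            std::printf("frame %d tracked\n", frame);
        }
        return 0;
    }

This also explains why UpdateFrom reads data->Prev(1): the filter always works on the pair (previous frame, current frame), so the framework must keep at least one frame of history.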
trunk/lib/FlairVisionFilter/src/OpticalFlow.h
r122 r338 12 12 13 13 #include <IODevice.h> 14 #include <cv.h>15 14 16 namespace flair 17 { 18 namespace core 19 { 20 class cvimage; 21 class OneAxisRotation; 15 namespace flair { 16 namespace gui { 17 class LayoutPosition; 22 18 } 23 namespace gui 24 { 25 class LayoutPosition; 26 class SpinBox; 27 } 28 namespace filter 29 { 19 namespace filter { 30 20 class OpticalFlowData; 31 21 } 32 22 } 33 23 34 namespace flair 35 { 36 namespace f ilter37 {24 class OpticalFlow_impl; 25 26 namespace flair { 27 namespace filter { 38 28 /*! \class OpticalFlow 39 29 * … … 63 53 */ 64 54 ~OpticalFlow(); 55 56 filter::OpticalFlowData* Output(void); 57 58 core::DataType const &GetOutputDataType() const; 65 59 66 60 private: 67 61 void UpdateFrom(const core::io_data *data); 68 OpticalFlowData *output; 69 gui::SpinBox *max_features; 70 core::OneAxisRotation* rotation; 71 72 CvPoint* pointsA; 73 CvPoint* pointsB; 74 char *found_feature; 75 unsigned int *feature_error; 76 IplImage *pyr,*pyr_old; 77 78 bool is_init; 62 63 class OpticalFlow_impl *pimpl_; 64 79 65 }; 80 66 } // end namespace filter -
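The header-side change is the pimpl (pointer-to-implementation) idiom applied throughout this changeset: the OpenCV include and all data members are gone, and only a forward-declared OpticalFlow_impl pointer remains, so code including OpticalFlow.h no longer pulls in <cv.h>. A generic sketch of the pattern with hypothetical names, not the actual flair classes:

// widget.h -- public header, no implementation headers required
class Widget_impl;                  // forward declaration is enough for a pointer
class Widget {
public:
    Widget();
    ~Widget();                      // defined where Widget_impl is complete
private:
    Widget_impl *pimpl_;
};

// widget.cpp -- the only translation unit that sees the details
class Widget_impl { /* heavy dependencies live here */ };
Widget::Widget() : pimpl_(new Widget_impl) {}
Widget::~Widget() { delete pimpl_; }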
trunk/lib/FlairVisionFilter/src/OpticalFlowCompensated.cpp
r318 r338 32 32 using namespace flair::gui; 33 33 34 class OpticalFlowCompensated_impl { 35 public: 36 OpticalFlowCompensated_impl(flair::filter::OpticalFlowCompensated *self,const flair::filter::Ahrs *ahrs, const LayoutPosition* position, string name): ahrs(ahrs), output(NULL) { 37 this->self=self; 38 previousStepsAngularRates=new Vector3Df*[10]; 39 for (int i=0; i<10; i++) previousStepsAngularRates[i]=NULL; 40 previousStepsAngularRatesIndex=0; 41 42 GroupBox* reglages_groupbox=new GroupBox(position,name); 43 //TODO: the gyroDelay is set to compensate for the time difference between image snapshot et gyro measurements 44 //it is equal to the time between 2 images (because optical flow always lags 1 frame) + the time to compute optical flow 45 //here it is approximated by 2 video frames 46 gyroDelay=new SpinBox(reglages_groupbox->NewRow(),"gyro delay (in video frames):",0,10,1,2); 47 gyroGain=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"gyro gain:",0.,500.,10.,2,300.); 48 49 50 } 51 52 ~OpticalFlowCompensated_impl() { 53 delete output; 54 } 55 56 void UpdateFrom(const io_data *data){ 57 flair::filter::OpticalFlowData *input=(flair::filter::OpticalFlowData *)data; 58 if (!output) { //first pass 59 output=new flair::filter::OpticalFlowData(self,input->MaxFeatures(),input->ObjectName()+"_filtered"); 60 previousTime=input->DataTime(); 61 return; 62 } 63 // float kX=320/Euler::ToRadian(70); //TEST: only for default simuCameraGL. fov=70° and image width=320 => k=320*180/(70*pi) 64 // float kY=240/Euler::ToRadian(70); //TEST: only for default simuCameraGL. fov=70° and image height=240 65 float kX=gyroGain->Value(); 66 float kY=gyroGain->Value(); 67 float deltaT=(input->DataTime()-previousTime)/(1000*1000*1000.); 68 previousTime=input->DataTime(); 69 70 int delayedIndex=previousStepsAngularRatesIndex-gyroDelay->Value(); // Ahhh décalage, esprit canal... 
71 if (delayedIndex<0) delayedIndex+=10; 72 73 if (!previousStepsAngularRates[previousStepsAngularRatesIndex]) { 74 previousStepsAngularRates[previousStepsAngularRatesIndex]=new Vector3Df(); 75 } 76 *previousStepsAngularRates[previousStepsAngularRatesIndex++]=ahrs->GetDatas()->GetAngularRates(); 77 78 if (!previousStepsAngularRates[delayedIndex]) return; 79 float rotationFlowX=previousStepsAngularRates[delayedIndex]->y*deltaT*kY; 80 float rotationFlowY=-previousStepsAngularRates[delayedIndex]->x*deltaT*kX; 81 if (previousStepsAngularRatesIndex==10) previousStepsAngularRatesIndex=0; 82 input->GetMutex(); 83 output->GetMutex(); 84 85 for (int i=0; i<input->NbFeatures(); i++) { 86 output->PointsA()[i].x=input->PointsA()[i].x; 87 output->PointsA()[i].y=input->PointsA()[i].y; 88 output->PointsB()[i].x=input->PointsB()[i].x-rotationFlowX; 89 output->PointsB()[i].y=input->PointsB()[i].y-rotationFlowY; 90 } 91 output->SetNbFeatures(input->NbFeatures()); 92 output->SetFoundFeature(input->FoundFeature()); 93 output->SetFeatureError(input->FeatureError()); 94 95 output->ReleaseMutex(); 96 input->ReleaseMutex(); 97 98 output->SetDataTime(input->DataTime()); 99 100 }; 101 102 flair::filter::OpticalFlowData *output; 103 104 private: 105 flair::filter::OpticalFlowCompensated *self; 106 Time previousTime; 107 const flair::filter::Ahrs *ahrs; 108 Vector3Df **previousStepsAngularRates; 109 unsigned int previousStepsAngularRatesIndex; 110 SpinBox *gyroDelay; 111 DoubleSpinBox *gyroGain; 112 }; 113 114 34 115 namespace flair { 35 116 namespace filter { 36 117 37 OpticalFlowCompensated::OpticalFlowCompensated(const OpticalFlow *parent, const Ahrs *ahrs, const LayoutPosition* position, string name) : IODevice(parent, name), ahrs(ahrs), output(NULL) { 38 MatrixDescriptor* desc=new MatrixDescriptor(3,2); 39 desc->SetElementName(0,0,"raw displacement X"); 40 desc->SetElementName(0,1,"raw displacement Y"); 41 desc->SetElementName(1,0,"compensation X"); 42 desc->SetElementName(1,1,"compensation Y"); 43 desc->SetElementName(2,0,"displacement with compensation X"); 44 desc->SetElementName(2,0,"displacement with compensation Y"); 45 firstPointDisplacement=new Matrix(this,desc,floatType,name); 46 delete desc; 47 previousStepsAngularRates=new Vector3Df*[10]; 48 for (int i=0; i<10; i++) previousStepsAngularRates[i]=NULL; 49 previousStepsAngularRatesIndex=0; 50 51 GroupBox* reglages_groupbox=new GroupBox(position,name); 52 //TODO: the gyroDelay is set to compensate for the time difference between image snapshot et gyro measurements 53 //it is equal to the time between 2 images (because optical flow always lags 1 frame) + the time to compute optical flow 54 //here it is approximated by 2 video frames 55 gyroDelay=new SpinBox(reglages_groupbox->NewRow(),"gyro delay (in video frames):",0,10,1,2); 56 gyroGain=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"gyro gain:",0.,500.,10.,2,300.); 118 OpticalFlowCompensated::OpticalFlowCompensated(const OpticalFlow *parent, const Ahrs *ahrs, const LayoutPosition* position, string name) : IODevice(parent, name) { 119 pimpl_=new OpticalFlowCompensated_impl(this,ahrs,position,name); 57 120 58 121 } 59 122 60 123 OpticalFlowCompensated::~OpticalFlowCompensated() { 61 delete output;124 delete pimpl_; 62 125 } 63 126 64 127 void OpticalFlowCompensated::UpdateFrom(const io_data *data) { 65 OpticalFlowData *input=(OpticalFlowData *)data; 66 if (!output) { //first pass 67 output=new OpticalFlowData(this,input->MaxFeatures(),input->ObjectName()+"_filtered"); 68 
previousTime=input->DataTime(); 69 return; 70 } 71 // float kX=320/Euler::ToRadian(70); //TEST: only for default simuCameraGL. fov=70° and image width=320 => k=320*180/(70*pi) 72 // float kY=240/Euler::ToRadian(70); //TEST: only for default simuCameraGL. fov=70° and image height=240 73 float kX=gyroGain->Value(); 74 float kY=gyroGain->Value(); 75 float deltaT=(input->DataTime()-previousTime)/(1000*1000*1000.); 76 previousTime=input->DataTime(); 77 78 int delayedIndex=previousStepsAngularRatesIndex-gyroDelay->Value(); // Ahhh décalage, esprit canal... 79 if (delayedIndex<0) delayedIndex+=10; 80 81 if (!previousStepsAngularRates[previousStepsAngularRatesIndex]) { 82 previousStepsAngularRates[previousStepsAngularRatesIndex]=new Vector3Df(); 83 } 84 *previousStepsAngularRates[previousStepsAngularRatesIndex++]=ahrs->GetDatas()->GetAngularRates(); 85 86 if (!previousStepsAngularRates[delayedIndex]) return; 87 float rotationFlowX=previousStepsAngularRates[delayedIndex]->y*deltaT*kY; 88 float rotationFlowY=-previousStepsAngularRates[delayedIndex]->x*deltaT*kX; 89 if (previousStepsAngularRatesIndex==10) previousStepsAngularRatesIndex=0; 90 input->GetMutex(); 91 output->GetMutex(); 92 93 for (int i=0; i<input->NbFeatures(); i++) { 94 if (!i) { 95 firstPointDisplacement->SetValue(0,0,input->PointsB()[i].x-input->PointsA()[i].x); 96 firstPointDisplacement->SetValue(0,1,input->PointsB()[i].y-input->PointsA()[i].y); 97 firstPointDisplacement->SetValue(1,0,-rotationFlowX); 98 firstPointDisplacement->SetValue(1,1,-rotationFlowY); 99 firstPointDisplacement->SetValue(2,0,input->PointsB()[i].x-input->PointsA()[i].x-rotationFlowX); 100 firstPointDisplacement->SetValue(2,1,input->PointsB()[i].y-input->PointsA()[i].y-rotationFlowY); 101 } 102 output->PointsA()[i].x=input->PointsA()[i].x; 103 output->PointsA()[i].y=input->PointsA()[i].y; 104 output->PointsB()[i].x=input->PointsB()[i].x-rotationFlowX; 105 output->PointsB()[i].y=input->PointsB()[i].y-rotationFlowY; 106 } 107 output->SetNbFeatures(input->NbFeatures()); 108 output->SetFoundFeature(input->FoundFeature()); 109 output->SetFeatureError(input->FeatureError()); 110 111 output->ReleaseMutex(); 112 input->ReleaseMutex(); 113 114 output->SetDataTime(input->DataTime()); 115 ProcessUpdate(output); 128 pimpl_->UpdateFrom(data); 129 ProcessUpdate(pimpl_->output); 116 130 } 117 131 118 Matrix *OpticalFlowCompensated::GetFirstPointDisplacement() const {119 return firstPointDisplacement;120 }121 132 122 133 } // end namespace filter -
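OpticalFlowCompensated_impl keeps the last ten angular-rate samples and reads back gyroDelay video frames into the past, then subtracts the rotation-induced flow (rate.y*deltaT*gain on x, -rate.x*deltaT*gain on y) from every tracked point. The delayed lookup reduces to ring-buffer indexing; a condensed sketch, assuming the fixed depth of 10 used in the diff:

#include <cstddef>

// Ring buffer mirroring the delayed-gyro lookup: the write index wraps at N,
// the read index is write - delay, modulo N.
template<typename T, std::size_t N = 10>
class DelayLine {
public:
    void push(const T &v) { buf[head] = v; head = (head + 1) % N; }
    // sample recorded 'delay' pushes ago (0 = most recent)
    const T &delayed(std::size_t delay) const {
        return buf[(head + N - 1 - (delay % N)) % N];
    }
private:
    T buf[N] = {};
    std::size_t head = 0;
};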
trunk/lib/FlairVisionFilter/src/OpticalFlowCompensated.h
r274 r338 16 16 #include <IODevice.h> 17 17 #include <Object.h> 18 #include <Vector3D.h>19 18 20 19 namespace flair { 21 namespace core {22 class io_data;23 class Matrix;24 }25 20 namespace gui { 26 21 class LayoutPosition; 27 class SpinBox;28 class DoubleSpinBox;29 22 } 30 23 namespace filter { 31 24 class Ahrs; 32 25 class OpticalFlow; 33 class OpticalFlowData;34 26 } 35 27 } 28 29 class OpticalFlowCompensated_impl; 36 30 37 31 namespace flair { namespace filter { … … 61 55 ~OpticalFlowCompensated(); 62 56 63 void UpdateFrom(const core::io_data *data);64 65 core::Matrix *GetFirstPointDisplacement() const;66 57 private: 67 OpticalFlowData *output; 68 core::Time previousTime; 69 const Ahrs *ahrs; 70 core::Matrix *firstPointDisplacement; 71 core::Vector3Df **previousStepsAngularRates; 72 unsigned int previousStepsAngularRatesIndex; 73 gui::SpinBox *gyroDelay; 74 gui::DoubleSpinBox *gyroGain; 58 void UpdateFrom(const core::io_data *data); 59 OpticalFlowCompensated_impl *pimpl_; 60 75 61 }; 76 62 -
trunk/lib/FlairVisionFilter/src/OpticalFlowData.cpp
r252 r338 12 12 13 13 #include "OpticalFlowData.h" 14 #include "VisionFilter.h" 15 #include <string.h> 14 16 15 17 using std::string; 16 18 17 namespace flair 18 { 19 namespace filter 20 { 19 class OpticalFlowData_impl { 20 public: 21 OpticalFlowData_impl(flair::filter::OpticalFlowData* self,uint32_t max_features) { 22 this->self=self; 23 this->max_features=max_features; 24 nb_features=0; 25 pointsA=(IntPoint*)malloc(max_features*sizeof(IntPoint)); 26 pointsB=(FloatPoint*)malloc(max_features*sizeof(FloatPoint)); 21 27 22 OpticalFlowData::OpticalFlowData(const Object* parent,uint32_t max_features,string name,uint32_t n): io_data(parent,name,n) 23 { 24 this->max_features=max_features; 25 nb_features=0; 28 found_features=(char*)malloc(max_features*sizeof(char)); 29 features_error=(uint32_t*)malloc(max_features*sizeof(uint32_t)); 30 } 26 31 27 pointsA=(CvPoint *)cvAlloc(max_features*sizeof(CvPoint)); 28 pointsB=(CvPoint2D32f *)cvAlloc(max_features*sizeof(CvPoint2D32f)); 32 ~OpticalFlowData_impl() { 33 free((char*)pointsA); 34 free((char*)pointsB); 29 35 30 found_features=(char *)cvAlloc(max_features*sizeof(char)); 31 features_error=(uint32_t*)cvAlloc(max_features*sizeof(uint32_t)); 36 free((char*)found_features); 37 free((char*)features_error); 38 } 39 40 void Resize(uint32_t value) { 41 IntPoint *new_pointsA; 42 FloatPoint *new_pointsB; 43 char *new_found_features; 44 uint32_t *new_features_error; 45 46 new_pointsA=(IntPoint*)malloc(value*sizeof(IntPoint)); 47 new_pointsB=(FloatPoint*)malloc(value*sizeof(FloatPoint)); 48 new_found_features=(char*)malloc(value*sizeof(char)); 49 new_features_error=(uint32_t*)malloc(value*sizeof(uint32_t)); 50 51 self->GetMutex(); 52 if(value>max_features) { 53 memcpy(new_pointsA,pointsA,max_features*sizeof(IntPoint)); 54 memcpy(new_pointsB,pointsB,max_features*sizeof(FloatPoint)); 55 memcpy(new_found_features,found_features,max_features*sizeof(char)); 56 memcpy(new_features_error,features_error,max_features*sizeof(uint32_t)); 57 } else { 58 memcpy(new_pointsA,pointsA,value*sizeof(IntPoint)); 59 memcpy(new_pointsB,pointsB,value*sizeof(FloatPoint)); 60 memcpy(new_found_features,found_features,value*sizeof(char)); 61 memcpy(new_features_error,features_error,value*sizeof(uint32_t)); 62 if(nb_features>value) nb_features=value; //si nb_features est le nombre de point qui ont effectivement une correspondande, ça me parait louche. 
Sauf si les points sont classés et que ceux qui ont une correpondance sont toujours au début 63 } 64 max_features=value; 65 self->ReleaseMutex(); 66 67 free((char*)pointsA); 68 free((char*)pointsB); 69 free((char*)found_features); 70 free((char*)features_error); 71 72 pointsA=new_pointsA; 73 pointsB=new_pointsB; 74 found_features=new_found_features; 75 features_error=new_features_error; 76 } 77 78 uint32_t max_features; 79 uint32_t nb_features; 80 IntPoint* pointsA; 81 FloatPoint* pointsB; 82 char *found_features; 83 uint32_t *features_error; 84 85 private: 86 flair::filter::OpticalFlowData *self; 87 }; 88 89 namespace flair { 90 namespace filter { 91 92 OpticalFlowData::OpticalFlowData(const Object* parent,uint32_t max_features,string name,uint32_t n): io_data(parent,name,n) { 93 pimpl_=new OpticalFlowData_impl(this,max_features); 32 94 } 33 95 34 OpticalFlowData::~OpticalFlowData() 35 { 36 cvFree(&pointsA); 37 cvFree(&pointsB); 38 39 cvFree(&found_features); 40 cvFree(&features_error); 96 OpticalFlowData::~OpticalFlowData() { 97 delete pimpl_; 41 98 } 42 99 43 CvPoint* OpticalFlowData::PointsA(void) const 44 { 45 return pointsA; 100 IntPoint* OpticalFlowData::PointsA(void) const { 101 return pimpl_->pointsA; 46 102 } 47 103 48 CvPoint2D32f* OpticalFlowData::PointsB(void) const 49 { 50 return pointsB; 104 FloatPoint* OpticalFlowData::PointsB(void) const { 105 return pimpl_->pointsB; 51 106 } 52 107 53 char *OpticalFlowData::FoundFeature(void) const 54 { 55 return found_features; 108 char *OpticalFlowData::FoundFeature(void) const { 109 return pimpl_->found_features; 56 110 } 57 111 58 uint32_t *OpticalFlowData::FeatureError(void) const 59 { 60 return features_error; 112 uint32_t *OpticalFlowData::FeatureError(void) const { 113 return pimpl_->features_error; 61 114 } 62 115 63 116 // value is new max_features value 64 117 void OpticalFlowData::Resize(uint32_t value) { 65 CvPoint *new_pointsA; 66 CvPoint2D32f *new_pointsB; 67 char *new_found_features; 68 uint32_t *new_features_error; 69 70 new_pointsA=(CvPoint *)cvAlloc(value*sizeof(CvPoint)); 71 new_pointsB=(CvPoint2D32f *)cvAlloc(value*sizeof(CvPoint2D32f)); 72 new_found_features=(char *)cvAlloc(value*sizeof(char)); 73 new_features_error=(uint32_t *)cvAlloc(value*sizeof(uint32_t)); 74 75 GetMutex(); 76 if(value>max_features) 77 { 78 memcpy(new_pointsA,pointsA,max_features*sizeof(CvPoint)); 79 memcpy(new_pointsB,pointsB,max_features*sizeof(CvPoint2D32f)); 80 memcpy(new_found_features,found_features,max_features*sizeof(char)); 81 memcpy(new_features_error,features_error,max_features*sizeof(uint32_t)); 82 } 83 else 84 { 85 memcpy(new_pointsA,pointsA,value*sizeof(CvPoint)); 86 memcpy(new_pointsB,pointsB,value*sizeof(CvPoint2D32f)); 87 memcpy(new_found_features,found_features,value*sizeof(char)); 88 memcpy(new_features_error,features_error,value*sizeof(uint32_t)); 89 if(nb_features>value) nb_features=value; //si nb_features est le nombre de point qui ont effectivement une correspondande, ça me parait louche. 
Sauf si les points sont classés et que ceux qui ont une correpondance sont toujours au début 90 } 91 max_features=value; 92 ReleaseMutex(); 93 94 cvFree(&pointsA); 95 cvFree(&pointsB); 96 cvFree(&found_features); 97 cvFree(&features_error); 98 99 pointsA=new_pointsA; 100 pointsB=new_pointsB; 101 found_features=new_found_features; 102 features_error=new_features_error; 118 pimpl_->Resize(value); 103 119 } 104 120 105 void OpticalFlowData::RawRead(char* dst) const 106 { 121 void OpticalFlowData::RawRead(char* dst) const { 107 122 Warn("non implementé\n"); 108 123 } 109 124 110 void OpticalFlowData::SetPointsA(const CvPoint* points) 111 { 125 void OpticalFlowData::SetPointsA(const IntPoint* points) { 112 126 GetMutex(); 113 memcpy(p ointsA,points,max_features*sizeof(CvPoint));127 memcpy(pimpl_->pointsA,points,pimpl_->max_features*sizeof(IntPoint)); 114 128 ReleaseMutex(); 115 129 } 116 130 117 void OpticalFlowData::SetPointsB(const CvPoint2D32f* points) 118 { 131 void OpticalFlowData::SetPointsB(const FloatPoint* points) { 119 132 GetMutex(); 120 memcpy(p ointsB,points,max_features*sizeof(CvPoint2D32f));133 memcpy(pimpl_->pointsB,points,pimpl_->max_features*sizeof(FloatPoint)); 121 134 ReleaseMutex(); 122 135 } 123 136 124 void OpticalFlowData::SetFoundFeature(const char *found_features) 125 { 137 void OpticalFlowData::SetFoundFeature(const char *found_features) { 126 138 GetMutex(); 127 memcpy( this->found_features,found_features,max_features*sizeof(char));139 memcpy(pimpl_->found_features,found_features,pimpl_->max_features*sizeof(char)); 128 140 ReleaseMutex(); 129 141 } 130 142 131 void OpticalFlowData::SetFeatureError(const uint32_t *features_error) 132 { 143 void OpticalFlowData::SetFeatureError(const uint32_t *features_error) { 133 144 GetMutex(); 134 memcpy( this->features_error,features_error,max_features*sizeof(uint32_t));145 memcpy(pimpl_->features_error,features_error,pimpl_->max_features*sizeof(uint32_t)); 135 146 ReleaseMutex(); 136 147 } 137 148 138 uint32_t OpticalFlowData::MaxFeatures(void) const 139 { 140 return max_features; 149 uint32_t OpticalFlowData::MaxFeatures(void) const { 150 return pimpl_->max_features; 141 151 } 142 152 143 uint32_t OpticalFlowData::NbFeatures(void) const 144 { 145 return nb_features; 153 uint32_t OpticalFlowData::NbFeatures(void) const { 154 return pimpl_->nb_features; 146 155 } 147 156 148 void OpticalFlowData::SetNbFeatures(uint32_t value) 149 { 157 void OpticalFlowData::SetNbFeatures(uint32_t value) { 150 158 GetMutex(); 151 nb_features=value;159 pimpl_->nb_features=value; 152 160 ReleaseMutex(); 153 161 } -
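OpticalFlowData::Resize (now delegated to the impl class) reallocates the four parallel arrays, copies min(old, new) elements, and clamps nb_features when shrinking; the French comment, kept verbatim in the diff, reads in English: "if nb_features is the number of points that actually have a match, this looks suspicious to me; unless the points are sorted and the matched ones always come first". The copy logic boils down to this helper (a sketch; the real code holds the object's mutex around the copy and relies on the element types being trivially copyable):

#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <cstring>

// Grow or shrink a malloc'ed array, keeping min(oldCount, newCount) elements.
template<typename T>
static T *resizeArray(T *old, std::size_t oldCount, std::size_t newCount) {
    T *fresh = static_cast<T *>(std::malloc(newCount * sizeof(T)));
    std::memcpy(fresh, old, std::min(oldCount, newCount) * sizeof(T));
    std::free(old);
    return fresh;
}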
trunk/lib/FlairVisionFilter/src/OpticalFlowData.h
r252 r338 10 10 #define OPTICALFLOWDATA_H 11 11 12 #include <cxcore.h>13 12 #include <io_data.h> 13 14 //TODO: use flair::core::Vector2D 15 typedef struct IntPoint { 16 int x; 17 int y; 18 } 19 IntPoint; 20 21 typedef struct FloatPoint { 22 float x; 23 float y; 24 } 25 FloatPoint; 26 27 class OpticalFlowData_impl; 14 28 15 29 namespace flair { namespace filter { … … 53 67 * 54 68 */ 55 CvPoint* PointsA(void) const;69 IntPoint* PointsA(void) const; 56 70 57 71 /*! … … 59 73 * 60 74 */ 61 CvPoint2D32f* PointsB(void) const;75 FloatPoint* PointsB(void) const; 62 76 63 77 /*! … … 79 93 * \param points points 80 94 */ 81 void SetPointsA(const CvPoint *points);95 void SetPointsA(const IntPoint *points); 82 96 83 97 /*! … … 86 100 * \param points points 87 101 */ 88 void SetPointsB(const CvPoint2D32f*points);102 void SetPointsB(const FloatPoint *points); 89 103 90 104 /*! … … 139 153 */ 140 154 void RawRead(char *dst) const; 141 155 142 156 private: 143 uint32_t max_features;144 uint32_t nb_features;145 CvPoint* pointsA;146 CvPoint2D32f* pointsB;147 char *found_features;148 uint32_t *features_error;149 157 Type dataType; 158 OpticalFlowData_impl *pimpl_; 150 159 }; 151 160 } // end namespace filter -
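IntPoint and FloatPoint are plain structs standing in for OpenCV's CvPoint and CvPoint2D32f so the memcpy-based setters keep working; the header's TODO notes they may later become flair::core::Vector2D. If that swap happens, the memcpy assumption is worth guarding; a hypothetical check, not present in the source:

#include <type_traits>

typedef struct IntPoint { int x; int y; } IntPoint;
typedef struct FloatPoint { float x; float y; } FloatPoint;

// The SetPointsA/SetPointsB setters copy these with memcpy, which is only
// safe while the types stay trivially copyable.
static_assert(std::is_trivially_copyable<IntPoint>::value, "memcpy-safe");
static_assert(std::is_trivially_copyable<FloatPoint>::value, "memcpy-safe");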
trunk/lib/FlairVisionFilter/src/OpticalFlowSpeed.cpp
r318 r338 26 26 using namespace flair::gui; 27 27 28 class OpticalFlowSpeed_impl { 29 public: 30 OpticalFlowSpeed_impl(flair::filter::OpticalFlowSpeed *self,const LayoutPosition* position,string name):output(0) { 31 this->self=self; 32 MatrixDescriptor* desc=new MatrixDescriptor(2,1); 33 desc->SetElementName(0,0,"vx"); 34 desc->SetElementName(1,0,"vy"); 35 output=new Matrix(self,desc,floatType,name); 36 delete desc; 37 38 self->AddDataToLog(output); 39 40 GroupBox* reglages_groupbox=new GroupBox(position,name); 41 quality=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"optical flow quality:",0.,100.,1.,1,5.); 42 weightedAverage=new CheckBox(reglages_groupbox->LastRowLastCol(),"Weighted average", true); 43 timeMultiplication=new CheckBox(reglages_groupbox->LastRowLastCol(),"Time multiplication", true); 44 45 46 } 47 48 ~OpticalFlowSpeed_impl() { 49 50 } 51 52 void UpdateFrom(const io_data *data){ 53 flair::filter::OpticalFlowData *input=(flair::filter::OpticalFlowData*)data; 54 float deplx,deply; 55 float nb_depl=0; 56 57 deplx=0; 58 deply=0; 59 60 //error is 0 if perfect match and 7x7x255x255 at worst 61 float qualityThreshold=quality->Value()/100.*7*7*255*255; 62 input->GetMutex(); 63 int nbUsedPoints=0; 64 for(int i=0;i<input->NbFeatures();i++) { 65 //if point is found in both images and quality is sufficient 66 if((input->FoundFeature()[i]!=0)&&(input->FeatureError()[i]<qualityThreshold)) { 67 nbUsedPoints++; 68 float qualityFactor=1.0; 69 if (weightedAverage->Value()) { 70 //displacement is weigthed by quality 71 qualityFactor/=(1+input->FeatureError()[i]); 72 } 73 deplx+=(input->PointsB()[i].x-input->PointsA()[i].x)*qualityFactor; 74 deply+=(input->PointsB()[i].y-input->PointsA()[i].y)*qualityFactor; 75 nb_depl+=qualityFactor; 76 } 77 } 78 input->ReleaseMutex(); 79 float deltaT; 80 if (timeMultiplication->Value()) deltaT=(float)(data->DataTime()-output->DataTime())/(1000.*1000.*1000.); 81 else deltaT=1.; 82 output->SetDataTime(data->DataTime()); 83 84 if(nb_depl!=0) { 85 //Printf("Nombre de points=%d/%d (nb_depl=%f,pondération=%d), deltaT=%f\n",nbUsedPoints,input->NbFeatures(),nb_depl,weightedAverage->Value(),deltaT); 86 output->SetValue(0,0,deplx/(nb_depl*deltaT)); 87 output->SetValue(1,0,deply/(nb_depl*deltaT)); 88 } 89 }; 90 91 Matrix *output; 92 93 private: 94 flair::filter::OpticalFlowSpeed *self; 95 DoubleSpinBox *quality; 96 CheckBox *weightedAverage; 97 CheckBox *timeMultiplication; 98 }; 99 28 100 namespace flair { namespace filter { 29 101 30 102 OpticalFlowSpeed::OpticalFlowSpeed(const IODevice* parent, const LayoutPosition* position,string name) : IODevice(parent,name) { 31 MatrixDescriptor* desc=new MatrixDescriptor(2,1); 32 desc->SetElementName(0,0,"vx"); 33 desc->SetElementName(1,0,"vy"); 34 output=new Matrix(this,desc,floatType,name); 35 delete desc; 36 37 AddDataToLog(output); 38 39 GroupBox* reglages_groupbox=new GroupBox(position,name); 40 quality=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"optical flow quality:",0.,100.,1.,1,5.); 41 weightedAverage=new CheckBox(reglages_groupbox->LastRowLastCol(),"Weighted average", true); 42 timeMultiplication=new CheckBox(reglages_groupbox->LastRowLastCol(),"Time multiplication", true); 103 pimpl_=new OpticalFlowSpeed_impl(this,position,name); 43 104 } 44 105 45 OpticalFlowSpeed::~OpticalFlowSpeed(void) { } 106 OpticalFlowSpeed::~OpticalFlowSpeed(void) { 107 delete pimpl_; 108 } 46 109 47 110 void OpticalFlowSpeed::UpdateFrom(const io_data *data) { 48 OpticalFlowData *input=(OpticalFlowData*)data; 49 float 
deplx,deply; 50 float nb_depl=0; 51 52 deplx=0; 53 deply=0; 54 55 //error is 0 if perfect match and 7x7x255x255 at worst 56 float qualityThreshold=quality->Value()/100.*7*7*255*255; 57 input->GetMutex(); 58 int nbUsedPoints=0; 59 for(int i=0;i<input->NbFeatures();i++) { 60 //if point is found in both images and quality is sufficient 61 if((input->FoundFeature()[i]!=0)&&(input->FeatureError()[i]<qualityThreshold)) { 62 nbUsedPoints++; 63 float qualityFactor=1.0; 64 if (weightedAverage->Value()) { 65 //displacement is weigthed by quality 66 qualityFactor/=(1+input->FeatureError()[i]); 67 } 68 deplx+=(input->PointsB()[i].x-input->PointsA()[i].x)*qualityFactor; 69 deply+=(input->PointsB()[i].y-input->PointsA()[i].y)*qualityFactor; 70 nb_depl+=qualityFactor; 71 } 72 } 73 input->ReleaseMutex(); 74 float deltaT; 75 if (timeMultiplication->Value()) deltaT=(float)(data->DataTime()-output->DataTime())/(1000.*1000.*1000.); 76 else deltaT=1.; 77 output->SetDataTime(data->DataTime()); 78 79 if(nb_depl!=0) { 80 //Printf("Nombre de points=%d/%d (nb_depl=%f,pondération=%d), deltaT=%f\n",nbUsedPoints,input->NbFeatures(),nb_depl,weightedAverage->Value(),deltaT); 81 output->SetValue(0,0,deplx/(nb_depl*deltaT)); 82 output->SetValue(1,0,deply/(nb_depl*deltaT)); 83 } 84 // output->SetDataTime(data->DataTime()); 85 ProcessUpdate(output); 111 pimpl_->UpdateFrom(data); 112 ProcessUpdate(pimpl_->output); 86 113 } 87 114 88 115 float OpticalFlowSpeed::Vx(void) const { 89 return output->Value(0,0);116 return pimpl_->output->Value(0,0); 90 117 } 91 118 92 119 float OpticalFlowSpeed::Vy(void) const { 93 return output->Value(1,0);120 return pimpl_->output->Value(1,0); 94 121 } 95 122 96 123 core::Matrix *OpticalFlowSpeed::Output() const { 97 return output;124 return pimpl_->output; 98 125 } 99 126 } // end namespace filter -
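OpticalFlowSpeed averages per-feature displacements into a velocity: a feature counts only if it was matched and its error is under quality% of the worst-case 7x7 match error on 8-bit images (hence 7*7*255*255), each displacement is optionally weighted by 1/(1+error), and the sum is divided by the accumulated weight and the frame interval. The same computation in a standalone form, with assumed input types:

#include <cstddef>
#include <cstdint>

struct Flow { float ax, ay, bx, by; uint32_t error; bool found; };

// Average pixel velocity from matched features, mirroring the threshold and
// the 1/(1+error) weighting of OpticalFlowSpeed (sketch with assumed inputs).
static bool averageVelocity(const Flow *f, std::size_t n, float qualityPercent,
                            bool weighted, float deltaT, float &vx, float &vy) {
    const float worst = 7.f * 7.f * 255.f * 255.f;    // worst-case matching error
    const float threshold = qualityPercent / 100.f * worst;
    float sx = 0.f, sy = 0.f, weight = 0.f;
    for (std::size_t i = 0; i < n; i++) {
        if (!f[i].found || f[i].error >= threshold) continue;
        const float w = weighted ? 1.f / (1.f + f[i].error) : 1.f;
        sx += (f[i].bx - f[i].ax) * w;
        sy += (f[i].by - f[i].ay) * w;
        weight += w;
    }
    if (weight == 0.f || deltaT <= 0.f) return false; // no usable feature
    vx = sx / (weight * deltaT);
    vy = sy / (weight * deltaT);
    return true;
}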
trunk/lib/FlairVisionFilter/src/OpticalFlowSpeed.h
r274 r338 21 21 namespace gui { 22 22 class LayoutPosition; 23 class SpinBox;24 class DoubleSpinBox;25 class CheckBox;26 23 } 27 24 } 25 26 class OpticalFlowSpeed_impl; 28 27 29 28 namespace flair … … 88 87 */ 89 88 void UpdateFrom(const core::io_data *data); 90 91 core::Matrix *output; 92 gui::DoubleSpinBox *quality; 93 gui::CheckBox *weightedAverage; 94 gui::CheckBox *timeMultiplication; 89 90 OpticalFlowSpeed_impl *pimpl_; 95 91 }; 96 92 -
trunk/lib/FlairVisionFilter/src/Sobel.cpp
r330 r338 13 13 14 14 #include "Sobel.h" 15 #include <cvimage.h> 15 #include "VisionFilter.h" 16 #include <Image.h> 16 17 #include <Layout.h> 17 18 #include <GroupBox.h> … … 23 24 using namespace flair::gui; 24 25 26 class Sobel_impl { 27 public: 28 Sobel_impl(flair::filter::Sobel *self,const LayoutPosition* position,string name):output(0) { 29 this->self=self; 30 31 GroupBox* reglages_groupbox=new GroupBox(position,name); 32 dx=new SpinBox(reglages_groupbox->NewRow(),"dx:",0,1,1,1); 33 dy=new SpinBox(reglages_groupbox->NewRow(),"dy:",0,1,1,1); 34 35 Printf("TODO: IODevice doit faire un check de GetInputDataType et GetOutputDataType\n"); 36 //Image devrait accepter un type dans son constructeur pour construire un type identique 37 try{ 38 Image::Type const &imageType=dynamic_cast<Image::Type const &>(((IODevice*)(self->Parent()))->GetOutputDataType()); 39 if(imageType.GetFormat()==Image::Type::Format::Gray) { 40 output=new Image(self,imageType.GetWidth(),imageType.GetHeight(),imageType.GetFormat(),"sobel"); 41 //inIplImage = (IplImage*)AllocFunction(sizeof(IplImage)); 42 //outIplImage = (IplImage*)AllocFunction(sizeof(IplImage)); 43 } else { 44 self->Err("input image is not gray\n"); 45 } 46 47 } catch(std::bad_cast& bc) { 48 self->Err("io type mismatch\n"); 49 } 50 } 51 52 ~Sobel_impl() { 53 //FreeFunction((char*)(inIplImage)); 54 //FreeFunction((char*)(outIplImage)); 55 } 56 57 void UpdateFrom(const io_data *data){ 58 Image *image=(Image*)data; 59 /* 60 data->GetMutex(); 61 inIplImage->width=image->GetDataType().GetWidth(); 62 inIplImage->height=image->GetDataType().GetHeight(); 63 inIplImage->imageData=image->buffer; 64 inIplImage->imageSize=image->GetDataType().GetSize(); 65 66 output->GetMutex(); 67 outIplImage->width=output->GetDataType().GetWidth(); 68 outIplImage->height=output->GetDataType().GetHeight(); 69 outIplImage->imageData=output->buffer; 70 outIplImage->imageSize=output->GetDataType().GetSize(); 71 72 dspSobel(inIplImage,outIplImage,dx->Value(),dy->Value()); 73 output->ReleaseMutex(); 74 data->ReleaseMutex(); 75 */ 76 output->SetDataTime(data->DataTime()); 77 78 }; 79 80 Image *output; 81 82 private: 83 flair::filter::Sobel *self; 84 SpinBox *dx; 85 SpinBox *dy; 86 //IplImage *inIplImage,*outIplImage; 87 }; 88 25 89 namespace flair { namespace filter { 26 90 27 Sobel::Sobel(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name),output(0) { 28 GroupBox* reglages_groupbox=new GroupBox(position,name); 29 dx=new SpinBox(reglages_groupbox->NewRow(),"dx:",0,1,1,1); 30 dy=new SpinBox(reglages_groupbox->NewRow(),"dy:",0,1,1,1); 31 32 Printf("TODO: IODevice doit faire un check de GetInputDataType et GetOutputDataType\n"); 33 //cvimage devrait accepter un type dans son constructeur pour construire un type identique 34 try{ 35 cvimage::Type const &imageType=dynamic_cast<cvimage::Type const &>(parent->GetOutputDataType()); 36 if(imageType.GetFormat()==cvimage::Type::Format::Gray) { 37 output=new cvimage(this,imageType.GetWidth(),imageType.GetHeight(),imageType.GetFormat(),"sobel"); 38 } else { 39 Err("input image is not gray\n"); 40 return; 41 } 42 43 } catch(std::bad_cast& bc) { 44 Err("io type mismatch\n"); 45 return; 46 } 47 SetIsReady(true); 91 Sobel::Sobel(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name) { 92 pimpl_=new Sobel_impl(this,position,name); 48 93 } 49 94 50 95 Sobel::~Sobel(void) { 96 delete pimpl_; 51 97 } 52 98 53 cvimage* Sobel::Output(void) {54 return output;99 Image* Sobel::Output(void) 
{ 100 return pimpl_->output; 55 101 } 56 102 57 void Sobel::UpdateFrom(const io_data *data) {/* 58 cvimage *cvImage=(cvimage*)data; 59 IplImage *gimg=cvImage->img; 60 61 data->GetMutex(); 62 output->GetMutex(); 63 dspSobel(gimg,output->img,dx->Value(),dy->Value()); 64 output->ReleaseMutex(); 65 data->ReleaseMutex(); 66 */ 67 output->SetDataTime(data->DataTime()); 68 ProcessUpdate(output); 103 void Sobel::UpdateFrom(const io_data *data) { 104 pimpl_->UpdateFrom(data); 105 ProcessUpdate(pimpl_->output); 69 106 } 70 107 71 108 DataType const &Sobel::GetOutputDataType() const { 72 if( output!=NULL) {73 return output->GetDataType();109 if(pimpl_->output!=NULL) { 110 return pimpl_->output->GetDataType(); 74 111 } else { 75 112 return dummyType; -
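Like the other filters, Sobel_impl probes the upstream output type with a dynamic_cast and refuses anything but grayscale input; the dspSobel call itself stays commented out pending the OpenCV-free port. The acceptance test follows this pattern (stand-in types, not the real flair classes):

#include <cstdio>
#include <typeinfo>

struct DataType { virtual ~DataType() {} };
struct ImageType : DataType { enum class Format { Gray, RGB } format; };

// Probe the upstream output type, accept only what the filter can process.
static bool acceptsGray(const DataType &upstream) {
    try {
        const ImageType &img = dynamic_cast<const ImageType &>(upstream);
        return img.format == ImageType::Format::Gray;
    } catch (std::bad_cast &) {
        std::printf("io type mismatch\n");
        return false;
    }
}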
trunk/lib/FlairVisionFilter/src/Sobel.h
r122 r338 12 12 13 13 #include <IODevice.h> 14 #include <cv.h> 14 15 15 16 16 namespace flair { 17 17 namespace core { 18 class cvimage;18 class Image; 19 19 } 20 20 namespace gui { 21 21 class LayoutPosition; 22 class SpinBox;23 22 } 24 23 } 24 25 class Sobel_impl; 25 26 26 27 namespace flair { namespace filter { … … 58 59 * \return the output image 59 60 */ 60 core:: cvimage* Output(void);61 core::Image* Output(void); 61 62 62 63 core::DataType const &GetOutputDataType() const; … … 64 65 private: 65 66 void UpdateFrom(const core::io_data *data); 66 core::cvimage *output; 67 gui::SpinBox *dx; 68 gui::SpinBox *dy; 67 Sobel_impl *pimpl_; 69 68 }; 70 69 } // end namespace filter -
trunk/lib/FlairVisionFilter/src/VisionFilter.cpp
r124 r338 1 1 #include "VisionFilter.h" 2 2 #include "compile_info.h" 3 #include <highgui.h>4 3 5 4 static void constructor() __attribute__((constructor)); … … 9 8 } 10 9 11 void saveToJpeg(IplImage* src_img,std::string filename,PictureFormat_t input_format,PictureFormat_t output_format,unsigned char compression_level) { 12 if(!cvSaveImage(filename.c_str(),src_img)) printf("Could not save.\n"); 10 void saveToJpeg(flair::core::Image* src_img,std::string filename,PictureFormat_t input_format,PictureFormat_t output_format,unsigned char compression_level) { 11 printf("saveToJpeg todo\n"); 12 //if(!cvSaveImage(filename.c_str(),src_img)) printf("Could not save.\n"); 13 13 } 14 14 … … 21 21 22 22 } 23 24 char* AllocFunction(ssize_t size) { 25 return (char*)malloc(size); 26 } 27 28 void FreeFunction(char* buffer) { 29 free(buffer); 30 } -
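AllocFunction and FreeFunction give the vision filters a single allocation seam: this build backs them with malloc/free, while the header notes that the armv7a build is meant to use CMEM so buffers can be shared with the DSP. A sketch of how the seam might be switched at build time; USE_CMEM and the commented calls are placeholders, not the actual CMEM API:

#include <cstdlib>
#include <sys/types.h>

// Allocation seam: heap by default, contiguous DSP-shared memory when a
// (hypothetical) USE_CMEM flag selects the CMEM-backed variant.
char *AllocFunction(ssize_t size) {
#ifdef USE_CMEM
    // return cmemAlloc(size);   // placeholder for the real CMEM call
    return nullptr;
#else
    return static_cast<char *>(std::malloc(size));
#endif
}

void FreeFunction(char *buffer) {
#ifdef USE_CMEM
    // cmemFree(buffer);         // placeholder
#else
    std::free(buffer);
#endif
}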
trunk/lib/FlairVisionFilter/src/VisionFilter.h
r125 r338 1 1 #include <string> 2 #include <cxtypes.h> 2 3 namespace flair { 4 namespace core { 5 class Image; 6 } 7 } 8 3 9 4 10 /*! … … 19 25 } ; 20 26 21 void saveToJpeg(IplImage* src_img,std::string filename,PictureFormat_t input_format,PictureFormat_t output_format,unsigned char compression_level=95); 27 void saveToJpeg(flair::core::Image* src_img,std::string filename,PictureFormat_t input_format,PictureFormat_t output_format,unsigned char compression_level=95); 28 29 //for amrv7a, these functions use CMEM 30 char* AllocFunction(ssize_t size); 31 void FreeFunction(char* buffer);