Changeset 338 in flair-src


Ignore:
Timestamp:
Oct 17, 2019, 2:49:35 PM (5 years ago)
Author:
Sanahuja Guillaume
Message:

remove opencv dep

Location:
trunk
Files:
2 deleted
32 edited
2 copied

Legend:

Unmodified
Added
Removed
  • trunk

  • trunk/cmake-modules/FlairUseFile.cmake

    r302 r338  
    2626
    2727SET(FLAIR_INCLUDE_DIR   
    28 #       ${LIBXML2_INCLUDE_DIR} 
    29         ${CMAKE_SYSROOT}/usr/include/opencv1
    3028        $ENV{FLAIR_ROOT}/flair-install/include/FlairCore
    3129)
     
    3634SET(FLAIR_LIBRARIES     
    3735        ${LIBXML2_LIBRARIES}
    38         udt pthread cv cxcore highgui FileLib rt z
     36        udt pthread FileLib rt z
    3937)
    4038
  • trunk/demos/OpticalFlow/uav/src/DemoOpticalFlow.cpp

    r314 r338  
    4747
    4848DemoOpticalFlow::DemoOpticalFlow(TargetController *controller): UavStateMachine(controller) {
    49   Uav* uav=GetUav();
     49    Uav* uav=GetUav();
    5050        if (uav->GetVerticalCamera() == NULL) {
    5151                                Err("no vertical camera found\n");
     
    5757        greyCameraImage=new CvtColor(uav->GetVerticalCamera(),"gray",CvtColor::Conversion_t::ToGray);
    5858
    59         uav->GetVerticalCamera()->UseDefaultPlot(greyCameraImage->Output()); // Le defaultPlot de la caméra peut afficher n'importe quoi?
     59        uav->GetVerticalCamera()->UseDefaultPlot(greyCameraImage->Output());
    6060
    6161        //optical flow stack
     
    7777  DataPlot1D* xVelocityPlot=new DataPlot1D(opticalFlowTab->NewRow(),"x speed (px/s)",-250,250);
    7878  DataPlot1D* yVelocityPlot=new DataPlot1D(opticalFlowTab->LastRowLastCol(),"y speed (px/s)",-250,250);
    79   DataPlot1D* xFirstPointPlot=new DataPlot1D(opticalFlowTab->NewRow(),"x movement first point",-25,25);
    80   DataPlot1D* yFirstPointPlot=new DataPlot1D(opticalFlowTab->LastRowLastCol(),"y movement first point",-25,25);
    81 //  DataPlot1D* xAccelerationPlot=new DataPlot1D(opticalFlowTab->NewRow(),"x_acceleration",-250,250);
    82 //  DataPlot1D* yAccelerationPlot=new DataPlot1D(opticalFlowTab->LastRowLastCol(),"y_acceleration",-250,250);
    83 
     79 
    8480  xVelocityPlot->AddCurve(opticalFlowSpeedRaw->Output()->Element(0,0));
    8581  xVelocityPlot->AddCurve(opticalFlowSpeed->GetMatrix()->Element(0,0),DataPlot::Blue);
    8682  yVelocityPlot->AddCurve(opticalFlowSpeedRaw->Output()->Element(1,0));
    8783  yVelocityPlot->AddCurve(opticalFlowSpeed->GetMatrix()->Element(1,0),DataPlot::Blue);
    88   xFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(0,0));
    89   xFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(1,0),DataPlot::Blue);
    90   xFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(2,0),DataPlot::Green);
    91   yFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(0,1));
    92   yFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(1,1),DataPlot::Blue);
    93   yFirstPointPlot->AddCurve(opticalFlowCompensated->GetFirstPointDisplacement()->Element(2,1),DataPlot::Green);
    94 //  xAccelerationPlot->AddCurve(opticalFlowAccelerationRaw->Matrix()->Element(0,0));
    95 //  xAccelerationPlot->AddCurve(opticalFlowAcceleration->Matrix()->Element(0,0),DataPlot::Blue);
    96 //  yAccelerationPlot->AddCurve(opticalFlowAccelerationRaw->Matrix()->Element(1,0));
    97 //  yAccelerationPlot->AddCurve(opticalFlowAcceleration->Matrix()->Element(1,0),DataPlot::Blue);
    98 
     84 
    9985  u_x=new Pid(setupLawTab->At(1,0),"u_x");
    10086  u_x->UseDefaultPlot(graphLawTab->NewRow());
     
    129115
    130116void DemoOpticalFlow::SignalEvent(Event_t event) {
     117    UavStateMachine::SignalEvent(event);
    131118    switch(event) {
    132119    case Event_t::EnteringControlLoop:
  • trunk/demos/OpticalFlow/uav/src/DemoOpticalFlow.h

    r214 r338  
    6666        flair::filter::OpticalFlowSpeed *opticalFlowSpeedRaw;
    6767        flair::filter::EulerDerivative *opticalFlowAccelerationRaw;
    68     flair::gui::PushButton *startOpticalflow,*stopOpticalflow;
    69     void StartOpticalFlow(void);
     68        flair::gui::PushButton *startOpticalflow,*stopOpticalflow;
     69        void StartOpticalFlow(void);
    7070        flair::filter::LowPassFilter* opticalFlowSpeed;
    7171        flair::filter::LowPassFilter* opticalFlowAcceleration;
  • trunk/lib/FlairCore/src/Picture.cpp

    r330 r338  
    1717
    1818#include "Picture.h"
    19 #include "cvimage.h"
     19#include "Image.h"
    2020#include "Layout.h"
    2121#include "LayoutPosition.h"
     
    3030
    3131Picture::Picture(const LayoutPosition *position, string name,
    32                  const cvimage *image)
     32                 const Image *image)
    3333    : SendData(position, name, "Picture", 200) {
    3434  this->image = image;
  • trunk/lib/FlairCore/src/Picture.h

    r330 r338  
    1818namespace flair {
    1919namespace core {
    20 class cvimage;
     20class Image;
    2121}
    2222
     
    4545  */
    4646  Picture(const LayoutPosition *position, std::string name,
    47           const core::cvimage *image);
     47          const core::Image *image);
    4848
    4949  /*!
     
    7070  void ExtraXmlEvent(void){};
    7171
    72   const core::cvimage *image;
     72  const core::Image *image;
    7373};
    7474
  • trunk/lib/FlairSensorActuator/CMakeLists.txt

    r302 r338  
    88        ${CMAKE_CURRENT_SOURCE_DIR}/../FlairCore/src
    99        ${CMAKE_CURRENT_SOURCE_DIR}/../FlairVisionFilter/src
    10         ${CMAKE_SYSROOT}/usr/include/opencv1
    1110        ${CMAKE_SYSROOT}/usr/include/vrpn
    1211)
  • trunk/lib/FlairSensorActuator/src/Camera.cpp

    r330 r338  
    2424#include <DataPlot1D.h>
    2525#include <Picture.h>
    26 #include <VisionFilter.h>
    27 #include <highgui.h>
    2826#include <fstream>
    2927
     
    3634
    3735Camera::Camera(string name, uint16_t width,
    38                uint16_t height, cvimage::Type::Format format)
     36               uint16_t height, Image::Type::Format format)
    3937    : IODevice(getFrameworkManager(), name) {
    4038  plot_tab = NULL;
     
    4240
    4341  // do not allocate imagedata, allocation is done by the camera
    44   output = new cvimage((IODevice *)this, width, height, format, "out", false);
     42  output = new Image((IODevice *)this, width, height, format, "out", false);
    4543       
    4644  // station sol
     
    7674                case LogFormat::RAW:
    7775                        AddDataToLog(output);
    78                         Warn("raw log of cvimage is not yet implemented\n");
     76                        Warn("raw log of Image is not yet implemented\n");
    7977                        break;
    8078                case LogFormat::JPG:
    81                                 Warn("logging cvimage to jpeg\n");
     79                                Warn("logging Image to jpeg\n");
    8280                                Warn("jpeg are not included in classical dbt file, as dbt does not handle variable length\n");
    8381                        break;
     
    9593GridLayout *Camera::GetLayout(void) const { return setup_layout; }
    9694
    97 void Camera::UseDefaultPlot(const core::cvimage *image) {
     95void Camera::UseDefaultPlot(const core::Image *image) {
    9896  if (tab == NULL) {
    9997    Err("not applicable for simulation part.\n");
     
    113111}
    114112
    115 core::cvimage *Camera::Output(void) { return output; }
     113core::Image *Camera::Output(void) { return output; }
    116114
    117115void Camera::ProcessUpdate(core::io_data* data) {
     
    121119                if(logFormat==LogFormat::JPG) {
    122120                        data->GetMutex();
    123       //IplImage *img=((cvimage*)data)->img;
    124       const cvimage* input = dynamic_cast<const cvimage*>(data);
     121      //IplImage *img=((Image*)data)->img;
     122      const Image* input = dynamic_cast<const Image*>(data);
    125123      if (!input) {
    126           Warn("casting %s to cvimage failed\n",data->ObjectName().c_str());
     124          Warn("casting %s to Image failed\n",data->ObjectName().c_str());
    127125          return;
    128126      }
     
    130128     
    131129                        string filename=getFrameworkManager()->GetLogPath()+"/"+ObjectName()+"_"+std::to_string(data->DataTime())+".jpg";
    132                         switch(((cvimage*)data)->GetDataType().GetFormat()) {
    133                                 case cvimage::Type::Format::Gray:
     130                        switch(((Image*)data)->GetDataType().GetFormat()) {
     131                                case Image::Type::Format::Gray:
    134132                                        saveToJpeg(img,filename,PictureFormat_t::Gray,PictureFormat_t::Gray);
    135133                                        break;
    136                                 case cvimage::Type::Format::BGR:
     134                                case Image::Type::Format::BGR:
    137135                                        saveToJpeg(img,filename,PictureFormat_t::RGB,PictureFormat_t::RGB);
    138136                                        break;
    139                                 case cvimage::Type::Format::UYVY:
     137                                case Image::Type::Format::UYVY:
    140138                                        saveToJpeg(img,filename,PictureFormat_t::YUV_422ile,PictureFormat_t::YUV_422p);
    141139                                        break;
     
    160158                output->GetMutex();
    161159                if(extension=="jpg") {
    162                         if(output->GetDataType().GetFormat()==cvimage::Type::Format::Gray) saveToJpeg(output->img,filename,PictureFormat_t::Gray,PictureFormat_t::Gray);
    163                         if(output->GetDataType().GetFormat()==cvimage::Type::Format::BGR) saveToJpeg(output->img,filename,PictureFormat_t::RGB,PictureFormat_t::RGB);
    164                         if(output->GetDataType().GetFormat()==cvimage::Type::Format::UYVY) saveToJpeg(output->img,filename,PictureFormat_t::YUV_422ile,PictureFormat_t::YUV_422p);
     160                        if(output->GetDataType().GetFormat()==Image::Type::Format::Gray) saveToJpeg(output->img,filename,PictureFormat_t::Gray,PictureFormat_t::Gray);
     161                        if(output->GetDataType().GetFormat()==Image::Type::Format::BGR) saveToJpeg(output->img,filename,PictureFormat_t::RGB,PictureFormat_t::RGB);
     162                        if(output->GetDataType().GetFormat()==Image::Type::Format::UYVY) saveToJpeg(output->img,filename,PictureFormat_t::YUV_422ile,PictureFormat_t::YUV_422p);
    165163                } else {
    166164                        cvSaveImage(filename.c_str(),output->img);
  • trunk/lib/FlairSensorActuator/src/Camera.h

    r137 r338  
    1616#include <IODevice.h>
    1717#include <stdint.h>
    18 #include <cvimage.h>
     18#include <Image.h>
    1919
    2020namespace flair {
     
    5151  */
    5252  Camera(std::string name, uint16_t width,
    53          uint16_t height, core::cvimage::Type::Format format);
     53         uint16_t height, core::Image::Type::Format format);
    5454
    5555  /*!
     
    7575  * \param image image to display
    7676  */
    77   void UseDefaultPlot(const core::cvimage *image);
     77  void UseDefaultPlot(const core::Image *image);
    7878
    7979  /*!
     
    126126 * \return the output matrix
    127127 */
    128   core::cvimage *Output(void);
     128  core::Image *Output(void);
    129129
    130130  core::DataType const &GetOutputDataType() const;
     
    159159  gui::GroupBox *GetGroupBox(void) const;
    160160
    161   core::cvimage *output;
     161  core::Image *output;
    162162
    163163private:
  • trunk/lib/FlairSensorActuator/src/Ps3Eye.cpp

    r268 r338  
    2828               uint8_t priority)
    2929    : V4LCamera( name, camera_index, 320, 240,
    30                 cvimage::Type::Format::YUYV, priority) {
     30                Image::Type::Format::YUYV, priority) {
    3131  SetIsReady(true);                 
    3232}
  • trunk/lib/FlairSensorActuator/src/SimulatedCamera.cpp

    r330 r338  
    3636                       uint32_t modelId,uint32_t deviceId, uint8_t priority)
    3737    : Thread(getFrameworkManager(), name, priority),
    38       Camera(name, width, height, cvimage::Type::Format::BGR) {
     38      Camera(name, width, height, Image::Type::Format::BGR) {
    3939
    4040  buf_size = width * height * channels+sizeof(Time);
  • trunk/lib/FlairSensorActuator/src/V4LCamera.cpp

    r330 r338  
    2121#include <CheckBox.h>
    2222#include <Label.h>
    23 #include <cvimage.h>
     23#include <Image.h>
    2424#include <FrameworkManager.h>
    2525#include <fcntl.h>
    2626#include <linux/videodev2.h>
    27 
    2827#include <sys/ioctl.h>
    2928#include <unistd.h>
    3029#include <cstring>
    3130#include <sys/mman.h>
    32 
     31#include <VisionFilter.h>
    3332
    3433#define DEFAULT_V4L_BUFFERS 4
     
    4342V4LCamera::V4LCamera(string name,
    4443                     uint8_t camera_index, uint16_t width, uint16_t height,
    45                      cvimage::Type::Format format, uint8_t priority)
     44                     Image::Type::Format format, uint8_t priority)
    4645    : Thread(getFrameworkManager(), name, priority),
    4746      Camera(name, width, height, format) {
    4847 
    49   string deviceName="/dev/video"+std::to_string(camera_index);
    50   device = open(deviceName.c_str(), O_RDWR | O_NONBLOCK);
    51   if (device == -1) {
    52      Thread::Err("Cannot open %s\n");
    53   }
     48    string deviceName="/dev/video"+std::to_string(camera_index);
     49    device = open(deviceName.c_str(), O_RDWR | O_NONBLOCK);
     50    if (device == -1) {
     51        Thread::Err("Cannot open %s\n",deviceName.c_str());
     52    } else {
     53        Printf("V4LCamera %s, opened %s\n",name.c_str(),deviceName.c_str());
     54    }
     55
     56    struct v4l2_capability cap;
     57    memset(&cap, 0, sizeof (v4l2_capability));
     58    if (xioctl (device, VIDIOC_QUERYCAP, &cap)==-1) {
     59        Thread::Err("VIDIOC_QUERYCAP xioctl\n");
     60    }
     61    if ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
     62        Thread::Err("device is unable to capture video memory.\n");
     63    }
     64
     65    //get v4l2_format
     66    struct v4l2_format form;
     67    form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
     68    if(xioctl (device, VIDIOC_G_FMT,&form)==-1) {
     69        Thread::Err("VIDIOC_G_FMT xioctl\n");
     70    }
     71 
     72    //set width, height and format
     73    if (format == Image::Type::Format::UYVY) {
     74        form.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
     75    } else if (format == Image::Type::Format::YUYV) {
     76        form.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
     77    } else {
     78        Thread::Err("format not supported\n");
     79    }
     80 
     81    form.fmt.pix.width = width;
     82    form.fmt.pix.height = height;
     83    form.fmt.win.chromakey = 0;
     84    form.fmt.win.field = V4L2_FIELD_ANY;
     85    form.fmt.win.clips = 0;
     86    form.fmt.win.clipcount = 0;
     87    form.fmt.pix.field = V4L2_FIELD_ANY;
     88    if(xioctl (device, VIDIOC_S_FMT,&form)==-1) {
     89        Thread::Err("VIDIOC_S_FMT xioctl\n");
     90    }
     91 
     92    //alloc and queue bufs
     93    AllocBuffers();
     94    for (int bufferIndex = 0; bufferIndex < nbBuffers;++bufferIndex) {
     95        QueueBuffer(bufferIndex);
     96    }
    5497   
    55   //get v4l2_format
    56   struct v4l2_format form;
    57   form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    58   xioctl (device, VIDIOC_G_FMT,&form);
    59  
    60   //set width, height and format
    61   if (format == cvimage::Type::Format::UYVY) {
    62     form.fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY;
    63   } else if (format == cvimage::Type::Format::YUYV) {
    64     form.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
    65   } else {
    66     Thread::Err("format not supported\n");
    67   }
    68  
    69   form.fmt.pix.width = width;
    70   form.fmt.pix.height = height;
    71   form.fmt.win.chromakey = 0;
    72   form.fmt.win.field = V4L2_FIELD_ANY;
    73   form.fmt.win.clips = 0;
    74   form.fmt.win.clipcount = 0;
    75   form.fmt.pix.field = V4L2_FIELD_ANY;
    76   xioctl (device, VIDIOC_S_FMT, &form);
    77  
    78   /* This is just a technicality, but all buffers must be filled up before any
    79    staggered SYNC is applied.  SO, filler up. (see V4L HowTo) */
    80 
    81   AllocBuffers();
    82 
    83   for (int bufferIndex = 0; bufferIndex < ((int)requestbuffers.count);++bufferIndex) {
    84     struct v4l2_buffer buf;
    85 
    86     memset(&buf, 0, sizeof (v4l2_buffer));
    87 
    88     buf.type        = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    89     buf.memory      = V4L2_MEMORY_MMAP;
    90     buf.index       = (unsigned long)bufferIndex;
    91 
    92     if (-1 == xioctl (device, VIDIOC_QBUF, &buf)) {
    93         Thread::Err("VIDIOC_QBUF xioctl\n");
    94         break;
    95     }
    96   }
    97 
    98   // enable the streaming
    99   v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    100   if (-1 == xioctl (device, VIDIOC_STREAMON,&type)) {
    101       Thread::Err("VIDIOC_STREAMON xioctl\n");
    102   }
    103 
    104 
    105   // skip first frame. it is often bad -- this is unnotied in traditional apps,
    106   //  but could be fatal if bad jpeg is enabled
    107   GrabFrame();
    108    
    109    
    110    
    111   // station sol
    112   gain = new DoubleSpinBox(GetGroupBox()->NewRow(), "gain:", 0, 1, 0.1);
    113   exposure = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "exposure:", 0,
    114                                1, 0.1);
    115   bright =
    116       new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "bright:", 0, 1, 0.1);
    117   contrast = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "contrast:", 0,
    118                                1, 0.1);
    119   hue = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "hue:", 0, 1, 0.1);
    120   sharpness = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "sharpness:",
    121                                 0, 1, 0.1);
    122   sat = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "saturation:", 0, 1,
    123                           0.1);
    124   autogain = new CheckBox(GetGroupBox()->NewRow(), "autogain:");
    125   autoexposure = new CheckBox(GetGroupBox()->LastRowLastCol(), "autoexposure:");
    126   awb = new CheckBox(GetGroupBox()->LastRowLastCol(), "awb:");
    127   fps = new Label(GetGroupBox()->NewRow(), "fps");
    128  
    129   hasProblems=false;
     98    // enable the streaming
     99    v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
     100    if (xioctl (device, VIDIOC_STREAMON,&type)==-1) {
     101        Thread::Err("VIDIOC_STREAMON xioctl\n");
     102    }
     103
     104    // skip first frame. it is often bad -- this is unnoticed in traditional apps,
     105    //  but could be fatal if bad jpeg is enabled
     106    bufferIndex=-1;
     107    GrabFrame();
     108
     109    // ground station
     110    gain = new DoubleSpinBox(GetGroupBox()->NewRow(), "gain:", 0, 1, 0.1);
     111    exposure = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "exposure:", 0,1, 0.1);
     112    bright = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "bright:", 0, 1, 0.1);
     113    contrast = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "contrast:", 0,1, 0.1);
     114    hue = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "hue:", 0, 1, 0.1);
     115    sharpness = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "sharpness:", 0, 1, 0.1);
     116    sat = new DoubleSpinBox(GetGroupBox()->LastRowLastCol(), "saturation:", 0, 1,0.1);
     117    autogain = new CheckBox(GetGroupBox()->NewRow(), "autogain:");
     118    autoexposure = new CheckBox(GetGroupBox()->LastRowLastCol(), "autoexposure:");
     119    awb = new CheckBox(GetGroupBox()->LastRowLastCol(), "awb:");
     120    fps = new Label(GetGroupBox()->NewRow(), "fps");
     121
     122    hasProblems=false;
    130123}
    131124
    132125V4LCamera::~V4LCamera() {
    133   SafeStop();
    134   Join();
     126    for (int n_buffers = 0; n_buffers < nbBuffers; n_buffers++) {
     127       FreeFunction((char*)buffers[n_buffers].start);
     128   }
     129    SafeStop();
     130    Join();
    135131}
    136132
    137133void V4LCamera::Run(void) {
    138134  Time cam_time, new_time, fpsNow, fpsPrev;
    139   char* buffer; // raw image
    140135  int fpsCounter = 0;
    141136
     
    160155    fpsCounter++;
    161156    if (GetTime() > (fpsPrev + 5 * (Time)1000000000)) {
    162       // toute les 5 secondes
     157      // every 5 seconds
    163158      fpsNow = GetTime();
    164159      fps->SetText("fps: %.1f",
     
    204199      SetProperty(V4L2_CID_AUTO_WHITE_BALANCE, awb->Value());
    205200
    206     // cam pictures
    207     buffer = RetrieveRawFrame();
     201    // get picture
    208202    GrabFrame();
    209203    new_time = GetTime();
     
    216210
    217211    output->GetMutex();
    218     output->buffer = buffer;
     212    output->buffer=(char*)buffers[bufferIndex].start;
    219213    output->ReleaseMutex();
    220214
     
    228222}
    229223
    230 void V4LCamera::GrabFrame(void) {
    231     unsigned int count;
    232 
    233     count = 1;
    234 
    235     while (count-- > 0) {
    236         for (;;) {
    237             fd_set fds;
    238             struct timeval tv;
    239             int r;
    240 
    241             FD_ZERO (&fds);
    242             FD_SET (device, &fds);
    243 
    244             /* Timeout. */
    245             tv.tv_sec = 2;
    246             tv.tv_usec = 0;
    247 
    248             r = select (device+1, &fds, NULL, NULL, &tv);
    249 
    250             if (-1 == r) {
    251                 if (EINTR == errno) continue;
    252                 Thread::Err("select\n");
     224int V4LCamera::QueueBuffer(int index) {
     225    struct v4l2_buffer buf;
     226    if(index>=0 && index<nbBuffers) {
     227        memset(&buf, 0, sizeof (v4l2_buffer));
     228        buf.type        = V4L2_BUF_TYPE_VIDEO_CAPTURE;
     229        buf.memory      = V4L2_MEMORY_USERPTR;//V4L2_MEMORY_MMAP;
     230        buf.index       = (unsigned long)index;
     231        buf.m.userptr=(unsigned long)(buffers[index].start);
     232        buf.length=buffers[index].length;
     233       
     234        int ret=xioctl (device, VIDIOC_QBUF, &buf);
     235        if (ret==-1) {
     236            Thread::Err("VIDIOC_QBUF xioctl %s\n",strerror(-ret));
     237            return -1;
     238        }
     239    }
     240    return 0;
     241}
     242
     243int V4LCamera::GrabFrame(void) {
     244    //queue previous buffer
     245    if(QueueBuffer(bufferIndex)<0) return -1;
     246   
     247    fd_set fds;
     248    struct timeval tv;
     249    FD_ZERO (&fds);
     250    FD_SET (device, &fds);
     251
     252    tv.tv_sec = 0;
     253    tv.tv_usec = 100000;
     254
     255    int r = select (device+1, &fds, NULL, NULL, &tv);
     256
     257    if (-1 == r) {
     258        char errorMsg[256];
     259        Thread::Err("select (%s)\n", strerror_r(-r, errorMsg, sizeof(errorMsg)));
     260        return -1;
     261    }
     262
     263    if (0 == r) {
     264        Thread::Err("select timeout\n");
     265        return -1;
     266    }
     267
     268    struct v4l2_buffer buf;
     269    memset(&buf, 0, sizeof (v4l2_buffer));
     270    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
     271    buf.memory = V4L2_MEMORY_USERPTR;//V4L2_MEMORY_MMAP;
     272
     273    //get last captured image
     274    int prevDQbuf=-1;
     275    for(int i=0;i<4;i++) {
     276        if (xioctl (device, VIDIOC_DQBUF, &buf)==-1) {
     277            if (errno==EAGAIN) {
     278                break;
     279            } else {
     280                Thread::Err("VIDIOC_DQBUF xioctl\n");
     281                return -1;
    253282            }
    254 
    255             if (0 == r) {
    256                 Thread::Err("select timeout\n");
    257                 /* end the infinite loop */
    258                 break;
    259             }
    260 
    261             if (read_frame_v4l2 ()) break;
    262         }
    263     }
    264 }
    265 
    266 int V4LCamera::read_frame_v4l2(void) {
    267   struct v4l2_buffer buf;
    268   memset(&buf, 0, sizeof (v4l2_buffer));
    269  
    270 
    271     buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    272     buf.memory = V4L2_MEMORY_MMAP;
    273 
    274     if (-1 == xioctl (device, VIDIOC_DQBUF, &buf)) {
    275         switch (errno) {
    276         case EAGAIN:
    277             return 0;
    278 
    279         case EIO:
    280             /* Could ignore EIO, see spec. */
    281 
    282             /* fall through */
    283 
    284         default:
    285             /* display the error and stop processing */
    286             Thread::Err("VIDIOC_DQBUF xioctl\n");
    287             return 1;
    288         }
    289    }
    290 
    291    if(buf.index >= requestbuffers.count) {
    292      Thread::Err("buf.index >= requestbuffers.count\n");
    293    }
     283       } else {
     284           if(prevDQbuf!=-1) {
     285               QueueBuffer(prevDQbuf);
     286           }
     287           for (int n_buffers = 0; n_buffers < nbBuffers; n_buffers++) {
     288               if((void*)(buf.m.userptr)==buffers[n_buffers].start) {
     289                   prevDQbuf=n_buffers;
     290                   bufferIndex=n_buffers;
     291                   break;
     292               }
     293           }
     294       }
     295    }
    294296   
    295 #ifdef USE_TEMP_BUFFER
    296    memcpy(capture->buffers[MAX_V4L_BUFFERS].start,
    297           capture->buffers[buf.index].start,
    298           capture->buffers[MAX_V4L_BUFFERS].length );
    299    capture->bufferIndex = MAX_V4L_BUFFERS;
    300    //printf("got data in buff %d, len=%d, flags=0x%X, seq=%d, used=%d)\n",
    301    //     buf.index, buf.length, buf.flags, buf.sequence, buf.bytesused);
    302 #else
    303    bufferIndex = buf.index;
    304 #endif
    305 
    306    if (-1 == xioctl (device, VIDIOC_QBUF, &buf)) {
    307        Thread::Err ("VIDIOC_QBUF xioctl\n");
    308    }
    309 
    310297   return 1;
    311298}
    312299
    313300int V4LCamera::AllocBuffers(void) {
     301    struct v4l2_requestbuffers requestbuffers;
    314302   memset(&requestbuffers, 0, sizeof (v4l2_requestbuffers));
    315303   
     
    320308   requestbuffers.count = buffer_number;
    321309   requestbuffers.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    322    requestbuffers.memory = V4L2_MEMORY_MMAP;
    323 
    324    if (-1 == xioctl (device, VIDIOC_REQBUFS, &requestbuffers)) {
    325        if (EINVAL == errno) {
    326          Thread::Err("not support memory mapping not supportted\n");
     310   requestbuffers.memory = V4L2_MEMORY_USERPTR;//V4L2_MEMORY_MMAP;
     311
     312   if (xioctl (device, VIDIOC_REQBUFS, &requestbuffers)==-1) {
     313       if (errno==EINVAL) {
     314         Thread::Err("VIDIOC_REQBUFS user memory not supported\n");
    327315       } else {
    328316         Thread::Err ("VIDIOC_REQBUFS xioctl\n");
     
    330318       return -1;
    331319   }
    332 
    333    if (requestbuffers.count < buffer_number) {
    334        if (buffer_number == 1) {
    335         Thread::Err("Insufficient buffer memory\n");
    336         return -1;
    337        } else {
    338         buffer_number--;
    339         Thread::Warn ("Insufficient buffer memory, decreasing buffers\n");
    340         goto try_again;
    341        }
     320   
     321   nbBuffers=DEFAULT_V4L_BUFFERS;
     322   for (int n_buffers = 0; n_buffers < nbBuffers; n_buffers++) {
     323       buffers[n_buffers].length = output->GetDataType().GetSize();
     324       buffers[n_buffers].start =AllocFunction(output->GetDataType().GetSize());
    342325   }
    343326
    344    for (int n_buffers = 0; n_buffers < requestbuffers.count; ++n_buffers) {
    345        struct v4l2_buffer buf;
    346 
    347        memset(&buf, 0, sizeof (v4l2_buffer));
    348 
    349        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    350        buf.memory = V4L2_MEMORY_MMAP;
    351        buf.index = n_buffers;
    352 
    353        if (-1 == xioctl (device, VIDIOC_QUERYBUF, &buf)) {
    354            Thread::Err("VIDIOC_QUERYBUF xioctl\n");
    355            return -1;
    356        }
    357 
    358        buffers[n_buffers].length = buf.length;
    359        buffers[n_buffers].start =
    360          mmap (NULL /* start anywhere */,
    361                buf.length,
    362                PROT_READ | PROT_WRITE /* required */,
    363                MAP_SHARED /* recommended */,
    364                device, buf.m.offset);
    365 
    366        if (MAP_FAILED == buffers[n_buffers].start) {
    367            Thread::Err("mmap\n");
    368            return -1;
    369        }
    370    }
    371 
    372   //todo: verifier cette alloc, pas de double buffeinrg?
    373   //peut on initialiser l'image dans le constrcteur de la camera?
    374    
    375    output->buffer=output->allocFunction(output->dataType.GetSize());
    376327   return 1;
    377328};
    378329
    379 char *V4LCamera::RetrieveRawFrame(void) {
    380  
    381   /* [FD] this really belongs here */
    382   if (ioctl(device, VIDIOCSYNC, &mmaps[bufferIndex].frame) == -1) {
    383     Thread::Err("Could not SYNC to video stream. %s\n", strerror(errno));
    384   }
    385 
    386   /* Now get what has already been captured as a IplImage return */
    387   if (output->dataType.GetFormat() == cvimage::Type::Format::YUYV || output->dataType.GetFormat() == cvimage::Type::Format::UYVY) {
    388     #ifdef USE_TEMP_BUFFER
    389     capture->frame.imageData=(char*)capture->buffers[capture->bufferIndex].start;
    390     #else
    391 Printf("frame is not allocated\n");
    392     memcpy((char *)frame,(char *)buffers[bufferIndex].start,output->GetDataType().GetSize());
    393     #endif
    394   } else {
    395     Thread::Err("palette %d not supported for raw output\n",output->dataType.GetFormat());
    396   }
    397 
    398   return(frame);
    399 }
    400 
    401330bool V4LCamera::HasProblems(void) {
    402331  return hasProblems;
     
    408337
    409338void V4LCamera::SetAutoExposure(bool value) {
    410   Thread::Warn("not implemented in opencv\n");
     339  Thread::Warn("not implemented\n");
    411340}
    412341
  • trunk/lib/FlairSensorActuator/src/V4LCamera.h

    r330 r338  
    2323namespace flair {
    2424namespace core {
    25 class cvimage;
     25class Image;
    2626}
    2727namespace gui {
     
    5555  V4LCamera(std::string name,
    5656            uint8_t camera_index, uint16_t width, uint16_t height,
    57             core::cvimage::Type::Format format, uint8_t priority);
     57            core::Image::Type::Format format, uint8_t priority);
    5858
    5959  /*!
     
    151151  void SetProperty(int property,float value);
    152152  float GetProperty(int property);
    153   void GrabFrame(void);
    154   int read_frame_v4l2(void);
    155   char *RetrieveRawFrame(void);
     153  int GrabFrame(void);
    156154  int AllocBuffers(void);
     155  int QueueBuffer(int index);
    157156  struct video_mmap *mmaps;
    158157  int bufferIndex;
    159   struct v4l2_requestbuffers requestbuffers;
    160158  struct buffer {
    161159    void *  start;
    162160    size_t  length;
    163161  };
    164   buffer buffers[MAX_V4L_BUFFERS + 1];
     162  buffer buffers[MAX_V4L_BUFFERS];
    165163  char* frame;
     164  int nbBuffers;
    166165};
    167166} // end namespace sensor
  • trunk/lib/FlairVisionFilter/CMakeLists.txt

    r302 r338  
    77        ${CMAKE_CURRENT_SOURCE_DIR}/../FlairCore/src
    88    ${CMAKE_CURRENT_SOURCE_DIR}/../FlairFilter/src
    9         ${CMAKE_SYSROOT}/usr/include/opencv1
    109)
    1110
  • trunk/lib/FlairVisionFilter/src/CvtColor.cpp

    r330 r338  
    1313
    1414#include "CvtColor.h"
    15 #include <cvimage.h>
     15#include "VisionFilter.h"
     16#include <Image.h>
    1617#include <typeinfo>
    1718
     
    1920using namespace flair::core;
    2021
     22class CvtColor_impl {
     23public:
     24    CvtColor_impl(flair::filter::CvtColor *self,flair::filter::CvtColor::Conversion_t conversion):output(0) {
     25        this->conversion=conversion;
     26        this->self=self;
     27
     28        switch(conversion) {
     29        case flair::filter::CvtColor::Conversion_t::ToGray:
     30            try{
     31                Image::Type const &imageType=dynamic_cast<Image::Type const &>(((IODevice*)(self->Parent()))->GetOutputDataType());
     32                output=new Image(self,imageType.GetWidth(),imageType.GetHeight(),Image::Type::Format::Gray,"conversion",true,2);
     33                //inIplImage = (IplImage*)AllocFunction(sizeof(IplImage));
     34                //outIplImage = (IplImage*)AllocFunction(sizeof(IplImage));
     35            } catch(std::bad_cast& bc) {
     36                self->Err("io type mismatch\n");
     37            }
     38            break;
     39        default:
     40            self->Err("conversion not supported\n");
     41        }
     42    }
     43
     44    ~CvtColor_impl() {
     45        //FreeFunction((char*)(inIplImage));
     46        //FreeFunction((char*)(outIplImage));
     47    }
     48   
     49    void UpdateFrom(const io_data *data){
     50        Image *img=(Image*)data;
     51
     52        if(output!=NULL) {
     53            data->GetMutex();/*
     54            inIplImage->width=img->GetDataType().GetWidth();
     55            inIplImage->height=img->GetDataType().GetHeight();
     56            inIplImage->imageData=img->buffer;
     57            inIplImage->imageSize=img->GetDataType().GetSize();
     58            */
     59            output->GetMutex();/*
     60            outIplImage->width=output->GetDataType().GetWidth();
     61            outIplImage->height=output->GetDataType().GetHeight();
     62            outIplImage->imageData=output->buffer;
     63            outIplImage->imageSize=output->GetDataType().GetSize();
     64*/
     65            switch(conversion) {
     66            case flair::filter::CvtColor::Conversion_t::ToGray:
     67                switch(((Image*)data)->GetDataType().GetFormat()) {
     68                case Image::Type::Format::YUYV:
     69                    //dspCvtColor(inIplImage,outIplImage,DSP_YUYV2GRAY);
     70                    break;
     71                case Image::Type::Format::UYVY:
     72                    //dspCvtColor(inIplImage,outIplImage,DSP_UYVY2GRAY);
     73                    break;
     74                case Image::Type::Format::BGR:
     75                    //dspCvtColor(inIplImage,outIplImage,DSP_BGR2GRAY);
     76                    break;
     77                default:
     78                    self->Err("input format not supported\n");
     79                }
     80                break;
     81            default:
     82                self->Err("conversion not supported\n");
     83            }
     84
     85            output->ReleaseMutex();
     86            data->ReleaseMutex();
     87
     88            output->SetDataTime(data->DataTime());
     89        }
     90    };
     91   
     92    Image *output;
     93
     94private:
     95    flair::filter::CvtColor::Conversion_t conversion;
     96    //IplImage *inIplImage,*outIplImage;
     97    flair::filter::CvtColor *self;
     98};
     99
     100
    21101namespace flair { namespace filter {
    22102
    23 CvtColor::CvtColor(const core::IODevice* parent,std::string name,Conversion_t conversion) : IODevice(parent,name),output(0) {
    24     this->conversion=conversion;
    25 
    26     switch(conversion) {
    27     case Conversion_t::ToGray:
    28         try{
    29             cvimage::Type const &imageType=dynamic_cast<cvimage::Type const &>(parent->GetOutputDataType());
    30             output=new cvimage(this,imageType.GetWidth(),imageType.GetHeight(),cvimage::Type::Format::Gray,"conversion",true,2);
    31 
    32         } catch(std::bad_cast& bc) {
    33             Err("io type mismatch\n");
    34         }
    35         break;
    36     default:
    37         Err("conversion not supported\n");
    38     }
    39    
    40     SetIsReady(true);
     103CvtColor::CvtColor(const core::IODevice* parent,std::string name,Conversion_t conversion) : IODevice(parent,name) {
     104    pimpl_=new CvtColor_impl(this,conversion);
    41105}
    42106
    43 CvtColor::~CvtColor(void) {}
     107CvtColor::~CvtColor(void) {
     108    delete pimpl_;
     109}
    44110
    45 cvimage* CvtColor::Output(void) {
    46     return output;
     111Image* CvtColor::Output(void) {
     112    return pimpl_->output;
    47113
    48114}
    49115
    50116DataType const &CvtColor::GetOutputDataType() const {
    51     if(output!=NULL) {
    52         return output->GetDataType();
     117    if(pimpl_->output!=NULL) {
     118        return pimpl_->output->GetDataType();
    53119    } else {
    54120        return dummyType;
     
    57123
    58124void CvtColor::UpdateFrom(const io_data *data) {
    59     cvimage *img=(cvimage*)data;
    60 
    61     data->GetMutex();
    62     output->GetMutex();
    63 
    64     switch(conversion) {
    65     case Conversion_t::ToGray:
    66         switch(img->GetDataType().GetFormat()) {
    67         case cvimage::Type::Format::YUYV:
    68             //dspCvtColor(img,output->img,DSP_YUYV2GRAY);
    69             break;
    70         case cvimage::Type::Format::UYVY:
    71             //dspCvtColor(img,output->img,DSP_UYVY2GRAY);
    72             break;
    73         case cvimage::Type::Format::BGR:
    74             //dspCvtColor(img,output->img,DSP_BGR2GRAY);
    75             break;
    76         default:
    77             Err("input format not supported\n");
    78         }
    79         break;
    80     default:
    81         Err("conversion not supported\n");
    82     }
    83 
    84     output->ReleaseMutex();
    85     data->ReleaseMutex();
    86 
    87     output->SetDataTime(data->DataTime());
    88     ProcessUpdate(output);
     125    pimpl_->UpdateFrom(data);
     126    if(pimpl_->output!=NULL) ProcessUpdate(pimpl_->output);
     127   
    89128}
    90129
  • trunk/lib/FlairVisionFilter/src/CvtColor.h

    r124 r338  
    1313#include <IODevice.h>
    1414
    15 namespace flair
    16 {
    17     namespace core
    18     {
    19         class cvimage;
     15namespace flair {
     16    namespace core {
     17        class Image;
    2018    }
    2119}
    2220
    23 namespace flair
    24 {
     21class CvtColor_impl;
     22
     23namespace flair {
    2524namespace filter
    2625{
     
    6867            * \return the output image
    6968            */
    70             core::cvimage* Output(void);
     69            core::Image* Output(void);
    7170
    7271            core::DataType const &GetOutputDataType() const;
     
    8281            void UpdateFrom(const core::io_data *data);
    8382
    84             core::cvimage *output;
    85             Conversion_t conversion;
     83            class CvtColor_impl *pimpl_;
    8684    };
    8785} // end namespace filter
  • trunk/lib/FlairVisionFilter/src/HoughLines.cpp

    r318 r338  
    1313
    1414#include "HoughLines.h"
    15 #include <cvimage.h>
     15#include "VisionFilter.h"
     16#include <Image.h>
     17#include <OneAxisRotation.h>
    1618#include <Matrix.h>
    1719#include <Layout.h>
     
    2022#include <DoubleSpinBox.h>
    2123#include <typeinfo>
     24#include <math.h>
    2225
    2326#define MAX_LINES 100
     
    2730using namespace flair::gui;
    2831
     32class HoughLines_impl {
     33public:
     34    HoughLines_impl(flair::filter::HoughLines *self,const LayoutPosition* position,string name,const Vector2Df *inPtRefGlobal,float inThetaRefGlobal) {
     35        this->self=self;
     36        GroupBox* reglages_groupbox=new GroupBox(position,name);
     37        rotation=new OneAxisRotation(reglages_groupbox->NewRow(),"pre rotation",OneAxisRotation::PostRotation);
     38        fullRhoStep=new SpinBox(reglages_groupbox->NewRow(),"full rho step:","pixels",0,255,1,1);
     39        fullThetaStep=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"full theta step:","degrees",0,90,1,1);
     40        trackingRhoStep=new SpinBox(reglages_groupbox->NewRow(),"tracking rho step:","pixels",0,255,1,1);
     41        trackingThetaStep=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"tracking theta step:","degrees",0,90,1,1);
     42        trackingDeltaTheta=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"tracking delta theta:","degrees",0,90,1,1);
     43        nbPoints=new SpinBox(reglages_groupbox->NewRow(),"nb points:",0,10000,10,100);
     44
     45        isTracking=false;
     46        //linesStorage = (CvMat*)AllocFunction(sizeof(CvMat));
     47        //linesStorage->data.fl = (float*)AllocFunction(MAX_LINES*2*sizeof(float));//was CV_32FC2, 2 channels
     48        //gimg = (IplImage*)AllocFunction(sizeof(IplImage));
     49
     50        //init output matrix of same size as init
     51        MatrixDescriptor* desc=new MatrixDescriptor(4,1);
     52        desc->SetElementName(0,0,"distance");
     53        desc->SetElementName(1,0,"orientation rad");
     54        desc->SetElementName(2,0,"orientation deg");
     55        desc->SetElementName(3,0,"line_detected");
     56        output=new Matrix(self,desc,floatType,name);
     57        delete desc;
     58
     59        try{
     60            Image::Type const &imageType=dynamic_cast<Image::Type const &>(((IODevice*)(self->Parent()))->GetOutputDataType());
     61            if(imageType.GetFormat()!=Image::Type::Format::Gray) {
     62                self->Err("input image is not gray\n");
     63            }
     64        } catch(std::bad_cast& bc) {
     65            self->Err("io type mismatch\n");
     66        }
     67
     68        thetaRefGlobal=inThetaRefGlobal;
     69        if (inPtRefGlobal==NULL) {
     70            ptRefGlobal=NULL;
     71        } else { //rotation from global coordinates to hough space
     72            ptRefGlobal =new Vector2Df(inPtRefGlobal->x,inPtRefGlobal->y);
     73        }
     74       
     75    }
     76
     77    ~HoughLines_impl() {
     78        //FreeFunction((char*)(linesStorage->data.fl));
     79        //FreeFunction((char*)linesStorage);
     80        //FreeFunction((char*)gimg);
     81        if(ptRefGlobal!=NULL) delete ptRefGlobal;
     82    }
     83   
     84    void UpdateFrom(const io_data *data){
     85        Image *img=(Image*)data;
     86  /*
     87        gimg->width=img->GetDataType().GetWidth();
     88        gimg->height=img->GetDataType().GetHeight();
     89        gimg->imageData=img->buffer;
     90        gimg->imageSize=img->GetDataType().GetSize();
     91   
     92        size_t nbLines;
     93        Vector2Df ptRef;
     94        float thetaRef;
     95
     96        if (ptRefGlobal==NULL) {
     97          ptRef.x=img->GetDataType().GetWidth()/2;
     98          ptRef.y=img->GetDataType().GetHeight()/2;
     99        } else { //rotation from global coordinates to hough space
     100          Vector3Df ptRef3D(ptRefGlobal->x,ptRefGlobal->y,0);
     101          rotation->ComputeRotation(ptRef3D);
     102          ptRef.x=ptRef3D.x;
     103          ptRef.y=ptRef3D.y;
     104        }
     105         
     106        //orientation in global space is rotated by pi/2 compared to orientation in hough space
     107        //eg: vertical line has a 0 orientation in global space (north), but a pi/2 (or -pi/2) orientation in hough space (theta)
     108        thetaRef=thetaRefGlobal+CV_PI/2+rotation->GetAngle();
     109        if (thetaRef>CV_PI) thetaRef-=CV_PI;
     110        if (thetaRef<0) thetaRef+=CV_PI;
     111
     112        data->GetMutex();
     113        if(!isTracking) {
     114        nbLines=dspHoughLines2(gimg,linesStorage,CV_HOUGH_STANDARD,
     115                                fullRhoStep->Value(),fullThetaStep->Value()*CV_PI/180,
     116                                nbPoints->Value());
     117        } else {
     118        nbLines=dspHoughLinesTracking(gimg,linesStorage,CV_HOUGH_STANDARD,
     119                                     trackingRhoStep->Value(),
     120                                     theta,trackingDeltaTheta->Value()*CV_PI/180,
     121                                     trackingThetaStep->Value()*CV_PI/180,
     122                                     nbPoints->Value());
     123        //        nbLines=dspHoughLines2_test(gimg,linesStorage,CV_HOUGH_STANDARD,trackingRhoStep->Value(),thetaPrev-trackingDeltaTheta->Value()*CV_PI/180,thetaPrev+trackingDeltaTheta->Value()*CV_PI/180,trackingThetaStep->Value()*CV_PI/180,nbPoints->Value());
     124        }
     125        data->ReleaseMutex();
     126
     127        //saturation sur le nb max de ligne, au cas ou le DSP n'aurait pas la meme valeur
     128        if(nbLines>MAX_LINES) {
     129            self->Warn("erreur nb lines %u>%u\n",nbLines,MAX_LINES);
     130            nbLines=MAX_LINES;
     131        }
     132        float rho;
     133        bool noLine=!SelectBestLine(linesStorage,nbLines,rho,theta);
     134
     135        if (noLine) {
     136            isTracking=false;
     137        } else {
     138            isTracking=true;
     139        //        float thetaRef=0;
     140
     141            //line equation is ax+by+c=0 with a=cos(theta), b=sin(theta) and c=-rho
     142            //distance from point xRef,yRef to the line is (a.xRef+b.yRef+c)/sqrt(a*a+b*b)
     143            distance=-(cosf(theta)*ptRef.x+sinf(theta)*ptRef.y-rho);
     144
     145            orientation=theta-thetaRef;
     146            if (orientation<-CV_PI/2) {
     147              orientation+=CV_PI;
     148              distance=-distance;
     149            }
     150            if (orientation>CV_PI/2) {
     151              orientation-=CV_PI;
     152              distance=-distance;
     153            }
     154
     155            //printf("=> pour theta=%f et rho=%f, distance au point(%f,%f)=%f\n",theta,rho,xRef,yRef,distance);
     156        }
     157
     158        output->GetMutex();
     159        output->SetValueNoMutex(0,0,distance);
     160        output->SetValueNoMutex(1,0,orientation);
     161        output->SetValueNoMutex(2,0,orientation*180/CV_PI);
     162        if(noLine) {
     163            output->SetValueNoMutex(3,0,0);
     164        } else {
     165            output->SetValueNoMutex(3,0,1);
     166        }
     167        output->ReleaseMutex();
     168*/
     169        output->SetDataTime(data->DataTime());
     170    };
     171   
     172    Matrix *output;
     173   
     174private:/*
     175    //select best line. Returns false if no line found
     176    bool SelectBestLine(CvMat* linesStorage, size_t nbLines, float &rho, float &theta) {
     177      if(nbLines==0) {
     178        return false;
     179      }
     180      //one line is found
     181      if (nbLines==1) {
     182        rho=linesStorage->data.fl[0];
     183        theta=linesStorage->data.fl[1];
     184        //printf("rho=%f,theta=%f (one line)\n",rho,theta);
     185        return true;
     186      }
     187      //lines are ordered by quality, the first one will be our reference
     188      float thetaRef=linesStorage->data.fl[1];
     189      float thetaRefErrorSum=0;
     190      float rhoSum=linesStorage->data.fl[0];
     191      //printf("rho=%f,theta=%f (first of multilines)\n",rhoSum,thetaRef);
     192      for(int i=1;i<nbLines;i++) {
     193        //printf("rho=%f,theta=%f (multilines)\n",linesStorage->data.fl[2*i],linesStorage->data.fl[2*i+1]);
     194        float thetaDiff=linesStorage->data.fl[2*i+1]-thetaRef;
     195        float rhoLine=linesStorage->data.fl[2*i];
     196        if (thetaDiff>CV_PI/2) {
     197          thetaDiff-=CV_PI;
     198          rhoLine=-rhoLine;
     199        } else if (thetaDiff<-CV_PI/2) {
     200          thetaDiff+=CV_PI;
     201          rhoLine=-rhoLine;
     202        }
     203        thetaRefErrorSum += thetaDiff;
     204        rhoSum+=rhoLine;
     205      }
     206      rho=rhoSum/nbLines;
     207      theta=thetaRef+thetaRefErrorSum/nbLines;
     208      if (theta<0) {
     209        theta+=CV_PI;
     210        rho=-rho;
     211      }
     212      if (theta>CV_PI) {
     213        theta-=CV_PI;
     214        rho=-rho;
     215      }
     216      return true;
     217    }
     218*/
     219    flair::filter::HoughLines *self;
     220    OneAxisRotation* rotation;
     221    SpinBox *fullRhoStep,*trackingRhoStep,*nbPoints;
     222    DoubleSpinBox *fullThetaStep,*trackingThetaStep,*trackingDeltaTheta;
     223    bool isTracking;
     224    float theta;
     225    float distance,orientation;
     226    Vector2Df* ptRefGlobal;
     227    float thetaRefGlobal;
     228    //CvMat* linesStorage;
     229    //IplImage *gimg;
     230};
     231
    29232namespace flair { namespace filter {
    30233
    31 HoughLines::HoughLines(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name) {
    32     GroupBox* reglages_groupbox=new GroupBox(position,name);
    33     fullRhoStep=new SpinBox(reglages_groupbox->NewRow(),"full rho step:","pixels",0,255,1,1);
    34     fullThetaStep=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"full theta step:","degrees",0,90,1,1);
    35     trackingRhoStep=new SpinBox(reglages_groupbox->NewRow(),"tracking rho step:","pixels",0,255,1,1);
    36     trackingThetaStep=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"tracking theta step:","degrees",0,90,1,1);
    37     trackingDeltaTheta=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"tracking delta theta:","degrees",0,90,1,1);
    38     nbPoints=new SpinBox(reglages_groupbox->NewRow(),"nb points:",0,10000,10,100);
    39 
    40     isTracking=false;
    41     lostLine=false;
    42     initLine=false;
    43     linesStorage = cvCreateMat(MAX_LINES, 1, CV_32FC2);
    44 
    45     //init output matrix of same size as init
    46     MatrixDescriptor* desc=new MatrixDescriptor(4,1);
    47     desc->SetElementName(0,0,"distance");
    48     desc->SetElementName(1,0,"orientation rad");
    49     desc->SetElementName(2,0,"orientation deg");
    50     desc->SetElementName(3,0,"line_detected");
    51     output=new Matrix(this,desc,floatType,name);
    52     delete desc;
    53 
    54     try{
    55         cvimage::Type const &imageType=dynamic_cast<cvimage::Type const &>(parent->GetOutputDataType());
    56         if(imageType.GetFormat()!=cvimage::Type::Format::Gray) {
    57             Err("input image is not gray\n");
    58             return;
    59         }
    60     } catch(std::bad_cast& bc) {
    61         Err("io type mismatch\n");
    62         return;
    63     }
    64    
    65     SetIsReady(true);
     234HoughLines::HoughLines(const IODevice* parent,const LayoutPosition* position,string name,const Vector2Df *inPtRefGlobal,float inThetaRefGlobal) : IODevice(parent,name) {
     235  pimpl_=new HoughLines_impl(this,position,name,inPtRefGlobal,inThetaRefGlobal);
     236   
    66237}
    67238
    68239HoughLines::~HoughLines(void) {
    69     cvReleaseMat(&linesStorage);
     240   delete  pimpl_;
    70241}
    71242
    72243void HoughLines::UpdateFrom(const io_data *data) {
    73  
     244    pimpl_->UpdateFrom(data);
     245    ProcessUpdate(pimpl_->output);
    74246}
    75247
    76248bool HoughLines::isLineDetected() const {
    77     if(output->Value(3,0)==1) {
     249    if(pimpl_->output->Value(3,0)==1) {
    78250        return true;
    79251    } else {
     
    83255
    84256float HoughLines::GetOrientation(void) const {
    85     return output->Value(1,0);
     257    return pimpl_->output->Value(1,0);
    86258}
    87259
    88260float HoughLines::GetDistance(void) const {
    89     return output->Value(0,0);
     261    return pimpl_->output->Value(0,0);
    90262}
    91263
    92264Matrix *HoughLines::Output(void) const {
    93     return output;
     265    return pimpl_->output;
    94266}
    95267
  • trunk/lib/FlairVisionFilter/src/HoughLines.h

    r214 r338  
    1212
    1313#include <IODevice.h>
    14 #include <cv.h>
     14#include <Vector2D.h>
    1515
    1616namespace flair {
    1717    namespace core {
    18         class cvimage;
    1918        class Matrix;
    2019    }
    2120    namespace gui {
    2221        class LayoutPosition;
    23         class SpinBox;
    24         class DoubleSpinBox;
    2522    }
    2623}
     24
     25class HoughLines_impl;
    2726
    2827namespace flair { namespace filter {
     
    4443            * \param position position
    4544            * \param name name
     45            * \param ptRefGlobal reference point used to compute distance to the line
     46            * \param thetaRefGlobal reference angle used to compute orientation in range [-pi/2,pi/2]. Default value is 0 which defines a vertical line
    4647            */
    47             HoughLines(const core::IODevice* parent,const gui::LayoutPosition* position,std::string name);
     48            HoughLines(const core::IODevice* parent,const gui::LayoutPosition* position,std::string name,const core::Vector2Df *ptRefGlobal=NULL,float thetaRefGlobal=0);
    4849
    4950            /*!
     
    6061        private:
    6162            void UpdateFrom(const core::io_data *data);
    62             gui::SpinBox *fullRhoStep,*trackingRhoStep,*nbPoints;
    63             gui::DoubleSpinBox *fullThetaStep,*trackingThetaStep,*trackingDeltaTheta;
    64             bool isTracking,lostLine,initLine,noLine;
    65             int nbLines;
    66             float yawMean,prevYawMean;
    67             float prevRhoMean,rhoMean;
    68             float prevThetaMean,thetaMean;
    69             float distance,orientation;
    70             CvMat* linesStorage;
    71             core::Matrix *output;
     63            class HoughLines_impl *pimpl_;
     64           
     65           
    7266    };
    7367} // end namespace filter
  • trunk/lib/FlairVisionFilter/src/ImgThreshold.cpp

    r330 r338  
    1313
    1414#include "ImgThreshold.h"
    15 #include <cvimage.h>
     15#include "VisionFilter.h"
     16#include <Image.h>
    1617#include <Layout.h>
    1718#include <GroupBox.h>
     
    2324using namespace flair::gui;
    2425
     26class ImgThreshold_impl {
     27public:
     28    ImgThreshold_impl(flair::filter::ImgThreshold *self,const LayoutPosition* position,string name):output(0) {
     29        this->self=self;
     30       
     31        GroupBox* reglages_groupbox=new GroupBox(position,name);
     32        threshold=new SpinBox(reglages_groupbox->NewRow(),"threshold:",0,255,1,127);
     33
     34        Printf("todo: pouvoir reutiliser la meme image en sortie\n");
     35        try{
     36            Image::Type const &imageType=dynamic_cast<Image::Type const &>(((IODevice*)(self->Parent()))->GetOutputDataType());
     37            if(imageType.GetFormat()==Image::Type::Format::Gray) {
     38                output=new Image(self,imageType.GetWidth(),imageType.GetHeight(),imageType.GetFormat(),"threshold");
     39                //inIplImage = (IplImage*)AllocFunction(sizeof(IplImage));
     40                //outIplImage = (IplImage*)AllocFunction(sizeof(IplImage));
     41            } else {
     42                self->Err("input image is not gray\n");
     43            }
     44        } catch(std::bad_cast& bc) {
     45            self->Err("io type mismatch\n");
     46        }
     47
     48       
     49    }
     50
     51    ~ImgThreshold_impl() {
     52        //FreeFunction((char*)(inIplImage));
     53        //FreeFunction((char*)(outIplImage));
     54    }
     55   
     56    void UpdateFrom(const io_data *data){
     57        Image *img=(Image*)data;
     58/*
     59        data->GetMutex();
     60        inIplImage->width=img->GetDataType().GetWidth();
     61        inIplImage->height=img->GetDataType().GetHeight();
     62        inIplImage->imageData=img->buffer;
     63        inIplImage->imageSize=img->GetDataType().GetSize();
     64       
     65        output->GetMutex();
     66        outIplImage->width=output->GetDataType().GetWidth();
     67        outIplImage->height=output->GetDataType().GetHeight();
     68        outIplImage->imageData=output->buffer;
     69        outIplImage->imageSize=output->GetDataType().GetSize();
     70               
     71        dspThreshold(inIplImage,outIplImage, threshold->Value(), 255, CV_THRESH_BINARY);
     72        output->ReleaseMutex();
     73        data->ReleaseMutex();
     74*/
     75        output->SetDataTime(data->DataTime());
     76    };
     77   
     78    Image *output;
     79
     80private:
     81    flair::filter::ImgThreshold *self;
     82    SpinBox *threshold;
     83    //IplImage *inIplImage,*outIplImage;
     84};
     85
    2586namespace flair { namespace filter {
    2687
    27 ImgThreshold::ImgThreshold(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name),output(0) {
    28     GroupBox* reglages_groupbox=new GroupBox(position,name);
    29     threshold=new SpinBox(reglages_groupbox->NewRow(),"threshold:",0,255,1,127);
    30 
    31     Printf("todo: pouvoir reutiliser la meme image en sortie\n");
    32     try{
    33         cvimage::Type const &imageType=dynamic_cast<cvimage::Type const &>(parent->GetOutputDataType());
    34         if(imageType.GetFormat()==cvimage::Type::Format::Gray) {
    35             output=new cvimage(this,imageType.GetWidth(),imageType.GetHeight(),imageType.GetFormat(),"threshold");
    36         } else {
    37             Err("input image is not gray\n");
    38             return;
    39         }
    40     } catch(std::bad_cast& bc) {
    41         Err("io type mismatch\n");
    42         return;
    43     }
    44     SetIsReady(true);
     88ImgThreshold::ImgThreshold(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name) {
     89    pimpl_=new ImgThreshold_impl(this,position,name);
    4590}
    4691
    4792ImgThreshold::~ImgThreshold(void) {
     93    delete pimpl_;
    4894}
    4995
    50 cvimage* ImgThreshold::Output(void) {
    51     return output;
     96Image* ImgThreshold::Output(void) {
     97    return pimpl_->output;
    5298}
    5399
    54100void ImgThreshold::UpdateFrom(const io_data *data) {
    55  /*   cvimage *cvImage=(cvimage*)data;
    56     IplImage *gimg=cvImage->img;
    57 
    58     data->GetMutex();
    59     output->GetMutex();
    60     dspThreshold(gimg, output->img, threshold->Value(), 255, CV_THRESH_BINARY);
    61     output->ReleaseMutex();
    62     data->ReleaseMutex();
    63 */
    64     output->SetDataTime(data->DataTime());
    65     ProcessUpdate(output);
     101    pimpl_->UpdateFrom(data);
     102    ProcessUpdate(pimpl_->output);
    66103}
    67104
    68105DataType const &ImgThreshold::GetOutputDataType() const {
    69     if(output!=NULL) {
    70         return output->GetDataType();
     106    if(pimpl_->output!=NULL) {
     107        return pimpl_->output->GetDataType();
    71108    } else {
    72109        return dummyType;
  • trunk/lib/FlairVisionFilter/src/ImgThreshold.h

    r122 r338  
    1212
    1313#include <IODevice.h>
    14 #include <cv.h>
    1514
    1615namespace flair {
    1716    namespace core {
    18         class cvimage;
     17        class Image;
    1918    }
    2019    namespace gui {
    2120        class LayoutPosition;
    22         class SpinBox;
    2321    }
    2422}
     23
     24class ImgThreshold_impl;
    2525
    2626namespace flair { namespace filter {
     
    5858            * \return the output image
    5959            */
    60             core::cvimage* Output(void);
     60            core::Image* Output(void);
    6161
    6262            core::DataType const &GetOutputDataType() const;
     
    6464        private:
    6565            void UpdateFrom(const core::io_data *data);
    66             core::cvimage *output;
    67             gui::SpinBox *threshold;
     66            class ImgThreshold_impl *pimpl_;
    6867    };
    6968} // end namespace filter
  • trunk/lib/FlairVisionFilter/src/OpticalFlow.cpp

    r330 r338  
    1414#include "OpticalFlow.h"
    1515#include "OpticalFlowData.h"
    16 #include <cvimage.h>
     16#include "VisionFilter.h"
     17#include <Image.h>
    1718#include <Layout.h>
    1819#include <GroupBox.h>
    1920#include <SpinBox.h>
    20 //#include <algorithm>
    2121#include <OneAxisRotation.h>
    22 #include <Vector3D.h>
    2322#include <typeinfo>
    2423
     
    3029using namespace flair::gui;
    3130
     31class OpticalFlow_impl {
     32public:
     33    OpticalFlow_impl(flair::filter::OpticalFlow *self,const LayoutPosition* position,string name) {
     34        this->self=self;
     35        is_init=false;
     36
     37        GroupBox* reglages_groupbox=new GroupBox(position,name);
     38        rotation=new OneAxisRotation(reglages_groupbox->NewRow(),"post rotation",OneAxisRotation::PostRotation);
     39        max_features=new SpinBox(reglages_groupbox->NewRow(),"max features:",1,65535,1,1);
     40       
     41        try{
     42            Image::Type const &imageType=dynamic_cast<Image::Type const &>(((IODevice*)(self->Parent()))->GetOutputDataType());
     43            pyr=new Image(self,imageType.GetWidth(),imageType.GetHeight(),Image::Type::Format::Gray);
     44            pyr_old=new Image(self,imageType.GetWidth(),imageType.GetHeight(),Image::Type::Format::Gray);
     45            output=new flair::filter::OpticalFlowData(self->Parent(),max_features->Value());
     46/*
     47            pointsA=(CvPoint*)AllocFunction(max_features->Value()*sizeof(CvPoint));
     48            pointsB=(CvPoint*)AllocFunction(max_features->Value()*sizeof(CvPoint));
     49
     50            found_feature=(char*)AllocFunction(max_features->Value()*sizeof(char));
     51            feature_error=(unsigned int*)AllocFunction(max_features->Value()*sizeof(unsigned int));
     52           
     53            iplgimg = (IplImage*)AllocFunction(sizeof(IplImage));
     54            iplgimg_old = (IplImage*)AllocFunction(sizeof(IplImage));
     55            iplpyr = (IplImage*)AllocFunction(sizeof(IplImage));
     56            iplpyr_old = (IplImage*)AllocFunction(sizeof(IplImage));
     57           
     58            iplpyr->width=imageType.GetWidth();
     59            iplpyr->height=imageType.GetHeight();
     60            iplpyr->imageSize=imageType.GetSize();
     61            iplpyr_old->width=imageType.GetWidth();
     62            iplpyr_old->height=imageType.GetHeight();
     63            iplpyr_old->imageSize=imageType.GetSize();
     64           
     65            iplgimg->roi=NULL;
     66            iplgimg_old->roi=NULL;
     67            iplpyr->roi=NULL;
     68            iplpyr_old->roi=NULL;
     69*/ 
     70      } catch(std::bad_cast& bc) {
     71            self->Err("io type mismatch\n");
     72            pyr=NULL;
     73            pyr_old=NULL;
     74        }
     75     
     76    }
     77
     78    ~OpticalFlow_impl() {
     79        /*
     80        FreeFunction((char*)pointsA);
     81        FreeFunction((char*)pointsB);
     82        FreeFunction((char*)found_feature);
     83        FreeFunction((char*)feature_error);
     84        FreeFunction((char*)iplgimg);
     85        FreeFunction((char*)iplgimg_old);
     86        FreeFunction((char*)iplpyr);
     87        FreeFunction((char*)iplpyr_old);*/
     88    }
     89   
     90    void UpdateFrom(const io_data *data){
     91        //Time tStart=GetTime();
     92        Image *img=(Image*)data;
     93        Image *img_old=((Image*)data->Prev(1));
     94/*
     95        iplgimg->width=img->GetDataType().GetWidth();
     96        iplgimg->height=img->GetDataType().GetHeight();
     97        iplgimg->imageData=img->buffer;
     98        iplgimg->imageSize=img->GetDataType().GetSize();
     99       
     100        iplgimg_old->width=img_old->GetDataType().GetWidth();
     101        iplgimg_old->height=img_old->GetDataType().GetHeight();
     102        iplgimg_old->imageData=img_old->buffer;
     103        iplgimg_old->imageSize=img_old->GetDataType().GetSize();
     104       
     105        iplpyr->imageData=pyr->buffer;
     106        iplpyr_old->imageData=pyr_old->buffer;
     107
     108        unsigned int count;
     109        CvSize window = {3,3};
     110        CvTermCriteria termination_criteria ={CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 };
     111        unsigned int i;
     112
     113        if(max_features->ValueChanged()==true) {
     114            FreeFunction((char*)pointsA);
     115            FreeFunction((char*)pointsB);
     116            FreeFunction((char*)found_feature);
     117            FreeFunction((char*)feature_error);
     118            pointsA=(CvPoint*)AllocFunction(max_features->Value()*sizeof(CvPoint));
     119            pointsB=(CvPoint*)AllocFunction(max_features->Value()*sizeof(CvPoint));
     120            found_feature=(char*)AllocFunction(max_features->Value()*sizeof(char));
     121            feature_error=(unsigned int*)AllocFunction(max_features->Value()*sizeof(unsigned int));
     122
     123            output->Resize(max_features->Value());
     124        }
     125
     126        if(is_init==false) {
     127            data->GetMutex();
     128            //init image old
     129            dspPyrDown(iplgimg,iplpyr_old,LEVEL_PYR);
     130            data->ReleaseMutex();
     131            is_init=true;
     132            printf("ajouter mise a 0 des points\n");
     133            return;
     134        }
     135
     136        data->GetMutex();
     137        data->Prev(1)->GetMutex();
     138
     139        //select good features
     140        count=max_features->Value();
     141        dspGoodFeaturesToTrack(iplgimg_old,pointsA,&count,0.08,5);
     142        //pyramide
     143        dspPyrDown(iplgimg,iplpyr,LEVEL_PYR);
     144        //lk
     145        dspCalcOpticalFlowPyrLK(iplgimg_old,iplgimg,iplpyr_old,iplpyr,pointsA,pointsB,count,window,LEVEL_PYR,found_feature,feature_error,termination_criteria,0) ;
     146
     147        data->Prev(1)->ReleaseMutex();
     148        data->ReleaseMutex();
     149
     150        //apply rotation
     151        for(i=0;i<count;i++) {
     152            Vector3Df tmp;
     153            tmp.x=pointsA[i].x;
     154            tmp.y=pointsA[i].y;
     155            tmp.z=0;
     156            rotation->ComputeRotation(tmp);
     157            pointsA[i].x=tmp.x;
     158            pointsA[i].y=tmp.y;
     159
     160            tmp.x=pointsB[i].x;
     161            tmp.y=pointsB[i].y;
     162            tmp.z=0;
     163            rotation->ComputeRotation(tmp);
     164            pointsB[i].x=tmp.x;
     165            pointsB[i].y=tmp.y;
     166        }
     167
     168        output->GetMutex();
     169        CvPoint2D32f* pointsBf= output->PointsB();
     170        for(i=0;i<count;i++) {
     171            pointsBf[i].x=pointsA[i].x+((float)pointsB[i].x)/256;
     172            pointsBf[i].y=pointsA[i].y+((float)pointsB[i].y)/256;
     173        }
     174        output->ReleaseMutex();
     175
     176        output->SetPointsA(pointsA);
     177        output->SetFoundFeature(found_feature);
     178        output->SetFeatureError(feature_error);
     179        output->SetNbFeatures(count);
     180
     181        //rotation
     182        swap(pyr,pyr_old);
     183*/
     184        output->SetDataTime(data->DataTime());
     185        //Printf("Optical flow computation time=%f\n",(GetTime()-tStart)/(1000.*1000));
     186    };
     187   
     188    flair::filter::OpticalFlowData *output;
     189
     190private:
     191    flair::filter::OpticalFlow *self;
     192    SpinBox *max_features;
     193    OneAxisRotation* rotation;
     194
     195    //CvPoint* pointsA;
     196    //CvPoint* pointsB;
     197    char *found_feature;
     198    unsigned int *feature_error;
     199    Image *pyr,*pyr_old;
     200    //IplImage *iplgimg,*iplgimg_old;
     201    //IplImage *iplpyr,*iplpyr_old;
     202
     203    bool is_init;
     204};
     205
     206
    32207namespace flair
    33208{
     
    35210{
    36211
    37 OpticalFlow::OpticalFlow(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name)
    38 {
     212OpticalFlow::OpticalFlow(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name) {
    39213    Printf("optical flow: voir pour faire du multiple output\n");//pour pts A et B, found et error
    40 
    41     try{
    42         cvimage::Type const &imageType=dynamic_cast<cvimage::Type const &>(parent->GetOutputDataType());
    43         pyr=cvCreateImage(cvSize(imageType.GetWidth(),imageType.GetHeight()),IPL_DEPTH_8U,1);
    44         pyr_old=cvCreateImage(cvSize(imageType.GetWidth(),imageType.GetHeight()),IPL_DEPTH_8U,1);
    45 
    46     } catch(std::bad_cast& bc) {
    47         Err("io type mismatch\n");
    48         pyr=NULL;
    49         pyr_old=NULL;
    50         return;
     214    pimpl_=new OpticalFlow_impl(this,position,name);
     215}
     216
     217OpticalFlow::~OpticalFlow(void) {
     218    delete pimpl_;
     219}
     220
     221void OpticalFlow::UpdateFrom(const io_data *data) {
     222    pimpl_->UpdateFrom(data);
     223    ProcessUpdate(pimpl_->output);
     224}
     225
     226OpticalFlowData* OpticalFlow::Output(void) {
     227    return pimpl_->output;
     228
     229}
     230
     231DataType const &OpticalFlow::GetOutputDataType() const {
     232    if(pimpl_->output!=NULL) {
     233        return pimpl_->output->GetDataType();
     234    } else {
     235        return dummyType;
    51236    }
    52 
    53     is_init=false;
    54 
    55     GroupBox* reglages_groupbox=new GroupBox(position,name);
    56     rotation=new OneAxisRotation(reglages_groupbox->NewRow(),"post rotation",OneAxisRotation::PostRotation);
    57     max_features=new SpinBox(reglages_groupbox->NewRow(),"max features:",1,65535,1,1);
    58 
    59     output=new OpticalFlowData(parent,max_features->Value());
    60 
    61     pointsA=(CvPoint *)cvAlloc(max_features->Value()*sizeof(CvPoint));
    62     pointsB=(CvPoint *)cvAlloc(max_features->Value()*sizeof(CvPoint));
    63 
    64     found_feature=(char *)cvAlloc(max_features->Value()*sizeof(char));
    65     feature_error=(unsigned int *)cvAlloc(max_features->Value()*sizeof(unsigned int));
    66    
    67     SetIsReady(true);
    68 }
    69 
    70 OpticalFlow::~OpticalFlow(void)
    71 {
    72     cvReleaseImage(&pyr);
    73     cvReleaseImage(&pyr_old);
    74     cvFree(&pointsA);
    75     cvFree(&pointsB);
    76     cvFree(&found_feature);
    77     cvFree(&feature_error);
    78 }
    79 
    80 void OpticalFlow::UpdateFrom(const io_data *data)
    81 {/*
    82     IplImage *gimg=((cvimage*)data)->img;
    83     IplImage *gimg_old=((cvimage*)data->Prev(1))->img;
    84 
    85     unsigned int count;
    86     CvSize window = cvSize(3,3);
    87     CvTermCriteria termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );
    88     unsigned int i;
    89 
    90     if(max_features->ValueChanged()==true) {
    91         cvFree(&pointsA);
    92         cvFree(&pointsB);
    93         cvFree(&found_feature);
    94         cvFree(&feature_error);
    95         pointsA=(CvPoint *)cvAlloc(max_features->Value()*sizeof(CvPoint));
    96         pointsB=(CvPoint *)cvAlloc(max_features->Value()*sizeof(CvPoint));
    97         found_feature=(char *)cvAlloc(max_features->Value()*sizeof(char));
    98         feature_error=(unsigned int *)cvAlloc(max_features->Value()*sizeof(unsigned int));
    99 
    100         output->Resize(max_features->Value());
    101     }
    102 
    103     if(is_init==false)
    104     {
    105         data->GetMutex();
    106         //init image old
    107         dspPyrDown(gimg,pyr_old,LEVEL_PYR);
    108         data->ReleaseMutex();
    109         is_init=true;
    110         printf("ajouter mise a 0 des points\n");
    111         return;
    112     }
    113 
    114     data->GetMutex();
    115     data->Prev(1)->GetMutex();
    116 
    117     //select good features
    118     count=max_features->Value();
    119     dspGoodFeaturesToTrack(gimg_old,pointsA,&count,0.08,5);
    120     //pyramide
    121     dspPyrDown(gimg,pyr,LEVEL_PYR);
    122     //lk
    123     dspCalcOpticalFlowPyrLK(gimg_old,gimg,pyr_old,pyr,pointsA,pointsB,count,window,LEVEL_PYR,found_feature,feature_error,termination_criteria,0) ;
    124 
    125     data->Prev(1)->ReleaseMutex();
    126     data->ReleaseMutex();
    127 
    128     //apply rotation
    129     for(i=0;i<count;i++) {
    130         Vector3Df tmp;
    131         tmp.x=pointsA[i].x;
    132         tmp.y=pointsA[i].y;
    133         tmp.z=0;
    134         rotation->ComputeRotation(tmp);
    135         pointsA[i].x=tmp.x;
    136         pointsA[i].y=tmp.y;
    137 
    138         tmp.x=pointsB[i].x;
    139         tmp.y=pointsB[i].y;
    140         tmp.z=0;
    141         rotation->ComputeRotation(tmp);
    142         pointsB[i].x=tmp.x;
    143         pointsB[i].y=tmp.y;
    144     }
    145 
    146     output->GetMutex();
    147     CvPoint2D32f* pointsBf= output->PointsB();
    148     for(i=0;i<count;i++)
    149     {
    150         pointsBf[i].x=pointsA[i].x+((float)pointsB[i].x)/256;
    151         pointsBf[i].y=pointsA[i].y+((float)pointsB[i].y)/256;
    152     }
    153     output->ReleaseMutex();
    154 
    155     output->SetPointsA(pointsA);
    156     output->SetFoundFeature(found_feature);
    157     output->SetFeatureError(feature_error);
    158     output->SetNbFeatures(count);
    159 
    160     //rotation
    161     swap(pyr,pyr_old);
    162 */
    163     output->SetDataTime(data->DataTime());
    164     ProcessUpdate(output);
    165237}
    166238
  • trunk/lib/FlairVisionFilter/src/OpticalFlow.h

    r122 r338  
    1212
    1313#include <IODevice.h>
    14 #include <cv.h>
    1514
    16 namespace flair
    17 {
    18     namespace core
    19     {
    20         class cvimage;
    21         class OneAxisRotation;
     15namespace flair {
     16    namespace gui {
     17        class LayoutPosition;
    2218    }
    23     namespace gui
    24     {
    25         class LayoutPosition;
    26         class SpinBox;
    27     }
    28     namespace filter
    29     {
     19    namespace filter {
    3020        class OpticalFlowData;
    3121    }
    3222}
    3323
    34 namespace flair
    35 {
    36 namespace filter
    37 {
     24class OpticalFlow_impl;
     25
     26namespace flair {
     27namespace filter {
    3828    /*! \class OpticalFlow
    3929    *
     
    6353            */
    6454            ~OpticalFlow();
     55           
     56            filter::OpticalFlowData* Output(void);
     57
     58            core::DataType const &GetOutputDataType() const;
    6559
    6660        private:
    6761            void UpdateFrom(const core::io_data *data);
    68             OpticalFlowData *output;
    69             gui::SpinBox *max_features;
    70             core::OneAxisRotation* rotation;
    71 
    72             CvPoint* pointsA;
    73             CvPoint* pointsB;
    74             char *found_feature;
    75             unsigned int *feature_error;
    76             IplImage *pyr,*pyr_old;
    77 
    78             bool is_init;
     62           
     63            class OpticalFlow_impl *pimpl_;
     64           
    7965    };
    8066} // end namespace filter
  • trunk/lib/FlairVisionFilter/src/OpticalFlowCompensated.cpp

    r318 r338  
    3232using namespace flair::gui;
    3333
     34class OpticalFlowCompensated_impl {
     35public:
     36    OpticalFlowCompensated_impl(flair::filter::OpticalFlowCompensated *self,const flair::filter::Ahrs *ahrs, const LayoutPosition* position, string name): ahrs(ahrs), output(NULL) {
     37        this->self=self;
     38        previousStepsAngularRates=new Vector3Df*[10];
     39        for (int i=0; i<10; i++) previousStepsAngularRates[i]=NULL;
     40        previousStepsAngularRatesIndex=0;
     41
     42        GroupBox* reglages_groupbox=new GroupBox(position,name);
     43        //TODO: the gyroDelay is set to compensate for the time difference between image snapshot et gyro measurements
     44        //it is equal to the time between 2 images (because optical flow always lags 1 frame) + the time to compute optical flow
     45        //here it is approximated by 2 video frames
     46        gyroDelay=new SpinBox(reglages_groupbox->NewRow(),"gyro delay (in video frames):",0,10,1,2);
     47        gyroGain=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"gyro gain:",0.,500.,10.,2,300.);
     48
     49       
     50    }
     51
     52    ~OpticalFlowCompensated_impl() {
     53        delete output;
     54    }
     55   
     56    void UpdateFrom(const io_data *data){
     57        flair::filter::OpticalFlowData *input=(flair::filter::OpticalFlowData *)data;
     58        if (!output) { //first pass
     59            output=new flair::filter::OpticalFlowData(self,input->MaxFeatures(),input->ObjectName()+"_filtered");
     60            previousTime=input->DataTime();
     61            return;
     62        }
     63        //  float kX=320/Euler::ToRadian(70); //TEST: only for default simuCameraGL. fov=70° and image width=320 => k=320*180/(70*pi)
     64        //  float kY=240/Euler::ToRadian(70); //TEST: only for default simuCameraGL. fov=70° and image height=240
     65        float kX=gyroGain->Value();
     66        float kY=gyroGain->Value();
     67        float deltaT=(input->DataTime()-previousTime)/(1000*1000*1000.);
     68        previousTime=input->DataTime();
     69
     70        int delayedIndex=previousStepsAngularRatesIndex-gyroDelay->Value(); // Ahhh décalage, esprit canal...
     71        if (delayedIndex<0) delayedIndex+=10;
     72
     73        if (!previousStepsAngularRates[previousStepsAngularRatesIndex]) {
     74            previousStepsAngularRates[previousStepsAngularRatesIndex]=new Vector3Df();
     75        }
     76        *previousStepsAngularRates[previousStepsAngularRatesIndex++]=ahrs->GetDatas()->GetAngularRates();
     77
     78        if (!previousStepsAngularRates[delayedIndex]) return;
     79        float rotationFlowX=previousStepsAngularRates[delayedIndex]->y*deltaT*kY;
     80        float rotationFlowY=-previousStepsAngularRates[delayedIndex]->x*deltaT*kX;
     81        if (previousStepsAngularRatesIndex==10) previousStepsAngularRatesIndex=0;
     82        input->GetMutex();
     83        output->GetMutex();
     84
     85        for (int i=0; i<input->NbFeatures(); i++) {
     86            output->PointsA()[i].x=input->PointsA()[i].x;
     87            output->PointsA()[i].y=input->PointsA()[i].y;
     88            output->PointsB()[i].x=input->PointsB()[i].x-rotationFlowX;
     89            output->PointsB()[i].y=input->PointsB()[i].y-rotationFlowY;
     90        }
     91        output->SetNbFeatures(input->NbFeatures());
     92        output->SetFoundFeature(input->FoundFeature());
     93        output->SetFeatureError(input->FeatureError());
     94
     95        output->ReleaseMutex();
     96        input->ReleaseMutex();
     97
     98        output->SetDataTime(input->DataTime());
     99           
     100    };
     101   
     102    flair::filter::OpticalFlowData *output;
     103
     104private:
     105    flair::filter::OpticalFlowCompensated *self;
     106    Time previousTime;
     107    const flair::filter::Ahrs *ahrs;
     108    Vector3Df **previousStepsAngularRates;
     109    unsigned int previousStepsAngularRatesIndex;
     110    SpinBox *gyroDelay;
     111    DoubleSpinBox *gyroGain;
     112};
     113
     114
    34115namespace flair {
    35116namespace filter {
    36117
    37 OpticalFlowCompensated::OpticalFlowCompensated(const OpticalFlow *parent, const Ahrs *ahrs, const LayoutPosition* position, string name) : IODevice(parent, name), ahrs(ahrs), output(NULL) {
    38   MatrixDescriptor* desc=new MatrixDescriptor(3,2);
    39   desc->SetElementName(0,0,"raw displacement X");
    40   desc->SetElementName(0,1,"raw displacement Y");
    41   desc->SetElementName(1,0,"compensation X");
    42   desc->SetElementName(1,1,"compensation Y");
    43   desc->SetElementName(2,0,"displacement with compensation X");
    44   desc->SetElementName(2,0,"displacement with compensation Y");
    45   firstPointDisplacement=new Matrix(this,desc,floatType,name);
    46   delete desc;
    47   previousStepsAngularRates=new Vector3Df*[10];
    48   for (int i=0; i<10; i++) previousStepsAngularRates[i]=NULL;
    49   previousStepsAngularRatesIndex=0;
    50 
    51   GroupBox* reglages_groupbox=new GroupBox(position,name);
    52   //TODO: the gyroDelay is set to compensate for the time difference between image snapshot et gyro measurements
    53 //it is equal to the time between 2 images (because optical flow always lags 1 frame) + the time to compute optical flow
    54 //here it is approximated by 2 video frames
    55   gyroDelay=new SpinBox(reglages_groupbox->NewRow(),"gyro delay (in video frames):",0,10,1,2);
    56   gyroGain=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"gyro gain:",0.,500.,10.,2,300.);
     118OpticalFlowCompensated::OpticalFlowCompensated(const OpticalFlow *parent, const Ahrs *ahrs, const LayoutPosition* position, string name) : IODevice(parent, name) {
     119    pimpl_=new OpticalFlowCompensated_impl(this,ahrs,position,name);
    57120
    58121}
    59122
    60123OpticalFlowCompensated::~OpticalFlowCompensated() {
    61   delete output;
     124    delete pimpl_;
    62125}
    63126
    64127void OpticalFlowCompensated::UpdateFrom(const io_data *data) {
    65   OpticalFlowData *input=(OpticalFlowData *)data;
    66   if (!output) { //first pass
    67     output=new OpticalFlowData(this,input->MaxFeatures(),input->ObjectName()+"_filtered");
    68     previousTime=input->DataTime();
    69     return;
    70   }
    71 //  float kX=320/Euler::ToRadian(70); //TEST: only for default simuCameraGL. fov=70° and image width=320 => k=320*180/(70*pi)
    72 //  float kY=240/Euler::ToRadian(70); //TEST: only for default simuCameraGL. fov=70° and image height=240
    73   float kX=gyroGain->Value();
    74   float kY=gyroGain->Value();
    75   float deltaT=(input->DataTime()-previousTime)/(1000*1000*1000.);
    76   previousTime=input->DataTime();
    77 
    78   int delayedIndex=previousStepsAngularRatesIndex-gyroDelay->Value(); // Ahhh décalage, esprit canal...
    79   if (delayedIndex<0) delayedIndex+=10;
    80 
    81   if (!previousStepsAngularRates[previousStepsAngularRatesIndex]) {
    82     previousStepsAngularRates[previousStepsAngularRatesIndex]=new Vector3Df();
    83   }
    84   *previousStepsAngularRates[previousStepsAngularRatesIndex++]=ahrs->GetDatas()->GetAngularRates();
    85 
    86   if (!previousStepsAngularRates[delayedIndex]) return;
    87   float rotationFlowX=previousStepsAngularRates[delayedIndex]->y*deltaT*kY;
    88   float rotationFlowY=-previousStepsAngularRates[delayedIndex]->x*deltaT*kX;
    89   if (previousStepsAngularRatesIndex==10) previousStepsAngularRatesIndex=0;
    90   input->GetMutex();
    91   output->GetMutex();
    92 
    93   for (int i=0; i<input->NbFeatures(); i++) {
    94     if (!i) {
    95       firstPointDisplacement->SetValue(0,0,input->PointsB()[i].x-input->PointsA()[i].x);
    96       firstPointDisplacement->SetValue(0,1,input->PointsB()[i].y-input->PointsA()[i].y);
    97       firstPointDisplacement->SetValue(1,0,-rotationFlowX);
    98       firstPointDisplacement->SetValue(1,1,-rotationFlowY);
    99       firstPointDisplacement->SetValue(2,0,input->PointsB()[i].x-input->PointsA()[i].x-rotationFlowX);
    100       firstPointDisplacement->SetValue(2,1,input->PointsB()[i].y-input->PointsA()[i].y-rotationFlowY);
    101     }
    102     output->PointsA()[i].x=input->PointsA()[i].x;
    103     output->PointsA()[i].y=input->PointsA()[i].y;
    104     output->PointsB()[i].x=input->PointsB()[i].x-rotationFlowX;
    105     output->PointsB()[i].y=input->PointsB()[i].y-rotationFlowY;
    106   }
    107   output->SetNbFeatures(input->NbFeatures());
    108   output->SetFoundFeature(input->FoundFeature());
    109   output->SetFeatureError(input->FeatureError());
    110 
    111   output->ReleaseMutex();
    112   input->ReleaseMutex();
    113 
    114   output->SetDataTime(input->DataTime());
    115   ProcessUpdate(output);
     128    pimpl_->UpdateFrom(data);
     129    ProcessUpdate(pimpl_->output);
    116130}
    117131
    118   Matrix *OpticalFlowCompensated::GetFirstPointDisplacement() const {
    119     return firstPointDisplacement;
    120   }
    121132
    122133} // end namespace filter
  • trunk/lib/FlairVisionFilter/src/OpticalFlowCompensated.h

    r274 r338  
    1616#include <IODevice.h>
    1717#include <Object.h>
    18 #include <Vector3D.h>
    1918
    2019namespace flair {
    21   namespace core {
    22     class io_data;
    23     class Matrix;
    24   }
    2520  namespace gui {
    2621    class LayoutPosition;
    27     class SpinBox;
    28     class DoubleSpinBox;
    2922  }
    3023  namespace filter {
    3124    class Ahrs;
    3225    class OpticalFlow;
    33     class OpticalFlowData;
    3426  }
    3527}
     28
     29class OpticalFlowCompensated_impl;
    3630
    3731namespace flair { namespace filter {
     
    6155  ~OpticalFlowCompensated();
    6256
    63   void UpdateFrom(const core::io_data *data);
    64 
    65   core::Matrix *GetFirstPointDisplacement() const;
    6657private:
    67   OpticalFlowData *output;
    68   core::Time previousTime;
    69   const Ahrs *ahrs;
    70   core::Matrix *firstPointDisplacement;
    71   core::Vector3Df **previousStepsAngularRates;
    72   unsigned int previousStepsAngularRatesIndex;
    73   gui::SpinBox *gyroDelay;
    74   gui::DoubleSpinBox *gyroGain;
     58    void UpdateFrom(const core::io_data *data);
     59    OpticalFlowCompensated_impl *pimpl_;
     60 
    7561};
    7662
  • trunk/lib/FlairVisionFilter/src/OpticalFlowData.cpp

    r252 r338  
    1212
    1313#include "OpticalFlowData.h"
     14#include "VisionFilter.h"
     15#include <string.h>
    1416
    1517using std::string;
    1618
    17 namespace flair
    18 {
    19 namespace filter
    20 {
     19class OpticalFlowData_impl {
     20public:
     21    OpticalFlowData_impl(flair::filter::OpticalFlowData* self,uint32_t max_features) {
     22        this->self=self;
     23        this->max_features=max_features;
     24        nb_features=0;
     25        pointsA=(IntPoint*)malloc(max_features*sizeof(IntPoint));
     26        pointsB=(FloatPoint*)malloc(max_features*sizeof(FloatPoint));
    2127
    22 OpticalFlowData::OpticalFlowData(const Object* parent,uint32_t max_features,string name,uint32_t n): io_data(parent,name,n)
    23 {
    24     this->max_features=max_features;
    25     nb_features=0;
     28        found_features=(char*)malloc(max_features*sizeof(char));
     29        features_error=(uint32_t*)malloc(max_features*sizeof(uint32_t));
     30    }
    2631
    27     pointsA=(CvPoint *)cvAlloc(max_features*sizeof(CvPoint));
    28     pointsB=(CvPoint2D32f *)cvAlloc(max_features*sizeof(CvPoint2D32f));
     32    ~OpticalFlowData_impl() {
     33        free((char*)pointsA);
     34        free((char*)pointsB);
    2935
    30     found_features=(char *)cvAlloc(max_features*sizeof(char));
    31     features_error=(uint32_t*)cvAlloc(max_features*sizeof(uint32_t));
     36        free((char*)found_features);
     37        free((char*)features_error);
     38    }
     39   
     40    void Resize(uint32_t value) {
     41        IntPoint *new_pointsA;
     42        FloatPoint *new_pointsB;
     43        char *new_found_features;
     44        uint32_t *new_features_error;
     45
     46        new_pointsA=(IntPoint*)malloc(value*sizeof(IntPoint));
     47        new_pointsB=(FloatPoint*)malloc(value*sizeof(FloatPoint));
     48        new_found_features=(char*)malloc(value*sizeof(char));
     49        new_features_error=(uint32_t*)malloc(value*sizeof(uint32_t));
     50
     51        self->GetMutex();
     52        if(value>max_features) {
     53            memcpy(new_pointsA,pointsA,max_features*sizeof(IntPoint));
     54            memcpy(new_pointsB,pointsB,max_features*sizeof(FloatPoint));
     55            memcpy(new_found_features,found_features,max_features*sizeof(char));
     56            memcpy(new_features_error,features_error,max_features*sizeof(uint32_t));
     57        } else {
     58            memcpy(new_pointsA,pointsA,value*sizeof(IntPoint));
     59            memcpy(new_pointsB,pointsB,value*sizeof(FloatPoint));
     60            memcpy(new_found_features,found_features,value*sizeof(char));
     61            memcpy(new_features_error,features_error,value*sizeof(uint32_t));
     62            if(nb_features>value) nb_features=value; //si nb_features est le nombre de point qui ont effectivement une correspondande, ça me parait louche. Sauf si les points sont classés et que ceux qui ont une correpondance sont toujours au début
     63        }
     64        max_features=value;
     65        self->ReleaseMutex();
     66
     67        free((char*)pointsA);
     68        free((char*)pointsB);
     69        free((char*)found_features);
     70        free((char*)features_error);
     71
     72        pointsA=new_pointsA;
     73        pointsB=new_pointsB;
     74        found_features=new_found_features;
     75        features_error=new_features_error;
     76    }
     77       
     78    uint32_t max_features;
     79    uint32_t nb_features;
     80    IntPoint* pointsA;
     81    FloatPoint* pointsB;
     82    char *found_features;
     83    uint32_t *features_error;
     84
     85private:
     86    flair::filter::OpticalFlowData *self;
     87};
     88
     89namespace flair {
     90namespace filter {
     91
     92OpticalFlowData::OpticalFlowData(const Object* parent,uint32_t max_features,string name,uint32_t n): io_data(parent,name,n) {
     93    pimpl_=new OpticalFlowData_impl(this,max_features);
    3294}
    3395
    34 OpticalFlowData::~OpticalFlowData()
    35 {
    36     cvFree(&pointsA);
    37     cvFree(&pointsB);
    38 
    39     cvFree(&found_features);
    40     cvFree(&features_error);
     96OpticalFlowData::~OpticalFlowData() {
     97    delete pimpl_;
    4198}
    4299
    43 CvPoint* OpticalFlowData::PointsA(void) const
    44 {
    45     return pointsA;
     100IntPoint* OpticalFlowData::PointsA(void) const {
     101    return pimpl_->pointsA;
    46102}
    47103
    48 CvPoint2D32f* OpticalFlowData::PointsB(void) const
    49 {
    50     return pointsB;
     104FloatPoint* OpticalFlowData::PointsB(void) const {
     105    return pimpl_->pointsB;
    51106}
    52107
    53 char *OpticalFlowData::FoundFeature(void) const
    54 {
    55     return found_features;
     108char *OpticalFlowData::FoundFeature(void) const {
     109    return pimpl_->found_features;
    56110}
    57111
    58 uint32_t *OpticalFlowData::FeatureError(void) const
    59 {
    60     return features_error;
     112uint32_t *OpticalFlowData::FeatureError(void) const {
     113    return pimpl_->features_error;
    61114}
    62115
    63116// value is new max_features value
    64117void OpticalFlowData::Resize(uint32_t value) {
    65     CvPoint *new_pointsA;
    66     CvPoint2D32f *new_pointsB;
    67     char *new_found_features;
    68     uint32_t *new_features_error;
    69 
    70     new_pointsA=(CvPoint *)cvAlloc(value*sizeof(CvPoint));
    71     new_pointsB=(CvPoint2D32f *)cvAlloc(value*sizeof(CvPoint2D32f));
    72     new_found_features=(char *)cvAlloc(value*sizeof(char));
    73     new_features_error=(uint32_t *)cvAlloc(value*sizeof(uint32_t));
    74 
    75     GetMutex();
    76     if(value>max_features)
    77     {
    78         memcpy(new_pointsA,pointsA,max_features*sizeof(CvPoint));
    79         memcpy(new_pointsB,pointsB,max_features*sizeof(CvPoint2D32f));
    80         memcpy(new_found_features,found_features,max_features*sizeof(char));
    81         memcpy(new_features_error,features_error,max_features*sizeof(uint32_t));
    82     }
    83     else
    84     {
    85         memcpy(new_pointsA,pointsA,value*sizeof(CvPoint));
    86         memcpy(new_pointsB,pointsB,value*sizeof(CvPoint2D32f));
    87         memcpy(new_found_features,found_features,value*sizeof(char));
    88         memcpy(new_features_error,features_error,value*sizeof(uint32_t));
    89         if(nb_features>value) nb_features=value; //si nb_features est le nombre de point qui ont effectivement une correspondande, ça me parait louche. Sauf si les points sont classés et que ceux qui ont une correpondance sont toujours au début
    90     }
    91     max_features=value;
    92     ReleaseMutex();
    93 
    94     cvFree(&pointsA);
    95     cvFree(&pointsB);
    96     cvFree(&found_features);
    97     cvFree(&features_error);
    98 
    99     pointsA=new_pointsA;
    100     pointsB=new_pointsB;
    101     found_features=new_found_features;
    102     features_error=new_features_error;
     118    pimpl_->Resize(value);
    103119}
    104120
    105 void OpticalFlowData::RawRead(char* dst) const
    106 {
     121void OpticalFlowData::RawRead(char* dst) const {
    107122    Warn("non implementé\n");
    108123}
    109124
    110 void OpticalFlowData::SetPointsA(const CvPoint* points)
    111 {
     125void OpticalFlowData::SetPointsA(const IntPoint* points) {
    112126    GetMutex();
    113     memcpy(pointsA,points,max_features*sizeof(CvPoint));
     127    memcpy(pimpl_->pointsA,points,pimpl_->max_features*sizeof(IntPoint));
    114128    ReleaseMutex();
    115129}
    116130
    117 void OpticalFlowData::SetPointsB(const CvPoint2D32f* points)
    118 {
     131void OpticalFlowData::SetPointsB(const FloatPoint* points) {
    119132    GetMutex();
    120     memcpy(pointsB,points,max_features*sizeof(CvPoint2D32f));
     133    memcpy(pimpl_->pointsB,points,pimpl_->max_features*sizeof(FloatPoint));
    121134    ReleaseMutex();
    122135}
    123136
    124 void OpticalFlowData::SetFoundFeature(const char *found_features)
    125 {
     137void OpticalFlowData::SetFoundFeature(const char *found_features) {
    126138    GetMutex();
    127     memcpy(this->found_features,found_features,max_features*sizeof(char));
     139    memcpy(pimpl_->found_features,found_features,pimpl_->max_features*sizeof(char));
    128140    ReleaseMutex();
    129141}
    130142
    131 void OpticalFlowData::SetFeatureError(const uint32_t *features_error)
    132 {
     143void OpticalFlowData::SetFeatureError(const uint32_t *features_error) {
    133144    GetMutex();
    134     memcpy(this->features_error,features_error,max_features*sizeof(uint32_t));
     145    memcpy(pimpl_->features_error,features_error,pimpl_->max_features*sizeof(uint32_t));
    135146    ReleaseMutex();
    136147}
    137148
    138 uint32_t OpticalFlowData::MaxFeatures(void) const
    139 {
    140     return max_features;
     149uint32_t OpticalFlowData::MaxFeatures(void) const {
     150    return pimpl_->max_features;
    141151}
    142152
    143 uint32_t OpticalFlowData::NbFeatures(void) const
    144 {
    145     return nb_features;
     153uint32_t OpticalFlowData::NbFeatures(void) const {
     154    return pimpl_->nb_features;
    146155}
    147156
    148 void OpticalFlowData::SetNbFeatures(uint32_t value)
    149 {
     157void OpticalFlowData::SetNbFeatures(uint32_t value) {
    150158    GetMutex();
    151     nb_features=value;
     159    pimpl_->nb_features=value;
    152160    ReleaseMutex();
    153161}
  • trunk/lib/FlairVisionFilter/src/OpticalFlowData.h

    r252 r338  
    1010#define OPTICALFLOWDATA_H
    1111
    12 #include <cxcore.h>
    1312#include <io_data.h>
     13
     14//TODO: use flair::core::Vector2D
     15typedef struct IntPoint {
     16    int x;
     17    int y;
     18}
     19IntPoint;
     20
     21typedef struct FloatPoint {
     22    float x;
     23    float y;
     24}
     25FloatPoint;
     26
     27class OpticalFlowData_impl;
    1428
    1529namespace flair { namespace filter {
     
    5367        *
    5468    */
    55         CvPoint* PointsA(void) const;
     69        IntPoint* PointsA(void) const;
    5670
    5771        /*!
     
    5973        *
    6074        */
    61         CvPoint2D32f* PointsB(void) const;
     75        FloatPoint* PointsB(void) const;
    6276
    6377        /*!
     
    7993        * \param points points
    8094        */
    81         void SetPointsA(const CvPoint *points);
     95        void SetPointsA(const IntPoint *points);
    8296
    8397        /*!
     
    86100        * \param points points
    87101        */
    88         void SetPointsB(const CvPoint2D32f *points);
     102        void SetPointsB(const FloatPoint *points);
    89103
    90104        /*!
     
    139153        */
    140154        void RawRead(char *dst) const;
    141 
     155       
    142156    private:
    143         uint32_t max_features;
    144         uint32_t nb_features;
    145         CvPoint* pointsA;
    146         CvPoint2D32f* pointsB;
    147         char *found_features;
    148         uint32_t *features_error;
    149157        Type dataType;
     158        OpticalFlowData_impl *pimpl_;
    150159    };
    151160} // end namespace filter
  • trunk/lib/FlairVisionFilter/src/OpticalFlowSpeed.cpp

    r318 r338  
    2626using namespace flair::gui;
    2727
     28class OpticalFlowSpeed_impl {
     29public:
     30    OpticalFlowSpeed_impl(flair::filter::OpticalFlowSpeed *self,const LayoutPosition* position,string name):output(0) {
     31        this->self=self;
     32        MatrixDescriptor* desc=new MatrixDescriptor(2,1);
     33        desc->SetElementName(0,0,"vx");
     34        desc->SetElementName(1,0,"vy");
     35        output=new Matrix(self,desc,floatType,name);
     36        delete desc;
     37
     38        self->AddDataToLog(output);
     39
     40        GroupBox* reglages_groupbox=new GroupBox(position,name);
     41        quality=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"optical flow quality:",0.,100.,1.,1,5.);
     42        weightedAverage=new CheckBox(reglages_groupbox->LastRowLastCol(),"Weighted average", true);
     43        timeMultiplication=new CheckBox(reglages_groupbox->LastRowLastCol(),"Time multiplication", true);
     44
     45       
     46    }
     47
     48    ~OpticalFlowSpeed_impl() {
     49       
     50    }
     51   
     52    void UpdateFrom(const io_data *data){
     53        flair::filter::OpticalFlowData *input=(flair::filter::OpticalFlowData*)data;
     54        float deplx,deply;
     55        float nb_depl=0;
     56
     57        deplx=0;
     58        deply=0;
     59
     60        //error is 0 if perfect match and 7x7x255x255 at worst
     61        float qualityThreshold=quality->Value()/100.*7*7*255*255;
     62        input->GetMutex();
     63        int nbUsedPoints=0;
     64        for(int i=0;i<input->NbFeatures();i++) {
     65            //if point is found in both images and quality is sufficient
     66            if((input->FoundFeature()[i]!=0)&&(input->FeatureError()[i]<qualityThreshold)) {
     67              nbUsedPoints++;
     68              float qualityFactor=1.0;
     69              if (weightedAverage->Value()) {
     70                //displacement is weigthed by quality
     71                qualityFactor/=(1+input->FeatureError()[i]);
     72              }
     73                deplx+=(input->PointsB()[i].x-input->PointsA()[i].x)*qualityFactor;
     74                deply+=(input->PointsB()[i].y-input->PointsA()[i].y)*qualityFactor;
     75                nb_depl+=qualityFactor;
     76            }
     77        }
     78        input->ReleaseMutex();
     79        float deltaT;
     80        if (timeMultiplication->Value()) deltaT=(float)(data->DataTime()-output->DataTime())/(1000.*1000.*1000.);
     81        else deltaT=1.;
     82        output->SetDataTime(data->DataTime());
     83
     84        if(nb_depl!=0) {
     85    //Printf("Nombre de points=%d/%d (nb_depl=%f,pondération=%d), deltaT=%f\n",nbUsedPoints,input->NbFeatures(),nb_depl,weightedAverage->Value(),deltaT);
     86            output->SetValue(0,0,deplx/(nb_depl*deltaT));
     87            output->SetValue(1,0,deply/(nb_depl*deltaT));
     88        }
     89    };
     90   
     91    Matrix *output;
     92   
     93private:
     94    flair::filter::OpticalFlowSpeed *self;   
     95    DoubleSpinBox *quality;
     96    CheckBox *weightedAverage;
     97    CheckBox *timeMultiplication;
     98};
     99
    28100namespace flair { namespace filter {
    29101
    30102OpticalFlowSpeed::OpticalFlowSpeed(const IODevice* parent, const LayoutPosition* position,string name) : IODevice(parent,name) {
    31   MatrixDescriptor* desc=new MatrixDescriptor(2,1);
    32   desc->SetElementName(0,0,"vx");
    33   desc->SetElementName(1,0,"vy");
    34   output=new Matrix(this,desc,floatType,name);
    35   delete desc;
    36 
    37   AddDataToLog(output);
    38 
    39   GroupBox* reglages_groupbox=new GroupBox(position,name);
    40   quality=new DoubleSpinBox(reglages_groupbox->LastRowLastCol(),"optical flow quality:",0.,100.,1.,1,5.);
    41   weightedAverage=new CheckBox(reglages_groupbox->LastRowLastCol(),"Weighted average", true);
    42   timeMultiplication=new CheckBox(reglages_groupbox->LastRowLastCol(),"Time multiplication", true);
     103    pimpl_=new OpticalFlowSpeed_impl(this,position,name);
    43104}
    44105
    45 OpticalFlowSpeed::~OpticalFlowSpeed(void) { }
     106OpticalFlowSpeed::~OpticalFlowSpeed(void) {
     107    delete pimpl_;
     108}
    46109
    47110void OpticalFlowSpeed::UpdateFrom(const io_data *data) {
    48     OpticalFlowData *input=(OpticalFlowData*)data;
    49     float deplx,deply;
    50     float nb_depl=0;
    51 
    52     deplx=0;
    53     deply=0;
    54 
    55     //error is 0 if perfect match and 7x7x255x255 at worst
    56     float qualityThreshold=quality->Value()/100.*7*7*255*255;
    57     input->GetMutex();
    58     int nbUsedPoints=0;
    59     for(int i=0;i<input->NbFeatures();i++) {
    60         //if point is found in both images and quality is sufficient
    61         if((input->FoundFeature()[i]!=0)&&(input->FeatureError()[i]<qualityThreshold)) {
    62           nbUsedPoints++;
    63           float qualityFactor=1.0;
    64           if (weightedAverage->Value()) {
    65             //displacement is weigthed by quality
    66             qualityFactor/=(1+input->FeatureError()[i]);
    67           }
    68             deplx+=(input->PointsB()[i].x-input->PointsA()[i].x)*qualityFactor;
    69             deply+=(input->PointsB()[i].y-input->PointsA()[i].y)*qualityFactor;
    70             nb_depl+=qualityFactor;
    71         }
    72     }
    73     input->ReleaseMutex();
    74     float deltaT;
    75     if (timeMultiplication->Value()) deltaT=(float)(data->DataTime()-output->DataTime())/(1000.*1000.*1000.);
    76     else deltaT=1.;
    77     output->SetDataTime(data->DataTime());
    78 
    79     if(nb_depl!=0) {
    80 //Printf("Nombre de points=%d/%d (nb_depl=%f,pondération=%d), deltaT=%f\n",nbUsedPoints,input->NbFeatures(),nb_depl,weightedAverage->Value(),deltaT);
    81         output->SetValue(0,0,deplx/(nb_depl*deltaT));
    82         output->SetValue(1,0,deply/(nb_depl*deltaT));
    83     }
    84 //    output->SetDataTime(data->DataTime());
    85     ProcessUpdate(output);
     111    pimpl_->UpdateFrom(data);
     112    ProcessUpdate(pimpl_->output);
    86113}
    87114
    88115float OpticalFlowSpeed::Vx(void) const {
    89     return output->Value(0,0);
     116    return pimpl_->output->Value(0,0);
    90117}
    91118
    92119float OpticalFlowSpeed::Vy(void) const {
    93     return output->Value(1,0);
     120    return pimpl_->output->Value(1,0);
    94121}
    95122
    96123core::Matrix *OpticalFlowSpeed::Output() const {
    97     return output;
     124    return pimpl_->output;
    98125}
    99126} // end namespace filter
  • trunk/lib/FlairVisionFilter/src/OpticalFlowSpeed.h

    r274 r338  
    2121  namespace gui {
    2222    class LayoutPosition;
    23     class SpinBox;
    24     class DoubleSpinBox;
    25     class CheckBox;
    2623  }
    2724}
     25
     26class OpticalFlowSpeed_impl;
    2827
    2928namespace flair
     
    8887            */
    8988            void UpdateFrom(const core::io_data *data);
    90 
    91             core::Matrix *output;
    92       gui::DoubleSpinBox *quality;
    93       gui::CheckBox *weightedAverage;
    94       gui::CheckBox *timeMultiplication;
     89           
     90            OpticalFlowSpeed_impl *pimpl_;
    9591};
    9692
  • trunk/lib/FlairVisionFilter/src/Sobel.cpp

    r330 r338  
    1313
    1414#include "Sobel.h"
    15 #include <cvimage.h>
     15#include "VisionFilter.h"
     16#include <Image.h>
    1617#include <Layout.h>
    1718#include <GroupBox.h>
     
    2324using namespace flair::gui;
    2425
     26class Sobel_impl {
     27public:
     28    Sobel_impl(flair::filter::Sobel *self,const LayoutPosition* position,string name):output(0) {
     29        this->self=self;
     30       
     31        GroupBox* reglages_groupbox=new GroupBox(position,name);
     32        dx=new SpinBox(reglages_groupbox->NewRow(),"dx:",0,1,1,1);
     33        dy=new SpinBox(reglages_groupbox->NewRow(),"dy:",0,1,1,1);
     34
     35        Printf("TODO: IODevice doit faire un check de GetInputDataType et GetOutputDataType\n");
     36        //Image devrait accepter un type dans son constructeur pour construire un type identique
     37        try{
     38            Image::Type const &imageType=dynamic_cast<Image::Type const &>(((IODevice*)(self->Parent()))->GetOutputDataType());
     39            if(imageType.GetFormat()==Image::Type::Format::Gray) {
     40                output=new Image(self,imageType.GetWidth(),imageType.GetHeight(),imageType.GetFormat(),"sobel");
     41                //inIplImage = (IplImage*)AllocFunction(sizeof(IplImage));
     42                //outIplImage = (IplImage*)AllocFunction(sizeof(IplImage));
     43            } else {
     44                self->Err("input image is not gray\n");
     45            }
     46
     47        } catch(std::bad_cast& bc) {
     48            self->Err("io type mismatch\n");
     49        }
     50    }
     51
     52    ~Sobel_impl() {
     53        //FreeFunction((char*)(inIplImage));
     54        //FreeFunction((char*)(outIplImage));
     55    }
     56   
     57    void UpdateFrom(const io_data *data){
     58        Image *image=(Image*)data;
     59/*
     60        data->GetMutex();
     61        inIplImage->width=image->GetDataType().GetWidth();
     62        inIplImage->height=image->GetDataType().GetHeight();
     63        inIplImage->imageData=image->buffer;
     64        inIplImage->imageSize=image->GetDataType().GetSize();
     65       
     66        output->GetMutex();
     67        outIplImage->width=output->GetDataType().GetWidth();
     68        outIplImage->height=output->GetDataType().GetHeight();
     69        outIplImage->imageData=output->buffer;
     70        outIplImage->imageSize=output->GetDataType().GetSize();
     71               
     72        dspSobel(inIplImage,outIplImage,dx->Value(),dy->Value());
     73        output->ReleaseMutex();
     74        data->ReleaseMutex();
     75*/
     76        output->SetDataTime(data->DataTime());
     77
     78    };
     79   
     80    Image *output;
     81
     82private:
     83    flair::filter::Sobel *self;
     84    SpinBox *dx;
     85    SpinBox *dy;
     86    //IplImage *inIplImage,*outIplImage;
     87};
     88
    2589namespace flair { namespace filter {
    2690
    27 Sobel::Sobel(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name),output(0) {
    28     GroupBox* reglages_groupbox=new GroupBox(position,name);
    29     dx=new SpinBox(reglages_groupbox->NewRow(),"dx:",0,1,1,1);
    30     dy=new SpinBox(reglages_groupbox->NewRow(),"dy:",0,1,1,1);
    31 
    32     Printf("TODO: IODevice doit faire un check de GetInputDataType et GetOutputDataType\n");
    33     //cvimage devrait accepter un type dans son constructeur pour construire un type identique
    34     try{
    35         cvimage::Type const &imageType=dynamic_cast<cvimage::Type const &>(parent->GetOutputDataType());
    36         if(imageType.GetFormat()==cvimage::Type::Format::Gray) {
    37             output=new cvimage(this,imageType.GetWidth(),imageType.GetHeight(),imageType.GetFormat(),"sobel");
    38         } else {
    39             Err("input image is not gray\n");
    40             return;
    41         }
    42 
    43     } catch(std::bad_cast& bc) {
    44         Err("io type mismatch\n");
    45         return;
    46     }
    47     SetIsReady(true);
     91Sobel::Sobel(const IODevice* parent,const LayoutPosition* position,string name) : IODevice(parent,name) {
     92   pimpl_=new Sobel_impl(this,position,name);
    4893}
    4994
    5095Sobel::~Sobel(void) {
     96    delete pimpl_;
    5197}
    5298
    53 cvimage* Sobel::Output(void) {
    54     return output;
     99Image* Sobel::Output(void) {
     100    return pimpl_->output;
    55101}
    56102
    57 void Sobel::UpdateFrom(const io_data *data) {/*
    58     cvimage *cvImage=(cvimage*)data;
    59     IplImage *gimg=cvImage->img;
    60 
    61     data->GetMutex();
    62     output->GetMutex();
    63     dspSobel(gimg,output->img,dx->Value(),dy->Value());
    64     output->ReleaseMutex();
    65     data->ReleaseMutex();
    66 */
    67     output->SetDataTime(data->DataTime());
    68     ProcessUpdate(output);
     103void Sobel::UpdateFrom(const io_data *data) {
     104    pimpl_->UpdateFrom(data);
     105    ProcessUpdate(pimpl_->output);
    69106}
    70107
    71108DataType const &Sobel::GetOutputDataType() const {
    72     if(output!=NULL) {
    73         return output->GetDataType();
     109    if(pimpl_->output!=NULL) {
     110        return pimpl_->output->GetDataType();
    74111    } else {
    75112        return dummyType;
  • trunk/lib/FlairVisionFilter/src/Sobel.h

    r122 r338  
    1212
    1313#include <IODevice.h>
    14 #include <cv.h>
     14
    1515
    1616namespace flair {
    1717    namespace core {
    18         class cvimage;
     18        class Image;
    1919    }
    2020    namespace gui {
    2121        class LayoutPosition;
    22         class SpinBox;
    2322    }
    2423}
     24
     25 class Sobel_impl;
    2526
    2627namespace flair { namespace filter {
     
    5859            * \return the output image
    5960            */
    60             core::cvimage* Output(void);
     61            core::Image* Output(void);
    6162
    6263            core::DataType const &GetOutputDataType() const;
     
    6465        private:
    6566            void UpdateFrom(const core::io_data *data);
    66             core::cvimage *output;
    67             gui::SpinBox *dx;
    68             gui::SpinBox *dy;
     67            Sobel_impl *pimpl_;
    6968    };
    7069} // end namespace filter
  • trunk/lib/FlairVisionFilter/src/VisionFilter.cpp

    r124 r338  
    11#include "VisionFilter.h"
    22#include "compile_info.h"
    3 #include <highgui.h>
    43
    54static void constructor() __attribute__((constructor));
     
    98}
    109
    11 void saveToJpeg(IplImage* src_img,std::string filename,PictureFormat_t input_format,PictureFormat_t output_format,unsigned char compression_level) {
    12         if(!cvSaveImage(filename.c_str(),src_img)) printf("Could not save.\n");
     10void saveToJpeg(flair::core::Image* src_img,std::string filename,PictureFormat_t input_format,PictureFormat_t output_format,unsigned char compression_level) {
     11        printf("saveToJpeg todo\n");
     12    //if(!cvSaveImage(filename.c_str(),src_img)) printf("Could not save.\n");
    1313}
    1414
     
    2121 
    2222}
     23
     24char* AllocFunction(ssize_t size) {
     25    return (char*)malloc(size);
     26}
     27
     28void FreeFunction(char* buffer) {
     29    free(buffer);
     30}
  • trunk/lib/FlairVisionFilter/src/VisionFilter.h

    r125 r338  
    11#include <string>
    2 #include <cxtypes.h>
     2
     3namespace flair {
     4    namespace core {
     5        class Image;
     6    }
     7}
     8
    39
    410/*!
     
    1925                } ;
    2026                                                               
    21 void saveToJpeg(IplImage* src_img,std::string filename,PictureFormat_t input_format,PictureFormat_t output_format,unsigned char compression_level=95);
     27void saveToJpeg(flair::core::Image* src_img,std::string filename,PictureFormat_t input_format,PictureFormat_t output_format,unsigned char compression_level=95);
     28
     29//for amrv7a, these functions use CMEM
     30char* AllocFunction(ssize_t size);
     31void FreeFunction(char* buffer);
Note: See TracChangeset for help on using the changeset viewer.