changeset 3503:46cf170ba121

ImageProcessing::SeparableConvolution()
author Sebastien Jodogne <s.jodogne@gmail.com>
date Tue, 27 Aug 2019 12:10:13 +0200
parents c160eafc42a9
children 18566f9e1831
files Core/Images/ImageProcessing.cpp Core/Images/ImageProcessing.h UnitTestsSources/ImageProcessingTests.cpp
diffstat 3 files changed, 481 insertions(+), 3 deletions(-)
--- a/Core/Images/ImageProcessing.cpp	Mon Aug 26 10:23:51 2019 +0200
+++ b/Core/Images/ImageProcessing.cpp	Tue Aug 27 12:10:13 2019 +0200
@@ -1765,4 +1765,266 @@
         throw OrthancException(ErrorCode_NotImplemented);
     }
   }
+
+
+  // This is a slow implementation of horizontal convolution on a single
+  // channel; it explicitly checks for out-of-image values
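+  // For a pixel (x, y), it computes
+  //   sum_k horizontal[k] * source(x - horizontalAnchor + k, y)
+  // where samples falling to the left (resp. right) of the image are
+  // replaced by "leftBorder" (resp. "rightBorder").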
+  template <typename RawPixel, unsigned int ChannelsCount>
+  static float GetHorizontalConvolutionFloatSecure(const Orthanc::ImageAccessor& source,
+                                                   const std::vector<float>& horizontal,
+                                                   size_t horizontalAnchor,
+                                                   unsigned int x,
+                                                   unsigned int y,
+                                                   float leftBorder,
+                                                   float rightBorder,
+                                                   unsigned int channel)
+  {
+    const RawPixel* row = reinterpret_cast<const RawPixel*>(source.GetConstRow(y)) + channel;
+
+    float p = 0;
+
+    for (unsigned int k = 0; k < horizontal.size(); k++)
+    {
+      float value;
+
+      if (x + k < horizontalAnchor)   // Negation of "x - horizontalAnchor + k >= 0"
+      {
+        value = leftBorder;
+      }
+      else if (x + k >= source.GetWidth() + horizontalAnchor)   // Negation of "x - horizontalAnchor + k < width"
+      {
+        value = rightBorder;
+      }
+      else
+      {
+        // The value lies within the image
+        value = row[(x - horizontalAnchor + k) * ChannelsCount];
+      }
+
+      p += value * horizontal[k];
+    }
+
+    return p;
+  }
+  
+
+  
+  // This is an implementation of separable convolution that uses
+  // floating-point arithmetic and an intermediate Float32 image.
+  // Out-of-image values are replaced by the nearest border value.
+  // Further optimization is possible.
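+  //
+  // Separability: convolving with the 2D kernel "horizontal[i] * vertical[j]"
+  // amounts to convolving each row with "horizontal", then each column of
+  // the intermediate image with "vertical", which costs
+  // O(|horizontal| + |vertical|) instead of O(|horizontal| * |vertical|)
+  // multiplications per pixel.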
+  template <typename RawPixel, unsigned int ChannelsCount>
+  static void SeparableConvolutionFloat(ImageAccessor& image /* inplace */,
+                                        const std::vector<float>& horizontal,
+                                        size_t horizontalAnchor,
+                                        const std::vector<float>& vertical,
+                                        size_t verticalAnchor,
+                                        float normalization)
+  {
+    const unsigned int width = image.GetWidth();
+    const unsigned int height = image.GetHeight();
+    
+
+    /**
+     * Horizontal convolution
+     **/
+
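+    // The intermediate image stores the result of the horizontal pass as
+    // raw floats, interleaving the channels ("ChannelsCount" floats per
+    // pixel), hence its width of "ChannelsCount * width"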
+    Image tmp(PixelFormat_Float32, ChannelsCount * width, height, false);
+
+    for (unsigned int y = 0; y < height; y++)
+    {
+      const RawPixel* row = reinterpret_cast<const RawPixel*>(image.GetConstRow(y));
+
+      float leftBorder[ChannelsCount], rightBorder[ChannelsCount];
+      
+      for (unsigned int c = 0; c < ChannelsCount; c++)
+      {
+        leftBorder[c] = row[c];
+        rightBorder[c] = row[ChannelsCount * (width - 1) + c];
+      }
+
+      float* p = static_cast<float*>(tmp.GetRow(y));
+
+      if (width < horizontal.size())
+      {
+        // The full kernel cannot fit within the image: fall back to the slow, secure implementation for every pixel
+        for (unsigned int x = 0; x < width; x++)
+        {
+          for (unsigned int c = 0; c < ChannelsCount; c++, p++)
+          {
+            *p = GetHorizontalConvolutionFloatSecure<RawPixel, ChannelsCount>
+              (image, horizontal, horizontalAnchor, x, y, leftBorder[c], rightBorder[c], c);
+          }
+        }
+      }
+      else
+      {
+        // Deal with the left border
+        for (unsigned int x = 0; x < horizontalAnchor; x++)
+        {
+          for (unsigned int c = 0; c < ChannelsCount; c++, p++)
+          {
+            *p = GetHorizontalConvolutionFloatSecure<RawPixel, ChannelsCount>
+              (image, horizontal, horizontalAnchor, x, y, leftBorder[c], rightBorder[c], c);
+          }
+        }
+
+        // Deal with the central portion of the image (all pixel values
+        // scanned by the kernel lie inside the image)
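+        // Here, "x" is the leftmost source column read by the kernel;
+        // since "p" has already advanced past the left border, the
+        // corresponding output column is "x + horizontalAnchor".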
+
+        for (unsigned int x = 0; x < width - horizontal.size() + 1; x++)
+        {
+          for (unsigned int c = 0; c < ChannelsCount; c++, p++)
+          {
+            *p = 0;
+            for (unsigned int k = 0; k < horizontal.size(); k++)
+            {
+              *p += static_cast<float>(row[(x + k) * ChannelsCount + c]) * horizontal[k];
+            }
+          }
+        }
+
+        // Deal with the right border
+        for (unsigned int x = horizontalAnchor + width - horizontal.size() + 1; x < width; x++)
+        {
+          for (unsigned int c = 0; c < ChannelsCount; c++, p++)
+          {
+            *p = GetHorizontalConvolutionFloatSecure<RawPixel, ChannelsCount>
+              (image, horizontal, horizontalAnchor, x, y, leftBorder[c], rightBorder[c], c);
+          }
+        }
+      }
+    }
+
+
+    /**
+     * Vertical convolution
+     **/
+
+    std::vector<const float*> rows(vertical.size());
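+    // For each output row "y", "rows" caches pointers to the
+    // "vertical.size()" rows of the intermediate image that are read by
+    // the kernel, clamping out-of-image rows to the top or bottom border.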
+
+    for (unsigned int y = 0; y < height; y++)
+    {
+      for (unsigned int k = 0; k < vertical.size(); k++)
+      {
+        if (y + k < verticalAnchor)   // Negation of "y - verticalAnchor + k >= 0"
+        {
+          rows[k] = reinterpret_cast<const float*>(tmp.GetConstRow(0));   // Use top border
+        }
+        else if (y + k >= height + verticalAnchor)   // Negation of "y - verticalAnchor + k < height"
+        {
+          rows[k] = reinterpret_cast<const float*>(tmp.GetConstRow(height - 1));  // Use bottom border
+        }
+        else
+        {
+          rows[k] = reinterpret_cast<const float*>(tmp.GetConstRow(y + k - verticalAnchor));
+        }
+      }
+
+      RawPixel* p = reinterpret_cast<RawPixel*>(image.GetRow(y));
+        
+      for (unsigned int x = 0; x < width; x++)
+      {
+        for (unsigned int c = 0; c < ChannelsCount; c++, p++)
+        {
+          float accumulator = 0;
+        
+          for (unsigned int k = 0; k < vertical.size(); k++)
+          {
+            accumulator += rows[k][ChannelsCount * x + c] * vertical[k];
+          }
+
+          accumulator *= normalization;
+
+          if (accumulator <= static_cast<float>(std::numeric_limits<RawPixel>::min()))
+          {
+            *p = std::numeric_limits<RawPixel>::min();
+          }
+          else if (accumulator >= static_cast<float>(std::numeric_limits<RawPixel>::max()))
+          {
+            *p = std::numeric_limits<RawPixel>::max();
+          }
+          else
+          {
+            *p = static_cast<RawPixel>(accumulator);
+          }
+        }
+      }
+    }
+  }
+
+
+  void ImageProcessing::SeparableConvolution(ImageAccessor& image /* inplace */,
+                                             const std::vector<float>& horizontal,
+                                             size_t horizontalAnchor,
+                                             const std::vector<float>& vertical,
+                                             size_t verticalAnchor)
+  {
+    if (horizontal.size() == 0 ||
+        vertical.size() == 0 ||
+        horizontalAnchor >= horizontal.size() ||
+        verticalAnchor >= vertical.size())
+    {
+      throw OrthancException(ErrorCode_ParameterOutOfRange);
+    }
+    
+    if (image.GetWidth() == 0 ||
+        image.GetHeight() == 0)
+    {
+      return;
+    }
+    
+    /**
+     * Compute normalization
+     **/
+    
+    float sumHorizontal = 0;
+    for (size_t i = 0; i < horizontal.size(); i++)
+    {
+      sumHorizontal += horizontal[i];
+    }
+    
+    float sumVertical = 0;
+    for (size_t i = 0; i < vertical.size(); i++)
+    {
+      sumVertical += vertical[i];
+    }
+
+    if (fabsf(sumHorizontal) <= std::numeric_limits<float>::epsilon() ||
+        fabsf(sumVertical) <= std::numeric_limits<float>::epsilon())
+    {
+      throw OrthancException(ErrorCode_ParameterOutOfRange, "Singular convolution kernel");
+    }      
+
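+    // Dividing by "sumHorizontal * sumVertical" normalizes the total
+    // weight of the separable kernel to 1. For instance, with the
+    // binomial kernel [1, 4, 6, 4, 1] used by SmoothGaussian5x5() below,
+    // both sums equal 16 and the normalization factor is 1/256.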
+    const float normalization = 1.0f / (sumHorizontal * sumVertical);
+
+    switch (image.GetFormat())
+    {
+      case PixelFormat_Grayscale8:
+        SeparableConvolutionFloat<uint8_t, 1u>
+          (image, horizontal, horizontalAnchor, vertical, verticalAnchor, normalization);
+        break;
+
+      case PixelFormat_RGB24:
+        SeparableConvolutionFloat<uint8_t, 3u>
+          (image, horizontal, horizontalAnchor, vertical, verticalAnchor, normalization);
+        break;
+
+      default:
+        throw OrthancException(ErrorCode_NotImplemented);
+    }
+  }
+
+
+  void ImageProcessing::SmoothGaussian5x5(ImageAccessor& image)
+  {
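+    // The binomial kernel [1, 4, 6, 4, 1] approximates a Gaussian; its
+    // outer product with itself yields the classical 5x5 Gaussian
+    // smoothing kernel, whose total weight is 256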
+    std::vector<float> kernel(5);
+    kernel[0] = 1;
+    kernel[1] = 4;
+    kernel[2] = 6;
+    kernel[3] = 4;
+    kernel[4] = 1;
+
+    SeparableConvolution(image, kernel, 2, kernel, 2);
+  }
 }
--- a/Core/Images/ImageProcessing.h	Mon Aug 26 10:23:51 2019 +0200
+++ b/Core/Images/ImageProcessing.h	Tue Aug 27 12:10:13 2019 +0200
@@ -148,5 +148,13 @@
     void FlipX(ImageAccessor& image);
 
     void FlipY(ImageAccessor& image);
+
+    void SeparableConvolution(ImageAccessor& image /* inplace */,
+                              const std::vector<float>& horizontal,
+                              size_t horizontalAnchor,
+                              const std::vector<float>& vertical,
+                              size_t verticalAnchor);
+
+    void SmoothGaussian5x5(ImageAccessor& image);
   }
 }
--- a/UnitTestsSources/ImageProcessingTests.cpp	Mon Aug 26 10:23:51 2019 +0200
+++ b/UnitTestsSources/ImageProcessingTests.cpp	Tue Aug 27 12:10:13 2019 +0200
@@ -212,7 +212,7 @@
   points.push_back(ImageProcessing::ImagePoint(1,5));
   points.push_back(ImageProcessing::ImagePoint(5,5));
 
-  Orthanc::ImageProcessing::FillPolygon(image, points, 255);
+  ImageProcessing::FillPolygon(image, points, 255);
 
   // outside polygon
   ASSERT_FLOAT_EQ(128, TestFixture::ImageTraits::GetFloatPixel(image, 0, 0));
@@ -239,7 +239,7 @@
   points.push_back(ImageProcessing::ImagePoint(image.GetWidth(),image.GetHeight()));
   points.push_back(ImageProcessing::ImagePoint(0,image.GetHeight()));
 
-  ASSERT_THROW(Orthanc::ImageProcessing::FillPolygon(image, points, 255), Orthanc::OrthancException);
+  ASSERT_THROW(ImageProcessing::FillPolygon(image, points, 255), OrthancException);
 }
 
 TYPED_TEST(TestIntegerImageTraits, FillPolygonFullImage)
@@ -254,8 +254,216 @@
   points.push_back(ImageProcessing::ImagePoint(image.GetWidth() - 1,image.GetHeight() - 1));
   points.push_back(ImageProcessing::ImagePoint(0,image.GetHeight() - 1));
 
-  Orthanc::ImageProcessing::FillPolygon(image, points, 255);
+  ImageProcessing::FillPolygon(image, points, 255);
 
   ASSERT_FLOAT_EQ(255, TestFixture::ImageTraits::GetFloatPixel(image, 0, 0));
   ASSERT_FLOAT_EQ(255, TestFixture::ImageTraits::GetFloatPixel(image, image.GetWidth() - 1, image.GetHeight() - 1));
 }
+
+
+
+
+static void SetGrayscale8Pixel(ImageAccessor& image,
+                               unsigned int x,
+                               unsigned int y,
+                               uint8_t value)
+{
+  ImageTraits<PixelFormat_Grayscale8>::SetPixel(image, value, x, y);
+}
+
+static bool TestGrayscale8Pixel(const ImageAccessor& image,
+                                unsigned int x,
+                                unsigned int y,
+                                uint8_t value)
+{
+  PixelTraits<PixelFormat_Grayscale8>::PixelType p;
+  ImageTraits<PixelFormat_Grayscale8>::GetPixel(p, image, x, y);
+  return p == value;
+}
+
+static void SetRGB24Pixel(ImageAccessor& image,
+                          unsigned int x,
+                          unsigned int y,
+                          uint8_t red,
+                          uint8_t green,
+                          uint8_t blue)
+{
+  PixelTraits<PixelFormat_RGB24>::PixelType p;
+  p.red_ = red;
+  p.green_ = green;
+  p.blue_ = blue;
+  ImageTraits<PixelFormat_RGB24>::SetPixel(image, p, x, y);
+}
+
+static bool TestRGB24Pixel(const ImageAccessor& image,
+                           unsigned int x,
+                           unsigned int y,
+                           uint8_t red,
+                           uint8_t green,
+                           uint8_t blue)
+{
+  PixelTraits<PixelFormat_RGB24>::PixelType p;
+  ImageTraits<PixelFormat_RGB24>::GetPixel(p, image, x, y);
+  return (p.red_ == red &&
+          p.green_ == green &&
+          p.blue_ == blue);
+}
+
+
+TEST(ImageProcessing, FlipGrayscale8)
+{
+  {
+    Image image(PixelFormat_Grayscale8, 0, 0, false);
+    ImageProcessing::FlipX(image);
+    ImageProcessing::FlipY(image);
+  }
+
+  {
+    Image image(PixelFormat_Grayscale8, 1, 1, false);
+    SetGrayscale8Pixel(image, 0, 0, 128);
+    ImageProcessing::FlipX(image);
+    ImageProcessing::FlipY(image);
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 0, 0, 128));
+  }
+
+  {
+    Image image(PixelFormat_Grayscale8, 3, 2, false);
+    SetGrayscale8Pixel(image, 0, 0, 10);
+    SetGrayscale8Pixel(image, 1, 0, 20);
+    SetGrayscale8Pixel(image, 2, 0, 30);
+    SetGrayscale8Pixel(image, 0, 1, 40);
+    SetGrayscale8Pixel(image, 1, 1, 50);
+    SetGrayscale8Pixel(image, 2, 1, 60);
+
+    ImageProcessing::FlipX(image);
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 0, 0, 30));
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 1, 0, 20));
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 2, 0, 10));
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 0, 1, 60));
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 1, 1, 50));
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 2, 1, 40));
+
+    ImageProcessing::FlipY(image);
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 0, 0, 60));
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 1, 0, 50));
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 2, 0, 40));
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 0, 1, 30));
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 1, 1, 20));
+    ASSERT_TRUE(TestGrayscale8Pixel(image, 2, 1, 10));
+  }
+}
+
+
+
+TEST(ImageProcessing, FlipRGB24)
+{
+  Image image(PixelFormat_RGB24, 2, 2, false);
+  SetRGB24Pixel(image, 0, 0, 10, 100, 110);
+  SetRGB24Pixel(image, 1, 0, 20, 100, 110);
+  SetRGB24Pixel(image, 0, 1, 30, 100, 110);
+  SetRGB24Pixel(image, 1, 1, 40, 100, 110);
+
+  ImageProcessing::FlipX(image);
+  ASSERT_TRUE(TestRGB24Pixel(image, 0, 0, 20, 100, 110));
+  ASSERT_TRUE(TestRGB24Pixel(image, 1, 0, 10, 100, 110));
+  ASSERT_TRUE(TestRGB24Pixel(image, 0, 1, 40, 100, 110));
+  ASSERT_TRUE(TestRGB24Pixel(image, 1, 1, 30, 100, 110));
+
+  ImageProcessing::FlipY(image);
+  ASSERT_TRUE(TestRGB24Pixel(image, 0, 0, 40, 100, 110));
+  ASSERT_TRUE(TestRGB24Pixel(image, 1, 0, 30, 100, 110));
+  ASSERT_TRUE(TestRGB24Pixel(image, 0, 1, 20, 100, 110));
+  ASSERT_TRUE(TestRGB24Pixel(image, 1, 1, 10, 100, 110));
+}
+
+
+TEST(ImageProcessing, ResizeBasicGrayscale8)
+{
+  Image source(PixelFormat_Grayscale8, 2, 2, false);
+  SetGrayscale8Pixel(source, 0, 0, 10);
+  SetGrayscale8Pixel(source, 1, 0, 20);
+  SetGrayscale8Pixel(source, 0, 1, 30);
+  SetGrayscale8Pixel(source, 1, 1, 40);
+
+  {
+    Image target(PixelFormat_Grayscale8, 2, 4, false);
+    ImageProcessing::Resize(target, source);
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 0, 10));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 0, 20));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 1, 10));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 1, 20));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 2, 30));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 2, 40));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 3, 30));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 3, 40));
+  }
+
+  {
+    Image target(PixelFormat_Grayscale8, 4, 2, false);
+    ImageProcessing::Resize(target, source);
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 0, 10));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 0, 10));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 2, 0, 20));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 3, 0, 20));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 1, 30));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 1, 30));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 2, 1, 40));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 3, 1, 40));
+  }
+}
+
+
+TEST(ImageProcessing, ResizeBasicRGB24)
+{
+  Image source(PixelFormat_RGB24, 2, 2, false);
+  SetRGB24Pixel(source, 0, 0, 10, 100, 110);
+  SetRGB24Pixel(source, 1, 0, 20, 100, 110);
+  SetRGB24Pixel(source, 0, 1, 30, 100, 110);
+  SetRGB24Pixel(source, 1, 1, 40, 100, 110);
+
+  {
+    Image target(PixelFormat_RGB24, 2, 4, false);
+    ImageProcessing::Resize(target, source);
+    ASSERT_TRUE(TestRGB24Pixel(target, 0, 0, 10, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 1, 0, 20, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 0, 1, 10, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 1, 1, 20, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 0, 2, 30, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 1, 2, 40, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 0, 3, 30, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 1, 3, 40, 100, 110));
+  }
+
+  {
+    Image target(PixelFormat_RGB24, 4, 2, false);
+    ImageProcessing::Resize(target, source);
+    ASSERT_TRUE(TestRGB24Pixel(target, 0, 0, 10, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 1, 0, 10, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 2, 0, 20, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 3, 0, 20, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 0, 1, 30, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 1, 1, 30, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 2, 1, 40, 100, 110));
+    ASSERT_TRUE(TestRGB24Pixel(target, 3, 1, 40, 100, 110));
+  }
+}
+
+
+TEST(ImageProcessing, ResizeEmptyGrayscale8)
+{
+  {
+    Image source(PixelFormat_Grayscale8, 0, 0, false);
+    Image target(PixelFormat_Grayscale8, 2, 2, false);
+    ImageProcessing::Resize(target, source);
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 0, 0));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 0, 0));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 1, 0));
+    ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 1, 0));
+  }
+
+  {
+    Image source(PixelFormat_Grayscale8, 2, 2, false);
+    Image target(PixelFormat_Grayscale8, 0, 0, false);
+    ImageProcessing::Resize(target, source);
+  }
+}
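+
+
+TEST(ImageProcessing, SmoothGaussian5x5Constant)
+{
+  // Illustrative sketch of how the new SmoothGaussian5x5() could be
+  // exercised (assuming exact float arithmetic on these small integer
+  // values): the kernel weights sum to 256 and are normalized away, so a
+  // constant image must come out unchanged, including along the borders.
+  Image image(PixelFormat_Grayscale8, 7, 5, false);
+
+  for (unsigned int y = 0; y < image.GetHeight(); y++)
+  {
+    for (unsigned int x = 0; x < image.GetWidth(); x++)
+    {
+      SetGrayscale8Pixel(image, x, y, 100);
+    }
+  }
+
+  ImageProcessing::SmoothGaussian5x5(image);
+
+  for (unsigned int y = 0; y < image.GetHeight(); y++)
+  {
+    for (unsigned int x = 0; x < image.GetWidth(); x++)
+    {
+      ASSERT_TRUE(TestGrayscale8Pixel(image, x, y, 100));
+    }
+  }
+}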