changeset 3600:4066998150ef
/instances/{id}/preview route now takes the windowing into account
| author | Alain Mazy <alain@mazy.be> |
|---|---|
| date | Thu, 09 Jan 2020 18:54:40 +0100 |
| parents | e01900f913e7 |
| children | a77e7839012a |
| files | Core/Images/ImageProcessing.cpp Core/Images/ImageProcessing.h NEWS OrthancServer/OrthancRestApi/OrthancRestResources.cpp UnitTestsSources/ImageProcessingTests.cpp |
| diffstat | 5 files changed, 310 insertions(+), 72 deletions(-) |
--- a/Core/Images/ImageProcessing.cpp	Tue Jan 07 10:53:32 2020 +0100
+++ b/Core/Images/ImageProcessing.cpp	Thu Jan 09 18:54:40 2020 +0100
@@ -489,89 +489,120 @@
     }
   }
 
+  template <typename TargetType, typename SourceType>
+  static void ApplyWindowingInternal(ImageAccessor& target,
+                                     const ImageAccessor& source,
+                                     float windowCenter,
+                                     float windowWidth,
+                                     float rescaleSlope,
+                                     float rescaleIntercept,
+                                     bool invert)
+  {
+    // WARNING - "::min()" should be replaced by "::lowest()" if
+    // dealing with float or double (which is not the case so far)
+    assert(sizeof(TargetType) <= 2);  // Safeguard to remember about "float/double"
+    const TargetType minTargetValue = std::numeric_limits<TargetType>::min();
+    const TargetType maxTargetValue = std::numeric_limits<TargetType>::max();
+    const float minFloatValue = static_cast<float>(minTargetValue);
+    const float maxFloatValue = static_cast<float>(maxTargetValue);
+
+    const float windowIntercept = windowCenter - windowWidth / 2.0f;
+    const float windowSlope = (maxFloatValue + 1.0f) / windowWidth;
+
+    const unsigned int width = source.GetWidth();
+    const unsigned int height = source.GetHeight();
+
+    for (unsigned int y = 0; y < height; y++)
+    {
+      TargetType* t = reinterpret_cast<TargetType*>(target.GetRow(y));
+      const SourceType* s = reinterpret_cast<const SourceType*>(source.GetConstRow(y));
+
+      for (unsigned int x = 0; x < width; x++, t++, s++)
+      {
+        float rescaledValue = *s * rescaleSlope + rescaleIntercept;
+        float v = (rescaledValue - windowIntercept) * windowSlope;
+        if (v <= minFloatValue)
+        {
+          v = minFloatValue;
+        }
+        else if (v >= maxFloatValue)
+        {
+          v = maxFloatValue;
+        }
+
+        TargetType vv = static_cast<TargetType>(v);
+
+        if (invert)
+        {
+          vv = maxTargetValue - vv;
+        }
+
+        *t = static_cast<TargetType>(vv);
+      }
+    }
+  }
 
   void ImageProcessing::ApplyWindowing(ImageAccessor& target,
                                        const ImageAccessor& source,
                                        float windowCenter,
                                        float windowWidth,
-                                       Orthanc::PhotometricInterpretation sourcePhotometricInterpretation)
+                                       float rescaleSlope,
+                                       float rescaleIntercept,
+                                       bool invert)
   {
-    if (source.GetFormat() != Orthanc::PixelFormat_Float32)
-    {
-      throw OrthancException(ErrorCode_NotImplemented);
-    }
-
-    if (sourcePhotometricInterpretation != Orthanc::PhotometricInterpretation_Monochrome1
-        && sourcePhotometricInterpretation != Orthanc::PhotometricInterpretation_Monochrome2)
-    {
-      throw OrthancException(ErrorCode_ParameterOutOfRange);
-    }
-
     if (target.GetWidth() != source.GetWidth() || target.GetHeight() != source.GetHeight())
     {
       throw OrthancException(ErrorCode_IncompatibleImageSize);
     }
 
-    unsigned int targetBytesPerPixels = target.GetBytesPerPixel();
-    unsigned int targetChannelsPerPixels = 0;
-    switch (target.GetFormat())
-    {
-      case Orthanc::PixelFormat_Grayscale8:
-        targetChannelsPerPixels = 1;
-        break;
-      case Orthanc::PixelFormat_RGBA32:
-      case Orthanc::PixelFormat_BGRA32:
-      case Orthanc::PixelFormat_RGB24:
-        targetChannelsPerPixels = 3;
-        break;
-      default:
-        throw OrthancException(ErrorCode_NotImplemented);
-    }
-
-    const float a = windowCenter - windowWidth / 2.0f;
-    const float slope = 256.0f / windowWidth;
-    bool isInverted = sourcePhotometricInterpretation == Orthanc::PhotometricInterpretation_Monochrome1;
-
-    const unsigned int width = source.GetWidth();
-    const unsigned int height = source.GetHeight();
-
-    assert(sizeof(float) == 4);
-
-    for (unsigned int y = 0; y < height; y++)
+    switch (source.GetFormat())
     {
-      const float* p = reinterpret_cast<const float*>(source.GetConstRow(y));
-      uint8_t* q = reinterpret_cast<uint8_t*>(target.GetRow(y));
-
-      for (unsigned int x = 0; x < width; x++)
+      case Orthanc::PixelFormat_Float32:
       {
-        float v = (*p - a) * slope;
-        if (v <= 0)
+        switch (target.GetFormat())
         {
-          v = 0;
-        }
-        else if (v >= 255)
-        {
-          v = 255;
+          case Orthanc::PixelFormat_Grayscale8:
+            ApplyWindowingInternal<uint8_t, float>(target, source, windowCenter, windowWidth, rescaleSlope, rescaleIntercept, invert);
+            break;
+          case Orthanc::PixelFormat_Grayscale16:
+            ApplyWindowingInternal<uint16_t, float>(target, source, windowCenter, windowWidth, rescaleSlope, rescaleIntercept, invert);
+            break;
+          default:
+            throw OrthancException(ErrorCode_NotImplemented);
         }
-
-        uint8_t vv = static_cast<uint8_t>(v);
-
-        if (isInverted)
+      };break;
+      case Orthanc::PixelFormat_Grayscale8:
+      {
+        switch (target.GetFormat())
         {
-          vv = 255 - vv;
+          case Orthanc::PixelFormat_Grayscale8:
+            ApplyWindowingInternal<uint8_t, uint8_t>(target, source, windowCenter, windowWidth, rescaleSlope, rescaleIntercept, invert);
+            break;
+          case Orthanc::PixelFormat_Grayscale16:
+            ApplyWindowingInternal<uint16_t, uint8_t>(target, source, windowCenter, windowWidth, rescaleSlope, rescaleIntercept, invert);
+            break;
+          default:
+            throw OrthancException(ErrorCode_NotImplemented);
         }
-
-        for (unsigned int c = 0; c < targetChannelsPerPixels; c++)
+      };break;
+      case Orthanc::PixelFormat_Grayscale16:
+      {
+        switch (target.GetFormat())
         {
-          q[c] = vv;
+          case Orthanc::PixelFormat_Grayscale8:
+            ApplyWindowingInternal<uint8_t, uint16_t>(target, source, windowCenter, windowWidth, rescaleSlope, rescaleIntercept, invert);
+            break;
+          case Orthanc::PixelFormat_Grayscale16:
+            ApplyWindowingInternal<uint16_t, uint16_t>(target, source, windowCenter, windowWidth, rescaleSlope, rescaleIntercept, invert);
+            break;
+          default:
+            throw OrthancException(ErrorCode_NotImplemented);
         }
-
-        p++;
-        q += targetBytesPerPixels;
-      }
+      };break;
+      default:
+        throw OrthancException(ErrorCode_NotImplemented);
     }
-  }
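For readers skimming the diff, the per-pixel math introduced by `ApplyWindowingInternal` is a modality rescale followed by a linear window and a clamp. The sketch below is an editorial restatement for the 8-bit case, not code from this changeset; `WindowOnePixel` and its parameters are hypothetical names:

```cpp
#include <algorithm>
#include <cstdint>

// Hypothetical single-pixel version of the windowing math above,
// specialised for an 8-bit target (TargetType = uint8_t).
static uint8_t WindowOnePixel(float stored,
                              float windowCenter, float windowWidth,
                              float rescaleSlope, float rescaleIntercept,
                              bool invert)
{
  const float rescaled = stored * rescaleSlope + rescaleIntercept;  // modality rescale
  const float windowIntercept = windowCenter - windowWidth / 2.0f;
  const float windowSlope = 256.0f / windowWidth;                   // (maxTargetValue + 1) / width
  float v = (rescaled - windowIntercept) * windowSlope;
  v = std::max(0.0f, std::min(255.0f, v));                          // clamp to the target range
  const uint8_t out = static_cast<uint8_t>(v);
  return invert ? static_cast<uint8_t>(255 - out) : out;            // MONOCHROME1-style inversion
}
```

With windowCenter 5, windowWidth 10, slope 1 and intercept 0, a stored value of 2 gives (2 - 0) * 25.6 = 51, which matches the 255*2/10 expected by the unit tests added further down.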
--- a/Core/Images/ImageProcessing.h	Tue Jan 07 10:53:32 2020 +0100
+++ b/Core/Images/ImageProcessing.h	Thu Jan 09 18:54:40 2020 +0100
@@ -86,7 +86,9 @@
                                const ImageAccessor& source,
                                float windowCenter,
                                float windowWidth,
-                               Orthanc::PhotometricInterpretation sourcePhotometricInterpretation);
+                               float rescaleSlope,
+                               float rescaleIntercept,
+                               bool invert);
 
     void Set(ImageAccessor& image, int64_t value);
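With the photometric-interpretation parameter replaced by explicit rescale and invert arguments, callers now pass the windowing parameters directly. A hypothetical call site, assuming a Float32 source and a Grayscale8 target of identical size (illustration only, arbitrary values):

```cpp
Orthanc::ImageProcessing::ApplyWindowing(target, source,
                                         2048.0f,  // windowCenter
                                         4096.0f,  // windowWidth
                                         1.0f,     // rescaleSlope
                                         0.0f,     // rescaleIntercept
                                         false);   // invert (true for MONOCHROME1)
```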
--- a/NEWS	Tue Jan 07 10:53:32 2020 +0100
+++ b/NEWS	Thu Jan 09 18:54:40 2020 +0100
@@ -15,6 +15,7 @@
 * C-Find SCU at Instance level now sets the 0008,0052 tag to IMAGE per default
   (was INSTANCE). Therefore, the "ClearCanvas" and "Dcm4Chee" modality
   manufacturer have now been deprecated.
 * Fix issue #156 (Chunked Dicom-web transfer uses 100% CPU)
+* /instances/{id}/preview route now takes the windowing into account
 
 Version 1.5.8 (2019-10-16)
 ==========================
--- a/OrthancServer/OrthancRestApi/OrthancRestResources.cpp	Tue Jan 07 10:53:32 2020 +0100
+++ b/OrthancServer/OrthancRestApi/OrthancRestResources.cpp	Thu Jan 09 18:54:40 2020 +0100
@@ -39,6 +39,7 @@
 #include "../../Core/DicomParsing/FromDcmtkBridge.h"
 #include "../../Core/DicomParsing/Internals/DicomImageDecoder.h"
 #include "../../Core/HttpServer/HttpContentNegociation.h"
+#include "../../Core/Images/ImageProcessing.h"
 #include "../../Core/Logging.h"
 #include "../DefaultDicomImageDecoder.h"
 #include "../OrthancConfiguration.h"
@@ -502,6 +503,38 @@
   }
 
+  void LookupWindowingTags(const ParsedDicomFile& dicom, float& windowCenter, float& windowWidth, float& rescaleSlope, float& rescaleIntercept, bool& invert)
+  {
+    DicomMap dicomTags;
+    dicom.ExtractDicomSummary(dicomTags);
+
+
+    unsigned int bitsStored = boost::lexical_cast<unsigned int>(dicomTags.GetStringValue(Orthanc::DICOM_TAG_BITS_STORED, "8", false));
+    windowWidth = static_cast<float>(2 << (bitsStored - 1));
+    windowCenter = windowWidth / 2;
+    rescaleSlope = 1.0f;
+    rescaleIntercept = 0.0f;
+    invert = false;
+
+    if (dicomTags.HasTag(Orthanc::DICOM_TAG_WINDOW_CENTER) && dicomTags.HasTag(Orthanc::DICOM_TAG_WINDOW_WIDTH))
+    {
+      windowCenter = boost::lexical_cast<float>(dicomTags.GetStringValue(Orthanc::DICOM_TAG_WINDOW_CENTER, "", false));
+      windowWidth = boost::lexical_cast<float>(dicomTags.GetStringValue(Orthanc::DICOM_TAG_WINDOW_WIDTH, "", false));
+    }
+
+    if (dicomTags.HasTag(Orthanc::DICOM_TAG_RESCALE_SLOPE) && dicomTags.HasTag(Orthanc::DICOM_TAG_RESCALE_INTERCEPT))
+    {
+      rescaleSlope = boost::lexical_cast<float>(dicomTags.GetStringValue(Orthanc::DICOM_TAG_RESCALE_SLOPE, "", false));
+      rescaleIntercept = boost::lexical_cast<float>(dicomTags.GetStringValue(Orthanc::DICOM_TAG_RESCALE_INTERCEPT, "", false));
+    }
+
+    PhotometricInterpretation photometric;
+    if (dicom.LookupPhotometricInterpretation(photometric))
+    {
+      invert = (photometric == PhotometricInterpretation_Monochrome1);
+    }
+  }
+
   template <enum ImageExtractionMode mode>
   static void GetImage(RestApiGetCall& call)
   {
@@ -520,6 +553,11 @@
     }
 
     bool invert = false;
+    float windowCenter = 128.0f;
+    float windowWidth = 256.0f;
+    float rescaleSlope = 1.0f;
+    float rescaleIntercept = 0.0f;
+
     std::auto_ptr<ImageAccessor> decoded;
 
     try
@@ -549,11 +587,7 @@
           // twice the DICOM file
           ParsedDicomFile parsed(dicomContent);
 
-          PhotometricInterpretation photometric;
-          if (parsed.LookupPhotometricInterpretation(photometric))
-          {
-            invert = (photometric == PhotometricInterpretation_Monochrome1);
-          }
+          LookupWindowingTags(dicomContent, windowCenter, windowWidth, rescaleSlope, rescaleIntercept, invert);
         }
       }
 #endif
@@ -564,12 +598,11 @@
         // things on multi-frame images
         ServerContext::DicomCacheLocker locker(context, publicId);
         decoded.reset(DicomImageDecoder::Decode(locker.GetDicom(), frame));
+        LookupWindowingTags(locker.GetDicom(), windowCenter, windowWidth, rescaleSlope, rescaleIntercept, invert);
 
-        PhotometricInterpretation photometric;
-        if (mode == ImageExtractionMode_Preview &&
-            locker.GetDicom().LookupPhotometricInterpretation(photometric))
+        if (mode != ImageExtractionMode_Preview)
         {
-          invert = (photometric == PhotometricInterpretation_Monochrome1);
+          invert = false;
         }
       }
     }
@@ -593,6 +626,13 @@
       return;
     }
 
+    if (mode == ImageExtractionMode_Preview
+        && (decoded->GetFormat() == Orthanc::PixelFormat_Grayscale8 || decoded->GetFormat() == Orthanc::PixelFormat_Grayscale16))
+    {
+      ImageProcessing::ApplyWindowing(*decoded, *decoded, windowCenter, windowWidth, rescaleSlope, rescaleIntercept, invert);
+      invert = false;  // don't invert it later on when encoding it, it has been inverted in the ApplyWindowing function
+    }
+
     ImageToEncode image(decoded, mode, invert);
 
     HttpContentNegociation negociation;
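When the DICOM file carries no windowing information, `LookupWindowingTags` falls back to a full-range window derived from `BitsStored`. A small, self-contained illustration of that default arithmetic (editorial sketch, not part of the changeset; the 12-bit value is only an example):

```cpp
#include <cstdio>

int main()
{
  // Fallback mirroring LookupWindowingTags when the windowing tags are absent.
  unsigned int bitsStored = 12;
  float windowWidth  = static_cast<float>(2 << (bitsStored - 1));  // 2^12 = 4096
  float windowCenter = windowWidth / 2;                            // 2048
  float rescaleSlope = 1.0f, rescaleIntercept = 0.0f;              // identity modality LUT

  printf("default window: C=%.0f W=%.0f (slope=%g, intercept=%g)\n",
         windowCenter, windowWidth, rescaleSlope, rescaleIntercept);
  return 0;
}
```

WindowCenter/WindowWidth and RescaleSlope/RescaleIntercept only override these defaults when both tags of the pair are present, and a MONOCHROME1 photometric interpretation turns on the invert flag.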
--- a/UnitTestsSources/ImageProcessingTests.cpp	Tue Jan 07 10:53:32 2020 +0100
+++ b/UnitTestsSources/ImageProcessingTests.cpp	Thu Jan 09 18:54:40 2020 +0100
@@ -282,6 +282,25 @@
   return p == value;
 }
 
+static void SetGrayscale16Pixel(ImageAccessor& image,
+                                unsigned int x,
+                                unsigned int y,
+                                uint8_t value)
+{
+  ImageTraits<PixelFormat_Grayscale16>::SetPixel(image, value, x, y);
+}
+
+static bool TestGrayscale16Pixel(const ImageAccessor& image,
+                                 unsigned int x,
+                                 unsigned int y,
+                                 uint16_t value)
+{
+  PixelTraits<PixelFormat_Grayscale16>::PixelType p;
+  ImageTraits<PixelFormat_Grayscale16>::GetPixel(p, image, x, y);
+  if (p != value) printf("%d %d\n", p, value);
+  return p == value;
+}
+
 static void SetRGB24Pixel(ImageAccessor& image,
                           unsigned int x,
                           unsigned int y,
@@ -774,3 +793,148 @@
     ASSERT_TRUE(TestRGB24Pixel(image, 4, 4, 0, 0, 0));
   }
 }
+
+TEST(ImageProcessing, ApplyWindowingFloatToGrayScale8)
+{
+  {
+    Image image(PixelFormat_Float32, 6, 1, false);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, -5.0f, 0, 0);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, 0.0f, 1, 0);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, 5.0f, 2, 0);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, 10.0f, 3, 0);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, 1000.0f, 4, 0);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, 2.0f, 5, 0);
+
+    {
+      Image target(PixelFormat_Grayscale8, 6, 1, false);
+      ImageProcessing::ApplyWindowing(target, image, 5.0f, 10.0f, 1.0f, 0.0f, false);
+
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 0, 0));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 0, 0));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 2, 0, 128));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 3, 0, 255));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 4, 0, 255));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 5, 0, 255*2/10));
+    }
+
+    {
+      Image target(PixelFormat_Grayscale8, 6, 1, false);
+      ImageProcessing::ApplyWindowing(target, image, 5.0f, 10.0f, 1.0f, 0.0f, true);
+
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 0, 255));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 0, 255));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 2, 0, 127));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 3, 0, 0));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 4, 0, 0));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 5, 0, 255 - 255*2/10));
+    }
+
+    {
+      Image target(PixelFormat_Grayscale8, 6, 1, false);
+      ImageProcessing::ApplyWindowing(target, image, 5000.0f, 10000.0f, 1000.0f, 0.0f, false);
+
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 0, 0));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 0, 0));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 2, 0, 128));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 3, 0, 255));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 4, 0, 255));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 5, 0, 255*2/10));
+    }
+
+    {
+      Image target(PixelFormat_Grayscale8, 6, 1, false);
+      ImageProcessing::ApplyWindowing(target, image, 5000.0f, 10000.0f, 1000.0f, 0.0f, true);
+
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 0, 255));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 0, 255));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 2, 0, 127));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 3, 0, 0));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 4, 0, 0));
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 5, 0, 255 - 256*2/10));
+    }
+
+    {
+      Image target(PixelFormat_Grayscale8, 6, 1, false);
+      ImageProcessing::ApplyWindowing(target, image, 50.0f, 100.0f, 10.0f, 30.0f, false);
+
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 0, 0, 0));           // (-5 * 10) + 30 => pixel value = -20 => 0
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 1, 0, 256*30/100));  // (0 * 10) + 30 => pixel value = 30 => 30%
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 2, 0, 256*80/100));  // (5 * 10) + 30 => pixel value = 80 => 80%
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 3, 0, 255));         // (10 * 10) + 30 => pixel value = 130 => 100%
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 4, 0, 255));         // (1000 * 10) + 30 => pixel value = 10030 => 100%
+      ASSERT_TRUE(TestGrayscale8Pixel(target, 5, 0, 128));         // (2 * 10) + 30 => pixel value = 50 => 50%
+    }
+  }
+}
+
+TEST(ImageProcessing, ApplyWindowingFloatToGrayScale16)
+{
+  {
+    Image image(PixelFormat_Float32, 6, 1, false);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, -5.0f, 0, 0);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, 0.0f, 1, 0);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, 5.0f, 2, 0);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, 10.0f, 3, 0);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, 1000.0f, 4, 0);
+    ImageTraits<PixelFormat_Float32>::SetFloatPixel(image, 2.0f, 5, 0);
+
+    {
+      Image target(PixelFormat_Grayscale16, 6, 1, false);
+      ImageProcessing::ApplyWindowing(target, image, 5.0f, 10.0f, 1.0f, 0.0f, false);
+
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 0, 0, 0));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 1, 0, 0));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 2, 0, 32768));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 3, 0, 65535));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 4, 0, 65535));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 5, 0, 65536*2/10));
+    }
+  }
+}
+
+TEST(ImageProcessing, ApplyWindowingGrayScale8ToGrayScale16)
+{
+  {
+    Image image(PixelFormat_Grayscale8, 6, 1, false);
+    SetGrayscale8Pixel(image, 0, 0, 0);
+    SetGrayscale8Pixel(image, 1, 0, 2);
+    SetGrayscale8Pixel(image, 2, 0, 5);
+    SetGrayscale8Pixel(image, 3, 0, 10);
+    SetGrayscale8Pixel(image, 4, 0, 255);
+
+    {
+      Image target(PixelFormat_Grayscale16, 6, 1, false);
+      ImageProcessing::ApplyWindowing(target, image, 5.0f, 10.0f, 1.0f, 0.0f, false);
+
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 0, 0, 0));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 1, 0, 65536*2/10));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 2, 0, 65536*5/10));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 3, 0, 65535));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 4, 0, 65535));
+    }
+  }
+}
+
+TEST(ImageProcessing, ApplyWindowingGrayScale16ToGrayScale16)
+{
+  {
+    Image image(PixelFormat_Grayscale16, 6, 1, false);
+    SetGrayscale16Pixel(image, 0, 0, 0);
+    SetGrayscale16Pixel(image, 1, 0, 2);
+    SetGrayscale16Pixel(image, 2, 0, 5);
+    SetGrayscale16Pixel(image, 3, 0, 10);
+    SetGrayscale16Pixel(image, 4, 0, 255);
+
+    {
+      Image target(PixelFormat_Grayscale16, 6, 1, false);
+      ImageProcessing::ApplyWindowing(target, image, 5.0f, 10.0f, 1.0f, 0.0f, false);
+
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 0, 0, 0));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 1, 0, 65536*2/10));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 2, 0, 65536*5/10));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 3, 0, 65535));
+      ASSERT_TRUE(TestGrayscale16Pixel(target, 4, 0, 65535));
+    }
+  }
+}
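As a quick cross-check of the expected values in the new tests (editorial note, not part of the changeset): for a Grayscale16 target with window center 5 and width 10, the window intercept is 5 - 10/2 = 0 and the slope is (65535 + 1)/10, so a source value of 2 maps to 13107, i.e. 65536*2/10, and anything at 10 or above saturates at 65535.

```cpp
#include <cassert>
#include <cstdint>

int main()
{
  // Recomputes one expected value from ApplyWindowingFloatToGrayScale16.
  const float windowSlope = 65536.0f / 10.0f;          // (maxTargetValue + 1) / windowWidth
  const float v = (2.0f - 0.0f) * windowSlope;         // source value 2, window intercept 0
  assert(static_cast<uint16_t>(v) == 65536 * 2 / 10);  // 13107
  return 0;
}
```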