Mercurial > hg > orthanc-stone
changeset 1953:0661115af939 deep-learning
first successful application of deep learning
author | Sebastien Jodogne <s.jodogne@gmail.com> |
---|---|
date | Tue, 16 Aug 2022 15:05:51 +0200 |
parents | a1e0aae9c17f |
children | 2034ae383cfd |
files | Applications/StoneWebViewer/WebAssembly/StoneWebViewer.cpp |
diffstat | 1 files changed, 86 insertions(+), 10 deletions(-) [+] |
line wrap: on
line diff
--- a/Applications/StoneWebViewer/WebAssembly/StoneWebViewer.cpp Tue Aug 16 13:49:52 2022 +0200 +++ b/Applications/StoneWebViewer/WebAssembly/StoneWebViewer.cpp Tue Aug 16 15:05:51 2022 +0200 @@ -1585,10 +1585,11 @@ private: static const int LAYER_TEXTURE = 0; - static const int LAYER_OVERLAY = 1; - static const int LAYER_REFERENCE_LINES = 2; - static const int LAYER_ANNOTATIONS_OSIRIX = 3; - static const int LAYER_ANNOTATIONS_STONE = 4; + static const int LAYER_DEEP_LEARNING = 1; + static const int LAYER_OVERLAY = 2; + static const int LAYER_REFERENCE_LINES = 3; + static const int LAYER_ANNOTATIONS_OSIRIX = 4; + static const int LAYER_ANNOTATIONS_STONE = 5; class ICommand : public Orthanc::IDynamicObject @@ -2007,6 +2008,10 @@ // the center of the top-left pixel boost::shared_ptr<OrthancStone::AnnotationsSceneLayer> stoneAnnotations_; + boost::shared_ptr<Orthanc::ImageAccessor> deepLearningMask_; + std::string deepLearningSopInstanceUid_; + unsigned int deepLearningFrameNumber_; + void ScheduleNextPrefetch() { @@ -2228,6 +2233,28 @@ } } + std::unique_ptr<OrthancStone::LookupTableTextureSceneLayer> deepLearningLayer; + + if (deepLearningMask_.get() != NULL && + deepLearningSopInstanceUid_ == instance.GetSopInstanceUid() && + deepLearningFrameNumber_ == frameIndex) + { + std::vector<uint8_t> lut(4 * 256); + for (unsigned int v = 128; v < 256; v++) + { + lut[4 * v] = 196; + lut[4 * v + 1] = 0; + lut[4 * v + 2] = 0; + lut[4 * v + 3] = 196; + } + + deepLearningLayer.reset(new OrthancStone::LookupTableTextureSceneLayer(*deepLearningMask_)); + deepLearningLayer->SetLookupTable(lut); + deepLearningLayer->SetPixelSpacing(pixelSpacingX, pixelSpacingY); + deepLearningLayer->SetFlipX(flipX_); + deepLearningLayer->SetFlipY(flipY_); + } + StoneAnnotationsRegistry::GetInstance().Load(*stoneAnnotations_, instance.GetSopInstanceUid(), frameIndex); { @@ -2255,6 +2282,15 @@ scene.DeleteLayer(LAYER_ANNOTATIONS_OSIRIX); } + if (deepLearningLayer.get() != NULL) + { + 
scene.SetLayer(LAYER_DEEP_LEARNING, deepLearningLayer.release()); + } + else + { + scene.DeleteLayer(LAYER_DEEP_LEARNING); + } + stoneAnnotations_->Render(scene); // Necessary for "FitContent()" to work if (fitNextContent_) @@ -3319,8 +3355,8 @@ } - bool GetCurrentFrame(std::string& sopInstanceUid, - unsigned int& frameNumber) const + bool GetCurrentFrame(std::string& sopInstanceUid /* out */, + unsigned int& frameNumber /* out */) const { if (cursor_.get() != NULL && frames_.get() != NULL) @@ -3336,6 +3372,24 @@ return false; } } + + + void SetDeepLearningMask(const std::string& sopInstanceUid, + unsigned int frameNumber, + const Orthanc::ImageAccessor& mask) + { + std::string currentSopInstanceUid; + unsigned int currentFrameNumber; + if (GetCurrentFrame(currentSopInstanceUid, currentFrameNumber) && + sopInstanceUid == currentSopInstanceUid && + frameNumber == currentFrameNumber) + { + deepLearningSopInstanceUid_ = sopInstanceUid; + deepLearningFrameNumber_ = frameNumber; + deepLearningMask_.reset(Orthanc::Image::Clone(mask)); + Redraw(); + } + } }; @@ -3814,10 +3868,32 @@ if (response.step().done()) { deepLearningState_ = DeepLearningState_Waiting; - LOG(WARNING) << "SUCCESS! Mask: " << response.step().output().width() << "x" - << response.step().output().height() << " for frame " - << response.step().output().sop_instance_uid() << " / " - << response.step().output().frame_number(); + + const unsigned int height = response.step().mask().height(); + const unsigned int width = response.step().mask().width(); + + LOG(WARNING) << "SUCCESS! Mask: " << width << "x" << height << " for frame " + << response.step().mask().sop_instance_uid() << " / " + << response.step().mask().frame_number(); + + Orthanc::Image mask(Orthanc::PixelFormat_Grayscale8, width, height, false); + + size_t pos = 0; + for (unsigned int y = 0; y < height; y++) + { + uint8_t* p = reinterpret_cast<uint8_t*>(mask.GetRow(y)); + for (unsigned int x = 0; x < width; x++, p++, pos++) + { + *p = response.step().mask().values(pos) ? 255 : 0; + } + } + + for (Viewports::iterator it = allViewports_.begin(); it != allViewports_.end(); ++it) + { + assert(it->second != NULL); + it->second->SetDeepLearningMask(response.step().mask().sop_instance_uid(), + response.step().mask().frame_number(), mask); + } } else {