# HG changeset patch
# User Sebastien Jodogne
# Date 1670225389 -3600
# Node ID 04148de691a7ae37e7e80eb60e015088605a6e30
# Parent  37d6805b80ee3ed62cdda79d5207d235b65d973b
# Parent  8ff083f67628fe74719ba49170da89e4cda6b660
integration mainline->deep-learning

diff -r 8ff083f67628 -r 04148de691a7 Applications/StoneWebViewer/WebApplication/app.js
--- a/Applications/StoneWebViewer/WebApplication/app.js	Mon Dec 05 08:19:49 2022 +0100
+++ b/Applications/StoneWebViewer/WebApplication/app.js	Mon Dec 05 08:29:49 2022 +0100
@@ -603,7 +603,10 @@
       series: [],
       studies: [],
       seriesIndex: {},  // Maps "SeriesInstanceUID" to "index in this.series"
-      virtualSeriesThumbnails: {}
+      virtualSeriesThumbnails: {},
+
+      deepLearningReady: false,
+      deepLearningProgress: 0  // Floating-point number in the range [0..1]
     }
   },
   computed: {
@@ -1289,6 +1292,10 @@
       });
     },
 
+    ApplyDeepLearning: function() {
+      stone.ApplyDeepLearningModel(this.GetActiveCanvas());
+    },
+
     ChangeActiveSeries: function(offset) {
       var seriesTags = this.GetActiveViewportSeriesTags();
       if (seriesTags !== null) {
@@ -1714,3 +1721,17 @@
     }
   }
 });
+
+
+window.addEventListener('DeepLearningInitialized', function() {
+  stone.LoadDeepLearningModel('model.message');
+});
+
+window.addEventListener('DeepLearningModelReady', function() {
+  app.deepLearningReady = true;
+  app.deepLearningProgress = 0;
+});
+
+window.addEventListener('DeepLearningStep', function(args) {
+  app.deepLearningProgress = args.detail.progress;
+});
diff -r 8ff083f67628 -r 04148de691a7 Applications/StoneWebViewer/WebApplication/index.html
--- a/Applications/StoneWebViewer/WebApplication/index.html	Mon Dec 05 08:19:49 2022 +0100
+++ b/Applications/StoneWebViewer/WebApplication/index.html	Mon Dec 05 08:29:49 2022 +0100
@@ -316,11 +316,23 @@
[Editor's placeholder: the body of this hunk was destroyed when the patch was
converted to text, its HTML markup having been stripped away; only the diff
markers survive (13 added lines, 1 removed line). The hunk presumably wires
the new "deepLearningReady" / "deepLearningProgress" state and the
"ApplyDeepLearning()" handler from app.js into the viewer's user interface.
The same gap also swallowed the "diff" header and the first hunk header of
the next file section.]
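[Editor's note: the "DeepLearningInitialized", "DeepLearningModelReady" and
"DeepLearningStep" events consumed by app.js above are fired from the C++
code below, either through explicit EM_ASM blocks or through the viewer's
pre-existing DISPATCH_JAVASCRIPT_EVENT macro. That macro is not part of this
changeset; a sketch of its likely shape, modeled on the EM_ASM blocks that do
appear in this patch:

#define DISPATCH_JAVASCRIPT_EVENT(name)                          \
  EM_ASM(                                                        \
    /* "name" is pasted as a string literal into the JavaScript  \
       source before Emscripten stringifies the EM_ASM body */   \
    const customEvent = document.createEvent("CustomEvent");     \
    customEvent.initCustomEvent(name, false, false, undefined);  \
    window.dispatchEvent(customEvent);                           \
    );
]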
diff -r 8ff083f67628 -r 04148de691a7 Applications/StoneWebViewer/WebAssembly/StoneWebViewer.cpp
--- a/Applications/StoneWebViewer/WebAssembly/StoneWebViewer.cpp	Mon Dec 05 08:19:49 2022 +0100
+++ b/Applications/StoneWebViewer/WebAssembly/StoneWebViewer.cpp	Mon Dec 05 08:29:49 2022 +0100
@@ ... @@
     boost::shared_ptr<OrthancStone::AnnotationsSceneLayer>  stoneAnnotations_;
-
+    bool linearInterpolation_;
+
+    boost::shared_ptr<Orthanc::ImageAccessor>  deepLearningMask_;
+    std::string   deepLearningSopInstanceUid_;
+    unsigned int  deepLearningFrameNumber_;
 
     void ScheduleNextPrefetch()
     {
@@ -2246,6 +2250,26 @@
        }
      }
 
+     std::unique_ptr<OrthancStone::LookupTableTextureSceneLayer> deepLearningLayer;
+
+     if (deepLearningMask_.get() != NULL &&
+         deepLearningSopInstanceUid_ == instance.GetSopInstanceUid() &&
+         deepLearningFrameNumber_ == frameIndex)
+     {
+       std::vector<uint8_t> lut(4 * 256);
+       for (unsigned int v = 128; v < 256; v++)
+       {
+         lut[4 * v] = 196;
+         lut[4 * v + 1] = 0;
+         lut[4 * v + 2] = 0;
+         lut[4 * v + 3] = 196;
+       }
+
+       deepLearningLayer.reset(new OrthancStone::LookupTableTextureSceneLayer(*deepLearningMask_));
+       deepLearningLayer->SetLookupTable(lut);
+       deepLearningLayer->SetPixelSpacing(pixelSpacingX, pixelSpacingY);
+     }
+
      StoneAnnotationsRegistry::GetInstance().Load(*stoneAnnotations_, instance.GetSopInstanceUid(), frameIndex);
 
      // Orientation markers, new in Stone Web viewer 2.4
@@ -2322,6 +2346,15 @@
         scene.DeleteLayer(LAYER_ORIENTATION_MARKERS);
       }
 
+      if (deepLearningLayer.get() != NULL)
+      {
+        scene.SetLayer(LAYER_DEEP_LEARNING, deepLearningLayer.release());
+      }
+      else
+      {
+        scene.DeleteLayer(LAYER_DEEP_LEARNING);
+      }
+
       stoneAnnotations_->Render(scene);  // Necessary for "FitContent()" to work
 
       if (fitNextContent_)
@@ -2348,7 +2381,7 @@
     {
       const size_t cursorIndex = cursor_->GetCurrentIndex();
       const OrthancStone::DicomInstanceParameters& instance = frames_->GetInstanceOfFrame(cursorIndex);
-      const size_t frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
+      const unsigned int frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
 
       // Only change the scene if the loaded frame still corresponds to the current cursor
       if (instance.GetSopInstanceUid() == loadedSopInstanceUid &&
@@ -2654,7 +2687,7 @@
     {
       const size_t cursorIndex = cursor_->GetCurrentIndex();
       const OrthancStone::DicomInstanceParameters& instance = frames_->GetInstanceOfFrame(cursorIndex);
-      const size_t frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
+      const unsigned int frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
 
       StoneAnnotationsRegistry::GetInstance().Save(instance.GetSopInstanceUid(), frameNumber, *stoneAnnotations_);
@@ -2880,7 +2913,7 @@
 
       const size_t cursorIndex = cursor_->GetCurrentIndex();
       const OrthancStone::DicomInstanceParameters& instance = frames_->GetInstanceOfFrame(cursorIndex);
-      const size_t frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
+      const unsigned int frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
 
       FramesCache::Accessor accessor(*framesCache_, instance.GetSopInstanceUid(), frameNumber);
       if (accessor.IsValid())
@@ -3426,7 +3459,7 @@
     {
       const size_t cursorIndex = cursor_->GetCurrentIndex();
       const OrthancStone::DicomInstanceParameters& instance = frames_->GetInstanceOfFrame(cursorIndex);
-      const size_t frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
+      const unsigned int frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
 
       if (instance.GetSopInstanceUid() == sopInstanceUid &&
           frameNumber == frame)
@@ -3442,6 +3475,7 @@
       }
     }
 
+
     void SetLinearInterpolation(bool linearInterpolation)
     {
       if (linearInterpolation_ != linearInterpolation)
@@ -3451,6 +3485,7 @@
       }
     }
 
+
     void AddTextAnnotation(const std::string& label,
                            const OrthancStone::ScenePoint2D& pointedPosition,
                            const OrthancStone::ScenePoint2D& labelPosition)
@@ -3460,6 +3495,43 @@
     }
 
+
+    bool GetCurrentFrame(std::string& sopInstanceUid /* out */,
+                         unsigned int& frameNumber /* out */) const
+    {
+      if (cursor_.get() != NULL &&
+          frames_.get() != NULL)
+      {
+        const size_t cursorIndex = cursor_->GetCurrentIndex();
+        const OrthancStone::DicomInstanceParameters& instance = frames_->GetInstanceOfFrame(cursorIndex);
+        sopInstanceUid = instance.GetSopInstanceUid();
+        frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
+        return true;
+      }
+      else
+      {
+        return false;
+      }
+    }
+
+
+    void SetDeepLearningMask(const std::string& sopInstanceUid,
+                             unsigned int frameNumber,
+                             const Orthanc::ImageAccessor& mask)
+    {
+      std::string currentSopInstanceUid;
+      unsigned int currentFrameNumber;
+      if (GetCurrentFrame(currentSopInstanceUid, currentFrameNumber) &&
+          sopInstanceUid == currentSopInstanceUid &&
+          frameNumber == currentFrameNumber)
+      {
+        deepLearningSopInstanceUid_ = sopInstanceUid;
+        deepLearningFrameNumber_ = frameNumber;
+        deepLearningMask_.reset(Orthanc::Image::Clone(mask));
+        Redraw();
+      }
+    }
+
 
     void SignalSynchronizedBrowsing()
     {
       if (synchronizationEnabled_ &&
@@ -3826,6 +3898,218 @@
 }
 
+#include <emscripten/fetch.h>
+#include "deep-learning/WebAssembly/Worker.pb.h"
+
+enum DeepLearningState
+{
+  DeepLearningState_Waiting,
+  DeepLearningState_Pending,
+  DeepLearningState_Running
+};
+
+static DeepLearningState deepLearningState_ = DeepLearningState_Waiting;
+static worker_handle deepLearningWorker_;
+static std::string deepLearningPendingSopInstanceUid_;
+static unsigned int deepLearningPendingFrameNumber_;
+
+// Forward declaration
+static void DeepLearningCallback(char* data,
+                                 int size,
+                                 void* payload);
+
+static void SendRequestToWebWorker(const OrthancStone::Messages::Request& request)
+{
+  std::string s;
+  if (request.SerializeToString(&s) &&
+      !s.empty())
+  {
+    emscripten_call_worker(deepLearningWorker_, "Execute", &s[0], s.size(), DeepLearningCallback, NULL);
+  }
+  else
+  {
+    throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError,
+                                    "Cannot send command to the Web worker");
+  }
+}
+
+static void DeepLearningSchedule(const std::string& sopInstanceUid,
+                                 unsigned int frameNumber)
+{
+  if (deepLearningState_ == DeepLearningState_Waiting)
+  {
+    LOG(WARNING) << "Starting deep learning on: " << sopInstanceUid << " / " << frameNumber;
+
+    FramesCache::Accessor accessor(*framesCache_, sopInstanceUid, frameNumber);
+    if (accessor.IsValid() &&
+        accessor.GetImage().GetFormat() == Orthanc::PixelFormat_Float32)
+    {
+      const Orthanc::ImageAccessor& image = accessor.GetImage();
+
+      OrthancStone::Messages::Request request;
+      request.set_type(OrthancStone::Messages::RequestType::LOAD_IMAGE);
+      request.mutable_load_image()->set_sop_instance_uid(sopInstanceUid);
+      request.mutable_load_image()->set_frame_number(frameNumber);
+      request.mutable_load_image()->set_width(image.GetWidth());
+      request.mutable_load_image()->set_height(image.GetHeight());
+
+      const unsigned int height = image.GetHeight();
+      const unsigned int width = image.GetWidth();
+      for (unsigned int y = 0; y < height; y++)
+      {
+        const float* p = reinterpret_cast<const float*>(image.GetConstRow(y));
+        for (unsigned int x = 0; x < width; x++, p++)
+        {
+          request.mutable_load_image()->mutable_values()->Add(*p);
+        }
+      }
+
+      deepLearningState_ = DeepLearningState_Running;
+      SendRequestToWebWorker(request);
+    }
+    else
+    {
+      LOG(ERROR) << "Cannot access the frame content, maybe a color image?";
+
+      EM_ASM({
+          const customEvent = document.createEvent("CustomEvent");
+          customEvent.initCustomEvent("DeepLearningStep", false, false,
+                                      { "progress" : "0" });
+          window.dispatchEvent(customEvent);
+        });
+    }
+  }
+  else
+  {
+    deepLearningState_ = DeepLearningState_Pending;
+    deepLearningPendingSopInstanceUid_ = sopInstanceUid;
+    deepLearningPendingFrameNumber_ = frameNumber;
+  }
+}
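[Editor's sketch, not part of this changeset: the worker program
"DeepLearningWorker.js" targeted by "emscripten_call_worker()" is not shown
in this patch. With Emscripten's worker API, it would be a separate C++
program compiled with "-s BUILD_AS_WORKER=1" that exports the entry points
named in the calls above ("Initialize" and "Execute") and answers through
"emscripten_worker_respond()", which is what invokes "DeepLearningCallback()"
in the main program. A minimal skeleton, assuming only the generated
"Worker.pb.h" messages (the "Respond()" helper is hypothetical):

#include <emscripten/emscripten.h>
#include <string>
#include "Worker.pb.h"

// Hypothetical helper: the buffer handed to "emscripten_worker_respond()"
// is delivered to the callback given to "emscripten_call_worker()"
static void Respond(const OrthancStone::Messages::Response& response)
{
  std::string s;
  response.SerializeToString(&s);
  emscripten_worker_respond(s.empty() ? NULL : &s[0], s.size());
}

extern "C"
{
  void Initialize(char* data, int size)
  {
    OrthancStone::Messages::Response response;
    response.set_type(OrthancStone::Messages::ResponseType::INITIALIZED);
    Respond(response);  // The main program reacts by firing "DeepLearningInitialized"
  }

  void Execute(char* data, int size)
  {
    OrthancStone::Messages::Request request;
    if (request.ParseFromArray(data, size))
    {
      // Dispatch on "request.type()" (PARSE_MODEL, LOAD_IMAGE, EXECUTE_STEP),
      // run the corresponding work, then "Respond()" with PARSED_MODEL,
      // LOADED_IMAGE or STEP_DONE
    }
  }
}]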
+
+static void DeepLearningNextStep()
+{
+  switch (deepLearningState_)
+  {
+    case DeepLearningState_Pending:
+      deepLearningState_ = DeepLearningState_Waiting;
+      DeepLearningSchedule(deepLearningPendingSopInstanceUid_, deepLearningPendingFrameNumber_);
+      break;
+
+    case DeepLearningState_Running:
+    {
+      OrthancStone::Messages::Request request;
+      request.set_type(OrthancStone::Messages::RequestType::EXECUTE_STEP);
+      SendRequestToWebWorker(request);
+      break;
+    }
+
+    default:
+      throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError, "Bad state for deep learning");
+  }
+}
+
+static void DeepLearningCallback(char* data,
+                                 int size,
+                                 void* payload)
+{
+  try
+  {
+    OrthancStone::Messages::Response response;
+    if (response.ParseFromArray(data, size))
+    {
+      switch (response.type())
+      {
+        case OrthancStone::Messages::ResponseType::INITIALIZED:
+          DISPATCH_JAVASCRIPT_EVENT("DeepLearningInitialized");
+          break;
+
+        case OrthancStone::Messages::ResponseType::PARSED_MODEL:
+          LOG(WARNING) << "Number of steps in the model: " << response.parse_model().number_of_steps();
+          DISPATCH_JAVASCRIPT_EVENT("DeepLearningModelReady");
+          break;
+
+        case OrthancStone::Messages::ResponseType::LOADED_IMAGE:
+          DeepLearningNextStep();
+          break;
+
+        case OrthancStone::Messages::ResponseType::STEP_DONE:
+        {
+          EM_ASM({
+              const customEvent = document.createEvent("CustomEvent");
+              customEvent.initCustomEvent("DeepLearningStep", false, false,
+                                          { "progress" : $0 });
+              window.dispatchEvent(customEvent);
+            },
+            response.step().progress()
+            );
+
+          if (response.step().done())
+          {
+            deepLearningState_ = DeepLearningState_Waiting;
+
+            const unsigned int height = response.step().mask().height();
+            const unsigned int width = response.step().mask().width();
+
+            LOG(WARNING) << "SUCCESS! Mask: " << width << "x" << height << " for frame "
+                         << response.step().mask().sop_instance_uid() << " / "
+                         << response.step().mask().frame_number();
+
+            Orthanc::Image mask(Orthanc::PixelFormat_Grayscale8, width, height, false);
+
+            size_t pos = 0;
+            for (unsigned int y = 0; y < height; y++)
+            {
+              uint8_t* p = reinterpret_cast<uint8_t*>(mask.GetRow(y));
+              for (unsigned int x = 0; x < width; x++, p++, pos++)
+              {
+                *p = response.step().mask().values(pos) ? 255 : 0;
+              }
+            }
+
+            for (Viewports::iterator it = allViewports_.begin(); it != allViewports_.end(); ++it)
+            {
+              assert(it->second != NULL);
+              it->second->SetDeepLearningMask(response.step().mask().sop_instance_uid(),
+                                              response.step().mask().frame_number(), mask);
+            }
+          }
+          else
+          {
+            DeepLearningNextStep();
+          }
+
+          break;
+        }
+
+        default:
+          LOG(ERROR) << "Unsupported response type from the deep learning worker";
+      }
+    }
+    else
+    {
+      LOG(ERROR) << "Bad response received from the deep learning worker";
+    }
+  }
+  EXTERN_CATCH_EXCEPTIONS;
+}
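[Editor's sketch, not part of this changeset: "Worker.pb.h" is generated from
a protobuf definition that this patch does not include. The message and field
names below are inferred purely from the accessors used in this file; the
field numbers and scalar types are guesses:

// deep-learning/WebAssembly/Worker.proto (reconstructed sketch)
syntax = "proto3";
package OrthancStone.Messages;

enum RequestType  { PARSE_MODEL = 0; LOAD_IMAGE = 1; EXECUTE_STEP = 2; }
enum ResponseType { INITIALIZED = 0; PARSED_MODEL = 1; LOADED_IMAGE = 2; STEP_DONE = 3; }

message ParseModelRequest { bytes content = 1; }

message LoadImageRequest {
  string sop_instance_uid = 1;
  uint32 frame_number = 2;
  uint32 width = 3;
  uint32 height = 4;
  repeated float values = 5;  // One value per pixel, row-major
}

message Request {
  RequestType type = 1;
  ParseModelRequest parse_model = 2;
  LoadImageRequest load_image = 3;
}

message ParsedModelResponse { uint32 number_of_steps = 1; }

message Mask {
  string sop_instance_uid = 1;
  uint32 frame_number = 2;
  uint32 width = 3;
  uint32 height = 4;
  repeated bool values = 5;  // Only tested for truthiness in DeepLearningCallback()
}

message StepResponse {
  float progress = 1;  // In the range [0..1]
  bool done = 2;
  Mask mask = 3;
}

message Response {
  ResponseType type = 1;
  ParsedModelResponse parse_model = 2;
  StepResponse step = 3;
}]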
+
+static void DeepLearningModelLoaded(emscripten_fetch_t *fetch)
+{
+  try
+  {
+    LOG(WARNING) << "Deep learning model loaded: " << fetch->numBytes;
+
+    OrthancStone::Messages::Request request;
+    request.set_type(OrthancStone::Messages::RequestType::PARSE_MODEL);
+    request.mutable_parse_model()->mutable_content()->assign(fetch->data, fetch->numBytes);
+
+    emscripten_fetch_close(fetch);  // Don't use "fetch" below
+    SendRequestToWebWorker(request);
+  }
+  EXTERN_CATCH_EXCEPTIONS;
+}
+
 extern "C"
 {
   int main(int argc, char const *argv[])
@@ -3843,11 +4127,55 @@
     framesCache_.reset(new FramesCache);
     osiriXAnnotations_.reset(new OrthancStone::OsiriX::CollectionOfAnnotations);
 
+    deepLearningWorker_ = emscripten_create_worker("DeepLearningWorker.js");
+    emscripten_call_worker(deepLearningWorker_, "Initialize", NULL, 0, DeepLearningCallback, NULL);
+
     DISPATCH_JAVASCRIPT_EVENT("StoneInitialized");
   }
 
 
   EMSCRIPTEN_KEEPALIVE
+  void LoadDeepLearningModel(const char* uri)
+  {
+    try
+    {
+      LOG(WARNING) << "Loading deep learning model: " << uri;
+
+      emscripten_fetch_attr_t attr;
+      emscripten_fetch_attr_init(&attr);
+      strcpy(attr.requestMethod, "GET");
+      attr.attributes = EMSCRIPTEN_FETCH_LOAD_TO_MEMORY;
+      attr.onsuccess = DeepLearningModelLoaded;
+      attr.onerror = NULL;
+      emscripten_fetch(&attr, uri);
+    }
+    EXTERN_CATCH_EXCEPTIONS;
+  }
+
+
+  EMSCRIPTEN_KEEPALIVE
+  void ApplyDeepLearningModel(const char* canvas)
+  {
+    try
+    {
+      boost::shared_ptr<ViewerViewport> viewport = GetViewport(canvas);
+
+      std::string sopInstanceUid;
+      unsigned int frameNumber;
+      if (viewport->GetCurrentFrame(sopInstanceUid, frameNumber))
+      {
+        DeepLearningSchedule(sopInstanceUid, frameNumber);
+      }
+      else
+      {
+        LOG(WARNING) << "No active frame";
+      }
+    }
+    EXTERN_CATCH_EXCEPTIONS;
+  }
+
+
+  EMSCRIPTEN_KEEPALIVE
   void SetDicomWebRoot(const char* uri,
                        int useRendered)
   {