orthanc-stone: changeset 2003:963f28eb40cb (branch: deep-learning)
integration default->deep-learning
author | Sebastien Jodogne <s.jodogne@gmail.com>
---|---
date | Wed, 02 Nov 2022 15:14:56 +0100
parents | 2034ae383cfd (diff), 1bb0a9716876 (current diff)
children | 37d6805b80ee
files | Applications/StoneWebViewer/WebApplication/app.js, Applications/StoneWebViewer/WebApplication/index.html, Applications/StoneWebViewer/WebAssembly/CMakeLists.txt, Applications/StoneWebViewer/WebAssembly/StoneWebViewer.cpp
diffstat | 4 files changed, 377 insertions(+), 11 deletions(-)
```diff
--- a/Applications/StoneWebViewer/WebApplication/app.js	Wed Nov 02 15:07:06 2022 +0100
+++ b/Applications/StoneWebViewer/WebApplication/app.js	Wed Nov 02 15:14:56 2022 +0100
@@ -485,7 +485,10 @@
       series: [],
       studies: [],
       seriesIndex: {},  // Maps "SeriesInstanceUID" to "index in this.series"
-      virtualSeriesThumbnails: {}
+      virtualSeriesThumbnails: {},
+
+      deepLearningReady: false,
+      deepLearningProgress: 0  // Floating-point number in the range [0..1]
     }
   },
   computed: {
@@ -1143,6 +1146,11 @@
         alert('Cannot find the study in Orthanc');
       });
+    },
+
+    ApplyDeepLearning: function()
+    {
+      stone.ApplyDeepLearningModel(this.GetActiveCanvas());
     }
   },
 
@@ -1438,3 +1446,17 @@
     }
   }
 });
+
+
+window.addEventListener('DeepLearningInitialized', function() {
+  stone.LoadDeepLearningModel('model.message');
+});
+
+window.addEventListener('DeepLearningModelReady', function() {
+  app.deepLearningReady = true;
+  app.deepLearningProgress = 0;
+});
+
+window.addEventListener('DeepLearningStep', function(args) {
+  app.deepLearningProgress = args.detail.progress;
+});
```
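For context (not part of the changeset): the three listeners above consume `CustomEvent`s that the WebAssembly side dispatches through `EM_ASM` (see the StoneWebViewer.cpp hunks below). A minimal sketch to exercise the new Vue bindings from the browser console without running the worker — the `simulateProgress` helper is hypothetical:

```js
// Hypothetical test helper (not in the changeset): fires the same events as
// the C++ code, so "deepLearningReady" and the progress bar can be tested
// without the deep learning worker.
function simulateProgress(progress) {
  // Mirrors the EM_ASM construction used in StoneWebViewer.cpp
  const customEvent = document.createEvent("CustomEvent");
  customEvent.initCustomEvent("DeepLearningStep", false, false,
                              { "progress" : progress });
  window.dispatchEvent(customEvent);
}

window.dispatchEvent(new CustomEvent("DeepLearningModelReady"));  // Shows the button
simulateProgress(0.5);  // Progress bar at 50%
```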
```diff
--- a/Applications/StoneWebViewer/WebApplication/index.html	Wed Nov 02 15:07:06 2022 +0100
+++ b/Applications/StoneWebViewer/WebApplication/index.html	Wed Nov 02 15:14:56 2022 +0100
@@ -314,11 +314,23 @@
         </div>
 
         <div class="wvLayoutLeft__contentBottom">
+          <div v-if="deepLearningReady">
+            <div style="width:100%;padding:10px;text-align:center;">
+              <button class="btn btn-primary" @click="ApplyDeepLearning()">Apply deep learning</button>
+            </div>
+
+            <div style="padding: 10px; position: relative; width:100%;">
+              <div style="background-color: #007000; position: relative; height: 10px;">
+                <div v-bind:style="{ 'background-color': '#00ff00', position: 'absolute', height: '100%', width: (deepLearningProgress*100) + '%' }"></div>
+              </div>
+            </div>
+          </div>
+
           <div style="width:100%;padding:10px;text-align:center;"
                v-if="globalConfiguration.InstitutionLogo != ''">
             <img style="max-width:100%" v-bind:src="globalConfiguration.InstitutionLogo" />
           </div>
-        </div>
+        </div>
       </div>
     </div>
     <div class="wvLayout__main"
```
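The progress bar is two nested `div`s: the outer dark-green track has a fixed height, and the width of the inner bright-green bar is bound to `deepLearningProgress`. As an illustration (not in the changeset), with `deepLearningProgress == 0.5` Vue resolves the binding to:

```html
<!-- Rendered output of the inner bar when deepLearningProgress == 0.5 -->
<div style="background-color: #007000; position: relative; height: 10px;">
  <div style="background-color: #00ff00; position: absolute; height: 100%; width: 50%;"></div>
</div>
```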
```diff
--- a/Applications/StoneWebViewer/WebAssembly/CMakeLists.txt	Wed Nov 02 15:07:06 2022 +0100
+++ b/Applications/StoneWebViewer/WebAssembly/CMakeLists.txt	Wed Nov 02 15:14:56 2022 +0100
@@ -24,6 +24,8 @@
 project(OrthancStone)
 
 include(${CMAKE_SOURCE_DIR}/../Version.cmake)
+include(${CMAKE_SOURCE_DIR}/deep-learning/WebAssembly/Protobuf.cmake)  # TODO
+
 
 set(ORTHANC_STONE_INSTALL_PREFIX
   "${CMAKE_SOURCE_DIR}/../../../wasm-binaries/StoneWebViewer" CACHE PATH "Where to put the WebAssembly binaries")
@@ -131,6 +133,8 @@
 add_executable(StoneWebViewer
   ${ORTHANC_STONE_SOURCES}
   ${AUTOGENERATED_SOURCES}
+  ${PROTOBUF_SOURCES}  # TODO
+  ${CMAKE_SOURCE_DIR}/deep-learning/WebAssembly/Worker.pb.cc  # TODO
   StoneWebViewer.cpp
   )
```
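The generated `Worker.pb.h`/`Worker.pb.cc` referenced here come from a `Worker.proto` schema that is not part of this diff. Purely as a reading aid, below is a hypothetical sketch of what that schema could look like, inferred from the generated accessors used in the StoneWebViewer.cpp hunks that follow; message names, field numbers, and exact scalar types are assumptions, not the actual file:

```proto
// Hypothetical reconstruction of "deep-learning/WebAssembly/Worker.proto",
// inferred from accessors such as request.mutable_load_image()->set_width()
// and response.step().mask().values(pos). Field numbers are guesses.
syntax = "proto3";
package OrthancStone.Messages;

enum RequestType {
  PARSE_MODEL = 0;
  LOAD_IMAGE = 1;
  EXECUTE_STEP = 2;
}

enum ResponseType {
  INITIALIZED = 0;
  PARSED_MODEL = 1;
  LOADED_IMAGE = 2;
  STEP_DONE = 3;
}

message ParseModelRequest {
  bytes content = 1;            // Raw bytes of the downloaded model file
}

message LoadImageRequest {
  string sop_instance_uid = 1;
  uint32 frame_number = 2;
  uint32 width = 3;
  uint32 height = 4;
  repeated float values = 5;    // Row-major Float32 pixel values
}

message Request {
  RequestType type = 1;
  ParseModelRequest parse_model = 2;
  LoadImageRequest load_image = 3;
}

message ParsedModelResponse {
  uint32 number_of_steps = 1;
}

message Mask {
  string sop_instance_uid = 1;
  uint32 frame_number = 2;
  uint32 width = 3;
  uint32 height = 4;
  repeated bool values = 5;     // Row-major binary segmentation mask
}

message StepResponse {
  float progress = 1;           // In the range [0..1]
  bool done = 2;
  Mask mask = 3;                // Only filled once "done" is true
}

message Response {
  ResponseType type = 1;
  ParsedModelResponse parse_model = 2;
  StepResponse step = 3;
}
```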
```diff
--- a/Applications/StoneWebViewer/WebAssembly/StoneWebViewer.cpp	Wed Nov 02 15:07:06 2022 +0100
+++ b/Applications/StoneWebViewer/WebAssembly/StoneWebViewer.cpp	Wed Nov 02 15:14:56 2022 +0100
@@ -1606,10 +1606,11 @@
 private:
   static const int LAYER_TEXTURE = 0;
   static const int LAYER_OVERLAY = 1;
-  static const int LAYER_ORIENTATION_MARKERS = 2;
-  static const int LAYER_REFERENCE_LINES = 3;
-  static const int LAYER_ANNOTATIONS_OSIRIX = 4;
-  static const int LAYER_ANNOTATIONS_STONE = 5;
+  static const int LAYER_DEEP_LEARNING = 2;
+  static const int LAYER_ORIENTATION_MARKERS = 3;
+  static const int LAYER_REFERENCE_LINES = 4;
+  static const int LAYER_ANNOTATIONS_OSIRIX = 5;
+  static const int LAYER_ANNOTATIONS_STONE = 6;
 
 
   class ICommand : public Orthanc::IDynamicObject
@@ -2025,9 +2026,12 @@
   // coordinates of the current texture, with (0,0) corresponding to
   // the center of the top-left pixel
   boost::shared_ptr<OrthancStone::AnnotationsSceneLayer>  stoneAnnotations_;
-  
+
   bool  linearInterpolation_;
 
+  boost::shared_ptr<Orthanc::ImageAccessor>  deepLearningMask_;
+  std::string   deepLearningSopInstanceUid_;
+  unsigned int  deepLearningFrameNumber_;
 
   void ScheduleNextPrefetch()
   {
@@ -2246,6 +2250,26 @@
       }
     }
 
+    std::unique_ptr<OrthancStone::LookupTableTextureSceneLayer> deepLearningLayer;
+
+    if (deepLearningMask_.get() != NULL &&
+        deepLearningSopInstanceUid_ == instance.GetSopInstanceUid() &&
+        deepLearningFrameNumber_ == frameIndex)
+    {
+      std::vector<uint8_t> lut(4 * 256);
+      for (unsigned int v = 128; v < 256; v++)
+      {
+        lut[4 * v] = 196;
+        lut[4 * v + 1] = 0;
+        lut[4 * v + 2] = 0;
+        lut[4 * v + 3] = 196;
+      }
+
+      deepLearningLayer.reset(new OrthancStone::LookupTableTextureSceneLayer(*deepLearningMask_));
+      deepLearningLayer->SetLookupTable(lut);
+      deepLearningLayer->SetPixelSpacing(pixelSpacingX, pixelSpacingY);
+    }
+
     StoneAnnotationsRegistry::GetInstance().Load(*stoneAnnotations_, instance.GetSopInstanceUid(), frameIndex);
 
     // Orientation markers, new in Stone Web viewer 2.4
@@ -2322,6 +2346,15 @@
       scene.DeleteLayer(LAYER_ORIENTATION_MARKERS);
     }
 
+    if (deepLearningLayer.get() != NULL)
+    {
+      scene.SetLayer(LAYER_DEEP_LEARNING, deepLearningLayer.release());
+    }
+    else
+    {
+      scene.DeleteLayer(LAYER_DEEP_LEARNING);
+    }
+
     stoneAnnotations_->Render(scene);  // Necessary for "FitContent()" to work
 
     if (fitNextContent_)
@@ -2348,7 +2381,7 @@
     {
       const size_t cursorIndex = cursor_->GetCurrentIndex();
       const OrthancStone::DicomInstanceParameters& instance = frames_->GetInstanceOfFrame(cursorIndex);
-      const size_t frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
+      const unsigned int frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
 
       // Only change the scene if the loaded frame still corresponds to the current cursor
       if (instance.GetSopInstanceUid() == loadedSopInstanceUid &&
@@ -2654,7 +2687,7 @@
     {
       const size_t cursorIndex = cursor_->GetCurrentIndex();
       const OrthancStone::DicomInstanceParameters& instance = frames_->GetInstanceOfFrame(cursorIndex);
-      const size_t frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
+      const unsigned int frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
 
       StoneAnnotationsRegistry::GetInstance().Save(instance.GetSopInstanceUid(), frameNumber, *stoneAnnotations_);
 
@@ -2880,7 +2913,7 @@
 
       const size_t cursorIndex = cursor_->GetCurrentIndex();
       const OrthancStone::DicomInstanceParameters& instance = frames_->GetInstanceOfFrame(cursorIndex);
-      const size_t frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
+      const unsigned int frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
 
       FramesCache::Accessor accessor(*framesCache_, instance.GetSopInstanceUid(), frameNumber);
       if (accessor.IsValid())
@@ -3426,7 +3459,7 @@
     {
       const size_t cursorIndex = cursor_->GetCurrentIndex();
       const OrthancStone::DicomInstanceParameters& instance = frames_->GetInstanceOfFrame(cursorIndex);
-      const size_t frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
+      const unsigned int frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
 
       if (instance.GetSopInstanceUid() == sopInstanceUid &&
           frameNumber == frame)
@@ -3442,6 +3475,7 @@
     }
   }
 
+
   void SetLinearInterpolation(bool linearInterpolation)
   {
     if (linearInterpolation_ != linearInterpolation)
@@ -3451,6 +3485,7 @@
     }
   }
 
+
   void AddTextAnnotation(const std::string& label,
                          const OrthancStone::ScenePoint2D& pointedPosition,
                          const OrthancStone::ScenePoint2D& labelPosition)
@@ -3458,6 +3493,43 @@
     stoneAnnotations_->AddTextAnnotation(label, pointedPosition, labelPosition);
     Redraw();
   }
+
+
+  bool GetCurrentFrame(std::string& sopInstanceUid /* out */,
+                       unsigned int& frameNumber /* out */) const
+  {
+    if (cursor_.get() != NULL &&
+        frames_.get() != NULL)
+    {
+      const size_t cursorIndex = cursor_->GetCurrentIndex();
+      const OrthancStone::DicomInstanceParameters& instance = frames_->GetInstanceOfFrame(cursorIndex);
+      sopInstanceUid = instance.GetSopInstanceUid();
+      frameNumber = frames_->GetFrameNumberInInstance(cursorIndex);
+      return true;
+    }
+    else
+    {
+      return false;
+    }
+  }
+
+
+  void SetDeepLearningMask(const std::string& sopInstanceUid,
+                           unsigned int frameNumber,
+                           const Orthanc::ImageAccessor& mask)
+  {
+    std::string currentSopInstanceUid;
+    unsigned int currentFrameNumber;
+    if (GetCurrentFrame(currentSopInstanceUid, currentFrameNumber) &&
+        sopInstanceUid == currentSopInstanceUid &&
+        frameNumber == currentFrameNumber)
+    {
+      deepLearningSopInstanceUid_ = sopInstanceUid;
+      deepLearningFrameNumber_ = frameNumber;
+      deepLearningMask_.reset(Orthanc::Image::Clone(mask));
+      Redraw();
+    }
+  }
 };
 
 
@@ -3809,6 +3881,218 @@
 }
 
 
+#include <emscripten/fetch.h>
+#include "deep-learning/WebAssembly/Worker.pb.h"
+
+enum DeepLearningState
+{
+  DeepLearningState_Waiting,
+  DeepLearningState_Pending,
+  DeepLearningState_Running
+};
+
+static DeepLearningState deepLearningState_ = DeepLearningState_Waiting;
+static worker_handle deepLearningWorker_;
+static std::string deepLearningPendingSopInstanceUid_;
+static unsigned int deepLearningPendingFrameNumber_;
+
+// Forward declaration
+static void DeepLearningCallback(char* data,
+                                 int size,
+                                 void* payload);
+
+static void SendRequestToWebWorker(const OrthancStone::Messages::Request& request)
+{
+  std::string s;
+  if (request.SerializeToString(&s) &&
+      !s.empty())
+  {
+    emscripten_call_worker(deepLearningWorker_, "Execute", &s[0], s.size(), DeepLearningCallback, NULL);
+  }
+  else
+  {
+    throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError,
+                                    "Cannot send command to the Web worker");
+  }
+}
+
+static void DeepLearningSchedule(const std::string& sopInstanceUid,
+                                 unsigned int frameNumber)
+{
+  if (deepLearningState_ == DeepLearningState_Waiting)
+  {
+    LOG(WARNING) << "Starting deep learning on: " << sopInstanceUid << " / " << frameNumber;
+
+    FramesCache::Accessor accessor(*framesCache_, sopInstanceUid, frameNumber);
+    if (accessor.IsValid() &&
+        accessor.GetImage().GetFormat() == Orthanc::PixelFormat_Float32)
+    {
+      const Orthanc::ImageAccessor& image = accessor.GetImage();
+
+      OrthancStone::Messages::Request request;
+      request.set_type(OrthancStone::Messages::RequestType::LOAD_IMAGE);
+      request.mutable_load_image()->set_sop_instance_uid(sopInstanceUid);
+      request.mutable_load_image()->set_frame_number(frameNumber);
+      request.mutable_load_image()->set_width(image.GetWidth());
+      request.mutable_load_image()->set_height(image.GetHeight());
+
+      const unsigned int height = image.GetHeight();
+      const unsigned int width = image.GetWidth();
+      for (unsigned int y = 0; y < height; y++)
+      {
+        const float* p = reinterpret_cast<const float*>(image.GetConstRow(y));
+        for (unsigned int x = 0; x < width; x++, p++)
+        {
+          request.mutable_load_image()->mutable_values()->Add(*p);
+        }
+      }
+
+      deepLearningState_ = DeepLearningState_Running;
+      SendRequestToWebWorker(request);
+    }
+    else
+    {
+      LOG(ERROR) << "Cannot access the frame content, maybe a color image?";
+
+      EM_ASM({
+          const customEvent = document.createEvent("CustomEvent");
+          customEvent.initCustomEvent("DeepLearningStep", false, false,
+                                      { "progress" : "0" });
+          window.dispatchEvent(customEvent);
+        });
+    }
+  }
+  else
+  {
+    deepLearningState_ = DeepLearningState_Pending;
+    deepLearningPendingSopInstanceUid_ = sopInstanceUid;
+    deepLearningPendingFrameNumber_ = frameNumber;
+  }
+}
+
+static void DeepLearningNextStep()
+{
+  switch (deepLearningState_)
+  {
+    case DeepLearningState_Pending:
+      deepLearningState_ = DeepLearningState_Waiting;
+      DeepLearningSchedule(deepLearningPendingSopInstanceUid_, deepLearningPendingFrameNumber_);
+      break;
+
+    case DeepLearningState_Running:
+    {
+      OrthancStone::Messages::Request request;
+      request.set_type(OrthancStone::Messages::RequestType::EXECUTE_STEP);
+      SendRequestToWebWorker(request);
+      break;
+    }
+
+    default:
+      throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError, "Bad state for deep learning");
+  }
+}
+
+static void DeepLearningCallback(char* data,
+                                 int size,
+                                 void* payload)
+{
+  try
+  {
+    OrthancStone::Messages::Response response;
+    if (response.ParseFromArray(data, size))
+    {
+      switch (response.type())
+      {
+        case OrthancStone::Messages::ResponseType::INITIALIZED:
+          DISPATCH_JAVASCRIPT_EVENT("DeepLearningInitialized");
+          break;
+
+        case OrthancStone::Messages::ResponseType::PARSED_MODEL:
+          LOG(WARNING) << "Number of steps in the model: " << response.parse_model().number_of_steps();
+          DISPATCH_JAVASCRIPT_EVENT("DeepLearningModelReady");
+          break;
+
+        case OrthancStone::Messages::ResponseType::LOADED_IMAGE:
+          DeepLearningNextStep();
+          break;
+
+        case OrthancStone::Messages::ResponseType::STEP_DONE:
+        {
+          EM_ASM({
+              const customEvent = document.createEvent("CustomEvent");
+              customEvent.initCustomEvent("DeepLearningStep", false, false,
+                                          { "progress" : $0 });
+              window.dispatchEvent(customEvent);
+            },
+            response.step().progress()
+            );
+
+          if (response.step().done())
+          {
+            deepLearningState_ = DeepLearningState_Waiting;
+
+            const unsigned int height = response.step().mask().height();
+            const unsigned int width = response.step().mask().width();
+
+            LOG(WARNING) << "SUCCESS! Mask: " << width << "x" << height << " for frame "
+                         << response.step().mask().sop_instance_uid() << " / "
+                         << response.step().mask().frame_number();
+
+            Orthanc::Image mask(Orthanc::PixelFormat_Grayscale8, width, height, false);
+
+            size_t pos = 0;
+            for (unsigned int y = 0; y < height; y++)
+            {
+              uint8_t* p = reinterpret_cast<uint8_t*>(mask.GetRow(y));
+              for (unsigned int x = 0; x < width; x++, p++, pos++)
+              {
+                *p = response.step().mask().values(pos) ? 255 : 0;
+              }
+            }
+
+            for (Viewports::iterator it = allViewports_.begin(); it != allViewports_.end(); ++it)
+            {
+              assert(it->second != NULL);
+              it->second->SetDeepLearningMask(response.step().mask().sop_instance_uid(),
+                                              response.step().mask().frame_number(), mask);
+            }
+          }
+          else
+          {
+            DeepLearningNextStep();
+          }
+
+          break;
+        }
+
+        default:
+          LOG(ERROR) << "Unsupported response type from the deep learning worker";
+      }
+    }
+    else
+    {
+      LOG(ERROR) << "Bad response received from the deep learning worker";
+    }
+  }
+  EXTERN_CATCH_EXCEPTIONS;
+}
+
+static void DeepLearningModelLoaded(emscripten_fetch_t *fetch)
+{
+  try
+  {
+    LOG(WARNING) << "Deep learning model loaded: " << fetch->numBytes;
+
+    OrthancStone::Messages::Request request;
+    request.set_type(OrthancStone::Messages::RequestType::PARSE_MODEL);
+    request.mutable_parse_model()->mutable_content()->assign(fetch->data, fetch->numBytes);
+
+    emscripten_fetch_close(fetch);  // Don't use "fetch" below
+    SendRequestToWebWorker(request);
+  }
+  EXTERN_CATCH_EXCEPTIONS;
+}
+
+
 extern "C"
 {
   int main(int argc, char const *argv[])
@@ -3824,11 +4108,55 @@
     framesCache_.reset(new FramesCache);
     osiriXAnnotations_.reset(new OrthancStone::OsiriX::CollectionOfAnnotations);
 
+    deepLearningWorker_ = emscripten_create_worker("DeepLearningWorker.js");
+    emscripten_call_worker(deepLearningWorker_, "Initialize", NULL, 0, DeepLearningCallback, NULL);
+
     DISPATCH_JAVASCRIPT_EVENT("StoneInitialized");
   }
 
 
   EMSCRIPTEN_KEEPALIVE
+  void LoadDeepLearningModel(const char* uri)
+  {
+    try
+    {
+      LOG(WARNING) << "Loading deep learning model: " << uri;
+
+      emscripten_fetch_attr_t attr;
+      emscripten_fetch_attr_init(&attr);
+      strcpy(attr.requestMethod, "GET");
+      attr.attributes = EMSCRIPTEN_FETCH_LOAD_TO_MEMORY;
+      attr.onsuccess = DeepLearningModelLoaded;
+      attr.onerror = NULL;
+      emscripten_fetch(&attr, uri);
+    }
+    EXTERN_CATCH_EXCEPTIONS;
+  }
+
+
+  EMSCRIPTEN_KEEPALIVE
+  void ApplyDeepLearningModel(const char* canvas)
+  {
+    try
+    {
+      boost::shared_ptr<ViewerViewport> viewport = GetViewport(canvas);
+
+      std::string sopInstanceUid;
+      unsigned int frameNumber;
+      if (viewport->GetCurrentFrame(sopInstanceUid, frameNumber))
+      {
+        DeepLearningSchedule(sopInstanceUid, frameNumber);
+      }
+      else
+      {
+        LOG(WARNING) << "No active frame";
+      }
+    }
+    EXTERN_CATCH_EXCEPTIONS;
+  }
+
+
+  EMSCRIPTEN_KEEPALIVE
   void SetDicomWebRoot(const char* uri,
                        int useRendered)
   {
```
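This changeset only contains the viewer side of the protocol: `DeepLearningWorker.js` itself is built elsewhere (see the `deep-learning/WebAssembly` directory referenced by the CMake hunk). Given how `emscripten_call_worker()` is invoked above, the worker presumably exposes `Initialize` and `Execute` entry points with the standard Emscripten worker signature and answers each call through `emscripten_worker_respond()`, which triggers `DeepLearningCallback()` on the main thread. The following is a hedged sketch of that counterpart; the function bodies are placeholders, not the actual implementation:

```cpp
// Hypothetical sketch (not part of this changeset) of the worker-side
// entry points behind "DeepLearningWorker.js". Each emscripten_call_worker()
// from the main thread invokes one of these functions, and each function
// answers exactly once via emscripten_worker_respond().

#include <emscripten/emscripten.h>
#include <string>
#include "Worker.pb.h"

static void Answer(const OrthancStone::Messages::Response& response)
{
  std::string s;
  if (response.SerializeToString(&s) &&
      !s.empty())
  {
    // Emscripten copies the buffer before posting it back to the main thread
    emscripten_worker_respond(&s[0], s.size());
  }
}

extern "C"
{
  EMSCRIPTEN_KEEPALIVE
  void Initialize(char* data, int size)
  {
    OrthancStone::Messages::Response response;
    response.set_type(OrthancStone::Messages::ResponseType::INITIALIZED);
    Answer(response);  // Fires "DeepLearningInitialized" in the viewer
  }

  EMSCRIPTEN_KEEPALIVE
  void Execute(char* data, int size)
  {
    OrthancStone::Messages::Request request;
    if (request.ParseFromArray(data, size))
    {
      OrthancStone::Messages::Response response;

      switch (request.type())
      {
        case OrthancStone::Messages::RequestType::PARSE_MODEL:
          // ...parse "request.parse_model().content()", count the steps...
          response.set_type(OrthancStone::Messages::ResponseType::PARSED_MODEL);
          break;

        case OrthancStone::Messages::RequestType::LOAD_IMAGE:
          // ...store the Float32 pixels of "request.load_image()"...
          response.set_type(OrthancStone::Messages::ResponseType::LOADED_IMAGE);
          break;

        case OrthancStone::Messages::RequestType::EXECUTE_STEP:
          // ...run one inference step, fill "step()" with progress,
          // and with the mask once "done" is true...
          response.set_type(OrthancStone::Messages::ResponseType::STEP_DONE);
          break;

        default:
          return;  // Unknown request: no response
      }

      Answer(response);
    }
  }
}
```

Splitting the inference into `EXECUTE_STEP` round-trips is what makes the progress bar possible: the viewer only schedules the next step after each `STEP_DONE` response, so the UI thread gets a `DeepLearningStep` event between steps, and a frame change in the meantime is queued through the `DeepLearningState_Pending` state rather than interleaving two inferences.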