comparison: OrthancServer/Sources/ServerContext.cpp @ 4044:d25f4c0fa160 (branch "framework")
splitting code into OrthancFramework and OrthancServer

author:   Sebastien Jodogne <s.jodogne@gmail.com>
date:     Wed, 10 Jun 2020 20:30:34 +0200
parents:  OrthancServer/ServerContext.cpp@058b5ade8acd
children: 05b8fd21089c
4043:6c6239aec462 vs. 4044:d25f4c0fa160
/**
 * Orthanc - A Lightweight, RESTful DICOM Store
 * Copyright (C) 2012-2016 Sebastien Jodogne, Medical Physics
 * Department, University Hospital of Liege, Belgium
 * Copyright (C) 2017-2020 Osimis S.A., Belgium
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * In addition, as a special exception, the copyright holders of this
 * program give permission to link the code of its release with the
 * OpenSSL project's "OpenSSL" library (or with modified versions of it
 * that use the same license as the "OpenSSL" library), and distribute
 * the linked executables. You must obey the GNU General Public License
 * in all respects for all of the code used other than "OpenSSL". If you
 * modify file(s) with this exception, you may extend this exception to
 * your version of the file(s), but you are not obligated to do so. If
 * you do not wish to do so, delete this exception statement from your
 * version. If you delete this exception statement from all source files
 * in the program, then also delete it here.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 **/


#include "PrecompiledHeadersServer.h"
#include "ServerContext.h"

#include "../Core/DicomParsing/Internals/DicomImageDecoder.h"
#include "../Core/Cache/SharedArchive.h"
#include "../Core/DicomParsing/DcmtkTranscoder.h"
#include "../Core/DicomParsing/FromDcmtkBridge.h"
#include "../Core/FileStorage/StorageAccessor.h"
#include "../Core/HttpServer/FilesystemHttpSender.h"
#include "../Core/HttpServer/HttpStreamTranscoder.h"
#include "../Core/JobsEngine/SetOfInstancesJob.h"
#include "../Core/Logging.h"
#include "../Core/MetricsRegistry.h"
#include "../Plugins/Engine/OrthancPlugins.h"

#include "OrthancConfiguration.h"
#include "OrthancRestApi/OrthancRestApi.h"
#include "Search/DatabaseLookup.h"
#include "ServerJobs/OrthancJobUnserializer.h"
#include "ServerToolbox.h"
#include "StorageCommitmentReports.h"

#include <dcmtk/dcmdata/dcfilefo.h>


#define ENABLE_DICOM_CACHE 1

static const size_t DICOM_CACHE_SIZE = 2;

/**
 * IMPORTANT: We make the assumption that the same instance of
 * FileStorage can be accessed from multiple threads. This seems OK
 * since the filesystem implements the required locking mechanisms,
 * but maybe a read-writer lock on the "FileStorage" could be
 * useful. Conversely, "ServerIndex" already implements mutex-based
 * locking.
 **/
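/**
 * A minimal sketch of the read-writer lock suggested above, assuming
 * "boost::shared_mutex" (illustrative only, not part of the current
 * implementation): readers of the storage area would take a shared
 * lock, whereas removals would take an exclusive one.
 *
 *   boost::shared_mutex storageMutex_;
 *   {
 *     boost::shared_lock<boost::shared_mutex> reader(storageMutex_);  // read a file
 *   }
 *   {
 *     boost::unique_lock<boost::shared_mutex> writer(storageMutex_);  // remove a file
 *   }
 **/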

namespace Orthanc
{
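  // Worker thread that dequeues the pending "ServerIndexChange" events
  // and forwards each of them to the registered listeners (Lua scripts
  // and plugins). Exceptions thrown by a listener are logged, but they
  // do not interrupt the loop.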
  void ServerContext::ChangeThread(ServerContext* that,
                                   unsigned int sleepDelay)
  {
    while (!that->done_)
    {
      std::unique_ptr<IDynamicObject> obj(that->pendingChanges_.Dequeue(sleepDelay));

      if (obj.get() != NULL)
      {
        const ServerIndexChange& change = dynamic_cast<const ServerIndexChange&>(*obj.get());

        boost::shared_lock<boost::shared_mutex> lock(that->listenersMutex_);
        for (ServerListeners::iterator it = that->listeners_.begin();
             it != that->listeners_.end(); ++it)
        {
          try
          {
            try
            {
              it->GetListener().SignalChange(change);
            }
            catch (std::bad_alloc&)
            {
              LOG(ERROR) << "Not enough memory while signaling a change";
            }
            catch (...)
            {
              throw OrthancException(ErrorCode_InternalError);
            }
          }
          catch (OrthancException& e)
          {
            LOG(ERROR) << "Error in the " << it->GetDescription()
                       << " callback while signaling a change: " << e.What()
                       << " (code " << e.GetErrorCode() << ")";
          }
        }
      }
    }
  }

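  // Worker thread that periodically serializes the jobs registry into
  // the database: either every 10 seconds, or as soon as a job-related
  // event has set the "haveJobsChanged_" flag. The actual write only
  // happens if the "SaveJobs" option is enabled.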
  void ServerContext::SaveJobsThread(ServerContext* that,
                                     unsigned int sleepDelay)
  {
    static const boost::posix_time::time_duration PERIODICITY =
      boost::posix_time::seconds(10);

    boost::posix_time::ptime next =
      boost::posix_time::microsec_clock::universal_time() + PERIODICITY;

    while (!that->done_)
    {
      boost::this_thread::sleep(boost::posix_time::milliseconds(sleepDelay));

      if (that->haveJobsChanged_ ||
          boost::posix_time::microsec_clock::universal_time() >= next)
      {
        that->haveJobsChanged_ = false;
        that->SaveJobsEngine();
        next = boost::posix_time::microsec_clock::universal_time() + PERIODICITY;
      }
    }
  }


  void ServerContext::SignalJobSubmitted(const std::string& jobId)
  {
    haveJobsChanged_ = true;
    mainLua_.SignalJobSubmitted(jobId);
    plugins_->SignalJobSubmitted(jobId);
  }


  void ServerContext::SignalJobSuccess(const std::string& jobId)
  {
    haveJobsChanged_ = true;
    mainLua_.SignalJobSuccess(jobId);
    plugins_->SignalJobSuccess(jobId);
  }


  void ServerContext::SignalJobFailure(const std::string& jobId)
  {
    haveJobsChanged_ = true;
    mainLua_.SignalJobFailure(jobId);
    plugins_->SignalJobFailure(jobId);
  }


  void ServerContext::SetupJobsEngine(bool unitTesting,
                                      bool loadJobsFromDatabase)
  {
    if (loadJobsFromDatabase)
    {
      std::string serialized;
      if (index_.LookupGlobalProperty(serialized, GlobalProperty_JobsRegistry))
      {
        LOG(WARNING) << "Reloading the jobs from the last execution of Orthanc";
        OrthancJobUnserializer unserializer(*this);

        try
        {
          jobsEngine_.LoadRegistryFromString(unserializer, serialized);
        }
        catch (OrthancException& e)
        {
          LOG(WARNING) << "Cannot unserialize the jobs engine, starting anyway: " << e.What();
        }
      }
      else
      {
        LOG(INFO) << "The last execution of Orthanc has archived no job";
      }
    }
    else
    {
      LOG(INFO) << "Not reloading the jobs from the last execution of Orthanc";
    }

    jobsEngine_.GetRegistry().SetObserver(*this);
    jobsEngine_.Start();
    isJobsEngineUnserialized_ = true;

    saveJobsThread_ = boost::thread(SaveJobsThread, this, (unitTesting ? 20 : 100));
  }


  void ServerContext::SaveJobsEngine()
  {
    if (saveJobs_)
    {
      VLOG(1) << "Serializing the content of the jobs engine";

      try
      {
        Json::Value value;
        jobsEngine_.GetRegistry().Serialize(value);

        Json::FastWriter writer;
        std::string serialized = writer.write(value);

        index_.SetGlobalProperty(GlobalProperty_JobsRegistry, serialized);
      }
      catch (OrthancException& e)
      {
        LOG(ERROR) << "Cannot serialize the jobs engine: " << e.What();
      }
    }
  }

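  /**
   * For reference, the configuration options that are read by the
   * constructor below, shown with the default values it uses when an
   * option is absent (illustrative excerpt of the Orthanc configuration
   * file, not an exhaustive list):
   *
   *   {
   *     "QueryRetrieveSize" : 100,
   *     "MediaArchiveSize" : 1,
   *     "DicomAet" : "ORTHANC",
   *     "ConcurrentJobs" : 2,
   *     "SaveJobs" : true,
   *     "MetricsEnabled" : true,
   *     "StorageAccessOnFind" : "Always",
   *     "LimitFindInstances" : 0,
   *     "LimitFindResults" : 0,
   *     "StorageCommitmentReportsSize" : 100,
   *     "TranscodeDicomProtocol" : true,
   *     "BuiltinDecoderTranscoderOrder" : "After",
   *     "DicomLossyTranscodingQuality" : 90
   *   }
   *
   * "IngestTranscoding" has no default: automated transcoding of
   * incoming instances is only enabled if this option is present.
   **/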
  ServerContext::ServerContext(IDatabaseWrapper& database,
                               IStorageArea& area,
                               bool unitTesting,
                               size_t maxCompletedJobs) :
    index_(*this, database, (unitTesting ? 20 : 500)),
    area_(area),
    compressionEnabled_(false),
    storeMD5_(true),
    provider_(*this),
    dicomCache_(provider_, DICOM_CACHE_SIZE),
    mainLua_(*this),
    filterLua_(*this),
    luaListener_(*this),
    jobsEngine_(maxCompletedJobs),
#if ORTHANC_ENABLE_PLUGINS == 1
    plugins_(NULL),
#endif
    done_(false),
    haveJobsChanged_(false),
    isJobsEngineUnserialized_(false),
    metricsRegistry_(new MetricsRegistry),
    isHttpServerSecure_(true),
    isExecuteLuaEnabled_(false),
    overwriteInstances_(false),
    dcmtkTranscoder_(new DcmtkTranscoder),
    isIngestTranscoding_(false)
  {
    try
    {
      unsigned int lossyQuality;

      {
        OrthancConfiguration::ReaderLock lock;

        queryRetrieveArchive_.reset(
          new SharedArchive(lock.GetConfiguration().GetUnsignedIntegerParameter("QueryRetrieveSize", 100)));
        mediaArchive_.reset(
          new SharedArchive(lock.GetConfiguration().GetUnsignedIntegerParameter("MediaArchiveSize", 1)));
        defaultLocalAet_ = lock.GetConfiguration().GetStringParameter("DicomAet", "ORTHANC");
        jobsEngine_.SetWorkersCount(lock.GetConfiguration().GetUnsignedIntegerParameter("ConcurrentJobs", 2));
        saveJobs_ = lock.GetConfiguration().GetBooleanParameter("SaveJobs", true);
        metricsRegistry_->SetEnabled(lock.GetConfiguration().GetBooleanParameter("MetricsEnabled", true));

        // New configuration options in Orthanc 1.5.1
        findStorageAccessMode_ = StringToFindStorageAccessMode(lock.GetConfiguration().GetStringParameter("StorageAccessOnFind", "Always"));
        limitFindInstances_ = lock.GetConfiguration().GetUnsignedIntegerParameter("LimitFindInstances", 0);
        limitFindResults_ = lock.GetConfiguration().GetUnsignedIntegerParameter("LimitFindResults", 0);

        // New configuration option in Orthanc 1.6.0
        storageCommitmentReports_.reset(new StorageCommitmentReports(lock.GetConfiguration().GetUnsignedIntegerParameter("StorageCommitmentReportsSize", 100)));

        // New options in Orthanc 1.7.0
        transcodeDicomProtocol_ = lock.GetConfiguration().GetBooleanParameter("TranscodeDicomProtocol", true);
        builtinDecoderTranscoderOrder_ = StringToBuiltinDecoderTranscoderOrder(lock.GetConfiguration().GetStringParameter("BuiltinDecoderTranscoderOrder", "After"));
        lossyQuality = lock.GetConfiguration().GetUnsignedIntegerParameter("DicomLossyTranscodingQuality", 90);

        std::string s;
        if (lock.GetConfiguration().LookupStringParameter(s, "IngestTranscoding"))
        {
          if (LookupTransferSyntax(ingestTransferSyntax_, s))
          {
            isIngestTranscoding_ = true;
            LOG(WARNING) << "Incoming DICOM instances will automatically be transcoded to "
                         << "transfer syntax: " << GetTransferSyntaxUid(ingestTransferSyntax_);
          }
          else
          {
            throw OrthancException(ErrorCode_ParameterOutOfRange,
                                   "Unknown transfer syntax for ingest transcoding: " + s);
          }
        }
        else
        {
          isIngestTranscoding_ = false;
          LOG(INFO) << "Automated transcoding of incoming DICOM instances is disabled";
        }
      }

      jobsEngine_.SetThreadSleep(unitTesting ? 20 : 200);

      listeners_.push_back(ServerListener(luaListener_, "Lua"));
      changeThread_ = boost::thread(ChangeThread, this, (unitTesting ? 20 : 100));

      dynamic_cast<DcmtkTranscoder&>(*dcmtkTranscoder_).SetLossyQuality(lossyQuality);
    }
    catch (OrthancException&)
    {
      Stop();
      throw;
    }
  }


  ServerContext::~ServerContext()
  {
    if (!done_)
    {
      LOG(ERROR) << "INTERNAL ERROR: ServerContext::Stop() should be invoked manually to avoid mess in the destruction order!";
      Stop();
    }
  }


  void ServerContext::Stop()
  {
    if (!done_)
    {
      {
        boost::unique_lock<boost::shared_mutex> lock(listenersMutex_);
        listeners_.clear();
      }

      done_ = true;

      if (changeThread_.joinable())
      {
        changeThread_.join();
      }

      if (saveJobsThread_.joinable())
      {
        saveJobsThread_.join();
      }

      jobsEngine_.GetRegistry().ResetObserver();

      if (isJobsEngineUnserialized_)
      {
        // Avoid losing jobs if the JobsRegistry cannot be unserialized
        SaveJobsEngine();
      }

      // Do not change the order below!
      jobsEngine_.Stop();
      index_.Stop();
    }
  }


  void ServerContext::SetCompressionEnabled(bool enabled)
  {
    if (enabled)
      LOG(WARNING) << "Disk compression is enabled";
    else
      LOG(WARNING) << "Disk compression is disabled";

    compressionEnabled_ = enabled;
  }


  void ServerContext::RemoveFile(const std::string& fileUuid,
                                 FileContentType type)
  {
    StorageAccessor accessor(area_, GetMetricsRegistry());
    accessor.Remove(fileUuid, type);
  }

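  // Stores one instance whose transfer syntax is already the desired
  // one: unless a listener filters the instance out, the DICOM file and
  // its "DICOM-as-JSON" summary are written to the storage area, the
  // instance is registered in the index, and the listeners (Lua scripts
  // and plugins) are notified of the new instance.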
  StoreStatus ServerContext::StoreAfterTranscoding(std::string& resultPublicId,
                                                   DicomInstanceToStore& dicom,
                                                   StoreInstanceMode mode)
  {
    bool overwrite;
    switch (mode)
    {
      case StoreInstanceMode_Default:
        overwrite = overwriteInstances_;
        break;

      case StoreInstanceMode_OverwriteDuplicate:
        overwrite = true;
        break;

      case StoreInstanceMode_IgnoreDuplicate:
        overwrite = false;
        break;

      default:
        throw OrthancException(ErrorCode_ParameterOutOfRange);
    }

    try
    {
      MetricsRegistry::Timer timer(GetMetricsRegistry(), "orthanc_store_dicom_duration_ms");
      StorageAccessor accessor(area_, GetMetricsRegistry());

      resultPublicId = dicom.GetHasher().HashInstance();

      Json::Value simplifiedTags;
      ServerToolbox::SimplifyTags(simplifiedTags, dicom.GetJson(), DicomToJsonFormat_Human);

      // Test if the instance must be filtered out
      bool accepted = true;

      {
        boost::shared_lock<boost::shared_mutex> lock(listenersMutex_);

        for (ServerListeners::iterator it = listeners_.begin(); it != listeners_.end(); ++it)
        {
          try
          {
            if (!it->GetListener().FilterIncomingInstance(dicom, simplifiedTags))
            {
              accepted = false;
              break;
            }
          }
          catch (OrthancException& e)
          {
            LOG(ERROR) << "Error in the " << it->GetDescription()
                       << " callback while receiving an instance: " << e.What()
                       << " (code " << e.GetErrorCode() << ")";
            throw;
          }
        }
      }

      if (!accepted)
      {
        LOG(INFO) << "An incoming instance has been discarded by the filter";
        return StoreStatus_FilteredOut;
      }

      {
        // Remove the file from the DicomCache (useful if
        // "OverwriteInstances" is set to "true")
        boost::mutex::scoped_lock lock(dicomCacheMutex_);
        dicomCache_.Invalidate(resultPublicId);
      }

      // TODO Should we use "gzip" instead?
      CompressionType compression = (compressionEnabled_ ? CompressionType_ZlibWithSize : CompressionType_None);

      FileInfo dicomInfo = accessor.Write(dicom.GetBufferData(), dicom.GetBufferSize(),
                                          FileContentType_Dicom, compression, storeMD5_);
      FileInfo jsonInfo = accessor.Write(dicom.GetJson().toStyledString(),
                                         FileContentType_DicomAsJson, compression, storeMD5_);

      ServerIndex::Attachments attachments;
      attachments.push_back(dicomInfo);
      attachments.push_back(jsonInfo);

      typedef std::map<MetadataType, std::string> InstanceMetadata;
      InstanceMetadata instanceMetadata;
      StoreStatus status = index_.Store(
        instanceMetadata, dicom, attachments, overwrite);

      // Only keep the metadata for the "instance" level
      dicom.GetMetadata().clear();

      for (InstanceMetadata::const_iterator it = instanceMetadata.begin();
           it != instanceMetadata.end(); ++it)
      {
        dicom.GetMetadata().insert(std::make_pair(std::make_pair(ResourceType_Instance, it->first),
                                                  it->second));
      }

      if (status != StoreStatus_Success)
      {
        accessor.Remove(dicomInfo);
        accessor.Remove(jsonInfo);
      }

      switch (status)
      {
        case StoreStatus_Success:
          LOG(INFO) << "New instance stored";
          break;

        case StoreStatus_AlreadyStored:
          LOG(INFO) << "Already stored";
          break;

        case StoreStatus_Failure:
          LOG(ERROR) << "Store failure";
          break;

        default:
          // This should never happen
          break;
      }

      if (status == StoreStatus_Success ||
          status == StoreStatus_AlreadyStored)
      {
        boost::shared_lock<boost::shared_mutex> lock(listenersMutex_);

        for (ServerListeners::iterator it = listeners_.begin(); it != listeners_.end(); ++it)
        {
          try
          {
            it->GetListener().SignalStoredInstance(resultPublicId, dicom, simplifiedTags);
          }
          catch (OrthancException& e)
          {
            LOG(ERROR) << "Error in the " << it->GetDescription()
                       << " callback while receiving an instance: " << e.What()
                       << " (code " << e.GetErrorCode() << ")";
          }
        }
      }

      return status;
    }
    catch (OrthancException& e)
    {
      if (e.GetErrorCode() == ErrorCode_InexistentTag)
      {
        dicom.GetSummary().LogMissingTagsForStore();
      }

      throw;
    }
  }

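  // Public entry point for storing an incoming instance. If the
  // "IngestTranscoding" option is enabled and the incoming transfer
  // syntax differs from the configured one, the file is transcoded
  // first; otherwise (or if transcoding fails), the original file is
  // stored as-is.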
  StoreStatus ServerContext::Store(std::string& resultPublicId,
                                   DicomInstanceToStore& dicom,
                                   StoreInstanceMode mode)
  {
    if (!isIngestTranscoding_)
    {
      // No automated transcoding. This was the only path in Orthanc <= 1.6.1.
      return StoreAfterTranscoding(resultPublicId, dicom, mode);
    }
    else
    {
      // Automated transcoding of incoming DICOM files

      DicomTransferSyntax sourceSyntax;
      if (!FromDcmtkBridge::LookupOrthancTransferSyntax(
            sourceSyntax, dicom.GetParsedDicomFile().GetDcmtkObject()) ||
          sourceSyntax == ingestTransferSyntax_)
      {
        // No transcoding
        return StoreAfterTranscoding(resultPublicId, dicom, mode);
      }
      else
      {
        std::set<DicomTransferSyntax> syntaxes;
        syntaxes.insert(ingestTransferSyntax_);

        IDicomTranscoder::DicomImage source;
        source.SetExternalBuffer(dicom.GetBufferData(), dicom.GetBufferSize());

        IDicomTranscoder::DicomImage transcoded;
        if (Transcode(transcoded, source, syntaxes, true /* allow new SOP instance UID */))
        {
          std::unique_ptr<ParsedDicomFile> tmp(transcoded.ReleaseAsParsedDicomFile());

          DicomInstanceToStore toStore;
          toStore.SetParsedDicomFile(*tmp);
          toStore.SetOrigin(dicom.GetOrigin());

          StoreStatus ok = StoreAfterTranscoding(resultPublicId, toStore, mode);
          assert(resultPublicId == tmp->GetHasher().HashInstance());

          return ok;
        }
        else
        {
          // Cannot transcode => store the original file
          return StoreAfterTranscoding(resultPublicId, dicom, mode);
        }
      }
    }
  }


  void ServerContext::AnswerAttachment(RestApiOutput& output,
                                       const std::string& resourceId,
                                       FileContentType content)
  {
    FileInfo attachment;
    if (!index_.LookupAttachment(attachment, resourceId, content))
    {
      throw OrthancException(ErrorCode_UnknownResource);
    }

    StorageAccessor accessor(area_, GetMetricsRegistry());
    accessor.AnswerFile(output, attachment, GetFileContentMime(content));
  }


  void ServerContext::ChangeAttachmentCompression(const std::string& resourceId,
                                                  FileContentType attachmentType,
                                                  CompressionType compression)
  {
    LOG(INFO) << "Changing compression type for attachment "
              << EnumerationToString(attachmentType)
              << " of resource " << resourceId << " to "
              << compression;

    FileInfo attachment;
    if (!index_.LookupAttachment(attachment, resourceId, attachmentType))
    {
      throw OrthancException(ErrorCode_UnknownResource);
    }

    if (attachment.GetCompressionType() == compression)
    {
      // Nothing to do
      return;
    }

    std::string content;

    StorageAccessor accessor(area_, GetMetricsRegistry());
    accessor.Read(content, attachment);

    FileInfo modified = accessor.Write(content.empty() ? NULL : content.c_str(),
                                       content.size(), attachmentType, compression, storeMD5_);

    try
    {
      StoreStatus status = index_.AddAttachment(modified, resourceId);
      if (status != StoreStatus_Success)
      {
        accessor.Remove(modified);
        throw OrthancException(ErrorCode_Database);
      }
    }
    catch (OrthancException&)
    {
      accessor.Remove(modified);
      throw;
    }
  }


  void ServerContext::ReadDicomAsJsonInternal(std::string& result,
                                              const std::string& instancePublicId)
  {
    FileInfo attachment;
    if (index_.LookupAttachment(attachment, instancePublicId, FileContentType_DicomAsJson))
    {
      ReadAttachment(result, attachment);
    }
    else
    {
      // The "DICOM as JSON" summary is not available from the Orthanc
      // store (most probably deleted), reconstruct it from the DICOM file
      std::string dicom;
      ReadDicom(dicom, instancePublicId);

      LOG(INFO) << "Reconstructing the missing DICOM-as-JSON summary for instance: "
                << instancePublicId;

      ParsedDicomFile parsed(dicom);

      Json::Value summary;
      parsed.DatasetToJson(summary);

      result = summary.toStyledString();

      if (!AddAttachment(instancePublicId, FileContentType_DicomAsJson,
                         result.c_str(), result.size()))
      {
        throw OrthancException(ErrorCode_InternalError,
                               "Cannot associate the DICOM-as-JSON summary to instance: " + instancePublicId);
      }
    }
  }


  void ServerContext::ReadDicomAsJson(std::string& result,
                                      const std::string& instancePublicId,
                                      const std::set<DicomTag>& ignoreTagLength)
  {
    if (ignoreTagLength.empty())
    {
      ReadDicomAsJsonInternal(result, instancePublicId);
    }
    else
    {
      Json::Value tmp;
      ReadDicomAsJson(tmp, instancePublicId, ignoreTagLength);
      result = tmp.toStyledString();
    }
  }


  void ServerContext::ReadDicomAsJson(Json::Value& result,
                                      const std::string& instancePublicId,
                                      const std::set<DicomTag>& ignoreTagLength)
  {
    if (ignoreTagLength.empty())
    {
      std::string tmp;
      ReadDicomAsJsonInternal(tmp, instancePublicId);

      Json::Reader reader;
      if (!reader.parse(tmp, result))
      {
        throw OrthancException(ErrorCode_CorruptedFile);
      }
    }
    else
    {
      // The "DicomAsJson" attachment might have stored some tags as
      // "too long". We are forced to re-parse the DICOM file.
      std::string dicom;
      ReadDicom(dicom, instancePublicId);

      ParsedDicomFile parsed(dicom);
      parsed.DatasetToJson(result, ignoreTagLength);
    }
  }


  void ServerContext::ReadAttachment(std::string& result,
                                     const std::string& instancePublicId,
                                     FileContentType content,
                                     bool uncompressIfNeeded)
  {
    FileInfo attachment;
    if (!index_.LookupAttachment(attachment, instancePublicId, content))
    {
      throw OrthancException(ErrorCode_InternalError,
                             "Unable to read attachment " + EnumerationToString(content) +
                             " of instance " + instancePublicId);
    }

    assert(attachment.GetContentType() == content);

    if (uncompressIfNeeded)
    {
      ReadAttachment(result, attachment);
    }
    else
    {
      // Do not uncompress the content of the storage area, return the
      // raw data
      StorageAccessor accessor(area_, GetMetricsRegistry());
      accessor.ReadRaw(result, attachment);
    }
  }


  void ServerContext::ReadAttachment(std::string& result,
                                     const FileInfo& attachment)
  {
    // This will decompress the attachment
    StorageAccessor accessor(area_, GetMetricsRegistry());
    accessor.Read(result, attachment);
  }


  IDynamicObject* ServerContext::DicomCacheProvider::Provide(const std::string& instancePublicId)
  {
    std::string content;
    context_.ReadDicom(content, instancePublicId);
    return new ParsedDicomFile(content);
  }


  ServerContext::DicomCacheLocker::DicomCacheLocker(ServerContext& that,
                                                    const std::string& instancePublicId) :
    that_(that),
    lock_(that_.dicomCacheMutex_)
  {
#if ENABLE_DICOM_CACHE == 0
    static std::unique_ptr<IDynamicObject> p;
    p.reset(that_.provider_.Provide(instancePublicId));
    dicom_ = dynamic_cast<ParsedDicomFile*>(p.get());
#else
    dicom_ = &dynamic_cast<ParsedDicomFile&>(that_.dicomCache_.Access(instancePublicId));
#endif
  }


  ServerContext::DicomCacheLocker::~DicomCacheLocker()
  {
  }


  void ServerContext::SetStoreMD5ForAttachments(bool storeMD5)
  {
    LOG(INFO) << "Storing MD5 for attachments: " << (storeMD5 ? "yes" : "no");
    storeMD5_ = storeMD5;
  }


  bool ServerContext::AddAttachment(const std::string& resourceId,
                                    FileContentType attachmentType,
                                    const void* data,
                                    size_t size)
  {
    LOG(INFO) << "Adding attachment " << EnumerationToString(attachmentType) << " to resource " << resourceId;

    // TODO Should we use "gzip" instead?
    CompressionType compression = (compressionEnabled_ ? CompressionType_ZlibWithSize : CompressionType_None);

    StorageAccessor accessor(area_, GetMetricsRegistry());
    FileInfo attachment = accessor.Write(data, size, attachmentType, compression, storeMD5_);

    StoreStatus status = index_.AddAttachment(attachment, resourceId);
    if (status != StoreStatus_Success)
    {
      accessor.Remove(attachment);
      return false;
    }
    else
    {
      return true;
    }
  }


  bool ServerContext::DeleteResource(Json::Value& target,
                                     const std::string& uuid,
                                     ResourceType expectedType)
  {
    if (expectedType == ResourceType_Instance)
    {
      // remove the file from the DicomCache
      boost::mutex::scoped_lock lock(dicomCacheMutex_);
      dicomCache_.Invalidate(uuid);
    }

    return index_.DeleteResource(target, uuid, expectedType);
  }


  void ServerContext::SignalChange(const ServerIndexChange& change)
  {
    pendingChanges_.Enqueue(change.Clone());
  }


#if ORTHANC_ENABLE_PLUGINS == 1
  void ServerContext::SetPlugins(OrthancPlugins& plugins)
  {
    boost::unique_lock<boost::shared_mutex> lock(listenersMutex_);

    plugins_ = &plugins;

    // TODO REFACTOR THIS
    listeners_.clear();
    listeners_.push_back(ServerListener(luaListener_, "Lua"));
    listeners_.push_back(ServerListener(plugins, "plugin"));
  }


  void ServerContext::ResetPlugins()
  {
    boost::unique_lock<boost::shared_mutex> lock(listenersMutex_);

    plugins_ = NULL;

    // TODO REFACTOR THIS
    listeners_.clear();
    listeners_.push_back(ServerListener(luaListener_, "Lua"));
  }


  const OrthancPlugins& ServerContext::GetPlugins() const
  {
    if (HasPlugins())
    {
      return *plugins_;
    }
    else
    {
      throw OrthancException(ErrorCode_InternalError);
    }
  }

  OrthancPlugins& ServerContext::GetPlugins()
  {
    if (HasPlugins())
    {
      return *plugins_;
    }
    else
    {
      throw OrthancException(ErrorCode_InternalError);
    }
  }

#endif


  bool ServerContext::HasPlugins() const
  {
#if ORTHANC_ENABLE_PLUGINS == 1
    return (plugins_ != NULL);
#else
    return false;
#endif
  }

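  // Runs a database lookup against the index (e.g. for C-FIND): the
  // candidate resources are first filtered using the main DICOM tags
  // stored in the database, then, depending on "StorageAccessOnFind",
  // the "DICOM-as-JSON" attachment is read from the storage area to
  // check the remaining criteria. "since" and "limit" implement paging.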
  void ServerContext::Apply(ILookupVisitor& visitor,
                            const DatabaseLookup& lookup,
                            ResourceType queryLevel,
                            size_t since,
                            size_t limit)
  {
    unsigned int databaseLimit = (queryLevel == ResourceType_Instance ?
                                  limitFindInstances_ : limitFindResults_);

    std::vector<std::string> resources, instances;

    {
      const size_t lookupLimit = (databaseLimit == 0 ? 0 : databaseLimit + 1);
      GetIndex().ApplyLookupResources(resources, &instances, lookup, queryLevel, lookupLimit);
    }

    bool complete = (databaseLimit == 0 ||
                     resources.size() <= databaseLimit);

    LOG(INFO) << "Number of candidate resources after fast DB filtering on main DICOM tags: " << resources.size();

    /**
     * "resources" contains the Orthanc IDs of the resources at level
     * "queryLevel", whereas "instances" contains the Orthanc ID of one
     * sample instance from each of these resources.
     **/
    assert(resources.size() == instances.size());

    size_t countResults = 0;
    size_t skipped = 0;

    const bool isDicomAsJsonNeeded = visitor.IsDicomAsJsonNeeded();

    for (size_t i = 0; i < instances.size(); i++)
    {
      // Optimization in Orthanc 1.5.1 - Don't read the full JSON from
      // the disk if only "main DICOM tags" are to be returned

      std::unique_ptr<Json::Value> dicomAsJson;

      bool hasOnlyMainDicomTags;
      DicomMap dicom;

      if (findStorageAccessMode_ == FindStorageAccessMode_DatabaseOnly ||
          findStorageAccessMode_ == FindStorageAccessMode_DiskOnAnswer ||
          lookup.HasOnlyMainDicomTags())
      {
        // Case (1): The main DICOM tags, as stored in the database,
        // are sufficient to look for a match

        DicomMap tmp;
        if (!GetIndex().GetAllMainDicomTags(tmp, instances[i]))
        {
          // The instance has been removed during the execution of the
          // lookup, ignore it
          continue;
        }

#if 1
        // New in Orthanc 1.6.0: Only keep the main DICOM tags at the
        // level of interest for the query
        switch (queryLevel)
        {
          // WARNING: Don't reorder cases below, and don't add "break"
          case ResourceType_Instance:
            dicom.MergeMainDicomTags(tmp, ResourceType_Instance);

          case ResourceType_Series:
            dicom.MergeMainDicomTags(tmp, ResourceType_Series);

          case ResourceType_Study:
            dicom.MergeMainDicomTags(tmp, ResourceType_Study);

          case ResourceType_Patient:
            dicom.MergeMainDicomTags(tmp, ResourceType_Patient);
            break;

          default:
            throw OrthancException(ErrorCode_InternalError);
        }

        // Special case of the "Modality" at the study level, in order
        // to deal with C-FIND on "ModalitiesInStudy" (0008,0061).
        // Check out integration test "test_rest_modalities_in_study".
        if (queryLevel == ResourceType_Study)
        {
          dicom.CopyTagIfExists(tmp, DICOM_TAG_MODALITY);
        }
#else
        dicom.Assign(tmp);  // This emulates Orthanc <= 1.5.8
#endif

        hasOnlyMainDicomTags = true;
      }
      else
      {
        // Case (2): Need to read the "DICOM-as-JSON" attachment from
        // the storage area
        dicomAsJson.reset(new Json::Value);
        ReadDicomAsJson(*dicomAsJson, instances[i]);

        dicom.FromDicomAsJson(*dicomAsJson);

        // This map contains the entire JSON, i.e. more than the main DICOM tags
        hasOnlyMainDicomTags = false;
      }

      if (lookup.IsMatch(dicom))
      {
        if (skipped < since)
        {
          skipped++;
        }
        else if (limit != 0 &&
                 countResults >= limit)
        {
          // Too many results, don't mark as complete
          complete = false;
          break;
        }
        else
        {
          if ((findStorageAccessMode_ == FindStorageAccessMode_DiskOnLookupAndAnswer ||
               findStorageAccessMode_ == FindStorageAccessMode_DiskOnAnswer) &&
              dicomAsJson.get() == NULL &&
              isDicomAsJsonNeeded)
          {
            dicomAsJson.reset(new Json::Value);
            ReadDicomAsJson(*dicomAsJson, instances[i]);
          }

          if (hasOnlyMainDicomTags)
          {
            // This is Case (1): The variable "dicom" only contains the main DICOM tags
            visitor.Visit(resources[i], instances[i], dicom, dicomAsJson.get());
          }
          else
          {
            // Remove the non-main DICOM tags from "dicom" if Case (2)
            // was used, for consistency with Case (1)

            DicomMap mainDicomTags;
            mainDicomTags.ExtractMainDicomTags(dicom);
            visitor.Visit(resources[i], instances[i], mainDicomTags, dicomAsJson.get());
          }

          countResults ++;
        }
      }
    }

    if (complete)
    {
      visitor.MarkAsComplete();
    }

    LOG(INFO) << "Number of matching resources: " << countResults;
  }


  bool ServerContext::LookupOrReconstructMetadata(std::string& target,
                                                  const std::string& publicId,
                                                  MetadataType metadata)
  {
    // This is a backwards-compatibility function that can reconstruct
    // metadata that were not generated by older releases of Orthanc

    if (metadata == MetadataType_Instance_SopClassUid ||
        metadata == MetadataType_Instance_TransferSyntax)
    {
      if (index_.LookupMetadata(target, publicId, metadata))
      {
        return true;
      }
      else
      {
        // These metadata are mandatory in DICOM instances, and were
        // introduced in Orthanc 1.2.0. The fact that
        // "LookupMetadata()" has failed indicates that this database
        // comes from an older release of Orthanc.

        DicomTag tag(0, 0);

        switch (metadata)
        {
          case MetadataType_Instance_SopClassUid:
            tag = DICOM_TAG_SOP_CLASS_UID;
            break;

          case MetadataType_Instance_TransferSyntax:
            tag = DICOM_TAG_TRANSFER_SYNTAX_UID;
            break;

          default:
            throw OrthancException(ErrorCode_InternalError);
        }

        Json::Value dicomAsJson;
        ReadDicomAsJson(dicomAsJson, publicId);

        DicomMap tags;
        tags.FromDicomAsJson(dicomAsJson);

        const DicomValue* value = tags.TestAndGetValue(tag);

        if (value != NULL &&
            !value->IsNull() &&
            !value->IsBinary())
        {
          target = value->GetContent();

          // Store for reuse
          index_.SetMetadata(publicId, metadata, target);
          return true;
        }
        else
        {
          // Should never happen
          return false;
        }
      }
    }
    else
    {
      // No backward compatibility is needed for the other metadata types
      return index_.LookupMetadata(target, publicId, metadata);
    }
  }


  void ServerContext::AddChildInstances(SetOfInstancesJob& job,
                                        const std::string& publicId)
  {
    std::list<std::string> instances;
    GetIndex().GetChildInstances(instances, publicId);

    job.Reserve(job.GetInstancesCount() + instances.size());

    for (std::list<std::string>::const_iterator
           it = instances.begin(); it != instances.end(); ++it)
    {
      job.AddInstance(*it);
    }
  }


  void ServerContext::SignalUpdatedModalities()
  {
#if ORTHANC_ENABLE_PLUGINS == 1
    if (HasPlugins())
    {
      GetPlugins().SignalUpdatedModalities();
    }
#endif
  }


  void ServerContext::SignalUpdatedPeers()
  {
#if ORTHANC_ENABLE_PLUGINS == 1
    if (HasPlugins())
    {
      GetPlugins().SignalUpdatedPeers();
    }
#endif
  }


  IStorageCommitmentFactory::ILookupHandler*
  ServerContext::CreateStorageCommitment(const std::string& jobId,
                                         const std::string& transactionUid,
                                         const std::vector<std::string>& sopClassUids,
                                         const std::vector<std::string>& sopInstanceUids,
                                         const std::string& remoteAet,
                                         const std::string& calledAet)
  {
#if ORTHANC_ENABLE_PLUGINS == 1
    if (HasPlugins())
    {
      return GetPlugins().CreateStorageCommitment(
        jobId, transactionUid, sopClassUids, sopInstanceUids, remoteAet, calledAet);
    }
#endif

    return NULL;
  }

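  // Decodes one frame of an instance stored in Orthanc. The order
  // between the built-in DCMTK decoder and the decoding plugins is
  // controlled by the "BuiltinDecoderTranscoderOrder" option ("Before"
  // or "After"); if the built-in decoder is disabled, NULL is returned
  // whenever no plugin can decode the frame.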
  ImageAccessor* ServerContext::DecodeDicomFrame(const std::string& publicId,
                                                 unsigned int frameIndex)
  {
    if (builtinDecoderTranscoderOrder_ == BuiltinDecoderTranscoderOrder_Before)
    {
      // Use Orthanc's built-in decoder, using the cache to speed-up
      // things on multi-frame images
      ServerContext::DicomCacheLocker locker(*this, publicId);
      std::unique_ptr<ImageAccessor> decoded(
        DicomImageDecoder::Decode(locker.GetDicom(), frameIndex));
      if (decoded.get() != NULL)
      {
        return decoded.release();
      }
    }

#if ORTHANC_ENABLE_PLUGINS == 1
    if (HasPlugins() &&
        GetPlugins().HasCustomImageDecoder())
    {
      // TODO: Store the raw buffer in the DicomCacheLocker
      std::string dicomContent;
      ReadDicom(dicomContent, publicId);
      std::unique_ptr<ImageAccessor> decoded(
        GetPlugins().Decode(dicomContent.c_str(), dicomContent.size(), frameIndex));
      if (decoded.get() != NULL)
      {
        return decoded.release();
      }
      else if (builtinDecoderTranscoderOrder_ == BuiltinDecoderTranscoderOrder_After)
      {
        LOG(INFO) << "The installed image decoding plugins cannot handle an image, "
                  << "fallback to the built-in DCMTK decoder";
      }
    }
#endif

    if (builtinDecoderTranscoderOrder_ == BuiltinDecoderTranscoderOrder_After)
    {
      ServerContext::DicomCacheLocker locker(*this, publicId);
      return DicomImageDecoder::Decode(locker.GetDicom(), frameIndex);
    }
    else
    {
      return NULL;  // Built-in decoder is disabled
    }
  }


  ImageAccessor* ServerContext::DecodeDicomFrame(const DicomInstanceToStore& dicom,
                                                 unsigned int frameIndex)
  {
    if (builtinDecoderTranscoderOrder_ == BuiltinDecoderTranscoderOrder_Before)
    {
      std::unique_ptr<ImageAccessor> decoded(
        DicomImageDecoder::Decode(dicom.GetParsedDicomFile(), frameIndex));
      if (decoded.get() != NULL)
      {
        return decoded.release();
      }
    }

#if ORTHANC_ENABLE_PLUGINS == 1
    if (HasPlugins() &&
        GetPlugins().HasCustomImageDecoder())
    {
      std::unique_ptr<ImageAccessor> decoded(
        GetPlugins().Decode(dicom.GetBufferData(), dicom.GetBufferSize(), frameIndex));
      if (decoded.get() != NULL)
      {
        return decoded.release();
      }
      else if (builtinDecoderTranscoderOrder_ == BuiltinDecoderTranscoderOrder_After)
      {
        LOG(INFO) << "The installed image decoding plugins cannot handle an image, "
                  << "fallback to the built-in DCMTK decoder";
      }
    }
#endif

    if (builtinDecoderTranscoderOrder_ == BuiltinDecoderTranscoderOrder_After)
    {
      return DicomImageDecoder::Decode(dicom.GetParsedDicomFile(), frameIndex);
    }
    else
    {
      return NULL;
    }
  }


  ImageAccessor* ServerContext::DecodeDicomFrame(const void* dicom,
                                                 size_t size,
                                                 unsigned int frameIndex)
  {
    DicomInstanceToStore instance;
    instance.SetBuffer(dicom, size);
    return DecodeDicomFrame(instance, frameIndex);
  }


  void ServerContext::StoreWithTranscoding(std::string& sopClassUid,
                                           std::string& sopInstanceUid,
                                           DicomStoreUserConnection& connection,
                                           const std::string& dicom,
                                           bool hasMoveOriginator,
                                           const std::string& moveOriginatorAet,
                                           uint16_t moveOriginatorId)
  {
    const void* data = dicom.empty() ? NULL : dicom.c_str();

    if (!transcodeDicomProtocol_ ||
        !connection.GetParameters().GetRemoteModality().IsTranscodingAllowed())
    {
      connection.Store(sopClassUid, sopInstanceUid, data, dicom.size(),
                       hasMoveOriginator, moveOriginatorAet, moveOriginatorId);
    }
    else
    {
      connection.Transcode(sopClassUid, sopInstanceUid, *this, data, dicom.size(),
                           hasMoveOriginator, moveOriginatorAet, moveOriginatorId);
    }
  }

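  // Transcodes a DICOM image to one of the allowed transfer syntaxes,
  // trying the built-in DCMTK transcoder and the transcoding plugins in
  // the order given by "BuiltinDecoderTranscoderOrder". Returns "false"
  // if no transcoder can handle the image.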
  bool ServerContext::Transcode(DicomImage& target,
                                DicomImage& source /* in, "GetParsed()" possibly modified */,
                                const std::set<DicomTransferSyntax>& allowedSyntaxes,
                                bool allowNewSopInstanceUid)
  {
    if (builtinDecoderTranscoderOrder_ == BuiltinDecoderTranscoderOrder_Before)
    {
      if (dcmtkTranscoder_->Transcode(target, source, allowedSyntaxes, allowNewSopInstanceUid))
      {
        return true;
      }
    }

#if ORTHANC_ENABLE_PLUGINS == 1
    if (HasPlugins() &&
        GetPlugins().HasCustomTranscoder())
    {
      if (GetPlugins().Transcode(target, source, allowedSyntaxes, allowNewSopInstanceUid))
      {
        return true;
      }
      else if (builtinDecoderTranscoderOrder_ == BuiltinDecoderTranscoderOrder_After)
      {
        LOG(INFO) << "The installed transcoding plugins cannot handle an image, "
                  << "fallback to the built-in DCMTK transcoder";
      }
    }
#endif

    if (builtinDecoderTranscoderOrder_ == BuiltinDecoderTranscoderOrder_After)
    {
      return dcmtkTranscoder_->Transcode(target, source, allowedSyntaxes, allowNewSopInstanceUid);
    }
    else
    {
      return false;
    }
  }
}
1373 } |