Mercurial > hg > orthanc
annotate OrthancServer/Scheduler/ServerScheduler.cpp @ 783:c9cdd53a6b31 lua-scripting
main scheduler added to the server context
author | Sebastien Jodogne <s.jodogne@gmail.com> |
---|---|
date | Wed, 30 Apr 2014 18:36:20 +0200 |
parents | f0ac3a53ccf2 |
children | 8c67382f44a7 |
rev | line source |
---|---|
781 | 1 /** |
2 * Orthanc - A Lightweight, RESTful DICOM Store | |
3 * Copyright (C) 2012-2014 Medical Physics Department, CHU of Liege, | |
4 * Belgium | |
5 * | |
6 * This program is free software: you can redistribute it and/or | |
7 * modify it under the terms of the GNU General Public License as | |
8 * published by the Free Software Foundation, either version 3 of the | |
9 * License, or (at your option) any later version. | |
10 * | |
11 * In addition, as a special exception, the copyright holders of this | |
12 * program give permission to link the code of its release with the | |
13 * OpenSSL project's "OpenSSL" library (or with modified versions of it | |
14 * that use the same license as the "OpenSSL" library), and distribute | |
15 * the linked executables. You must obey the GNU General Public License | |
16 * in all respects for all of the code used other than "OpenSSL". If you | |
17 * modify file(s) with this exception, you may extend this exception to | |
18 * your version of the file(s), but you are not obligated to do so. If | |
19 * you do not wish to do so, delete this exception statement from your | |
20 * version. If you delete this exception statement from all source files | |
21 * in the program, then also delete it here. | |
22 * | |
23 * This program is distributed in the hope that it will be useful, but | |
24 * WITHOUT ANY WARRANTY; without even the implied warranty of | |
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
26 * General Public License for more details. | |
27 * | |
28 * You should have received a copy of the GNU General Public License | |
29 * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
30 **/ | |
31 | |
32 | |
33 #include "ServerScheduler.h" | |
34 | |
35 #include "../../Core/OrthancException.h" | |
36 | |
37 #include <glog/logging.h> | |
38 | |
39 namespace Orthanc | |
40 { | |
41 namespace | |
42 { | |
43 // Anonymous namespace to avoid clashes between compilation modules | |
44 class Sink : public IServerFilter | |
45 { | |
46 private: | |
47 ListOfStrings& target_; | |
48 | |
49 public: | |
50 Sink(ListOfStrings& target) : target_(target) | |
51 { | |
52 } | |
53 | |
54 virtual bool SendOutputsToSink() const | |
55 { | |
56 return false; | |
57 } | |
58 | |
59 virtual bool Apply(ListOfStrings& outputs, | |
60 const ListOfStrings& inputs) | |
61 { | |
62 for (ListOfStrings::const_iterator | |
63 it = inputs.begin(); it != inputs.end(); it++) | |
64 { | |
65 target_.push_back(*it); | |
66 } | |
67 | |
68 return true; | |
69 } | |
70 }; | |
71 } | |
72 | |
73 | |
74 ServerScheduler::JobInfo& ServerScheduler::GetJobInfo(const std::string& jobId) | |
75 { | |
76 Jobs::iterator info = jobs_.find(jobId); | |
77 | |
78 if (info == jobs_.end()) | |
79 { | |
80 throw OrthancException(ErrorCode_InternalError); | |
81 } | |
82 | |
83 return info->second; | |
84 } | |
85 | |
86 | |
87 void ServerScheduler::SignalSuccess(const std::string& jobId) | |
88 { | |
89 boost::mutex::scoped_lock lock(mutex_); | |
90 | |
91 JobInfo& info = GetJobInfo(jobId); | |
92 info.success_++; | |
93 | |
94 assert(info.failures_ == 0); | |
95 | |
96 if (info.success_ >= info.size_) | |
97 { | |
98 if (info.watched_) | |
99 { | |
100 watchedJobStatus_[jobId] = JobStatus_Success; | |
101 jobFinished_.notify_all(); | |
102 } | |
103 | |
104 LOG(INFO) << "Job successfully finished (" << info.description_ << ")"; | |
105 jobs_.erase(jobId); | |
106 } | |
107 } | |
108 | |
109 | |
110 void ServerScheduler::SignalFailure(const std::string& jobId) | |
111 { | |
112 boost::mutex::scoped_lock lock(mutex_); | |
113 | |
114 JobInfo& info = GetJobInfo(jobId); | |
115 info.failures_++; | |
116 | |
117 if (info.success_ + info.failures_ >= info.size_) | |
118 { | |
119 if (info.watched_) | |
120 { | |
121 watchedJobStatus_[jobId] = JobStatus_Failure; | |
122 jobFinished_.notify_all(); | |
123 } | |
124 | |
125 LOG(ERROR) << "Job has failed (" << info.description_ << ")"; | |
126 jobs_.erase(jobId); | |
127 } | |
128 } | |
129 | |
130 | |
131 void ServerScheduler::Worker(ServerScheduler* that) | |
132 { | |
133 static const int32_t TIMEOUT = 100; | |
134 | |
783
c9cdd53a6b31
main scheduler added to the server context
Sebastien Jodogne <s.jodogne@gmail.com>
parents:
781
diff
changeset
|
135 LOG(WARNING) << "The server scheduler has started"; |
c9cdd53a6b31
main scheduler added to the server context
Sebastien Jodogne <s.jodogne@gmail.com>
parents:
781
diff
changeset
|
136 |
781 | 137 while (!that->finish_) |
138 { | |
139 std::auto_ptr<IDynamicObject> object(that->queue_.Dequeue(TIMEOUT)); | |
140 if (object.get() != NULL) | |
141 { | |
142 ServerFilterInstance& filter = dynamic_cast<ServerFilterInstance&>(*object); | |
143 | |
144 // Skip the execution of this filter if its parent job has | |
145 // previously failed. | |
146 bool jobHasFailed; | |
147 { | |
148 boost::mutex::scoped_lock lock(that->mutex_); | |
149 JobInfo& info = that->GetJobInfo(filter.GetJobId()); | |
150 jobHasFailed = (info.failures_ > 0 || info.cancel_); | |
151 } | |
152 | |
153 if (jobHasFailed) | |
154 { | |
155 that->SignalFailure(filter.GetJobId()); | |
156 } | |
157 else | |
158 { | |
159 filter.Execute(*that); | |
160 } | |
161 } | |
162 } | |
163 } | |
164 | |
165 | |
166 void ServerScheduler::SubmitInternal(ServerJob& job, | |
167 bool watched) | |
168 { | |
169 boost::mutex::scoped_lock lock(mutex_); | |
170 | |
171 JobInfo info; | |
172 info.size_ = job.Submit(queue_, *this); | |
173 info.cancel_ = false; | |
174 info.success_ = 0; | |
175 info.failures_ = 0; | |
176 info.description_ = job.GetDescription(); | |
177 info.watched_ = watched; | |
178 | |
179 assert(info.size_ > 0); | |
180 | |
181 if (watched) | |
182 { | |
183 watchedJobStatus_[job.GetId()] = JobStatus_Running; | |
184 } | |
185 | |
186 jobs_[job.GetId()] = info; | |
187 | |
188 LOG(INFO) << "New job submitted (" << job.description_ << ")"; | |
189 } | |
190 | |
191 | |
192 ServerScheduler::ServerScheduler() | |
193 { | |
194 finish_ = false; | |
195 worker_ = boost::thread(Worker, this); | |
196 } | |
197 | |
198 | |
  // Stop the scheduler: raise the stop flag, then wait for the worker
  // thread to observe it (within one dequeue timeout) and exit.
  // NOTE(review): "finish_" is written here and read by the worker
  // thread without any lock — presumably it is declared as a plain
  // bool in the header; confirm this unsynchronized access is
  // acceptable on the supported platforms.
  ServerScheduler::~ServerScheduler()
  {
    finish_ = true;
    worker_.join();
  }
204 | |
205 | |
206 void ServerScheduler::Submit(ServerJob& job) | |
207 { | |
208 if (job.filters_.empty()) | |
209 { | |
210 return; | |
211 } | |
212 | |
213 SubmitInternal(job, false); | |
214 } | |
215 | |
216 | |
217 bool ServerScheduler::SubmitAndWait(ListOfStrings& outputs, | |
218 ServerJob& job) | |
219 { | |
220 std::string jobId = job.GetId(); | |
221 | |
222 outputs.clear(); | |
223 | |
224 if (job.filters_.empty()) | |
225 { | |
226 return true; | |
227 } | |
228 | |
229 // Add a sink filter to collect all the results of the filters | |
230 // that have no next filter. | |
231 ServerFilterInstance& sink = job.AddFilter(new Sink(outputs)); | |
232 | |
233 for (std::list<ServerFilterInstance*>::iterator | |
234 it = job.filters_.begin(); it != job.filters_.end(); it++) | |
235 { | |
236 if ((*it) != &sink && | |
237 (*it)->GetNextFilters().size() == 0 && | |
238 (*it)->GetFilter().SendOutputsToSink()) | |
239 { | |
240 (*it)->ConnectNext(sink); | |
241 } | |
242 } | |
243 | |
244 // Submit the job | |
245 SubmitInternal(job, true); | |
246 | |
247 // Wait for the job to complete (either success or failure) | |
248 JobStatus status; | |
249 | |
250 { | |
251 boost::mutex::scoped_lock lock(mutex_); | |
252 | |
253 assert(watchedJobStatus_.find(jobId) != watchedJobStatus_.end()); | |
254 | |
255 while (watchedJobStatus_[jobId] == JobStatus_Running) | |
256 { | |
257 jobFinished_.wait(lock); | |
258 } | |
259 | |
260 status = watchedJobStatus_[jobId]; | |
261 watchedJobStatus_.erase(jobId); | |
262 } | |
263 | |
264 return (status == JobStatus_Success); | |
265 } | |
266 | |
267 | |
268 bool ServerScheduler::IsRunning(const std::string& jobId) | |
269 { | |
270 boost::mutex::scoped_lock lock(mutex_); | |
271 return jobs_.find(jobId) != jobs_.end(); | |
272 } | |
273 | |
274 | |
275 void ServerScheduler::Cancel(const std::string& jobId) | |
276 { | |
277 boost::mutex::scoped_lock lock(mutex_); | |
278 | |
279 Jobs::iterator job = jobs_.find(jobId); | |
280 | |
281 if (job != jobs_.end()) | |
282 { | |
283 job->second.cancel_ = true; | |
284 LOG(WARNING) << "Canceling a job (" << job->second.description_ << ")"; | |
285 } | |
286 } | |
287 | |
288 | |
289 float ServerScheduler::GetProgress(const std::string& jobId) | |
290 { | |
291 boost::mutex::scoped_lock lock(mutex_); | |
292 | |
293 Jobs::iterator job = jobs_.find(jobId); | |
294 | |
295 if (job == jobs_.end() || | |
296 job->second.size_ == 0 /* should never happen */) | |
297 { | |
298 // This job is not running | |
299 return 1; | |
300 } | |
301 | |
302 if (job->second.failures_ != 0) | |
303 { | |
304 return 1; | |
305 } | |
306 | |
307 if (job->second.size_ == 1) | |
308 { | |
309 return job->second.success_; | |
310 } | |
311 | |
312 return (static_cast<float>(job->second.success_) / | |
313 static_cast<float>(job->second.size_ - 1)); | |
314 } | |
315 | |
316 | |
317 void ServerScheduler::GetListOfJobs(ListOfStrings& jobs) | |
318 { | |
319 boost::mutex::scoped_lock lock(mutex_); | |
320 | |
321 jobs.clear(); | |
322 | |
323 for (Jobs::const_iterator | |
324 it = jobs_.begin(); it != jobs_.end(); it++) | |
325 { | |
326 jobs.push_back(it->first); | |
327 } | |
328 } | |
329 } |