OrthancServer/Scheduler/ServerScheduler.cpp @ 995:8c67382f44a7 (branch lua-scripting)
Changeset: limit number of jobs in the scheduler
Author:    Sebastien Jodogne <s.jodogne@gmail.com>
Date:      Thu, 03 Jul 2014 15:58:53 +0200
Parent:    c9cdd53a6b31
Child:     db18c071fbd7
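The changeset annotated here caps how many jobs may be pending in the scheduler at any time: SubmitInternal() now blocks on availableJob_.Acquire() before a job is enqueued, and SignalSuccess()/SignalFailure() call availableJob_.Release() once all the filters of a job have completed, so the constructor argument maxJobs bounds the number of jobs in flight. The declaration of availableJob_ lives in ServerScheduler.h and is not shown on this page; the sketch below is only an illustration of the kind of counting semaphore such a member could be, built on the same Boost primitives this file already uses. The class name CountingSemaphore is an assumption, not the actual Orthanc type.

// Illustrative counting semaphore (assumption: not the actual Orthanc class).
// Acquire() blocks while no slot is free; Release() frees one slot.
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>

class CountingSemaphore
{
private:
  boost::mutex mutex_;
  boost::condition_variable available_;
  unsigned int count_;

public:
  explicit CountingSemaphore(unsigned int count) : count_(count)
  {
  }

  void Acquire()
  {
    boost::mutex::scoped_lock lock(mutex_);

    while (count_ == 0)
    {
      available_.wait(lock);  // Wait until some job calls Release()
    }

    count_--;
  }

  void Release()
  {
    boost::mutex::scoped_lock lock(mutex_);
    count_++;
    available_.notify_one();  // Wake up one thread blocked in Acquire()
  }
};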
/**
 * Orthanc - A Lightweight, RESTful DICOM Store
 * Copyright (C) 2012-2014 Medical Physics Department, CHU of Liege,
 * Belgium
 *
 * This program is free software: you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * In addition, as a special exception, the copyright holders of this
 * program give permission to link the code of its release with the
 * OpenSSL project's "OpenSSL" library (or with modified versions of it
 * that use the same license as the "OpenSSL" library), and distribute
 * the linked executables. You must obey the GNU General Public License
 * in all respects for all of the code used other than "OpenSSL". If you
 * modify file(s) with this exception, you may extend this exception to
 * your version of the file(s), but you are not obligated to do so. If
 * you do not wish to do so, delete this exception statement from your
 * version. If you delete this exception statement from all source files
 * in the program, then also delete it here.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 **/


#include "ServerScheduler.h"

#include "../../Core/OrthancException.h"

#include <glog/logging.h>

namespace Orthanc
{
  namespace
  {
    // Anonymous namespace to avoid clashes between compilation modules
    class Sink : public IServerFilter
    {
    private:
      ListOfStrings& target_;

    public:
      Sink(ListOfStrings& target) : target_(target)
      {
      }

      virtual bool SendOutputsToSink() const
      {
        return false;
      }

      virtual bool Apply(ListOfStrings& outputs,
                         const ListOfStrings& inputs)
      {
        for (ListOfStrings::const_iterator
               it = inputs.begin(); it != inputs.end(); it++)
        {
          target_.push_back(*it);
        }

        return true;
      }
    };
  }


  ServerScheduler::JobInfo& ServerScheduler::GetJobInfo(const std::string& jobId)
  {
    Jobs::iterator info = jobs_.find(jobId);

    if (info == jobs_.end())
    {
      throw OrthancException(ErrorCode_InternalError);
    }

    return info->second;
  }


  void ServerScheduler::SignalSuccess(const std::string& jobId)
  {
    boost::mutex::scoped_lock lock(mutex_);

    JobInfo& info = GetJobInfo(jobId);
    info.success_++;

    assert(info.failures_ == 0);

    if (info.success_ >= info.size_)
    {
      if (info.watched_)
      {
        watchedJobStatus_[jobId] = JobStatus_Success;
        watchedJobFinished_.notify_all();
      }

      LOG(INFO) << "Job successfully finished (" << info.description_ << ")";
      jobs_.erase(jobId);

      availableJob_.Release();
    }
  }


  void ServerScheduler::SignalFailure(const std::string& jobId)
  {
    boost::mutex::scoped_lock lock(mutex_);

    JobInfo& info = GetJobInfo(jobId);
    info.failures_++;

    if (info.success_ + info.failures_ >= info.size_)
    {
      if (info.watched_)
      {
        watchedJobStatus_[jobId] = JobStatus_Failure;
        watchedJobFinished_.notify_all();
      }

      LOG(ERROR) << "Job has failed (" << info.description_ << ")";
      jobs_.erase(jobId);

      availableJob_.Release();
    }
  }


  void ServerScheduler::Worker(ServerScheduler* that)
  {
    static const int32_t TIMEOUT = 100;

    LOG(WARNING) << "The server scheduler has started";

    while (!that->finish_)
    {
      std::auto_ptr<IDynamicObject> object(that->queue_.Dequeue(TIMEOUT));
      if (object.get() != NULL)
      {
        ServerFilterInstance& filter = dynamic_cast<ServerFilterInstance&>(*object);

        // Skip the execution of this filter if its parent job has
        // previously failed.
        bool jobHasFailed;
        {
          boost::mutex::scoped_lock lock(that->mutex_);
          JobInfo& info = that->GetJobInfo(filter.GetJobId());
          jobHasFailed = (info.failures_ > 0 || info.cancel_);
        }

        if (jobHasFailed)
        {
          that->SignalFailure(filter.GetJobId());
        }
        else
        {
          filter.Execute(*that);
        }
      }
    }
  }


  void ServerScheduler::SubmitInternal(ServerJob& job,
                                       bool watched)
  {
    availableJob_.Acquire();

    boost::mutex::scoped_lock lock(mutex_);

    JobInfo info;
    info.size_ = job.Submit(queue_, *this);
    info.cancel_ = false;
    info.success_ = 0;
    info.failures_ = 0;
    info.description_ = job.GetDescription();
    info.watched_ = watched;

    assert(info.size_ > 0);

    if (watched)
    {
      watchedJobStatus_[job.GetId()] = JobStatus_Running;
    }

    jobs_[job.GetId()] = info;

    LOG(INFO) << "New job submitted (" << job.description_ << ")";
  }


  ServerScheduler::ServerScheduler(unsigned int maxJobs) : availableJob_(maxJobs)
  {
    finish_ = false;
    worker_ = boost::thread(Worker, this);
  }


  ServerScheduler::~ServerScheduler()
  {
    finish_ = true;
    worker_.join();
  }


  void ServerScheduler::Submit(ServerJob& job)
  {
    if (job.filters_.empty())
    {
      return;
    }

    SubmitInternal(job, false);
  }


  bool ServerScheduler::SubmitAndWait(ListOfStrings& outputs,
                                      ServerJob& job)
  {
    std::string jobId = job.GetId();

    outputs.clear();

    if (job.filters_.empty())
    {
      return true;
    }

    // Add a sink filter to collect all the results of the filters
    // that have no next filter.
    ServerFilterInstance& sink = job.AddFilter(new Sink(outputs));

    for (std::list<ServerFilterInstance*>::iterator
           it = job.filters_.begin(); it != job.filters_.end(); it++)
    {
      if ((*it) != &sink &&
          (*it)->GetNextFilters().size() == 0 &&
          (*it)->GetFilter().SendOutputsToSink())
      {
        (*it)->ConnectNext(sink);
      }
    }

    // Submit the job
    SubmitInternal(job, true);

    // Wait for the job to complete (either success or failure)
    JobStatus status;

    {
      boost::mutex::scoped_lock lock(mutex_);

      assert(watchedJobStatus_.find(jobId) != watchedJobStatus_.end());

      while (watchedJobStatus_[jobId] == JobStatus_Running)
      {
        watchedJobFinished_.wait(lock);
      }

      status = watchedJobStatus_[jobId];
      watchedJobStatus_.erase(jobId);
    }

    return (status == JobStatus_Success);
  }


  bool ServerScheduler::IsRunning(const std::string& jobId)
  {
    boost::mutex::scoped_lock lock(mutex_);
    return jobs_.find(jobId) != jobs_.end();
  }


  void ServerScheduler::Cancel(const std::string& jobId)
  {
    boost::mutex::scoped_lock lock(mutex_);

    Jobs::iterator job = jobs_.find(jobId);

    if (job != jobs_.end())
    {
      job->second.cancel_ = true;
      LOG(WARNING) << "Canceling a job (" << job->second.description_ << ")";
    }
  }


  float ServerScheduler::GetProgress(const std::string& jobId)
  {
    boost::mutex::scoped_lock lock(mutex_);

    Jobs::iterator job = jobs_.find(jobId);

    if (job == jobs_.end() ||
        job->second.size_ == 0 /* should never happen */)
    {
      // This job is not running
      return 1;
    }

    if (job->second.failures_ != 0)
    {
      return 1;
    }

    if (job->second.size_ == 1)
    {
      return job->second.success_;
    }

    return (static_cast<float>(job->second.success_) /
            static_cast<float>(job->second.size_ - 1));
  }


  void ServerScheduler::GetListOfJobs(ListOfStrings& jobs)
  {
    boost::mutex::scoped_lock lock(mutex_);

    jobs.clear();

    for (Jobs::const_iterator
           it = jobs_.begin(); it != jobs_.end(); it++)
    {
      jobs.push_back(it->first);
    }
  }
}
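For context, a minimal, hypothetical usage sketch of the interface exercised above: a caller builds a ServerJob out of IServerFilter implementations, then runs it synchronously through SubmitAndWait(), which routes the outputs of terminal filters into the Sink defined near the top of this file. Only IServerFilter, ServerJob::AddFilter(), ServerScheduler::SubmitAndWait() and ListOfStrings appear in the source shown here; the IdentityFilter class, the default construction of ServerJob and the ServerScheduler::ListOfStrings qualification are assumptions made for the example.

// Hypothetical usage sketch (not part of the original file); assumes the
// declarations from ServerScheduler.h are available.
namespace Orthanc
{
  // Assumed filter for illustration: forwards its inputs unchanged and asks
  // for its outputs to be collected by the scheduler's Sink.
  class IdentityFilter : public IServerFilter
  {
  public:
    virtual bool SendOutputsToSink() const
    {
      return true;
    }

    virtual bool Apply(ListOfStrings& outputs,
                       const ListOfStrings& inputs)
    {
      outputs = inputs;
      return true;
    }
  };

  // Runs a one-filter job and blocks until it has succeeded or failed.
  bool RunExampleJob(ServerScheduler& scheduler,
                     ServerScheduler::ListOfStrings& results)
  {
    ServerJob job;                       // Assumes ServerJob is default-constructible
    job.AddFilter(new IdentityFilter);   // Ownership passes to the job, as with "new Sink(...)"

    return scheduler.SubmitAndWait(results, job);
  }
}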