#include "gtest/gtest.h"
#include <glog/logging.h>

// Standard and Boost headers for the containers, smart pointers and
// threading primitives used directly in this file
#include <memory>
#include <set>
#include <boost/thread.hpp>

#include "../Core/OrthancException.h"
#include "../Core/Toolbox.h"
#include "../Core/MultiThreading/ArrayFilledByThreads.h"
#include "../Core/MultiThreading/Locker.h"
#include "../Core/MultiThreading/Mutex.h"
#include "../Core/MultiThreading/ReaderWriterLock.h"
#include "../Core/MultiThreading/ThreadedCommandProcessor.h"

using namespace Orthanc;

namespace
{
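  // Minimal command used by the tests below: when executed, it records its
  // value in a shared std::set. A static mutex protects the set, because
  // Execute() can be invoked concurrently by several worker threads.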
  class DynamicInteger : public ICommand
  {
  private:
    int value_;
    std::set<int>& target_;

  public:
    DynamicInteger(int value, std::set<int>& target) :
      value_(value), target_(target)
    {
    }

    int GetValue() const
    {
      return value_;
    }

    virtual bool Execute()
    {
      static boost::mutex mutex;
      boost::mutex::scoped_lock lock(mutex);
      target_.insert(value_);
      return true;
    }
  };

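  // Filler that creates one DynamicInteger per slot (with value 2 * index).
  // It also counts how many items were created, so that the tests can check
  // that ArrayFilledByThreads recreates the items after Invalidate().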
  class MyFiller : public ArrayFilledByThreads::IFiller
  {
  private:
    int size_;
    unsigned int created_;
    std::set<int> set_;

  public:
    MyFiller(int size) : size_(size), created_(0)
    {
    }

    virtual size_t GetFillerSize()
    {
      return size_;
    }

    virtual IDynamicObject* GetFillerItem(size_t index)
    {
      static boost::mutex mutex;
      boost::mutex::scoped_lock lock(mutex);
      created_++;
      return new DynamicInteger(index * 2, set_);
    }

    unsigned int GetCreatedCount() const
    {
      return created_;
    }

    std::set<int> GetSet()
    {
      return set_;
    }
  };
}


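// The queue must deliver the messages in FIFO order, and WaitEmpty() must
// report whether every enqueued message has been dequeued.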
TEST(MultiThreading, SharedMessageQueueBasic)
{
  std::set<int> s;

  SharedMessageQueue q;
  ASSERT_TRUE(q.WaitEmpty(0));
  q.Enqueue(new DynamicInteger(10, s));
  ASSERT_FALSE(q.WaitEmpty(1));
  q.Enqueue(new DynamicInteger(20, s));
  q.Enqueue(new DynamicInteger(30, s));
  q.Enqueue(new DynamicInteger(40, s));

  std::auto_ptr<DynamicInteger> i;
  i.reset(dynamic_cast<DynamicInteger*>(q.Dequeue(1))); ASSERT_EQ(10, i->GetValue());
  i.reset(dynamic_cast<DynamicInteger*>(q.Dequeue(1))); ASSERT_EQ(20, i->GetValue());
  i.reset(dynamic_cast<DynamicInteger*>(q.Dequeue(1))); ASSERT_EQ(30, i->GetValue());
  ASSERT_FALSE(q.WaitEmpty(1));
  i.reset(dynamic_cast<DynamicInteger*>(q.Dequeue(1))); ASSERT_EQ(40, i->GetValue());
  ASSERT_TRUE(q.WaitEmpty(0));
  ASSERT_EQ(NULL, q.Dequeue(1));
}


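// The destructor of SharedMessageQueue must free the messages that are
// still queued when the queue goes out of scope, here through an exception.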
TEST(MultiThreading, SharedMessageQueueClean)
{
  std::set<int> s;

  try
  {
    SharedMessageQueue q;
    q.Enqueue(new DynamicInteger(10, s));
    q.Enqueue(new DynamicInteger(20, s));
    throw OrthancException("Nope");
  }
  catch (OrthancException&)
  {
  }
}


TEST(MultiThreading, ArrayFilledByThreadEmpty)
{
  MyFiller f(0);
  ArrayFilledByThreads a(f);
  a.SetThreadCount(1);
  ASSERT_EQ(0, a.GetSize());
}


TEST(MultiThreading, ArrayFilledByThread1)
{
  MyFiller f(100);
  ArrayFilledByThreads a(f);
  a.SetThreadCount(1);
  ASSERT_EQ(100, a.GetSize());
  for (size_t i = 0; i < a.GetSize(); i++)
  {
    ASSERT_EQ(2 * i, dynamic_cast<DynamicInteger&>(a.GetItem(i)).GetValue());
  }
}


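// Same as above with 4 threads; after Invalidate(), the array must be
// refilled from scratch, which doubles the number of created items.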
TEST(MultiThreading, ArrayFilledByThread4)
{
  MyFiller f(100);
  ArrayFilledByThreads a(f);
  a.SetThreadCount(4);
  ASSERT_EQ(100, a.GetSize());
  for (size_t i = 0; i < a.GetSize(); i++)
  {
    ASSERT_EQ(2 * i, dynamic_cast<DynamicInteger&>(a.GetItem(i)).GetValue());
  }

  ASSERT_EQ(100u, f.GetCreatedCount());

  a.Invalidate();

  ASSERT_EQ(100, a.GetSize());
  ASSERT_EQ(200u, f.GetCreatedCount());
  ASSERT_EQ(4u, a.GetThreadCount());
  ASSERT_TRUE(f.GetSet().empty());

  for (size_t i = 0; i < a.GetSize(); i++)
  {
    ASSERT_EQ(2 * i, dynamic_cast<DynamicInteger&>(a.GetItem(i)).GetValue());
  }
}


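// The processor executes 100 commands on 4 threads; after Join(), the
// shared set must contain exactly the even numbers below 200.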
TEST(MultiThreading, CommandProcessor)
{
  ThreadedCommandProcessor p(4);

  std::set<int> s;

  for (size_t i = 0; i < 100; i++)
  {
    p.Post(new DynamicInteger(i * 2, s));
  }

  p.Join();

  for (size_t i = 0; i < 200; i++)
  {
    if (i % 2)
      ASSERT_TRUE(s.find(i) == s.end());
    else
      ASSERT_TRUE(s.find(i) != s.end());
  }
}


TEST(MultiThreading, Mutex)
{
  Mutex mutex;
  Locker locker(mutex);
}


TEST(MultiThreading, ReaderWriterLock)
{
  ReaderWriterLock lock;

  {
    Locker locker1(lock.ForReader());
    Locker locker2(lock.ForReader());
  }

  {
    Locker locker3(lock.ForWriter());
  }
}


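// Below is a prototype of a job scheduler built on top of SharedMessageQueue:
// a ServerJob is a small graph of filters (IServerFilter) whose outputs feed
// the inputs of the next filters, and a ServerScheduler executes the filters
// on a worker thread while tracking the progress of each job.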
#include "../Core/ICommand.h"
#include "../Core/Toolbox.h"
#include "../Core/Uuid.h"
#include "../Core/MultiThreading/SharedMessageQueue.h"
#include <boost/lexical_cast.hpp>

// Standard headers used directly by the code below
#include <cassert>
#include <cstdio>
#include <list>
#include <map>
#include <string>


namespace Orthanc
{
  typedef std::list<std::string> ListOfStrings;

  class IServerFilter
  {
  public:
    virtual ~IServerFilter()
    {
    }

    virtual bool Apply(ListOfStrings& outputs,
                       const ListOfStrings& inputs) = 0;
  };

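  // Terminal filter that simply copies all its inputs to an external list.
  // It is appended automatically by SubmitAndWait() to collect the outputs
  // of the filters that have no successor.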
  class Sink : public IServerFilter
  {
  private:
    ListOfStrings& target_;

  public:
    Sink(ListOfStrings& target) : target_(target)
    {
    }

    virtual bool Apply(ListOfStrings& outputs,
                       const ListOfStrings& inputs)
    {
      for (ListOfStrings::const_iterator
             it = inputs.begin(); it != inputs.end(); it++)
      {
        target_.push_back(*it);
      }

      return true;
    }
  };


  class IServerFilterListener
  {
  public:
    virtual ~IServerFilterListener()
    {
    }

    virtual void SignalSuccess(const std::string& jobId) = 0;

    virtual void SignalFailure(const std::string& jobId) = 0;
  };

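  // Wraps an IServerFilter inside an IDynamicObject so that it can be
  // enqueued in a SharedMessageQueue. It owns the filter, remembers the job
  // it belongs to, and forwards its outputs to the next filters on success.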
  class FilterWrapper : public IDynamicObject
  {
    friend class ServerScheduler;

  private:
    IServerFilter *filter_;
    std::string jobId_;
    ListOfStrings inputs_;
    std::list<FilterWrapper*> next_;

    bool Execute(IServerFilterListener& listener)
    {
      ListOfStrings outputs;
      if (!filter_->Apply(outputs, inputs_))
      {
        listener.SignalFailure(jobId_);
        return true;
      }

      for (std::list<FilterWrapper*>::iterator
             it = next_.begin(); it != next_.end(); it++)
      {
        for (ListOfStrings::const_iterator
               output = outputs.begin(); output != outputs.end(); output++)
        {
          (*it)->AddInput(*output);
        }
      }

      listener.SignalSuccess(jobId_);
      return true;
    }

  public:
    FilterWrapper(IServerFilter *filter,
                  const std::string& jobId) :
      filter_(filter),
      jobId_(jobId)
    {
      if (filter_ == NULL)
      {
        throw OrthancException(ErrorCode_ParameterOutOfRange);
      }
    }

    virtual ~FilterWrapper()
    {
      if (filter_ != NULL)
      {
        delete filter_;
      }
    }

    const std::string& GetJobId() const
    {
      return jobId_;
    }

    void AddInput(const std::string& input)
    {
      inputs_.push_back(input);
    }

    void ConnectNext(FilterWrapper& filter)
    {
      next_.push_back(&filter);
    }

    const std::list<FilterWrapper*>& GetNextFilters() const
    {
      return next_;
    }
  };

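  // A ServerJob owns an ordered list of filters; ownership is transferred
  // to the scheduler's queue when the job is submitted.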
  class ServerJob
  {
    friend class ServerScheduler;

  private:
    std::list<FilterWrapper*> filters_;
    std::string jobId_;
    bool submitted_;
    std::string description_;

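    // Check that every filter only feeds filters that come later in the
    // list, i.e. that the filters were added in topological order.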
    void CheckOrdering()
    {
      std::map<FilterWrapper*, unsigned int> index;

      unsigned int count = 0;
      for (std::list<FilterWrapper*>::const_iterator
             it = filters_.begin(); it != filters_.end(); it++)
      {
        index[*it] = count++;
      }

      for (std::list<FilterWrapper*>::const_iterator
             it = filters_.begin(); it != filters_.end(); it++)
      {
        const std::list<FilterWrapper*>& nextFilters = (*it)->GetNextFilters();

        for (std::list<FilterWrapper*>::const_iterator
               next = nextFilters.begin(); next != nextFilters.end(); next++)
        {
          if (index.find(*next) == index.end() ||
              index[*next] <= index[*it])
          {
            // You must reorder your calls to "ServerJob::AddFilter"
            throw OrthancException("Bad ordering of filters in a job");
          }
        }
      }
    }


    size_t Submit(SharedMessageQueue& target,
                  IServerFilterListener& listener)
    {
      if (submitted_)
      {
        // This job has already been submitted
        throw OrthancException(ErrorCode_BadSequenceOfCalls);
      }

      CheckOrdering();

      size_t size = filters_.size();

      for (std::list<FilterWrapper*>::iterator
             it = filters_.begin(); it != filters_.end(); it++)
      {
        target.Enqueue(*it);
      }

      filters_.clear();
      submitted_ = true;

      return size;
    }

  public:
    ServerJob()
    {
      jobId_ = Toolbox::GenerateUuid();
      submitted_ = false;
      description_ = "no description";
    }

    ~ServerJob()
    {
      for (std::list<FilterWrapper*>::iterator
             it = filters_.begin(); it != filters_.end(); it++)
      {
        delete *it;
      }
    }

    const std::string& GetId() const
    {
      return jobId_;
    }

    void SetDescription(const char* description)
    {
      description_ = description;
    }

    const std::string& GetDescription() const
    {
      return description_;
    }

    FilterWrapper& AddFilter(IServerFilter* filter)
    {
      if (submitted_)
      {
        throw OrthancException(ErrorCode_BadSequenceOfCalls);
      }

      filters_.push_back(new FilterWrapper(filter, jobId_));

      return *filters_.back();
    }
  };

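  // The scheduler runs the filters of the submitted jobs on a single worker
  // thread. It listens to the success/failure of each filter in order to
  // track per-job progress and to wake up the callers of SubmitAndWait().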
  class ServerScheduler : public IServerFilterListener
  {
  private:
    struct JobInfo
    {
      bool watched_;
      bool cancel_;
      size_t size_;
      size_t success_;
      size_t failures_;
      std::string description_;
    };

    enum JobStatus
    {
      JobStatus_Running = 1,
      JobStatus_Success = 2,
      JobStatus_Failure = 3
    };

    typedef std::map<std::string, JobInfo> Jobs;

    boost::mutex mutex_;
    boost::condition_variable jobFinished_;
    Jobs jobs_;
    SharedMessageQueue queue_;
    bool finish_;
    boost::thread worker_;
    std::map<std::string, JobStatus> watchedJobStatus_;

    JobInfo& GetJobInfo(const std::string& jobId)
    {
      Jobs::iterator info = jobs_.find(jobId);

      if (info == jobs_.end())
      {
        throw OrthancException(ErrorCode_InternalError);
      }

      return info->second;
    }

    virtual void SignalSuccess(const std::string& jobId)
    {
      boost::mutex::scoped_lock lock(mutex_);

      JobInfo& info = GetJobInfo(jobId);
      info.success_++;

      assert(info.failures_ == 0);

      if (info.success_ >= info.size_)
      {
        if (info.watched_)
        {
          watchedJobStatus_[jobId] = JobStatus_Success;
          jobFinished_.notify_all();
        }

        LOG(INFO) << "Job successfully finished (" << info.description_ << ")";
        jobs_.erase(jobId);
      }
    }

    virtual void SignalFailure(const std::string& jobId)
    {
      boost::mutex::scoped_lock lock(mutex_);

      JobInfo& info = GetJobInfo(jobId);
      info.failures_++;

      if (info.success_ + info.failures_ >= info.size_)
      {
        if (info.watched_)
        {
          watchedJobStatus_[jobId] = JobStatus_Failure;
          jobFinished_.notify_all();
        }

        LOG(ERROR) << "Job has failed (" << info.description_ << ")";
        jobs_.erase(jobId);
      }
    }

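    // Worker thread: dequeue the next filter, skip it if its parent job has
    // already failed or was canceled, and execute it otherwise.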
    static void Worker(ServerScheduler* that)
    {
      static const int32_t TIMEOUT = 100;

      while (!that->finish_)
      {
        std::auto_ptr<IDynamicObject> object(that->queue_.Dequeue(TIMEOUT));
        if (object.get() != NULL)
        {
          FilterWrapper& filter = dynamic_cast<FilterWrapper&>(*object);

          // Skip the execution of this filter if its parent job has
          // previously failed.
          bool jobHasFailed;
          {
            boost::mutex::scoped_lock lock(that->mutex_);
            JobInfo& info = that->GetJobInfo(filter.GetJobId());
            jobHasFailed = (info.failures_ > 0 || info.cancel_);
          }

          if (jobHasFailed)
          {
            that->SignalFailure(filter.GetJobId());
          }
          else
          {
            filter.Execute(*that);
          }
        }
      }
    }

    void SubmitInternal(ServerJob& job,
                        bool watched)
    {
      boost::mutex::scoped_lock lock(mutex_);

      JobInfo info;
      info.size_ = job.Submit(queue_, *this);
      info.cancel_ = false;
      info.success_ = 0;
      info.failures_ = 0;
      info.description_ = job.GetDescription();
      info.watched_ = watched;

      assert(info.size_ > 0);

      if (watched)
      {
        watchedJobStatus_[job.GetId()] = JobStatus_Running;
      }

      jobs_[job.GetId()] = info;

      LOG(INFO) << "New job submitted (" << job.description_ << ")";
    }

  public:
    ServerScheduler()
    {
      finish_ = false;
      worker_ = boost::thread(Worker, this);
    }

    ~ServerScheduler()
    {
      finish_ = true;
      worker_.join();
    }

    void Submit(ServerJob& job)
    {
      if (job.filters_.empty())
      {
        return;
      }

      SubmitInternal(job, false);
    }

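    // Submit the job, then block until all its filters have been processed.
    // A Sink filter is appended to gather the outputs of the terminal
    // filters; the result is true iff the whole job succeeded.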
    bool SubmitAndWait(ListOfStrings& outputs,
                       ServerJob& job)
    {
      std::string jobId = job.GetId();

      outputs.clear();

      if (job.filters_.empty())
      {
        return true;
      }

      // Add a sink filter to collect all the results of the filters
      // that have no next filter.
      FilterWrapper& sink = job.AddFilter(new Sink(outputs));

      for (std::list<FilterWrapper*>::iterator
             it = job.filters_.begin(); it != job.filters_.end(); it++)
      {
        if ((*it) != &sink &&
            (*it)->GetNextFilters().size() == 0)
        {
          (*it)->ConnectNext(sink);
        }
      }

      // Submit the job
      SubmitInternal(job, true);

      // Wait for the job to complete (either success or failure)
      JobStatus status;

      {
        boost::mutex::scoped_lock lock(mutex_);

        assert(watchedJobStatus_.find(jobId) != watchedJobStatus_.end());

        while (watchedJobStatus_[jobId] == JobStatus_Running)
        {
          jobFinished_.wait(lock);
        }

        status = watchedJobStatus_[jobId];
        watchedJobStatus_.erase(jobId);
      }

      return (status == JobStatus_Success);
    }


    bool IsRunning(const std::string& jobId)
    {
      boost::mutex::scoped_lock lock(mutex_);
      return jobs_.find(jobId) != jobs_.end();
    }


    void Cancel(const std::string& jobId)
    {
      boost::mutex::scoped_lock lock(mutex_);

      Jobs::iterator job = jobs_.find(jobId);

      if (job != jobs_.end())
      {
        job->second.cancel_ = true;
        LOG(WARNING) << "Canceling a job (" << job->second.description_ << ")";
      }
    }


    // Returns a number between 0 and 1
    float GetProgress(const std::string& jobId)
    {
      boost::mutex::scoped_lock lock(mutex_);

      Jobs::iterator job = jobs_.find(jobId);

      if (job == jobs_.end() ||
          job->second.size_ == 0  /* should never happen */)
      {
        // This job is not running
        return 1;
      }

      if (job->second.failures_ != 0)
      {
        return 1;
      }

      if (job->second.size_ == 1)
      {
        return job->second.success_;
      }

      return (static_cast<float>(job->second.success_) /
              static_cast<float>(job->second.size_ - 1));
    }

    bool IsRunning(const ServerJob& job)
    {
      return IsRunning(job.GetId());
    }

    void Cancel(const ServerJob& job)
    {
      Cancel(job.GetId());
    }

    float GetProgress(const ServerJob& job)
    {
      // Forward to the string overload (calling GetProgress(job) here
      // would recurse forever)
      return GetProgress(job.GetId());
    }

    void GetListOfJobs(ListOfStrings& jobs)
    {
      boost::mutex::scoped_lock lock(mutex_);

      jobs.clear();

      for (Jobs::const_iterator
             it = jobs_.begin(); it != jobs_.end(); it++)
      {
        jobs.push_back(it->first);
      }
    }
  };

}


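// Toy filter used by the interactive test below: it multiplies each of its
// inputs by a constant factor and sleeps for one second to simulate work.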
class Tutu : public IServerFilter
{
private:
  int factor_;

public:
  Tutu(int f) : factor_(f)
  {
  }

  virtual bool Apply(ListOfStrings& outputs,
                     const ListOfStrings& inputs)
  {
    for (ListOfStrings::const_iterator
           it = inputs.begin(); it != inputs.end(); it++)
    {
      int a = boost::lexical_cast<int>(*it);
      int b = factor_ * a;

      printf("%d * %d = %d\n", a, factor_, b);

      //if (a == 84) { printf("BREAK\n"); return false; }

      outputs.push_back(boost::lexical_cast<std::string>(b));
    }

    Toolbox::USleep(1000000);

    return true;
  }
};


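// Background thread that periodically prints the progress of the running
// jobs (or, with the alternative branch, cancels the job after a delay).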
static void Tata(ServerScheduler* s, ServerJob* j)
{
#if 1
  for (;;)
  {
    ListOfStrings l;
    s->GetListOfJobs(l);
    for (ListOfStrings::iterator i = l.begin(); i != l.end(); i++)
      printf(">> %s: %0.1f\n", i->c_str(), 100.0f * s->GetProgress(*i));
    Toolbox::USleep(100000);
  }
#else
  ListOfStrings l;
  s->GetListOfJobs(l);
  for (ListOfStrings::iterator i = l.begin(); i != l.end(); i++)
    printf(">> %s\n", i->c_str());
  Toolbox::USleep(1500000);
  s->Cancel(*j);
  Toolbox::USleep(1000000);
  s->GetListOfJobs(l);
  for (ListOfStrings::iterator i = l.begin(); i != l.end(); i++)
    printf(">> %s\n", i->c_str());
#endif
}


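// Interactive test: builds a chain of four Tutu filters (x2, x3, x4, x5),
// feeds "42" to the first one, and waits for the chain to complete while
// the Tata thread reports progress.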
TEST(Toto, Toto)
{
  ServerScheduler scheduler;

  ServerJob job;
  FilterWrapper& f2 = job.AddFilter(new Tutu(2));
  FilterWrapper& f3 = job.AddFilter(new Tutu(3));
  FilterWrapper& f4 = job.AddFilter(new Tutu(4));
  FilterWrapper& f5 = job.AddFilter(new Tutu(5));
  f2.AddInput(boost::lexical_cast<std::string>(42));
  //f3.AddInput(boost::lexical_cast<std::string>(42));
  //f4.AddInput(boost::lexical_cast<std::string>(42));
  f2.ConnectNext(f3);
  f3.ConnectNext(f4);
  f4.ConnectNext(f5);

  job.SetDescription("tutu");

  boost::thread t(Tata, &scheduler, &job);

  //scheduler.Submit(job);

  ListOfStrings l;
  scheduler.SubmitAndWait(l, job);

  for (ListOfStrings::iterator i = l.begin(); i != l.end(); i++)
  {
    printf("** %s\n", i->c_str());
  }

  //Toolbox::ServerBarrier();
  //Toolbox::USleep(3000000);

  //t.join();
}