// file      : libbuild2/scheduler.ixx -*- C++ -*-
// license   : MIT; see accompanying LICENSE file

namespace build2
{
  inline size_t scheduler::
  wait (size_t start_count, const atomic_count& task_count, work_queue wq)
  {
    // Note that task_count is a synchronization point.
    //
    size_t tc;
    if ((tc = task_count.load (memory_order_acquire)) <= start_count)
      return tc;

    if (optional<size_t> r = wait_impl (start_count, task_count, wq))
      return *r;

    return suspend (start_count, task_count);
  }

  inline size_t scheduler::
  wait (const atomic_count& task_count, work_queue wq)
  {
    return wait (0, task_count, wq);
  }

  template <typename L>
  inline size_t scheduler::
  wait (size_t start_count,
        const atomic_count& task_count,
        L& lock,
        work_queue wq)
  {
    // Note that task_count is a synchronization point.
    //
    size_t tc;
    if ((tc = task_count.load (memory_order_acquire)) <= start_count)
      return tc;

    if (optional<size_t> r = wait_impl (start_count, task_count, wq))
      return *r;

    // Unlock the lock before suspending (note that it is not relocked on
    // return).
    //
    lock.unlock ();
    return suspend (start_count, task_count);
  }
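
  // A minimal sketch of the fork/join pattern the wait() overloads above
  // support, assuming the async() interface declared in scheduler.hxx (the
  // async() signature and the scheduler constructor arguments shown here
  // are assumptions, not part of this file):
  //
  //   scheduler sched (4 /* max_active */);
  //
  //   atomic_count task_count (0);
  //
  //   for (size_t i (0); i != n; ++i)
  //     sched.async (task_count, [] (size_t i) { /* work */ }, i);
  //
  //   sched.wait (task_count); // Wait for all the tasks to complete.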

  inline void scheduler::
  deactivate (bool external)
  {
    if (max_active_ != 1) // Noop in serial execution.
      deactivate_impl (external, lock (mutex_));
  }

  inline void scheduler::
  activate (bool external)
  {
    if (max_active_ != 1) // Noop in serial execution.
      activate_impl (external, false /* collision */);
  }
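
  // A minimal sketch of the deactivate()/activate() pairing: an active
  // thread that is about to block on something outside the scheduler (for
  // example, waiting for a child process) gives up its active slot and
  // reclaims it afterwards. Reading the external argument as "blocking on
  // an external event" is an assumption based on the parameter name:
  //
  //   sched.deactivate (true /* external */);
  //   // ... block on the external event ...
  //   sched.activate (true /* external */);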

  inline scheduler::queue_mark::
  queue_mark (scheduler& s)
      : tq_ (s.queue ())
  {
    if (tq_ != nullptr)
    {
      lock ql (tq_->mutex);

      // If the queue is not already marked in its entirety, save the old
      // mark and mark the whole queue. Otherwise there is nothing to do
      // (and nothing for the destructor to restore).
      //
      if (tq_->mark != s.task_queue_depth_)
      {
        om_ = tq_->mark;
        tq_->mark = s.task_queue_depth_;
      }
      else
        tq_ = nullptr;
    }
  }

  inline scheduler::queue_mark::
  ~queue_mark ()
  {
    if (tq_ != nullptr)
    {
      lock ql (tq_->mutex);
      // If the queue has drained, reset the mark to the current tail.
      // Otherwise restore the mark saved by the constructor.
      //
      tq_->mark = tq_->size == 0 ? tq_->tail : om_;
    }
  }
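
  // A minimal sketch of queue_mark usage: a RAII guard that marks the
  // calling thread's task queue for the duration of a scope. Based on the
  // code above, the mark appears to prevent tasks that were queued before
  // the scope from being worked from within it (this reading is an
  // assumption):
  //
  //   {
  //     scheduler::queue_mark qm (sched);
  //
  //     // Nested async()/wait() here (assumed not to pick up tasks queued
  //     // before the mark).
  //   }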
}