Diffstat (limited to 'libbuild2')
-rw-r--r--   libbuild2/adhoc-rule-cxx.cxx    5
-rw-r--r--   libbuild2/module.cxx            6
-rw-r--r--   libbuild2/scheduler.hxx        28
-rw-r--r--   libbuild2/scheduler.ixx        33
-rw-r--r--   libbuild2/scheduler.txx         4
5 files changed, 70 insertions, 6 deletions
diff --git a/libbuild2/adhoc-rule-cxx.cxx b/libbuild2/adhoc-rule-cxx.cxx
index 55f8ccb..4bf67f0 100644
--- a/libbuild2/adhoc-rule-cxx.cxx
+++ b/libbuild2/adhoc-rule-cxx.cxx
@@ -295,6 +295,11 @@ namespace build2
//
context& ctx (*t.ctx.module_context);
+ // Mark the queue so that we don't work any tasks that may already be
+ // there.
+ //
+ scheduler::queue_mark qm (ctx.sched);
+
const uint16_t verbosity (3); // Project creation command verbosity.
// Project and location signatures.
diff --git a/libbuild2/module.cxx b/libbuild2/module.cxx
index b2b4036..9a7975d 100644
--- a/libbuild2/module.cxx
+++ b/libbuild2/module.cxx
@@ -415,6 +415,12 @@ namespace build2
//
context& ctx (*bs.ctx.module_context);
+ // Mark the queue so that we don't work any tasks that may already be
+ // there (we could be called in strange ways, for example, as part of
+ // match via dir_search()).
+ //
+ scheduler::queue_mark qm (ctx.sched);
+
// Load the imported project in the module context.
//
pair<names, const scope&> lr (
diff --git a/libbuild2/scheduler.hxx b/libbuild2/scheduler.hxx
index 7e052cd..e1bb715 100644
--- a/libbuild2/scheduler.hxx
+++ b/libbuild2/scheduler.hxx
@@ -125,6 +125,24 @@ namespace build2
return wait (0, task_count, wq);
}
+ // Mark the queue so that we don't work any tasks that may already be
+ // there. In the normal "bunch of async() calls followed by wait()"
+ // cases this happens automatically but in special cases where async()
+ // calls from different "levels" can mix we need to do explicit marking
+ // (see the task queue description below for details).
+ //
+ struct task_queue;
+ struct queue_mark
+ {
+ explicit
+ queue_mark (scheduler&);
+ ~queue_mark ();
+
+ private:
+ task_queue* tq_;
+ size_t om_;
+ };
+
// Resume threads waiting on this task count.
//
void
@@ -446,7 +464,9 @@ namespace build2
lock
wait_idle ();
- private:
+ // Implementation details.
+ //
+ public:
bool
activate_helper (lock&);
@@ -494,7 +514,6 @@ namespace build2
static std::decay_t<T>
decay_copy (T&& x) {return forward<T> (x);}
- private:
// Monitor.
//
atomic_count* monitor_count_ = nullptr; // NULL if not used.
@@ -629,8 +648,8 @@ namespace build2
// back.
//
// To satisfy the second requirement, the master thread stores the index
- // of the first task it has queued at this "level" and makes sure it
- // doesn't try to deque any task beyond that.
+ // (mark) of the first task it has queued at this "level" and makes sure
+ // it doesn't try to deque any task beyond that.
//
size_t task_queue_depth_; // Multiple of max_active.
@@ -830,6 +849,7 @@ namespace build2
};
}
+#include <libbuild2/scheduler.ixx>
#include <libbuild2/scheduler.txx>
#endif // LIBBUILD2_SCHEDULER_HXX
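
To illustrate the intended use of the new queue_mark, here is a minimal usage sketch (not part of this commit; nested_work() is a hypothetical caller and the async()/wait()/atomic_count names follow the scheduler interface shown above):

// Code running inside an outer task queues nested work and waits for it.
// Without the mark the nested wait() below could start working tasks that
// were queued by the outer "level" before this function was entered.
//
static void
nested_work (scheduler& s)
{
  scheduler::queue_mark qm (s); // Don't work outer-level tasks.

  atomic_count task_count (0);

  s.async (task_count, [] () {/* First nested task. */});
  s.async (task_count, [] () {/* Second nested task. */});

  s.wait (task_count); // Waits for (and may work) only the tasks queued above.
}

When qm goes out of scope the previous mark is restored (or reset, if the queue has been drained) so the outer level can again work its remaining queued tasks.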
diff --git a/libbuild2/scheduler.ixx b/libbuild2/scheduler.ixx
new file mode 100644
index 0000000..f9f0f2e
--- /dev/null
+++ b/libbuild2/scheduler.ixx
@@ -0,0 +1,33 @@
+// file : libbuild2/scheduler.ixx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+namespace build2
+{
+ inline scheduler::queue_mark::
+ queue_mark (scheduler& s)
+ : tq_ (s.queue ())
+ {
+ if (tq_ != nullptr)
+ {
+ lock ql (tq_->mutex);
+
+ if (tq_->mark != s.task_queue_depth_)
+ {
+ om_ = tq_->mark;
+ tq_->mark = s.task_queue_depth_;
+ }
+ else
+ tq_ = nullptr;
+ }
+ }
+
+ inline scheduler::queue_mark::
+ ~queue_mark ()
+ {
+ if (tq_ != nullptr)
+ {
+ lock ql (tq_->mutex);
+ tq_->mark = tq_->size == 0 ? tq_->tail : om_;
+ }
+ }
+}
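
The constructor/destructor logic above may be easier to follow in a simplified, self-contained model (an illustration only; toy_queue and toy_mark are made-up names, and the real task_queue is a fixed-depth ring buffer protected by a mutex rather than a deque):

#include <cstddef>
#include <deque>
#include <functional>
#include <utility>

struct toy_queue
{
  std::deque<std::function<void ()>> tasks;
  std::size_t mark = 0; // Index of the first task the current "level" may work.
};

// Work (execute and remove) tasks from the back of the queue but never go
// below the current mark, that is, never work tasks queued by an outer level.
//
inline void
work_queue (toy_queue& q)
{
  while (q.tasks.size () > q.mark)
  {
    std::function<void ()> t (std::move (q.tasks.back ()));
    q.tasks.pop_back ();
    t ();
  }
}

// RAII analog of queue_mark: raise the mark past the currently queued tasks
// so that a nested level cannot work them, then restore the old mark on
// destruction (or reset it if the queue has been drained in the meantime).
//
struct toy_mark
{
  explicit toy_mark (toy_queue& q): q_ (q), old_mark_ (q.mark)
  {
    q_.mark = q_.tasks.size ();
  }

  ~toy_mark ()
  {
    q_.mark = q_.tasks.empty () ? 0 : old_mark_;
  }

private:
  toy_queue& q_;
  std::size_t old_mark_;
};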
diff --git a/libbuild2/scheduler.txx b/libbuild2/scheduler.txx
index 7742521..9cfc411 100644
--- a/libbuild2/scheduler.txx
+++ b/libbuild2/scheduler.txx
@@ -77,8 +77,8 @@ namespace build2
tq->stat_full++;
// We have to perform the same mark adjust/restore as in pop_back()
- // since the task we are about to execute synchronously may try to
- // work the queue.
+ // (and in queue_mark) since the task we are about to execute
+ // synchronously may try to work the queue.
//
// It would have been cleaner to package all this logic into push()
// but that would require dragging function/argument types into it.
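
For context, the adjust/restore this hunk refers to amounts to the same pattern as queue_mark: before executing a task synchronously (because the queue is full), async() raises the mark and restores it afterwards so that the task cannot work entries queued by the outer level. A rough sketch in terms of the toy_queue/toy_mark model above (push_or_run and depth are made-up names; the real code also updates statistics and runs under the queue lock):

// If there is room, queue the task; otherwise execute it synchronously in
// the caller, first raising the mark for the duration of the call.
//
inline void
push_or_run (toy_queue& q, std::size_t depth, std::function<void ()> t)
{
  if (q.tasks.size () < depth)
    q.tasks.push_back (std::move (t));
  else
  {
    toy_mark m (q); // Same mark adjust/restore as in pop_back ().
    t ();
  }
}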