aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBoris Kolpackov <boris@codesynthesis.com>2018-02-16 16:24:07 +0200
committerBoris Kolpackov <boris@codesynthesis.com>2018-02-16 16:40:45 +0200
commit6293ede7a742866a713050737cc2b43d51161b6f (patch)
treef259845f47c2e03432bdd98b4b665e1a3de85d55
parentad9cb7fec5cc74697322620909e0ff1ba9ecb61b (diff)
Add support for detecting dependency cycles
-rw-r--r--build2/algorithm.cxx53
-rw-r--r--build2/algorithm.hxx45
-rw-r--r--build2/algorithm.ixx66
-rw-r--r--build2/diagnostics.hxx45
-rw-r--r--build2/test/rule.cxx12
-rw-r--r--build2/test/script/parser.cxx12
6 files changed, 189 insertions, 44 deletions
diff --git a/build2/algorithm.cxx b/build2/algorithm.cxx
index 38693bc..17c2184 100644
--- a/build2/algorithm.cxx
+++ b/build2/algorithm.cxx
@@ -100,6 +100,15 @@ namespace build2
return q ? import_existing (pk) : search_existing_target (pk);
}
+ // target_lock
+ //
+#ifdef __cpp_thread_local
+ thread_local
+#else
+ __thread
+#endif
+ const target_lock* target_lock::stack = nullptr;
+
// If the work_queue is absent, then we don't wait.
//
target_lock
@@ -129,6 +138,12 @@ namespace build2
//
if (e >= busy)
{
+ // Check for dependency cycles. The cycle members should be evident
+ // from the "while ..." info lines that will follow.
+ //
+ if (dependency_cycle (a, ct))
+ fail << "dependency cycle detected involving target " << ct;
+
if (!wq)
return target_lock {a, nullptr, e - b};
@@ -520,31 +535,39 @@ namespace build2
return match_impl (l, false /* step */, try_match);
// Pass "disassembled" lock since the scheduler queue doesn't support
- // task destruction. Also pass our diagnostics stack (this is safe since
- // we expect the caller to wait for completion before unwinding its diag
- // stack).
+ // task destruction.
+ //
+ target_lock::data ld (l.release ());
+
+ // Also pass our diagnostics and lock stacks (this is safe since we
+ // expect the caller to wait for completion before unwinding its stack).
//
if (sched.async (start_count,
*task_count,
- [a, try_match] (target& t,
- size_t offset,
- const diag_frame* ds)
+ [a, try_match] (const diag_frame* ds,
+ const target_lock* ls,
+ target& t, size_t offset)
{
- diag_frame df (ds);
+ // Switch to caller's diag and lock stacks.
+ //
+ diag_frame::stack_guard dsg (ds);
+ target_lock::stack_guard lsg (ls);
+
try
{
phase_lock pl (run_phase::match); // Can throw.
{
target_lock l {a, &t, offset}; // Reassemble.
match_impl (l, false /* step */, try_match);
- // Unlock withing the match phase.
+ // Unlock within the match phase.
}
}
catch (const failed&) {} // Phase lock failure.
},
- ref (*l.release ()),
- l.offset,
- diag_frame::stack))
+ diag_frame::stack,
+ target_lock::stack,
+ ref (*ld.target),
+ ld.offset))
return make_pair (true, target_state::postponed); // Queued.
// Matched synchronously, fall through.
@@ -979,13 +1002,13 @@ namespace build2
//
if (sched.async (start_count,
*task_count,
- [a] (target& t, const diag_frame* ds)
+ [a] (const diag_frame* ds, target& t)
{
- diag_frame df (ds);
+ diag_frame::stack_guard dsg (ds);
execute_impl (a, t);
},
- ref (t),
- diag_frame::stack))
+ diag_frame::stack,
+ ref (t)))
return target_state::unknown; // Queued.
// Executed synchronously, fall through.
diff --git a/build2/algorithm.hxx b/build2/algorithm.hxx
index 1f8736f..62c312f 100644
--- a/build2/algorithm.hxx
+++ b/build2/algorithm.hxx
@@ -104,7 +104,9 @@ namespace build2
// Target match lock: a non-const target reference and the target::offset_*
// state that has already been "achieved". Note that target::task_count
- // itself is set to busy for the duration or the lock.
+ // itself is set to busy for the duration of the lock. While at it we also
+ // maintain a stack of active locks in the current dependency chain (used to
+ // detect dependency cycles).
//
struct target_lock
{
@@ -120,6 +122,8 @@ namespace build2
void
unlock ();
+ // Movable-only type with move-assignment only to NULL lock.
+ //
target_lock () = default;
target_lock (target_lock&&);
target_lock& operator= (target_lock&&);
@@ -130,13 +134,42 @@ namespace build2
// Implementation details.
//
~target_lock ();
- target_lock (action_type a, target_type* t, size_t o)
- : action (a), target (t), offset (o) {}
-
- target_type*
- release () {auto r (target); target = nullptr; return r;}
+ target_lock (action_type, target_type*, size_t);
+
+ struct data
+ {
+ action_type action;
+ target_type* target;
+ size_t offset;
+ };
+
+ data
+ release ();
+
+ static
+#ifdef __cpp_thread_local
+ thread_local
+#else
+ __thread
+#endif
+ const target_lock* stack; // Tip of the stack.
+ const target_lock* prev;
+
+ struct stack_guard
+ {
+ explicit stack_guard (const target_lock* s): s_ (stack) {stack = s;}
+ ~stack_guard () {stack = s_;}
+ const target_lock* s_;
+ };
};
+ // If this target is already locked in this dependency chain, then return
+ // the corresponding lock. Return NULL otherwise (so can be used as a boolean
+ // predicate).
+ //
+ const target_lock*
+ dependency_cycle (action, const target&);
+
// If the target is already applied (for this action) or executed, then no
// lock is acquired. Otherwise, the target must not yet be matched for this
// action.
diff --git a/build2/algorithm.ixx b/build2/algorithm.ixx
index 9b1df35..7572548 100644
--- a/build2/algorithm.ixx
+++ b/build2/algorithm.ixx
@@ -131,16 +131,45 @@ namespace build2
void
unlock_impl (action, target&, size_t);
+ inline target_lock::
+ target_lock (action_type a, target_type* t, size_t o)
+ : action (a), target (t), offset (o)
+ {
+ if (target != nullptr)
+ {
+ prev = stack;
+ stack = this;
+ }
+ }
+
inline void target_lock::
unlock ()
{
if (target != nullptr)
{
unlock_impl (action, *target, offset);
+
+ assert (stack == this);
+ stack = prev;
target = nullptr;
}
}
+ inline auto target_lock::
+ release () -> data
+ {
+ data r {action, target, offset};
+
+ if (target != nullptr)
+ {
+ assert (stack == this);
+ stack = prev;
+ target = nullptr;
+ }
+
+ return r;
+ }
+
inline target_lock::
~target_lock ()
{
@@ -151,7 +180,14 @@ namespace build2
target_lock (target_lock&& x)
: action (x.action), target (x.target), offset (x.offset)
{
- x.target = nullptr;
+ if (target != nullptr)
+ {
+ assert (stack == &x);
+ prev = x.prev;
+ stack = this;
+
+ x.target = nullptr;
+ }
}
inline target_lock& target_lock::
@@ -159,15 +195,39 @@ namespace build2
{
if (this != &x)
{
- unlock ();
+ assert (target == nullptr);
+
action = x.action;
target = x.target;
offset = x.offset;
- x.target = nullptr;
+
+ if (target != nullptr)
+ {
+ assert (stack == &x);
+ prev = x.prev;
+ stack = this;
+
+ x.target = nullptr;
+ }
}
+
return *this;
}
+ inline const target_lock*
+ dependency_cycle (action a, const target& t)
+ {
+ const target_lock* l (target_lock::stack);
+
+ for (; l != nullptr; l = l->prev)
+ {
+ if (l->action == a && l->target == &t)
+ break;
+ }
+
+ return l;
+ }
+
inline target_lock
lock (action a, const target& t)
{
diff --git a/build2/diagnostics.hxx b/build2/diagnostics.hxx
index 653d9a6..b9757d6 100644
--- a/build2/diagnostics.hxx
+++ b/build2/diagnostics.hxx
@@ -160,13 +160,37 @@ namespace build2
{
explicit
diag_frame (void (*f) (const diag_frame&, const diag_record&))
- : func_ (f), prev_ (stack) {stack = this;}
+ : func_ (f)
+ {
+ if (func_ != nullptr)
+ {
+ prev_ = stack;
+ stack = this;
+ }
+ }
- // Start with an existing stack, for example, from another thread.
- //
- explicit
- diag_frame (const diag_frame* prev)
- : prev_ (stack) {stack = prev;} // Just a restore guard.
+ diag_frame (diag_frame&& x)
+ : func_ (x.func_)
+ {
+ if (func_ != nullptr)
+ {
+ prev_ = x.prev_;
+ stack = this;
+
+ x.func_ = nullptr;
+ }
+ }
+
+ diag_frame& operator= (diag_frame&&) = delete;
+
+ diag_frame (const diag_frame&) = delete;
+ diag_frame& operator= (const diag_frame&) = delete;
+
+ ~diag_frame ()
+ {
+ if (func_ != nullptr )
+ stack = prev_;
+ }
static void
apply (const diag_record& r)
@@ -175,8 +199,6 @@ namespace build2
f->func_ (*f, r);
}
- ~diag_frame () {stack = prev_;}
-
static
#ifdef __cpp_thread_local
thread_local
@@ -185,6 +207,13 @@ namespace build2
#endif
const diag_frame* stack; // Tip of the stack.
+ struct stack_guard
+ {
+ explicit stack_guard (const diag_frame* s): s_ (stack) {stack = s;}
+ ~stack_guard () {stack = s_;}
+ const diag_frame* s_;
+ };
+
private:
void (*func_) (const diag_frame&, const diag_record&);
const diag_frame* prev_;
diff --git a/build2/test/rule.cxx b/build2/test/rule.cxx
index b21f0c3..4225d24 100644
--- a/build2/test/rule.cxx
+++ b/build2/test/rule.cxx
@@ -498,20 +498,20 @@ namespace build2
if (!sched.async (target::count_busy (),
t[a].task_count,
- [this] (scope_state& r,
+ [this] (const diag_frame* ds,
+ scope_state& r,
const target& t,
const testscript& ts,
- const dir_path& wd,
- const diag_frame* ds)
+ const dir_path& wd)
{
- diag_frame df (ds);
+ diag_frame::stack_guard dsg (ds);
r = perform_script_impl (t, ts, wd, *this);
},
+ diag_frame::stack,
ref (r),
cref (t),
cref (ts),
- cref (wd),
- diag_frame::stack))
+ cref (wd)))
{
// Executed synchronously. If failed and we were not asked to keep
// going, bail out.
diff --git a/build2/test/script/parser.cxx b/build2/test/script/parser.cxx
index 69fe979..d3cab0a 100644
--- a/build2/test/script/parser.cxx
+++ b/build2/test/script/parser.cxx
@@ -3018,18 +3018,18 @@ namespace build2
//
const diag_frame* df (diag_frame::stack); // UBSan workaround.
if (!sched.async (task_count,
- [] (scope& s,
+ [] (const diag_frame* ds,
+ scope& s,
script& scr,
- runner& r,
- const diag_frame* ds)
+ runner& r)
{
- diag_frame df (ds);
+ diag_frame::stack_guard dsg (ds);
execute_impl (s, scr, r);
},
+ df,
ref (*chain),
ref (*script_),
- ref (*runner_),
- df))
+ ref (*runner_)))
{
// Bail out if the scope has failed and we weren't instructed
// to keep going.