Diffstat (limited to 'libbuild2/algorithm.ixx')
-rw-r--r--  libbuild2/algorithm.ixx | 470
 1 file changed, 400 insertions(+), 70 deletions(-)
diff --git a/libbuild2/algorithm.ixx b/libbuild2/algorithm.ixx
index 24d9e5b..836dbed 100644
--- a/libbuild2/algorithm.ixx
+++ b/libbuild2/algorithm.ixx
@@ -45,6 +45,39 @@ namespace build2
k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
}
+ inline const target*
+ search_existing (context& ctx,
+ const target_type& tt,
+ const prerequisite_key& k)
+ {
+ return search_existing (
+ ctx,
+ prerequisite_key {
+ k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
+ }
+
+ inline const target&
+ search_new (context& ctx,
+ const target_type& tt,
+ const prerequisite_key& k)
+ {
+ return search_new (
+ ctx,
+ prerequisite_key {
+ k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
+ }
+
+ inline pair<target&, ulock>
+ search_new_locked (context& ctx,
+ const target_type& tt,
+ const prerequisite_key& k)
+ {
+ return search_new_locked (
+ ctx,
+ prerequisite_key {
+ k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
+ }
+
inline const target&
search (const target& t,
const target_type& type,
@@ -110,6 +143,48 @@ namespace build2
scope});
}
+ inline const target&
+ search_new (context& ctx,
+ const target_type& type,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext,
+ const scope* scope)
+ {
+ return search_new (
+ ctx,
+ prerequisite_key {
+ nullopt,
+ {
+ &type,
+ &dir, &out, &name,
+ ext != nullptr ? optional<string> (*ext) : nullopt
+ },
+ scope});
+ }
+
+ inline pair<target&, ulock>
+ search_new_locked (context& ctx,
+ const target_type& type,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext,
+ const scope* scope)
+ {
+ return search_new_locked (
+ ctx,
+ prerequisite_key {
+ nullopt,
+ {
+ &type,
+ &dir, &out, &name,
+ ext != nullptr ? optional<string> (*ext) : nullopt
+ },
+ scope});
+ }
+
template <typename T>
inline const T&
search (const target& t,
@@ -123,15 +198,32 @@ namespace build2
t, T::static_type, dir, out, name, ext, scope).template as<T> ();
}
+ template <typename T>
+ inline const T*
+ search_existing (context& ctx,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext,
+ const scope* scope)
+ {
+ const target* r (
+ search_existing (
+ ctx, T::static_type, dir, out, name, ext, scope));
+ return r != nullptr ? &r->template as<T> () : nullptr;
+ }
+
LIBBUILD2_SYMEXPORT target_lock
- lock_impl (action, const target&, optional<scheduler::work_queue>);
+ lock_impl (action, const target&,
+ optional<scheduler::work_queue>,
+ uint64_t = 0);
LIBBUILD2_SYMEXPORT void
unlock_impl (action, target&, size_t);
inline target_lock::
- target_lock (action_type a, target_type* t, size_t o)
- : action (a), target (t), offset (o)
+ target_lock (action_type a, target_type* t, size_t o, bool f)
+ : action (a), target (t), offset (o), first (f)
{
if (target != nullptr)
prev = stack (this);
@@ -143,7 +235,8 @@ namespace build2
if (target != nullptr && prev != this)
{
const target_lock* cur (stack (prev));
- assert (cur == this);
+ if (cur != this) // NDEBUG
+ assert (cur == this);
prev = this;
}
}
@@ -158,7 +251,8 @@ namespace build2
if (prev != this)
{
const target_lock* cur (stack (prev));
- assert (cur == this);
+ if (cur != this) // NDEBUG
+ assert (cur == this);
}
target = nullptr;
@@ -168,14 +262,15 @@ namespace build2
inline auto target_lock::
release () -> data
{
- data r {action, target, offset};
+ data r {action, target, offset, first};
if (target != nullptr)
{
if (prev != this)
{
const target_lock* cur (stack (prev));
- assert (cur == this);
+ if (cur != this) // NDEBUG
+ assert (cur == this);
}
target = nullptr;
@@ -191,7 +286,7 @@ namespace build2
}
inline target_lock::
- target_lock (target_lock&& x)
+ target_lock (target_lock&& x) noexcept
: action (x.action), target (x.target), offset (x.offset)
{
if (target != nullptr)
@@ -199,7 +294,8 @@ namespace build2
if (x.prev != &x)
{
const target_lock* cur (stack (this));
- assert (cur == &x);
+ if (cur != &x) // NDEBUG
+ assert (cur == &x);
prev = x.prev;
}
else
@@ -210,7 +306,7 @@ namespace build2
}
inline target_lock& target_lock::
- operator= (target_lock&& x)
+ operator= (target_lock&& x) noexcept
{
if (this != &x)
{
@@ -225,7 +321,8 @@ namespace build2
if (x.prev != &x)
{
const target_lock* cur (stack (this));
- assert (cur == &x);
+ if (cur != &x) // NDEBUG
+ assert (cur == &x);
prev = x.prev;
}
else
@@ -277,7 +374,7 @@ namespace build2
n += e;
}
- return add_adhoc_member (t, tt, t.dir, t.out, move (n));
+ return add_adhoc_member (t, tt, t.dir, t.out, move (n), nullopt /* ext */);
}
inline target*
@@ -297,30 +394,37 @@ namespace build2
}
LIBBUILD2_SYMEXPORT const rule_match*
- match_rule (action, target&, const rule* skip, bool try_match = false);
+ match_rule_impl (action, target&,
+ uint64_t options,
+ const rule* skip,
+ bool try_match = false,
+ match_extra* = nullptr);
LIBBUILD2_SYMEXPORT recipe
apply_impl (action, target&, const rule_match&);
LIBBUILD2_SYMEXPORT pair<bool, target_state>
- match (action, const target&, size_t, atomic_count*, bool try_match = false);
+ match_impl (action, const target&,
+ uint64_t options,
+ size_t, atomic_count*,
+ bool try_match = false);
inline void
- match_inc_dependens (action a, const target& t)
+ match_inc_dependents (action a, const target& t)
{
t.ctx.dependency_count.fetch_add (1, memory_order_relaxed);
t[a].dependents.fetch_add (1, memory_order_release);
}
inline target_state
- match (action a, const target& t, bool fail)
+ match_sync (action a, const target& t, uint64_t options, bool fail)
{
assert (t.ctx.phase == run_phase::match);
- target_state r (match (a, t, 0, nullptr).second);
+ target_state r (match_impl (a, t, options, 0, nullptr).second);
if (r != target_state::failed)
- match_inc_dependens (a, t);
+ match_inc_dependents (a, t);
else if (fail)
throw failed ();
@@ -328,17 +432,17 @@ namespace build2
}
inline pair<bool, target_state>
- try_match (action a, const target& t, bool fail)
+ try_match_sync (action a, const target& t, uint64_t options, bool fail)
{
assert (t.ctx.phase == run_phase::match);
pair<bool, target_state> r (
- match (a, t, 0, nullptr, true /* try_match */));
+ match_impl (a, t, options, 0, nullptr, true /* try_match */));
if (r.first)
{
if (r.second != target_state::failed)
- match_inc_dependens (a, t);
+ match_inc_dependents (a, t);
else if (fail)
throw failed ();
}
@@ -347,11 +451,11 @@ namespace build2
}
inline pair<bool, target_state>
- match (action a, const target& t, unmatch um)
+ match_sync (action a, const target& t, unmatch um, uint64_t options)
{
assert (t.ctx.phase == run_phase::match);
- target_state s (match (a, t, 0, nullptr).second);
+ target_state s (match_impl (a, t, options, 0, nullptr).second);
if (s == target_state::failed)
throw failed ();
@@ -378,42 +482,90 @@ namespace build2
// cannot change their mind).
//
if ((s == target_state::unchanged && t.group == nullptr) ||
- t[a].dependents.load (memory_order_consume) != 0)
+ t[a].dependents.load (memory_order_relaxed) != 0)
return make_pair (true, s);
break;
}
}
- match_inc_dependens (a, t);
+ match_inc_dependents (a, t);
return make_pair (false, s);;
}
inline target_state
match_async (action a, const target& t,
size_t sc, atomic_count& tc,
+ uint64_t options,
bool fail)
{
context& ctx (t.ctx);
assert (ctx.phase == run_phase::match);
- target_state r (match (a, t, sc, &tc).second);
+ target_state r (match_impl (a, t, options, sc, &tc).second);
- if (fail && !ctx.keep_going && r == target_state::failed)
+ if (r == target_state::failed && fail && !ctx.keep_going)
throw failed ();
return r;
}
- // Clear rule match-specific target data.
+ inline target_state
+ match_complete (action a, const target& t, uint64_t options, bool fail)
+ {
+ return match_sync (a, t, options, fail);
+ }
+
+ inline pair<bool, target_state>
+ match_complete (action a, const target& t, unmatch um, uint64_t options)
+ {
+ return match_sync (a, t, um, options);
+ }
+
+ inline target_state
+ match_direct_sync (action a, const target& t, uint64_t options, bool fail)
+ {
+ assert (t.ctx.phase == run_phase::match);
+
+ target_state r (match_impl (a, t, options, 0, nullptr).second);
+
+ if (r == target_state::failed && fail)
+ throw failed ();
+
+ return r;
+ }
+
+ inline target_state
+ match_direct_complete (action a, const target& t,
+ uint64_t options,
+ bool fail)
+ {
+ return match_direct_sync (a, t, options, fail);
+ }
+
+ // Clear rule match-specific target data (except match_extra).
//
inline void
clear_target (action a, target& t)
{
- t[a].vars.clear ();
+ target::opstate& s (t.state[a]);
+ s.recipe = nullptr;
+ s.recipe_keep = false;
+ s.resolve_counted = false;
+ s.vars.clear ();
t.prerequisite_targets[a].clear ();
- if (a.inner ())
- t.clear_data ();
+ }
+
+ LIBBUILD2_SYMEXPORT void
+ set_rule_trace (target_lock&, const rule_match*);
+
+ inline void
+ set_rule (target_lock& l, const rule_match* r)
+ {
+ if (l.target->ctx.trace_match == nullptr)
+ (*l.target)[l.action].rule = r;
+ else
+ set_rule_trace (l, r);
}
inline void
@@ -423,6 +575,7 @@ namespace build2
target::opstate& s (t[l.action]);
s.recipe = move (r);
+ s.recipe_group_action = false;
// If this is a noop recipe, then mark the target unchanged to allow for
// some optimizations.
@@ -448,69 +601,112 @@ namespace build2
// likely. The alternative (trying to "merge" the count keeping track of
// whether inner and/or outer is noop) gets hairy rather quickly.
//
- if (l.action.inner ())
+ if (f != nullptr && *f == &group_action)
+ s.recipe_group_action = true;
+ else
{
- if (f == nullptr || *f != &group_action)
+ if (l.action.inner ())
t.ctx.target_count.fetch_add (1, memory_order_relaxed);
}
}
}
inline void
- match_recipe (target_lock& l, recipe r)
+ match_recipe (target_lock& l, recipe r, uint64_t options)
{
- assert (l.target != nullptr &&
- l.offset != target::offset_matched &&
+ assert (options != 0 &&
+ l.target != nullptr &&
+ l.offset < target::offset_matched &&
l.target->ctx.phase == run_phase::match);
+ match_extra& me ((*l.target)[l.action].match_extra);
+
+ me.reinit (false /* fallback */);
+ me.cur_options = options; // Already applied, so cur_, not new_options.
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
clear_target (l.action, *l.target);
- (*l.target)[l.action].rule = nullptr; // No rule.
+ set_rule (l, nullptr); // No rule.
set_recipe (l, move (r));
l.offset = target::offset_applied;
}
inline void
- match_rule (target_lock& l, const rule_match& r)
+ match_rule (target_lock& l, const rule_match& r, uint64_t options)
{
assert (l.target != nullptr &&
- l.offset != target::offset_matched &&
+ l.offset < target::offset_matched &&
l.target->ctx.phase == run_phase::match);
+ match_extra& me ((*l.target)[l.action].match_extra);
+
+ me.reinit (false /* fallback */);
+ me.new_options = options;
clear_target (l.action, *l.target);
- (*l.target)[l.action].rule = &r;
+ set_rule (l, &r);
l.offset = target::offset_matched;
}
inline recipe
- match_delegate (action a, target& t, const rule& dr, bool try_match)
+ match_delegate (action a, target& t,
+ const rule& dr,
+ uint64_t options,
+ bool try_match)
{
assert (t.ctx.phase == run_phase::match);
// Note: we don't touch any of the t[a] state since that was/will be set
// for the delegating rule.
//
- const rule_match* r (match_rule (a, t, &dr, try_match));
+ const rule_match* r (match_rule_impl (a, t, options, &dr, try_match));
return r != nullptr ? apply_impl (a, t, *r) : empty_recipe;
}
inline target_state
- match_inner (action a, const target& t)
+ match_inner (action a, const target& t, uint64_t options)
{
// In a sense this is like any other dependency.
//
assert (a.outer ());
- return match (a.inner_action (), t);
+ return match_sync (a.inner_action (), t, options);
}
inline pair<bool, target_state>
- match_inner (action a, const target& t, unmatch um)
+ match_inner (action a, const target& t, unmatch um, uint64_t options)
{
assert (a.outer ());
- return match (a.inner_action (), t, um);
+ return match_sync (a.inner_action (), t, um, options);
+ }
+
+ // Note: rematch is basically normal match but without the counts increment,
+ // so we just delegate to match_direct_*().
+ //
+ inline target_state
+ rematch_sync (action a, const target& t,
+ uint64_t options,
+ bool fail)
+ {
+ return match_direct_sync (a, t, options, fail);
+ }
+
+ inline target_state
+ rematch_async (action a, const target& t,
+ size_t start_count, atomic_count& task_count,
+ uint64_t options,
+ bool fail)
+ {
+ return match_async (a, t, start_count, task_count, options, fail);
+ }
+
+ inline target_state
+ rematch_complete (action a, const target& t,
+ uint64_t options,
+ bool fail)
+ {
+ return match_direct_complete (a, t, options, fail);
}
LIBBUILD2_SYMEXPORT void
- resolve_group_impl (action, const target&, target_lock);
+ resolve_group_impl (target_lock&&);
inline const target*
resolve_group (action a, const target& t)
@@ -530,7 +726,7 @@ namespace build2
// then unlock and return.
//
if (t.group == nullptr && l.offset < target::offset_tried)
- resolve_group_impl (a, t, move (l));
+ resolve_group_impl (move (l));
break;
}
@@ -544,17 +740,21 @@ namespace build2
inline void
inject (action a, target& t, const target& p)
{
- match (a, p);
+ match_sync (a, p);
t.prerequisite_targets[a].emplace_back (&p);
}
LIBBUILD2_SYMEXPORT void
- match_prerequisites (action, target&, const match_search&, const scope*);
+ match_prerequisites (action, target&,
+ const match_search&,
+ const scope*,
+ bool search_only);
LIBBUILD2_SYMEXPORT void
match_prerequisite_members (action, target&,
const match_search_member&,
- const scope*);
+ const scope*,
+ bool search_only);
inline void
match_prerequisites (action a, target& t, const match_search& ms)
@@ -565,7 +765,21 @@ namespace build2
ms,
(a.operation () != clean_id || t.is_a<alias> ()
? nullptr
- : &t.root_scope ()));
+ : &t.root_scope ()),
+ false);
+ }
+
+ inline void
+ search_prerequisites (action a, target& t, const match_search& ms)
+ {
+ match_prerequisites (
+ a,
+ t,
+ ms,
+ (a.operation () != clean_id || t.is_a<alias> ()
+ ? nullptr
+ : &t.root_scope ()),
+ true);
}
inline void
@@ -573,13 +787,16 @@ namespace build2
const match_search_member& msm)
{
if (a.operation () != clean_id || t.is_a<alias> ())
- match_prerequisite_members (a, t, msm, nullptr);
+ match_prerequisite_members (a, t, msm, nullptr, false);
else
{
// Note that here we don't iterate over members even for see-through
// groups since the group target should clean eveything up. A bit of an
// optimization.
//
+ // @@ TMP: I wonder if this still holds for the new group semantics
+ // we have in Qt automoc? Also below.
+ //
match_search ms (
msm
? [&msm] (action a,
@@ -591,40 +808,85 @@ namespace build2
}
: match_search ());
- match_prerequisites (a, t, ms, &t.root_scope ());
+ match_prerequisites (a, t, ms, &t.root_scope (), false);
+ }
+ }
+
+ inline void
+ search_prerequisite_members (action a, target& t,
+ const match_search_member& msm)
+ {
+ if (a.operation () != clean_id || t.is_a<alias> ())
+ match_prerequisite_members (a, t, msm, nullptr, true);
+ else
+ {
+ // Note that here we don't iterate over members even for see-through
+ // groups since the group target should clean eveything up. A bit of an
+ // optimization.
+ //
+ // @@ TMP: I wonder if this still holds for the new group semantics
+ // we have in Qt automoc? Also above.
+ //
+ match_search ms (
+ msm
+ ? [&msm] (action a,
+ const target& t,
+ const prerequisite& p,
+ include_type i)
+ {
+ return msm (a, t, prerequisite_member {p, nullptr}, i);
+ }
+ : match_search ());
+
+ match_prerequisites (a, t, ms, &t.root_scope (), true);
}
}
inline void
match_prerequisites (action a, target& t, const scope& s)
{
- match_prerequisites (a, t, nullptr, &s);
+ match_prerequisites (a, t, nullptr, &s, false);
}
inline void
- match_prerequisite_members (action a, target& t, const scope& s)
+ search_prerequisites (action a, target& t, const scope& s)
{
- match_prerequisite_members (a, t, nullptr, &s);
+ match_prerequisites (a, t, nullptr, &s, true);
}
- LIBBUILD2_SYMEXPORT target_state
- execute (action, const target&, size_t, atomic_count*);
+ inline void
+ match_prerequisite_members (action a, target& t, const scope& s)
+ {
+ match_prerequisite_members (a, t, nullptr, &s, false);
+ }
- inline target_state
- execute (action a, const target& t)
+ inline void
+ search_prerequisite_members (action a, target& t, const scope& s)
{
- return execute (a, t, 0, nullptr);
+ match_prerequisite_members (a, t, nullptr, &s, true);
}
+ LIBBUILD2_SYMEXPORT target_state
+ execute_impl (action, const target&, size_t, atomic_count*);
+
inline target_state
- execute_wait (action a, const target& t)
+ execute_sync (action a, const target& t, bool fail)
{
- if (execute (a, t) == target_state::busy)
- t.ctx.sched.wait (t.ctx.count_executed (),
+ target_state r (execute_impl (a, t, 0, nullptr));
+
+ if (r == target_state::busy)
+ {
+ t.ctx.sched->wait (t.ctx.count_executed (),
t[a].task_count,
scheduler::work_none);
- return t.executed_state (a);
+ r = t.executed_state (a, false);
+ }
+
+ if (r == target_state::failed && fail)
+ throw failed ();
+
+ return r;
}
inline target_state
@@ -632,9 +894,62 @@ namespace build2
size_t sc, atomic_count& tc,
bool fail)
{
- target_state r (execute (a, t, sc, &tc));
+ target_state r (execute_impl (a, t, sc, &tc));
- if (fail && !t.ctx.keep_going && r == target_state::failed)
+ if (r == target_state::failed && fail && !t.ctx.keep_going)
+ throw failed ();
+
+ return r;
+ }
+
+ inline target_state
+ execute_complete (action a, const target& t)
+ {
+ // Note: standard operation execute() sidesteps this and calls
+ // executed_state() directly.
+
+ context& ctx (t.ctx);
+
+ // If the target is still busy, wait for its completion.
+ //
+ ctx.sched->wait (ctx.count_executed (),
+ t[a].task_count,
+ scheduler::work_none);
+
+ return t.executed_state (a);
+ }
+
+ LIBBUILD2_SYMEXPORT target_state
+ execute_direct_impl (action, const target&, size_t, atomic_count*);
+
+ inline target_state
+ execute_direct_sync (action a, const target& t, bool fail)
+ {
+ target_state r (execute_direct_impl (a, t, 0, nullptr));
+
+ if (r == target_state::busy)
+ {
+ t.ctx.sched->wait (t.ctx.count_executed (),
+ t[a].task_count,
+ scheduler::work_none);
+
+ r = t.executed_state (a, false);
+ }
+
+ if (r == target_state::failed && fail)
+ throw failed ();
+
+ return r;
+ }
+
+ inline target_state
+ execute_direct_async (action a, const target& t,
+ size_t sc, atomic_count& tc,
+ bool fail)
+ {
+ target_state r (execute_direct_impl (a, t, sc, &tc));
+
+ if (r == target_state::failed && fail && !t.ctx.keep_going)
throw failed ();
return r;
@@ -650,7 +965,7 @@ namespace build2
execute_inner (action a, const target& t)
{
assert (a.outer ());
- return execute_wait (a.inner_action (), t);
+ return execute_sync (a.inner_action (), t);
}
inline target_state
@@ -726,6 +1041,12 @@ namespace build2
const timestamp&, const execute_filter&,
size_t);
+ LIBBUILD2_SYMEXPORT pair<optional<target_state>, const target*>
+ reverse_execute_prerequisites (const target_type*,
+ action, const target&,
+ const timestamp&, const execute_filter&,
+ size_t);
+
inline optional<target_state>
execute_prerequisites (action a, const target& t,
const timestamp& mt, const execute_filter& ef,
@@ -734,6 +1055,14 @@ namespace build2
return execute_prerequisites (nullptr, a, t, mt, ef, n).first;
}
+ inline optional<target_state>
+ reverse_execute_prerequisites (action a, const target& t,
+ const timestamp& mt, const execute_filter& ef,
+ size_t n)
+ {
+ return reverse_execute_prerequisites (nullptr, a, t, mt, ef, n).first;
+ }
+
template <typename T>
inline pair<optional<target_state>, const T&>
execute_prerequisites (action a, const target& t,
@@ -767,8 +1096,9 @@ namespace build2
p.first, static_cast<const T&> (p.second));
}
+ template <typename T>
inline target_state
- execute_members (action a, const target& t, const target* ts[], size_t n)
+ execute_members (action a, const target& t, T ts[], size_t n)
{
return t.ctx.current_mode == execution_mode::first
? straight_execute_members (a, t, ts, n, 0)
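
The sketch below is not part of this commit; it is a minimal, hypothetical
illustration of the renamed interface (match() is now match_sync() and
execute_wait() is now execute_sync()), written against the inline definitions
shown in this diff. It assumes the declarations in libbuild2/algorithm.hxx
supply defaults for the new options and fail parameters, as the two-argument
match_sync (a, p) and execute_sync (a.inner_action (), t) calls above
suggest; the helper names are made up for illustration.

  // Hypothetical helpers, assuming <libbuild2/algorithm.hxx> is included and
  // we are inside namespace build2.
  //
  // Match phase: match a known prerequisite target and record it in the
  // target's prerequisite_targets list, mirroring inject() above.
  //
  static void
  match_and_record (action a, target& t, const target& p)
  {
    match_sync (a, p); // Previously spelled match (a, p).
    t.prerequisite_targets[a].emplace_back (&p);
  }

  // Execute phase: execute such a prerequisite, waiting for completion if it
  // is still busy (previously execute_wait()).
  //
  static target_state
  update_prerequisite (action a, const target& p)
  {
    return execute_sync (a, p, true /* fail */);
  }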