Diffstat (limited to 'libbuild2/algorithm.cxx')
-rw-r--r-- libbuild2/algorithm.cxx | 1986
1 file changed, 1593 insertions(+), 393 deletions(-)
diff --git a/libbuild2/algorithm.cxx b/libbuild2/algorithm.cxx
index 355e633..16f1503 100644
--- a/libbuild2/algorithm.cxx
+++ b/libbuild2/algorithm.cxx
@@ -54,29 +54,45 @@ namespace build2
const target&
search (const target& t, const prerequisite_key& pk)
{
- assert (t.ctx.phase == run_phase::match);
+ context& ctx (t.ctx);
+
+ assert (ctx.phase == run_phase::match);
// If this is a project-qualified prerequisite, then this is import's
- // business.
+ // business (phase 2).
//
if (pk.proj)
- return import (t.ctx, pk);
+ return import2 (ctx, pk);
- if (const target* pt = pk.tk.type->search (t, pk))
+ if (const target* pt = pk.tk.type->search (ctx, &t, pk))
return *pt;
- return create_new_target (t.ctx, pk);
+ if (pk.tk.out->empty ())
+ return create_new_target (ctx, pk);
+
+ // If this is triggered, then you are probably not passing scope to
+ // search() (which leads to search_existing_file() being skipped).
+ //
+ fail << "no existing source file for prerequisite " << pk << endf;
}
pair<target&, ulock>
search_locked (const target& t, const prerequisite_key& pk)
{
- assert (t.ctx.phase == run_phase::match && !pk.proj);
+ context& ctx (t.ctx);
- if (const target* pt = pk.tk.type->search (t, pk))
+ assert (ctx.phase == run_phase::match && !pk.proj);
+
+ if (const target* pt = pk.tk.type->search (ctx, &t, pk))
return {const_cast<target&> (*pt), ulock ()};
- return create_new_target_locked (t.ctx, pk);
+ if (pk.tk.out->empty ())
+ return create_new_target_locked (ctx, pk);
+
+ // If this is triggered, then you are probably not passing scope to
+ // search() (which leads to search_existing_file() being skipped).
+ //
+ fail << "no existing source file for prerequisite " << pk << endf;
}
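
// A minimal, self-contained sketch (simplified stand-in types, not the real
// build2 prerequisite_key) of the lookup order that search() and
// search_locked() above implement.
//
#include <optional>
#include <string>
#include <stdexcept>

namespace sketch
{
  struct key
  {
    std::optional<std::string> proj; // Project qualification, if any.
    std::string out;                 // Out directory; empty if undetermined.
  };

  enum class result {imported, existing, created};

  result
  search (const key& pk, bool found_existing)
  {
    if (pk.proj)
      return result::imported; // Project-qualified: import's business.

    if (found_existing)
      return result::existing; // Found by the target type's search hook.

    if (pk.out.empty ())
      return result::created;  // Undetermined out: create a new target.

    // Out specified but no existing target: most likely a missing source
    // file (or a scope-less search() call; see the comments above).
    //
    throw std::runtime_error ("no existing source file for prerequisite");
  }
}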
const target*
@@ -84,7 +100,7 @@ namespace build2
{
return pk.proj
? import_existing (ctx, pk)
- : search_existing_target (ctx, pk);
+ : pk.tk.type->search (ctx, nullptr /* existing */, pk);
}
const target&
@@ -92,7 +108,7 @@ namespace build2
{
assert (ctx.phase == run_phase::load || ctx.phase == run_phase::match);
- if (const target* pt = search_existing_target (ctx, pk))
+ if (const target* pt = search_existing_target (ctx, pk, true /*out_only*/))
return *pt;
return create_new_target (ctx, pk);
@@ -103,14 +119,14 @@ namespace build2
{
assert (ctx.phase == run_phase::load || ctx.phase == run_phase::match);
- if (const target* pt = search_existing_target (ctx, pk))
+ if (const target* pt = search_existing_target (ctx, pk, true /*out_only*/))
return {const_cast<target&> (*pt), ulock ()};
return create_new_target_locked (ctx, pk);
}
const target&
- search (const target& t, name n, const scope& s, const target_type* tt)
+ search (const target& t, name&& n, const scope& s, const target_type* tt)
{
assert (t.ctx.phase == run_phase::match);
@@ -164,16 +180,12 @@ namespace build2
}
bool q (cn.qualified ());
-
- // @@ OUT: for now we assume the prerequisite's out is undetermined.
- // Would need to pass a pair of names.
- //
prerequisite_key pk {
n.proj, {tt, &n.dir, q ? &empty_dir_path : &out, &n.value, ext}, &s};
return q
? import_existing (s.ctx, pk)
- : search_existing_target (s.ctx, pk);
+ : tt->search (s.ctx, nullptr /* existing */, pk);
}
const target*
@@ -220,8 +232,14 @@ namespace build2
// If the work_queue is absent, then we don't wait.
//
+ // While already applied or executed targets are normally not locked, if
+ // options contain any bits that are not already in cur_options, then the
+ // target is locked even in these states.
+ //
target_lock
- lock_impl (action a, const target& ct, optional<scheduler::work_queue> wq)
+ lock_impl (action a, const target& ct,
+ optional<scheduler::work_queue> wq,
+ uint64_t options)
{
context& ctx (ct.ctx);
@@ -236,7 +254,8 @@ namespace build2
size_t appl (b + target::offset_applied);
size_t busy (b + target::offset_busy);
- atomic_count& task_count (ct[a].task_count);
+ const target::opstate& cs (ct[a]);
+ atomic_count& task_count (cs.task_count);
while (!task_count.compare_exchange_strong (
e,
@@ -256,7 +275,7 @@ namespace build2
fail << "dependency cycle detected involving target " << ct;
if (!wq)
- return target_lock {a, nullptr, e - b};
+ return target_lock {a, nullptr, e - b, false};
// We also unlock the phase for the duration of the wait. Why?
// Consider this scenario: we are trying to match a dir{} target whose
@@ -265,14 +284,20 @@ namespace build2
// to switch the phase to load. Which would result in a deadlock
// unless we release the phase.
//
- phase_unlock u (ct.ctx, true /* unlock */, true /* delay */);
- e = ctx.sched.wait (busy - 1, task_count, u, *wq);
+ phase_unlock u (ct.ctx, true /* delay */);
+ e = ctx.sched->wait (busy - 1, task_count, u, *wq);
}
- // We don't lock already applied or executed targets.
+ // We don't lock already applied or executed targets unless there
+ // are new options.
+ //
+ // Note: we don't have the lock yet so we must use atomic cur_options.
+ // We also have to re-check this once we've grabbed the lock.
//
- if (e >= appl)
- return target_lock {a, nullptr, e - b};
+ if (e >= appl &&
+ (cs.match_extra.cur_options_.load (memory_order_relaxed) & options)
+ == options)
+ return target_lock {a, nullptr, e - b, false};
}
// We now have the lock. Analyze the old value and decide what to do.
@@ -281,24 +306,41 @@ namespace build2
target::opstate& s (t[a]);
size_t offset;
- if (e <= b)
+ bool first;
+ if ((first = (e <= b)))
{
// First lock for this operation.
//
+ // Note that we use 0 match_extra::cur_options_ as an indication of not
+ // being applied yet. In particular, in the match phase, this is used to
+ // distinguish between the "busy because not applied yet" and "busy
+ // because relocked to reapply match options" cases. See
+ // target::matched() for details.
+ //
s.rule = nullptr;
s.dependents.store (0, memory_order_release);
+ s.match_extra.cur_options_.store (0, memory_order_relaxed);
offset = target::offset_touched;
}
else
{
+ // Re-check the options if already applied or worse.
+ //
+ if (e >= appl && (s.match_extra.cur_options & options) == options)
+ {
+ // Essentially unlock_impl().
+ //
+ task_count.store (e, memory_order_release);
+ ctx.sched->resume (task_count);
+
+ return target_lock {a, nullptr, e - b, false};
+ }
+
offset = e - b;
- assert (offset == target::offset_touched ||
- offset == target::offset_tried ||
- offset == target::offset_matched);
}
- return target_lock {a, &t, offset};
+ return target_lock {a, &t, offset, first};
}
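
// A self-contained, runnable sketch (simplified model, not the real build2
// types) of the option-bits test used by lock_impl() above: an already
// applied or executed target is only relocked if the caller requests option
// bits not yet in cur_options.
//
#include <atomic>
#include <cstdint>
#include <cassert>

int main ()
{
  std::atomic<std::uint64_t> cur_options {0x1}; // Options already applied.

  auto need_lock = [&cur_options] (std::uint64_t options)
  {
    // Same test as in lock_impl(): skip the lock only if every requested
    // bit is already present. Note the relaxed load: we don't hold the
    // lock yet, so lock_impl() re-checks this after acquiring it.
    //
    return (cur_options.load (std::memory_order_relaxed) & options) != options;
  };

  assert (!need_lock (0x1)); // Subset of current options: no lock.
  assert (need_lock (0x3));  // Bit 0x2 is new: relock to reapply.
}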
void
@@ -314,7 +356,7 @@ namespace build2
// this target.
//
task_count.store (offset + ctx.count_base (), memory_order_release);
- ctx.sched.resume (task_count);
+ ctx.sched->resume (task_count);
}
target&
@@ -322,7 +364,8 @@ namespace build2
const target_type& tt,
dir_path dir,
dir_path out,
- string n)
+ string n,
+ optional<string> ext)
{
tracer trace ("add_adhoc_member");
@@ -332,113 +375,347 @@ namespace build2
if (*mp != nullptr) // Might already be there.
return **mp;
- target* m (nullptr);
+ pair<target&, ulock> r (
+ t.ctx.targets.insert_locked (tt,
+ move (dir),
+ move (out),
+ move (n),
+ move (ext),
+ target_decl::implied,
+ trace,
+ true /* skip_find */));
+
+ target& m (r.first);
+
+ if (!r.second)
+ fail << "target " << m << " already exists and cannot be made "
+ << "ad hoc member of group " << t;
+
+ m.group = &t;
+ *mp = &m;
+
+ return m;
+ };
+
+ pair<target&, bool>
+ add_adhoc_member_identity (target& t,
+ const target_type& tt,
+ dir_path dir,
+ dir_path out,
+ string n,
+ optional<string> ext,
+ const location& loc)
+ {
+ // NOTE: see similar code in parser::enter_adhoc_members().
+
+ tracer trace ("add_adhoc_member_identity");
+
+ pair<target&, ulock> r (
+ t.ctx.targets.insert_locked (tt,
+ move (dir),
+ move (out),
+ move (n),
+ move (ext),
+ target_decl::implied,
+ trace,
+ true /* skip_find */));
+ target& m (r.first);
+
+ // Add as an ad hoc member at the end of the chain skipping duplicates.
+ //
+ const_ptr<target>* mp (&t.adhoc_member);
+ for (; *mp != nullptr; mp = &(*mp)->adhoc_member)
{
- pair<target&, ulock> r (
- t.ctx.targets.insert_locked (tt,
- move (dir),
- move (out),
- move (n),
- nullopt /* ext */,
- target_decl::implied,
- trace,
- true /* skip_find */));
+ if (*mp == &m)
+ return {m, false};
+ }
+
+ if (!r.second)
+ fail (loc) << "target " << m << " already exists and cannot be made "
+ << "ad hoc member of group " << t;
+
+ m.group = &t;
+ *mp = &m;
- if (r.second) // Inserted.
+ return {m, true};
+ }
+
+ static bool
+ trace_target (const target& t, const vector<name>& ns)
+ {
+ for (const name& n: ns)
+ {
+ if (n.untyped () || n.qualified () || n.pattern)
+ fail << "unsupported trace target name '" << n << "'" <<
+ info << "unqualified, typed, non-pattern name expected";
+
+ if (!n.dir.empty ())
{
- m = &r.first;
- m->group = &t;
+ if (n.dir.relative () || !n.dir.normalized ())
+ fail << "absolute and normalized trace target directory expected";
+
+ if (t.dir != n.dir)
+ continue;
}
- }
- assert (m != nullptr);
- *mp = m;
+ if (n.type == t.type ().name && n.value == t.name)
+ return true;
+ }
- return *m;
- };
+ return false;
+ }
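
// Illustrative values (assumption: hypothetical project path) that pass the
// checks in trace_target() above, as would be given to the options behind
// ctx.trace_match and ctx.trace_execute: a typed, unqualified, non-pattern
// name, optionally preceded by an absolute, normalized directory:
//
//   exe{hello}
//   /tmp/proj/exe{hello}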
- // Return the matching rule or NULL if no match and try_match is true.
- //
- const rule_match*
- match_rule (action a, target& t, const rule* skip, bool try_match)
+ void
+ set_rule_trace (target_lock& l, const rule_match* rm)
{
- const scope& bs (t.base_scope ());
+ action a (l.action);
+ target& t (*l.target);
- // Match rules in project environment.
+ // Note: see similar code in execute_impl() for execute.
//
- auto_project_env penv;
- if (const scope* rs = bs.root_scope ())
- penv = auto_project_env (*rs);
+ if (trace_target (t, *t.ctx.trace_match))
+ {
+ diag_record dr (info);
- match_extra& me (t[a].match_extra);
+ dr << "matching to " << diag_do (a, t);
- // First check for an ad hoc recipe.
- //
- // Note that a fallback recipe is preferred over a non-fallback rule.
- //
- if (!t.adhoc_recipes.empty ())
- {
- auto df = make_diag_frame (
- [a, &t](const diag_record& dr)
+ if (rm != nullptr)
+ {
+ const rule& r (rm->second);
+
+ if (const adhoc_rule* ar = dynamic_cast<const adhoc_rule*> (&r))
{
- if (verb != 0)
- dr << info << "while matching ad hoc recipe to " << diag_do (a, t);
- });
+ dr << info (ar->loc);
+
+ if (ar->pattern != nullptr)
+ dr << "using ad hoc pattern rule ";
+ else
+ dr << "using ad hoc recipe ";
+ }
+ else
+ dr << info << "using rule ";
+
+ dr << rm->first;
+ }
+ else
+ dr << info << "using directly-assigned recipe";
+ }
- auto match = [a, &t, &me] (const adhoc_rule& r, bool fallback) -> bool
+ t[a].rule = rm;
+ }
+
+ // Note: not static since also called by rule::sub_match().
+ //
+ const rule_match*
+ match_adhoc_recipe (action a, target& t, match_extra& me)
+ {
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
{
- me.init (fallback);
+ if (verb != 0)
+ dr << info << "while matching ad hoc recipe to " << diag_do (a, t);
+ });
- if (auto* f = (a.outer ()
- ? t.ctx.current_outer_oif
- : t.ctx.current_inner_oif)->adhoc_match)
- return f (r, a, t, string () /* hint */, me);
- else
- return r.match (a, t, string () /* hint */, me);
- };
+ auto match = [a, &t, &me] (const adhoc_rule& r, bool fallback) -> bool
+ {
+ me.reinit (fallback);
- // The action could be Y-for-X while the ad hoc recipes are always for
- // X. So strip the Y-for part for comparison (but not for the match()
- // calls; see below for the hairy inner/outer semantics details).
- //
- action ca (a.inner ()
- ? a
- : action (a.meta_operation (), a.outer_operation ()));
+ if (auto* f = (a.outer ()
+ ? t.ctx.current_outer_oif
+ : t.ctx.current_inner_oif)->adhoc_match)
+ return f (r, a, t, string () /* hint */, me);
+ else
+ return r.match (a, t, string () /* hint */, me);
+ };
+ // The action could be Y-for-X while the ad hoc recipes are always for
+ // X. So strip the Y-for part for comparison (but not for the match()
+ // calls; see below for the hairy inner/outer semantics details).
+ //
+ action ca (a.inner ()
+ ? a
+ : action (a.meta_operation (), a.outer_operation ()));
+
+ // If the returned rule_match is NULL, then the second half indicates
+ // whether the rule was found (but did not match).
+ //
+ auto find_match = [&t, &match] (action ca) -> pair<const rule_match*, bool>
+ {
+ // Note that there can be at most one recipe for any action.
+ //
auto b (t.adhoc_recipes.begin ()), e (t.adhoc_recipes.end ());
auto i (find_if (
b, e,
- [&match, ca] (const shared_ptr<adhoc_rule>& r)
+ [ca] (const shared_ptr<adhoc_rule>& r)
{
auto& as (r->actions);
- return (find (as.begin (), as.end (), ca) != as.end () &&
- match (*r, false));
+ return find (as.begin (), as.end (), ca) != as.end ();
}));
- if (i == e)
+ bool f (i != e);
+ if (f)
+ {
+ if (!match (**i, false /* fallback */))
+ i = e;
+ }
+ else
{
// See if we have a fallback implementation.
//
// See the adhoc_rule::reverse_fallback() documentation for details on
// what's going on here.
//
+ // Note that it feels natural not to look for a fallback if a custom
+ // recipe was provided but did not match.
+ //
+ const target_type& tt (t.type ());
i = find_if (
b, e,
- [&match, ca, &t] (const shared_ptr<adhoc_rule>& r)
+ [ca, &tt] (const shared_ptr<adhoc_rule>& r)
{
- auto& as (r->actions);
-
- // Note that the rule could be there but not match (see above),
- // thus this extra check.
+ // Only the rule that provides the "forward" action can provide
+ // "reverse", so there can be at most one such rule.
//
- return (find (as.begin (), as.end (), ca) == as.end () &&
- r->reverse_fallback (ca, t.type ()) &&
- match (*r, true));
+ return r->reverse_fallback (ca, tt);
});
+
+ f = (i != e);
+ if (f)
+ {
+ if (!match (**i, true /* fallback */))
+ i = e;
+ }
+ }
+
+ return pair<const rule_match*, bool> (
+ i != e ? &(*i)->rule_match : nullptr,
+ f);
+ };
+
+ pair<const rule_match*, bool> r (find_match (ca));
+
+ // Provide the "add dist_* and configure_* actions for every perform_*
+ // action unless there is a custom one" semantics (see the equivalent ad
+ // hoc rule registration code in the parser for background).
+ //
+ // Note that handling this in the parser by adding the extra actions is
+ // difficult because we store recipe actions in the recipe itself (
+ // adhoc_rule::actions) and a recipe could be shared among multiple
+ // targets, some of which may provide a "custom one" as another recipe. On
+ // the other hand, handling it here is relatively straightforward.
+ //
+ if (r.first == nullptr && !r.second)
+ {
+ meta_operation_id mo (ca.meta_operation ());
+ if (mo == configure_id || mo == dist_id)
+ {
+ action pa (perform_id, ca.operation ());
+ r = find_match (pa);
+ }
+ }
+
+ return r.first;
+ }
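
// A self-contained sketch (simplified action encoding, not build2's) of the
// perform_* fallback at the end of match_adhoc_recipe() above: if no recipe
// is found (as opposed to found but not matched) for a configure_* or dist_*
// action, the search is retried with the corresponding perform_* action.
//
#include <cassert>
#include <utility>

namespace sketch
{
  enum meta_operation {perform_id = 1, configure_id, dist_id};

  struct action { meta_operation mo; int op; };

  // Stand-in for find_match(): pretend recipes are registered only for
  // perform_*. Returns (matched, found) like the real lambda.
  //
  inline std::pair<bool, bool>
  find_match (action a)
  {
    bool found (a.mo == perform_id);
    return std::make_pair (found, found);
  }

  inline bool
  match_with_fallback (action ca)
  {
    std::pair<bool, bool> r (find_match (ca));

    // Nothing was found at all: map configure_*/dist_* to perform_*.
    //
    if (!r.first && !r.second &&
        (ca.mo == configure_id || ca.mo == dist_id))
      r = find_match (action {perform_id, ca.op});

    return r.first;
  }
}

int main ()
{
  assert (sketch::match_with_fallback ({sketch::configure_id, 1}));
  assert (sketch::match_with_fallback ({sketch::perform_id, 1}));
}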
+
+ // Return the matching rule or NULL if no match and try_match is true.
+ //
+ const rule_match*
+ match_rule_impl (action a, target& t,
+ uint64_t options,
+ const rule* skip,
+ bool try_match,
+ match_extra* pme)
+ {
+ using fallback_rule = adhoc_rule_pattern::fallback_rule;
+
+ auto adhoc_rule_match = [] (const rule_match& r)
+ {
+ return dynamic_cast<const adhoc_rule*> (&r.second.get ());
+ };
+
+ auto fallback_rule_match = [] (const rule_match& r)
+ {
+ return dynamic_cast<const fallback_rule*> (&r.second.get ());
+ };
+
+ // Note: we copy the options value to me.new_options after successfully
+ // matching the rule to make sure rule::match() implementations don't rely
+ // on it.
+ //
+ match_extra& me (pme == nullptr ? t[a].match_extra : *pme);
+
+ if (const target* g = t.group)
+ {
+ // If this is a group with dynamic members, then match it with the
+ // group's rule automatically. See dyndep_rule::inject_group_member()
+ // for background.
+ //
+ if ((g->type ().flags & target_type::flag::dyn_members) ==
+ target_type::flag::dyn_members)
+ {
+ if (g->matched (a, memory_order_acquire))
+ {
+ const rule_match* r (g->state[a].rule);
+ assert (r != nullptr); // Shouldn't happen with dyn_members.
+
+ me.new_options = options;
+ return r;
+ }
+
+ // Assume static member and fall through.
}
- if (i != e)
- return &(*i)->rule_match;
+ // If this is a member of a group-based target, then first try to find a
+ // matching ad hoc recipe/rule by matching (to an ad hoc recipe/rule)
+ // the group but applying to the member. See adhoc_rule::match() for
+ // background, including for why const_cast should be safe.
+ //
+ // To put it another way, if a group is matched by an ad hoc
+ // recipe/rule, then we want all the members to be matched to the same
+ // recipe/rule.
+ //
+ // Note that such a group is dyn_members so we would have tried the
+ // "already matched" case above.
+ //
+ if (g->is_a<group> ())
+ {
+ // We cannot init match_extra from the target if it's unlocked so use
+ // a temporary (it shouldn't be modified if unlocked).
+ //
+ match_extra gme (false /* locked */);
+ if (const rule_match* r = match_rule_impl (a, const_cast<target&> (*g),
+ 0 /* options */,
+ skip,
+ true /* try_match */,
+ &gme))
+ {
+ me.new_options = options;
+ return r;
+ }
+
+ // Fall through to normal match of the member.
+ }
+ }
+
+ const scope& bs (t.base_scope ());
+
+ // Match rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ // First check for an ad hoc recipe.
+ //
+ // Note that a fallback recipe is preferred over a non-fallback rule.
+ //
+ if (!t.adhoc_recipes.empty ())
+ {
+ if (const rule_match* r = match_adhoc_recipe (a, t, me))
+ {
+ me.new_options = options;
+ return r;
+ }
}
// If this is an outer operation (Y-for-X), then we look for rules
@@ -534,8 +811,6 @@ namespace build2
// reverse_fallback() rather than it returning (a list) of
// reverse actions, which would be necessary to register them.
//
- using fallback_rule = adhoc_rule_pattern::fallback_rule;
-
auto find_fallback = [mo, o, tt] (const fallback_rule& fr)
-> const rule_match*
{
@@ -548,21 +823,27 @@ namespace build2
if (oi == 0)
{
- if (auto* fr =
- dynamic_cast<const fallback_rule*> (&r->second.get ()))
+ if (const fallback_rule* fr = fallback_rule_match (*r))
{
if ((r = find_fallback (*fr)) == nullptr)
continue;
}
}
+ // Skip non-ad hoc rules if the target is not locked (see above;
+ // note that in this case match_extra is a temporary which we
+ // can reinit).
+ //
+ if (!me.locked && !adhoc_rule_match (*r))
+ continue;
+
const string& n (r->first);
const rule& ru (r->second);
if (&ru == skip)
continue;
- me.init (oi == 0 /* fallback */);
+ me.reinit (oi == 0 /* fallback */);
{
auto df = make_diag_frame (
[a, &t, &n](const diag_record& dr)
@@ -587,14 +868,16 @@ namespace build2
if (oi == 0)
{
- if (auto* fr =
- dynamic_cast<const fallback_rule*> (&r1->second.get ()))
+ if (const fallback_rule* fr = fallback_rule_match (*r1))
{
if ((r1 = find_fallback (*fr)) == nullptr)
continue;
}
}
+ if (!me.locked && !adhoc_rule_match (*r1))
+ continue;
+
const string& n1 (r1->first);
const rule& ru1 (r1->second);
@@ -613,8 +896,7 @@ namespace build2
//
// @@ Can't we temporarily swap things out in target?
//
- match_extra me1;
- me1.init (oi == 0);
+ match_extra me1 (me.locked, oi == 0 /* fallback */);
if (!ru1.match (a, t, *hint, me1))
continue;
}
@@ -630,7 +912,10 @@ namespace build2
}
if (!ambig)
+ {
+ me.new_options = options;
return r;
+ }
else
dr << info << "use rule hint to disambiguate this match";
}
@@ -743,66 +1028,280 @@ namespace build2
recipe re (ar != nullptr ? f (*ar, a, t, me) : ru.apply (a, t, me));
- me.free ();
+ me.free (); // Note: cur_options are still in use.
+ assert (me.cur_options != 0); // Match options cannot be 0 after apply().
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
return re;
}
+ static void
+ apply_posthoc_impl (
+ action a, target& t,
+ const pair<const string, reference_wrapper<const rule>>& m,
+ context::posthoc_target& pt)
+ {
+ const scope& bs (t.base_scope ());
+
+ // Apply rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ const rule& ru (m.second);
+ match_extra& me (t[a].match_extra);
+ me.posthoc_prerequisite_targets = &pt.prerequisite_targets;
+
+ auto df = make_diag_frame (
+ [a, &t, &m](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while applying rule " << m.first << " to "
+ << diag_do (a, t) << " for post hoc prerequisites";
+ });
+
+ // Note: for now no adhoc_apply_posthoc().
+ //
+ ru.apply_posthoc (a, t, me);
+ }
+
+ static void
+ reapply_impl (action a,
+ target& t,
+ const pair<const string, reference_wrapper<const rule>>& m)
+ {
+ const scope& bs (t.base_scope ());
+
+ // Reapply rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ const rule& ru (m.second);
+ match_extra& me (t[a].match_extra);
+ // Note: me.posthoc_prerequisite_targets carried over.
+
+ auto df = make_diag_frame (
+ [a, &t, &m](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while reapplying rule " << m.first << " to "
+ << diag_do (a, t);
+ });
+
+ // Note: for now no adhoc_reapply().
+ //
+ ru.reapply (a, t, me);
+ assert (me.cur_options != 0); // Match options cannot be 0 after reapply().
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
+ }
+
+ // If anything goes wrong, set target state to failed and return nullopt.
+ // Otherwise return the pointer to the new posthoc_target entry if any post
+ // hoc prerequisites were present or NULL otherwise. Note that the returned
+ // entry is stable (because we use a list) and should only be accessed
+ // during the match phase while holding the target lock.
+ //
+ // Note: must be called while holding target_lock.
+ //
+ static optional<context::posthoc_target*>
+ match_posthoc (action a, target& t)
+ {
+ // The plan is to, while holding the lock, search and collect all the post
+ // hoc prerequisites and add an entry to context::current_posthoc_targets.
+ // The actual matching happens as post-pass in the meta-operation's match
+ // function.
+ //
+ // While it may seem like we could do matching here by unlocking (or
+ // unstacking) the lock for this target, that will only work for simple
+ // cases. In particular, consider:
+ //
+ // lib{foo}: ...
+ // lib{plug}: ... lib{foo}
+ // libs{foo}: libs{plug}: include = posthoc
+ //
+ // The chain we will end up with:
+ //
+ // lib{foo}->libs{foo}=>libs{plug}->lib{foo}
+ //
+ // This will trip up the cycle detection for group lib{foo}, not for
+ // libs{foo}.
+ //
+ // In the end, matching (and execution) "inline" (i.e., as we match/
+ // execute the corresponding target) appears to be unworkable in the
+ // face of cycles.
+ //
+ // Note also that this delayed match helps with allowing the rule to
+ // adjust match options of post hoc prerequisites without needing the
+ // rematch support (see match_extra::posthoc_prerequisites).
+ //
+ // @@ Anything we need to do for group members (see through)? Feels quite
+ // far-fetched.
+ //
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
+ vector<posthoc_prerequisite_target> pts;
+ try
+ {
+ for (const prerequisite& p: group_prerequisites (t))
+ {
+ // Note that we have to ignore any operation-specific values for
+ // non-posthoc prerequisites. See include_impl() for details.
+ //
+ lookup l;
+ if (include (a, t, p, &l) == include_type::posthoc)
+ {
+ if (l)
+ {
+ const string& v (cast<string> (l));
+
+ // The only valid values are true and false and the latter would
+ // have been translated to include_type::exclude.
+ //
+ if (v != "true")
+ {
+ fail << "unrecognized " << *l.var << " variable value "
+ << "'" << v << "' specified for prerequisite " << p;
+ }
+ }
+
+ pts.push_back (
+ posthoc_prerequisite_target {
+ &search (t, p), // May fail.
+ match_extra::all_options});
+ }
+ }
+ }
+ catch (const failed&)
+ {
+ t.state[a].state = target_state::failed;
+ return nullopt;
+ }
+
+ if (!pts.empty ())
+ {
+ context& ctx (t.ctx);
+
+ mlock l (ctx.current_posthoc_targets_mutex);
+ ctx.current_posthoc_targets.push_back (posthoc_target {a, t, move (pts)});
+ return &ctx.current_posthoc_targets.back (); // Stable.
+ }
+
+ return nullptr;
+ }
+
// If step is true then perform only one step of the match/apply sequence.
//
// If try_match is true, then indicate whether there is a rule match with
// the first half of the result.
//
static pair<bool, target_state>
- match_impl (target_lock& l,
- bool step = false,
- bool try_match = false)
+ match_impl_impl (target_lock& l,
+ uint64_t options,
+ bool step = false,
+ bool try_match = false)
{
+ // With regard to options, the semantics that we need to achieve for each
+ // target::offset_* (a simplified model is sketched after this function):
+ //
+ // tried -- nothing to do (no match)
+ // touched -- set to new_options
+ // matched -- add to new_options
+ // applied -- reapply if any new options
+ // executed -- check and fail if any new options
+ // busy -- postpone until *_complete() call
+ //
+ // Note that if options is 0 (see resolve_{members,group}_impl()), then
+ // all this can be skipped.
+
assert (l.target != nullptr);
action a (l.action);
target& t (*l.target);
target::opstate& s (t[a]);
- // Intercept and handle matching an ad hoc group member.
- //
- if (t.adhoc_group_member ())
+ try
{
- assert (!step);
+ // Intercept and handle matching an ad hoc group member.
+ //
+ if (t.adhoc_group_member ())
+ {
+ // It feels natural to "convert" this call to the one for the group,
+ // including the try_match part. Semantically, we want to achieve the
+ // following:
+ //
+ // [try_]match (a, g);
+ // match_recipe (l, group_recipe);
+ //
+ // Currently, ad hoc group members cannot have options. An alternative
+ // semantics could be to call the group's rule to translate member
+ // options to group options and then (re)match the group with that.
+ // The implementation of this semantics could look like this:
+ //
+ // 1. Lock the group.
+ // 2. If not already offset_matched, do one step to get the rule.
+ // 3. Call the rule to translate options.
+ // 4. Continue matching the group passing the translated options.
+ // 5. Keep track of member options in member's cur_options to handle
+ // member rematches (if already offset_{applied,executed}).
+ //
+ // Note: see also similar semantics but for explicit groups in
+ // adhoc-rule-*.cxx.
- const target& g (*t.group);
+ assert (!step && options == match_extra::all_options);
- // It feels natural to "convert" this call to the one for the group,
- // including the try_match part. Semantically, we want to achieve the
- // following:
- //
- // [try_]match (a, g);
- // match_recipe (l, group_recipe);
- //
- auto df = make_diag_frame (
- [a, &t](const diag_record& dr)
- {
- if (verb != 0)
- dr << info << "while matching group rule to " << diag_do (a, t);
- });
+ const target& g (*t.group);
- pair<bool, target_state> r (match_impl (a, g, 0, nullptr, try_match));
+ // What should we do with options? After some rumination it feels most
+ // natural to treat options for the group and for its ad hoc member as
+ // the same entity ... or not.
+ //
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching group rule to " << diag_do (a, t);
+ });
- if (r.first)
- {
- if (r.second != target_state::failed)
+ pair<bool, target_state> r (
+ match_impl (a, g, 0 /* options */, 0, nullptr, try_match));
+
+ if (r.first)
{
- match_inc_dependents (a, g);
- match_recipe (l, group_recipe);
+ if (r.second != target_state::failed)
+ {
+ // Note: in particular, passing all_options makes sure we will
+ // never re-lock this member if already applied/executed.
+ //
+ match_inc_dependents (a, g);
+ match_recipe (l, group_recipe, match_extra::all_options);
+
+ // Note: no need to call match_posthoc() since an ad hoc member
+ // has no prerequisites of its own and the group's ones will be matched
+ // by the group.
+ }
+ else
+ {
+ // Similar to catch(failed) below.
+ //
+ s.state = target_state::failed;
+ l.offset = target::offset_applied;
+
+ // Make sure we don't relock a failed target.
+ //
+ match_extra& me (s.match_extra);
+ me.cur_options = match_extra::all_options;
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
+ }
}
- }
- else
- l.offset = target::offset_tried;
+ else
+ l.offset = target::offset_tried;
- return r; // Group state (must be consistent with matched_state()).
- }
+ return r; // Group state (must be consistent with matched_state()).
+ }
- try
- {
// Continue from where the target has been left off.
//
switch (l.offset)
@@ -827,7 +1326,8 @@ namespace build2
//
clear_target (a, t);
- const rule_match* r (match_rule (a, t, nullptr, try_match));
+ const rule_match* r (
+ match_rule_impl (a, t, options, nullptr, try_match));
assert (l.offset != target::offset_tried); // Should have failed.
@@ -837,7 +1337,7 @@ namespace build2
return make_pair (false, target_state::unknown);
}
- s.rule = r;
+ set_rule (l, r);
l.offset = target::offset_matched;
if (step)
@@ -849,25 +1349,86 @@ namespace build2
// Fall through.
case target::offset_matched:
{
+ // Add any new options.
+ //
+ s.match_extra.new_options |= options;
+
// Apply.
//
set_recipe (l, apply_impl (a, t, *s.rule));
l.offset = target::offset_applied;
+
+ if (t.has_group_prerequisites ()) // Ok since already matched.
+ {
+ if (optional<context::posthoc_target*> p = match_posthoc (a, t))
+ {
+ if (*p != nullptr)
+ {
+ // It would have been more elegant to do this before calling
+ // apply_impl() and then expose the post hoc prerequisites to
+ // apply(). The problem is the group may not be resolved until
+ // the call to apply(). And so we resort to the separate
+ // apply_posthoc() function.
+ //
+ apply_posthoc_impl (a, t, *s.rule, **p);
+ }
+ }
+ else
+ s.state = target_state::failed;
+ }
+
break;
}
+ case target::offset_applied:
+ {
+ // Reapply if any new options.
+ //
+ match_extra& me (s.match_extra);
+ me.new_options = options & ~me.cur_options; // Clear existing.
+ assert (me.new_options != 0); // Otherwise should not have locked.
+
+ // Feels like this can only be a logic bug since to end up with a
+ // subset of options requires a rule (see match_extra for details).
+ //
+ assert (s.rule != nullptr);
+
+ reapply_impl (a, t, *s.rule);
+ break;
+ }
+ case target::offset_executed:
+ {
+ // Diagnose new options after execute.
+ //
+ match_extra& me (s.match_extra);
+ assert ((me.cur_options & options) != options); // Otherwise no lock.
+
+ fail << "change of match options after " << diag_do (a, t)
+ << " has been executed" <<
+ info << "executed options 0x" << hex << me.cur_options <<
+ info << "requested options 0x" << hex << options << endf;
+ }
default:
assert (false);
}
}
catch (const failed&)
{
+ s.state = target_state::failed;
+ l.offset = target::offset_applied;
+
+ // Make sure we don't relock a failed target.
+ //
+ match_extra& me (s.match_extra);
+ me.cur_options = match_extra::all_options;
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
+ }
+
+ if (s.state == target_state::failed)
+ {
// As a sanity measure clear the target data since it can be incomplete
// or invalid (mark()/unmark() should give you some ideas).
//
clear_target (a, t);
-
- s.state = target_state::failed;
- l.offset = target::offset_applied;
}
return make_pair (true, s.state);
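
// A compact sketch (stand-in constants and types, not the real build2 ones)
// of the per-offset options semantics tabulated at the top of
// match_impl_impl() above.
//
#include <cstdint>
#include <stdexcept>

namespace sketch
{
  enum offset {tried, touched, matched, applied, executed, busy};

  struct extra
  {
    std::uint64_t new_options = 0;
    std::uint64_t cur_options = 0;
  };

  inline void
  dispatch (offset o, std::uint64_t options, extra& me)
  {
    switch (o)
    {
    case tried:                                   // Nothing to do (no match).
      break;
    case touched:
      me.new_options = options;                   // First lock: set.
      break;
    case matched:
      me.new_options |= options;                  // Add before apply.
      break;
    case applied:
      me.new_options = options & ~me.cur_options; // Reapply if non-zero.
      break;
    case executed:
      if ((me.cur_options & options) != options)  // Check and fail.
        throw std::runtime_error ("change of match options after execute");
      break;
    case busy:                                    // Postpone to *_complete().
      break;
    }
  }
}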
@@ -877,10 +1438,9 @@ namespace build2
// the first half of the result.
//
pair<bool, target_state>
- match_impl (action a,
- const target& ct,
- size_t start_count,
- atomic_count* task_count,
+ match_impl (action a, const target& ct,
+ uint64_t options,
+ size_t start_count, atomic_count* task_count,
bool try_match)
{
// If we are blocking then work our own queue one task at a time. The
@@ -902,17 +1462,16 @@ namespace build2
ct,
task_count == nullptr
? optional<scheduler::work_queue> (scheduler::work_none)
- : nullopt));
+ : nullopt,
+ options));
if (l.target != nullptr)
{
- assert (l.offset < target::offset_applied); // Shouldn't lock otherwise.
-
if (try_match && l.offset == target::offset_tried)
return make_pair (false, target_state::unknown);
if (task_count == nullptr)
- return match_impl (l, false /* step */, try_match);
+ return match_impl_impl (l, options, false /* step */, try_match);
// Pass "disassembled" lock since the scheduler queue doesn't support
// task destruction.
@@ -922,12 +1481,18 @@ namespace build2
// Also pass our diagnostics and lock stacks (this is safe since we
// expect the caller to wait for completion before unwinding its stack).
//
- if (ct.ctx.sched.async (
+ // Note: pack captures and arguments a bit to reduce the storage space
+ // requirements.
+ //
+ bool first (ld.first);
+
+ if (ct.ctx.sched->async (
start_count,
*task_count,
- [a, try_match] (const diag_frame* ds,
- const target_lock* ls,
- target& t, size_t offset)
+ [a, try_match, first] (const diag_frame* ds,
+ const target_lock* ls,
+ target& t, size_t offset,
+ uint64_t options)
{
// Switch to caller's diag and lock stacks.
//
@@ -938,17 +1503,18 @@ namespace build2
{
phase_lock pl (t.ctx, run_phase::match); // Throws.
{
- target_lock l {a, &t, offset}; // Reassemble.
- match_impl (l, false /* step */, try_match);
- // Unlock within the match phase.
+ // Note: target_lock must be unlocked within the match phase.
+ //
+ target_lock l {a, &t, offset, first}; // Reassemble.
+ match_impl_impl (l, options, false /* step */, try_match);
}
}
catch (const failed&) {} // Phase lock failure.
},
diag_frame::stack (),
target_lock::stack (),
- ref (*ld.target),
- ld.offset))
+ ref (*ld.target), ld.offset,
+ options))
return make_pair (true, target_state::postponed); // Queued.
// Matched synchronously, fall through.
@@ -966,9 +1532,39 @@ namespace build2
return ct.try_matched_state (a, false);
}
+ void
+ match_only_sync (action a, const target& t, uint64_t options)
+ {
+ assert (t.ctx.phase == run_phase::match);
+
+ target_lock l (lock_impl (a, t, scheduler::work_none, options));
+
+ if (l.target != nullptr)
+ {
+ if (l.offset != target::offset_matched)
+ {
+ if (match_impl_impl (l,
+ options,
+ true /* step */).second == target_state::failed)
+ throw failed ();
+ }
+ else
+ {
+ // If the target is already matched, then we need to add any new
+ // options but not call apply() (thus cannot use match_impl_impl()).
+ //
+ (*l.target)[a].match_extra.new_options |= options;
+ }
+ }
+ }
+
+ // Note: lock is a reference to avoid the stacking overhead.
+ //
static group_view
- resolve_members_impl (action a, const target& g, target_lock l)
+ resolve_members_impl (action a, const target& g, target_lock&& l)
{
+ assert (a.inner ());
+
// Note that we will be unlocked if the target is already applied.
//
group_view r;
@@ -982,7 +1578,9 @@ namespace build2
{
// Match (locked).
//
- if (match_impl (l, true).second == target_state::failed)
+ if (match_impl_impl (l,
+ 0 /* options */,
+ true /* step */).second == target_state::failed)
throw failed ();
if ((r = g.group_members (a)).members != nullptr)
@@ -993,34 +1591,52 @@ namespace build2
// Fall through.
case target::offset_matched:
{
- // @@ Doing match without execute messes up our target_count. Does
- // not seem like it will be easy to fix (we don't know whether
- // someone else will execute this target).
- //
- // What if we always do match & execute together? After all,
- // if a group can be resolved in apply(), then it can be
- // resolved in match()! Feels a bit drastic.
- //
- // But, this won't be a problem if the target returns noop_recipe.
- // And perhaps it's correct to fail if it's not noop_recipe but
- // nobody executed it? Maybe not.
- //
- // Another option would be to have a count for such "matched but
- // may not be executed" targets and then make sure target_count
- // is less than that at the end. Though this definitelt makes it
- // less exact (since we can end up executed this target but not
- // some other). Maybe we can increment and decrement such targets
- // in a separate count (i.e., mark their recipe as special or some
- // such).
- //
-
// Apply (locked).
//
- if (match_impl (l, true).second == target_state::failed)
+ pair<bool, target_state> s (
+ match_impl_impl (l, 0 /* options */, true /* step */));
+
+ if (s.second == target_state::failed)
throw failed ();
if ((r = g.group_members (a)).members != nullptr)
+ {
+ // Doing match without execute messes up our target_count. There
+ // doesn't seem to be a clean way to solve this. Well, just always
+ // executing if we've done the match would have been clean but quite
+ // heavy-handed (it would be especially surprising if otherwise
+ // there is nothing else to do, which can happen, for example,
+ // during update-for-test when there are no tests to run).
+ //
+ // So our solution is as follows:
+ //
+ // 1. Keep track both of the targets that ended up in this situation
+ // (the target::resolve_counted flag) as well as their total
+ // count (the context::resolve_count member). Only do this if
+ // set_recipe() (called by match_impl()) would have incremented
+ // target_count.
+ //
+ // 2. If we happen to execute such a target (common case), then
+ // clear the flag and decrement the count.
+ //
+ // 3. When it's time to assert that target_count==0 (i.e., all the
+ // matched targets have been executed), check if resolve_count is
+ // 0. If it's not, then find every target with the flag set,
+ // pretend-execute it, and decrement both counts. See
+ // perform_execute() for further details on this step.
+ //
+ if (s.second != target_state::unchanged)
+ {
+ target::opstate& s (l.target->state[a]); // Inner.
+
+ if (!s.recipe_group_action)
+ {
+ s.resolve_counted = true;
+ g.ctx.resolve_count.fetch_add (1, memory_order_relaxed);
+ }
+ }
break;
+ }
// Unlock and execute ...
//
@@ -1037,6 +1653,10 @@ namespace build2
// we would have already known the members list) and we really do need
// to execute it now.
//
+ // Note that while it might be tempting to decrement resolve_count
+ // here, there is no guarantee that we were the ones who have matched
+ // this target.
+ //
{
phase_switch ps (g.ctx, run_phase::execute);
execute_direct_sync (a, g);
@@ -1085,10 +1705,23 @@ namespace build2
return r;
}
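
// A minimal, runnable sketch (simplified counters, not the real context
// members) of the resolve_count bookkeeping described in
// resolve_members_impl() above: targets matched only to resolve group
// members are tracked so the final target_count == 0 check can
// pretend-execute the stragglers.
//
#include <atomic>
#include <cstddef>
#include <cassert>

int main ()
{
  std::atomic<std::size_t> target_count {0};  // Matched, not yet executed.
  std::atomic<std::size_t> resolve_count {0}; // Matched only for resolution.
  bool resolve_counted (false);

  // 1. Match during member resolution: count in both counters.
  //
  target_count.fetch_add (1, std::memory_order_relaxed);
  resolve_count.fetch_add (1, std::memory_order_relaxed);
  resolve_counted = true;

  // 2. The common case: the target does get executed later.
  //
  if (resolve_counted)
  {
    resolve_counted = false;
    resolve_count.fetch_sub (1, std::memory_order_relaxed);
  }
  target_count.fetch_sub (1, std::memory_order_relaxed);

  // 3. End of operation: both counts must drain to zero.
  //
  assert (target_count.load () == 0 && resolve_count.load () == 0);
}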
+ // Note: lock is a reference to avoid the stacking overhead.
+ //
void
- resolve_group_impl (action, const target&, target_lock l)
+ resolve_group_impl (target_lock&& l)
{
- match_impl (l, true /* step */, true /* try_match */);
+ assert (l.action.inner ());
+
+ pair<bool, target_state> r (
+ match_impl_impl (l,
+ 0 /* options */,
+ true /* step */,
+ true /* try_match */));
+
+ l.unlock ();
+
+ if (r.first && r.second == target_state::failed)
+ throw failed ();
}
template <typename R, typename S>
@@ -1096,16 +1729,33 @@ namespace build2
match_prerequisite_range (action a, target& t,
R&& r,
const S& ms,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
auto& pts (t.prerequisite_targets[a]);
+ size_t i (pts.size ()); // Index of the first to be added.
+
+ // Avoid duplicating fsdir{} that may have already been injected by
+ // inject_fsdir() (in which case it is expected to be first).
+ //
+ const target* dir (nullptr);
+ if (i != 0)
+ {
+ const prerequisite_target& pt (pts.front ());
+
+ if (pt.target != nullptr && pt.adhoc () && pt.target->is_a<fsdir> ())
+ dir = pt.target;
+ }
+
// Start asynchronous matching of prerequisites. Wait with unlocked phase
// to allow phase switching.
//
- wait_guard wg (t.ctx, t.ctx.count_busy (), t[a].task_count, true);
+ wait_guard wg (
+ search_only
+ ? wait_guard ()
+ : wait_guard (t.ctx, t.ctx.count_busy (), t[a].task_count, true));
- size_t i (pts.size ()); // Index of the first to be added.
for (auto&& p: forward<R> (r))
{
// Ignore excluded.
@@ -1119,13 +1769,20 @@ namespace build2
? ms (a, t, p, pi)
: prerequisite_target (&search (t, p), pi));
- if (pt.target == nullptr || (s != nullptr && !pt.target->in (*s)))
+ if (pt.target == nullptr ||
+ pt.target == dir ||
+ (s != nullptr && !pt.target->in (*s)))
continue;
- match_async (a, *pt.target, t.ctx.count_busy (), t[a].task_count);
+ if (!search_only)
+ match_async (a, *pt.target, t.ctx.count_busy (), t[a].task_count);
+
pts.push_back (move (pt));
}
+ if (search_only)
+ return;
+
wg.wait ();
// Finish matching all the targets that we have started.
@@ -1140,22 +1797,31 @@ namespace build2
void
match_prerequisites (action a, target& t,
const match_search& ms,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
- match_prerequisite_range (a, t, group_prerequisites (t), ms, s);
+ match_prerequisite_range (a, t,
+ group_prerequisites (t),
+ ms,
+ s,
+ search_only);
}
void
match_prerequisite_members (action a, target& t,
const match_search_member& msm,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
- match_prerequisite_range (a, t, group_prerequisite_members (a, t), msm, s);
+ match_prerequisite_range (a, t,
+ group_prerequisite_members (a, t),
+ msm,
+ s,
+ search_only);
}
- template <typename T>
void
- match_members (action a, target& t, T const* ts, size_t n)
+ match_members (action a, const target& t, const target* const* ts, size_t n)
{
// Pretty much identical to match_prerequisite_range() except we don't
// search.
@@ -1187,18 +1853,48 @@ namespace build2
}
}
- // Instantiate only for what we need.
- //
- template LIBBUILD2_SYMEXPORT void
- match_members<const target*> (action, target&,
- const target* const*, size_t);
+ void
+ match_members (action a,
+ const target& t,
+ prerequisite_targets& ts,
+ size_t s,
+ pair<uintptr_t, uintptr_t> imv)
+ {
+ size_t n (ts.size ());
+
+ wait_guard wg (t.ctx, t.ctx.count_busy (), t[a].task_count, true);
- template LIBBUILD2_SYMEXPORT void
- match_members<prerequisite_target> (action, target&,
- prerequisite_target const*, size_t);
+ for (size_t i (s); i != n; ++i)
+ {
+ const prerequisite_target& pt (ts[i]);
+ const target* m (pt.target);
+
+ if (m == nullptr ||
+ marked (m) ||
+ (imv.first != 0 && (pt.include & imv.first) != imv.second))
+ continue;
+
+ match_async (a, *m, t.ctx.count_busy (), t[a].task_count);
+ }
+
+ wg.wait ();
+
+ for (size_t i (s); i != n; ++i)
+ {
+ const prerequisite_target& pt (ts[i]);
+ const target* m (pt.target);
+
+ if (m == nullptr ||
+ marked (m) ||
+ (imv.first != 0 && (pt.include & imv.first) != imv.second))
+ continue;
+
+ match_complete (a, *m);
+ }
+ }
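
// A small, runnable sketch (hypothetical bit values) of the include
// mask/value filter used by match_members() above: a member is considered
// only if the bits selected by imv.first equal imv.second (a zero mask
// disables filtering).
//
#include <cstdint>
#include <cassert>

int main ()
{
  const std::uintptr_t mask  (0x3); // imv.first: bits to inspect.
  const std::uintptr_t value (0x1); // imv.second: required pattern.

  auto skip = [mask, value] (std::uintptr_t include)
  {
    return mask != 0 && (include & mask) != value;
  };

  assert (!skip (0x1)); // Selected bits match: matched.
  assert (skip (0x2));  // Selected bits differ: skipped.
  assert (skip (0x3));  // 0x3 & 0x3 == 0x3 != 0x1: skipped.
}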
const fsdir*
- inject_fsdir (action a, target& t, bool parent)
+ inject_fsdir_impl (target& t, bool prereq, bool parent)
{
tracer trace ("inject_fsdir");
@@ -1219,6 +1915,7 @@ namespace build2
// subprojects (e.g., tests/).
//
const fsdir* r (nullptr);
+
if (rs != nullptr && !d.sub (rs->src_path ()))
{
l6 ([&]{trace << d << " for " << t;});
@@ -1227,7 +1924,7 @@ namespace build2
//
r = &search<fsdir> (t, d, dir_path (), string (), nullptr, nullptr);
}
- else
+ else if (prereq)
{
// See if one was mentioned explicitly.
//
@@ -1246,13 +1943,45 @@ namespace build2
}
}
+ return r;
+ }
+
+ const fsdir*
+ inject_fsdir (action a, target& t, bool match, bool prereq, bool parent)
+ {
+ auto& pts (t.prerequisite_targets[a]);
+
+ assert (!prereq || pts.empty ()); // This prerequisite target must be first.
+
+ const fsdir* r (inject_fsdir_impl (t, prereq, parent));
+
if (r != nullptr)
{
+ if (match)
+ match_sync (a, *r);
+
// Make it ad hoc so that it doesn't end up in prerequisite_targets
// after execution.
//
- match_sync (a, *r);
- t.prerequisite_targets[a].emplace_back (r, include_type::adhoc);
+ pts.emplace_back (r, include_type::adhoc);
+ }
+
+ return r;
+ }
+
+ const fsdir*
+ inject_fsdir_direct (action a, target& t, bool prereq, bool parent)
+ {
+ auto& pts (t.prerequisite_targets[a]);
+
+ assert (!prereq || pts.empty ()); // This prerequisite target must be first.
+
+ const fsdir* r (inject_fsdir_impl (t, prereq, parent));
+
+ if (r != nullptr)
+ {
+ match_direct_sync (a, *r);
+ pts.emplace_back (r, include_type::adhoc);
}
return r;
@@ -1365,11 +2094,26 @@ namespace build2
return ts;
}
- void
- update_backlink (const file& f, const path& l, bool changed, backlink_mode m)
+ static inline const char*
+ update_backlink_name (backlink_mode m, bool to_dir)
{
using mode = backlink_mode;
+ const char* r (nullptr);
+ switch (m)
+ {
+ case mode::link:
+ case mode::symbolic: r = verb >= 3 ? "ln -sf" : verb >= 2 ? "ln -s" : "ln"; break;
+ case mode::hard: r = verb >= 3 ? "ln -f" : "ln"; break;
+ case mode::copy:
+ case mode::overwrite: r = to_dir ? "cp -r" : "cp"; break;
+ }
+ return r;
+ }
+
+ void
+ update_backlink (const file& f, const path& l, bool changed, backlink_mode m)
+ {
const path& p (f.path ());
dir_path d (l.directory ());
@@ -1381,28 +2125,20 @@ namespace build2
// actually updated to signal to the user that the updated out target is
// now available in src.
//
- if (verb <= 2)
+ if (verb == 1 || verb == 2)
{
if (changed || !butl::entry_exists (l,
false /* follow_symlinks */,
true /* ignore_errors */))
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = verb >= 2 ? "ln -s" : "ln"; break;
- case mode::hard: c = "ln"; break;
- case mode::copy:
- case mode::overwrite: c = l.to_directory () ? "cp -r" : "cp"; break;
- }
+ const char* c (update_backlink_name (m, l.to_directory ()));
- // Note: 'ln foo/ bar/' means a different thing.
+ // Note: 'ln foo/ bar/' means a different thing (and below).
//
- if (verb >= 2)
+ if (verb == 2)
text << c << ' ' << p.string () << ' ' << l.string ();
else
- text << c << ' ' << f << " -> " << d;
+ print_diag (c, f, d);
}
}
@@ -1422,30 +2158,25 @@ namespace build2
{
// As above but with a slightly different diagnostics.
- using mode = backlink_mode;
-
dir_path d (l.directory ());
- if (verb <= 2)
+ if (verb == 1 || verb == 2)
{
if (changed || !butl::entry_exists (l,
false /* follow_symlinks */,
true /* ignore_errors */))
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = verb >= 2 ? "ln -s" : "ln"; break;
- case mode::hard: c = "ln"; break;
- case mode::copy:
- case mode::overwrite: c = l.to_directory () ? "cp -r" : "cp"; break;
- }
+ const char* c (update_backlink_name (m, l.to_directory ()));
+ // Note: 'ln foo/ bar/' means a different thing (and above) so strip
+ // trailing directory separator (but keep as path for relative).
+ //
if (verb >= 2)
text << c << ' ' << p.string () << ' ' << l.string ();
else
- text << c << ' ' << p.string () << " -> " << d;
+ print_diag (c,
+ p.to_directory () ? path (p.string ()) : p,
+ d);
}
}
@@ -1497,6 +2228,8 @@ namespace build2
const path& p, const path& l, backlink_mode om,
uint16_t verbosity)
{
+ assert (verbosity >= 2);
+
using mode = backlink_mode;
bool d (l.to_directory ());
@@ -1506,17 +2239,8 @@ namespace build2
{
if (verb >= verbosity)
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = "ln -sf"; break;
- case mode::hard: c = "ln -f"; break;
- case mode::copy:
- case mode::overwrite: c = d ? "cp -r" : "cp"; break;
- }
-
- text << c << ' ' << p.string () << ' ' << l.string ();
+ text << update_backlink_name (m, d) << ' ' << p.string () << ' '
+ << l.string ();
}
};
@@ -1578,8 +2302,7 @@ namespace build2
try_mkdir (to);
- for (const auto& de:
- dir_iterator (fr, false /* ignore_dangling */))
+ for (const auto& de: dir_iterator (fr, dir_iterator::no_follow))
{
path f (fr / de.path ());
path t (to / de.path ());
@@ -1632,6 +2355,11 @@ namespace build2
//
// Note that here the dry-run mode is handled by the filesystem functions.
+ // Note that if we ever need to support level 1 for some reason, maybe
+ // consider showing the target, for example, `unlink exe{hello} <- dir/`?
+ //
+ assert (v >= 2);
+
using mode = backlink_mode;
if (l.to_directory ())
@@ -1666,9 +2394,15 @@ namespace build2
struct backlink: auto_rm<path>
{
using path_type = build2::path;
+ using target_type = build2::target;
reference_wrapper<const path_type> target;
- backlink_mode mode;
+ backlink_mode mode;
+
+ // Ad hoc group-specific information for diagnostics (see below).
+ //
+ const target_type* member = nullptr;
+ bool print = true;
backlink (const path_type& t, path_type&& l, backlink_mode m, bool active)
: auto_rm<path_type> (move (l), active), target (t), mode (m)
@@ -1690,33 +2424,65 @@ namespace build2
};
// Normally (i.e., on sane platforms that don't have things like PDBs, etc)
- // there will be just one backlink so optimize for that.
+ // there will be just one or two backlinks so optimize for that.
//
- using backlinks = small_vector<backlink, 1>;
+ using backlinks = small_vector<backlink, 2>;
- static optional<backlink_mode>
- backlink_test (const target& t, const lookup& l)
+ static optional<pair<backlink_mode, bool>>
+ backlink_test (const target& t, const lookup& l, optional<backlink_mode> gm)
{
using mode = backlink_mode;
- optional<mode> r;
- const string& v (cast<string> (l));
+ const names& ns (cast<names> (l));
- if (v == "true") r = mode::link;
- else if (v == "symbolic") r = mode::symbolic;
- else if (v == "hard") r = mode::hard;
- else if (v == "copy") r = mode::copy;
- else if (v == "overwrite") r = mode::overwrite;
- else if (v != "false")
- fail << "invalid backlink variable value '" << v << "' "
+ if (ns.size () != 1 && ns.size () != 2)
+ {
+ fail << "invalid backlink variable value '" << ns << "' "
<< "specified for target " << t;
+ }
- return r;
+ optional<mode> m;
+ for (;;) // Breakout loop.
+ {
+ const name& n (ns.front ());
+
+ if (n.simple ())
+ {
+ const string& v (n.value);
+
+ if (v == "true") {m = mode::link; break;}
+ else if (v == "symbolic") {m = mode::symbolic; break;}
+ else if (v == "hard") {m = mode::hard; break;}
+ else if (v == "copy") {m = mode::copy; break;}
+ else if (v == "overwrite") {m = mode::overwrite; break;}
+ else if (v == "false") { break;}
+ else if (v == "group") {if ((m = gm)) break;}
+ }
+
+ fail << "invalid backlink variable value mode component '" << n << "' "
+ << "specified for target " << t << endf;
+ }
+
+ bool np (false); // "not print"
+ if (ns.size () == 2)
+ {
+ const name& n (ns.back ());
+
+ if (n.simple () && (n.value == "true" || (np = (n.value == "false"))))
+ ;
+ else
+ fail << "invalid backlink variable value print component '" << n
+ << "' specified for target " << t;
+ }
+
+ return m ? optional<pair<mode, bool>> (make_pair (*m, !np)) : nullopt;
}
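
// Illustrative buildfile values (assumption: example target names) accepted
// by the parsing above: one mode component, optionally followed by a print
// component:
//
//   exe{hello}: backlink = true         # Mode link.
//   exe{hello}: backlink = copy         # Copy instead of linking.
//   pdb{hello}: backlink = group false  # Group's mode, not printed at verb 1.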
static optional<backlink_mode>
backlink_test (action a, target& t)
{
+ using mode = backlink_mode;
+
context& ctx (t.ctx);
// Note: the order of these checks is from the least to most expensive.
@@ -1726,9 +2492,20 @@ namespace build2
if (a.outer () || (a != perform_update_id && a != perform_clean_id))
return nullopt;
- // Only file-based targets in the out tree can be backlinked.
+ // Only targets in the out tree can be backlinked.
+ //
+ if (!t.out.empty ())
+ return nullopt;
+
+ // Only file-based targets or groups containing file-based targets can be
+ // backlinked. Note that we don't do the "file-based" check for the latter
+ // case here since they can still be excluded. So instead we are prepared
+ // to handle the empty backlinks list.
//
- if (!t.out.empty () || !t.is_a<file> ())
+ // @@ Potentially members could only be resolved in execute. I guess we
+ // don't support backlink for such groups at the moment.
+ //
+ if (!t.is_a<file> () && t.group_members (a).members == nullptr)
return nullopt;
// Neither an out-of-project nor in-src configuration can be forwarded.
@@ -1752,7 +2529,13 @@ namespace build2
if (!l.defined ())
l = ctx.global_scope.lookup (*ctx.var_backlink, t.key ());
- return l ? backlink_test (t, l) : nullopt;
+ optional<pair<mode, bool>> r (l ? backlink_test (t, l, nullopt) : nullopt);
+
+ if (r && !r->second)
+ fail << "backlink variable value print component cannot be false "
+ << "for primary target " << t;
+
+ return r ? optional<mode> (r->first) : nullopt;
}
static backlinks
@@ -1760,58 +2543,104 @@ namespace build2
{
using mode = backlink_mode;
+ context& ctx (t.ctx);
const scope& s (t.base_scope ());
backlinks bls;
- auto add = [&bls, &s] (const path& p, mode m)
+ auto add = [&bls, &s] (const path& p,
+ mode m,
+ const target* mt = nullptr,
+ bool print = true)
{
bls.emplace_back (p,
s.src_path () / p.leaf (s.out_path ()),
m,
!s.ctx.dry_run /* active */);
+
+ if (mt != nullptr)
+ {
+ backlink& bl (bls.back ());
+ bl.member = mt;
+ bl.print = print;
+ }
};
- // First the target itself.
+ // Check for a custom backlink mode for this member. If none, then
+ // inherit the one from the group (so if the user asked to copy
+ // .exe, we will also copy .pdb).
+ //
+ // Note that we want to avoid group or tt/pattern-spec lookup. And
+ // since this is an ad hoc member (which means it was either declared
+ // in the buildfile or added by the rule), we assume that the value,
+ // if any, will be set as a target- or rule-specific variable.
//
- add (t.as<file> ().path (), m);
+ auto member_mode = [a, m, &ctx] (const target& mt)
+ -> optional<pair<mode, bool>>
+ {
+ lookup l (mt.state[a].vars[ctx.var_backlink]);
+
+ if (!l)
+ l = mt.vars[ctx.var_backlink];
- // Then ad hoc group file/fsdir members, if any.
+ return l ? backlink_test (mt, l, m) : make_pair (m, true);
+ };
+
+ // @@ Currently we don't handle the following cases:
+ //
+ // 1. File-based explicit groups.
//
- for (const target* mt (t.adhoc_member);
- mt != nullptr;
- mt = mt->adhoc_member)
+ // 2. Ad hoc subgroups in explicit groups.
+ //
+ // Note: see also the corresponding code in backlink_update_post().
+ //
+ if (file* f = t.is_a<file> ())
{
- const path* p (nullptr);
+ // First the target itself.
+ //
+ add (f->path (), m, f, true); // Note: always printed.
- if (const file* f = mt->is_a<file> ())
+ // Then ad hoc group file/fsdir members, if any.
+ //
+ for (const target* mt (t.adhoc_member);
+ mt != nullptr;
+ mt = mt->adhoc_member)
{
- p = &f->path ();
+ const path* p (nullptr);
- if (p->empty ()) // The "trust me, it's somewhere" case.
- p = nullptr;
- }
- else if (const fsdir* d = mt->is_a<fsdir> ())
- p = &d->dir;
+ if (const file* f = mt->is_a<file> ())
+ {
+ p = &f->path ();
- if (p != nullptr)
- {
- // Check for a custom backlink mode for this member. If none, then
- // inherit the one from the group (so if the user asked to copy .exe,
- // we will also copy .pdb).
- //
- // Note that we want to avoid group or tt/patter-spec lookup. And
- // since this is an ad hoc member (which means it was either declared
- // in the buildfile or added by the rule), we assume that the value,
- // if any, will be set as a rule-specific variable (since setting it
- // as a target-specific wouldn't be MT-safe). @@ Don't think this
- // applies to declared ad hoc members.
- //
- lookup l (mt->state[a].vars[t.ctx.var_backlink]);
+ if (p->empty ()) // The "trust me, it's somewhere" case.
+ p = nullptr;
+ }
+ else if (const fsdir* d = mt->is_a<fsdir> ())
+ p = &d->dir;
- optional<mode> bm (l ? backlink_test (*mt, l) : m);
+ if (p != nullptr)
+ {
+ if (optional<pair<mode, bool>> m = member_mode (*mt))
+ add (*p, m->first, mt, m->second);
+ }
+ }
+ }
+ else
+ {
+ // Explicit group.
+ //
+ group_view gv (t.group_members (a));
+ assert (gv.members != nullptr);
- if (bm)
- add (*p, *bm);
+ for (size_t i (0); i != gv.count; ++i)
+ {
+ if (const target* mt = gv.members[i])
+ {
+ if (const file* f = mt->is_a<file> ())
+ {
+ if (optional<pair<mode, bool>> m = member_mode (*mt))
+ add (f->path (), m->first);
+ }
+ }
}
}
@@ -1825,29 +2654,89 @@ namespace build2
}
static void
- backlink_update_post (target& t, target_state ts, backlinks& bls)
+ backlink_update_post (target& t, target_state ts,
+ backlink_mode m, backlinks& bls)
{
if (ts == target_state::failed)
return; // Let auto rm clean things up.
- // Make backlinks.
- //
- for (auto b (bls.begin ()), i (b); i != bls.end (); ++i)
+ context& ctx (t.ctx);
+
+ file* ft (t.is_a<file> ());
+
+ if (ft != nullptr && bls.size () == 1)
{
- const backlink& bl (*i);
+ // Single file-based target.
+ //
+ const backlink& bl (bls.front ());
- if (i == b)
- update_backlink (t.as<file> (),
- bl.path,
- ts == target_state::changed,
- bl.mode);
- else
- update_backlink (t.ctx, bl.target, bl.path, bl.mode);
+ update_backlink (*ft,
+ bl.path,
+ ts == target_state::changed,
+ bl.mode);
+ }
+ else
+ {
+ // Explicit or ad hoc group.
+ //
+ // What we have below is a custom variant of update_backlink(file).
+ //
+ dir_path d (bls.front ().path.directory ());
+
+ // First print the verbosity level 1 diagnostics. Level 2 and higher are
+ // handled by the update_backlink() calls below.
+ //
+ if (verb == 1)
+ {
+ bool changed (ts == target_state::changed);
+
+ if (!changed)
+ {
+ for (const backlink& bl: bls)
+ {
+ changed = !butl::entry_exists (bl.path,
+ false /* follow_symlinks */,
+ true /* ignore_errors */);
+ if (changed)
+ break;
+ }
+ }
+
+ if (changed)
+ {
+ const char* c (update_backlink_name (m, false /* to_dir */));
+
+ // For explicit groups we only print the group target. For ad hoc
+ // groups we print all the members except those explicitly excluded.
+ //
+ if (ft == nullptr)
+ print_diag (c, t, d);
+ else
+ {
+ vector<target_key> tks;
+ tks.reserve (bls.size ());
+
+ for (const backlink& bl: bls)
+ if (bl.print)
+ tks.push_back (bl.member->key ());
+
+ print_diag (c, move (tks), d);
+ }
+ }
+ }
+
+ if (!exists (d))
+ mkdir_p (d, 2 /* verbosity */);
+
+ // Make backlinks.
+ //
+ for (const backlink& bl: bls)
+ update_backlink (ctx, bl.target, bl.path, bl.mode, 2 /* verbosity */);
}
// Cancel removal.
//
- if (!t.ctx.dry_run)
+ if (!ctx.dry_run)
{
for (backlink& bl: bls)
bl.cancel ();
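As for what making a single backlink entails, update_backlink() (defined earlier in the file) essentially dispatches on the mode to a libbutl filesystem primitive. A condensed sketch, assuming bl.target holds the filesystem path of the real target and glossing over existing-entry handling, directories, and platform fallbacks:

  // Condensed sketch of the per-backlink work (not the actual
  // update_backlink() implementation).
  //
  switch (bl.mode)
  {
  case backlink_mode::symbolic:
    butl::mksymlink (bl.target, bl.path); break;
  case backlink_mode::hard:
    butl::mkhardlink (bl.target, bl.path); break;
  case backlink_mode::copy:
  case backlink_mode::overwrite:
    butl::cpfile (bl.target, bl.path, butl::cpflags::overwrite_content);
    break;
  }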
@@ -1888,15 +2777,57 @@ namespace build2
// which is ok since such targets are probably not interesting for
// backlinking.
//
+ // Note also that for group members (both ad hoc and non) backlinking
+ // is handled when updating/cleaning the group.
+ //
backlinks bls;
- optional<backlink_mode> blm (backlink_test (a, t));
+ optional<backlink_mode> blm;
- if (blm)
+ if (t.group == nullptr) // Matched so must be already resolved.
{
- if (a == perform_update_id)
- bls = backlink_update_pre (a, t, *blm);
+ blm = backlink_test (a, t);
+
+ if (blm)
+ {
+ if (a == perform_update_id)
+ {
+ bls = backlink_update_pre (a, t, *blm);
+ if (bls.empty ())
+ blm = nullopt;
+ }
+ else
+ backlink_clean_pre (a, t, *blm);
+ }
+ }
+
+ // Note: see similar code in set_rule_trace() for match.
+ //
+ if (ctx.trace_execute != nullptr && trace_target (t, *ctx.trace_execute))
+ {
+ diag_record dr (info);
+
+ dr << diag_doing (a, t);
+
+ if (s.rule != nullptr)
+ {
+ const rule& r (s.rule->second);
+
+ if (const adhoc_rule* ar = dynamic_cast<const adhoc_rule*> (&r))
+ {
+ dr << info (ar->loc);
+
+ if (ar->pattern != nullptr)
+ dr << "using ad hoc pattern rule ";
+ else
+ dr << "using ad hoc recipe ";
+ }
+ else
+ dr << info << "using rule ";
+
+ dr << s.rule->first;
+ }
else
- backlink_clean_pre (a, t, *blm);
+ dr << info << "using directly-assigned recipe";
}
ts = execute_recipe (a, t, s.recipe);
@@ -1904,7 +2835,7 @@ namespace build2
if (blm)
{
if (a == perform_update_id)
- backlink_update_post (t, ts, bls);
+ backlink_update_post (t, ts, *blm, bls);
}
}
catch (const failed&)
@@ -1919,7 +2850,6 @@ namespace build2
// s.recipe_group_action may be used further (see, for example,
// group_state()) and should retain its value.
//
- //
if (!s.recipe_keep)
s.recipe = nullptr;
@@ -1929,7 +2859,17 @@ namespace build2
// postponement logic (see execute_recipe() for details).
//
if (a.inner () && !s.recipe_group_action)
+ {
+ // See resolve_members_impl() for background.
+ //
+ if (s.resolve_counted)
+ {
+ s.resolve_counted = false;
+ ctx.resolve_count.fetch_sub (1, memory_order_relaxed);
+ }
+
ctx.target_count.fetch_sub (1, memory_order_relaxed);
+ }
// Decrement the task count (to count_executed) and wake up any threads
// that might be waiting for this target.
@@ -1938,7 +2878,7 @@ namespace build2
target::offset_busy - target::offset_executed,
memory_order_release));
assert (tc == ctx.count_busy ());
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
return ts;
}
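Putting the backlink pieces together, the update path through execute_impl() reduces to the following (a simplified restatement of the code above, with the group check, tracing, and count bookkeeping omitted):

  // Simplified flow of backlinking during execute (update).
  //
  optional<backlink_mode> blm (backlink_test (a, t));
  backlinks bls;

  if (blm)
  {
    bls = backlink_update_pre (a, t, *blm); // Arm auto-removal.

    if (bls.empty ())
      blm = nullopt;
  }

  target_state ts (execute_recipe (a, t, s.recipe));

  if (blm)
    backlink_update_post (t, ts, *blm, bls); // Make links, disarm rm.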
@@ -1949,6 +2889,8 @@ namespace build2
size_t start_count,
atomic_count* task_count)
{
+ // NOTE: see also pretend_execute lambda in perform_execute().
+
target& t (const_cast<target&> (ct)); // MT-aware.
target::opstate& s (t[a]);
@@ -1991,6 +2933,7 @@ namespace build2
size_t exec (ctx.count_executed ());
size_t busy (ctx.count_busy ());
+ optional<target_state> r;
if (s.task_count.compare_exchange_strong (
tc,
busy,
@@ -2003,32 +2946,35 @@ namespace build2
{
// There could still be scope operations.
//
- if (t.is_a<dir> ())
- execute_recipe (a, t, nullptr /* recipe */);
+ r = t.is_a<dir> ()
+ ? execute_recipe (a, t, nullptr /* recipe */)
+ : s.state;
s.task_count.store (exec, memory_order_release);
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
}
else
{
if (task_count == nullptr)
- return execute_impl (a, t);
-
- // Pass our diagnostics stack (this is safe since we expect the
- // caller to wait for completion before unwinding its diag stack).
- //
- if (ctx.sched.async (start_count,
- *task_count,
- [a] (const diag_frame* ds, target& t)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (a, t);
- },
- diag_frame::stack (),
- ref (t)))
- return target_state::unknown; // Queued.
-
- // Executed synchronously, fall through.
+ r = execute_impl (a, t);
+ else
+ {
+ // Pass our diagnostics stack (this is safe since we expect the
+ // caller to wait for completion before unwinding its diag stack).
+ //
+ if (ctx.sched->async (start_count,
+ *task_count,
+ [a] (const diag_frame* ds, target& t)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (a, t);
+ },
+ diag_frame::stack (),
+ ref (t)))
+ return target_state::unknown; // Queued.
+
+ // Executed synchronously, fall through.
+ }
}
}
else
@@ -2039,7 +2985,7 @@ namespace build2
else assert (tc == exec);
}
- return t.executed_state (a, false);
+ return r ? *r : t.executed_state (a, false /* fail */);
}
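The task_count manipulation above implements a small per-target state machine. Schematically (the memory orders and the count_applied() starting point are assumptions based on the surrounding code, which the hunk elides):

  // applied --CAS--> busy --store(exec) + resume--> executed
  //
  size_t exec (ctx.count_executed ());
  size_t busy (ctx.count_busy ());
  size_t tc (ctx.count_applied ());

  if (s.task_count.compare_exchange_strong (
        tc, busy,
        memory_order_acq_rel,   // Synchronize on success.
        memory_order_acquire))  // Synchronize on failure.
  {
    // We own the target: run the recipe inline or queue it on the
    // scheduler, then publish the result and wake up any waiters:
    //
    // s.task_count.store (exec, memory_order_release);
    // ctx.sched->resume (s.task_count);
  }
  else if (tc >= busy) ;    // Someone else is executing it.
  else assert (tc == exec); // Already executed; result is in s.state.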
target_state
@@ -2060,6 +3006,7 @@ namespace build2
size_t exec (ctx.count_executed ());
size_t busy (ctx.count_busy ());
+ optional<target_state> r;
if (s.task_count.compare_exchange_strong (
tc,
busy,
@@ -2069,34 +3016,34 @@ namespace build2
if (s.state == target_state::unknown)
{
if (task_count == nullptr)
- return execute_impl (a, t);
-
- if (ctx.sched.async (start_count,
- *task_count,
- [a] (const diag_frame* ds, target& t)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (a, t);
- },
- diag_frame::stack (),
- ref (t)))
- return target_state::unknown; // Queued.
-
- // Executed synchronously, fall through.
+ r = execute_impl (a, t);
+ else
+ {
+ if (ctx.sched->async (start_count,
+ *task_count,
+ [a] (const diag_frame* ds, target& t)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (a, t);
+ },
+ diag_frame::stack (),
+ ref (t)))
+ return target_state::unknown; // Queued.
+
+ // Executed synchronously, fall through.
+ }
}
else
{
assert (s.state == target_state::unchanged ||
s.state == target_state::failed);
- if (s.state == target_state::unchanged)
- {
- if (t.is_a<dir> ())
- execute_recipe (a, t, nullptr /* recipe */);
- }
+ r = s.state == target_state::unchanged && t.is_a<dir> ()
+ ? execute_recipe (a, t, nullptr /* recipe */)
+ : s.state;
s.task_count.store (exec, memory_order_release);
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
}
}
else
@@ -2107,12 +3054,14 @@ namespace build2
else assert (tc == exec);
}
- return t.executed_state (a, false);
+ return r ? *r : t.executed_state (a, false /* fail */);
}
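On the caller side, the queued (target_state::unknown) outcome of the async variants is normally paired with a wait_guard, as in this sketch (mirroring the pattern clean_during_match_prerequisites() uses further below; 'targets' is a hypothetical container):

  // Sketch: batch-execute targets via the scheduler and harvest the
  // results once all of them have completed.
  //
  atomic_count& tc (t[a].task_count); // Busy during match; reusable.
  size_t busy (ctx.count_busy ());

  wait_guard wg (ctx, busy, tc);

  for (const target* pt: targets)
    execute_direct_async (a, *pt, busy, tc);

  wg.wait ();

  for (const target* pt: targets)
    target_state ns (execute_complete (a, *pt)); // Process ns here.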
bool
update_during_match (tracer& trace, action a, const target& t, timestamp ts)
{
+ // NOTE: see also clean_during_match() if changing anything here.
+
assert (a == perform_update_id);
// Note: this function is used to make sure header dependencies are up to
@@ -2184,6 +3133,11 @@ namespace build2
action a, target& t,
uintptr_t mask)
{
+ // NOTE: see also clean_during_match_prerequisites() if changing anything
+ // here.
+
+ assert (a == perform_update_id);
+
prerequisite_targets& pts (t.prerequisite_targets[a]);
// On the first pass detect and handle unchanged targets. Note that we
@@ -2194,7 +3148,7 @@ namespace build2
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0)
+ if (mask == 0 || (p.include & mask) != 0)
{
if (p.target != nullptr)
{
@@ -2219,6 +3173,16 @@ namespace build2
if (n == 0)
return false;
+ // Provide additional information on what's going on.
+ //
+ auto df = make_diag_frame (
+ [&t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while updating during match prerequisites of "
+ << "target " << t;
+ });
+
context& ctx (t.ctx);
phase_switch ps (ctx, run_phase::execute);
@@ -2231,7 +3195,7 @@ namespace build2
#if 0
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0 && p.data != 0)
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
{
const target& pt (*p.target);
@@ -2266,7 +3230,7 @@ namespace build2
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0 && p.data != 0)
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
{
execute_direct_async (a, *p.target, busy, tc);
}
@@ -2278,7 +3242,7 @@ namespace build2
//
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0 && p.data != 0)
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
{
const target& pt (*p.target);
target_state ns (execute_complete (a, pt));
@@ -2300,6 +3264,188 @@ namespace build2
return r;
}
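Note the semantics change to the mask test throughout this function: a zero mask now selects all prerequisites rather than none. In usage terms (the 0x100 include bit is hypothetical):

  // Update only prerequisites whose include flags have the
  // (hypothetical) 0x100 bit set:
  //
  update_during_match_prerequisites (trace, a, t, 0x100);

  // Update all prerequisites (the new meaning of a zero mask):
  //
  update_during_match_prerequisites (trace, a, t, 0);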
+ bool
+ clean_during_match (tracer& trace, action a, const target& t)
+ {
+ // Let's keep this as close to update_during_match() semantically as
+ // possible until we see a clear reason to deviate.
+
+ // We have a problem with fsdir{}: if we execute it here and now while
+ // the directory is not yet empty (because other targets that depend on
+ // it have not been cleaned yet), it will not remove the directory but
+ // will still end up in the executed state. As a result, when the other
+ // targets try to execute it later, that will be a noop and the
+ // directory will be left behind.
+
+ assert (a == perform_clean_id && !t.is_a<fsdir> ());
+
+ target_state os (t.matched_state (a));
+
+ if (os == target_state::unchanged)
+ return false;
+ else
+ {
+ target_state ns;
+ if (os != target_state::changed)
+ {
+ phase_switch ps (t.ctx, run_phase::execute);
+ ns = execute_direct_sync (a, t);
+ }
+ else
+ ns = os;
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << t
+ << "; old state " << os
+ << "; new state " << ns;});
+ return true;
+ }
+ else
+ return false;
+ }
+ }
+
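Given the fsdir{} restriction just described, callers are expected to filter such prerequisites out themselves; a minimal guard (sketch):

  // Sketch: clean during match everything except fsdir{} (see the
  // fsdir{} comment in clean_during_match() above).
  //
  for (const prerequisite_target& p: t.prerequisite_targets[a])
  {
    if (p.target != nullptr && !p.target->is_a<fsdir> ())
      clean_during_match (trace, a, *p.target);
  }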
+ bool
+ clean_during_match_prerequisites (tracer& trace,
+ action a, target& t,
+ uintptr_t mask)
+ {
+ // Let's keep this as close to update_during_match_prerequisites()
+ // semantically as possible until we see a clear reason to deviate.
+ //
+ // Currently the only substantial change is the reverse iteration order.
+
+ assert (a == perform_clean_id);
+
+ prerequisite_targets& pts (t.prerequisite_targets[a]);
+
+ // On the first pass detect and handle unchanged targets. Note that we
+ // have to do it in a separate pass since we cannot call matched_state()
+ // once we've switched the phase.
+ //
+ size_t n (0);
+
+ for (prerequisite_target& p: pts)
+ {
+ if (mask == 0 || (p.include & mask) != 0)
+ {
+ if (p.target != nullptr)
+ {
+ const target& pt (*p.target);
+
+ assert (!pt.is_a<fsdir> ()); // See above.
+
+ target_state os (pt.matched_state (a));
+
+ if (os != target_state::unchanged)
+ {
+ ++n;
+ p.data = static_cast<uintptr_t> (os);
+ continue;
+ }
+ }
+
+ p.data = 0;
+ }
+ }
+
+ // If all unchanged, we are done.
+ //
+ if (n == 0)
+ return false;
+
+ // Provide additional information on what's going on.
+ //
+ auto df = make_diag_frame (
+ [&t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while cleaning during match prerequisites of "
+ << "target " << t;
+ });
+
+ context& ctx (t.ctx);
+
+ phase_switch ps (ctx, run_phase::execute);
+
+ bool r (false);
+
+ // @@ Maybe we should optimize for n == 1? Maybe we should just call a
+ // smarter clean_during_match() in this case?
+ //
+#if 0
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ const target& pt (*p.target);
+
+ target_state os (static_cast<target_state> (p.data));
+ target_state ns (execute_direct_sync (a, pt));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
+ }
+#else
+
+ // Start asynchronous execution of prerequisites. Similar logic to
+ // straight_execute_members().
+ //
+ // Note that the target's task count is expected to be busy (since this
+ // function is called during match). And there don't seem to be any
+ // problems in using it for execute.
+ //
+ atomic_count& tc (t[a].task_count);
+
+ size_t busy (ctx.count_busy ());
+
+ wait_guard wg (ctx, busy, tc);
+
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ execute_direct_async (a, *p.target, busy, tc);
+ }
+ }
+
+ wg.wait ();
+
+ // Finish execution and process the result.
+ //
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ const target& pt (*p.target);
+ target_state ns (execute_complete (a, pt));
+ target_state os (static_cast<target_state> (p.data));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
+ }
+#endif
+
+ return r;
+ }
+
static inline void
blank_adhoc_member (const target*&)
{
@@ -2611,7 +3757,7 @@ namespace build2
target_state
noop_action (action a, const target& t)
{
- text << "noop action triggered for " << diag_doing (a, t);
+ error << "noop action triggered for " << diag_doing (a, t);
assert (false); // We shouldn't be called (see set_recipe()).
return target_state::unchanged;
}
@@ -2631,7 +3777,7 @@ namespace build2
target_state gs (execute_impl (a, g, 0, nullptr));
if (gs == target_state::busy)
- ctx.sched.wait (ctx.count_executed (),
+ ctx.sched->wait (ctx.count_executed (),
g[a].task_count,
scheduler::work_none);
@@ -2711,7 +3857,7 @@ namespace build2
case rmdir_status::not_empty:
{
if (verb >= 3)
- text << dp << " is current working directory, not removing";
+ info << dp << " is current working directory, not removing";
break;
}
case rmdir_status::not_exist:
@@ -2739,7 +3885,8 @@ namespace build2
target_state
perform_clean_extra (action a, const file& ft,
const clean_extras& extras,
- const clean_adhoc_extras& adhoc_extras)
+ const clean_adhoc_extras& adhoc_extras,
+ bool show_adhoc)
{
context& ctx (ft.ctx);
@@ -2771,6 +3918,12 @@ namespace build2
// Now clean the ad hoc group file members, if any.
//
+ // While at it, also collect the group target keys if we are showing
+ // the members. But only those that exist (since we don't want to
+ // print any diagnostics if none of them exist).
+ //
+ vector<target_key> tks;
+
for (const target* m (ft.adhoc_member);
m != nullptr;
m = m->adhoc_member)
@@ -2811,18 +3964,38 @@ namespace build2
? target_state::changed
: target_state::unchanged);
- if (r == target_state::changed && ep.empty ())
- ep = *mp;
-
- er |= r;
+ if (r == target_state::changed)
+ {
+ if (show_adhoc && verb == 1)
+ tks.push_back (mf->key ());
+ else if (ep.empty ())
+ {
+ ep = *mp;
+ er |= r;
+ }
+ }
}
}
// Now clean the primary target and its prerequisites in the reverse order
// of update: first remove the file, then clean the prerequisites.
//
- if (clean && !fp.empty () && rmfile (fp, ft))
- tr = target_state::changed;
+ if (clean && !fp.empty ())
+ {
+ if (show_adhoc && verb == 1 && !tks.empty ())
+ {
+ if (rmfile (fp, ft, 2 /* verbosity */))
+ tks.insert (tks.begin (), ft.key ());
+
+ print_diag ("rm", move (tks));
+ tr = target_state::changed;
+ }
+ else
+ {
+ if (rmfile (fp, ft))
+ tr = target_state::changed;
+ }
+ }
// Update timestamp in case there are operations after us that could use
// the information.
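For reference, a typical rule-side use of this helper passes extras naming auxiliary files to remove alongside the primary target; the ".d" dependency-database extra below is an assumption borrowed from common usage:

  // Sketch: clean the target file plus an auxiliary ".d" file next to
  // it (see clean_extras for the path/extension semantics).
  //
  static target_state
  perform_clean (action a, const target& t)
  {
    return perform_clean_extra (a, t.as<file> (), {".d"});
  }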
@@ -2842,10 +4015,20 @@ namespace build2
{
if (verb > (ctx.current_diag_noise ? 0 : 1) && verb < 3)
{
- if (ed)
- text << "rm -r " << path_cast<dir_path> (ep);
- else
- text << "rm " << ep;
+ if (verb >= 2)
+ {
+ if (ed)
+ text << "rm -r " << path_cast<dir_path> (ep);
+ else
+ text << "rm " << ep;
+ }
+ else if (verb)
+ {
+ if (ed)
+ print_diag ("rm -r", path_cast<dir_path> (ep));
+ else
+ print_diag ("rm", ep);
+ }
}
}
@@ -2878,10 +4061,17 @@ namespace build2
{
if (const target* m = gv.members[gv.count - 1])
{
- if (rmfile (m->as<file> ().path (), *m))
+ // Note that at the verbosity level 1 we don't show the removal of
+ // each group member. This is consistent with what is normally shown
+ // during update.
+ //
+ if (rmfile (m->as<file> ().path (), *m, 2 /* verbosity */))
tr |= target_state::changed;
}
}
+
+ if (tr == target_state::changed && verb == 1)
+ print_diag ("rm", g);
}
g.mtime (timestamp_nonexistent);
@@ -2890,10 +4080,20 @@ namespace build2
{
if (verb > (ctx.current_diag_noise ? 0 : 1) && verb < 3)
{
- if (ed)
- text << "rm -r " << path_cast<dir_path> (ep);
- else
- text << "rm " << ep;
+ if (verb >= 2)
+ {
+ if (ed)
+ text << "rm -r " << path_cast<dir_path> (ep);
+ else
+ text << "rm " << ep;
+ }
+ else if (verb)
+ {
+ if (ed)
+ print_diag ("rm -r", path_cast<dir_path> (ep));
+ else
+ print_diag ("rm", ep);
+ }
}
}
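Finally, several print_diag() overloads are exercised by this diff; for quick reference, the shapes inferred from the call sites (approximations, not declared signatures, with c standing for the operation name such as "rm"):

  // Inferred from the call sites above.
  //
  print_diag (c, t, d);          // <c> target -> dir
  print_diag (c, move (tks), d); // <c> {member keys} -> dir
  print_diag (c, move (tks));    // <c> {member keys}
  print_diag (c, ep);            // <c> path
  print_diag (c, g);             // <c> group target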