 libbuild2/algorithm.cxx | 238
 libbuild2/algorithm.hxx |   5
 libbuild2/algorithm.ixx |  12
 libbuild2/scheduler.hxx |   8
 libbuild2/scheduler.txx |   4
 libbuild2/target.hxx    |  31
 libbuild2/target.ixx    |   1
 7 files changed, 233 insertions(+), 66 deletions(-)
diff --git a/libbuild2/algorithm.cxx b/libbuild2/algorithm.cxx
index 2b5e402..4c404c6 100644
--- a/libbuild2/algorithm.cxx
+++ b/libbuild2/algorithm.cxx
@@ -232,8 +232,14 @@ namespace build2
// If the work_queue is absent, then we don't wait.
//
+ // While already applied or executed targets are normally not locked, if
+ // options contain any bits that are not already in cur_options, then the
+ // target is locked even in these states.
+ //
target_lock
- lock_impl (action a, const target& ct, optional<scheduler::work_queue> wq)
+ lock_impl (action a, const target& ct,
+ optional<scheduler::work_queue> wq,
+ uint64_t options)
{
context& ctx (ct.ctx);
@@ -248,7 +254,8 @@ namespace build2
size_t appl (b + target::offset_applied);
size_t busy (b + target::offset_busy);
- atomic_count& task_count (ct[a].task_count);
+ const target::opstate& cs (ct[a]);
+ atomic_count& task_count (cs.task_count);
while (!task_count.compare_exchange_strong (
e,
@@ -281,9 +288,10 @@ namespace build2
e = ctx.sched->wait (busy - 1, task_count, u, *wq);
}
- // We don't lock already applied or executed targets.
+ // We don't lock already applied or executed targets unless there
+ // are new options.
//
- if (e >= appl)
+ if (e >= appl && (cs.match_extra.cur_options & options) == options)
return target_lock {a, nullptr, e - b, false};
}
@@ -304,12 +312,7 @@ namespace build2
offset = target::offset_touched;
}
else
- {
offset = e - b;
- assert (offset == target::offset_touched ||
- offset == target::offset_tried ||
- offset == target::offset_matched);
- }
return target_lock {a, &t, offset, first};
}
@@ -551,6 +554,7 @@ namespace build2
//
const rule_match*
match_rule (action a, target& t,
+ uint64_t options,
const rule* skip,
bool try_match,
match_extra* pme)
@@ -567,6 +571,12 @@ namespace build2
return dynamic_cast<const fallback_rule*> (&r.second.get ());
};
+ // Note: we copy the options value to me.new_options after successfully
+ // matching the rule to make sure rule::match() implementations don't rely
+ // on it.
+ //
+ match_extra& me (pme == nullptr ? t[a].match_extra : *pme);
+
if (const target* g = t.group)
{
// If this is a group with dynamic members, then match it with the
@@ -580,6 +590,8 @@ namespace build2
{
const rule_match* r (g->state[a].rule);
assert (r != nullptr); // Shouldn't happen with dyn_members.
+
+ me.new_options = options; // Currently unused but maybe in future.
return r;
}
@@ -587,8 +599,8 @@ namespace build2
}
// If this is a member of group-based target, then first try to find a
- // matching ad hoc recipe/rule by matching (to an ad hoc recipe/rule) the
- // group but applying to the member. See adhoc_rule::match() for
+ // matching ad hoc recipe/rule by matching (to an ad hoc recipe/rule)
+ // the group but applying to the member. See adhoc_rule::match() for
// background, including for why const_cast should be safe.
//
// To put it another way, if a group is matched by an ad hoc
@@ -603,10 +615,16 @@ namespace build2
// We cannot init match_extra from the target if it's unlocked so use
// a temporary (it shouldn't be modified if unlocked).
//
- match_extra me (false /* locked */);
- if (const rule_match* r = match_rule (
- a, const_cast<target&> (*g), skip, true /* try_match */, &me))
+ match_extra gme (false /* locked */);
+ if (const rule_match* r = match_rule (a, const_cast<target&> (*g),
+ 0 /* options */,
+ skip,
+ true /* try_match */,
+ &gme))
+ {
+ me.new_options = options; // Currently unused but maybe in future.
return r;
+ }
// Fall through to normal match of the member.
}
@@ -620,8 +638,6 @@ namespace build2
if (const scope* rs = bs.root_scope ())
penv = auto_project_env (*rs);
- match_extra& me (pme == nullptr ? t[a].match_extra : *pme);
-
// First check for an ad hoc recipe.
//
// Note that a fallback recipe is preferred over a non-fallback rule.
@@ -629,7 +645,10 @@ namespace build2
if (!t.adhoc_recipes.empty ())
{
if (const rule_match* r = match_adhoc_recipe (a, t, me))
+ {
+ me.new_options = options;
return r;
+ }
}
// If this is an outer operation (Y-for-X), then we look for rules
@@ -744,8 +763,9 @@ namespace build2
}
}
- // Skip non-ad hoc rules if the target is not locked (see
- // above).
+ // Skip non-ad hoc rules if the target is not locked (see above;
+ // note that in this case match_extra is a temporary which we
+ // can reinit).
//
if (!me.locked && !adhoc_rule_match (*r))
continue;
@@ -825,7 +845,10 @@ namespace build2
}
if (!ambig)
+ {
+ me.new_options = options;
return r;
+ }
else
dr << info << "use rule hint to disambiguate this match";
}
@@ -938,10 +961,39 @@ namespace build2
recipe re (ar != nullptr ? f (*ar, a, t, me) : ru.apply (a, t, me));
- me.free ();
+ me.free (); // Note: cur_options are still in use.
return re;
}
+ static void
+ reapply_impl (action a,
+ target& t,
+ const pair<const string, reference_wrapper<const rule>>& m)
+ {
+ const scope& bs (t.base_scope ());
+
+ // Reapply rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ const rule& ru (m.second);
+ match_extra& me (t[a].match_extra);
+
+ auto df = make_diag_frame (
+ [a, &t, &m](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while reapplying rule " << m.first << " to "
+ << diag_do (a, t);
+ });
+
+ // Note: for now no adhoc_reapply().
+ //
+ ru.reapply (a, t, me);
+ }
+
// If anything goes wrong, set target state to failed and return false.
//
// Note: must be called while holding target_lock.
@@ -1029,10 +1081,24 @@ namespace build2
// the first half of the result.
//
static pair<bool, target_state>
- match_impl (target_lock& l,
- bool step = false,
- bool try_match = false)
+ match_impl_impl (target_lock& l,
+ uint64_t options,
+ bool step = false,
+ bool try_match = false)
{
+ // With regard to options, the semantics that we need to achieve for each
+ // target::offset_*:
+ //
+ // tried -- nothing to do (no match)
+ // touched -- set to new_options
+ // matched -- add to new_options
+ // applied -- reapply if any new options
+ // executed -- check and fail if any new options
+ // busy -- postpone until *_complete() call
+ //
+ // Note that if options is 0 (see resolve_{members,group}_impl()), then
+ // all this can be skipped.
+
assert (l.target != nullptr);
action a (l.action);
@@ -1056,6 +1122,16 @@ namespace build2
// [try_]match (a, g);
// match_recipe (l, group_recipe);
//
+ // What should we do with options? After some rumination it feels most
+ // natural to treat options for the group and for its ad hoc member as
+ // the same entity ... or not.
+ //
+ // @@ But we cannot reapply them if options change since there is
+ // no rule! Feels easiest to just assume no options for now? Or
+ // member options are group options (won't we then still need
+ // reapply() support via the member?). Need to keep this KISS for now so
+ // that we don't need to deal with _applied/_executed!
+ //
auto df = make_diag_frame (
[a, &t](const diag_record& dr)
{
@@ -1063,6 +1139,9 @@ namespace build2
dr << info << "while matching group rule to " << diag_do (a, t);
});
+ // @@ TODO: unclear what to do about options here.
+ // @@ TODO: this could also be _applied/_executed, theoretically.
+
pair<bool, target_state> r (match_impl (a, g, 0, nullptr, try_match));
if (r.first)
@@ -1103,7 +1182,8 @@ namespace build2
//
clear_target (a, t);
- const rule_match* r (match_rule (a, t, nullptr, try_match));
+ const rule_match* r (
+ match_rule (a, t, options, nullptr, try_match));
assert (l.offset != target::offset_tried); // Should have failed.
@@ -1125,12 +1205,44 @@ namespace build2
// Fall through.
case target::offset_matched:
{
+ // Add any new options.
+ //
+ s.match_extra.new_options |= options;
+
// Apply.
//
set_recipe (l, apply_impl (a, t, *s.rule));
l.offset = target::offset_applied;
break;
}
+ case target::offset_applied:
+ {
+ // Reapply if any new options.
+ //
+ match_extra& me (s.match_extra);
+ assert ((me.cur_options & options) != options); // Otherwise no lock.
+ me.new_options = options;
+
+ // Feels like this can only be a logic bug since to end up with a
+ // subset of options requires a rule (see match_extra for details).
+ //
+ assert (s.rule != nullptr);
+
+ reapply_impl (a, t, *s.rule);
+ break;
+ }
+ case target::offset_executed:
+ {
+ // Diagnose new options after execute.
+ //
+ match_extra& me (s.match_extra);
+ assert ((me.cur_options & options) != options); // Otherwise no lock.
+
+ fail << "change of match options after " << diag_do (a, t)
+ << " has been executed" <<
+ info << "executed options 0x" << hex << me.cur_options <<
+ info << "requested options 0x" << hex << options << endf;
+ }
default:
assert (false);
}
@@ -1159,6 +1271,9 @@ namespace build2
atomic_count* task_count,
bool try_match)
{
+
+ uint64_t options (match_extra::all_options); // @@ TMP
+
// If we are blocking then work our own queue one task at a time. The
// logic here is that we may have already queued other tasks before this
// one and there is nothing bad (except a potentially deep stack trace)
@@ -1178,22 +1293,24 @@ namespace build2
ct,
task_count == nullptr
? optional<scheduler::work_queue> (scheduler::work_none)
- : nullopt));
+ : nullopt,
+ options));
if (l.target != nullptr)
{
- assert (l.offset < target::offset_applied); // Shouldn't lock otherwise.
-
if (try_match && l.offset == target::offset_tried)
return make_pair (false, target_state::unknown);
if (task_count == nullptr)
{
- pair<bool, target_state> r (match_impl (l, false /*step*/, try_match));
+ bool applied (l.offset == target::offset_applied);
- if (r.first &&
- r.second != target_state::failed &&
- l.offset == target::offset_applied &&
+ pair<bool, target_state> r (
+ match_impl_impl (l, options, false /* step */, try_match));
+
+ if (r.first &&
+ r.second != target_state::failed &&
+ (!applied && l.offset == target::offset_applied) &&
ct.has_group_prerequisites ()) // Already matched.
{
if (!match_posthoc (a, *l.target))
@@ -1211,12 +1328,18 @@ namespace build2
// Also pass our diagnostics and lock stacks (this is safe since we
// expect the caller to wait for completion before unwinding its stack).
//
+ // Note: pack captures and arguments a bit to reduce the storage space
+ // requirements.
+ //
+ bool first (ld.first);
+
if (ct.ctx.sched->async (
start_count,
*task_count,
- [a, try_match] (const diag_frame* ds,
- const target_lock* ls,
- target& t, size_t offset, bool first)
+ [a, try_match, first] (const diag_frame* ds,
+ const target_lock* ls,
+ target& t, size_t offset,
+ uint64_t options)
{
// Switch to caller's diag and lock stacks.
//
@@ -1231,12 +1354,14 @@ namespace build2
//
target_lock l {a, &t, offset, first}; // Reassemble.
+ bool applied (l.offset == target::offset_applied);
+
pair<bool, target_state> r (
- match_impl (l, false /* step */, try_match));
+ match_impl_impl (l, options, false /* step */, try_match));
- if (r.first &&
- r.second != target_state::failed &&
- l.offset == target::offset_applied &&
+ if (r.first &&
+ r.second != target_state::failed &&
+ (!applied && l.offset == target::offset_applied) &&
t.has_group_prerequisites ()) // Already matched.
match_posthoc (a, t);
}
@@ -1245,9 +1370,8 @@ namespace build2
},
diag_frame::stack (),
target_lock::stack (),
- ref (*ld.target),
- ld.offset,
- ld.first))
+ ref (*ld.target), ld.offset,
+ options))
return make_pair (true, target_state::postponed); // Queued.
// Matched synchronously, fall through.
@@ -1270,12 +1394,26 @@ namespace build2
{
assert (t.ctx.phase == run_phase::match);
- target_lock l (lock_impl (a, t, scheduler::work_none));
+ uint64_t options (match_extra::all_options); // @@ TMP
- if (l.target != nullptr && l.offset < target::offset_matched)
+ target_lock l (lock_impl (a, t, scheduler::work_none, options));
+
+ if (l.target != nullptr)
{
- if (match_impl (l, true /* step */).second == target_state::failed)
- throw failed ();
+ if (l.offset != target::offset_matched)
+ {
+ if (match_impl_impl (l,
+ options,
+ true /* step */).second == target_state::failed)
+ throw failed ();
+ }
+ else
+ {
+ // If the target is already matched, then we need to add any new
+ // options but not call apply() (thus cannot use match_impl_impl()).
+ //
+ (*l.target)[a].match_extra.new_options |= options;
+ }
}
}
@@ -1299,7 +1437,9 @@ namespace build2
{
// Match (locked).
//
- if (match_impl (l, true /* step */).second == target_state::failed)
+ if (match_impl_impl (l,
+ 0 /* options */,
+ true /* step */).second == target_state::failed)
throw failed ();
// Note: only matched so no call to match_posthoc().
@@ -1314,7 +1454,8 @@ namespace build2
{
// Apply (locked).
//
- pair<bool, target_state> s (match_impl (l, true /* step */));
+ pair<bool, target_state> s (
+ match_impl_impl (l, 0 /* options */, true /* step */));
if (s.second != target_state::failed &&
g.has_group_prerequisites ()) // Already matched.
@@ -1440,7 +1581,10 @@ namespace build2
assert (a.inner ());
pair<bool, target_state> r (
- match_impl (l, true /* step */, true /* try_match */));
+ match_impl_impl (l,
+ 0 /* options */,
+ true /* step */,
+ true /* try_match */));
if (r.first &&
r.second != target_state::failed &&
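
To make the bitmask test that now gates locking and reapplying concrete (the (cur_options & options) == options check in lock_impl() and the asserts in match_impl_impl() above), here is a small standalone sketch; the helper names are hypothetical and not part of build2:

#include <cstdint>

// Hypothetical helpers illustrating the subset test used above: a target
// that is already applied/executed only needs to be locked (and reapplied)
// if the requested options contain bits not yet in cur_options.
//
inline bool
subsumes (std::uint64_t cur_options, std::uint64_t options)
{
  // True if every requested option is already enabled.
  //
  return (cur_options & options) == options;
}

inline std::uint64_t
new_bits (std::uint64_t cur_options, std::uint64_t options)
{
  // The bits that would actually trigger a reapply (or a failure if the
  // target has already been executed).
  //
  return options & ~cur_options;
}
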
diff --git a/libbuild2/algorithm.hxx b/libbuild2/algorithm.hxx
index 983e7f5..32ab7be 100644
--- a/libbuild2/algorithm.hxx
+++ b/libbuild2/algorithm.hxx
@@ -434,7 +434,10 @@ namespace build2
// See also the companion execute_delegate().
//
recipe
- match_delegate (action, target&, const rule&, bool try_match = false);
+ match_delegate (action, target&,
+ const rule&,
+ bool try_match = false,
+ uint64_t options = match_extra::all_options);
// Incrementing the dependency counts of the specified target.
//
diff --git a/libbuild2/algorithm.ixx b/libbuild2/algorithm.ixx
index 6d83984..51f7699 100644
--- a/libbuild2/algorithm.ixx
+++ b/libbuild2/algorithm.ixx
@@ -214,7 +214,9 @@ namespace build2
}
LIBBUILD2_SYMEXPORT target_lock
- lock_impl (action, const target&, optional<scheduler::work_queue>);
+ lock_impl (action, const target&,
+ optional<scheduler::work_queue>,
+ uint64_t = 0);
LIBBUILD2_SYMEXPORT void
unlock_impl (action, target&, size_t);
@@ -393,6 +395,7 @@ namespace build2
LIBBUILD2_SYMEXPORT const rule_match*
match_rule (action, target&,
+ uint64_t options,
const rule* skip,
bool try_match = false,
match_extra* = nullptr);
@@ -630,14 +633,17 @@ namespace build2
}
inline recipe
- match_delegate (action a, target& t, const rule& dr, bool try_match)
+ match_delegate (action a, target& t,
+ const rule& dr,
+ bool try_match,
+ uint64_t options)
{
assert (t.ctx.phase == run_phase::match);
// Note: we don't touch any of the t[a] state since that was/will be set
// for the delegating rule.
//
- const rule_match* r (match_rule (a, t, &dr, try_match));
+ const rule_match* r (match_rule (a, t, options, &dr, try_match));
return r != nullptr ? apply_impl (a, t, *r) : empty_recipe;
}
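
As a usage illustration of the extended match_delegate() signature, here is a hedged sketch of a caller that requests a specific option instead of the default all_options; the helper function, the delegated-to rule, and the option bit are all hypothetical, and the required headers are assumed to be self-sufficient:

#include <libbuild2/algorithm.hxx>

using namespace build2;

// Hypothetical helper meant to be called from within a delegating rule's
// apply() (i.e., during the match phase, with the target matched/locked).
//
recipe
delegate_with_options (action a, target& t, const rule& to)
{
  const uint64_t opt_update_members (0x01); // Hypothetical option bit.

  // Request only the option we rely on instead of the default
  // match_extra::all_options.
  //
  return match_delegate (a, t, to, false /* try_match */, opt_update_members);
}
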
diff --git a/libbuild2/scheduler.hxx b/libbuild2/scheduler.hxx
index dcddfcc..d3b8ceb 100644
--- a/libbuild2/scheduler.hxx
+++ b/libbuild2/scheduler.hxx
@@ -555,8 +555,8 @@ namespace build2
atomic_count* task_count;
size_t start_count;
- func_type func;
args_type args;
+ func_type func;
template <size_t... i>
void
@@ -685,7 +685,11 @@ namespace build2
//
struct task_data
{
- alignas (std::max_align_t) unsigned char data[sizeof (void*) * 8];
+ static const size_t data_size = (sizeof (void*) == 4
+ ? sizeof (void*) * 16
+ : sizeof (void*) * 8);
+
+ alignas (std::max_align_t) unsigned char data[data_size];
void (*thunk) (scheduler&, lock&, void*);
};
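
A note on the task_data sizing above: with the new formula the capture buffer comes out to 64 bytes on both 32-bit and 64-bit targets (16 four-byte slots vs 8 eight-byte slots), which presumably gives the extra packed uint64_t options argument room on 32-bit builds as well. A standalone illustration (not build2 code):

#include <cstddef>

// Standalone illustration of the sizing formula: the buffer is 16
// pointer-sized slots on 32-bit targets and 8 on 64-bit ones, i.e., 64
// bytes either way.
//
constexpr std::size_t
task_data_size (std::size_t ptr_size)
{
  return ptr_size == 4 ? ptr_size * 16 : ptr_size * 8;
}

static_assert (task_data_size (4) == 64, "32-bit: 16 x 4-byte slots");
static_assert (task_data_size (8) == 64, "64-bit: 8 x 8-byte slots");
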
diff --git a/libbuild2/scheduler.txx b/libbuild2/scheduler.txx
index 5c6b339..460c4d4 100644
--- a/libbuild2/scheduler.txx
+++ b/libbuild2/scheduler.txx
@@ -64,8 +64,8 @@ namespace build2
new (&td->data) task {
&task_count,
start_count,
- decay_copy (forward<F> (f)),
- typename task::args_type (decay_copy (forward<A> (a))...)};
+ typename task::args_type (decay_copy (forward<A> (a))...),
+ decay_copy (forward<F> (f))};
td->thunk = &task_thunk<F, A...>;
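
The reordering of the initializers here simply mirrors the args/func declaration swap in scheduler.hxx: braced initialization of an aggregate binds initializers to members in declaration order, so the two must stay in sync. A minimal, made-up illustration:

#include <functional>
#include <tuple>

// Minimal illustration: initializers in a braced aggregate initialization
// bind to members in declaration order, so swapping the members requires
// swapping the initializers as well.
//
struct task
{
  int* task_count;
  std::tuple<int, int> args;     // Declared before func...
  std::function<void ()> func;
};

int count;
task t {&count, std::make_tuple (1, 2), [] {}}; // ...so args comes second here too.
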
diff --git a/libbuild2/target.hxx b/libbuild2/target.hxx
index ed8c0bf..32f717f 100644
--- a/libbuild2/target.hxx
+++ b/libbuild2/target.hxx
@@ -184,15 +184,22 @@ namespace build2
//
// On initial match()/apply(), cur_options is initialized to ~0 (all
// options enabled) and the matching rule is expected to override it with
- // new_options in match() (if it matches). This way a rule that does not
- // support any match options does not need to do anything. On rematch in
- // the reapply() call, cur_options are the currently enabled options and
- // new_options are the newly requested options. Here the rule is expected
- // to factor new_options to cur_options as appropriate. Note also that on
- // rematch, if current options already include new options, then no call
- // to reapply() is made. This, in particular, means that a rule that does
- // not adjust cur_options in match() will never get a reapply() call
- // (because all the options are enabled from the start).
+ // new_options in apply() (note that match() should not base any decisions
+ // on new_options since they may change between match() and apply()). This
+ // way a rule that does not support any match options does not need to do
+ // anything. On rematch in the reapply() call, cur_options are the
+ // currently enabled options and new_options are the newly requested
+ // options. Here the rule is expected to factor new_options to cur_options
+ // as appropriate. Note also that on rematch, if current options already
+ // include new options, then no call to reapply() is made. This, in
+ // particular, means that a rule that does not adjust cur_options in
+ // apply() will never get a reapply() call (because all the options are
+ // enabled from the start).
+ //
+ // Note: options are currently not supported in ad hoc recipes/rules.
+ //
+ // @@ We could use 0 new_options (which otherwise don't make sense) for
+ // match for the purpose of resolving members?
//
// @@ TODO: clear already enabled options from new_options on rematch.
//
@@ -789,7 +796,11 @@ namespace build2
//
mutable atomic_count dependents {0};
- // Match state storage between the match() and apply() calls.
+ // Match state storage between the match() and apply() calls, with only
+ // the *_options members extending to reapply().
+ //
+ // Note: in reality, cur_options are used beyond (re)apply() as an
+ // implementation detail.
//
build2::match_extra match_extra;
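
As a reading aid for the revised cur_options/new_options contract documented above, here is a rough sketch of the bookkeeping a rule supporting match options would perform. It uses free functions standing in for the rule's apply()/reapply() overrides and is not an actual build2 rule:

#include <libbuild2/target.hxx> // build2::match_extra

namespace sketch
{
  using build2::match_extra;

  // In apply(): override the all-enabled default with the options that
  // were actually requested for this match.
  //
  void
  on_apply (match_extra& me)
  {
    me.cur_options = me.new_options;
  }

  // In reapply(): only called when new_options contains bits not yet in
  // cur_options; fold them in and redo the corresponding part of the
  // apply() work.
  //
  void
  on_reapply (match_extra& me)
  {
    me.cur_options |= me.new_options;
  }
}
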
diff --git a/libbuild2/target.ixx b/libbuild2/target.ixx
index 8873b7b..03cf444 100644
--- a/libbuild2/target.ixx
+++ b/libbuild2/target.ixx
@@ -138,7 +138,6 @@ namespace build2
inline void match_extra::
reinit (bool f)
{
- //assert (locked);
clear_data ();
fallback = f;
cur_options = all_options;