aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--build2/algorithm.cxx688
-rw-r--r--build2/algorithm.hxx87
-rw-r--r--build2/algorithm.ixx144
-rw-r--r--build2/bin/init.cxx25
-rw-r--r--build2/bin/rule.cxx24
-rw-r--r--build2/bin/rule.hxx9
-rw-r--r--build2/bin/target.cxx4
-rw-r--r--build2/bin/target.hxx2
-rw-r--r--build2/cc/common.cxx28
-rw-r--r--build2/cc/common.hxx8
-rw-r--r--build2/cc/compile-rule.cxx (renamed from build2/cc/compile.cxx)246
-rw-r--r--build2/cc/compile-rule.hxx (renamed from build2/cc/compile.hxx)27
-rw-r--r--build2/cc/install-rule.cxx (renamed from build2/cc/install.cxx)91
-rw-r--r--build2/cc/install-rule.hxx77
-rw-r--r--build2/cc/install.hxx67
-rw-r--r--build2/cc/link-rule.cxx (renamed from build2/cc/link.cxx)244
-rw-r--r--build2/cc/link-rule.hxx (renamed from build2/cc/link.hxx)59
-rw-r--r--build2/cc/module.cxx33
-rw-r--r--build2/cc/module.hxx22
-rw-r--r--build2/cc/pkgconfig.cxx37
-rw-r--r--build2/cc/windows-manifest.cxx6
-rw-r--r--build2/cc/windows-rpath.cxx42
-rw-r--r--build2/cli/init.cxx10
-rw-r--r--build2/cli/rule.cxx8
-rw-r--r--build2/cli/rule.hxx6
-rw-r--r--build2/cli/target.cxx2
-rw-r--r--build2/cli/target.hxx2
-rw-r--r--build2/context.hxx2
-rw-r--r--build2/dist/rule.cxx2
-rw-r--r--build2/dist/rule.hxx3
-rw-r--r--build2/dump.cxx6
-rw-r--r--build2/install/init.cxx2
-rw-r--r--build2/install/rule.cxx257
-rw-r--r--build2/install/rule.hxx73
-rw-r--r--build2/operation.cxx19
-rw-r--r--build2/operation.hxx35
-rw-r--r--build2/parser.cxx13
-rw-r--r--build2/rule-map.hxx2
-rw-r--r--build2/rule.cxx14
-rw-r--r--build2/rule.hxx44
-rw-r--r--build2/target.cxx98
-rw-r--r--build2/target.hxx166
-rw-r--r--build2/target.ixx104
-rw-r--r--build2/test/common.hxx22
-rw-r--r--build2/test/init.cxx111
-rw-r--r--build2/test/module.hxx13
-rw-r--r--build2/test/rule.cxx716
-rw-r--r--build2/test/rule.hxx38
-rw-r--r--build2/test/script/runner.cxx4
-rw-r--r--build2/test/script/script.cxx8
-rw-r--r--build2/variable.cxx15
-rw-r--r--build2/variable.ixx10
-rw-r--r--build2/version/init.cxx16
-rw-r--r--build2/version/rule.cxx22
-rw-r--r--build2/version/rule.hxx12
-rw-r--r--doc/testscript.cli4
-rw-r--r--tests/cc/preprocessed/testscript2
-rw-r--r--tests/test/config-test/testscript3
58 files changed, 1935 insertions, 1899 deletions
diff --git a/build2/algorithm.cxx b/build2/algorithm.cxx
index 7ef5267..0e57c08 100644
--- a/build2/algorithm.cxx
+++ b/build2/algorithm.cxx
@@ -100,7 +100,7 @@ namespace build2
return q ? import_existing (pk) : search_existing_target (pk);
}
- // If the work_queue is not present, then we don't wait.
+ // If the work_queue is absent, then we don't wait.
//
target_lock
lock_impl (action a, const target& ct, optional<scheduler::work_queue> wq)
@@ -113,27 +113,25 @@ namespace build2
size_t b (target::count_base ());
size_t e (b + target::offset_touched - 1);
- size_t lock (b + target::offset_locked);
+ size_t appl (b + target::offset_applied);
size_t busy (b + target::offset_busy);
- for (;;)
+ atomic_count& task_count (ct[a].task_count);
+
+ while (!task_count.compare_exchange_strong (
+ e,
+ busy,
+ memory_order_acq_rel, // Synchronize on success.
+ memory_order_acquire)) // Synchronize on failure.
{
- // First try to grab the spin lock which we later may upgrade to busy.
+ // Wait for the count to drop below busy if someone is already working
+ // on this target.
//
- if (ct.task_count.compare_exchange_strong (
- e,
- lock,
- memory_order_acq_rel, // Synchronize on success.
- memory_order_acquire)) // Synchronize on failure.
+ if (e >= busy)
{
- break;
- }
+ if (!wq)
+ return target_lock {a, nullptr, e - b};
- while (e == lock || e >= busy)
- {
- // Wait for the count to drop below busy if someone is already working
- // on this target.
- //
// We also unlock the phase for the duration of the wait. Why?
// Consider this scenario: we are trying to match a dir{} target whose
// buildfile still needs to be loaded. Let's say someone else started
@@ -141,118 +139,54 @@ namespace build2
// to switch the phase to load. Which would result in a deadlock
// unless we release the phase.
//
- if (e >= busy)
- {
- if (!wq)
- return target_lock {nullptr, e - b};
-
- phase_unlock ul;
- e = sched.wait (busy - 1, ct.task_count, *wq);
- }
-
- // Spin if it is locked.
- //
- for (; e == lock; e = ct.task_count.load (memory_order_acquire))
- this_thread::yield ();
+ phase_unlock ul;
+ e = sched.wait (busy - 1, task_count, *wq);
}
+
+ // We don't lock already applied or executed targets.
+ //
+ if (e >= appl)
+ return target_lock {a, nullptr, e - b};
}
- // We now have the sping lock. Analyze the old value and decide what to
- // do.
+ // We now have the lock. Analyze the old value and decide what to do.
//
target& t (const_cast<target&> (ct));
+ target::opstate& s (t[a]);
size_t offset;
if (e <= b)
{
// First lock for this operation.
//
- t.action = a;
- t.rule = nullptr;
- t.dependents.store (0, memory_order_release);
+ s.rule = nullptr;
+ s.dependents.store (0, memory_order_release);
+
offset = target::offset_touched;
}
else
{
offset = e - b;
-
- switch (offset)
- {
- case target::offset_executed:
- {
- if (t.action >= a)
- {
- // We don't lock already executed targets.
- //
- t.task_count.store (e, memory_order_release);
- return target_lock {nullptr, target::offset_executed};
- }
-
- // Override, fall through.
- //
- assert (a > t.action);
- }
- case target::offset_touched:
- case target::offset_tried:
- case target::offset_matched:
- case target::offset_applied:
- {
- if (a > t.action)
- {
- // Only noop_recipe can be overridden.
- //
- if (offset >= target::offset_applied)
- {
- recipe_function** f (t.recipe_.target<recipe_function*> ());
- assert (f != nullptr && *f == &noop_action);
- }
-
- t.action = a;
- t.rule = nullptr;
- offset = target::offset_touched; // Back to just touched.
- }
- else
- {
- assert (t.action >= a);
-
- // Release the lock if already applied for this action. This is
- // necessary no to confuse execute since otherwise it might see
- // that the target is busy and assume that someone is already
- // executing it. Note that we cannot handle this case in the loop
- // above since we need to lock target::action.
- //
- if (offset == target::offset_applied || t.action > a)
- {
- // Release the spin lock.
- //
- t.task_count.store (e, memory_order_release);
- return target_lock {nullptr, offset};
- }
- }
-
- break;
- }
- default:
- assert (false);
- }
+ assert (offset == target::offset_touched ||
+ offset == target::offset_tried ||
+ offset == target::offset_matched);
}
- // We are keeping it so upgrade to busy.
- //
- t.task_count.store (busy, memory_order_release);
- return target_lock (&t, offset);
+ return target_lock (a, &t, offset);
}
void
- unlock_impl (target& t, size_t offset)
+ unlock_impl (action a, target& t, size_t offset)
{
assert (phase == run_phase::match);
+ atomic_count& task_count (t[a].task_count);
+
// Set the task count and wake up any threads that might be waiting for
// this target.
//
- t.task_count.store (offset + target::count_base (), memory_order_release);
- sched.resume (t.task_count);
+ task_count.store (offset + target::count_base (), memory_order_release);
+ sched.resume (task_count);
}
target_lock
@@ -283,160 +217,137 @@ namespace build2
return l;
};
- // Return the matching rule and the recipe action.
+ // Return the matching rule or NULL if no match and try_match is true.
//
- pair<const pair<const string, reference_wrapper<const rule>>*, action>
+ const rule_match*
match_impl (action a, target& t, const rule* skip, bool try_match)
{
- // Clear the resolved targets list before calling match(). The rule is
- // free to modify this list in match() (provided that it matches) in order
- // to, for example, prepare it for apply().
+ // If this is an outer operation (Y_for_X), then we look for rules
+ // registered for the outer id (X). Note that we still pass the original
+ // action to the rule's match() function so that it can distinguish
+ // between a pre/post operation (Y_for_X) and the actual operation (X).
//
- t.clear_data ();
- t.prerequisite_targets.clear ();
+ meta_operation_id mo (a.meta_operation ());
+ operation_id o (a.inner () ? a.operation () : a.outer_operation ());
- // If this is a nested operation, first try the outer operation.
- // This allows a rule to implement a "precise match", that is,
- // both inner and outer operations match.
- //
- for (operation_id oo (a.outer_operation ()), io (a.operation ()),
- o (oo != 0 ? oo : io);
- o != 0;
- o = (oo != 0 && o != io ? io : 0))
+ const scope& bs (t.base_scope ());
+
+ for (auto tt (&t.type ()); tt != nullptr; tt = tt->base)
{
- // Adjust action for recipe: on the first iteration we want it
- // {inner, outer} (which is the same as 'a') while on the second
- // -- {inner, 0}. Note that {inner, 0} is the same or "stronger"
- // (i.e., overrides; see action::operator<()) than 'a'. This
- // allows "unconditional inner" to override "inner for outer"
- // recipes.
+ // Search scopes outwards, stopping at the project root.
//
- action ra (a.meta_operation (), io, o != oo ? 0 : oo);
+ for (const scope* s (&bs);
+ s != nullptr;
+ s = s->root () ? global_scope : s->parent_scope ())
+ {
+ const operation_rule_map* om (s->rules[mo]);
- const scope& bs (t.base_scope ());
+ if (om == nullptr)
+ continue; // No entry for this meta-operation id.
- for (auto tt (&t.type ()); tt != nullptr; tt = tt->base)
- {
- // Search scopes outwards, stopping at the project root.
+ // First try the map for the actual operation. If that doesn't yeld
+ // anything, try the wildcard map.
//
- for (const scope* s (&bs);
- s != nullptr;
- s = s->root () ? global_scope : s->parent_scope ())
+ for (operation_id oi (o), oip (o); oip != 0; oip = oi, oi = 0)
{
- const operation_rule_map* om (s->rules[a.meta_operation ()]);
+ const target_type_rule_map* ttm ((*om)[oi]);
- if (om == nullptr)
- continue; // No entry for this meta-operation id.
+ if (ttm == nullptr)
+ continue; // No entry for this operation id.
- // First try the map for the actual operation. If that doesn't yeld
- // anything, try the wildcard map.
- //
- for (operation_id oi (o), oip (o); oip != 0; oip = oi, oi = 0)
- {
- const target_type_rule_map* ttm ((*om)[oi]);
+ if (ttm->empty ())
+ continue; // Empty map for this operation id.
- if (ttm == nullptr)
- continue; // No entry for this operation id.
+ auto i (ttm->find (tt));
- if (ttm->empty ())
- continue; // Empty map for this operation id.
+ if (i == ttm->end () || i->second.empty ())
+ continue; // No rules registered for this target type.
- auto i (ttm->find (tt));
+ const auto& rules (i->second); // Hint map.
- if (i == ttm->end () || i->second.empty ())
- continue; // No rules registered for this target type.
+ // @@ TODO
+ //
+ // Different rules can be used for different operations (update vs
+ // test is a good example). So, at some point, we will probably have
+ // to support a list of hints or even an operation-hint map (e.g.,
+ // 'hint=cxx test=foo' if cxx supports the test operation but we
+ // want the foo rule instead). This is also the place where the
+ // '{build clean}=cxx' construct (which we currently do not support)
+ // can come handy.
+ //
+ // Also, ignore the hint (that is most likely ment for a different
+ // operation) if this is a unique match.
+ //
+ string hint;
+ auto rs (rules.size () == 1
+ ? make_pair (rules.begin (), rules.end ())
+ : rules.find_sub (hint));
- const auto& rules (i->second); // Hint map.
+ for (auto i (rs.first); i != rs.second; ++i)
+ {
+ const auto& r (*i);
+ const string& n (r.first);
+ const rule& ru (r.second);
- // @@ TODO
- //
- // Different rules can be used for different operations (update
- // vs test is a good example). So, at some point, we will probably
- // have to support a list of hints or even an operation-hint map
- // (e.g., 'hint=cxx test=foo' if cxx supports the test operation
- // but we want the foo rule instead). This is also the place where
- // the '{build clean}=cxx' construct (which we currently do not
- // support) can come handy.
- //
- // Also, ignore the hint (that is most likely ment for a different
- // operation) if this is a unique match.
- //
- string hint;
- auto rs (rules.size () == 1
- ? make_pair (rules.begin (), rules.end ())
- : rules.find_sub (hint));
+ if (&ru == skip)
+ continue;
- for (auto i (rs.first); i != rs.second; ++i)
{
- const auto& r (*i);
- const string& n (r.first);
- const rule& ru (r.second);
+ auto df = make_diag_frame (
+ [a, &t, &n](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching rule " << n << " to "
+ << diag_do (a, t);
+ });
- if (&ru == skip)
+ if (!ru.match (a, t, hint))
continue;
+ }
+
+ // Do the ambiguity test.
+ //
+ bool ambig (false);
+
+ diag_record dr;
+ for (++i; i != rs.second; ++i)
+ {
+ const string& n1 (i->first);
+ const rule& ru1 (i->second);
- match_result m (false);
{
auto df = make_diag_frame (
- [ra, &t, &n](const diag_record& dr)
+ [a, &t, &n1](const diag_record& dr)
{
if (verb != 0)
- dr << info << "while matching rule " << n << " to "
- << diag_do (ra, t);
+ dr << info << "while matching rule " << n1 << " to "
+ << diag_do (a, t);
});
- if (!(m = ru.match (ra, t, hint)))
+ // @@ TODO: this makes target state in match() undetermined
+ // so need to fortify rules that modify anything in match
+ // to clear things.
+ //
+ // @@ Can't we temporarily swap things out in target?
+ //
+ if (!ru1.match (a, t, hint))
continue;
-
- if (m.recipe_action.valid ())
- assert (m.recipe_action > ra);
- else
- m.recipe_action = ra; // Default, if not set.
}
- // Do the ambiguity test.
- //
- bool ambig (false);
-
- diag_record dr;
- for (++i; i != rs.second; ++i)
+ if (!ambig)
{
- const string& n1 (i->first);
- const rule& ru1 (i->second);
-
- {
- auto df = make_diag_frame (
- [ra, &t, &n1](const diag_record& dr)
- {
- if (verb != 0)
- dr << info << "while matching rule " << n1 << " to "
- << diag_do (ra, t);
- });
-
- // @@ TODO: this makes target state in match() undetermined
- // so need to fortify rules that modify anything in match
- // to clear things.
- //
- if (!ru1.match (ra, t, hint))
- continue;
- }
-
- if (!ambig)
- {
- dr << fail << "multiple rules matching "
- << diag_doing (ra, t)
- << info << "rule " << n << " matches";
- ambig = true;
- }
-
- dr << info << "rule " << n1 << " also matches";
+ dr << fail << "multiple rules matching " << diag_doing (a, t)
+ << info << "rule " << n << " matches";
+ ambig = true;
}
- if (!ambig)
- return make_pair (&r, m.recipe_action);
- else
- dr << info << "use rule hint to disambiguate this match";
+ dr << info << "rule " << n1 << " also matches";
}
+
+ if (!ambig)
+ return &r;
+ else
+ dr << info << "use rule hint to disambiguate this match";
}
}
}
@@ -451,14 +362,13 @@ namespace build2
dr << info << "re-run with --verbose 4 for more information";
}
- return pair<const pair<const string, reference_wrapper<const rule>>*,
- action> {nullptr, a};
+ return nullptr;
}
recipe
- apply_impl (target& t,
- const pair<const string, reference_wrapper<const rule>>& r,
- action a)
+ apply_impl (action a,
+ target& t,
+ const pair<const string, reference_wrapper<const rule>>& r)
{
auto df = make_diag_frame (
[a, &t, &r](const diag_record& dr)
@@ -468,9 +378,6 @@ namespace build2
<< diag_do (a, t);
});
- // @@ We could also allow the rule to change the recipe action in
- // apply(). Could be useful with delegates.
- //
return r.second.get ().apply (a, t);
}
@@ -480,13 +387,15 @@ namespace build2
// the first half of the result.
//
static pair<bool, target_state>
- match_impl (action a,
- target_lock& l,
+ match_impl (target_lock& l,
bool step = false,
bool try_match = false)
{
assert (l.target != nullptr);
+
+ action a (l.action);
target& t (*l.target);
+ target::opstate& s (t[a]);
try
{
@@ -506,29 +415,38 @@ namespace build2
{
// Match.
//
- auto mr (match_impl (a, t, nullptr, try_match));
- if (mr.first == nullptr) // Not found (try_match == true).
+ // Clear the resolved targets list and the data pad before calling
+ // match(). The rule is free to modify these in its match()
+ // (provided that it matches) in order to, for example, convey some
+ // information to apply().
+ //
+ t.prerequisite_targets[a].clear ();
+ if (a.inner ()) t.clear_data ();
+
+ const rule_match* r (match_impl (a, t, nullptr, try_match));
+
+ if (r == nullptr) // Not found (try_match == true).
{
l.offset = target::offset_tried;
return make_pair (false, target_state::unknown);
}
- t.rule = mr.first;
- t.action = mr.second; // In case overriden.
+ s.rule = r;
l.offset = target::offset_matched;
if (step)
- // t.state_ is not yet set.
- //
+ // Note: s.state is still undetermined.
return make_pair (true, target_state::unknown);
+
+ // Otherwise ...
}
// Fall through.
case target::offset_matched:
{
// Apply.
//
- t.recipe (apply_impl (t, *t.rule, t.action));
+ set_recipe (l, apply_impl (a, t, *s.rule));
l.offset = target::offset_applied;
break;
}
@@ -541,14 +459,14 @@ namespace build2
// As a sanity measure clear the target data since it can be incomplete
// or invalid (mark()/unmark() should give you some ideas).
//
- t.clear_data ();
- t.prerequisite_targets.clear ();
+ t.prerequisite_targets[a].clear ();
+ if (a.inner ()) t.clear_data ();
- t.state_ = target_state::failed;
+ s.state = target_state::failed;
l.offset = target::offset_applied;
}
- return make_pair (true, t.state_);
+ return make_pair (true, s.state);
}
// If try_match is true, then indicate whether there is a rule match with
@@ -599,7 +517,7 @@ namespace build2
return make_pair (false, target_state::unknown);
if (task_count == nullptr)
- return match_impl (a, l, false /* step */, try_match);
+ return match_impl (l, false /* step */, try_match);
// Pass "disassembled" lock since the scheduler queue doesn't support
// task destruction. Also pass our diagnostics stack (this is safe since
@@ -617,8 +535,8 @@ namespace build2
{
phase_lock pl (run_phase::match); // Can throw.
{
- target_lock l {&t, offset}; // Reassemble.
- match_impl (a, l, false /* step */, try_match);
+ target_lock l {a, &t, offset}; // Reassemble.
+ match_impl (l, false /* step */, try_match);
// Unlock withing the match phase.
}
}
@@ -651,7 +569,7 @@ namespace build2
{
// Match (locked).
//
- if (match_impl (a, l, true).second == target_state::failed)
+ if (match_impl (l, true).second == target_state::failed)
throw failed ();
if ((r = g.group_members (a)).members != nullptr)
@@ -666,10 +584,14 @@ namespace build2
// not seem like it will be easy to fix (we don't know whether
// someone else will execute this target).
//
+ // @@ What if we always do match & execute together? After all,
+ // if a group can be resolved in apply(), then it can be
+ // resolved in match()!
+ //
// Apply (locked).
//
- if (match_impl (a, l, true).second == target_state::failed)
+ if (match_impl (l, true).second == target_state::failed)
throw failed ();
if ((r = g.group_members (a)).members != nullptr)
@@ -707,12 +629,12 @@ namespace build2
static void
match_prerequisite_range (action a, target& t, R&& r, const scope* s)
{
- auto& pts (t.prerequisite_targets);
+ auto& pts (t.prerequisite_targets[a]);
// Start asynchronous matching of prerequisites. Wait with unlocked phase
// to allow phase switching.
//
- wait_guard wg (target::count_busy (), t.task_count, true);
+ wait_guard wg (target::count_busy (), t[a].task_count, true);
size_t i (pts.size ()); // Index of the first to be added.
for (auto&& p: forward<R> (r))
@@ -722,7 +644,7 @@ namespace build2
if (s != nullptr && !pt.in (*s))
continue;
- match_async (a, pt, target::count_busy (), t.task_count);
+ match_async (a, pt, target::count_busy (), t[a].task_count);
pts.push_back (&pt);
}
@@ -751,12 +673,12 @@ namespace build2
template <typename T>
void
- match_members (action a, target& t, T ts[], size_t n)
+ match_members (action a, target& t, T const* ts, size_t n)
{
// Pretty much identical to match_prerequisite_range() except we don't
// search.
//
- wait_guard wg (target::count_busy (), t.task_count, true);
+ wait_guard wg (target::count_busy (), t[a].task_count, true);
for (size_t i (0); i != n; ++i)
{
@@ -765,7 +687,7 @@ namespace build2
if (m == nullptr || marked (m))
continue;
- match_async (a, *m, target::count_busy (), t.task_count);
+ match_async (a, *m, target::count_busy (), t[a].task_count);
}
wg.wait ();
@@ -786,11 +708,11 @@ namespace build2
// Instantiate only for what we need.
//
template void
- match_members<const target*> (action, target&, const target*[], size_t);
+ match_members<const target*> (action, target&, const target* const*, size_t);
template void
match_members<prerequisite_target> (
- action, target&, prerequisite_target[], size_t);
+ action, target&, prerequisite_target const*, size_t);
const fsdir*
inject_fsdir (action a, target& t, bool parent)
@@ -844,7 +766,7 @@ namespace build2
if (r != nullptr)
{
match (a, *r);
- t.prerequisite_targets.emplace_back (r);
+ t.prerequisite_targets[a].emplace_back (r);
}
return r;
@@ -922,16 +844,16 @@ namespace build2
// Note that if the result is group, then the group's state can be
// failed.
//
- switch (t.state_ = ts)
+ switch (t[a].state = ts)
{
case target_state::changed:
case target_state::unchanged:
break;
case target_state::postponed:
- ts = t.state_ = target_state::unchanged;
+ ts = t[a].state = target_state::unchanged;
break;
case target_state::group:
- ts = t.group->state_;
+ ts = (*t.group)[a].state;
break;
default:
assert (false);
@@ -939,7 +861,7 @@ namespace build2
}
catch (const failed&)
{
- ts = t.state_ = target_state::failed;
+ ts = t[a].state = target_state::failed;
}
return ts;
@@ -948,15 +870,17 @@ namespace build2
static target_state
execute_impl (action a, target& t)
{
- assert (t.task_count.load (memory_order_consume) == target::count_busy ()
- && t.state_ == target_state::unknown);
+ target::opstate& s (t[a]);
- target_state ts (execute_recipe (a, t, t.recipe_));
+ assert (s.task_count.load (memory_order_consume) == target::count_busy ()
+ && s.state == target_state::unknown);
- // Decrement the target count (see target::recipe() for details).
+ target_state ts (execute_recipe (a, t, s.recipe));
+
+ // Decrement the target count (see set_recipe() for details).
//
{
- recipe_function** f (t.recipe_.target<recipe_function*> ());
+ recipe_function** f (s.recipe.target<recipe_function*> ());
if (f == nullptr || *f != &group_action)
target_count.fetch_sub (1, memory_order_relaxed);
}
@@ -964,11 +888,11 @@ namespace build2
// Decrement the task count (to count_executed) and wake up any threads
// that might be waiting for this target.
//
- size_t tc (t.task_count.fetch_sub (
+ size_t tc (s.task_count.fetch_sub (
target::offset_busy - target::offset_executed,
memory_order_release));
assert (tc == target::count_busy ());
- sched.resume (t.task_count);
+ sched.resume (s.task_count);
return ts;
}
@@ -980,11 +904,12 @@ namespace build2
atomic_count* task_count)
{
target& t (const_cast<target&> (ct)); // MT-aware.
+ target::opstate& s (t[a]);
// Update dependency counts and make sure they are not skew.
//
size_t gd (dependency_count.fetch_sub (1, memory_order_relaxed));
- size_t td (t.dependents.fetch_sub (1, memory_order_release));
+ size_t td (s.dependents.fetch_sub (1, memory_order_release));
assert (td != 0 && gd != 0);
td--;
@@ -1012,133 +937,103 @@ namespace build2
if (current_mode == execution_mode::last && td != 0)
return target_state::postponed;
- // Try to atomically change applied to busy. Note that we are in the
- // execution phase so the target shall not be spin-locked.
+ // Try to atomically change applied to busy.
//
- size_t touc (target::count_touched ());
- size_t matc (target::count_matched ());
- size_t appc (target::count_applied ());
+ size_t tc (target::count_applied ());
+
size_t exec (target::count_executed ());
size_t busy (target::count_busy ());
- for (size_t tc (appc);;)
+ if (s.task_count.compare_exchange_strong (
+ tc,
+ busy,
+ memory_order_acq_rel, // Synchronize on success.
+ memory_order_acquire)) // Synchronize on failure.
{
- if (t.task_count.compare_exchange_strong (
- tc,
- busy,
- memory_order_acq_rel, // Synchronize on success.
- memory_order_acquire)) // Synchronize on failure.
+ // Handle the noop recipe.
+ //
+ if (s.state == target_state::unchanged)
{
- // Overriden touch/tried/match-only or noop recipe.
+ // There could still be scope operations.
//
- if ((tc >= touc && tc <= matc) || t.state_ == target_state::unchanged)
- {
- // If we have a noop recipe, there could still be scope operations.
- //
- if (tc == appc && t.is_a<dir> ())
- execute_recipe (a, t, nullptr /* recipe */);
- else
- t.state_ = target_state::unchanged;
-
- t.task_count.store (exec, memory_order_release);
- sched.resume (t.task_count);
- }
- else
- {
- if (task_count == nullptr)
- return execute_impl (a, t);
+ if (t.is_a<dir> ())
+ execute_recipe (a, t, nullptr /* recipe */);
- // Pass our diagnostics stack (this is safe since we expect the
- // caller to wait for completion before unwinding its diag stack).
- //
- if (sched.async (start_count,
- *task_count,
- [a] (target& t, const diag_frame* ds)
- {
- diag_frame df (ds);
- execute_impl (a, t);
- },
- ref (t),
- diag_frame::stack))
- return target_state::unknown; // Queued.
-
- // Executed synchronously, fall through.
- }
+ s.task_count.store (exec, memory_order_release);
+ sched.resume (s.task_count);
}
else
{
- // Normally either busy or already executed.
+ if (task_count == nullptr)
+ return execute_impl (a, t);
+
+ // Pass our diagnostics stack (this is safe since we expect the
+ // caller to wait for completion before unwinding its diag stack).
//
- if (tc >= busy) return target_state::busy;
- else if (tc != exec)
- {
- // This can happen if we touched/tried/matched (a noop) recipe which
- // then got overridden as part of group resolution but not all the
- // way to applied. In this case we treat it as noop.
- //
- assert ((tc >= touc && tc <= matc) && t.action > a);
- continue;
- }
+ if (sched.async (start_count,
+ *task_count,
+ [a] (target& t, const diag_frame* ds)
+ {
+ diag_frame df (ds);
+ execute_impl (a, t);
+ },
+ ref (t),
+ diag_frame::stack))
+ return target_state::unknown; // Queued.
+
+ // Executed synchronously, fall through.
}
-
- break;
+ }
+ else
+ {
+ // Either busy or already executed.
+ //
+ if (tc >= busy) return target_state::busy;
+ else assert (tc == exec);
}
- return t.executed_state (false);
+ return t.executed_state (a, false);
}
target_state
execute_direct (action a, const target& ct)
{
target& t (const_cast<target&> (ct)); // MT-aware.
+ target::opstate& s (t[a]);
- // Similar logic to match() above.
+ // Similar logic to match() above except we execute synchronously.
//
- size_t touc (target::count_touched ());
- size_t matc (target::count_matched ());
- size_t appc (target::count_applied ());
+ size_t tc (target::count_applied ());
+
size_t exec (target::count_executed ());
size_t busy (target::count_busy ());
- for (size_t tc (appc);;)
+ if (s.task_count.compare_exchange_strong (
+ tc,
+ busy,
+ memory_order_acq_rel, // Synchronize on success.
+ memory_order_acquire)) // Synchronize on failure.
{
- if (t.task_count.compare_exchange_strong (
- tc,
- busy,
- memory_order_acq_rel, // Synchronize on success.
- memory_order_acquire)) // Synchronize on failure.
+ if (s.state == target_state::unchanged)
{
- if ((tc >= touc && tc <= matc) || t.state_ == target_state::unchanged)
- {
- // If we have a noop recipe, there could still be scope operations.
- //
- if (tc == appc && t.is_a<dir> ())
- execute_recipe (a, t, nullptr /* recipe */);
- else
- t.state_ = target_state::unchanged;
+ if (t.is_a<dir> ())
+ execute_recipe (a, t, nullptr /* recipe */);
- t.task_count.store (exec, memory_order_release);
- sched.resume (t.task_count);
- }
- else
- execute_impl (a, t);
+ s.task_count.store (exec, memory_order_release);
+ sched.resume (s.task_count);
}
else
- {
+ execute_impl (a, t);
+ }
+ else
+ {
// If the target is busy, wait for it.
//
- if (tc >= busy) sched.wait (exec, t.task_count, scheduler::work_none);
- else if (tc != exec)
- {
- assert ((tc >= touc && tc <= matc) && t.action > a);
- continue;
- }
- }
-
- break;
+ if (tc >= busy) sched.wait (exec, s.task_count, scheduler::work_none);
+ else assert (tc == exec);
}
- return t.executed_state ();
+ return t.executed_state (a);
}
template <typename T>
@@ -1149,7 +1044,7 @@ namespace build2
// Start asynchronous execution of prerequisites.
//
- wait_guard wg (target::count_busy (), t.task_count);
+ wait_guard wg (target::count_busy (), t[a].task_count);
for (size_t i (0); i != n; ++i)
{
@@ -1160,7 +1055,7 @@ namespace build2
target_state s (
execute_async (
- a, *mt, target::count_busy (), t.task_count));
+ a, *mt, target::count_busy (), t[a].task_count));
if (s == target_state::postponed)
{
@@ -1177,18 +1072,18 @@ namespace build2
//
for (size_t i (0); i != n; ++i)
{
- const target*& mt (ts[i]);
-
- if (mt == nullptr)
+ if (ts[i] == nullptr)
continue;
+ const target& mt (*ts[i]);
+
// If the target is still busy, wait for its completion.
//
- if (mt->task_count.load (memory_order_acquire) >= target::count_busy ())
- sched.wait (
- target::count_executed (), mt->task_count, scheduler::work_none);
+ const auto& tc (mt[a].task_count);
+ if (tc.load (memory_order_acquire) >= target::count_busy ())
+ sched.wait (target::count_executed (), tc, scheduler::work_none);
- r |= mt->executed_state ();
+ r |= mt.executed_state (a);
}
return r;
@@ -1202,18 +1097,18 @@ namespace build2
//
target_state r (target_state::unchanged);
- wait_guard wg (target::count_busy (), t.task_count);
+ wait_guard wg (target::count_busy (), t[a].task_count);
- for (size_t i (n); i != 0; --i)
+ for (size_t i (n); i != 0; )
{
- const target*& mt (ts[i - 1]);
+ const target*& mt (ts[--i]);
if (mt == nullptr)
continue;
target_state s (
execute_async (
- a, *mt, target::count_busy (), t.task_count));
+ a, *mt, target::count_busy (), t[a].task_count));
if (s == target_state::postponed)
{
@@ -1224,18 +1119,18 @@ namespace build2
wg.wait ();
- for (size_t i (n); i != 0; --i)
+ for (size_t i (n); i != 0; )
{
- const target*& mt (ts[i - 1]);
-
- if (mt == nullptr)
+ if (ts[--i] == nullptr)
continue;
- if (mt->task_count.load (memory_order_acquire) >= target::count_busy ())
- sched.wait (
- target::count_executed (), mt->task_count, scheduler::work_none);
+ const target& mt (*ts[i]);
+
+ const auto& tc (mt[a].task_count);
+ if (tc.load (memory_order_acquire) >= target::count_busy ())
+ sched.wait (target::count_executed (), tc, scheduler::work_none);
- r |= mt->executed_state ();
+ r |= mt.executed_state (a);
}
return r;
@@ -1267,7 +1162,7 @@ namespace build2
{
assert (current_mode == execution_mode::first);
- auto& pts (const_cast<target&> (t).prerequisite_targets); // MT-aware.
+ auto& pts (t.prerequisite_targets[a]);
if (n == 0)
n = pts.size ();
@@ -1276,7 +1171,7 @@ namespace build2
//
target_state rs (target_state::unchanged);
- wait_guard wg (target::count_busy (), t.task_count);
+ wait_guard wg (target::count_busy (), t[a].task_count);
for (size_t i (0); i != n; ++i)
{
@@ -1287,7 +1182,7 @@ namespace build2
target_state s (
execute_async (
- a, *pt, target::count_busy (), t.task_count));
+ a, *pt, target::count_busy (), t[a].task_count));
if (s == target_state::postponed)
{
@@ -1303,25 +1198,25 @@ namespace build2
for (size_t i (0); i != n; ++i)
{
- const target*& pt (pts[i]);
-
- if (pt == nullptr)
+ if (pts[i] == nullptr)
continue;
- if (pt->task_count.load (memory_order_acquire) >= target::count_busy ())
- sched.wait (
- target::count_executed (), pt->task_count, scheduler::work_none);
+ const target& pt (*pts[i]);
+
+ const auto& tc (pt[a].task_count);
+ if (tc.load (memory_order_acquire) >= target::count_busy ())
+ sched.wait (target::count_executed (), tc, scheduler::work_none);
- target_state s (pt->executed_state ());
+ target_state s (pt.executed_state (a));
rs |= s;
// Should we compare the timestamp to this target's?
//
- if (!e && (!pf || pf (*pt, i)))
+ if (!e && (!pf || pf (pt, i)))
{
// If this is an mtime-based target, then compare timestamps.
//
- if (auto mpt = dynamic_cast<const mtime_target*> (pt))
+ if (const mtime_target* mpt = pt.is_a<mtime_target> ())
{
timestamp mp (mpt->mtime ());
@@ -1340,8 +1235,8 @@ namespace build2
}
}
- if (rt == nullptr && pt->is_a (*tt))
- rt = pt;
+ if (rt == nullptr && pt.is_a (*tt))
+ rt = &pt;
}
assert (rt != nullptr);
@@ -1355,7 +1250,7 @@ namespace build2
noop_action (action a, const target& t)
{
text << "noop action triggered for " << diag_doing (a, t);
- assert (false); // We shouldn't be called (see target::recipe()).
+ assert (false); // We shouldn't be called (see set_recipe()).
return target_state::unchanged;
}
@@ -1367,8 +1262,9 @@ namespace build2
const target& g (*t.group);
if (execute (a, g) == target_state::busy)
- sched.wait (
- target::count_executed (), g.task_count, scheduler::work_none);
+ sched.wait (target::count_executed (),
+ g[a].task_count,
+ scheduler::work_none);
// Indicate to execute() that this target's state comes from the group
// (which, BTW, can be failed).
@@ -1488,7 +1384,7 @@ namespace build2
//
for (const target* m (ft.member); m != nullptr; m = m->member)
{
- const file* fm (dynamic_cast<const file*> (m));
+ const file* fm (m->is_a<file> ());
const path* fp (fm != nullptr ? &fm->path () : nullptr);
if (fm == nullptr || fp->empty ())
diff --git a/build2/algorithm.hxx b/build2/algorithm.hxx
index ff70b21..8978eba 100644
--- a/build2/algorithm.hxx
+++ b/build2/algorithm.hxx
@@ -102,33 +102,39 @@ namespace build2
const scope&,
const dir_path& out = dir_path ());
- // Target match lock: a non-const target reference as well as the
- // target::offset_* state that has already been "achieved".
+ // Target match lock: a non-const target reference and the target::offset_*
+ // state that has already been "achieved". Note that target::task_count
+ // itself is set to busy for the duration of the lock.
//
struct target_lock
{
+ using action_type = build2::action;
using target_type = build2::target;
+ action_type action;
target_type* target = nullptr;
- size_t offset = 0;
+ size_t offset = 0;
explicit operator bool () const {return target != nullptr;}
- void unlock ();
- target_type* release ();
+ void
+ unlock ();
target_lock () = default;
-
target_lock (target_lock&&);
target_lock& operator= (target_lock&&);
- // Implementation details.
- //
target_lock (const target_lock&) = delete;
target_lock& operator= (const target_lock&) = delete;
- target_lock (target_type* t, size_t o): target (t), offset (o) {}
+ // Implementation details.
+ //
~target_lock ();
+ target_lock (action_type a, target_type* t, size_t o)
+ : action (a), target (t), offset (o) {}
+
+ target_type*
+ release () {auto r (target); target = nullptr; return r;}
};
// If the target is already applied (for this action ) or executed, then no
@@ -222,13 +228,23 @@ namespace build2
// Match a "delegate rule" from withing another rules' apply() function
// avoiding recursive matches (thus the third argument). Unless try_match is
- // true, fail if not rule is found. Otherwise return empty recipe. Note that
- // unlike match(), this function does not increment the dependents
- // count. See also the companion execute_delegate().
+ // true, fail if no rule is found. Otherwise return empty recipe. Note that
+ // unlike match(), this function does not increment the dependents count and
+ // the two rules must coordinate who is using the target's data pad and/or
+ // prerequisite_targets. See also the companion execute_delegate().
//
recipe
match_delegate (action, target&, const rule&, bool try_match = false);
+ // Match a rule for the inner operation from within the outer rule's
+ // apply() function. See also the companion execute_inner().
+ //
+ target_state
+ match_inner (action, const target&);
+
+ bool
+ match_inner (action, const target&, unmatch);
+
// The standard prerequisite search and match implementations. They call
// search() and then match() for each prerequisite in a loop omitting out of
// project prerequisites for the clean operation. If this target is a member
@@ -259,7 +275,7 @@ namespace build2
//
template <typename T>
void
- match_members (action, target&, T[], size_t);
+ match_members (action, target&, T const*, size_t);
template <size_t N>
inline void
@@ -269,7 +285,10 @@ namespace build2
}
inline void
- match_members (action a, target& t, prerequisite_targets& ts, size_t start)
+ match_members (action a,
+ target& t,
+ prerequisite_targets& ts,
+ size_t start = 0)
{
match_members (a, t, ts.data () + start, ts.size () - start);
}
@@ -279,6 +298,13 @@ namespace build2
// member's list might still not be available (e.g., if some wildcard/
// fallback rule matched).
//
+ // If the action is for an outer operation, then it is changed to inner
+ // which means the members are always resolved by the inner (e.g., update)
+ // rule. This feels right since this is the rule that will normally do the
+ // work (e.g., update) and therefore knows what it will produce (and if we
+ // don't do this, then the group resolution will be racy since we will use
+ // two different task_count instances for synchronization).
+ //
group_view
resolve_group_members (action, const target&);
@@ -307,6 +333,12 @@ namespace build2
target_state
execute (action, const target&);
+ // As above but wait for completion if the target is busy and translate
+ // target_state::failed to the failed exception.
+ //
+ target_state
+ execute_wait (action, const target&);
+
// As above but start asynchronous execution. Return target_state::unknown
// if the asynchrounous execution has been started and target_state::busy if
// the target has already been busy.
@@ -328,6 +360,18 @@ namespace build2
target_state
execute_delegate (const recipe&, action, const target&);
+ // Execute the inner operation matched with match_inner(). Note that the
+ // returned target state is for the inner operation. The appropriate usage
+ // is to call this function from the outer operation's recipe and to factor
+ // the obtained state into the one returned (similar to how we do it for
+ // prerequisites).
+ //
+ // Note: waits for the completion if the target is busy and translates
+ // target_state::failed to the failed exception.
+ //
+ target_state
+ execute_inner (action, const target&);
+
// A special version of the above that should be used for "direct" and "now"
// execution, that is, side-stepping the normal target-prerequisite
// relationship (so no dependents count is decremented) and execution order
@@ -344,30 +388,29 @@ namespace build2
// for their completion. Return target_state::changed if any of them were
// changed and target_state::unchanged otherwise. If a prerequisite's
// execution is postponed, then set its pointer in prerequisite_targets to
- // NULL (since its state cannot be queried MT-safely).
- //
- // Note that this function can be used as a recipe.
+ // NULL (since its state cannot be queried MT-safely). If count is not 0,
+ // then only the first count prerequisites are executed.
//
target_state
- straight_execute_prerequisites (action, const target&);
+ straight_execute_prerequisites (action, const target&, size_t count = 0);
// As above but iterates over the prerequisites in reverse.
//
target_state
- reverse_execute_prerequisites (action, const target&);
+ reverse_execute_prerequisites (action, const target&, size_t count = 0);
// Call straight or reverse depending on the current mode.
//
target_state
- execute_prerequisites (action, const target&);
+ execute_prerequisites (action, const target&, size_t count = 0);
// A version of the above that also determines whether the action needs to
// be executed on the target based on the passed timestamp and filter.
//
// The filter is passed each prerequisite target and is expected to signal
// which ones should be used for timestamp comparison. If the filter is
- // NULL, then all the prerequisites are used. If the count is not 0, then
- // only the first count prerequisites are executed.
+ // NULL, then all the prerequisites are used. If count is not 0, then only
+ // the first count prerequisites are executed.
//
// Note that the return value is an optional target state. If the target
// needs updating, then the value absent. Otherwise it is the state that
diff --git a/build2/algorithm.ixx b/build2/algorithm.ixx
index 565414a..9baa650 100644
--- a/build2/algorithm.ixx
+++ b/build2/algorithm.ixx
@@ -129,26 +129,18 @@ namespace build2
lock_impl (action, const target&, optional<scheduler::work_queue>);
void
- unlock_impl (target&, size_t);
+ unlock_impl (action, target&, size_t);
inline void target_lock::
unlock ()
{
if (target != nullptr)
{
- unlock_impl (*target, offset);
+ unlock_impl (action, *target, offset);
target = nullptr;
}
}
- inline target* target_lock::
- release ()
- {
- target_type* r (target);
- target = nullptr;
- return r;
- }
-
inline target_lock::
~target_lock ()
{
@@ -157,8 +149,9 @@ namespace build2
inline target_lock::
target_lock (target_lock&& x)
- : target (x.release ()), offset (x.offset)
+ : action (x.action), target (x.target), offset (x.offset)
{
+ x.target = nullptr;
}
inline target_lock& target_lock::
@@ -167,8 +160,10 @@ namespace build2
if (this != &x)
{
unlock ();
- target = x.release ();
+ action = x.action;
+ target = x.target;
offset = x.offset;
+ x.target = nullptr;
}
return *this;
}
@@ -185,13 +180,11 @@ namespace build2
return r;
}
- pair<const pair<const string, reference_wrapper<const rule>>*, action>
+ const rule_match*
match_impl (action, target&, const rule* skip, bool try_match = false);
recipe
- apply_impl (target&,
- const pair<const string, reference_wrapper<const rule>>&,
- action);
+ apply_impl (action, target&, const rule_match&);
pair<bool, target_state>
match (action, const target&, size_t, atomic_count*, bool try_match = false);
@@ -206,7 +199,7 @@ namespace build2
if (r != target_state::failed)
{
dependency_count.fetch_add (1, memory_order_relaxed);
- t.dependents.fetch_add (1, memory_order_release);
+ t[a].dependents.fetch_add (1, memory_order_release);
}
else if (fail)
throw failed ();
@@ -227,7 +220,7 @@ namespace build2
if (r.second != target_state::failed)
{
dependency_count.fetch_add (1, memory_order_relaxed);
- t.dependents.fetch_add (1, memory_order_release);
+ t[a].dependents.fetch_add (1, memory_order_release);
}
else if (fail)
throw failed ();
@@ -236,7 +229,6 @@ namespace build2
return r;
}
-
inline bool
match (action a, const target& t, unmatch um)
{
@@ -261,8 +253,8 @@ namespace build2
{
// Safe if unchanged or someone else is also a dependent.
//
- if (s == target_state::unchanged ||
- t.dependents.load (memory_order_consume) != 0)
+ if (s == target_state::unchanged ||
+ t[a].dependents.load (memory_order_consume) != 0)
return true;
break;
@@ -270,7 +262,7 @@ namespace build2
}
dependency_count.fetch_add (1, memory_order_relaxed);
- t.dependents.fetch_add (1, memory_order_release);
+ t[a].dependents.fetch_add (1, memory_order_release);
return false;
}
@@ -290,24 +282,76 @@ namespace build2
}
inline void
+ set_recipe (target_lock& l, recipe&& r)
+ {
+ target::opstate& s ((*l.target)[l.action]);
+
+ s.recipe = move (r);
+
+ // If this is a noop recipe, then mark the target unchanged to allow for
+ // some optimizations.
+ //
+ recipe_function** f (s.recipe.target<recipe_function*> ());
+
+ if (f != nullptr && *f == &noop_action)
+ s.state = target_state::unchanged;
+ else
+ {
+ s.state = target_state::unknown;
+
+ // This gets tricky when we start considering direct execution, etc. So
+ // here seems like the best place to do it.
+ //
+ // We also ignore the group recipe since it is used for ad hoc groups
+ // (which are not executed). Plus, group action means real recipe is in
+ // the group so this also feels right conceptually.
+ //
+ // Note that we will increment this count twice for the same target if
+ // we have non-noop recipes for both inner and outer operations. While
+ // not ideal, the alternative (trying to "merge" the count keeping track
+ // whether inner and/or outer is noop) gets hairy rather quickly.
+ //
+ if (f == nullptr || *f != &group_action)
+ target_count.fetch_add (1, memory_order_relaxed);
+ }
+ }
+
+ inline void
match_recipe (target_lock& l, recipe r)
{
assert (phase == run_phase::match && l.target != nullptr);
- target& t (*l.target);
- t.rule = nullptr; // No rule.
- t.recipe (move (r));
+ (*l.target)[l.action].rule = nullptr; // No rule.
+ set_recipe (l, move (r));
l.offset = target::offset_applied;
}
inline recipe
- match_delegate (action a, target& t, const rule& r, bool try_match)
+ match_delegate (action a, target& t, const rule& dr, bool try_match)
{
assert (phase == run_phase::match);
- auto mr (match_impl (a, t, &r, try_match));
- return mr.first != nullptr
- ? apply_impl (t, *mr.first, mr.second)
- : empty_recipe;
+
+ // Note: we don't touch any of the t[a] state since that was/will be set
+ // for the delegating rule.
+ //
+ const rule_match* r (match_impl (a, t, &dr, try_match));
+ return r != nullptr ? apply_impl (a, t, *r) : empty_recipe;
+ }
+
+ inline target_state
+ match_inner (action a, const target& t)
+ {
+ // In a sense this is like any other dependency.
+ //
+ assert (a.outer ());
+ return match (action (a.meta_operation (), a.operation ()), t);
+ }
+
+ inline bool
+ match_inner (action a, const target& t, unmatch um)
+ {
+ assert (a.outer ());
+ return match (action (a.meta_operation (), a.operation ()), t, um);
}
group_view
@@ -318,6 +362,9 @@ namespace build2
{
group_view r;
+ if (a.outer ())
+ a = action (a.meta_operation (), a.operation ());
+
// We can be called during execute though everything should have been
// already resolved.
//
@@ -395,6 +442,17 @@ namespace build2
}
inline target_state
+ execute_wait (action a, const target& t)
+ {
+ if (execute (a, t) == target_state::busy)
+ sched.wait (target::count_executed (),
+ t[a].task_count,
+ scheduler::work_none);
+
+ return t.executed_state (a);
+ }
+
+ inline target_state
execute_async (action a, const target& t,
size_t sc, atomic_count& tc,
bool fail)
@@ -414,26 +472,32 @@ namespace build2
}
inline target_state
- straight_execute_prerequisites (action a, const target& t)
+ execute_inner (action a, const target& t)
+ {
+ assert (a.outer ());
+ return execute_wait (action (a.meta_operation (), a.operation ()), t);
+ }
+
+ inline target_state
+ straight_execute_prerequisites (action a, const target& t, size_t c)
{
- auto& p (const_cast<target&> (t).prerequisite_targets); // MT-aware.
- return straight_execute_members (a, t, p.data (), p.size ());
+ auto& p (t.prerequisite_targets[a]);
+ return straight_execute_members (a, t, p.data (), c == 0 ? p.size () : c);
}
inline target_state
- reverse_execute_prerequisites (action a, const target& t)
+ reverse_execute_prerequisites (action a, const target& t, size_t c)
{
- auto& p (const_cast<target&> (t).prerequisite_targets); // MT-aware.
- return reverse_execute_members (a, t, p.data (), p.size ());
+ auto& p (t.prerequisite_targets[a]);
+ return reverse_execute_members (a, t, p.data (), c == 0 ? p.size () : c);
}
inline target_state
- execute_prerequisites (action a, const target& t)
+ execute_prerequisites (action a, const target& t, size_t c)
{
- auto& p (const_cast<target&> (t).prerequisite_targets); // MT-aware.
return current_mode == execution_mode::first
- ? straight_execute_members (a, t, p.data (), p.size ())
- : reverse_execute_members (a, t, p.data (), p.size ());
+ ? straight_execute_prerequisites (a, t, c)
+ : reverse_execute_prerequisites (a, t, c);
}
// If the first argument is NULL, then the result is treated as a boolean
diff --git a/build2/bin/init.cxx b/build2/bin/init.cxx
index d9f3c0e..565936f 100644
--- a/build2/bin/init.cxx
+++ b/build2/bin/init.cxx
@@ -11,6 +11,10 @@
#include <build2/diagnostics.hxx>
#include <build2/config/utility.hxx>
+
+#include <build2/test/module.hxx>
+
+#include <build2/install/rule.hxx>
#include <build2/install/utility.hxx>
#include <build2/bin/rule.hxx>
@@ -456,17 +460,24 @@ namespace build2
r.insert<libu> (perform_update_id, "bin.libu", fail_);
r.insert<libu> (perform_clean_id, "bin.libu", fail_);
- r.insert<lib> (perform_update_id, "bin.lib", lib_);
- r.insert<lib> (perform_clean_id, "bin.lib", lib_);
-
- // Configure members.
+ // Similar to alias.
//
- r.insert<lib> (configure_update_id, "bin.lib", lib_);
+ r.insert<lib> (perform_id, 0, "bin.lib", lib_);
+ r.insert<lib> (configure_id, 0, "bin.lib", lib_);
+ // Treat as a see through group for install and test.
+ //
if (install_loaded)
{
- r.insert<lib> (perform_install_id, "bin.lib", lib_);
- r.insert<lib> (perform_uninstall_id, "bin.lib", lib_);
+ auto& gr (install::group_rule::instance);
+
+ r.insert<lib> (perform_install_id, "bin.lib", gr);
+ r.insert<lib> (perform_uninstall_id, "bin.lib", gr);
+ }
+
+ if (const test::module* m = rs.modules.lookup<test::module> ("test"))
+ {
+ r.insert<lib> (perform_test_id, "bin.lib", m->group_rule ());
}
}
diff --git a/build2/bin/rule.cxx b/build2/bin/rule.cxx
index bb9036b..79270c3 100644
--- a/build2/bin/rule.cxx
+++ b/build2/bin/rule.cxx
@@ -19,7 +19,7 @@ namespace build2
{
// fail_rule
//
- match_result fail_rule::
+ bool fail_rule::
match (action a, target& t, const string&) const
{
const char* n (t.dynamic_type ().name); // Ignore derived type.
@@ -37,8 +37,8 @@ namespace build2
// The whole logic is pretty much as if we had our two group members as
// our prerequisites.
//
- match_result lib_rule::
- match (action act, target& xt, const string&) const
+ bool lib_rule::
+ match (action, target& xt, const string&) const
{
lib& t (xt.as<lib> ());
@@ -57,35 +57,27 @@ namespace build2
t.a = a ? &search<liba> (t, t.dir, t.out, t.name) : nullptr;
t.s = s ? &search<libs> (t, t.dir, t.out, t.name) : nullptr;
- match_result mr (true);
-
- // If there is an outer operation, indicate that we match
- // unconditionally so that we don't override ourselves.
- //
- if (act.outer_operation () != 0)
- mr.recipe_action = action (act.meta_operation (), act.operation ());
-
- return mr;
+ return true;
}
recipe lib_rule::
- apply (action act, target& xt) const
+ apply (action a, target& xt) const
{
lib& t (xt.as<lib> ());
const target* m[] = {t.a, t.s};
- match_members (act, t, m);
+ match_members (a, t, m);
return &perform;
}
target_state lib_rule::
- perform (action act, const target& xt)
+ perform (action a, const target& xt)
{
const lib& t (xt.as<lib> ());
const target* m[] = {t.a, t.s};
- return execute_members (act, t, m);
+ return execute_members (a, t, m);
}
}
}
diff --git a/build2/bin/rule.hxx b/build2/bin/rule.hxx
index b4835dc..6385830 100644
--- a/build2/bin/rule.hxx
+++ b/build2/bin/rule.hxx
@@ -14,26 +14,29 @@ namespace build2
{
namespace bin
{
- // Fail rule for obj{}, bmi{}, and libu{}.
+ // "Fail rule" for obj{}, bmi{}, and libu{} that issues diagnostics if
+ // someone tries to build any of these groups directly.
//
class fail_rule: public rule
{
public:
fail_rule () {}
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
virtual recipe
apply (action, target&) const override;
};
+ // Pass-through to group members rule, similar to alias.
+ //
class lib_rule: public rule
{
public:
lib_rule () {}
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
virtual recipe
diff --git a/build2/bin/target.cxx b/build2/bin/target.cxx
index 533da43..2bcb8bc 100644
--- a/build2/bin/target.cxx
+++ b/build2/bin/target.cxx
@@ -281,7 +281,7 @@ namespace build2
// lib
//
group_view lib::
- group_members (action_type) const
+ group_members (action) const
{
static_assert (sizeof (lib_members) == sizeof (const target*) * 2,
"member layout incompatible with array");
@@ -321,7 +321,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ false // Note: not see-through ("alternatives" group).
};
// libi
diff --git a/build2/bin/target.hxx b/build2/bin/target.hxx
index 790d1f0..329b4a9 100644
--- a/build2/bin/target.hxx
+++ b/build2/bin/target.hxx
@@ -226,7 +226,7 @@ namespace build2
using libx::libx;
virtual group_view
- group_members (action_type) const override;
+ group_members (action) const override;
public:
static const target_type static_type;
diff --git a/build2/cc/common.cxx b/build2/cc/common.cxx
index 3daada1..e4dbfe8 100644
--- a/build2/cc/common.cxx
+++ b/build2/cc/common.cxx
@@ -46,7 +46,7 @@ namespace build2
//
void common::
process_libraries (
- action act,
+ action a,
const scope& top_bs,
linfo top_li,
const dir_paths& top_sysd,
@@ -222,23 +222,23 @@ namespace build2
//
if (impl && !c_e_libs.defined () && !x_e_libs.defined ())
{
- for (auto pt: l.prerequisite_targets)
+ for (const prerequisite_target& pt: l.prerequisite_targets[a])
{
if (pt == nullptr)
continue;
- bool a;
+ bool la;
const file* f;
- if ((a = (f = pt->is_a<liba> ())) ||
- (a = (f = pt->is_a<libux> ())) ||
- ( f = pt->is_a<libs> ()))
+ if ((la = (f = pt->is_a<liba> ())) ||
+ (la = (f = pt->is_a<libux> ())) ||
+ ( f = pt->is_a<libs> ()))
{
if (sysd == nullptr) find_sysd ();
if (!li) find_linfo ();
- process_libraries (act, bs, *li, *sysd,
- *f, a, pt.data,
+ process_libraries (a, bs, *li, *sysd,
+ *f, la, pt.data,
proc_impl, proc_lib, proc_opt, true);
}
}
@@ -275,7 +275,7 @@ namespace build2
&proc_impl, &proc_lib, &proc_opt,
&sysd, &usrd,
&find_sysd, &find_linfo, &sys_simple,
- &bs, act, &li, this] (const lookup& lu)
+ &bs, a, &li, this] (const lookup& lu)
{
const vector<name>* ns (cast_null<vector<name>> (lu));
if (ns == nullptr || ns->empty ())
@@ -300,7 +300,7 @@ namespace build2
if (sysd == nullptr) find_sysd ();
if (!li) find_linfo ();
- const file& t (resolve_library (act, bs, n, *li, *sysd, usrd));
+ const file& t (resolve_library (a, bs, n, *li, *sysd, usrd));
if (proc_lib)
{
@@ -324,7 +324,7 @@ namespace build2
// @@ Where can we get the link flags? Should we try to find them
// in the library's prerequisites? What about installed stuff?
//
- process_libraries (act, bs, *li, *sysd,
+ process_libraries (a, bs, *li, *sysd,
t, t.is_a<liba> () || t.is_a<libux> (), 0,
proc_impl, proc_lib, proc_opt, true);
}
@@ -402,7 +402,7 @@ namespace build2
// that's the only way to guarantee it will be up-to-date.
//
const file& common::
- resolve_library (action act,
+ resolve_library (action a,
const scope& s,
name n,
linfo li,
@@ -439,7 +439,7 @@ namespace build2
//
dir_path out;
prerequisite_key pk {n.proj, {tt, &n.dir, &out, &n.value, ext}, &s};
- xt = search_library_existing (act, sysd, usrd, pk);
+ xt = search_library_existing (a, sysd, usrd, pk);
if (xt == nullptr)
{
@@ -454,7 +454,7 @@ namespace build2
// If this is lib{}/libu{}, pick appropriate member.
//
if (const libx* l = xt->is_a<libx> ())
- xt = &link_member (*l, act, li); // Pick lib*{e,a,s}{}.
+ xt = &link_member (*l, a, li); // Pick lib*{e,a,s}{}.
return xt->as<file> ();
}
diff --git a/build2/cc/common.hxx b/build2/cc/common.hxx
index 5ed7173..5952df6 100644
--- a/build2/cc/common.hxx
+++ b/build2/cc/common.hxx
@@ -225,7 +225,7 @@ namespace build2
bool = false) const;
const target*
- search_library (action act,
+ search_library (action a,
const dir_paths& sysd,
optional<dir_paths>& usrd,
const prerequisite& p) const
@@ -234,7 +234,7 @@ namespace build2
if (r == nullptr)
{
- if ((r = search_library (act, sysd, usrd, p.key ())) != nullptr)
+ if ((r = search_library (a, sysd, usrd, p.key ())) != nullptr)
{
const target* e (nullptr);
if (!p.target.compare_exchange_strong (
@@ -274,12 +274,12 @@ namespace build2
bool existing = false) const;
const target*
- search_library_existing (action act,
+ search_library_existing (action a,
const dir_paths& sysd,
optional<dir_paths>& usrd,
const prerequisite_key& pk) const
{
- return search_library (act, sysd, usrd, pk, true);
+ return search_library (a, sysd, usrd, pk, true);
}
dir_paths
diff --git a/build2/cc/compile.cxx b/build2/cc/compile-rule.cxx
index 94b3478..df84547 100644
--- a/build2/cc/compile.cxx
+++ b/build2/cc/compile-rule.cxx
@@ -1,8 +1,8 @@
-// file : build2/cc/compile.cxx -*- C++ -*-
+// file : build2/cc/compile-rule.cxx -*- C++ -*-
// copyright : Copyright (c) 2014-2017 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
-#include <build2/cc/compile.hxx>
+#include <build2/cc/compile-rule.hxx>
#include <cstdlib> // exit()
#include <cstring> // strlen()
@@ -124,7 +124,7 @@ namespace build2
throw invalid_argument ("invalid preprocessed value '" + s + "'");
}
- struct compile::match_data
+ struct compile_rule::match_data
{
explicit
match_data (translation_type t, const prerequisite_member& s)
@@ -141,16 +141,16 @@ namespace build2
module_positions mods = {0, 0, 0};
};
- compile::
- compile (data&& d)
+ compile_rule::
+ compile_rule (data&& d)
: common (move (d)),
rule_id (string (x) += ".compile 4")
{
- static_assert (sizeof (compile::match_data) <= target::data_size,
+ static_assert (sizeof (match_data) <= target::data_size,
"insufficient space");
}
- const char* compile::
+ const char* compile_rule::
langopt (const match_data& md) const
{
bool m (md.type == translation_type::module_iface);
@@ -204,7 +204,7 @@ namespace build2
return nullptr;
}
- inline void compile::
+ inline void compile_rule::
append_symexport_options (cstrings& args, const target& t) const
{
// With VC if a BMI is compiled with dllexport, then when such BMI is
@@ -216,10 +216,10 @@ namespace build2
: "-D__symexport=");
}
- match_result compile::
- match (action act, target& t, const string&) const
+ bool compile_rule::
+ match (action a, target& t, const string&) const
{
- tracer trace (x, "compile::match");
+ tracer trace (x, "compile_rule::match");
bool mod (t.is_a<bmie> () || t.is_a<bmia> () || t.is_a<bmis> ());
@@ -235,7 +235,7 @@ namespace build2
// file specified for a member overrides the one specified for the
// group. Also "see through" groups.
//
- for (prerequisite_member p: reverse_group_prerequisite_members (act, t))
+ for (prerequisite_member p: reverse_group_prerequisite_members (a, t))
{
if (p.is_a (mod ? *x_mod : x_src))
{
@@ -257,11 +257,11 @@ namespace build2
// Append or hash library options from a pair of *.export.* variables
// (first one is cc.export.*) recursively, prerequisite libraries first.
//
- void compile::
+ void compile_rule::
append_lib_options (const scope& bs,
cstrings& args,
+ action a,
const target& t,
- action act,
linfo li) const
{
// See through utility libraries.
@@ -290,33 +290,33 @@ namespace build2
const function<bool (const file&, bool)> impf (imp);
const function<void (const file&, const string&, bool, bool)> optf (opt);
- for (prerequisite_member p: group_prerequisite_members (act, t))
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
// Should be already searched and matched for libraries.
//
if (const target* pt = p.load ())
{
if (const libx* l = pt->is_a<libx> ())
- pt = &link_member (*l, act, li);
+ pt = &link_member (*l, a, li);
- bool a;
- if (!((a = pt->is_a<liba> ()) ||
- (a = pt->is_a<libux> ()) ||
+ bool la;
+ if (!((la = pt->is_a<liba> ()) ||
+ (la = pt->is_a<libux> ()) ||
pt->is_a<libs> ()))
continue;
- process_libraries (act, bs, li, sys_lib_dirs,
- pt->as<file> (), a, 0, // Hack: lflags unused.
+ process_libraries (a, bs, li, sys_lib_dirs,
+ pt->as<file> (), la, 0, // Hack: lflags unused.
impf, nullptr, optf);
}
}
}
- void compile::
+ void compile_rule::
hash_lib_options (const scope& bs,
sha256& cs,
+ action a,
const target& t,
- action act,
linfo li) const
{
auto imp = [] (const file& l, bool la) {return la && l.is_a<libux> ();};
@@ -340,21 +340,21 @@ namespace build2
const function<bool (const file&, bool)> impf (imp);
const function<void (const file&, const string&, bool, bool)> optf (opt);
- for (prerequisite_member p: group_prerequisite_members (act, t))
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
if (const target* pt = p.load ())
{
if (const libx* l = pt->is_a<libx> ())
- pt = &link_member (*l, act, li);
+ pt = &link_member (*l, a, li);
- bool a;
- if (!((a = pt->is_a<liba> ()) ||
- (a = pt->is_a<libux> ()) ||
+ bool la;
+ if (!((la = pt->is_a<liba> ()) ||
+ (la = pt->is_a<libux> ()) ||
pt->is_a<libs> ()))
continue;
- process_libraries (act, bs, li, sys_lib_dirs,
- pt->as<file> (), a, 0, // Hack: lflags unused.
+ process_libraries (a, bs, li, sys_lib_dirs,
+ pt->as<file> (), la, 0, // Hack: lflags unused.
impf, nullptr, optf);
}
}
@@ -363,11 +363,11 @@ namespace build2
// Append library prefixes based on the *.export.poptions variables
// recursively, prerequisite libraries first.
//
- void compile::
+ void compile_rule::
append_lib_prefixes (const scope& bs,
prefix_map& m,
+ action a,
target& t,
- action act,
linfo li) const
{
auto imp = [] (const file& l, bool la) {return la && l.is_a<libux> ();};
@@ -391,21 +391,21 @@ namespace build2
const function<bool (const file&, bool)> impf (imp);
const function<void (const file&, const string&, bool, bool)> optf (opt);
- for (prerequisite_member p: group_prerequisite_members (act, t))
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
if (const target* pt = p.load ())
{
if (const libx* l = pt->is_a<libx> ())
- pt = &link_member (*l, act, li);
+ pt = &link_member (*l, a, li);
- bool a;
- if (!((a = pt->is_a<liba> ()) ||
- (a = pt->is_a<libux> ()) ||
+ bool la;
+ if (!((la = pt->is_a<liba> ()) ||
+ (la = pt->is_a<libux> ()) ||
pt->is_a<libs> ()))
continue;
- process_libraries (act, bs, li, sys_lib_dirs,
- pt->as<file> (), a, 0, // Hack: lflags unused.
+ process_libraries (a, bs, li, sys_lib_dirs,
+ pt->as<file> (), la, 0, // Hack: lflags unused.
impf, nullptr, optf);
}
}
@@ -427,14 +427,14 @@ namespace build2
// file is known to be up to date. So we do the update "smartly".
//
static bool
- update (tracer& trace, action act, const target& t, timestamp ts)
+ update (tracer& trace, action a, const target& t, timestamp ts)
{
const path_target* pt (t.is_a<path_target> ());
if (pt == nullptr)
ts = timestamp_unknown;
- target_state os (t.matched_state (act));
+ target_state os (t.matched_state (a));
if (os == target_state::unchanged)
{
@@ -444,7 +444,7 @@ namespace build2
{
// We expect the timestamp to be known (i.e., existing file).
//
- timestamp mt (pt->mtime ()); // @@ MT perf: know target state.
+ timestamp mt (pt->mtime ());
assert (mt != timestamp_unknown);
return mt > ts;
}
@@ -460,7 +460,7 @@ namespace build2
// any generated header.
//
phase_switch ps (run_phase::execute);
- target_state ns (execute_direct (act, t));
+ target_state ns (execute_direct (a, t));
if (ns != os && ns != target_state::unchanged)
{
@@ -474,10 +474,10 @@ namespace build2
}
}
- recipe compile::
- apply (action act, target& xt) const
+ recipe compile_rule::
+ apply (action a, target& xt) const
{
- tracer trace (x, "compile::apply");
+ tracer trace (x, "compile_rule::apply");
file& t (xt.as<file> ()); // Either obj*{} or bmi*{}.
@@ -569,7 +569,7 @@ namespace build2
// (e.g., foo.mxx and foo.cxx) which means obj*{} targets could
// collide. So we add the module extension to the target name.
//
- target_lock obj (add_adhoc_member (act, t, tt.obj, e.c_str ()));
+ target_lock obj (add_adhoc_member (a, t, tt.obj, e.c_str ()));
obj.target->as<file> ().derive_path (o);
match_recipe (obj, group_recipe); // Set recipe and unlock.
}
@@ -579,7 +579,7 @@ namespace build2
// Inject dependency on the output directory.
//
- const fsdir* dir (inject_fsdir (act, t));
+ const fsdir* dir (inject_fsdir (a, t));
// Match all the existing prerequisites. The injection code takes care
// of the ones it is adding.
@@ -587,16 +587,16 @@ namespace build2
// When cleaning, ignore prerequisites that are not in the same or a
// subdirectory of our project root.
//
- auto& pts (t.prerequisite_targets);
+ auto& pts (t.prerequisite_targets[a]);
optional<dir_paths> usr_lib_dirs; // Extract lazily.
// Start asynchronous matching of prerequisites. Wait with unlocked
// phase to allow phase switching.
//
- wait_guard wg (target::count_busy (), t.task_count, true);
+ wait_guard wg (target::count_busy (), t[a].task_count, true);
size_t start (pts.size ()); // Index of the first to be added.
- for (prerequisite_member p: group_prerequisite_members (act, t))
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
const target* pt (nullptr);
@@ -609,7 +609,7 @@ namespace build2
p.is_a<libs> () ||
p.is_a<libux> ())
{
- if (act.operation () == update_id)
+ if (a.operation () == update_id)
{
// Handle (phase two) imported libraries. We know that for such
// libraries we don't need to do match() in order to get options
@@ -617,7 +617,7 @@ namespace build2
//
if (p.proj ())
{
- if (search_library (act,
+ if (search_library (a,
sys_lib_dirs,
usr_lib_dirs,
p.prerequisite) != nullptr)
@@ -627,7 +627,7 @@ namespace build2
pt = &p.search (t);
if (const libx* l = pt->is_a<libx> ())
- pt = &link_member (*l, act, li);
+ pt = &link_member (*l, a, li);
}
else
continue;
@@ -644,11 +644,11 @@ namespace build2
{
pt = &p.search (t);
- if (act.operation () == clean_id && !pt->dir.sub (rs.out_path ()))
+ if (a.operation () == clean_id && !pt->dir.sub (rs.out_path ()))
continue;
}
- match_async (act, *pt, target::count_busy (), t.task_count);
+ match_async (a, *pt, target::count_busy (), t[a].task_count);
pts.push_back (pt);
}
@@ -668,7 +668,7 @@ namespace build2
// an obj?{} target directory.
//
if (build2::match (
- act,
+ a,
*pt,
pt->is_a<liba> () || pt->is_a<libs> () || pt->is_a<libux> ()
? unmatch::safe
@@ -680,7 +680,7 @@ namespace build2
// since chances are we will have to update some of our prerequisites in
// the process (auto-generated source code).
//
- if (act == perform_update_id)
+ if (a == perform_update_id)
{
// The cached prerequisite target should be the same as what is in
// t.prerequisite_targets since we used standard search() and match()
@@ -722,7 +722,7 @@ namespace build2
// this can very well be happening in parallel. But that's not a
// problem since fsdir{}'s update is idempotent.
//
- fsdir_rule::perform_update_direct (act, t);
+ fsdir_rule::perform_update_direct (a, t);
}
// Note: the leading '@' is reserved for the module map prefix (see
@@ -764,7 +764,7 @@ namespace build2
// Hash *.export.poptions from prerequisite libraries.
//
- hash_lib_options (bs, cs, t, act, li);
+ hash_lib_options (bs, cs, a, t, li);
// Extra system header dirs (last).
//
@@ -821,14 +821,14 @@ namespace build2
if (pt == nullptr || pt == dir)
continue;
- u = update (trace, act, *pt, u ? timestamp_unknown : mt) || u;
+ u = update (trace, a, *pt, u ? timestamp_unknown : mt) || u;
}
// Check if the source is already preprocessed to a certain degree.
// This determines which of the following steps we perform and on
// what source (original or preprocessed).
//
- // Note: must be set of the src target.
+ // Note: must be set on the src target.
//
if (const string* v = cast_null<string> (src[x_preprocessed]))
try
@@ -846,7 +846,7 @@ namespace build2
//
pair<auto_rmfile, bool> psrc (auto_rmfile (), false);
if (md.pp < preprocessed::includes)
- psrc = extract_headers (act, bs, t, li, src, md, dd, u, mt);
+ psrc = extract_headers (a, bs, t, li, src, md, dd, u, mt);
// Next we "obtain" the translation unit information. What exactly
// "obtain" entails is tricky: If things changed, then we re-parse the
@@ -869,7 +869,7 @@ namespace build2
{
if (u)
{
- auto p (parse_unit (act, t, li, src, psrc.first, md));
+ auto p (parse_unit (a, t, li, src, psrc.first, md));
if (cs != p.second)
{
@@ -948,7 +948,7 @@ namespace build2
// NOTE: assumes that no further targets will be added into
// t.prerequisite_targets!
//
- extract_modules (act, bs, t, li, tt, src, md, move (tu.mod), dd, u);
+ extract_modules (a, bs, t, li, tt, src, md, move (tu.mod), dd, u);
}
// If anything got updated, then we didn't rely on the cache. However,
@@ -1002,7 +1002,7 @@ namespace build2
md.mt = u ? timestamp_nonexistent : dd.mtime ();
}
- switch (act)
+ switch (a)
{
case perform_update_id: return [this] (action a, const target& t)
{
@@ -1018,7 +1018,7 @@ namespace build2
// Reverse-lookup target type from extension.
//
- const target_type* compile::
+ const target_type* compile_rule::
map_extension (const scope& s, const string& n, const string& e) const
{
// We will just have to try all of the possible ones, in the "most
@@ -1047,10 +1047,10 @@ namespace build2
return nullptr;
}
- void compile::
+ void compile_rule::
append_prefixes (prefix_map& m, const target& t, const variable& var) const
{
- tracer trace (x, "compile::append_prefixes");
+ tracer trace (x, "compile_rule::append_prefixes");
// If this target does not belong to any project (e.g., an "imported as
// installed" library), then it can't possibly generate any headers for
@@ -1187,10 +1187,10 @@ namespace build2
}
}
- auto compile::
+ auto compile_rule::
build_prefix_map (const scope& bs,
+ action a,
target& t,
- action act,
linfo li) const -> prefix_map
{
prefix_map m;
@@ -1202,7 +1202,7 @@ namespace build2
// Then process the include directories from prerequisite libraries.
//
- append_lib_prefixes (bs, m, t, act, li);
+ append_lib_prefixes (bs, m, a, t, li);
return m;
}
@@ -1405,8 +1405,8 @@ namespace build2
// file as well as an indication if it is usable for compilation (see
// below for details).
//
- pair<auto_rmfile, bool> compile::
- extract_headers (action act,
+ pair<auto_rmfile, bool> compile_rule::
+ extract_headers (action a,
const scope& bs,
file& t,
linfo li,
@@ -1416,7 +1416,7 @@ namespace build2
bool& updating,
timestamp mt) const
{
- tracer trace (x, "compile::extract_headers");
+ tracer trace (x, "compile_rule::extract_headers");
l5 ([&]{trace << "target: " << t;});
@@ -1628,7 +1628,7 @@ namespace build2
// Return NULL if the dependency information goes to stdout and a
// pointer to the temporary file path otherwise.
//
- auto init_args = [&t, act, li,
+ auto init_args = [&t, a, li,
&src, &md, &psrc, &sense_diag,
&rs, &bs,
pp, &env, &args, &args_gen, &args_i, &out, &drm,
@@ -1677,7 +1677,7 @@ namespace build2
// Add *.export.poptions from prerequisite libraries.
//
- append_lib_options (bs, args, t, act, li);
+ append_lib_options (bs, args, a, t, li);
append_options (args, t, c_poptions);
append_options (args, t, x_poptions);
@@ -2055,7 +2055,7 @@ namespace build2
// extraction process should be restarted.
//
auto add = [&trace, &pfx_map, &so_map,
- act, &t, li,
+ a, &t, li,
&dd, &updating, &skip_count,
&bs, this]
(path f, bool cache, timestamp mt) -> bool
@@ -2185,7 +2185,7 @@ namespace build2
l4 ([&]{trace << "non-existent header '" << f << "'";});
if (!pfx_map)
- pfx_map = build_prefix_map (bs, t, act, li);
+ pfx_map = build_prefix_map (bs, a, t, li);
// First try the whole file. Then just the directory.
//
@@ -2300,8 +2300,8 @@ namespace build2
// will lead to the match failure which we translate to a restart.
//
if (!cache)
- build2::match (act, *pt);
- else if (!build2::try_match (act, *pt).first)
+ build2::match (a, *pt);
+ else if (!build2::try_match (a, *pt).first)
{
dd.write (); // Invalidate this line.
updating = true;
@@ -2310,7 +2310,7 @@ namespace build2
// Update.
//
- bool restart (update (trace, act, *pt, mt));
+ bool restart (update (trace, a, *pt, mt));
// Verify/add it to the dependency database. We do it after update in
// order not to add bogus files (non-existent and without a way to
@@ -2321,7 +2321,7 @@ namespace build2
// Add to our prerequisite target list.
//
- t.prerequisite_targets.push_back (pt);
+ t.prerequisite_targets[a].push_back (pt);
skip_count++;
updating = updating || restart;
@@ -2796,15 +2796,15 @@ namespace build2
return make_pair (move (psrc), puse);
}
- pair<translation_unit, string> compile::
- parse_unit (action act,
+ pair<translation_unit, string> compile_rule::
+ parse_unit (action a,
file& t,
linfo lo,
const file& src,
auto_rmfile& psrc,
const match_data& md) const
{
- tracer trace (x, "compile::parse_unit");
+ tracer trace (x, "compile_rule::parse_unit");
// If things go wrong give the user a bit extra context.
//
@@ -2844,7 +2844,7 @@ namespace build2
//
args.push_back (cpath.recall_string ());
- append_lib_options (t.base_scope (), args, t, act, lo);
+ append_lib_options (t.base_scope (), args, a, t, lo);
append_options (args, t, c_poptions);
append_options (args, t, x_poptions);
@@ -3071,8 +3071,8 @@ namespace build2
// Extract and inject module dependencies.
//
- void compile::
- extract_modules (action act,
+ void compile_rule::
+ extract_modules (action a,
const scope& bs,
file& t,
linfo li,
@@ -3083,7 +3083,7 @@ namespace build2
depdb& dd,
bool& updating) const
{
- tracer trace (x, "compile::extract_modules");
+ tracer trace (x, "compile_rule::extract_modules");
l5 ([&]{trace << "target: " << t;});
// If things go wrong, give the user a bit extra context.
@@ -3131,7 +3131,7 @@ namespace build2
sha256 cs;
if (!mi.imports.empty ())
- md.mods = search_modules (act, bs, t, li, tt.bmi, src, mi.imports, cs);
+ md.mods = search_modules (a, bs, t, li, tt.bmi, src, mi.imports, cs);
if (dd.expect (cs.string ()) != nullptr)
updating = true;
@@ -3201,8 +3201,8 @@ namespace build2
// Resolve imported modules to bmi*{} targets.
//
- module_positions compile::
- search_modules (action act,
+ module_positions compile_rule::
+ search_modules (action a,
const scope& bs,
file& t,
linfo li,
@@ -3211,7 +3211,7 @@ namespace build2
module_imports& imports,
sha256& cs) const
{
- tracer trace (x, "compile::search_modules");
+ tracer trace (x, "compile_rule::search_modules");
// So we have a list of imports and a list of "potential" module
// prerequisites. They are potential in the sense that they may or may
@@ -3317,7 +3317,7 @@ namespace build2
return m.size () - mi;
};
- auto& pts (t.prerequisite_targets);
+ auto& pts (t.prerequisite_targets[a]);
size_t start (pts.size ()); // Index of the first to be added.
// We have two parallel vectors: module names/scores in imports and
@@ -3476,7 +3476,7 @@ namespace build2
return r;
};
- for (prerequisite_member p: group_prerequisite_members (act, t))
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
const target* pt (p.load ()); // Should be cached for libraries.
@@ -3485,7 +3485,7 @@ namespace build2
const target* lt (nullptr);
if (const libx* l = pt->is_a<libx> ())
- lt = &link_member (*l, act, li);
+ lt = &link_member (*l, a, li);
else if (pt->is_a<liba> () || pt->is_a<libs> () || pt->is_a<libux> ())
lt = pt;
@@ -3493,7 +3493,7 @@ namespace build2
//
if (lt != nullptr)
{
- for (const target* bt: lt->prerequisite_targets)
+ for (const target* bt: lt->prerequisite_targets[a])
{
if (bt == nullptr)
continue;
@@ -3528,7 +3528,7 @@ namespace build2
continue;
if (const target** p = check_exact (*n))
- *p = &make_module_sidebuild (act, bs, *lt, *bt, *n);
+ *p = &make_module_sidebuild (a, bs, *lt, *bt, *n);
}
else
continue;
@@ -3563,7 +3563,7 @@ namespace build2
// Find the mxx{} prerequisite and extract its "file name" for the
// fuzzy match unless the user specified the module name explicitly.
//
- for (prerequisite_member p: group_prerequisite_members (act, *pt))
+ for (prerequisite_member p: group_prerequisite_members (a, *pt))
{
if (p.is_a (*x_mod))
{
@@ -3642,7 +3642,7 @@ namespace build2
// Match in parallel and wait for completion.
//
- match_members (act, t, pts, start);
+ match_members (a, t, pts, start);
// Post-process the list of our (direct) imports. While at it, calculate
// the checksum of all (direct and indirect) bmi{} paths.
@@ -3675,7 +3675,7 @@ namespace build2
if (in != mn)
{
- for (prerequisite_member p: group_prerequisite_members (act, *bt))
+ for (prerequisite_member p: group_prerequisite_members (a, *bt))
{
if (p.is_a (*x_mod)) // Got to be there.
{
@@ -3702,9 +3702,10 @@ namespace build2
// Hard to say whether we should reserve or not. We will probably
// get quite a bit of duplication.
//
- for (size_t m (bt->prerequisite_targets.size ()); j != m; ++j)
+ auto& bpts (bt->prerequisite_targets[a]);
+ for (size_t m (bpts.size ()); j != m; ++j)
{
- const target* et (bt->prerequisite_targets[j]);
+ const target* et (bpts[j]);
if (et == nullptr)
continue; // Unresolved (std.*).
@@ -3745,14 +3746,14 @@ namespace build2
// Synthesize a dependency for building a module binary interface on
// the side.
//
- const target& compile::
- make_module_sidebuild (action act,
+ const target& compile_rule::
+ make_module_sidebuild (action a,
const scope& bs,
const target& lt,
const target& mt,
const string& mn) const
{
- tracer trace (x, "compile::make_module_sidebuild");
+ tracer trace (x, "compile_rule::make_module_sidebuild");
// First figure out where we are going to build. We want to avoid
// multiple sidebuilds so the outermost scope that has loaded the
@@ -3891,7 +3892,7 @@ namespace build2
// synthesizing dependencies for bmi{}'s.
//
ps.push_back (prerequisite (lt));
- for (prerequisite_member p: group_prerequisite_members (act, lt))
+ for (prerequisite_member p: group_prerequisite_members (a, lt))
{
// @@ TODO: will probably need revision if using sidebuild for
// non-installed libraries (e.g., direct BMI dependencies
@@ -3927,10 +3928,11 @@ namespace build2
void
msvc_filter_cl (ifdstream&, const path& src);
- void compile::
+ void compile_rule::
append_modules (environment& env,
cstrings& args,
strings& stor,
+ action a,
const file& t,
const match_data& md) const
{
@@ -3939,6 +3941,8 @@ namespace build2
dir_path stdifc; // See the VC case below.
+ auto& pts (t.prerequisite_targets[a]);
+
#if 0
switch (cid)
{
@@ -3959,7 +3963,7 @@ namespace build2
//
if (md.type == translation_type::module_impl)
{
- const file& f (t.prerequisite_targets[ms.start]->as<file> ());
+ const file& f (pts[ms.start]->as<file> ());
string s (relative (f.path ()).string ());
s.insert (0, "-fmodule-file=");
stor.push_back (move (s));
@@ -3974,11 +3978,11 @@ namespace build2
}
case compiler_id::msvc:
{
- for (size_t i (ms.start), n (t.prerequisite_targets.size ());
+ for (size_t i (ms.start), n (pts.size ());
i != n;
++i)
{
- const target* pt (t.prerequisite_targets[i]);
+ const target* pt (pts[i]);
if (pt == nullptr)
continue;
@@ -4021,7 +4025,7 @@ namespace build2
assert (false);
}
#else
- size_t n (t.prerequisite_targets.size ());
+ size_t n (pts.size ());
// Clang embeds module file references so we only need to specify
// our direct imports.
@@ -4040,7 +4044,7 @@ namespace build2
for (size_t i (ms.start); i != n; ++i)
{
- const target* pt (t.prerequisite_targets[i]);
+ const target* pt (pts[i]);
if (pt == nullptr)
continue;
@@ -4130,8 +4134,8 @@ namespace build2
env.push_back ("IFCPATH");
}
- target_state compile::
- perform_update (action act, const target& xt) const
+ target_state compile_rule::
+ perform_update (action a, const target& xt) const
{
const file& t (xt.as<file> ());
const path& tp (t.path ());
@@ -4146,7 +4150,7 @@ namespace build2
auto pr (
execute_prerequisites<file> (
(mod ? *x_mod : x_src),
- act, t,
+ a, t,
md.mt,
[s = md.mods.start] (const target&, size_t i)
{
@@ -4203,7 +4207,7 @@ namespace build2
// Add *.export.poptions from prerequisite libraries.
//
- append_lib_options (bs, args, t, act, li);
+ append_lib_options (bs, args, a, t, li);
// Extra system header dirs (last).
//
@@ -4270,7 +4274,7 @@ namespace build2
args.push_back ("/MD");
if (md.mods.start != 0)
- append_modules (env, args, mods, t, md);
+ append_modules (env, args, mods, a, t, md);
// The presence of /Zi or /ZI causes the compiler to write debug info
// to the .pdb file. By default it is a shared file called vcNN.pdb
@@ -4335,7 +4339,7 @@ namespace build2
}
if (md.mods.start != 0)
- append_modules (env, args, mods, t, md);
+ append_modules (env, args, mods, a, t, md);
// Note: the order of the following options is relied upon below.
//
@@ -4604,7 +4608,7 @@ namespace build2
return target_state::changed;
}
- target_state compile::
+ target_state compile_rule::
perform_clean (action a, const target& xt) const
{
const file& t (xt.as<file> ());
diff --git a/build2/cc/compile.hxx b/build2/cc/compile-rule.hxx
index 2878e3d..6bf63bf 100644
--- a/build2/cc/compile.hxx
+++ b/build2/cc/compile-rule.hxx
@@ -1,9 +1,9 @@
-// file : build2/cc/compile.hxx -*- C++ -*-
+// file : build2/cc/compile-rule.hxx -*- C++ -*-
// copyright : Copyright (c) 2014-2017 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CC_COMPILE_HXX
-#define BUILD2_CC_COMPILE_HXX
+#ifndef BUILD2_CC_COMPILE_RULE_HXX
+#define BUILD2_CC_COMPILE_RULE_HXX
#include <libbutl/path-map.mxx>
@@ -37,12 +37,12 @@ namespace build2
size_t copied; // First copied-over bmi*{}, 0 if none.
};
- class compile: public rule, virtual common
+ class compile_rule: public rule, virtual common
{
public:
- compile (data&&);
+ compile_rule (data&&);
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
virtual recipe
@@ -61,14 +61,16 @@ namespace build2
void
append_lib_options (const scope&,
cstrings&,
+ action,
const target&,
- action, linfo) const;
+ linfo) const;
void
hash_lib_options (const scope&,
sha256&,
+ action,
const target&,
- action, linfo) const;
+ linfo) const;
// Mapping of include prefixes (e.g., foo in <foo/bar>) for auto-
// generated headers to directories where they will be generated.
@@ -97,11 +99,12 @@ namespace build2
void
append_lib_prefixes (const scope&,
prefix_map&,
+ action,
target&,
- action, linfo) const;
+ linfo) const;
prefix_map
- build_prefix_map (const scope&, target&, action, linfo) const;
+ build_prefix_map (const scope&, action, target&, linfo) const;
// Reverse-lookup target type from extension.
//
@@ -134,7 +137,7 @@ namespace build2
void
append_modules (environment&, cstrings&, strings&,
- const file&, const match_data&) const;
+ action, const file&, const match_data&) const;
// Language selection option (for VC) or the value for the -x option.
//
@@ -150,4 +153,4 @@ namespace build2
}
}
-#endif // BUILD2_CC_COMPILE_HXX
+#endif // BUILD2_CC_COMPILE_RULE_HXX
diff --git a/build2/cc/install.cxx b/build2/cc/install-rule.cxx
index fcaf626..4e232ff 100644
--- a/build2/cc/install.cxx
+++ b/build2/cc/install-rule.cxx
@@ -1,15 +1,15 @@
-// file : build2/cc/install.cxx -*- C++ -*-
+// file : build2/cc/install-rule.cxx -*- C++ -*-
// copyright : Copyright (c) 2014-2017 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
-#include <build2/cc/install.hxx>
+#include <build2/cc/install-rule.hxx>
#include <build2/algorithm.hxx>
#include <build2/bin/target.hxx>
-#include <build2/cc/link.hxx> // match()
#include <build2/cc/utility.hxx>
+#include <build2/cc/link-rule.hxx> // match()
using namespace std;
@@ -19,16 +19,16 @@ namespace build2
{
using namespace bin;
- // file_install
+ // install_rule
//
- file_install::
- file_install (data&& d, const link& l): common (move (d)), link_ (l) {}
+ install_rule::
+ install_rule (data&& d, const link_rule& l)
+ : common (move (d)), link_ (l) {}
- const target* file_install::
+ const target* install_rule::
filter (action a, const target& t, prerequisite_member p) const
{
- // NOTE: see also alias_install::filter() below if changing anything
- // here.
+ // NOTE: see libux_install_rule::filter() if changing anything here.
otype ot (link_type (t).type);
@@ -72,7 +72,7 @@ namespace build2
const target* pt (&p.search (t));
// If this is the lib{}/libu{} group, pick a member which we would
- // link. For libu{} we want to the "see through" logic.
+ // link. For libu{} we want the "see through" logic.
//
if (const libx* l = pt->is_a<libx> ())
pt = &link_member (*l, a, link_info (t.base_scope (), ot));
@@ -90,7 +90,7 @@ namespace build2
return file_rule::filter (a, t, p);
}
- match_result file_install::
+ bool install_rule::
match (action a, target& t, const string& hint) const
{
// @@ How do we split the hint between the two?
@@ -99,20 +99,38 @@ namespace build2
// We only want to handle installation if we are also the ones building
// this target. So first run link's match().
//
- match_result r (link_.match (a, t, hint));
- return r ? file_rule::match (a, t, "") : r;
+ return link_.match (a, t, hint) && file_rule::match (a, t, "");
}
- recipe file_install::
+ recipe install_rule::
apply (action a, target& t) const
{
recipe r (file_rule::apply (a, t));
- // Derive shared library paths and cache them in the target's aux
- // storage if we are (un)installing (used in *_extra() functions below).
- //
- if (a.operation () == install_id || a.operation () == uninstall_id)
+ if (a.operation () == update_id)
+ {
+ // Signal to the link rule that this is update for install. And if the
+ // update has already been executed, verify it was done for install.
+ //
+ auto& md (t.data<link_rule::match_data> ());
+
+ if (md.for_install)
+ {
+ if (!*md.for_install)
+ fail << "target " << t << " already updated but not for install";
+ }
+ else
+ md.for_install = true;
+ }
+ else // install or uninstall
{
+ // Derive shared library paths and cache them in the target's aux
+ // storage if we are un/installing (used in *_extra() functions
+ // below).
+ //
+ static_assert (sizeof (link_rule::libs_paths) <= target::data_size,
+ "insufficient space");
+
file* f;
if ((f = t.is_a<libs> ()) != nullptr && tclass != "windows")
{
@@ -128,34 +146,39 @@ namespace build2
return r;
}
- void file_install::
+ bool install_rule::
install_extra (const file& t, const install_dir& id) const
{
+ bool r (false);
+
if (t.is_a<libs> () && tclass != "windows")
{
// Here we may have a bunch of symlinks that we need to install.
//
const scope& rs (t.root_scope ());
- auto& lp (t.data<link::libs_paths> ());
+ auto& lp (t.data<link_rule::libs_paths> ());
auto ln = [&rs, &id] (const path& f, const path& l)
{
install_l (rs, id, f.leaf (), l.leaf (), false);
+ return true;
};
const path& lk (lp.link);
const path& so (lp.soname);
const path& in (lp.interm);
- const path* f (&lp.real);
+ const path* f (lp.real);
- if (!in.empty ()) {ln (*f, in); f = &in;}
- if (!so.empty ()) {ln (*f, so); f = &so;}
- if (!lk.empty ()) {ln (*f, lk);}
+ if (!in.empty ()) {r = ln (*f, in) || r; f = &in;}
+ if (!so.empty ()) {r = ln (*f, so) || r; f = &so;}
+ if (!lk.empty ()) {r = ln (*f, lk) || r; }
}
+
+ return r;
}
- bool file_install::
+ bool install_rule::
uninstall_extra (const file& t, const install_dir& id) const
{
bool r (false);
@@ -165,7 +188,7 @@ namespace build2
// Here we may have a bunch of symlinks that we need to uninstall.
//
const scope& rs (t.root_scope ());
- auto& lp (t.data<link::libs_paths> ());
+ auto& lp (t.data<link_rule::libs_paths> ());
auto rm = [&rs, &id] (const path& l)
{
@@ -184,15 +207,16 @@ namespace build2
return r;
}
- // alias_install
+ // libux_install_rule
//
- alias_install::
- alias_install (data&& d, const link& l): common (move (d)), link_ (l) {}
+ libux_install_rule::
+ libux_install_rule (data&& d, const link_rule& l)
+ : common (move (d)), link_ (l) {}
- const target* alias_install::
+ const target* libux_install_rule::
filter (action a, const target& t, prerequisite_member p) const
{
- // The "see through" semantics that should be parallel to file_install
+ // The "see through" semantics that should be parallel to install_rule
// above. In particular, here we use libue/libua/libus{} as proxies for
// exe/liba/libs{} there.
@@ -233,14 +257,13 @@ namespace build2
return alias_rule::filter (a, t, p);
}
- match_result alias_install::
+ bool libux_install_rule::
match (action a, target& t, const string& hint) const
{
// We only want to handle installation if we are also the ones building
// this target. So first run link's match().
//
- match_result r (link_.match (a, t, hint));
- return r ? alias_rule::match (a, t, "") : r;
+ return link_.match (a, t, hint) && alias_rule::match (a, t, "");
}
}
}
diff --git a/build2/cc/install-rule.hxx b/build2/cc/install-rule.hxx
new file mode 100644
index 0000000..ac2f93a
--- /dev/null
+++ b/build2/cc/install-rule.hxx
@@ -0,0 +1,77 @@
+// file : build2/cc/install-rule.hxx -*- C++ -*-
+// copyright : Copyright (c) 2014-2017 Code Synthesis Ltd
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_CC_INSTALL_RULE_HXX
+#define BUILD2_CC_INSTALL_RULE_HXX
+
+#include <build2/types.hxx>
+#include <build2/utility.hxx>
+
+#include <build2/install/rule.hxx>
+
+#include <build2/cc/types.hxx>
+#include <build2/cc/common.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ class link_rule;
+
+ // Installation rule for exe{} and lib*{}. Here we do:
+ //
+ // 1. Signal to the link rule that this is update for install.
+ //
+ // 2. Additional filtering of prerequisites (e.g., headers of an exe{}).
+ //
+ // 3. Extra un/installation (e.g., libs{} symlinks).
+ //
+ class install_rule: public install::file_rule, virtual common
+ {
+ public:
+ install_rule (data&&, const link_rule&);
+
+ virtual const target*
+ filter (action, const target&, prerequisite_member) const override;
+
+ virtual bool
+ match (action, target&, const string&) const override;
+
+ virtual recipe
+ apply (action, target&) const override;
+
+ virtual bool
+ install_extra (const file&, const install_dir&) const override;
+
+ virtual bool
+ uninstall_extra (const file&, const install_dir&) const override;
+
+ private:
+ const link_rule& link_;
+ };
+
+ // Installation rule for libu*{}.
+ //
+ // While libu*{} themselves are not installable, we need to see through
+ // them in case they depend on stuff that we need to install (e.g.,
+ // headers). Note that we use the alias_rule as a base.
+ //
+ class libux_install_rule: public install::alias_rule, virtual common
+ {
+ public:
+ libux_install_rule (data&&, const link_rule&);
+
+ virtual const target*
+ filter (action, const target&, prerequisite_member) const override;
+
+ virtual bool
+ match (action, target&, const string&) const override;
+
+ private:
+ const link_rule& link_;
+ };
+ }
+}
+
+#endif // BUILD2_CC_INSTALL_RULE_HXX
diff --git a/build2/cc/install.hxx b/build2/cc/install.hxx
deleted file mode 100644
index 28a0a94..0000000
--- a/build2/cc/install.hxx
+++ /dev/null
@@ -1,67 +0,0 @@
-// file : build2/cc/install.hxx -*- C++ -*-
-// copyright : Copyright (c) 2014-2017 Code Synthesis Ltd
-// license : MIT; see accompanying LICENSE file
-
-#ifndef BUILD2_CC_INSTALL_HXX
-#define BUILD2_CC_INSTALL_HXX
-
-#include <build2/types.hxx>
-#include <build2/utility.hxx>
-
-#include <build2/install/rule.hxx>
-
-#include <build2/cc/types.hxx>
-#include <build2/cc/common.hxx>
-
-namespace build2
-{
- namespace cc
- {
- class link;
-
- // Installation rule for exe{}, lib*{}, etc.
- //
- class file_install: public install::file_rule, virtual common
- {
- public:
- file_install (data&&, const link&);
-
- virtual const target*
- filter (action, const target&, prerequisite_member) const override;
-
- virtual match_result
- match (action, target&, const string&) const override;
-
- virtual recipe
- apply (action, target&) const override;
-
- virtual void
- install_extra (const file&, const install_dir&) const override;
-
- virtual bool
- uninstall_extra (const file&, const install_dir&) const override;
-
- private:
- const link& link_;
- };
-
- // Installation rule for libux{}.
- //
- class alias_install: public install::alias_rule, virtual common
- {
- public:
- alias_install (data&&, const link&);
-
- virtual const target*
- filter (action, const target&, prerequisite_member) const override;
-
- virtual match_result
- match (action, target&, const string&) const override;
-
- private:
- const link& link_;
- };
- }
-}
-
-#endif // BUILD2_CC_INSTALL_HXX
diff --git a/build2/cc/link.cxx b/build2/cc/link-rule.cxx
index f69d549..d06a835 100644
--- a/build2/cc/link.cxx
+++ b/build2/cc/link-rule.cxx
@@ -1,8 +1,8 @@
-// file : build2/cc/link.cxx -*- C++ -*-
+// file : build2/cc/link-rule.cxx -*- C++ -*-
// copyright : Copyright (c) 2014-2017 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
-#include <build2/cc/link.hxx>
+#include <build2/cc/link-rule.hxx>
#include <map>
#include <cstdlib> // exit()
@@ -32,29 +32,21 @@ namespace build2
{
using namespace bin;
- link::
- link (data&& d)
+ link_rule::
+ link_rule (data&& d)
: common (move (d)),
rule_id (string (x) += ".link 1")
{
+ static_assert (sizeof (match_data) <= target::data_size,
+ "insufficient space");
}
- match_result link::
- match (action act, target& t, const string& hint) const
+ bool link_rule::
+ match (action a, target& t, const string& hint) const
{
- tracer trace (x, "link::match");
+ tracer trace (x, "link_rule::match");
- // @@ TODO:
- //
- // - if path already assigned, verify extension?
- //
- // @@ Q:
- //
- // - if there is no .o, are we going to check if the one derived
- // from target exist or can be built? A: No.
- // What if there is a library. Probably ok if static, not if shared,
- // (i.e., a utility library).
- //
+ // NOTE: may be called multiple times (see install rules).
ltype lt (link_type (t));
otype ot (lt.type);
@@ -77,7 +69,7 @@ namespace build2
//
bool seen_x (false), seen_c (false), seen_obj (false), seen_lib (false);
- for (prerequisite_member p: group_prerequisite_members (act, t))
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
if (p.is_a (x_src) || (x_mod != nullptr && p.is_a (*x_mod)))
{
@@ -141,7 +133,7 @@ namespace build2
return true;
}
- auto link::
+ auto link_rule::
derive_libs_paths (file& ls, const char* pfx, const char* sfx) const
-> libs_paths
{
@@ -286,19 +278,21 @@ namespace build2
const path& re (ls.derive_path (move (b)));
- return libs_paths {move (lk), move (so), move (in), re, move (cp)};
+ return libs_paths {move (lk), move (so), move (in), &re, move (cp)};
}
- recipe link::
- apply (action act, target& xt) const
+ recipe link_rule::
+ apply (action a, target& xt) const
{
- static_assert (sizeof (link::libs_paths) <= target::data_size,
- "insufficient space");
-
- tracer trace (x, "link::apply");
+ tracer trace (x, "link_rule::apply");
file& t (xt.as<file> ());
+ // Note that for_install is signalled by install_rule and therefore
+ // can only be relied upon during execute.
+ //
+ match_data& md (t.data (match_data ()));
+
const scope& bs (t.base_scope ());
const scope& rs (*bs.root_scope ());
@@ -417,9 +411,9 @@ namespace build2
// the DLL and we add libi{} import library as its member.
//
if (tclass == "windows")
- libi = add_adhoc_member<bin::libi> (act, t);
+ libi = add_adhoc_member<bin::libi> (a, t);
- t.data (derive_libs_paths (t, p, s)); // Cache in target.
+ md.libs_data = derive_libs_paths (t, p, s);
if (libi)
match_recipe (libi, group_recipe); // Set recipe and unlock.
@@ -439,7 +433,7 @@ namespace build2
// Note: add after the import library if any.
//
target_lock pdb (
- add_adhoc_member (act, t, *bs.find_target_type ("pdb")));
+ add_adhoc_member (a, t, *bs.find_target_type ("pdb")));
// We call it foo.{exe,dll}.pdb rather than just foo.pdb because
// we can have both foo.exe and foo.dll in the same directory.
@@ -453,16 +447,16 @@ namespace build2
//
// Note that we do it here regardless of whether we are installing
// or not for two reasons. Firstly, it is not easy to detect this
- // situation in apply() since the action may (and is) overridden to
- // unconditional install. Secondly, always having the member takes
- // care of cleanup automagically. The actual generation happens in
- // the install rule.
+ // situation in apply() since the for_install hasn't yet been
+ // communicated by install_rule. Secondly, always having the member
+ // takes care of cleanup automagically. The actual generation
+ // happens in perform_update() below.
//
if (ot != otype::e)
{
target_lock pc (
add_adhoc_member (
- act, t,
+ a, t,
ot == otype::a ? pca::static_type : pcs::static_type));
// Note that here we always use the lib name prefix, even on
@@ -482,7 +476,7 @@ namespace build2
// Inject dependency on the output directory.
//
- inject_fsdir (act, t);
+ inject_fsdir (a, t);
// Process prerequisites, pass 1: search and match prerequisite
// libraries, search obj/bmi{} targets, and search targets we do rule
@@ -507,23 +501,24 @@ namespace build2
optional<dir_paths> usr_lib_dirs; // Extract lazily.
compile_target_types tt (compile_types (ot));
- auto skip = [&act, &rs] (const target*& pt)
+ auto skip = [&a, &rs] (const target*& pt)
{
- if (act.operation () == clean_id && !pt->dir.sub (rs.out_path ()))
+ if (a.operation () == clean_id && !pt->dir.sub (rs.out_path ()))
pt = nullptr;
return pt == nullptr;
};
- size_t start (t.prerequisite_targets.size ());
+ auto& pts (t.prerequisite_targets[a]);
+ size_t start (pts.size ());
- for (prerequisite_member p: group_prerequisite_members (act, t))
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
// We pre-allocate a NULL slot for each (potential; see clean)
// prerequisite target.
//
- t.prerequisite_targets.push_back (nullptr);
- const target*& pt (t.prerequisite_targets.back ());
+ pts.push_back (nullptr);
+ const target*& pt (pts.back ());
uint8_t m (0); // Mark: lib (0), src (1), mod (2), obj/bmi (3).
@@ -603,7 +598,7 @@ namespace build2
//
if (p.proj ())
pt = search_library (
- act, sys_lib_dirs, usr_lib_dirs, p.prerequisite);
+ a, sys_lib_dirs, usr_lib_dirs, p.prerequisite);
// The rest is the same basic logic as in search_and_match().
//
@@ -617,7 +612,7 @@ namespace build2
// member.
//
if (const libx* l = pt->is_a<libx> ())
- pt = &link_member (*l, act, li);
+ pt = &link_member (*l, a, li);
}
else
{
@@ -639,7 +634,7 @@ namespace build2
// Match lib{} (the only unmarked) in parallel and wait for completion.
//
- match_members (act, t, t.prerequisite_targets, start);
+ match_members (a, t, pts, start);
// Process prerequisites, pass 2: finish rule chaining but don't start
// matching anything yet since that may trigger recursive matching of
@@ -648,11 +643,11 @@ namespace build2
// Parallel prerequisite_targets loop.
//
- size_t i (start), n (t.prerequisite_targets.size ());
- for (prerequisite_member p: group_prerequisite_members (act, t))
+ size_t i (start), n (pts.size ());
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
- const target*& pt (t.prerequisite_targets[i].target);
- uintptr_t& pd (t.prerequisite_targets[i++].data);
+ const target*& pt (pts[i].target);
+ uintptr_t& pd (pts[i++].data);
if (pt == nullptr)
continue;
@@ -710,9 +705,9 @@ namespace build2
// Note: have similar logic in make_module_sidebuild().
//
size_t j (start);
- for (prerequisite_member p: group_prerequisite_members (act, t))
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
- const target* pt (t.prerequisite_targets[j++]);
+ const target* pt (pts[j++]);
if (p.is_a<libx> () ||
p.is_a<liba> () || p.is_a<libs> () || p.is_a<libux> () ||
@@ -760,7 +755,7 @@ namespace build2
: (group ? obj::static_type : tt.obj));
bool src (false);
- for (prerequisite_member p1: group_prerequisite_members (act, *pt))
+ for (prerequisite_member p1: group_prerequisite_members (a, *pt))
{
// Most of the time we will have just a single source so fast-
// path that case.
@@ -843,18 +838,18 @@ namespace build2
// Wait with unlocked phase to allow phase switching.
//
- wait_guard wg (target::count_busy (), t.task_count, true);
+ wait_guard wg (target::count_busy (), t[a].task_count, true);
for (i = start; i != n; ++i)
{
- const target*& pt (t.prerequisite_targets[i]);
+ const target*& pt (pts[i]);
if (pt == nullptr)
continue;
if (uint8_t m = unmark (pt))
{
- match_async (act, *pt, target::count_busy (), t.task_count);
+ match_async (a, *pt, target::count_busy (), t[a].task_count);
mark (pt, m);
}
}
@@ -865,9 +860,9 @@ namespace build2
// that we may have bailed out early (thus the parallel i/n for-loop).
//
i = start;
- for (prerequisite_member p: group_prerequisite_members (act, t))
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
- const target*& pt (t.prerequisite_targets[i++]);
+ const target*& pt (pts[i++]);
// Skipped or not marked for completion.
//
@@ -875,7 +870,7 @@ namespace build2
if (pt == nullptr || (m = unmark (pt)) == 0)
continue;
- build2::match (act, *pt);
+ build2::match (a, *pt);
// Nothing else to do if not marked for verification.
//
@@ -887,7 +882,7 @@ namespace build2
//
bool mod (x_mod != nullptr && p.is_a (*x_mod));
- for (prerequisite_member p1: group_prerequisite_members (act, *pt))
+ for (prerequisite_member p1: group_prerequisite_members (a, *pt))
{
if (p1.is_a (mod ? *x_mod : x_src) || p1.is_a<c> ())
{
@@ -915,7 +910,7 @@ namespace build2
}
}
- switch (act)
+ switch (a)
{
case perform_update_id: return [this] (action a, const target& t)
{
@@ -929,10 +924,10 @@ namespace build2
}
}
- void link::
+ void link_rule::
append_libraries (strings& args,
const file& l, bool la, lflags lf,
- const scope& bs, action act, linfo li) const
+ const scope& bs, action a, linfo li) const
{
// Note: lack of the "small function object" optimization will really
// kill us here since we are called in a loop.
@@ -996,14 +991,14 @@ namespace build2
};
process_libraries (
- act, bs, li, sys_lib_dirs, l, la, lf, imp, lib, opt, true);
+ a, bs, li, sys_lib_dirs, l, la, lf, imp, lib, opt, true);
}
- void link::
+ void link_rule::
hash_libraries (sha256& cs,
bool& update, timestamp mt,
const file& l, bool la, lflags lf,
- const scope& bs, action act, linfo li) const
+ const scope& bs, action a, linfo li) const
{
auto imp = [] (const file&, bool la) {return la;};
@@ -1053,14 +1048,14 @@ namespace build2
};
process_libraries (
- act, bs, li, sys_lib_dirs, l, la, lf, imp, lib, opt, true);
+ a, bs, li, sys_lib_dirs, l, la, lf, imp, lib, opt, true);
}
- void link::
+ void link_rule::
rpath_libraries (strings& args,
const target& t,
const scope& bs,
- action act,
+ action a,
linfo li,
bool for_install) const
{
@@ -1158,16 +1153,16 @@ namespace build2
const function<bool (const file&, bool)> impf (imp);
const function<void (const file*, const string&, lflags, bool)> libf (lib);
- for (auto pt: t.prerequisite_targets)
+ for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
- bool a;
+ bool la;
const file* f;
- if ((a = (f = pt->is_a<liba> ())) ||
- (a = (f = pt->is_a<libux> ())) ||
- ( f = pt->is_a<libs> ()))
+ if ((la = (f = pt->is_a<liba> ())) ||
+ (la = (f = pt->is_a<libux> ())) ||
+ ( f = pt->is_a<libs> ()))
{
- if (!for_install && !a)
+ if (!for_install && !la)
{
            // Top-level shared library dependency. It is either matched or
// imported so should be a cc library.
@@ -1177,8 +1172,8 @@ namespace build2
"-Wl,-rpath," + f->path ().directory ().string ());
}
- process_libraries (act, bs, li, sys_lib_dirs,
- *f, a, pt.data,
+ process_libraries (a, bs, li, sys_lib_dirs,
+ *f, la, pt.data,
impf, libf, nullptr);
}
}
@@ -1194,17 +1189,24 @@ namespace build2
const char*
msvc_machine (const string& cpu); // msvc.cxx
- target_state link::
- perform_update (action act, const target& xt) const
+ target_state link_rule::
+ perform_update (action a, const target& xt) const
{
- tracer trace (x, "link::perform_update");
-
- auto oop (act.outer_operation ());
- bool for_install (oop == install_id || oop == uninstall_id);
+ tracer trace (x, "link_rule::perform_update");
const file& t (xt.as<file> ());
const path& tp (t.path ());
+ match_data& md (t.data<match_data> ());
+
+ // Unless the outer install rule signalled that this is update for
+ // install, signal back that we've performed plain update.
+ //
+ if (!md.for_install)
+ md.for_install = false;
+
+ bool for_install (*md.for_install);
+
const scope& bs (t.base_scope ());
const scope& rs (*bs.root_scope ());
@@ -1217,7 +1219,7 @@ namespace build2
//
bool update (false);
timestamp mt (t.load_mtime ());
- target_state ts (straight_execute_prerequisites (act, t));
+ target_state ts (straight_execute_prerequisites (a, t));
// If targeting Windows, take care of the manifest.
//
@@ -1231,7 +1233,7 @@ namespace build2
// it if we are updating for install.
//
if (!for_install)
- rpath_timestamp = windows_rpath_timestamp (t, bs, act, li);
+ rpath_timestamp = windows_rpath_timestamp (t, bs, a, li);
pair<path, bool> p (
windows_manifest (t,
@@ -1450,7 +1452,7 @@ namespace build2
//
if (lt.shared_library ())
{
- const libs_paths& paths (t.data<libs_paths> ());
+ const libs_paths& paths (md.libs_data);
const string& leaf (paths.effect_soname ().leaf ().string ());
if (tclass == "macos")
@@ -1486,7 +1488,7 @@ namespace build2
// rpath of the imported libraries (i.e., we assume they are also
// installed). But we add -rpath-link for some platforms.
//
- rpath_libraries (sargs, t, bs, act, li, for_install);
+ rpath_libraries (sargs, t, bs, a, li, for_install);
if (auto l = t["bin.rpath"])
for (const dir_path& p: cast<dir_paths> (l))
@@ -1519,7 +1521,7 @@ namespace build2
{
sha256 cs;
- for (auto p: t.prerequisite_targets)
+ for (const prerequisite_target& p: t.prerequisite_targets[a])
{
const target* pt (p.target);
@@ -1532,15 +1534,15 @@ namespace build2
}
const file* f;
- bool a (false), s (false);
+ bool la (false), ls (false);
if ((f = pt->is_a<obje> ()) ||
(f = pt->is_a<obja> ()) ||
(f = pt->is_a<objs> ()) ||
(!lt.static_library () && // @@ UTL: TODO libua to liba link.
- ((a = (f = pt->is_a<liba> ())) ||
- (a = (f = pt->is_a<libux> ())) ||
- (s = (f = pt->is_a<libs> ())))))
+ ((la = (f = pt->is_a<liba> ())) ||
+ (la = (f = pt->is_a<libux> ())) ||
+ (ls = (f = pt->is_a<libs> ())))))
{
// Link all the dependent interface libraries (shared) or interface
// and implementation (static), recursively.
@@ -1551,9 +1553,9 @@ namespace build2
// reason to re-archive the utility but those who link the utility
// have to "see through" the changes in the shared library.
//
- if (a || s)
+ if (la || ls)
{
- hash_libraries (cs, update, mt, *f, a, p.data, bs, act, li);
+ hash_libraries (cs, update, mt, *f, la, p.data, bs, a, li);
f = nullptr; // Timestamp checked by hash_libraries().
}
else
@@ -1603,22 +1605,16 @@ namespace build2
//
// Also, if you are wondering why don't we just always produce this .pc,
// install or no install, the reason is unless and until we are updating
- // for install, we have no idea where to things will be installed.
+ // for install, we have no idea where-to things will be installed.
//
if (for_install)
{
- bool a;
+ bool la;
const file* f;
- if ((a = (f = t.is_a<liba> ())) ||
- ( f = t.is_a<libs> ()))
- {
- // @@ Hack: this should really be in install:update_extra() where we
- // (should) what we are installing and what not.
- //
- if (rs["install.root"])
- pkgconfig_save (act, *f, a);
- }
+ if ((la = (f = t.is_a<liba> ())) ||
+ ( f = t.is_a<libs> ()))
+ pkgconfig_save (a, *f, la);
}
// If nothing changed, then we are done.
@@ -1810,7 +1806,7 @@ namespace build2
// The same logic as during hashing above.
//
- for (auto p: t.prerequisite_targets)
+ for (const prerequisite_target& p: t.prerequisite_targets[a])
{
const target* pt (p.target);
@@ -1821,21 +1817,21 @@ namespace build2
}
const file* f;
- bool a (false), s (false);
+ bool la (false), ls (false);
if ((f = pt->is_a<obje> ()) ||
(f = pt->is_a<obja> ()) ||
(f = pt->is_a<objs> ()) ||
(!lt.static_library () && // @@ UTL: TODO libua to liba link.
- ((a = (f = pt->is_a<liba> ())) ||
- (a = (f = pt->is_a<libux> ())) ||
- (s = (f = pt->is_a<libs> ())))))
+ ((la = (f = pt->is_a<liba> ())) ||
+ (la = (f = pt->is_a<libux> ())) ||
+ (ls = (f = pt->is_a<libs> ())))))
{
// Link all the dependent interface libraries (shared) or interface
// and implementation (static), recursively.
//
- if (a || s)
- append_libraries (sargs, *f, a, p.data, bs, act, li);
+ if (la || ls)
+ append_libraries (sargs, *f, la, p.data, bs, a, li);
else
sargs.push_back (relative (f->path ()).string ()); // string()&&
}
@@ -1864,7 +1860,7 @@ namespace build2
//
if (lt.shared_library ())
{
- const libs_paths& paths (t.data<libs_paths> ());
+ const libs_paths& paths (md.libs_data);
const path& p (paths.clean);
if (!p.empty ())
@@ -1886,7 +1882,7 @@ namespace build2
return s.empty () || m.string ().compare (0, s.size (), s) != 0;
};
- if (test (paths.real) &&
+ if (test (*paths.real) &&
test (paths.interm) &&
test (paths.soname) &&
test (paths.link))
@@ -2004,7 +2000,7 @@ namespace build2
// install).
//
if (lt.executable () && !for_install)
- windows_rpath_assembly (t, bs, act, li,
+ windows_rpath_assembly (t, bs, a, li,
cast<string> (rs[x_target_cpu]),
rpath_timestamp,
scratch);
@@ -2031,13 +2027,13 @@ namespace build2
}
};
- const libs_paths& paths (t.data<libs_paths> ());
+ const libs_paths& paths (md.libs_data);
const path& lk (paths.link);
const path& so (paths.soname);
const path& in (paths.interm);
- const path* f (&paths.real);
+ const path* f (paths.real);
if (!in.empty ()) {ln (f->leaf (), in); f = &in;}
if (!so.empty ()) {ln (f->leaf (), so); f = &so;}
@@ -2054,8 +2050,8 @@ namespace build2
return target_state::changed;
}
- target_state link::
- perform_clean (action act, const target& xt) const
+ target_state link_rule::
+ perform_clean (action a, const target& xt) const
{
const file& t (xt.as<file> ());
ltype lt (link_type (t));
@@ -2066,13 +2062,13 @@ namespace build2
{
if (tsys == "mingw32")
return clean_extra (
- act, t, {".d", ".dlls/", ".manifest.o", ".manifest"});
+ a, t, {".d", ".dlls/", ".manifest.o", ".manifest"});
else
// Assuming it's VC or alike. Clean up .ilk in case the user
// enabled incremental linking (note that .ilk replaces .exe).
//
return clean_extra (
- act, t, {".d", ".dlls/", ".manifest", "-.ilk"});
+ a, t, {".d", ".dlls/", ".manifest", "-.ilk"});
}
}
else if (lt.shared_library ())
@@ -2085,16 +2081,16 @@ namespace build2
// versioning their bases may not be the same.
//
if (tsys != "mingw32")
- return clean_extra (act, t, {{".d", "-.ilk"}, {"-.exp"}});
+ return clean_extra (a, t, {{".d", "-.ilk"}, {"-.exp"}});
}
else
{
// Here we can have a bunch of symlinks that we need to remove. If
// the paths are empty, then they will be ignored.
//
- const libs_paths& paths (t.data<libs_paths> ());
+ const libs_paths& paths (t.data<match_data> ().libs_data);
- return clean_extra (act, t, {".d",
+ return clean_extra (a, t, {".d",
paths.link.string ().c_str (),
paths.soname.string ().c_str (),
paths.interm.string ().c_str ()});
@@ -2102,7 +2098,7 @@ namespace build2
}
// For static library it's just the defaults.
- return clean_extra (act, t, {".d"});
+ return clean_extra (a, t, {".d"});
}
}
}
diff --git a/build2/cc/link.hxx b/build2/cc/link-rule.hxx
index c26102d..ba40410 100644
--- a/build2/cc/link.hxx
+++ b/build2/cc/link-rule.hxx
@@ -1,9 +1,9 @@
-// file : build2/cc/link.hxx -*- C++ -*-
+// file : build2/cc/link-rule.hxx -*- C++ -*-
// copyright : Copyright (c) 2014-2017 Code Synthesis Ltd
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CC_LINK_HXX
-#define BUILD2_CC_LINK_HXX
+#ifndef BUILD2_CC_LINK_RULE_HXX
+#define BUILD2_CC_LINK_RULE_HXX
#include <set>
@@ -19,12 +19,12 @@ namespace build2
{
namespace cc
{
- class link: public rule, virtual common
+ class link_rule: public rule, virtual common
{
public:
- link (data&&);
+ link_rule (data&&);
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
virtual recipe
@@ -37,8 +37,8 @@ namespace build2
perform_clean (action, const target&) const;
private:
- friend class file_install;
- friend class alias_install;
+ friend class install_rule;
+ friend class libux_install_rule;
// Shared library paths.
//
@@ -51,27 +51,56 @@ namespace build2
// The libs{} path is always the real path. On Windows the link path
// is the import library.
//
- const path link; // What we link: libfoo.so
- const path soname; // SONAME: libfoo-1.so, libfoo.so.1
- const path interm; // Intermediate: libfoo.so.1.2
- const path& real; // Real: libfoo.so.1.2.3
+ path link; // What we link: libfoo.so
+ path soname; // SONAME: libfoo-1.so, libfoo.so.1
+ path interm; // Intermediate: libfoo.so.1.2
+ const path* real; // Real: libfoo.so.1.2.3
inline const path&
effect_link () const {return link.empty () ? effect_soname () : link;}
inline const path&
- effect_soname () const {return soname.empty () ? real : soname;}
+ effect_soname () const {return soname.empty () ? *real : soname;}
// Cleanup pattern used to remove previous versions. If empty, no
// cleanup is performed. The above (current) names are automatically
// filtered out.
//
- const path clean;
+ path clean;
};
libs_paths
derive_libs_paths (file&, const char*, const char*) const;
+ struct match_data
+ {
+ // The "for install" condition is signalled to us by install_rule when
+ // it is matched for the update operation. It also verifies that if we
+ // have already been executed, then it was for install.
+ //
+ // This has an interesting implication: it means that this rule cannot
+ // be used to update targets during match. Specifically, we cannot be
+ // executed for group resolution purposes (not a problem) nor as part
+ // of the generated source update. The latter case can be a problem:
+ // imagine a code generator that itself may need to be updated before
+ // it can be used to re-generate some out-of-date source code. As an
+ // aside, note that even if we were somehow able to communicate the
+ // "for install" in this case, the result of such an update may not
+ // actually be "usable" (e.g., not runnable because of the missing
+ // rpaths). There is another prominent case where the result may not
+ // be usable: cross-compilation.
+ //
+ // So the current (admittedly fuzzy) thinking is that a project shall
+ // not try to use its own build for update since it may not be usable
+ // (because of cross-compilations, being "for install", etc). Instead,
+ // it should rely on another, "usable" build of itself (this, BTW, is
+ // related to bpkg's build-time vs run-time dependencies).
+ //
+ optional<bool> for_install;
+
+ libs_paths libs_data;
+ };
+
// Library handling.
//
void
@@ -134,4 +163,4 @@ namespace build2
}
}
-#endif // BUILD2_CC_LINK_HXX
+#endif // BUILD2_CC_LINK_RULE_HXX
diff --git a/build2/cc/module.cxx b/build2/cc/module.cxx
index c56bca9..ae64220 100644
--- a/build2/cc/module.cxx
+++ b/build2/cc/module.cxx
@@ -499,8 +499,8 @@ namespace build2
// We register for configure so that we detect unresolved imports
// during configuration rather that later, e.g., during update.
//
- const compile& cr (*this);
- const link& lr (*this);
+ const compile_rule& cr (*this);
+ const link_rule& lr (*this);
r.insert<obje> (perform_update_id, x_compile, cr);
r.insert<obje> (perform_clean_id, x_compile, cr);
@@ -559,26 +559,27 @@ namespace build2
//
if (install_loaded)
{
- const file_install& fr (*this);
- const alias_install& ar (*this);
+ const install_rule& ir (*this);
- r.insert<exe> (perform_install_id, x_install, fr);
- r.insert<exe> (perform_uninstall_id, x_uninstall, fr);
+ r.insert<exe> (perform_install_id, x_install, ir);
+ r.insert<exe> (perform_uninstall_id, x_uninstall, ir);
- r.insert<liba> (perform_install_id, x_install, fr);
- r.insert<liba> (perform_uninstall_id, x_uninstall, fr);
+ r.insert<liba> (perform_install_id, x_install, ir);
+ r.insert<liba> (perform_uninstall_id, x_uninstall, ir);
- r.insert<libs> (perform_install_id, x_install, fr);
- r.insert<libs> (perform_uninstall_id, x_uninstall, fr);
+ r.insert<libs> (perform_install_id, x_install, ir);
+ r.insert<libs> (perform_uninstall_id, x_uninstall, ir);
- r.insert<libue> (perform_install_id, x_install, ar);
- r.insert<libue> (perform_uninstall_id, x_uninstall, ar);
+ const libux_install_rule& lr (*this);
- r.insert<libua> (perform_install_id, x_install, ar);
- r.insert<libua> (perform_uninstall_id, x_uninstall, ar);
+ r.insert<libue> (perform_install_id, x_install, lr);
+ r.insert<libue> (perform_uninstall_id, x_uninstall, lr);
- r.insert<libus> (perform_install_id, x_install, ar);
- r.insert<libus> (perform_uninstall_id, x_uninstall, ar);
+ r.insert<libua> (perform_install_id, x_install, lr);
+ r.insert<libua> (perform_uninstall_id, x_uninstall, lr);
+
+ r.insert<libus> (perform_install_id, x_install, lr);
+ r.insert<libus> (perform_uninstall_id, x_uninstall, lr);
}
}
}
diff --git a/build2/cc/module.hxx b/build2/cc/module.hxx
index de61611..58aa184 100644
--- a/build2/cc/module.hxx
+++ b/build2/cc/module.hxx
@@ -13,9 +13,9 @@
#include <build2/cc/common.hxx>
-#include <build2/cc/compile.hxx>
-#include <build2/cc/link.hxx>
-#include <build2/cc/install.hxx>
+#include <build2/cc/compile-rule.hxx>
+#include <build2/cc/link-rule.hxx>
+#include <build2/cc/install-rule.hxx>
namespace build2
{
@@ -76,19 +76,19 @@ namespace build2
};
class module: public module_base, public virtual common,
- link,
- compile,
- file_install,
- alias_install
+ link_rule,
+ compile_rule,
+ install_rule,
+ libux_install_rule
{
public:
explicit
module (data&& d)
: common (move (d)),
- link (move (d)),
- compile (move (d)),
- file_install (move (d), *this),
- alias_install (move (d), *this) {}
+ link_rule (move (d)),
+ compile_rule (move (d)),
+ install_rule (move (d), *this),
+ libux_install_rule (move (d), *this) {}
void
init (scope&, const location&, const variable_map&);
diff --git a/build2/cc/pkgconfig.cxx b/build2/cc/pkgconfig.cxx
index 0ffd135..697a60e 100644
--- a/build2/cc/pkgconfig.cxx
+++ b/build2/cc/pkgconfig.cxx
@@ -26,8 +26,8 @@
#include <build2/cc/utility.hxx>
#include <build2/cc/common.hxx>
-#include <build2/cc/compile.hxx>
-#include <build2/cc/link.hxx>
+#include <build2/cc/compile-rule.hxx>
+#include <build2/cc/link-rule.hxx>
using namespace std;
using namespace butl;
@@ -451,7 +451,7 @@ namespace build2
//
#ifndef BUILD2_BOOTSTRAP
bool common::
- pkgconfig_load (action act,
+ pkgconfig_load (action a,
const scope& s,
lib& lt,
liba* at,
@@ -592,12 +592,13 @@ namespace build2
// Extract --cflags and set them as lib?{}:export.poptions. Note that we
// still pass --static in case this is pkgconf which has Cflags.private.
//
- auto parse_cflags = [&trace, this] (target& t, const pkgconf& pc, bool a)
+ auto parse_cflags =
+ [&trace, this] (target& t, const pkgconf& pc, bool la)
{
strings pops;
bool arg (false);
- for (auto& o: pc.cflags (a))
+ for (auto& o: pc.cflags (la))
{
if (arg)
{
@@ -646,8 +647,8 @@ namespace build2
// Parse --libs into loptions/libs (interface and implementation). If
// ps is not NULL, add each resolves library target as a prerequisite.
//
- auto parse_libs = [act, &s, top_sysd, this]
- (target& t, const pkgconf& pc, bool a, prerequisites* ps)
+ auto parse_libs = [a, &s, top_sysd, this]
+ (target& t, const pkgconf& pc, bool la, prerequisites* ps)
{
strings lops;
vector<name> libs;
@@ -664,7 +665,7 @@ namespace build2
// library names (without -l) after seeing an unknown option.
//
bool arg (false), first (true), known (true), have_L;
- for (auto& o: pc.libs (a))
+ for (auto& o: pc.libs (la))
{
if (arg)
{
@@ -726,10 +727,10 @@ namespace build2
// Space-separated list of escaped library flags.
//
- auto lflags = [&pc, a] () -> string
+ auto lflags = [&pc, la] () -> string
{
string r;
- for (const auto& o: pc.libs (a))
+ for (const auto& o: pc.libs (la))
{
if (!r.empty ())
r += ' ';
@@ -831,7 +832,7 @@ namespace build2
prerequisite_key pk {
nullopt, {&lib::static_type, &out, &out, &name, nullopt}, &s};
- if (const target* lt = search_library (act, top_sysd, usrd, pk))
+ if (const target* lt = search_library (a, top_sysd, usrd, pk))
{
// We used to pick a member but that doesn't seem right since the
// same target could be used with different link orders.
@@ -1112,8 +1113,8 @@ namespace build2
#endif
- void link::
- pkgconfig_save (action act, const file& l, bool la) const
+ void link_rule::
+ pkgconfig_save (action a, const file& l, bool la) const
{
tracer trace (x, "pkgconfig_save");
@@ -1258,7 +1259,7 @@ namespace build2
os << " -L" << escape (ld.string ());
// Now process ourselves as if we were being linked to something (so
- // pretty similar to link::append_libraries()).
+ // pretty similar to link_rule::append_libraries()).
//
bool priv (false);
auto imp = [&priv] (const file&, bool la) {return priv && la;};
@@ -1307,7 +1308,7 @@ namespace build2
//
linfo li {otype::e, la ? lorder::a_s : lorder::s_a};
- process_libraries (act, bs, li, sys_lib_dirs,
+ process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0, // Link flags.
imp, lib, opt, true);
os << endl;
@@ -1317,7 +1318,7 @@ namespace build2
os << "Libs.private:";
priv = true;
- process_libraries (act, bs, li, sys_lib_dirs,
+ process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0, // Link flags.
imp, lib, opt, false);
os << endl;
@@ -1339,7 +1340,7 @@ namespace build2
};
vector<module> modules;
- for (const target* pt: l.prerequisite_targets)
+ for (const target* pt: l.prerequisite_targets[a])
{
// @@ UTL: we need to (recursively) see through libux{} (and
// also in search_modules()).
@@ -1354,7 +1355,7 @@ namespace build2
// the first mxx{} target that we see.
//
const target* mt (nullptr);
- for (const target* t: pt->prerequisite_targets)
+ for (const target* t: pt->prerequisite_targets[a])
{
if ((mt = t->is_a (*x_mod)))
break;
diff --git a/build2/cc/windows-manifest.cxx b/build2/cc/windows-manifest.cxx
index 4393fbf..ae33f66 100644
--- a/build2/cc/windows-manifest.cxx
+++ b/build2/cc/windows-manifest.cxx
@@ -9,7 +9,7 @@
#include <build2/filesystem.hxx>
#include <build2/diagnostics.hxx>
-#include <build2/cc/link.hxx>
+#include <build2/cc/link-rule.hxx>
using namespace std;
using namespace butl;
@@ -39,10 +39,10 @@ namespace build2
// file corresponding to the exe{} target. Return the manifest file path
// as well as whether it was changed.
//
- pair<path, bool> link::
+ pair<path, bool> link_rule::
windows_manifest (const file& t, bool rpath_assembly) const
{
- tracer trace (x, "link::windows_manifest");
+ tracer trace (x, "link_rule::windows_manifest");
const scope& rs (t.root_scope ());
diff --git a/build2/cc/windows-rpath.cxx b/build2/cc/windows-rpath.cxx
index b28ce42..8854542 100644
--- a/build2/cc/windows-rpath.cxx
+++ b/build2/cc/windows-rpath.cxx
@@ -13,7 +13,7 @@
#include <build2/bin/target.hxx>
-#include <build2/cc/link.hxx>
+#include <build2/cc/link-rule.hxx>
using namespace std;
using namespace butl;
@@ -46,10 +46,10 @@ namespace build2
// Return the greatest (newest) timestamp of all the DLLs that we will be
// adding to the assembly or timestamp_nonexistent if there aren't any.
//
- timestamp link::
+ timestamp link_rule::
windows_rpath_timestamp (const file& t,
const scope& bs,
- action act,
+ action a,
linfo li) const
{
timestamp r (timestamp_nonexistent);
@@ -103,19 +103,19 @@ namespace build2
r = t;
};
- for (auto pt: t.prerequisite_targets)
+ for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
if (pt == nullptr)
continue;
- bool a;
+ bool la;
const file* f;
- if ((a = (f = pt->is_a<liba> ())) ||
- (a = (f = pt->is_a<libux> ())) || // See through.
+ if ((la = (f = pt->is_a<liba> ())) ||
+ (la = (f = pt->is_a<libux> ())) || // See through.
( f = pt->is_a<libs> ()))
- process_libraries (act, bs, li, sys_lib_dirs,
- *f, a, pt.data,
+ process_libraries (a, bs, li, sys_lib_dirs,
+ *f, la, pt.data,
imp, lib, nullptr, true);
}
@@ -125,10 +125,10 @@ namespace build2
// Like *_timestamp() but actually collect the DLLs (and weed out the
// duplicates).
//
- auto link::
+ auto link_rule::
windows_rpath_dlls (const file& t,
const scope& bs,
- action act,
+ action a,
linfo li) const -> windows_dlls
{
windows_dlls r;
@@ -193,19 +193,19 @@ namespace build2
}
};
- for (auto pt: t.prerequisite_targets)
+ for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
if (pt == nullptr)
continue;
- bool a;
+ bool la;
const file* f;
- if ((a = (f = pt->is_a<liba> ())) ||
- (a = (f = pt->is_a<libux> ())) || // See through.
- ( f = pt->is_a<libs> ()))
- process_libraries (act, bs, li, sys_lib_dirs,
- *f, a, pt.data,
+ if ((la = (f = pt->is_a<liba> ())) ||
+ (la = (f = pt->is_a<libux> ())) || // See through.
+ ( f = pt->is_a<libs> ()))
+ process_libraries (a, bs, li, sys_lib_dirs,
+ *f, la, pt.data,
imp, lib, nullptr, true);
}
@@ -223,10 +223,10 @@ namespace build2
// unnecessary work by comparing the DLLs timestamp against the assembly
// manifest file.
//
- void link::
+ void link_rule::
windows_rpath_assembly (const file& t,
const scope& bs,
- action act,
+ action a,
linfo li,
const string& tcpu,
timestamp ts,
@@ -264,7 +264,7 @@ namespace build2
windows_dlls dlls;
if (!empty)
- dlls = windows_rpath_dlls (t, bs, act, li);
+ dlls = windows_rpath_dlls (t, bs, a, li);
// Clean the assembly directory and make sure it exists. Maybe it would
// have been faster to overwrite the existing manifest rather than
diff --git a/build2/cli/init.cxx b/build2/cli/init.cxx
index a4403a9..df123ba 100644
--- a/build2/cli/init.cxx
+++ b/build2/cli/init.cxx
@@ -23,7 +23,7 @@ namespace build2
{
namespace cli
{
- static const compile compile_;
+ static const compile_rule compile_rule_;
bool
config_init (scope& rs,
@@ -306,10 +306,10 @@ namespace build2
auto reg = [&r] (meta_operation_id mid, operation_id oid)
{
- r.insert<cli_cxx> (mid, oid, "cli.compile", compile_);
- r.insert<cxx::hxx> (mid, oid, "cli.compile", compile_);
- r.insert<cxx::cxx> (mid, oid, "cli.compile", compile_);
- r.insert<cxx::ixx> (mid, oid, "cli.compile", compile_);
+ r.insert<cli_cxx> (mid, oid, "cli.compile", compile_rule_);
+ r.insert<cxx::hxx> (mid, oid, "cli.compile", compile_rule_);
+ r.insert<cxx::cxx> (mid, oid, "cli.compile", compile_rule_);
+ r.insert<cxx::ixx> (mid, oid, "cli.compile", compile_rule_);
};
reg (perform_id, update_id);
diff --git a/build2/cli/rule.cxx b/build2/cli/rule.cxx
index d05b190..42f2176 100644
--- a/build2/cli/rule.cxx
+++ b/build2/cli/rule.cxx
@@ -43,10 +43,10 @@ namespace build2
return false;
}
- match_result compile::
+ bool compile_rule::
match (action a, target& xt, const string&) const
{
- tracer trace ("cli::compile::match");
+ tracer trace ("cli::compile_rule::match");
if (cli_cxx* pt = xt.is_a<cli_cxx> ())
{
@@ -149,7 +149,7 @@ namespace build2
}
}
- recipe compile::
+ recipe compile_rule::
apply (action a, target& xt) const
{
if (cli_cxx* pt = xt.is_a<cli_cxx> ())
@@ -208,7 +208,7 @@ namespace build2
}
}
- target_state compile::
+ target_state compile_rule::
perform_update (action a, const target& xt)
{
const cli_cxx& t (xt.as<cli_cxx> ());
diff --git a/build2/cli/rule.hxx b/build2/cli/rule.hxx
index 9af1da4..ba6337a 100644
--- a/build2/cli/rule.hxx
+++ b/build2/cli/rule.hxx
@@ -16,12 +16,12 @@ namespace build2
{
// @@ Redo as two separate rules?
//
- class compile: public rule
+ class compile_rule: public rule
{
public:
- compile () {}
+ compile_rule () {}
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
virtual recipe
diff --git a/build2/cli/target.cxx b/build2/cli/target.cxx
index c35ee18..be3098c 100644
--- a/build2/cli/target.cxx
+++ b/build2/cli/target.cxx
@@ -33,7 +33,7 @@ namespace build2
// cli.cxx
//
group_view cli_cxx::
- group_members (action_type) const
+ group_members (action) const
{
static_assert (sizeof (cli_cxx_members) == sizeof (const target*) * 3,
"member layout incompatible with array");
diff --git a/build2/cli/target.hxx b/build2/cli/target.hxx
index 1247172..d595856 100644
--- a/build2/cli/target.hxx
+++ b/build2/cli/target.hxx
@@ -41,7 +41,7 @@ namespace build2
using mtime_target::mtime_target;
virtual group_view
- group_members (action_type) const override;
+ group_members (action) const override;
public:
static const target_type static_type;
diff --git a/build2/context.hxx b/build2/context.hxx
index d2daad6..71035ad 100644
--- a/build2/context.hxx
+++ b/build2/context.hxx
@@ -33,7 +33,7 @@ namespace build2
// Match can be interrupted with "exclusive load" in order to load
// additional buildfiles. Similarly, it can be interrupted with (parallel)
   // execute in order to build targets required to complete the match (for
- // example, generated source code or source code generators themselves.
+ // example, generated source code or source code generators themselves).
//
// Such interruptions are performed by phase change that is protected by
// phase_mutex (which is also used to synchronize the state changes between
diff --git a/build2/dist/rule.cxx b/build2/dist/rule.cxx
index b288a66..a131000 100644
--- a/build2/dist/rule.cxx
+++ b/build2/dist/rule.cxx
@@ -15,7 +15,7 @@ namespace build2
{
namespace dist
{
- match_result rule::
+ bool rule::
match (action, target&, const string&) const
{
return true; // We always match.
diff --git a/build2/dist/rule.hxx b/build2/dist/rule.hxx
index 0524029..ffeed9e 100644
--- a/build2/dist/rule.hxx
+++ b/build2/dist/rule.hxx
@@ -19,7 +19,6 @@ namespace build2
// This is the default rule that simply matches all the prerequisites.
//
// A custom rule (usually the same as perform_update) may be necessary to
- // enter ad hoc prerequisites (like generated test input/output) or
// establishing group links (so that we see the dist variable set on a
// group).
//
@@ -28,7 +27,7 @@ namespace build2
public:
rule () {}
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
virtual recipe
diff --git a/build2/dump.cxx b/build2/dump.cxx
index f88dcfd..263f1b5 100644
--- a/build2/dump.cxx
+++ b/build2/dump.cxx
@@ -243,12 +243,14 @@ namespace build2
// Note: running serial and task_count is 0 before any operation has
// started.
//
- if (size_t c = t.task_count.load (memory_order_relaxed))
+ action inner; // @@ Only for the inner part of the action currently.
+
+ if (size_t c = t[inner].task_count.load (memory_order_relaxed))
{
if (c == target::count_applied () || c == target::count_executed ())
{
bool f (false);
- for (const target* pt: t.prerequisite_targets)
+ for (const target* pt: t.prerequisite_targets[inner])
{
if (pt == nullptr) // Skipped.
continue;
diff --git a/build2/install/init.cxx b/build2/install/init.cxx
index 0eb9521..a11f3e5 100644
--- a/build2/install/init.cxx
+++ b/build2/install/init.cxx
@@ -217,7 +217,7 @@ namespace build2
bs.rules.insert<alias> (perform_uninstall_id, "uninstall.alias", ar);
bs.rules.insert<file> (perform_install_id, "install.file", fr);
- bs.rules.insert<file> (perform_uninstall_id, "uinstall.file", fr);
+ bs.rules.insert<file> (perform_uninstall_id, "uninstall.file", fr);
}
// Configuration.
diff --git a/build2/install/rule.cxx b/build2/install/rule.cxx
index 79287f8..4d4cb51 100644
--- a/build2/install/rule.cxx
+++ b/build2/install/rule.cxx
@@ -40,9 +40,15 @@ namespace build2
//
const alias_rule alias_rule::instance;
- match_result alias_rule::
+ bool alias_rule::
match (action, target&, const string&) const
{
+ // We always match.
+ //
+ // Note that we are called both as the outer part during the "update for
+ // un/install" pre-operation and as the inner part during the
+ // un/install operation itself.
+ //
return true;
}
@@ -57,78 +63,134 @@ namespace build2
{
tracer trace ("install::alias_rule::apply");
+ // Pass-through to our installable prerequisites.
+ //
+ // @@ Shouldn't we do match in parallel (here and below)?
+ //
+ auto& pts (t.prerequisite_targets[a]);
for (prerequisite_member p: group_prerequisite_members (a, t))
{
+ // Ignore unresolved targets that are imported from other projects.
+ // We are definitely not installing those.
+ //
+ if (p.proj ())
+ continue;
+
// Let a customized rule have its say.
//
const target* pt (filter (a, t, p));
if (pt == nullptr)
+ {
+ l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
continue;
+ }
- // Check if this prerequisite is explicitly "not installable",
- // that is, there is the 'install' variable and its value is
- // false.
+ // Check if this prerequisite is explicitly "not installable", that
+ // is, there is the 'install' variable and its value is false.
//
- // At first, this might seem redundand since we could have let
- // the file_rule below take care of it. The nuance is this: this
- // prerequsite can be in a different subproject that hasn't loaded
- // the install module (and therefore has no file_rule registered).
- // The typical example would be the 'tests' subproject.
+ // At first, this might seem redundant since we could have let the
+ // file_rule below take care of it. The nuance is this: this
+ // prerequisite can be in a different subproject that hasn't loaded the
+ // install module (and therefore has no file_rule registered). The
+ // typical example would be the 'tests' subproject.
//
// Note: not the same as lookup() above.
//
auto l ((*pt)["install"]);
if (l && cast<path> (l).string () == "false")
{
- l5 ([&]{trace << "ignoring " << *pt;});
+ l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
continue;
}
build2::match (a, *pt);
- t.prerequisite_targets.push_back (pt);
+ pts.push_back (pt);
}
return default_recipe;
}
- // file_rule
+ // group_rule
//
- const file_rule file_rule::instance;
+ const group_rule group_rule::instance;
- struct match_data
+ const target* group_rule::
+ filter (action, const target&, const target& m) const
{
- bool install;
- };
-
- static_assert (sizeof (match_data) <= target::data_size,
- "insufficient space");
+ return &m;
+ }
- match_result file_rule::
- match (action a, target& t, const string&) const
+ recipe group_rule::
+ apply (action a, target& t) const
{
- // First determine if this target should be installed (called
- // "installable" for short).
+ tracer trace ("install::group_rule::apply");
+
+ // Resolve group members.
+ //
+ // Remember that we are called twice: first during update for install
+ // (pre-operation) and then during install. During the former, we rely
+ // on the normal update rule to resolve the group members. During the
+ // latter, there will be no rule to do this but the group will already
+ // have been resolved by the pre-operation.
//
- match_data md {lookup_install<path> (t, "install") != nullptr};
- match_result mr (true);
+ // If the rule could not resolve the group, then we ignore it.
+ //
+ group_view gv (a.outer ()
+ ? resolve_group_members (a, t)
+ : t.group_members (a));
- if (a.operation () == update_id)
+ if (gv.members != nullptr)
{
- // If this is the update pre-operation and the target is installable,
- // change the recipe action to (update, 0) (i.e., "unconditional
- // update") so that we don't get matched for its prerequisites.
- //
- if (md.install)
- mr.recipe_action = action (a.meta_operation (), update_id);
- else
- // Otherwise, signal that we don't match so that some other rule can
- // take care of it.
+ auto& pts (t.prerequisite_targets[a]);
+ for (size_t i (0); i != gv.count; ++i)
+ {
+ const target* m (gv.members[i]);
+
+ if (m == nullptr)
+ continue;
+
+ // Let a customized rule have its say.
//
- return false;
+ const target* mt (filter (a, t, *m));
+ if (mt == nullptr)
+ {
+ l5 ([&]{trace << "ignoring " << *m << " (filtered out)";});
+ continue;
+ }
+
+ // See if we were explicitly instructed not to touch this target.
+ //
+ // Note: not the same as lookup() above.
+ //
+ auto l ((*mt)["install"]);
+ if (l && cast<path> (l).string () == "false")
+ {
+ l5 ([&]{trace << "ignoring " << *mt << " (not installable)";});
+ continue;
+ }
+
+ build2::match (a, *mt);
+ pts.push_back (mt);
+ }
}
- t.data (md); // Save the data in the target's auxilary storage.
- return mr;
+ // Delegate to the base rule.
+ //
+ return alias_rule::apply (a, t);
+ }
+
+
+ // file_rule
+ //
+ const file_rule file_rule::instance;
+
+ bool file_rule::
+ match (action, target&, const string&) const
+ {
+ // We always match, even if this target is not installable (so that we
+ // can ignore it; see apply()).
+ //
+ return true;
}
const target* file_rule::
@@ -141,28 +203,27 @@ namespace build2
recipe file_rule::
apply (action a, target& t) const
{
- match_data md (move (t.data<match_data> ()));
- t.clear_data (); // In case delegated-to rule (or the rule that overrides
- // us; see cc/install) also uses aux storage.
+ tracer trace ("install::file_rule::apply");
- if (!md.install) // Not installable.
- return noop_recipe;
-
- // Ok, if we are here, then this means:
+ // Note that we are called both as the outer part during the "update for
+ // un/install" pre-operation and as the inner part during the
+ // un/install operation itself.
//
- // 1. This target is installable.
- // 2. The action is either
- // a. (perform, [un]install, 0) or
- // b. (*, update, [un]install)
+ // In both cases we first determine if the target is installable and
+ // return noop if it's not. Otherwise, in the first case (update for
+ // un/install) we delegate to the normal update and in the second
+ // (un/install) -- perform the un/install.
//
+ if (lookup_install<path> (t, "install") == nullptr)
+ return noop_recipe;
+
// In both cases, the next step is to search, match, and collect all the
// installable prerequisites.
//
- // @@ Perhaps if [noinstall] will be handled by the
- // group_prerequisite_members machinery, then we can just
- // run standard search_and_match()? Will need an indicator
- // that it was forced (e.g., [install]) for filter() below.
+ // @@ Unconditional group? How does it work for cli? Change to maybe
+ // same like test? If so, also in alias_rule.
//
+ auto& pts (t.prerequisite_targets[a]);
for (prerequisite_member p: group_prerequisite_members (a, t))
{
// Ignore unresolved targets that are imported from other projects.
@@ -175,84 +236,70 @@ namespace build2
//
const target* pt (filter (a, t, p));
if (pt == nullptr)
+ {
+ l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
continue;
+ }
// See if we were explicitly instructed not to touch this target.
//
+ // Note: not the same as lookup() above.
+ //
auto l ((*pt)["install"]);
if (l && cast<path> (l).string () == "false")
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
continue;
+ }
- // If the matched rule returned noop_recipe, then the target
- // state will be set to unchanged as an optimization. Use this
- // knowledge to optimize things on our side as well since this
- // will help a lot in case of any static installable content
- // (headers, documentation, etc).
+ // If the matched rule returned noop_recipe, then the target state is
+ // set to unchanged as an optimization. Use this knowledge to optimize
+ // things on our side as well since this will help a lot when updating
+ // static installable content (headers, documentation, etc).
//
if (!build2::match (a, *pt, unmatch::unchanged))
- t.prerequisite_targets.push_back (pt);
+ pts.push_back (pt);
}
- // This is where we diverge depending on the operation. In the
- // update pre-operation, we need to make sure that this target
- // as well as all its installable prerequisites are up to date.
- //
if (a.operation () == update_id)
{
- // Save the prerequisite targets that we found since the
- // call to match_delegate() below will wipe them out.
- //
- prerequisite_targets p;
-
- if (!t.prerequisite_targets.empty ())
- p.swap (t.prerequisite_targets);
-
- // Find the "real" update rule, that is, the rule that would
- // have been found if we signalled that we do not match from
- // match() above.
- //
- recipe d (match_delegate (a, t, *this));
-
- // If we have no installable prerequisites, then simply redirect
- // to it.
+ // For the update pre-operation match the inner rule (actual update).
//
- if (p.empty ())
- return d;
-
- // Ok, the worst case scenario: we need to cause update of
- // prerequisite targets and also delegate to the real update.
- //
- return [pt = move (p), dr = move (d)] (
- action a, const target& t) mutable -> target_state
+ if (match_inner (a, t, unmatch::unchanged))
{
- // Do the target update first.
- //
- target_state r (execute_delegate (dr, a, t));
-
- // Swap our prerequisite targets back in and execute.
- //
- t.prerequisite_targets.swap (pt);
- r |= straight_execute_prerequisites (a, t);
- pt.swap (t.prerequisite_targets); // In case we get re-executed.
+ return pts.empty () ? noop_recipe : default_recipe;
+ }
- return r;
- };
+ return &perform_update;
}
- else if (a.operation () == install_id)
- return [this] (action a, const target& t)
- {
- return perform_install (a, t);
- };
else
+ {
return [this] (action a, const target& t)
{
- return perform_uninstall (a, t);
+ return a.operation () == install_id
+ ? perform_install (a, t)
+ : perform_uninstall (a, t);
};
+ }
}
- void file_rule::
+ target_state file_rule::
+ perform_update (action a, const target& t)
+ {
+ // First execute the inner recipe then prerequisites.
+ //
+ target_state ts (execute_inner (a, t));
+
+ if (t.prerequisite_targets[a].size () != 0)
+ ts |= straight_execute_prerequisites (a, t);
+
+ return ts;
+ }
+
+ bool file_rule::
install_extra (const file&, const install_dir&) const
{
+ return false;
}
bool file_rule::
diff --git a/build2/install/rule.hxx b/build2/install/rule.hxx
index 642ab96..ffab206 100644
--- a/build2/install/rule.hxx
+++ b/build2/install/rule.hxx
@@ -19,21 +19,39 @@ namespace build2
class alias_rule: public rule
{
public:
- static const alias_rule instance;
-
- alias_rule () {}
-
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
- virtual recipe
- apply (action, target&) const override;
-
// Return NULL if this prerequisite should be ignored and pointer to its
// target otherwise. The default implementation accepts all prerequsites.
//
virtual const target*
filter (action, const target&, prerequisite_member) const;
+
+ virtual recipe
+ apply (action, target&) const override;
+
+ alias_rule () {}
+ static const alias_rule instance;
+ };
+
+ // In addition to the alias rule's semantics, this rule sees through to
+ // the group's members.
+ //
+ class group_rule: public alias_rule
+ {
+ public:
+ // Return NULL if this group member should be ignored and pointer to its
+ // target otherwise. The default implementation accepts all members.
+ //
+ virtual const target*
+ filter (action, const target&, const target& group_member) const;
+
+ virtual recipe
+ apply (action, target&) const override;
+
+ group_rule () {}
+ static const group_rule instance;
};
struct install_dir;
@@ -41,41 +59,38 @@ namespace build2
class file_rule: public rule
{
public:
- static const file_rule instance;
-
- file_rule () {}
-
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
- virtual recipe
- apply (action, target&) const override;
-
// Return NULL if this prerequisite should be ignored and pointer to its
- // target otherwise. The default implementation ignores prerequsites that
- // are outside of this target's project.
+ // target otherwise. The default implementation ignores prerequisites
+ // that are outside of this target's project.
//
virtual const target*
filter (action, const target&, prerequisite_member) const;
- // Extra installation hooks.
+ virtual recipe
+ apply (action, target&) const override;
+
+ static target_state
+ perform_update (action, const target&);
+
+ // Extra un/installation hooks. Return true if anything was
+ // un/installed.
//
- using install_dir = install::install_dir;
+ using install_dir = install::install_dir; // For derived rules.
- virtual void
+ virtual bool
install_extra (const file&, const install_dir&) const;
- // Return true if anything was uninstalled.
- //
virtual bool
uninstall_extra (const file&, const install_dir&) const;
- // Installation "commands".
+ // Installation/uninstallation "commands".
//
// If verbose is false, then only print the command at verbosity level 2
// or higher.
- //
- public:
+
// Install a symlink: base/link -> target.
//
static void
@@ -96,16 +111,18 @@ namespace build2
static bool
uninstall_f (const scope& rs,
const install_dir& base,
- const file* t,
+ const file* target,
const path& name,
bool verbose);
- protected:
target_state
perform_install (action, const target&) const;
target_state
perform_uninstall (action, const target&) const;
+
+ static const file_rule instance;
+ file_rule () {}
};
}
}
diff --git a/build2/operation.cxx b/build2/operation.cxx
index 698edbb..a43e20a 100644
--- a/build2/operation.cxx
+++ b/build2/operation.cxx
@@ -191,20 +191,9 @@ namespace build2
action_target& at (ts[j]);
const target& t (at.as_target ());
- // Finish matching targets that we have started. Note that we use the
- // state for the "final" action that will be executed and not our
- // action. Failed that, we may fail to find a match for a "stronger"
- // action but will still get unchanged for the original one.
- //
- target_state s;
- if (j < i)
- {
- match (a, t, false);
- s = t.serial_state (false);
- }
- else
- s = target_state::postponed;
-
+ target_state s (j < i
+ ? match (a, t, false)
+ : target_state::postponed);
switch (s)
{
case target_state::postponed:
@@ -369,7 +358,7 @@ namespace build2
{
const target& t (at.as_target ());
- switch ((at.state = t.executed_state (false)))
+ switch ((at.state = t.executed_state (a, false)))
{
case target_state::unknown:
{
diff --git a/build2/operation.hxx b/build2/operation.hxx
index a65fc3d..8c9818e 100644
--- a/build2/operation.hxx
+++ b/build2/operation.hxx
@@ -52,9 +52,6 @@ namespace build2
{
action (): inner_id (0), outer_id (0) {} // Invalid action.
- bool
- valid () const {return inner_id != 0;}
-
// If this is not a nested operation, then outer should be 0.
//
action (meta_operation_id m, operation_id inner, operation_id outer = 0)
@@ -70,6 +67,9 @@ namespace build2
operation_id
outer_operation () const {return outer_id & 0xF;}
+ bool inner () const {return outer_id == 0;}
+ bool outer () const {return outer_id != 0;}
+
// Implicit conversion operator to action_id for the switch() statement,
// etc. Most places only care about the inner operation.
//
@@ -88,24 +88,25 @@ namespace build2
inline bool
operator!= (action x, action y) {return !(x == y);}
- // This is an "overrides" comparison, i.e., it returns true if the recipe
- // for x overrides recipe for y. The idea is that for the same inner
- // operation, action with an outer operation is "weaker" than the one
- // without.
- //
- inline bool
- operator> (action x, action y)
- {
- return x.inner_id != y.inner_id ||
- (x.outer_id != y.outer_id && y.outer_id != 0);
- }
-
- inline bool
- operator>= (action x, action y) {return x == y || x > y;}
+ bool operator> (action, action) = delete;
+ bool operator< (action, action) = delete;
+ bool operator>= (action, action) = delete;
+ bool operator<= (action, action) = delete;
ostream&
operator<< (ostream&, action);
+ // Inner/outer operation state container.
+ //
+ template <typename T>
+ struct action_state
+ {
+ T states[2]; // [0] -- inner, [1] -- outer.
+
+ T& operator[] (action a) {return states[a.inner () ? 0 : 1];}
+ const T& operator[] (action a) const {return states[a.inner () ? 0 : 1];}
+ };
+
// Id constants for build-in and pre-defined meta/operations.
//
const meta_operation_id noop_id = 1; // nomop?
diff --git a/build2/parser.cxx b/build2/parser.cxx
index b21d51c..5687b0f 100644
--- a/build2/parser.cxx
+++ b/build2/parser.cxx
@@ -747,14 +747,15 @@ namespace build2
//
small_vector<reference_wrapper<target>, 1> tgs;
- for (auto& tn: ns)
+ for (auto i (ns.begin ()), e (ns.end ()); i != e; ++i)
{
- if (tn.qualified ())
- fail (nloc) << "project name in target " << tn;
+ name& n (*i);
- // @@ OUT TODO
- //
- enter_target tg (*this, move (tn), name (), false, nloc, trace);
+ if (n.qualified ())
+ fail (nloc) << "project name in target " << n;
+
+ name o (n.pair ? move (*++i) : name ());
+ enter_target tg (*this, move (n), move (o), false, nloc, trace);
if (default_target_ == nullptr)
default_target_ = target_;
diff --git a/build2/rule-map.hxx b/build2/rule-map.hxx
index f63d4b9..e754283 100644
--- a/build2/rule-map.hxx
+++ b/build2/rule-map.hxx
@@ -79,6 +79,8 @@ namespace build2
insert<T> (a >> 4, a & 0x0F, hint, r);
}
+ // 0 oid is a wildcard.
+ //
template <typename T>
void
insert (meta_operation_id mid,
diff --git a/build2/rule.cxx b/build2/rule.cxx
index 2ed0bbd..ef1d3a4 100644
--- a/build2/rule.cxx
+++ b/build2/rule.cxx
@@ -25,7 +25,7 @@ namespace build2
// that normal implementations should follow. So you probably shouldn't
// use it as a guide to implement your own, normal, rules.
//
- match_result file_rule::
+ bool file_rule::
match (action a, target& t, const string&) const
{
tracer trace ("file_rule::match");
@@ -131,7 +131,7 @@ namespace build2
// alias_rule
//
- match_result alias_rule::
+ bool alias_rule::
match (action, target&, const string&) const
{
return true;
@@ -153,7 +153,7 @@ namespace build2
// fsdir_rule
//
- match_result fsdir_rule::
+ bool fsdir_rule::
match (action, target&, const string&) const
{
return true;
@@ -220,7 +220,7 @@ namespace build2
// First update prerequisites (e.g. create parent directories) then create
// this directory.
//
- if (!t.prerequisite_targets.empty ())
+ if (!t.prerequisite_targets[a].empty ())
ts = straight_execute_prerequisites (a, t);
// The same code as in perform_update_direct() below.
@@ -243,9 +243,9 @@ namespace build2
{
// First create the parent directory. If present, it is always first.
//
- const target* p (t.prerequisite_targets.empty ()
+ const target* p (t.prerequisite_targets[a].empty ()
? nullptr
- : t.prerequisite_targets[0]);
+ : t.prerequisite_targets[a][0]);
if (p != nullptr && p->is_a<fsdir> ())
perform_update_direct (a, *p);
@@ -272,7 +272,7 @@ namespace build2
? target_state::changed
: target_state::unchanged);
- if (!t.prerequisite_targets.empty ())
+ if (!t.prerequisite_targets[a].empty ())
ts |= reverse_execute_prerequisites (a, t);
return ts;
diff --git a/build2/rule.hxx b/build2/rule.hxx
index f6e23f6..8e43ca6 100644
--- a/build2/rule.hxx
+++ b/build2/rule.hxx
@@ -13,48 +13,16 @@
namespace build2
{
- class match_result
- {
- public:
- bool result;
-
- // If set, then this is a recipe's action. It must override the original
- // action. Normally it is "unconditional inner operation". Only
- // noop_recipe can be overridden.
- //
- // It is passed to rule::apply() so that prerequisites are matched for
- // this action. It is also passed to target::recipe() so that if someone
- // is matching this target for this action, we won't end-up re-matching
- // it. However, the recipe itself is executed with the original action
- // so that it can adjust its logic, if necessary.
- //
- action recipe_action = action ();
-
- explicit
- operator bool () const {return result;}
-
- // Note that the from-bool constructor is intentionally implicit so that
- // we can return true/false from match().
- //
- match_result (bool r): result (r) {}
- match_result (bool r, action a): result (r), recipe_action (a) {}
- };
-
// Once a rule is registered (for a scope), it is treated as immutable. If
// you need to modify some state (e.g., counters or some such), then make
// sure it is MT-safe.
//
- // Note that match() may not be followed by apply() or be called several
- // times before the following apply() (see resolve_group_members()) which
- // means that it should be idempotent. The target_data object in the call
- // to match() may not be the same as target.
- //
- // match() can also be called by another rules (see cc/install).
+ // Note: match() is only called once but may not be followed by apply().
//
class rule
{
public:
- virtual match_result
+ virtual bool
match (action, target&, const string& hint) const = 0;
virtual recipe
@@ -68,7 +36,7 @@ namespace build2
public:
file_rule () {}
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
virtual recipe
@@ -82,7 +50,7 @@ namespace build2
public:
alias_rule () {}
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
virtual recipe
@@ -96,7 +64,7 @@ namespace build2
public:
fsdir_rule () {}
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
virtual recipe
@@ -124,7 +92,7 @@ namespace build2
public:
fallback_rule () {}
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override
{
return true;
diff --git a/build2/target.cxx b/build2/target.cxx
index a5316ce..201bd15 100644
--- a/build2/target.cxx
+++ b/build2/target.cxx
@@ -92,7 +92,7 @@ namespace build2
}
group_view target::
- group_members (action_type) const
+ group_members (action) const
{
assert (false); // Not a group or doesn't expose its members.
return group_view {nullptr, 0};
@@ -117,83 +117,6 @@ namespace build2
return *r;
}
- pair<bool, target_state> target::
- state (action_type a) const
- {
- assert (phase == run_phase::match);
-
- // The tricky aspect here is that we my be re-matching the target for
- // another (overriding action). Since it is only valid to call this
- // function after the target has been matched (for this action), we assume
- // that if the target is busy, then it is being overriden (note that it
- // cannot be being executed since we are in the match phase).
- //
- // But that's not the end of it: in case of a re-match the task count
- // might have been reset to, say, applied. The only way to know for sure
- // that there isn't another match "underneath" is to compare actions. But
- // that can only be done safely if we lock the target. At least we will be
- // quick (and we don't need to wait since if it's busy, we know it is a
- // re-match). This logic is similar to lock_impl().
- //
- size_t b (target::count_base ());
- size_t e (task_count.load (memory_order_acquire));
-
- size_t exec (b + target::offset_executed);
- size_t lock (b + target::offset_locked);
- size_t busy (b + target::offset_busy);
-
- for (;;)
- {
- for (; e == lock; e = task_count.load (memory_order_acquire))
- this_thread::yield ();
-
- if (e >= busy) // Override in progress.
- return make_pair (true, target_state::unchanged);
-
- // Unlike lock_impl(), we are only called after being matched for this
- // action so if we see executed, then it means executed for this action
- // (or noop).
- //
- if (e == exec)
- return make_pair (true, group_state () ? group->state_ : state_);
-
- // Try to grab the spin-lock.
- //
- if (task_count.compare_exchange_strong (
- e,
- lock,
- memory_order_acq_rel, // Synchronize on success.
- memory_order_acquire)) // Synchronize on failure.
- {
- break;
- }
-
- // Task count changed, try again.
- }
-
- // We have the spin-lock. Quickly get the matched action and unlock.
- //
- action_type ma (action);
- bool mf (state_ == target_state::failed);
- task_count.store (e, memory_order_release);
-
- if (ma > a) // Overriden.
- return make_pair (true, // Override may have failed but we had the rule.
- mf ? target_state::failed: target_state::unchanged);
-
- // Otherwise we should have a matched target.
- //
- assert (ma == a);
-
- if (e == b + target::offset_tried)
- return make_pair (false, target_state::unknown);
- else
- {
- assert (e == b + target::offset_applied || e == exec);
- return make_pair (true, group_state () ? group->state_ : state_);
- }
- }
-
pair<lookup, size_t> target::
find_original (const variable& var, bool target_only) const
{
@@ -519,26 +442,19 @@ namespace build2
case run_phase::load: break;
case run_phase::match:
{
- // Similar logic to state(action) except here we don't distinguish
- // between original/overridden actions (an overridable action is by
- // definition a noop and should never need to query the mtime).
- //
- size_t c (task_count.load (memory_order_acquire)); // For group_state()
-
- // Wait out the spin lock to get the meaningful count.
+ // Similar logic to matched_state_impl().
//
- for (size_t lock (target::count_locked ());
- c == lock;
- c = task_count.load (memory_order_acquire))
- this_thread::yield ();
+ const opstate& s (state[action () /* inner */]);
+ size_t o (s.task_count.load (memory_order_relaxed) - // Synchronized.
+ target::count_base ());
- if (c != target::count_applied () && c != target::count_executed ())
+ if (o != target::offset_applied && o != target::offset_executed)
break;
}
// Fall through.
case run_phase::execute:
{
- if (group_state ())
+ if (group_state (action () /* inner */))
t = &group->as<mtime_target> ();
break;
diff --git a/build2/target.hxx b/build2/target.hxx
index e15b970..105cc4b 100644
--- a/build2/target.hxx
+++ b/build2/target.hxx
@@ -104,6 +104,10 @@ namespace build2
};
using prerequisite_targets = vector<prerequisite_target>;
+ // A rule match is an element of hint_rule_map.
+ //
+ using rule_match = pair<const string, reference_wrapper<const rule>>;
+
// Target.
//
class target
@@ -135,6 +139,15 @@ namespace build2
const dir_path&
out_dir () const {return out.empty () ? dir : out;}
+ // A target that is not (yet) entered as part of a real dependency
+ // declaration (for example, that is entered as part of a target-specific
+ // variable assignment, dependency extraction, etc) is called implied.
+ //
+ // The implied flag should only be cleared during the load phase via the
+ // MT-safe target_set::insert().
+ //
+ bool implied;
+
// Target group to which this target belongs, if any. Note that we assume
// that the group and all its members are in the same scope (for example,
// in variable lookup). We also don't support nested groups (with a small
@@ -178,7 +191,6 @@ namespace build2
//
const target* group = nullptr;
-
// What has been described above is a "normal" group. That is, there is
// a dedicated target type that explicitly serves as a group and there
// is an explicit mechanism for discovering the group's members.
@@ -240,13 +252,11 @@ namespace build2
}
public:
- using action_type = build2::action;
-
// You should not call this function directly; rather use
// resolve_group_members() from <build2/algorithm.hxx>.
//
virtual group_view
- group_members (action_type) const;
+ group_members (action) const;
// Note that the returned key "tracks" the target (except for the
// extension).
@@ -395,17 +405,7 @@ namespace build2
value&
append (const variable&);
- // A target that is not (yet) entered as part of a real dependency
- // declaration (for example, that is entered as part of a target-specific
- // variable assignment, dependency extraction, etc) is called implied.
- //
- // The implied flag should only be cleared during the load phase via the
- // MT-safe target_set::insert().
- //
- public:
- bool implied;
-
- // Target state.
+ // Target operation state.
//
public:
// Atomic task count that is used during match and execution to track the
@@ -419,10 +419,6 @@ namespace build2
// automatically resetting the target to "not yet touched" state for this
// operation.
//
- // For match we have a further complication in that we may re-match the
- // target and override with a "stronger" recipe thus re-setting the state
- // from, say, applied back to touched.
- //
// The target is said to be synchronized (in this thread) if we have
// either observed the task count to reach applied or executed or we have
// successfully changed it (via compare_exchange) to locked or busy. If
@@ -434,8 +430,7 @@ namespace build2
static const size_t offset_matched = 3; // Rule has been matched.
static const size_t offset_applied = 4; // Rule has been applied.
static const size_t offset_executed = 5; // Recipe has been executed.
- static const size_t offset_locked = 6; // Fast (spin) lock.
- static const size_t offset_busy = 7; // Slow (wait) lock.
+ static const size_t offset_busy = 6; // Match/execute in progress.
static size_t count_base () {return 5 * (current_on - 1);}
@@ -444,51 +439,70 @@ namespace build2
static size_t count_matched () {return offset_matched + count_base ();}
static size_t count_applied () {return offset_applied + count_base ();}
static size_t count_executed () {return offset_executed + count_base ();}
- static size_t count_locked () {return offset_locked + count_base ();}
static size_t count_busy () {return offset_busy + count_base ();}
- mutable atomic_count task_count {0}; // Start offset_touched - 1.
+ // Inner/outer operation state.
+ //
+ struct opstate
+ {
+ mutable atomic_count task_count {0}; // Start offset_touched - 1.
+
+ // Number of direct targets that depend on this target in the current
+ // operation. It is incremented during match and then decremented during
+ // execution, before running the recipe. As a result, the recipe can
+ // detect the last chance (i.e., last dependent) to execute the command
+ // (see also the first/last execution modes in <operation.hxx>).
+ //
+ mutable atomic_count dependents {0};
+
+ // Matched rule (pointer to hint_rule_map element). Note that in case of
+ // a direct recipe assignment we may not have a rule (NULL).
+ //
+ const rule_match* rule;
+
+ // Applied recipe.
+ //
+ build2::recipe recipe;
+
+ // Target state for this operation. Note that it is undetermined until
+ // a rule is matched and recipe applied (see set_recipe()).
+ //
+ target_state state;
+ };
+
+ action_state<opstate> state;
+
+ opstate& operator[] (action a) {return state[a];}
+ const opstate& operator[] (action a) const {return state[a];}
// This function should only be called during match if we have observed
// (synchronization-wise) that this target has been matched (i.e., the
// rule has been applied) for this action.
//
target_state
- matched_state (action a, bool fail = true) const;
+ matched_state (action, bool fail = true) const;
// See try_match().
//
pair<bool, target_state>
- try_matched_state (action a, bool fail = true) const;
+ try_matched_state (action, bool fail = true) const;
- // This function should only be called during execution if we have
- // observed (synchronization-wise) that this target has been executed.
+ // After the target has been matched and synchronized, check if the target
+ // is known to be unchanged. Used for optimizations during search & match.
//
- target_state
- executed_state (bool fail = true) const;
+ bool
+ unchanged (action a) const
+ {
+ return matched_state_impl (a).second == target_state::unchanged;
+ }
- // This function should only be called between match and execute while
- // running serially. It returns the group state for the "final" action
- // that has been matched (and that will be executed).
+ // This function should only be called during execution if we have
+ // observed (synchronization-wise) that this target has been executed.
//
target_state
- serial_state (bool fail = true) const;
-
- // Number of direct targets that depend on this target in the current
- // operation. It is incremented during match and then decremented during
- // execution, before running the recipe. As a result, the recipe can
- // detect the last chance (i.e., last dependent) to execute the command
- // (see also the first/last execution modes in <operation>).
- //
- mutable atomic_count dependents;
+ executed_state (action, bool fail = true) const;
protected:
- // Return fail-untranslated (but group-translated) state assuming the
- // target is executed and synchronized.
- //
- target_state
- state () const;
-
// Version that should be used during match after the target has been
// matched for this action (see the recipe override).
//
@@ -496,51 +510,22 @@ namespace build2
// result (see try_match()).
//
pair<bool, target_state>
- state (action a) const;
+ matched_state_impl (action) const;
+
+ // Return fail-untranslated (but group-translated) state assuming the
+ // target is executed and synchronized.
+ //
+ target_state
+ executed_state_impl (action) const;
// Return true if the state comes from the group. Target must be at least
// matched.
//
bool
- group_state () const;
-
- // Raw state, normally not accessed directly.
- //
- public:
- target_state state_ = target_state::unknown;
+ group_state (action a) const;
- // Recipe.
- //
public:
- using recipe_type = build2::recipe;
- using rule_type = build2::rule;
-
- action_type action; // Action the rule/recipe is for.
-
- // Matched rule (pointer to hint_rule_map element). Note that in case of a
- // direct recipe assignment we may not have a rule.
- //
- const pair<const string, reference_wrapper<const rule_type>>* rule;
-
- // Applied recipe.
- //
- recipe_type recipe_;
-
- // Note that the target must be locked in order to set the recipe.
- //
- void
- recipe (recipe_type);
-
- // After the target has been matched and synchronized, check if the target
- // is known to be unchanged. Used for optimizations during search & match.
- //
- bool
- unchanged (action_type a) const
- {
- return state (a).second == target_state::unchanged;
- }
-
- // Targets to which prerequisites resolve for this recipe. Note that
+ // Targets to which prerequisites resolve for this action. Note that
// unlike prerequisite::target, these can be resolved to group members.
// NULL means the target should be skipped (or the rule may simply not add
// such a target to the list).
@@ -552,7 +537,7 @@ namespace build2
//
// Note that the recipe may modify this list.
//
- mutable build2::prerequisite_targets prerequisite_targets;
+ mutable action_state<build2::prerequisite_targets> prerequisite_targets;
    // Auxiliary data storage.
//
@@ -573,7 +558,8 @@ namespace build2
//
    // Currently the data is not destroyed until the next match.
//
- // Note that the recipe may modify the data.
+ // Note that the recipe may modify the data. Currently reserved for the
+ // inner part of the action.
//
static constexpr size_t data_size = sizeof (string) * 16;
mutable std::aligned_storage<data_size>::type data_pad;
@@ -1330,9 +1316,13 @@ namespace build2
// which racing updates happen because we do not modify the external state
    // (which is the source of timestamps) while updating the internal.
//
+ // The modification time is reserved for the inner operation thus there is
+ // no action argument.
+ //
// The rule for groups that utilize target_state::group is as follows: if
// it has any members that are mtime_targets, then the group should be
- // mtime_target and the members get the mtime from it.
+ // mtime_target and the members get the mtime from it. During match and
+ // execute the target should be synchronized.
//
// Note that this function can be called before the target is matched in
// which case the value always comes from the target itself. In other
diff --git a/build2/target.ixx b/build2/target.ixx
index 6225aef..16dbf61 100644
--- a/build2/target.ixx
+++ b/build2/target.ixx
@@ -62,26 +62,48 @@ namespace build2
}
}
+ inline pair<bool, target_state> target::
+ matched_state_impl (action a) const
+ {
+ assert (phase == run_phase::match);
+
+ const opstate& s (state[a]);
+ size_t o (s.task_count.load (memory_order_relaxed) - // Synchronized.
+ target::count_base ());
+
+ if (o == target::offset_tried)
+ return make_pair (false, target_state::unknown);
+ else
+ {
+ // Normally applied but can also be already executed.
+ //
+ assert (o == target::offset_applied || o == target::offset_executed);
+ return make_pair (true, (group_state (a) ? group->state[a] : s).state);
+ }
+ }
+
inline target_state target::
- state () const
+ executed_state_impl (action a) const
{
assert (phase == run_phase::execute);
- return group_state () ? group->state_ : state_;
+ return (group_state (a) ? group->state : state)[a].state;
}
inline bool target::
- group_state () const
+ group_state (action a) const
{
// We go an extra step and short-circuit to the target state even if the
// raw state is not group provided the recipe is group_recipe and the
// state is not failed.
+ //
+ const opstate& s (state[a]);
- if (state_ == target_state::group)
+ if (s.state == target_state::group)
return true;
- if (state_ != target_state::failed && group != nullptr)
+ if (s.state != target_state::failed && group != nullptr)
{
- if (recipe_function* const* f = recipe_.target<recipe_function*> ())
+ if (recipe_function* const* f = s.recipe.target<recipe_function*> ())
return *f == &group_action;
}
@@ -89,11 +111,11 @@ namespace build2
}
inline target_state target::
- matched_state (action_type a, bool fail) const
+ matched_state (action a, bool fail) const
{
// Note that the target could be being asynchronously re-matched.
//
- pair<bool, target_state> r (state (a));
+ pair<bool, target_state> r (matched_state_impl (a));
if (fail && (!r.first || r.second == target_state::failed))
throw failed ();
@@ -102,9 +124,9 @@ namespace build2
}
inline pair<bool, target_state> target::
- try_matched_state (action_type a, bool fail) const
+ try_matched_state (action a, bool fail) const
{
- pair<bool, target_state> r (state (a));
+ pair<bool, target_state> r (matched_state_impl (a));
if (fail && r.first && r.second == target_state::failed)
throw failed ();
@@ -113,22 +135,9 @@ namespace build2
}
inline target_state target::
- executed_state (bool fail) const
- {
- target_state r (state ());
-
- if (fail && r == target_state::failed)
- throw failed ();
-
- return r;
- }
-
- inline target_state target::
- serial_state (bool fail) const
+ executed_state (action a, bool fail) const
{
- //assert (sched.serial ());
-
- target_state r (group_state () ? group->state_ : state_);
+ target_state r (executed_state_impl (a));
if (fail && r == target_state::failed)
throw failed ();
@@ -136,44 +145,6 @@ namespace build2
return r;
}
- extern atomic_count target_count; // context.hxx
-
- inline void target::
- recipe (recipe_type r)
- {
- recipe_ = move (r);
-
- // Do not clear the failed target state in case of an override (and we
- // should never see the failed state from the previous operation since we
- // should abort the execution in this case).
- //
- if (state_ != target_state::failed)
- {
- state_ = target_state::unknown;
-
- // If this is a noop recipe, then mark the target unchanged to allow for
- // some optimizations.
- //
- recipe_function** f (recipe_.target<recipe_function*> ());
-
- if (f != nullptr && *f == &noop_action)
- state_ = target_state::unchanged;
- else
- {
- // This gets tricky when we start considering overrides (which can
- // only happen for noop recipes), direct execution, etc. So here seems
- // like the best place to do this.
- //
- // We also ignore the group recipe since it is used for ad hoc
- // groups (which are not executed). Plus, group action means real
- // recipe is in the group so this also feels right conceptually.
- //
- if (f == nullptr || *f != &group_action)
- target_count.fetch_add (1, memory_order_relaxed);
- }
- }
- }
-
// mark()/unmark()
//
@@ -323,7 +294,8 @@ namespace build2
inline timestamp mtime_target::
load_mtime (const path& p) const
{
- assert (phase == run_phase::execute && !group_state ());
+ assert (phase == run_phase::execute &&
+ !group_state (action () /* inner */));
duration::rep r (mtime_.load (memory_order_consume));
if (r == timestamp_unknown_rep)
@@ -349,7 +321,9 @@ namespace build2
// much we can do here except detect the case where the target was
// changed on this run.
//
- return mt < mp || (mt == mp && state () == target_state::changed);
+ return mt < mp || (mt == mp &&
+ executed_state_impl (action () /* inner */) ==
+ target_state::changed);
}
// path_target
diff --git a/build2/test/common.hxx b/build2/test/common.hxx
index 89b7581..81cccb7 100644
--- a/build2/test/common.hxx
+++ b/build2/test/common.hxx
@@ -17,7 +17,24 @@ namespace build2
enum class output_before {fail, warn, clean};
enum class output_after {clean, keep};
- struct common
+ struct common_data
+ {
+ const variable& config_test;
+ const variable& config_test_output;
+
+ const variable& var_test;
+ const variable& test_options;
+ const variable& test_arguments;
+
+ const variable& test_stdin;
+ const variable& test_stdout;
+ const variable& test_roundtrip;
+ const variable& test_input;
+
+ const variable& test_target;
+ };
+
+ struct common: common_data
{
// The config.test.output values.
//
@@ -45,6 +62,9 @@ namespace build2
//
bool
test (const target& test_target, const path& id_path) const;
+
+ explicit
+ common (common_data&& d): common_data (move (d)) {}
};
}
}
diff --git a/build2/test/init.cxx b/build2/test/init.cxx
index 6119ae0..556de00 100644
--- a/build2/test/init.cxx
+++ b/build2/test/init.cxx
@@ -23,7 +23,7 @@ namespace build2
namespace test
{
bool
- boot (scope& rs, const location&, unique_ptr<module_base>&)
+ boot (scope& rs, const location&, unique_ptr<module_base>& mod)
{
tracer trace ("test::boot");
@@ -38,53 +38,78 @@ namespace build2
//
auto& vp (var_pool.rw (rs));
- // Tests to execute.
- //
- // Specified as <target>@<path-id> pairs with both sides being optional.
- // The variable is untyped (we want a list of name-pairs), overridable,
- // and inheritable. The target is relative (in essence a prerequisite)
- // which is resolved from the (root) scope where the config.test value
- // is defined.
- //
- vp.insert ("config.test", true);
+ common_data d {
- // Test working directory before/after cleanup (see Testscript spec for
- // semantics).
- //
- vp.insert<name_pair> ("config.test.output", true);
+ // Tests to execute.
+ //
+ // Specified as <target>@<path-id> pairs with both sides being
+ // optional. The variable is untyped (we want a list of name-pairs),
+ // overridable, and inheritable. The target is relative (in essence a
+ // prerequisite) which is resolved from the (root) scope where the
+ // config.test value is defined.
+ //
+ vp.insert ("config.test", true),
- // Note: none are overridable.
- //
- // The test variable is a name which can be a path (with the
- // true/false special values) or a target name.
- //
- vp.insert<name> ("test", variable_visibility::target);
- vp.insert<name> ("test.input", variable_visibility::project);
- vp.insert<name> ("test.output", variable_visibility::project);
- vp.insert<name> ("test.roundtrip", variable_visibility::project);
- vp.insert<strings> ("test.options", variable_visibility::project);
- vp.insert<strings> ("test.arguments", variable_visibility::project);
+ // Test working directory before/after cleanup (see Testscript spec
+ // for semantics).
+ //
+ vp.insert<name_pair> ("config.test.output", true),
+
+ // The test variable is a name which can be a path (with the
+ // true/false special values) or a target name.
+ //
+ // Note: none are overridable.
+ //
+ vp.insert<name> ("test", variable_visibility::target),
+ vp.insert<strings> ("test.options", variable_visibility::project),
+ vp.insert<strings> ("test.arguments", variable_visibility::project),
+
+ // Prerequisite-specific.
+ //
+ // test.stdin and test.stdout can be used to mark a prerequisite as a
+ // file to redirect stdin from and to compare stdout to, respectively.
+ // test.roundtrip is a shortcut to mark a prerequisite as both stdin
+ // and stdout.
+ //
+ // Prerequisites marked with test.input are treated as additional test
+ // inputs: they are made sure to be up to date and their paths are
+ // passed as additional command line arguments (after test.options and
+ // test.arguments). Their primary use is to pass inputs that may have
+ // varying file names/paths, for example:
+ //
+ // exe{parent}: exe{child}: test.input = true
+ //
+ // Note that currently this mechanism is only available to simple
+ // tests though we could also support it for testscript (e.g., by
+ // appending the input paths to test.arguments or by passing them in a
+ // separate test.inputs variable).
+ //
+ vp.insert<bool> ("test.stdin", variable_visibility::target),
+ vp.insert<bool> ("test.stdout", variable_visibility::target),
+ vp.insert<bool> ("test.roundtrip", variable_visibility::target),
+ vp.insert<bool> ("test.input", variable_visibility::target),
+
+ // Test target platform.
+ //
+ vp.insert<target_triplet> ("test.target", variable_visibility::project)
+ };
// These are only used in testscript.
//
vp.insert<strings> ("test.redirects", variable_visibility::project);
vp.insert<strings> ("test.cleanups", variable_visibility::project);
- // Test target platform.
- //
// Unless already set, default test.target to build.host. Note that it
      // can still be overridden by the user, e.g., in root.build.
//
{
- value& v (
- rs.assign (
- vp.insert<target_triplet> (
- "test.target", variable_visibility::project)));
+ value& v (rs.assign (d.test_target));
if (!v || v.empty ())
v = cast<target_triplet> ((*global_scope)["build.host"]);
}
+ mod.reset (new module (move (d)));
return false;
}
@@ -108,8 +133,7 @@ namespace build2
const dir_path& out_root (rs.out_path ());
l5 ([&]{trace << "for " << out_root;});
- assert (mod == nullptr);
- mod.reset (new module ());
+ assert (mod != nullptr);
module& m (static_cast<module&> (*mod));
// Configure.
@@ -123,7 +147,7 @@ namespace build2
// config.test
//
- if (lookup l = config::omitted (rs, "config.test").first)
+ if (lookup l = config::omitted (rs, m.config_test).first)
{
// Figure out which root scope it came from.
//
@@ -139,7 +163,7 @@ namespace build2
// config.test.output
//
- if (lookup l = config::omitted (rs, "config.test.output").first)
+ if (lookup l = config::omitted (rs, m.config_test_output).first)
{
const name_pair& p (cast<name_pair> (l));
@@ -180,22 +204,13 @@ namespace build2
t.insert<testscript> ();
}
- // Register rules.
+ // Register our test running rule.
//
{
- const rule& r (m);
- const alias_rule& ar (m);
+ default_rule& dr (m);
- // Register our test running rule.
- //
- rs.rules.insert<target> (perform_test_id, "test", r);
- rs.rules.insert<alias> (perform_test_id, "test", ar);
-
- // Register our rule for the dist meta-operation. We need to do this
- // because we may have ad hoc prerequisites (test input/output files)
- // that need to be entered into the target list.
- //
- rs.rules.insert<target> (dist_id, test_id, "test", r);
+ rs.rules.insert<target> (perform_test_id, "test", dr);
+ rs.rules.insert<alias> (perform_test_id, "test", dr);
}
return true;
diff --git a/build2/test/module.hxx b/build2/test/module.hxx
index 3c9539f..529f826 100644
--- a/build2/test/module.hxx
+++ b/build2/test/module.hxx
@@ -17,8 +17,19 @@ namespace build2
{
namespace test
{
- struct module: module_base, virtual common, rule, alias_rule
+ struct module: module_base, virtual common, default_rule, group_rule
{
+ const test::group_rule&
+ group_rule () const
+ {
+ return *this;
+ }
+
+ explicit
+ module (common_data&& d)
+ : common (move (d)),
+ test::default_rule (move (d)),
+ test::group_rule (move (d)) {}
};
}
}
diff --git a/build2/test/rule.cxx b/build2/test/rule.cxx
index 96941e6..9a00d84 100644
--- a/build2/test/rule.cxx
+++ b/build2/test/rule.cxx
@@ -23,38 +23,66 @@ namespace build2
{
namespace test
{
- struct match_data
+ bool rule::
+ match (action, target&, const string&) const
{
- bool pass; // Pass-through to prerequsites (for alias only).
- bool test;
-
- bool script;
- };
-
- static_assert (sizeof (match_data) <= target::data_size,
- "insufficient space");
+ // We always match, even if this target is not testable (so that we can
+ // ignore it; see apply()).
+ //
+ return true;
+ }
- match_result rule_common::
- match (action a, target& t, const string&) const
+ recipe rule::
+ apply (action a, target& t) const
{
- // The (admittedly twisted) logic of this rule tries to achieve the
- // following: If the target is testable, then we want both first update
- // it and then test it. Otherwise, we want to ignore it for both
- // operations. To achieve this the rule will be called to match during
- // both operations. For update, if the target is not testable, then the
- // rule matches with a noop recipe. If the target is testable, then the
- // rule also matches but delegates to the real update rule. In this case
- // (and this is tricky) the rule also changes the operation to
- // unconditional update to make sure it doesn't match any prerequisites
- // (which, if not testable, it will noop).
+ // Note that we are called both as the outer part during the "update for
+ // test" pre-operation and as the inner part during the test operation
+ // itself.
+ //
+ // In both cases we first determine if the target is testable and return
+ // noop if it's not. Otherwise, in the first case (update for test) we
+ // delegate to the normal update and in the second (test) -- perform the
+ // test.
//
// And to add a bit more complexity, we want to handle aliases slightly
// differently: we may not want to ignore their prerequisites if the
// alias is not testable since their prerequisites could be.
+ //
+ // Here is the state matrix:
+ //
+ // test'able | pass'able | neither
+ // | |
+ // update for test delegate (& pass) | pass | noop
+ // ---------------------------------------+-------------+---------
+ // test test (& pass) | pass | noop
+ //
+
+ // If we are passing-through, then match our prerequisites.
+ //
+ // Note that we may already have stuff in prerequisite_targets (see
+ // group_rule).
+ //
+ if (t.is_a<alias> () && pass (t))
+ {
+ // For the test operation we have to implement our own search and
+ // match because we need to ignore prerequisites that are outside of
+ // our project. They can be from projects that don't use the test
+ // module (and thus won't have a suitable rule). Or they can be from
+ // no project at all (e.g., installed). Also, generally, not testing
+ // stuff that's not ours seems right.
+ //
+ match_prerequisites (a, t, t.root_scope ());
+ }
- match_data md {t.is_a<alias> () && pass (t), false, false};
+ auto& pts (t.prerequisite_targets[a]);
+ size_t pass_n (pts.size ()); // Number of pass-through prerequisites.
- if (test (t))
+ // See if it's testable and if so, what kind.
+ //
+ bool test (false);
+ bool script (false);
+
+ if (this->test (t))
{
// We have two very different cases: testscript and simple test (plus
// it may not be a testable target at all). So as the first step
@@ -75,292 +103,223 @@ namespace build2
{
if (p.is_a<testscript> ())
{
- md.script = true;
+ if (!script)
+ {
+ script = true;
- // We treat this target as testable unless the test variable is
- // explicitly set to false.
+ // We treat this target as testable unless the test variable is
+ // explicitly set to false.
+ //
+ const name* n (cast_null<name> (t[var_test]));
+ test = (n == nullptr || !n->simple () || n->value != "false");
+
+ if (!test)
+ break;
+ }
+
+ // If this is the test operation, collect testscripts after the
+ // pass-through prerequisites.
+ //
+ // Note that we don't match nor execute them relying on update to
+ // assign their paths and make sure they are up to date.
//
- const name* n (cast_null<name> (t["test"]));
- md.test = n == nullptr || !n->simple () || n->value != "false";
- break;
+ if (a.operation () != test_id)
+ break;
+
+ pts.push_back (&p.search (t));
}
}
// If this is not a script, then determine if it is a simple test.
- // Ignore aliases and testscripts files themselves at the outset.
+ // Ignore aliases and testscript files themselves at the outset.
//
- if (!md.script && !t.is_a<alias> () && !t.is_a<testscript> ())
+ if (!script && !t.is_a<alias> () && !t.is_a<testscript> ())
{
// For the simple case whether this is a test is controlled by the
// test variable. Also, it feels redundant to specify, say, "test =
- // true" and "test.output = test.out" -- the latter already says this
+ // true" and "test.stdout = test.out" -- the latter already says this
// is a test.
//
+ const name* n (cast_null<name> (t[var_test]));
- // Use lookup depths to figure out who "overrides" whom.
+ // If the test variable is explicitly set to false then we treat
+ // it as not testable regardless of what other test.* variables
+ // or prerequisites we might have.
//
- auto p (t.find (var_pool["test"]));
- const name* n (cast_null<name> (p.first));
-
- // Note that test can be set to an "override" target.
+ // Note that the test variable can be set to an "override" target
+ // (which means 'true' for our purposes).
//
- if (n != nullptr && (!n->simple () || n->value != "false"))
- md.test = true;
+ if (n != nullptr && n->simple () && n->value == "false")
+ test = false;
else
{
- auto test = [&t, &p] (const char* var)
+ // Look for test input/stdin/stdout prerequisites. The same
+ // group logic as in the testscript case above.
+ //
+ for (prerequisite_member p:
+ group_prerequisite_members (a, t, members_mode::maybe))
{
- return t.find (var_pool[var]).second < p.second;
- };
-
- md.test =
- test ("test.input") ||
- test ("test.output") ||
- test ("test.roundtrip") ||
- test ("test.options") ||
- test ("test.arguments");
+ const auto& vars (p.prerequisite.vars);
+
+ if (vars.empty ()) // Common case.
+ continue;
+
+ bool rt ( cast_false<bool> (vars[test_roundtrip]));
+ bool si (rt || cast_false<bool> (vars[test_stdin]));
+ bool so (rt || cast_false<bool> (vars[test_stdout]));
+ bool in ( cast_false<bool> (vars[test_input]));
+
+ if (si || so || in)
+ {
+ // Note that we don't match nor execute them relying on update
+ // to assign their paths and make sure they are up to date.
+ //
+ const target& pt (p.search (t));
+
+ // Verify it is file-based.
+ //
+ if (!pt.is_a<file> ())
+ {
+ fail << "test." << (si ? "stdin" : so ? "stdout" : "input")
+ << " prerequisite " << p << " of target " << t
+ << " is not a file";
+ }
+
+ if (!test)
+ {
+ test = true;
+
+ if (a.operation () != test_id)
+ break;
+
+ // First matching prerequisite. Establish the structure in
+ // pts: the first element (after pass_n) is stdin (can be
+ // NULL), the second is stdout (can be NULL), and everything
+ // after that (if any) is inputs.
+ //
+ pts.push_back (nullptr); // stdin
+ pts.push_back (nullptr); // stdout
+ }
+
+ if (si)
+ {
+ if (pts[pass_n] != nullptr)
+ fail << "multiple test.stdin prerequisites for target "
+ << t;
+
+ pts[pass_n] = &pt;
+ }
+
+ if (so)
+ {
+ if (pts[pass_n + 1] != nullptr)
+ fail << "multiple test.stdout prerequisites for target "
+ << t;
+
+ pts[pass_n + 1] = &pt;
+ }
+
+ if (in)
+ pts.push_back (&pt);
+ }
+ }
+
+ if (!test)
+ test = (n != nullptr); // We have the test variable.
+
+ if (!test)
+ test = t[test_options] || t[test_arguments];
}
}
}
- match_result mr (true);
-
- // Theoretically if this target is testable and this is the update
- // pre-operation, then all we need to do is say we are not a match and
- // the standard matching machinery will find the rule to update this
- // target. The problem with this approach is that the matching will
- // still happen for "update for test" which means this rule may still
- // match prerequisites (e.g., non-existent files) which we don't want.
- //
- // Also, for the simple case there is one more complication: test
- // input/output. While normally they will be existing (in src_base)
- // files, they could also be auto-generated. In fact, they could only be
- // needed for testing, which means the normall update won't even know
- // about them (nor clean, for that matter; this is why we need
- // cleantest).
- //
- // @@ Maybe we should just say if input/output are generated, then they
- // must be explicitly listed as prerequisites? Then no need for
- // cleantest but they will be updated even when not needed.
- //
- // To make generated input/output work we will have to cause their
- // update ourselves. In other words, we may have to do some actual work
- // for (update, test), and not simply "guide" (update, 0) as to which
- // targets need updating. For how exactly we are going to do it, see
- // apply() below.
-
- // Change the recipe action to (update, 0) (i.e., "unconditional
- // update") for "leaf" tests to make sure we won't match any
- // prerequisites. Note that this doesn't cover the case where an alias
- // is both a test and a pass for a test prerequisite with generated
- // input/output.
- //
- if (a.operation () == update_id && md.test)
- mr.recipe_action = action (a.meta_operation (), update_id);
-
- // Note that we match even if this target is not testable so that we can
- // ignore it (see apply()).
- //
- t.data (md); // Save the data in the target's auxilary storage.
- return mr;
- }
-
- recipe alias_rule::
- apply (action a, target& t) const
- {
- match_data md (move (t.data<match_data> ()));
- t.clear_data (); // In case delegated-to rule also uses aux storage.
-
- // We can only test an alias via a testscript, not a simple test.
+ // Neither testing nor passing-through.
//
- assert (!md.test || md.script);
-
- if (!md.pass && !md.test)
+ if (!test && pass_n == 0)
return noop_recipe;
- // If this is the update pre-operation then simply redirect to the
- // standard alias rule.
- //
- if (a.operation () == update_id)
- return match_delegate (a, t, *this);
-
- // For the test operation we have to implement our own search and match
- // because we need to ignore prerequisites that are outside of our
- // project. They can be from projects that don't use the test module
- // (and thus won't have a suitable rule). Or they can be from no project
- // at all (e.g., installed). Also, generally, not testing stuff that's
- // not ours seems right. Note that we still want to make sure they are
- // up to date (via the above delegate) since our tests might use them.
+ // If we are only passing-through, then use the default recipe (which
+ // will execute all the matched prerequisites).
//
- match_prerequisites (a, t, t.root_scope ());
+ if (!test)
+ return default_recipe;
- // If not a test then also redirect to the alias rule.
+ // Being here means we are definitely testing and maybe passing-through.
//
- return md.test
- ? [this] (action a, const target& t) {return perform_test (a, t);}
- : default_recipe;
- }
-
- recipe rule::
- apply (action a, target& t) const
- {
- tracer trace ("test::rule::apply");
-
- match_data md (move (t.data<match_data> ()));
- t.clear_data (); // In case delegated-to rule also uses aux storage.
-
- if (!md.test)
- return noop_recipe;
-
- // If we are here, then the target is testable and the action is either
- // a. (perform, test, 0) or
- // b. (*, update, 0)
- //
- if (md.script)
+ if (a.operation () == update_id)
{
- if (a.operation () == update_id)
- return match_delegate (a, t, *this);
-
- // Collect all the testscript targets in prerequisite_targets.
+ // For the update pre-operation match the inner rule (actual update).
+ // Note that here we assume it will update (if required) all the
+ // testscript and input/output prerequisites.
//
- for (prerequisite_member p:
- group_prerequisite_members (a, t, members_mode::maybe))
- {
- if (p.is_a<testscript> ())
- t.prerequisite_targets.push_back (&p.search (t));
- }
-
- return [this] (action a, const target& t)
- {
- return perform_script (a, t);
- };
+ match_inner (a, t);
+ return &perform_update;
}
else
{
- // In both cases, the next step is to see if we have test.{input,
- // output,roundtrip}.
- //
-
- // We should have either arguments or input/roundtrip. Again, use
- // lookup depth to figure out who takes precedence.
- //
- auto ip (t.find (var_pool["test.input"]));
- auto op (t.find (var_pool["test.output"]));
- auto rp (t.find (var_pool["test.roundtrip"]));
- auto ap (t.find (var_pool["test.arguments"]));
-
- auto test = [&t] (pair<lookup, size_t>& x, const char* xn,
- pair<lookup, size_t>& y, const char* yn)
+ if (script)
{
- if (x.first && y.first)
+ return [pass_n, this] (action a, const target& t)
{
- if (x.second == y.second)
- fail << "both " << xn << " and " << yn << " specified for "
- << "target " << t;
-
- (x.second < y.second ? y : x) = make_pair (lookup (), size_t (~0));
- }
- };
-
- test (ip, "test.input", ap, "test.arguments");
- test (rp, "test.roundtrip", ap, "test.arguments");
- test (ip, "test.input", rp, "test.roundtrip");
- test (op, "test.output", rp, "test.roundtrip");
-
- const name* in;
- const name* on;
-
- // Reduce the roundtrip case to input/output.
- //
- if (rp.first)
- {
- in = on = &cast<name> (rp.first);
+ return perform_script (a, t, pass_n);
+ };
}
else
{
- in = ip.first ? &cast<name> (ip.first) : nullptr;
- on = op.first ? &cast<name> (op.first) : nullptr;
+ return [pass_n, this] (action a, const target& t)
+ {
+ return perform_test (a, t, pass_n);
+ };
}
+ }
+ }
- // Resolve them to targets, which normally would be existing files
- // but could also be targets that need updating.
- //
- const scope& bs (t.base_scope ());
-
- // @@ OUT: what if this is a @-qualified pair or names?
- //
- const target* it (in != nullptr ? &search (t, *in, bs) : nullptr);
- const target* ot (on != nullptr
- ? in == on ? it : &search (t, *on, bs)
- : nullptr);
+ recipe group_rule::
+ apply (action a, target& t) const
+ {
+ // Resolve group members.
+ //
+ // Remember that we are called twice: first during update for test
+ // (pre-operation) and then during test. During the former, we rely on
+ // the normall update rule to resolve the group members. During the
+ // latter, there will be no rule to do this but the group will already
+ // have been resolved by the pre-operation.
+ //
+ // If the rule could not resolve the group, then we ignore it.
+ //
+ group_view gv (a.outer ()
+ ? resolve_group_members (a, t)
+ : t.group_members (a));
- if (a.operation () == update_id)
+ if (gv.members != nullptr)
+ {
+ auto& pts (t.prerequisite_targets[a]);
+ for (size_t i (0); i != gv.count; ++i)
{
- // First see if input/output are existing, up-to-date files. This
- // is a common case optimization.
- //
- if (it != nullptr)
- {
- if (build2::match (a, *it, unmatch::unchanged))
- it = nullptr;
- }
+ if (const target* m = gv.members[i])
+ pts.push_back (m);
+ }
- if (ot != nullptr)
- {
- if (in != on)
- {
- if (build2::match (a, *ot, unmatch::unchanged))
- ot = nullptr;
- }
- else
- ot = it;
- }
+ match_members (a, t, pts);
+ }
- // Find the "real" update rule, that is, the rule that would have
- // been found if we signalled that we do not match from match()
- // above.
- //
- recipe d (match_delegate (a, t, *this));
+ // Delegate to the base rule.
+ //
+ return rule::apply (a, t);
+ }
- // If we have no input/output that needs updating, then simply
- // redirect to it.
- //
- if (it == nullptr && ot == nullptr)
- return d;
+ target_state rule::
+ perform_update (action a, const target& t)
+ {
+ // First execute the inner recipe, then, if passing-through, execute
+ // prerequisites.
+ //
+ target_state ts (execute_inner (a, t));
- // Ok, time to handle the worst case scenario: we need to cause
- // update of input/output targets and also delegate to the real
- // update.
- //
- return [it, ot, dr = move (d)] (
- action a, const target& t) -> target_state
- {
- // Do the general update first.
- //
- target_state r (execute_delegate (dr, a, t));
+ if (t.prerequisite_targets[a].size () != 0)
+ ts |= straight_execute_prerequisites (a, t);
- const target* ts[] = {it, ot};
- return r |= straight_execute_members (a, t, ts);
- };
- }
- else
- {
- // Cache the targets in our prerequsite targets lists where they can
- // be found by perform_test(). If we have either or both, then the
- // first entry is input and the second -- output (either can be
- // NULL).
- //
- if (it != nullptr || ot != nullptr)
- {
- auto& pts (t.prerequisite_targets);
- pts.resize (2, nullptr);
- pts[0] = it;
- pts[1] = ot;
- }
-
- return &perform_test;
- }
- }
+ return ts;
}
static script::scope_state
@@ -404,31 +363,33 @@ namespace build2
return r;
}
- target_state rule_common::
- perform_script (action, const target& t) const
+ target_state rule::
+ perform_script (action a, const target& t, size_t pass_n) const
{
+ // First pass through.
+ //
+ if (pass_n != 0)
+ straight_execute_prerequisites (a, t, pass_n);
+
// Figure out whether the testscript file is called 'testscript', in
// which case it should be the only one.
//
+ auto& pts (t.prerequisite_targets[a]);
+ size_t pts_n (pts.size ());
+
bool one;
- size_t count (0);
{
optional<bool> o;
- for (const target* pt: t.prerequisite_targets)
+ for (size_t i (pass_n); i != pts_n; ++i)
{
- // In case we are using the alias rule's list (see above).
- //
- if (const testscript* ts = pt->is_a<testscript> ())
- {
- count++;
+ const testscript& ts (*pts[i]->is_a<testscript> ());
- bool r (ts->name == "testscript");
+ bool r (ts.name == "testscript");
- if ((r && o) || (!r && o && *o))
- fail << "both 'testscript' and other names specified for " << t;
+ if ((r && o) || (!r && o && *o))
+ fail << "both 'testscript' and other names specified for " << t;
- o = r;
- }
+ o = r;
}
assert (o); // We should have a testscript or we wouldn't be here.
@@ -487,57 +448,56 @@ namespace build2
// Start asynchronous execution of the testscripts.
//
- wait_guard wg (target::count_busy (), t.task_count);
+ wait_guard wg (target::count_busy (), t[a].task_count);
// Result vector.
//
using script::scope_state;
vector<scope_state> result;
- result.reserve (count); // Make sure there are no reallocations.
+ result.reserve (pts_n - pass_n); // Make sure there are no reallocations.
- for (const target* pt: t.prerequisite_targets)
+ for (size_t i (pass_n); i != pts_n; ++i)
{
- if (const testscript* ts = pt->is_a<testscript> ())
+ const testscript& ts (*pts[i]->is_a<testscript> ());
+
+ // If this is just the testscript, then its id path is empty (and it
+ // can only be ignored by ignoring the test target, which makes sense
+ // since it's the only testscript file).
+ //
+ if (one || test (t, path (ts.name)))
{
- // If this is just the testscript, then its id path is empty (and it
- // can only be ignored by ignoring the test target, which makes
- // sense since it's the only testscript file).
- //
- if (one || test (t, path (ts->name)))
+ if (mk)
{
- if (mk)
- {
- mkdir (wd, 2);
- mk = false;
- }
+ mkdir (wd, 2);
+ mk = false;
+ }
- result.push_back (scope_state::unknown);
- scope_state& r (result.back ());
-
- if (!sched.async (target::count_busy (),
- t.task_count,
- [this] (scope_state& r,
- const target& t,
- const testscript& ts,
- const dir_path& wd,
- const diag_frame* ds)
- {
- diag_frame df (ds);
- r = perform_script_impl (t, ts, wd, *this);
- },
- ref (r),
- cref (t),
- cref (*ts),
- cref (wd),
- diag_frame::stack))
- {
- // Executed synchronously. If failed and we were not asked to
- // keep going, bail out.
- //
- if (r == scope_state::failed && !keep_going)
- throw failed ();
- }
+ result.push_back (scope_state::unknown);
+ scope_state& r (result.back ());
+
+ if (!sched.async (target::count_busy (),
+ t[a].task_count,
+ [this] (scope_state& r,
+ const target& t,
+ const testscript& ts,
+ const dir_path& wd,
+ const diag_frame* ds)
+ {
+ diag_frame df (ds);
+ r = perform_script_impl (t, ts, wd, *this);
+ },
+ ref (r),
+ cref (t),
+ cref (ts),
+ cref (wd),
+ diag_frame::stack))
+ {
+ // Executed synchronously. If failed and we were not asked to keep
+ // going, bail out.
+ //
+ if (r == scope_state::failed && !keep_going)
+ throw failed ();
}
}
}
@@ -597,28 +557,15 @@ namespace build2
try
{
- if (prev == nullptr)
- {
- // First process.
- //
- process p (args, 0, out);
- pr = *next == nullptr || run_test (t, dr, next, &p);
- p.wait ();
+ process p (prev == nullptr
+ ? process (args, 0, out) // First process.
+ : process (args, *prev, out)); // Next process.
- assert (p.exit);
- pe = *p.exit;
- }
- else
- {
- // Next process.
- //
- process p (args, *prev, out);
- pr = *next == nullptr || run_test (t, dr, next, &p);
- p.wait ();
+ pr = *next == nullptr || run_test (t, dr, next, &p);
+ p.wait ();
- assert (p.exit);
- pe = *p.exit;
- }
+ assert (p.exit);
+ pe = *p.exit;
}
catch (const process_error& e)
{
@@ -646,10 +593,12 @@ namespace build2
}
target_state rule::
- perform_test (action, const target& tt)
+ perform_test (action a, const target& tt, size_t pass_n) const
{
- // @@ Would be nice to print what signal/core was dumped.
+ // First pass through.
//
+ if (pass_n != 0)
+ straight_execute_prerequisites (a, tt, pass_n);
// See if we have the test executable override.
//
@@ -657,7 +606,7 @@ namespace build2
{
// Note that the test variable's visibility is target.
//
- lookup l (tt["test"]);
+ lookup l (tt[var_test]);
// Note that we have similar code for scripted tests.
//
@@ -686,7 +635,7 @@ namespace build2
{
// Must be a target name.
//
- // @@ OUT: what if this is a @-qualified pair or names?
+ // @@ OUT: what if this is a @-qualified pair of names?
//
t = search_existing (*n, tt.base_scope ());
@@ -722,41 +671,74 @@ namespace build2
}
}
- process_path pp (run_search (p, true));
- cstrings args {pp.recall_string ()};
-
- // Do we have options?
+ // See apply() for the structure of prerequisite_targets in the presence
+ // of test.{input,stdin,stdout}.
//
- if (auto l = tt["test.options"])
- append_options (args, cast<strings> (l));
+ auto& pts (tt.prerequisite_targets[a]);
+ size_t pts_n (pts.size ());
+
+ cstrings args;
- // Do we have input?
+ // Do we have stdin?
//
- auto& pts (tt.prerequisite_targets);
- if (pts.size () != 0 && pts[0] != nullptr)
+ // We simulate stdin redirect (&lt;file) with a fake (already terminated)
+ // cat pipe (cat file |).
+ //
+ bool stdin (pass_n != pts_n && pts[pass_n] != nullptr);
+
+ process cat;
+ if (stdin)
{
- const file& it (pts[0]->as<file> ());
+ const file& it (pts[pass_n]->as<file> ());
const path& ip (it.path ());
assert (!ip.empty ()); // Should have been assigned by update.
+
+ try
+ {
+ cat.in_ofd = fdopen (ip, fdopen_mode::in);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to open " << ip << ": " << e;
+ }
+
+ // Purely for diagnostics.
+ //
+ args.push_back ("cat");
args.push_back (ip.string ().c_str ());
+ args.push_back (nullptr);
}
- // Maybe arguments then?
+
+ process_path pp (run_search (p, true));
+ args.push_back (pp.recall_string ());
+
+ // Do we have options and/or arguments?
//
- else
+ if (auto l = tt[test_options])
+ append_options (args, cast<strings> (l));
+
+ if (auto l = tt[test_arguments])
+ append_options (args, cast<strings> (l));
+
+ // Do we have inputs?
+ //
+ for (size_t i (pass_n + 2); i < pts_n; ++i)
{
- if (auto l = tt["test.arguments"])
- append_options (args, cast<strings> (l));
+ const file& it (pts[i]->as<file> ());
+ const path& ip (it.path ());
+ assert (!ip.empty ()); // Should have been assigned by update.
+ args.push_back (ip.string ().c_str ());
}
args.push_back (nullptr);
- // Do we have output?
+ // Do we have stdout?
//
path dp ("diff");
process_path dpp;
- if (pts.size () != 0 && pts[1] != nullptr)
+ if (pass_n != pts_n && pts[pass_n + 1] != nullptr)
{
- const file& ot (pts[1]->as<file> ());
+ const file& ot (pts[pass_n + 1]->as<file> ());
const path& op (ot.path ());
assert (!op.empty ()); // Should have been assigned by update.
@@ -775,7 +757,7 @@ namespace build2
// Ignore Windows newline fluff if that's what we are running on.
//
- if (cast<target_triplet> (tt["test.target"]).class_ == "windows")
+ if (cast<target_triplet> (tt[test_target]).class_ == "windows")
args.push_back ("--strip-trailing-cr");
args.push_back (op.string ().c_str ());
@@ -791,7 +773,10 @@ namespace build2
text << "test " << tt;
diag_record dr;
- if (!run_test (tt, dr, args.data ()))
+ if (!run_test (tt,
+ dr,
+ args.data () + (stdin ? 3 : 0), // Skip cat.
+ stdin ? &cat : nullptr))
{
dr << info << "test command line: ";
print_process (dr, args);
@@ -800,18 +785,5 @@ namespace build2
return target_state::changed;
}
-
- target_state alias_rule::
- perform_test (action a, const target& t) const
- {
- // Run the alias recipe first then the test.
- //
- target_state r (straight_execute_prerequisites (a, t));
-
- // Note that we reuse the prerequisite_targets prepared by the standard
- // search and match.
- //
- return r |= perform_script (a, t);
- }
}
}
diff --git a/build2/test/rule.hxx b/build2/test/rule.hxx
index 819121c..b331263 100644
--- a/build2/test/rule.hxx
+++ b/build2/test/rule.hxx
@@ -17,34 +17,46 @@ namespace build2
{
namespace test
{
- class rule_common: public build2::rule, protected virtual common
+ class rule: public build2::rule, protected virtual common
{
public:
- virtual match_result
+ explicit
+ rule (common_data&& d): common (move (d)) {}
+
+ virtual bool
match (action, target&, const string&) const override;
+ virtual recipe
+ apply (action, target&) const override;
+
+ static target_state
+ perform_update (action, const target&);
+
+ target_state
+ perform_test (action, const target&, size_t) const;
+
target_state
- perform_script (action, const target&) const;
+ perform_script (action, const target&, size_t) const;
};
- class rule: public rule_common
+ class default_rule: public rule // For disambiguation in module.
{
public:
- virtual recipe
- apply (action, target&) const override;
-
- static target_state
- perform_test (action, const target&);
+ explicit
+ default_rule (common_data&& d): common (move (d)), rule (move (d)) {}
};
- class alias_rule: public rule_common
+ // In addition to the above rule's semantics, this rule sees through to
+ // the group's members.
+ //
+ class group_rule: public rule
{
public:
+ explicit
+ group_rule (common_data&& d): common (move (d)), rule (move (d)) {}
+
virtual recipe
apply (action, target&) const override;
-
- target_state
- perform_test (action, const target&) const;
};
}
}
diff --git a/build2/test/script/runner.cxx b/build2/test/script/runner.cxx
index cddd3a7..9588ac2 100644
--- a/build2/test/script/runner.cxx
+++ b/build2/test/script/runner.cxx
@@ -181,6 +181,8 @@ namespace build2
// For targets other than Windows leave the string intact.
//
+ // @@ Would be nice to use cached value from test::common_data.
+ //
if (cast<target_triplet> (scr.test_target["test.target"]).class_ !=
"windows")
return s;
@@ -294,6 +296,8 @@ namespace build2
// Ignore Windows newline fluff if that's what we are running on.
//
+ // @@ Would be nice to use cached value from test::common_data.
+ //
if (cast<target_triplet> (
sp.root->test_target["test.target"]).class_ == "windows")
args.push_back ("--strip-trailing-cr");
diff --git a/build2/test/script/script.cxx b/build2/test/script/script.cxx
index 51c08cb..0516b0f 100644
--- a/build2/test/script/script.cxx
+++ b/build2/test/script/script.cxx
@@ -503,6 +503,8 @@ namespace build2
// buildfiles except for test: while in buildfiles it can be a
// target name, in testscripts it should be resolved to a path.
//
+ // Note: entering in a custom variable pool.
+ //
test_var (var_pool.insert<path> ("test")),
options_var (var_pool.insert<strings> ("test.options")),
arguments_var (var_pool.insert<strings> ("test.arguments")),
@@ -527,7 +529,9 @@ namespace build2
// script
//
script::
- script (const target& tt, const testscript& st, const dir_path& rwd)
+ script (const target& tt,
+ const testscript& st,
+ const dir_path& rwd)
: group (st.name == "testscript" ? string () : st.name, this),
test_target (tt),
script_target (st)
@@ -574,7 +578,7 @@ namespace build2
{
// Must be a target name.
//
- // @@ OUT: what if this is a @-qualified pair or names?
+ // @@ OUT: what if this is a @-qualified pair of names?
//
t = search_existing (*n, tt.base_scope ());
diff --git a/build2/variable.cxx b/build2/variable.cxx
index d1f95c5..eb74aad 100644
--- a/build2/variable.cxx
+++ b/build2/variable.cxx
@@ -302,7 +302,7 @@ namespace build2
}
void
- typify (value& v, const value_type& t, const variable* var)
+ typify (value& v, const value_type& t, const variable* var, memory_order mo)
{
if (v.type == nullptr)
{
@@ -312,11 +312,16 @@ namespace build2
//
names ns (move (v).as<names> ());
v = nullptr;
- v.type = &t;
- v.assign (move (ns), var);
+
+ // Use value_type::assign directly to delay v.type change.
+ //
+ t.assign (v, move (ns), var);
+ v.null = false;
}
else
v.type = &t;
+
+ v.type.store (&t, mo);
}
else if (v.type != &t)
{
@@ -342,8 +347,10 @@ namespace build2
variable_cache_mutex_shard[
hash<value*> () (&v) % variable_cache_mutex_shard_size]);
+ // Note: v.type is rechecked by typify() under lock.
+ //
ulock l (m);
- typify (v, t, var); // v.type is rechecked by typify(), stored under lock.
+ typify (v, t, var, memory_order_release);
}
void
diff --git a/build2/variable.ixx b/build2/variable.ixx
index fd6b7b2..dcc1304 100644
--- a/build2/variable.ixx
+++ b/build2/variable.ixx
@@ -237,6 +237,16 @@ namespace build2
typify (v, t, var);
}
+ void
+ typify (value&, const value_type&, const variable*, memory_order);
+
+ inline void
+ typify (value& v, const value_type& t, const variable* var)
+ {
+ typify (v, t, var, memory_order_relaxed);
+ }
+
+
inline vector_view<const name>
reverse (const value& v, names& storage)
{
diff --git a/build2/version/init.cxx b/build2/version/init.cxx
index 63e32ab..7b8bd01 100644
--- a/build2/version/init.cxx
+++ b/build2/version/init.cxx
@@ -27,8 +27,8 @@ namespace build2
{
static const path manifest ("manifest");
- static const version_doc version_doc_;
- static const version_in version_in_;
+ static const doc_rule doc_rule_;
+ static const in_rule in_rule_;
bool
boot (scope& rs, const location& l, unique_ptr<module_base>& mod)
@@ -311,13 +311,13 @@ namespace build2
{
auto& r (rs.rules);
- r.insert<doc> (perform_update_id, "version.doc", version_doc_);
- r.insert<doc> (perform_clean_id, "version.doc", version_doc_);
- r.insert<doc> (configure_update_id, "version.doc", version_doc_);
+ r.insert<doc> (perform_update_id, "version.doc", doc_rule_);
+ r.insert<doc> (perform_clean_id, "version.doc", doc_rule_);
+ r.insert<doc> (configure_update_id, "version.doc", doc_rule_);
- r.insert<file> (perform_update_id, "version.in", version_in_);
- r.insert<file> (perform_clean_id, "version.in", version_in_);
- r.insert<file> (configure_update_id, "version.in", version_in_);
+ r.insert<file> (perform_update_id, "version.in", in_rule_);
+ r.insert<file> (perform_clean_id, "version.in", in_rule_);
+ r.insert<file> (configure_update_id, "version.in", in_rule_);
}
return true;
diff --git a/build2/version/rule.cxx b/build2/version/rule.cxx
index 9e127ca..bbfe1f6 100644
--- a/build2/version/rule.cxx
+++ b/build2/version/rule.cxx
@@ -45,12 +45,12 @@ namespace build2
return d == rs.src_path ();
}
- // version_doc
+ // doc_rule
//
- match_result version_doc::
+ bool doc_rule::
match (action a, target& xt, const string&) const
{
- tracer trace ("version::version_doc::match");
+ tracer trace ("version::doc_rule::match");
doc& t (static_cast<doc&> (xt));
@@ -76,7 +76,7 @@ namespace build2
return false;
}
- recipe version_doc::
+ recipe doc_rule::
apply (action a, target& xt) const
{
doc& t (static_cast<doc&> (xt));
@@ -101,7 +101,7 @@ namespace build2
}
}
- target_state version_doc::
+ target_state doc_rule::
perform_update (action a, const target& xt)
{
const doc& t (xt.as<const doc&> ());
@@ -168,12 +168,12 @@ namespace build2
return target_state::changed;
}
- // version_in
+ // in_rule
//
- match_result version_in::
+ bool in_rule::
match (action a, target& xt, const string&) const
{
- tracer trace ("version::version_in::match");
+ tracer trace ("version::in_rule::match");
file& t (static_cast<file&> (xt));
const scope& rs (t.root_scope ());
@@ -195,7 +195,7 @@ namespace build2
return fm && fi;
}
- recipe version_in::
+ recipe in_rule::
apply (action a, target& xt) const
{
file& t (static_cast<file&> (xt));
@@ -220,10 +220,10 @@ namespace build2
}
}
- target_state version_in::
+ target_state in_rule::
perform_update (action a, const target& xt)
{
- tracer trace ("version::version_in::perform_update");
+ tracer trace ("version::in_rule::perform_update");
const file& t (xt.as<const file&> ());
const path& tp (t.path ());
diff --git a/build2/version/rule.hxx b/build2/version/rule.hxx
index e686694..9172ba3 100644
--- a/build2/version/rule.hxx
+++ b/build2/version/rule.hxx
@@ -16,12 +16,12 @@ namespace build2
{
// Generate a version file.
//
- class version_doc: public rule
+ class doc_rule: public rule
{
public:
- version_doc () {}
+ doc_rule () {}
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
virtual recipe
@@ -33,12 +33,12 @@ namespace build2
// Preprocess an .in file.
//
- class version_in: public rule
+ class in_rule: public rule
{
public:
- version_in () {}
+ in_rule () {}
- virtual match_result
+ virtual bool
match (action, target&, const string&) const override;
virtual recipe
diff --git a/doc/testscript.cli b/doc/testscript.cli
index 7b3d472..666d553 100644
--- a/doc/testscript.cli
+++ b/doc/testscript.cli
@@ -39,10 +39,10 @@ target as a test along with passing options and arguments, providing the
result. For example:
\
+exe{hello}: file{names.txt}: test.stdin = true
+exe{hello}: file{greetings.txt}: test.stdout = true
exe{hello}: test.options = --greeting 'Hi'
exe{hello}: test.arguments = - # Read names from stdin.
-exe{hello}: test.input = names.txt
-exe{hello}: test.output = greetings.txt
\
This works well for simple, single-run tests. If, however, our testing
diff --git a/tests/cc/preprocessed/testscript b/tests/cc/preprocessed/testscript
index fb69e65..644e121 100644
--- a/tests/cc/preprocessed/testscript
+++ b/tests/cc/preprocessed/testscript
@@ -12,7 +12,7 @@ test.arguments = config.cxx="$recall($cxx.path)" update
# trace: cxx::compile::extract_(headers|modules): target: .../obje{(test).o...}
#
filter = sed -n -e \
- \''s/^trace: cxx::compile::extract_([^:]+): target:[^{]+\{([^.]+).*/\1 \2/p'\'
+ \''s/^trace: cxx::compile_rule::extract_([^:]+): target:[^{]+\{([^.]+).*/\1 \2/p'\'
+cat <<EOI >=build/root.build
cxx.std = latest
diff --git a/tests/test/config-test/testscript b/tests/test/config-test/testscript
index 0d08eb0..7423322 100644
--- a/tests/test/config-test/testscript
+++ b/tests/test/config-test/testscript
@@ -82,8 +82,7 @@ EOI
+touch proj/units/simple/driver
+cat <<EOI >=proj/units/simple/buildfile
driver = $src_root/../../exe{driver}
-#@@ TMP file{driver}@./: $driver
-./: file{driver} $driver
+file{driver}@./: $driver
file{driver}@./: test = $driver
file{driver}@./: test.arguments = units/simple
EOI