Diffstat (limited to 'libbuild2/operation.cxx')
-rw-r--r--  libbuild2/operation.cxx  925
1 file changed, 858 insertions, 67 deletions
diff --git a/libbuild2/operation.cxx b/libbuild2/operation.cxx
index 826cfff..6f88e38 100644
--- a/libbuild2/operation.cxx
+++ b/libbuild2/operation.cxx
@@ -3,7 +3,12 @@
#include <libbuild2/operation.hxx>
-#include <iostream> // cout
+#include <iostream> // cout
+#include <unordered_map>
+
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
#include <libbuild2/file.hxx>
#include <libbuild2/scope.hxx>
@@ -13,6 +18,10 @@
#include <libbuild2/algorithm.hxx>
#include <libbuild2/diagnostics.hxx>
+#if 0
+#include <libbuild2/adhoc-rule-buildscript.hxx> // @@ For a hack below.
+#endif
+
using namespace std;
using namespace butl;
@@ -55,7 +64,7 @@ namespace build2
true, // bootstrap_outer
nullptr, // meta-operation pre
nullptr, // operation pre
- &load,
+ &perform_load,
nullptr, // search
nullptr, // match
nullptr, // execute
@@ -67,16 +76,17 @@ namespace build2
// perform
//
void
- load (const values&,
- scope& root,
- const path& bf,
- const dir_path& out_base,
- const dir_path& src_base,
- const location&)
+ perform_load (const values&,
+ scope& root,
+ const path& bf,
+ const dir_path& out_base,
+ const dir_path& src_base,
+ const location&)
{
// Load project's root.build.
//
- load_root (root);
+ if (!root.root_extra->loaded)
+ load_root (root);
// Create the base scope. Note that its existence doesn't mean it was
// already setup as a base scope; it can be the same as root.
@@ -91,15 +101,15 @@ namespace build2
}
void
- search (const values&,
- const scope&,
- const scope& bs,
- const path& bf,
- const target_key& tk,
- const location& l,
- action_targets& ts)
+ perform_search (const values&,
+ const scope&,
+ const scope& bs,
+ const path& bf,
+ const target_key& tk,
+ const location& l,
+ action_targets& ts)
{
- tracer trace ("search");
+ tracer trace ("perform_search");
context& ctx (bs.ctx);
phase_lock pl (ctx, run_phase::match);
@@ -127,10 +137,126 @@ namespace build2
ts.push_back (t);
}
+ // Verify that no two targets share a path unless they both are "read-only"
+ // (have noop recipes).
+ //
+ // Note: somewhat similar logic in dyndep::verify_existing_file().
+ //
+ static void
+ verify_targets (context& ctx, action a)
+ {
+ // On the first pass we collect all the targets that have non-noop
+ // recipes. On the second pass we check if there are any other targets
+ // that have the same path. Note that we must also deal with two non-noop
+ // targets that have the same path.
+ //
+ // Strictly speaking we may need to produce some sort of progress if this
+ // takes long. However, currently we are looking at verification speed of
+ // ~1ms per 2K targets, which means it will only become noticeable with
+ // over 1M targets.
+ //
+ unordered_map<reference_wrapper<const path>,
+ const target*,
+ hash<path>,
+ equal_to<path>> map;
+
+ // Half of the total appears to be a reasonable heuristic.
+ //
+ map.reserve (ctx.targets.size () / 2);
+
+ size_t count_matched (ctx.count_matched ());
+
+ bool e (false);
+ for (size_t pass (1); pass != 3; ++pass)
+ {
+ for (const auto& pt: ctx.targets)
+ {
+ // We are only interested in path-based targets.
+ //
+ const path_target* t (pt->is_a<path_target> ());
+ if (t == nullptr)
+ continue;
+
+ // We are only interested in the matched targets.
+ //
+ const target::opstate& s (t->state[a]);
+
+ if (s.task_count.load (memory_order_relaxed) < count_matched)
+ continue;
+
+ // Skip if for some reason the path is not assigned.
+ //
+ const path& p (t->path (memory_order_relaxed));
+ if (p.empty ())
+ continue;
+
+ recipe_function* const* rf (s.recipe.target<recipe_function*> ());
+ bool noop (rf != nullptr && *rf == &noop_action);
+
+ if ((noop ? 2 : 1) != pass)
+ continue;
+
+ const target* t1;
+ if (pass == 1)
+ {
+ auto r (map.emplace (p, t));
+
+ if (r.second)
+ continue;
+
+ t1 = r.first->second;
+ }
+ else
+ {
+ auto i (map.find (p));
+
+ if (i == map.end ())
+ continue;
+
+ t1 = i->second;
+ }
+
+ e = true;
+
+ diag_record dr (error);
+
+ dr << "multiple targets share path " << p <<
+ info << "first target: " << *t1 <<
+ info << "second target: " << *t <<
+ info << "target " << *t1 << " has non-noop recipe";
+
+ if (pass == 1)
+ {
+ dr << info << "target " << *t << " has non-noop recipe";
+ }
+ else if (t->decl != target_decl::real)
+ {
+ if (t->decl == target_decl::implied)
+ {
+ dr << info << "target " << *t << " is implied by a buildfile";
+ }
+ else
+ {
+ dr << info << "target " << *t << " is not declared in a buildfile";
+
+ if (t->decl == target_decl::prereq_file)
+ dr << " but has corresponding existing file";
+
+ dr << info << "perhaps it is a dynamic dependency?";
+ }
+ }
+ }
+ }
+
+ if (e)
+ throw failed ();
+ }
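The verification above keys an unordered_map by reference_wrapper<const path> so each target's own path object doubles as the key and no copies are made. A minimal standalone sketch of the same two-pass idea, assuming plain std::string paths and a bool noop flag in place of build2's path_target and recipe machinery:

#include <string>
#include <vector>
#include <iostream>
#include <functional>
#include <unordered_map>

struct tgt { std::string path; bool noop; };

static bool
verify (const std::vector<tgt>& ts)
{
  // Pass 1: record every writable (non-noop) target's path. Pass 2: flag
  // any read-only target that reuses one of those paths (two writable
  // targets sharing a path are already flagged on pass 1).
  //
  std::unordered_map<std::reference_wrapper<const std::string>,
                     const tgt*,
                     std::hash<std::string>,
                     std::equal_to<std::string>> map;
  map.reserve (ts.size () / 2); // Same heuristic as above.

  bool ok (true);
  for (int pass (1); pass != 3; ++pass)
  {
    for (const tgt& t: ts)
    {
      if ((t.noop ? 2 : 1) != pass)
        continue;

      if (pass == 1)
      {
        if (map.emplace (t.path, &t).second)
          continue; // First writer of this path.
      }
      else if (map.find (t.path) == map.end ())
        continue; // Path shared only by read-only targets.

      std::cerr << "multiple targets share path " << t.path << '\n';
      ok = false;
    }
  }
  return ok;
}

int main ()
{
  // "a.o" is written by one target and also claimed by a read-only one.
  //
  return verify ({{"a.o", false}, {"a.o", true}, {"b.o", true}}) ? 0 : 1;
}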
+
void
- match (const values&, action a, action_targets& ts, uint16_t diag, bool prog)
+ perform_match (const values&, action a, action_targets& ts,
+ uint16_t diag, bool prog)
{
- tracer trace ("match");
+ tracer trace ("perform_match");
if (ts.empty ())
return;
@@ -142,25 +268,50 @@ namespace build2
// Setup progress reporting if requested.
//
- string what; // Note: must outlive monitor_guard.
+ struct monitor_data
+ {
+ size_t incr;
+ string what;
+ atomic<timestamp::rep> time {timestamp_nonexistent_rep};
+ } md; // Note: must outlive monitor_guard.
scheduler::monitor_guard mg;
if (prog && show_progress (2 /* max_verb */))
{
- size_t incr (stderr_term ? 1 : 10); // Scale depending on output type.
-
- what = " targets to " + diag_do (ctx, a);
+ // Note that showing progress is not free and it can take up to 10% of
+ // the up-to-date check on some projects (e.g., Boost). So we jump
+ // through a few hoops to make sure we don't overindulge.
+ //
+ md.incr = stderr_term // Scale depending on output type.
+ ? (ctx.sched->serial () ? 1 : 5)
+ : 100;
+ md.what = " targets to " + diag_do (ctx, a);
- mg = ctx.sched.monitor (
+ mg = ctx.sched->monitor (
ctx.target_count,
- incr,
- [incr, &what] (size_t c) -> size_t
+ md.incr,
+ [&md] (size_t c) -> size_t
{
+ size_t r (c + md.incr);
+
+ if (stderr_term)
+ {
+ timestamp o (duration (md.time.load (memory_order_consume)));
+ timestamp n (system_clock::now ());
+
+ if (n - o < chrono::milliseconds (80))
+ return r;
+
+ md.time.store (n.time_since_epoch ().count (),
+ memory_order_release);
+ }
+
diag_progress_lock pl;
diag_progress = ' ';
diag_progress += to_string (c);
- diag_progress += what;
- return c + incr;
+ diag_progress += md.what;
+
+ return r;
});
}
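The 80ms check above keeps the monitor callback from writing to the terminal more often than is useful. A minimal sketch of the same throttling idea in plain <atomic>/<chrono> terms (the counter and report text are made up), assuming relaxed accuracy is acceptable since a skipped update is simply shown on a later call:

#include <atomic>
#include <chrono>
#include <cstddef>
#include <iostream>

using clock_type = std::chrono::system_clock;

// Time of the last report, as a raw tick count so it fits in an atomic.
//
static std::atomic<clock_type::rep> last_report {0};

static void
report_progress (std::size_t count)
{
  using namespace std::chrono;

  clock_type::time_point o {clock_type::duration (
    last_report.load (std::memory_order_consume))};
  clock_type::time_point n (clock_type::now ());

  if (n - o < milliseconds (80))
    return; // Too soon since the last report; skip this update.

  last_report.store (n.time_since_epoch ().count (),
                     std::memory_order_release);

  std::cerr << ' ' << count << " targets to update" << '\r';
}

int main ()
{
  for (std::size_t i (0); i != 1000000; ++i)
    report_progress (i); // Only a handful of these actually print.
  std::cerr << '\n';
}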
@@ -168,6 +319,7 @@ namespace build2
// many we have started. Wait with unlocked phase to allow phase
// switching.
//
+ bool fail (false);
size_t i (0), n (ts.size ());
{
atomic_count task_count (0);
@@ -178,21 +330,85 @@ namespace build2
const target& t (ts[i].as<target> ());
l5 ([&]{trace << diag_doing (a, t);});
- target_state s (match_async (a, t, 0, task_count, false));
+ target_state s (match_async (a, t,
+ 0, task_count,
+ match_extra::all_options,
+ false /* fail */));
// Bail out if the target has failed and we weren't instructed to
// keep going.
//
- if (s == target_state::failed && !ctx.keep_going)
+ if (s == target_state::failed)
{
- ++i;
- break;
+ fail = true;
+
+ if (!ctx.keep_going)
+ {
+ ++i;
+ break;
+ }
}
}
wg.wait ();
}
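Both perform_match() and perform_execute() follow the same shape: start work for every top-level target asynchronously, wait for the count to drain, then re-examine every result serially and print diagnostics. A generic illustration of that shape using std::async and futures rather than build2's scheduler and wait_guard, with a toy stand-in task:

#include <future>
#include <vector>
#include <cstddef>
#include <iostream>

enum class target_state {changed, failed};

// Toy stand-in for matching/executing one target; failures are returned,
// not thrown, mirroring the fail = false calls above.
//
static target_state
process (std::size_t id)
{
  return id % 7 == 0 ? target_state::failed : target_state::changed;
}

int main ()
{
  const bool keep_going (true);
  bool fail (false);

  // First: start everything asynchronously.
  //
  std::vector<std::future<target_state>> tasks;
  for (std::size_t id (1); id != 20; ++id)
    tasks.push_back (std::async (std::launch::async, process, id));

  // Then, running serially again: re-examine every result and diagnose.
  //
  for (std::size_t i (0); i != tasks.size (); ++i)
  {
    if (tasks[i].get () == target_state::failed)
    {
      std::cerr << "target " << i + 1 << " failed" << '\n';
      fail = true;

      if (!keep_going)
        break; // Destructors of the remaining futures still wait.
    }
  }

  return fail ? 1 : 0;
}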
+ // If we have any targets with post hoc prerequisites, match those.
+ //
+ // See match_posthoc() for the overall approach description.
+ //
+ bool posthoc_fail (false);
+ if (!ctx.current_posthoc_targets.empty () && (!fail || ctx.keep_going))
+ {
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
+ // Note that on each iteration we may end up with new entries at the
+ // back. Since we start and end each iteration in serial execution, we
+ // don't need to mess with the mutex.
+ //
+ for (const posthoc_target& p: ctx.current_posthoc_targets)
+ {
+ action a (p.action); // May not be the same as argument action.
+ const target& t (p.target);
+
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching to " << diag_do (t.ctx, a)
+ << " post hoc prerequisites of " << t;
+ });
+
+ // Cannot use normal match because incrementing dependency counts in
+ // the face of cycles does not work well (we will deadlock for the
+ // reverse execution mode).
+ //
+ // @@ PERF: match in parallel (need match_direct_async(), etc).
+ //
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ target_state s (match_direct_sync (a, *pt.target,
+ pt.match_options,
+ false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+ }
+
+ if (posthoc_fail && !ctx.keep_going)
+ break;
+ }
+ }
+
// Clear the progress if present.
//
if (mg)
@@ -203,15 +419,25 @@ namespace build2
// We are now running serially. Re-examine targets that we have matched.
//
- bool fail (false);
for (size_t j (0); j != n; ++j)
{
action_target& at (ts[j]);
const target& t (at.as<target> ());
- target_state s (j < i
- ? match (a, t, false)
- : target_state::postponed);
+ // We cannot attribute post hoc failures to specific targets so it
+ // seems the best we can do is just fail them all.
+ //
+ target_state s;
+ if (j < i)
+ {
+ s = match_complete (a, t, match_extra::all_options, false /* fail */);
+
+ if (posthoc_fail)
+ s = /*t.state[a].state =*/ target_state::failed;
+ }
+ else
+ s = target_state::postponed;
+
switch (s)
{
case target_state::postponed:
@@ -226,6 +452,7 @@ namespace build2
}
case target_state::unknown:
case target_state::unchanged:
+ case target_state::changed: // Can happen for an ad hoc group member.
{
break; // Matched successfully.
}
@@ -247,6 +474,12 @@ namespace build2
if (fail)
throw failed ();
+
+ // @@ This feels a bit ad hoc. Maybe we should invent operation hooks
+ // for this (e.g., post-search, post-match, post-execute)?
+ //
+ if (a == perform_update_id)
+ verify_targets (ctx, a);
}
// Phase restored to load.
@@ -255,16 +488,103 @@ namespace build2
}
void
- execute (const values&, action a, action_targets& ts,
- uint16_t diag, bool prog)
+ perform_execute (const values&, action a, action_targets& ts,
+ uint16_t diag, bool prog)
{
- tracer trace ("execute");
+ tracer trace ("perform_execute");
if (ts.empty ())
return;
context& ctx (ts[0].as<target> ().ctx);
+ bool posthoc_fail (false);
+ auto execute_posthoc = [&ctx, &posthoc_fail] ()
+ {
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
+ for (const posthoc_target& p: ctx.current_posthoc_targets)
+ {
+ action a (p.action); // May not be the same as argument action.
+ const target& t (p.target);
+
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while " << diag_doing (t.ctx, a)
+ << " post hoc prerequisites of " << t;
+ });
+
+#if 0
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ target_state s (
+ execute_direct_sync (a, *pt.target, false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+ }
+#else
+ // Note: similar logic/reasoning to below except we use direct
+ // execution.
+ //
+ atomic_count tc (0);
+ wait_guard wg (ctx, tc);
+
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ target_state s (
+ execute_direct_async (a, *pt.target, 0, tc, false /*fail*/));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+ }
+
+ wg.wait ();
+
+ // Process the result.
+ //
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ // Similar to below, no need to wait.
+ //
+ target_state s (pt.target->executed_state (a, false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ // Note: no need to keep going.
+ //
+ posthoc_fail = true;
+ break;
+ }
+ }
+ }
+#endif
+ if (posthoc_fail && !ctx.keep_going)
+ break;
+ }
+ };
+
// Reverse the order of targets if the execution mode is 'last'.
//
if (ctx.current_mode == execution_mode::last)
@@ -272,6 +592,7 @@ namespace build2
phase_lock pl (ctx, run_phase::execute); // Never switched.
+ bool fail (false);
{
// Tune the scheduler.
//
@@ -280,7 +601,7 @@ namespace build2
switch (ctx.current_inner_oif->concurrency)
{
- case 0: sched_tune = tune_guard (ctx.sched, 1); break; // Run serially.
+ case 0: sched_tune = tune_guard (*ctx.sched, 1); break; // Run serially.
case 1: break; // Run as is.
default: assert (false); // Not supported.
}
@@ -303,7 +624,7 @@ namespace build2
{
what = "% of targets " + diag_did (ctx, a);
- mg = ctx.sched.monitor (
+ mg = ctx.sched->monitor (
ctx.target_count,
init - incr,
[init, incr, &what, &ctx] (size_t c) -> size_t
@@ -328,9 +649,18 @@ namespace build2
}
}
+ // In the 'last' execution mode run post hoc first.
+ //
+ if (ctx.current_mode == execution_mode::last)
+ {
+ if (!ctx.current_posthoc_targets.empty ())
+ execute_posthoc ();
+ }
+
// Similar logic to execute_members(): first start asynchronous
// execution of all the top-level targets.
//
+ if (!posthoc_fail || ctx.keep_going)
{
atomic_count task_count (0);
wait_guard wg (ctx, task_count);
@@ -346,13 +676,24 @@ namespace build2
// Bail out if the target has failed and we weren't instructed to
// keep going.
//
- if (s == target_state::failed && !ctx.keep_going)
- break;
+ if (s == target_state::failed)
+ {
+ fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
}
wg.wait ();
}
+ if (ctx.current_mode == execution_mode::first)
+ {
+ if (!ctx.current_posthoc_targets.empty () && (!fail || ctx.keep_going))
+ execute_posthoc ();
+ }
+
// We are now running serially.
//
@@ -388,12 +729,24 @@ namespace build2
// Re-examine all the targets and print diagnostics.
//
- bool fail (false);
for (action_target& at: ts)
{
const target& t (at.as<target> ());
- switch ((at.state = t.executed_state (a, false)))
+ // Similar to match we cannot attribute post hoc failures to specific
+ // targets so it seems the best we can do is just fail them all.
+ //
+ if (!posthoc_fail)
+ {
+ // Note that here we call executed_state() directly instead of
+ // execute_complete() since we know there is no need to wait.
+ //
+ at.state = t.executed_state (a, false /* fail */);
+ }
+ else
+ at.state = /*t.state[a].state =*/ target_state::failed;
+
+ switch (at.state)
{
case target_state::unknown:
{
@@ -438,26 +791,191 @@ namespace build2
if (fail)
throw failed ();
- // We should have executed every target that we matched, provided we
+#ifndef NDEBUG
+ size_t base (ctx.count_base ());
+
+ // For now we disable these checks if we've performed any group member
+ // resolutions that required a match (with apply()) but not execute.
+ //
+ if (ctx.target_count.load (memory_order_relaxed) != 0 &&
+ ctx.resolve_count.load (memory_order_relaxed) != 0)
+ {
+ // These counts are only tracked for the inner operation.
+ //
+ action ia (a.outer () ? a.inner_action () : a);
+
+ // While it may seem that just decrementing the counters for every
+ // target with the resolve_counted flag set should be enough, this will
+ // miss any prerequisites that this target has matched but did not
+ // execute, which may affect both task_count and dependency_count. Note
+ // that this applies recursively and we effectively need to pretend to
+ // execute this target and all its prerequisites, recursively without
+ // actually executing any of their recipes.
+ //
+ // That last bit means we must be able to interpret the populated
+ // prerequisite_targets generically, which is a requirement we place on
+ // rules that resolve groups in apply (see target::group_members() for
+ // details). It so happens that our own adhoc_buildscript_rule doesn't
+ // follow this rule (see execute_update_prerequisites()) so we detect
+ // and handle this with a hack.
+ //
+ // @@ Hm, but there is no guarantee that this holds recursively since
+ // prerequisites may not be see-through groups. For this to work we
+ // would have to impose this restriction globally. Which we could
+ // probably do, just need to audit things carefully (especially
+ // cc::link_rule). But we already sort of rely on that for dump! Maybe
+ // should just require it everywhere and fix adhoc_buildscript_rule.
+ //
+ // @@ There are special recipes that don't populate prerequisite_targets
+ // like group_recipe! Are we banning any such user-defined recipes?
+ // Need to actually look if we have anything else like this. There
+ // is also inner_recipe, though doesn't apply here (only for outer).
+ //
+ // @@ TMP: do and enable after the 0.16.0 release.
+ //
+ // Note: recursive lambda.
+ //
+#if 0
+ auto pretend_execute = [base, ia] (target& t,
+ const auto& pretend_execute) -> void
+ {
+ context& ctx (t.ctx);
+
+ // Note: tries to emulate the execute_impl() functions' semantics.
+ //
+ auto execute_impl = [base, ia, &ctx, &pretend_execute] (target& t)
+ {
+ target::opstate& s (t.state[ia]);
+
+ size_t gd (ctx.dependency_count.fetch_sub (1, memory_order_relaxed));
+ size_t td (s.dependents.fetch_sub (1, memory_order_release));
+ assert (td != 0 && gd != 0);
+
+ // Execute unless already executed.
+ //
+ if (s.task_count.load (memory_order_relaxed) !=
+ base + target::offset_executed)
+ pretend_execute (t, pretend_execute);
+ };
+
+ target::opstate& s (t.state[ia]);
+
+ if (s.state != target_state::unchanged) // Noop recipe.
+ {
+ if (s.recipe_group_action)
+ {
+ execute_impl (const_cast<target&> (*t.group));
+ }
+ else
+ {
+ // @@ Special hack for adhoc_buildscript_rule (remember to drop
+ // include above if getting rid of).
+ //
+ bool adhoc (
+ ia == perform_update_id &&
+ s.rule != nullptr &&
+ dynamic_cast<const adhoc_buildscript_rule*> (
+ &s.rule->second.get ()) != nullptr);
+
+ for (const prerequisite_target& p: t.prerequisite_targets[ia])
+ {
+ const target* pt;
+
+ if (adhoc)
+ pt = (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr);
+ else
+ pt = p.target;
+
+ if (pt != nullptr)
+ execute_impl (const_cast<target&> (*pt));
+ }
+
+ ctx.target_count.fetch_sub (1, memory_order_relaxed);
+ if (s.resolve_counted)
+ {
+ s.resolve_counted = false;
+ ctx.resolve_count.fetch_sub (1, memory_order_relaxed);
+ }
+ }
+
+ s.state = target_state::changed;
+ }
+
+ s.task_count.store (base + target::offset_executed,
+ memory_order_relaxed);
+ };
+#endif
+
+ for (const auto& pt: ctx.targets)
+ {
+ target& t (*pt);
+ target::opstate& s (t.state[ia]);
+
+ // We are only interested in the targets that have been matched for
+ // this operation and are in the applied state.
+ //
+ if (s.task_count.load (memory_order_relaxed) !=
+ base + target::offset_applied)
+ continue;
+
+ if (s.resolve_counted)
+ {
+#if 0
+ pretend_execute (t, pretend_execute);
+
+ if (ctx.resolve_count.load (memory_order_relaxed) == 0)
+ break;
+#else
+ return; // Skip all the below checks.
+#endif
+ }
+ }
+ }
+
+ // We should have executed every target that we have matched, provided we
// haven't failed (in which case we could have bailed out early).
//
assert (ctx.target_count.load (memory_order_relaxed) == 0);
+ assert (ctx.resolve_count.load (memory_order_relaxed) == 0); // Sanity check.
-#ifndef NDEBUG
if (ctx.dependency_count.load (memory_order_relaxed) != 0)
{
+ auto dependents = [base] (action a, const target& t)
+ {
+ const target::opstate& s (t.state[a]);
+
+ // Only consider targets that have been matched for this operation
+ // (since matching is what causes the dependents count reset).
+ //
+ size_t c (s.task_count.load (memory_order_relaxed));
+
+ return (c >= base + target::offset_applied
+ ? s.dependents.load (memory_order_relaxed)
+ : 0);
+ };
+
diag_record dr;
dr << info << "detected unexecuted matched targets:";
for (const auto& pt: ctx.targets)
{
const target& t (*pt);
- if (size_t n = t[a].dependents.load (memory_order_relaxed))
+
+ if (size_t n = dependents (a, t))
dr << text << t << ' ' << n;
+
+ if (a.outer ())
+ {
+ if (size_t n = dependents (a.inner_action (), t))
+ dr << text << t << ' ' << n;
+ }
}
}
-#endif
+
assert (ctx.dependency_count.load (memory_order_relaxed) == 0);
+#endif
}
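The NDEBUG block above checks the accounting invariant that a complete build must leave target_count, resolve_count, and dependency_count at zero, and lists the unexecuted matched targets (with their dependent counts) when it does not hold. A toy sketch of the dependency_count half of that invariant, assuming one global counter incremented once per matched edge and decremented once per edge at execution:

#include <atomic>
#include <vector>
#include <cassert>
#include <cstddef>

int main ()
{
  std::atomic<std::size_t> dependency_count {0};

  // "Match": every edge to a prerequisite bumps the count.
  //
  std::vector<std::size_t> edges_per_target {3, 1, 0, 2};
  for (std::size_t e: edges_per_target)
    dependency_count.fetch_add (e, std::memory_order_relaxed);

  // "Execute": each edge is consumed exactly once.
  //
  for (std::size_t e: edges_per_target)
    for (std::size_t i (0); i != e; ++i)
      dependency_count.fetch_sub (1, std::memory_order_relaxed);

  // If something was matched but never executed, this fires, which is
  // what the unexecuted-matched-targets diagnostic above reports.
  //
  assert (dependency_count.load (std::memory_order_relaxed) == 0);
}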
const meta_operation_info mo_perform {
@@ -470,10 +988,10 @@ namespace build2
true, // bootstrap_outer
nullptr, // meta-operation pre
nullptr, // operation pre
- &load,
- &search,
- &match,
- &execute,
+ &perform_load,
+ &perform_search,
+ &perform_match,
+ &perform_execute,
nullptr, // operation post
nullptr, // meta-operation post
nullptr // include
@@ -481,8 +999,73 @@ namespace build2
// info
//
+
+ // Note: similar approach to forward() in configure.
+ //
+ struct info_params
+ {
+ bool json = false;
+ bool subprojects = true;
+ };
+
+ // Note: should not fail if mo is NULL (see info_subprojects() below).
+ //
+ static info_params
+ info_parse_params (const values& params,
+ const char* mo = nullptr,
+ const location& l = location ())
+ {
+ info_params r;
+
+ if (params.size () == 1)
+ {
+ for (const name& n: cast<names> (params[0]))
+ {
+ if (n.simple ())
+ {
+ if (n.value == "json")
+ {
+ r.json = true;
+ continue;
+ }
+
+ if (n.value == "no_subprojects")
+ {
+ r.subprojects = false;
+ continue;
+ }
+
+ // Fall through.
+ }
+
+ if (mo != nullptr)
+ fail (l) << "unexpected parameter '" << n << "' for "
+ << "meta-operation " << mo;
+ }
+ }
+ else if (!params.empty ())
+ {
+ if (mo != nullptr)
+ fail (l) << "unexpected parameters for meta-operation " << mo;
+ }
+
+ return r;
+ }
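A tiny standalone sketch of the recognition logic above, using plain strings in place of build2's values/names: json and no_subprojects are the only accepted parameters, anything else is diagnosed (here just printed rather than failing the meta-operation):

#include <string>
#include <vector>
#include <iostream>

struct info_params {bool json = false; bool subprojects = true;};

static info_params
parse (const std::vector<std::string>& ns)
{
  info_params r;

  for (const std::string& n: ns)
  {
    if      (n == "json")           r.json = true;
    else if (n == "no_subprojects") r.subprojects = false;
    else
      std::cerr << "unexpected parameter '" << n << "'\n";
  }

  return r;
}

int main ()
{
  info_params p (parse ({"json", "no_subprojects"}));
  std::cout << p.json << ' ' << p.subprojects << '\n'; // Prints: 1 0
}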
+
+ bool
+ info_subprojects (const values& params)
+ {
+ return info_parse_params (params).subprojects;
+ }
+
+ static void
+ info_pre (context&, const values& params, const location& l)
+ {
+ info_parse_params (params, "info", l); // Validate.
+ }
+
static operation_id
- info_operation_pre (const values&, operation_id o)
+ info_operation_pre (context&, const values&, operation_id o)
{
if (o != default_id)
fail << "explicit operation specified for meta-operation info";
@@ -531,7 +1114,7 @@ namespace build2
}
static void
- info_execute (const values&, action, action_targets& ts, uint16_t, bool)
+ info_execute_lines (action_targets& ts, bool subp)
{
for (size_t i (0); i != ts.size (); ++i)
{
@@ -560,22 +1143,224 @@ namespace build2
}
};
+ // Print bootstrapped modules.
+ //
+ auto print_mods = [&rs] ()
+ {
+ for (const module_state& ms: rs.root_extra->loaded_modules)
+ cout << ' ' << ms.name;
+ };
+
+ // Print a potentially empty/null instance.
+ //
+ auto print_empty = [] (const auto& x)
+ {
+ if (!x.empty ())
+ cout << ' ' << x;
+ };
+
+ auto print_null = [] (const auto* p)
+ {
+ if (p != nullptr && !p->empty ())
+ cout << ' ' << *p;
+ };
+
+ // Print a potentially null/empty directory path without trailing slash.
+ //
+ auto print_dir = [] (const dir_path& d)
+ {
+ if (!d.empty ())
+ cout << ' ' << d.string ();
+ };
+
+ auto print_pdir = [&print_dir] (const dir_path* d)
+ {
+ if (d != nullptr)
+ print_dir (*d);
+ };
+
// This could be a simple project that doesn't set project name.
//
cout
- << "project: " << project (rs) << endl
- << "version: " << cast_empty<string> (rs[ctx.var_version]) << endl
- << "summary: " << cast_empty<string> (rs[ctx.var_project_summary]) << endl
- << "url: " << cast_empty<string> (rs[ctx.var_project_url]) << endl
- << "src_root: " << cast<dir_path> (rs[ctx.var_src_root]) << endl
- << "out_root: " << cast<dir_path> (rs[ctx.var_out_root]) << endl
- << "amalgamation: " << (*rs.root_extra->amalgamation != nullptr ? **rs.root_extra->amalgamation : empty_dir_path) << endl
- << "subprojects: " << (*rs.root_extra->subprojects != nullptr ? **rs.root_extra->subprojects : subprojects ()) << endl
- << "operations:"; print_ops (rs.root_extra->operations, ctx.operation_table); cout << endl
- << "meta-operations:"; print_ops (rs.root_extra->meta_operations, ctx.meta_operation_table); cout << endl;
+ << "project:" ; print_empty (project (rs)); cout << endl
+ << "version:" ; print_empty (cast_empty<string> (rs[ctx.var_version])); cout << endl
+ << "summary:" ; print_empty (cast_empty<string> (rs[ctx.var_project_summary])); cout << endl
+ << "url:" ; print_empty (cast_empty<string> (rs[ctx.var_project_url])); cout << endl
+ << "src_root:" ; print_dir (cast<dir_path> (rs[ctx.var_src_root])); cout << endl
+ << "out_root:" ; print_dir (cast<dir_path> (rs[ctx.var_out_root])); cout << endl
+ << "amalgamation:" ; print_pdir (*rs.root_extra->amalgamation); cout << endl;
+ if (subp)
+ {
+ cout
+ << "subprojects:" ; print_null (*rs.root_extra->subprojects); cout << endl;
+ }
+ cout
+ << "operations:" ; print_ops (rs.root_extra->operations, ctx.operation_table); cout << endl
+ << "meta-operations:"; print_ops (rs.root_extra->meta_operations, ctx.meta_operation_table); cout << endl
+ << "modules:" ; print_mods (); cout << endl;
}
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ info_execute_json (action_targets& ts, bool subp)
+ {
+ json::stream_serializer s (cout);
+ s.begin_array ();
+
+ for (size_t i (0); i != ts.size (); ++i)
+ {
+ const scope& rs (ts[i].as<scope> ());
+
+ context& ctx (rs.ctx);
+
+ s.begin_object ();
+
+ // Print a potentially empty string.
+ //
+ auto print_string = [&s] (const char* n,
+ const string& v,
+ bool check = false)
+ {
+ if (!v.empty ())
+ s.member (n, v, check);
+ };
+
+ // Print a potentially null/empty directory path without trailing slash.
+ //
+ auto print_dir = [&s] (const char* n, const dir_path& v)
+ {
+ if (!v.empty ())
+ s.member (n, v.string ());
+ };
+
+ auto print_pdir = [&print_dir] (const char* n, const dir_path* v)
+ {
+ if (v != nullptr)
+ print_dir (n, *v);
+ };
+
+ // Print [meta_]operation names (see info_lines() for details).
+ //
+ auto print_ops = [&s] (const char* name,
+ const auto& ov,
+ const auto& ot,
+ const auto& printer)
+ {
+ s.member_name (name, false /* check */);
+
+ s.begin_array ();
+
+ for (uint8_t id (2); id < ov.size (); ++id)
+ {
+ if (ov[id] != nullptr)
+ printer (ot[id]);
+ }
+
+ s.end_array ();
+ };
+
+ // Note that we won't check some values for being valid UTF-8, since
+ // their characters belong to even stricter character sets and/or are
+ // read from buildfile which is already verified to be valid UTF-8.
+ //
+ print_string ("project", project (rs).string ());
+ print_string ("version", cast_empty<string> (rs[ctx.var_version]));
+ print_string ("summary", cast_empty<string> (rs[ctx.var_project_summary]));
+ print_string ("url", cast_empty<string> (rs[ctx.var_project_url]));
+ print_dir ("src_root", cast<dir_path> (rs[ctx.var_src_root]));
+ print_dir ("out_root", cast<dir_path> (rs[ctx.var_out_root]));
+ print_pdir ("amalgamation", *rs.root_extra->amalgamation);
+
+ // Print subprojects.
+ //
+ if (subp)
+ {
+ const subprojects* sps (*rs.root_extra->subprojects);
+
+ if (sps != nullptr && !sps->empty ())
+ {
+ s.member_name ("subprojects", false /* check */);
+ s.begin_array ();
+
+ for (const auto& sp: *sps)
+ {
+ s.begin_object ();
+
+ print_dir ("path", sp.second);
+
+ // See find_subprojects() for details.
+ //
+ const string& n (sp.first.string ());
+
+ if (!path::traits_type::is_separator (n.back ()))
+ print_string ("name", n);
+
+ s.end_object ();
+ }
+
+ s.end_array ();
+ }
+ }
+
+ print_ops ("operations",
+ rs.root_extra->operations,
+ ctx.operation_table,
+ [&s] (const string& v) {s.value (v, false /* check */);});
+
+ print_ops ("meta-operations",
+ rs.root_extra->meta_operations,
+ ctx.meta_operation_table,
+ [&s] (const meta_operation_data& v)
+ {
+ s.value (v.name, false /* check */);
+ });
+
+ // Print modules.
+ //
+ if (!rs.root_extra->loaded_modules.empty ())
+ {
+ s.member_name ("modules", false /* check */);
+ s.begin_array ();
+
+ for (const module_state& ms: rs.root_extra->loaded_modules)
+ s.value (ms.name, false /* check */);
+
+ s.end_array ();
+ }
+
+ s.end_object ();
+ }
+
+ s.end_array ();
+ cout << endl;
+ }
+#else
+ static void
+ info_execute_json (action_targets&, bool)
+ {
+ }
+#endif // BUILD2_BOOTSTRAP
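For reference, the serializer calls used in info_execute_json() (begin_array/begin_object/member/member_name/value/end_*) follow a simple push model. A minimal sketch producing similar output on cout, assuming libbutl is available (i.e., a non-bootstrap build, as the #ifndef above implies) and using made-up project data:

#include <string>
#include <iostream>

#include <libbutl/json/serializer.hxx>

int main ()
{
  using namespace butl;

  json::stream_serializer s (std::cout);

  s.begin_array ();  // One element per project, as above.
  s.begin_object ();

  std::string project ("hello");
  s.member ("project", project, true /* check */); // Verify UTF-8.

  s.member_name ("operations", false /* check */);
  s.begin_array ();
  s.value (std::string ("update"), false /* check */);
  s.value (std::string ("clean"), false /* check */);
  s.end_array ();

  s.end_object ();
  s.end_array ();

  std::cout << std::endl;
}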
+
+ static void
+ info_execute (const values& params,
+ action,
+ action_targets& ts,
+ uint16_t,
+ bool)
+ {
+ info_params ip (info_parse_params (params));
+
+ // Note that both outputs will not be "ideal" if the user does something
+ // like `b info(foo/) info(bar/)` instead of `b info(foo/ bar/)`. Oh,
+ // well.
+ //
+ if (ip.json)
+ info_execute_json (ts, ip.subprojects);
+ else
+ info_execute_lines (ts, ip.subprojects);
+ }
+
const meta_operation_info mo_info {
info_id,
"info",
@@ -583,8 +1368,8 @@ namespace build2
"",
"",
"",
- false, // bootstrap_outer
- nullptr, // meta-operation pre
+ false, // bootstrap_outer
+ &info_pre, // meta-operation pre
&info_operation_pre,
&info_load,
&info_search,
@@ -610,6 +1395,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
@@ -636,6 +1423,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
@@ -652,6 +1441,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
}