Diffstat (limited to 'libbuild2/context.cxx')
 libbuild2/context.cxx | 166
 1 file changed, 152 insertions(+), 14 deletions(-)
diff --git a/libbuild2/context.cxx b/libbuild2/context.cxx
index c016a40..7b465b4 100644
--- a/libbuild2/context.cxx
+++ b/libbuild2/context.cxx
@@ -141,6 +141,18 @@ namespace build2
v = move (val);
};
+ // Build system mode.
+ //
+ // This value signals any special mode the build system may be running
+ // in. The two core modes are `no-external-modules` (bootstrapping of
+ // external modules is disabled) and `normal` (normal build system
+ // execution). Build system drivers may invent additional modes (for
+ // example, the bpkg `skeleton` mode that is used to evaluate depends
+ // clauses).
+ //
+ set ("build.mode",
+ no_external_modules ? "no-external-modules" : "normal");
+
set ("build.work", work);
set ("build.home", home);
@@ -574,9 +586,10 @@ namespace build2
var_export_metadata = &vp.insert ("export.metadata", v_t); // Untyped.
var_extension = &vp.insert<string> ("extension", v_t);
- var_clean = &vp.insert<bool> ("clean", v_t);
- var_backlink = &vp.insert<string> ("backlink", v_t);
- var_include = &vp.insert<string> ("include", v_q);
+ var_update = &vp.insert<string> ("update", v_q);
+ var_clean = &vp.insert<bool> ("clean", v_t);
+ var_backlink = &vp.insert<string> ("backlink", v_t);
+ var_include = &vp.insert<string> ("include", v_q);
// Backlink executables and (generated) documentation by default.
//
@@ -588,14 +601,13 @@ namespace build2
{
rule_map& r (gs.rules); // Note: global scope!
- //@@ outer
- r.insert<alias> (perform_id, 0, "alias", alias_rule::instance);
+ r.insert<alias> (perform_id, 0, "build.alias", alias_rule::instance);
- r.insert<fsdir> (perform_update_id, "fsdir", fsdir_rule::instance);
- r.insert<fsdir> (perform_clean_id, "fsdir", fsdir_rule::instance);
+ r.insert<fsdir> (perform_update_id, "build.fsdir", fsdir_rule::instance);
+ r.insert<fsdir> (perform_clean_id, "build.fsdir", fsdir_rule::instance);
- r.insert<mtime_target> (perform_update_id, "file", file_rule::instance);
- r.insert<mtime_target> (perform_clean_id, "file", file_rule::instance);
+ r.insert<mtime_target> (perform_update_id, "build.file", file_rule::instance);
+ r.insert<mtime_target> (perform_clean_id, "build.file", file_rule::instance);
}
}
@@ -606,6 +618,67 @@ namespace build2
}
void context::
+ enter_project_overrides (scope& rs,
+ const dir_path& out_base,
+ const variable_overrides& ovrs)
+ {
+ // The mildly tricky part here is to distinguish the situation where we
+ // are bootstrapping the same project multiple times. The first override
+ // that we set cannot already exist (because the override variable names
+ // are unique) so if it is already set, then it can only mean this project
+ // is already bootstrapped.
+ //
+ // This is further complicated by the project vs amalgamation logic (we
+ // may have already done the amalgamation but not the project). So we
+ // split it into two passes.
+ //
+ auto& sm (scopes.rw ());
+
+ for (const variable_override& o: ovrs)
+ {
+ if (o.ovr.visibility != variable_visibility::global)
+ continue;
+
+ // If we have a directory, enter the scope, similar to how we do
+ // it in the context ctor.
+ //
+ scope& s (
+ o.dir
+ ? *sm.insert_out ((out_base / *o.dir).normalize ())->second.front ()
+ : *rs.weak_scope ());
+
+ auto p (s.vars.insert (o.ovr));
+
+ if (!p.second)
+ break;
+
+ value& v (p.first);
+ v = o.val;
+ }
+
+ for (const variable_override& o: ovrs)
+ {
+ // Ours is either project (%foo) or scope (/foo).
+ //
+ if (o.ovr.visibility == variable_visibility::global)
+ continue;
+
+ scope& s (
+ o.dir
+ ? *sm.insert_out ((out_base / *o.dir).normalize ())->second.front ()
+ : rs);
+
+ auto p (s.vars.insert (o.ovr));
+
+ if (!p.second)
+ break;
+
+ value& v (p.first);
+ v = o.val;
+ }
+ }
+
+ void context::
current_meta_operation (const meta_operation_info& mif)
{
if (current_mname != mif.name)
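
The enter_project_overrides() function added above applies variable overrides to a (re-)bootstrapped project: the first pass enters globally-visible overrides into the scope returned by weak_scope() (or the specified directory's scope), and the second pass enters project (%foo) and scope (/foo) overrides relative to the project root. A rough sketch of the intended call pattern, with the override list member name and the surrounding bootstrap code assumed for illustration:

    // After bootstrapping the project whose root scope is rs and whose out
    // directory is out_base (the var_overrides member name is an assumption).
    //
    ctx.enter_project_overrides (rs, out_base, ctx.var_overrides);
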
@@ -623,13 +696,38 @@ namespace build2
const operation_info* outer_oif,
bool diag_noise)
{
- current_oname = (outer_oif == nullptr ? inner_oif : *outer_oif).name;
+ const auto& oif (outer_oif == nullptr ? inner_oif : *outer_oif);
+
+ current_oname = oif.name;
current_inner_oif = &inner_oif;
current_outer_oif = outer_oif;
current_on++;
current_mode = inner_oif.mode;
current_diag_noise = diag_noise;
+ auto find_ovar = [this] (const char* n)
+ {
+ const variable* v (var_pool.find (n));
+
+ // The operation variable should have prerequisite or target visibility.
+ //
+ assert (v != nullptr &&
+ (v->visibility == variable_visibility::prereq ||
+ v->visibility == variable_visibility::target));
+
+ return v;
+ };
+
+ current_inner_ovar =
+ inner_oif.var_name != nullptr
+ ? find_ovar (inner_oif.var_name)
+ : nullptr;
+
+ current_outer_ovar =
+ outer_oif != nullptr && outer_oif->var_name != nullptr
+ ? find_ovar (outer_oif->var_name)
+ : nullptr;
+
// Reset counters (serial execution).
//
dependency_count.store (0, memory_order_relaxed);
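
With current_inner_ovar/current_outer_ovar cached per operation, the match logic can cheaply look up per-prerequisite operation overrides such as the `update` variable registered earlier. A rough sketch of such a lookup, assuming a prerequisite object p with the usual vars map (the names and the call site are illustrative, not the exact code in this commit):

    if (const variable* ovar = ctx.current_inner_ovar)
    {
      // For example, `update = false` set on a prerequisite to suppress
      // updating it as part of this operation.
      //
      if (const string* v = cast_null<string> (p.vars[*ovar]))
        handle_override (*v); // Hypothetical handler.
    }
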
@@ -667,6 +765,8 @@ namespace build2
}
else if (ctx_.phase != n)
{
+ ++contention; // Protected by m_.
+
ctx_.sched.deactivate (false /* external */);
for (; ctx_.phase != n; v->wait (l)) ;
r = !fail_;
@@ -686,6 +786,8 @@ namespace build2
ctx_.sched.deactivate (false /* external */);
lm_.lock ();
ctx_.sched.activate (false /* external */);
+
+ ++contention_load; // Protected by lm_.
}
r = !fail_; // Re-query.
}
@@ -746,7 +848,7 @@ namespace build2
}
}
- bool run_phase_mutex::
+ optional<bool> run_phase_mutex::
relock (run_phase o, run_phase n)
{
// Pretty much a fused unlock/lock implementation except that we always
@@ -755,6 +857,7 @@ namespace build2
assert (o != n);
bool r;
+ bool s (true); // True switch.
if (o == run_phase::load)
lm_.unlock ();
@@ -803,6 +906,8 @@ namespace build2
}
else // phase != n
{
+ ++contention; // Protected by m_.
+
ctx_.sched.deactivate (false /* external */);
for (; ctx_.phase != n; v->wait (l)) ;
r = !fail_;
@@ -815,14 +920,23 @@ namespace build2
{
if (!lm_.try_lock ())
{
+ // If we failed to acquire the load mutex, then we know there is (or
+ // was) someone before us in the load phase. And it's impossible to
+ // switch to a different phase between our calls to try_lock() above
+ // and lock() below because of our +1 in lc_.
+ //
+ s = false;
+
ctx_.sched.deactivate (false /* external */);
lm_.lock ();
ctx_.sched.activate (false /* external */);
+
+ ++contention_load; // Protected by lm_.
}
r = !fail_; // Re-query.
}
- return r;
+ return r ? optional<bool> (s) : nullopt;
}
// C++17 deprecated uncaught_exception() so use uncaught_exceptions() if
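
The new relock() return value encodes three outcomes: nullopt if the switch failed (the fail_ flag is set), true if this thread performed a "true" phase switch, and false if it merely joined a load phase that another thread had already entered (the try_lock() failure case above). Mirroring the caller in phase_switch below:

    optional<bool> r (ctx.phase_mutex.relock (old_phase, new_phase));

    if (!r)
    {
      // Failed: switch back to the old phase and throw.
      //
      ctx.phase_mutex.relock (new_phase, old_phase);
      throw failed ();
    }
    else if (*r)
    {
      // "True" switch performed by this thread (for load, we were first in).
    }
    else
    {
      // Joined a load phase that another thread had already entered.
    }
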
@@ -937,7 +1051,8 @@ namespace build2
phase_lock* pl (phase_lock_instance);
assert (&pl->ctx == &ctx);
- if (!ctx.phase_mutex.relock (old_phase, new_phase))
+ optional<bool> r (ctx.phase_mutex.relock (old_phase, new_phase));
+ if (!r)
{
ctx.phase_mutex.relock (new_phase, old_phase);
throw failed ();
@@ -946,14 +1061,37 @@ namespace build2
pl->phase = new_phase;
if (new_phase == run_phase::load) // Note: load lock is exclusive.
+ {
ctx.load_generation++;
+ // Invalidate cached target base_scope values if we are switching from a
+ // non-load phase (we don't cache during load which means load->load
+ // switch doesn't have anything to invalidate).
+ //
+ // @@ This is still quite expensive on projects like Boost with a large
+ // number of files (targets) and a large number of load phase
+ // switches (due to directory buildfiles).
+ //
+ // Thinking some more on this, we shouldn't need to do this since such
+ // loads can (or at least should) only perform "island appends"; see the
+ // comment on context::phase for details.
+ //
+#if 0
+ if (*r)
+ {
+ for (const unique_ptr<target>& t: ctx.targets)
+ t->base_scope_.store (nullptr, memory_order_relaxed);
+ }
+#endif
+ }
+
//text << this_thread::get_id () << " phase switch "
// << old_phase << " " << new_phase;
}
#if 0
- // NOTE: see push/pop_phase() logic if trying to enable this.
+ // NOTE: see push/pop_phase() logic if trying to enable this. Also
+ // the load stuff above.
//
phase_switch::
phase_switch (phase_unlock&& u, phase_lock&& l)