author    Boris Kolpackov <boris@codesynthesis.com>  2019-08-22 14:38:57 +0200
committer Boris Kolpackov <boris@codesynthesis.com>  2019-08-22 16:16:32 +0200
commit    4f5b6cb7ed4e05e98cce7e692462f49e24b7a39a
tree      4184fa33e116ec74747feec0c15e30219c7d087b /libbuild2/context.cxx
parent    739f68b9e45c925ccc5a28b9b796030272575e2b
Targets, scopes, vars
Diffstat (limited to 'libbuild2/context.cxx')
 libbuild2/context.cxx | 785
 1 file changed, 397 insertions(+), 388 deletions(-)
diff --git a/libbuild2/context.cxx b/libbuild2/context.cxx
index 1cc8bbc..720e8d8 100644
--- a/libbuild2/context.cxx
+++ b/libbuild2/context.cxx
@@ -10,6 +10,8 @@
#include <libbuild2/rule.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/variable.hxx>
+#include <libbuild2/operation.hxx>
#include <libbuild2/diagnostics.hxx>
#include <libbutl/ft/exception.hxx> // uncaught_exceptions
@@ -25,379 +27,48 @@ using namespace butl;
namespace build2
{
- scheduler sched;
-
- run_phase phase;
- run_phase_mutex phase_mutex;
-
- size_t load_generation;
-
- bool run_phase_mutex::
- lock (run_phase p)
- {
- bool r;
-
- {
- mlock l (m_);
- bool u (lc_ == 0 && mc_ == 0 && ec_ == 0); // Unlocked.
-
- // Increment the counter.
- //
- condition_variable* v (nullptr);
- switch (p)
- {
- case run_phase::load: lc_++; v = &lv_; break;
- case run_phase::match: mc_++; v = &mv_; break;
- case run_phase::execute: ec_++; v = &ev_; break;
- }
-
- // If unlocked, switch directly to the new phase. Otherwise wait for the
- // phase switch. Note that in the unlocked case we don't need to notify
- // since there is nobody waiting (all counters are zero).
- //
- if (u)
- {
- phase = p;
- r = !fail_;
- }
- else if (phase != p)
- {
- sched.deactivate (false /* external */);
- for (; phase != p; v->wait (l)) ;
- r = !fail_;
- l.unlock (); // Important: activate() can block.
- sched.activate (false /* external */);
- }
- else
- r = !fail_;
- }
-
- // In case of load, acquire the exclusive access mutex.
- //
- if (p == run_phase::load)
- {
- lm_.lock ();
- r = !fail_; // Re-query.
- }
-
- return r;
- }
-
- void run_phase_mutex::
- unlock (run_phase p)
- {
- // In case of load, release the exclusive access mutex.
- //
- if (p == run_phase::load)
- lm_.unlock ();
-
- {
- mlock l (m_);
-
- // Decrement the counter and see if this phase has become unlocked.
- //
- bool u (false);
- switch (p)
- {
- case run_phase::load: u = (--lc_ == 0); break;
- case run_phase::match: u = (--mc_ == 0); break;
- case run_phase::execute: u = (--ec_ == 0); break;
- }
-
- // If the phase is unlocked, pick a new phase and notify the waiters.
- // Note that we notify all load waiters so that they can all serialize
- // behind the second-level mutex.
- //
- if (u)
- {
- condition_variable* v;
-
- if (lc_ != 0) {phase = run_phase::load; v = &lv_;}
- else if (mc_ != 0) {phase = run_phase::match; v = &mv_;}
- else if (ec_ != 0) {phase = run_phase::execute; v = &ev_;}
- else {phase = run_phase::load; v = nullptr;}
-
- if (v != nullptr)
- {
- l.unlock ();
- v->notify_all ();
- }
- }
- }
- }
-
- bool run_phase_mutex::
- relock (run_phase o, run_phase n)
- {
- // Pretty much a fused unlock/lock implementation except that we always
- // switch into the new phase.
- //
- assert (o != n);
-
- bool r;
-
- if (o == run_phase::load)
- lm_.unlock ();
-
- {
- mlock l (m_);
- bool u (false);
-
- switch (o)
- {
- case run_phase::load: u = (--lc_ == 0); break;
- case run_phase::match: u = (--mc_ == 0); break;
- case run_phase::execute: u = (--ec_ == 0); break;
- }
-
- // Set if will be waiting or notifying others.
- //
- condition_variable* v (nullptr);
- switch (n)
- {
- case run_phase::load: v = lc_++ != 0 || !u ? &lv_ : nullptr; break;
- case run_phase::match: v = mc_++ != 0 || !u ? &mv_ : nullptr; break;
- case run_phase::execute: v = ec_++ != 0 || !u ? &ev_ : nullptr; break;
- }
-
- if (u)
- {
- phase = n;
- r = !fail_;
-
- // Notify others that could be waiting for this phase.
- //
- if (v != nullptr)
- {
- l.unlock ();
- v->notify_all ();
- }
- }
- else // phase != n
- {
- sched.deactivate (false /* external */);
- for (; phase != n; v->wait (l)) ;
- r = !fail_;
- l.unlock (); // Important: activate() can block.
- sched.activate (false /* external */);
- }
- }
-
- if (n == run_phase::load)
- {
- lm_.lock ();
- r = !fail_; // Re-query.
- }
-
- return r;
- }
-
- // C++17 deprecated uncaught_exception() so use uncaught_exceptions() if
- // available.
+ // Create global scope. Note that the empty path is a prefix for any other
+ // path. See the comment in <libbutl/prefix-map.mxx> for details.
//
- static inline bool
- uncaught_exception ()
+ static inline scope&
+ create_global_scope (scope_map& m)
{
-#ifdef __cpp_lib_uncaught_exceptions
- return std::uncaught_exceptions () != 0;
-#else
- return std::uncaught_exception ();
-#endif
- }
-
- // phase_lock
- //
- static
-#ifdef __cpp_thread_local
- thread_local
-#else
- __thread
-#endif
- phase_lock* phase_lock_instance;
-
- phase_lock::
- phase_lock (run_phase p)
- : p (p)
- {
- if (phase_lock* l = phase_lock_instance)
- assert (l->p == p);
- else
- {
- if (!phase_mutex.lock (p))
- {
- phase_mutex.unlock (p);
- throw failed ();
- }
-
- phase_lock_instance = this;
-
- //text << this_thread::get_id () << " phase acquire " << p;
- }
- }
-
- phase_lock::
- ~phase_lock ()
- {
- if (phase_lock_instance == this)
- {
- phase_lock_instance = nullptr;
- phase_mutex.unlock (p);
-
- //text << this_thread::get_id () << " phase release " << p;
- }
- }
-
- // phase_unlock
- //
- phase_unlock::
- phase_unlock (bool u)
- : l (u ? phase_lock_instance : nullptr)
- {
- if (u)
- {
- phase_lock_instance = nullptr;
- phase_mutex.unlock (l->p);
-
- //text << this_thread::get_id () << " phase unlock " << l->p;
- }
- }
-
- phase_unlock::
- ~phase_unlock () noexcept (false)
- {
- if (l != nullptr)
- {
- bool r (phase_mutex.lock (l->p));
- phase_lock_instance = l;
-
- // Fail unless we are already failing. Note that we keep the phase
- // locked since there will be phase_lock down the stack to unlock it.
- //
- if (!r && !uncaught_exception ())
- throw failed ();
-
- //text << this_thread::get_id () << " phase lock " << l->p;
- }
- }
-
- // phase_switch
- //
- phase_switch::
- phase_switch (run_phase n)
- : o (phase), n (n)
- {
- if (!phase_mutex.relock (o, n))
- {
- phase_mutex.relock (n, o);
- throw failed ();
- }
-
- phase_lock_instance->p = n;
-
- if (n == run_phase::load) // Note: load lock is exclusive.
- load_generation++;
-
- //text << this_thread::get_id () << " phase switch " << o << " " << n;
- }
-
- phase_switch::
- ~phase_switch () noexcept (false)
- {
- // If we are coming off a failed load phase, mark the phase_mutex as
- // failed to terminate all other threads since the build state may no
- // longer be valid.
- //
- if (n == run_phase::load && uncaught_exception ())
- {
- mlock l (phase_mutex.m_);
- phase_mutex.fail_ = true;
- }
-
- bool r (phase_mutex.relock (n, o));
- phase_lock_instance->p = o;
-
- // Similar logic to ~phase_unlock().
- //
- if (!r && !uncaught_exception ())
- throw failed ();
-
- //text << this_thread::get_id () << " phase restore " << n << " " << o;
- }
-
- string current_mname;
- string current_oname;
-
- const meta_operation_info* current_mif;
- const operation_info* current_inner_oif;
- const operation_info* current_outer_oif;
- size_t current_on;
- execution_mode current_mode;
- bool current_diag_noise;
-
- atomic_count dependency_count;
- atomic_count target_count;
- atomic_count skip_count;
-
- bool keep_going = false;
- bool dry_run = false;
-
- void
- set_current_mif (const meta_operation_info& mif)
- {
- if (current_mname != mif.name)
- {
- current_mname = mif.name;
- global_scope->rw ().assign (var_build_meta_operation) = mif.name;
- }
-
- current_mif = &mif;
- current_on = 0; // Reset.
- }
+ auto i (m.insert (dir_path ()));
+ scope& r (i->second);
+ r.out_path_ = &i->first;
+ return r;
+ };
- void
- set_current_oif (const operation_info& inner_oif,
- const operation_info* outer_oif,
- bool diag_noise)
+ struct context::data
{
- current_oname = (outer_oif == nullptr ? inner_oif : *outer_oif).name;
- current_inner_oif = &inner_oif;
- current_outer_oif = outer_oif;
- current_on++;
- current_mode = inner_oif.mode;
- current_diag_noise = diag_noise;
-
- // Reset counters (serial execution).
- //
- dependency_count.store (0, memory_order_relaxed);
- target_count.store (0, memory_order_relaxed);
- skip_count.store (0, memory_order_relaxed);
- }
-
- variable_overrides
- reset (const strings& cmd_vars)
+ scope_map scopes;
+ target_set targets;
+ variable_pool var_pool;
+ variable_overrides var_overrides;
+
+ data (context& c): scopes (c), targets (c), var_pool (true /* global */) {}
+ };
+
+ context::
+ context (scheduler& s, const strings& cmd_vars)
+ : data_ (new data (*this)),
+ sched (s),
+ scopes (data_->scopes),
+ global_scope (create_global_scope (data_->scopes)),
+ targets (data_->targets),
+ var_pool (data_->var_pool),
+ var_overrides (data_->var_overrides)
{
- tracer trace ("reset");
-
- // @@ Do we want to unload dynamically loaded modules? Note that this will
- // be purely an optimization since a module could be linked-in (i.e., a
- // module cannot expect to be unloaded/re-initialized for each meta-
- // operation).
+ tracer trace ("context");
- l6 ([&]{trace << "resetting build state";});
+ l6 ([&]{trace << "initializing build state";});
- auto& vp (variable_pool::instance);
- auto& sm (scope_map::instance);
+ scope_map& sm (data_->scopes);
+ variable_pool& vp (data_->var_pool);
- variable_overrides vos;
-
- targets.clear ();
- sm.clear ();
- vp.clear ();
-
- // Reset meta/operation tables. Note that the order should match the id
- // constants in <libbuild2/operation.hxx>.
+ // Initialize the meta/operation tables. Note that the order should match
+ // the id constants in <libbuild2/operation.hxx>.
//
- meta_operation_table.clear ();
meta_operation_table.insert ("noop");
meta_operation_table.insert ("perform");
meta_operation_table.insert ("configure");
@@ -420,23 +91,10 @@ namespace build2
operation_table.insert ("uninstall");
operation_table.insert ("update-for-install");
- // Create global scope. Note that the empty path is a prefix for any other
- // path. See the comment in <libbutl/prefix-map.mxx> for details.
- //
- auto make_global_scope = [] () -> scope&
- {
- auto i (scope_map::instance.insert (dir_path ()));
- scope& r (i->second);
- r.out_path_ = &i->first;
- global_scope = scope::global_ = &r;
- return r;
- };
-
- scope& gs (make_global_scope ());
-
// Setup the global scope before parsing any variable overrides since they
// may reference these things.
//
+ scope& gs (global_scope.rw ());
gs.assign<dir_path> ("build.work") = work;
gs.assign<dir_path> ("build.home") = home;
@@ -458,10 +116,10 @@ namespace build2
{
const standard_version& v (build_version);
- auto set = [&gs] (const char* var, auto val)
+ auto set = [&gs, &vp] (const char* var, auto val)
{
using T = decltype (val);
- gs.assign (variable_pool::instance.insert<T> (var)) = move (val);
+ gs.assign (vp.insert<T> (var)) = move (val);
};
// Note: here we assume epoch will always be 1 and therefore omit the
@@ -509,7 +167,7 @@ namespace build2
// were built with. While it is not as precise (for example, a binary
// built for i686 might be running on x86_64), it is good enough of an
// approximation/fallback since most of the time we are interested in just
- // the target class (e.g., linux, windows, macosx).
+ // the target class (e.g., linux, windows, macos).
//
{
// Did the user ask us to use config.guess?
@@ -739,7 +397,7 @@ namespace build2
// things simple. Pass original variable for diagnostics. Use current
// working directory as pattern base.
//
- parser p;
+ parser p (*this);
pair<value, token> r (p.parse_variable_value (l, gs, &work, var));
if (r.second.type != token_type::eos)
@@ -752,8 +410,7 @@ namespace build2
fail << "typed override of variable " << n;
// Global and absolute scope overrides we can enter directly. Project
- // and relative scope ones will be entered by the caller for each
- // amalgamation/project.
+ // and relative scope ones will be entered later for each project.
//
if (c == '!' || (dir && dir->absolute ()))
{
@@ -766,7 +423,7 @@ namespace build2
v = move (r.first);
}
else
- vos.push_back (
+ data_->var_overrides.push_back (
variable_override {var, *o, move (dir), move (r.first)});
}
@@ -784,7 +441,7 @@ namespace build2
vp.insert_pattern<path> (
"config.import.**", true, variable_visibility::normal, true);
- // module.cxx:load_module().
+ // module.cxx:boot/init_module().
//
{
auto v_p (variable_visibility::project);
@@ -847,13 +504,365 @@ namespace build2
r.insert<mtime_target> (perform_update_id, "file", file_rule::instance);
r.insert<mtime_target> (perform_clean_id, "file", file_rule::instance);
}
+ }
+
+ context::
+ ~context ()
+ {
+ // Cannot be inline since context::data is undefined.
+ }
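
The empty body is the usual pimpl requirement: assuming the header holds
something like unique_ptr<data> data_, the destructor has to be defined
here, where context::data is complete, because deleting the pointer
requires the complete type. A minimal sketch of the pattern (the actual
header is not part of this diff):

  // context.hxx (sketch)
  //
  // class context
  // {
  //   struct data;                 // Incomplete in the header.
  //   std::unique_ptr<data> data_; // OK: the header never deletes it.
  // public:
  //   ~context ();                 // Must be defined out-of-line, in a
  // };                             // TU where data is complete.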
+
+ void context::
+ current_mif (const meta_operation_info& mif)
+ {
+ if (current_mname != mif.name)
+ {
+ current_mname = mif.name;
+ global_scope.rw ().assign (var_build_meta_operation) = mif.name;
+ }
+
+ build2::current_mif = &mif;
+ current_on = 0; // Reset.
+ }
+
+ void context::
+ current_oif (const operation_info& inner_oif,
+ const operation_info* outer_oif,
+ bool diag_noise)
+ {
+ current_oname = (outer_oif == nullptr ? inner_oif : *outer_oif).name;
+ current_inner_oif = &inner_oif;
+ current_outer_oif = outer_oif;
+ current_on++;
+ current_mode = inner_oif.mode;
+ current_diag_noise = diag_noise;
+
+ // Reset counters (serial execution).
+ //
+ dependency_count.store (0, memory_order_relaxed);
+ target_count.store (0, memory_order_relaxed);
+ skip_count.store (0, memory_order_relaxed);
+ }
+
+ scheduler sched;
+
+ run_phase phase;
+ run_phase_mutex phase_mutex;
+
+ size_t load_generation;
+
+ bool run_phase_mutex::
+ lock (run_phase p)
+ {
+ bool r;
+
+ {
+ mlock l (m_);
+ bool u (lc_ == 0 && mc_ == 0 && ec_ == 0); // Unlocked.
+
+ // Increment the counter.
+ //
+ condition_variable* v (nullptr);
+ switch (p)
+ {
+ case run_phase::load: lc_++; v = &lv_; break;
+ case run_phase::match: mc_++; v = &mv_; break;
+ case run_phase::execute: ec_++; v = &ev_; break;
+ }
+
+ // If unlocked, switch directly to the new phase. Otherwise wait for the
+ // phase switch. Note that in the unlocked case we don't need to notify
+ // since there is nobody waiting (all counters are zero).
+ //
+ if (u)
+ {
+ phase = p;
+ r = !fail_;
+ }
+ else if (phase != p)
+ {
+ sched.deactivate (false /* external */);
+ for (; phase != p; v->wait (l)) ;
+ r = !fail_;
+ l.unlock (); // Important: activate() can block.
+ sched.activate (false /* external */);
+ }
+ else
+ r = !fail_;
+ }
+
+ // In case of load, acquire the exclusive access mutex.
+ //
+ if (p == run_phase::load)
+ {
+ lm_.lock ();
+ r = !fail_; // Re-query.
+ }
+
+ return r;
+ }
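
To make the counting scheme easier to follow, here is a compact,
self-contained analogue (a sketch, not the build2 API: it collapses the
three per-phase counters and condition variables into one of each and
omits the fail_ flag, the scheduler integration, and the exclusive load
mutex):

  #include <mutex>
  #include <cstddef>
  #include <condition_variable>

  enum class phase {load, match, execute};

  class phase_gate
  {
  public:
    void
    enter (phase p)
    {
      std::unique_lock<std::mutex> l (m_);

      // Proceed if the gate is idle (then we flip the phase ourselves)
      // or is already in our phase; otherwise wait for the current
      // holders to drain.
      //
      cv_.wait (l, [this, p] {return count_ == 0 || current_ == p;});

      current_ = p; // No-op if already p.
      count_++;
    }

    void
    leave ()
    {
      std::unique_lock<std::mutex> l (m_);

      // Last one out lets the waiters race to flip the phase.
      //
      if (--count_ == 0)
      {
        l.unlock ();
        cv_.notify_all ();
      }
    }

  private:
    std::mutex m_;
    std::condition_variable cv_;
    phase current_ = phase::load;
    std::size_t count_ = 0;
  };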
+
+ void run_phase_mutex::
+ unlock (run_phase p)
+ {
+ // In case of load, release the exclusive access mutex.
+ //
+ if (p == run_phase::load)
+ lm_.unlock ();
+
+ {
+ mlock l (m_);
+
+ // Decrement the counter and see if this phase has become unlocked.
+ //
+ bool u (false);
+ switch (p)
+ {
+ case run_phase::load: u = (--lc_ == 0); break;
+ case run_phase::match: u = (--mc_ == 0); break;
+ case run_phase::execute: u = (--ec_ == 0); break;
+ }
- return vos;
+ // If the phase is unlocked, pick a new phase and notify the waiters.
+ // Note that we notify all load waiters so that they can all serialize
+ // behind the second-level mutex.
+ //
+ if (u)
+ {
+ condition_variable* v;
+
+ if (lc_ != 0) {phase = run_phase::load; v = &lv_;}
+ else if (mc_ != 0) {phase = run_phase::match; v = &mv_;}
+ else if (ec_ != 0) {phase = run_phase::execute; v = &ev_;}
+ else {phase = run_phase::load; v = nullptr;}
+
+ if (v != nullptr)
+ {
+ l.unlock ();
+ v->notify_all ();
+ }
+ }
+ }
}
+ bool run_phase_mutex::
+ relock (run_phase o, run_phase n)
+ {
+ // Pretty much a fused unlock/lock implementation except that we always
+ // switch into the new phase.
+ //
+ assert (o != n);
+
+ bool r;
+
+ if (o == run_phase::load)
+ lm_.unlock ();
+
+ {
+ mlock l (m_);
+ bool u (false);
+
+ switch (o)
+ {
+ case run_phase::load: u = (--lc_ == 0); break;
+ case run_phase::match: u = (--mc_ == 0); break;
+ case run_phase::execute: u = (--ec_ == 0); break;
+ }
+
+ // Set if we will be waiting for or notifying others.
+ //
+ condition_variable* v (nullptr);
+ switch (n)
+ {
+ case run_phase::load: v = lc_++ != 0 || !u ? &lv_ : nullptr; break;
+ case run_phase::match: v = mc_++ != 0 || !u ? &mv_ : nullptr; break;
+ case run_phase::execute: v = ec_++ != 0 || !u ? &ev_ : nullptr; break;
+ }
+
+ if (u)
+ {
+ phase = n;
+ r = !fail_;
+
+ // Notify others that could be waiting for this phase.
+ //
+ if (v != nullptr)
+ {
+ l.unlock ();
+ v->notify_all ();
+ }
+ }
+ else // phase != n
+ {
+ sched.deactivate (false /* external */);
+ for (; phase != n; v->wait (l)) ;
+ r = !fail_;
+ l.unlock (); // Important: activate() can block.
+ sched.activate (false /* external */);
+ }
+ }
+
+ if (n == run_phase::load)
+ {
+ lm_.lock ();
+ r = !fail_; // Re-query.
+ }
+
+ return r;
+ }
+
+ // C++17 deprecated uncaught_exception() so use uncaught_exceptions() if
+ // available.
+ //
+ static inline bool
+ uncaught_exception ()
+ {
+#ifdef __cpp_lib_uncaught_exceptions
+ return std::uncaught_exceptions () != 0;
+#else
+ return std::uncaught_exception ();
+#endif
+ }
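
The count-based form matters because this helper is called from
destructors that may themselves throw (see ~phase_unlock() and
~phase_switch() below): throwing while another exception is unwinding
the stack would call std::terminate(). A hedged sketch of the pattern
(commit_guard is hypothetical):

  // Hypothetical guard: report failure from the destructor, but only
  // when no other exception is already in flight.
  //
  struct commit_guard
  {
    ~commit_guard () noexcept (false)
    {
      if (!committed && !uncaught_exception ())
        throw failed ();
    }

    bool committed = false;
  };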
+
+ // phase_lock
+ //
+ static
+#ifdef __cpp_thread_local
+ thread_local
+#else
+ __thread
+#endif
+ phase_lock* phase_lock_instance;
+
+ phase_lock::
+ phase_lock (run_phase p)
+ : p (p)
+ {
+ if (phase_lock* l = phase_lock_instance)
+ assert (l->p == p);
+ else
+ {
+ if (!phase_mutex.lock (p))
+ {
+ phase_mutex.unlock (p);
+ throw failed ();
+ }
+
+ phase_lock_instance = this;
+
+ //text << this_thread::get_id () << " phase acquire " << p;
+ }
+ }
+
+ phase_lock::
+ ~phase_lock ()
+ {
+ if (phase_lock_instance == this)
+ {
+ phase_lock_instance = nullptr;
+ phase_mutex.unlock (p);
+
+ //text << this_thread::get_id () << " phase release " << p;
+ }
+ }
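
Typical usage is scoped (a sketch; the re-entry behavior follows from
the constructor above):

  // Hold the match phase for the duration of a scope. If this thread
  // already holds a phase_lock, the constructor only asserts that the
  // phases agree and the destructor becomes a no-op.
  //
  {
    phase_lock pl (run_phase::match);

    // ... work that requires the match phase ...
  } // Releases the phase, but only if pl actually acquired it.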
+
+ // phase_unlock
+ //
+ phase_unlock::
+ phase_unlock (bool u)
+ : l (u ? phase_lock_instance : nullptr)
+ {
+ if (u)
+ {
+ phase_lock_instance = nullptr;
+ phase_mutex.unlock (l->p);
+
+ //text << this_thread::get_id () << " phase unlock " << l->p;
+ }
+ }
+
+ phase_unlock::
+ ~phase_unlock () noexcept (false)
+ {
+ if (l != nullptr)
+ {
+ bool r (phase_mutex.lock (l->p));
+ phase_lock_instance = l;
+
+ // Fail unless we are already failing. Note that we keep the phase
+ // locked since there will be phase_lock down the stack to unlock it.
+ //
+ if (!r && !uncaught_exception ())
+ throw failed ();
+
+ //text << this_thread::get_id () << " phase lock " << l->p;
+ }
+ }
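
This is the inverse of phase_lock: release the phase around a blocking
operation so other threads can proceed or switch phases, then re-acquire
on scope exit. A sketch:

  // Temporarily give up the phase around a blocking wait.
  //
  {
    phase_unlock pu (true /* unlock */);

    // ... wait on a subprocess, a future, etc. ...
  } // Re-locks the phase; throws failed () if the build has since failed.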
+
+ // phase_switch
+ //
+ phase_switch::
+ phase_switch (run_phase n)
+ : o (phase), n (n)
+ {
+ if (!phase_mutex.relock (o, n))
+ {
+ phase_mutex.relock (n, o);
+ throw failed ();
+ }
+
+ phase_lock_instance->p = n;
+
+ if (n == run_phase::load) // Note: load lock is exclusive.
+ load_generation++;
+
+ //text << this_thread::get_id () << " phase switch " << o << " " << n;
+ }
+
+ phase_switch::
+ ~phase_switch () noexcept (false)
+ {
+ // If we are coming off a failed load phase, mark the phase_mutex as
+ // failed to terminate all other threads since the build state may no
+ // longer be valid.
+ //
+ if (n == run_phase::load && uncaught_exception ())
+ {
+ mlock l (phase_mutex.m_);
+ phase_mutex.fail_ = true;
+ }
+
+ bool r (phase_mutex.relock (n, o));
+ phase_lock_instance->p = o;
+
+ // Similar logic to ~phase_unlock().
+ //
+ if (!r && !uncaught_exception ())
+ throw failed ();
+
+ //text << this_thread::get_id () << " phase restore " << n << " " << o;
+ }
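
A common (hypothetical) use is a match-phase rule that must fully update
a prerequisite, say a generated header, before it can finish matching:

  // Switch match -> execute and back via RAII.
  //
  {
    phase_switch ps (run_phase::execute);

    // ... execute the prerequisite target ...
  } // Relocks back to the original phase (see ~phase_switch() above).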
+
+ string current_mname;
+ string current_oname;
+
+ const meta_operation_info* current_mif;
+ const operation_info* current_inner_oif;
+ const operation_info* current_outer_oif;
+ size_t current_on;
+ execution_mode current_mode;
+ bool current_diag_noise;
+
+ atomic_count dependency_count;
+ atomic_count target_count;
+ atomic_count skip_count;
+
+ bool keep_going = false;
+ bool dry_run = false;
+
void (*config_save_variable) (scope&, const variable&, uint64_t);
- const string& (*config_preprocess_create) (const variable_overrides&,
+ const string& (*config_preprocess_create) (context&,
values&,
vector_view<opspec>&,
bool,