// file : build2/context -*- C++ -*- // copyright : Copyright (c) 2014-2017 Code Synthesis Ltd // license : MIT; see accompanying LICENSE file #ifndef BUILD2_CONTEXT #define BUILD2_CONTEXT #include #include #include #include #include namespace build2 { // In order to perform each operation the build system goes through the // following phases: // // load - load the buildfiles // search & match - search prerequisites and match rules // execute - execute the matched rule // // The phase can only be changed during serial or exclusive execution // (see below). // extern run_phase phase; // The build system model (internal state) is protected at the top level by // the model mutex. During serial execution the model mutex is unlocked. // extern shared_mutex model_mutex; // Parallel execution always starts with acquiring a shared model lock (by // creating model_slock; see below). Pointers to these locks are cached in // the model_lock TLS variable (which is NULL during serial execution). // // The build system starts with a "serial load" phase and then continues // with parallel search & match and execute. Search & match, however, can be // interrupted with an "exclusive load" by re-locking the shared lock as // exclusive (using model_rlock below), changing the phase, and loading // additional buildfiles. // // Serial load can perform arbitrary changes to the model. Exclusive load, // however, can only perform "island appends". That is, it can create new // "nodes" (variables, scopes, etc) but not change already existing nodes or // invalidate any references to such (the idea here is that one should be // able to load additional buildfiles as long as they don't interfere with // the existing build state). The "islands" are identified by the // load_generation number (0 for serial load). It is incremented/restored by // phase_guard and is stored in various "nodes" (variables, etc) to allow // modifications "within the islands". 
  //
  // @@ MT: do we really have to hold shared lock during execute?
  // @@ MT: we can also interrupt load s&m with execute -- neither handled
  //        nor documented.
  //

  // This thread's shared model lock (see model_slock below) or NULL during
  // serial execution.
  //
  extern
#ifdef __cpp_thread_local
  thread_local
#else
  __thread
#endif
  slock* model_lock;

  // Load "island" generation: 0 for serial load; incremented on entering
  // the load phase via phase_guard (see above).
  //
  extern size_t load_generation;

  // Set the phase for the lifetime of this object, restoring the previous
  // phase on destruction. Entering the load phase also starts a new load
  // generation. Note that this struct does no synchronization of its own:
  // the phase can only be changed during serial or exclusive execution.
  //
  struct phase_guard
  {
    explicit
    phase_guard (run_phase p)
        : o (phase)
    {
      phase = p;

      if (phase == run_phase::load)
        ++load_generation;
    }

    ~phase_guard ()
    {
      if (phase == run_phase::load)
        --load_generation;

      phase = o;
    }

    run_phase o; // Previous phase, restored by the destructor.
  };

  // A shared model lock. If there is already an instance of model_slock in
  // this thread, then the new instance simply references it (asserting
  // that it is locked).
  //
  // The reason for this semantics is to support the following scheduling
  // pattern:
  //
  // scheduler::atomic_count task_count (0);
  //
  // {
  //   model_slock ml;                        // (1)
  //
  //   for (...)
  //   {
  //     sched.async (task_count,
  //                  [] (...)
  //                  {
  //                    model_slock ml;       // (2)
  //                    ...
  //                  },
  //                  ...);
  //   }
  // }
  //
  // sched.wait ();                           // (3)
  //
  // Here is what's going on here:
  //
  // 1. We first get a shared lock "for ourselves" since after the first
  //    iteration of the loop, things may become asynchronous (including
  //    attempts to relock for exclusive access and change the structure we
  //    are iterating upon).
  //
  // 2. The task can be queued or it can be executed synchronously inside
  //    async() (refer to the scheduler class for details on this
  //    semantics).
  //
  //    If this is an async()-synchronous execution, then the task will
  //    create a referencing model_slock. If, however, this is a queued
  //    execution (including wait()-synchronous), then the task will create
  //    a top-level model_slock.
  //
  //    Note that we only acquire the lock once the task starts executing
  //    (there is no reason to hold the lock while the task is sitting in
  //    the queue). This optimization assumes that whatever else we pass to
  //    the task (for example, a reference to a target) is immutable (so
  //    such a reference cannot become invalid).
  //
  // 3. Before calling wait(), we release our shared lock to allow
  //    re-locking for exclusive access. And once wait() returns we are
  //    again running serially.
  //
  struct model_slock
  {
    model_slock ()
    {
      // Reuse the lock this thread already holds; otherwise become the
      // top-level (owning) instance and publish our lock in the TLS cache.
      //
      if (slock* l = model_lock)
        assert (l->owns_lock ());
      else
        model_lock = &(l_ = slock (model_mutex));
    }

    ~model_slock ()
    {
      // Only the top-level instance (the one whose l_ is published) clears
      // the TLS cache; referencing instances leave it alone.
      //
      if (&l_ == model_lock)
        model_lock = nullptr;
    }

    operator slock& () {return *model_lock;}
    operator const slock& () const {return *model_lock;}

  private:
    slock l_; // Unlocked unless this is the top-level instance.
  };

  // Re-lock shared to exclusive for the lifetime of rlock. If the thread
  // holds no shared lock (serial execution), this is a no-op.
  //
  struct model_rlock
  {
    model_rlock ()
        : sl_ (model_lock)
    {
      if (sl_ != nullptr)
      {
        // Note: not atomic -- another thread may grab the mutex between
        // the unlock and the exclusive lock.
        //
        sl_->unlock ();
        ul_ = ulock (*sl_->mutex ());
      }
    }

    ~model_rlock ()
    {
      if (sl_ != nullptr)
      {
        ul_.unlock ();
        sl_->lock ();
      }
    }

    // Can be treated as const ulock.
    //
    operator const ulock& () const {return ul_;}

  private:
    slock* sl_; // This thread's shared lock or NULL.
    ulock ul_;  // Held only if sl_ is not NULL.
  };

  // Cached variables.
  //
  extern const variable* var_src_root;
  extern const variable* var_out_root;
  extern const variable* var_src_base;
  extern const variable* var_out_base;
  extern const variable* var_project;
  extern const variable* var_amalgamation;
  extern const variable* var_subprojects;
  extern const variable* var_import_target; // import.target

  // Current action (meta/operation).
  //
  // The names unlike info are available during boot but may not yet be
  // lifted. The name is always for an outer operation (or meta operation
  // that hasn't been recognized as such yet).
// extern const string* current_mname; extern const string* current_oname; extern const meta_operation_info* current_mif; extern const operation_info* current_inner_oif; extern const operation_info* current_outer_oif; extern execution_mode current_mode; inline void set_current_mif (const meta_operation_info& mif) { current_mif = &mif; current_mname = &mif.name; } inline void set_current_oif (const operation_info& inner_oif, const operation_info* outer_oif = nullptr) { current_inner_oif = &inner_oif; current_outer_oif = outer_oif; current_oname = &(outer_oif == nullptr ? inner_oif : *outer_oif).name; current_mode = inner_oif.mode; } // Total number of dependency relationships in the current action. // Together with the target::dependents count it is incremented // during the rule search & match phase and is decremented during // execution with the expectation of it reaching 0. Used as a sanity // check. // extern atomic_count dependency_count; // Variable override value cache. // extern variable_override_cache var_override_cache; // Reset the build state. In particular, this removes all the targets, // scopes, and variables. // variable_overrides reset (const strings& cmd_vars); // Return the project name or empty string if unnamed. // inline const string& project (const scope& root) { auto l (root[var_project]); return l ? cast (l) : empty_string; } // Return the src/out directory corresponding to the given out/src. The // passed directory should be a sub-directory of out/src_root. // dir_path src_out (const dir_path& out, const scope& root); dir_path src_out (const dir_path& out, const dir_path& out_root, const dir_path& src_root); dir_path out_src (const dir_path& src, const scope& root); dir_path out_src (const dir_path& src, const dir_path& out_root, const dir_path& src_root); // Action phrases, e.g., "configure update exe{foo}", "updating exe{foo}", // and "updating exe{foo} is configured". 
Use like this: // // info << "while " << diag_doing (a, t); // class target; struct diag_phrase { const action& a; const target& t; void (*f) (ostream&, const action&, const target&); }; inline ostream& operator<< (ostream& os, const diag_phrase& p) { p.f (os, p.a, p.t); return os; } void diag_do (ostream&, const action&, const target&); inline diag_phrase diag_do (const action& a, const target& t) { return diag_phrase {a, t, &diag_do}; } void diag_doing (ostream&, const action&, const target&); inline diag_phrase diag_doing (const action& a, const target& t) { return diag_phrase {a, t, &diag_doing}; } void diag_done (ostream&, const action&, const target&); inline diag_phrase diag_done (const action& a, const target& t) { return diag_phrase {a, t, &diag_done}; } } #endif // BUILD2_CONTEXT