From 57b10c06925d0bdf6ffb38488ee908f085109e95 Mon Sep 17 00:00:00 2001
From: Karen Arutyunov
Date: Thu, 4 Jul 2019 19:12:15 +0300
Subject: Move config, dist, test, and install modules into library

---
 libbuild2/buildfile | 4 +
 libbuild2/config/init.cxx | 159 +
 libbuild2/config/init.hxx | 36 +
 libbuild2/config/module.cxx | 54 +
 libbuild2/config/module.hxx | 93 +
 libbuild2/config/operation.cxx | 997 ++++
 libbuild2/config/operation.hxx | 29 +
 libbuild2/config/utility.cxx | 307 ++
 libbuild2/config/utility.hxx | 179 +
 libbuild2/config/utility.txx | 66 +
 libbuild2/dist/init.cxx | 192 ++
 libbuild2/dist/init.hxx | 36 +
 libbuild2/dist/module.cxx | 15 +
 libbuild2/dist/module.hxx | 71 +
 libbuild2/dist/operation.cxx | 868 +++++
 libbuild2/dist/operation.hxx | 21 +
 libbuild2/dist/rule.cxx | 88 +
 libbuild2/dist/rule.hxx | 39 +
 libbuild2/install/functions.cxx | 33 +
 libbuild2/install/init.cxx | 309 ++
 libbuild2/install/init.hxx | 36 +
 libbuild2/install/operation.cxx | 84 +
 libbuild2/install/operation.hxx | 23 +
 libbuild2/install/rule.cxx | 1223 +++++++
 libbuild2/install/rule.hxx | 197 ++
 libbuild2/install/utility.hxx | 78 +
 libbuild2/test/common.cxx | 220 ++
 libbuild2/test/common.hxx | 72 +
 libbuild2/test/init.cxx | 231 ++
 libbuild2/test/init.hxx | 36 +
 libbuild2/test/module.hxx | 37 +
 libbuild2/test/operation.cxx | 55 +
 libbuild2/test/operation.hxx | 22 +
 libbuild2/test/rule.cxx | 882 +++++
 libbuild2/test/rule.hxx | 67 +
 libbuild2/test/script/builtin.cxx | 1979 +++++++++++
 libbuild2/test/script/builtin.hxx | 74 +
 .../script/lexer+command-expansion.test.testscript | 248 ++
 .../test/script/lexer+command-line.test.testscript | 208 ++
 .../script/lexer+description-line.test.testscript | 33 +
 .../test/script/lexer+first-token.test.testscript | 97 +
 .../test/script/lexer+second-token.test.testscript | 68 +
 .../script/lexer+variable-line.test.testscript | 28 +
 .../test/script/lexer+variable.test.testscript | 70 +
 libbuild2/test/script/lexer.cxx | 551 ++++
 libbuild2/test/script/lexer.hxx | 94 +
 libbuild2/test/script/lexer.test.cxx | 85 +
 .../test/script/parser+cleanup.test.testscript | 58 +
 .../test/script/parser+command-if.test.testscript | 548 ++++
 .../script/parser+command-re-parse.test.testscript | 12 +
 .../test/script/parser+description.test.testscript | 486 +++
 .../test/script/parser+directive.test.testscript | 74 +
 libbuild2/test/script/parser+exit.test.testscript | 27 +
 .../test/script/parser+expansion.test.testscript | 36 +
 .../script/parser+here-document.test.testscript | 213 ++
 .../test/script/parser+here-string.test.testscript | 19 +
 .../test/script/parser+include.test.testscript | 104 +
 .../test/script/parser+pipe-expr.test.testscript | 133 +
 .../test/script/parser+pre-parse.test.testscript | 23 +
 .../test/script/parser+redirect.test.testscript | 356 ++
 libbuild2/test/script/parser+regex.test.testscript | 223 ++
 .../test/script/parser+scope-if.test.testscript | 554 ++++
 libbuild2/test/script/parser+scope.test.testscript | 280 ++
 .../script/parser+setup-teardown.test.testscript | 151 +
 libbuild2/test/script/parser.cxx | 3451 ++++++++++++++++++++
 libbuild2/test/script/parser.hxx | 250 ++
 libbuild2/test/script/parser.test.cxx | 245 ++
 libbuild2/test/script/regex.cxx | 440 +++
 libbuild2/test/script/regex.hxx | 703 ++++
 libbuild2/test/script/regex.ixx | 35 +
 libbuild2/test/script/regex.test.cxx | 302 ++
 libbuild2/test/script/runner.cxx | 1891 +++++++++++
 libbuild2/test/script/runner.hxx | 101 +
 libbuild2/test/script/script.cxx | 741 +++++
 libbuild2/test/script/script.hxx | 559 ++++
 libbuild2/test/script/script.ixx | 60 +
 libbuild2/test/script/token.cxx | 57 +
 libbuild2/test/script/token.hxx | 65 +
 libbuild2/test/target.cxx | 63 +
 libbuild2/test/target.hxx | 31 +
 80 files changed, 22685 insertions(+)
 create mode 100644 libbuild2/config/init.cxx
 create mode 100644 libbuild2/config/init.hxx
 create mode 100644 libbuild2/config/module.cxx
 create mode 100644 libbuild2/config/module.hxx
 create mode 100644 libbuild2/config/operation.cxx
 create mode 100644 libbuild2/config/operation.hxx
 create mode 100644 libbuild2/config/utility.cxx
 create mode 100644 libbuild2/config/utility.hxx
 create mode 100644 libbuild2/config/utility.txx
 create mode 100644 libbuild2/dist/init.cxx
 create mode 100644 libbuild2/dist/init.hxx
 create mode 100644 libbuild2/dist/module.cxx
 create mode 100644 libbuild2/dist/module.hxx
 create mode 100644 libbuild2/dist/operation.cxx
 create mode 100644 libbuild2/dist/operation.hxx
 create mode 100644 libbuild2/dist/rule.cxx
 create mode 100644 libbuild2/dist/rule.hxx
 create mode 100644 libbuild2/install/functions.cxx
 create mode 100644 libbuild2/install/init.cxx
 create mode 100644 libbuild2/install/init.hxx
 create mode 100644 libbuild2/install/operation.cxx
 create mode 100644 libbuild2/install/operation.hxx
 create mode 100644 libbuild2/install/rule.cxx
 create mode 100644 libbuild2/install/rule.hxx
 create mode 100644 libbuild2/install/utility.hxx
 create mode 100644 libbuild2/test/common.cxx
 create mode 100644 libbuild2/test/common.hxx
 create mode 100644 libbuild2/test/init.cxx
 create mode 100644 libbuild2/test/init.hxx
 create mode 100644 libbuild2/test/module.hxx
 create mode 100644 libbuild2/test/operation.cxx
 create mode 100644 libbuild2/test/operation.hxx
 create mode 100644 libbuild2/test/rule.cxx
 create mode 100644 libbuild2/test/rule.hxx
 create mode 100644 libbuild2/test/script/builtin.cxx
 create mode 100644 libbuild2/test/script/builtin.hxx
 create mode 100644 libbuild2/test/script/lexer+command-expansion.test.testscript
 create mode 100644 libbuild2/test/script/lexer+command-line.test.testscript
 create mode 100644 libbuild2/test/script/lexer+description-line.test.testscript
 create mode 100644 libbuild2/test/script/lexer+first-token.test.testscript
 create mode 100644 libbuild2/test/script/lexer+second-token.test.testscript
 create mode 100644 libbuild2/test/script/lexer+variable-line.test.testscript
 create mode 100644 libbuild2/test/script/lexer+variable.test.testscript
 create mode 100644 libbuild2/test/script/lexer.cxx
 create mode 100644 libbuild2/test/script/lexer.hxx
 create mode 100644 libbuild2/test/script/lexer.test.cxx
 create mode 100644 libbuild2/test/script/parser+cleanup.test.testscript
 create mode 100644 libbuild2/test/script/parser+command-if.test.testscript
 create mode 100644 libbuild2/test/script/parser+command-re-parse.test.testscript
 create mode 100644 libbuild2/test/script/parser+description.test.testscript
 create mode 100644 libbuild2/test/script/parser+directive.test.testscript
 create mode 100644 libbuild2/test/script/parser+exit.test.testscript
 create mode 100644 libbuild2/test/script/parser+expansion.test.testscript
 create mode 100644 libbuild2/test/script/parser+here-document.test.testscript
 create mode 100644 libbuild2/test/script/parser+here-string.test.testscript
 create mode 100644 libbuild2/test/script/parser+include.test.testscript
 create mode 100644 libbuild2/test/script/parser+pipe-expr.test.testscript
 create mode 100644 libbuild2/test/script/parser+pre-parse.test.testscript
 create mode 100644
libbuild2/test/script/parser+redirect.test.testscript create mode 100644 libbuild2/test/script/parser+regex.test.testscript create mode 100644 libbuild2/test/script/parser+scope-if.test.testscript create mode 100644 libbuild2/test/script/parser+scope.test.testscript create mode 100644 libbuild2/test/script/parser+setup-teardown.test.testscript create mode 100644 libbuild2/test/script/parser.cxx create mode 100644 libbuild2/test/script/parser.hxx create mode 100644 libbuild2/test/script/parser.test.cxx create mode 100644 libbuild2/test/script/regex.cxx create mode 100644 libbuild2/test/script/regex.hxx create mode 100644 libbuild2/test/script/regex.ixx create mode 100644 libbuild2/test/script/regex.test.cxx create mode 100644 libbuild2/test/script/runner.cxx create mode 100644 libbuild2/test/script/runner.hxx create mode 100644 libbuild2/test/script/script.cxx create mode 100644 libbuild2/test/script/script.hxx create mode 100644 libbuild2/test/script/script.ixx create mode 100644 libbuild2/test/script/token.cxx create mode 100644 libbuild2/test/script/token.hxx create mode 100644 libbuild2/test/target.cxx create mode 100644 libbuild2/test/target.hxx (limited to 'libbuild2') diff --git a/libbuild2/buildfile b/libbuild2/buildfile index 99f616c..f23f023 100644 --- a/libbuild2/buildfile +++ b/libbuild2/buildfile @@ -4,6 +4,10 @@ import int_libs = libbutl%lib{butl} +# The config, test, install, and dist are "core modules" that come bundled +# with libbuild2. Note that the core can still function without them or with +# their alternative implementations. +# ./: lib{build2}: libul{build2}: {hxx ixx txx cxx}{** -config \ -version \ -**.test...} \ diff --git a/libbuild2/config/init.cxx b/libbuild2/config/init.cxx new file mode 100644 index 0000000..73275c6 --- /dev/null +++ b/libbuild2/config/init.cxx @@ -0,0 +1,159 @@ +// file : libbuild2/config/init.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include +#include +#include +#include // exists() +#include + +#include +#include +#include + +using namespace std; +using namespace butl; + +namespace build2 +{ + namespace config + { + bool + boot (scope& rs, const location&, unique_ptr& mod) + { + tracer trace ("config::boot"); + + l5 ([&]{trace << "for " << rs;}); + + const string& mname (current_mname); + const string& oname (current_oname); + + // Only create the module if we are configuring or creating. This is a + // bit tricky since the build2 core may not yet know if this is the + // case. But we know. + // + if (( mname == "configure" || mname == "create") || + (mname.empty () && (oname == "configure" || oname == "create"))) + { + unique_ptr m (new module); + + // Adjust priority for the import pseudo-module so that + // config.import.* values come first in config.build. + // + m->save_module ("import", INT32_MIN); + + mod = move (m); + } + + // Register meta-operations. Note that we don't register create_id + // since it will be pre-processed into configure. + // + rs.insert_meta_operation (configure_id, mo_configure); + rs.insert_meta_operation (disfigure_id, mo_disfigure); + + return true; // Initialize first (load config.build). 
+ } + + bool + init (scope& rs, + scope&, + const location& l, + unique_ptr&, + bool first, + bool, + const variable_map& config_hints) + { + tracer trace ("config::init"); + + if (!first) + { + warn (l) << "multiple config module initializations"; + return true; + } + + const dir_path& out_root (rs.out_path ()); + l5 ([&]{trace << "for " << out_root;}); + + assert (config_hints.empty ()); // We don't known any hints. + + auto& vp (var_pool.rw (rs)); + + // Load config.build if one exists (we don't need to worry about + // disfigure since we will never be init'ed). + // + const variable& c_v (vp.insert ("config.version", false)); + + { + path f (config_file (rs)); + + if (exists (f)) + { + // Check the config version. We assume that old versions cannot + // understand new configs and new versions are incompatible with old + // configs. + // + // We extract the value manually instead of loading and then + // checking in order to be able to fixup/migrate the file which we + // may want to do in the future. + // + { + // Assume missing version is 0. + // + auto p (extract_variable (f, c_v)); + uint64_t v (p.second ? cast (p.first) : 0); + + if (v != module::version) + fail (l) << "incompatible config file " << f << + info << "config file version " << v + << (p.second ? "" : " (missing)") << + info << "config module version " << module::version << + info << "consider reconfiguring " << project (rs) << '@' + << out_root; + } + + source (rs, rs, f); + } + } + + // Register alias and fallback rule for the configure meta-operation. + // + // We need this rule for out-of-any-project dependencies (e.g., + // libraries imported from /usr/lib). We are registring it on the + // global scope similar to builtin rules. + // + { + auto& r (rs.global ().rules); + r.insert ( + configure_id, 0, "config.file", file_rule::instance); + } + { + auto& r (rs.rules); + + //@@ outer + r.insert (configure_id, 0, "config.alias", alias_rule::instance); + + // This allows a custom configure rule while doing nothing by default. + // + r.insert (configure_id, 0, "config", noop_rule::instance); + r.insert (configure_id, 0, "config.file", noop_rule::instance); + } + + return true; + } + + module_functions + build2_config_load () + { + // Initialize the config entry points in the build system core. 
+ // + config_save_variable = &config::save_variable; + config_preprocess_create = &config::preprocess_create; + + return module_functions {&boot, &init}; + } + } +} diff --git a/libbuild2/config/init.hxx b/libbuild2/config/init.hxx new file mode 100644 index 0000000..ff5e923 --- /dev/null +++ b/libbuild2/config/init.hxx @@ -0,0 +1,36 @@ +// file : libbuild2/config/init.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_CONFIG_INIT_HXX +#define LIBBUILD2_CONFIG_INIT_HXX + +#include +#include + +#include + +#include + +namespace build2 +{ + namespace config + { + bool + boot (scope&, const location&, unique_ptr&); + + bool + init (scope&, + scope&, + const location&, + unique_ptr&, + bool, + bool, + const variable_map&); + + extern "C" LIBBUILD2_SYMEXPORT module_functions + build2_config_load (); + } +} + +#endif // LIBBUILD2_CONFIG_INIT_HXX diff --git a/libbuild2/config/module.cxx b/libbuild2/config/module.cxx new file mode 100644 index 0000000..7e9b765 --- /dev/null +++ b/libbuild2/config/module.cxx @@ -0,0 +1,54 @@ +// file : libbuild2/config/module.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +using namespace std; + +namespace build2 +{ + namespace config + { + void module:: + save_variable (const variable& var, uint64_t flags) + { + const string& n (var.name); + + // First try to find the module with the name that is the longest + // prefix of this variable name. + // + auto& sm (saved_modules); + auto i (sm.find_sup (n)); + + // If no module matched, then create one based on the variable name. + // + if (i == sm.end ()) + { + // @@ For now with 'config.' prefix. + // + i = sm.insert (string (n, 0, n.find ('.', 7))); + } + + // Don't insert duplicates. The config.import vars are particularly + // susceptible to duplication. + // + saved_variables& sv (i->second); + auto j (sv.find (var)); + + if (j == sv.end ()) + sv.push_back (saved_variable {var, flags}); + else + assert (j->flags == flags); + } + + void module:: + save_module (const char* name, int prio) + { + saved_modules.insert (string ("config.") += name, prio); + } + + const string module::name ("config"); + const uint64_t module::version (1); + } +} diff --git a/libbuild2/config/module.hxx b/libbuild2/config/module.hxx new file mode 100644 index 0000000..6222319 --- /dev/null +++ b/libbuild2/config/module.hxx @@ -0,0 +1,93 @@ +// file : libbuild2/config/module.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_CONFIG_MODULE_HXX +#define LIBBUILD2_CONFIG_MODULE_HXX + +#include + +#include + +#include +#include + +#include +#include + +namespace build2 +{ + namespace config + { + // An ordered list of modules each with an ordered list of list of + // config.* variables and their "save flags" (see save_variable()) that + // are used (as opposed to just being specified) in this configuration. + // Populated by the config utility functions (required(), optional()) + // and saved in the order populated. + // + struct saved_variable + { + reference_wrapper var; + uint64_t flags; + }; + + struct saved_variables: vector + { + // Normally each module only have a handful of config variables and we + // only do this during configuration so for now we do linear search + // instead of adding a map. 
+ // + const_iterator + find (const variable& var) const + { + return std::find_if ( + begin (), + end (), + [&var] (const saved_variable& v) {return var == v.var;}); + } + }; + + struct saved_modules: butl::prefix_map + { + // Priority order with INT32_MIN being the highest. Modules with the + // same priority are saved in the order inserted. + // + // Generally, the idea is that we want higher-level modules at the top + // of the file since that's the configuration that we usualy want to + // change. So we have the following priority bands/defaults: + // + // 101-200/150 - code generators (e.g., yacc, bison) + // 201-300/250 - compilers (e.g., C, C++), + // 301-400/350 - binutils (ar, ld) + // + std::multimap order; + + iterator + insert (string name, int prio = 0) + { + auto p (emplace (move (name), saved_variables ())); + + if (p.second) + order.emplace (prio, p.first); + + return p.first; + } + }; + + struct module: module_base + { + config::saved_modules saved_modules; + + void + save_variable (const variable&, uint64_t flags = 0); + + void + save_module (const char* name, int prio = 0); + + static const string name; + static const uint64_t version; + }; + } +} + +#endif // LIBBUILD2_CONFIG_MODULE_HXX diff --git a/libbuild2/config/operation.cxx b/libbuild2/config/operation.cxx new file mode 100644 index 0000000..c3ce4b7 --- /dev/null +++ b/libbuild2/config/operation.cxx @@ -0,0 +1,997 @@ +// file : libbuild2/config/operation.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +using namespace std; +using namespace butl; + +namespace build2 +{ + namespace config + { + // configure + // + static void + save_src_root (const scope& root) + { + const dir_path& out_root (root.out_path ()); + const dir_path& src_root (root.src_path ()); + + path f (out_root / root.root_extra->src_root_file); + + if (verb >= 2) + text << "cat >" << f; + + try + { + ofdstream ofs (f); + + ofs << "# Created automatically by the config module." << endl + << "#" << endl + << "src_root = "; + to_stream (ofs, name (src_root), true, '@'); // Quote. + ofs << endl; + + ofs.close (); + } + catch (const io_error& e) + { + fail << "unable to write " << f << ": " << e; + } + } + + static void + save_out_root (const scope& root) + { + const dir_path& out_root (root.out_path ()); + const dir_path& src_root (root.src_path ()); + + path f (src_root / root.root_extra->out_root_file); + + if (verb) + text << (verb >= 2 ? "cat >" : "save ") << f; + + try + { + ofdstream ofs (f); + + ofs << "# Created automatically by the config module." << endl + << "#" << endl + << "out_root = "; + to_stream (ofs, name (out_root), true, '@'); // Quote. + ofs << endl; + + ofs.close (); + } + catch (const io_error& e) + { + fail << "unable to write " << f << ": " << e; + } + } + + using project_set = set; // Use pointers to get comparison. + + static void + save_config (const scope& root, const project_set& projects) + { + path f (config_file (root)); + + if (verb) + text << (verb >= 2 ? "cat >" : "save ") << f; + + const module& mod (*root.lookup_module (module::name)); + + try + { + ofdstream ofs (f); + + ofs << "# Created automatically by the config module, but feel " << + "free to edit." 
<< endl + << "#" << endl; + + ofs << "config.version = " << module::version << endl; + + if (auto l = root.vars[var_amalgamation]) + { + const dir_path& d (cast (l)); + + ofs << endl + << "# Base configuration inherited from " << d << endl + << "#" << endl; + } + + // Save config variables. + // + names storage; + + for (auto p: mod.saved_modules.order) + { + const string& sname (p.second->first); + const saved_variables& svars (p.second->second); + + bool first (true); // Separate modules with a blank line. + for (const saved_variable& sv: svars) + { + const variable& var (sv.var); + + pair org (root.find_original (var)); + pair ovr (var.overrides == nullptr + ? org + : root.find_override (var, org)); + const lookup& l (ovr.first); + + // We definitely write values that are set on our root scope or + // are global overrides. Anything in-between is presumably + // inherited. We might also not have any value at all (see + // unconfigured()). + // + if (!l.defined ()) + continue; + + if (!(l.belongs (root) || l.belongs (*global_scope))) + { + // This is presumably an inherited value. But it could also be + // some left-over garbage. For example, an amalgamation could + // have used a module but then dropped it while its config + // values are still lingering in config.build. They are probably + // still valid and we should probably continue using them but we + // definitely want to move them to our config.build since they + // will be dropped from the amalgamation's config.build. Let's + // also warn the user just in case. + // + // There is also another case that falls under this now that + // overrides are by default amalgamation-wide rather than just + // "project and subprojects": we may be (re-)configuring a + // subproject but the override is now set on the outer project's + // root. + // + bool found (false); + const scope* r (&root); + while ((r = r->parent_scope ()->root_scope ()) != nullptr) + { + if (l.belongs (*r)) + { + // Find the config module. + // + if (auto* m = r->lookup_module (module::name)) + { + // Find the corresponding saved module. + // + auto i (m->saved_modules.find (sname)); + + if (i != m->saved_modules.end ()) + { + // Find the variable. + // + const saved_variables& sv (i->second); + found = sv.find (var) != sv.end (); + + // Handle that other case: if this is an override but + // the outer project itself is not being configured, + // then we need to save this override. + // + // One problem with using the already configured project + // set is that the outer project may be configured only + // after us in which case both projects will save the + // value. But perhaps this is a feature, not a bug since + // this is how project-local (%) override behaves. + // + if (found && + org.first != ovr.first && + projects.find (r) == projects.end ()) + found = false; + } + } + + break; + } + } + + if (found) // Inherited. + continue; + + location loc (&f); + + // If this value is not defined in a project's root scope, then + // something is broken. + // + if (r == nullptr) + fail (loc) << "inherited variable " << var << " value " + << "is not from a root scope"; + + // If none of the outer project's configurations use this value, + // then we warn and save as our own. One special case where we + // don't want to warn the user is if the variable is overriden. 
+ // + if (org.first == ovr.first) + { + diag_record dr; + dr << warn (loc) << "saving previously inherited variable " + << var; + + dr << info (loc) << "because project " << *r + << " no longer uses it in its configuration"; + + if (verb >= 2) + { + dr << info (loc) << "variable value: "; + + if (*l) + { + storage.clear (); + dr << "'" << reverse (*l, storage) << "'"; + } + else + dr << "[null]"; + } + } + } + + const string& n (var.name); + const value& v (*l); + + // We will only write config.*.configured if it is false (true is + // implied by its absence). We will also ignore false values if + // there is any other value for this module (see unconfigured()). + // + if (n.size () > 11 && + n.compare (n.size () - 11, 11, ".configured") == 0) + { + if (cast (v) || svars.size () != 1) + continue; + } + + // If we got here then we are saving this variable. Handle the + // blank line. + // + if (first) + { + ofs << endl; + first = false; + } + + // Handle the save_commented flag. + // + if ((org.first.defined () && org.first->extra) && // Default value. + org.first == ovr.first && // Not overriden. + (sv.flags & save_commented) == save_commented) + { + ofs << '#' << n << " =" << endl; + continue; + } + + if (v) + { + storage.clear (); + names_view ns (reverse (v, storage)); + + ofs << n; + + if (ns.empty ()) + ofs << " ="; + else + { + ofs << " = "; + to_stream (ofs, ns, true, '@'); // Quote. + } + + ofs << endl; + } + else + ofs << n << " = [null]" << endl; + } + } + + ofs.close (); + } + catch (const io_error& e) + { + fail << "unable to write " << f << ": " << e; + } + } + + static void + configure_project (action a, const scope& root, project_set& projects) + { + tracer trace ("configure_project"); + + const dir_path& out_root (root.out_path ()); + const dir_path& src_root (root.src_path ()); + + if (!projects.insert (&root).second) + { + l5 ([&]{trace << "skipping already configured " << out_root;}); + return; + } + + // Make sure the directories exist. + // + if (out_root != src_root) + { + mkdir_p (out_root / root.root_extra->build_dir); + mkdir (out_root / root.root_extra->bootstrap_dir, 2); + } + + // We distinguish between a complete configure and operation- + // specific. + // + if (a.operation () == default_id) + { + l5 ([&]{trace << "completely configuring " << out_root;}); + + // Save src-root.build unless out_root is the same as src. + // + if (out_root != src_root) + save_src_root (root); + + // Save config.build. + // + save_config (root, projects); + } + else + { + } + + // Configure subprojects that have been loaded. + // + if (auto l = root.vars[var_subprojects]) + { + for (auto p: cast (l)) + { + const dir_path& pd (p.second); + dir_path out_nroot (out_root / pd); + const scope& nroot (scopes.find (out_nroot)); + + // @@ Strictly speaking we need to check whether the config + // module was loaded for this subproject. + // + if (nroot.out_path () != out_nroot) // This subproject not loaded. + continue; + + configure_project (a, nroot, projects); + } + } + } + + static void + configure_forward (const scope& root, project_set& projects) + { + tracer trace ("configure_forward"); + + const dir_path& out_root (root.out_path ()); + const dir_path& src_root (root.src_path ()); + + if (!projects.insert (&root).second) + { + l5 ([&]{trace << "skipping already configured " << src_root;}); + return; + } + + mkdir (src_root / root.root_extra->bootstrap_dir, 2); // Make sure exists. + save_out_root (root); + + // Configure subprojects. 
Since we don't load buildfiles if configuring + // a forward, we do it for all known subprojects. + // + if (auto l = root.vars[var_subprojects]) + { + for (auto p: cast (l)) + { + dir_path out_nroot (out_root / p.second); + const scope& nroot (scopes.find (out_nroot)); + assert (nroot.out_path () == out_nroot); + + configure_forward (nroot, projects); + } + } + } + + operation_id (*pre) (const values&, meta_operation_id, const location&); + + static operation_id + configure_operation_pre (const values&, operation_id o) + { + // Don't translate default to update. In our case unspecified + // means configure everything. + // + return o; + } + + // The (vague) idea is that in the future we may turn this into to some + // sort of key-value sequence (similar to the config initializer idea), + // for example: + // + // configure(out/@src/, forward foo bar@123) + // + // Though using commas instead spaces and '=' instead of '@' would have + // been nicer. + // + static bool + forward (const values& params, + const char* mo = nullptr, + const location& l = location ()) + { + if (params.size () == 1) + { + const names& ns (cast (params[0])); + + if (ns.size () == 1 && ns[0].simple () && ns[0].value == "forward") + return true; + else if (!ns.empty ()) + fail (l) << "unexpected parameter '" << ns << "' for " + << "meta-operation " << mo; + } + else if (!params.empty ()) + fail (l) << "unexpected parameters for meta-operation " << mo; + + return false; + } + + static void + configure_pre (const values& params, const location& l) + { + forward (params, "configure", l); // Validate. + } + + static void + configure_load (const values& params, + scope& root, + const path& buildfile, + const dir_path& out_base, + const dir_path& src_base, + const location& l) + { + if (forward (params)) + { + // We don't need to load the buildfiles in order to configure + // forwarding but in order to configure subprojects we have to + // bootstrap them (similar to disfigure). + // + create_bootstrap_inner (root); + + if (root.out_path () == root.src_path ()) + fail (l) << "forwarding to source directory " << root.src_path (); + } + else + load (params, root, buildfile, out_base, src_base, l); // Normal load. + } + + static void + configure_search (const values& params, + const scope& root, + const scope& base, + const path& bf, + const target_key& tk, + const location& l, + action_targets& ts) + { + if (forward (params)) + { + // For forwarding we only collect the projects (again, similar to + // disfigure). + // + ts.push_back (&root); + } + else + search (params, root, base, bf, tk, l, ts); // Normal search. + } + + static void + configure_match (const values&, action, action_targets&, uint16_t, bool) + { + // Don't match anything -- see execute (). + } + + static void + configure_execute (const values& params, + action a, + action_targets& ts, + uint16_t, + bool) + { + bool fwd (forward (params)); + + project_set projects; + + for (const action_target& at: ts) + { + if (fwd) + { + // Forward configuration. + // + const scope& root (*static_cast (at.target)); + configure_forward (root, projects); + continue; + } + + // Normal configuration. + // + // Match rules to configure every operation supported by each project. + // Note that we are not calling operation_pre/post() callbacks here + // since the meta operation is configure and we know what we are + // doing. + // + // Note that we cannot do this in parallel. We cannot parallelize the + // outer loop because we should match for a single action at a time. 
+ // And we cannot swap the loops because the list of operations is + // target-specific. However, inside match(), things can proceed in + // parallel. + // + const target& t (at.as_target ()); + const scope* rs (t.base_scope ().root_scope ()); + + if (rs == nullptr) + fail << "out of project target " << t; + + const operations& ops (rs->root_extra->operations); + + for (operation_id id (default_id + 1); // Skip default_id. + id < ops.size (); + ++id) + { + if (const operation_info* oif = ops[id]) + { + // Skip aliases (e.g., update-for-install). + // + if (oif->id != id) + continue; + + set_current_oif (*oif); + + phase_lock pl (run_phase::match); + match (action (configure_id, id), t); + } + } + + configure_project (a, *rs, projects); + } + } + + const meta_operation_info mo_configure { + configure_id, + "configure", + "configure", + "configuring", + "configured", + "is configured", + true, // bootstrap_outer + &configure_pre, // meta-operation pre + &configure_operation_pre, + &configure_load, // normal load unless configuring forward + &configure_search, // normal search unless configuring forward + &configure_match, + &configure_execute, + nullptr, // operation post + nullptr, // meta-operation post + nullptr // include + }; + + // disfigure + // + + static bool + disfigure_project (action a, const scope& root, project_set& projects) + { + tracer trace ("disfigure_project"); + + const dir_path& out_root (root.out_path ()); + const dir_path& src_root (root.src_path ()); + + if (!projects.insert (&root).second) + { + l5 ([&]{trace << "skipping already disfigured " << out_root;}); + return false; + } + + bool r (false); // Keep track of whether we actually did anything. + + // Disfigure subprojects. Since we don't load buildfiles during + // disfigure, we do it for all known subprojects. + // + if (auto l = root.vars[var_subprojects]) + { + for (auto p: cast (l)) + { + const dir_path& pd (p.second); + dir_path out_nroot (out_root / pd); + const scope& nroot (scopes.find (out_nroot)); + assert (nroot.out_path () == out_nroot); // See disfigure_load(). + + r = disfigure_project (a, nroot, projects) || r; + + // We use mkdir_p() to create the out_root of a subproject + // which means there could be empty parent directories left + // behind. Clean them up. + // + if (!pd.simple () && out_root != src_root) + { + for (dir_path d (pd.directory ()); + !d.empty (); + d = d.directory ()) + { + rmdir_status s (rmdir (out_root / d, 2)); + + if (s == rmdir_status::not_empty) + break; // No use trying do remove parent ones. + + r = (s == rmdir_status::success) || r; + } + } + } + } + + // We distinguish between a complete disfigure and operation- + // specific. + // + if (a.operation () == default_id) + { + l5 ([&]{trace << "completely disfiguring " << out_root;}); + + r = rmfile (config_file (root)) || r; + + if (out_root != src_root) + { + r = rmfile (out_root / root.root_extra->src_root_file, 2) || r; + + // Clean up the directories. + // + // Note: try to remove the root/ hooks directory if it is empty. + // + r = rmdir (out_root / root.root_extra->root_dir, 2) || r; + r = rmdir (out_root / root.root_extra->bootstrap_dir, 2) || r; + r = rmdir (out_root / root.root_extra->build_dir, 2) || r; + + switch (rmdir (out_root)) + { + case rmdir_status::not_empty: + { + // We used to issue a warning but it is actually a valid usecase + // to leave the build output around in case, for example, of a + // reconfigure. + // + if (verb) + info << "directory " << out_root << " is " + << (out_root == work + ? 
"current working directory" + : "not empty") << ", not removing"; + break; + } + case rmdir_status::success: + r = true; + default: + break; + } + } + } + else + { + } + + return r; + } + + static bool + disfigure_forward (const scope& root, project_set& projects) + { + // Pretty similar logic to disfigure_project(). + // + tracer trace ("disfigure_forward"); + + const dir_path& out_root (root.out_path ()); + const dir_path& src_root (root.src_path ()); + + if (!projects.insert (&root).second) + { + l5 ([&]{trace << "skipping already disfigured " << src_root;}); + return false; + } + + bool r (false); + + if (auto l = root.vars[var_subprojects]) + { + for (auto p: cast (l)) + { + dir_path out_nroot (out_root / p.second); + const scope& nroot (scopes.find (out_nroot)); + assert (nroot.out_path () == out_nroot); + + r = disfigure_forward (nroot, projects) || r; + } + } + + // Remove the out-root.build file and try to remove the bootstrap/ + // directory if it is empty. + // + r = rmfile (src_root / root.root_extra->out_root_file) || r; + r = rmdir (src_root / root.root_extra->bootstrap_dir, 2) || r; + + return r; + } + + static void + disfigure_pre (const values& params, const location& l) + { + forward (params, "disfigure", l); // Validate. + } + + static operation_id + disfigure_operation_pre (const values&, operation_id o) + { + // Don't translate default to update. In our case unspecified + // means disfigure everything. + // + return o; + } + + static void + disfigure_load (const values&, + scope& root, + const path&, + const dir_path&, + const dir_path&, + const location&) + { + // Since we don't load buildfiles during disfigure but still want to + // disfigure all the subprojects (see disfigure_project() below), we + // bootstrap all the known subprojects. + // + create_bootstrap_inner (root); + } + + static void + disfigure_search (const values&, + const scope& root, + const scope&, + const path&, + const target_key&, + const location&, + action_targets& ts) + { + ts.push_back (&root); + } + + static void + disfigure_match (const values&, action, action_targets&, uint16_t, bool) + { + } + + static void + disfigure_execute (const values& params, + action a, + action_targets& ts, + uint16_t diag, + bool) + { + tracer trace ("disfigure_execute"); + + bool fwd (forward (params)); + + project_set projects; + + // Note: doing everything in the load phase (disfigure_project () does + // modify the build state). + // + for (const action_target& at: ts) + { + const scope& root (*static_cast (at.target)); + + if (!(fwd + ? disfigure_forward ( root, projects) + : disfigure_project (a, root, projects))) + { + // Create a dir{$out_root/} target to signify the project's root in + // diagnostics. Not very clean but seems harmless. + // + target& t ( + targets.insert (dir::static_type, + fwd ? root.src_path () : root.out_path (), + dir_path (), // Out tree. + "", + nullopt, + true, // Implied. 
+ trace).first); + + if (verb != 0 && diag >= 2) + info << diag_done (a, t); + } + } + } + + const meta_operation_info mo_disfigure { + disfigure_id, + "disfigure", + "disfigure", + "disfiguring", + "disfigured", + "is disfigured", + false, // bootstrap_outer + disfigure_pre, // meta-operation pre + &disfigure_operation_pre, + &disfigure_load, + &disfigure_search, + &disfigure_match, + &disfigure_execute, + nullptr, // operation post + nullptr, // meta-operation post + nullptr // include + }; + + // create + // + static void + save_config (const dir_path& d, const variable_overrides& var_ovs) + { + // Since there aren't any sub-projects yet, any config.import.* values + // that the user may want to specify won't be saved in config.build. So + // let's go ahead and mark them all to be saved. To do this, however, we + // need the config module (which is where this information is stored). + // And the module is created by init() during bootstrap. So what we are + // going to do is bootstrap the newly created project, similar to the + // way main() does it. + // + scope& gs (*scope::global_); + scope& rs (load_project (gs, d, d, false /* fwd */, false /* load */)); + module& m (*rs.lookup_module (module::name)); + + // Save all the global config.import.* variables. + // + variable_pool& vp (var_pool.rw (rs)); + for (auto p (gs.vars.find_namespace (vp.insert ("config.import"))); + p.first != p.second; + ++p.first) + { + const variable& var (p.first->first); + + // Annoyingly, this can be (always is?) one of the overrides + // (__override, __prefix, etc). + // + size_t n (var.override ()); + m.save_variable (n != 0 ? *vp.find (string (var.name, 0, n)) : var); + } + + // Now project-specific. For now we just save all of them and let + // save_config() above weed out the ones that don't apply. + // + for (const variable_override& vo: var_ovs) + { + const variable& var (vo.var); + + if (var.name.compare (0, 14, "config.import.") == 0) + m.save_variable (var); + } + } + + const string& + preprocess_create (const variable_overrides& var_ovs, + values& params, + vector_view& spec, + bool lifted, + const location& l) + { + tracer trace ("preprocess_create"); + + // The overall plan is to create the project(s), update the buildspec, + // clear the parameters, and then continue as if we were the configure + // meta-operation. + + // Start with process parameters. The first parameter, if any, is a list + // of root.build modules. The second parameter, if any, is a list of + // bootstrap.build modules. If the second is not specified, then the + // default is test, dist, and install (config is mandatory). + // + strings bmod {"test", "dist", "install"}; + strings rmod; + try + { + size_t n (params.size ()); + + if (n > 0) + rmod = convert (move (params[0])); + + if (n > 1) + bmod = convert (move (params[1])); + + if (n > 2) + fail (l) << "unexpected parameters for meta-operation create"; + } + catch (const invalid_argument& e) + { + fail (l) << "invalid module name: " << e.what (); + } + + current_oname = empty_string; // Make sure valid. + + // Now handle each target in each operation spec. + // + for (const opspec& os: spec) + { + // First do some sanity checks: there should be no explicit operation + // and our targets should all be directories. + // + if (!lifted && !os.name.empty ()) + fail (l) << "explicit operation specified for meta-operation create"; + + for (const targetspec& ts: os) + { + const name& tn (ts.name); + + // Figure out the project directory. 
This logic must be consistent + // with find_target_type() and other places (grep for ".."). + // + dir_path d; + + if (tn.simple () && + (tn.empty () || tn.value == "." || tn.value == "..")) + d = dir_path (tn.value); + else if (tn.directory ()) + d = tn.dir; + else if (tn.typed () && tn.type == "dir") + d = tn.dir / dir_path (tn.value); + else + fail(l) << "non-directory target '" << ts << "' in " + << "meta-operation create"; + + if (d.relative ()) + d = work / d; + + d.normalize (true); + + // If src_base was explicitly specified, make sure it is the same as + // the project directory. + // + if (!ts.src_base.empty ()) + { + dir_path s (ts.src_base); + + if (s.relative ()) + s = work / s; + + s.normalize (true); + + if (s != d) + fail(l) << "different src/out directories for target '" << ts + << "' in meta-operation create"; + } + + l5 ([&]{trace << "creating project in " << d;}); + + // For now we disable amalgamating this project. Sooner or later + // someone will probably want to do this, though (i.e., nested + // configurations). + // + create_project (d, + dir_path (), /* amalgamation */ + bmod, + "", /* root_pre */ + rmod, + "", /* root_post */ + true, /* config */ + true, /* buildfile */ + "the create meta-operation"); + + save_config (d, var_ovs); + } + } + + params.clear (); + return mo_configure.name; + } + } +} diff --git a/libbuild2/config/operation.hxx b/libbuild2/config/operation.hxx new file mode 100644 index 0000000..0a88f96 --- /dev/null +++ b/libbuild2/config/operation.hxx @@ -0,0 +1,29 @@ +// file : libbuild2/config/operation.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_CONFIG_OPERATION_HXX +#define LIBBUILD2_CONFIG_OPERATION_HXX + +#include +#include + +#include + +namespace build2 +{ + namespace config + { + extern const meta_operation_info mo_configure; + extern const meta_operation_info mo_disfigure; + + const string& + preprocess_create (const variable_overrides&, + values&, + vector_view&, + bool, + const location&); + } +} + +#endif // LIBBUILD2_CONFIG_OPERATION_HXX diff --git a/libbuild2/config/utility.cxx b/libbuild2/config/utility.cxx new file mode 100644 index 0000000..746639d --- /dev/null +++ b/libbuild2/config/utility.cxx @@ -0,0 +1,307 @@ +// file : libbuild2/config/utility.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include +#include +#include + +#include + +using namespace std; + +namespace build2 +{ + namespace config + { + pair + omitted (scope& r, const variable& var) + { + // This is a stripped-down version of the required() twisted + // implementation. + + pair org (r.find_original (var)); + + bool n (false); // New flag. + lookup l (org.first); + + // Treat an inherited value that was set to default as new. + // + if (l.defined () && l->extra) + n = true; + + if (var.overrides != nullptr) + { + pair ovr (r.find_override (var, move (org))); + + if (l != ovr.first) // Overriden? + { + // Override is always treated as new. + // + n = true; + l = move (ovr.first); + } + } + + if (l.defined () && current_mif->id == configure_id) + save_variable (r, var); + + return pair (l, n); + } + + lookup + optional (scope& r, const variable& var) + { + if (current_mif->id == configure_id) + save_variable (r, var); + + auto l (r[var]); + return l.defined () + ? l + : lookup (r.assign (var), var, r); // NULL. 
+ } + + bool + specified (scope& r, const string& n) + { + // Search all outer scopes for any value in this namespace. + // + // What about "pure" overrides, i.e., those without any original values? + // Well, they will also be found since their names have the original + // variable as a prefix. But do they apply? Yes, since we haven't found + // any original values, they will be "visible"; see find_override() for + // details. + // + const variable& vns (var_pool.rw (r).insert ("config." + n)); + for (scope* s (&r); s != nullptr; s = s->parent_scope ()) + { + for (auto p (s->vars.find_namespace (vns)); + p.first != p.second; + ++p.first) + { + const variable& var (p.first->first); + + // Ignore config.*.configured. + // + if (var.name.size () < 11 || + var.name.compare (var.name.size () - 11, 11, ".configured") != 0) + return true; + } + } + + return false; + } + + bool + unconfigured (scope& rs, const string& n) + { + // Pattern-typed in boot() as bool. + // + const variable& var ( + var_pool.rw (rs).insert ("config." + n + ".configured")); + + if (current_mif->id == configure_id) + save_variable (rs, var); + + auto l (rs[var]); // Include inherited values. + return l && !cast (l); + } + + bool + unconfigured (scope& rs, const string& n, bool v) + { + // Pattern-typed in boot() as bool. + // + const variable& var ( + var_pool.rw (rs).insert ("config." + n + ".configured")); + + if (current_mif->id == configure_id) + save_variable (rs, var); + + value& x (rs.assign (var)); + + if (x.null || cast (x) != !v) + { + x = !v; + return true; + } + else + return false; + } + + void + save_variable (scope& r, const variable& var, uint64_t flags) + { + if (current_mif->id != configure_id) + return; + + // The project might not be using the config module. But then how + // could we be configuring it? Good question. + // + if (module* m = r.lookup_module (module::name)) + m->save_variable (var, flags); + } + + void + save_module (scope& r, const char* name, int prio) + { + if (current_mif->id != configure_id) + return; + + if (module* m = r.lookup_module (module::name)) + m->save_module (name, prio); + } + + void + create_project (const dir_path& d, + const build2::optional& amal, + const strings& bmod, + const string& rpre, + const strings& rmod, + const string& rpos, + bool config, + bool buildfile, + const char* who, + uint16_t verbosity) + { + string hdr ("# Generated by " + string (who) + ". Edit if you know" + " what you are doing.\n" + "#"); + + // If the directory exists, verify it's empty. Otherwise, create it. + // + if (exists (d)) + { + if (!empty (d)) + fail << "directory " << d << " exists and is not empty"; + } + else + mkdir_p (d, verbosity); + + // Create the build/ subdirectory. + // + // Note that for now we use the standard build file/directory scheme. + // + mkdir (d / std_build_dir, verbosity); + + // Write build/bootstrap.build. + // + { + path f (d / std_bootstrap_file); + + if (verb >= verbosity) + text << (verb >= 2 ? "cat >" : "save ") << f; + + try + { + ofdstream ofs (f); + + ofs << hdr << endl + << "project =" << endl; + + if (amal) + { + ofs << "amalgamation ="; + + if (!amal->empty ()) + ofs << ' ' << amal->representation (); + + ofs << endl; + } + + ofs << endl; + + if (config) + ofs << "using config" << endl; + + for (const string& m: bmod) + { + if (!config || m != "config") + ofs << "using " << m << endl; + } + + ofs.close (); + } + catch (const io_error& e) + { + fail << "unable to write " << f << ": " << e; + } + } + + // Write build/root.build. 
+ // + { + path f (d / std_root_file); + + if (verb >= verbosity) + text << (verb >= 2 ? "cat >" : "save ") << f; + + try + { + ofdstream ofs (f); + + ofs << hdr << endl; + + if (!rpre.empty ()) + ofs << rpre << endl + << endl; + + for (const string& cm: rmod) + { + // If the module name start with '?', then use optional load. + // + bool opt (cm.front () == '?'); + string m (cm, opt ? 1 : 0); + + // Append .config unless the module name ends with '.', in which + // case strip it. + // + if (m.back () == '.') + m.pop_back (); + else + m += ".config"; + + ofs << "using" << (opt ? "?" : "") << " " << m << endl; + } + + if (!rpos.empty ()) + ofs << endl + << rpre << endl; + + ofs.close (); + } + catch (const io_error& e) + { + fail << "unable to write " << f << ": " << e; + } + } + + // Write root buildfile. + // + if (buildfile) + { + path f (d / std_buildfile_file); + + if (verb >= verbosity) + text << (verb >= 2 ? "cat >" : "save ") << f; + + try + { + ofdstream ofs (f); + + ofs << hdr << endl + << "./: {*/ -build/}" << endl; + + ofs.close (); + } + catch (const io_error& e) + { + fail << "unable to write " << f << ": " << e; + } + } + } + } +} diff --git a/libbuild2/config/utility.hxx b/libbuild2/config/utility.hxx new file mode 100644 index 0000000..e41aaa7 --- /dev/null +++ b/libbuild2/config/utility.hxx @@ -0,0 +1,179 @@ +// file : libbuild2/config/utility.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_CONFIG_UTILITY_HXX +#define LIBBUILD2_CONFIG_UTILITY_HXX + +#include +#include + +#include +#include +#include + +#include + +namespace build2 +{ + class scope; + + namespace config + { + // Set, if necessary, a required config.* variable. + // + // If override is true and the variable doesn't come from this root scope + // or from the command line (i.e., it is inherited from the amalgamtion), + // then its value is "overridden" to the default value on this root scope. + // See save_variable() for more information on save_flags. + // + // Return the reference to the value as well as the indication of whether + // the value is "new", that is, it was set to the default value (inherited + // or not, including overrides). We also treat command line overrides + // (inherited or not) as new. This flag is usually used to test that the + // new value is valid, print report, etc. We return the value as lookup + // (always defined) to pass alone its location (could be used to detect + // inheritance, etc). + // + // Note also that if save_flags has save_commented, then a default value + // is never considered "new" since for such variables absence of a value + // means the default value. + // + template + pair + required (scope& root, + const variable&, + const T& default_value, + bool override = false, + uint64_t save_flags = 0); + + // Note that the variable is expected to have already been registered. + // + template + inline pair + required (scope& root, + const string& name, + const T& default_value, + bool override = false, + uint64_t save_flags = 0) + { + return required ( + root, var_pool[name], default_value, override, save_flags); + } + + inline pair + required (scope& root, + const string& name, + const char* default_value, + bool override = false, + uint64_t save_flags = 0) + { + return required ( + root, name, string (default_value), override, save_flags); + } + + // As above, but leave the unspecified value as undefined rather than + // setting it to the default value. 
+ // + // This can be useful when we don't have a default value but may figure + // out some fallback. See config.bin.target for an example. + // + LIBBUILD2_SYMEXPORT pair + omitted (scope& root, const variable&); + + // Note that the variable is expected to have already been registered. + // + inline pair + omitted (scope& root, const string& name) + { + return omitted (root, var_pool[name]); + } + + // Set, if necessary, an optional config.* variable. In particular, an + // unspecified variable is set to NULL which is used to distinguish + // between the "configured as unspecified" and "not yet configured" cases. + // + // Return the value (as always defined lookup), which can be NULL. + // + // @@ Rename since clashes with the optional class template. + // + LIBBUILD2_SYMEXPORT lookup + optional (scope& root, const variable&); + + // Note that the variable is expected to have already been registered. + // + inline lookup + optional (scope& root, const string& name) + { + return optional (root, var_pool[name]); + } + + // Check whether there are any variables specified from the config + // namespace. The idea is that we can check if there are any, say, + // config.install.* values. If there are none, then we can assume + // this functionality is not (yet) used and omit writing a whole + // bunch of NULL config.install.* values to the config.build file. + // We call it omitted/delayed configuration. + // + // Note that this function detects and ignores the special + // config.*.configured variable which may be used by a module to + // "remember" that it is unconfigured (e.g., in order to avoid re- + // running the tests, etc). + // + LIBBUILD2_SYMEXPORT bool + specified (scope& root, const string& name); + + // Check if there is a false config.*.configured value. This mechanism can + // be used to "remember" that the module is left unconfigured in order to + // avoid re-running the tests, etc. + // + LIBBUILD2_SYMEXPORT bool + unconfigured (scope& root, const string& name); + + // Set the config.*.configured value. Note that you only need to set it to + // false. It will be automatically ignored if there are any other config.* + // values for this module. Return true if this sets a new value. + // + LIBBUILD2_SYMEXPORT bool + unconfigured (scope& root, const string& name, bool); + + // Enter the variable so that it is saved during configuration. See + // config::module for details. + // + const uint64_t save_commented = 0x01; // Save default value as commented. + + LIBBUILD2_SYMEXPORT void + save_variable (scope& root, const variable&, uint64_t flags = 0); + + // Establish module order/priority. See config::module for details. + // + LIBBUILD2_SYMEXPORT void + save_module (scope& root, const char* name, int prio = 0); + + // Create a project in the specified directory. + // + LIBBUILD2_SYMEXPORT void + create_project (const dir_path& d, + const build2::optional& amalgamation, + const strings& boot_modules, // Bootstrap modules. + const string& root_pre, // Extra root.build text. + const strings& root_modules, // Root modules. + const string& root_post, // Extra root.build text. + bool config, // Load config module. + bool buildfile, // Create root buildfile. + const char* who, // Who is creating it. + uint16_t verbosity = 1); // Diagnostic verbosity. + + inline path + config_file (const scope& root) + { + return (root.out_path () / + root.root_extra->build_dir / + "config." 
+ root.root_extra->build_ext); + } + } +} + +#include + +#endif // LIBBUILD2_CONFIG_UTILITY_HXX diff --git a/libbuild2/config/utility.txx b/libbuild2/config/utility.txx new file mode 100644 index 0000000..d2ffa69 --- /dev/null +++ b/libbuild2/config/utility.txx @@ -0,0 +1,66 @@ +// file : libbuild2/config/utility.txx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include +#include + +namespace build2 +{ + namespace config + { + template + pair + required (scope& root, + const variable& var, + const T& def_val, + bool def_ovr, + uint64_t save_flags) + { + // Note: see also omitted() if changing anything here. + + if (current_mif->id == configure_id) + save_variable (root, var, save_flags); + + pair org (root.find_original (var)); + + bool n (false); // New flag. + lookup l (org.first); + + // The interaction with command line overrides can get tricky. For + // example, the override to defaul value could make (non-recursive) + // command line override in the outer scope no longer apply. So what we + // are going to do is first ignore overrides and perform the normal + // logic on the original. Then we apply the overrides on the result. + // + if (!l.defined () || (def_ovr && !l.belongs (root))) + { + value& v (root.assign (var) = def_val); + v.extra = true; // Default value flag. + + n = (save_flags & save_commented) == 0; // Absence means default. + l = lookup (v, var, root); + org = make_pair (l, 1); // Lookup depth is 1 since it's in root.vars. + } + // Treat an inherited value that was set to default as new. + // + else if (l->extra) + n = (save_flags & save_commented) == 0; // Absence means default. + + if (var.overrides != nullptr) + { + pair ovr (root.find_override (var, move (org))); + + if (l != ovr.first) // Overriden? + { + // Override is always treated as new. + // + n = true; + l = move (ovr.first); + } + } + + return pair (l, n); + } + } +} diff --git a/libbuild2/dist/init.cxx b/libbuild2/dist/init.cxx new file mode 100644 index 0000000..959b2dd --- /dev/null +++ b/libbuild2/dist/init.cxx @@ -0,0 +1,192 @@ +// file : libbuild2/dist/init.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include +#include + +#include + +#include +#include +#include + +using namespace std; +using namespace butl; + +namespace build2 +{ + namespace dist + { + static const rule rule_; + + bool + boot (scope& rs, const location&, unique_ptr& mod) + { + tracer trace ("dist::boot"); + + l5 ([&]{trace << "for " << rs;}); + + // Register the meta-operation. + // + rs.insert_meta_operation (dist_id, mo_dist); + + // Enter module variables. Do it during boot in case they get assigned + // in bootstrap.build (which is customary for, e.g., dist.package). + // + auto& vp (var_pool.rw (rs)); + + // Note: some overridable, some not. + // + // config.dist.archives is a list of archive extensions (e.g., zip, + // tar.gz) that can be optionally prefixed with a directory. If it is + // relative, then it is prefixed with config.dist.root. Otherwise, the + // archive is written to the absolute location. + // + // config.dist.checksums is a list of archive checksum extensions (e.g., + // sha1, sha256) that can also be optionally prefixed with a directory + // with the same semantics as config.dist.archives. If the directory is + // absent, then the checksum file is written into the same directory as + // the corresponding archive. 
+ // + vp.insert ("config.dist.root", true); + vp.insert ("config.dist.archives", true); + vp.insert ("config.dist.checksums", true); + vp.insert ("config.dist.cmd", true); + + // Allow distribution of uncommitted projects. This is enforced by the + // version module. + // + vp.insert ("config.dist.uncommitted", true); + + vp.insert ("dist.root"); + vp.insert ("dist.cmd"); + vp.insert ("dist.archives"); + vp.insert ("dist.checksums"); + vp.insert ("dist.uncommitted"); + + vp.insert ("dist", variable_visibility::target); // Flag. + + // Project's package name. + // + auto& v_d_p ( + vp.insert ("dist.package", variable_visibility::project)); + + // Create the module. + // + mod.reset (new module (v_d_p)); + + return false; + } + + bool + init (scope& rs, + scope&, + const location& l, + unique_ptr&, + bool first, + bool, + const variable_map& config_hints) + { + tracer trace ("dist::init"); + + if (!first) + { + warn (l) << "multiple dist module initializations"; + return true; + } + + const dir_path& out_root (rs.out_path ()); + l5 ([&]{trace << "for " << out_root;}); + + assert (config_hints.empty ()); // We don't known any hints. + + // Register our wildcard rule. Do it explicitly for the alias to prevent + // something like insert(dist_id, test_id) taking precedence. + // + rs.rules.insert (dist_id, 0, "dist", rule_); + rs.rules.insert (dist_id, 0, "dist.alias", rule_); //@@ outer? + + // Configuration. + // + // Note that we don't use any defaults for root -- the location + // must be explicitly specified or we will complain if and when + // we try to dist. + // + bool s (config::specified (rs, "dist")); + + // Adjust module priority so that the config.dist.* values are saved at + // the end of config.build. + // + if (s) + config::save_module (rs, "dist", INT32_MAX); + + // dist.root + // + { + value& v (rs.assign ("dist.root")); + + if (s) + { + if (lookup l = config::optional (rs, "config.dist.root")) + v = cast (l); // Strip abs_dir_path. + } + } + + // dist.cmd + // + { + value& v (rs.assign ("dist.cmd")); + + if (s) + { + if (lookup l = config::required (rs, + "config.dist.cmd", + path ("install")).first) + v = run_search (cast (l), true); + } + } + + // dist.archives + // dist.checksums + // + { + value& a (rs.assign ("dist.archives")); + value& c (rs.assign ("dist.checksums")); + + if (s) + { + if (lookup l = config::optional (rs, "config.dist.archives")) + a = *l; + + if (lookup l = config::optional (rs, "config.dist.checksums")) + { + c = *l; + + if (!c.empty () && (!a || a.empty ())) + fail << "config.dist.checksums specified without " + << "config.dist.archives"; + + } + } + } + + // dist.uncommitted + // + // Omit it from the configuration unless specified. 
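+ // A minimal usage sketch of the omitted() helper declared in
+ // config/utility.hxx (hypothetical module and variable names;
+ // default_target() is an assumed module-specific fallback, not a real
+ // function): look up a config.* value that has no default, falling back
+ // to a computed value if it was not specified.
+ //
+ //   lookup l (config::omitted (rs, "config.hello.target").first);
+ //
+ //   string tgt (l
+ //               ? cast<string> (l)
+ //               : default_target ());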
+ // + config::omitted (rs, "config.dist.uncommitted"); + + return true; + } + + module_functions + build2_dist_load () + { + return module_functions {&boot, &init}; + } + } +} diff --git a/libbuild2/dist/init.hxx b/libbuild2/dist/init.hxx new file mode 100644 index 0000000..41c82a7 --- /dev/null +++ b/libbuild2/dist/init.hxx @@ -0,0 +1,36 @@ +// file : libbuild2/dist/init.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_DIST_INIT_HXX +#define LIBBUILD2_DIST_INIT_HXX + +#include +#include + +#include + +#include + +namespace build2 +{ + namespace dist + { + bool + boot (scope&, const location&, unique_ptr&); + + bool + init (scope&, + scope&, + const location&, + unique_ptr&, + bool, + bool, + const variable_map&); + + extern "C" LIBBUILD2_SYMEXPORT module_functions + build2_dist_load (); + } +} + +#endif // LIBBUILD2_DIST_INIT_HXX diff --git a/libbuild2/dist/module.cxx b/libbuild2/dist/module.cxx new file mode 100644 index 0000000..e9b9955 --- /dev/null +++ b/libbuild2/dist/module.cxx @@ -0,0 +1,15 @@ +// file : libbuild2/dist/module.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +using namespace std; + +namespace build2 +{ + namespace dist + { + const string module::name ("dist"); + } +} diff --git a/libbuild2/dist/module.hxx b/libbuild2/dist/module.hxx new file mode 100644 index 0000000..abc1400 --- /dev/null +++ b/libbuild2/dist/module.hxx @@ -0,0 +1,71 @@ +// file : libbuild2/dist/module.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_DIST_MODULE_HXX +#define LIBBUILD2_DIST_MODULE_HXX + +#include +#include + +#include +#include + +#include + +namespace build2 +{ + namespace dist + { + struct LIBBUILD2_SYMEXPORT module: module_base + { + static const string name; + + const variable& var_dist_package; + + // Distribution post-processing callbacks. + // + // The last component in the pattern may contain shell wildcards. If the + // path contains a directory, then it is matched from the distribution + // root only. Otherwise, it is matched against all the files being + // distributed. For example: + // + // buildfile - every buildfile + // ./buildfile - root buildfile only + // tests/buildfile - tests/buildfile only + // + // The callback is called with the absolute path of the matching file + // after it has been copied to the distribution directory. The project's + // root scope and callback-specific data are passed along. + // + // Note that if registered, the callbacks are also called (recursively) + // in subprojects. + // + using callback_func = void (const path&, const scope&, void*); + + void + register_callback (path pattern, callback_func* f, void* data) + { + callbacks_.push_back (callback {move (pattern), f, data}); + } + + // Implementation details. 
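+ // As an illustration of the callback interface above (hypothetical code,
+ // not part of this commit), another module could post-process the
+ // distributed copy of the root manifest along these lines:
+ //
+ //   static void
+ //   patch_manifest (const path& f, const scope& rs, void* data)
+ //   {
+ //     // Adjust the copy at f (rs is the project's root scope).
+ //   }
+ //
+ //   // In that module's init(), after dist has booted:
+ //   //
+ //   if (auto* m = rs.lookup_module<dist::module> (dist::module::name))
+ //     m->register_callback (path ("./manifest"), &patch_manifest, nullptr);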
+ // + module (const variable& v_d_p) + : var_dist_package (v_d_p) {} + + public: + struct callback + { + const path pattern; + callback_func* function; + void* data; + }; + using callbacks = vector; + + callbacks callbacks_; + }; + } +} + +#endif // LIBBUILD2_DIST_MODULE_HXX diff --git a/libbuild2/dist/operation.cxx b/libbuild2/dist/operation.cxx new file mode 100644 index 0000000..ac3912e --- /dev/null +++ b/libbuild2/dist/operation.cxx @@ -0,0 +1,868 @@ +// file : libbuild2/dist/operation.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include + +#include // path_match() + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace std; +using namespace butl; + +namespace build2 +{ + namespace dist + { + // install -d + // + static void + install (const process_path& cmd, const dir_path&); + + // install + // + // Return the destination file path. + // + static path + install (const process_path& cmd, const file&, const dir_path&); + + // tar|zip ... /. + // + // Return the archive file path. + // + static path + archive (const dir_path& root, + const string& pkg, + const dir_path& dir, + const string& ext); + + // sum > /. + // + // Return the checksum file path. + // + static path + checksum (const path& arc, const dir_path& dir, const string& ext); + + static operation_id + dist_operation_pre (const values&, operation_id o) + { + if (o != default_id) + fail << "explicit operation specified for meta-operation dist"; + + return o; + } + + static void + dist_execute (const values&, action, action_targets& ts, + uint16_t, bool prog) + { + tracer trace ("dist_execute"); + + // For now we assume all the targets are from the same project. + // + const target& t (ts[0].as_target ()); + const scope* rs (t.base_scope ().root_scope ()); + + if (rs == nullptr) + fail << "out of project target " << t; + + const dir_path& out_root (rs->out_path ()); + const dir_path& src_root (rs->src_path ()); + + if (out_root == src_root) + fail << "in-tree distribution of target " << t << + info << "distribution requires out-of-tree build"; + + // Make sure we have the necessary configuration before we get down to + // business. + // + auto l (rs->vars["dist.root"]); + + if (!l || l->empty ()) + fail << "unknown root distribution directory" << + info << "did you forget to specify config.dist.root?"; + + // We used to complain if dist.root does not exist but then, similar + // to install, got tired of user's complaints. So now we just let + // install -d for the package directory create it if necessary. + // + const dir_path& dist_root (cast (l)); + + l = rs->vars["dist.package"]; + + if (!l || l->empty ()) + fail << "unknown distribution package name" << + info << "did you forget to set dist.package?"; + + const string& dist_package (cast (l)); + const process_path& dist_cmd (cast (rs->vars["dist.cmd"])); + + // Verify all the targets are from the same project. + // + for (const action_target& at: ts) + { + const target& t (at.as_target ()); + + if (rs != t.base_scope ().root_scope ()) + fail << "target " << t << " is from a different project" << + info << "one dist meta-operation can handle one project" << + info << "consider using several dist meta-operations"; + } + + // We used to print 'dist ' at verbosity level 1 but that has + // proven to be just noise. 
Though we still want to print something + // since otherwise, once the progress line is cleared, we may end up + // with nothing printed at all. + // + // Note that because of this we can also suppress diagnostics noise + // (e.g., output directory creation) in all the operations below. + // + if (verb == 1) + text << "dist " << dist_package; + + // Match a rule for every operation supported by this project. Skip + // default_id. + // + // Note that we are not calling operation_pre/post() callbacks here + // since the meta operation is dist and we know what we are doing. + // + values params; + const path locf (""); + const location loc (&locf); // Dummy location. + + const operations& ops (rs->root_extra->operations); + + for (operations::size_type id (default_id + 1); // Skip default_id. + id < ops.size (); + ++id) + { + if (const operation_info* oif = ops[id]) + { + // Skip aliases (e.g., update-for-install). In fact, one can argue + // the default update should be sufficient since it is assumed to + // update all prerequisites and we no longer support ad hoc stuff + // like test.input. Though here we are using the dist meta-operation, + // not perform. + // + if (oif->id != id) + continue; + + // Use standard (perform) match. + // + if (oif->pre != nullptr) + { + if (operation_id pid = oif->pre (params, dist_id, loc)) + { + const operation_info* poif (ops[pid]); + set_current_oif (*poif, oif, false /* diag_noise */); + action a (dist_id, poif->id, oif->id); + match (params, a, ts, + 1 /* diag (failures only) */, + false /* progress */); + } + } + + set_current_oif (*oif, nullptr, false /* diag_noise */); + action a (dist_id, oif->id); + match (params, a, ts, + 1 /* diag (failures only) */, + false /* progress */); + + if (oif->post != nullptr) + { + if (operation_id pid = oif->post (params, dist_id)) + { + const operation_info* poif (ops[pid]); + set_current_oif (*poif, oif, false /* diag_noise */); + action a (dist_id, poif->id, oif->id); + match (params, a, ts, + 1 /* diag (failures only) */, + false /* progress */); + } + } + } + } + + // Add buildfiles that are not normally loaded as part of the project, + // for example, the export stub. They will still be ignored on the next + // step if the user explicitly marked them dist=false. + // + auto add_adhoc = [&trace] (const scope& rs, const path& f) + { + path p (rs.src_path () / f); + if (exists (p)) + { + dir_path d (p.directory ()); + + // Figure out if we need out. + // + dir_path out (rs.src_path () != rs.out_path () + ? out_src (d, rs) + : dir_path ()); + + targets.insert ( + move (d), + move (out), + p.leaf ().base ().string (), + p.extension (), // Specified. + trace); + } + }; + + add_adhoc (*rs, rs->root_extra->export_file); + + // The same for subprojects that have been loaded. + // + if (auto l = rs->vars[var_subprojects]) + { + for (auto p: cast (l)) + { + const dir_path& pd (p.second); + dir_path out_nroot (out_root / pd); + const scope& nrs (scopes.find (out_nroot)); + + if (nrs.out_path () != out_nroot) // This subproject not loaded. + continue; + + if (!nrs.src_path ().sub (src_root)) // Not a strong amalgamation. + continue; + + add_adhoc (nrs, nrs.root_extra->export_file); + } + } + + // Collect the files. We want to take the snapshot of targets since + // updating some of them may result in more targets being entered. + // + // Note that we are not showing progress here (e.g., "N targets to + // distribute") since it will be useless (too fast). 
+ // + action_targets files; + const variable& dist_var (var_pool["dist"]); + + for (const auto& pt: targets) + { + file* ft (pt->is_a ()); + + if (ft == nullptr) // Not a file. + continue; + + if (ft->dir.sub (src_root)) + { + // Include unless explicitly excluded. + // + auto l ((*ft)[dist_var]); + + if (l && !cast (l)) + l5 ([&]{trace << "excluding " << *ft;}); + else + files.push_back (ft); + + continue; + } + + if (ft->dir.sub (out_root)) + { + // Exclude unless explicitly included. + // + auto l ((*ft)[dist_var]); + + if (l && cast (l)) + { + l5 ([&]{trace << "including " << *ft;}); + files.push_back (ft); + } + + continue; + } + } + + // Make sure what we need to distribute is up to date. + // + { + if (mo_perform.meta_operation_pre != nullptr) + mo_perform.meta_operation_pre (params, loc); + + // This is a hack since according to the rules we need to completely + // reset the state. We could have done that (i.e., saved target names + // and then re-searched them in the new tree) but that would just slow + // things down while this little cheat seems harmless (i.e., assume + // the dist mete-opreation is "compatible" with perform). + // + // Note also that we don't do any structured result printing. + // + size_t on (current_on); + set_current_mif (mo_perform); + current_on = on + 1; + + if (mo_perform.operation_pre != nullptr) + mo_perform.operation_pre (params, update_id); + + set_current_oif (op_update, nullptr, false /* diag_noise */); + + action a (perform_id, update_id); + + mo_perform.match (params, a, files, + 1 /* diag (failures only) */, + prog /* progress */); + + mo_perform.execute (params, a, files, + 1 /* diag (failures only) */, + prog /* progress */); + + if (mo_perform.operation_post != nullptr) + mo_perform.operation_post (params, update_id); + + if (mo_perform.meta_operation_post != nullptr) + mo_perform.meta_operation_post (params); + } + + dir_path td (dist_root / dir_path (dist_package)); + + // Clean up the target directory. + // + if (build2::rmdir_r (td, true, 2) == rmdir_status::not_empty) + fail << "unable to clean target directory " << td; + + auto_rmdir rm_td (td); // Clean it up if things go bad. + install (dist_cmd, td); + + // Copy over all the files. Apply post-processing callbacks. + // + module& mod (*rs->lookup_module (module::name)); + + prog = prog && show_progress (1 /* max_verb */); + size_t prog_percent (0); + + for (size_t i (0), n (files.size ()); i != n; ++i) + { + const file& t (*files[i].as_target ().is_a ()); + + // Figure out where this file is inside the target directory. + // + bool src (t.dir.sub (src_root)); + dir_path dl (src ? t.dir.leaf (src_root) : t.dir.leaf (out_root)); + + dir_path d (td / dl); + if (!exists (d)) + install (dist_cmd, d); + + path r (install (dist_cmd, t, d)); + + // See if this file is in a subproject. + // + const scope* srs (rs); + const module::callbacks* cbs (&mod.callbacks_); + + if (auto l = rs->vars[var_subprojects]) + { + for (auto p: cast (l)) + { + const dir_path& pd (p.second); + if (dl.sub (pd)) + { + srs = &scopes.find (out_root / pd); + + if (auto* m = srs->lookup_module (module::name)) + cbs = &m->callbacks_; + else + fail << "dist module not loaded in subproject " << pd; + + break; + } + } + } + + for (module::callback cb: *cbs) + { + const path& pat (cb.pattern); + + // If we have a directory, then it should be relative to the project + // root. + // + if (!pat.simple ()) + { + assert (pat.relative ()); + + dir_path d ((src ? 
srs->src_path () : srs->out_path ()) / + pat.directory ()); + d.normalize (); + + if (d != t.dir) + continue; + } + + if (path_match (pat.leaf ().string (), t.path ().leaf ().string ())) + cb.function (r, *srs, cb.data); + } + + if (prog) + { + // Note that this is not merely an optimization since if stderr is + // not a terminal, we print real lines for progress. + // + size_t p ((i * 100) / n); + + if (prog_percent != p) + { + prog_percent = p; + + diag_progress_lock pl; + diag_progress = ' '; + diag_progress += to_string (prog_percent); + diag_progress += "% of targets distributed"; + } + } + } + + // Clear the progress if shown. + // + if (prog) + { + diag_progress_lock pl; + diag_progress.clear (); + } + + rm_td.cancel (); + + // Archive and checksum if requested. + // + if (lookup as = rs->vars["dist.archives"]) + { + lookup cs (rs->vars["dist.checksums"]); + + // Split the dist.{archives,checksums} value into a directory and + // extension. + // + auto split = [] (const path& p, const dir_path& r, const char* what) + { + dir_path d (p.relative () ? r : dir_path ()); + d /= p.directory (); + + const string& s (p.string ()); + size_t i (path::traits_type::find_leaf (s)); + + if (i == string::npos) + fail << "invalid extension '" << s << "' in " << what; + + if (s[i] == '.') // Skip the dot if specified. + ++i; + + return pair (move (d), string (s, i)); + }; + + for (const path& p: cast (as)) + { + auto ap (split (p, dist_root, "dist.archives")); + path a (archive (dist_root, dist_package, ap.first, ap.second)); + + if (cs) + { + for (const path& p: cast (cs)) + { + auto cp (split (p, ap.first, "dist.checksums")); + checksum (a, cp.first, cp.second); + } + } + } + } + } + + // install -d + // + static void + install (const process_path& cmd, const dir_path& d) + { + path reld (relative (d)); + + cstrings args {cmd.recall_string (), "-d"}; + + args.push_back ("-m"); + args.push_back ("755"); + args.push_back (reld.string ().c_str ()); + args.push_back (nullptr); + + if (verb >= 2) + print_process (args); + + run (cmd, args); + } + + // install + // + static path + install (const process_path& cmd, const file& t, const dir_path& d) + { + dir_path reld (relative (d)); + path relf (relative (t.path ())); + + cstrings args {cmd.recall_string ()}; + + // Preserve timestamps. This could becomes important if, for + // example, we have pre-generated sources. Note that the + // install-sh script doesn't support this option, while both + // Linux and BSD install's do. + // + args.push_back ("-p"); + + // Assume the file is executable if the owner has execute + // permission, in which case we make it executable for + // everyone. + // + args.push_back ("-m"); + args.push_back ( + (path_permissions (t.path ()) & permissions::xu) == permissions::xu + ? "755" + : "644"); + + args.push_back (relf.string ().c_str ()); + args.push_back (reld.string ().c_str ()); + args.push_back (nullptr); + + if (verb >= 2) + print_process (args); + + run (cmd, args); + + return d / relf.leaf (); + } + + static path + archive (const dir_path& root, + const string& pkg, + const dir_path& dir, + const string& e) + { + path an (pkg + '.' + e); + + // Delete old archive for good measure. + // + path ap (dir / an); + if (exists (ap, false)) + rmfile (ap); + + // Use zip for .zip archives. Also recognize and handle a few well-known + // tar.xx cases (in case tar doesn't support -a or has other issues like + // MSYS). Everything else goes to tar in the auto-compress mode (-a). 
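+ // For illustration, with a (hypothetical) package foo-1.2.3 the resulting
+ // commands are roughly:
+ //
+ //   zip     : zip -9 -rq foo-1.2.3.zip foo-1.2.3
+ //   tar     : tar --format ustar -cf foo-1.2.3.tar foo-1.2.3
+ //   tar.gz  : tar --format ustar -cf - foo-1.2.3 | gzip -9 >foo-1.2.3.tar.gz
+ //   tar.bz2 : tar --format ustar -cf - foo-1.2.3 | bzip2 >foo-1.2.3.tar.bz2
+ //   other   : tar --format ustar -a -cf foo-1.2.3.<ext> foo-1.2.3
+ //
+ // On Windows bsdtar with -a is used throughout, including for zip.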
+ // + cstrings args; + + // Separate compressor (gzip, xz, etc) state. + // + size_t i (0); // Command line start or 0 if not used. + auto_rmfile out_rm; // Output file cleanup (must come first). + auto_fd out_fd; // Output file. + + if (e == "zip") + { + // On Windows we use libarchive's bsdtar (zip is an MSYS executabales). + // + // While not explicitly stated, the compression-level option works + // for zip archives. + // +#ifdef _WIN32 + args = {"bsdtar", + "-a", // -a with the .zip extension seems to be the only way. + "--options=compression-level=9", + "-cf", ap.string ().c_str (), + pkg.c_str (), + nullptr}; +#else + args = {"zip", + "-9", + "-rq", ap.string ().c_str (), + pkg.c_str (), + nullptr}; +#endif + } + else + { + // On Windows we use libarchive's bsdtar with auto-compression (tar + // itself and quite a few compressors are MSYS executables). + // + const char* l (nullptr); // Compression level (option). + +#ifdef _WIN32 + const char* tar = "bsdtar"; + + if (e == "tar.gz") + l = "--options=compression-level=9"; +#else + const char* tar = "tar"; + + // For gzip it's a good idea to use -9 by default. For bzip2, -9 is + // the default. And for xz, -9 is not recommended as the default due + // memory requirements. + // + // Note also that the compression level can be altered via the GZIP + // (GZIP_OPT also seems to work), BZIP2, and XZ_OPT environment + // variables, respectively. + // + const char* c (nullptr); + + if (e == "tar.gz") { c = "gzip"; l = "-9"; } + else if (e == "tar.xz") { c = "xz"; } + else if (e == "tar.bz2") { c = "bzip2"; } + + if (c != nullptr) + { + args = {tar, + "--format", "ustar", + "-cf", "-", + pkg.c_str (), + nullptr}; + + i = args.size (); + args.push_back (c); + if (l != nullptr) + args.push_back (l); + args.push_back (nullptr); + args.push_back (nullptr); // Pipe end. + + try + { + out_fd = fdopen (ap, + fdopen_mode::out | fdopen_mode::binary | + fdopen_mode::truncate | fdopen_mode::create); + out_rm = auto_rmfile (ap); + } + catch (const io_error& e) + { + fail << "unable to open " << ap << ": " << e; + } + } + else +#endif + if (e == "tar") + args = {tar, + "--format", "ustar", + "-cf", ap.string ().c_str (), + pkg.c_str (), + nullptr}; + else + { + args = {tar, + "--format", "ustar", + "-a"}; + + if (l != nullptr) + args.push_back (l); + + args.push_back ("-cf"); + args.push_back (ap.string ().c_str ()); + args.push_back (pkg.c_str ()); + args.push_back (nullptr); + } + } + + process_path app; // Archiver path. + process_path cpp; // Compressor path. + + app = run_search (args[0]); + + if (i != 0) + cpp = run_search (args[i]); + + if (verb >= 2) + print_process (args); + else if (verb) + text << args[0] << ' ' << ap; + + process apr; + process cpr; + + // Change the archiver's working directory to dist_root. + // + apr = run_start (app, + args.data (), + 0 /* stdin */, + (i != 0 ? -1 : 1) /* stdout */, + true /* error */, + root); + + // Start the compressor if required. + // + if (i != 0) + { + cpr = run_start (cpp, + args.data () + i, + apr.in_ofd.get () /* stdin */, + out_fd.get () /* stdout */); + + cpr.in_ofd.reset (); // Close the archiver's stdout on our side. + run_finish (args.data () + i, cpr); + } + + run_finish (args.data (), apr); + + out_rm.cancel (); + return ap; + } + + static path + checksum (const path& ap, const dir_path& dir, const string& e) + { + path an (ap.leaf ()); + dir_path ad (ap.directory ()); + + path cn (an + '.' + e); + + // Delete old checksum for good measure. 
+ // + path cp (dir / cn); + if (exists (cp, false)) + rmfile (cp); + + auto_rmfile c_rm; // Note: must come first. + auto_fd c_fd; + try + { + c_fd = fdopen (cp, + fdopen_mode::out | + fdopen_mode::create | + fdopen_mode::truncate); + c_rm = auto_rmfile (cp); + } + catch (const io_error& e) + { + fail << "unable to open " << cp << ": " << e; + } + + // The plan is as follows: look for the sum program (e.g., sha1sum, + // md5sum, etc). If found, then use that, otherwise, fall back to our + // built-in checksum calculation support. + // + // There are two benefits to first trying the external program: it may + // supports more checksum algorithms and could be faster than our + // built-in code. + // + string pn (e + "sum"); + process_path pp (process::try_path_search (pn, true /* init */)); + + if (!pp.empty ()) + { + const char* args[] { + pp.recall_string (), + "-b" /* binary */, + an.string ().c_str (), + nullptr}; + + if (verb >= 2) + print_process (args); + else if (verb) + text << args[0] << ' ' << cp; + + // Note that to only get the archive name (without the directory) in + // the output we have to run from the archive's directory. + // + process pr (run_start (pp, + args, + 0 /* stdin */, + c_fd.get () /* stdout */, + true /* error */, + ad /* cwd */)); + run_finish (args, pr); + } + else + { + string (*f) (ifdstream&); + + // Note: remember to update info: below if adding another algorithm. + // + if (e == "sha1") + f = [] (ifdstream& i) -> string {return sha1 (i).string ();}; + else if (e == "sha256") + f = [] (ifdstream& i) -> string {return sha256 (i).string ();}; + else + fail << "no built-in support for checksum algorithm " << e + << " nor " << e << "sum program found" << + info << "built-in support is available for sha1, sha256" << endf; + + if (verb >= 2) + text << "cat >" << cp; + else if (verb) + text << e << "sum " << cp; + + string c; + try + { + ifdstream is (ap, fdopen_mode::in | fdopen_mode::binary); + c = f (is); + is.close (); + } + catch (const io_error& e) + { + fail << "unable to read " << ap << ": " << e; + } + + try + { + ofdstream os (move (c_fd)); + os << c << " *" << an << endl; + os.close (); + } + catch (const io_error& e) + { + fail << "unable to write " << cp << ": " << e; + } + } + + c_rm.cancel (); + return cp; + } + + static include_type + dist_include (action, + const target&, + const prerequisite_member& p, + include_type i) + { + tracer trace ("dist_include"); + + // Override excluded to adhoc so that every source is included into the + // distribution. Note that this should be harmless to a custom rule + // given the prescribed semantics of adhoc (match/execute but otherwise + // ignore) is followed. + // + if (i == include_type::excluded) + { + l5 ([&]{trace << "overriding exclusion of " << p;}); + i = include_type::adhoc; + } + + return i; + } + + const meta_operation_info mo_dist { + dist_id, + "dist", + "distribute", + "distributing", + "distributed", + "has nothing to distribute", // We cannot "be distributed". + true, // bootstrap_outer + nullptr, // meta-operation pre + &dist_operation_pre, + &load, // normal load + &search, // normal search + nullptr, // no match (see dist_execute()). 
+ &dist_execute, + nullptr, // operation post + nullptr, // meta-operation post + &dist_include + }; + } +} diff --git a/libbuild2/dist/operation.hxx b/libbuild2/dist/operation.hxx new file mode 100644 index 0000000..aa59c36 --- /dev/null +++ b/libbuild2/dist/operation.hxx @@ -0,0 +1,21 @@ +// file : libbuild2/dist/operation.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_DIST_OPERATION_HXX +#define LIBBUILD2_DIST_OPERATION_HXX + +#include +#include + +#include + +namespace build2 +{ + namespace dist + { + extern const meta_operation_info mo_dist; + } +} + +#endif // LIBBUILD2_DIST_OPERATION_HXX diff --git a/libbuild2/dist/rule.cxx b/libbuild2/dist/rule.cxx new file mode 100644 index 0000000..357d70e --- /dev/null +++ b/libbuild2/dist/rule.cxx @@ -0,0 +1,88 @@ +// file : libbuild2/dist/rule.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include +#include +#include + +using namespace std; + +namespace build2 +{ + namespace dist + { + bool rule:: + match (action, target&, const string&) const + { + return true; // We always match. + } + + recipe rule:: + apply (action a, target& t) const + { + const dir_path& out_root (t.root_scope ().out_path ()); + + // If we can, go inside see-through groups. + // + for (prerequisite_member p: + group_prerequisite_members (a, t, members_mode::maybe)) + { + // Note: no exclusion tests, we want all of them (and see also the + // dist_include() override). + + // Skip prerequisites imported from other projects. + // + if (p.proj ()) + continue; + + // We used to always search and match but that resulted in the + // undesirable behavior in case one of the "source" files is + // missing. In this case we would enter a target as "output", this + // rule would match it, and then dist_execute() would ignore it by + // default. + // + // So now if this is a file target (we still want to always "see + // through" other targets like aliases), we will only match it if (1) + // it exists in src or (2) it exists as a target. It feels like we + // don't need the stronger "... and not implied" condition since if it + // is mentioned as a target, then it is in out (we don't do the same + // target in both src/out). + // + // @@ Note that this is still an issue in a custom dist rule. + // + const target* pt (nullptr); + if (p.is_a ()) + { + pt = p.load (); + + if (pt == nullptr) + { + // Search for an existing target or existing file in src. + // + const prerequisite_key& k (p.key ()); + pt = k.tk.type->search (t, k); + + if (pt == nullptr) + fail << "prerequisite " << k << " is not existing source file " + << "nor known output target" << endf; + + search_custom (p.prerequisite, *pt); // Cache. + } + } + else + pt = &p.search (t); + + // Don't match targets that are outside of our project. + // + if (pt->dir.sub (out_root)) + build2::match (a, *pt); + } + + return noop_recipe; // We will never be executed. 
+ } + } +} diff --git a/libbuild2/dist/rule.hxx b/libbuild2/dist/rule.hxx new file mode 100644 index 0000000..df32de5 --- /dev/null +++ b/libbuild2/dist/rule.hxx @@ -0,0 +1,39 @@ +// file : libbuild2/dist/rule.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_DIST_RULE_HXX +#define LIBBUILD2_DIST_RULE_HXX + +#include +#include + +#include +#include +#include + +namespace build2 +{ + namespace dist + { + // This is the default rule that simply matches all the prerequisites. + // + // A custom rule (usually the same as perform_update) may be necessary to + // establish group links (so that we see the dist variable set on a + // group). + // + class rule: public build2::rule + { + public: + rule () {} + + virtual bool + match (action, target&, const string&) const override; + + virtual recipe + apply (action, target&) const override; + }; + } +} + +#endif // LIBBUILD2_DIST_RULE_HXX diff --git a/libbuild2/install/functions.cxx b/libbuild2/install/functions.cxx new file mode 100644 index 0000000..f067918 --- /dev/null +++ b/libbuild2/install/functions.cxx @@ -0,0 +1,33 @@ +// file : libbuild2/install/functions.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include +#include + +#include + +using namespace std; + +namespace build2 +{ + namespace install + { + void + functions () + { + function_family f ("install"); + + // Resolve potentially relative install.* value to an absolute directory + // based on (other) install.* values visible from the calling scope. + // + f[".resolve"] = [] (const scope* s, dir_path d) + { + if (s == nullptr) + fail << "install.resolve() called out of scope" << endf; + + return resolve_dir (*s, move (d)); + }; + } + } +} diff --git a/libbuild2/install/init.cxx b/libbuild2/install/init.cxx new file mode 100644 index 0000000..fb3d9ea --- /dev/null +++ b/libbuild2/install/init.cxx @@ -0,0 +1,309 @@ +// file : libbuild2/install/init.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +using namespace std; +using namespace butl; + +namespace build2 +{ + namespace install + { + // Set install..* values based on config.install..* ones + // or the defaults. If none of config.install.* values were specified, + // then we do omitted/delayed configuration. Note that we still need + // to set all the install.* values to defaults, as if we had the + // default configuration. + // + // If override is true, then override values that came from outer + // configurations. We have to do this for paths that contain the + // package name. + // + // For global values we only set config.install.* variables. Non-global + // values with NULL defaults are omitted. + // + template + static void + set_var (bool spec, + scope& r, + const char* name, + const char* var, + const CT* dv, + bool override = false) + { + string vn; + lookup l; + + bool global (*name == '\0'); + + if (spec) + { + // Note: overridable. + // + vn = "config.install"; + if (!global) + { + vn += '.'; + vn += name; + } + vn += var; + const variable& vr (var_pool.rw (r).insert (move (vn), true)); + + l = dv != nullptr + ? config::required (r, vr, *dv, override).first + : (global + ? 
config::optional (r, vr) + : config::omitted (r, vr).first); + } + + if (global) + return; + + // Note: not overridable. + // + vn = "install."; + vn += name; + vn += var; + const variable& vr (var_pool.rw (r).insert (move (vn))); + + value& v (r.assign (vr)); + + if (spec) + { + if (l) + v = cast (l); // Strip CT to T. + } + else + { + if (dv != nullptr) + v = *dv; + } + } + + template + static void + set_dir (bool s, // specified + scope& r, // root scope + const char* n, // var name + const T& p, // path + bool o = false, // override + const string& fm = string (), // file mode + const string& dm = string (), // dir mode + const build2::path& c = build2::path ()) // command + { + using build2::path; + + bool global (*n == '\0'); + + if (!global) + set_var (s, r, n, "", p.empty () ? nullptr : &p, o); + + set_var (s, r, n, ".cmd", c.empty () ? nullptr : &c); + set_var (s, r, n, ".options", (strings*) (nullptr)); + set_var (s, r, n, ".mode", fm.empty () ? nullptr : &fm); + set_var (s, r, n, ".dir_mode", dm.empty () ? nullptr : &dm); + set_var (s, r, n, ".sudo", (string*) (nullptr)); + + // This one doesn't have config.* value (only set in a buildfile). + // + if (!global) + var_pool.rw (r).insert (string ("install.") + n + ".subdirs"); + } + + void + functions (); // functions.cxx + + bool + boot (scope& rs, const location&, unique_ptr&) + { + tracer trace ("install::boot"); + l5 ([&]{trace << "for " << rs;}); + + // Register install function family if this is the first instance of the + // install modules. + // + if (!function_family::defined ("install")) + functions (); + + // Register our operations. + // + rs.insert_operation (install_id, op_install); + rs.insert_operation (uninstall_id, op_uninstall); + rs.insert_operation (update_for_install_id, op_update_for_install); + + return false; + } + + static const path cmd ("install"); + + static const dir_path dir_root ("root"); + + static const dir_path dir_sbin (dir_path ("exec_root") /= "sbin"); + static const dir_path dir_bin (dir_path ("exec_root") /= "bin"); + static const dir_path dir_lib (dir_path ("exec_root") /= "lib"); + static const dir_path dir_libexec (dir_path ("exec_root") /= "libexec"); + static const dir_path dir_pkgconfig (dir_path ("lib") /= "pkgconfig"); + + static const dir_path dir_data (dir_path ("data_root") /= "share"); + static const dir_path dir_include (dir_path ("data_root") /= "include"); + + static const dir_path dir_doc (dir_path (dir_data) /= "doc"); + static const dir_path dir_man (dir_path (dir_data) /= "man"); + static const dir_path dir_man1 (dir_path ("man") /= "man1"); + + static const group_rule group_rule_ (true /* see_through_only */); + + bool + init (scope& rs, + scope& bs, + const location& l, + unique_ptr&, + bool first, + bool, + const variable_map& config_hints) + { + tracer trace ("install::init"); + + if (!first) + { + warn (l) << "multiple install module initializations"; + return true; + } + + const dir_path& out_root (rs.out_path ()); + l5 ([&]{trace << "for " << out_root;}); + + assert (config_hints.empty ()); // We don't known any hints. + + // Enter module variables. + // + auto& vp (var_pool.rw (rs)); + + // Note that the set_dir() calls below enter some more. + // + { + // Note: not overridable. + // + // The install variable is a path, not dir_path, since it can be used + // to both specify the target directory (to install with the same file + // name) or target file (to install with a different name). 
And the + // way we distinguish between the two is via the presence/absence of + // the trailing directory separator. + // + vp.insert ("install", variable_visibility::target); + vp.insert ("install.mode", variable_visibility::project); + vp.insert ("install.subdirs", variable_visibility::project); + } + + // Register our rules. + // + { + auto& r (bs.rules); + + const auto& ar (alias_rule::instance); + const auto& dr (fsdir_rule::instance); + const auto& fr (file_rule::instance); + const auto& gr (group_rule_); + + r.insert (perform_install_id, "install.alias", ar); + r.insert (perform_uninstall_id, "uninstall.alias", ar); + + r.insert (perform_install_id, "install.fsdir", dr); + r.insert (perform_uninstall_id, "install.fsdir", dr); + + r.insert (perform_install_id, "install.file", fr); + r.insert (perform_uninstall_id, "uninstall.file", fr); + + r.insert (perform_install_id, "install.file", gr); + r.insert (perform_uninstall_id, "uninstall.file", gr); + } + + // Configuration. + // + // Note that we don't use any defaults for root -- the location + // must be explicitly specified or the installer will complain + // if and when we try to install. + // + { + using build2::path; + + bool s (config::specified (rs, "install")); + + // Adjust module priority so that the (numerous) config.install.* + // values are saved at the end of config.build. + // + if (s) + config::save_module (rs, "install", INT32_MAX); + + const string& n (project (rs).string ()); + + // Global config.install.* values. + // + set_dir (s, rs, "", abs_dir_path (), false, "644", "755", cmd); + + set_dir (s, rs, "root", abs_dir_path ()); + + set_dir (s, rs, "data_root", dir_root); + set_dir (s, rs, "exec_root", dir_root, false, "755"); + + set_dir (s, rs, "sbin", dir_sbin); + set_dir (s, rs, "bin", dir_bin); + set_dir (s, rs, "lib", dir_lib); + set_dir (s, rs, "libexec", dir_path (dir_libexec) /= n, true); + set_dir (s, rs, "pkgconfig", dir_pkgconfig, false, "644"); + + set_dir (s, rs, "data", dir_path (dir_data) /= n, true); + set_dir (s, rs, "include", dir_include); + + set_dir (s, rs, "doc", dir_path (dir_doc) /= n, true); + set_dir (s, rs, "man", dir_man); + set_dir (s, rs, "man1", dir_man1); + + // Support for chroot'ed install (aka DESTDIR). + // + { + auto& var (vp.insert ( "install.chroot")); + auto& cvar (vp.insert ("config.install.chroot", true)); + + value& v (rs.assign (var)); + + if (s) + { + if (lookup l = config::optional (rs, cvar)) + v = cast (l); // Strip abs_dir_path. + } + } + } + + // Configure "installability" for built-in target types. + // + install_path (bs, dir_path ("bin")); // Install into install.bin. + install_path (bs, dir_path ("doc")); // Install into install.doc. + install_path (bs, dir_path ("man")); // Install into install.man. + install_path (bs, dir_path ("man1")); // Install into install.man1. 
+ + return true; + } + + module_functions + build2_install_load () + { + return module_functions {&boot, &init}; + } + } +} diff --git a/libbuild2/install/init.hxx b/libbuild2/install/init.hxx new file mode 100644 index 0000000..fa0a1e1 --- /dev/null +++ b/libbuild2/install/init.hxx @@ -0,0 +1,36 @@ +// file : libbuild2/install/init.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_INSTALL_INIT_HXX +#define LIBBUILD2_INSTALL_INIT_HXX + +#include +#include + +#include + +#include + +namespace build2 +{ + namespace install + { + bool + boot (scope&, const location&, unique_ptr&); + + bool + init (scope&, + scope&, + const location&, + unique_ptr&, + bool, + bool, + const variable_map&); + + extern "C" LIBBUILD2_SYMEXPORT module_functions + build2_install_load (); + } +} + +#endif // LIBBUILD2_INSTALL_INIT_HXX diff --git a/libbuild2/install/operation.cxx b/libbuild2/install/operation.cxx new file mode 100644 index 0000000..1135ad6 --- /dev/null +++ b/libbuild2/install/operation.cxx @@ -0,0 +1,84 @@ +// file : libbuild2/install/operation.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +using namespace std; +using namespace butl; + +namespace build2 +{ + namespace install + { + static operation_id + install_pre (const values& params, meta_operation_id mo, const location& l) + { + if (!params.empty ()) + fail (l) << "unexpected parameters for operation install"; + + // Run update as a pre-operation, unless we are disfiguring. + // + return mo != disfigure_id ? update_id : 0; + } + + // Note that we run both install and uninstall serially. The reason for + // this is all the fuzzy things we are trying to do like removing empty + // outer directories if they are empty. If we do this in parallel, then + // those things get racy. Also, since all we do here is creating/removing + // files, there is not going to be much speedup from doing it in parallel. + + const operation_info op_install { + install_id, + 0, + "install", + "install", + "installing", + "installed", + "has nothing to install", // We cannot "be installed". + execution_mode::first, + 0, + &install_pre, + nullptr + }; + + // Note that we run update as a pre-operation, just like install. Which + // may seem bizarre at first. We do it to obtain the exact same dependency + // graph as install so that we uninstall exactly the same set of files as + // install would install. Note that just matching the rules without + // executing them may not be enough: for example, a presence of an ad hoc + // group member may only be discovered after executing the rule (e.g., VC + // link.exe only creates a DLL's import library if there are any exported + // symbols). + // + const operation_info op_uninstall { + uninstall_id, + 0, + "uninstall", + "uninstall", + "uninstalling", + "uninstalled", + "is not installed", + execution_mode::last, + 0, + &install_pre, + nullptr + }; + + // Also the explicit update-for-install operation alias. + // + const operation_info op_update_for_install { + update_id, // Note: not update_for_install_id. 
+ install_id, + op_update.name, + op_update.name_do, + op_update.name_doing, + op_update.name_did, + op_update.name_done, + op_update.mode, + op_update.concurrency, + op_update.pre, + op_update.post + }; + } +} diff --git a/libbuild2/install/operation.hxx b/libbuild2/install/operation.hxx new file mode 100644 index 0000000..40cf25d --- /dev/null +++ b/libbuild2/install/operation.hxx @@ -0,0 +1,23 @@ +// file : libbuild2/install/operation.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_INSTALL_OPERATION_HXX +#define LIBBUILD2_INSTALL_OPERATION_HXX + +#include +#include + +#include + +namespace build2 +{ + namespace install + { + extern const operation_info op_install; + extern const operation_info op_uninstall; + extern const operation_info op_update_for_install; + } +} + +#endif // LIBBUILD2_INSTALL_OPERATION_HXX diff --git a/libbuild2/install/rule.cxx b/libbuild2/install/rule.cxx new file mode 100644 index 0000000..0b34832 --- /dev/null +++ b/libbuild2/install/rule.cxx @@ -0,0 +1,1223 @@ +// file : libbuild2/install/rule.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include +#include // resolve_dir() declaration + +#include // dir_exists(), file_exists() + +#include +#include +#include +#include +#include + +using namespace std; +using namespace butl; + +namespace build2 +{ + namespace install + { + // Lookup the install or install.* variable. Return NULL if not found or + // if the value is the special 'false' name (which means do not install; + // so the result can be used as bool). T is either scope or target. + // + template + static const P* + lookup_install (T& t, const string& var) + { + auto l (t[var]); + + if (!l) + return nullptr; + + const P& r (cast
<P>
(l)); + return r.simple () && r.string () == "false" ? nullptr : &r; + } + + // alias_rule + // + const alias_rule alias_rule::instance; + + bool alias_rule:: + match (action, target&, const string&) const + { + // We always match. + // + // Note that we are called both as the outer part during the update-for- + // un/install pre-operation and as the inner part during the un/install + // operation itself. + // + return true; + } + + const target* alias_rule:: + filter (action a, const target& t, prerequisite_iterator& i) const + { + assert (i->member == nullptr); + return filter (a, t, i->prerequisite); + } + + const target* alias_rule:: + filter (action, const target& t, const prerequisite& p) const + { + const target& pt (search (t, p)); + return pt.in (t.weak_scope ()) ? &pt : nullptr; + } + + recipe alias_rule:: + apply (action a, target& t) const + { + tracer trace ("install::alias_rule::apply"); + + // Pass-through to our installable prerequisites. + // + // @@ Shouldn't we do match in parallel (here and below)? + // + auto& pts (t.prerequisite_targets[a]); + + auto pms (group_prerequisite_members (a, t, members_mode::never)); + for (auto i (pms.begin ()), e (pms.end ()); i != e; ++i) + { + const prerequisite& p (i->prerequisite); + + // Ignore excluded. + // + include_type pi (include (a, t, p)); + + if (!pi) + continue; + + // Ignore unresolved targets that are imported from other projects. + // We are definitely not installing those. + // + if (p.proj) + continue; + + // Let a customized rule have its say. + // + // Note: we assume that if the filter enters the group, then it + // iterates over all its members. + // + const target* pt (filter (a, t, i)); + if (pt == nullptr) + { + l5 ([&]{trace << "ignoring " << p << " (filtered out)";}); + continue; + } + + // Check if this prerequisite is explicitly "not installable", that + // is, there is the 'install' variable and its value is false. + // + // At first, this might seem redundand since we could have let the + // file_rule below take care of it. The nuance is this: this + // prerequsite can be in a different subproject that hasn't loaded the + // install module (and therefore has no file_rule registered). The + // typical example would be the 'tests' subproject. + // + // Note: not the same as lookup_install() above. + // + auto l ((*pt)["install"]); + if (l && cast (l).string () == "false") + { + l5 ([&]{trace << "ignoring " << *pt << " (not installable)";}); + continue; + } + + // If this is not a file-based target (e.g., a target group such as + // libu{}) then ignore it if there is no rule to install. + // + if (pt->is_a ()) + build2::match (a, *pt); + else if (!try_match (a, *pt).first) + { + l5 ([&]{trace << "ignoring " << *pt << " (no rule)";}); + pt = nullptr; + } + + if (pt != nullptr) + pts.push_back (prerequisite_target (pt, pi)); + } + + return default_recipe; + } + + // fsdir_rule + // + const fsdir_rule fsdir_rule::instance; + + bool fsdir_rule:: + match (action, target&, const string&) const + { + // We always match. + // + // Note that we are called both as the outer part during the update-for- + // un/install pre-operation and as the inner part during the un/install + // operation itself. + // + return true; + } + + recipe fsdir_rule:: + apply (action a, target& t) const + { + // If this is outer part of the update-for-un/install, delegate to the + // default fsdir rule. Otherwise, this is a noop (we don't install + // fsdir{}). 
+ // + // For now we also assume we don't need to do anything for prerequisites + // (the only sensible prerequisite of fsdir{} is another fsdir{}). + // + if (a.operation () == update_id) + { + match_inner (a, t); + return &execute_inner; + } + else + return noop_recipe; + } + + // group_rule + // + const group_rule group_rule::instance (false /* see_through_only */); + + bool group_rule:: + match (action a, target& t, const string& h) const + { + return (!see_through || t.type ().see_through) && + alias_rule::match (a, t, h); + } + + const target* group_rule:: + filter (action, const target&, const target& m) const + { + return &m; + } + + recipe group_rule:: + apply (action a, target& t) const + { + tracer trace ("install::group_rule::apply"); + + // Resolve group members. + // + // Remember that we are called twice: first during update for install + // (pre-operation) and then during install. During the former, we rely + // on the normall update rule to resolve the group members. During the + // latter, there will be no rule to do this but the group will already + // have been resolved by the pre-operation. + // + // If the rule could not resolve the group, then we ignore it. + // + group_view gv (a.outer () + ? resolve_members (a, t) + : t.group_members (a)); + + if (gv.members != nullptr) + { + auto& pts (t.prerequisite_targets[a]); + + for (size_t i (0); i != gv.count; ++i) + { + const target* m (gv.members[i]); + + if (m == nullptr) + continue; + + // Let a customized rule have its say. + // + const target* mt (filter (a, t, *m)); + if (mt == nullptr) + { + l5 ([&]{trace << "ignoring " << *m << " (filtered out)";}); + continue; + } + + // See if we were explicitly instructed not to touch this target + // (the same semantics as in the prerequisites match). + // + // Note: not the same as lookup_install() above. + // + auto l ((*mt)["install"]); + if (l && cast (l).string () == "false") + { + l5 ([&]{trace << "ignoring " << *mt << " (not installable)";}); + continue; + } + + build2::match (a, *mt); + pts.push_back (mt); // Never ad hoc. + } + } + + // Delegate to the base rule. + // + return alias_rule::apply (a, t); + } + + + // file_rule + // + const file_rule file_rule::instance; + + bool file_rule:: + match (action, target&, const string&) const + { + // We always match, even if this target is not installable (so that we + // can ignore it; see apply()). + // + return true; + } + + const target* file_rule:: + filter (action a, const target& t, prerequisite_iterator& i) const + { + assert (i->member == nullptr); + return filter (a, t, i->prerequisite); + } + + const target* file_rule:: + filter (action, const target& t, const prerequisite& p) const + { + const target& pt (search (t, p)); + return pt.in (t.root_scope ()) ? &pt : nullptr; + } + + recipe file_rule:: + apply (action a, target& t) const + { + tracer trace ("install::file_rule::apply"); + + // Note that we are called both as the outer part during the update-for- + // un/install pre-operation and as the inner part during the un/install + // operation itself. + // + // In both cases we first determine if the target is installable and + // return noop if it's not. Otherwise, in the first case (update-for- + // un/install) we delegate to the normal update and in the second + // (un/install) -- perform the test. + // + if (!lookup_install (t, "install")) + return noop_recipe; + + // In both cases, the next step is to search, match, and collect all the + // installable prerequisites. 
+ // + // But first, in case of the update pre-operation, match the inner rule + // (actual update). We used to do this after matching the prerequisites + // but the inner rule may provide some rule-specific information (like + // the target extension for exe{}) that may be required during the + // prerequisite search (like the base name for in{}). + // + optional unchanged; + if (a.operation () == update_id) + unchanged = match_inner (a, t, unmatch::unchanged); + + auto& pts (t.prerequisite_targets[a]); + + auto pms (group_prerequisite_members (a, t, members_mode::never)); + for (auto i (pms.begin ()), e (pms.end ()); i != e; ++i) + { + const prerequisite& p (i->prerequisite); + + // Ignore excluded. + // + include_type pi (include (a, t, p)); + + if (!pi) + continue; + + // Ignore unresolved targets that are imported from other projects. + // We are definitely not installing those. + // + if (p.proj) + continue; + + // Let a customized rule have its say. + // + // Note: we assume that if the filter enters the group, then it + // iterates over all its members. + // + const target* pt (filter (a, t, i)); + if (pt == nullptr) + { + l5 ([&]{trace << "ignoring " << p << " (filtered out)";}); + continue; + } + + // See if we were explicitly instructed not to touch this target (the + // same semantics as in alias_rule). + // + // Note: not the same as lookup_install() above. + // + auto l ((*pt)["install"]); + if (l && cast (l).string () == "false") + { + l5 ([&]{trace << "ignoring " << *pt << " (not installable)";}); + continue; + } + + if (pt->is_a ()) + { + // If the matched rule returned noop_recipe, then the target state + // is set to unchanged as an optimization. Use this knowledge to + // optimize things on our side as well since this will help a lot + // when updating static installable content (headers, documentation, + // etc). + // + if (build2::match (a, *pt, unmatch::unchanged)) + pt = nullptr; + } + else if (!try_match (a, *pt).first) + { + l5 ([&]{trace << "ignoring " << *pt << " (no rule)";}); + pt = nullptr; + } + + if (pt != nullptr) + pts.push_back (prerequisite_target (pt, pi)); + } + + if (a.operation () == update_id) + { + return *unchanged + ? (pts.empty () ? noop_recipe : default_recipe) + : &perform_update; + } + else + { + return [this] (action a, const target& t) + { + return a.operation () == install_id + ? perform_install (a, t) + : perform_uninstall (a, t); + }; + } + } + + target_state file_rule:: + perform_update (action a, const target& t) + { + // First execute the inner recipe then prerequisites. + // + target_state ts (execute_inner (a, t)); + + if (t.prerequisite_targets[a].size () != 0) + ts |= straight_execute_prerequisites (a, t); + + return ts; + } + + bool file_rule:: + install_extra (const file&, const install_dir&) const + { + return false; + } + + bool file_rule:: + uninstall_extra (const file&, const install_dir&) const + { + return false; + } + + auto_rmfile file_rule:: + install_pre (const file& t, const install_dir&) const + { + return auto_rmfile (t.path (), false /* active */); + } + + bool file_rule:: + install_post (const file& t, const install_dir& id, auto_rmfile&&) const + { + return install_extra (t, id); + } + + struct install_dir + { + dir_path dir; + + // If not NULL, then point to the corresponding install.* value. 
+ // + const string* sudo = nullptr; + const path* cmd = nullptr; + const strings* options = nullptr; + const string* mode = nullptr; + const string* dir_mode = nullptr; + + explicit + install_dir (dir_path d = dir_path ()): dir (move (d)) {} + + install_dir (dir_path d, const install_dir& b) + : dir (move (d)), + sudo (b.sudo), + cmd (b.cmd), + options (b.options), + mode (b.mode), + dir_mode (b.dir_mode) {} + }; + + using install_dirs = vector; + + // Calculate a subdirectory based on l's location (*.subdirs) and if not + // empty add it to install_dirs. Return the new last element. + // + static install_dir& + resolve_subdir (install_dirs& rs, + const target& t, + const scope& s, + const lookup& l) + { + // Find the scope from which this value came and use as a base + // to calculate the subdirectory. + // + for (const scope* p (&s); p != nullptr; p = p->parent_scope ()) + { + if (l.belongs (*p, true)) // Include target type/pattern-specific. + { + // The target can be in out or src. + // + const dir_path& d (t.out_dir ().leaf (p->out_path ())); + + // Add it as another leading directory rather than modifying + // the last one directly; somehow, it feels right. + // + if (!d.empty ()) + rs.emplace_back (rs.back ().dir / d, rs.back ()); + break; + } + } + + return rs.back (); + } + + // Resolve installation directory name to absolute directory path. Return + // all the super-directories leading up to the destination (last). + // + // If target is not NULL, then also handle the subdirs logic. + // + static install_dirs + resolve (const scope& s, + const target* t, + dir_path d, + bool fail_unknown = true, + const string* var = nullptr) + { + install_dirs rs; + + if (d.absolute ()) + rs.emplace_back (move (d.normalize ())); + else + { + // If it is relative, then the first component is treated as the + // installation directory name, e.g., bin, sbin, lib, etc. Look it + // up and recurse. + // + if (d.empty ()) + fail << "empty installation directory name"; + + const string& sn (*d.begin ()); + const string var ("install." + sn); + if (const dir_path* dn = lookup_install (s, var)) + { + if (dn->empty ()) + fail << "empty installation directory for name " << sn << + info << "did you specified empty config." << var << "?"; + + rs = resolve (s, t, *dn, fail_unknown, &var); + + if (rs.empty ()) + { + assert (!fail_unknown); + return rs; // Empty. + } + + d = rs.back ().dir / dir_path (++d.begin (), d.end ()); + rs.emplace_back (move (d.normalize ()), rs.back ()); + } + else + { + if (fail_unknown) + fail << "unknown installation directory name '" << sn << "'" << + info << "did you forget to specify config." << var << "?"; + + return rs; // Empty. + } + } + + install_dir* r (&rs.back ()); + + // Override components in install_dir if we have our own. + // + if (var != nullptr) + { + if (auto l = s[*var + ".sudo"]) r->sudo = &cast (l); + if (auto l = s[*var + ".cmd"]) r->cmd = &cast (l); + if (auto l = s[*var + ".mode"]) r->mode = &cast (l); + if (auto l = s[*var + ".dir_mode"]) r->dir_mode = &cast (l); + if (auto l = s[*var + ".options"]) r->options = &cast (l); + + if (t != nullptr) + { + if (auto l = s[*var + ".subdirs"]) + { + if (cast (l)) + r = &resolve_subdir (rs, *t, s, l); + } + } + } + + // Set globals for unspecified components. 
+ // + if (r->sudo == nullptr) + r->sudo = cast_null (s["config.install.sudo"]); + + if (r->cmd == nullptr) + r->cmd = &cast (s["config.install.cmd"]); + + if (r->options == nullptr) + r->options = cast_null (s["config.install.options"]); + + if (r->mode == nullptr) + r->mode = &cast (s["config.install.mode"]); + + if (r->dir_mode == nullptr) + r->dir_mode = &cast (s["config.install.dir_mode"]); + + return rs; + } + + static inline install_dirs + resolve (const target& t, dir_path d, bool fail_unknown = true) + { + return resolve (t.base_scope (), &t, d, fail_unknown); + } + + dir_path + resolve_dir (const target& t, dir_path d, bool fail_unknown) + { + install_dirs r (resolve (t, move (d), fail_unknown)); + return r.empty () ? dir_path () : move (r.back ().dir); + } + + dir_path + resolve_dir (const scope& s, dir_path d, bool fail_unknown) + { + install_dirs r (resolve (s, nullptr, move (d), fail_unknown)); + return r.empty () ? dir_path () : move (r.back ().dir); + } + + path + resolve_file (const file& f) + { + // Note: similar logic to perform_install(). + // + const path* p (lookup_install (f, "install")); + + if (p == nullptr) // Not installable. + return path (); + + bool n (!p->to_directory ()); + dir_path d (n ? p->directory () : path_cast (*p)); + + install_dirs ids (resolve (f, d)); + + if (!n) + { + if (auto l = f["install.subdirs"]) + { + if (cast (l)) + resolve_subdir (ids, f, f.base_scope (), l); + } + } + + return ids.back ().dir / (n ? p->leaf () : f.path ().leaf ()); + } + + // On Windows we use MSYS2 install.exe and MSYS2 by default ignores + // filesystem permissions (noacl mount option). And this means, for + // example, that .exe that we install won't be runnable by Windows (MSYS2 + // itself will still run them since it recognizes the file extension). + // + // NOTE: this is no longer the case and we now use noacl (and acl causes + // other problems; see baseutils fstab for details). + // + // The way we work around this (at least in our distribution of the MSYS2 + // tools) is by changing the mount option for cygdrives (/c, /d, etc) to + // acl. But that's not all: we also have to install via a path that "hits" + // one of those mount points, c:\foo won't work, we have to use /c/foo. + // So this function translates an absolute Windows path to its MSYS + // representation. + // + // Note that we return the result as a string, not dir_path since path + // starting with / are illegal on Windows. Also note that the result + // doesn't have the trailing slash. + // + static string + msys_path (const dir_path& d) + { + assert (d.absolute ()); + string s (d.representation ()); + + // First replace ':' with the drive letter (so the path is no longer + // absolute) but postpone setting the first character to / until we are + // a string. + // + s[1] = lcase (s[0]); + s = dir_path (move (s)).posix_string (); + s[0] = '/'; + + return s; + } + + // Given an abolute path return its chroot'ed version, if any, accoring to + // install.chroot. + // + template + static inline P + chroot_path (const scope& rs, const P& p) + { + if (const dir_path* d = cast_null (rs["install.chroot"])) + { + dir_path r (p.root_directory ()); + assert (!r.empty ()); // Must be absolute. + + return *d / p.leaf (r); + } + + return p; + } + + // install -d
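+ //
+ // For example, with an installation root of, say, c:\projects\install on
+ // Windows, creating the bin/ subdirectory ends up running roughly:
+ //
+ //   install -d -m 755 /c/projects/install/bin
+ //
+ // with the directory translated to its MSYS form (see msys_path() above)
+ // and 755 standing in for the configured dir_mode.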
+ // + static void + install_d (const scope& rs, + const install_dir& base, + const dir_path& d, + bool verbose = true) + { + // Here is the problem: if this is a dry-run, then we will keep showing + // the same directory creation commands over and over again (because we + // don't actually create them). There are two alternative ways to solve + // this: actually create the directories or simply don't show anything. + // While we use the former approach during update (see mkdir() in + // filesystem), here it feels like we really shouldn't be touching the + // destination filesystem. Plus, not showing anything will be symmetric + // with uninstall since the directories won't be empty (because we don't + // actually uninstall any files). + // + if (dry_run) + return; + + dir_path chd (chroot_path (rs, d)); + + try + { + if (dir_exists (chd)) // May throw (e.g., EACCES). + return; + } + catch (const system_error& e) + { + fail << "invalid installation directory " << chd << ": " << e; + } + + // While install -d will create all the intermediate components between + // base and dir, we do it explicitly, one at a time. This way the output + // is symmetrical to uninstall() below. + // + // Note that if the chroot directory does not exist, then install -d + // will create it and we don't bother removing it. + // + if (d != base.dir) + { + dir_path pd (d.directory ()); + + if (pd != base.dir) + install_d (rs, base, pd, verbose); + } + + cstrings args; + + string reld ( + cast ((*global_scope)["build.host.class"]) == "windows" + ? msys_path (chd) + : relative (chd).string ()); + + if (base.sudo != nullptr) + args.push_back (base.sudo->c_str ()); + + args.push_back (base.cmd->string ().c_str ()); + args.push_back ("-d"); + + if (base.options != nullptr) + append_options (args, *base.options); + + args.push_back ("-m"); + args.push_back (base.dir_mode->c_str ()); + args.push_back (reld.c_str ()); + args.push_back (nullptr); + + process_path pp (run_search (args[0])); + + if (verb >= 2) + print_process (args); + else if (verb && verbose) + text << "install " << chd; + + run (pp, args); + } + + // install / + // install + // + static void + install_f (const scope& rs, + const install_dir& base, + const path& name, + const file& t, + const path& f, + bool verbose) + { + path relf (relative (f)); + + dir_path chd (chroot_path (rs, base.dir)); + + string reld ( + cast ((*global_scope)["build.host.class"]) == "windows" + ? msys_path (chd) + : relative (chd).string ()); + + if (!name.empty ()) + { + reld += path::traits_type::directory_separator; + reld += name.string (); + } + + cstrings args; + + if (base.sudo != nullptr) + args.push_back (base.sudo->c_str ()); + + args.push_back (base.cmd->string ().c_str ()); + + if (base.options != nullptr) + append_options (args, *base.options); + + args.push_back ("-m"); + args.push_back (base.mode->c_str ()); + args.push_back (relf.string ().c_str ()); + args.push_back (reld.c_str ()); + args.push_back (nullptr); + + process_path pp (run_search (args[0])); + + if (verb >= 2) + print_process (args); + else if (verb && verbose) + text << "install " << t; + + if (!dry_run) + run (pp, args); + } + + void file_rule:: + install_l (const scope& rs, + const install_dir& base, + const path& target, + const path& link, + bool verbose) + { + path rell (relative (chroot_path (rs, base.dir))); + rell /= link; + + // We can create a symlink directly without calling ln. This, however, + // won't work if we have sudo. 
Also, we would have to deal with existing + // destinations (ln's -f takes care of that). So we are just going to + // always use ln. + // + const char* args_a[] = { + base.sudo != nullptr ? base.sudo->c_str () : nullptr, + "ln", + "-sf", + target.string ().c_str (), + rell.string ().c_str (), + nullptr}; + + const char** args (&args_a[base.sudo == nullptr ? 1 : 0]); + + process_path pp (run_search (args[0])); + + if (verb >= 2) + print_process (args); + else if (verb && verbose) + text << "install " << rell << " -> " << target; + + if (!dry_run) + run (pp, args); + } + + target_state file_rule:: + perform_install (action a, const target& xt) const + { + const file& t (xt.as ()); + const path& tp (t.path ()); + + // Path should have been assigned by update unless it is unreal. + // + assert (!tp.empty () || t.mtime () == timestamp_unreal); + + const scope& rs (t.root_scope ()); + + auto install_target = [&rs, this] (const file& t, + const path& p, + bool verbose) + { + // Note: similar logic to resolve_file(). + // + bool n (!p.to_directory ()); + dir_path d (n ? p.directory () : path_cast (p)); + + // Resolve target directory. + // + install_dirs ids (resolve (t, d)); + + // Handle install.subdirs if one was specified. Unless the target path + // includes the file name in which case we assume it's a "final" path. + // + if (!n) + { + if (auto l = t["install.subdirs"]) + { + if (cast (l)) + resolve_subdir (ids, t, t.base_scope (), l); + } + } + + // Create leading directories. Note that we are using the leading + // directory (if there is one) for the creation information (mode, + // sudo, etc). + // + for (auto i (ids.begin ()), j (i); i != ids.end (); j = i++) + install_d (rs, *j, i->dir, verbose); // install -d + + install_dir& id (ids.back ()); + + // Override mode if one was specified. + // + if (auto l = t["install.mode"]) + id.mode = &cast (l); + + // Install the target. + // + auto_rmfile f (install_pre (t, id)); + + // If install_pre() returned a different file name, make sure we + // install it as the original. + // + const path& tp (t.path ()); + const path& fp (f.path); + + install_f ( + rs, + id, + n ? p.leaf () : fp.leaf () != tp.leaf () ? tp.leaf () : path (), + t, + f.path, + verbose); + + install_post (t, id, move (f)); + }; + + // First handle installable prerequisites. + // + target_state r (straight_execute_prerequisites (a, t)); + + // Then installable ad hoc group members, if any. + // + for (const target* m (t.member); m != nullptr; m = m->member) + { + if (const path* p = lookup_install (*m, "install")) + { + install_target (m->as (), *p, tp.empty () /* verbose */); + r |= target_state::changed; + } + } + + // Finally install the target itself (since we got here we know the + // install variable is there). + // + if (!tp.empty ()) + { + install_target (t, cast (t["install"]), true /* verbose */); + r |= target_state::changed; + } + + return r; + } + + // uninstall -d + // + // We try to remove all the directories between base and dir but not base + // itself unless base == dir. Return false if nothing has been removed + // (i.e., the directories do not exist or are not empty). + // + static bool + uninstall_d (const scope& rs, + const install_dir& base, + const dir_path& d, + bool verbose) + { + // See install_d() for the rationale. + // + if (dry_run) + return false; + + dir_path chd (chroot_path (rs, d)); + + // Figure out if we should try to remove this directory. Note that if + // it doesn't exist, then we may still need to remove outer ones. 
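+ // (That is, for base /usr/include and dir /usr/include/libfoo/impl, a
+ // missing impl/ should not prevent us from cleaning up an empty libfoo/.)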
+ // + bool r (false); + try + { + if ((r = dir_exists (chd))) // May throw (e.g., EACCES). + { + if (!dir_empty (chd)) // May also throw. + return false; // Won't be able to remove any outer directories. + } + } + catch (const system_error& e) + { + fail << "invalid installation directory " << chd << ": " << e; + } + + if (r) + { + dir_path reld (relative (chd)); + + // Normally when we need to remove a file or directory we do it + // directly without calling rm/rmdir. This however, won't work if we + // have sudo. So we are going to do it both ways. + // + // While there is no sudo on Windows, deleting things that are being + // used can get complicated. So we will always use rm/rmdir there. + // +#ifndef _WIN32 + if (base.sudo == nullptr) + { + if (verb >= 2) + text << "rmdir " << reld; + else if (verb && verbose) + text << "uninstall " << reld; + + try + { + try_rmdir (chd); + } + catch (const system_error& e) + { + fail << "unable to remove directory " << chd << ": " << e; + } + } + else +#endif + { + const char* args_a[] = { + base.sudo != nullptr ? base.sudo->c_str () : nullptr, + "rmdir", + reld.string ().c_str (), + nullptr}; + + const char** args (&args_a[base.sudo == nullptr ? 1 : 0]); + + process_path pp (run_search (args[0])); + + if (verb >= 2) + print_process (args); + else if (verb && verbose) + text << "uninstall " << reld; + + run (pp, args); + } + } + + // If we have more empty directories between base and dir, then try + // to clean them up as well. + // + if (d != base.dir) + { + dir_path pd (d.directory ()); + + if (pd != base.dir) + r = uninstall_d (rs, base, pd, verbose) || r; + } + + return r; + } + + bool file_rule:: + uninstall_f (const scope& rs, + const install_dir& base, + const file* t, + const path& name, + bool verbose) + { + assert (t != nullptr || !name.empty ()); + path f (chroot_path (rs, base.dir) / + (name.empty () ? t->path ().leaf () : name)); + + try + { + // Note: don't follow symlinks so if the target is a dangling symlinks + // we will proceed to removing it. + // + if (!file_exists (f, false)) // May throw (e.g., EACCES). + return false; + } + catch (const system_error& e) + { + fail << "invalid installation path " << f << ": " << e; + } + + path relf (relative (f)); + + if (verb == 1 && verbose) + { + if (t != nullptr) + text << "uninstall " << *t; + else + text << "uninstall " << relf; + } + + // The same story as with uninstall -d. + // +#ifndef _WIN32 + if (base.sudo == nullptr) + { + if (verb >= 2) + text << "rm " << relf; + + if (!dry_run) + { + try + { + try_rmfile (f); + } + catch (const system_error& e) + { + fail << "unable to remove file " << f << ": " << e; + } + } + } + else +#endif + { + const char* args_a[] = { + base.sudo != nullptr ? base.sudo->c_str () : nullptr, + "rm", + "-f", + relf.string ().c_str (), + nullptr}; + + const char** args (&args_a[base.sudo == nullptr ? 1 : 0]); + + process_path pp (run_search (args[0])); + + if (verb >= 2) + print_process (args); + + if (!dry_run) + run (pp, args); + } + + return true; + } + + target_state file_rule:: + perform_uninstall (action a, const target& xt) const + { + const file& t (xt.as ()); + const path& tp (t.path ()); + + // Path should have been assigned by update unless it is unreal. + // + assert (!tp.empty () || t.mtime () == timestamp_unreal); + + const scope& rs (t.root_scope ()); + + auto uninstall_target = [&rs, this] (const file& t, + const path& p, + bool verbose) -> target_state + { + bool n (!p.to_directory ()); + dir_path d (n ? 
p.directory () : path_cast (p)); + + // Resolve target directory. + // + install_dirs ids (resolve (t, d)); + + // Handle install.subdirs if one was specified. + // + if (!n) + { + if (auto l = t["install.subdirs"]) + { + if (cast (l)) + resolve_subdir (ids, t, t.base_scope (), l); + } + } + + // Remove extras and the target itself. + // + const install_dir& id (ids.back ()); + + target_state r (uninstall_extra (t, id) + ? target_state::changed + : target_state::unchanged); + + if (uninstall_f (rs, id, &t, n ? p.leaf () : path (), verbose)) + r |= target_state::changed; + + // Clean up empty leading directories (in reverse). + // + // Note that we are using the leading directory (if there is one) + // for the clean up information (sudo, etc). + // + for (auto i (ids.rbegin ()), j (i), e (ids.rend ()); i != e; j = ++i) + { + if (install::uninstall_d (rs, ++j != e ? *j : *i, i->dir, verbose)) + r |= target_state::changed; + } + + return r; + }; + + // Reverse order of installation: first the target itself (since we got + // here we know the install variable is there). + // + target_state r (target_state::unchanged); + + if (!tp.empty ()) + r |= uninstall_target (t, cast (t["install"]), true); + + // Then installable ad hoc group members, if any. To be anally precise + // we would have to do it in reverse, but that's not easy (it's a + // single-linked list). + // + for (const target* m (t.member); m != nullptr; m = m->member) + { + if (const path* p = lookup_install (*m, "install")) + r |= uninstall_target (m->as (), + *p, + tp.empty () || r != target_state::changed); + } + + // Finally handle installable prerequisites. + // + r |= reverse_execute_prerequisites (a, t); + + return r; + } + } +} diff --git a/libbuild2/install/rule.hxx b/libbuild2/install/rule.hxx new file mode 100644 index 0000000..ff99c6e --- /dev/null +++ b/libbuild2/install/rule.hxx @@ -0,0 +1,197 @@ +// file : libbuild2/install/rule.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_INSTALL_RULE_HXX +#define LIBBUILD2_INSTALL_RULE_HXX + +#include +#include + +#include +#include +#include +#include + +#include + +namespace build2 +{ + namespace install + { + class LIBBUILD2_SYMEXPORT alias_rule: public rule + { + public: + virtual bool + match (action, target&, const string&) const override; + + // Return NULL if this prerequisite should be ignored and pointer to its + // target otherwise. The default implementation accepts all prerequsites + // from the target's (weak) amalgamation. + // + // The prerequisite is passed as an iterator allowing the filter to + // "see" inside groups. + // + using prerequisite_iterator = + prerequisite_members_range::iterator; + + virtual const target* + filter (action, const target&, prerequisite_iterator&) const; + + virtual const target* + filter (action, const target&, const prerequisite&) const; + + virtual recipe + apply (action, target&) const override; + + alias_rule () {} + static const alias_rule instance; + }; + + class fsdir_rule: public rule + { + public: + virtual bool + match (action, target&, const string&) const override; + + virtual recipe + apply (action, target&) const override; + + fsdir_rule () {} + static const fsdir_rule instance; + }; + + // In addition to the alias rule's semantics, this rule sees through to + // the group's members. + // + // The default group_rule::instance matches any target for which it was + // registered. 
It is to be used for non-see-through groups that should + // exhibit the see-through behavior for install (see lib{} in the bin + // module for an example). + // + // We also register (for all targets) another instance of this rule that + // only matches see-through groups. + // + class LIBBUILD2_SYMEXPORT group_rule: public alias_rule + { + public: + virtual bool + match (action, target&, const string&) const override; + + // Return NULL if this group member should be ignored and pointer to its + // target otherwise. The default implementation accepts all members. + // + virtual const target* + filter (action, const target&, const target& group_member) const; + + using alias_rule::filter; // "Unhide" to make Clang happy. + + virtual recipe + apply (action, target&) const override; + + group_rule (bool see_through_only): see_through (see_through_only) {} + static const group_rule instance; + + bool see_through; + }; + + struct install_dir; + + class LIBBUILD2_SYMEXPORT file_rule: public rule + { + public: + virtual bool + match (action, target&, const string&) const override; + + // Return NULL if this prerequisite should be ignored and pointer to its + // target otherwise. The default implementation ignores prerequsites + // that are outside of this target's project. + // + // @@ I wonder why we do weak amalgamation for alias but project for + // file? And then override this for prerequisite libraries/modules + // in cc::install_rule and bash::install_rule... + // + // The prerequisite is passed as an iterator allowing the filter to + // "see" inside groups. + // + using prerequisite_iterator = + prerequisite_members_range::iterator; + + virtual const target* + filter (action, const target&, prerequisite_iterator&) const; + + virtual const target* + filter (action, const target&, const prerequisite&) const; + + virtual recipe + apply (action, target&) const override; + + static target_state + perform_update (action, const target&); + + // Extra un/installation hooks. Return true if anything was actually + // un/installed. + // + using install_dir = install::install_dir; // For derived rules. + + virtual bool + install_extra (const file&, const install_dir&) const; + + virtual bool + uninstall_extra (const file&, const install_dir&) const; + + // Lower-level pre/post installation hooks that can be used to override + // the source file path being installed (for example, to implement + // post-processing, etc). + // + // Note that one cannot generally perform post-processing in-place + // because of permissions. + // + virtual auto_rmfile + install_pre (const file&, const install_dir&) const; + + virtual bool + install_post (const file&, const install_dir&, auto_rmfile&&) const; + + // Installation/uninstallation "commands". + // + // If verbose is false, then only print the command at verbosity level 2 + // or higher. Note that these functions respect the dry_run flag. + + // Install a symlink: base/link -> target. + // + static void + install_l (const scope& rs, + const install_dir& base, + const path& target, + const path& link, + bool verbose); + + // Uninstall a file or symlink: + // + // uninstall / rm /.leaf (); name empty + // uninstall rm /; target can be NULL + // + // Return false if nothing has been removed (i.e., the file does not + // exist). 
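+ //
+ // (In the first form the name to remove is derived from the target's
+ // path (its leaf) and name is empty; in the second it is given
+ // explicitly and target may be NULL.)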
+ // + static bool + uninstall_f (const scope& rs, + const install_dir& base, + const file* target, + const path& name, + bool verbose); + + target_state + perform_install (action, const target&) const; + + target_state + perform_uninstall (action, const target&) const; + + static const file_rule instance; + file_rule () {} + }; + } +} + +#endif // LIBBUILD2_INSTALL_RULE_HXX diff --git a/libbuild2/install/utility.hxx b/libbuild2/install/utility.hxx new file mode 100644 index 0000000..13fcceb --- /dev/null +++ b/libbuild2/install/utility.hxx @@ -0,0 +1,78 @@ +// file : libbuild2/install/utility.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_INSTALL_UTILITY_HXX +#define LIBBUILD2_INSTALL_UTILITY_HXX + +#include +#include + +#include +#include + +#include + +namespace build2 +{ + namespace install + { + // Set install path, mode for a target type. + // + inline void + install_path (scope& s, const target_type& tt, dir_path d) + { + auto r ( + s.target_vars[tt]["*"].insert ( + var_pool.rw (s).insert ("install"))); + + if (r.second) // Already set by the user? + r.first.get () = path_cast (move (d)); + } + + template + inline void + install_path (scope& s, dir_path d) + { + return install_path (s, T::static_type, move (d)); + } + + inline void + install_mode (scope& s, const target_type& tt, string m) + { + auto r ( + s.target_vars[tt]["*"].insert ( + var_pool.rw (s).insert ("install.mode"))); + + if (r.second) // Already set by the user? + r.first.get () = move (m); + } + + template + inline void + install_mode (scope& s, string m) + { + return install_mode (s, T::static_type, move (m)); + } + + // Resolve relative installation directory path (e.g., include/libfoo) to + // its absolute directory path (e.g., /usr/include/libfoo). If the + // resolution encountered an unknown directory, issue diagnostics and fail + // unless fail_unknown is false, in which case return empty directory. + // + // Note: implemented in rule.cxx. + // + LIBBUILD2_SYMEXPORT dir_path + resolve_dir (const target&, dir_path, bool fail_unknown = true); + + LIBBUILD2_SYMEXPORT dir_path + resolve_dir (const scope&, dir_path, bool fail_unknown = true); + + // Resolve file installation path returning empty path if not installable. + // + LIBBUILD2_SYMEXPORT path + resolve_file (const file&); // rule.cxx + } +} + +#endif // LIBBUILD2_INSTALL_UTILITY_HXX diff --git a/libbuild2/test/common.cxx b/libbuild2/test/common.cxx new file mode 100644 index 0000000..11c5d90 --- /dev/null +++ b/libbuild2/test/common.cxx @@ -0,0 +1,220 @@ +// file : libbuild2/test/common.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include + +using namespace std; + +namespace build2 +{ + namespace test + { + // Determine if we have the target (first), id path (second), or both (in + // which case we also advance the iterator). + // + static pair + sense (names::const_iterator& i) + { + const name* tn (nullptr); + const name* pn (nullptr); + + if (i->pair) + { + tn = &*i++; + pn = &*i; + } + else + { + // If it has a type (exe{hello}) or a directory (basics/), then + // we assume it is a target. + // + (i->typed () || !i->dir.empty () ? tn : pn) = &*i; + } + + // Validate the target. + // + if (tn != nullptr) + { + if (tn->qualified ()) + fail << "project-qualified target '" << *tn << " in config.test"; + } + + // Validate the id path. 
+ // + if (pn != nullptr) + { + if (!pn->simple () || pn->empty ()) + fail << "invalid id path '" << *pn << " in config.test"; + } + + return make_pair (tn, pn); + } + + bool common:: + pass (const target& a) const + { + if (test_ == nullptr) + return true; + + // We need to "enable" aliases that "lead up" to the targets we are + // interested in. So see if any target is in a subdirectory of this + // alias. + // + // If we don't see any targets (e.g., only id paths), then we assume all + // targets match and therefore we always pass. + // + bool r (true); + + // Directory part from root to this alias (the same in src and out). + // + const dir_path d (a.out_dir ().leaf (root_->out_path ())); + + for (auto i (test_->begin ()); i != test_->end (); ++i) + { + if (const name* n = sense (i).first) + { + // Reset result to false if no match (but we have seen a target). + // + r = n->dir.sub (d); + + // See test() below for details on this special case. + // + if (!r && !n->typed ()) + r = d.sub (n->dir); + + if (r) + break; + } + } + + return r; + } + + bool common:: + test (const target& t) const + { + if (test_ == nullptr) + return true; + + // If we don't see any targets (e.g., only id paths), then we assume + // all of them match. + // + bool r (true); + + // Directory part from root to this alias (the same in src and out). + // + const dir_path d (t.out_dir ().leaf (root_->out_path ())); + const target_type& tt (t.type ()); + + for (auto i (test_->begin ()); i != test_->end (); ++i) + { + if (const name* n = sense (i).first) + { + // Reset result to false if no match (but we have seen a target). + // + + // When specifying a directory, for example, config.tests=tests/, + // one would intuitively expect that all the tests under it will + // run. But that's not what will happen with the below test: while + // the dir{tests/} itself will match, any target underneath won't. + // So we are going to handle this type if a target specially by + // making it match any target in or under it. + // + // Note that we only do this for tests/, not dir{tests/} since it is + // not always the semantics that one wants. Sometimes one may want + // to run tests (scripts) just for the tests/ target but not for any + // of its prerequisites. So dir{tests/} is a way to disable this + // special logic. + // + // Note: the same code as in test() below. + // + if (!n->typed ()) + r = d.sub (n->dir); + else + // First quickly and cheaply weed out names that cannot possibly + // match. Only then search for a target (as if it was a + // prerequisite), which can be expensive. + // + // We cannot specify an src target in config.test since we used + // the pair separator for ids. As a result, we search for both + // out and src targets. + // + r = + t.name == n->value && // Name matches. + tt.name == n->type && // Target type matches. + d == n->dir && // Directory matches. + (search_existing (*n, *root_) == &t || + search_existing (*n, *root_, d) == &t); + + if (r) + break; + } + } + + return r; + } + + bool common:: + test (const target& t, const path& id) const + { + if (test_ == nullptr) + return true; + + // If we don't see any id paths (e.g., only targets), then we assume + // all of them match. + // + bool r (true); + + // Directory part from root to this alias (the same in src and out). 
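+ // (For example, for a target in the project's tests/basics/ subdirectory
+ // the result is tests/basics/.)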
+ // + const dir_path d (t.out_dir ().leaf (root_->out_path ())); + const target_type& tt (t.type ()); + + for (auto i (test_->begin ()); i != test_->end (); ++i) + { + auto p (sense (i)); + + if (const name* n = p.second) + { + // If there is a target, check that it matches ours. + // + if (const name* n = p.first) + { + // Note: the same code as in test() above. + // + bool r; + + if (!n->typed ()) + r = d.sub (n->dir); + else + r = + t.name == n->value && + tt.name == n->type && + d == n->dir && + (search_existing (*n, *root_) == &t || + search_existing (*n, *root_, d) == &t); + + if (!r) + continue; // Not our target. + } + + // If the id (group) "leads up" to what we want to run or we + // (group) lead up to the id, then match. + // + const path p (n->value); + + // Reset result to false if no match (but we have seen an id path). + // + if ((r = p.sub (id) || id.sub (p))) + break; + } + } + + return r; + } + } +} diff --git a/libbuild2/test/common.hxx b/libbuild2/test/common.hxx new file mode 100644 index 0000000..5bb78ee --- /dev/null +++ b/libbuild2/test/common.hxx @@ -0,0 +1,72 @@ +// file : libbuild2/test/common.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_TEST_COMMON_HXX +#define LIBBUILD2_TEST_COMMON_HXX + +#include +#include + +#include + +namespace build2 +{ + namespace test + { + enum class output_before {fail, warn, clean}; + enum class output_after {clean, keep}; + + struct common_data + { + const variable& config_test; + const variable& config_test_output; + + const variable& var_test; + const variable& test_options; + const variable& test_arguments; + + const variable& test_stdin; + const variable& test_stdout; + const variable& test_roundtrip; + const variable& test_input; + + const variable& test_target; + }; + + struct common: common_data + { + // The config.test.output values. + // + output_before before = output_before::warn; + output_after after = output_after::clean; + + // The config.test query interface. + // + const names* test_ = nullptr; // The config.test value if any. + scope* root_ = nullptr; // The root scope for target resolution. + + // Return true if the specified alias target should pass-through to its + // prerequisites. + // + bool + pass (const target& alias_target) const; + + // Return true if the specified target should be tested. + // + bool + test (const target& test_target) const; + + // Return true if the specified target should be tested with the + // specified testscript test (or group). + // + bool + test (const target& test_target, const path& id_path) const; + + explicit + common (common_data&& d): common_data (move (d)) {} + }; + } +} + +#endif // LIBBUILD2_TEST_COMMON_HXX diff --git a/libbuild2/test/init.cxx b/libbuild2/test/init.cxx new file mode 100644 index 0000000..3d13acc --- /dev/null +++ b/libbuild2/test/init.cxx @@ -0,0 +1,231 @@ +// file : libbuild2/test/init.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include // script::regex::init() + +using namespace std; +using namespace butl; + +namespace build2 +{ + namespace test + { + bool + boot (scope& rs, const location&, unique_ptr& mod) + { + tracer trace ("test::boot"); + + l5 ([&]{trace << "for " << rs;}); + + // Register our operations. 
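+ // (This is what makes 'b test' and the explicit 'b update-for-test'
+ // available to projects that load this module.)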
+ // + rs.insert_operation (test_id, op_test); + rs.insert_operation (update_for_test_id, op_update_for_test); + + // Enter module variables. Do it during boot in case they get assigned + // in bootstrap.build. + // + auto& vp (var_pool.rw (rs)); + + common_data d { + + // Tests to execute. + // + // Specified as @ pairs with both sides being + // optional. The variable is untyped (we want a list of name-pairs), + // overridable, and inheritable. The target is relative (in essence a + // prerequisite) which is resolved from the (root) scope where the + // config.test value is defined. + // + vp.insert ("config.test", true), + + // Test working directory before/after cleanup (see Testscript spec + // for semantics). + // + vp.insert ("config.test.output", true), + + // The test variable is a name which can be a path (with the + // true/false special values) or a target name. + // + // Note: none are overridable. + // + vp.insert ("test", variable_visibility::target), + vp.insert ("test.options", variable_visibility::project), + vp.insert ("test.arguments", variable_visibility::project), + + // Prerequisite-specific. + // + // test.stdin and test.stdout can be used to mark a prerequisite as a + // file to redirect stdin from and to compare stdout to, respectively. + // test.roundtrip is a shortcut to mark a prerequisite as both stdin + // and stdout. + // + // Prerequisites marked with test.input are treated as additional test + // inputs: they are made sure to be up to date and their paths are + // passed as additional command line arguments (after test.options and + // test.arguments). Their primary use is to pass inputs that may have + // varying file names/paths, for example: + // + // exe{parent}: exe{child}: test.input = true + // + // Note that currently this mechanism is only available to simple + // tests though we could also support it for testscript (e.g., by + // appending the input paths to test.arguments or by passing them in a + // separate test.inputs variable). + // + vp.insert ("test.stdin", variable_visibility::prereq), + vp.insert ("test.stdout", variable_visibility::prereq), + vp.insert ("test.roundtrip", variable_visibility::prereq), + vp.insert ("test.input", variable_visibility::prereq), + + // Test target platform. + // + vp.insert ("test.target", variable_visibility::project) + }; + + // These are only used in testscript. + // + vp.insert ("test.redirects", variable_visibility::project); + vp.insert ("test.cleanups", variable_visibility::project); + + // Unless already set, default test.target to build.host. Note that it + // can still be overriden by the user, e.g., in root.build. + // + { + value& v (rs.assign (d.test_target)); + + if (!v || v.empty ()) + v = cast ((*global_scope)["build.host"]); + } + + mod.reset (new module (move (d))); + return false; + } + + bool + init (scope& rs, + scope&, + const location& l, + unique_ptr& mod, + bool first, + bool, + const variable_map& config_hints) + { + tracer trace ("test::init"); + + if (!first) + { + warn (l) << "multiple test module initializations"; + return true; + } + + const dir_path& out_root (rs.out_path ()); + l5 ([&]{trace << "for " << out_root;}); + + assert (mod != nullptr); + module& m (static_cast (*mod)); + + // Configure. + // + assert (config_hints.empty ()); // We don't known any hints. + + // Adjust module priority so that the config.test.* values are saved at + // the end of config.build. 
+ // + config::save_module (rs, "test", INT32_MAX); + + // config.test + // + if (lookup l = config::omitted (rs, m.config_test).first) + { + // Figure out which root scope it came from. + // + scope* s (&rs); + for (; + s != nullptr && !l.belongs (*s); + s = s->parent_scope ()->root_scope ()) + assert (s != nullptr); + + m.test_ = &cast (l); + m.root_ = s; + } + + // config.test.output + // + if (lookup l = config::omitted (rs, m.config_test_output).first) + { + const name_pair& p (cast (l)); + + // If second half is empty, then first is the after value. + // + const name& a (p.second.empty () ? p.first : p.second); // after + const name& b (p.second.empty () ? p.second : p.first); // before + + // Parse and validate. + // + if (!b.simple ()) + fail << "invalid config.test.output before value '" << b << "'"; + + if (!a.simple ()) + fail << "invalid config.test.output after value '" << a << "'"; + + if (a.value == "clean") m.after = output_after::clean; + else if (a.value == "keep") m.after = output_after::keep; + else fail << "invalid config.test.output after value '" << a << "'"; + + if (b.value == "fail") m.before = output_before::fail; + else if (b.value == "warn") m.before = output_before::warn; + else if (b.value == "clean") m.before = output_before::clean; + else if (b.value == "") m.before = output_before::clean; + else fail << "invalid config.test.output before value '" << b << "'"; + } + + //@@ TODO: Need ability to specify extra diff options (e.g., + // --strip-trailing-cr, now hardcoded). + // + //@@ TODO: Pring report. + + // Register target types. + // + { + auto& t (rs.target_types); + + auto& tt (t.insert ()); + t.insert_file ("testscript", tt); + } + + // Register our test running rule. + // + { + default_rule& dr (m); + + rs.rules.insert (perform_test_id, "test", dr); + rs.rules.insert (perform_test_id, "test", dr); + } + + return true; + } + + module_functions + build2_test_load () + { + script::regex::init (); + + return module_functions {&boot, &init}; + } + } +} diff --git a/libbuild2/test/init.hxx b/libbuild2/test/init.hxx new file mode 100644 index 0000000..a76b720 --- /dev/null +++ b/libbuild2/test/init.hxx @@ -0,0 +1,36 @@ +// file : libbuild2/test/init.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_TEST_INIT_HXX +#define LIBBUILD2_TEST_INIT_HXX + +#include +#include + +#include + +#include + +namespace build2 +{ + namespace test + { + bool + boot (scope&, const location&, unique_ptr&); + + bool + init (scope&, + scope&, + const location&, + unique_ptr&, + bool, + bool, + const variable_map&); + + extern "C" LIBBUILD2_SYMEXPORT module_functions + build2_test_load (); + } +} + +#endif // LIBBUILD2_TEST_INIT_HXX diff --git a/libbuild2/test/module.hxx b/libbuild2/test/module.hxx new file mode 100644 index 0000000..584cb84 --- /dev/null +++ b/libbuild2/test/module.hxx @@ -0,0 +1,37 @@ +// file : libbuild2/test/module.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_TEST_MODULE_HXX +#define LIBBUILD2_TEST_MODULE_HXX + +#include +#include + +#include + +#include +#include + +namespace build2 +{ + namespace test + { + struct module: module_base, virtual common, default_rule, group_rule + { + const test::group_rule& + group_rule () const + { + return *this; + } + + explicit + module (common_data&& d) + : common (move (d)), + test::default_rule (move (d)), + test::group_rule (move 
(d)) {} + }; + } +} + +#endif // LIBBUILD2_TEST_MODULE_HXX diff --git a/libbuild2/test/operation.cxx b/libbuild2/test/operation.cxx new file mode 100644 index 0000000..3ff7702 --- /dev/null +++ b/libbuild2/test/operation.cxx @@ -0,0 +1,55 @@ +// file : libbuild2/test/operation.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +using namespace std; +using namespace butl; + +namespace build2 +{ + namespace test + { + static operation_id + test_pre (const values& params, meta_operation_id mo, const location& l) + { + if (!params.empty ()) + fail (l) << "unexpected parameters for operation test"; + + // Run update as a pre-operation, unless we are disfiguring. + // + return mo != disfigure_id ? update_id : 0; + } + + const operation_info op_test { + test_id, + 0, + "test", + "test", + "testing", + "tested", + "has nothing to test", // We cannot "be tested". + execution_mode::first, + 1, + &test_pre, + nullptr + }; + + // Also the explicit update-for-test operation alias. + // + const operation_info op_update_for_test { + update_id, // Note: not update_for_test_id. + test_id, + op_update.name, + op_update.name_do, + op_update.name_doing, + op_update.name_did, + op_update.name_done, + op_update.mode, + op_update.concurrency, + op_update.pre, + op_update.post + }; + } +} diff --git a/libbuild2/test/operation.hxx b/libbuild2/test/operation.hxx new file mode 100644 index 0000000..8a9aed7 --- /dev/null +++ b/libbuild2/test/operation.hxx @@ -0,0 +1,22 @@ +// file : libbuild2/test/operation.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_TEST_OPERATION_HXX +#define LIBBUILD2_TEST_OPERATION_HXX + +#include +#include + +#include + +namespace build2 +{ + namespace test + { + extern const operation_info op_test; + extern const operation_info op_update_for_test; + } +} + +#endif // LIBBUILD2_TEST_OPERATION_HXX diff --git a/libbuild2/test/rule.cxx b/libbuild2/test/rule.cxx new file mode 100644 index 0000000..a6796b4 --- /dev/null +++ b/libbuild2/test/rule.cxx @@ -0,0 +1,882 @@ +// file : libbuild2/test/rule.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +using namespace std; +using namespace butl; + +namespace build2 +{ + namespace test + { + bool rule:: + match (action, target&, const string&) const + { + // We always match, even if this target is not testable (so that we can + // ignore it; see apply()). + // + return true; + } + + recipe rule:: + apply (action a, target& t) const + { + // Note that we are called both as the outer part during the update-for- + // test pre-operation and as the inner part during the test operation + // itself. + // + // In both cases we first determine if the target is testable and return + // noop if it's not. Otherwise, in the first case (update for test) we + // delegate to the normal update and in the second (test) -- perform the + // test. + // + // And to add a bit more complexity, we want to handle aliases slightly + // differently: we may not want to ignore their prerequisites if the + // alias is not testable since their prerequisites could be. 
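+ // (Think dir{tests/} that is not itself testable but whose subdirectory
+ // aliases are.)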
+ // + // Here is the state matrix: + // + // test'able | pass'able | neither + // | | + // update-for-test delegate (& pass) | pass | noop + // ---------------------------------------+-------------+--------- + // test test (& pass) | pass | noop + // + auto& pts (t.prerequisite_targets[a]); + + // Resolve group members. + // + if (!see_through || t.type ().see_through) + { + // Remember that we are called twice: first during update for test + // (pre-operation) and then during test. During the former, we rely on + // the normall update rule to resolve the group members. During the + // latter, there will be no rule to do this but the group will already + // have been resolved by the pre-operation. + // + // If the rule could not resolve the group, then we ignore it. + // + group_view gv (a.outer () + ? resolve_members (a, t) + : t.group_members (a)); + + if (gv.members != nullptr) + { + for (size_t i (0); i != gv.count; ++i) + { + if (const target* m = gv.members[i]) + pts.push_back (m); + } + + match_members (a, t, pts); + } + } + + // If we are passing-through, then match our prerequisites. + // + if (t.is_a () && pass (t)) + { + // For the test operation we have to implement our own search and + // match because we need to ignore prerequisites that are outside of + // our project. They can be from projects that don't use the test + // module (and thus won't have a suitable rule). Or they can be from + // no project at all (e.g., installed). Also, generally, not testing + // stuff that's not ours seems right. + // + match_prerequisites (a, t, t.root_scope ()); + } + + size_t pass_n (pts.size ()); // Number of pass-through prerequisites. + + // See if it's testable and if so, what kind. + // + bool test (false); + bool script (false); + + if (this->test (t)) + { + // We have two very different cases: testscript and simple test (plus + // it may not be a testable target at all). So as the first step + // determine which case this is. + // + // If we have any prerequisites of the testscript{} type, then this is + // the testscript case. + // + // If we can, go inside see-through groups. Normally groups won't be + // resolvable for this action but then normally they won't contain any + // testscripts either. In other words, if there is a group that + // contains testscripts as members then it will need to arrange for + // the members to be resolvable (e.g., by registering an appropriate + // rule for the test operation). + // + for (prerequisite_member p: + group_prerequisite_members (a, t, members_mode::maybe)) + { + if (include (a, t, p) != include_type::normal) // Excluded/ad hoc. + continue; + + if (p.is_a ()) + { + if (!script) + { + script = true; + + // We treat this target as testable unless the test variable is + // explicitly set to false. + // + const name* n (cast_null (t[var_test])); + test = (n == nullptr || !n->simple () || n->value != "false"); + + if (!test) + break; + } + + // Collect testscripts after the pass-through prerequisites. + // + const target& pt (p.search (t)); + + // Note that for the test operation itself we don't match nor + // execute them relying on update to assign their paths. + // + // Causing update for test inputs/scripts is tricky: we cannot + // match for update-for-install because this same rule will match + // and since the target is not testable, it will return the noop + // recipe. + // + // So what we are going to do is directly match (and also execute; + // see below) a recipe for the inner update (who thought we could + // do that... 
but it seems we can). While at first it might feel + // iffy, it does make sense: the outer rule we would have matched + // would have simply delegated to the inner so we might as well + // take a shortcut. The only potential drawback of this approach + // is that we won't be able to provide any for-test customizations + // when updating test inputs/scripts. But such a need seems rather + // far fetched. + // + if (a.operation () == update_id) + match_inner (a, pt); + + pts.push_back (&pt); + } + } + + // If this is not a script, then determine if it is a simple test. + // Ignore testscript files themselves at the outset. + // + if (!script && !t.is_a ()) + { + // For the simple case whether this is a test is controlled by the + // test variable. Also, it feels redundant to specify, say, "test = + // true" and "test.stdout = test.out" -- the latter already says this + // is a test. + // + const name* n (cast_null (t[var_test])); + + // If the test variable is explicitly set to false then we treat + // it as not testable regardless of what other test.* variables + // or prerequisites we might have. + // + // Note that the test variable can be set to an "override" target + // (which means 'true' for our purposes). + // + if (n != nullptr && n->simple () && n->value == "false") + test = false; + else + { + // Look for test input/stdin/stdout prerequisites. The same group + // reasoning as in the testscript case above. + // + for (prerequisite_member p: + group_prerequisite_members (a, t, members_mode::maybe)) + { + const auto& vars (p.prerequisite.vars); + + if (vars.empty ()) // Common case. + continue; + + if (include (a, t, p) != include_type::normal) // Excluded/ad hoc. + continue; + + bool rt ( cast_false (vars[test_roundtrip])); + bool si (rt || cast_false (vars[test_stdin])); + bool so (rt || cast_false (vars[test_stdout])); + bool in ( cast_false (vars[test_input])); + + if (si || so || in) + { + // Verify it is file-based. + // + if (!p.is_a ()) + { + fail << "test." << (si ? "stdin" : so ? "stdout" : "input") + << " prerequisite " << p << " of target " << t + << " is not a file"; + } + + if (!test) + { + test = true; + + // First matching prerequisite. Establish the structure in + // pts: the first element (after pass_n) is stdin (can be + // NULL), the second is stdout (can be NULL), and everything + // after that (if any) is inputs. + // + pts.push_back (nullptr); // stdin + pts.push_back (nullptr); // stdout + } + + // Collect them after the pass-through prerequisites. + // + // Note that for the test operation itself we don't match nor + // execute them relying on update to assign their paths. + // + auto match = [a, &p, &t] () -> const target* + { + const target& pt (p.search (t)); + + // The same match_inner() rationale as for the testcript + // prerequisites above. + // + if (a.operation () == update_id) + match_inner (a, pt); + + return &pt; + }; + + if (si) + { + if (pts[pass_n] != nullptr) + fail << "multiple test.stdin prerequisites for target " + << t; + + pts[pass_n] = match (); + } + + if (so) + { + if (pts[pass_n + 1] != nullptr) + fail << "multiple test.stdout prerequisites for target " + << t; + + pts[pass_n + 1] = match (); + } + + if (in) + pts.push_back (match ()); + } + } + + if (!test) + test = (n != nullptr); // We have the test variable. + + if (!test) + test = t[test_options] || t[test_arguments]; + } + } + } + + // Neither testing nor passing-through. 
+ // + if (!test && pass_n == 0) + return noop_recipe; + + // If we are only passing-through, then use the default recipe (which + // will execute all the matched prerequisites). + // + if (!test) + return default_recipe; + + // Being here means we are definitely testing and maybe passing-through. + // + if (a.operation () == update_id) + { + // For the update pre-operation match the inner rule (actual update). + // + match_inner (a, t); + + return [pass_n] (action a, const target& t) + { + return perform_update (a, t, pass_n); + }; + } + else + { + if (script) + { + return [pass_n, this] (action a, const target& t) + { + return perform_script (a, t, pass_n); + }; + } + else + { + return [pass_n, this] (action a, const target& t) + { + return perform_test (a, t, pass_n); + }; + } + } + } + + target_state rule:: + perform_update (action a, const target& t, size_t pass_n) + { + // First execute the inner recipe then execute prerequisites. + // + target_state ts (execute_inner (a, t)); + + if (pass_n != 0) + ts |= straight_execute_prerequisites (a, t, pass_n); + + ts |= straight_execute_prerequisites_inner (a, t, 0, pass_n); + + return ts; + } + + static script::scope_state + perform_script_impl (const target& t, + const testscript& ts, + const dir_path& wd, + const common& c) + { + using namespace script; + + scope_state r; + + try + { + build2::test::script::script s (t, ts, wd); + + { + parser p; + p.pre_parse (s); + + default_runner r (c); + p.execute (s, r); + } + + r = s.state; + } + catch (const failed&) + { + r = scope_state::failed; + } + + return r; + } + + target_state rule:: + perform_script (action a, const target& t, size_t pass_n) const + { + // First pass through. + // + if (pass_n != 0) + straight_execute_prerequisites (a, t, pass_n); + + // Figure out whether the testscript file is called 'testscript', in + // which case it should be the only one. + // + auto& pts (t.prerequisite_targets[a]); + size_t pts_n (pts.size ()); + + bool one; + { + optional o; + for (size_t i (pass_n); i != pts_n; ++i) + { + const testscript& ts (*pts[i]->is_a ()); + + bool r (ts.name == "testscript"); + + if ((r && o) || (!r && o && *o)) + fail << "both 'testscript' and other names specified for " << t; + + o = r; + } + + assert (o); // We should have a testscript or we wouldn't be here. + one = *o; + } + + // Calculate root working directory. It is in the out_base of the target + // and is called just test for dir{} targets and test- for + // other targets. + // + dir_path wd (t.out_dir ()); + + if (t.is_a ()) + wd /= "test"; + else + wd /= "test-" + t.name; + + // Are we backlinking the test working directory to src? (See + // backlink_*() in algorithm.cxx for details.) + // + const scope& bs (t.base_scope ()); + const scope& rs (*bs.root_scope ()); + const path& buildignore_file (rs.root_extra->buildignore_file); + + dir_path bl; + if (cast_false (rs.vars[var_forwarded])) + { + bl = bs.src_path () / wd.leaf (bs.out_path ()); + clean_backlink (bl, verb_never); + } + + // If this is a (potentially) multi-testscript test, then create (and + // later cleanup) the root directory. If this is just 'testscript', then + // the root directory is used directly as test's working directory and + // it's the runner's responsibility to create and clean it up. + // + // Note that we create the root directory containing the .buildignore + // file to make sure that it is ignored by name patterns (see the + // buildignore description for details). + // + // What should we do if the directory already exists? 
We used to fail + // which meant the user had to go and clean things up manually every + // time a test failed. This turned out to be really annoying. So now we + // issue a warning and clean it up automatically. The drawbacks of this + // approach are the potential loss of data from the previous failed test + // run and the possibility of deleting user-created files. + // + if (exists (static_cast (wd), false)) + fail << "working directory " << wd << " is a file/symlink"; + + if (exists (wd)) + { + if (before != output_before::clean) + { + bool fail (before == output_before::fail); + + (fail ? error : warn) << "working directory " << wd << " exists " + << (empty_buildignore (wd, buildignore_file) + ? "" + : "and is not empty ") + << "at the beginning of the test"; + + if (fail) + throw failed (); + } + + // Remove the directory itself not to confuse the runner which tries + // to detect when tests stomp on each others feet. + // + build2::rmdir_r (wd, true, 2); + } + + // Delay actually creating the directory in case all the tests are + // ignored (via config.test). + // + bool mk (!one); + + // Start asynchronous execution of the testscripts. + // + wait_guard wg; + + if (!dry_run) + wg = wait_guard (target::count_busy (), t[a].task_count); + + // Result vector. + // + using script::scope_state; + + vector res; + res.reserve (pts_n - pass_n); // Make sure there are no reallocations. + + for (size_t i (pass_n); i != pts_n; ++i) + { + const testscript& ts (*pts[i]->is_a ()); + + // If this is just the testscript, then its id path is empty (and it + // can only be ignored by ignoring the test target, which makes sense + // since it's the only testscript file). + // + if (one || test (t, path (ts.name))) + { + // Because the creation of the output directory is shared between us + // and the script implementation (plus the fact that we actually + // don't clean the existing one), we are going to ignore it for + // dry-run. + // + if (!dry_run) + { + if (mk) + { + mkdir_buildignore (wd, buildignore_file, 2); + mk = false; + } + } + + if (verb) + { + diag_record dr (text); + dr << "test " << ts; + + if (!t.is_a ()) + dr << ' ' << t; + } + + res.push_back (dry_run ? scope_state::passed : scope_state::unknown); + + if (!dry_run) + { + scope_state& r (res.back ()); + + if (!sched.async (target::count_busy (), + t[a].task_count, + [this] (const diag_frame* ds, + scope_state& r, + const target& t, + const testscript& ts, + const dir_path& wd) + { + diag_frame::stack_guard dsg (ds); + r = perform_script_impl (t, ts, wd, *this); + }, + diag_frame::stack (), + ref (r), + cref (t), + cref (ts), + cref (wd))) + { + // Executed synchronously. If failed and we were not asked to + // keep going, bail out. + // + if (r == scope_state::failed && !keep_going) + break; + } + } + } + } + + if (!dry_run) + wg.wait (); + + // Re-examine. + // + bool bad (false); + for (scope_state r: res) + { + switch (r) + { + case scope_state::passed: break; + case scope_state::failed: bad = true; break; + case scope_state::unknown: assert (false); + } + + if (bad) + break; + } + + // Cleanup. + // + if (!dry_run) + { + if (!bad && !one && !mk && after == output_after::clean) + { + if (!empty_buildignore (wd, buildignore_file)) + fail << "working directory " << wd << " is not empty at the " + << "end of the test"; + + rmdir_buildignore (wd, buildignore_file, 2); + } + } + + // Backlink if the working directory exists. 
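+ // (That is, mirror the test or test-<name> working directory into src in
+ // a forwarded configuration; see clean_backlink() above and
+ // update_backlink() below.)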
+ // + // If we dry-run then presumably all tests passed and we shouldn't + // have anything left unless we are keeping the output. + // + if (!bl.empty () && (dry_run ? after == output_after::keep : exists (wd))) + update_backlink (wd, bl, true /* changed */); + + if (bad) + throw failed (); + + return target_state::changed; + } + + // The format of args shall be: + // + // name1 arg arg ... nullptr + // name2 arg arg ... nullptr + // ... + // nameN arg arg ... nullptr nullptr + // + static bool + run_test (const target& t, + diag_record& dr, + char const** args, + process* prev = nullptr) + { + // Find the next process, if any. + // + char const** next (args); + for (next++; *next != nullptr; next++) ; + next++; + + // Redirect stdout to a pipe unless we are last. + // + int out (*next != nullptr ? -1 : 1); + bool pr; + process_exit pe; + + try + { + process p (prev == nullptr + ? process (args, 0, out) // First process. + : process (args, *prev, out)); // Next process. + + pr = *next == nullptr || run_test (t, dr, next, &p); + p.wait (); + + assert (p.exit); + pe = *p.exit; + } + catch (const process_error& e) + { + error << "unable to execute " << args[0] << ": " << e; + + if (e.child) + exit (1); + + throw failed (); + } + + bool wr (pe.normal () && pe.code () == 0); + + if (!wr) + { + if (pr) // First failure? + dr << fail << "test " << t << " failed"; // Multi test: test 1. + + dr << error; + print_process (dr, args); + dr << " " << pe; + } + + return pr && wr; + } + + target_state rule:: + perform_test (action a, const target& tt, size_t pass_n) const + { + // First pass through. + // + if (pass_n != 0) + straight_execute_prerequisites (a, tt, pass_n); + + // See if we have the test executable override. + // + path p; + { + // Note that the test variable's visibility is target. + // + lookup l (tt[var_test]); + + // Note that we have similar code for scripted tests. + // + const target* t (nullptr); + + if (l.defined ()) + { + const name* n (cast_null (l)); + + if (n == nullptr) + fail << "invalid test executable override: null value"; + else if (n->empty ()) + fail << "invalid test executable override: empty value"; + else if (n->simple ()) + { + // Ignore the special 'true' value. + // + if (n->value != "true") + p = path (n->value); + else + t = &tt; + } + else if (n->directory ()) + fail << "invalid test executable override: '" << *n << "'"; + else + { + // Must be a target name. + // + // @@ OUT: what if this is a @-qualified pair of names? + // + t = search_existing (*n, tt.base_scope ()); + + if (t == nullptr) + fail << "invalid test executable override: unknown target: '" + << *n << "'"; + } + } + else + // By default we set it to the test target's path. + // + t = &tt; + + if (t != nullptr) + { + if (auto* pt = t->is_a ()) + { + // Do some sanity checks: the target better be up-to-date with + // an assigned path. + // + p = pt->path (); + + if (p.empty ()) + fail << "target " << *pt << " specified in the test variable " + << "is out of date" << + info << "consider specifying it as a prerequisite of " << tt; + } + else + fail << "target " << *t << (t != &tt + ? " specified in the test variable " + : " requested to be tested ") + << "is not path-based"; + } + } + + // See apply() for the structure of prerequisite_targets in the presence + // of test.{input,stdin,stdout}. + // + auto& pts (tt.prerequisite_targets[a]); + size_t pts_n (pts.size ()); + + cstrings args; + + // Do we have stdin? 
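+ // (That is, a test.stdin or test.roundtrip prerequisite, which apply()
+ // above stored in slot pass_n of prerequisite_targets; it is fed to the
+ // test via a cat-style pipe set up below.)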
+ // + // We simulate stdin redirect (as ()); + const path& ip (it.path ()); + assert (!ip.empty ()); // Should have been assigned by update. + + cat = process (process_exit (0)); // Successfully exited. + + if (!dry_run) + { + try + { + cat.in_ofd = fdopen (ip, fdopen_mode::in); + } + catch (const io_error& e) + { + fail << "unable to open " << ip << ": " << e; + } + } + + // Purely for diagnostics. + // + args.push_back ("cat"); + args.push_back (ip.string ().c_str ()); + args.push_back (nullptr); + } + + // If dry-run, the target may not exist. + // + process_path pp (!dry_run + ? run_search (p, true /* init */) + : try_run_search (p, true)); + args.push_back (pp.empty () ? p.string ().c_str () : pp.recall_string ()); + + // Do we have options and/or arguments? + // + if (auto l = tt[test_options]) + append_options (args, cast (l)); + + if (auto l = tt[test_arguments]) + append_options (args, cast (l)); + + // Do we have inputs? + // + for (size_t i (pass_n + 2); i < pts_n; ++i) + { + const file& it (pts[i]->as ()); + const path& ip (it.path ()); + assert (!ip.empty ()); // Should have been assigned by update. + args.push_back (ip.string ().c_str ()); + } + + args.push_back (nullptr); + + // Do we have stdout? + // + path dp ("diff"); + process_path dpp; + if (pass_n != pts_n && pts[pass_n + 1] != nullptr) + { + const file& ot (pts[pass_n + 1]->as ()); + const path& op (ot.path ()); + assert (!op.empty ()); // Should have been assigned by update. + + dpp = run_search (dp, true); + + args.push_back (dpp.recall_string ()); + args.push_back ("-u"); + + // Note that MinGW-built diff utility (as of 3.3) fails trying to + // detect if stdin contains text or binary data. We will help it a bit + // to workaround the issue. + // +#ifdef _WIN32 + args.push_back ("--text"); +#endif + + // Ignore Windows newline fluff if that's what we are running on. + // + if (cast (tt[test_target]).class_ == "windows") + args.push_back ("--strip-trailing-cr"); + + args.push_back (op.string ().c_str ()); + args.push_back ("-"); + args.push_back (nullptr); + } + + args.push_back (nullptr); // Second. + + if (verb >= 2) + print_process (args); + else if (verb) + text << "test " << tt; + + if (!dry_run) + { + diag_record dr; + if (!run_test (tt, + dr, + args.data () + (sin ? 3 : 0), // Skip cat. + sin ? 
&cat : nullptr)) + { + dr << info << "test command line: "; + print_process (dr, args); + dr << endf; // return + } + } + + return target_state::changed; + } + } +} diff --git a/libbuild2/test/rule.hxx b/libbuild2/test/rule.hxx new file mode 100644 index 0000000..7837074 --- /dev/null +++ b/libbuild2/test/rule.hxx @@ -0,0 +1,67 @@ +// file : libbuild2/test/rule.hxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#ifndef LIBBUILD2_TEST_RULE_HXX +#define LIBBUILD2_TEST_RULE_HXX + +#include +#include + +#include +#include + +#include + +namespace build2 +{ + namespace test + { + class rule: public build2::rule, protected virtual common + { + public: + virtual bool + match (action, target&, const string&) const override; + + virtual recipe + apply (action, target&) const override; + + static target_state + perform_update (action, const target&, size_t); + + target_state + perform_test (action, const target&, size_t) const; + + target_state + perform_script (action, const target&, size_t) const; + + rule (common_data&& d, bool see_through_only) + : common (move (d)), see_through (see_through_only) {} + + bool see_through; + }; + + class default_rule: public rule + { + public: + explicit + default_rule (common_data&& d) + : common (move (d)), + rule (move (d), true /* see_through_only */) {} + }; + + // To be used for non-see-through groups that should exhibit the see- + // through behavior for install (see lib{} in the bin module for an + // example). + // + class group_rule: public rule + { + public: + explicit + group_rule (common_data&& d) + : common (move (d)), rule (move (d), false /* see_through_only */) {} + }; + } +} + +#endif // LIBBUILD2_TEST_RULE_HXX diff --git a/libbuild2/test/script/builtin.cxx b/libbuild2/test/script/builtin.cxx new file mode 100644 index 0000000..ab57d4f --- /dev/null +++ b/libbuild2/test/script/builtin.cxx @@ -0,0 +1,1979 @@ +// file : libbuild2/test/script/builtin.cxx -*- C++ -*- +// copyright : Copyright (c) 2014-2019 Code Synthesis Ltd +// license : MIT; see accompanying LICENSE file + +#include + +#include +#include +#include +#include +#include // strtoull() + +#include +#include // use default operator<< implementation +#include // fdopen_mode, fdstream_mode +#include + +#include // sched + +#include + +// Strictly speaking a builtin which reads/writes from/to standard streams +// must be asynchronous so that the caller can communicate with it through +// pipes without being blocked on I/O operations. However, as an optimization, +// we allow builtins that only print diagnostics to STDERR to be synchronous +// assuming that their output will always fit the pipe buffer. Synchronous +// builtins must not read from STDIN and write to STDOUT. Later we may relax +// this rule to allow a "short" output for such builtins. +// +using namespace std; +using namespace butl; + +namespace build2 +{ + namespace test + { + namespace script + { + using builtin_impl = uint8_t (scope&, + const strings& args, + auto_fd in, auto_fd out, auto_fd err); + + // Operation failed, diagnostics has already been issued. + // + struct failed {}; + + // Accumulate an error message, print it atomically in dtor to the + // provided stream and throw failed afterwards if requested. Prefixes + // the message with the builtin name. + // + // Move constructible-only, not assignable (based to diag_record). 
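The builtin_impl signature introduced above takes the script scope, the argument list, and the three file descriptors, returns the exit status as uint8_t, and (per the synchronous/asynchronous note) a synchronous builtin may only write diagnostics to the error descriptor. The following is a deliberately simplified standard-C++ analogue of that contract — iostreams instead of auto_fd and no scope parameter — meant only to show the shape of such a function, not the real interface.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Simplified builtin contract: arguments in, diagnostics to err, exit
// status out. A synchronous builtin must not read in or write out.
//
using mini_builtin = std::uint8_t (const std::vector<std::string>& args,
                                   std::istream& in,
                                   std::ostream& out,
                                   std::ostream& err);

// Hypothetical mkdir-like builtin that only validates its arguments.
//
static std::uint8_t
mkdir_like (const std::vector<std::string>& args,
            std::istream&, std::ostream&, std::ostream& err)
{
  if (args.empty ())
  {
    err << "mkdir: missing directory" << std::endl;
    return 1;
  }

  // ... create the directories here ...

  return 0;
}

int
main ()
{
  mini_builtin* b (&mkdir_like);
  return b ({}, std::cin, std::cout, std::cerr); // Prints the diagnostics.
}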
+ // + class error_record + { + public: + template + friend const error_record& + operator<< (const error_record& r, const T& x) + { + r.ss_ << x; + return r; + } + + error_record (ostream& o, bool fail, const char* name) + : os_ (o), fail_ (fail), empty_ (false) + { + ss_ << name << ": "; + } + + // Older versions of libstdc++ don't have the ostringstream move + // support. Luckily, GCC doesn't seem to be actually needing move due + // to copy/move elision. + // +#ifdef __GLIBCXX__ + error_record (error_record&&); +#else + error_record (error_record&& r) + : os_ (r.os_), + ss_ (move (r.ss_)), + fail_ (r.fail_), + empty_ (r.empty_) + { + r.empty_ = true; + } +#endif + + ~error_record () noexcept (false) + { + if (!empty_) + { + // The output stream can be in a bad state (for example as a + // result of unsuccessful attempt to report a previous error), so + // we check it. + // + if (os_.good ()) + { + ss_.put ('\n'); + os_ << ss_.str (); + os_.flush (); + } + + if (fail_) + throw failed (); + } + } + + private: + ostream& os_; + mutable ostringstream ss_; + + bool fail_; + bool empty_; + }; + + // Parse and normalize a path. Also, unless it is already absolute, make + // the path absolute using the specified directory. Throw invalid_path + // if the path is empty, and on parsing and normalization failures. + // + static path + parse_path (string s, const dir_path& d) + { + path p (move (s)); + + if (p.empty ()) + throw invalid_path (""); + + if (p.relative ()) + p = d / move (p); + + p.normalize (); + return p; + } + + // Builtin commands functions. + // + + // cat ... + // + // Note that POSIX doesn't specify if after I/O operation failure the + // command should proceed with the rest of the arguments. The current + // implementation exits immediatelly in such a case. + // + // @@ Shouldn't we check that we don't print a nonempty regular file to + // itself, as that would merely exhaust the output device? POSIX + // allows (but not requires) such a check and some implementations do + // this. That would require to fstat() file descriptors and complicate + // the code a bit. Was able to reproduce on a big file (should be + // bigger than the stream buffer size) with the test + // 'cat file >+file'. + // + // Note: must be executed asynchronously. + // + static uint8_t + cat (scope& sp, + const strings& args, + auto_fd in, auto_fd out, auto_fd err) noexcept + try + { + uint8_t r (1); + ofdstream cerr (move (err)); + + auto error = [&cerr] (bool fail = true) + { + return error_record (cerr, fail, "cat"); + }; + + try + { + ifdstream cin (move (in), fdstream_mode::binary); + ofdstream cout (move (out), fdstream_mode::binary); + + // Copy input stream to STDOUT. + // + auto copy = [&cout] (istream& is) + { + if (is.peek () != ifdstream::traits_type::eof ()) + cout << is.rdbuf (); + + is.clear (istream::eofbit); // Sets eofbit. + }; + + // Path of a file being printed to STDOUT. An empty path represents + // STDIN. Used in diagnostics. + // + path p; + + try + { + // Print STDIN. + // + if (args.empty ()) + copy (cin); + + // Print files. 
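The error_record class defined above buffers the message in an ostringstream and only writes it out (as a single insertion, so concurrent builtins don't interleave their diagnostics) in the destructor, optionally throwing failed afterwards. Below is a stripped-down sketch of that accumulate-then-report idiom against plain iostreams, without the move-construction workaround or the emptiness/stream-state checks of the real class.

#include <iostream>
#include <sstream>

struct failed {};

class error_record
{
public:
  error_record (std::ostream& os, bool fail, const char* name)
      : os_ (os), fail_ (fail)
  {
    ss_ << name << ": ";
  }

  template <typename T>
  error_record&
  operator<< (const T& x)
  {
    ss_ << x;
    return *this;
  }

  // Flush the accumulated message as one insertion and, if requested,
  // throw. Note noexcept(false): destructors are noexcept by default.
  //
  ~error_record () noexcept (false)
  {
    ss_.put ('\n');
    os_ << ss_.str ();
    os_.flush ();

    if (fail_)
      throw failed ();
  }

private:
  std::ostream& os_;
  std::ostringstream ss_;
  bool fail_;
};

int
main ()
{
  try
  {
    error_record (std::cerr, true /* fail */, "cat")
      << "unable to print '" << "foo" << "'";
  }
  catch (const failed&)
  {
    // Diagnostics have already been issued.
  }

  return 1;
}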
+ // + for (auto i (args.begin ()); i != args.end (); ++i) + { + if (*i == "-") + { + if (!cin.eof ()) + { + p.clear (); + copy (cin); + } + + continue; + } + + p = parse_path (*i, sp.wd_path); + + ifdstream is (p, ifdstream::binary); + copy (is); + is.close (); + } + } + catch (const io_error& e) + { + error_record d (error ()); + d << "unable to print "; + + if (p.empty ()) + d << "stdin"; + else + d << "'" << p << "'"; + + d << ": " << e; + } + + cin.close (); + cout.close (); + r = 0; + } + catch (const invalid_path& e) + { + error (false) << "invalid path '" << e.path << "'"; + } + // Can be thrown while creating/closing cin, cout or writing to cerr. + // + catch (const io_error& e) + { + error (false) << e; + } + catch (const failed&) + { + // Diagnostics has already been issued. + } + + cerr.close (); + return r; + } + catch (const std::exception&) + { + return 1; + } + + // Make a copy of a file at the specified path, preserving permissions, + // and registering a cleanup for a newly created file. The file paths + // must be absolute. Fail if an exception is thrown by the underlying + // copy operation. + // + static void + cpfile (scope& sp, + const path& from, const path& to, + bool overwrite, + bool attrs, + bool cleanup, + const function& fail) + { + try + { + bool exists (file_exists (to)); + + cpflags f ( + overwrite + ? cpflags::overwrite_permissions | cpflags::overwrite_content + : cpflags::none); + + if (attrs) + f |= cpflags::overwrite_permissions | cpflags::copy_timestamps; + + cpfile (from, to, f); + + if (!exists && cleanup) + sp.clean ({cleanup_type::always, to}, true); + } + catch (const system_error& e) + { + fail () << "unable to copy file '" << from << "' to '" << to + << "': " << e; + } + } + + // Make a copy of a directory at the specified path, registering a + // cleanup for the created directory. The directory paths must be + // absolute. Fail if the destination directory already exists or + // an exception is thrown by the underlying copy operation. + // + static void + cpdir (scope& sp, + const dir_path& from, const dir_path& to, + bool attrs, + bool cleanup, + const function& fail) + { + try + { + if (try_mkdir (to) == mkdir_status::already_exists) + throw_generic_error (EEXIST); + + if (cleanup) + sp.clean ({cleanup_type::always, to}, true); + + for (const auto& de: dir_iterator (from, + false /* ignore_dangling */)) + { + path f (from / de.path ()); + path t (to / de.path ()); + + if (de.type () == entry_type::directory) + cpdir (sp, + path_cast (move (f)), + path_cast (move (t)), + attrs, + cleanup, + fail); + else + cpfile (sp, f, t, false /* overwrite */, attrs, cleanup, fail); + } + + // Note that it is essential to copy timestamps and permissions after + // the directory content is copied. + // + if (attrs) + { + path_permissions (to, path_permissions (from)); + dir_time (to, dir_time (from)); + } + } + catch (const system_error& e) + { + fail () << "unable to copy directory '" << from << "' to '" << to + << "': " << e; + } + } + + // cp [-p] [--no-cleanup] + // cp [-p] [--no-cleanup] -R|-r + // cp [-p] [--no-cleanup] ... / + // cp [-p] [--no-cleanup] -R|-r ... / + // + // Note: can be executed synchronously. 
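cpdir() above copies a directory entry by entry, recursing into subdirectories, and deliberately copies permissions and timestamps only after the content is in place (copying the content afterwards would disturb the just-copied attributes). A rough std::filesystem sketch of the same ordering follows; error handling, symlinks, and the cleanup registration of the real implementation are omitted.

#include <filesystem>
#include <string>
#include <system_error>

namespace fs = std::filesystem;

// Copy directory 'from' to the not-yet-existing directory 'to',
// applying the source attributes only after the content is in place.
//
static void
copy_dir (const fs::path& from, const fs::path& to, bool attrs)
{
  if (!fs::create_directory (to)) // False means it already existed.
    throw fs::filesystem_error (
      "destination already exists",
      to,
      std::make_error_code (std::errc::file_exists));

  for (const fs::directory_entry& de: fs::directory_iterator (from))
  {
    const fs::path& f (de.path ());
    fs::path t (to / f.filename ());

    if (de.is_directory ())
      copy_dir (f, t, attrs);
    else
      fs::copy_file (f, t);
  }

  // Attributes last: copying the content above would have changed them.
  //
  if (attrs)
  {
    fs::permissions (to, fs::status (from).permissions ());
    fs::last_write_time (to, fs::last_write_time (from));
  }
}

int
main ()
{
  copy_dir ("src-dir", "dst-dir", true /* attrs */);
}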
+ // + static uint8_t + cp (scope& sp, + const strings& args, + auto_fd in, auto_fd out, auto_fd err) noexcept + try + { + uint8_t r (1); + ofdstream cerr (move (err)); + + auto error = [&cerr] (bool fail = true) + { + return error_record (cerr, fail, "cp"); + }; + + try + { + in.close (); + out.close (); + + auto i (args.begin ()); + auto e (args.end ()); + + // Process options. + // + bool recursive (false); + bool attrs (false); + bool cleanup (true); + for (; i != e; ++i) + { + const string& o (*i); + + if (o == "-R" || o == "-r") + recursive = true; + else if (o == "-p") + attrs = true; + else if (o == "--no-cleanup") + cleanup = false; + else + { + if (o == "--") + ++i; + + break; + } + } + + // Copy files or directories. + // + if (i == e) + error () << "missing arguments"; + + const dir_path& wd (sp.wd_path); + + auto j (args.rbegin ()); + path dst (parse_path (*j++, wd)); + e = j.base (); + + if (i == e) + error () << "missing source path"; + + auto fail = [&error] () {return error (true);}; + + // If destination is not a directory path (no trailing separator) + // then make a copy of the filesystem entry at the specified path + // (the only source path is allowed in such a case). Otherwise copy + // the source filesystem entries into the destination directory. + // + if (!dst.to_directory ()) + { + path src (parse_path (*i++, wd)); + + // If there are multiple sources but no trailing separator for the + // destination, then, most likelly, it is missing. + // + if (i != e) + error () << "multiple source paths without trailing separator " + << "for destination directory"; + + if (!recursive) + // Synopsis 1: make a file copy at the specified path. + // + cpfile (sp, + src, + dst, + true /* overwrite */, + attrs, + cleanup, + fail); + else + // Synopsis 2: make a directory copy at the specified path. + // + cpdir (sp, + path_cast (src), path_cast (dst), + attrs, + cleanup, + fail); + } + else + { + for (; i != e; ++i) + { + path src (parse_path (*i, wd)); + + if (recursive && dir_exists (src)) + // Synopsis 4: copy a filesystem entry into the specified + // directory. Note that we handle only source directories here. + // Source files are handled below. + // + cpdir (sp, + path_cast (src), + path_cast (dst / src.leaf ()), + attrs, + cleanup, + fail); + else + // Synopsis 3: copy a file into the specified directory. Also, + // here we cover synopsis 4 for the source path being a file. + // + cpfile (sp, + src, + dst / src.leaf (), + true /* overwrite */, + attrs, + cleanup, + fail); + } + } + + r = 0; + } + catch (const invalid_path& e) + { + error (false) << "invalid path '" << e.path << "'"; + } + // Can be thrown while closing in, out or writing to cerr. + // + catch (const io_error& e) + { + error (false) << e; + } + catch (const failed&) + { + // Diagnostics has already been issued. + } + + cerr.close (); + return r; + } + catch (const std::exception&) + { + return 1; + } + + // echo ... + // + // Note: must be executed asynchronously. + // + static uint8_t + echo (scope&, + const strings& args, + auto_fd in, auto_fd out, auto_fd err) noexcept + try + { + uint8_t r (1); + ofdstream cerr (move (err)); + + try + { + in.close (); + ofdstream cout (move (out)); + + for (auto b (args.begin ()), i (b), e (args.end ()); i != e; ++i) + cout << (i != b ? 
" " : "") << *i; + + cout << '\n'; + cout.close (); + r = 0; + } + catch (const std::exception& e) + { + cerr << "echo: " << e << endl; + } + + cerr.close (); + return r; + } + catch (const std::exception&) + { + return 1; + } + + // false + // + // Failure to close the file descriptors is silently ignored. + // + // Note: can be executed synchronously. + // + static builtin + false_ (scope&, uint8_t& r, const strings&, auto_fd, auto_fd, auto_fd) + { + return builtin (r = 1); + } + + // true + // + // Failure to close the file descriptors is silently ignored. + // + // Note: can be executed synchronously. + // + static builtin + true_ (scope&, uint8_t& r, const strings&, auto_fd, auto_fd, auto_fd) + { + return builtin (r = 0); + } + + // Create a symlink to a file or directory at the specified path. The + // paths must be absolute. Fall back to creating a hardlink, if symlink + // creation is not supported for the link path. If hardlink creation is + // not supported either, then fall back to copies. If requested, created + // filesystem entries are registered for cleanup. Fail if the target + // filesystem entry doesn't exist or an exception is thrown by the + // underlying filesystem operation (specifically for an already existing + // filesystem entry at the link path). + // + // Note that supporting optional removal of an existing filesystem entry + // at the link path (the -f option) tends to get hairy. As soon as an + // existing and the resulting filesystem entries could be of different + // types, we would end up with canceling an old cleanup and registering + // the new one. Also removing non-empty directories doesn't look very + // natural, but would be required if we want the behavior on POSIX and + // Windows to be consistent. + // + static void + mksymlink (scope& sp, + const path& target, const path& link, + bool cleanup, + const function& fail) + { + // Determine the target type, fail if the target doesn't exist. + // + bool dir (false); + + try + { + pair pe (path_entry (target)); + + if (!pe.first) + fail () << "unable to create symlink to '" << target << "': " + << "no such file or directory"; + + dir = pe.second.type == entry_type::directory; + } + catch (const system_error& e) + { + fail () << "unable to stat '" << target << "': " << e; + } + + // First we try to create a symlink. If that fails (e.g., "Windows + // happens"), then we resort to hard links. If that doesn't work out + // either (e.g., not on the same filesystem), then we fall back to + // copies. So things are going to get a bit nested. + // + try + { + mksymlink (target, link, dir); + + if (cleanup) + sp.clean ({cleanup_type::always, link}, true); + } + catch (const system_error& e) + { + // Note that we are not guaranteed (here and below) that the + // system_error exception is of the generic category. + // + int c (e.code ().value ()); + if (!(e.code ().category () == generic_category () && + (c == ENOSYS || // Not implemented. + c == EPERM))) // Not supported by the filesystem(s). + fail () << "unable to create symlink '" << link << "' to '" + << target << "': " << e; + + try + { + mkhardlink (target, link, dir); + + if (cleanup) + sp.clean ({cleanup_type::always, link}, true); + } + catch (const system_error& e) + { + c = e.code ().value (); + if (!(e.code ().category () == generic_category () && + (c == ENOSYS || // Not implemented. + c == EPERM || // Not supported by the filesystem(s). + c == EXDEV))) // On different filesystems. 
+ fail () << "unable to create hardlink '" << link << "' to '" + << target << "': " << e; + + if (dir) + cpdir (sp, + path_cast (target), path_cast (link), + false, + cleanup, + fail); + else + cpfile (sp, + target, + link, + false /* overwrite */, + true /* attrs */, + cleanup, + fail); + } + } + } + + // ln [--no-cleanup] -s + // ln [--no-cleanup] -s ... / + // + // Note: can be executed synchronously. + // + static uint8_t + ln (scope& sp, + const strings& args, + auto_fd in, auto_fd out, auto_fd err) noexcept + try + { + uint8_t r (1); + ofdstream cerr (move (err)); + + auto error = [&cerr] (bool fail = true) + { + return error_record (cerr, fail, "ln"); + }; + + try + { + in.close (); + out.close (); + + auto i (args.begin ()); + auto e (args.end ()); + + // Process options. + // + bool cleanup (true); + bool symlink (false); + + for (; i != e; ++i) + { + const string& o (*i); + + if (o == "--no-cleanup") + cleanup = false; + else if (o == "-s") + symlink = true; + else + { + if (o == "--") + ++i; + + break; + } + } + + if (!symlink) + error () << "missing -s option"; + + // Create file or directory symlinks. + // + if (i == e) + error () << "missing arguments"; + + const dir_path& wd (sp.wd_path); + + auto j (args.rbegin ()); + path link (parse_path (*j++, wd)); + e = j.base (); + + if (i == e) + error () << "missing target path"; + + auto fail = [&error] () {return error (true);}; + + // If link is not a directory path (no trailing separator), then + // create a symlink to the target path at the specified link path + // (the only target path is allowed in such a case). Otherwise create + // links to the target paths inside the specified directory. + // + if (!link.to_directory ()) + { + path target (parse_path (*i++, wd)); + + // If there are multiple targets but no trailing separator for the + // link, then, most likelly, it is missing. + // + if (i != e) + error () << "multiple target paths with non-directory link path"; + + // Synopsis 1: create a target path symlink at the specified path. + // + mksymlink (sp, target, link, cleanup, fail); + } + else + { + for (; i != e; ++i) + { + path target (parse_path (*i, wd)); + + // Synopsis 2: create a target path symlink in the specified + // directory. + // + mksymlink (sp, target, link / target.leaf (), cleanup, fail); + } + } + + r = 0; + } + catch (const invalid_path& e) + { + error (false) << "invalid path '" << e.path << "'"; + } + // Can be thrown while closing in, out or writing to cerr. + // + catch (const io_error& e) + { + error (false) << e; + } + catch (const failed&) + { + // Diagnostics has already been issued. + } + + cerr.close (); + return r; + } + catch (const std::exception&) + { + return 1; + } + + // Create a directory if not exist and its parent directories if + // necessary. Throw system_error on failure. Register created + // directories for cleanup. The directory path must be absolute. + // + static void + mkdir_p (scope& sp, const dir_path& p, bool cleanup) + { + if (!dir_exists (p)) + { + if (!p.root ()) + mkdir_p (sp, p.directory (), cleanup); + + try_mkdir (p); // Returns success or throws. + + if (cleanup) + sp.clean ({cleanup_type::always, p}, true); + } + } + + // mkdir [--no-cleanup] [-p] ... + // + // Note that POSIX doesn't specify if after a directory creation failure + // the command should proceed with the rest of the arguments. The current + // implementation exits immediatelly in such a case. + // + // Note: can be executed synchronously. 
+ // + static uint8_t + mkdir (scope& sp, + const strings& args, + auto_fd in, auto_fd out, auto_fd err) noexcept + try + { + uint8_t r (1); + ofdstream cerr (move (err)); + + auto error = [&cerr] (bool fail = true) + { + return error_record (cerr, fail, "mkdir"); + }; + + try + { + in.close (); + out.close (); + + auto i (args.begin ()); + auto e (args.end ()); + + // Process options. + // + bool parent (false); + bool cleanup (true); + for (; i != e; ++i) + { + const string& o (*i); + + if (o == "-p") + parent = true; + else if (o == "--no-cleanup") + cleanup = false; + else + { + if (*i == "--") + ++i; + + break; + } + } + + // Create directories. + // + if (i == e) + error () << "missing directory"; + + for (; i != e; ++i) + { + dir_path p (path_cast (parse_path (*i, sp.wd_path))); + + try + { + if (parent) + mkdir_p (sp, p, cleanup); + else if (try_mkdir (p) == mkdir_status::success) + { + if (cleanup) + sp.clean ({cleanup_type::always, p}, true); + } + else // == mkdir_status::already_exists + throw_generic_error (EEXIST); + } + catch (const system_error& e) + { + error () << "unable to create directory '" << p << "': " << e; + } + } + + r = 0; + } + catch (const invalid_path& e) + { + error (false) << "invalid path '" << e.path << "'"; + } + // Can be thrown while closing in, out or writing to cerr. + // + catch (const io_error& e) + { + error (false) << e; + } + catch (const failed&) + { + // Diagnostics has already been issued. + } + + cerr.close (); + return r; + } + catch (const std::exception&) + { + return 1; + } + + // mv [--no-cleanup] [-f] + // mv [--no-cleanup] [-f] ... / + // + // Note: can be executed synchronously. + // + static uint8_t + mv (scope& sp, + const strings& args, + auto_fd in, auto_fd out, auto_fd err) noexcept + try + { + uint8_t r (1); + ofdstream cerr (move (err)); + + auto error = [&cerr] (bool fail = true) + { + return error_record (cerr, fail, "mv"); + }; + + try + { + in.close (); + out.close (); + + auto i (args.begin ()); + auto e (args.end ()); + + // Process options. + // + bool no_cleanup (false); + bool force (false); + for (; i != e; ++i) + { + const string& o (*i); + + if (o == "--no-cleanup") + no_cleanup = true; + else if (*i == "-f") + force = true; + else + { + if (o == "--") + ++i; + + break; + } + } + + // Move filesystem entries. + // + if (i == e) + error () << "missing arguments"; + + const dir_path& wd (sp.wd_path); + + auto j (args.rbegin ()); + path dst (parse_path (*j++, wd)); + e = j.base (); + + if (i == e) + error () << "missing source path"; + + auto mv = [no_cleanup, force, &wd, &sp, &error] (const path& from, + const path& to) + { + const dir_path& rwd (sp.root->wd_path); + + if (!from.sub (rwd) && !force) + error () << "'" << from << "' is out of working directory '" + << rwd << "'"; + + try + { + auto check_wd = [&wd, &error] (const path& p) + { + if (wd.sub (path_cast (p))) + error () << "'" << p << "' contains test working directory '" + << wd << "'"; + }; + + check_wd (from); + check_wd (to); + + bool exists (butl::entry_exists (to)); + + // Fail if the source and destination paths are the same. + // + // Note that for mventry() function (that is based on the POSIX + // rename() function) this is a noop. + // + if (exists && to == from) + error () << "unable to move entity '" << from << "' to itself"; + + // Rename/move the filesystem entry, replacing an existing one. 
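With -p the mkdir builtin above falls back to mkdir_p(), which creates the missing parent directories first and registers a cleanup for every directory it actually created, so that teardown (which runs cleanups in reverse order) removes the innermost directory first. A compact std::filesystem sketch of that parent-first recursion; the real implementation registers cleanups on the script scope rather than returning them.

#include <filesystem>
#include <vector>

namespace fs = std::filesystem;

// Create p and any missing parents, recording what was actually created.
// Parents end up in the vector before children.
//
static void
mkdir_p (const fs::path& p, std::vector<fs::path>& created)
{
  if (fs::exists (p))
    return;

  if (p.has_parent_path ())
    mkdir_p (p.parent_path (), created);

  fs::create_directory (p);
  created.push_back (p);
}

int
main ()
{
  std::vector<fs::path> cleanups;
  mkdir_p ("a/b/c", cleanups);

  // Teardown: remove in reverse order (c, then b, then a).
  //
  for (auto i (cleanups.rbegin ()); i != cleanups.rend (); ++i)
    fs::remove (*i);

  return 0;
}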
+ // + mventry (from, + to, + cpflags::overwrite_permissions | + cpflags::overwrite_content); + + // Unless suppressed, adjust the cleanups that are sub-paths of + // the source path. + // + if (!no_cleanup) + { + // "Move" the matching cleanup if the destination path doesn't + // exist and is a sub-path of the working directory. Otherwise + // just remove it. + // + // Note that it's not enough to just change the cleanup paths. + // We also need to make sure that these cleanups happen before + // the destination directory (or any of its parents) cleanup, + // that is potentially registered. To achieve that we can just + // relocate these cleanup entries to the end of the list, + // preserving their mutual order. Remember that cleanups in + // the list are executed in the reversed order. + // + bool mv_cleanups (!exists && to.sub (rwd)); + cleanups cs; + + // Remove the source path sub-path cleanups from the list, + // adjusting/caching them if required (see above). + // + for (auto i (sp.cleanups.begin ()); i != sp.cleanups.end (); ) + { + cleanup& c (*i); + path& p (c.path); + + if (p.sub (from)) + { + if (mv_cleanups) + { + // Note that we need to preserve the cleanup path + // trailing separator which indicates the removal + // method. Also note that leaf(), in particular, does + // that. + // + p = p != from + ? to / p.leaf (path_cast (from)) + : p.to_directory () + ? path_cast (to) + : to; + + cs.push_back (move (c)); + } + + i = sp.cleanups.erase (i); + } + else + ++i; + } + + // Re-insert the adjusted cleanups at the end of the list. + // + sp.cleanups.insert (sp.cleanups.end (), + make_move_iterator (cs.begin ()), + make_move_iterator (cs.end ())); + } + } + catch (const system_error& e) + { + error () << "unable to move entity '" << from << "' to '" << to + << "': " << e; + } + }; + + // If destination is not a directory path (no trailing separator) + // then move the filesystem entry to the specified path (the only + // source path is allowed in such a case). Otherwise move the source + // filesystem entries into the destination directory. + // + if (!dst.to_directory ()) + { + path src (parse_path (*i++, wd)); + + // If there are multiple sources but no trailing separator for the + // destination, then, most likelly, it is missing. + // + if (i != e) + error () << "multiple source paths without trailing separator " + << "for destination directory"; + + // Synopsis 1: move an entity to the specified path. + // + mv (src, dst); + } + else + { + // Synopsis 2: move entities into the specified directory. + // + for (; i != e; ++i) + { + path src (parse_path (*i, wd)); + mv (src, dst / src.leaf ()); + } + } + + r = 0; + } + catch (const invalid_path& e) + { + error (false) << "invalid path '" << e.path << "'"; + } + // Can be thrown while closing in, out or writing to cerr. + // + catch (const io_error& e) + { + error (false) << e; + } + catch (const failed&) + { + // Diagnostics has already been issued. + } + + cerr.close (); + return r; + } + catch (const std::exception&) + { + return 1; + } + + // rm [-r] [-f] ... + // + // The implementation deviates from POSIX in a number of ways. It doesn't + // interact with a user and fails immediatelly if unable to process an + // argument. It doesn't check for dots containment in the path, and + // doesn't consider files and directory permissions in any way just + // trying to remove a filesystem entry. Always fails if empty path is + // specified. + // + // Note: can be executed synchronously. 
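The cleanup adjustment above is the subtle part of mv: cleanups registered under the source path are rebased onto the destination and relocated to the end of the list, preserving their mutual order, so that (with cleanups executed in reverse order) they still run before any cleanup registered for the destination directory or its parents. The following self-contained sketch shows that relocation over a plain vector of path strings; the sub-path test and the rebasing are done lexically here, whereas the real code uses the butl path primitives and also handles the trailing-separator/removal-method details.

#include <iostream>
#include <string>
#include <vector>

// True if p equals d or lies under it (normalized paths, no trailing
// slash on d). Purely lexical.
//
static bool
sub (const std::string& p, const std::string& d)
{
  return p == d || (p.size () > d.size () &&
                    p.compare (0, d.size (), d) == 0 &&
                    p[d.size ()] == '/');
}

int
main ()
{
  const std::string from ("/wd/src");
  const std::string to ("/wd/dst");

  // Registered in this order; executed in reverse, so /wd goes last.
  //
  std::vector<std::string> cleanups {
    "/wd", "/wd/src", "/wd/src/file", "/wd/unrelated"};

  std::vector<std::string> moved;

  for (auto i (cleanups.begin ()); i != cleanups.end (); )
  {
    if (sub (*i, from))
    {
      // Rebase /wd/src/... onto /wd/dst/... and remember it.
      //
      moved.push_back (to + i->substr (from.size ()));
      i = cleanups.erase (i);
    }
    else
      ++i;
  }

  // Re-insert at the end so the rebased entries are cleaned up before
  // /wd (and any other parent) is.
  //
  cleanups.insert (cleanups.end (), moved.begin (), moved.end ());

  for (const std::string& p: cleanups)
    std::cout << p << '\n';

  // Prints: /wd, /wd/unrelated, /wd/dst, /wd/dst/file.
  return 0;
}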
+ // + static uint8_t + rm (scope& sp, + const strings& args, + auto_fd in, auto_fd out, auto_fd err) noexcept + try + { + uint8_t r (1); + ofdstream cerr (move (err)); + + auto error = [&cerr] (bool fail = true) + { + return error_record (cerr, fail, "rm"); + }; + + try + { + in.close (); + out.close (); + + auto i (args.begin ()); + auto e (args.end ()); + + // Process options. + // + bool dir (false); + bool force (false); + for (; i != e; ++i) + { + if (*i == "-r") + dir = true; + else if (*i == "-f") + force = true; + else + { + if (*i == "--") + ++i; + + break; + } + } + + // Remove entries. + // + if (i == e && !force) + error () << "missing file"; + + const dir_path& wd (sp.wd_path); + const dir_path& rwd (sp.root->wd_path); + + for (; i != e; ++i) + { + path p (parse_path (*i, wd)); + + if (!p.sub (rwd) && !force) + error () << "'" << p << "' is out of working directory '" << rwd + << "'"; + + try + { + dir_path d (path_cast (p)); + + if (dir_exists (d)) + { + if (!dir) + error () << "'" << p << "' is a directory"; + + if (wd.sub (d)) + error () << "'" << p << "' contains test working directory '" + << wd << "'"; + + // The call can result in rmdir_status::not_exist. That's not + // very likelly but there is also nothing bad about it. + // + try_rmdir_r (d); + } + else if (try_rmfile (p) == rmfile_status::not_exist && !force) + throw_generic_error (ENOENT); + } + catch (const system_error& e) + { + error () << "unable to remove '" << p << "': " << e; + } + } + + r = 0; + } + catch (const invalid_path& e) + { + error (false) << "invalid path '" << e.path << "'"; + } + // Can be thrown while closing in, out or writing to cerr. + // + catch (const io_error& e) + { + error (false) << e; + } + catch (const failed&) + { + // Diagnostics has already been issued. + } + + cerr.close (); + return r; + } + catch (const std::exception&) + { + return 1; + } + + // rmdir [-f] ... + // + // Note: can be executed synchronously. + // + static uint8_t + rmdir (scope& sp, + const strings& args, + auto_fd in, auto_fd out, auto_fd err) noexcept + try + { + uint8_t r (1); + ofdstream cerr (move (err)); + + auto error = [&cerr] (bool fail = true) + { + return error_record (cerr, fail, "rmdir"); + }; + + try + { + in.close (); + out.close (); + + auto i (args.begin ()); + auto e (args.end ()); + + // Process options. + // + bool force (false); + for (; i != e; ++i) + { + if (*i == "-f") + force = true; + else + { + if (*i == "--") + ++i; + + break; + } + } + + // Remove directories. + // + if (i == e && !force) + error () << "missing directory"; + + const dir_path& wd (sp.wd_path); + const dir_path& rwd (sp.root->wd_path); + + for (; i != e; ++i) + { + dir_path p (path_cast (parse_path (*i, wd))); + + if (wd.sub (p)) + error () << "'" << p << "' contains test working directory '" + << wd << "'"; + + if (!p.sub (rwd) && !force) + error () << "'" << p << "' is out of working directory '" + << rwd << "'"; + + try + { + rmdir_status s (try_rmdir (p)); + + if (s == rmdir_status::not_empty) + throw_generic_error (ENOTEMPTY); + else if (s == rmdir_status::not_exist && !force) + throw_generic_error (ENOENT); + } + catch (const system_error& e) + { + error () << "unable to remove '" << p << "': " << e; + } + } + + r = 0; + } + catch (const invalid_path& e) + { + error (false) << "invalid path '" << e.path << "'"; + } + // Can be thrown while closing in, out or writing to cerr. + // + catch (const io_error& e) + { + error (false) << e; + } + catch (const failed&) + { + // Diagnostics has already been issued. 
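Both rm and rmdir above guard the test sandbox in two directions: the argument must lie inside the root working directory (unless -f is specified), and it must not contain the current test's working directory. A small lexical sketch of those two containment checks follows; the path::sub() function from libbutl is the real primitive, and the diagnostics wording mirrors the builtins above.

#include <iostream>
#include <string>

// True if p equals d or lies under it (normalized paths, no trailing
// slash on d). Purely lexical, a stand-in for path::sub().
//
static bool
sub (const std::string& p, const std::string& d)
{
  return p == d || (p.size () > d.size () &&
                    p.compare (0, d.size (), d) == 0 &&
                    p[d.size ()] == '/');
}

// Emulate the rm checks for a single argument path p.
//
static bool
removable (const std::string& p,
           const std::string& rwd, // Root working directory.
           const std::string& wd,  // This test's working directory.
           bool force)
{
  if (!sub (p, rwd) && !force)
  {
    std::cerr << "rm: '" << p << "' is out of working directory '"
              << rwd << "'\n";
    return false;
  }

  if (sub (wd, p))
  {
    std::cerr << "rm: '" << p << "' contains test working directory '"
              << wd << "'\n";
    return false;
  }

  return true;
}

int
main ()
{
  const std::string rwd ("/tmp/test");
  const std::string wd ("/tmp/test/unit");

  std::cout << removable ("/tmp/test/unit/out.txt", rwd, wd, false) << ' ' // 1
            << removable ("/etc/passwd", rwd, wd, false) << ' '            // 0
            << removable ("/tmp/test", rwd, wd, true) << '\n';             // 0

  return 0;
}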
+ } + + cerr.close (); + return r; + } + catch (const std::exception&) + { + return 1; + } + + // sed [-n] [-i] -e