// file : bbot/worker.cxx -*- C++ -*- // license : MIT; see accompanying LICENSE file #ifndef _WIN32 # include // signal() #else # include // SetErrorMode(), Sleep() #endif #include #include #include // strchr(), strncmp() #include #include #include // find(), find_if(), remove_if() #include #include #include #include // to_utf8() #include #include #include #include #include #include #include #include #include using namespace butl; using namespace bbot; using std::cout; using std::endl; namespace bbot { int main (int argc, char* argv[]); static int build (size_t argc, const char* argv[]); process_path argv0; worker_options ops; dir_path env_dir; // Note that upload can be quite large and take a while to upload under high // load. // const size_t tftp_blksize (1468); // Between 512 (default) and 65464. const size_t tftp_put_timeout (3600); // 1 hour (also the default). const size_t tftp_get_timeout (10); // 10 seconds. const size_t tftp_get_retries (3); // Task request retries (see startup()). 
} bool exists (const dir_path& d) try { return dir_exists (d); } catch (const system_error& e) { fail << "unable to stat path " << d << ": " << e << endf; } static dir_path current_directory () try { return dir_path::current_directory (); } catch (const system_error& e) { fail << "unable to obtain current directory: " << e << endf; } static dir_path change_wd (tracer& t, string* log, const dir_path& d, bool create = false) try { if (create) { if (verb >= 3) t << "mkdir -p " << d; if (log != nullptr) *log += "mkdir -p " + d.representation () + '\n'; try_mkdir_p (d); } dir_path r (current_directory ()); if (verb >= 3) t << "cd " << d; if (log != nullptr) *log += "cd " + d.representation () + '\n'; dir_path::current_directory (d); return r; } catch (const system_error& e) { fail << "unable to change current directory to " << d << ": " << e << endf; } static void mv (tracer& t, string* log, const dir_path& from, const dir_path& to) try { if (verb >= 3) t << "mv " << from << ' ' << to; if (log != nullptr) *log += "mv " + from.representation () + ' ' + to.representation () + "\n"; mvdir (from, to); } catch (const system_error& e) { fail << "unable to move directory '" << from << "' to '" << to << "': " << e << endf; } static void rm_r (tracer& t, string* log, const dir_path& d) try { if (verb >= 3) t << "rm -r " << d; if (log != nullptr) *log += "rm -r " + d.representation () + '\n'; rmdir_r (d); } catch (const system_error& e) { fail << "unable to remove directory " << d << ": " << e << endf; } // Step IDs. // enum class step_id { // Common fallbacks for bpkg_*_create/b_test_installed_create and // bpkg_*_configure_build/b_test_installed_configure, respectively. Note: // not breakpoints. // b_create, b_configure, // Note that bpkg_module_* options are only used if the main package is a // build system module (using just ~build2 otherwise). 
They also have no // fallback (build system modules are just too different to try to handle // them together with target and host; e.g., install root). However, // bpkg_module_create is complemented with arguments from un-prefixed step // ids, the same way as other *.create[_for_*] steps (note that un-prefixed // steps are not fallbacks, they are always added first). // bpkg_create, // Breakpoint and base. bpkg_target_create, //: b_create, bpkg_create bpkg_host_create, //: b_create, bpkg_create bpkg_module_create, //: no fallback bpkg_link, bpkg_configure_add, bpkg_configure_fetch, // Global (as opposed to package-specific) bpkg-pkg-build options (applies // to all *_configure_build* steps). Note: not a breakpoint. // bpkg_global_configure_build, // Note that bpkg_configure_build serves as a breakpoint for the // bpkg-pkg-build call that configures (at once) the main package and all // its external tests. // bpkg_configure_build, // Breakpoint and base. bpkg_target_configure_build, //: b_configure, bpkg_configure_build bpkg_host_configure_build, //: b_configure, bpkg_configure_build bpkg_module_configure_build, //: b_configure, bpkg_configure_build bpkg_update, bpkg_test, // Note that separate test packages are configures as part of the // bpkg_configure_build step above with options taken from // bpkg_{target,host}_configure_build, depending on tests package type. // bpkg_test_separate_update, //: bpkg_update bpkg_test_separate_test, //: bpkg_test // Note that we only perform the installation tests if this is a target // package or a self-hosted configuration. // bpkg_install, // Note: skipped for modules. // b_test_installed_create, //: b_create b_test_installed_configure, //: b_configure b_test_installed_test, // Note that for a host package this can involve both run-time and build- // time tests (which means we may also need a shared configuration for // modules). 
// // The *_for_{target,host,module} denote main package type, not // configuration being created, which will always be target (more precisely, // target or host, but host only in a self-hosted case, which means it's // the same as target). // // Note that if this is a non-self-hosted configuration, we can only end up // here if building target package and so can just use *_create and *_build // values in buildtabs. // bpkg_test_separate_installed_create, // Breakpoint and base. bpkg_test_separate_installed_create_for_target, //: bpkg_test_separate_installed_create bpkg_test_separate_installed_create_for_host, //: bpkg_test_separate_installed_create bpkg_test_separate_installed_create_for_module, //: no fallback bpkg_test_separate_installed_link, // breakpoint only bpkg_test_separate_installed_configure_add, //: bpkg_configure_add bpkg_test_separate_installed_configure_fetch, //: bpkg_configure_fetch bpkg_test_separate_installed_configure_build, // Breakpoint and base. bpkg_test_separate_installed_configure_build_for_target, //: bpkg_test_separate_installed_configure_build bpkg_test_separate_installed_configure_build_for_host, //: bpkg_test_separate_installed_configure_build bpkg_test_separate_installed_configure_build_for_module, //: bpkg_test_separate_installed_configure_build bpkg_test_separate_installed_update, //: bpkg_update bpkg_test_separate_installed_test, //: bpkg_test bpkg_uninstall, end }; static const strings step_id_str { "b.create", "b.configure", "bpkg.create", "bpkg.target.create", "bpkg.host.create", "bpkg.module.create", "bpkg.link", "bpkg.configure.add", "bpkg.configure.fetch", "bpkg.global.configure.build", "bpkg.configure.build", "bpkg.target.configure.build", "bpkg.host.configure.build", "bpkg.module.configure.build", "bpkg.update", "bpkg.test", "bpkg.test-separate.update", "bpkg.test-separate.test", "bpkg.install", "b.test-installed.create", "b.test-installed.configure", "b.test-installed.test", "bpkg.test-separate-installed.create", 
"bpkg.test-separate-installed.create_for_target", "bpkg.test-separate-installed.create_for_host", "bpkg.test-separate-installed.create_for_module", "bpkg.test-separate-installed.link", "bpkg.test-separate-installed.configure.add", "bpkg.test-separate-installed.configure.fetch", "bpkg.test-separate-installed.configure.build", "bpkg.test-separate-installed.configure.build_for_target", "bpkg.test-separate-installed.configure.build_for_host", "bpkg.test-separate-installed.configure.build_for_module", "bpkg.test-separate-installed.update", "bpkg.test-separate-installed.test", "bpkg.uninstall", "end"}; using std::regex; namespace regex_constants = std::regex_constants; using regexes = vector; // Run the worker script command. Name is used for logging and diagnostics // only. Match lines read from the command's stderr against the regular // expressions and return the warning result status (instead of success) in // case of a match. Save the executed command into last_cmd. // // Redirect stdout to stderr if the out argument is NULL. Otherwise, save the // process output into the referenced variable. Note: currently assumes that // the output will always fit into the pipe buffer. // // If bkp_step is present and is equal to the command step, then prior to // running this command ask the user if to continue or abort the task // execution. If bkp_status is present, then ask for that if the command // execution results with the specified or more critical status. // // For the special end step no command is executed. In this case only the user // is potentially prompted and the step is traced/logged. // template static result_status run_cmd (step_id step, tracer& t, string& log, optional* out, const regexes& warn_detect, const string& name, const optional& bkp_step, const optional& bkp_status, string& last_cmd, const process_env& pe, A&&... a) { // UTF-8-sanitize and log the diagnostics. Also print the raw diagnostics // to stderr at verbosity level 3 or higher. 
// auto add = [&log, &t] (string&& s, bool trace = true) { if (verb >= 3) { if (trace) t << s; else text << s; } to_utf8 (s, '?', codepoint_types::graphic, U"\n\r\t"); log += s; log += '\n'; }; string next_cmd; // Prompt the user if to continue the task execution and, if they refuse, // log this and throw abort. // struct abort {}; auto prompt = [&last_cmd, &next_cmd, &add] (const string& what) { diag_record dr (text); dr << '\n' << what << '\n' << " current dir: " << current_directory () << '\n' << " environment: " << ops.env_script () << ' ' << ops.env_target (); if (!last_cmd.empty ()) dr << '\n' << " last command: " << last_cmd; if (!next_cmd.empty ()) dr << '\n' << " next command: " << next_cmd; dr.flush (); if (!yn_prompt ( "continue execution (or you may shutdown the machine)? [y/n]")) { add ("execution aborted by interactive user"); throw abort (); } }; auto prompt_step = [step, &t, &log, &bkp_step, &prompt] () { const string& sid (step_id_str[static_cast (step)]); // Prompt the user if the breakpoint is reached. // if (bkp_step && *bkp_step == step) prompt (sid + " step reached"); string ts (to_string (system_clock::now (), "%Y-%m-%d %H:%M:%S %Z", true /* special */, true /* local */)); // Log the step id and the command to be executed. // l3 ([&]{t << "step id: " << sid << ' ' << ts;}); #ifndef _WIN32 log += "# step id: "; #else log += "rem step id: "; #endif log += sid; log += ' '; log += ts; log += '\n'; }; try { // Trace, log, and save the command line. // auto cmdc = [&t, &log, &next_cmd, &prompt_step] (const char* c[], size_t n) { std::ostringstream os; process::print (os, c, n); next_cmd = os.str (); prompt_step (); t (c, n); log += next_cmd; log += '\n'; }; result_status r (result_status::success); if (step != step_id::end) { try { // Redirect stdout to stderr, if the caller is not interested in it. // // Text mode seems appropriate. // fdpipe out_pipe (out != nullptr ? 
fdopen_pipe () : fdpipe ()); fdpipe err_pipe (fdopen_pipe ()); process pr ( process_start_callback (cmdc, fdopen_null (), // Never reads from stdin. out != nullptr ? out_pipe.out.get () : 2, err_pipe, pe, forward (a)...)); out_pipe.out.close (); err_pipe.out.close (); { // Skip on exception. // ifdstream is (move (err_pipe.in), fdstream_mode::skip); for (string l; is.peek () != ifdstream::traits_type::eof (); ) { getline (is, l); // Match the log line with the warning-detecting regular // expressions until the first match. // if (r != result_status::warning) { for (const regex& re: warn_detect) { // Only examine the first 512 bytes. Long lines (e.g., linker // command lines) could trigger implementation-specific // limitations (like stack overflow). Plus, it is a // performance concern. // if (regex_search (l.begin (), (l.size () < 512 ? l.end () : l.begin () + 512), re)) { r = result_status::warning; break; } } } add (move (l), false /* trace */); } } if (!pr.wait ()) { const process_exit& e (*pr.exit); add (name + " " + to_string (e)); r = e.normal () ? result_status::error : result_status::abnormal; } // Only read the buffered output if the process terminated normally. // if (out != nullptr && pr.exit->normal ()) { // Note: shouldn't throw since the output is buffered. // ifdstream is (move (out_pipe.in)); *out = is.read_text (); } last_cmd = move (next_cmd); if (bkp_status && r >= *bkp_status) { next_cmd.clear (); // Note: used by prompt(). prompt (!r ? "error occured" : "warning is issued"); } } catch (const process_error& e) { fail << "unable to execute " << name << ": " << e; } catch (const io_error& e) { fail << "unable to read " << name << " diagnostics: " << e; } } else { next_cmd.clear (); // Note: used by prompt_step(). 
prompt_step (); } return r; } catch (const abort&) { return result_status::abort; } } template static result_status run_bpkg (step_id step, const V& envvars, tracer& t, string& log, optional* out, const regexes& warn_detect, const optional& bkp_step, const optional& bkp_status, string& last_cmd, const char* verbosity, const string& cmd, A&&... a) { return run_cmd (step, t, log, out, warn_detect, "bpkg " + cmd, bkp_step, bkp_status, last_cmd, process_env ("bpkg", envvars), verbosity, cmd, forward (a)...); } template static result_status run_bpkg (step_id step, const V& envvars, tracer& t, string& log, const regexes& warn_detect, const optional& bkp_step, const optional& bkp_status, string& last_cmd, const char* verbosity, const string& cmd, A&&... a) { return run_bpkg (step, envvars, t, log, nullptr /* out */, warn_detect, bkp_step, bkp_status, last_cmd, verbosity, cmd, forward (a)...); } template static result_status run_bpkg (step_id step, tracer& t, string& log, optional* out, const regexes& warn_detect, const optional& bkp_step, const optional& bkp_status, string& last_cmd, const char* verbosity, const string& cmd, A&&... a) { const char* const* envvars (nullptr); return run_bpkg (step, envvars, t, log, out, warn_detect, bkp_step, bkp_status, last_cmd, verbosity, cmd, forward (a)...); } template static result_status run_bpkg (step_id step, tracer& t, string& log, const regexes& warn_detect, const optional& bkp_step, const optional& bkp_status, string& last_cmd, const char* verbosity, const string& cmd, A&&... a) { const char* const* envvars (nullptr); return run_bpkg (step, envvars, t, log, warn_detect, bkp_step, bkp_status, last_cmd, verbosity, cmd, forward (a)...); } template static result_status run_b (step_id step, const V& envvars, tracer& t, string& log, const regexes& warn_detect, const optional& bkp_step, const optional& bkp_status, string& last_cmd, const char* verbosity, const strings& buildspecs, A&&... 
a) { string name ("b"); for (const string& s: buildspecs) { if (!name.empty ()) name += ' '; name += s; } return run_cmd (step, t, log, nullptr /* out */, warn_detect, name, bkp_step, bkp_status, last_cmd, process_env ("b", envvars), verbosity, buildspecs, forward (a)...); } template static result_status run_b (step_id step, const V& envvars, tracer& t, string& log, const regexes& warn_detect, const optional& bkp_step, const optional& bkp_status, string& last_cmd, const char* verbosity, const string& buildspec, A&&... a) { return run_cmd (step, t, log, nullptr /* out */, warn_detect, "b " + buildspec, bkp_step, bkp_status, last_cmd, process_env ("b", envvars), verbosity, buildspec, forward (a)...); } template static result_status run_b (step_id step, tracer& t, string& log, const regexes& warn_detect, const optional& bkp_step, const optional& bkp_status, string& last_cmd, const char* verbosity, const string& buildspec, A&&... a) { const char* const* envvars (nullptr); return run_b (step, envvars, t, log, warn_detect, bkp_step, bkp_status, last_cmd, verbosity, buildspec, forward (a)...); } // Upload compressed manifest to the specified TFTP URL with curl. Issue // diagnostics and throw failed on invalid manifest or process management // errors and throw io_error for input/output errors or non-zero curl exit. // template static void upload_manifest (tracer& trace, const string& url, const T& m, const string& what) { try { // Piping the data directly into curl's stdin sometimes results in the // broken pipe error on the client and partial/truncated upload on the // server. This happens quite regularly on older Linux distributions // (e.g., Debian 8, Ubuntu 16.04) but also sometimes on Windows. On the // other hand, uploading from a file appears to work reliably (we still // get an odd error on Windows from time to time with larger uploads). // // Let's not break lines in the manifest values not to further increase // the size of the manifest encoded representation. 
Also here we don't // care much about readability of the manifest since it will only be read // by the bbot agent anyway. // #if 0 // Note: need to add compression support if re-enable this. tftp_curl c (trace, path ("-"), nullfd, curl::put, url, "--tftp-blksize", tftp_blksize, "--max-time", tftp_put_timeout); manifest_serializer s (c.out, url, true /* long_lines */); m.serialize (s); c.out.close (); #else auto_rmfile tmp; try { tmp = auto_rmfile (path::temp_path (what + "-manifest.lz4")); ofdstream ofs (tmp.path, fdopen_mode::binary); olz4stream ozs (ofs, 9, 5 /* 256KB */, nullopt /* content_size */); manifest_serializer s (ozs, tmp.path.string (), true /* long_lines */); m.serialize (s); ozs.close (); ofs.close (); } catch (const io_error& e) // In case not derived from system_error. { fail << "unable to save " << what << " manifest: " << e; } catch (const system_error& e) { fail << "unable to save " << what << " manifest: " << e; } tftp_curl c (trace, tmp.path, nullfd, curl::put, url, "--tftp-blksize", tftp_blksize, "--max-time", tftp_put_timeout); #endif if (!c.wait ()) throw_generic_ios_failure (EIO, "non-zero curl exit code"); } catch (const manifest_serialization& e) { fail << "invalid " << what << " manifest: " << e.description; } catch (const process_error& e) { fail << "unable to execute curl: " << e; } catch (const system_error& e) { const auto& c (e.code ()); if (c.category () == generic_category ()) throw_generic_ios_failure (c.value (), e.what ()); else throw_system_ios_failure (c.value (), e.what ()); } } static const string worker_checksum ("2"); // Logic version. static int bbot:: build (size_t argc, const char* argv[]) { using namespace bpkg; using string_parser::unquote; tracer trace ("build"); // Our overall plan is as follows: // // 1. Parse the task manifest (it should be in CWD). // // 2. 
Run bpkg to create the package/tests configurations, add the // repository to them, and configure, build, test, optionally install, // test installed and uninstall the package all while saving the logs in // the result manifest. // // 3. Upload the result manifest. // // NOTE: consider updating worker_checksum if making any logic changes. // // Note also that we are being "watched" by the startup version of us which // will upload an appropriate result in case we exit with an error. So here // for abnormal situations (like a failure to parse the manifest), we just // fail. // task_manifest tm ( parse_manifest (path ("task.manifest"), "task")); // Reset the dependency checksum if the task's worker checksum doesn't match // the current one. // if (!tm.worker_checksum || *tm.worker_checksum != worker_checksum) tm.dependency_checksum = nullopt; result_manifest rm { tm.name, tm.version, result_status::success, operation_results {}, worker_checksum, nullopt /* dependency_checksum */ }; auto add_result = [&rm] (string o) -> operation_result& { rm.results.push_back ( operation_result {move (o), result_status::success, ""}); return rm.results.back (); }; dir_path rwd; // Root working directory. // Resolve the breakpoint specified by the interactive manifest value into // the step id or the result status breakpoint. If the breakpoint is // invalid, then log the error and abort the build. Note that we reuse the // configure operation log here not to complicate things. // optional bkp_step; optional bkp_status; string last_cmd; // Used in the user prompt. for (;;) // The "breakout" loop. { auto fail_operation = [&trace] (operation_result& r, const string& e, result_status s) { l3 ([&]{trace << e;}); r.log += "error: " + e + '\n'; r.status = s; }; // Regular expressions that detect different forms of build2 toolchain // warnings. Accidently (or not), they also cover GCC and Clang warnings // (for the English locale). 
// // The expressions will be matched multiple times, so let's make the // matching faster, with the potential cost of making regular expressions // creation slower. // regex::flag_type f (regex_constants::optimize); // ECMAScript is implied. regexes wre { regex ("^warning: ", f), regex ("^.+: warning: ", f)}; for (const string& re: tm.unquoted_warning_regex ()) wre.emplace_back (re, f); if (tm.interactive) { const string& b (*tm.interactive); if (b == "error") bkp_status = result_status::error; else if (b == "warning") bkp_status = result_status::warning; else { for (size_t i (0); i < step_id_str.size (); ++i) { if (b == step_id_str[i]) { bkp_step = static_cast (i); break; } } } if (!bkp_step && !bkp_status) { fail_operation (add_result ("configure"), "invalid interactive build breakpoint '" + b + "'", result_status::abort); break; } } // Split the argument into prefix (empty if not present) and unquoted // value. Return nullopt if the prefix is invalid. // auto parse_arg = [] (const string& a) -> optional> { size_t p (a.find_first_of (":=\"'")); if (p == string::npos || a[p] != ':') // No prefix. return make_pair (string (), unquote (a)); for (const string& id: step_id_str) { if (a.compare (0, p, id, 0, p) == 0 && (id.size () == p || (id.size () > p && id[p] == '.'))) return make_pair (a.substr (0, p), unquote (a.substr (p + 1))); } return nullopt; // Prefix is invalid. }; // Parse configuration arguments. Report failures to the bbot controller. // std::multimap config_args; for (const string& c: tm.config) { optional> v (parse_arg (c)); if (!v) { rm.status |= result_status::abort; l3 ([&]{trace << "invalid configuration argument prefix in " << "'" << c << "'";}); break; } if (v->second[0] != '-' && v->second.find ('=') == string::npos) { rm.status |= result_status::abort; l3 ([&]{trace << "invalid configuration argument '" << c << "'";}); break; } config_args.emplace (move (*v)); } if (!rm.status) break; // Parse environment arguments. 
// std::multimap modules; std::multimap env_args; for (size_t i (1); i != argc; ++i) { const char* a (argv[i]); optional> v (parse_arg (a)); if (!v) fail << "invalid environment argument prefix in '" << a << "'"; bool mod (v->second[0] != '-' && v->second.find ('=') == string::npos); if (mod && !v->first.empty () && v->first != "b.create" && v->first != "bpkg.create" && v->first != "bpkg.target.create" && v->first != "bpkg.host.create" && v->first != "bpkg.module.create" && v->first != "b.test-installed.create" && v->first != "bpkg.test-separate-installed.create" && v->first != "bpkg.test-separate-installed.create_for_target" && v->first != "bpkg.test-separate-installed.create_for_host" && v->first != "bpkg.test-separate-installed.create_for_module") fail << "invalid module prefix in '" << a << "'"; (mod ? modules : env_args).emplace (move (*v)); } // Return command arguments for the specified step id, complementing // *.create[_for_*] steps with un-prefixed arguments. If no arguments are // specified for the step then use the specified fallbacks, potentially // both. Arguments with more specific prefixes come last. // auto step_args = [] (const std::multimap& args, step_id step, optional fallback1 = nullopt, optional fallback2 = nullopt) -> cstrings { cstrings r; // Add arguments for a specified, potentially empty, prefix. // auto add_args = [&args, &r] (const string& prefix) { auto range (args.equal_range (prefix)); for (auto i (range.first); i != range.second; ++i) r.emplace_back (i->second.c_str ()); }; // Add un-prefixed arguments if this is one of the *.create[_for_*] // steps. 
// switch (step) { case step_id::b_create: case step_id::bpkg_create: case step_id::bpkg_target_create: case step_id::bpkg_host_create: case step_id::bpkg_module_create: case step_id::b_test_installed_create: case step_id::bpkg_test_separate_installed_create: case step_id::bpkg_test_separate_installed_create_for_target: case step_id::bpkg_test_separate_installed_create_for_host: case step_id::bpkg_test_separate_installed_create_for_module: { add_args (""); break; } default: break; } auto add_step_args = [&add_args] (step_id step) { const string& s (step_id_str[static_cast (step)]); for (size_t n (0);; ++n) { n = s.find ('.', n); add_args (n == string::npos ? s : string (s, 0, n)); if (n == string::npos) break; } }; // If no arguments found for the step id, then use the fallback step // ids, if specified. // if (args.find (step_id_str[static_cast (step)]) != args.end ()) { add_step_args (step); } else { // Note that if we ever need to specify fallback pairs with common // ancestors, we may want to suppress duplicate ancestor step ids. // if (fallback1) add_step_args (*fallback1); if (fallback2) add_step_args (*fallback2); } return r; }; // Search for config.install.root variable. If it is present and has a // non-empty value, then test the package installation and uninstall. Note // that passing [null] value would be meaningless, so we don't recognize // it as a special one. // dir_path install_root; { size_t n (19); auto space = [] (char c) {return c == ' ' || c == '\t';}; for (const char* s: reverse_iterate (step_args (config_args, step_id::bpkg_create))) { if (strncmp (s, "config.install.root", n) == 0 && (s[n] == '=' || space (s[n]))) { while (space (s[n])) ++n; // Skip spaces. if (s[n] == '=') ++n; // Skip the equal sign. while (space (s[n])) ++n; // Skip spaces. // Note that the config.install.root variable value may // potentially be quoted. // install_root = dir_path (unquote (s + n)); break; } } } // bpkg-rep-fetch trust options. 
// cstrings trust_ops; { const char* t ("--trust-no"); for (const string& fp: tm.trust) { if (fp == "yes") t = "--trust-yes"; else { trust_ops.push_back ("--trust"); trust_ops.push_back (fp.c_str ()); } } trust_ops.push_back (t); } const string& pkg (tm.name.string ()); const version& ver (tm.version); const string repo (tm.repository.string ()); const dir_path pkg_dir (pkg + '-' + ver.string ()); // Specify the revision explicitly for the bpkg-build command not to end // up with a race condition building the latest revision rather than the // zero revision. // const string pkg_rev (pkg + '/' + version (ver.epoch, ver.upstream, ver.release, ver.effective_revision (), ver.iteration).string ()); // Query the project's build system information with `b info`. // auto prj_info = [&trace] (const dir_path& d, bool ext_mods, const char* what) { // Note that the `b info` diagnostics won't be copied into any of the // build logs. This is fine as this is likely to be an infrastructure // problem, given that the project distribution has been successfully // created. It's actually not quite clear which log this diagnostics // could go into. // try { return b_info (d, ext_mods, verb, trace); } catch (const b_error& e) { if (e.normal ()) throw failed (); // Assume the build2 process issued diagnostics. fail << "unable to query " << what << ' ' << d << " info: " << e << endf; } }; rwd = current_directory (); // If the package comes from a version control-based repository, then we // will also test its dist meta-operation. Specifically, we will checkout // the package outside the configuration directory passing --checkout-root // to the configure-only pkg-build command, re-distribute the checked out // directory in the load distribution mode, and then use this distribution // as a source to build the package. 
// dir_path dist_root (rwd / dir_path ("dist")); dir_path dist_src (dist_root / pkg_dir); dir_path dist_install_root (rwd / dir_path ("dist-install")); dir_path dist_install_src (dist_install_root / pkg_dir); dir_path dist_installed_root (rwd / dir_path ("dist-installed")); // Redistribute the package source directory (pkg_dir) checked out into // the directory other than the configuration directory (dist_root) and // replace it with the newly created distribution. Assume that the current // directory is the package configuration directory. Optionally pass the // config.import.* variable override and/or set the environment variables // for the build2 process. Return true if the dist meta-operation // succeeds. // auto redist = [&trace, &wre, &bkp_step, &bkp_status, &last_cmd] (step_id step, operation_result& r, const dir_path& dist_root, const dir_path& pkg_dir, // - const optional& import = nullopt, const small_vector& envvars = {}) { // Temporarily change the current directory to the distribution root // parent directory from the configuration directory to shorten the // command line paths and try to avoid the '..' path prefix. // dir_path dp (dist_root.directory ()); dir_path dn (dist_root.leaf ()); // Redistribute the package using the configured output directory. // dir_path cnf_dir (change_wd (trace, &r.log, dp)); dir_path out_dir (cnf_dir.relative (dp) / pkg_dir); dir_path src_dir (dn / pkg_dir); // Create the re-distribution root directory next to the distribution // root. // dir_path redist_root ("re" + dn.string ()); r.status |= run_b ( step, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "config.dist.root=" + redist_root.string (), import, ("dist('" + src_dir.representation () + "'@'" + out_dir.representation () + "')")); if (!r.status) return false; // Replace the package source directory with the re-distribution result. 
// rm_r (trace, &r.log, src_dir); mv (trace, &r.log, redist_root / pkg_dir, src_dir); change_wd (trace, &r.log, cnf_dir); // Return back to the configuration. return true; }; // Note that if this is not a self-hosted configuration, then we do not // build external runtime tests nor run internal for host or module // packages because the assumption is that they have been built/run (and // with buildtab settings such as warnings, etc) when testing the // self-hosted configuration this non-self-hosted one is based on. Also, // by the same reason, we don't install tools or modules for // non-self-hosted configurations. // // Actually, it could make sense to build and install tools and module // from a target configuration in this case. But that means for a // non-self-hosted configuration a tool/module may want to test two // things: its output build and its own build, which means we would need a // way to control which of the two things (or both) are to be tested // (think of two cross-compiler configurations, Emscripten and MinGW: for // the former a source code generator would normally only want to test the // output while for the latter -- both; maybe we could have a `cross-host` // class, meaning that the configuration is not host itself but its target // is). In any case, seeing that there is no way to verify such own build // works, we ignore this for now. // // Also note that build system modules can only have external build-time // tests (which is verified by bpkg-rep-fetch) and target packages cannot // have external build-time tests (which we verify ourselves). // bool selfhost (tm.host && *tm.host); // Detect if the package is of the target, host, or module type. 
// auto requirement = [&tm] (const char* id) { return find_if (tm.requirements.begin (), tm.requirements.end (), [id] (const requirement_alternatives& r) { if (r.size () == 1) { const requirement_alternative& a (r[0]); return find (a.begin (), a.end (), id) != a.end (); } return false; }) != tm.requirements.end (); }; bool module_pkg (pkg.compare (0, 10, "libbuild2-") == 0); bool bootstrap (module_pkg && requirement ("bootstrap")); bool host_pkg (!module_pkg && requirement ("host")); bool target_pkg (!module_pkg && !host_pkg); // Split external test packages into the runtime and build-time lists. // // Note that runtime and build-time test packages are always configured in // different bpkg configurations, since they can depend on different // versions of the same package. // small_vector runtime_tests; small_vector buildtime_tests; for (test_dependency& t: tm.tests) { if (t.buildtime) buildtime_tests.push_back (move (t)); else if (target_pkg || selfhost) runtime_tests.push_back (move (t)); } bool has_buildtime_tests (!buildtime_tests.empty ()); bool has_runtime_tests (!runtime_tests.empty ()); // Abort if a target package has external build-time tests. // if (target_pkg && has_buildtime_tests) { fail_operation ( add_result ("configure"), "build-time tests in package not marked with `requires: host`", result_status::abort); break; } // Create the required build configurations. // // Note that if this is a target package, then we intentionally do not // create host or module configuration letting the automatic private // configuration creation to take its course (since that would probably be // the most typical usage scenario). // // Also note that we may need a separate target configuration to build the // host package for installation. This is required to avoid a potential // conflict between the main package and a tool it may try to run during // the build. 
We also do the same for module packages which, while cannot // have build-time dependencies, could have private code generators. This // configuration needs to have the target type (so that it uses any // build-time dependencies from build-host/module configurations). Note // also that we currently only do this for self-hosted configuration // (since we don't install otherwise, see above). // dir_path target_conf ("build"); dir_path host_conf ("build-host"); dir_path module_conf ("build-module"); dir_path install_conf ("build-install"); // Main package config. // const dir_path& main_pkg_conf (target_pkg ? target_conf : host_pkg ? host_conf : module_conf); // Create the target configuration if this is a target package or if the // host/module package has external build-time tests. // bool create_target (target_pkg || has_buildtime_tests); // Create the host configuration if this is a host package. // // Also create it for the module package with external build-time tests. // The idea is to be able to test a tool which might only be tested via // the module. To be precise, we need to check that the tests package has // a build-time dependency (on the tool) but that's not easy to do and so // we will create a host configuration if a module has any build-time // tests. // bool create_host (host_pkg || (module_pkg && has_buildtime_tests)); // Create the module configuration if the package is a build system // module. // // Also create it for the host package with the external build-time tests, // so that a single build2 configuration is used for both target and host // packages (this is important in case they happen to use the same // module). // bool create_module (module_pkg || (host_pkg && has_buildtime_tests)); // Create the configuration for installing the main package of the host or // module type, unless it's not supposed to be installed. 
// bool create_install (!target_pkg && !install_root.empty () && selfhost); // Root configuration through which we will be configuring the cluster // (note: does not necessarily match the main package type). // // In other words, this is configuration that will be specified for // bpkg-pkg-build as the current configuration (via -d). It must be the // configuration that links to all the other configurations, except // install. // // Note that the install configuration, if present, is either the // cluster's "second root" (for a host package) or is an independent // cluster (for a module package). In either case it needs to additionally // be specified as a current configuration on the command line. // const dir_path& root_conf (create_target ? target_conf : create_host ? host_conf : module_conf); // Note that bpkg doesn't support configuring bootstrap module // dependents well, not distinguishing such modules from regular ones // (see pkg_configure() for details). Thus, we need to pass the // !config.import.* global override wherever required ourselves. // optional bootstrap_import; if (bootstrap) bootstrap_import = "!config.import." + tm.name.variable () + "=" + (rwd / main_pkg_conf).string (); // Configure. // { operation_result& r (add_result ("configure")); // Noop, just for the log record. // change_wd (trace, &r.log, rwd); // If we end up with multiple current configurations (root and install) // then when running the bpkg-pkg-build command we need to specify the // configuration for each package explicitly via --config-uuid. // // Let's not generate random UUIDs but use some predefined values which // we can easily recognize in the build logs. 
// const char* target_uuid ("00000000-0000-0000-0000-000000000001"); const char* host_uuid ("00000000-0000-0000-0000-000000000002"); const char* module_uuid ("00000000-0000-0000-0000-000000000003"); const char* install_uuid ("00000000-0000-0000-0000-000000000004"); // Let's however distinguish the target package as a simple common case // and simplify the configuration creation and packages configuration // commands making them more readable in the build log. For this simple // case only one configuration needs to be created explicitly and so it // doesn't need the UUID. Also there is no need in any package-specific // options for the bpkg-pkg-build command in this case. // // Create the target configuration. // // bpkg create // if (create_target) { step_id b (step_id::bpkg_create); // Breakpoint. step_id s (step_id::bpkg_target_create); // Step. step_id f1 (step_id::b_create); // First fallback. step_id f2 (step_id::bpkg_create); // Second fallback. r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create", "-d", target_conf, !target_pkg ? cstrings ({"--uuid", target_uuid}) : cstrings (), step_args (modules, s, f1, f2), step_args (env_args, s, f1, f2), step_args (config_args, s, f1, f2)); if (!r.status) break; } // Create the host configurations. // if (create_host) { step_id b (step_id::bpkg_create); if (host_pkg && selfhost) { // Create the host configuration. // { step_id s (step_id::bpkg_host_create); step_id f1 (step_id::b_create); step_id f2 (step_id::bpkg_create); // bpkg create --type host // r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create", "-d", host_conf, "--type", "host", "--uuid", host_uuid, step_args (modules, s, f1, f2), step_args (env_args, s, f1, f2), step_args (config_args, s, f1, f2)); if (!r.status) break; } // Create the install configuration. 
// // bpkg create // if (create_install) { step_id s (step_id::bpkg_target_create); step_id f1 (step_id::b_create); step_id f2 (step_id::bpkg_create); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create", "-d", install_conf, "--uuid", install_uuid, step_args (modules, s, f1, f2), step_args (env_args, s, f1, f2), step_args (config_args, s, f1, f2)); if (!r.status) break; } } else { // b create() config.config.load=~host // // Note also that we suppress warnings about unused config.* values. // r.status |= run_b ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create(" + host_conf.representation () + ",cc)", "config.config.load=~host", "config.config.persist+='config.*'@unused=drop"); if (!r.status) break; // bpkg create --existing --type host // r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "create", "--existing", "-d", host_conf, "--type", "host", "--uuid", host_uuid); if (!r.status) break; } } // Create the module configurations. // if (create_module) { step_id b (step_id::bpkg_create); // Create the module configuration. // { // b create() config.config.load=~build2 [ ] // // Note also that we suppress warnings about unused config.* values. // // What if a module wants to use CLI? The current thinking is that we // will be "whitelisting" base (i.e., those that can plausibly be used // by multiple modules) libraries and tools for use by build system // modules. So if and when we whitelist CLI, we will add it here, next // to cc. 
// string mods; cstrings eas; cstrings cas; if (module_pkg && selfhost) { step_id s (step_id::bpkg_module_create); for (const char* m: step_args (modules, s)) { if (!mods.empty ()) mods += ' '; mods += m; } eas = step_args (env_args, s); cas = step_args (config_args, s); } else mods = "cc"; r.status |= run_b ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create(" + module_conf.representation () + "," + mods + ")", "config.config.load=~build2", "config.config.persist+='config.*'@unused=drop", eas, cas); if (!r.status) break; // bpkg create --existing --type build2 // r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "create", "--existing", "-d", module_conf, "--type", "build2", "--uuid", module_uuid); if (!r.status) break; } // Create the install configuration. // if (create_install && module_pkg) { step_id s (step_id::bpkg_module_create); string mods; for (const char* m: step_args (modules, s)) { if (!mods.empty ()) mods += ' '; mods += m; } // b create() config.config.load=~build2 [ ] // r.status |= run_b ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create(" + install_conf.representation () + "," + mods + ")", "config.config.load=~build2", "config.config.persist+='config.*'@unused=drop", step_args (env_args, s), step_args (config_args, s)); if (!r.status) break; // bpkg create --existing // r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "create", "--existing", "-d", install_conf, "--uuid", install_uuid); if (!r.status) break; } } // Link the configurations. 
// // bpkg link -d // { step_id b (step_id::bpkg_link); if (create_target) { if (create_host) { r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "link", "-d", target_conf, host_conf); if (!r.status) break; } if (create_module) { r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "link", "-d", target_conf, module_conf); if (!r.status) break; } } if (create_host) { if (create_module) { r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "link", "-d", host_conf, module_conf); if (!r.status) break; } } // Link the install configuration only for the host package. Note that // the module package may not have build-time dependencies and so // doesn't need configurations for them. // if (create_install && host_pkg) { r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "link", "-d", install_conf, host_conf); if (!r.status) break; if (create_module) { r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "link", "-d", install_conf, module_conf); if (!r.status) break; } } } // Fetch repositories into the main package configuration, the target // configuration for external build-time tests, if any, and the install // configuration, if present. 
// // bpkg add // { step_id b (step_id::bpkg_configure_add); step_id s (step_id::bpkg_configure_add); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "add", "-d", main_pkg_conf, step_args (env_args, s), step_args (config_args, s), repo); if (!r.status) break; } // bpkg fetch // { step_id b (step_id::bpkg_configure_fetch); step_id s (step_id::bpkg_configure_fetch); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "fetch", "-d", main_pkg_conf, step_args (env_args, s), step_args (config_args, s), trust_ops); if (!r.status) break; } if (create_install) { // bpkg add // { step_id b (step_id::bpkg_configure_add); step_id s (step_id::bpkg_configure_add); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "add", "-d", install_conf, step_args (env_args, s), step_args (config_args, s), repo); if (!r.status) break; } // bpkg fetch // { step_id b (step_id::bpkg_configure_fetch); step_id s (step_id::bpkg_configure_fetch); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "fetch", "-d", install_conf, step_args (env_args, s), step_args (config_args, s), trust_ops); if (!r.status) break; } } if (has_buildtime_tests) { // bpkg add // { step_id b (step_id::bpkg_configure_add); step_id s (step_id::bpkg_configure_add); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "add", "-d", target_conf, step_args (env_args, s), step_args (config_args, s), repo); if (!r.status) break; } // bpkg fetch // { step_id b (step_id::bpkg_configure_fetch); step_id s (step_id::bpkg_configure_fetch); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "fetch", "-d", target_conf, step_args (env_args, s), step_args (config_args, s), trust_ops); if (!r.status) break; } } // Configure all the packages using a single bpkg-pkg-build command. // // First, prepare the common and package arguments. 
// strings common_args; strings pkg_args; if (target_pkg) // The simple common case (see above)? { // The overall command looks like this: // // bpkg build --configure-only -- // ... // step_id s (step_id::bpkg_target_configure_build); step_id f1 (step_id::b_configure); step_id f2 (step_id::bpkg_configure_build); cstrings eas (step_args (env_args, s, f1, f2)); cstrings cas (step_args (config_args, s, f1, f2)); common_args.push_back ("--checkout-root"); common_args.push_back (dist_root.string ()); common_args.insert (common_args.end (), eas.begin (), eas.end ()); common_args.insert (common_args.end (), cas.begin (), cas.end ()); pkg_args.push_back (pkg_rev); // Add test dependency package constraints (for example 'bar > 1.0.0'). // for (auto t: runtime_tests) pkg_args.push_back (t.string ()); } else { // The overall command looks like this (but some parts may be omitted): // // bpkg build --configure-only -- // { }+ // { }+ { ... } // { }+ // { }+ { ... } // // Add the main package args. // // Also add the external runtime test packages here since they share // the configuration directory with the main package. // { step_id s (target_pkg ? step_id::bpkg_target_configure_build : host_pkg ? step_id::bpkg_host_configure_build : step_id::bpkg_module_configure_build); step_id f1 (step_id::b_configure); step_id f2 (step_id::bpkg_configure_build); cstrings eas (step_args (env_args, s, f1, f2)); cstrings cas (step_args (config_args, s, f1, f2)); // Main package configuration name. // const char* conf_uuid (target_pkg ? target_uuid : host_pkg ? host_uuid : module_uuid); // Add the main package. 
// { pkg_args.push_back ("{"); pkg_args.push_back ("--config-uuid"); pkg_args.push_back (conf_uuid); pkg_args.push_back ("--checkout-root"); pkg_args.push_back (dist_root.string ()); pkg_args.insert (pkg_args.end (), eas.begin (), eas.end ()); pkg_args.insert (pkg_args.end (), cas.begin (), cas.end ()); pkg_args.push_back ("}+"); pkg_args.push_back (pkg_rev); } // Add the runtime test packages. // if (has_runtime_tests) { pkg_args.push_back ("{"); pkg_args.push_back ("--config-uuid"); pkg_args.push_back (conf_uuid); pkg_args.push_back ("--checkout-root"); pkg_args.push_back (dist_root.string ()); if (bootstrap_import) pkg_args.push_back (*bootstrap_import); pkg_args.insert (pkg_args.end (), eas.begin (), eas.end ()); pkg_args.insert (pkg_args.end (), cas.begin (), cas.end ()); pkg_args.push_back ("}+"); // Add test dependency package constraints and group them if there // are multiple of them. // if (runtime_tests.size () != 1) pkg_args.push_back ("{"); for (auto t: runtime_tests) pkg_args.push_back (t.string ()); if (runtime_tests.size () != 1) pkg_args.push_back ("}"); } } // Add the main package configured in the install configuration and // the external build-time test packages // { step_id s (step_id::bpkg_target_configure_build); step_id f1 (step_id::b_configure); step_id f2 (step_id::bpkg_configure_build); cstrings eas (step_args (env_args, s, f1, f2)); cstrings cas (step_args (config_args, s, f1, f2)); // Add the main package. // if (create_install) { common_args.push_back ("-d"); common_args.push_back (install_conf.string ()); pkg_args.push_back ("{"); pkg_args.push_back ("--config-uuid"); pkg_args.push_back (install_uuid); // Note that we do another re-distribution (with a separate // --checkout-root) in case the package is missing file that // are only used during installation. 
// pkg_args.push_back ("--checkout-root"); pkg_args.push_back (dist_install_root.string ()); pkg_args.insert (pkg_args.end (), eas.begin (), eas.end ()); pkg_args.insert (pkg_args.end (), cas.begin (), cas.end ()); pkg_args.push_back ("}+"); pkg_args.push_back (pkg_rev); } // Add the build-time test packages. // if (has_buildtime_tests) { pkg_args.push_back ("{"); pkg_args.push_back ("--config-uuid"); pkg_args.push_back (target_uuid); pkg_args.push_back ("--checkout-root"); pkg_args.push_back (dist_root.string ()); if (bootstrap_import) pkg_args.push_back (*bootstrap_import); pkg_args.insert (pkg_args.end (), eas.begin (), eas.end ()); pkg_args.insert (pkg_args.end (), cas.begin (), cas.end ()); pkg_args.push_back ("}+"); // Add test dependency package constraints and group them if there // are multiple of them. // if (buildtime_tests.size () != 1) pkg_args.push_back ("{"); // Strip the build-time mark. // for (auto t: buildtime_tests) pkg_args.push_back (t.dependency::string ()); if (buildtime_tests.size () != 1) pkg_args.push_back ("}"); } } } // Finally, configure all the packages. // { step_id b (step_id::bpkg_configure_build); step_id s (step_id::bpkg_global_configure_build); optional dependency_checksum; r.status |= run_bpkg ( b, trace, r.log, &dependency_checksum, wre, bkp_step, bkp_status, last_cmd, "-v", "build", "--configure-only", "--rebuild-checksum", tm.dependency_checksum ? *tm.dependency_checksum : "", "--yes", "-d", root_conf, step_args (env_args, s), step_args (config_args, s), common_args, "--", pkg_args); // The dependency checksum is tricky, here are the possibilities: // // - absent: bpkg terminated abnormally (or was not executed due to // a breakpoint) -- nothing to do here. // // - empty: bpkg terminated normally with error before calculating the // checksum -- nothing to do here either. // // - one line: bpkg checksum that we want. // // - many lines: someone else (e.g., buildfile) printed to stdout, // which we consider an error. 
// if (dependency_checksum && !dependency_checksum->empty ()) { string& s (*dependency_checksum); // Make sure that the output contains a single line, and bail out // with the error status if that's not the case. // if (s.find ('\n') == s.size () - 1) { s.pop_back (); // If the dependency checksum didn't change, then save it to the // result manifest, clean the logs and bail out with the skip // result status. // if (tm.dependency_checksum && *tm.dependency_checksum == s) { l3 ([&]{trace << "skip";}); rm.status = result_status::skip; rm.dependency_checksum = move (s); rm.results.clear (); break; } // Save the (new) dependency checksum to the result manifest. // // Also note that we save the checksum if bpkg failed after the // checksum was printed. As a result, we won't be rebuilding the // package until the error is fixed (in a package or worker) and // the checksum changes, which feels like a proper behavior. // rm.dependency_checksum = move (s); } else fail_operation (r, "unexpected bpkg output:\n'" + s + "'", result_status::error); } if (!r.status) break; } // Redistribute the main package in both build and install // configurations, if required (test packages will be handled later). // if (exists (dist_src)) { change_wd (trace, &r.log, main_pkg_conf); step_id b (step_id::bpkg_configure_build); if (!redist (b, r, dist_root, pkg_dir)) break; } if (exists (dist_install_src)) { change_wd (trace, &r.log, rwd / install_conf); step_id b (step_id::bpkg_configure_build); if (!redist (b, r, dist_install_root, pkg_dir)) break; } rm.status |= r.status; } #ifdef _WIN32 // Give Windows a chance to (presumably) scan any files we may have just // unpacked. Failed that, if we try to overwrite any such file (e.g., a // generated header) we may end up with a permission denied error. Note // also that this is in addition to the 2 seconds retry we have in our // fdopen() implementation, which is not always enough. // Sleep (5000); #endif // Update the main package. 
// { operation_result& r (add_result ("update")); change_wd (trace, &r.log, rwd / main_pkg_conf); // bpkg update // step_id b (step_id::bpkg_update); step_id s (step_id::bpkg_update); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "update", step_args (env_args, s), step_args (config_args, s), pkg); if (!r.status) break; rm.status |= r.status; } // Re-distribute if comes from a version control-based repository, update, // and test external test packages in the bpkg configuration in the // current working directory. Optionally pass the config.import.* variable // override and/or set the environment variables for the bpkg processes. // Return true if all operations for all packages succeeded. // // Pass true as the installed argument to use the test separate installed // phase step ids (bpkg.test-separate-installed.*) and the test separate // phase step ids (bpkg.test-separate.*) otherwise. In both cases fall // back to the main phase step ids (bpkg.*) when no environment/ // configuration arguments are specified for them. // auto test = [&trace, &wre, &bkp_step, &bkp_status, &last_cmd, &step_args, &config_args, &env_args, &bootstrap_import, &redist] (operation_result& r, const small_vector& tests, const dir_path& dist_root, bool installed, const small_vector& envvars = {}) { const optional& import (!installed ? bootstrap_import : nullopt); for (const test_dependency& td: tests) { const string& pkg (td.name.string ()); // Re-distribute. // if (exists (dist_root)) { // Note that re-distributing the test package is a bit tricky since // we don't know its version and so cannot deduce its source // directory name easily. We could potentially run the bpkg-status // command after the package is configured and parse the output to // obtain the version. Let's, however, keep it simple and find the // source directory using the package directory name pattern. 
// try { dir_path pkg_dir; path_search (dir_path (pkg + "-*/"), [&pkg_dir] (path&& pe, const string&, bool interm) { if (!interm) pkg_dir = path_cast (move (pe)); return interm; }, dist_root); if (!pkg_dir.empty ()) { step_id b ( installed ? step_id::bpkg_test_separate_installed_configure_build : step_id::bpkg_configure_build); if (!redist (b, r, dist_root, pkg_dir, import, envvars)) return false; } } catch (const system_error& e) { fail << "unable to scan directory " << dist_root << ": " << e; } } // Update. // // bpkg update // { step_id b (installed ? step_id::bpkg_test_separate_installed_update : step_id::bpkg_test_separate_update); step_id s (b); step_id f (step_id::bpkg_update); r.status |= run_bpkg ( b, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "update", step_args (env_args, s, f), step_args (config_args, s, f), import, pkg); if (!r.status) return false; } // Test. // // Note that we assume that the package supports the test operation // since this is its main purpose. // // bpkg test // { step_id b (installed ? step_id::bpkg_test_separate_installed_test : step_id::bpkg_test_separate_test); step_id s (b); step_id f (step_id::bpkg_test); r.status |= run_bpkg ( b, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "test", "--package-cwd", // See above for details. step_args (env_args, s, f), step_args (config_args, s, f), import, pkg); if (!r.status) return false; } } return true; }; // Test the main package. // b_project_info prj (prj_info (pkg_dir, true /* ext_mods */, "project")); // Run the internal tests if the test operation is supported by the // project but only for the target package or if the configuration is // self-hosted. // bool has_internal_tests ((target_pkg || selfhost) && find (prj.operations.begin (), prj.operations.end (), "test") != prj.operations.end ()); if (has_internal_tests || has_runtime_tests || has_buildtime_tests) { operation_result& r (add_result ("test")); // Run internal tests. 
//
if (has_internal_tests)
{
  // Use --package-cwd to help ported to build2 third-party packages a
  // bit (see bpkg-pkg-test(1) for details).
  //
  // Note that internal tests that load the module itself don't make
  // much sense, thus we don't pass the config.import.* variable on
  // the command line for modules that require bootstrap.
  //
  // bpkg test
  //
  step_id b (step_id::bpkg_test);
  step_id s (step_id::bpkg_test);

  r.status |= run_bpkg (
    b,
    trace, r.log, wre,
    bkp_step, bkp_status, last_cmd,
    "-v",
    "test",
    "--package-cwd",
    step_args (env_args, s),
    step_args (config_args, s),
    pkg);

  if (!r.status)
    break;
}

// External runtime tests.
//
// Note that we assume that these packages belong to the dependent
// package's repository or its complement repositories, recursively.
// Thus, we test them in the configuration used to build the dependent
// package.
//
if (has_runtime_tests)
{
  if (!test (r, runtime_tests, dist_root, false /* installed */))
    break;
}

// External build-time tests.
//
if (has_buildtime_tests)
{
  change_wd (trace, &r.log, rwd / target_conf);

  if (!test (r, buildtime_tests, dist_root, false /* installed */))
    break;
}

rm.status |= r.status;
}

// Install the package, optionally test the installation and uninstall
// afterwards.
//
// These operations are triggered by presence of config.install.root
// configuration variable having a non-empty value for
// bpkg.configure.create step.
//
if (install_root.empty ())
  break;

// If this is not a self-hosted configuration, then skip installing host
// and module packages.
//
if (!target_pkg && !selfhost)
  break;

// Now the overall plan is as follows:
//
// 1. Install the package.
//
// 2. If the package has subprojects that support the test operation, then
//    configure, build, and test them out of the source tree against the
//    installed package using the build system directly.
//
// 3. If any of the test packages are specified, then configure, build,
//    and test them in a separate bpkg configuration(s) against the
//    installed package.
//
// 4. Uninstall the package.
//
install_conf = rwd / (create_install ? install_conf : main_pkg_conf);

// Install.
//
{
  operation_result& r (add_result ("install"));

  change_wd (trace, &r.log, install_conf);

  // Note that for a host or module package we don't need the target
  // configuration anymore, if present. So let's free up the space a
  // little bit.
  //
  if (!target_pkg && create_target)
    rm_r (trace, &r.log, rwd / target_conf);

  // bpkg install
  //
  step_id b (step_id::bpkg_install);
  step_id s (step_id::bpkg_install);

  r.status |= run_bpkg (
    b,
    trace, r.log, wre,
    bkp_step, bkp_status, last_cmd,
    "-v",
    "install",
    step_args (env_args, s),
    step_args (config_args, s),
    pkg);

  if (!r.status)
    break;

  rm.status |= r.status;
}

// Run the internal tests if the project contains "testable" subprojects,
// but not for a module.
//
has_internal_tests = false;

dir_paths subprj_dirs; // "Testable" package subprojects.

if (!module_pkg)
{
  // Collect the "testable" subprojects.
  //
  for (const b_project_info::subproject& sp: prj.subprojects)
  {
    // Retrieve the subproject information similar to how we've done it
    // for the package.
    //
    b_project_info si (prj_info (pkg_dir / sp.path,
                                 true /* ext_mods */,
                                 "subproject"));

    const strings& ops (si.operations);

    if (find (ops.begin (), ops.end (), "test") != ops.end ())
      subprj_dirs.push_back (sp.path);
  }

  has_internal_tests = !subprj_dirs.empty ();
}

if (has_internal_tests || has_runtime_tests || has_buildtime_tests)
{
  operation_result& r (add_result ("test-installed"));

  change_wd (trace, &r.log, rwd);

  // Make sure that the installed package executables are properly
  // imported when configuring and running tests, unless we are testing
  // the build system module (that supposedly doesn't install any
  // executables).
// small_vector envvars; if (!module_pkg) { // Note that we add the $config.install.root/bin directory at the // beginning of the PATH environment variable value, so the installed // executables are found first. // string paths ("PATH=" + (install_root / "bin").string ()); if (optional s = getenv ("PATH")) { paths += path::traits_type::path_separator; paths += *s; } envvars.push_back (move (paths)); } // Run internal tests. // if (has_internal_tests) { // Create the configuration. // // b create(, ) // // Amalgamation directory that will contain configuration subdirectory // for package tests out of source tree build. // dir_path out_dir ("build-installed"); { step_id b (step_id::b_test_installed_create); step_id s (step_id::b_test_installed_create); step_id f (step_id::b_create); string mods; // build2 create meta-operation parameters. for (const char* m: step_args (modules, s, f)) { mods += mods.empty () ? ", " : " "; mods += m; } r.status |= run_b ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create('" + out_dir.representation () + "'" + mods + ")", step_args (env_args, s, f), step_args (config_args, s, f)); if (!r.status) break; } // Configure testable subprojects sequentially and test/build in // parallel afterwards. // strings test_specs; for (const dir_path& d: subprj_dirs) { // b configure(@) // // step_id b (step_id::b_test_installed_configure); step_id s (step_id::b_test_installed_configure); step_id f (step_id::b_configure); dir_path subprj_src_dir (exists (dist_src) ? 
dist_src / d : main_pkg_conf / pkg_dir / d); dir_path subprj_out_dir (out_dir / d); r.status |= run_b ( b, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "configure('" + subprj_src_dir.representation () + "'@'" + subprj_out_dir.representation () + "')", step_args (env_args, s, f), step_args (config_args, s, f)); if (!r.status) break; test_specs.push_back ( "test('" + subprj_out_dir.representation () + "')"); } if (!r.status) break; // Build/test subprojects. // // b test()... // { step_id b (step_id::b_test_installed_test); step_id s (step_id::b_test_installed_test); r.status |= run_b ( b, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", test_specs, step_args (env_args, s), step_args (config_args, s)); if (!r.status) break; } } // Run runtime and build-time tests. // // Note that we only build runtime tests for target packages and for // host packages in self-hosted configurations. // if (has_runtime_tests || has_buildtime_tests) { // Create the required build configurations. // dir_path target_conf ("build-installed-bpkg"); dir_path host_conf ("build-installed-bpkg-host"); dir_path module_conf ("build-installed-bpkg-module"); // Create the target configuration if this is a target package having // external runtime tests or a host/module package having external // build-time tests. // bool create_target (target_pkg || has_buildtime_tests); // Note that even if there are no runtime tests for a host/module // package, we still need to create the host/build2 configuration to // configure the system package in. // bool create_host (host_pkg || module_pkg); bool create_module (module_pkg || (host_pkg && has_buildtime_tests)); // Note: a module package cannot have runtime tests and so the module // configuration is only created to serve build-time tests. Thus, the // host or target configuration is always created as well and the // module configuration is never a root configuration. 
// assert (create_target || create_host); // Root configuration through which we will be configuring the // cluster. // const dir_path& root_conf (create_target ? target_conf : host_conf); // Runtime tests configuration. Should only be used if there are any. // const dir_path& runtime_tests_conf (target_pkg ? target_conf : host_conf); // Create the target configuration. // // bpkg create // if (create_target) { step_id b (step_id::bpkg_test_separate_installed_create); // Note that here and below the _for_* step ids are determined by // the main package type (and, yes, that means we will use the same // step ids for target and host configuration -- that, however, // should be ok since host configuration will only be created in // the self-hosted case). // step_id s ( target_pkg ? step_id::bpkg_test_separate_installed_create_for_target : host_pkg ? step_id::bpkg_test_separate_installed_create_for_host : step_id::bpkg_test_separate_installed_create_for_module); // Note: no fallback for modules. // optional f (!module_pkg ? step_id::bpkg_test_separate_installed_create : optional ()); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create", "-d", target_conf, step_args (modules, s, f), step_args (env_args, s, f), step_args (config_args, s, f)); if (!r.status) break; } // Create the host configuration. // if (create_host) { // bpkg create --type host // step_id b (step_id::bpkg_test_separate_installed_create); step_id s ( host_pkg ? step_id::bpkg_test_separate_installed_create_for_host : step_id::bpkg_test_separate_installed_create_for_module); // Note: no fallback for modules. // optional f (!module_pkg ? 
step_id::bpkg_test_separate_installed_create : optional ()); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create", "-d", host_conf, "--type", "host", "--name", "host", step_args (modules, s, f), step_args (env_args, s, f), step_args (config_args, s, f)); if (!r.status) break; } // Create the module configuration. // // Note that we never build any tests in it but only configure the // system package. Note, however, that the host/module package // build-time tests can potentially build some other modules here. // if (create_module) { // b create() config.config.load=~build2 // step_id b (step_id::bpkg_test_separate_installed_create); r.status |= run_b ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-V", "create(" + module_conf.representation () + ",cc)", "config.config.load=~build2", "config.config.persist+='config.*'@unused=drop"); if (!r.status) break; // bpkg create --existing --type build2 // r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "create", "--existing", "-d", module_conf, "--type", "build2", "--name", "module"); if (!r.status) break; } // Link the configurations. // // bpkg link -d // { step_id b (step_id::bpkg_test_separate_installed_link); if (create_target) { if (create_host) { r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "link", "-d", target_conf, host_conf); if (!r.status) break; } if (create_module) { r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "link", "-d", target_conf, module_conf); if (!r.status) break; } } if (create_host) { if (create_module) { r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "link", "-d", host_conf, module_conf); if (!r.status) break; } } } // Add and fetch the repositories. 
// if (has_runtime_tests) { // bpkg add // { step_id b (step_id::bpkg_test_separate_installed_configure_add); step_id s (step_id::bpkg_test_separate_installed_configure_add); step_id f (step_id::bpkg_configure_add); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "add", "-d", runtime_tests_conf, step_args (env_args, s, f), step_args (config_args, s, f), repo); if (!r.status) break; } // bpkg fetch // { step_id b (step_id::bpkg_test_separate_installed_configure_fetch); step_id s (step_id::bpkg_test_separate_installed_configure_fetch); step_id f (step_id::bpkg_configure_fetch); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "fetch", "-d", runtime_tests_conf, step_args (env_args, s, f), step_args (config_args, s, f), trust_ops); if (!r.status) break; } } if (has_buildtime_tests) { // bpkg add // { step_id b (step_id::bpkg_test_separate_installed_configure_add); step_id s (step_id::bpkg_test_separate_installed_configure_add); step_id f (step_id::bpkg_configure_add); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "add", "-d", target_conf, step_args (env_args, s, f), step_args (config_args, s, f), repo); if (!r.status) break; } // bpkg fetch // { step_id b (step_id::bpkg_test_separate_installed_configure_fetch); step_id s (step_id::bpkg_test_separate_installed_configure_fetch); step_id f (step_id::bpkg_configure_fetch); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "fetch", "-d", target_conf, step_args (env_args, s, f), step_args (config_args, s, f), trust_ops); if (!r.status) break; } } // Configure all the packages using a single bpkg-pkg-build command. // // bpkg build --configure-only // { }+ { ... } // ... 
// ?sys: // strings pkg_args; if (has_runtime_tests) { // Note that only host package runtime tests can (but not // necessarily) be configured in a linked configuration and require // --config-name to be specified for them. // assert (!module_pkg); string conf_name (runtime_tests_conf == root_conf ? "" : "host"); bool og (!conf_name.empty ()); if (og) { pkg_args.push_back ("{"); pkg_args.push_back ("--config-name"); pkg_args.push_back (conf_name); pkg_args.push_back ("}+"); } if (og && runtime_tests.size () != 1) pkg_args.push_back ("{"); for (auto t: runtime_tests) pkg_args.push_back (t.string ()); if (og && runtime_tests.size () != 1) pkg_args.push_back ("}"); } if (has_buildtime_tests) { // Strip the build-time mark. // for (auto t: buildtime_tests) pkg_args.push_back (t.dependency::string ()); } pkg_args.push_back ("?sys:" + pkg); // Finally, configure all the test packages. // { step_id b (step_id::bpkg_test_separate_installed_configure_build); step_id g (step_id::bpkg_global_configure_build); // Global. step_id s ( target_pkg ? step_id::bpkg_test_separate_installed_create_for_target : host_pkg ? step_id::bpkg_test_separate_installed_create_for_host : step_id::bpkg_test_separate_installed_create_for_module); step_id f (step_id::bpkg_test_separate_installed_configure_build); r.status |= run_bpkg ( b, envvars, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "build", "--configure-only", "--checkout-root", dist_installed_root, "--yes", "-d", root_conf, step_args (env_args, g), step_args (env_args, s, f), step_args (config_args, g), step_args (config_args, s, f), "--", pkg_args); if (!r.status) break; } #ifdef _WIN32 Sleep (5000); // See above. #endif // Run external runtime tests. // if (has_runtime_tests) { const dir_path& runtime_tests_conf (target_pkg ? target_conf : host_conf); change_wd (trace, &r.log, runtime_tests_conf); if (!test (r, runtime_tests, dist_installed_root, true /* installed */, envvars)) break; } // Run external build-time tests. 
// if (has_buildtime_tests) { change_wd (trace, &r.log, rwd / target_conf); if (!test (r, buildtime_tests, dist_installed_root, true /* installed */, envvars)) break; } rm.status |= r.status; } } // Uninstall. // { operation_result& r (add_result ("uninstall")); change_wd (trace, &r.log, install_conf); // bpkg uninstall // step_id b (step_id::bpkg_uninstall); step_id s (step_id::bpkg_uninstall); r.status |= run_bpkg ( b, trace, r.log, wre, bkp_step, bkp_status, last_cmd, "-v", "uninstall", step_args (env_args, s), step_args (config_args, s), pkg); if (!r.status) break; rm.status |= r.status; } break; } if (!rm.results.empty ()) { operation_result& r (rm.results.back ()); rm.status |= r.status; // Merge last in case of a break. // Unless there is an error (or worse) encountered, log the special 'end' // step and, if this step is specified in the interactive manifest value, // ask the user if to continue the task execution. // bool error (!rm.status); if (!error) { r.status |= run_cmd (step_id::end, trace, r.log, nullptr /* out */, regexes (), "" /* name */, bkp_step, bkp_status, last_cmd, process_env ()); rm.status |= r.status; } // Truncate logs if they would exceed the upload limit. // // @@ TMP: currently this limit is hard-coded. In the future it should be // sent along with the task manifest. // const size_t upload_limit (10 * 1024 * 1024); { // Reserve 10K for other manifest values (alternatively, we could do it // exactly in upload_manifest()). // const size_t manifest_size (10 * 1024); size_t n (manifest_size); for (const operation_result& r: rm.results) n += r.log.size (); if (n > upload_limit) { // Divide the size equally among all the operations and truncate any // that exceed their allowance. This way we will get some information // for each operation. 
// n = (upload_limit - manifest_size) / rm.results.size (); for (operation_result& r: rm.results) { if (r.log.size () <= n) continue; // We need to be careful not to truncate it in the middle of UTF-8 // sequence. So we look for the last newline that still fits. // size_t p (n - 80 /* for the "log truncated" note */); for (; p != 0 && r.log[p] != '\n'; --p) ; r.log.resize (p != 0 ? p + 1 : 0); // Keep the newline. r.log += "-------------------------------LOG TRUNCATED-------------------------------\n"; } } } } else assert (rm.status == result_status::abort || rm.status == result_status::skip); if (!rwd.empty ()) change_wd (trace, nullptr /* log */, rwd); // Upload the result. // const string url ("tftp://" + ops.tftp_host () + "/result.manifest.lz4"); try { upload_manifest (trace, url, rm, "result"); // We use exit code 2 to signal abnormal termination but where we managed // to upload the result manifest. See startup() for details. // return rm.status != result_status::abnormal ? 0 : 2; } catch (const io_error& e) { error << "unable to upload result manifest to " << url << ": " << e; } // We use exit code 3 to signal an unsuccessful attempt to upload the result // manifest. See startup() for details. // return 3; } static int startup () { tracer trace ("startup"); // Our overall plan is as follows: // // 1. Download the task manifest into the build directory (CWD). // // 2. Parse it and get the target. // // 3. Find the environment setup executable for this target. // // 4. Execute the environment setup executable. // // 5. If the environment setup executable fails, then upload the (failed) // result ourselves. // const string url ("tftp://" + ops.tftp_host () + "/task.manifest"); const path mf ("task.manifest"); // If we fail, try to upload the result manifest (abnormal termination). The // idea is that the machine gets suspended and we can investigate what's // going on by logging in and examining the diagnostics (e.g., via // journalctl, etc). 
// task_manifest tm; try { // Download the task. // // We are downloading from our host so there shouldn't normally be any // connectivity issues. Unless, of course, we are on Windows where all // kinds of flakiness is business as usual. Note that having a long enough // timeout is not enough: if we try to connect before the network is up, // we will keep waiting forever, even after it is up. So we have to // timeout and try again. This is also pretty bad (unlike, say during // bootstrap which doesn't happen very often) since we are wasting the // machine time. So we are going to log it as a warning and not merely a // trace since if this is a common occurrence, then something has to be // done about it. // for (size_t retry (1);; ++retry) { try { tftp_curl c (trace, nullfd, mf, curl::get, url, "--tftp-blksize", tftp_blksize, "--max-time", tftp_get_timeout); if (!c.wait ()) throw_generic_error (EIO); break; } catch (const system_error& e) { bool bail (retry > tftp_get_retries); diag_record dr (bail ? error : warn); dr << "unable to download task manifest from " << url << " on " << retry << " try: " << e; if (bail) throw failed (); } } // Parse it. // tm = parse_manifest (mf, "task"); // Find the environment setup executable. // // While the executable path contains a directory (so the PATH search does // not apply) we still use process::path_search() to automatically handle // appending platform-specific executable extensions (.exe/.bat, etc). 
// process_path pp; if (tm.environment) { try { pp = process::try_path_search (env_dir / *tm.environment, false /* init */); } catch (const invalid_path& e) { fail << "invalid environment name '" << e.path << "': " << e; } if (pp.empty ()) fail << "no environment setup executable in " << env_dir << " " << "for environment name '" << *tm.environment << "'"; } else { pp = process::try_path_search (env_dir / "default", false /* init */); if (pp.empty ()) fail << "no default environment setup executable in " << env_dir; } // Run it. // strings os; string tg (tm.target.string ()); // Use the name=value notation for options to minimize the number of // arguments passed to the environment setup executable. Note that the // etc/environments/default-*.bat scripts can only handle the limited // number of arguments. // if (ops.systemd_daemon ()) os.push_back ("--systemd-daemon"); if (ops.verbose_specified ()) os.push_back ("--verbose=" + to_string (ops.verbose ())); if (ops.tftp_host_specified ()) os.push_back ("--tftp-host=" + ops.tftp_host ()); os.push_back (string ("--env-script=") + pp.effect_string ()); os.push_back ("--env-target=" + tg); // Note that we use the effective (absolute) path instead of recall since // we may have changed the CWD. // // Also note that the worker can ask the user if to continue the task // execution when the interactive build breakpoint is reached. Thus, we // don't redirect stdin to /dev/null. // // Exit code 2 signals abnormal termination but where the worker uploaded // the result itself. // // Exit code 3 signals an unsuccessful attempt by the worker to upload the // result manifest. There is no reason to retry (most likely there is // nobody listening on the other end anymore). 
// switch (run_io_exit (trace, 0, 2, 2, pp, tg, argv0.effect_string (), os)) { case 3: case 2: return 1; case 0: return 0; default: fail << "process " << pp << " exited with non-zero code" << endf; } } catch (const failed&) { const string url ("tftp://" + ops.tftp_host () + "/result.manifest.lz4"); // If we failed before being able to parse the task manifest, use the // "unknown" values for the package name and version. // result_manifest rm { tm.name.empty () ? bpkg::package_name ("unknown") : tm.name, tm.version.empty () ? bpkg::version ("0") : tm.version, result_status::abnormal, operation_results {}, worker_checksum, nullopt /* dependency_checksum */ }; try { upload_manifest (trace, url, rm, "result"); } catch (const io_error& e) { fail << "unable to upload result manifest to " << url << ": " << e; } return 1; } } static int bootstrap () { bootstrap_manifest bm { bootstrap_manifest::versions_type { {"bbot", standard_version (BBOT_VERSION_STR)}, {"libbbot", standard_version (LIBBBOT_VERSION_STR)}, {"libbpkg", standard_version (LIBBPKG_VERSION_STR)}, {"libbutl", standard_version (LIBBUTL_VERSION_STR)} } }; serialize_manifest (bm, cout, "stdout", "bootstrap"); return 0; } int bbot:: main (int argc, char* argv[]) try { tracer trace ("main"); // This is a little hack to make our baseutils for Windows work when called // with absolute path. In a nutshell, MSYS2's exec*p() doesn't search in the // parent's executable directory, only in PATH. And since we are running // without a shell (that would read /etc/profile which sets PATH to some // sensible values), we are only getting Win32 PATH values. And MSYS2 /bin // is not one of them. So what we are going to do is add /bin at the end of // PATH (which will be passed as is by the MSYS2 machinery). This will make // MSYS2 search in /bin (where our baseutils live). And for everyone else // this should be harmless since it is not a valid Win32 path. 
// #ifdef _WIN32 { string mp; if (optional p = getenv ("PATH")) { mp = move (*p); mp += ';'; } mp += "/bin"; setenv ("PATH", mp); } #endif // On POSIX ignore SIGPIPE which is signaled to a pipe-writing process if // the pipe reading end is closed. Note that by default this signal // terminates a process. Also note that there is no way to disable this // behavior on a file descriptor basis or for the write() function call. // // On Windows disable displaying error reporting dialog box. Note that the // error mode is inherited by child processes. // #ifndef _WIN32 if (signal (SIGPIPE, SIG_IGN) == SIG_ERR) fail << "unable to ignore broken pipe (SIGPIPE) signal: " << system_error (errno, std::generic_category ()); // Sanitize. #else SetErrorMode (SetErrorMode (0) | // Returns the current mode. SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); #endif cli::argv_scanner scan (argc, argv, true); ops.parse (scan); verb = ops.verbose (); // @@ systemd 231 added JOURNAL_STREAM environment variable which allows // detecting if stderr is connected to the journal. // if (ops.systemd_daemon ()) systemd_diagnostics (false); // Version. // if (ops.version ()) { cout << "bbot-worker " << BBOT_VERSION_ID << endl << "libbbot " << LIBBBOT_VERSION_ID << endl << "libbpkg " << LIBBPKG_VERSION_ID << endl << "libbutl " << LIBBUTL_VERSION_ID << endl << "Copyright (c) " << BBOT_COPYRIGHT << "." << endl << "This is free software released under the MIT license." << endl; return 0; } // Help. // if (ops.help ()) { pager p ("bbot-worker help", false); print_bbot_worker_usage (p.stream ()); // If the pager failed, assume it has issued some diagnostics. // return p.wait () ? 0 : 1; } // Figure out our mode. 
// if (ops.bootstrap () && ops.startup ()) fail << "--bootstrap and --startup are mutually exclusive"; enum class mode {boot, start, build} m (mode::build); if (ops.bootstrap ()) m = mode::boot; if (ops.startup ()) m = mode::start; if (ops.systemd_daemon ()) { info << "bbot worker " << BBOT_VERSION_ID; } // Figure out our path (used for re-exec). // argv0 = process::path_search (argv[0], true); // Sort out the build directory. // if (ops.build_specified ()) change_wd (trace, nullptr /* log */, ops.build (), true /* create */); // Sort out the environment directory. // try { env_dir = ops.environments_specified () ? ops.environments () : dir_path::home_directory (); if (!dir_exists (env_dir)) throw_generic_error (ENOENT); } catch (const system_error& e) { fail << "invalid environment directory: " << e; } int r (1); switch (m) { case mode::boot: r = bootstrap (); break; case mode::start: r = startup (); break; case mode::build: r = build (static_cast (argc), const_cast (argv)); break; } return r; } catch (const failed&) { return 1; // Diagnostics has already been issued. } catch (const cli::exception& e) { error << e; return 1; } int main (int argc, char* argv[]) { return bbot::main (argc, argv); }