-rw-r--r--  NEWS                                       6
-rw-r--r--  bbot/agent/agent-options.cxx            1045
-rw-r--r--  bbot/agent/agent-options.hxx             401
-rw-r--r--  bbot/agent/agent-options.ixx             465
-rw-r--r--  bbot/agent/agent.cli                      41
-rw-r--r--  bbot/agent/agent.cxx                    1797
-rw-r--r--  bbot/agent/agent.hxx                       4
-rw-r--r--  bbot/agent/machine.cxx                    86
-rw-r--r--  bbot/agent/machine.hxx                    14
-rw-r--r--  bbot/agent/tftp.hxx                        2
-rw-r--r--  bbot/bbot-agent@.service                   8
-rw-r--r--  bbot/buildfile                             2
-rw-r--r--  bbot/common-options.cxx                  740
-rw-r--r--  bbot/common-options.hxx                  450
-rw-r--r--  bbot/common-options.ixx                  281
-rw-r--r--  bbot/machine-manifest.cxx                 48
-rw-r--r--  bbot/machine-manifest.hxx                 29
-rw-r--r--  bbot/machine-manifest.test.testscript    152
-rw-r--r--  bbot/utility.hxx                           1
-rw-r--r--  bbot/worker/worker-options.cxx           693
-rw-r--r--  bbot/worker/worker-options.hxx           171
-rw-r--r--  bbot/worker/worker-options.ixx           123
-rw-r--r--  bbot/worker/worker.cxx                   477
-rw-r--r--  buildfile                                  2
-rwxr-xr-x  doc/cli.sh                                 2
-rw-r--r--  doc/manual.cli                           322
-rw-r--r--  manifest                                  10
-rw-r--r--  repositories.manifest                      6
-rw-r--r--  tests/integration/testscript              35
-rw-r--r--  tests/machine/testscript                   2
30 files changed, 6709 insertions(+), 706 deletions(-)
diff --git a/NEWS b/NEWS
index bf4df52..29154da 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,9 @@
+Version 0.17.0
+
+ * Support for auxiliary machines. See the bbot manual for details.
+
+ * Support for bbot.sys-install:config.install.root variable in worker.
+
Version 0.16.0
* New bpkg.bindist.*, bbot.sys-install.*, bbot.install.ldconfig steps.
diff --git a/bbot/agent/agent-options.cxx b/bbot/agent/agent-options.cxx
new file mode 100644
index 0000000..b497bb9
--- /dev/null
+++ b/bbot/agent/agent-options.cxx
@@ -0,0 +1,1045 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+#include <bbot/types-parsers.hxx>
+//
+// End prologue.
+
+#include <bbot/agent/agent-options.hxx>
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+#include <utility>
+#include <ostream>
+#include <sstream>
+#include <cstring>
+
+namespace bbot
+{
+ namespace cli
+ {
+ template <typename X>
+ struct parser
+ {
+ static void
+ parse (X& x, bool& xs, scanner& s)
+ {
+ using namespace std;
+
+ const char* o (s.next ());
+ if (s.more ())
+ {
+ string v (s.next ());
+ istringstream is (v);
+ if (!(is >> x && is.peek () == istringstream::traits_type::eof ()))
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<bool>
+ {
+ static void
+ parse (bool& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<std::string>
+ {
+ static void
+ parse (std::string& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ x = s.next ();
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X>
+ struct parser<std::pair<X, std::size_t> >
+ {
+ static void
+ parse (std::pair<X, std::size_t>& x, bool& xs, scanner& s)
+ {
+ x.second = s.position ();
+ parser<X>::parse (x.first, xs, s);
+ }
+ };
+
+ template <typename X>
+ struct parser<std::vector<X> >
+ {
+ static void
+ parse (std::vector<X>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.push_back (x);
+ xs = true;
+ }
+ };
+
+ template <typename X, typename C>
+ struct parser<std::set<X, C> >
+ {
+ static void
+ parse (std::set<X, C>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.insert (x);
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::map<K, V, C> >
+ {
+ static void
+ parse (std::map<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m[k] = v;
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X, typename T, T X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, s);
+ }
+
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
+ template <typename X, typename T, T X::*M, bool X::*S>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, x.*S, s);
+ }
+ }
+}
+
+#include <map>
+
+namespace bbot
+{
+ // agent_options
+ //
+
+ agent_options::
+ agent_options ()
+ : help_ (),
+ version_ (),
+ verbose_ (1),
+ verbose_specified_ (false),
+ systemd_daemon_ (),
+ toolchain_name_ ("default"),
+ toolchain_name_specified_ (false),
+ toolchain_num_ (1),
+ toolchain_num_specified_ (false),
+ toolchain_lock_ (),
+ toolchain_lock_specified_ (false),
+ toolchain_ver_ (),
+ toolchain_ver_specified_ (false),
+ toolchain_id_ (),
+ toolchain_id_specified_ (false),
+ interactive_ (interactive_mode::false_),
+ interactive_specified_ (false),
+ instance_ (1),
+ instance_specified_ (false),
+ instance_max_ (0),
+ instance_max_specified_ (false),
+ cpu_ (1),
+ cpu_specified_ (false),
+ build_ram_ (4 * 1024 * 1024),
+ build_ram_specified_ (false),
+ auxiliary_ram_ (0),
+ auxiliary_ram_specified_ (false),
+ bridge_ ("br1"),
+ bridge_specified_ (false),
+ auth_key_ (),
+ auth_key_specified_ (false),
+ trust_ (),
+ trust_specified_ (false),
+ machines_ ("/build/machines/"),
+ machines_specified_ (false),
+ tftp_ ("/build/tftp/"),
+ tftp_specified_ (false),
+ tftp_port_ (23400),
+ tftp_port_specified_ (false),
+ bootstrap_startup_ (300),
+ bootstrap_startup_specified_ (false),
+ bootstrap_timeout_ (3600),
+ bootstrap_timeout_specified_ (false),
+ bootstrap_auxiliary_ (900),
+ bootstrap_auxiliary_specified_ (false),
+ bootstrap_retries_ (2),
+ bootstrap_retries_specified_ (false),
+ build_startup_ (240),
+ build_startup_specified_ (false),
+ build_timeout_ (5400),
+ build_timeout_specified_ (false),
+ build_retries_ (2),
+ build_retries_specified_ (false),
+ interactive_timeout_ (10800),
+ interactive_timeout_specified_ (false),
+ connect_timeout_ (60),
+ connect_timeout_specified_ (false),
+ request_timeout_ (300),
+ request_timeout_specified_ (false),
+ request_retries_ (4),
+ request_retries_specified_ (false),
+ openssl_ ("openssl"),
+ openssl_specified_ (false),
+ openssl_option_ (),
+ openssl_option_specified_ (false),
+ dump_machines_ (),
+ dump_task_ (),
+ dump_result_ (),
+ fake_bootstrap_ (),
+ fake_build_ (),
+ fake_machine_ (),
+ fake_machine_specified_ (false),
+ fake_request_ (),
+ fake_request_specified_ (false)
+ {
+ }
+
+ bool agent_options::
+ parse (int& argc,
+ char** argv,
+ bool erase,
+ ::bbot::cli::unknown_mode opt,
+ ::bbot::cli::unknown_mode arg)
+ {
+ ::bbot::cli::argv_scanner s (argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ bool agent_options::
+ parse (int start,
+ int& argc,
+ char** argv,
+ bool erase,
+ ::bbot::cli::unknown_mode opt,
+ ::bbot::cli::unknown_mode arg)
+ {
+ ::bbot::cli::argv_scanner s (start, argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ bool agent_options::
+ parse (int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::bbot::cli::unknown_mode opt,
+ ::bbot::cli::unknown_mode arg)
+ {
+ ::bbot::cli::argv_scanner s (argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ end = s.end ();
+ return r;
+ }
+
+ bool agent_options::
+ parse (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::bbot::cli::unknown_mode opt,
+ ::bbot::cli::unknown_mode arg)
+ {
+ ::bbot::cli::argv_scanner s (start, argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ end = s.end ();
+ return r;
+ }
+
+ bool agent_options::
+ parse (::bbot::cli::scanner& s,
+ ::bbot::cli::unknown_mode opt,
+ ::bbot::cli::unknown_mode arg)
+ {
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ ::bbot::cli::usage_para agent_options::
+ print_usage (::std::ostream& os, ::bbot::cli::usage_para p)
+ {
+ CLI_POTENTIALLY_UNUSED (os);
+
+ if (p != ::bbot::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mOPTIONS\033[0m" << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--help\033[0m Print usage information and exit." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--version\033[0m Print version and exit." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--verbose\033[0m \033[4mlevel\033[0m Set the diagnostics verbosity to \033[4mlevel\033[0m between 0 and" << ::std::endl
+ << " 6 with level 1 being the default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--systemd-daemon\033[0m Run as a simple systemd daemon." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--toolchain-name\033[0m \033[4mstr\033[0m Toolchain name, \033[1mdefault\033[0m by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--toolchain-num\033[0m \033[4mnum\033[0m Toolchain number, 1 by default. If agents are running" << ::std::endl
+ << " for several toolchains, then each of them should have" << ::std::endl
+ << " a unique toolchain number between 1 and 9. This" << ::std::endl
+ << " number is used as an offset for network ports," << ::std::endl
+ << " interfaces, etc." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--toolchain-lock\033[0m \033[4mpath\033[0m Absolute path to the global toolchain lock file. If" << ::std::endl
+ << " unspecified, then" << ::std::endl
+ << " \033[1m/var/lock/bbot-agent-\033[0m\033[4mtoolchain-name\033[0m\033[1m.lock\033[0m\033[0m is used by" << ::std::endl
+ << " default. If empty path is specified then no global" << ::std::endl
+ << " locking is performed. If one of the \033[1m--fake-*\033[0m options" << ::std::endl
+ << " is specified, then no locking is performed by" << ::std::endl
+ << " default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--toolchain-ver\033[0m \033[4mstdver\033[0m Toolchain version. If unspecified, then the agent's" << ::std::endl
+ << " version will be used (which will be imprecise for" << ::std::endl
+ << " snapshot versions)." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--toolchain-id\033[0m \033[4mstr\033[0m Toolchain id. If unspecified or empty, then no" << ::std::endl
+ << " re-bootstrapping on toolchain changes will be" << ::std::endl
+ << " performed (which is primarily useful for testing)." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--interactive\033[0m \033[4mmode\033[0m Interactive build support. Valid values for this" << ::std::endl
+ << " option are \033[1mfalse\033[0m (only non-interactive), \033[1mtrue\033[0m (only" << ::std::endl
+ << " interactive), and \033[1mboth\033[0m. If this option is not" << ::std::endl
+ << " specified, then only non-interactive builds are" << ::std::endl
+ << " supported." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--instance\033[0m \033[4mnum\033[0m Instance number, 1 by default. If several instances" << ::std::endl
+ << " of an agent are running for the same toolchain, then" << ::std::endl
+ << " each of them should have a unique instance number" << ::std::endl
+ << " between 1 and 99. This number is used as an offset" << ::std::endl
+ << " for network ports, interfaces, etc." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--instance-max\033[0m \033[4mnum\033[0m Maximum number of instances that can perform tasks" << ::std::endl
+ << " concurrently. If the number of instances that have" << ::std::endl
+ << " been started is greater than this number (normally by" << ::std::endl
+ << " just one), then when the maximum number of tasks is" << ::std::endl
+ << " already being performed, the extra instances operate" << ::std::endl
+ << " in the \033[4mpriority monitor\033[0m mode: they only query" << ::std::endl
+ << " controller URLs with priorities higher than of the" << ::std::endl
+ << " existing tasks and can only perform a task by" << ::std::endl
+ << " interrupting one of them. If the maximum number of" << ::std::endl
+ << " instances is \033[1m0\033[0m (default), then it is assumed the" << ::std::endl
+ << " number of instances started is the maximum number," << ::std::endl
+ << " essentially disabling the priority monitor" << ::std::endl
+ << " functionality." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--cpu\033[0m \033[4mnum\033[0m Number of CPUs (threads) to use, 1 by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--build-ram\033[0m \033[4mnum\033[0m Amount of RAM (in KiB) to use for the build machine," << ::std::endl
+ << " 4GiB by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--auxiliary-ram\033[0m \033[4mnum\033[0m Amount of RAM (in KiB) to use for auxiliary machines." << ::std::endl
+ << " To disable running auxiliary machines, specify \033[1m0\033[0m. If" << ::std::endl
+ << " unspecified, then currently the behavior is the same" << ::std::endl
+ << " as specifying \033[1m0\033[0m but this may change in the future" << ::std::endl
+ << " (for example, to support a more dynamic allocation" << ::std::endl
+ << " strategy)." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--bridge\033[0m \033[4miface\033[0m Bridge interface to use for machine networking, \033[1mbr1\033[0m" << ::std::endl
+ << " by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--auth-key\033[0m \033[4mfile\033[0m Private key for the public key-based agent" << ::std::endl
+ << " authentication. If not specified, then the agent will" << ::std::endl
+ << " not be able to request tasks from controllers that" << ::std::endl
+ << " require authentication." << ::std::endl
+ << ::std::endl
+ << " The file is expected to contain a single PEM-encoded" << ::std::endl
+ << " private key without a password. A suitable key can be" << ::std::endl
+ << " generated using the following command:" << ::std::endl
+ << ::std::endl
+ << " $ openssl genrsa 4096 >key.pem" << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--trust\033[0m \033[4mfingerprint\033[0m Trust repository certificate with a SHA256" << ::std::endl
+ << " \033[4mfingerprint\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--machines\033[0m \033[4mdir\033[0m The location of the build machines, \033[1m/build/machines/\033[0m" << ::std::endl
+ << " by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--tftp\033[0m \033[4mdir\033[0m The location of the TFTP server root, \033[1m/build/tftp/\033[0m by" << ::std::endl
+ << " default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--tftp-port\033[0m \033[4mnum\033[0m TFTP server port base, 23400 by default. The actual" << ::std::endl
+ << " port is calculated by adding an offset calculated" << ::std::endl
+ << " based on the toolchain, instance, and machine" << ::std::endl
+ << " numbers." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--bootstrap-startup\033[0m \033[4msec\033[0m Maximum number of seconds to wait for build machine" << ::std::endl
+ << " bootstrap startup, 300 (5 minutes) by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--bootstrap-timeout\033[0m \033[4msec\033[0m Maximum number of seconds to wait for build machine" << ::std::endl
+ << " bootstrap completion, 3600 (60 minutes) by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--bootstrap-auxiliary\033[0m \033[4msec\033[0m Maximum number of seconds to wait for auxiliary" << ::std::endl
+ << " machine bootstrap completion, 900 (15 minutes) by" << ::std::endl
+ << " default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--bootstrap-retries\033[0m \033[4mnum\033[0m Number of times to retry a mis-booted bootstrap, 2 (3" << ::std::endl
+ << " attempts total) by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--build-startup\033[0m \033[4msec\033[0m Maximum number of seconds to wait for build startup," << ::std::endl
+ << " 240 (4 minutes) by default. This value is used for" << ::std::endl
+ << " both build and auxiliary machines." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--build-timeout\033[0m \033[4msec\033[0m Maximum number of seconds to wait for build" << ::std::endl
+ << " completion, 5400 (90 minutes) by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--build-retries\033[0m \033[4mnum\033[0m Number of times to retry a mis-booted build, 2 (3" << ::std::endl
+ << " attempts total) by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--intactive-timeout\033[0m \033[4msec\033[0m Maximum number of seconds to wait for interactive" << ::std::endl
+ << " build completion, 10800 (3 hours) by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--connect-timeout\033[0m \033[4msec\033[0m Maximum number of seconds to wait for controller" << ::std::endl
+ << " request connection, 60 (1 minute) by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--request-timeout\033[0m \033[4msec\033[0m Maximum number of seconds to wait for controller" << ::std::endl
+ << " request completion, 300 (5 minutes) by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--request-retries\033[0m \033[4mnum\033[0m Number of times to retry a controller request, 4 (5" << ::std::endl
+ << " attempts total) by default. Note that both the total" << ::std::endl
+ << " time for all retries as well as the time of each" << ::std::endl
+ << " retry are limited by the same --request-timeout\033[0m" << ::std::endl
+ << " value. This means that a successful request may take" << ::std::endl
+ << " up to twice as long if a connection was established" << ::std::endl
+ << " at the end of the retry window and took just as long" << ::std::endl
+ << " to complete." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--openssl\033[0m \033[4mpath\033[0m The openssl program to be used for crypto operations." << ::std::endl
+ << " You can also specify additional options that should" << ::std::endl
+ << " be passed to the openssl program with" << ::std::endl
+ << " \033[1m--openssl-option\033[0m. If the openssl program is not" << ::std::endl
+ << " explicitly specified, then \033[1mbbot-agent\033[0m will use" << ::std::endl
+ << " \033[1mopenssl\033[0m by default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--openssl-option\033[0m \033[4mopt\033[0m Additional option to be passed to the openssl program" << ::std::endl
+ << " (see \033[1m--openssl\033[0m for details). Repeat this option to" << ::std::endl
+ << " specify multiple openssl options." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-machines\033[0m Dump the available machines to \033[1mstdout\033[0m," << ::std::endl
+ << " (re)-bootstrapping any if necessary, and exit." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-task\033[0m Dump the received build task to \033[1mstdout\033[0m and exit." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-result\033[0m Dump the obtained build result to \033[1mstdout\033[0m and exit." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--fake-bootstrap\033[0m Fake the machine bootstrap process by creating the" << ::std::endl
+ << " expected bootstrapped machine manifest." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--fake-build\033[0m Fake the package building process by creating the" << ::std::endl
+ << " aborted build result." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--fake-machine\033[0m \033[4mfile\033[0m Fake the machine enumeration process by reading the" << ::std::endl
+ << " machine header manifest from \033[4mfile\033[0m (or \033[1mstdin\033[0m if \033[4mfile\033[0m" << ::std::endl
+ << " is '\033[1m-\033[0m')." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--fake-request\033[0m \033[4mfile\033[0m Fake the task request process by reading the task" << ::std::endl
+ << " manifest from \033[4mfile\033[0m (or \033[1mstdin\033[0m if \033[4mfile\033[0m is '\033[1m-\033[0m')." << ::std::endl;
+
+ p = ::bbot::cli::usage_para::option;
+
+ return p;
+ }
+
+ typedef
+ std::map<std::string, void (*) (agent_options&, ::bbot::cli::scanner&)>
+ _cli_agent_options_map;
+
+ static _cli_agent_options_map _cli_agent_options_map_;
+
+ struct _cli_agent_options_map_init
+ {
+ _cli_agent_options_map_init ()
+ {
+ _cli_agent_options_map_["--help"] =
+ &::bbot::cli::thunk< agent_options, &agent_options::help_ >;
+ _cli_agent_options_map_["--version"] =
+ &::bbot::cli::thunk< agent_options, &agent_options::version_ >;
+ _cli_agent_options_map_["--verbose"] =
+ &::bbot::cli::thunk< agent_options, uint16_t, &agent_options::verbose_,
+ &agent_options::verbose_specified_ >;
+ _cli_agent_options_map_["--systemd-daemon"] =
+ &::bbot::cli::thunk< agent_options, &agent_options::systemd_daemon_ >;
+ _cli_agent_options_map_["--toolchain-name"] =
+ &::bbot::cli::thunk< agent_options, string, &agent_options::toolchain_name_,
+ &agent_options::toolchain_name_specified_ >;
+ _cli_agent_options_map_["--toolchain-num"] =
+ &::bbot::cli::thunk< agent_options, uint16_t, &agent_options::toolchain_num_,
+ &agent_options::toolchain_num_specified_ >;
+ _cli_agent_options_map_["--toolchain-lock"] =
+ &::bbot::cli::thunk< agent_options, string, &agent_options::toolchain_lock_,
+ &agent_options::toolchain_lock_specified_ >;
+ _cli_agent_options_map_["--toolchain-ver"] =
+ &::bbot::cli::thunk< agent_options, standard_version, &agent_options::toolchain_ver_,
+ &agent_options::toolchain_ver_specified_ >;
+ _cli_agent_options_map_["--toolchain-id"] =
+ &::bbot::cli::thunk< agent_options, string, &agent_options::toolchain_id_,
+ &agent_options::toolchain_id_specified_ >;
+ _cli_agent_options_map_["--interactive"] =
+ &::bbot::cli::thunk< agent_options, interactive_mode, &agent_options::interactive_,
+ &agent_options::interactive_specified_ >;
+ _cli_agent_options_map_["--instance"] =
+ &::bbot::cli::thunk< agent_options, uint16_t, &agent_options::instance_,
+ &agent_options::instance_specified_ >;
+ _cli_agent_options_map_["--instance-max"] =
+ &::bbot::cli::thunk< agent_options, uint16_t, &agent_options::instance_max_,
+ &agent_options::instance_max_specified_ >;
+ _cli_agent_options_map_["--cpu"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::cpu_,
+ &agent_options::cpu_specified_ >;
+ _cli_agent_options_map_["--build-ram"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::build_ram_,
+ &agent_options::build_ram_specified_ >;
+ _cli_agent_options_map_["--auxiliary-ram"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::auxiliary_ram_,
+ &agent_options::auxiliary_ram_specified_ >;
+ _cli_agent_options_map_["--bridge"] =
+ &::bbot::cli::thunk< agent_options, string, &agent_options::bridge_,
+ &agent_options::bridge_specified_ >;
+ _cli_agent_options_map_["--auth-key"] =
+ &::bbot::cli::thunk< agent_options, path, &agent_options::auth_key_,
+ &agent_options::auth_key_specified_ >;
+ _cli_agent_options_map_["--trust"] =
+ &::bbot::cli::thunk< agent_options, strings, &agent_options::trust_,
+ &agent_options::trust_specified_ >;
+ _cli_agent_options_map_["--machines"] =
+ &::bbot::cli::thunk< agent_options, dir_path, &agent_options::machines_,
+ &agent_options::machines_specified_ >;
+ _cli_agent_options_map_["--tftp"] =
+ &::bbot::cli::thunk< agent_options, dir_path, &agent_options::tftp_,
+ &agent_options::tftp_specified_ >;
+ _cli_agent_options_map_["--tftp-port"] =
+ &::bbot::cli::thunk< agent_options, uint16_t, &agent_options::tftp_port_,
+ &agent_options::tftp_port_specified_ >;
+ _cli_agent_options_map_["--bootstrap-startup"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::bootstrap_startup_,
+ &agent_options::bootstrap_startup_specified_ >;
+ _cli_agent_options_map_["--bootstrap-timeout"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::bootstrap_timeout_,
+ &agent_options::bootstrap_timeout_specified_ >;
+ _cli_agent_options_map_["--bootstrap-auxiliary"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::bootstrap_auxiliary_,
+ &agent_options::bootstrap_auxiliary_specified_ >;
+ _cli_agent_options_map_["--bootstrap-retries"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::bootstrap_retries_,
+ &agent_options::bootstrap_retries_specified_ >;
+ _cli_agent_options_map_["--build-startup"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::build_startup_,
+ &agent_options::build_startup_specified_ >;
+ _cli_agent_options_map_["--build-timeout"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::build_timeout_,
+ &agent_options::build_timeout_specified_ >;
+ _cli_agent_options_map_["--build-retries"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::build_retries_,
+ &agent_options::build_retries_specified_ >;
+ _cli_agent_options_map_["--intactive-timeout"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::intactive_timeout_,
+ &agent_options::intactive_timeout_specified_ >;
+ _cli_agent_options_map_["--connect-timeout"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::connect_timeout_,
+ &agent_options::connect_timeout_specified_ >;
+ _cli_agent_options_map_["--request-timeout"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::request_timeout_,
+ &agent_options::request_timeout_specified_ >;
+ _cli_agent_options_map_["--request-retries"] =
+ &::bbot::cli::thunk< agent_options, size_t, &agent_options::request_retries_,
+ &agent_options::request_retries_specified_ >;
+ _cli_agent_options_map_["--openssl"] =
+ &::bbot::cli::thunk< agent_options, path, &agent_options::openssl_,
+ &agent_options::openssl_specified_ >;
+ _cli_agent_options_map_["--openssl-option"] =
+ &::bbot::cli::thunk< agent_options, strings, &agent_options::openssl_option_,
+ &agent_options::openssl_option_specified_ >;
+ _cli_agent_options_map_["--dump-machines"] =
+ &::bbot::cli::thunk< agent_options, &agent_options::dump_machines_ >;
+ _cli_agent_options_map_["--dump-task"] =
+ &::bbot::cli::thunk< agent_options, &agent_options::dump_task_ >;
+ _cli_agent_options_map_["--dump-result"] =
+ &::bbot::cli::thunk< agent_options, &agent_options::dump_result_ >;
+ _cli_agent_options_map_["--fake-bootstrap"] =
+ &::bbot::cli::thunk< agent_options, &agent_options::fake_bootstrap_ >;
+ _cli_agent_options_map_["--fake-build"] =
+ &::bbot::cli::thunk< agent_options, &agent_options::fake_build_ >;
+ _cli_agent_options_map_["--fake-machine"] =
+ &::bbot::cli::thunk< agent_options, path, &agent_options::fake_machine_,
+ &agent_options::fake_machine_specified_ >;
+ _cli_agent_options_map_["--fake-request"] =
+ &::bbot::cli::thunk< agent_options, path, &agent_options::fake_request_,
+ &agent_options::fake_request_specified_ >;
+ }
+ };
+
+ static _cli_agent_options_map_init _cli_agent_options_map_init_;
+
+ bool agent_options::
+ _parse (const char* o, ::bbot::cli::scanner& s)
+ {
+ _cli_agent_options_map::const_iterator i (_cli_agent_options_map_.find (o));
+
+ if (i != _cli_agent_options_map_.end ())
+ {
+ (*(i->second)) (*this, s);
+ return true;
+ }
+
+ return false;
+ }
+
+ bool agent_options::
+ _parse (::bbot::cli::scanner& s,
+ ::bbot::cli::unknown_mode opt_mode,
+ ::bbot::cli::unknown_mode arg_mode)
+ {
+ // Can't skip combined flags (--no-combined-flags).
+ //
+ assert (opt_mode != ::bbot::cli::unknown_mode::skip);
+
+ bool r = false;
+ bool opt = true;
+
+ while (s.more ())
+ {
+ const char* o = s.peek ();
+
+ if (std::strcmp (o, "--") == 0)
+ {
+ opt = false;
+ s.skip ();
+ r = true;
+ continue;
+ }
+
+ if (opt)
+ {
+ if (_parse (o, s))
+ {
+ r = true;
+ continue;
+ }
+
+ if (std::strncmp (o, "-", 1) == 0 && o[1] != '\0')
+ {
+ // Handle combined option values.
+ //
+ std::string co;
+ if (const char* v = std::strchr (o, '='))
+ {
+ co.assign (o, 0, v - o);
+ ++v;
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (co.c_str ()),
+ const_cast<char*> (v)
+ };
+
+ ::bbot::cli::argv_scanner ns (0, ac, av);
+
+ if (_parse (co.c_str (), ns))
+ {
+ // Parsed the option but not its value?
+ //
+ if (ns.end () != 2)
+ throw ::bbot::cli::invalid_value (co, v);
+
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = co.c_str ();
+ }
+ }
+
+ // Handle combined flags.
+ //
+ char cf[3];
+ {
+ const char* p = o + 1;
+ for (; *p != '\0'; ++p)
+ {
+ if (!((*p >= 'a' && *p <= 'z') ||
+ (*p >= 'A' && *p <= 'Z') ||
+ (*p >= '0' && *p <= '9')))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ for (p = o + 1; *p != '\0'; ++p)
+ {
+ std::strcpy (cf, "-");
+ cf[1] = *p;
+ cf[2] = '\0';
+
+ int ac (1);
+ char* av[] =
+ {
+ cf
+ };
+
+ ::bbot::cli::argv_scanner ns (0, ac, av);
+
+ if (!_parse (cf, ns))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ // All handled.
+ //
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = cf;
+ }
+ }
+ }
+
+ switch (opt_mode)
+ {
+ case ::bbot::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::bbot::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::bbot::cli::unknown_mode::fail:
+ {
+ throw ::bbot::cli::unknown_option (o);
+ }
+ }
+
+ break;
+ }
+ }
+
+ switch (arg_mode)
+ {
+ case ::bbot::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::bbot::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::bbot::cli::unknown_mode::fail:
+ {
+ throw ::bbot::cli::unknown_argument (o);
+ }
+ }
+
+ break;
+ }
+
+ return r;
+ }
+}
+
+namespace bbot
+{
+ ::bbot::cli::usage_para
+ print_bbot_agent_usage (::std::ostream& os, ::bbot::cli::usage_para p)
+ {
+ CLI_POTENTIALLY_UNUSED (os);
+
+ if (p != ::bbot::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mSYNOPSIS\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[1mbbot-agent --help\033[0m" << ::std::endl
+ << "\033[1mbbot-agent --version\033[0m" << ::std::endl
+ << "\033[1mbbot-agent\033[0m [\033[4moptions\033[0m] [\033[4mpriority\033[0m=]\033[4murl\033[0m...\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[1mDESCRIPTION\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[1mbbot-agent\033[0m @@ TODO." << ::std::endl
+ << ::std::endl
+ << "The controller URL \033[4mpriority\033[0m is a four or five-digit decimal value. If it is" << ::std::endl
+ << "absent, then \033[1m0\033[0m (lowest priority) is assumed. URLs with equal priority are" << ::std::endl
+ << "queried at random." << ::std::endl
+ << ::std::endl
+ << "The \033[4mpriority\033[0m value has the [\033[4mF\033[0m]\033[4mDCBA\033[0m\033[0m form which encodes four priority levels" << ::std::endl
+ << "(\033[4mDCBA\033[0m) each occupying one decimal digit (so there are 10 distinct priorities in" << ::std::endl
+ << "each level) plus the optional boost flag (\033[4mF\033[0m). These levels offer different" << ::std::endl
+ << "trade-offs between the speed of completing a higher priority task and" << ::std::endl
+ << "potentially discarding work that has already been done." << ::std::endl
+ << ::std::endl
+ << "The first priority level (\033[4mA\033[0m) is a simple preference: among the URLs with equal" << ::std::endl
+ << "values for other levels (\033[4mDCB\033[0m), those with higher first level priorities are" << ::std::endl
+ << "queried first." << ::std::endl
+ << ::std::endl
+ << "The second priority level (\033[4mB\033[0m) has the semantics of the first level plus it" << ::std::endl
+ << "prevents URLs with lower second priority level from being queried until the" << ::std::endl
+ << "task with a higher second priority level has completed, effectively conserving" << ::std::endl
+ << "the resources for the higher priority task." << ::std::endl
+ << ::std::endl
+ << "The third priority level (\033[4mC\033[0m) has the semantics of the second level plus it may" << ::std::endl
+ << "interrupt one lower third priority level task in order to perform the higher" << ::std::endl
+ << "third priority task (the interrupt is necessary if the desired machine is used" << ::std::endl
+ << "by the lower priority task or the number of tasks already being performed is" << ::std::endl
+ << "the maximum allowed to be performed concurrently; see \033[1m--instance-max\033[0m)." << ::std::endl
+ << ::std::endl
+ << "Finally, the fourth priority level (\033[4mD\033[0m) has the semantics of the third level" << ::std::endl
+ << "except that not one but all the lower fourth priority level tasks are" << ::std::endl
+ << "interrupting, effectively dedicating all the available resources to the higher" << ::std::endl
+ << "priority task. This level can also be combined with the boost flag \033[4mF\033[0m. If this" << ::std::endl
+ << "flag is \033[1m1\033[0m then the higher priority task's CPU number (\033[1m--cpu\033[0m) is boosted to the" << ::std::endl
+ << "full number of available hardware threads (or, to view it another way, the" << ::std::endl
+ << "fourth priority level has 20 possible values, not 10, with the first 0-9 being" << ::std::endl
+ << "without the boost while the last 10-19 being with the boost). Note that this" << ::std::endl
+ << "boosting semantics may not be accurate if the agent is executed with CPU" << ::std::endl
+ << "affinity. Also note that there is no corresponding RAM boosting and it's" << ::std::endl
+ << "possible that in some configurations the amount of RAM will be insufficient for" << ::std::endl
+ << "the boosted CPU count." << ::std::endl
+ << ::std::endl
+ << "Note that the priority levels are hierarchical in a sense that within a given" << ::std::endl
+ << "higher level URLs can be further prioritized using the lower levels. As an" << ::std::endl
+ << "example, consider a deployment with three controller URLs: background package" << ::std::endl
+ << "rebuilds (\033[1mpkg.example.org\033[0m), user-initiated CI (\033[1mci.example.org\033[0m), and" << ::std::endl
+ << "user-initiated interactive CI (\033[1mici.example.org\033[0m). Given the following" << ::std::endl
+ << "priorities:" << ::std::endl
+ << ::std::endl
+ << "0000=https://pkg.example.org" << ::std::endl
+ << "0100=https://ci.example.org" << ::std::endl
+ << "0101=https://ici.example.org" << ::std::endl
+ << ::std::endl
+ << "Both types of CI tasks will interrupt one background rebuild task if necessary" << ::std::endl
+ << "while the interactive CI tasks will be merely preferred over non-interactive." << ::std::endl
+ << ::std::endl
+ << "Note that on termination \033[1mbbot-agent\033[0m may leave behind a machine lock and working" << ::std::endl
+ << "machine snapshot. It is expected that the caller (normally Build OS monitor)" << ::std::endl
+ << "cleans them up before restarting the agent." << ::std::endl;
+
+ p = ::bbot::agent_options::print_usage (os, ::bbot::cli::usage_para::text);
+
+ if (p != ::bbot::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mEXIT STATUS\033[0m" << ::std::endl
+ << ::std::endl
+ << "Non-zero exit status is returned in case of an error." << ::std::endl;
+
+ p = ::bbot::cli::usage_para::text;
+
+ return p;
+ }
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
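The [F]DCBA priority encoding described in the DESCRIPTION text above amounts to peeling off decimal digits. The following is only an illustrative sketch of that description, not code from bbot (the priority struct and decode_priority() helper are made up for this example):

#include <cstdint>

struct priority
{
  unsigned a;  // Level A: query-order preference among URLs with equal DCB.
  unsigned b;  // Level B: also holds back lower-priority URLs until done.
  unsigned c;  // Level C: may interrupt one lower-priority task.
  unsigned d;  // Level D: interrupts all lower-priority tasks.
  bool boost;  // F flag: boost --cpu to all available hardware threads.
};

// Decode a four or five-digit [F]DCBA value, for example 0101 or 10900.
//
static priority
decode_priority (std::uint32_t v)
{
  priority p;
  p.a = v % 10; v /= 10;
  p.b = v % 10; v /= 10;
  p.c = v % 10; v /= 10;
  p.d = v % 10; v /= 10;
  p.boost = (v != 0); // Optional fifth digit.
  return p;
}

With the example URLs above, 0100 and 0101 both decode to C=1 (interrupt one background rebuild if necessary) while only 0101 additionally carries the A=1 preference, which matches the prose.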
diff --git a/bbot/agent/agent-options.hxx b/bbot/agent/agent-options.hxx
new file mode 100644
index 0000000..86eef43
--- /dev/null
+++ b/bbot/agent/agent-options.hxx
@@ -0,0 +1,401 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+#ifndef BBOT_AGENT_AGENT_OPTIONS_HXX
+#define BBOT_AGENT_AGENT_OPTIONS_HXX
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+#include <libbbot/manifest.hxx>
+
+#include <bbot/common-options.hxx>
+
+namespace bbot
+{
+ class agent_options
+ {
+ public:
+ agent_options ();
+
+ // Return true if anything has been parsed.
+ //
+ bool
+ parse (int& argc,
+ char** argv,
+ bool erase = false,
+ ::bbot::cli::unknown_mode option = ::bbot::cli::unknown_mode::fail,
+ ::bbot::cli::unknown_mode argument = ::bbot::cli::unknown_mode::stop);
+
+ bool
+ parse (int start,
+ int& argc,
+ char** argv,
+ bool erase = false,
+ ::bbot::cli::unknown_mode option = ::bbot::cli::unknown_mode::fail,
+ ::bbot::cli::unknown_mode argument = ::bbot::cli::unknown_mode::stop);
+
+ bool
+ parse (int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::bbot::cli::unknown_mode option = ::bbot::cli::unknown_mode::fail,
+ ::bbot::cli::unknown_mode argument = ::bbot::cli::unknown_mode::stop);
+
+ bool
+ parse (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::bbot::cli::unknown_mode option = ::bbot::cli::unknown_mode::fail,
+ ::bbot::cli::unknown_mode argument = ::bbot::cli::unknown_mode::stop);
+
+ bool
+ parse (::bbot::cli::scanner&,
+ ::bbot::cli::unknown_mode option = ::bbot::cli::unknown_mode::fail,
+ ::bbot::cli::unknown_mode argument = ::bbot::cli::unknown_mode::stop);
+
+ // Option accessors.
+ //
+ const bool&
+ help () const;
+
+ const bool&
+ version () const;
+
+ const uint16_t&
+ verbose () const;
+
+ bool
+ verbose_specified () const;
+
+ const bool&
+ systemd_daemon () const;
+
+ const string&
+ toolchain_name () const;
+
+ bool
+ toolchain_name_specified () const;
+
+ const uint16_t&
+ toolchain_num () const;
+
+ bool
+ toolchain_num_specified () const;
+
+ const string&
+ toolchain_lock () const;
+
+ bool
+ toolchain_lock_specified () const;
+
+ const standard_version&
+ toolchain_ver () const;
+
+ bool
+ toolchain_ver_specified () const;
+
+ const string&
+ toolchain_id () const;
+
+ bool
+ toolchain_id_specified () const;
+
+ const interactive_mode&
+ interactive () const;
+
+ bool
+ interactive_specified () const;
+
+ const uint16_t&
+ instance () const;
+
+ bool
+ instance_specified () const;
+
+ const uint16_t&
+ instance_max () const;
+
+ bool
+ instance_max_specified () const;
+
+ const size_t&
+ cpu () const;
+
+ bool
+ cpu_specified () const;
+
+ const size_t&
+ build_ram () const;
+
+ bool
+ build_ram_specified () const;
+
+ const size_t&
+ auxiliary_ram () const;
+
+ bool
+ auxiliary_ram_specified () const;
+
+ const string&
+ bridge () const;
+
+ bool
+ bridge_specified () const;
+
+ const path&
+ auth_key () const;
+
+ bool
+ auth_key_specified () const;
+
+ const strings&
+ trust () const;
+
+ bool
+ trust_specified () const;
+
+ const dir_path&
+ machines () const;
+
+ bool
+ machines_specified () const;
+
+ const dir_path&
+ tftp () const;
+
+ bool
+ tftp_specified () const;
+
+ const uint16_t&
+ tftp_port () const;
+
+ bool
+ tftp_port_specified () const;
+
+ const size_t&
+ bootstrap_startup () const;
+
+ bool
+ bootstrap_startup_specified () const;
+
+ const size_t&
+ bootstrap_timeout () const;
+
+ bool
+ bootstrap_timeout_specified () const;
+
+ const size_t&
+ bootstrap_auxiliary () const;
+
+ bool
+ bootstrap_auxiliary_specified () const;
+
+ const size_t&
+ bootstrap_retries () const;
+
+ bool
+ bootstrap_retries_specified () const;
+
+ const size_t&
+ build_startup () const;
+
+ bool
+ build_startup_specified () const;
+
+ const size_t&
+ build_timeout () const;
+
+ bool
+ build_timeout_specified () const;
+
+ const size_t&
+ build_retries () const;
+
+ bool
+ build_retries_specified () const;
+
+ const size_t&
+ interactive_timeout () const;
+
+ bool
+ interactive_timeout_specified () const;
+
+ const size_t&
+ connect_timeout () const;
+
+ bool
+ connect_timeout_specified () const;
+
+ const size_t&
+ request_timeout () const;
+
+ bool
+ request_timeout_specified () const;
+
+ const size_t&
+ request_retries () const;
+
+ bool
+ request_retries_specified () const;
+
+ const path&
+ openssl () const;
+
+ bool
+ openssl_specified () const;
+
+ const strings&
+ openssl_option () const;
+
+ bool
+ openssl_option_specified () const;
+
+ const bool&
+ dump_machines () const;
+
+ const bool&
+ dump_task () const;
+
+ const bool&
+ dump_result () const;
+
+ const bool&
+ fake_bootstrap () const;
+
+ const bool&
+ fake_build () const;
+
+ const path&
+ fake_machine () const;
+
+ bool
+ fake_machine_specified () const;
+
+ const path&
+ fake_request () const;
+
+ bool
+ fake_request_specified () const;
+
+ // Print usage information.
+ //
+ static ::bbot::cli::usage_para
+ print_usage (::std::ostream&,
+ ::bbot::cli::usage_para = ::bbot::cli::usage_para::none);
+
+ // Implementation details.
+ //
+ protected:
+ bool
+ _parse (const char*, ::bbot::cli::scanner&);
+
+ private:
+ bool
+ _parse (::bbot::cli::scanner&,
+ ::bbot::cli::unknown_mode option,
+ ::bbot::cli::unknown_mode argument);
+
+ public:
+ bool help_;
+ bool version_;
+ uint16_t verbose_;
+ bool verbose_specified_;
+ bool systemd_daemon_;
+ string toolchain_name_;
+ bool toolchain_name_specified_;
+ uint16_t toolchain_num_;
+ bool toolchain_num_specified_;
+ string toolchain_lock_;
+ bool toolchain_lock_specified_;
+ standard_version toolchain_ver_;
+ bool toolchain_ver_specified_;
+ string toolchain_id_;
+ bool toolchain_id_specified_;
+ interactive_mode interactive_;
+ bool interactive_specified_;
+ uint16_t instance_;
+ bool instance_specified_;
+ uint16_t instance_max_;
+ bool instance_max_specified_;
+ size_t cpu_;
+ bool cpu_specified_;
+ size_t build_ram_;
+ bool build_ram_specified_;
+ size_t auxiliary_ram_;
+ bool auxiliary_ram_specified_;
+ string bridge_;
+ bool bridge_specified_;
+ path auth_key_;
+ bool auth_key_specified_;
+ strings trust_;
+ bool trust_specified_;
+ dir_path machines_;
+ bool machines_specified_;
+ dir_path tftp_;
+ bool tftp_specified_;
+ uint16_t tftp_port_;
+ bool tftp_port_specified_;
+ size_t bootstrap_startup_;
+ bool bootstrap_startup_specified_;
+ size_t bootstrap_timeout_;
+ bool bootstrap_timeout_specified_;
+ size_t bootstrap_auxiliary_;
+ bool bootstrap_auxiliary_specified_;
+ size_t bootstrap_retries_;
+ bool bootstrap_retries_specified_;
+ size_t build_startup_;
+ bool build_startup_specified_;
+ size_t build_timeout_;
+ bool build_timeout_specified_;
+ size_t build_retries_;
+ bool build_retries_specified_;
+ size_t interactive_timeout_;
+ bool interactive_timeout_specified_;
+ size_t connect_timeout_;
+ bool connect_timeout_specified_;
+ size_t request_timeout_;
+ bool request_timeout_specified_;
+ size_t request_retries_;
+ bool request_retries_specified_;
+ path openssl_;
+ bool openssl_specified_;
+ strings openssl_option_;
+ bool openssl_option_specified_;
+ bool dump_machines_;
+ bool dump_task_;
+ bool dump_result_;
+ bool fake_bootstrap_;
+ bool fake_build_;
+ path fake_machine_;
+ bool fake_machine_specified_;
+ path fake_request_;
+ bool fake_request_specified_;
+ };
+}
+
+// Print page usage information.
+//
+namespace bbot
+{
+ ::bbot::cli::usage_para
+ print_bbot_agent_usage (::std::ostream&,
+ ::bbot::cli::usage_para = ::bbot::cli::usage_para::none);
+}
+
+#include <bbot/agent/agent-options.ixx>
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
+#endif // BBOT_AGENT_AGENT_OPTIONS_HXX
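As a reading aid for the generated interface above: the class follows the usual CLI parse-then-query pattern. Below is a minimal sketch of driving it; this is not bbot-agent's actual main(), just the declared API in use, assuming the header is on the include path:

#include <iostream>

#include <bbot/agent/agent-options.hxx>

int
main (int argc, char* argv[])
{
  using namespace bbot;

  agent_options ops;

  // Throws cli::unknown_option, cli::missing_value, etc., on errors
  // (unknown_mode::fail is the default for options).
  //
  ops.parse (argc, argv);

  if (ops.help ())
  {
    print_bbot_agent_usage (std::cout);
    return 0;
  }

  if (ops.verbose_specified ())
    std::cerr << "verbosity: " << ops.verbose () << std::endl;

  // A real driver would continue with toolchain and machine setup here.

  return 0;
}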
diff --git a/bbot/agent/agent-options.ixx b/bbot/agent/agent-options.ixx
new file mode 100644
index 0000000..e59d8fe
--- /dev/null
+++ b/bbot/agent/agent-options.ixx
@@ -0,0 +1,465 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+namespace bbot
+{
+ // agent_options
+ //
+
+ inline const bool& agent_options::
+ help () const
+ {
+ return this->help_;
+ }
+
+ inline const bool& agent_options::
+ version () const
+ {
+ return this->version_;
+ }
+
+ inline const uint16_t& agent_options::
+ verbose () const
+ {
+ return this->verbose_;
+ }
+
+ inline bool agent_options::
+ verbose_specified () const
+ {
+ return this->verbose_specified_;
+ }
+
+ inline const bool& agent_options::
+ systemd_daemon () const
+ {
+ return this->systemd_daemon_;
+ }
+
+ inline const string& agent_options::
+ toolchain_name () const
+ {
+ return this->toolchain_name_;
+ }
+
+ inline bool agent_options::
+ toolchain_name_specified () const
+ {
+ return this->toolchain_name_specified_;
+ }
+
+ inline const uint16_t& agent_options::
+ toolchain_num () const
+ {
+ return this->toolchain_num_;
+ }
+
+ inline bool agent_options::
+ toolchain_num_specified () const
+ {
+ return this->toolchain_num_specified_;
+ }
+
+ inline const string& agent_options::
+ toolchain_lock () const
+ {
+ return this->toolchain_lock_;
+ }
+
+ inline bool agent_options::
+ toolchain_lock_specified () const
+ {
+ return this->toolchain_lock_specified_;
+ }
+
+ inline const standard_version& agent_options::
+ toolchain_ver () const
+ {
+ return this->toolchain_ver_;
+ }
+
+ inline bool agent_options::
+ toolchain_ver_specified () const
+ {
+ return this->toolchain_ver_specified_;
+ }
+
+ inline const string& agent_options::
+ toolchain_id () const
+ {
+ return this->toolchain_id_;
+ }
+
+ inline bool agent_options::
+ toolchain_id_specified () const
+ {
+ return this->toolchain_id_specified_;
+ }
+
+ inline const interactive_mode& agent_options::
+ interactive () const
+ {
+ return this->interactive_;
+ }
+
+ inline bool agent_options::
+ interactive_specified () const
+ {
+ return this->interactive_specified_;
+ }
+
+ inline const uint16_t& agent_options::
+ instance () const
+ {
+ return this->instance_;
+ }
+
+ inline bool agent_options::
+ instance_specified () const
+ {
+ return this->instance_specified_;
+ }
+
+ inline const uint16_t& agent_options::
+ instance_max () const
+ {
+ return this->instance_max_;
+ }
+
+ inline bool agent_options::
+ instance_max_specified () const
+ {
+ return this->instance_max_specified_;
+ }
+
+ inline const size_t& agent_options::
+ cpu () const
+ {
+ return this->cpu_;
+ }
+
+ inline bool agent_options::
+ cpu_specified () const
+ {
+ return this->cpu_specified_;
+ }
+
+ inline const size_t& agent_options::
+ build_ram () const
+ {
+ return this->build_ram_;
+ }
+
+ inline bool agent_options::
+ build_ram_specified () const
+ {
+ return this->build_ram_specified_;
+ }
+
+ inline const size_t& agent_options::
+ auxiliary_ram () const
+ {
+ return this->auxiliary_ram_;
+ }
+
+ inline bool agent_options::
+ auxiliary_ram_specified () const
+ {
+ return this->auxiliary_ram_specified_;
+ }
+
+ inline const string& agent_options::
+ bridge () const
+ {
+ return this->bridge_;
+ }
+
+ inline bool agent_options::
+ bridge_specified () const
+ {
+ return this->bridge_specified_;
+ }
+
+ inline const path& agent_options::
+ auth_key () const
+ {
+ return this->auth_key_;
+ }
+
+ inline bool agent_options::
+ auth_key_specified () const
+ {
+ return this->auth_key_specified_;
+ }
+
+ inline const strings& agent_options::
+ trust () const
+ {
+ return this->trust_;
+ }
+
+ inline bool agent_options::
+ trust_specified () const
+ {
+ return this->trust_specified_;
+ }
+
+ inline const dir_path& agent_options::
+ machines () const
+ {
+ return this->machines_;
+ }
+
+ inline bool agent_options::
+ machines_specified () const
+ {
+ return this->machines_specified_;
+ }
+
+ inline const dir_path& agent_options::
+ tftp () const
+ {
+ return this->tftp_;
+ }
+
+ inline bool agent_options::
+ tftp_specified () const
+ {
+ return this->tftp_specified_;
+ }
+
+ inline const uint16_t& agent_options::
+ tftp_port () const
+ {
+ return this->tftp_port_;
+ }
+
+ inline bool agent_options::
+ tftp_port_specified () const
+ {
+ return this->tftp_port_specified_;
+ }
+
+ inline const size_t& agent_options::
+ bootstrap_startup () const
+ {
+ return this->bootstrap_startup_;
+ }
+
+ inline bool agent_options::
+ bootstrap_startup_specified () const
+ {
+ return this->bootstrap_startup_specified_;
+ }
+
+ inline const size_t& agent_options::
+ bootstrap_timeout () const
+ {
+ return this->bootstrap_timeout_;
+ }
+
+ inline bool agent_options::
+ bootstrap_timeout_specified () const
+ {
+ return this->bootstrap_timeout_specified_;
+ }
+
+ inline const size_t& agent_options::
+ bootstrap_auxiliary () const
+ {
+ return this->bootstrap_auxiliary_;
+ }
+
+ inline bool agent_options::
+ bootstrap_auxiliary_specified () const
+ {
+ return this->bootstrap_auxiliary_specified_;
+ }
+
+ inline const size_t& agent_options::
+ bootstrap_retries () const
+ {
+ return this->bootstrap_retries_;
+ }
+
+ inline bool agent_options::
+ bootstrap_retries_specified () const
+ {
+ return this->bootstrap_retries_specified_;
+ }
+
+ inline const size_t& agent_options::
+ build_startup () const
+ {
+ return this->build_startup_;
+ }
+
+ inline bool agent_options::
+ build_startup_specified () const
+ {
+ return this->build_startup_specified_;
+ }
+
+ inline const size_t& agent_options::
+ build_timeout () const
+ {
+ return this->build_timeout_;
+ }
+
+ inline bool agent_options::
+ build_timeout_specified () const
+ {
+ return this->build_timeout_specified_;
+ }
+
+ inline const size_t& agent_options::
+ build_retries () const
+ {
+ return this->build_retries_;
+ }
+
+ inline bool agent_options::
+ build_retries_specified () const
+ {
+ return this->build_retries_specified_;
+ }
+
+ inline const size_t& agent_options::
+ interactive_timeout () const
+ {
+ return this->interactive_timeout_;
+ }
+
+ inline bool agent_options::
+ interactive_timeout_specified () const
+ {
+ return this->interactive_timeout_specified_;
+ }
+
+ inline const size_t& agent_options::
+ connect_timeout () const
+ {
+ return this->connect_timeout_;
+ }
+
+ inline bool agent_options::
+ connect_timeout_specified () const
+ {
+ return this->connect_timeout_specified_;
+ }
+
+ inline const size_t& agent_options::
+ request_timeout () const
+ {
+ return this->request_timeout_;
+ }
+
+ inline bool agent_options::
+ request_timeout_specified () const
+ {
+ return this->request_timeout_specified_;
+ }
+
+ inline const size_t& agent_options::
+ request_retries () const
+ {
+ return this->request_retries_;
+ }
+
+ inline bool agent_options::
+ request_retries_specified () const
+ {
+ return this->request_retries_specified_;
+ }
+
+ inline const path& agent_options::
+ openssl () const
+ {
+ return this->openssl_;
+ }
+
+ inline bool agent_options::
+ openssl_specified () const
+ {
+ return this->openssl_specified_;
+ }
+
+ inline const strings& agent_options::
+ openssl_option () const
+ {
+ return this->openssl_option_;
+ }
+
+ inline bool agent_options::
+ openssl_option_specified () const
+ {
+ return this->openssl_option_specified_;
+ }
+
+ inline const bool& agent_options::
+ dump_machines () const
+ {
+ return this->dump_machines_;
+ }
+
+ inline const bool& agent_options::
+ dump_task () const
+ {
+ return this->dump_task_;
+ }
+
+ inline const bool& agent_options::
+ dump_result () const
+ {
+ return this->dump_result_;
+ }
+
+ inline const bool& agent_options::
+ fake_bootstrap () const
+ {
+ return this->fake_bootstrap_;
+ }
+
+ inline const bool& agent_options::
+ fake_build () const
+ {
+ return this->fake_build_;
+ }
+
+ inline const path& agent_options::
+ fake_machine () const
+ {
+ return this->fake_machine_;
+ }
+
+ inline bool agent_options::
+ fake_machine_specified () const
+ {
+ return this->fake_machine_specified_;
+ }
+
+ inline const path& agent_options::
+ fake_request () const
+ {
+ return this->fake_request_;
+ }
+
+ inline bool agent_options::
+ fake_request_specified () const
+ {
+ return this->fake_request_specified_;
+ }
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
diff --git a/bbot/agent/agent.cli b/bbot/agent/agent.cli
index eb3553d..23765cf 100644
--- a/bbot/agent/agent.cli
+++ b/bbot/agent/agent.cli
@@ -119,7 +119,7 @@ namespace bbot
"<num>",
"Toolchain number, 1 by default. If agents are running for several
toolchains, then each of them should have a unique toolchain number
- between 1 and 99. This number is used as an offset for network ports,
+ between 1 and 9. This number is used as an offset for network ports,
interfaces, etc."
}
@@ -189,10 +189,24 @@ namespace bbot
"Number of CPUs (threads) to use, 1 by default."
}
- size_t --ram (1024 * 1024) // 1G
+ size_t --build-ram (4 * 1024 * 1024) // 4GiB
{
"<num>",
- "Amount of RAM (in kB) to use, 1G by default."
+ "Amount of RAM (in KiB) to use for the build machine, 4GiB by default."
+ }
+
+ size_t --auxiliary-ram = 0
+ {
+ "<num>",
+ "Amount of RAM (in KiB) to use for auxiliary machines. To disable
+ running auxiliary machines, specify \cb{0}. If unspecified, then
+ currently the behavior is the same as specifying \cb{0} but this
+ may change in the future (for example, to support a more dynamic
+ allocation strategy)."
+
+ // Note: it's not going to be easy to set it to unspecified in
+ // bbot-agent@.service so we may have to invent some special value,
+ // like `auto`.
}
string --bridge = "br1"
@@ -237,28 +251,35 @@ namespace bbot
}
// Low 23401+, 23501+, 23601+, etc., all look good collision-wise with
- // with anything useful.
+ // anything useful.
//
uint16_t --tftp-port = 23400
{
"<num>",
"TFTP server port base, 23400 by default. The actual port is calculated
- by adding an offset calculated based on the toolchain and instance
- numbers."
+ by adding an offset calculated based on the toolchain, instance, and
+ machine numbers."
}
size_t --bootstrap-startup = 300
{
"<sec>",
- "Maximum number of seconds to wait for machine bootstrap startup,
+ "Maximum number of seconds to wait for build machine bootstrap startup,
300 (5 minutes) by default."
}
size_t --bootstrap-timeout = 3600
{
"<sec>",
- "Maximum number of seconds to wait for machine bootstrap completion,
- 3600 (60 minutes) by default."
+ "Maximum number of seconds to wait for build machine bootstrap
+ completion, 3600 (60 minutes) by default."
+ }
+
+ size_t --bootstrap-auxiliary = 900
+ {
+ "<sec>",
+ "Maximum number of seconds to wait for auxiliary machine bootstrap
+ completion, 900 (15 minutes) by default."
}
size_t --bootstrap-retries = 2
@@ -272,7 +293,7 @@ namespace bbot
{
"<sec>",
"Maximum number of seconds to wait for build startup, 240 (4 minutes) by
- default."
+ default. This value is used for both build and auxiliary machines."
}
size_t --build-timeout = 5400
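To make the --tftp-port wording above concrete: the agent adds an offset to the 23400 base, and the agent.cxx changes below show that the machine number is the last component of that offset (0 for the build machine, non-zero for auxiliary machines). The toolchain/instance part of the calculation is not visible in this diff, so the helper below is purely hypothetical and only demonstrates the general shape; the agent's actual formula may differ:

#include <cstdint>

// Hypothetical port calculation: each toolchain/instance pair gets its own
// group of ports and the machine number selects a port within the group
// (0 = build machine, 1+ = auxiliary machines). The real offset derivation
// lives in agent.cxx and is not part of this diff.
//
static std::uint16_t
tftp_port (std::uint16_t base,           // --tftp-port, 23400 by default.
           std::uint16_t toolchain_num,  // --toolchain-num, 1-9.
           std::uint16_t instance,       // --instance, 1-99.
           std::uint16_t machine_num)    // 0 for build, 1+ for auxiliary.
{
  std::uint16_t offset ((toolchain_num - 1) * 1000 + instance * 100); // Made up.
  return base + offset + machine_num;
}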
diff --git a/bbot/agent/agent.cxx b/bbot/agent/agent.cxx
index 062fd68..b6f1783 100644
--- a/bbot/agent/agent.cxx
+++ b/bbot/agent/agent.cxx
@@ -56,6 +56,27 @@ using namespace bbot;
using std::cout;
using std::endl;
+// If RAM minimum is not specified for a machine, then let's assume something
+// plausible like 256MiB. This way we won't end up with degenerate cases where
+// we attempt to start a machine with some absurd amount of RAM.
+//
+const std::uint64_t default_ram_minimum = 262144;
+
+static inline std::uint64_t
+effective_ram_minimum (const machine_header_manifest& m)
+{
+ // Note: neither ram_minimum nor ram_maximum should be 0.
+ //
+ assert ((!m.ram_minimum || *m.ram_minimum != 0) &&
+ (!m.ram_maximum || *m.ram_maximum != 0));
+
+ return (m.ram_minimum
+ ? *m.ram_minimum
+ : (m.ram_maximum && *m.ram_maximum < default_ram_minimum
+ ? *m.ram_maximum
+ : default_ram_minimum));
+}
+
static std::mt19937 rand_gen (std::random_device {} ());
// According to the standard, atomic's use in the signal handler is only safe
@@ -153,16 +174,16 @@ btrfs_exit (tracer& t, A&&... a)
"btrfs", forward<A> (a)...);
}
-// Bootstrap the machine. Return the bootstrapped machine manifest if
-// successful and nullopt otherwise (in which case the machine directory
-// should be cleaned and the machine ignored for now).
+// Bootstrap a build machine. Return the bootstrapped machine manifest if
+// successful and nullopt otherwise (in which case the caller should clean up
+// the machine directory and ignore the machine for now).
//
static optional<bootstrapped_machine_manifest>
-bootstrap_machine (const dir_path& md,
- const machine_manifest& mm,
- optional<bootstrapped_machine_manifest> obmm)
+bootstrap_build_machine (const dir_path& md,
+ const machine_manifest& mm,
+ optional<bootstrapped_machine_manifest> obmm)
{
- tracer trace ("bootstrap_machine", md.string ().c_str ());
+ tracer trace ("bootstrap_build_machine", md.string ().c_str ());
bootstrapped_machine_manifest r {
mm,
@@ -184,10 +205,12 @@ bootstrap_machine (const dir_path& md,
else
try
{
+ // Note: similar code in bootstrap_auxiliary_machine().
+
// Start the TFTP server (server chroot is --tftp). Map:
//
- // GET requests to .../toolchains/<name>/*
- // PUT requests to .../bootstrap/<name>-<instance>/*
+ // GET requests to .../toolchains/<toolchain>/*
+ // PUT requests to .../bootstrap/<toolchain>-<instance>/*
//
const string in_name (tc_name + '-' + to_string (inst));
auto_rmdir arm ((dir_path (ops.tftp ()) /= "bootstrap") /= in_name);
@@ -211,7 +234,7 @@ bootstrap_machine (const dir_path& md,
{
tftp_server tftpd ("Gr ^/?(.+)$ /toolchains/" + tc_name + "/\\1\n" +
"Pr ^/?(.+)$ /bootstrap/" + in_name + "/\\1\n",
- ops.tftp_port () + offset);
+ ops.tftp_port () + offset + 0 /* build machine */);
l3 ([&]{trace << "tftp server on port " << tftpd.port ();});
@@ -220,6 +243,9 @@ bootstrap_machine (const dir_path& md,
unique_ptr<machine> m (
start_machine (md,
mm,
+ 0 /* machine_num (build) */,
+ ops.cpu (),
+ ops.build_ram (),
obmm ? obmm->machine.mac : nullopt,
ops.bridge (),
tftpd.port (),
@@ -234,8 +260,11 @@ bootstrap_machine (const dir_path& md,
make_exception_guard (
[&m, &md] ()
{
- info << "trying to force machine " << md << " down";
- try {m->forcedown (false);} catch (const failed&) {}
+ if (m != nullptr)
+ {
+ info << "trying to force machine " << md << " down";
+ try {m->forcedown (false);} catch (const failed&) {}
+ }
}));
// What happens if the bootstrap process hangs? The simple thing would
@@ -260,7 +289,7 @@ bootstrap_machine (const dir_path& md,
m->cleanup ();
info << "resuming after machine suspension";
- // Note: snapshot cleaned up by the caller of bootstrap_machine().
+ // Note: snapshot cleaned up by the caller.
}
catch (const failed&) {}
@@ -313,8 +342,7 @@ bootstrap_machine (const dir_path& md,
if (!check_machine ())
{
- // Note: snapshot cleaned up by the caller of bootstrap_machine().
- return nullopt;
+ return nullopt; // Note: snapshot cleaned up by the caller.
}
}
@@ -336,6 +364,7 @@ bootstrap_machine (const dir_path& md,
m->print_info (dr);
try {m->forcedown (false);} catch (const failed&) {}
+ m = nullptr; // Disable exceptions guard above.
continue;
}
@@ -359,8 +388,7 @@ bootstrap_machine (const dir_path& md,
//
if (!(file_not_empty (mf) || file_not_empty (mfo)))
{
- // Note: snapshot cleaned up by the caller of bootstrap_machine().
- return nullopt;
+ return nullopt; // Note: snapshot cleaned up by the caller.
}
}
@@ -411,6 +439,223 @@ bootstrap_machine (const dir_path& md,
return r;
}
+// Bootstrap an auxiliary machine. Return the bootstrapped machine manifest if
+// successful and nullopt otherwise (in which case the caller should clean up
+// the machine directory and ignore the machine for now).
+//
+static vector<size_t>
+divide_auxiliary_ram (const vector<const machine_header_manifest*>&);
+
+static optional<bootstrapped_machine_manifest>
+bootstrap_auxiliary_machine (const dir_path& md,
+ const machine_manifest& mm,
+ optional<bootstrapped_machine_manifest> obmm)
+{
+ tracer trace ("bootstrap_auxiliary_machine", md.string ().c_str ());
+
+ bootstrapped_machine_manifest r {
+ mm,
+    toolchain_manifest {}, // Unused for auxiliary.
+ bootstrap_manifest {} // Unused for auxiliary.
+ };
+
+ if (ops.fake_bootstrap ())
+ {
+ r.machine.mac = "de:ad:be:ef:de:ad";
+ }
+ else
+ try
+ {
+ // Similar to bootstrap_build_machine() except here we just wait for the
+ // upload of the environment.
+
+ // Start the TFTP server (server chroot is --tftp). Map:
+ //
+ // GET requests to /dev/null
+ // PUT requests to .../bootstrap/<toolchain>-<instance>/*
+ //
+ const string in_name (tc_name + '-' + to_string (inst));
+ auto_rmdir arm ((dir_path (ops.tftp ()) /= "bootstrap") /= in_name);
+ try_mkdir_p (arm.path);
+
+ // Environment upload.
+ //
+ path ef (arm.path / "environment");
+ try_rmfile (ef);
+
+ // Note that unlike build, here we use the same VM snapshot for retries,
+ // which is not ideal.
+ //
+ for (size_t retry (0);; ++retry)
+ {
+ tftp_server tftpd ("Gr ^/?(.+)$ " + string ("/dev/null") + '\n' +
+ "Pr ^/?(.+)$ /bootstrap/" + in_name + "/\\1\n",
+ ops.tftp_port () + offset + 1 /* auxiliary machine */);
+
+ l3 ([&]{trace << "tftp server on port " << tftpd.port ();});
+
+ // If the machine specified RAM minimum, use that to make sure the
+ // machine can actually function with this amount of RAM. Otherwise, use
+      // the minimum of RAM maximum (if specified) and the available auxiliary
+ // RAM (so we know this machine will at least work alone). For the
+ // latter case use divide_auxiliary_ram() to be consistent with the
+ // build case (see that function implementation for nuances).
+ //
+ size_t ram;
+ if (mm.ram_minimum)
+ ram = *mm.ram_minimum;
+ else
+ {
+ vector<size_t> rams (divide_auxiliary_ram ({&mm}));
+ assert (!rams.empty ()); // We should have skipped such a machine.
+ ram = rams.front ();
+ }
+
+ // Start the machine.
+ //
+ unique_ptr<machine> m (
+ start_machine (md,
+ mm,
+ 1 /* machine_num (first auxiliary) */,
+ ops.cpu (),
+ ram,
+ obmm ? obmm->machine.mac : nullopt,
+ ops.bridge (),
+ tftpd.port (),
+ false /* pub_vnc */));
+
+ {
+ // NOTE: see bootstrap_build_machine() for comments.
+
+ auto mg (
+ make_exception_guard (
+ [&m, &md] ()
+ {
+ if (m != nullptr)
+ {
+ info << "trying to force machine " << md << " down";
+ try {m->forcedown (false);} catch (const failed&) {}
+ }
+ }));
+
+ auto soft_fail = [&md, &m] (const char* msg)
+ {
+ {
+ diag_record dr (error);
+ dr << msg << " for machine " << md << ", suspending";
+ m->print_info (dr);
+ }
+
+ try
+ {
+ m->suspend (false);
+ m->wait (false);
+ m->cleanup ();
+ info << "resuming after machine suspension";
+
+ // Note: snapshot cleaned up by the caller.
+ }
+ catch (const failed&) {}
+
+ return nullopt;
+ };
+
+ auto check_machine = [&md, &m] ()
+ {
+ try
+ {
+ size_t t (0);
+ if (!m->wait (t /* seconds */, false /* fail_hard */))
+ return true; // Still running.
+
+ // Exited successfully.
+ }
+ catch (const failed&)
+ {
+ // Failed, exit code diagnostics has already been issued.
+ }
+
+ diag_record dr (error);
+ dr << "machine " << md << " exited unexpectedly";
+ m->print_info (dr);
+
+ return false;
+ };
+
+ // Wait up to the specified timeout for the auxiliary machine to
+ // bootstrap. Note that such a machine may do extra setup work on the
+ // first boot (such as install some packages, etc) which may take some
+    // first boot (such as installing some packages, etc.) which may take some
+ //
+ size_t to;
+ const size_t bootstrap_to (ops.bootstrap_auxiliary ());
+ const size_t shutdown_to (5 * 60);
+
+ // Serve TFTP requests while periodically checking for the environment
+ // file.
+ //
+ for (to = bootstrap_to; to != 0; )
+ {
+ if (tftpd.serve (to, 2))
+ continue;
+
+ if (!check_machine ())
+ {
+ if (!file_not_empty (ef))
+ {
+ return nullopt; // Note: snapshot cleaned up by the caller.
+ }
+ }
+
+ if (file_not_empty (ef))
+ {
+ if (!tftpd.serve (to, 5))
+ break;
+ }
+ }
+
+ if (to == 0)
+ {
+ if (retry > ops.bootstrap_retries ())
+ return soft_fail ("bootstrap timeout");
+
+ // Note: keeping the logs behind (no cleanup).
+
+ diag_record dr (warn);
+ dr << "machine " << mm.name << " mis-booted, retrying";
+ m->print_info (dr);
+
+ try {m->forcedown (false);} catch (const failed&) {}
+ m = nullptr; // Disable exceptions guard above.
+ continue;
+ }
+
+ l3 ([&]{trace << "completed bootstrap in " << bootstrap_to - to << "s";});
+
+ // Shut the machine down cleanly.
+ //
+ if (!m->shutdown ((to = shutdown_to)))
+ return soft_fail ("bootstrap shutdown timeout");
+
+ l3 ([&]{trace << "completed shutdown in " << shutdown_to - to << "s";});
+
+ m->cleanup ();
+ }
+
+ r.machine.mac = m->mac; // Save the MAC address.
+
+ break;
+ }
+ }
+ catch (const system_error& e)
+ {
+ fail << "bootstrap error: " << e;
+ }
+
+ serialize_manifest (r, md / "manifest", "bootstrapped machine");
+ return r;
+}
+
// Global toolchain lock.
//
// The overall locking protocol is as follows:
@@ -426,10 +671,11 @@ bootstrap_machine (const dir_path& md,
// proceeds to bootstrap the machine, releases its lock, and restarts the
// process from scratch.
//
-// 4. Otherwise, upon receiving a task response for one of the machines, the
-// agent releases all the other machine locks followed by the global lock,
-// proceeds to perform the task on the selected machine, releases its lock,
-// and restarts the process from scratch.
+// 4. Otherwise, upon receiving a task response for one of the machines (plus,
+// potentially, a number of auxiliary machines), the agent releases all the
+// other machine locks followed by the global lock, proceeds to perform the
+// task on the selected machine(s), releases their locks, and restarts the
+// process from scratch.
//
// One notable implication of this protocol is that the machine locks are
// only acquired while holding the global toolchain lock but can be released
@@ -528,6 +774,13 @@ lock_toolchain (unsigned int timeout)
// guaranteed to be atomic (in case later we want to support exclusive
// bootstrap and shared build).
//
+// Note also that we per-toolchain lock auxiliary machines even though they
+// are not toolchain-specific. Doing it this way allows us to handle both
+// types of machines consistently with regards to priorities, interrupts, etc.
+// It also means we will have each auxiliary machine available per-toolchain
+// rather than a single machine shared between all the toolchains, which is
+// a good thing.
+//
class machine_lock
{
public:
@@ -807,20 +1060,24 @@ compare_bbot (const bootstrap_manifest& m)
// bootstrapping/suspended machines have to be returned to get the correct
// count of currently active instances for the inst_max comparison.)
//
+// Note that both build and auxiliary machines are returned. For auxiliary,
+// toolchain and bootstrap manifests are unused and therefore always empty.
+//
struct bootstrapped_machine
{
- dir_path path;
machine_lock lock;
+ const dir_path path;
bootstrapped_machine_manifest manifest;
};
using bootstrapped_machines = vector<bootstrapped_machine>;
static pair<toolchain_lock, bootstrapped_machines>
enumerate_machines (const dir_path& machines)
-try
{
tracer trace ("enumerate_machines", machines.string ().c_str ());
+ size_t dir_iter_retries (0); // Directory iteration retry count (see below).
+
for (;;) // From-scratch retry loop for after bootstrap (see below).
{
pair<toolchain_lock, bootstrapped_machines> pr;
@@ -850,8 +1107,8 @@ try
r.push_back (
bootstrapped_machine {
- dir_path (ops.machines ()) /= mh.name, // For diagnostics.
machine_lock (path (), nullfd), // Fake lock.
+ dir_path (ops.machines ()) /= mh.name, // For diagnostics.
bootstrapped_machine_manifest {
machine_manifest {
move (mh.id),
@@ -860,15 +1117,18 @@ try
machine_type::kvm,
string ("de:ad:be:ef:de:ad"),
nullopt,
- strings ()},
+ strings (),
+ nullopt,
+ nullopt,
+ nullopt},
toolchain_manifest {tc_id},
bootstrap_manifest {}}});
return pr;
}
- // Notice and warn if there are no machines (as opposed to all of them
- // being busy).
+ // Notice and warn if there are no build machines (as opposed to all of
+ // them being busy).
//
bool none (true);
@@ -893,262 +1153,317 @@ try
// The first level are machine volumes.
//
- for (const dir_entry& ve: dir_iterator (machines, dir_iterator::no_follow))
+ try
{
- const string vn (ve.path ().string ());
-
- // Ignore hidden directories.
- //
- if (ve.type () != entry_type::directory || vn[0] == '.')
- continue;
-
- const dir_path vd (dir_path (machines) /= vn);
-
- // Inside we have machines.
- //
- try
+ bool dir_iter_retry (false);
+ for (const dir_entry& ve:
+ dir_iterator (machines, dir_iterator::no_follow))
{
- for (const dir_entry& me: dir_iterator (vd, dir_iterator::no_follow))
- {
- const string mn (me.path ().string ());
+ const string vn (ve.path ().string ());
- if (me.type () != entry_type::directory || mn[0] == '.')
- continue;
-
- const dir_path md (dir_path (vd) /= mn);
+ // Ignore hidden directories.
+ //
+ if (ve.type () != entry_type::directory || vn[0] == '.')
+ continue;
- // Our endgoal here is to obtain a bootstrapped snapshot of this
- // machine while watching out for potential race conditions (other
- // instances as well as machines being added/upgraded/removed; see
- // the manual for details).
- //
- // So here is our overall plan:
- //
- // 1. Resolve current subvolume link for our bootstrap protocol.
- //
- // 2. Lock the machine. This excludes any other instance from trying
- // to perform the following steps.
- //
- // 3. If there is no link, cleanup old bootstrap (if any) and ignore
- // this machine.
- //
- // 4. Try to create a snapshot of current subvolume (this operation
- // is atomic). If failed (e.g., someone changed the link and
- // removed the subvolume in the meantime), retry from #1.
- //
- // 5. Compare the snapshot to the already bootstrapped version (if
- // any) and see if we need to re-bootstrap. If so, use the
- // snapshot as a starting point. Rename to bootstrapped at the
- // end (atomic).
- //
- dir_path lp (dir_path (md) /= (mn + '-' + bs_prot)); // -<P>
- dir_path tp (dir_path (md) /= (mn + '-' + tc_name)); // -<toolchain>
+ const dir_path vd (dir_path (machines) /= vn);
- auto delete_bootstrapped = [&tp, &trace] () // Delete -<toolchain>.
+ // Inside we have machines.
+ //
+ try
+ {
+ for (const dir_entry& me: dir_iterator (vd, dir_iterator::no_follow))
{
- run_btrfs (trace, "property", "set", "-ts", tp, "ro", "false");
- run_btrfs (trace, "subvolume", "delete", tp);
- };
+ const string mn (me.path ().string ());
- for (size_t retry (0);; ++retry)
- {
- if (retry != 0)
- sleep (1);
+ if (me.type () != entry_type::directory || mn[0] == '.')
+ continue;
+
+ const dir_path md (dir_path (vd) /= mn);
- // Resolve the link to subvolume path.
+ // Our endgoal here is to obtain a bootstrapped snapshot of this
+ // machine while watching out for potential race conditions (other
+ // instances as well as machines being added/upgraded/removed; see
+ // the manual for details).
+ //
+ // So here is our overall plan:
+ //
+ // 1. Resolve current subvolume link for our bootstrap protocol.
+ //
+ // 2. Lock the machine. This excludes any other instance from
+ // trying to perform the following steps.
+ //
+ // 3. If there is no link, cleanup old bootstrap (if any) and
+ // ignore this machine.
+ //
+ // 4. Try to create a snapshot of current subvolume (this
+ // operation is atomic). If failed (e.g., someone changed the
+ // link and removed the subvolume in the meantime), retry from
+ // #1.
//
- dir_path sp; // <name>-<P>.<R>
+ // 5. Compare the snapshot to the already bootstrapped version (if
+ // any) and see if we need to re-bootstrap. If so, use the
+ // snapshot as a starting point. Rename to bootstrapped at the
+ // end (atomic).
+ //
+ dir_path lp (dir_path (md) /= (mn + '-' + bs_prot)); // -<P>
+ dir_path tp (dir_path (md) /= (mn + '-' + tc_name)); // -<toolchain>
- try
+ auto delete_bootstrapped = [&tp, &trace] () // Delete -<toolchain>.
{
- sp = path_cast<dir_path> (readsymlink (lp));
+ run_btrfs (trace, "property", "set", "-ts", tp, "ro", "false");
+ run_btrfs (trace, "subvolume", "delete", tp);
+ };
- if (sp.relative ())
- sp = md / sp;
- }
- catch (const system_error& e)
+ for (size_t retry (0);; ++retry)
{
- // Leave the subvolume path empty if the subvolume link doesn't
- // exist and fail on any other error.
- //
- if (e.code ().category () != std::generic_category () ||
- e.code ().value () != ENOENT)
- fail << "unable to read subvolume link " << lp << ": " << e;
- }
-
- none = none && sp.empty ();
+ if (retry != 0)
+ sleep (1);
- // Try to lock the machine.
- //
- machine_lock ml (lock_machine (tl, tp));
+ // Resolve the link to subvolume path.
+ //
+ dir_path sp; // <name>-<P>.<R>
- if (!ml.locked ())
- {
- machine_manifest mm;
- if (ml.prio)
+ try
{
- // Get the machine manifest (subset of the steps performed for
- // the locked case below).
- //
- // Note that it's possible the machine we get is not what was
- // originally locked by the other process (e.g., it has been
- // upgraded since). It's also possible that if and when we
- // interrupt and lock this machine, it will be a different
- // machine (e.g., it has been upgraded since we read this
- // machine manifest). To deal with all of that we will be
- // reloading this information if/when we acquire the lock to
- // this machine.
- //
- if (sp.empty ())
- {
- l3 ([&]{trace << "skipping " << md << ": no subvolume link";});
- break;
- }
-
- l3 ([&]{trace << "keeping " << md << ": locked by " << ml.pid
- << " with priority " << *ml.prio;});
+ sp = path_cast<dir_path> (readsymlink (lp));
- mm = parse_manifest<machine_manifest> (
- sp / "manifest", "machine");
+ if (sp.relative ())
+ sp = md / sp;
}
- else // Bootstrapping/suspended.
+ catch (const system_error& e)
{
- l3 ([&]{trace << "keeping " << md << ": being bootstrapped "
- << "or suspened by " << ml.pid;});
+ // Leave the subvolume path empty if the subvolume link
+ // doesn't exist and fail on any other error.
+ //
+ if (e.code ().category () != std::generic_category () ||
+ e.code ().value () != ENOENT)
+ fail << "unable to read subvolume link " << lp << ": " << e;
}
- // Add the machine to the lists and bail out.
+ // Try to lock the machine.
//
- r.push_back (bootstrapped_machine {
- move (tp),
- move (ml),
- bootstrapped_machine_manifest {move (mm), {}, {}}});
+ machine_lock ml (lock_machine (tl, tp));
- break;
- }
-
- bool te (dir_exists (tp));
+ if (!ml.locked ())
+ {
+ machine_manifest mm;
+ if (ml.prio)
+ {
+ // Get the machine manifest (subset of the steps performed
+ // for the locked case below).
+ //
+ // Note that it's possible the machine we get is not what
+ // was originally locked by the other process (e.g., it has
+ // been upgraded since). It's also possible that if and when
+ // we interrupt and lock this machine, it will be a
+ // different machine (e.g., it has been upgraded since we
+ // read this machine manifest). To deal with all of that we
+ // will be reloading this information if/when we acquire the
+ // lock to this machine.
+ //
+ if (sp.empty ())
+ {
+ l3 ([&]{trace << "skipping " << md << ": no subvolume link";});
+ break;
+ }
+
+ l3 ([&]{trace << "keeping " << md << ": locked by " << ml.pid
+ << " with priority " << *ml.prio;});
+
+ mm = parse_manifest<machine_manifest> (
+ sp / "manifest", "machine");
+
+ none =
+ none && mm.effective_role () == machine_role::auxiliary;
+ }
+ else // Bootstrapping/suspended.
+ {
+ l3 ([&]{trace << "keeping " << md << ": being bootstrapped "
+                            << "or suspended by " << ml.pid;});
- // If the resolution fails, then this means there is no current
- // machine subvolume (for this bootstrap protocol). In this case
- // we clean up our toolchain subvolume (-<toolchain>, if any) and
- // ignore this machine.
- //
- if (sp.empty ())
- {
- if (te)
- delete_bootstrapped ();
+ // Assume it is a build machine (we cannot determine whether
+ // it is build or auxiliary without loading its manifest).
+ //
+ none = false;
+ }
- l3 ([&]{trace << "skipping " << md << ": no subvolume link";});
- break;
- }
+ // Add the machine to the lists and bail out.
+ //
+ r.push_back (bootstrapped_machine {
+ move (ml),
+ move (tp),
+ bootstrapped_machine_manifest {move (mm), {}, {}}});
- // <name>-<toolchain>-<xxx>
- //
- dir_path xp (snapshot_path (tp));
+ break;
+ }
- if (btrfs_exit (trace, "subvolume", "snapshot", sp, xp) != 0)
- {
- if (retry >= 10)
- fail << "unable to snapshot subvolume " << sp;
+ bool te (dir_exists (tp));
- continue;
- }
+ // If the resolution fails, then this means there is no current
+ // machine subvolume (for this bootstrap protocol). In this case
+ // we clean up our toolchain subvolume (-<toolchain>, if any)
+ // and ignore this machine.
+ //
+ if (sp.empty ())
+ {
+ if (te)
+ delete_bootstrapped ();
- // Load the (original) machine manifest.
- //
- auto mm (
- parse_manifest<machine_manifest> (sp / "manifest", "machine"));
+ l3 ([&]{trace << "skipping " << md << ": no subvolume link";});
+ break;
+ }
- // If we already have <name>-<toolchain>, see if it needs to be
- // re-bootstrapped. Things that render it obsolete:
- //
- // 1. New machine revision (compare machine ids).
- // 2. New toolchain (compare toolchain ids).
- // 3. New bbot/libbbot (compare versions).
- //
- // The last case has a complication: what should we do if we have
- // bootstrapped a newer version of bbot? This would mean that we
- // are about to be stopped and upgraded (and the upgraded version
- // will probably be able to use the result). So we simply ignore
- // this machine for this run.
- //
- // Note: see similar code in the machine interruption logic.
- //
- optional<bootstrapped_machine_manifest> bmm;
- if (te)
- {
- bmm = parse_manifest<bootstrapped_machine_manifest> (
- tp / "manifest", "bootstrapped machine");
+ // <name>-<toolchain>-<xxx>
+ //
+ dir_path xp (snapshot_path (tp));
- if (bmm->machine.id != mm.id)
+ if (btrfs_exit (trace, "subvolume", "snapshot", sp, xp) != 0)
{
- l3 ([&]{trace << "re-bootstrap " << tp << ": new machine";});
- te = false;
+ if (retry >= 10)
+ fail << "unable to snapshot subvolume " << sp;
+
+ continue;
}
- if (!tc_id.empty () && bmm->toolchain.id != tc_id)
+ // Load the (original) machine manifest.
+ //
+ machine_manifest mm (
+ parse_manifest<machine_manifest> (sp / "manifest", "machine"));
+
+ bool aux (mm.effective_role () == machine_role::auxiliary);
+
+ // Skip machines for which we don't have sufficient RAM.
+ //
+ if (effective_ram_minimum (mm) >
+ (aux ? ops.auxiliary_ram () : ops.build_ram ()))
{
- l3 ([&]{trace << "re-bootstrap " << tp << ": new toolchain";});
- te = false;
+ l3 ([&]{trace << "skipping " << md << ": insufficient RAM";});
+ run_btrfs (trace, "subvolume", "delete", xp);
+ break;
}
- if (int i = compare_bbot (bmm->bootstrap))
+ none = none && aux;
+
+ // If we already have <name>-<toolchain>, see if it needs to be
+ // re-bootstrapped. Things that render it obsolete:
+ //
+ // 1. New machine revision (compare machine ids).
+ // 2. New toolchain (compare toolchain ids, not auxiliary).
+ // 3. New bbot/libbbot (compare versions, not auxiliary).
+ //
+ // The last case has a complication: what should we do if we
+ // have bootstrapped a newer version of bbot? This would mean
+ // that we are about to be stopped and upgraded (and the
+ // upgraded version will probably be able to use the result). So
+ // we simply ignore this machine for this run.
+ //
+ // Note: see similar code in the machine interruption logic.
+ //
+ optional<bootstrapped_machine_manifest> bmm;
+ if (te)
{
- if (i < 0)
+ bmm = parse_manifest<bootstrapped_machine_manifest> (
+ tp / "manifest", "bootstrapped machine");
+
+ if (bmm->machine.id != mm.id)
{
- l3 ([&]{trace << "re-bootstrap " << tp << ": new bbot";});
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new machine";});
te = false;
}
- else
+
+ if (!aux)
{
- l3 ([&]{trace << "ignoring " << tp << ": old bbot";});
- run_btrfs (trace, "subvolume", "delete", xp);
- break;
+ if (!tc_id.empty () && bmm->toolchain.id != tc_id)
+ {
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new toolchain";});
+ te = false;
+ }
+
+ if (int i = compare_bbot (bmm->bootstrap))
+ {
+ if (i < 0)
+ {
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new bbot";});
+ te = false;
+ }
+ else
+ {
+ l3 ([&]{trace << "ignoring " << tp << ": old bbot";});
+ run_btrfs (trace, "subvolume", "delete", xp);
+ break;
+ }
+ }
}
+
+ if (!te)
+ delete_bootstrapped ();
}
+ else
+ l3 ([&]{trace << "bootstrap " << tp;});
if (!te)
- delete_bootstrapped ();
- }
- else
- l3 ([&]{trace << "bootstrap " << tp;});
-
- if (!te)
- {
- // Ignore any other machines that need bootstrapping.
- //
- if (!pboot)
{
- pboot = pending_bootstrap {
- move (ml), move (tp), move (xp), move (mm), move (bmm)};
+ // Ignore any other machines that need bootstrapping.
+ //
+ if (!pboot)
+ {
+ pboot = pending_bootstrap {
+ move (ml), move (tp), move (xp), move (mm), move (bmm)};
+ }
+ else
+ run_btrfs (trace, "subvolume", "delete", xp);
+
+ break;
}
else
run_btrfs (trace, "subvolume", "delete", xp);
+ // Add the machine to the lists.
+ //
+ r.push_back (
+ bootstrapped_machine {move (ml), move (tp), move (*bmm)});
+
break;
- }
- else
- run_btrfs (trace, "subvolume", "delete", xp);
+ } // Retry loop.
+ } // Inner dir_iterator loop.
+ }
+ catch (const system_error& e)
+ {
+ // Once in a while we get ENOENT while iterating over the machines
+ // volume directory. This directory contains the machine directories
+ // (not btrfs subvolumes) and is not being changed when we get this
+      // error. Maybe this is due to directory size/timestamp changes,
+ // but then we would expect to get this error a lot more often..? So
+ // this feels like a btrfs bug which we are going to retry a few
+ // times. See GH issue #349 for additional information.
+ //
+ dir_iter_retry = (dir_iter_retries++ != 3);
- // Add the machine to the lists.
- //
- r.push_back (
- bootstrapped_machine {move (tp), move (ml), move (*bmm)});
+ (dir_iter_retry
+ ? warn
+ : error) << "unable to iterate over " << vd << ": " << e;
+ if (dir_iter_retry)
break;
- } // Retry loop.
- } // Inner dir_iterator loop.
- }
- catch (const system_error& e)
- {
- fail << "unable to iterate over " << vd << ": " << e;
- }
- } // Outer dir_iterator loop.
+ else
+ throw failed ();
+ }
+ } // Outer dir_iterator loop.
+
+ if (dir_iter_retry)
+ continue; // Re-enumerate from scratch.
+ else
+ dir_iter_retries = 0; // Reset for re-enumeration due to other reasons.
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to iterate over " << machines << ": " << e;
+ }
// See if there is a pending bootstrap and whether we can perform it.
//
- // What should we do if we can't (i.e., we are in the priority minitor
+ // What should we do if we can't (i.e., we are in the priority monitor
// mode)? Well, we could have found some machines that are already
// bootstrapped (busy or not) and there may be a higher-priority task for
// one of them, so it feels natural to return whatever we've got.
@@ -1161,12 +1476,19 @@ try
// Determine how many machines are busy (locked by other processes) and
// make sure it's below the --instance-max limit, if specified.
//
+ // We should only count build machines unless being bootstrapped (see
+ // above).
+ //
if (inst_max != 0)
{
size_t busy (0);
for (const bootstrapped_machine& m: r)
- if (!m.lock.locked ())
+ {
+ if (!m.lock.locked () &&
+ (!m.lock.prio ||
+ m.manifest.machine.effective_role () != machine_role::auxiliary))
++busy;
+ }
assert (busy <= inst_max);
@@ -1196,8 +1518,12 @@ try
ml.bootstrap (tl);
tl.unlock ();
+ bool aux (pboot->mm.effective_role () == machine_role::auxiliary);
+
optional<bootstrapped_machine_manifest> bmm (
- bootstrap_machine (xp, pboot->mm, move (pboot->bmm)));
+ aux
+ ? bootstrap_auxiliary_machine (xp, pboot->mm, move (pboot->bmm))
+ : bootstrap_build_machine (xp, pboot->mm, move (pboot->bmm)));
if (!bmm)
{
@@ -1217,16 +1543,19 @@ try
l2 ([&]{trace << "bootstrapped " << bmm->machine.name;});
- // Check the bootstrapped bbot version as above and ignore this machine
- // if it's newer than us.
+ // Check the bootstrapped bbot version as above and ignore this build
+ // machine if it's newer than us.
//
- if (int i = compare_bbot (bmm->bootstrap))
+ if (!aux)
{
- if (i > 0)
- l3 ([&]{trace << "ignoring " << tp << ": old bbot";});
- else
- warn << "bootstrapped " << tp << " bbot worker is older "
- << "than agent; assuming test setup";
+ if (int i = compare_bbot (bmm->bootstrap))
+ {
+ if (i > 0)
+ l3 ([&]{trace << "ignoring " << tp << ": old bbot";});
+ else
+ warn << "bootstrapped " << tp << " bbot worker is older "
+ << "than agent; assuming test setup";
+ }
}
continue; // Re-enumerate from scratch.
@@ -1241,14 +1570,514 @@ try
// Unreachable.
}
+
+// Perform the build task throwing interrupt if it has been interrupted.
+//
+struct interrupt {};
+
+// Start an auxiliary machine (steps 1-3 described in perform_task() below).
+//
+// Note that if the returned machine is NULL, then it means it has failed to
+// start up (in which case the diagnostics has already been issued and
+// snapshot cleaned up).
+//
+// Note: can throw interrupt.
+//
+struct auxiliary_machine_result
+{
+ dir_path snapshot;
+ unique_ptr<bbot::machine> machine;
+};
+
+using auxiliary_machine_results = vector<auxiliary_machine_result>;
+
+static pair<auxiliary_machine_result, string /* environment */>
+start_auxiliary_machine (bootstrapped_machine& am,
+ const string& env_name,
+ uint16_t machine_num,
+ size_t ram,
+ const string& in_name, // <toolchain>-<instance>
+ const dir_path& tftp_put_dir,
+ optional<size_t> boost_cpus)
+try
+{
+ tracer trace ("start_auxiliary_machine", am.path.string ().c_str ());
+
+ // NOTE: a simplified version of perform_task() below.
+
+ machine_lock& ml (am.lock);
+ const dir_path& md (am.path);
+ const bootstrapped_machine_manifest& mm (am.manifest);
+
+ path ef (tftp_put_dir / "environment"); // Environment upload file.
+ path efm (ef + '-' + mm.machine.name); // Environment upload saved file.
+ try_rmfile (ef);
+ try_rmfile (efm);
+
+ // <name>-<toolchain>-<xxx>
+ //
+ const dir_path xp (snapshot_path (md));
+
+ for (size_t retry (0);; ++retry)
+ {
+ if (retry != 0)
+ run_btrfs (trace, "subvolume", "delete", xp);
+
+ run_btrfs (trace, "subvolume", "snapshot", md, xp);
+
+ // Start the TFTP server. Map:
+ //
+ // GET requests to /dev/null
+ // PUT requests to .../build/<toolchain>-<instance>/put/*
+ //
+ // Note that we only need to run the TFTP server until we get the
+ // environment upload. Which means we could have reused the same port as
+ // the build machine. But let's keep things parallel to the VNC ports and
+  // use a separate TFTP port for each auxiliary machine.
+ //
+ tftp_server tftpd ("Gr ^/?(.+)$ " + string ("/dev/null") + '\n' +
+ "Pr ^/?(.+)$ /build/" + in_name + "/put/\\1\n",
+ ops.tftp_port () + offset + machine_num);
+
+ l3 ([&]{trace << "tftp server on port " << tftpd.port ();});
+
+ // Note: the machine handling logic is similar to bootstrap. Except here
+ // we have to cleanup the snapshot ourselves in case of suspension or
+ // unexpected exit.
+
+ // Start the machine.
+ //
+  // Note that for now we don't support logging into auxiliary machines
+ // in the interactive mode. Maybe one day.
+ //
+ unique_ptr<machine> m (
+ start_machine (xp,
+ mm.machine,
+ machine_num,
+ boost_cpus ? *boost_cpus : ops.cpu (),
+ ram,
+ mm.machine.mac,
+ ops.bridge (),
+ tftpd.port (),
+ false /* public_vnc */));
+
+ auto mg (
+ make_exception_guard (
+ [&m, &xp] ()
+ {
+ if (m != nullptr)
+ {
+ info << "trying to force machine " << xp << " down";
+ try {m->forcedown (false);} catch (const failed&) {}
+ }
+ }));
+
+ auto soft_fail = [&trace, &ml, &xp, &m] (const char* msg)
+ {
+ {
+ diag_record dr (error);
+ dr << msg << " for machine " << xp << ", suspending";
+ m->print_info (dr);
+ }
+
+ try
+ {
+ // Update the information in the machine lock to signal that the
+ // machine is suspended and cannot be interrupted.
+ //
+ ml.suspend_task ();
+
+ m->suspend (false);
+ m->wait (false);
+ m->cleanup ();
+ run_btrfs (trace, "subvolume", "delete", xp);
+ info << "resuming after machine suspension";
+ }
+ catch (const failed&) {}
+
+ return make_pair (auxiliary_machine_result {move (xp), nullptr},
+ string ());
+ };
+
+ auto check_machine = [&xp, &m] ()
+ {
+ try
+ {
+ size_t t (0);
+ if (!m->wait (t /* seconds */, false /* fail_hard */))
+ return true;
+ }
+ catch (const failed&) {}
+
+ diag_record dr (warn);
+ dr << "machine " << xp << " exited unexpectedly";
+ m->print_info (dr);
+
+ return false;
+ };
+
+ auto check_interrupt = [&trace, &xp, &m] ()
+ {
+ if (sigurs1.load (std::memory_order_consume) == 0)
+ return;
+
+    l2 ([&]{trace << "machine " << xp << " interrupted";});
+
+ try {m->forcedown (false);} catch (const failed&) {}
+ m->cleanup ();
+ m = nullptr; // Disable exceptions guard above.
+ run_btrfs (trace, "subvolume", "delete", xp);
+
+ throw interrupt ();
+ };
+
+ // Wait for up to 4 minutes (by default) for the environment upload (the
+ // same logic as in bootstrap_auxiliary_machine() except here the machine
+ // cannot just exit).
+ //
+ size_t to;
+ const size_t startup_to (ops.build_startup ());
+
+ for (to = startup_to; to != 0; )
+ {
+ check_interrupt ();
+
+ if (tftpd.serve (to, 2))
+ continue;
+
+ if (!check_machine ())
+ {
+ // An auxiliary machine should not just exit.
+ //
+ return make_pair (auxiliary_machine_result {move (xp), nullptr},
+ string ());
+ }
+
+ if (file_not_empty (ef))
+ {
+ if (!tftpd.serve (to, 5))
+ break;
+ }
+ }
+
+ if (to == 0)
+ {
+ if (retry > ops.build_retries ())
+ return soft_fail ("build startup timeout");
+
+ // Note: keeping the logs behind (no cleanup).
+
+ diag_record dr (warn);
+ dr << "machine " << mm.machine.name << " mis-booted, retrying";
+ m->print_info (dr);
+
+ try {m->forcedown (false);} catch (const failed&) {}
+ m = nullptr; // Disable exceptions guard above.
+ continue;
+ }
+
+ l3 ([&]{trace << "completed startup in " << startup_to - to << "s";});
+
+ // Read the uploaded environment and, if necessary, append the name prefix
+ // (which we first make a valid C identifier and uppercase).
+ //
+ // Note that it may seem like a good idea to validate the format here.
+ // But that means we will essentially need to parse it twice (here and in
+  // worker). Plus, in worker we can communicate some diagnostics by writing
+ // it to the build log (here all we can easily do is abort the task). So
+ // here we just append the name prefix to trimmed non-blank/comment lines.
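+  //
+  // For example (hypothetical name and variable), an environment name of
+  // `pgsql` results in the prefix `PGSQL_`, turning an uploaded line
+  // `PORT=5432` into `PGSQL_PORT=5432` (blank and `#` comment lines are
+  // left as is).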
+ //
+ string env_pfx (env_name.empty ()
+ ? string ()
+ : ucase (sanitize_identifier (env_name)) + '_');
+ string env;
+ try
+ {
+ ifdstream is (ef, ifdstream::badbit);
+ for (string l; !eof (getline (is, l)); )
+ {
+ trim (l);
+
+ if (!env_pfx.empty () && !l.empty () && l.front () != '#')
+ l.insert (0, env_pfx);
+
+ env += l; env += '\n';
+ }
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << ef << ": " << e;
+ }
+
+ // Rename and keep the environment file for debugging (it will be removed
+ // at the end as part of the tftp_put_dir cleanup).
+ //
+ mvfile (ef, efm);
+
+ return make_pair (auxiliary_machine_result {move (xp), move (m)},
+ move (env));
+ }
+
+ // Unreachable.
+}
catch (const system_error& e)
{
- fail << "unable to iterate over " << machines << ": " << e << endf;
+ fail << "auxiliary machine startup error: " << e << endf;
}
-// Perform the build task throwing interrupt if it has been interrupted.
+// Divide the auxiliary RAM among the specified machines.
//
-struct interrupt {};
+// Issue diagnostics and return empty vector if the auxiliary RAM is
+// insufficient.
+//
+static vector<size_t> // Parallel to mms.
+divide_auxiliary_ram (const vector<const machine_header_manifest*>& mms)
+{
+ size_t ram (ops.auxiliary_ram ());
+
+ vector<size_t> rams;
+ vector<size_t> rnds; // Allocation rounds (see below).
+
+ // First pass: allocate the minimums.
+ //
+ for (const machine_header_manifest* mm: mms)
+ {
+ size_t v (effective_ram_minimum (*mm));
+
+ assert (!mm->ram_maximum || v <= *mm->ram_maximum); // Sanity check.
+
+ rams.push_back (v);
+ rnds.push_back (0);
+
+ if (ram >= v)
+ ram -= v;
+ else
+ {
+ diag_record dr (error);
+ dr << "insufficient auxiliary RAM " << ops.auxiliary_ram () << "KiB";
+
+ for (size_t i (0); i != rams.size (); ++i)
+ dr << info << mms[i]->name << " requires minimum " << rams[i] << "KiB";
+
+ return {};
+ }
+ }
+
+ // Second pass: distribute the remaining RAM.
+ //
+ // We are going to do it in the ram_minimum increments to avoid ending up
+ // with odd amounts (while Linux can probably grok anything, who knows about
+ // Windows).
+ //
+ // To make the distribution fair we are going to count how many times we
+ // have increased each machine's allocation (the rnds vector).
+ //
+ for (size_t a (1); ram != 0; ) // Allocation round.
+ {
+ // Find a machine that would be satisfied with the least amount of RAM but
+ // which hasn't yet been given anything on this allocation round.
+ //
+ size_t min_i; // Min index.
+ size_t min_v (0); // Min value.
+
+ // We are done if we couldn't give out any RAM and haven't seen any
+ // machines that have already been given something on this allocation
+ // round.
+ //
+ bool done (true);
+
+ for (size_t i (0); i != rams.size (); ++i)
+ {
+ if (rnds[i] != a)
+ {
+ const machine_header_manifest& mm (*mms[i]);
+
+ size_t o (rams[i]);
+ size_t v (effective_ram_minimum (mm));
+
+ // Don't allocate past maximum.
+ //
+ if (mm.ram_maximum && *mm.ram_maximum < o + v)
+ {
+ v = *mm.ram_maximum - o;
+
+ if (v == 0)
+ continue;
+ }
+
+ if (v <= ram && (min_v == 0 || min_v > v))
+ {
+ min_i = i;
+ min_v = v;
+ }
+ }
+ else
+ done = false;
+ }
+
+ if (min_v != 0)
+ {
+ rnds[min_i] = a;
+ rams[min_i] += min_v;
+ ram -= min_v;
+ }
+ else
+ {
+ if (done)
+ break;
+
+ ++a; // Next allocation round.
+ }
+ }
+
+ return rams;
+}
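+
+// For example, with the values from the #if 0 test in main() below
+// (--auxiliary-ram 4194304, that is 4GiB; m1: minimum 512MiB, no maximum;
+// m2: minimum 1GiB, maximum 1.5GiB), the first pass allocates 512MiB+1GiB
+// and the second pass distributes the remaining 2.5GiB in ram_minimum-sized
+// increments capped by ram_maximum, ending up with m1=2560MiB and
+// m2=1536MiB.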
+
+// Stop all the auxiliary machines and clear the passed list.
+//
+static void
+stop_auxiliary_machines (auxiliary_machine_results& amrs)
+{
+ tracer trace ("stop_auxiliary_machines");
+
+ if (!amrs.empty ())
+ {
+ // Do it in two passes to make sure all the machines are at least down.
+ //
+ for (const auxiliary_machine_result& amr: amrs)
+ {
+ if (amr.machine != nullptr)
+ {
+ try {amr.machine->forcedown (false);} catch (const failed&) {}
+ }
+ }
+
+ // Make sure we don't retry the above even if the below fails.
+ //
+ auxiliary_machine_results tmp;
+ tmp.swap (amrs);
+
+ for (const auxiliary_machine_result& amr: tmp)
+ {
+ if (amr.machine != nullptr)
+ {
+ amr.machine->cleanup ();
+ run_btrfs (trace, "subvolume", "delete", amr.snapshot);
+ }
+ }
+ }
+}
+
+// Start all the auxiliary machines and patch in their combined environment
+// into tm.auxiliary_environment.
+//
+// Return the started machines or empty list if any of them failed to start up
+// (which means this function should only be called for non-empty ams).
+//
+// Note that the order of auxiliary machines in ams may not match that in
+// tm.auxiliary_machines.
+//
+static auxiliary_machine_results
+start_auxiliary_machines (const vector<bootstrapped_machine*>& ams,
+ task_manifest& tm,
+ const string& in_name, // <toolchain>-<instance>
+ const dir_path& tftp_put_dir,
+ optional<size_t> boost_cpus)
+{
+ tracer trace ("start_auxiliary_machines");
+
+ size_t n (tm.auxiliary_machines.size ());
+
+ assert (n != 0 && ams.size () == n);
+
+ auxiliary_machine_results amrs;
+
+ // Divide the auxiliary RAM among the machines.
+ //
+ vector<size_t> rams;
+ {
+ vector<const machine_header_manifest*> mms;
+ mms.reserve (n);
+ for (bootstrapped_machine* am: ams)
+ mms.push_back (&am->manifest.machine);
+
+ rams = divide_auxiliary_ram (mms);
+ if (rams.empty ())
+ return amrs;
+
+ if (verb > 3) // l3
+ for (size_t i (0); i != n; ++i)
+ trace << mms[i]->name << " allocated " << rams[i] << "KiB";
+ }
+
+ // Start the machines.
+ //
+ // Let's use the order in which they were specified in the task manifest
+ // (which will naturally be the order in which they are specified in the
+ // package manifest). This way amrs and tm.auxiliary_machines will be
+ // parallel.
+ //
+ string envs; // Combined environments.
+
+ auto amg (
+ make_exception_guard (
+ [&amrs] ()
+ {
+ if (!amrs.empty ())
+ {
+ info << "trying to force auxiliary machines down";
+ stop_auxiliary_machines (amrs);
+ }
+ }));
+
+ for (size_t i (0); i != n; ++i)
+ {
+ const auxiliary_machine& tam (tm.auxiliary_machines[i]);
+
+ auto b (ams.begin ()), e (ams.end ());
+ auto j (find_if (b, e,
+ [&tam] (const bootstrapped_machine* m)
+ {
+ return m->manifest.machine.name == tam.name;
+ }));
+ assert (j != e);
+
+ // Note: can throw interrupt.
+ //
+ pair<auxiliary_machine_result, string> p (
+ start_auxiliary_machine (**j,
+ tam.environment_name,
+ i + 1,
+ rams[j - b], // Parallel to ams.
+ in_name,
+ tftp_put_dir,
+ boost_cpus));
+
+ if (p.first.machine == nullptr)
+ {
+ if (!amrs.empty ())
+ {
+ info << "trying to force auxiliary machines down";
+ stop_auxiliary_machines (amrs); // amrs is now empty.
+ }
+
+ return amrs;
+ }
+
+ amrs.push_back (move (p.first));
+
+ // Add the machine name as a header before its environment.
+ //
+ if (i != 0) envs += '\n';
+ envs += "# "; envs += tam.name; envs += '\n';
+ envs += "#\n";
+ envs += p.second; // Always includes trailing newline.
+ }
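+
+  // For example (hypothetical machine names, environment names, and
+  // variables), machines x86_64-linux-postgresql_16 and x86_64-linux-mysql_8
+  // with environment names pgsql and mysql could produce:
+  //
+  // # x86_64-linux-postgresql_16
+  // #
+  // PGSQL_PORT=5432
+  //
+  // # x86_64-linux-mysql_8
+  // #
+  // MYSQL_PORT=3306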
+
+ tm.auxiliary_environment = move (envs);
+
+ return amrs;
+}
struct perform_task_result
{
@@ -1278,16 +2107,18 @@ struct perform_task_result
upload_archive (move (a)) {}
};
+// Note that the task manifest is not const since we may need to patch in the
+// auxiliary_environment value.
+//
static perform_task_result
perform_task (toolchain_lock tl, // Note: assumes ownership.
- machine_lock& ml,
- const dir_path& md,
- const bootstrapped_machine_manifest& mm,
- const task_manifest& tm,
+ bootstrapped_machine& bm, // Build machine.
+ const vector<bootstrapped_machine*>& ams, // Auxiliary machines.
+ task_manifest& tm,
optional<size_t> boost_cpus)
try
{
- tracer trace ("perform_task", md.string ().c_str ());
+ tracer trace ("perform_task", bm.path.string ().c_str ());
// Arm the interrupt handler and release the global toolchain lock.
//
@@ -1296,6 +2127,10 @@ try
sigurs1.store (0, std::memory_order_release);
tl.unlock ();
+ machine_lock& ml (bm.lock);
+ const dir_path& md (bm.path);
+ const bootstrapped_machine_manifest& mm (bm.manifest);
+
const string in_name (tc_name + '-' + to_string (inst));
auto_rmdir arm ((dir_path (ops.tftp ()) /= "build") /= in_name);
@@ -1314,7 +2149,7 @@ try
// The overall plan is as follows:
//
- // 1. Snapshot the (bootstrapped) machine.
+ // 1. Snapshot the (bootstrapped) build machine.
//
// 2. Save the task manifest to the TFTP directory (to be accessed by the
// worker).
@@ -1326,6 +2161,18 @@ try
//
// 5. Clean up (force the machine down and delete the snapshot).
//
+ // If the task requires any auxiliary machines, then for each such machine
+ // perform the following steps 1-3 before step 1 above, and step 4 after
+ // step 5 above (that is, start all the auxiliary machines before the build
+ // machine and clean them up after):
+ //
+ // 1. Snapshot the (bootstrapped) auxiliary machine.
+ //
+ // 2. Start the TFTP server and the machine.
+ //
+ // 3. Handle TFTP upload requests until received the environment upload.
+ //
+ // 4. Clean up (force the machine down and delete the snapshot).
// TFTP server mapping (server chroot is --tftp):
//
@@ -1342,11 +2189,12 @@ try
path rf (pd / "result.manifest.lz4"); // Result manifest file.
path af (pd / "upload.tar"); // Archive of build artifacts to upload.
- serialize_manifest (tm, tf, "task");
-
if (ops.fake_machine_specified ())
{
- // Note: not handling interrupts here.
+ // Note: not handling interrupts here. Nor starting any auxiliary
+ // machines, naturally.
+
+ serialize_manifest (tm, tf, "task");
// Simply wait for the file to appear.
//
@@ -1368,6 +2216,38 @@ try
}
else
{
+ // Start the auxiliary machines if any.
+ //
+  // If anything goes wrong, force them all down (failing that, the machine
+ // destructor will block waiting for their exit).
+ //
+ auxiliary_machine_results amrs;
+
+ auto amg (
+ make_exception_guard (
+ [&amrs] ()
+ {
+ if (!amrs.empty ())
+ {
+ info << "trying to force auxiliary machines down";
+ stop_auxiliary_machines (amrs);
+ }
+ }));
+
+ if (!ams.empty ())
+ {
+ amrs = start_auxiliary_machines (ams, tm, in_name, pd, boost_cpus);
+
+ if (amrs.empty ())
+ return perform_task_result (move (arm), move (r)); // Abort.
+ }
+
+ // Note: tm.auxiliary_environment patched in by start_auxiliary_machines().
+ //
+ serialize_manifest (tm, tf, "task");
+
+ // Start the build machine and perform the build.
+ //
try_rmfile (rf);
try_rmfile (af);
@@ -1386,7 +2266,7 @@ try
//
tftp_server tftpd ("Gr ^/?(.+)$ /build/" + in_name + "/get/\\1\n" +
"Pr ^/?(.+)$ /build/" + in_name + "/put/\\1\n",
- ops.tftp_port () + offset);
+ ops.tftp_port () + offset + 0 /* build machine */);
l3 ([&]{trace << "tftp server on port " << tftpd.port ();});
@@ -1394,17 +2274,21 @@ try
// we have to cleanup the snapshot ourselves in case of suspension or
// unexpected exit.
//
+ // NOTE: see similar code in start_auxiliary_machine() above.
+ //
{
// Start the machine.
//
unique_ptr<machine> m (
start_machine (xp,
mm.machine,
+ 0 /* machine_num (build) */,
+ boost_cpus ? *boost_cpus : ops.cpu (),
+ ops.build_ram (),
mm.machine.mac,
ops.bridge (),
tftpd.port (),
- tm.interactive.has_value (),
- boost_cpus));
+ tm.interactive.has_value () /* public_vnc */));
auto mg (
make_exception_guard (
@@ -1417,7 +2301,10 @@ try
}
}));
- auto soft_fail = [&trace, &ml, &xp, &m, &arm, &r] (const char* msg)
+ auto soft_fail = [&trace,
+ &amrs,
+ &ml, &xp, &m,
+ &arm, &r] (const char* msg)
{
{
diag_record dr (error);
@@ -1425,6 +2312,18 @@ try
m->print_info (dr);
}
+ // What should we do about auxiliary machines? We could force them
+ // all down before suspending (and thus freeing them for use). That
+ // is the easy option. We could suspend them as well, but that feels
+ // like it will be a pain (will need to resume all of them when done
+ // investigating). Theoretically we could just let them run, but
+ // that won't play well with our interrupt logic since someone may
+          // attempt to interrupt us via one of them. So let's go with the
+          // easy option for now.
+ //
+ // Note: always stop/suspend the build machine before the auxiliary
+          // machines to avoid any errors due to the auxiliary machines being
+ // unavailable.
try
{
// Update the information in the machine lock to signal that the
@@ -1433,8 +2332,10 @@ try
ml.suspend_task ();
m->suspend (false);
+ stop_auxiliary_machines (amrs);
m->wait (false);
m->cleanup ();
+ m = nullptr; // Disable exceptions guard above.
run_btrfs (trace, "subvolume", "delete", xp);
info << "resuming after machine suspension";
}
@@ -1460,7 +2361,29 @@ try
return false;
};
- auto check_interrupt = [&trace, &xp, &m] ()
+ auto check_auxiliary_machines = [&amrs] ()
+ {
+ for (auxiliary_machine_result& amr: amrs)
+ {
+ try
+ {
+ size_t t (0);
+ if (!amr.machine->wait (t /* seconds */, false /* fail_hard */))
+ continue;
+ }
+ catch (const failed&) {}
+
+ diag_record dr (warn);
+ dr << "machine " << amr.snapshot << " exited unexpectedly";
+ amr.machine->print_info (dr);
+
+ return false;
+ }
+
+ return true;
+ };
+
+ auto check_interrupt = [&trace, &amrs, &xp, &m] ()
{
if (sigurs1.load (std::memory_order_consume) == 0)
return;
@@ -1468,6 +2391,7 @@ try
l2 ([&]{trace << "machine " << xp << " interruped";});
try {m->forcedown (false);} catch (const failed&) {}
+ stop_auxiliary_machines (amrs);
m->cleanup ();
m = nullptr; // Disable exceptions guard above.
run_btrfs (trace, "subvolume", "delete", xp);
@@ -1497,8 +2421,13 @@ try
if (tftpd.serve (to, 2))
break;
- if (!check_machine ())
+ bool bm; // Build machine still running.
+ if (!(bm = check_machine ()) || !check_auxiliary_machines ())
{
+ if (bm)
+ try {m->forcedown (false);} catch (const failed&) {}
+ stop_auxiliary_machines (amrs);
+ m = nullptr; // Disable exceptions guard above.
run_btrfs (trace, "subvolume", "delete", xp);
return perform_task_result (move (arm), move (r));
}
@@ -1516,6 +2445,7 @@ try
m->print_info (dr);
try {m->forcedown (false);} catch (const failed&) {}
+ m = nullptr; // Disable exceptions guard above.
continue;
}
@@ -1535,10 +2465,15 @@ try
if (tftpd.serve (to, 2))
continue;
- if (!check_machine ())
+ bool bm; // Build machine still running.
+ if (!(bm = check_machine ()) || !check_auxiliary_machines ())
{
- if (!file_not_empty (rf))
+ if (bm || !file_not_empty (rf))
{
+ if (bm)
+ try {m->forcedown (false);} catch (const failed&) {}
+ stop_auxiliary_machines (amrs);
+ m = nullptr; // Disable exceptions guard above.
run_btrfs (trace, "subvolume", "delete", xp);
return perform_task_result (move (arm), move (r));
}
@@ -1594,7 +2529,9 @@ try
// lease instead of a new one.
//
try {m->forcedown (false);} catch (const failed&) {}
+ stop_auxiliary_machines (amrs);
m->cleanup ();
+ m = nullptr; // Disable exceptions guard above.
}
}
@@ -1633,6 +2570,21 @@ try
verb = ops.verbose ();
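+  // Ad hoc test/usage example for divide_auxiliary_ram() above: with 4GiB
+  // of auxiliary RAM, m1 (min 512MiB, no max) and m2 (min 1GiB, max 1.5GiB)
+  // are expected to end up with 2560MiB and 1536MiB, respectively.
+  //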
+#if 0
+ // ./bbot-agent --auxiliary-ram 4194304
+ //
+ machine_header_manifest m1 {
+ "m1", "m1", "m1", machine_role::auxiliary, 512*1024, nullopt};
+ machine_header_manifest m2 {
+ "m2", "m2", "m2", machine_role::auxiliary, 1024*1024, 3*512*1024};
+ vector<const machine_header_manifest*> mms {&m1, &m2};
+ vector<size_t> rams (divide_auxiliary_ram (mms));
+ for (size_t i (0); i != rams.size (); ++i)
+ text << mms[i]->name << ' ' << rams[i] / 1024;
+
+ return 0;
+#endif
+
// @@ systemd 231 added JOURNAL_STREAM environment variable which allows
// detecting if stderr is connected to the journal.
//
@@ -1755,17 +2707,20 @@ try
: standard_version (BBOT_VERSION_STR));
tc_id = ops.toolchain_id ();
- if (tc_num == 0 || tc_num > 99)
- fail << "invalid --toolchain-num value " << tc_num;
+ if (tc_num == 0 || tc_num > 9)
+ fail << "--toolchain-num value " << tc_num << " out of range";
inst = ops.instance ();
if (inst == 0 || inst > 99)
- fail << "invalid --instance value " << inst;
+ fail << "--instance value " << inst << " out of range";
inst_max = ops.instance_max ();
- offset = (tc_num - 1) * 100 + inst;
+  // The last decimal position is used for the machine number: 0 for the
+  // build machine, non-0 for auxiliary machines (of which we can have at
+  // most 9).
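+  //
+  // For example, with the default --tftp-port 23400, toolchain 1 and
+  // instance 2 give offset 20: the build machine uses port 23420 and
+  // auxiliary machines use ports 23421-23429.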
+ //
+ offset = (tc_num - 1) * 1000 + inst * 10;
// Controller priority to URLs map.
//
@@ -1863,7 +2818,8 @@ try
dr <<
info << "cpu(s) " << ops.cpu () <<
- info << "ram(kB) " << ops.ram () <<
+ info << "build ram(KiB) " << ops.build_ram () <<
+ info << "auxil ram(KiB) " << ops.auxiliary_ram () <<
info << "bridge " << ops.bridge ();
if (fingerprint)
@@ -1916,7 +2872,7 @@ try
if (ops.interactive () != interactive_mode::false_)
{
imode = ops.interactive ();
- ilogin = machine_vnc (true /* public */);
+ ilogin = machine_vnc (0 /* machine_num */, true /* public */);
}
// Use the pkeyutl openssl command for signing the task response challenge
@@ -1958,25 +2914,29 @@ try
uint64_t prio_max (0);
bool prio_mon (false);
{
- uint16_t busy (0); // Number of machines locked by other processes.
- bool task (false); // There is a machine performing a task.
+ uint16_t busy (0); // Number of build machines locked by other processes.
+ bool task (false); // There is a build machine performing a task.
for (const bootstrapped_machine& m: ms)
{
if (!m.lock.locked ())
{
- ++busy;
-
if (m.lock.prio) // Not bootstrapping/suspended.
{
- task = true;
+ if (m.manifest.machine.effective_role () != machine_role::auxiliary)
+ {
+ ++busy;
+ task = true;
- if (prio_min > *m.lock.prio)
- prio_min = *m.lock.prio;
+ if (prio_min > *m.lock.prio)
+ prio_min = *m.lock.prio;
- if (prio_max < *m.lock.prio)
- prio_max = *m.lock.prio;
+ if (prio_max < *m.lock.prio)
+ prio_max = *m.lock.prio;
+ }
}
+ else
+ ++busy; // Assume build machine (see enumerate_machines()).
}
}
@@ -2066,14 +3026,19 @@ try
imode,
ilogin,
fingerprint,
+ ops.auxiliary_ram (),
machine_header_manifests {}};
// Determine which machines we need to offer for this priority.
//
+ bool aux_only (true); // Only auxiliary machines are available.
{
- bool interruptable (false);
+      bool interruptable (false); // There is a build machine we can interrupt.
for (const bootstrapped_machine& m: ms)
{
+ const machine_manifest& mm (m.manifest.machine);
+ machine_role role (mm.effective_role ());
+
if (!m.lock.locked ())
{
if (!m.lock.prio) // Skip bootstrapping/suspended.
@@ -2091,12 +3056,18 @@ try
if ((prio / 100) <= (eprio / 100))
continue;
- interruptable = true;
+ if (role != machine_role::auxiliary)
+ interruptable = true;
}
- tq.machines.emplace_back (m.manifest.machine.id,
- m.manifest.machine.name,
- m.manifest.machine.summary);
+ tq.machines.emplace_back (mm.id,
+ mm.name,
+ mm.summary,
+ role,
+ effective_ram_minimum (mm),
+ mm.ram_maximum);
+
+ aux_only = aux_only && role == machine_role::auxiliary;
}
// Sanity check: in the priority monitor mode we should only ask for a
@@ -2114,10 +3085,13 @@ try
return 0;
}
+ if (aux_only)
+ tq.machines.clear ();
+
if (tq.machines.empty ())
{
- // If we have no machines for this priority then we won't have any
- // for any lower priority so bail out.
+ // If we have no build machines for this priority then we won't have
+ // any for any lower priority so bail out.
//
break;
}
@@ -2125,7 +3099,7 @@ try
// Send task requests.
//
// Note that we have to do it while holding the lock on all the machines
- // since we don't know which machine we will need.
+ // since we don't know which machine(s) we will need.
//
vector<strings::const_iterator> rurls (urls.size ());
std::iota (rurls.begin (), rurls.end (), urls.begin ());
@@ -2166,9 +3140,8 @@ try
"--max-time", ops.request_timeout (),
"--connect-timeout", ops.connect_timeout ());
- // This is tricky/hairy: we may fail hard parsing the output
- // before seeing that curl exited with an error and failing
- // softly.
+ // This is tricky/hairy: we may fail hard parsing the output before
+ // seeing that curl exited with an error and failing softly.
//
bool f (false);
@@ -2251,7 +3224,7 @@ try
} // prio loop.
- if (tq.machines.empty ()) // No machines.
+ if (tq.machines.empty ()) // No machines (auxiliary-only already handled).
{
// Normally this means all the machines are busy so sleep a bit less.
//
@@ -2271,15 +3244,67 @@ try
//
task_manifest& t (*tr.task);
- // First verify the requested machine is one of those we sent in tq.
+ // First verify the requested machines are from those we sent in tq and
+ // their roles match.
+ //
+ // Also verify the same machine is not picked multiple times by blanking
+ // out the corresponding entry in tq.machines. (Currently we are only
+ // capable of running one instance of each machine though we may want to
+ // relax that in the future, at which point we should send as many entries
+ // for the same machine in the task request as we are capable of running,
+ // applying the priority logic for each entry, etc).
+ //
+ {
+ auto check = [&tq, &url] (const string& name, machine_role r)
+ {
+ auto i (find_if (tq.machines.begin (), tq.machines.end (),
+ [&name] (const machine_header_manifest& mh)
+ {
+ return mh.name == name; // Yes, names, not ids.
+ }));
+
+ if (i == tq.machines.end ())
+ {
+ error << "task from " << url << " for unknown machine " << name;
+ return false;
+ }
+
+ if (i->effective_role () != r)
+ {
+        error << "task from " << url << " with mismatched role"
+ << " for machine " << name;
+ return false;
+ }
+
+ i->name.clear (); // Blank out.
+
+ return true;
+ };
+
+ auto check_aux = [&check] (const vector<auxiliary_machine>& ams)
+ {
+ for (const auxiliary_machine& am: ams)
+ if (!check (am.name, machine_role::auxiliary))
+ return false;
+ return true;
+ };
+
+ if (!check (t.machine, machine_role::build) ||
+ !check_aux (t.auxiliary_machines))
+ {
+ if (ops.dump_task ())
+ return 0;
+
+ continue;
+ }
+ }
+
+ // Also verify there are no more than 9 auxiliary machines (see the offset
+ // global variable for details).
//
- if (find_if (tq.machines.begin (), tq.machines.end (),
- [&t] (const machine_header_manifest& mh)
- {
- return mh.name == t.machine; // Yes, names, not ids.
- }) == tq.machines.end ())
+ if (t.auxiliary_machines.size () > 9)
{
- error << "task from " << url << " for unknown machine " << t.machine;
+ error << "task from " << url << " with more than 9 auxiliary machines";
if (ops.dump_task ())
return 0;
@@ -2317,21 +3342,42 @@ try
// feels like the most sensible option).
//
perform_task_result r;
- bootstrapped_machine* pm (nullptr);
+ bootstrapped_machine* pm (nullptr); // Build machine.
+ vector<bootstrapped_machine*> ams; // Auxiliary machines.
try
{
- // Next find the corresponding bootstrapped_machine instance in ms. Also
- // unlock all the other machines.
+ // First find the bootstrapped_machine instance in ms corresponding to
+ // the requested build machine. Also unlock all the other machines.
//
// While at it also see if we need to interrupt the selected machine (if
// busy), one of the existing (if we are at the max allowed instances,
// that is in the priority monitor mode), or all existing (if this is a
// priority level 4 task).
//
- vector<bootstrapped_machine*> ims;
+ // Auxiliary machines complicate the matter a bit: we may now need to
+ // interrupt some subset of {build machine, auxiliary machines} that are
+ // necessary to perform this task. Note, however, that auxiliary
+ // machines are always subordinate to build machines, meaning that if
+ // there is a busy auxiliary machine, then there will be a busy build
+ // machine with the same pid/priority (and so if we interrupt one
+ // auxiliary, then we will also interrupt the corresponding build plus
+ // any other auxiliaries it may be using). Based on that let's try to
+ // divide and conquer this by first dealing with build machines and then
+ // adding any auxiliary ones.
+ //
+ vector<bootstrapped_machine*> ims; // Machines to be interrupted.
+ size_t imt (0); // Number of "target" machines to interrupt (see below).
+
+ // First pass: build machines.
+ //
for (bootstrapped_machine& m: ms)
{
- if (m.manifest.machine.name == t.machine)
+ const machine_manifest& mm (m.manifest.machine);
+
+ if (mm.effective_role () == machine_role::auxiliary)
+ continue;
+
+ if (mm.name == t.machine)
{
assert (pm == nullptr); // Sanity check.
pm = &m;
@@ -2359,16 +3405,71 @@ try
}
}
- assert (pm != nullptr);
+ assert (pm != nullptr); // Sanity check.
if (!pm->lock.locked ())
{
+ assert (pm->lock.prio); // Sanity check (not bootstrapping/suspended).
+
if (prio >= 1000)
ims.insert (ims.begin (), pm); // Interrupt first (see below).
else
ims = {pm};
+
+ imt++;
}
+ // Second pass: auxiliary machines.
+ //
+ for (bootstrapped_machine& m: ms)
+ {
+ const machine_manifest& mm (m.manifest.machine);
+
+ if (mm.effective_role () != machine_role::auxiliary)
+ continue;
+
+ if (find_if (t.auxiliary_machines.begin (), t.auxiliary_machines.end (),
+ [&mm] (const auxiliary_machine& am)
+ {
+ return am.name == mm.name;
+ }) != t.auxiliary_machines.end ())
+ {
+ if (!m.lock.locked ())
+ {
+ assert (m.lock.prio); // Sanity check (not bootstrapping/suspended).
+
+ if (ims.empty ())
+ {
+ ims.push_back (&m);
+ }
+ else if (ims.front () == pm)
+ {
+ ims.insert (ims.begin () + 1, &m); // Interrupt early (see below).
+ }
+ else if (prio < 1000 && prio_mon && ams.empty () /* first */)
+ {
+ // Tricky: replace the lowest priority task we have picked on
+ // the first pass with this one.
+ //
+ assert (ims.size () == 1); // Sanity check.
+ ims.back () = &m;
+ }
+ else
+ ims.insert (ims.begin (), &m); // Interrupt first (see below).
+
+ imt++;
+ }
+
+ ams.push_back (&m);
+ }
+ else if (m.lock.locked ())
+ m.lock.unlock ();
+ }
+
+ // Note: the order of machines may not match.
+ //
+ assert (ams.size () == t.auxiliary_machines.size ()); // Sanity check.
+
assert (!prio_mon || !ims.empty ()); // We should have at least one.
// Move the toolchain lock into this scope so that it's automatically
@@ -2381,23 +3482,26 @@ try
// Interrupt the machines, if necessary.
//
// Note that if we are interrupting multiple machines, then the target
- // machine, if needs to be interrupted, must be first. This way if we
- // are unable to successfully interrupt it, we don't interrupt the rest.
+ // build machine, if it needs to be interrupted, must be first, followed
+ // by all the target auxiliary machines. This way if we are unable to
+ // successfully interrupt them, we don't interrupt the rest.
//
- for (bootstrapped_machine* im: ims)
+ vector<pid_t> pids; // Avoid re-interrupting the same pid.
+ for (size_t i (0); i != ims.size (); ++i)
{
- bool first (im == ims.front ());
+ bootstrapped_machine* im (ims[i]);
// Sanity checks.
//
assert (!im->lock.locked () && im->lock.prio);
- assert (im != pm || first);
+ assert (im != pm || i == 0);
const dir_path& tp (im->path); // -<toolchain> path.
+ pid_t pid (im->lock.pid);
l2 ([&]{trace << "interrupting "
- << (im == pm ? "target" : "lower priority")
- << " machine " << tp << ", pid " << im->lock.pid;});
+ << (i < imt ? "target" : "lower priority")
+ << " machine " << tp << ", pid " << pid;});
// The plan is to send the interrupt and then wait for the lock.
//
@@ -2409,21 +3513,26 @@ try
// But what can happen is the other task becomes suspended, which we
// will not be able to interrupt.
//
- if (kill (im->lock.pid, SIGUSR1) == -1)
+ if (find (pids.begin (), pids.end (), pid) == pids.end ())
{
- // Ignore the case where there is no such process (e.g., the other
- // process has terminated in which case the lock should be released
- // automatically).
- //
- if (errno != ESRCH)
- throw_generic_error (errno);
+ if (kill (pid, SIGUSR1) == -1)
+ {
+ // Ignore the case where there is no such process (e.g., the other
+ // process has terminated in which case the lock should be
+ // released automatically).
+ //
+ if (errno != ESRCH)
+ throw_generic_error (errno);
+ }
+
+ pids.push_back (pid);
}
- // If we are interrupting multiple machine, there is no use acquiring
- // the lock (or failing if unable to) for subsequent machines since
- // this is merely a performance optimization.
+ // If we are interrupting additional machines in order to free up
+ // resources, there is no use acquiring their locks (or failing if
+ // unable to) since this is merely a performance optimization.
//
- if (!first)
+ if (i >= imt)
continue;
// Try to lock the machine.
@@ -2445,7 +3554,7 @@ try
if (ml.locked ())
break;
- if (ml.pid != im->lock.pid)
+ if (ml.pid != pid)
{
error << "interrupted machine " << tp << " changed pid";
throw interrupt ();
@@ -2465,26 +3574,27 @@ try
throw interrupt ();
}
- // If the interrupted machine is what we will use, see if it needs a
- // re-bootstrap, the same as in enumerate_machines(). If not, then
- // transfer the bootstrap manifest and lock.
+ // This is an interrupted machine (build or auxiliary) that we will be
+ // using. See if it needs a re-bootstrap, the same as in
+ // enumerate_machines(). If not, then transfer the bootstrap manifest
+ // and lock.
//
- if (im == pm)
- {
- const machine_manifest& mm (im->manifest.machine);
+ const machine_manifest& mm (im->manifest.machine);
- bootstrapped_machine_manifest bmm (
- parse_manifest<bootstrapped_machine_manifest> (
- tp / "manifest", "bootstrapped machine"));
+ bootstrapped_machine_manifest bmm (
+ parse_manifest<bootstrapped_machine_manifest> (
+ tp / "manifest", "bootstrapped machine"));
- bool rb (false);
+ bool rb (false);
- if (bmm.machine.id != mm.id)
- {
- l3 ([&]{trace << "re-bootstrap " << tp << ": new machine";});
- rb = true;
- }
+ if (bmm.machine.id != mm.id)
+ {
+ l3 ([&]{trace << "re-bootstrap " << tp << ": new machine";});
+ rb = true;
+ }
+ if (im == pm) // Only for build machine.
+ {
if (!tc_id.empty () && bmm.toolchain.id != tc_id)
{
l3 ([&]{trace << "re-bootstrap " << tp << ": new toolchain";});
@@ -2504,15 +3614,15 @@ try
rb = true;
}
}
+ }
- // We are not going to try to re-bootstrap this machine "inline".
- //
- if (rb)
- throw interrupt ();
+ // We are not going to try to re-bootstrap this machine "inline".
+ //
+ if (rb)
+ throw interrupt ();
- im->manifest = move (bmm);
- im->lock = move (ml);
- }
+ im->manifest = move (bmm);
+ im->lock = move (ml);
}
// Check if we need to boost the number of CPUs to the full hardware
@@ -2522,8 +3632,11 @@ try
if (prio >= 10000)
bcpus = std::thread::hardware_concurrency ();
- pm->lock.perform_task (tl, prio);
- r = perform_task (move (tl), pm->lock, pm->path, pm->manifest, t, bcpus);
+ pm->lock.perform_task (tl, prio); // Build machine.
+ for (bootstrapped_machine* am: ams) // Auxiliary machines.
+ am->lock.perform_task (tl, prio);
+
+ r = perform_task (move (tl), *pm, ams, t, bcpus);
}
catch (const interrupt&)
{
@@ -2540,8 +3653,14 @@ try
nullopt /* dependency_checksum */});
}
+ // No need to hold the locks any longer.
+ //
if (pm != nullptr && pm->lock.locked ())
- pm->lock.unlock (); // No need to hold the lock any longer.
+ pm->lock.unlock ();
+
+ for (bootstrapped_machine* am: ams)
+ if (am->lock.locked ())
+ am->lock.unlock ();
result_manifest& rm (r.manifest);
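
For reference, the check-and-blank verification above can be boiled down to the following minimal, self-contained sketch (stand-in types and names, not the actual bbot classes): a requested machine must match an entry offered in the task request, its role must agree, and the matched entry is blanked out so the same machine cannot be selected twice.

// Illustrative sketch only: simplified stand-ins for bbot's
// machine_header_manifest and machine_role.
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

enum class machine_role {build, auxiliary};

struct machine_header
{
  std::string name;
  machine_role role;
};

// Return true if name/role matches an offered entry and blank that entry
// out so it cannot be matched again.
//
static bool
check (std::vector<machine_header>& offered,
       const std::string& name,
       machine_role r)
{
  auto i (std::find_if (offered.begin (), offered.end (),
                        [&name] (const machine_header& mh)
                        {
                          return mh.name == name;
                        }));

  if (i == offered.end () || i->role != r)
    return false;

  i->name.clear (); // Blank out: each machine can only be picked once.
  return true;
}

int
main ()
{
  std::vector<machine_header> offered {
    {"x86_64-linux_debian_12",               machine_role::build},
    {"x86_64-linux_debian_12-postgresql_15", machine_role::auxiliary}};

  // Prints 1 then 0: the second request for the same build machine fails
  // because its entry has already been blanked out.
  //
  std::cout << check (offered, "x86_64-linux_debian_12", machine_role::build)
            << '\n'
            << check (offered, "x86_64-linux_debian_12", machine_role::build)
            << '\n';
}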
diff --git a/bbot/agent/agent.hxx b/bbot/agent/agent.hxx
index 72b819b..9c8400f 100644
--- a/bbot/agent/agent.hxx
+++ b/bbot/agent/agent.hxx
@@ -22,14 +22,14 @@ namespace bbot
extern standard_version tc_ver; // Toolchain version.
extern string tc_id; // Toolchain id.
- extern uint16_t inst; // Instance number.
+ extern uint16_t inst; // Instance number (1-based).
extern string hname; // Our host name.
extern string hip; // Our IP address.
extern uid_t uid; // Our effective user id.
extern string uname; // Our effective user name.
- extern uint16_t offset; // Agent offset.
+ extern uint16_t offset; // Agent offset (10-9990; used for ports).
// Random number generator (currently not MT-safe and limited to RAND_MAX).
//
diff --git a/bbot/agent/machine.cxx b/bbot/agent/machine.cxx
index 84916f6..74c9b93 100644
--- a/bbot/agent/machine.cxx
+++ b/bbot/agent/machine.cxx
@@ -83,9 +83,9 @@ namespace bbot
}
static string
- create_tap (const string& br, uint16_t port)
+ create_tap (const string& br, uint16_t machine_num, uint16_t port)
{
- string t ("tap" + to_string (offset));
+ string t ("tap" + to_string (offset + machine_num));
tracer trace ("create_tap", t.c_str ());
@@ -126,8 +126,10 @@ namespace bbot
string bridge; // Bridge interface to which this tap belongs
uint16_t port; // UDP port to forward TFTP traffic to.
- tap (string b, uint16_t p)
- : iface (create_tap (b, p)), bridge (move (b)), port (p) {}
+ tap (string b, uint16_t machine_num, uint16_t p)
+ : iface (create_tap (b, machine_num, p)),
+ bridge (move (b)),
+ port (p) {}
~tap ()
{
@@ -169,11 +171,13 @@ namespace bbot
public:
kvm_machine (const dir_path&,
const machine_manifest&,
+ uint16_t machine_num,
+ size_t cpus,
+ size_t ram,
const optional<string>& mac,
const string& br_iface,
uint16_t tftp_port,
- bool pub_vnc,
- optional<size_t> boost_cpus = nullopt);
+ bool pub_vnc);
virtual bool
shutdown (size_t& seconds) override;
@@ -214,31 +218,47 @@ namespace bbot
kvm_machine::
kvm_machine (const dir_path& md,
const machine_manifest& mm,
+ uint16_t m_num,
+ size_t cpus,
+ size_t ram,
const optional<string>& omac,
const string& br,
- uint16_t port,
- bool pub_vnc,
- optional<size_t> bcpus)
+ uint16_t tftp_port,
+ bool pub_vnc)
: machine (mm.mac ? *mm.mac : // Fixed mac from machine manifest.
omac ? *omac : // Generated mac from previous bootstrap.
generate_mac ()),
kvm ("kvm"),
- net (br, port),
- vnc (machine_vnc (pub_vnc)),
+ net (br, m_num, tftp_port),
+ vnc (machine_vnc (m_num, pub_vnc)),
monitor ("/tmp/monitor-" + tc_name + '-' + to_string (inst))
{
tracer trace ("kvm_machine", md.string ().c_str ());
+ // Monitor path.
+ //
+ if (m_num != 0)
+ {
+ monitor += '-';
+ monitor += to_string (m_num);
+ }
+
if (sizeof (sockaddr_un::sun_path) <= monitor.size ())
throw invalid_argument ("monitor unix socket path too long");
// Machine name.
//
// While we currently can only have one running machine per toolchain, add
- // the instance number for debuggability.
+ // the instance number and non-0 machine number for debuggability.
//
string name (mm.name + '-' + tc_name + '-' + to_string (inst));
+ if (m_num != 0)
+ {
+ name += '-';
+ name += to_string (m_num);
+ }
+
// Machine log. Note that it is only removed with an explicit cleanup()
// call.
//
@@ -252,7 +272,7 @@ namespace bbot
// Note that for best results you may want to adjust (e.g., by over-
// committing) the number of CPUs to be power of 2.
//
- size_t cpus (bcpus ? *bcpus : ops.cpu ()), cores (cpus);
+ size_t cores (cpus);
size_t sockets (cores >= 256 && cores % 8 == 0 ? 4 :
cores >= 128 && cores % 4 == 0 ? 2 : 1);
@@ -261,26 +281,6 @@ namespace bbot
size_t threads (cores >= 16 && cores % 4 == 0 ? 2 : 1);
cores /= threads;
- // We probably don't want to commit all the available RAM to the VM since
- // some of it could be used on the host side for caching, etc. So the
- // heuristics that we will use is 4G or 1G per CPU, whichever is greater
- // and the rest divide equally between the host and the VM.
- //
- // But the experience showed that we actually want to be able to precisely
- // control the amount of RAM assigned to VMs (e.g., for tmpfs size) and
- // without back-fudging for this heuristics.
- //
-#if 0
- size_t ram ((cpus < 4 ? 4 : cpus) * 1024 * 1024); // Kb.
-
- if (ram > ops.ram ())
- ram = ops.ram ();
- else
- ram += (ops.ram () - ram) / 2;
-#else
- size_t ram (ops.ram ());
-#endif
-
// If we have options, use that instead of the default network and
// disk configuration.
//
@@ -434,7 +434,7 @@ namespace bbot
// collision-wise with anything useful.
//
"-vnc",
- (pub_vnc ? ":" : "127.0.0.1:") + to_string (offset), // 5900 + offset
+ (pub_vnc ? ":" : "127.0.0.1:") + to_string (offset + m_num), // 5900-base
// QMP.
//
@@ -672,31 +672,37 @@ namespace bbot
unique_ptr<machine>
start_machine (const dir_path& md,
const machine_manifest& mm,
+ uint16_t machine_num,
+ size_t cpus,
+ size_t ram,
const optional<string>& mac,
const string& br_iface,
uint16_t tftp_port,
- bool pub_vnc,
- optional<size_t> bcpus)
+ bool pub_vnc)
{
+ assert (machine_num < 10);
+
switch (mm.type)
{
case machine_type::kvm:
return make_unique<kvm_machine> (
- md, mm, mac, br_iface, tftp_port, pub_vnc, bcpus);
+ md, mm, machine_num, cpus, ram, mac, br_iface, tftp_port, pub_vnc);
case machine_type::nspawn:
- assert (false); //@@ TODO
+ assert (false); // @@ TODO
}
return nullptr;
}
string
- machine_vnc (bool pub)
+ machine_vnc (uint16_t num, bool pub)
{
+ assert (num < 10);
+
string r (pub ? hip : "127.0.0.1");
r += ':';
- r += to_string (5900 + offset);
+ r += to_string (5900 + offset + num);
return r;
}
}
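
The per-machine numbering above can be summarized with a small standalone sketch (illustrative only; the offset, toolchain name, and instance values below are made up): the tap interface, VNC port, and monitor socket are all derived from the agent offset plus the 0-9 machine number, with 0 denoting the build machine and 1-9 the auxiliary machines.

// Illustrative sketch, not the actual bbot code.
#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>

int
main ()
{
  std::uint16_t offset (20);       // Assumed agent offset.
  std::string tc_name ("default"); // Assumed toolchain name.
  std::uint16_t inst (1);          // Assumed instance number.

  for (std::uint16_t num (0); num != 3; ++num)
  {
    assert (num < 10); // At most 9 auxiliary machines plus the build machine.

    std::string tap ("tap" + std::to_string (offset + num));
    std::uint16_t vnc (5900 + offset + num);

    std::string mon ("/tmp/monitor-" + tc_name + '-' + std::to_string (inst));
    if (num != 0)
      mon += '-' + std::to_string (num);

    std::cout << tap << ' ' << vnc << ' ' << mon << '\n';
  }
}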
diff --git a/bbot/agent/machine.hxx b/bbot/agent/machine.hxx
index 0bb74b9..13646db 100644
--- a/bbot/agent/machine.hxx
+++ b/bbot/agent/machine.hxx
@@ -78,20 +78,28 @@ namespace bbot
class machine_manifest;
+ // The machine number should be between 0-9 with 0 for the build machine and
+ // 1-9 for the auxiliary machines.
+ //
+ // Note that tftp_port is not a base (in other words, it is expected to
+ // already be appropriately offset).
+ //
unique_ptr<machine>
start_machine (const dir_path&,
const machine_manifest&,
+ uint16_t machine_num,
+ size_t cpus,
+ size_t ram, // In KiB.
const optional<string>& mac,
const string& br_iface,
uint16_t tftp_port,
- bool pub_vnc,
- optional<size_t> boost_cpus = nullopt);
+ bool public_vnc);
// Return the machine's public or private VNC session endpoint in the
// '<ip>:<port>' form.
//
string
- machine_vnc (bool pub_vnc);
+ machine_vnc (uint16_t machine_num, bool public_vnc);
}
#endif // BBOT_AGENT_MACHINE_HXX
diff --git a/bbot/agent/tftp.hxx b/bbot/agent/tftp.hxx
index 103c9d6..5306dd1 100644
--- a/bbot/agent/tftp.hxx
+++ b/bbot/agent/tftp.hxx
@@ -29,7 +29,7 @@ namespace bbot
port () const;
// Wait for a TFTP request for up to the specified number of seconds. If
- // increment is not 0, then wait in the specified incremenets (i.e., wait
+ // increment is not 0, then wait in the specified increments (i.e., wait
// for up to that number of seconds; useful when one needs to also
// periodically check for something else). Update the timeout value as
// well as return true if a request was served and false otherwise.
diff --git a/bbot/bbot-agent@.service b/bbot/bbot-agent@.service
index 18b7c9e..253cc61 100644
--- a/bbot/bbot-agent@.service
+++ b/bbot/bbot-agent@.service
@@ -14,7 +14,8 @@ Type=simple
Environment=VERBOSE=3
Environment=CPU=1
-Environment=RAM=1048576
+Environment=RAM_BUILD=4194304
+Environment=RAM_AUXIL=0
Environment=BRIDGE=br1
Environment=AUTH_KEY=
@@ -22,6 +23,7 @@ Environment=AUTH_KEY=
Environment=INTERACTIVE=false
Environment=BOOTSTRAP_TIMEOUT=3600
+Environment=BOOTSTRAP_AUXILIARY=900
Environment=BOOTSTRAP_RETRIES=2
Environment=BUILD_TIMEOUT=5400
@@ -49,11 +51,13 @@ ExecStart=/build/bots/default/bin/bbot-agent \
--systemd-daemon \
--verbose ${VERBOSE} \
--cpu ${CPU} \
- --ram ${RAM} \
+ --build-ram ${RAM_BUILD} \
+ --auxiliary-ram ${RAM_AUXIL} \
--bridge ${BRIDGE} \
--auth-key ${AUTH_KEY} \
--interactive ${INTERACTIVE} \
--bootstrap-timeout ${BOOTSTRAP_TIMEOUT} \
+ --bootstrap-auxiliary ${BOOTSTRAP_AUXILIARY} \
--bootstrap-retries ${BOOTSTRAP_RETRIES} \
--build-timeout ${BUILD_TIMEOUT} \
--build-retries ${BUILD_RETRIES} \
diff --git a/bbot/buildfile b/bbot/buildfile
index cb7b576..bbca810 100644
--- a/bbot/buildfile
+++ b/bbot/buildfile
@@ -99,7 +99,7 @@ if $cli.configured
# Usage options.
#
cli.options += --suppress-undocumented --long-usage --ansi-color \
---page-usage 'bbot::print_$name$_' --option-length 23
+--page-usage 'bbot::print_$name$_' --option-length 25
agent/cli.cxx{agent-options}: cli.options += --include-prefix bbot/agent \
--guard-prefix BBOT_AGENT
diff --git a/bbot/common-options.cxx b/bbot/common-options.cxx
new file mode 100644
index 0000000..7f351e9
--- /dev/null
+++ b/bbot/common-options.cxx
@@ -0,0 +1,740 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+#include <bbot/types-parsers.hxx>
+//
+// End prologue.
+
+#include <bbot/common-options.hxx>
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+#include <utility>
+#include <ostream>
+#include <sstream>
+#include <cstring>
+
+namespace bbot
+{
+ namespace cli
+ {
+ // unknown_option
+ //
+ unknown_option::
+ ~unknown_option () noexcept
+ {
+ }
+
+ void unknown_option::
+ print (::std::ostream& os) const
+ {
+ os << "unknown option '" << option ().c_str () << "'";
+ }
+
+ const char* unknown_option::
+ what () const noexcept
+ {
+ return "unknown option";
+ }
+
+ // unknown_argument
+ //
+ unknown_argument::
+ ~unknown_argument () noexcept
+ {
+ }
+
+ void unknown_argument::
+ print (::std::ostream& os) const
+ {
+ os << "unknown argument '" << argument ().c_str () << "'";
+ }
+
+ const char* unknown_argument::
+ what () const noexcept
+ {
+ return "unknown argument";
+ }
+
+ // missing_value
+ //
+ missing_value::
+ ~missing_value () noexcept
+ {
+ }
+
+ void missing_value::
+ print (::std::ostream& os) const
+ {
+ os << "missing value for option '" << option ().c_str () << "'";
+ }
+
+ const char* missing_value::
+ what () const noexcept
+ {
+ return "missing option value";
+ }
+
+ // invalid_value
+ //
+ invalid_value::
+ ~invalid_value () noexcept
+ {
+ }
+
+ void invalid_value::
+ print (::std::ostream& os) const
+ {
+ os << "invalid value '" << value ().c_str () << "' for option '"
+ << option ().c_str () << "'";
+
+ if (!message ().empty ())
+ os << ": " << message ().c_str ();
+ }
+
+ const char* invalid_value::
+ what () const noexcept
+ {
+ return "invalid option value";
+ }
+
+ // eos_reached
+ //
+ void eos_reached::
+ print (::std::ostream& os) const
+ {
+ os << what ();
+ }
+
+ const char* eos_reached::
+ what () const noexcept
+ {
+ return "end of argument stream reached";
+ }
+
+ // unexpected_group
+ //
+ unexpected_group::
+ ~unexpected_group () noexcept
+ {
+ }
+
+ void unexpected_group::
+ print (::std::ostream& os) const
+ {
+ os << "unexpected grouped argument '" << group_ << "' "
+ << "for argument '" << argument_ << "'";
+ }
+
+ const char* unexpected_group::
+ what () const noexcept
+ {
+ return "unexpected grouped argument";
+ }
+
+ // group_separator
+ //
+ group_separator::
+ ~group_separator () noexcept
+ {
+ }
+
+ void group_separator::
+ print (::std::ostream& os) const
+ {
+ bool ex (!expected_.empty ());
+ bool en (!encountered_.empty ());
+
+ if (ex)
+ {
+ os << "expected group separator '" << expected_ << "'";
+ if (en)
+ os << " instead of '" << encountered_ << "'";
+ }
+ else
+ os << "unexpected group separator '" << encountered_ << "'";
+
+ if (en)
+ os << ", use '\\" << encountered_ << "' to escape";
+ }
+
+ const char* group_separator::
+ what () const noexcept
+ {
+ bool ex (!expected_.empty ());
+ bool en (!encountered_.empty ());
+
+ return en
+ ? ex ? "wrong group separator" : "unexpected group separator"
+ : ex ? "expected group separator" : "";
+ }
+
+ // scanner
+ //
+ scanner::
+ ~scanner ()
+ {
+ }
+
+ // argv_scanner
+ //
+ bool argv_scanner::
+ more ()
+ {
+ return i_ < argc_;
+ }
+
+ const char* argv_scanner::
+ peek ()
+ {
+ if (i_ < argc_)
+ return argv_[i_];
+ else
+ throw eos_reached ();
+ }
+
+ const char* argv_scanner::
+ next ()
+ {
+ if (i_ < argc_)
+ {
+ const char* r (argv_[i_]);
+
+ if (erase_)
+ {
+ for (int i (i_ + 1); i < argc_; ++i)
+ argv_[i - 1] = argv_[i];
+
+ --argc_;
+ argv_[argc_] = 0;
+ }
+ else
+ ++i_;
+
+ ++start_position_;
+ return r;
+ }
+ else
+ throw eos_reached ();
+ }
+
+ void argv_scanner::
+ skip ()
+ {
+ if (i_ < argc_)
+ {
+ ++i_;
+ ++start_position_;
+ }
+ else
+ throw eos_reached ();
+ }
+
+ std::size_t argv_scanner::
+ position ()
+ {
+ return start_position_;
+ }
+
+ // vector_scanner
+ //
+ bool vector_scanner::
+ more ()
+ {
+ return i_ < v_.size ();
+ }
+
+ const char* vector_scanner::
+ peek ()
+ {
+ if (i_ < v_.size ())
+ return v_[i_].c_str ();
+ else
+ throw eos_reached ();
+ }
+
+ const char* vector_scanner::
+ next ()
+ {
+ if (i_ < v_.size ())
+ return v_[i_++].c_str ();
+ else
+ throw eos_reached ();
+ }
+
+ void vector_scanner::
+ skip ()
+ {
+ if (i_ < v_.size ())
+ ++i_;
+ else
+ throw eos_reached ();
+ }
+
+ std::size_t vector_scanner::
+ position ()
+ {
+ return start_position_ + i_;
+ }
+
+ // group_scanner
+ //
+ bool group_scanner::
+ more ()
+ {
+ // We don't want to call scan_group() here since that
+ // would invalidate references to previous arguments.
+ // But we do need to check that the previous group was
+ // handled.
+ //
+ if (state_ == scanned)
+ {
+ if (group_scan_.end () != group_.size ())
+ throw unexpected_group (arg_[i_][j_], group_scan_.next ());
+ }
+
+ return j_ != 0 || scan_.more ();
+ }
+
+ const char* group_scanner::
+ peek ()
+ {
+ if (state_ != peeked)
+ {
+ scan_group ();
+ state_ = peeked;
+ }
+
+ // Return unescaped.
+ return arg_[i_][j_ - 1].c_str ();
+ }
+
+ const char* group_scanner::
+ next ()
+ {
+ if (state_ != peeked)
+ scan_group ();
+ state_ = scanned;
+ // Return unescaped.
+ return arg_[i_][--j_].c_str ();
+ }
+
+ void group_scanner::
+ skip ()
+ {
+ if (state_ != peeked)
+ scan_group ();
+ state_ = skipped;
+ --j_;
+ }
+
+ std::size_t group_scanner::
+ position ()
+ {
+ return j_ == 0 ? scan_.position () : pos_ + (arg_[i_].size () - j_);
+ }
+
+ void group_scanner::
+ scan_group ()
+ {
+ // If the previous argument has been scanned, then make
+ // sure the group has been scanned (handled) as well.
+ //
+ if (state_ == scanned)
+ {
+ if (group_scan_.end () != group_.size ())
+ throw unexpected_group (arg_[i_][j_], group_scan_.next ());
+ }
+
+ // If we still have arguments in the pack, rewind the group.
+ //
+ if (j_ != 0)
+ {
+ group_scan_.reset ();
+ return;
+ }
+
+ i_ += (i_ == 0 ? 1 : -1);
+ group_.clear ();
+ group_scan_.reset ();
+ pos_ = scan_.position ();
+
+ // Note: using group_ won't cover empty groups and using
+ // j_ won't cover single-argument packs.
+ //
+ bool group (false), pack (false);
+
+ do
+ {
+ const char* a (scan_.next ());
+ size_t i (*a == '\\' ? 1 : 0);
+ separator s (sense (a + i));
+
+ if (s == none || i != 0)
+ {
+ if (arg_[i_].size () != 1)
+ arg_[i_].resize (1);
+
+ arg_[i_][0] = a + (s != none ? i : 0);
+ j_ = 1;
+ break;
+ }
+
+ // Start of a leading group for the next argument or
+ // argument pack. We will only know which once we see
+ // the closing separator.
+ //
+ if (s != open)
+ throw group_separator (a, "");
+
+ size_t n (group_.size ());
+
+ // Scan the group until the closing separator.
+ //
+ s = none;
+ while (s == none && scan_.more ())
+ {
+ a = scan_.next ();
+ i = (*a == '\\' ? 1 : 0);
+ s = sense (a + i);
+
+ if (s == none || i != 0)
+ {
+ group_.push_back (a + (s != none ? i : 0));
+ s = none;
+ }
+ }
+
+ if (s == close)
+ {
+ size_t m (group_.size ());
+
+ j_ = m - n;
+ if (j_ == 0)
+ throw group_separator ("{", "");
+
+ if (arg_[i_].size () != j_)
+ arg_[i_].resize (j_);
+
+ // Move from group_ to arg_. Add in reverse for ease
+ // of iteration.
+ //
+ for (size_t j (0); j != j_; ++j)
+ arg_[i_][j] = group_[m - j - 1];
+ group_.resize (n);
+
+ pack = true;
+ break;
+ }
+ else if (s == close_plus)
+ group = true;
+ else
+ throw group_separator ((s != none ? a : ""), "}+");
+ }
+ while (scan_.more ());
+
+ // Handle the case where we have seen the leading group
+ // but there are no more arguments.
+ //
+ if (group && j_ == 0)
+ throw group_separator ("{", "");
+
+ // Handle trailing groups, if any.
+ //
+ while (scan_.more ())
+ {
+ const char* a (scan_.peek ());
+ size_t i (*a == '\\' ? 1 : 0);
+ separator s (sense (a + i));
+
+ // Next argument, argument pack, or leading group.
+ //
+ if (s == none || s == open || i != 0)
+ break;
+
+ if (s != open_plus)
+ throw group_separator (a, "");
+
+ group = true;
+
+ // Scan the group until the closing separator.
+ //
+ scan_.next ();
+ s = none;
+ while (s == none && scan_.more ())
+ {
+ a = scan_.next ();
+ i = (*a == '\\' ? 1 : 0);
+ s = sense (a + i);
+
+ if (s == none || i != 0)
+ {
+ group_.push_back (a + (s != none ? i : 0));
+ s = none;
+ }
+ }
+
+ if (s != close)
+ throw group_separator ((s != none ? a : ""), "}");
+ }
+
+ // Handle the case where we have seen the argument pack
+ // without leading or trailing group.
+ //
+ if (pack && !group)
+ throw group_separator ("{", "");
+ }
+
+ template <typename X>
+ struct parser
+ {
+ static void
+ parse (X& x, bool& xs, scanner& s)
+ {
+ using namespace std;
+
+ const char* o (s.next ());
+ if (s.more ())
+ {
+ string v (s.next ());
+ istringstream is (v);
+ if (!(is >> x && is.peek () == istringstream::traits_type::eof ()))
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<bool>
+ {
+ static void
+ parse (bool& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<std::string>
+ {
+ static void
+ parse (std::string& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ x = s.next ();
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X>
+ struct parser<std::pair<X, std::size_t> >
+ {
+ static void
+ parse (std::pair<X, std::size_t>& x, bool& xs, scanner& s)
+ {
+ x.second = s.position ();
+ parser<X>::parse (x.first, xs, s);
+ }
+ };
+
+ template <typename X>
+ struct parser<std::vector<X> >
+ {
+ static void
+ parse (std::vector<X>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.push_back (x);
+ xs = true;
+ }
+ };
+
+ template <typename X, typename C>
+ struct parser<std::set<X, C> >
+ {
+ static void
+ parse (std::set<X, C>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.insert (x);
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::map<K, V, C> >
+ {
+ static void
+ parse (std::map<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m[k] = v;
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X, typename T, T X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, s);
+ }
+
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
+ template <typename X, typename T, T X::*M, bool X::*S>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, x.*S, s);
+ }
+ }
+}
+
+#include <map>
+
+namespace bbot
+{
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
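As a usage illustration of the generated scanners above (assuming it is compiled within the bbot source tree so that the generated header is available), the following sketch wraps an argv_scanner in a group_scanner and prints every argument together with its group, if any:

// Illustrative sketch: driving the generated scanners by hand.
#include <bbot/common-options.hxx>

#include <iostream>

int
main (int argc, char* argv[])
{
  using namespace bbot::cli;

  try
  {
    argv_scanner as (argc, argv);
    group_scanner gs (as);

    while (gs.more ())
    {
      std::cout << "argument: " << gs.next () << '\n';

      // The group is only valid until the next scanner call (see above).
      //
      scanner& g (gs.group ());
      while (g.more ())
        std::cout << "  group: " << g.next () << '\n';
    }
  }
  catch (const bbot::cli::exception& e)
  {
    std::cerr << e << '\n';
    return 1;
  }
}

For example, invoking it as "prog foo +{ bar baz }" should print foo as an argument with bar and baz as its group.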
diff --git a/bbot/common-options.hxx b/bbot/common-options.hxx
new file mode 100644
index 0000000..e865d6e
--- /dev/null
+++ b/bbot/common-options.hxx
@@ -0,0 +1,450 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+#ifndef BBOT_COMMON_OPTIONS_HXX
+#define BBOT_COMMON_OPTIONS_HXX
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+#include <vector>
+#include <iosfwd>
+#include <string>
+#include <cstddef>
+#include <exception>
+
+#ifndef CLI_POTENTIALLY_UNUSED
+# if defined(_MSC_VER) || defined(__xlC__)
+# define CLI_POTENTIALLY_UNUSED(x) (void*)&x
+# else
+# define CLI_POTENTIALLY_UNUSED(x) (void)x
+# endif
+#endif
+
+namespace bbot
+{
+ namespace cli
+ {
+ class usage_para
+ {
+ public:
+ enum value
+ {
+ none,
+ text,
+ option
+ };
+
+ usage_para (value);
+
+ operator value () const
+ {
+ return v_;
+ }
+
+ private:
+ value v_;
+ };
+
+ class unknown_mode
+ {
+ public:
+ enum value
+ {
+ skip,
+ stop,
+ fail
+ };
+
+ unknown_mode (value);
+
+ operator value () const
+ {
+ return v_;
+ }
+
+ private:
+ value v_;
+ };
+
+ // Exceptions.
+ //
+
+ class exception: public std::exception
+ {
+ public:
+ virtual void
+ print (::std::ostream&) const = 0;
+ };
+
+ ::std::ostream&
+ operator<< (::std::ostream&, const exception&);
+
+ class unknown_option: public exception
+ {
+ public:
+ virtual
+ ~unknown_option () noexcept;
+
+ unknown_option (const std::string& option);
+
+ const std::string&
+ option () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string option_;
+ };
+
+ class unknown_argument: public exception
+ {
+ public:
+ virtual
+ ~unknown_argument () noexcept;
+
+ unknown_argument (const std::string& argument);
+
+ const std::string&
+ argument () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string argument_;
+ };
+
+ class missing_value: public exception
+ {
+ public:
+ virtual
+ ~missing_value () noexcept;
+
+ missing_value (const std::string& option);
+
+ const std::string&
+ option () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string option_;
+ };
+
+ class invalid_value: public exception
+ {
+ public:
+ virtual
+ ~invalid_value () noexcept;
+
+ invalid_value (const std::string& option,
+ const std::string& value,
+ const std::string& message = std::string ());
+
+ const std::string&
+ option () const;
+
+ const std::string&
+ value () const;
+
+ const std::string&
+ message () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string option_;
+ std::string value_;
+ std::string message_;
+ };
+
+ class eos_reached: public exception
+ {
+ public:
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+ };
+
+ class unexpected_group: public exception
+ {
+ public:
+ virtual
+ ~unexpected_group () noexcept;
+
+ unexpected_group (const std::string& argument,
+ const std::string& group);
+
+ const std::string&
+ argument () const;
+
+ const std::string&
+ group () const;
+
+ virtual void
+ print (std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string argument_;
+ std::string group_;
+ };
+
+ class group_separator: public exception
+ {
+ public:
+ virtual
+ ~group_separator () noexcept;
+
+ // Note: either (but not both) can be empty.
+ //
+ group_separator (const std::string& encountered,
+ const std::string& expected);
+
+ const std::string&
+ encountered () const;
+
+ const std::string&
+ expected () const;
+
+ virtual void
+ print (std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string encountered_;
+ std::string expected_;
+ };
+
+ // Command line argument scanner interface.
+ //
+ // The values returned by next() are guaranteed to be valid
+ // for the two previous arguments up until a call to a third
+ // peek() or next().
+ //
+ // The position() function returns a monotonically-increasing
+ // number which, if stored, can later be used to determine the
+ // relative position of the argument returned by the following
+ // call to next(). Note that if multiple scanners are used to
+ // extract arguments from multiple sources, then the end
+ // position of the previous scanner should be used as the
+ // start position of the next.
+ //
+ class scanner
+ {
+ public:
+ virtual
+ ~scanner ();
+
+ virtual bool
+ more () = 0;
+
+ virtual const char*
+ peek () = 0;
+
+ virtual const char*
+ next () = 0;
+
+ virtual void
+ skip () = 0;
+
+ virtual std::size_t
+ position () = 0;
+ };
+
+ class argv_scanner: public scanner
+ {
+ public:
+ argv_scanner (int& argc,
+ char** argv,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ argv_scanner (int start,
+ int& argc,
+ char** argv,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ int
+ end () const;
+
+ virtual bool
+ more ();
+
+ virtual const char*
+ peek ();
+
+ virtual const char*
+ next ();
+
+ virtual void
+ skip ();
+
+ virtual std::size_t
+ position ();
+
+ protected:
+ std::size_t start_position_;
+ int i_;
+ int& argc_;
+ char** argv_;
+ bool erase_;
+ };
+
+ class vector_scanner: public scanner
+ {
+ public:
+ vector_scanner (const std::vector<std::string>&,
+ std::size_t start = 0,
+ std::size_t start_position = 0);
+
+ std::size_t
+ end () const;
+
+ void
+ reset (std::size_t start = 0, std::size_t start_position = 0);
+
+ virtual bool
+ more ();
+
+ virtual const char*
+ peek ();
+
+ virtual const char*
+ next ();
+
+ virtual void
+ skip ();
+
+ virtual std::size_t
+ position ();
+
+ private:
+ std::size_t start_position_;
+ const std::vector<std::string>& v_;
+ std::size_t i_;
+ };
+
+ class group_scanner: public scanner
+ {
+ public:
+ group_scanner (scanner&);
+
+ virtual bool
+ more ();
+
+ virtual const char*
+ peek ();
+
+ virtual const char*
+ next ();
+
+ virtual void
+ skip ();
+
+ virtual std::size_t
+ position ();
+
+ // The group is only available after the call to next()
+ // (and skip() -- in case one needs to make sure the group
+ // was empty, or some such) and is only valid (and must be
+ // handled) until the next call to any of the scanner
+ // functions (including more()).
+ //
+ // Note also that argument positions within each group start
+ // from 0.
+ //
+ scanner&
+ group ();
+
+ // Escape an argument that is a group separator. Return the
+ // passed string if no escaping is required.
+ //
+ static const char*
+ escape (const char*);
+
+ private:
+ enum state
+ {
+ peeked, // Argument peeked at with peek().
+ scanned, // Argument scanned with next().
+ skipped, // Argument skipped with skip()/initial.
+ };
+
+ enum separator
+ {
+ none,
+ open, // {
+ close, // }
+ open_plus, // +{
+ close_plus // }+
+ };
+
+ static separator
+ sense (const char*);
+
+ // Scan the leading groups, the next argument/argument pack,
+ // and the trailing groups.
+ //
+ void
+ scan_group ();
+
+ scanner& scan_;
+ state state_;
+
+ // Circular buffer of two arguments.
+ //
+ std::vector<std::string> arg_[2];
+ std::size_t i_, j_, pos_;
+
+ std::vector<std::string> group_;
+ vector_scanner group_scan_;
+ };
+
+ template <typename X>
+ struct parser;
+ }
+}
+
+#include <bbot/types.hxx>
+
+namespace bbot
+{
+}
+
+#include <bbot/common-options.ixx>
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
+#endif // BBOT_COMMON_OPTIONS_HXX
diff --git a/bbot/common-options.ixx b/bbot/common-options.ixx
new file mode 100644
index 0000000..3a35de9
--- /dev/null
+++ b/bbot/common-options.ixx
@@ -0,0 +1,281 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+#include <cassert>
+
+namespace bbot
+{
+ namespace cli
+ {
+ // usage_para
+ //
+ inline usage_para::
+ usage_para (value v)
+ : v_ (v)
+ {
+ }
+
+ // unknown_mode
+ //
+ inline unknown_mode::
+ unknown_mode (value v)
+ : v_ (v)
+ {
+ }
+
+ // exception
+ //
+ inline ::std::ostream&
+ operator<< (::std::ostream& os, const exception& e)
+ {
+ e.print (os);
+ return os;
+ }
+
+ // unknown_option
+ //
+ inline unknown_option::
+ unknown_option (const std::string& option)
+ : option_ (option)
+ {
+ }
+
+ inline const std::string& unknown_option::
+ option () const
+ {
+ return option_;
+ }
+
+ // unknown_argument
+ //
+ inline unknown_argument::
+ unknown_argument (const std::string& argument)
+ : argument_ (argument)
+ {
+ }
+
+ inline const std::string& unknown_argument::
+ argument () const
+ {
+ return argument_;
+ }
+
+ // missing_value
+ //
+ inline missing_value::
+ missing_value (const std::string& option)
+ : option_ (option)
+ {
+ }
+
+ inline const std::string& missing_value::
+ option () const
+ {
+ return option_;
+ }
+
+ // invalid_value
+ //
+ inline invalid_value::
+ invalid_value (const std::string& option,
+ const std::string& value,
+ const std::string& message)
+ : option_ (option),
+ value_ (value),
+ message_ (message)
+ {
+ }
+
+ inline const std::string& invalid_value::
+ option () const
+ {
+ return option_;
+ }
+
+ inline const std::string& invalid_value::
+ value () const
+ {
+ return value_;
+ }
+
+ inline const std::string& invalid_value::
+ message () const
+ {
+ return message_;
+ }
+
+ // unexpected_group
+ //
+ inline unexpected_group::
+ unexpected_group (const std::string& argument,
+ const std::string& group)
+ : argument_ (argument), group_ (group)
+ {
+ }
+
+ inline const std::string& unexpected_group::
+ argument () const
+ {
+ return argument_;
+ }
+
+ inline const std::string& unexpected_group::
+ group () const
+ {
+ return group_;
+ }
+
+ // group_separator
+ //
+ inline group_separator::
+ group_separator (const std::string& encountered,
+ const std::string& expected)
+ : encountered_ (encountered), expected_ (expected)
+ {
+ }
+
+ inline const std::string& group_separator::
+ encountered () const
+ {
+ return encountered_;
+ }
+
+ inline const std::string& group_separator::
+ expected () const
+ {
+ return expected_;
+ }
+
+ // argv_scanner
+ //
+ inline argv_scanner::
+ argv_scanner (int& argc,
+ char** argv,
+ bool erase,
+ std::size_t sp)
+ : start_position_ (sp + 1),
+ i_ (1),
+ argc_ (argc),
+ argv_ (argv),
+ erase_ (erase)
+ {
+ }
+
+ inline argv_scanner::
+ argv_scanner (int start,
+ int& argc,
+ char** argv,
+ bool erase,
+ std::size_t sp)
+ : start_position_ (sp + static_cast<std::size_t> (start)),
+ i_ (start),
+ argc_ (argc),
+ argv_ (argv),
+ erase_ (erase)
+ {
+ }
+
+ inline int argv_scanner::
+ end () const
+ {
+ return i_;
+ }
+
+ // vector_scanner
+ //
+ inline vector_scanner::
+ vector_scanner (const std::vector<std::string>& v,
+ std::size_t i,
+ std::size_t sp)
+ : start_position_ (sp), v_ (v), i_ (i)
+ {
+ }
+
+ inline std::size_t vector_scanner::
+ end () const
+ {
+ return i_;
+ }
+
+ inline void vector_scanner::
+ reset (std::size_t i, std::size_t sp)
+ {
+ i_ = i;
+ start_position_ = sp;
+ }
+
+ // group_scanner
+ //
+ inline group_scanner::
+ group_scanner (scanner& s)
+ : scan_ (s), state_ (skipped), i_ (1), j_ (0), group_scan_ (group_)
+ {
+ }
+
+ inline scanner& group_scanner::
+ group ()
+ {
+ assert (state_ == scanned || state_ == skipped);
+ return group_scan_;
+ }
+
+ inline const char* group_scanner::
+ escape (const char* a)
+ {
+ switch (sense (a))
+ {
+ case separator::none: break;
+ case separator::open: return "\\{";
+ case separator::close: return "\\}";
+ case separator::open_plus: return "\\+{";
+ case separator::close_plus: return "\\}+";
+ }
+
+ return a;
+ }
+
+ inline group_scanner::separator group_scanner::
+ sense (const char* s)
+ {
+ switch (s[0])
+ {
+ case '{': return s[1] == '\0' ? open : none;
+ case '}':
+ {
+ switch (s[1])
+ {
+ case '+': return s[2] == '\0' ? close_plus : none;
+ default: return s[1] == '\0' ? close : none;
+ }
+ }
+ case '+':
+ {
+ switch (s[1])
+ {
+ case '{': return s[2] == '\0' ? open_plus : none;
+ default: return none;
+ }
+ }
+ }
+
+ return none;
+ }
+ }
+}
+
+namespace bbot
+{
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
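
The separator sensing and escaping above can be restated in standalone form (simplified; the real logic lives in bbot::cli::group_scanner) to show which command line arguments act as group separators and how a literal separator is escaped with a leading backslash:

// Simplified restatement of sense()/escape(); not the actual bbot code.
#include <cstring>
#include <iostream>

enum separator {none, open, close, open_plus, close_plus};

static separator
sense (const char* s)
{
  if (std::strcmp (s, "{") == 0)  return open;
  if (std::strcmp (s, "}") == 0)  return close;
  if (std::strcmp (s, "+{") == 0) return open_plus;
  if (std::strcmp (s, "}+") == 0) return close_plus;
  return none;
}

int
main ()
{
  // "\\{" is the two-character sequence backslash-brace: the real scanner
  // strips the backslash and treats the brace as a literal argument.
  //
  for (const char* a: {"{", "}", "+{", "}+", "foo", "\\{"})
    std::cout << a << " -> "
              << (sense (a) == none ? "literal" : "separator") << '\n';
}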
diff --git a/bbot/machine-manifest.cxx b/bbot/machine-manifest.cxx
index 606baf0..bddf4d5 100644
--- a/bbot/machine-manifest.cxx
+++ b/bbot/machine-manifest.cxx
@@ -326,24 +326,36 @@ namespace bbot
if (!machine.mac)
bad_name ("mac address must be present in machine manifest");
- nv = p.next ();
- if (nv.empty ())
- bad_value ("toolchain manifest expected");
+ if (machine.effective_role () == machine_role::build)
+ {
+ nv = p.next ();
+ if (nv.empty ())
+ bad_value ("toolchain manifest expected");
- toolchain = toolchain_manifest (p, nv, iu);
+ toolchain = toolchain_manifest (p, nv, iu);
- nv = p.next ();
- if (nv.empty ())
- bad_value ("bootstrap manifest expected");
+ nv = p.next ();
+ if (nv.empty ())
+ bad_value ("bootstrap manifest expected");
- bootstrap = bootstrap_manifest (p, nv, iu);
+ bootstrap = bootstrap_manifest (p, nv, iu);
- // Make sure this is the end.
- //
- nv = p.next ();
- if (!nv.empty ())
- throw parsing (p.name (), nv.name_line, nv.name_column,
- "single bootstrapped machine manifest expected");
+ // Make sure this is the end.
+ //
+ nv = p.next ();
+ if (!nv.empty ())
+ throw parsing (p.name (), nv.name_line, nv.name_column,
+ "single bootstrapped machine manifest expected");
+ }
+ else
+ {
+ // Make sure this is the end.
+ //
+ nv = p.next ();
+ if (!nv.empty ())
+ throw parsing (p.name (), nv.name_line, nv.name_column,
+ "single machine manifest expected");
+ }
}
void bootstrapped_machine_manifest::
@@ -359,8 +371,12 @@ namespace bbot
"mac address must be present in machine manifest");
machine.serialize (s);
- toolchain.serialize (s);
- bootstrap.serialize (s);
+
+ if (machine.effective_role () == machine_role::build)
+ {
+ toolchain.serialize (s);
+ bootstrap.serialize (s);
+ }
s.next ("", ""); // End of stream.
}
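
The role-dependent layout implemented above can be summarized with a trivial stand-in sketch (illustrative only, not the bbot API): a bootstrapped build machine manifest file contains three manifests while an auxiliary one contains just the machine manifest.

// Illustrative sketch with a stand-in machine_role type.
#include <cstddef>
#include <iostream>

enum class machine_role {build, auxiliary};

// Number of manifests expected in a bootstrapped machine manifest file.
//
static std::size_t
expected_manifests (machine_role r)
{
  // Build: machine + toolchain + bootstrap manifests.
  // Auxiliary: machine manifest only.
  //
  return r == machine_role::build ? 3 : 1;
}

int
main ()
{
  std::cout << expected_manifests (machine_role::build)     << '\n'  // 3
            << expected_manifests (machine_role::auxiliary) << '\n'; // 1
}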
diff --git a/bbot/machine-manifest.hxx b/bbot/machine-manifest.hxx
index 116dc32..b488425 100644
--- a/bbot/machine-manifest.hxx
+++ b/bbot/machine-manifest.hxx
@@ -40,20 +40,26 @@ namespace bbot
strings
unquoted_options () const; // Return empty if absent.
- machine_manifest (std::string i,
- std::string n,
- std::string s,
+ machine_manifest (string i,
+ string n,
+ string s,
machine_type t,
optional<string> m,
optional<strings> o,
- strings c)
- : machine_header_manifest (std::move (i),
- std::move (n),
- std::move (s)),
+ strings c,
+ optional<machine_role> r,
+ optional<uint64_t> rmn,
+ optional<uint64_t> rmx)
+ : machine_header_manifest (move (i),
+ move (n),
+ move (s),
+ r,
+ rmn,
+ rmx),
type (t),
- mac (std::move (m)),
- options (std::move (o)),
- changes (std::move (c)) {}
+ mac (move (m)),
+ options (move (o)),
+ changes (move (c)) {}
public:
machine_manifest () = default;
@@ -91,7 +97,8 @@ namespace bbot
};
// The manifest stored in <name>-<toolchain>/ consists of the machine
- // manifest (original), toolchain manifest, and bootstrap manifest.
+ // manifest (original) and, if this is a build machine, toolchain manifest and
+ // bootstrap manifest.
//
class bootstrapped_machine_manifest
{
diff --git a/bbot/machine-manifest.test.testscript b/bbot/machine-manifest.test.testscript
index 6471473..e358ff3 100644
--- a/bbot/machine-manifest.test.testscript
+++ b/bbot/machine-manifest.test.testscript
@@ -238,27 +238,6 @@
{
test.options += -bm
- : valid
- :
- {
- : all-values
- :
- $* <<EOF >>EOF
- : 1
- :
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- name: windows_10-msvc_14
- summary: Windows 10 build 1607 with VC 14 update 3
- type: kvm
- mac: de:ad:be:ef:de:ad
- :
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- :
- bbot-version: 1.1.2
- libbbot-version: 1.1.1
- EOF
- }
-
: unknown
:
$* <<EOI 2>"stdin:2:1: error: unknown name 'x' in bootstrapped machine manifest" == 1
@@ -272,40 +251,107 @@
: 1
EOI
- : no-machine-mac
+ : build-role
:
- $* <<EOI 2>'stdin:2:1: error: mac address must be present in machine manifest' == 1
- : 1
- :
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- name: windows_10-msvc_14
- summary: Windows 10 build 1607 with VC 14 update 3
- type: kvm
- EOI
+ {
+ : valid
+ :
+ {
+ : all-values
+ :
+ $* <<EOF >>EOF
+ : 1
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ name: windows_10-msvc_14
+ summary: Windows 10 build 1607 with VC 14 update 3
+ type: kvm
+ mac: de:ad:be:ef:de:ad
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ :
+ bbot-version: 1.1.2
+ libbbot-version: 1.1.1
+ EOF
+ }
- : no-toolchain
- :
- $* <<EOI 2>'stdin:8:1: error: toolchain manifest expected' == 1
- : 1
- :
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- name: windows_10-msvc_14
- summary: Windows 10 build 1607 with VC 14 update 3
- type: kvm
- mac: de:ad:be:ef:de:ad
- EOI
+ : no-machine-mac
+ :
+ $* <<EOI 2>'stdin:2:1: error: mac address must be present in machine manifest' == 1
+ : 1
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ name: windows_10-msvc_14
+ summary: Windows 10 build 1607 with VC 14 update 3
+ type: kvm
+ EOI
- : no-bootstrap
- :
- $* <<EOI 2>'stdin:10:1: error: bootstrap manifest expected' == 1
- : 1
- :
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- name: windows_10-msvc_14
- summary: Windows 10 build 1607 with VC 14 update 3
- type: kvm
- mac: de:ad:be:ef:de:ad
+ : no-toolchain
+ :
+ $* <<EOI 2>'stdin:8:1: error: toolchain manifest expected' == 1
+ : 1
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ name: windows_10-msvc_14
+ summary: Windows 10 build 1607 with VC 14 update 3
+ type: kvm
+ mac: de:ad:be:ef:de:ad
+ EOI
+
+ : no-bootstrap
+ :
+ $* <<EOI 2>'stdin:10:1: error: bootstrap manifest expected' == 1
+ : 1
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ name: windows_10-msvc_14
+ summary: Windows 10 build 1607 with VC 14 update 3
+ type: kvm
+ mac: de:ad:be:ef:de:ad
+ :
+ id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
+ EOI
+ }
+
+ : auxiliary-role
:
- id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
- EOI
+ {
+ : valid
+ :
+ {
+ : all-values
+ :
+ $* <<EOF >>EOF
+ : 1
+ :
+ id: x86_64-linux_debian_12-postgresql_15-1.0
+ name: x86_64-linux_debian_12-postgresql_15
+ summary: Debian 12 "bookworm" with PostgreSQL 15.6.0 (auxiliary machine)
+ role: auxiliary
+ ram-minimum: 1048576
+ type: kvm
+ mac: e6:38:72:53:61:ae
+ changes:\
+ 1.0
+ - clone off linux_debian_12-small-1.0
+ - postgresql-15 15.6.0+deb12u1
+ \
+ EOF
+ }
+
+ : unexpected-manifest
+ :
+ $* <<EOI 2>'stdin:10:1: error: single machine manifest expected' == 1
+ : 1
+ :
+ id: x86_64-linux_debian_12-postgresql_15-1.0
+ name: x86_64-linux_debian_12-postgresql_15
+ summary: Debian 12 "bookworm" with PostgreSQL 15.6.0 (auxiliary machine)
+ role: auxiliary
+ ram-minimum: 1048576
+ type: kvm
+ mac: e6:38:72:53:61:ae
+ :
+ EOI
+ }
}
diff --git a/bbot/utility.hxx b/bbot/utility.hxx
index 9bc517c..7758db4 100644
--- a/bbot/utility.hxx
+++ b/bbot/utility.hxx
@@ -37,6 +37,7 @@ namespace bbot
// <libbutl/utility.hxx>
//
using butl::icasecmp;
+ using butl::sanitize_identifier;
using butl::reverse_iterate;
using butl::make_guard;
diff --git a/bbot/worker/worker-options.cxx b/bbot/worker/worker-options.cxx
new file mode 100644
index 0000000..d58ec05
--- /dev/null
+++ b/bbot/worker/worker-options.cxx
@@ -0,0 +1,693 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+#include <bbot/types-parsers.hxx>
+//
+// End prologue.
+
+#include <bbot/worker/worker-options.hxx>
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+#include <utility>
+#include <ostream>
+#include <sstream>
+#include <cstring>
+
+namespace bbot
+{
+ namespace cli
+ {
+ template <typename X>
+ struct parser
+ {
+ static void
+ parse (X& x, bool& xs, scanner& s)
+ {
+ using namespace std;
+
+ const char* o (s.next ());
+ if (s.more ())
+ {
+ string v (s.next ());
+ istringstream is (v);
+ if (!(is >> x && is.peek () == istringstream::traits_type::eof ()))
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<bool>
+ {
+ static void
+ parse (bool& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<std::string>
+ {
+ static void
+ parse (std::string& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ x = s.next ();
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X>
+ struct parser<std::pair<X, std::size_t> >
+ {
+ static void
+ parse (std::pair<X, std::size_t>& x, bool& xs, scanner& s)
+ {
+ x.second = s.position ();
+ parser<X>::parse (x.first, xs, s);
+ }
+ };
+
+ template <typename X>
+ struct parser<std::vector<X> >
+ {
+ static void
+ parse (std::vector<X>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.push_back (x);
+ xs = true;
+ }
+ };
+
+ template <typename X, typename C>
+ struct parser<std::set<X, C> >
+ {
+ static void
+ parse (std::set<X, C>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.insert (x);
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::map<K, V, C> >
+ {
+ static void
+ parse (std::map<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m[k] = v;
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X, typename T, T X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, s);
+ }
+
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
+ template <typename X, typename T, T X::*M, bool X::*S>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, x.*S, s);
+ }
+ }
+}
+
+#include <map>
+
+namespace bbot
+{
+ // worker_options
+ //
+
+ worker_options::
+ worker_options ()
+ : help_ (),
+ version_ (),
+ verbose_ (1),
+ verbose_specified_ (false),
+ bootstrap_ (),
+ startup_ (),
+ systemd_daemon_ (),
+ build_ (),
+ build_specified_ (false),
+ environments_ (),
+ environments_specified_ (false),
+ env_script_ (),
+ env_script_specified_ (false),
+ env_target_ (),
+ env_target_specified_ (false),
+ tftp_host_ ("196.254.111.222"),
+ tftp_host_specified_ (false)
+ {
+ }
+
+ bool worker_options::
+ parse (int& argc,
+ char** argv,
+ bool erase,
+ ::bbot::cli::unknown_mode opt,
+ ::bbot::cli::unknown_mode arg)
+ {
+ ::bbot::cli::argv_scanner s (argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ bool worker_options::
+ parse (int start,
+ int& argc,
+ char** argv,
+ bool erase,
+ ::bbot::cli::unknown_mode opt,
+ ::bbot::cli::unknown_mode arg)
+ {
+ ::bbot::cli::argv_scanner s (start, argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ bool worker_options::
+ parse (int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::bbot::cli::unknown_mode opt,
+ ::bbot::cli::unknown_mode arg)
+ {
+ ::bbot::cli::argv_scanner s (argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ end = s.end ();
+ return r;
+ }
+
+ bool worker_options::
+ parse (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::bbot::cli::unknown_mode opt,
+ ::bbot::cli::unknown_mode arg)
+ {
+ ::bbot::cli::argv_scanner s (start, argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ end = s.end ();
+ return r;
+ }
+
+ bool worker_options::
+ parse (::bbot::cli::scanner& s,
+ ::bbot::cli::unknown_mode opt,
+ ::bbot::cli::unknown_mode arg)
+ {
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ ::bbot::cli::usage_para worker_options::
+ print_usage (::std::ostream& os, ::bbot::cli::usage_para p)
+ {
+ CLI_POTENTIALLY_UNUSED (os);
+
+ if (p != ::bbot::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mOPTIONS\033[0m" << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--help\033[0m Print usage information and exit." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--version\033[0m Print version and exit." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--verbose\033[0m \033[4mlevel\033[0m Set the diagnostics verbosity to \033[4mlevel\033[0m between 0 and" << ::std::endl
+ << " 6 with level 1 being the default." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--bootstrap\033[0m Perform the inital machine bootstrap insteading of" << ::std::endl
+ << " building." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--startup\033[0m Perform the environment setup and then re-execute for" << ::std::endl
+ << " building." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--systemd-daemon\033[0m Run as a simple systemd daemon." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--build\033[0m \033[4mdir\033[0m The directory to perform the build in. If not" << ::std::endl
+ << " specified, then the current working directory is" << ::std::endl
+ << " used." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--environments\033[0m \033[4mdir\033[0m The directory containing the environment setup" << ::std::endl
+ << " executables. If not specified, then the user's home" << ::std::endl
+ << " directory is used." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--env-script\033[0m \033[4mpath\033[0m The environment setup executable path. This option is" << ::std::endl
+ << " normally passed by the worker running in the startup" << ::std::endl
+ << " mode to the worker executed in the build mode." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--env-target\033[0m \033[4mtarget\033[0m The environment setup executable target argument." << ::std::endl
+ << " This option is normally passed by the worker running" << ::std::endl
+ << " in the startup mode to the worker executed in the" << ::std::endl
+ << " build mode." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--tftp-host\033[0m \033[4maddr\033[0m The TFTP host address and, optionally, port to use to" << ::std::endl
+ << " download the build task and to upload the build" << ::std::endl
+ << " result. By default the link-local address" << ::std::endl
+ << " 196.254.111.222 with the standard TFTP port (69) is" << ::std::endl
+ << " used." << ::std::endl;
+
+ p = ::bbot::cli::usage_para::option;
+
+ return p;
+ }
+
+ typedef
+ std::map<std::string, void (*) (worker_options&, ::bbot::cli::scanner&)>
+ _cli_worker_options_map;
+
+ static _cli_worker_options_map _cli_worker_options_map_;
+
+ struct _cli_worker_options_map_init
+ {
+ _cli_worker_options_map_init ()
+ {
+ _cli_worker_options_map_["--help"] =
+ &::bbot::cli::thunk< worker_options, &worker_options::help_ >;
+ _cli_worker_options_map_["--version"] =
+ &::bbot::cli::thunk< worker_options, &worker_options::version_ >;
+ _cli_worker_options_map_["--verbose"] =
+ &::bbot::cli::thunk< worker_options, uint16_t, &worker_options::verbose_,
+ &worker_options::verbose_specified_ >;
+ _cli_worker_options_map_["--bootstrap"] =
+ &::bbot::cli::thunk< worker_options, &worker_options::bootstrap_ >;
+ _cli_worker_options_map_["--startup"] =
+ &::bbot::cli::thunk< worker_options, &worker_options::startup_ >;
+ _cli_worker_options_map_["--systemd-daemon"] =
+ &::bbot::cli::thunk< worker_options, &worker_options::systemd_daemon_ >;
+ _cli_worker_options_map_["--build"] =
+ &::bbot::cli::thunk< worker_options, dir_path, &worker_options::build_,
+ &worker_options::build_specified_ >;
+ _cli_worker_options_map_["--environments"] =
+ &::bbot::cli::thunk< worker_options, dir_path, &worker_options::environments_,
+ &worker_options::environments_specified_ >;
+ _cli_worker_options_map_["--env-script"] =
+ &::bbot::cli::thunk< worker_options, path, &worker_options::env_script_,
+ &worker_options::env_script_specified_ >;
+ _cli_worker_options_map_["--env-target"] =
+ &::bbot::cli::thunk< worker_options, string, &worker_options::env_target_,
+ &worker_options::env_target_specified_ >;
+ _cli_worker_options_map_["--tftp-host"] =
+ &::bbot::cli::thunk< worker_options, string, &worker_options::tftp_host_,
+ &worker_options::tftp_host_specified_ >;
+ }
+ };
+
+ static _cli_worker_options_map_init _cli_worker_options_map_init_;
+
+ bool worker_options::
+ _parse (const char* o, ::bbot::cli::scanner& s)
+ {
+ _cli_worker_options_map::const_iterator i (_cli_worker_options_map_.find (o));
+
+ if (i != _cli_worker_options_map_.end ())
+ {
+ (*(i->second)) (*this, s);
+ return true;
+ }
+
+ return false;
+ }
+
+ bool worker_options::
+ _parse (::bbot::cli::scanner& s,
+ ::bbot::cli::unknown_mode opt_mode,
+ ::bbot::cli::unknown_mode arg_mode)
+ {
+ // Can't skip combined flags (--no-combined-flags).
+ //
+ assert (opt_mode != ::bbot::cli::unknown_mode::skip);
+
+ bool r = false;
+ bool opt = true;
+
+ while (s.more ())
+ {
+ const char* o = s.peek ();
+
+ if (std::strcmp (o, "--") == 0)
+ {
+ opt = false;
+ s.skip ();
+ r = true;
+ continue;
+ }
+
+ if (opt)
+ {
+ if (_parse (o, s))
+ {
+ r = true;
+ continue;
+ }
+
+ if (std::strncmp (o, "-", 1) == 0 && o[1] != '\0')
+ {
+ // Handle combined option values.
+ //
+ std::string co;
+ if (const char* v = std::strchr (o, '='))
+ {
+ co.assign (o, 0, v - o);
+ ++v;
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (co.c_str ()),
+ const_cast<char*> (v)
+ };
+
+ ::bbot::cli::argv_scanner ns (0, ac, av);
+
+ if (_parse (co.c_str (), ns))
+ {
+ // Parsed the option but not its value?
+ //
+ if (ns.end () != 2)
+ throw ::bbot::cli::invalid_value (co, v);
+
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = co.c_str ();
+ }
+ }
+
+ // Handle combined flags.
+ //
+ char cf[3];
+ {
+ const char* p = o + 1;
+ for (; *p != '\0'; ++p)
+ {
+ if (!((*p >= 'a' && *p <= 'z') ||
+ (*p >= 'A' && *p <= 'Z') ||
+ (*p >= '0' && *p <= '9')))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ for (p = o + 1; *p != '\0'; ++p)
+ {
+ std::strcpy (cf, "-");
+ cf[1] = *p;
+ cf[2] = '\0';
+
+ int ac (1);
+ char* av[] =
+ {
+ cf
+ };
+
+ ::bbot::cli::argv_scanner ns (0, ac, av);
+
+ if (!_parse (cf, ns))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ // All handled.
+ //
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = cf;
+ }
+ }
+ }
+
+ switch (opt_mode)
+ {
+ case ::bbot::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::bbot::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::bbot::cli::unknown_mode::fail:
+ {
+ throw ::bbot::cli::unknown_option (o);
+ }
+ }
+
+ break;
+ }
+ }
+
+ switch (arg_mode)
+ {
+ case ::bbot::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::bbot::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::bbot::cli::unknown_mode::fail:
+ {
+ throw ::bbot::cli::unknown_argument (o);
+ }
+ }
+
+ break;
+ }
+
+ return r;
+ }
+}
+
+namespace bbot
+{
+ ::bbot::cli::usage_para
+ print_bbot_worker_usage (::std::ostream& os, ::bbot::cli::usage_para p)
+ {
+ CLI_POTENTIALLY_UNUSED (os);
+
+ if (p != ::bbot::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mSYNOPSIS\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[1mbbot-worker --help\033[0m" << ::std::endl
+ << "\033[1mbbot-worker --version\033[0m" << ::std::endl
+ << "\033[1mbbot-worker --bootstrap\033[0m [\033[4moptions\033[0m]" << ::std::endl
+ << "\033[1mbbot-worker --startup\033[0m [\033[4moptions\033[0m]" << ::std::endl
+ << "\033[1mbbot-worker\033[0m [\033[4moptions\033[0m] \033[4mmodule\033[0m... \033[4mcfg-var\033[0m...\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[1mDESCRIPTION\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[1mbbot-worker\033[0m @@ TODO." << ::std::endl
+ << ::std::endl
+ << "If the \033[1m--bootstrap\033[0m mode option is specified, then the worker performs the" << ::std::endl
+ << "initial machine bootstrap and writes the bootstrap result manifest to stdout\033[0m." << ::std::endl
+ << "If the \033[1m--startup\033[0m mode option is specified, then the worker performs the" << ::std::endl
+ << "environment setup and then re-executes in the build mode. If neither of the" << ::std::endl
+ << "mode options is specified, then the worker proceeds to performing the build" << ::std::endl
+ << "task." << ::std::endl;
+
+ p = ::bbot::worker_options::print_usage (os, ::bbot::cli::usage_para::text);
+
+ if (p != ::bbot::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mEXIT STATUS\033[0m" << ::std::endl
+ << ::std::endl
+ << "Non-zero exit status is returned in case of an error. In the build mode, exit" << ::std::endl
+ << "code 2 is used to signal abnormal termination where the worker uploaded the" << ::std::endl
+ << "result manifest itself." << ::std::endl;
+
+ p = ::bbot::cli::usage_para::text;
+
+ return p;
+ }
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
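The generated _parse() above accepts both the separate ("--tftp-host 10.0.0.1") and combined ("--tftp-host=10.0.0.1") option value forms as well as combined single-character flags. A minimal sketch of driving this parser (not part of the patch; it assumes only the generated headers added by this commit):

#include <iostream>

#include <bbot/worker/worker-options.hxx>

int
main (int argc, char* argv[])
{
  using namespace bbot;

  worker_options ops;

  try
  {
    // Both "--tftp-host 10.0.0.1:69" and "--tftp-host=10.0.0.1:69" are
    // accepted; the latter is split into the option and its value by the
    // combined option value logic in _parse().
    //
    ops.parse (argc, argv,
               false                   /* erase */,
               cli::unknown_mode::fail /* option */,
               cli::unknown_mode::stop /* argument */);
  }
  catch (const cli::exception& e)
  {
    std::cerr << "error: " << e << std::endl;
    return 1;
  }

  if (ops.help ())
  {
    print_bbot_worker_usage (std::cout);
    return 0;
  }

  std::cout << "tftp host: " << ops.tftp_host () << std::endl;
  return 0;
}
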
diff --git a/bbot/worker/worker-options.hxx b/bbot/worker/worker-options.hxx
new file mode 100644
index 0000000..15e5b66
--- /dev/null
+++ b/bbot/worker/worker-options.hxx
@@ -0,0 +1,171 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+#ifndef BBOT_WORKER_WORKER_OPTIONS_HXX
+#define BBOT_WORKER_WORKER_OPTIONS_HXX
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+#include <bbot/common-options.hxx>
+
+namespace bbot
+{
+ class worker_options
+ {
+ public:
+ worker_options ();
+
+ // Return true if anything has been parsed.
+ //
+ bool
+ parse (int& argc,
+ char** argv,
+ bool erase = false,
+ ::bbot::cli::unknown_mode option = ::bbot::cli::unknown_mode::fail,
+ ::bbot::cli::unknown_mode argument = ::bbot::cli::unknown_mode::stop);
+
+ bool
+ parse (int start,
+ int& argc,
+ char** argv,
+ bool erase = false,
+ ::bbot::cli::unknown_mode option = ::bbot::cli::unknown_mode::fail,
+ ::bbot::cli::unknown_mode argument = ::bbot::cli::unknown_mode::stop);
+
+ bool
+ parse (int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::bbot::cli::unknown_mode option = ::bbot::cli::unknown_mode::fail,
+ ::bbot::cli::unknown_mode argument = ::bbot::cli::unknown_mode::stop);
+
+ bool
+ parse (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::bbot::cli::unknown_mode option = ::bbot::cli::unknown_mode::fail,
+ ::bbot::cli::unknown_mode argument = ::bbot::cli::unknown_mode::stop);
+
+ bool
+ parse (::bbot::cli::scanner&,
+ ::bbot::cli::unknown_mode option = ::bbot::cli::unknown_mode::fail,
+ ::bbot::cli::unknown_mode argument = ::bbot::cli::unknown_mode::stop);
+
+ // Option accessors.
+ //
+ const bool&
+ help () const;
+
+ const bool&
+ version () const;
+
+ const uint16_t&
+ verbose () const;
+
+ bool
+ verbose_specified () const;
+
+ const bool&
+ bootstrap () const;
+
+ const bool&
+ startup () const;
+
+ const bool&
+ systemd_daemon () const;
+
+ const dir_path&
+ build () const;
+
+ bool
+ build_specified () const;
+
+ const dir_path&
+ environments () const;
+
+ bool
+ environments_specified () const;
+
+ const path&
+ env_script () const;
+
+ bool
+ env_script_specified () const;
+
+ const string&
+ env_target () const;
+
+ bool
+ env_target_specified () const;
+
+ const string&
+ tftp_host () const;
+
+ bool
+ tftp_host_specified () const;
+
+ // Print usage information.
+ //
+ static ::bbot::cli::usage_para
+ print_usage (::std::ostream&,
+ ::bbot::cli::usage_para = ::bbot::cli::usage_para::none);
+
+ // Implementation details.
+ //
+ protected:
+ bool
+ _parse (const char*, ::bbot::cli::scanner&);
+
+ private:
+ bool
+ _parse (::bbot::cli::scanner&,
+ ::bbot::cli::unknown_mode option,
+ ::bbot::cli::unknown_mode argument);
+
+ public:
+ bool help_;
+ bool version_;
+ uint16_t verbose_;
+ bool verbose_specified_;
+ bool bootstrap_;
+ bool startup_;
+ bool systemd_daemon_;
+ dir_path build_;
+ bool build_specified_;
+ dir_path environments_;
+ bool environments_specified_;
+ path env_script_;
+ bool env_script_specified_;
+ string env_target_;
+ bool env_target_specified_;
+ string tftp_host_;
+ bool tftp_host_specified_;
+ };
+}
+
+// Print page usage information.
+//
+namespace bbot
+{
+ ::bbot::cli::usage_para
+ print_bbot_worker_usage (::std::ostream&,
+ ::bbot::cli::usage_para = ::bbot::cli::usage_para::none);
+}
+
+#include <bbot/worker/worker-options.ixx>
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
+#endif // BBOT_WORKER_WORKER_OPTIONS_HXX
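Each option in the generated interface above comes as a pair of accessors: the value itself and a *_specified() flag that distinguishes an explicitly passed option from its default. A minimal sketch of the typical pattern (not part of the patch; the fallbacks mentioned in the comments are the ones documented in the option descriptions):

#include <iostream>

#include <bbot/worker/worker-options.hxx>

int
main (int argc, char* argv[])
{
  using namespace bbot;

  worker_options ops;
  ops.parse (argc, argv);

  // Per the option documentation, --build falls back to the current working
  // directory and --environments to the user's home directory when not
  // specified; --tftp-host always has a default value.
  //
  if (ops.build_specified ())
    std::cout << "build directory: " << ops.build ().string () << std::endl;
  else
    std::cout << "build directory: (current working directory)" << std::endl;

  if (ops.verbose () >= 2)
    std::cout << "tftp host: " << ops.tftp_host ()
              << (ops.tftp_host_specified () ? "" : " (default)")
              << std::endl;

  return 0;
}
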
diff --git a/bbot/worker/worker-options.ixx b/bbot/worker/worker-options.ixx
new file mode 100644
index 0000000..bc5b21d
--- /dev/null
+++ b/bbot/worker/worker-options.ixx
@@ -0,0 +1,123 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+namespace bbot
+{
+ // worker_options
+ //
+
+ inline const bool& worker_options::
+ help () const
+ {
+ return this->help_;
+ }
+
+ inline const bool& worker_options::
+ version () const
+ {
+ return this->version_;
+ }
+
+ inline const uint16_t& worker_options::
+ verbose () const
+ {
+ return this->verbose_;
+ }
+
+ inline bool worker_options::
+ verbose_specified () const
+ {
+ return this->verbose_specified_;
+ }
+
+ inline const bool& worker_options::
+ bootstrap () const
+ {
+ return this->bootstrap_;
+ }
+
+ inline const bool& worker_options::
+ startup () const
+ {
+ return this->startup_;
+ }
+
+ inline const bool& worker_options::
+ systemd_daemon () const
+ {
+ return this->systemd_daemon_;
+ }
+
+ inline const dir_path& worker_options::
+ build () const
+ {
+ return this->build_;
+ }
+
+ inline bool worker_options::
+ build_specified () const
+ {
+ return this->build_specified_;
+ }
+
+ inline const dir_path& worker_options::
+ environments () const
+ {
+ return this->environments_;
+ }
+
+ inline bool worker_options::
+ environments_specified () const
+ {
+ return this->environments_specified_;
+ }
+
+ inline const path& worker_options::
+ env_script () const
+ {
+ return this->env_script_;
+ }
+
+ inline bool worker_options::
+ env_script_specified () const
+ {
+ return this->env_script_specified_;
+ }
+
+ inline const string& worker_options::
+ env_target () const
+ {
+ return this->env_target_;
+ }
+
+ inline bool worker_options::
+ env_target_specified () const
+ {
+ return this->env_target_specified_;
+ }
+
+ inline const string& worker_options::
+ tftp_host () const
+ {
+ return this->tftp_host_;
+ }
+
+ inline bool worker_options::
+ tftp_host_specified () const
+ {
+ return this->tftp_host_specified_;
+ }
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
diff --git a/bbot/worker/worker.cxx b/bbot/worker/worker.cxx
index 8217c45..b019337 100644
--- a/bbot/worker/worker.cxx
+++ b/bbot/worker/worker.cxx
@@ -575,6 +575,7 @@ run_cmd (step_id step,
const string& name,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const process_env& pe,
A&&... a)
@@ -588,22 +589,32 @@ run_cmd (step_id step,
//
struct abort {};
- auto prompt = [&last_cmd, &next_cmd, &t, &log] (const string& what)
+ auto prompt = [&aux_env, &last_cmd, &next_cmd, &t, &log] (const string& what)
{
diag_record dr (text);
dr << '\n'
<< what << '\n'
- << " current dir: " << current_directory () << '\n'
- << " environment: " << ops.env_script () << ' ' << ops.env_target ();
+ << " current dir: " << current_directory () << '\n'
+ << " environment: " << ops.env_script () << ' ' << ops.env_target ();
+
+ if (!aux_env.empty ())
+ {
+ dr << '\n'
+ << " auxiliary environment:";
+
+ for (const string& e: aux_env)
+ dr << '\n'
+ << " " << e;
+ }
if (!last_cmd.empty ())
dr << '\n'
- << " last command: " << last_cmd;
+ << " last command: " << last_cmd;
if (!next_cmd.empty ())
dr << '\n'
- << " next command: " << next_cmd;
+ << " next command: " << next_cmd;
dr.flush ();
@@ -782,6 +793,7 @@ run_cmd (step_id step,
const string& name,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const process_env& pe,
A&&... a)
@@ -793,9 +805,7 @@ run_cmd (step_id step,
out_str, out_file,
warn_detect,
name,
- bkp_step,
- bkp_status,
- last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
pe,
forward<A> (a)...);
}
@@ -811,6 +821,7 @@ run_bpkg (step_id step,
const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& cmd, A&&... a)
@@ -822,7 +833,7 @@ run_bpkg (step_id step,
&out, path () /* out_file */,
warn_detect,
"bpkg " + cmd,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("bpkg", envvars),
verbosity, cmd, forward<A> (a)...);
}
@@ -835,6 +846,7 @@ run_bpkg (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& cmd, A&&... a)
@@ -845,7 +857,7 @@ run_bpkg (step_id step,
nullptr /* out_str */, path () /* out_file */,
warn_detect,
"bpkg " + cmd,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("bpkg", envvars),
verbosity, cmd, forward<A> (a)...);
}
@@ -860,6 +872,7 @@ run_bpkg (step_id step,
const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& cmd, A&&... a)
@@ -873,7 +886,7 @@ run_bpkg (step_id step,
pre_run,
out,
warn_detect,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
verbosity, cmd, forward<A> (a)...);
}
@@ -885,6 +898,7 @@ run_bpkg (step_id step,
string& log, const path& out, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& cmd, A&&... a)
@@ -893,7 +907,7 @@ run_bpkg (step_id step,
t,
log, nullptr /* out_str */, out, warn_detect,
"bpkg " + cmd,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("bpkg", envvars),
verbosity, cmd, forward<A> (a)...);
}
@@ -905,6 +919,7 @@ run_bpkg (step_id step,
string& log, const path& out, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& cmd, A&&... a)
@@ -915,7 +930,7 @@ run_bpkg (step_id step,
envvars,
t,
log, out, warn_detect,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
verbosity, cmd, forward<A> (a)...);
}
@@ -926,6 +941,7 @@ run_bpkg (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& cmd, A&&... a)
@@ -936,7 +952,7 @@ run_bpkg (step_id step,
envvars,
t,
log, warn_detect,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
verbosity, cmd, forward<A> (a)...);
}
@@ -948,6 +964,7 @@ run_b (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const strings& buildspecs, A&&... a)
@@ -966,7 +983,7 @@ run_b (step_id step,
nullptr /* out_str */, path () /* out_file */,
warn_detect,
name,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("b", envvars),
verbosity, buildspecs, forward<A> (a)...);
}
@@ -979,6 +996,7 @@ run_b (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& buildspec, A&&... a)
@@ -988,7 +1006,7 @@ run_b (step_id step,
nullptr /* out_str */, path () /* out_file */,
warn_detect,
"b " + buildspec,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("b", envvars),
verbosity, buildspec, forward<A> (a)...);
}
@@ -1000,6 +1018,7 @@ run_b (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const char* verbosity,
const string& buildspec, A&&... a)
@@ -1009,7 +1028,7 @@ run_b (step_id step,
envvars,
t,
log, warn_detect,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
verbosity, buildspec, forward<A> (a)...);
}
@@ -1020,6 +1039,7 @@ run_ldconfig (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
A&&... a)
{
@@ -1029,7 +1049,7 @@ run_ldconfig (step_id step,
nullptr /* out_str */, path () /* out_file*/,
warn_detect,
"sudo ldconfig",
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("sudo"),
"ldconfig", forward<A> (a)...);
}
@@ -1041,6 +1061,7 @@ run_apt_get (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const string& cmd, A&&... a)
{
@@ -1052,7 +1073,7 @@ run_apt_get (step_id step,
nullptr /* out_str */, path () /* out_file*/,
warn_detect,
"sudo apt-get " + cmd,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("sudo"),
"apt-get", cmd, forward<A> (a)...);
}
@@ -1064,6 +1085,7 @@ run_dnf (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
const string& cmd, A&&... a)
{
@@ -1075,7 +1097,7 @@ run_dnf (step_id step,
nullptr /* out_str */, path () /* out_file*/,
warn_detect,
"sudo dnf " + cmd,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("sudo"),
"dnf", cmd, forward<A> (a)...);
}
@@ -1088,6 +1110,7 @@ run_tar (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
bool sudo,
A&&... a)
@@ -1098,7 +1121,7 @@ run_tar (step_id step,
nullptr /* out_str */, path () /* out_file*/,
warn_detect,
sudo ? "sudo tar" : "tar",
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env (sudo ? "sudo" : "tar"),
sudo ? "tar" : nullptr, forward<A> (a)...);
}
@@ -1110,6 +1133,7 @@ run_tar (step_id step,
string& log, const regexes& warn_detect,
const optional<step_id>& bkp_step,
const optional<result_status>& bkp_status,
+ const strings& aux_env,
string& last_cmd,
bool /* sudo */,
A&&... a)
@@ -1123,7 +1147,7 @@ run_tar (step_id step,
nullptr /* out_str */, path () /* out_file*/,
warn_detect,
"bsdtar",
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ("bsdtar"),
forward<A> (a)...);
}
@@ -1219,6 +1243,9 @@ upload_manifest (tracer& trace,
}
}
+static strings
+parse_auxiliary_environment (const string&, const char*); // See below.
+
static const string worker_checksum ("5"); // Logic version.
static int bbot::
@@ -1296,6 +1323,12 @@ build (size_t argc, const char* argv[])
optional<result_status> bkp_status;
string last_cmd; // Used in the user prompt.
+ // Parse the auxiliary environment, if present, to dump it into the
+ // configure operation log and to use it in the interactive build user
+ // prompt. Note that this environment is already set by the parent process.
+ //
+ strings aux_env;
+
for (;;) // The "breakout" loop.
{
auto fail_operation = [&trace] (operation_result& r,
@@ -1380,6 +1413,17 @@ build (size_t argc, const char* argv[])
}
}
+ // Parse the auxiliary environment, if present.
+ //
+ if (tm.auxiliary_environment)
+ {
+    // Note: cannot throw since it has already been called successfully by the
+ // parent process.
+ //
+ aux_env = parse_auxiliary_environment (*tm.auxiliary_environment,
+ comment_begin);
+ }
+
// Split the argument into prefix (empty if not present) and unquoted
// value (absent if not present) and determine the step status. If the
// prefix is present and is prepended with the '+'/'-' character, then the
@@ -1811,6 +1855,11 @@ build (size_t argc, const char* argv[])
}
}
}
+ catch (const cli::exception& e)
+ {
+ fail (e.what (), false /* throw_abort */);
+ break;
+ }
catch (const string_parser::invalid_string& e)
{
fail (e.what (), false /* throw_abort */);
@@ -2017,7 +2066,7 @@ build (size_t argc, const char* argv[])
// for the build2 process. Return true if the dist meta-operation
// succeeds.
//
- auto redist = [&trace, &wre, &bkp_step, &bkp_status, &last_cmd]
+ auto redist = [&trace, &wre, &bkp_step, &bkp_status, &aux_env, &last_cmd]
(step_id step,
operation_result& r,
const dir_path& dist_root,
@@ -2047,7 +2096,7 @@ build (size_t argc, const char* argv[])
step,
envvars,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"config.dist.root=" + redist_root.string (),
import,
@@ -2353,8 +2402,22 @@ build (size_t argc, const char* argv[])
// Configure.
//
{
- operation_result* pr (&add_result ("configure"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("configure"));
+
+ // If we have auxiliary environment, show it in the logs.
+ //
+ if (!aux_env.empty ())
+ {
+ for (const string& e: aux_env)
+ {
+ r.log += e;
+ r.log += '\n';
+ }
+
+ // Add a trailing blank line to separate this from the rest.
+ //
+ r.log += '\n';
+ }
// Noop, just for the log record.
//
@@ -2403,7 +2466,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create",
"-d", target_conf,
@@ -2439,7 +2502,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create",
"-d", host_conf,
@@ -2469,7 +2532,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create",
"-d", install_conf,
@@ -2485,17 +2548,17 @@ build (size_t argc, const char* argv[])
}
else
{
- // b create(<dir>) config.config.load=~host
+ // b create(<dir>) config.config.load=~host-no-warnings
//
// Note also that we suppress warnings about unused config.* values.
//
r.status |= run_b (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create(" + host_conf.representation () + ",cc)",
- "config.config.load=~host",
+ "config.config.load=~host-no-warnings",
"config.config.persist+='config.*'@unused=drop");
if (!r.status)
@@ -2506,7 +2569,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"create",
"--existing",
@@ -2528,9 +2591,14 @@ build (size_t argc, const char* argv[])
// Create the module configuration.
//
{
- // b create(<dir>) config.config.load=~build2 [<env-config-args>
- // <tgt-config-args>
- // <pkg-config-args>]
+ // b create(<dir>) config.config.load=~build2[-no-warnings]
+ // [<env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>]
+ //
+ // If the main package is not a build system module or the
+ // configuration is not self-hosted, then we load the
+ // ~build2-no-warnings configuration rather than ~build2.
//
// Note also that we suppress warnings about unused config.* values.
//
@@ -2567,10 +2635,12 @@ build (size_t argc, const char* argv[])
r.status |= run_b (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create(" + module_conf.representation () + ',' + mods + ')',
- "config.config.load=~build2",
+ (module_pkg && selfhost
+ ? "config.config.load=~build2"
+ : "config.config.load=~build2-no-warnings"),
"config.config.persist+='config.*'@unused=drop",
eas,
cas,
@@ -2584,7 +2654,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"create",
"--existing",
@@ -2611,14 +2681,14 @@ build (size_t argc, const char* argv[])
mods += m;
}
- // b create(<dir>) config.config.load=~build2 [<env-config-args>
- // <tgt-config-args>
- // <pkg-config-args>]
+ // b create(<dir>) config.config.load=~build2 <env-config-args>
+ // <tgt-config-args>
+ // <pkg-config-args>
//
r.status |= run_b (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create(" + install_conf.representation () + ',' + mods + ')',
"config.config.load=~build2",
@@ -2635,7 +2705,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"create",
"--existing",
@@ -2661,7 +2731,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"link",
"-d", target_conf,
@@ -2676,7 +2746,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"link",
"-d", target_conf,
@@ -2694,7 +2764,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"link",
"-d", host_conf,
@@ -2714,7 +2784,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"link",
"-d", install_conf,
@@ -2728,7 +2798,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"link",
"-d", install_conf,
@@ -2754,7 +2824,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"add",
"-d", main_pkg_conf,
@@ -2777,7 +2847,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"fetch",
"-d", main_pkg_conf,
@@ -2802,7 +2872,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"add",
"-d", install_conf,
@@ -2825,7 +2895,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"fetch",
"-d", install_conf,
@@ -2851,7 +2921,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"add",
"-d", target_conf,
@@ -2874,7 +2944,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"fetch",
"-d", target_conf,
@@ -3356,7 +3426,7 @@ build (size_t argc, const char* argv[])
log_uuids,
dependency_checksum,
wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"build",
"--configure-only",
@@ -3487,8 +3557,7 @@ build (size_t argc, const char* argv[])
// Update the main package.
//
{
- operation_result* pr (&add_result ("update"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("update"));
change_wd (trace, &r.log, rwd / main_pkg_conf);
@@ -3501,7 +3570,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"update",
step_args (env_args, s),
@@ -3546,7 +3615,7 @@ build (size_t argc, const char* argv[])
// configuration arguments are specified for them.
//
auto test = [&trace, &wre,
- &bkp_step, &bkp_status, &last_cmd,
+ &bkp_step, &bkp_status, &aux_env, &last_cmd,
&step_args, &env_args, &tgt_args, &pkg_args,
&bootstrap_import,
&redist]
@@ -3629,7 +3698,7 @@ build (size_t argc, const char* argv[])
b,
envvars,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"update",
step_args (env_args, s, f),
@@ -3664,7 +3733,7 @@ build (size_t argc, const char* argv[])
b,
envvars,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"test",
"--package-cwd", // See above for details.
@@ -3695,8 +3764,7 @@ build (size_t argc, const char* argv[])
if (has_internal_tests || has_runtime_tests || has_buildtime_tests)
{
- operation_result* pr (&add_result ("test"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("test"));
// Run internal tests.
//
@@ -3718,7 +3786,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"test",
"--package-cwd",
@@ -3823,8 +3891,7 @@ build (size_t argc, const char* argv[])
//
if (install_root)
{
- operation_result* pr (&add_result ("install"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("install"));
change_wd (trace, &r.log, effective_install_conf);
@@ -3847,7 +3914,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"install",
step_args (env_args, s),
@@ -3872,7 +3939,7 @@ build (size_t argc, const char* argv[])
r.status |= run_ldconfig (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
step_args (env_args, s, nullopt, nullopt, ss),
step_args (tgt_args, s, nullopt, nullopt, ss),
step_args (pkg_args, s, nullopt, nullopt, ss));
@@ -3958,8 +4025,7 @@ build (size_t argc, const char* argv[])
if (bindist)
{
- operation_result* pr (&add_result ("bindist"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("bindist"));
// Fail if the breakpoint refers to a bpkg.bindist.* step but this
// step differs from the enabled one.
@@ -4024,7 +4090,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, bindist_result_file, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"bindist",
"--distribution", distribution,
@@ -4271,8 +4337,7 @@ build (size_t argc, const char* argv[])
//
if (sys_install)
{
- operation_result* pr (&add_result ("sys-install"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("sys-install"));
// Fail if the breakpoint refers to the bbot.sys-install step since
// it has no specific command associated.
@@ -4326,7 +4391,7 @@ build (size_t argc, const char* argv[])
r.status |= run_apt_get (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"update",
"--assume-yes",
step_args (env_args, s, nullopt, nullopt, ss),
@@ -4356,7 +4421,7 @@ build (size_t argc, const char* argv[])
r.status |= run_apt_get (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"install",
"--assume-yes",
step_args (env_args, s, nullopt, nullopt, ss),
@@ -4396,7 +4461,7 @@ build (size_t argc, const char* argv[])
r.status |= run_dnf (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"install",
"--refresh",
"--assumeyes",
@@ -4468,7 +4533,7 @@ build (size_t argc, const char* argv[])
r.status |= run_tar (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
true /* sudo */,
"-xf",
f,
@@ -4498,7 +4563,7 @@ build (size_t argc, const char* argv[])
r.status |= run_ldconfig (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
step_args (env_args, s, nullopt, nullopt, ss),
step_args (tgt_args, s, nullopt, nullopt, ss),
step_args (pkg_args, s, nullopt, nullopt, ss));
@@ -4596,8 +4661,7 @@ build (size_t argc, const char* argv[])
if (has_internal_tests || has_runtime_tests || has_buildtime_tests)
{
- operation_result* pr (&add_result ("test-installed"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("test-installed"));
change_wd (trace, &r.log, rwd);
@@ -4661,7 +4725,7 @@ build (size_t argc, const char* argv[])
r.status |= run_b (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create('" + out_dir.representation () + '\'' + mods + ')',
step_args (env_args, s, f),
@@ -4702,7 +4766,7 @@ build (size_t argc, const char* argv[])
b,
envvars,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"configure('" +
subprj_src_dir.representation () + "'@'" +
@@ -4737,7 +4801,7 @@ build (size_t argc, const char* argv[])
b,
envvars,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
test_specs,
step_args (env_args, s),
@@ -4849,7 +4913,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create",
"-d", target_conf,
@@ -4885,7 +4949,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create",
"-d", host_conf,
@@ -4908,17 +4972,17 @@ build (size_t argc, const char* argv[])
//
if (create_module)
{
- // b create(<dir>) config.config.load=~build2
+ // b create(<dir>) config.config.load=~build2-no-warnings
//
step_id b (step_id::bpkg_test_separate_installed_create);
r.status |= run_b (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-V",
"create(" + module_conf.representation () + ",cc)",
- "config.config.load=~build2",
+ "config.config.load=~build2-no-warnings",
"config.config.persist+='config.*'@unused=drop");
if (!r.status)
@@ -4929,7 +4993,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"create",
"--existing",
@@ -4955,7 +5019,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"link",
"-d", target_conf,
@@ -4970,7 +5034,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"link",
"-d", target_conf,
@@ -4988,7 +5052,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"link",
"-d", host_conf,
@@ -5015,7 +5079,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"add",
"-d", runtime_tests_conf,
@@ -5039,7 +5103,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"fetch",
"-d", runtime_tests_conf,
@@ -5066,7 +5130,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"add",
"-d", target_conf,
@@ -5090,7 +5154,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"fetch",
"-d", target_conf,
@@ -5253,7 +5317,7 @@ build (size_t argc, const char* argv[])
b,
envvars,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"build",
"--configure-only",
@@ -5392,8 +5456,7 @@ build (size_t argc, const char* argv[])
(*bindist == step_id::bpkg_bindist_debian ||
*bindist == step_id::bpkg_bindist_fedora))
{
- operation_result* pr (&add_result ("sys-uninstall"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("sys-uninstall"));
// Noop, just for the log record.
//
@@ -5433,7 +5496,7 @@ build (size_t argc, const char* argv[])
r.status |= run_apt_get (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"remove",
"--assume-yes",
step_args (env_args, s, nullopt, nullopt, ss),
@@ -5472,7 +5535,7 @@ build (size_t argc, const char* argv[])
r.status |= run_dnf (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"remove",
"--assumeyes",
step_args (env_args, s, nullopt, nullopt, ss),
@@ -5514,8 +5577,7 @@ build (size_t argc, const char* argv[])
//
if (install_root)
{
- operation_result* pr (&add_result ("uninstall"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("uninstall"));
change_wd (trace, &r.log, effective_install_conf);
@@ -5528,7 +5590,7 @@ build (size_t argc, const char* argv[])
r.status |= run_bpkg (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
"-v",
"uninstall",
step_args (env_args, s),
@@ -5573,8 +5635,7 @@ build (size_t argc, const char* argv[])
if (bindist_upload)
{
- operation_result* pr (&add_result ("upload"));
- operation_result& r (*pr); // @@ TMP: Apple Clang 14.0.3 ICE
+ operation_result& r (add_result ("upload"));
change_wd (trace, &r.log, rwd);
@@ -5730,7 +5791,7 @@ build (size_t argc, const char* argv[])
r.status |= run_tar (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
false /* sudo */,
#ifndef __OpenBSD__
"--format", "ustar",
@@ -5760,7 +5821,7 @@ build (size_t argc, const char* argv[])
r.status |= run_tar (
b,
trace, r.log, wre,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
false /* sudo */,
"-tf",
upload_archive,
@@ -5787,8 +5848,6 @@ build (size_t argc, const char* argv[])
// artifacts preparation for upload, then use this log to report the
// error. Otherwise, add the new log for that.
//
- // @@ TMP: Apple Clang 14.0.3 ICE
- //
operation_result* pr (&rm.results.back ());
if (pr->operation != "upload")
@@ -5832,7 +5891,7 @@ build (size_t argc, const char* argv[])
nullptr /* out_str */, path () /* out_file */,
regexes (),
"" /* name */,
- bkp_step, bkp_status, last_cmd,
+ bkp_step, bkp_status, aux_env, last_cmd,
process_env ());
rm.status |= r.status;
@@ -5942,6 +6001,109 @@ build (size_t argc, const char* argv[])
return 3;
}
+// Parse the task_manifest::auxiliary_environment value into the list of
+// environment variable assignments as expected by the process API. Throw
+// invalid_argument if the auxiliary environment is invalid.
+//
+// If comment is not NULL, then add blank and comment lines prefixed with this
+// string (which is normally either '#' or 'rem'). This mode is used to print
+// the environment into the build log.
+//
+static strings
+parse_auxiliary_environment (const string& s, const char* comment = nullptr)
+{
+ strings r;
+
+ // Note: parse observing blanks.
+ //
+ for (size_t b (0), e (0), m (0), n (s.size ());
+ next_word (s, n, b, e, m, '\n', '\r'), b != n; )
+ {
+ string line (s, b, e - b);
+
+ if (trim (line).empty ()) // Blank.
+ {
+ if (comment != nullptr)
+ r.push_back (comment);
+
+ continue;
+ }
+
+ if (line.front () == '#') // Comment.
+ {
+ if (comment != nullptr)
+ {
+ line.erase (0, 1);
+ line.insert (0, comment);
+ r.push_back (move (line));
+ }
+
+ continue;
+ }
+
+ size_t p (line.find ('='));
+
+ if (p == string::npos)
+ throw invalid_argument ("missing '=' in '" + line + '\'');
+
+ string name (line, 0, p);
+
+ if (trim_right (name).empty () ||
+ name.find_first_not_of (
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789") != string::npos)
+ {
+ throw invalid_argument ("invalid variable name '" + name + '\'');
+ }
+
+ // Disallow certain well-known environment variables.
+ //
+ if (name == "PATH"
+#if defined(_WIN32)
+#elif defined(__APPLE__)
+ || name == "DYLD_LIBRARY_PATH"
+#else // Linux, FreeBSD, NetBSD, OpenBSD
+ || name == "LD_LIBRARY_PATH"
+#endif
+ )
+ {
+ throw invalid_argument ("disallowed variable name '" + name + '\'');
+ }
+
+ line.erase (0, p + 1); // Value.
+
+ // Note: we allow empty values.
+ //
+ if (!trim_left (line).empty ())
+ {
+ // Unquote.
+ //
+ char c (line.front ());
+ if (c == '"' || c == '\'')
+ {
+ if (line.size () == 1 || line.back () != c)
+ throw invalid_argument ("invalid quoted value '" + line + '\'');
+
+ line.pop_back ();
+ line.erase (0, 1);
+ }
+ }
+
+ // Reassemble.
+ //
+ line.insert (0, 1, '=');
+ line.insert (0, name);
+
+ r.push_back (move (line));
+ }
+
+ // Pop the final blank line comment.
+ //
+ if (comment != nullptr && r.back () == comment)
+ r.pop_back ();
+
+ return r;
+}
+
static int
startup ()
{
@@ -5951,11 +6113,13 @@ startup ()
//
// 1. Download the task manifest into the build directory (CWD).
//
- // 2. Parse it and get the target.
+ // 2. Parse it and get the target, environment name, and auxiliary
+ // environment.
//
- // 3. Find the environment setup executable for this target.
+ // 3. Find the environment setup executable for this name.
//
- // 4. Execute the environment setup executable.
+ // 4. Execute the environment setup executable for this target in the
+ // auxiliary environment.
//
// 5. If the environment setup executable fails, then upload the (failed)
// result ourselves.
@@ -5970,6 +6134,33 @@ startup ()
//
task_manifest tm;
+ auto upload_result = [&trace, &tm] (result_status rs,
+ operation_results&& ors)
+ {
+ const string url ("tftp://" + ops.tftp_host () + "/result.manifest.lz4");
+
+ // If we failed before being able to parse the task manifest, use the
+ // "unknown" values for the package name and version.
+ //
+ result_manifest rm {
+ tm.name.empty () ? bpkg::package_name ("unknown") : tm.name,
+ tm.version.empty () ? bpkg::version ("0") : tm.version,
+ rs,
+ move (ors),
+ worker_checksum,
+ nullopt /* dependency_checksum */
+ };
+
+ try
+ {
+ upload_manifest (trace, url, rm, "result");
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to upload result manifest to " << url << ": " << e;
+ }
+ };
+
try
{
// Download the task.
@@ -6051,6 +6242,31 @@ startup ()
fail << "no default environment setup executable in " << env_dir;
}
+ // Auxiliary environment.
+ //
+ strings aux_env;
+ if (tm.auxiliary_environment)
+ {
+ try
+ {
+ aux_env = parse_auxiliary_environment (*tm.auxiliary_environment);
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: include (unparsed) environment into the log so that we can
+ // see what we are dealing with.
+ //
+ operation_result r {
+ "configure",
+ result_status::abort,
+ *tm.auxiliary_environment + "\n" +
+ "error: invalid auxiliary environment: " + e.what () + '\n'};
+
+ upload_result (result_status::abort, {move (r)});
+ return 1;
+ }
+ }
+
// Run it.
//
strings os;
@@ -6088,7 +6304,12 @@ startup ()
// result manifest. There is no reason to retry (most likely there is
// nobody listening on the other end anymore).
//
- switch (run_io_exit (trace, 0, 2, 2, pp, tg, argv0.effect_string (), os))
+ switch (run_io_exit (trace,
+ 0 /* stdin */, 2 /* stdout */, 2 /* stderr */,
+ process_env (pp, aux_env),
+ tg,
+ argv0.effect_string (),
+ os))
{
case 3:
case 2: return 1;
@@ -6098,29 +6319,7 @@ startup ()
}
catch (const failed&)
{
- const string url ("tftp://" + ops.tftp_host () + "/result.manifest.lz4");
-
- // If we failed before being able to parse the task manifest, use the
- // "unknown" values for the package name and version.
- //
- result_manifest rm {
- tm.name.empty () ? bpkg::package_name ("unknown") : tm.name,
- tm.version.empty () ? bpkg::version ("0") : tm.version,
- result_status::abnormal,
- operation_results {},
- worker_checksum,
- nullopt /* dependency_checksum */
- };
-
- try
- {
- upload_manifest (trace, url, rm, "result");
- }
- catch (const io_error& e)
- {
- fail << "unable to upload result manifest to " << url << ": " << e;
- }
-
+ upload_result (result_status::abnormal, operation_results {});
return 1;
}
}
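
To make the contract of parse_auxiliary_environment() above concrete, here is a hypothetical illustration (not part of the patch; the function itself is file-static in worker.cxx, so the expected output is restated by hand on sample data):

#include <string>
#include <vector>
#include <iostream>

int
main ()
{
  // auxiliary-environment value as it could arrive in the task manifest.
  //
  std::string in ("# PostgreSQL 16 test database\n"
                  "\n"
                  "PGSQL_DATABASE_HOST = 192.168.0.1\n"
                  "PGSQL_DATABASE_NAME = ' test database '\n");

  // Expected result when called without a comment prefix (the form passed
  // to process_env() in startup()): names trimmed, values unquoted, comment
  // and blank lines dropped.
  //
  std::vector<std::string> env {
    "PGSQL_DATABASE_HOST=192.168.0.1",
    "PGSQL_DATABASE_NAME= test database "};

  // When called with the "#" comment prefix (the configure log case), the
  // first two lines would instead be kept as "# PostgreSQL 16 test database"
  // and "#".
  //
  std::cout << "input:\n" << in << "\nparsed:\n";

  for (const std::string& e: env)
    std::cout << e << '\n';

  return 0;
}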
diff --git a/buildfile b/buildfile
index 333d727..8a8575d 100644
--- a/buildfile
+++ b/buildfile
@@ -1,7 +1,7 @@
# file : buildfile
# license : MIT; see accompanying LICENSE file
-./: {*/ -tests/ -build/} \
+./: {*/ -tests/ -build/ -doc/} \
doc{INSTALL NEWS README} legal{LICENSE} \
manifest
diff --git a/doc/cli.sh b/doc/cli.sh
index ae36428..0d76c29 100755
--- a/doc/cli.sh
+++ b/doc/cli.sh
@@ -1,6 +1,6 @@
#! /usr/bin/env bash
-version=0.17.0-a.0.z
+version=0.18.0-a.0.z
trap 'exit 1' ERR
set -o errtrace # Trap in functions.
diff --git a/doc/manual.cli b/doc/manual.cli
index 41f0eeb..64f2179 100644
--- a/doc/manual.cli
+++ b/doc/manual.cli
@@ -41,12 +41,24 @@ that are executed on the build host. Inside virtual machines/containers,
agent. Virtual machines and containers running a \c{bbot} instance in the
worker mode are collectively called \i{build machines}.
+In addition to a build machine, a build task may also require one or more
+\i{auxiliary machines} which provide additional components that are required
+for building or testing a package and that are impossible or impractical to
+provide as part of the build machine itself.
+
Let's now examine the workflow in the other direction, that is, from a worker
-to a controller. Once a build machine is booted (by the agent), the worker
-inside connects to the TFTP server running on the build host and downloads the
-\i{build task manifest}. It then proceeds to perform the build task and
-uploads the \i{build artifacts archive}, if any, followed by the \i{build
-result manifest} (which includes build logs) to the TFTP server.
+to a controller. Once a build machine (plus auxiliary machines, if any) is
+booted (by the agent), the worker inside the build machine connects to the
+TFTP server running on the build host and downloads the \i{build task
+manifest}. It then proceeds to perform the build task and uploads the \i{build
+artifacts archive}, if any, followed by the \i{build result manifest} (which
+includes build logs) to the TFTP server.
+
+Unlike build machines, auxiliary machines are not expected to run \c{bbot}.
+Instead, on boot, they are expected to upload to the TFTP server a list of
+environment variables to propagate to the build machine (see the
+\c{auxiliary-environment} task manifest value as well as \l{#arch-worker
+Worker Logic} for details).
Once an agent receives a build task for a specific build machine, it goes
through the following steps. First, it creates a directory on its TFTP server
@@ -94,12 +106,14 @@ implementation of the build artifacts upload handling.
\h#arch-machine-config|Configurations|
-The \c{bbot} architecture distinguishes between a \i{machine configuration},
-\i{build target configuration}, and a \i{build package configuration}. The
-machine configuration captures the operating system, installed compiler
-toolchain, and so on. The same build machine may be used to \"generate\"
-multiple \i{build target configurations}. For example, the same machine can
-normally be used to produce 32/64-bit and debug/optimized builds.
+The \c{bbot} architecture distinguishes between a \i{build machine
+configuration}, \i{build target configuration}, and a \i{build package
+configuration}. The machine configuration captures the operating system,
+installed compiler toolchain, and so on. The same build machine may be used to
+\"generate\" multiple \i{build target configurations}. For example, the same
+machine can normally be used to produce debug/optimized builds.
+
+\h2#arch-machine-config-build-machine|Build Machine Configuration|
The machine configuration is \i{approximately} encoded in its \i{machine
name}. The machine name is a list of components separated with \c{-}.
@@ -110,31 +124,31 @@ component.
The encoding is approximate in a sense that it captures only what's important
to distinguish in a particular \c{bbot} deployment.
-The first component normally identifies the operating system and has the
-following recommended form:
+The first three components normally identify the architecture, operating
+system, and optional variant. They have the following recommended form:
\
-[<arch>_][<class>_]<os>[_<version>]
+<arch>-[<class>_]<os>[_<version>][-<variant>]
\
For example:
\
-windows
-windows_10
-windows_10.1607
-i686_windows_xp
-bsd_freebsd_10
-linux_centos_6.2
-linux_ubuntu_16.04
-macos_10.12
+x86_64-windows
+x86_64-windows_10
+x86_64-windows_10.1607
+x86_64-windows_10-devmode
+x86_64-bsd_freebsd_10
+x86_64-linux_ubuntu_16.04
+x86_64-linux_rhel_9.2-bindist
+aarch64-macos_10.12
\
-The second component normally identifies the installed compiler toolchain and
+The last component normally identifies the installed compiler toolchain and
has the following recommended form:
\
-<id>[<version>][<vendor>][<runtime>]
+<id>[_<version>][_<vendor>][_<runtime>]
\
For example:
@@ -144,38 +158,53 @@ gcc
gcc_6
gcc_6.3
gcc_6.3_mingw_w64
+clang_3.9
clang_3.9_libc++
-clang_3.9_libstdc++
msvc_14
-msvc_14u3
-icc
+msvc_14.3
+clang_15.0_msvc_msvc_17.6
+clang_16.0_llvm_msvc_17.6
\
Some examples of complete machine names:
\
-windows_10-msvc_14u3
-macos_10.12-clang_10.0
-linux_ubuntu_16.04-gcc_6.3
-aarch64_linux_debian_11-gcc_12.2
+x86_64-windows_10-msvc_14.3
+x86_64-macos_10.12-clang_10.0
+aarch64-linux_ubuntu_16.04-gcc_6.3
+aarch64-linux_rhel_9.2-bindist-gcc_11
\
+\h2#arch-machine-config-build-target-config|Build Target Configuration|
+
Similarly, the build target configuration is encoded in a \i{configuration
name} using the same overall format. As described in \l{#arch-controller
Controller Logic}, target configurations are generated from machine
configurations. As a result, it usually makes sense to have the first
component identify the operating systems and the second component \- the
-toolchain with the rest identifying a particular target configuration variant,
-for example, optimized, sanitized, etc. For example:
+compiler toolchain with the rest identifying a particular target configuration
+variant, for example, optimized, sanitized, etc:
+
+\
+[<class>_]<os>[_<version>]-<toolchain>[-<variant>]
+\
+
+For example:
\
-windows-vc_14-O2
-linux-gcc_6-O3_asan
+windows_10-msvc_17.6
+windows_10-msvc_17.6-O2
+windows_10-msvc_17.6-static_O2
+windows_10-msvc_17.6-relocatable
+windows_10-clang_16.0_llvm_msvc_17.6_lld
+linux_debian_12-clang_16_libc++-static_O3
\
-While we can also specify the \c{<arch>} component in a build target
-configuration, this information is best conveyed as part of \c{<target>} as
-described in \l{#arch-controller Controller Logic}.
+Note that there is no \c{<arch>} component in a build target configuration:
+this information is best conveyed as part of \c{<target>} as described in
+\l{#arch-controller Controller Logic}.
+
+\h2#arch-machine-config-build-package-config|Build Package Configuration|
A package can be built in multiple package configurations per target
configuration. A build package configuration normally specifies the options
@@ -187,6 +216,42 @@ originate from the package manifest \c{*-build-config}, \c{*-builds},
\l{bpkg#manifest-package Package Manifest} for more information on these
values.
+
+\h2#arch-machine-config-auxiliary|Auxiliary Machines and Configurations|
+
+Besides the build machine and the build configuration that is derived from it,
+a package build may also involve one or more \i{auxiliary machines} and the
+corresponding \i{auxiliary configurations}.
+
+An auxiliary machine provides additional components that are required for
+building or testing a package and that are impossible or impractical to
+provide as part of the build machine itself. For example, a package may need
+access to a suitably configured database, such as PostgreSQL, in order to run
+its tests.
+
+The auxiliary machine name follows the same overall format as the build
+machine name except that the last component captures the information about the
+additional component in question rather than the compiler toolchain. For
+example:
+
+\
+x86_64-linux_debian_12-postgresql_16
+aarch64-linux_debian_12-mysql_8
+\
+
+The auxiliary configuration name is automatically derived from the machine
+name by removing the \c{<arch>} component. For example:
+
+\
+linux_debian_12-postgresql_16
+linux_debian_12-mysql_8
+\
+
+\N|Note that there is no generation of multiple auxiliary configurations from
+the same auxiliary machine since that would require some communication of the
+desired configuration variant to the machine.|
+
+
\h#arch-machine-header|Machine Header Manifest|
@@ TODO: need ref to general manifest overview in bpkg, or, better yet,
@@ -201,16 +266,28 @@ followed by the detailed description of each value in subsequent sections.
id: <machine-id>
name: <machine-name>
summary: <string>
+[role]: build|auxiliary
+[ram-minimum]: <kib>
+[ram-maximum]: <kib>
\
For example:
\
-id: windows_10-msvc_14-1.3
-name: windows_10-msvc_14
+id: x86_64-windows_10-msvc_14-1.3
+name: x86_64-windows_10-msvc_14
summary: Windows 10 build 1607 with VC 14 update 3
\
+\
+id: aarch64-linux_debian_12-postgresql_16-1.0
+name: aarch64-linux_debian_12-postgresql_16
+summary: Debian 12 with PostgreSQL 16 test user/database
+role: auxiliary
+ram-minimum: 2097152
+ram-maximum: 4194304
+\
+
\h2#arch-machine-header-id|\c{id}|
\
@@ -243,11 +320,34 @@ summary: <string>
The one-line description of the machine.
+\h2#arch-machine-header-role|\c{role}|
+
+\
+[role]: build|auxiliary
+\
+
+The machine role. If unspecified, then \c{build} is assumed.
+
+
+\h2#arch-machine-header-ram|\c{ram-minimum}, \c{ram-maximum}|
+
+\
+[ram-minimum]: <kib>
+[ram-maximum]: <kib>
+\
+
+The minimum and the maximum amount of RAM in KiB that the machine requires.
+The maximum amount is interpreted as the amount beyond which there will be no
+benefit. If unspecified, then it is assumed that the machine will run with
+any minimum amount a deployment will provide and that it will always benefit
+from more RAM, respectively. Neither value should be \c{0}.
+
+
\h#arch-machine|Machine Manifest|
The build machine manifest contains the complete description of a build
machine on the build host (see the Build OS documentation for their origin and
-location). The machine manifest starts with the machine manifest header with
+location). The machine manifest starts with the machine header manifest with
all the header values appearing before any non-header values. The non-header
part of manifest synopsis is presented next followed by the detailed
description of each value in subsequent sections.
@@ -360,8 +460,11 @@ repository-url: <repository-url>
[dependency-checksum]: <checksum>
machine: <machine-name>
+[auxiliary-machine]: <machine-name>
+[auxiliary-machine-<name>]: <machine-name>
target: <target-triplet>
[environment]: <environment-name>
+[auxiliary-environment]: <environment-vars>
[target-config]: <tgt-config-args>
[package-config]: <pkg-config-args>
[host]: true|false
@@ -459,6 +562,21 @@ machine: <machine-name>
The name of the build machine to use.
+\h2#arch-task-auxiliary-machine|\c{auxiliary-machine}|
+
+\
+[auxiliary-machine]: <machine-name>
+[auxiliary-machine-<name>]: <machine-name>
+\
+
+The names of the auxiliary machines to use. These values correspond to the
+\c{build-auxiliary} and \c{build-auxiliary-<name>} values in the package
+manifest. While in the package manifest each value specifies an auxiliary
+configuration pattern, here it specifies the concrete auxiliary machine name
+that was picked by the controller from the list of available auxiliary
+machines (sent as part of the task request) that match this pattern.
+
+
\h2#arch-task-target|\c{target}|
\
@@ -484,6 +602,52 @@ The name of the build environment to use. See \l{#arch-worker Worker Logic}
for details.
+\h2#arch-task-auxiliary-environment|\c{auxiliary-environment}|
+
+\
+[auxiliary-environment]: <environment-vars>
+\
+
+The environment variables describing the auxiliary machines. If any
+\c{auxiliary-machine*} values are specified, then after starting such
+machines, the agent prepares a combined list of environment variables that
+were uploaded by these machines and passes it in this value to the worker.
+
+The format of this value is a list of environment variable assignments,
+one per line, in the form:
+
+\
+<name>=<value>
+\
+
+Whitespace before \c{<name>}, around \c{=}, and after \c{<value>}, as well as
+blank lines and lines that start with \c{#} are ignored. The \c{<name>} part
+must only contain capital alphabetic, numeric, and \c{_} characters. The
+\c{<value>} part as a whole can be single ('\ ') or double (\"\ \")
+quoted. For example:
+
+\
+DATABASE_HOST=192.168.0.1
+DATABASE_PORT=1245
+DATABASE_USER='John \"Johnny\" Doe'
+DATABASE_NAME=\" test database \"
+\
+
+If the corresponding machine is specified as \c{auxiliary-machine-<name>},
+then its environment variables are prefixed with capitalized \c{<name>_}. For
+example:
+
+\
+auxiliary-machine-pgsql: x86_64-linux_debian_12-postgresql_16
+auxiliary-environment:
+\\
+PGSQL_DATABASE_HOST=192.168.0.1
+PGSQL_DATABASE_PORT=1245
+...
+\\
+\
+
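+For illustration, the following sketch shows how these rules could be
+implemented (an assumption for exposition only, not the actual agent/worker
+parser; the helper name is made up):
+
+\
+#include <cctype>
+#include <string>
+#include <vector>
+#include <sstream>
+#include <utility>
+#include <stdexcept>
+
+// Parse an auxiliary-environment value into name/value pairs according to
+// the rules described above.
+//
+static std::vector<std::pair<std::string, std::string>>
+parse_auxiliary_environment (const std::string& text)
+{
+  std::vector<std::pair<std::string, std::string>> r;
+
+  const char sq (0x27); // Single quote.
+  const char dq (0x22); // Double quote.
+
+  auto space = [] (char c)
+  {
+    return std::isspace (static_cast<unsigned char> (c)) != 0;
+  };
+
+  std::istringstream is (text);
+  for (std::string l; std::getline (is, l); )
+  {
+    // Trim the line and skip blank lines and comments.
+    //
+    std::string::size_type b (0), e (l.size ());
+    while (b != e && space (l[b]))     ++b;
+    while (e != b && space (l[e - 1])) --e;
+
+    if (b == e || l[b] == '#')
+      continue;
+
+    std::string::size_type p (l.find ('=', b));
+    if (p == std::string::npos)
+      throw std::invalid_argument (l); // Missing '='.
+
+    // Trim whitespace around '=' to get the name and the value.
+    //
+    std::string::size_type ne (p), vb (p + 1);
+    while (ne != b && space (l[ne - 1])) --ne;
+    while (vb != e && space (l[vb]))     ++vb;
+
+    std::string n (l, b, ne - b);
+    std::string v (l, vb, e - vb);
+
+    // The name must only contain capital letters, digits, and '_'.
+    //
+    for (char c: n)
+      if (!(std::isupper (static_cast<unsigned char> (c)) ||
+            std::isdigit (static_cast<unsigned char> (c)) ||
+            c == '_'))
+        throw std::invalid_argument (n); // Invalid name.
+
+    // Strip single or double quotes around the value as a whole.
+    //
+    if (v.size () > 1 && (v.front () == sq || v.front () == dq) &&
+        v.back () == v.front ())
+      v = std::string (v, 1, v.size () - 2);
+
+    r.emplace_back (std::move (n), std::move (v));
+  }
+
+  return r;
+}
+\
+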
+
\h2#arch-task-target-config|\c{target-config}|
\
@@ -699,7 +863,7 @@ Note that the overall \c{status} value should appear before any per-operation
The \c{skip} status indicates that the received from the controller build task
checksums have not changed and the task execution has therefore been skipped
-under the assumtion that it would have produced the same result. See
+under the assumption that it would have produced the same result. See
\c{agent-checksum}, \c{worker-checksum}, and \c{dependency-checksum} for
details.
@@ -765,9 +929,14 @@ The version of the worker logic used to perform the package build task.
An agent (or controller acting as an agent) sends a task request to its
controller via HTTP/HTTPS POST method (@@ URL/API endpoint). The task request
-starts with the task request manifest followed by a list of machine manifests.
-The task request manifest synopsis is presented next followed by the detailed
-description of each value in subsequent sections.
+starts with the task request manifest followed by a list of machine header
+manifests. The task request manifest synopsis is presented next followed by
+the detailed description of each value in subsequent sections.
+
+\N|The controller is expected to pick each offered machine header manifest
+only once. If an agent is capable of running multiple instances of the same
+machine, then it must send the matching number of machine header manifests for
+such a machine.|
\
agent: <name>
@@ -776,6 +945,7 @@ toolchain-version: <standard-version>
[interactive-mode]: false|true|both
[interactive-login]: <login>
[fingerprint]: <agent-fingerprint>
+[auxiliary-ram]: <kib>
\
@@ -842,6 +1012,18 @@ authentication in which case it should respond with the 401 (unauthorized)
HTTP status code.
+\h2#arch-task-req-auxiliary-ram|\c{auxiliary-ram}|
+
+\
+[auxiliary-ram]: <kib>
+\
+
+The amount of RAM in KiB that is available for running auxiliary machines. If
+unspecified, then it is assumed that there is no hard limit (that is, the
+agent can allocate up to the host's available RAM minus the amount required
+to run the build machine).
+
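+One way to honor this limit together with the per-machine \c{ram-minimum}
+and \c{ram-maximum} values could look along these lines (purely an
+illustration of the semantics, not the algorithm the agent actually uses;
+all amounts are in KiB with \c{0} standing for unspecified):
+
+\
+#include <vector>
+#include <cstdint>
+#include <cstddef>
+#include <algorithm>
+
+// Per-machine RAM requirements from the machine header manifest.
+//
+struct aux_ram
+{
+  std::uint64_t minimum; // 0 if unspecified.
+  std::uint64_t maximum; // 0 if unspecified.
+};
+
+// Give each machine its minimum (or a default if unspecified) and then hand
+// out the remaining RAM up to each machine's maximum, in order. Return an
+// empty vector if the limit cannot accommodate all the minimums.
+//
+static std::vector<std::uint64_t>
+allocate_auxiliary_ram (std::uint64_t limit, // auxiliary-ram value.
+                        std::uint64_t def,   // Default minimum.
+                        const std::vector<aux_ram>& ms)
+{
+  std::vector<std::uint64_t> r;
+  std::uint64_t used (0);
+
+  for (const aux_ram& m: ms)
+  {
+    std::uint64_t v (m.minimum != 0 ? m.minimum : def);
+    used += v;
+    r.push_back (v);
+  }
+
+  if (used > limit)
+    return {};
+
+  for (std::size_t i (0); i != ms.size () && used != limit; ++i)
+  {
+    std::uint64_t cap (ms[i].maximum != 0 ? ms[i].maximum : limit);
+
+    if (cap > r[i])
+    {
+      std::uint64_t extra (std::min (cap - r[i], limit - used));
+      r[i] += extra;
+      used += extra;
+    }
+  }
+
+  return r;
+}
+\
+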
+
\h#arch-task-res|Task Response Manifest|
A controller sends the task response manifest in response to the task request
@@ -969,20 +1151,24 @@ established for a particular build target. The environment has three
components: the execution environment (environment variables, etc), build
system modules, as well as configuration options and variables.
-Setting up of the environment is performed by an executable (script, batch
-file, etc). Specifically, upon receiving a build task, if it specifies the
-environment name then the worker looks for the environment setup executable
-with this name in a specific directory and for the executable called
-\c{default} otherwise. Not being able to locate the environment executable is
-an error.
-
-Once the environment setup executable is determined, the worker re-executes
-itself as that executable passing to it as command line arguments the target
-name, the path to the \c{bbot} worker to be executed once the environment is
-setup, and any additional options that need to be propagated to the re-executed
-worker. The environment setup executable is executed in the build directory as
-its current working directory. The build directory contains the build task
-\c{task.manifest} file.
+Setting up of the execution environment is performed by an executable (script,
+batch file, etc). Specifically, upon receiving a build task, if it specifies
+the environment name then the worker looks for the environment setup
+executable with this name in a specific directory and for the executable
+called \c{default} otherwise. Not being able to locate the environment
+executable is an error.
+
+In addition to the environment executable, if the task requires any auxiliary
+machines, then the \c{auxiliary-environment} value from the task manifest is
+incorporated into the execution environment.
+
+Specifically, once the environment setup executable is determined, the worker
+re-executes itself in the auxiliary environment and as that executable,
+passing to it as command line arguments the target name, the path to the
+\c{bbot} worker to be executed once the environment is set up, and any
+additional options that need to be propagated to the re-executed worker. The
+environment setup executable is executed in the build directory as its
+current working directory. The build directory contains the build task
+\c{task.manifest} file.
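+
+A rough POSIX-only illustration of this re-execution step (an assumption for
+exposition, with error handling and Windows support omitted; the actual
+worker logic may differ in details and the function name is made up) could
+look along these lines:
+
+\
+#include <string>
+#include <vector>
+#include <utility>
+
+#include <stdlib.h>  // setenv() (POSIX)
+#include <unistd.h>  // execv(), _exit() (POSIX)
+
+// Enter the auxiliary environment and replace the current process with the
+// environment setup executable, passing it the target, the path to the
+// worker, and any additional options.
+//
+[[noreturn]] static void
+exec_environment (
+  const std::vector<std::pair<std::string, std::string>>& aux_env,
+  const std::string& setup,   // Path to the environment setup executable.
+  const std::string& target,  // Target name.
+  const std::string& worker,  // Path to the bbot worker to re-execute.
+  const std::vector<std::string>& options)
+{
+  // Incorporate the auxiliary environment into the process environment.
+  //
+  for (const auto& v: aux_env)
+    setenv (v.first.c_str (), v.second.c_str (), 1 /* Overwrite. */);
+
+  std::vector<char*> argv;
+  argv.push_back (const_cast<char*> (setup.c_str ()));
+  argv.push_back (const_cast<char*> (target.c_str ()));
+  argv.push_back (const_cast<char*> (worker.c_str ()));
+
+  for (const std::string& o: options)
+    argv.push_back (const_cast<char*> (o.c_str ()));
+
+  argv.push_back (nullptr);
+
+  execv (setup.c_str (), argv.data ());
+  _exit (1); // execv() only returns on error.
+}
+\
+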
The environment setup executable sets up the necessary execution environment
for example by adjusting \c{PATH} or running a suitable \c{vcvars} batch file.
@@ -1319,7 +1505,7 @@ Worker script for \c{host} packages:
{
# [bpkg.create]
#
- b -V create(<host-conf>, cc) config.config.load=~host
+ b -V create(<host-conf>, cc) config.config.load=~host-no-warnings
bpkg -v create --existing --type host -d <host-conf>
}
@@ -1363,7 +1549,7 @@ bpkg -v fetch -d <host-conf> --trust <repository-fp>
# [bpkg.create]
#
- b -V create(<module-conf>, cc) config.config.load=~build2
+ b -V create(<module-conf>, cc) config.config.load=~build2-no-warnings
bpkg -v create --existing --type build2 -d <module-conf>
# [bpkg.link]
@@ -1615,7 +1801,9 @@ bpkg -v update -d <host-conf> <package-name>
# [bpkg.test-separate-installed.create]
#
- b -V create(<module-conf>, cc) config.config.load=~build2
+ b -V create(<module-conf>, cc) \\
+ config.config.load=~build2-no-warnings
+
bpkg -v create --existing --type build2 -d <module-conf>
# [bpkg.test-separate-installed.link]
@@ -1748,7 +1936,7 @@ Worker script for \c{module} packages:
{
# [bpkg.create]
#
- b -V create(<module-conf>, cc) config.config.load=~build2
+ b -V create(<module-conf>, cc) config.config.load=~build2-no-warnings
bpkg -v create --existing --type build2 -d <module-conf>
}
@@ -1790,7 +1978,7 @@ bpkg -v fetch -d <module-conf> --trust <repository-fp>
# [bpkg.create]
#
- b -V create(<host-conf>, cc) config.config.load=~host
+ b -V create(<host-conf>, cc) config.config.load=~host-no-warnings
bpkg -v create --existing --type host -d <host-conf>
# [bpkg.link]
@@ -1960,7 +2148,9 @@ bpkg -v update -d <module-conf> <package-name>
{
# [bpkg.test-separate-installed.create]
#
- b -V create(<module-conf>, cc) config.config.load=~build2
+ b -V create(<module-conf>, cc) \\
+ config.config.load=~build2-no-warnings
+
bpkg -v create --existing --type build2 -d <module-conf>
# bpkg.test-separate-installed.create (
@@ -2211,7 +2401,7 @@ manifest. The matched machine name, the target, the environment name,
configuration options/variables, and regular expressions are included into the
build task manifest.
-Values in the \c{<tgt-config-arg>} list can be opionally prefixed with the
+Values in the \c{<tgt-config-arg>} list can be optionally prefixed with the
\i{step id} or a leading portion thereof to restrict it to a specific step,
operation, phase, or tool in the \i{worker script} (see \l{#arch-worker Worker
Logic}). The prefix can optionally begin with the \c{+} or \c{-} character (in
diff --git a/manifest b/manifest
index 5fcb879..73bca1e 100644
--- a/manifest
+++ b/manifest
@@ -1,6 +1,6 @@
: 1
name: bbot
-version: 0.17.0-a.0.z
+version: 0.18.0-a.0.z
project: build2
summary: build2 build bot
license: MIT
@@ -15,9 +15,9 @@ email: users@build2.org
build-warning-email: builds@build2.org
builds: all : &host
requires: c++14
-depends: * build2 >= 0.16.0-
-depends: * bpkg >= 0.16.0-
+depends: * build2 >= 0.16.0
+depends: * bpkg >= 0.16.0
# @@ DEP Should probably become conditional dependency.
#requires: ? cli ; Only required if changing .cli files.
-depends: libbutl [0.17.0-a.0.1 0.17.0-a.1)
-depends: libbbot [0.17.0-a.0.1 0.17.0-a.1)
+depends: libbutl [0.18.0-a.0.1 0.18.0-a.1)
+depends: libbbot [0.18.0-a.0.1 0.18.0-a.1)
diff --git a/repositories.manifest b/repositories.manifest
index b10bd68..dd6bafd 100644
--- a/repositories.manifest
+++ b/repositories.manifest
@@ -1,10 +1,8 @@
: 1
summary: build2 build bot repository
-:
-role: prerequisite
-location: ../libbutl.git##HEAD
:
role: prerequisite
-location: ../libbbot.git##HEAD
+location: https://stage.build2.org/1
+trust: EC:50:13:E2:3D:F7:92:B4:50:0B:BF:2A:1F:7D:31:04:C6:57:6F:BC:BE:04:2E:E0:58:14:FA:66:66:21:1F:14
diff --git a/tests/integration/testscript b/tests/integration/testscript
index 93a6807..2dcd849 100644
--- a/tests/integration/testscript
+++ b/tests/integration/testscript
@@ -64,7 +64,7 @@ b.test-installed.configure:\"config.cc.loptions=-L'$~/install/lib'\" \
bpkg.test-separate-installed.create:\"config.cc.loptions=-L'$~/install/lib'\""
pkg = libhello
-ver = 1.0.0+11
+ver = 1.0.0+12
#rep_url = "https://git.build2.org/hello/libhello.git#1.0"
#rep_type = git
rep_url = https://stage.build2.org/1
@@ -153,14 +153,14 @@ rfp = yes
#
#\
pkg = libbuild2-hello
-ver = 0.1.0
+ver = 0.2.0
rep_url = "https://github.com/build2/libbuild2-hello.git#master"
rep_type = git
#rep_url = https://stage.build2.org/1
#rep_type = pkg
rfp = yes
-tests="tests: * libbuild2-hello-tests == $ver"
-host='host: true'
+tests = "tests: * libbuild2-hello-tests == $ver"
+host = 'host: true'
#\
#package_config = 'package-config: -bpkg.install:'
#\
@@ -175,7 +175,7 @@ bpkg.module.create:config.bin.rpath=[null]
#
#\
pkg = libbuild2-kconfig
-ver = 0.3.0-a.0.20221118053819.f702eb65da87
+ver = 0.3.0
rep_url = "https://github.com/build2/libbuild2-kconfig.git#master"
rep_type = git
#ver = 0.1.0-a.0.20200910053253.a71aa3f3938b
@@ -409,7 +409,7 @@ rfp = yes
#\
pkg = libodb-sqlite
ver = 2.5.0-b.26.20240131175206.1c7f67f47770
-rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git"
rep_type = git
rfp = yes
tests="tests: odb-tests == $ver"' ? (!$defined(config.odb_tests.database)) config.odb_tests.database=sqlite'
@@ -424,7 +424,7 @@ package_config = 'package-config:
#\
pkg = libodb-pgsql
ver = 2.5.0-b.26.20240131175206.1c7f67f47770
-rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git"
rep_type = git
rfp = yes
tests="tests: odb-tests == $ver"' ? (!$defined(config.odb_tests.database)) config.odb_tests.database=pgsql'
@@ -439,7 +439,7 @@ package_config = 'package-config:
#\
pkg = odb-tests
ver = 2.5.0-b.26.20240131175206.1c7f67f47770
-rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git"
rep_type = git
rfp = yes
#\
@@ -453,7 +453,7 @@ config.odb_tests.database="sqlite pgsql"
#\
pkg = libodb-oracle
ver = 2.5.0-b.26.20240201133448.3fa01c83a095
-rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git"
rep_type = git
rfp = yes
package_config = 'package-config:
@@ -465,13 +465,25 @@ config.cc.poptions+=-I/usr/include/oracle/12.2/client64 config.cc.loptions+=-L/u
#\
pkg = libodb-qt
ver = 2.5.0-b.26.20240201180613.633ad7ccad39
-rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git#multi-package"
+rep_url = "https://git.codesynthesis.com/var/scm/odb/odb.git"
rep_type = git
rfp = yes
#\
#interactive="interactive: b.test-installed.configure"
#interactive="interactive: warning"
+#\
+aux_env = 'auxiliary-environment:
+\
+# x86_64-linux_debian_12-postgresql_15
+#
+DATABASE_HOST=10.0.213.126
+DATABASE_PORT=5432
+DATABASE_USER=test
+DATABASE_NAME=test
+\
+'
+#\
+cat <<"EOI" >=task
: 1
@@ -484,7 +496,8 @@ rfp = yes
$tests
machine: $machine
target: $target
- config: $config
+ $aux_env
+ target-config: $config
$package_config
$interactive
$host
diff --git a/tests/machine/testscript b/tests/machine/testscript
index 1590f96..76921ed 100644
--- a/tests/machine/testscript
+++ b/tests/machine/testscript
@@ -7,7 +7,7 @@
# iptables -t nat -S
# iptables -S
-test.options = --cpu 8 --ram 10485760 --verbose 3
+test.options = --cpu 8 --build-ram 10485760 --verbose 3
tftp = /build/tftp
machines = /btrfs/boris/machines # @@ TODO