aboutsummaryrefslogtreecommitdiff
path: root/mod
diff options
context:
space:
mode:
Diffstat (limited to 'mod')
-rw-r--r--mod/build-config-module.cxx53
-rw-r--r--mod/build-config-module.hxx57
-rw-r--r--mod/build-config.hxx49
-rw-r--r--mod/build-result-module.cxx286
-rw-r--r--mod/build-result-module.hxx78
-rw-r--r--mod/build-target-config.cxx (renamed from mod/build-config.cxx)27
-rw-r--r--mod/build-target-config.hxx79
-rw-r--r--mod/build.cxx177
-rw-r--r--mod/build.hxx19
-rw-r--r--mod/buildfile9
-rw-r--r--mod/ci-common.cxx494
-rw-r--r--mod/ci-common.hxx96
-rw-r--r--mod/database-module.cxx56
-rw-r--r--mod/database-module.hxx23
-rw-r--r--mod/database.cxx8
-rw-r--r--mod/diagnostics.hxx2
-rw-r--r--mod/external-handler.cxx12
-rw-r--r--mod/external-handler.hxx2
-rw-r--r--mod/mod-build-configs.cxx66
-rw-r--r--mod/mod-build-force.cxx162
-rw-r--r--mod/mod-build-force.hxx8
-rw-r--r--mod/mod-build-log.cxx80
-rw-r--r--mod/mod-build-result.cxx710
-rw-r--r--mod/mod-build-result.hxx13
-rw-r--r--mod/mod-build-task.cxx2267
-rw-r--r--mod/mod-build-task.hxx8
-rw-r--r--mod/mod-builds.cxx789
-rw-r--r--mod/mod-ci.cxx605
-rw-r--r--mod/mod-ci.hxx57
-rw-r--r--mod/mod-package-details.cxx29
-rw-r--r--mod/mod-package-version-details.cxx509
-rw-r--r--mod/mod-packages.cxx23
-rw-r--r--mod/mod-repository-details.cxx6
-rw-r--r--mod/mod-repository-root.cxx53
-rw-r--r--mod/mod-repository-root.hxx5
-rw-r--r--mod/mod-submit.cxx26
-rw-r--r--mod/mod-upload.cxx763
-rw-r--r--mod/mod-upload.hxx41
-rw-r--r--mod/module.cli445
-rw-r--r--mod/module.cxx63
-rw-r--r--mod/module.hxx5
-rw-r--r--mod/options-types.hxx13
-rw-r--r--mod/page.cxx304
-rw-r--r--mod/page.hxx72
-rw-r--r--mod/tenant-service.hxx155
-rw-r--r--mod/types-parsers.cxx114
-rw-r--r--mod/types-parsers.hxx31
47 files changed, 6861 insertions, 2088 deletions
diff --git a/mod/build-config-module.cxx b/mod/build-config-module.cxx
index 831cb78..97c9f9e 100644
--- a/mod/build-config-module.cxx
+++ b/mod/build-config-module.cxx
@@ -8,36 +8,35 @@
#include <map>
#include <sstream>
-#include <libbutl/sha256.mxx>
-#include <libbutl/utility.mxx> // throw_generic_error()
-#include <libbutl/openssl.mxx>
-#include <libbutl/filesystem.mxx> // dir_iterator, dir_entry
+#include <libbutl/sha256.hxx>
+#include <libbutl/utility.hxx> // throw_generic_error()
+#include <libbutl/openssl.hxx>
+#include <libbutl/filesystem.hxx> // dir_iterator, dir_entry
namespace brep
{
using namespace std;
using namespace butl;
using namespace bpkg;
- using namespace bbot;
- // Return pointer to the shared build configurations instance, creating one
- // on the first call. Throw tab_parsing on parsing error, io_error on the
- // underlying OS error. Note: not thread-safe.
+ // Return pointer to the shared build target configurations instance,
+ // creating one on the first call. Throw tab_parsing on parsing error,
+ // io_error on the underlying OS error. Note: not thread-safe.
//
- static shared_ptr<const build_configs>
+ static shared_ptr<const build_target_configs>
shared_build_config (const path& p)
{
- static map<path, weak_ptr<build_configs>> configs;
+ static map<path, weak_ptr<build_target_configs>> configs;
auto i (configs.find (p));
if (i != configs.end ())
{
- if (shared_ptr<build_configs> c = i->second.lock ())
+ if (shared_ptr<build_target_configs> c = i->second.lock ())
return c;
}
- shared_ptr<build_configs> c (
- make_shared<build_configs> (parse_buildtab (p)));
+ shared_ptr<build_target_configs> c (
+ make_shared<build_target_configs> (bbot::parse_buildtab (p)));
configs[p] = c;
return c;
@@ -72,7 +71,7 @@ namespace brep
try
{
- for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow))
{
if (de.path ().extension () == "pem" &&
de.type () == entry_type::regular)
@@ -109,7 +108,7 @@ namespace brep
catch (const system_error& e)
{
ostringstream os;
- os<< "unable to iterate over agents keys directory '" << d << "'";
+ os << "unable to iterate over agents keys directory '" << d << "'";
throw_generic_error (e.code ().value (), os.str ().c_str ());
}
@@ -122,7 +121,7 @@ namespace brep
{
try
{
- build_conf_ = shared_build_config (bo.build_config ());
+ target_conf_ = shared_build_config (bo.build_config ());
}
catch (const io_error& e)
{
@@ -137,29 +136,21 @@ namespace brep
bot_agent_key_map_ =
shared_bot_agent_keys (bo, bo.build_bot_agent_keys ());
- cstrings conf_names;
-
- using conf_map_type = map<const char*,
- const build_config*,
- compare_c_string>;
+ using conf_map_type = map<build_target_config_id,
+ const build_target_config*>;
conf_map_type conf_map;
- for (const auto& c: *build_conf_)
- {
- const char* cn (c.name.c_str ());
- conf_map[cn] = &c;
- conf_names.push_back (cn);
- }
+ for (const auto& c: *target_conf_)
+ conf_map[build_target_config_id {c.target, c.name}] = &c;
- build_conf_names_ = make_shared<cstrings> (move (conf_names));
- build_conf_map_ = make_shared<conf_map_type> (move (conf_map));
+ target_conf_map_ = make_shared<conf_map_type> (move (conf_map));
}
bool build_config_module::
- belongs (const bbot::build_config& cfg, const char* cls) const
+ belongs (const build_target_config& cfg, const char* cls) const
{
- const map<string, string>& im (build_conf_->class_inheritance_map);
+ const map<string, string>& im (target_conf_->class_inheritance_map);
for (const string& c: cfg.classes)
{
diff --git a/mod/build-config-module.hxx b/mod/build-config-module.hxx
index ba2698d..78661c3 100644
--- a/mod/build-config-module.hxx
+++ b/mod/build-config-module.hxx
@@ -6,17 +6,15 @@
#include <map>
-#include <libbutl/utility.mxx> // compare_c_string
+#include <libbutl/target-triplet.hxx>
#include <libbpkg/manifest.hxx>
-#include <libbbot/build-config.hxx>
-
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/build-config.hxx>
#include <mod/module-options.hxx>
+#include <mod/build-target-config.hxx>
// Base class for modules that utilize the build controller configuration.
//
@@ -39,16 +37,18 @@ namespace brep
init (const options::build&);
bool
- exclude (const small_vector<bpkg::build_class_expr, 1>& exprs,
- const vector<bpkg::build_constraint>& constrs,
- const bbot::build_config& cfg,
+ exclude (const build_package_config& pc,
+ const build_class_exprs& common_builds,
+ const build_constraints& common_constraints,
+ const build_target_config& tc,
string* reason = nullptr,
bool default_all_ucs = false) const
{
- return brep::exclude (exprs,
- constrs,
- cfg,
- build_conf_->class_inheritance_map,
+ return brep::exclude (pc,
+ common_builds,
+ common_constraints,
+ tc,
+ target_conf_->class_inheritance_map,
reason,
default_all_ucs);
}
@@ -56,26 +56,30 @@ namespace brep
// Check if the configuration belongs to the specified class.
//
bool
- belongs (const bbot::build_config&, const char*) const;
+ belongs (const build_target_config&, const char*) const;
bool
- belongs (const bbot::build_config& cfg, const string& cls) const
+ belongs (const build_target_config& cfg, const string& cls) const
{
return belongs (cfg, cls.c_str ());
}
- // Configuration/toolchain combination that, in particular, can be used as
- // a set value.
+ // Target/configuration/toolchain combination that, in particular, can be
+ // used as a set value.
//
- // Note: contains shallow references to the configuration, toolchain name,
- // and version.
+ // Note: all members are the shallow references.
//
struct config_toolchain
{
- const string& configuration;
+ const butl::target_triplet& target;
+ const string& target_config;
+ const string& package_config;
const string& toolchain_name;
const bpkg::version& toolchain_version;
+ // Note: the comparison reflects the order of unbuilt configurations on
+ // the Builds page.
+ //
bool
operator< (const config_toolchain& ct) const
{
@@ -85,19 +89,24 @@ namespace brep
if (toolchain_version != ct.toolchain_version)
return toolchain_version > ct.toolchain_version;
- return configuration.compare (ct.configuration) < 0;
+ if (int r = target.compare (ct.target))
+ return r < 0;
+
+ if (int r = target_config.compare (ct.target_config))
+ return r < 0;
+
+ return package_config.compare (ct.package_config) < 0;
}
};
protected:
// Build configurations.
//
- shared_ptr<const bbot::build_configs> build_conf_;
- shared_ptr<const cstrings> build_conf_names_;
+ shared_ptr<const build_target_configs> target_conf_;
- shared_ptr<const std::map<const char*,
- const bbot::build_config*,
- butl::compare_c_string>> build_conf_map_;
+ shared_ptr<const std::map<build_target_config_id,
+ const build_target_config*>>
+ target_conf_map_;
// Map of build bot agent public keys fingerprints to the key file paths.
//
diff --git a/mod/build-config.hxx b/mod/build-config.hxx
deleted file mode 100644
index e8dfe07..0000000
--- a/mod/build-config.hxx
+++ /dev/null
@@ -1,49 +0,0 @@
-// file : mod/build-config.hxx -*- C++ -*-
-// license : MIT; see accompanying LICENSE file
-
-#ifndef MOD_BUILD_CONFIG_HXX
-#define MOD_BUILD_CONFIG_HXX
-
-#include <map>
-
-#include <libbpkg/manifest.hxx>
-
-#include <libbbot/build-config.hxx>
-
-#include <libbrep/types.hxx>
-#include <libbrep/utility.hxx>
-
-namespace brep
-{
- // Return true if the specified build configuration is excluded by a package
- // based on its underlying build class set, build class expressions, and
- // build constraints, potentially extending the underlying set with the
- // special classes. Set the exclusion reason if requested. Optionally use
- // the `all` class as a default underlying build class set rather than the
- // `default` class (which is, for example, the case for the external test
- // packages not to reduce their build configuration set needlessly).
- //
- bool
- exclude (const small_vector<bpkg::build_class_expr, 1>&,
- const vector<bpkg::build_constraint>&,
- const bbot::build_config&,
- const std::map<string, string>& class_inheritance_map,
- string* reason = nullptr,
- bool default_all_ucs = false);
-
- // Convert dash-separated components (target, build configuration name,
- // machine name) or a pattern thereof into a path, replacing dashes with
- // slashes (directory separators), `**` with `*/**/*`, and appending the
- // trailing slash for a subsequent match using the path_match()
- // functionality (the idea here is for `linux**` to match `linux-gcc` which
- // is quite natural to expect). Throw invalid_path if the resulting path is
- // invalid.
- //
- // Note that the match_absent path match flag must be used for the above
- // `**` transformation to work.
- //
- path
- dash_components_to_path (const string&);
-}
-
-#endif // MOD_BUILD_CONFIG
diff --git a/mod/build-result-module.cxx b/mod/build-result-module.cxx
new file mode 100644
index 0000000..68fbe4c
--- /dev/null
+++ b/mod/build-result-module.cxx
@@ -0,0 +1,286 @@
+// file : mod/build-result-module.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <mod/build-result-module.hxx>
+
+#include <libbutl/openssl.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/process-io.hxx>
+#include <libbutl/semantic-version.hxx>
+
+namespace brep
+{
+ using namespace std;
+ using namespace butl;
+
+ // While currently the user-defined copy constructor is not required (we
+ // don't need to deep copy nullptr's), it is a good idea to keep the
+ // placeholder ready for less trivial cases.
+ //
+ build_result_module::
+ build_result_module (const build_result_module& r)
+ : database_module (r),
+ build_config_module (r),
+ use_openssl_pkeyutl_ (r.initialized_ ? r.use_openssl_pkeyutl_ : false)
+ {
+ }
+
+ void build_result_module::
+ init (const options::build& bo, const options::build_db& bdo)
+ {
+ HANDLER_DIAG;
+
+ build_config_module::init (bo);
+ database_module::init (bdo, bdo.build_db_retry ());
+
+ try
+ {
+ optional<openssl_info> oi (
+ openssl::info ([&trace, this] (const char* args[], size_t n)
+ {
+ l2 ([&]{trace << process_args {args, n};});
+ },
+ 2,
+ bo.openssl ()));
+
+ use_openssl_pkeyutl_ = oi &&
+ oi->name == "OpenSSL" &&
+ oi->version >= semantic_version {3, 0, 0};
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to obtain openssl version: " << e;
+ }
+ }
+
+ build_result_module::parse_session_result build_result_module::
+ parse_session (const string& s) const
+ {
+ using brep::version; // Not to confuse with module::version.
+
+ parse_session_result r;
+
+ size_t p (s.find ('/')); // End of tenant.
+
+ if (p == string::npos)
+ throw invalid_argument ("no package name");
+
+ if (tenant.compare (0, tenant.size (), s, 0, p) != 0)
+ throw invalid_argument ("tenant mismatch");
+
+ size_t b (p + 1); // Start of package name.
+ p = s.find ('/', b); // End of package name.
+
+ if (p == b)
+ throw invalid_argument ("empty package name");
+
+ if (p == string::npos)
+ throw invalid_argument ("no package version");
+
+ package_name name;
+
+ try
+ {
+ name = package_name (string (s, b, p - b));
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_argument (
+ string ("invalid package name : ") + e.what ());
+ }
+
+ b = p + 1; // Start of version.
+ p = s.find ('/', b); // End of version.
+
+ if (p == string::npos)
+ throw invalid_argument ("no target");
+
+ auto parse_version = [&s, &b, &p] (const char* what) -> version
+ {
+ // Intercept exception handling to add the parsing error attribution.
+ //
+ try
+ {
+ return brep::version (string (s, b, p - b));
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_argument (
+ string ("invalid ") + what + ": " + e.what ());
+ }
+ };
+
+ r.package_version = parse_version ("package version");
+
+ b = p + 1; // Start of target.
+ p = s.find ('/', b); // End of target.
+
+ if (p == string::npos)
+ throw invalid_argument ("no target configuration name");
+
+ target_triplet target;
+ try
+ {
+ target = target_triplet (string (s, b, p - b));
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_argument (string ("invalid target: ") + e.what ());
+ }
+
+ b = p + 1; // Start of target configuration name.
+ p = s.find ('/', b); // End of target configuration name.
+
+ if (p == string::npos)
+ throw invalid_argument ("no package configuration name");
+
+ string target_config (s, b, p - b);
+
+ if (target_config.empty ())
+ throw invalid_argument ("empty target configuration name");
+
+ b = p + 1; // Start of package configuration name.
+ p = s.find ('/', b); // End of package configuration name.
+
+ if (p == string::npos)
+ throw invalid_argument ("no toolchain name");
+
+ string package_config (s, b, p - b);
+
+ if (package_config.empty ())
+ throw invalid_argument ("empty package configuration name");
+
+ b = p + 1; // Start of toolchain name.
+ p = s.find ('/', b); // End of toolchain name.
+
+ if (p == string::npos)
+ throw invalid_argument ("no toolchain version");
+
+ string toolchain_name (s, b, p - b);
+
+ if (toolchain_name.empty ())
+ throw invalid_argument ("empty toolchain name");
+
+ b = p + 1; // Start of toolchain version.
+ p = s.find ('/', b); // End of toolchain version.
+
+ if (p == string::npos)
+ throw invalid_argument ("no timestamp");
+
+ r.toolchain_version = parse_version ("toolchain version");
+
+ r.id = build_id (package_id (move (tenant), move (name), r.package_version),
+ move (target),
+ move (target_config),
+ move (package_config),
+ move (toolchain_name),
+ r.toolchain_version);
+
+ try
+ {
+ size_t tsn;
+ string ts (s, p + 1);
+
+ r.timestamp = timestamp (chrono::duration_cast<timestamp::duration> (
+ chrono::nanoseconds (stoull (ts, &tsn))));
+
+ if (tsn != ts.size ())
+ throw invalid_argument ("trailing junk");
+ }
+ // Handle invalid_argument or out_of_range (both derive from logic_error),
+ // that can be thrown by stoull().
+ //
+ catch (const logic_error& e)
+ {
+ throw invalid_argument (string ("invalid timestamp: ") + e.what ());
+ }
+
+ return r;
+ }
+
+ bool build_result_module::
+ authenticate_session (const options::build& o,
+ const optional<vector<char>>& challenge,
+ const build& b,
+ const string& session) const
+ {
+ HANDLER_DIAG;
+
+ auto warn_auth = [&session, &warn] (const string& d)
+ {
+ warn << "session '" << session << "' authentication failed: " << d;
+ };
+
+ bool r (false);
+
+ // Must both be present or absent.
+ //
+ if (!b.agent_challenge != !challenge)
+ {
+ warn_auth (challenge ? "unexpected challenge": "challenge is expected");
+ }
+ else if (bot_agent_key_map_ == nullptr) // Authentication is disabled.
+ {
+ r = true;
+ }
+ else if (!b.agent_challenge) // Authentication is recently enabled.
+ {
+ warn_auth ("challenge is required now");
+ }
+ else
+ {
+ assert (b.agent_fingerprint && challenge);
+ auto i (bot_agent_key_map_->find (*b.agent_fingerprint));
+
+ // The agent's key is recently replaced.
+ //
+ if (i == bot_agent_key_map_->end ())
+ {
+ warn_auth ("agent's public key not found");
+ }
+ else
+ try
+ {
+ openssl os ([&trace, this] (const char* args[], size_t n)
+ {
+ l2 ([&]{trace << process_args {args, n};});
+ },
+ path ("-"), fdstream_mode::text, 2,
+ process_env (o.openssl (), o.openssl_envvar ()),
+ use_openssl_pkeyutl_ ? "pkeyutl" : "rsautl",
+ o.openssl_option (),
+ use_openssl_pkeyutl_ ? "-verifyrecover" : "-verify",
+ "-pubin",
+ "-inkey",
+ i->second);
+
+ for (const auto& c: *challenge)
+ os.out.put (c); // Sets badbit on failure.
+
+ os.out.close ();
+
+ string s;
+ getline (os.in, s);
+
+ bool v (os.in.eof ());
+ os.in.close ();
+
+ if (os.wait () && v)
+ {
+ r = (s == *b.agent_challenge);
+
+ if (!r)
+ warn_auth ("challenge mismatched");
+ }
+ else // The signature is presumably meaningless.
+ warn_auth ("unable to verify challenge");
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to verify challenge: " << e;
+ }
+ }
+
+ return r;
+ }
+}
diff --git a/mod/build-result-module.hxx b/mod/build-result-module.hxx
new file mode 100644
index 0000000..34466e4
--- /dev/null
+++ b/mod/build-result-module.hxx
@@ -0,0 +1,78 @@
+// file : mod/build-result-module.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_BUILD_RESULT_MODULE_HXX
+#define MOD_BUILD_RESULT_MODULE_HXX
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <libbrep/build.hxx>
+
+#include <mod/module-options.hxx>
+#include <mod/database-module.hxx>
+#include <mod/build-config-module.hxx>
+
+namespace brep
+{
+ // Base class for modules that handle the build task results.
+ //
+ // Specifically, it loads build controller configuration, initializes the
+ // build database instance, and provides utilities for parsing and
+ // authenticating the build task session.
+ //
+ class build_result_module: public database_module,
+ protected build_config_module
+ {
+ protected:
+ build_result_module () = default;
+
+ // Create a shallow copy (handling instance) if initialized and a deep
+ // copy (context exemplar) otherwise.
+ //
+ explicit
+ build_result_module (const build_result_module&);
+
+ void
+ init (const options::build&, const options::build_db&);
+
+ using handler::init; // Unhide.
+
+ // Parse the build task session and verify that the session matches the
+ // tenant. Throw invalid_argument on errors.
+ //
+ struct parse_session_result
+ {
+ build_id id;
+ brep::version package_version;
+ brep::version toolchain_version;
+ brep::timestamp timestamp;
+ };
+
+ parse_session_result
+ parse_session (const string&) const;
+
+ // Return true if bbot agent authentication is disabled or the agent is
+ // recognized and challenge matches. If the session authentication fails
+ // (challenge is not expected, expected but doesn't match, etc), then log
+ // the failure reason with the warning severity and return false.
+ //
+ // Note that the session argument is used only for logging.
+ //
+ bool
+ authenticate_session (const options::build&,
+ const optional<vector<char>>& challenge,
+ const build&,
+ const string& session) const;
+
+ protected:
+ // True if the openssl version is greater or equal to 3.0.0 and so pkeyutl
+ // needs to be used instead of rsautl.
+ //
+ // Note that openssl 3.0.0 deprecates rsautl in favor of pkeyutl.
+ //
+ bool use_openssl_pkeyutl_;
+ };
+}
+
+#endif // MOD_BUILD_RESULT_MODULE_HXX
diff --git a/mod/build-config.cxx b/mod/build-target-config.cxx
index 43a85e8..a30cf07 100644
--- a/mod/build-config.cxx
+++ b/mod/build-target-config.cxx
@@ -1,17 +1,16 @@
-// file : mod/build-config-module.cxx -*- C++ -*-
+// file : mod/target-build-config.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <mod/build-config.hxx>
+#include <mod/build-target-config.hxx>
-#include <libbutl/utility.mxx> // alpha(), etc.
-#include <libbutl/path-pattern.mxx>
+#include <libbutl/utility.hxx> // alpha(), etc.
+#include <libbutl/path-pattern.hxx>
namespace brep
{
using namespace std;
using namespace butl;
using namespace bpkg;
- using namespace bbot;
// The default underlying class set expressions (see below).
//
@@ -22,13 +21,17 @@ namespace brep
{"all"}, '+', "All.");
bool
- exclude (const small_vector<build_class_expr, 1>& exprs,
- const vector<build_constraint>& constrs,
- const build_config& cfg,
+ exclude (const build_package_config& pc,
+ const build_class_exprs& cbs,
+ const build_constraints& ccs,
+ const build_target_config& tc,
const map<string, string>& class_inheritance_map,
string* reason,
bool default_all_ucs)
{
+ const build_class_exprs& exprs (pc.effective_builds (cbs));
+ const build_constraints& constrs (pc.effective_constraints (ccs));
+
// Save the first sentence of the reason, lower-case the first letter if
// the beginning looks like a word (all subsequent characters until a
// whitespace are lower-case letters).
@@ -74,11 +77,11 @@ namespace brep
// (changing the result from true to false) or non-including one (leaving
// the false result) as an exclusion reason.
//
- auto match = [&cfg, &m, reason, &sanitize, &class_inheritance_map]
+ auto match = [&tc, &m, reason, &sanitize, &class_inheritance_map]
(const build_class_expr& e)
{
bool pm (m);
- e.match (cfg.classes, class_inheritance_map, m);
+ e.match (tc.classes, class_inheritance_map, m);
if (reason != nullptr)
{
@@ -168,8 +171,8 @@ namespace brep
if (!constrs.empty ())
try
{
- path cn (dash_components_to_path (cfg.name));
- path tg (dash_components_to_path (cfg.target.string ()));
+ path cn (dash_components_to_path (tc.name));
+ path tg (dash_components_to_path (tc.target.string ()));
for (const build_constraint& c: constrs)
{
diff --git a/mod/build-target-config.hxx b/mod/build-target-config.hxx
new file mode 100644
index 0000000..180ca80
--- /dev/null
+++ b/mod/build-target-config.hxx
@@ -0,0 +1,79 @@
+// file : mod/build-target-config.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_BUILD_TARGET_CONFIG_HXX
+#define MOD_BUILD_TARGET_CONFIG_HXX
+
+#include <map>
+
+#include <libbutl/target-triplet.hxx>
+
+#include <libbpkg/manifest.hxx>
+
+#include <libbbot/build-target-config.hxx>
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <libbrep/common.hxx>
+
+namespace brep
+{
+ using build_target_config = bbot::build_target_config;
+ using build_target_configs = bbot::build_target_configs;
+
+ // Return true if the specified build target configuration is excluded by a
+ // package configuration based on its underlying build class set, build
+ // class expressions, and build constraints, potentially extending the
+ // underlying set with the special classes. Set the exclusion reason if
+ // requested. Optionally use the `all` class as a default underlying build
+ // class set rather than the `default` class (which is, for example, the
+ // case for the external test packages not to reduce their build target
+ // configuration set needlessly).
+ //
+ bool
+ exclude (const build_package_config&,
+ const build_class_exprs& common_builds,
+ const build_constraints& common_constraints,
+ const build_target_config&,
+ const std::map<string, string>& class_inheritance_map,
+ string* reason = nullptr,
+ bool default_all_ucs = false);
+
+ // Convert dash-separated components (target, build target configuration
+ // name, machine name) or a pattern thereof into a path, replacing dashes
+ // with slashes (directory separators), `**` with `*/**/*`, and appending
+ // the trailing slash for a subsequent match using the path_match()
+ // functionality (the idea here is for `linux**` to match `linux-gcc` which
+ // is quite natural to expect). Throw invalid_path if the resulting path is
+ // invalid.
+ //
+ // Note that the match_absent path match flag must be used for the above
+ // `**` transformation to work.
+ //
+ path
+ dash_components_to_path (const string&);
+
+ // Build target/target configuration name combination that, in particular,
+ // identifies configurations in the buildtab and thus can be used as a
+ // set/map key.
+ //
+ // Note: contains shallow references to the target and configuration name.
+ //
+ struct build_target_config_id
+ {
+ reference_wrapper<const butl::target_triplet> target;
+ reference_wrapper<const string> config;
+
+ bool
+ operator< (const build_target_config_id& x) const
+ {
+ if (int r = target.get ().compare (x.target.get ()))
+ return r < 0;
+
+ return config.get ().compare (x.config.get ()) < 0;
+ }
+ };
+}
+
+#endif // MOD_BUILD_TARGET_CONFIG
diff --git a/mod/build.cxx b/mod/build.cxx
index 5b9d8aa..5c37acb 100644
--- a/mod/build.cxx
+++ b/mod/build.cxx
@@ -3,12 +3,22 @@
#include <mod/build.hxx>
+#include <odb/database.hxx>
+#include <odb/connection.hxx>
+#include <odb/transaction.hxx>
+
+#include <libbutl/sendmail.hxx>
+#include <libbutl/process-io.hxx>
+
#include <web/server/mime-url-encoding.hxx>
+#include <libbrep/build-package-odb.hxx>
+
#include <mod/utility.hxx>
namespace brep
{
+ using namespace std;
using namespace web;
string
@@ -20,12 +30,15 @@ namespace brep
// needs to be url-encoded, and only in the query part of the URL. We embed
// the package version into the URL path part and so don't encode it.
//
- string url (host + tenant_dir (root, b.tenant).representation () +
- mime_url_encode (b.package_name.string (), false) + '/' +
- b.package_version.string () + "/log/" +
- mime_url_encode (b.configuration, false /* query */) + '/' +
- mime_url_encode (b.toolchain_name, false /* query */) + '/' +
- b.toolchain_version.string ());
+ string url (
+ host + tenant_dir (root, b.tenant).representation () +
+ mime_url_encode (b.package_name.string (), false) + '/' +
+ b.package_version.string () + "/log/" +
+ mime_url_encode (b.target.string (), false /* query */) + '/' +
+ mime_url_encode (b.target_config_name, false /* query */) + '/' +
+ mime_url_encode (b.package_config_name, false /* query */) + '/' +
+ mime_url_encode (b.toolchain_name, false /* query */) + '/' +
+ b.toolchain_version.string ());
if (op != nullptr)
{
@@ -44,12 +57,154 @@ namespace brep
// we embed the package version into the URL query part, where it is not
// encoded by design.
//
- return host + tenant_dir (root, b.tenant).string () +
+ return host + tenant_dir (root, b.tenant).string () +
"?build-force&pn=" + mime_url_encode (b.package_name.string ()) +
- "&pv=" + b.package_version.string () +
- "&cf=" + mime_url_encode (b.configuration) +
- "&tn=" + mime_url_encode (b.toolchain_name) +
- "&tv=" + b.toolchain_version.string () +
+ "&pv=" + b.package_version.string () +
+ "&tg=" + mime_url_encode (b.target.string ()) +
+ "&tc=" + mime_url_encode (b.target_config_name) +
+ "&pc=" + mime_url_encode (b.package_config_name) +
+ "&tn=" + mime_url_encode (b.toolchain_name) +
+ "&tv=" + b.toolchain_version.string () +
"&reason=";
}
+
+ void
+ send_notification_email (const options::build_email_notification& o,
+ const odb::core::connection_ptr& conn,
+ const build& b,
+ const build_package& p,
+ const build_package_config& pc,
+ const string& what,
+ const basic_mark& error,
+ const basic_mark* trace)
+ {
+ using namespace odb::core;
+ using namespace butl;
+
+ assert (b.state == build_state::built && b.status);
+
+ // Bail out if sending build notification emails is disabled for this
+ // toolchain for this package.
+ //
+ {
+ const map<string, build_email>& tes (o.build_toolchain_email ());
+ auto i (tes.find (b.id.toolchain_name));
+ build_email mode (i != tes.end () ? i->second : build_email::latest);
+
+ if (mode == build_email::none)
+ {
+ return;
+ }
+ else if (mode == build_email::latest)
+ {
+ transaction t (conn->begin ());
+ database& db (t.database ());
+
+ const auto& id (query<buildable_package>::build_package::id);
+
+ buildable_package lp (
+ db.query_value<buildable_package> (
+ (id.tenant == b.tenant && id.name == b.package_name) +
+ order_by_version_desc (id.version) +
+ "LIMIT 1"));
+
+ t.commit ();
+
+ if (lp.package->version != p.version)
+ return;
+ }
+ }
+
+ string subj (what + ' ' +
+ to_string (*b.status) + ": " +
+ b.package_name.string () + '/' +
+ b.package_version.string () + ' ' +
+ b.target_config_name + '/' +
+ b.target.string () + ' ' +
+ b.package_config_name + ' ' +
+ b.toolchain_name + '-' + b.toolchain_version.string ());
+
+ // Send notification emails to the interested parties.
+ //
+ auto send_email = [&b, &subj, &o, &error, trace] (const string& to)
+ {
+ try
+ {
+ if (trace != nullptr)
+ *trace << "email '" << subj << "' to " << to;
+
+ // Redirect the diagnostics to webserver error log.
+ //
+ sendmail sm ([trace] (const char* args[], size_t n)
+ {
+ if (trace != nullptr)
+ *trace << process_args {args, n};
+ },
+ 2,
+ o.email (),
+ subj,
+ {to});
+
+ if (b.results.empty ())
+ {
+ sm.out << "No operation results available." << endl;
+ }
+ else
+ {
+ const string& host (o.host ());
+ const dir_path& root (o.root ());
+
+ ostream& os (sm.out);
+
+ os << "combined: " << *b.status << endl << endl
+ << " " << build_log_url (host, root, b) << endl << endl;
+
+ for (const auto& r: b.results)
+ os << r.operation << ": " << r.status << endl << endl
+ << " " << build_log_url (host, root, b, &r.operation)
+ << endl << endl;
+
+ os << "Force rebuild (enter the reason, use '+' instead of spaces):"
+ << endl << endl
+ << " " << build_force_url (host, root, b) << endl;
+ }
+
+ sm.out.close ();
+
+ if (!sm.wait ())
+ error << "sendmail " << *sm.exit;
+ }
+ // Handle process_error and io_error (both derive from system_error).
+ //
+ catch (const system_error& e)
+ {
+ error << "sendmail error: " << e;
+ }
+ };
+
+ // Send the build notification email if a non-empty package build email is
+ // specified.
+ //
+ if (const optional<email>& e = pc.effective_email (p.build_email))
+ {
+ if (!e->empty ())
+ send_email (*e);
+ }
+
+ // Send the build warning/error notification emails, if requested.
+ //
+ if (*b.status >= result_status::warning)
+ {
+ if (const optional<email>& e =
+ pc.effective_warning_email (p.build_warning_email))
+ send_email (*e);
+ }
+
+ if (*b.status >= result_status::error)
+ {
+ if (const optional<email>& e =
+ pc.effective_error_email (p.build_error_email))
+ send_email (*e);
+ }
+ }
}
diff --git a/mod/build.hxx b/mod/build.hxx
index f0846be..07e4411 100644
--- a/mod/build.hxx
+++ b/mod/build.hxx
@@ -4,10 +4,16 @@
#ifndef MOD_BUILD_HXX
#define MOD_BUILD_HXX
+#include <odb/forward.hxx> // odb::core::connection_ptr
+
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
#include <libbrep/build.hxx>
+#include <libbrep/build-package.hxx>
+
+#include <mod/diagnostics.hxx>
+#include <mod/module-options.hxx>
// Various package build-related utilities.
//
@@ -25,6 +31,19 @@ namespace brep
//
string
build_force_url (const string& host, const dir_path& root, const build&);
+
+ // Send the notification email for the specified package configuration
+ // build. The build is expected to be in the built state.
+ //
+ void
+ send_notification_email (const options::build_email_notification&,
+ const odb::core::connection_ptr&,
+ const build&,
+ const build_package&,
+ const build_package_config&,
+ const string& what, // build, rebuild, etc.
+ const basic_mark& error,
+ const basic_mark* trace);
}
#endif // MOD_BUILD_HXX
diff --git a/mod/buildfile b/mod/buildfile
index 191d966..c3895dc 100644
--- a/mod/buildfile
+++ b/mod/buildfile
@@ -25,7 +25,7 @@ include ../web/server/
./: mod{brep} {libue libus}{mod}
-libu_src = options-types types-parsers build-config
+libu_src = options-types types-parsers build-target-config
mod{brep}: {hxx ixx txx cxx}{* -module-options -{$libu_src}} \
libus{mod} ../libbrep/lib{brep} ../web/server/libus{web-server} \
@@ -35,6 +35,11 @@ mod{brep}: {hxx ixx txx cxx}{* -module-options -{$libu_src}} \
{hxx ixx txx cxx}{+{$libu_src} } \
$libs
+# Add support for tenant-associated service notifications to the CI module for
+# the debugging of the notifications machinery.
+#
+cxx.poptions += -DBREP_CI_TENANT_SERVICE
+
libus{mod}: ../web/xhtml/libus{xhtml}
libue{mod}: ../web/xhtml/libue{xhtml}
@@ -50,7 +55,7 @@ if $cli.configured
cli.options += --std c++11 -I $src_root --include-with-brackets \
--include-prefix mod --guard-prefix MOD --generate-specifier \
--cxx-prologue "#include <mod/types-parsers.hxx>" \
---cli-namespace brep::cli --generate-file-scanner --option-length 41 \
+--cli-namespace brep::cli --generate-file-scanner --option-length 46 \
--generate-modifier --generate-description --option-prefix ""
# Include the generated cli files into the distribution and don't remove
diff --git a/mod/ci-common.cxx b/mod/ci-common.cxx
new file mode 100644
index 0000000..cb61e66
--- /dev/null
+++ b/mod/ci-common.cxx
@@ -0,0 +1,494 @@
+// file : mod/ci-common.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <mod/ci-common.hxx>
+
+#include <libbutl/uuid.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/sendmail.hxx>
+#include <libbutl/timestamp.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
+#include <libbutl/manifest-serializer.hxx>
+
+#include <mod/external-handler.hxx>
+
+namespace brep
+{
+ using namespace std;
+ using namespace butl;
+
+ void ci_start::
+ init (shared_ptr<options::ci_start> o)
+ {
+ // Verify the data directory satisfies the requirements.
+ //
+ const dir_path& d (o->ci_data ());
+
+ if (d.relative ())
+ throw runtime_error ("ci-data directory path must be absolute");
+
+ if (!dir_exists (d))
+ throw runtime_error ("ci-data directory '" + d.string () +
+ "' does not exist");
+
+ if (o->ci_handler_specified () && o->ci_handler ().relative ())
+ throw runtime_error ("ci-handler path must be absolute");
+
+ options_ = move (o);
+ }
+
+ optional<ci_start::start_result> ci_start::
+ start (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark* trace,
+ optional<tenant_service>&& service,
+ const repository_location& repository,
+ const vector<package>& packages,
+ const optional<string>& client_ip,
+ const optional<string>& user_agent,
+ const optional<string>& interactive,
+ const optional<string>& simulate,
+ const vector<pair<string, string>>& custom_request,
+ const vector<pair<string, string>>& overrides)
+ {
+ using serializer = manifest_serializer;
+ using serialization = manifest_serialization;
+
+ assert (options_ != nullptr); // Shouldn't be called otherwise.
+
+ // If the tenant service is specified, then its type may not be empty.
+ //
+ assert (!service || !service->type.empty ());
+
+ // Generate the request id.
+ //
+ // Note that it will also be used as a CI result manifest reference,
+ // unless the latter is provided by the external handler.
+ //
+ string request_id;
+
+ try
+ {
+ request_id = uuid::generate ().string ();
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to generate request id: " << e;
+ return nullopt;
+ }
+
+ // Create the submission data directory.
+ //
+ dir_path dd (options_->ci_data () / dir_path (request_id));
+
+ try
+ {
+ // It's highly unlikely but still possible that the directory already
+ // exists. This can only happen if the generated uuid is not unique.
+ //
+ if (try_mkdir (dd) == mkdir_status::already_exists)
+ throw_generic_error (EEXIST);
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to create directory '" << dd << "': " << e;
+ return nullopt;
+ }
+
+ auto_rmdir ddr (dd);
+
+ // Return the start_result object for the client errors (normally the bad
+ // request status code (400) for the client data serialization errors).
+ //
+ auto client_error = [&request_id] (uint16_t status, string message)
+ {
+ return start_result {status,
+ move (message),
+ request_id,
+ vector<pair<string, string>> ()};
+ };
+
+ // Serialize the CI request manifest to a stream. On the serialization
+ // error return false together with the start_result object containing the
+ // bad request (400) code and the error message. On the stream error pass
+ // through the io_error exception. Otherwise return true.
+ //
+ timestamp ts (system_clock::now ());
+
+ auto rqm = [&request_id,
+ &ts,
+ &service,
+ &repository,
+ &packages,
+ &client_ip,
+ &user_agent,
+ &interactive,
+ &simulate,
+ &custom_request,
+ &client_error] (ostream& os, bool long_lines = false)
+ -> pair<bool, optional<start_result>>
+ {
+ try
+ {
+ serializer s (os, "request", long_lines);
+
+ // Serialize the submission manifest header.
+ //
+ s.next ("", "1"); // Start of manifest.
+ s.next ("id", request_id);
+ s.next ("repository", repository.string ());
+
+ for (const package& p: packages)
+ {
+ if (!p.version)
+ s.next ("package", p.name.string ());
+ else
+ s.next ("package",
+ p.name.string () + '/' + p.version->string ());
+ }
+
+ if (interactive)
+ s.next ("interactive", *interactive);
+
+ if (simulate)
+ s.next ("simulate", *simulate);
+
+ s.next ("timestamp",
+ butl::to_string (ts,
+ "%Y-%m-%dT%H:%M:%SZ",
+ false /* special */,
+ false /* local */));
+
+ if (client_ip)
+ s.next ("client-ip", *client_ip);
+
+ if (user_agent)
+ s.next ("user-agent", *user_agent);
+
+ if (service)
+ {
+ // Note that if the service id is not specified, then the handler
+ // will use the generated reference instead.
+ //
+ if (!service->id.empty ())
+ s.next ("service-id", service->id);
+
+ s.next ("service-type", service->type);
+
+ if (service->data)
+ s.next ("service-data", *service->data);
+ }
+
+ // Serialize the request custom parameters.
+ //
+ // Note that the serializer constrains the custom parameter names
+ // (can't start with '#', can't contain ':' and the whitespaces,
+ // etc).
+ //
+ for (const pair<string, string>& nv: custom_request)
+ s.next (nv.first, nv.second);
+
+ s.next ("", ""); // End of manifest.
+ return make_pair (true, optional<start_result> ());
+ }
+ catch (const serialization& e)
+ {
+ return make_pair (false,
+ optional<start_result> (
+ client_error (400,
+ string ("invalid parameter: ") +
+ e.what ())));
+ }
+ };
+
+ // Serialize the CI request manifest to the submission directory.
+ //
+ path rqf (dd / "request.manifest");
+
+ try
+ {
+ ofdstream os (rqf);
+ pair<bool, optional<start_result>> r (rqm (os));
+ os.close ();
+
+ if (!r.first)
+ return move (*r.second);
+ }
+ catch (const io_error& e)
+ {
+ error << "unable to write to '" << rqf << "': " << e;
+ return nullopt;
+ }
+
+ // Serialize the CI overrides manifest to a stream. On the serialization
+ // error return false together with the start_result object containing the
+ // bad request (400) code and the error message. On the stream error pass
+ // through the io_error exception. Otherwise return true.
+ //
+ auto ovm = [&overrides, &client_error] (ostream& os,
+ bool long_lines = false)
+ -> pair<bool, optional<start_result>>
+ {
+ try
+ {
+ serializer s (os, "overrides", long_lines);
+
+ s.next ("", "1"); // Start of manifest.
+
+ for (const pair<string, string>& nv: overrides)
+ s.next (nv.first, nv.second);
+
+ s.next ("", ""); // End of manifest.
+ return make_pair (true, optional<start_result> ());
+ }
+ catch (const serialization& e)
+ {
+ return make_pair (false,
+ optional<start_result> (
+ client_error (
+ 400,
+ string ("invalid manifest override: ") +
+ e.what ())));
+ }
+ };
+
+ // Serialize the CI overrides manifest to the submission directory.
+ //
+ path ovf (dd / "overrides.manifest");
+
+ if (!overrides.empty ())
+ try
+ {
+ ofdstream os (ovf);
+ pair<bool, optional<start_result>> r (ovm (os));
+ os.close ();
+
+ if (!r.first)
+ return move (*r.second);
+ }
+ catch (const io_error& e)
+ {
+ error << "unable to write to '" << ovf << "': " << e;
+ return nullopt;
+ }
+
+ // Given that the submission data is now successfully persisted we are no
+ // longer in charge of removing it, except for the cases when the
+ // submission handler terminates with an error (see below for details).
+ //
+ ddr.cancel ();
+
+ // If the handler terminates with non-zero exit status or specifies 5XX
+ // (HTTP server error) submission result manifest status value, then we
+ // stash the submission data directory for troubleshooting. Otherwise, if
+ // it's the 4XX (HTTP client error) status value, then we remove the
+ // directory.
+ //
+ auto stash_submit_dir = [&dd, error] ()
+ {
+ if (dir_exists (dd))
+ try
+ {
+ mvdir (dd, dir_path (dd + ".fail"));
+ }
+ catch (const system_error& e)
+ {
+ // Not much we can do here. Let's just log the issue and bail out
+ // leaving the directory in place.
+ //
+ error << "unable to rename directory '" << dd << "': " << e;
+ }
+ };
+
+ // Run the submission handler, if specified, reading the CI result
+ // manifest from its stdout and parse it into the resulting manifest
+ // object. Otherwise, create implied CI result manifest.
+ //
+ start_result sr;
+
+ if (options_->ci_handler_specified ())
+ {
+ using namespace external_handler;
+
+ optional<result_manifest> r (run (options_->ci_handler (),
+ options_->ci_handler_argument (),
+ dd,
+ options_->ci_handler_timeout (),
+ error,
+ warn,
+ trace));
+ if (!r)
+ {
+ stash_submit_dir ();
+ return nullopt; // The diagnostics is already issued.
+ }
+
+ sr.status = r->status;
+
+ for (manifest_name_value& nv: r->values)
+ {
+ string& n (nv.name);
+ string& v (nv.value);
+
+ if (n == "message")
+ sr.message = move (v);
+ else if (n == "reference")
+ sr.reference = move (v);
+ else if (n != "status")
+ sr.custom_result.emplace_back (move (n), move (v));
+ }
+
+ if (sr.reference.empty ())
+ sr.reference = move (request_id);
+ }
+ else // Create the implied CI result manifest.
+ {
+ sr.status = 200;
+ sr.message = "CI request is queued";
+ sr.reference = move (request_id);
+ }
+
+ // Serialize the CI result manifest to a stream. On the
+ // serialization error log the error description and return false, on the
+ // stream error pass through the io_error exception, otherwise return
+ // true.
+ //
+ auto rsm = [&sr, &error] (ostream& os, bool long_lines = false) -> bool
+ {
+ try
+ {
+ serialize_manifest (sr, os, long_lines);
+ return true;
+ }
+ catch (const serialization& e)
+ {
+ error << "ref " << sr.reference << ": unable to serialize handler's "
+ << "output: " << e;
+ return false;
+ }
+ };
+
+ // If the submission data directory still exists then perform an
+ // appropriate action on it, depending on the submission result status.
+ // Note that the handler could move or remove the directory.
+ //
+ if (dir_exists (dd))
+ {
+ // Remove the directory if the client error is detected.
+ //
+ if (sr.status >= 400 && sr.status < 500)
+ {
+ rmdir_r (dd);
+ }
+ //
+ // Otherwise, save the result manifest into the directory. Also stash
+ // the directory for troubleshooting in case of the server error.
+ //
+ else
+ {
+ path rsf (dd / "result.manifest");
+
+ try
+ {
+ ofdstream os (rsf);
+
+ // Not being able to stash the result manifest is not a reason to
+ // claim the submission failed. The error is logged nevertheless.
+ //
+ rsm (os);
+
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ // Not fatal (see above).
+ //
+ error << "unable to write to '" << rsf << "': " << e;
+ }
+
+ if (sr.status >= 500 && sr.status < 600)
+ stash_submit_dir ();
+ }
+ }
+
+ // Send email, if configured, and the CI request submission is not
+ // simulated. Use the long lines manifest serialization mode for the
+ // convenience of copying/clicking URLs they contain.
+ //
+ // Note that we don't consider the email sending failure to be a
+ // submission failure as the submission data is successfully persisted and
+ // the handler is successfully executed, if configured. One can argue that
+ // email can be essential for the submission processing and missing it
+ // would result in the incomplete submission. In this case it's natural to
+ // assume that the web server error log is monitored and the email sending
+ // failure will be noticed.
+ //
+ if (options_->ci_email_specified () && !simulate)
+ try
+ {
+ // Redirect the diagnostics to the web server error log.
+ //
+ sendmail sm ([trace] (const char* args[], size_t n)
+ {
+ if (trace != nullptr)
+ *trace << process_args {args, n};
+ },
+ 2 /* stderr */,
+ options_->email (),
+ "CI request submission (" + sr.reference + ')',
+ {options_->ci_email ()});
+
+ // Write the CI request manifest.
+ //
+ pair<bool, optional<start_result>> r (
+ rqm (sm.out, true /* long_lines */));
+
+ assert (r.first); // The serialization succeeded once, so can't fail now.
+
+ // Write the CI overrides manifest.
+ //
+ sm.out << "\n\n";
+
+ r = ovm (sm.out, true /* long_lines */);
+ assert (r.first); // The serialization succeeded once, so can't fail now.
+
+ // Write the CI result manifest.
+ //
+ sm.out << "\n\n";
+
+ // We don't care about the result (see above).
+ //
+ rsm (sm.out, true /* long_lines */);
+
+ sm.out.close ();
+
+ if (!sm.wait ())
+ error << "sendmail " << *sm.exit;
+ }
+ // Handle process_error and io_error (both derive from system_error).
+ //
+ catch (const system_error& e)
+ {
+ error << "sendmail error: " << e;
+ }
+
+ return optional<start_result> (move (sr));
+ }
+
+ void ci_start::
+ serialize_manifest (const start_result& r, ostream& os, bool long_lines)
+ {
+ manifest_serializer s (os, "result", long_lines);
+
+ s.next ("", "1"); // Start of manifest.
+ s.next ("status", to_string (r.status));
+ s.next ("message", r.message);
+ s.next ("reference", r.reference);
+
+ for (const pair<string, string>& nv: r.custom_result)
+ s.next (nv.first, nv.second);
+
+ s.next ("", ""); // End of manifest.
+ }
+}
diff --git a/mod/ci-common.hxx b/mod/ci-common.hxx
new file mode 100644
index 0000000..6f62c4b
--- /dev/null
+++ b/mod/ci-common.hxx
@@ -0,0 +1,96 @@
+// file : mod/ci-common.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_CI_COMMON_HXX
+#define MOD_CI_COMMON_HXX
+
+#include <odb/forward.hxx> // database
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <libbrep/common.hxx>
+
+#include <mod/diagnostics.hxx>
+#include <mod/module-options.hxx>
+
+namespace brep
+{
+ class ci_start
+ {
+ public:
+ void
+ init (shared_ptr<options::ci_start>);
+
+ // If the request handling has been performed normally, then return the
+ // information that corresponds to the CI result manifest (see CI Result
+ // Manifest in the manual). Otherwise (some internal error has occurred),
+ // log the error and return nullopt.
+ //
+ // The arguments correspond to the CI request and overrides manifest
+ // values (see CI Request and Overrides Manifests in the manual). Note:
+ // request id and timestamp are generated by the implementation.
+ //
+ struct package
+ {
+ package_name name;
+ optional<brep::version> version;
+ };
+ // Note that the inability to generate the reference is an internal
+ // error. Thus, it is not optional.
+ //
+ struct start_result
+ {
+ uint16_t status;
+ string message;
+ string reference;
+ vector<pair<string, string>> custom_result;
+ };
+
+ // In the optional service information, if id is empty, then the generated
+ // reference is used instead.
+ //
+ optional<start_result>
+ start (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark* trace,
+ optional<tenant_service>&&,
+ const repository_location& repository,
+ const vector<package>& packages,
+ const optional<string>& client_ip,
+ const optional<string>& user_agent,
+ const optional<string>& interactive = nullopt,
+ const optional<string>& simulate = nullopt,
+ const vector<pair<string, string>>& custom_request = {},
+ const vector<pair<string, string>>& overrides = {});
+
+ // Helpers.
+ //
+
+ // Serialize the start result as a CI result manifest.
+ //
+ static void
+ serialize_manifest (const start_result&, ostream&, bool long_lines = false);
+
+ private:
+ shared_ptr<options::ci_start> options_;
+ };
+
+ class ci_cancel
+ {
+ public:
+ void
+ init (shared_ptr<options::ci_cancel>, shared_ptr<odb::core::database>);
+
+ // @@ TODO Archive the tenant.
+ //
+ void
+ cancel (/*...*/);
+
+ private:
+ shared_ptr<options::ci_cancel> options_;
+ shared_ptr<odb::core::database> build_db_;
+ };
+}
+
+#endif // MOD_CI_COMMON_HXX
diff --git a/mod/database-module.cxx b/mod/database-module.cxx
index f598bfd..07babc6 100644
--- a/mod/database-module.cxx
+++ b/mod/database-module.cxx
@@ -3,13 +3,20 @@
#include <mod/database-module.hxx>
+#include <odb/database.hxx>
#include <odb/exceptions.hxx>
+#include <odb/transaction.hxx>
+
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
#include <mod/database.hxx>
#include <mod/module-options.hxx>
namespace brep
{
+ using namespace odb::core;
+
// While currently the user-defined copy constructor is not required (we
// don't need to deep copy nullptr's), it is a good idea to keep the
// placeholder ready for less trivial cases.
@@ -68,4 +75,53 @@ namespace brep
throw;
}
+
+ void database_module::
+ update_tenant_service_state (
+ const connection_ptr& conn,
+ const string& tid,
+ const function<optional<string> (const tenant_service&)>& f)
+ {
+ assert (f != nullptr); // Shouldn't be called otherwise.
+
+ // Must be initialized via the init(options::build_db) function call.
+ //
+ assert (build_db_ != nullptr);
+
+ for (size_t retry (retry_);; )
+ {
+ try
+ {
+ transaction tr (conn->begin ());
+
+ shared_ptr<build_tenant> t (build_db_->find<build_tenant> (tid));
+
+ if (t != nullptr && t->service)
+ {
+ tenant_service& s (*t->service);
+
+ if (optional<string> data = f (s))
+ {
+ s.data = move (*data);
+ build_db_->update (t);
+ }
+ }
+
+ tr.commit ();
+
+ // Bail out if we have successfully updated the service state.
+ //
+ break;
+ }
+ catch (const odb::recoverable& e)
+ {
+ if (retry-- == 0)
+ throw;
+
+ HANDLER_DIAG;
+ l1 ([&]{trace << e << "; " << retry + 1 << " tenant service "
+ << "state update retries left";});
+ }
+ }
+ }
}
diff --git a/mod/database-module.hxx b/mod/database-module.hxx
index f72ba83..910cb35 100644
--- a/mod/database-module.hxx
+++ b/mod/database-module.hxx
@@ -4,7 +4,7 @@
#ifndef MOD_DATABASE_MODULE_HXX
#define MOD_DATABASE_MODULE_HXX
-#include <odb/forward.hxx> // database
+#include <odb/forward.hxx> // odb::core::database, odb::core::connection_ptr
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
@@ -14,6 +14,8 @@
namespace brep
{
+ struct tenant_service;
+
// A handler that utilises the database. Specifically, it will retry the
// request in the face of recoverable database failures (deadlock, loss of
// connection, etc) up to a certain number of times.
@@ -50,6 +52,25 @@ namespace brep
virtual bool
handle (request&, response&) = 0;
+ // Helpers.
+ //
+
+ // Update the tenant-associated service state if the specified
+ // notification callback-returned function (expected to be not NULL)
+ // returns the new state data.
+ //
+ // Specifically, start the database transaction, query the service state,
+ // and call the callback-returned function on this state. If this call
+ // returns the data string (rather than nullopt), then update the service
+ // state with this data and persist the change. Repeat all the above steps
+ // on the recoverable database failures (deadlocks, etc).
+ //
+ void
+ update_tenant_service_state (
+ const odb::core::connection_ptr&,
+ const string& tid,
+ const function<optional<string> (const tenant_service&)>&);
+
protected:
size_t retry_ = 0; // Max of all retries.
diff --git a/mod/database.cxx b/mod/database.cxx
index d53ee50..02d521d 100644
--- a/mod/database.cxx
+++ b/mod/database.cxx
@@ -24,10 +24,10 @@ namespace brep
operator< (const db_key& x, const db_key& y)
{
int r;
- if ((r = x.user.compare (y.user)) != 0 ||
- (r = x.role.compare (y.role)) != 0 ||
+ if ((r = x.user.compare (y.user)) != 0 ||
+ (r = x.role.compare (y.role)) != 0 ||
(r = x.password.compare (y.password)) != 0 ||
- (r = x.name.compare (y.name)) != 0 ||
+ (r = x.name.compare (y.name)) != 0 ||
(r = x.host.compare (y.host)))
return r < 0;
@@ -59,7 +59,7 @@ namespace brep
// Change the connection current user to the execution user name.
//
if (!role_.empty ())
- conn->execute ("SET ROLE '" + role_ + "'");
+ conn->execute ("SET ROLE '" + role_ + '\'');
return conn;
}
diff --git a/mod/diagnostics.hxx b/mod/diagnostics.hxx
index 37ab25e..f83e1de 100644
--- a/mod/diagnostics.hxx
+++ b/mod/diagnostics.hxx
@@ -109,7 +109,7 @@ namespace brep
uncaught_ (r.uncaught_),
#endif
data_ (move (r.data_)),
- os_ (move (r.os_)),
+ os_ (move (r.os_)), // Note: can throw.
epilogue_ (r.epilogue_)
{
r.data_.clear (); // Empty.
diff --git a/mod/external-handler.cxx b/mod/external-handler.cxx
index 7f26680..3a85bd8 100644
--- a/mod/external-handler.cxx
+++ b/mod/external-handler.cxx
@@ -13,9 +13,10 @@
#include <type_traits> // static_assert
#include <system_error> // error_code, generic_category()
-#include <libbutl/process.mxx>
-#include <libbutl/fdstream.mxx>
-#include <libbutl/process-io.mxx> // operator<<(ostream, process_args)
+#include <libbutl/process.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
+#include <libbutl/manifest-parser.hxx>
using namespace std;
using namespace butl;
@@ -95,6 +96,8 @@ namespace brep
data_dir));
pipe.out.close ();
+ // Kill the process and wait for its completion.
+ //
auto kill = [&pr, &warn, &handler, &ref] ()
{
// We may still end up well (see below), thus this is a warning.
@@ -103,6 +106,7 @@ namespace brep
<< " execution timeout expired";
pr.kill ();
+ pr.wait ();
};
try
@@ -313,7 +317,7 @@ namespace brep
assert (e != nullptr);
if (!(*e == '\0' && c >= 100 && c < 600))
- bad_value ("invalid HTTP status '" + v + "'");
+ bad_value ("invalid HTTP status '" + v + '\'');
// Save the HTTP status.
//
diff --git a/mod/external-handler.hxx b/mod/external-handler.hxx
index f8f7ee8..0276a25 100644
--- a/mod/external-handler.hxx
+++ b/mod/external-handler.hxx
@@ -4,7 +4,7 @@
#ifndef MOD_EXTERNAL_HANDLER_HXX
#define MOD_EXTERNAL_HANDLER_HXX
-#include <libbutl/manifest-parser.mxx>
+#include <libbutl/manifest-types.hxx>
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
diff --git a/mod/mod-build-configs.cxx b/mod/mod-build-configs.cxx
index 6731b28..9282544 100644
--- a/mod/mod-build-configs.cxx
+++ b/mod/mod-build-configs.cxx
@@ -3,8 +3,6 @@
#include <mod/mod-build-configs.hxx>
-#include <algorithm> // replace()
-
#include <libstudxml/serializer.hxx>
#include <web/server/module.hxx>
@@ -15,7 +13,6 @@
#include <mod/module-options.hxx>
using namespace std;
-using namespace bbot;
using namespace brep::cli;
// While currently the user-defined copy constructor is not required (we don't
@@ -40,6 +37,9 @@ init (scanner& s)
if (options_->build_config_specified ())
build_config_module::init (*options_);
+
+ if (options_->root ().empty ())
+ options_->root (dir_path ("/"));
}
bool brep::build_configs::
@@ -49,7 +49,7 @@ handle (request& rq, response& rs)
HANDLER_DIAG;
- if (build_conf_ == nullptr)
+ if (target_conf_ == nullptr)
throw invalid_request (501, "not implemented");
const size_t page_configs (options_->build_config_page_entries ());
@@ -57,6 +57,8 @@ handle (request& rq, response& rs)
params::build_configs params;
+ string& selected_class (params.class_name ()); // Note: can be empty.
+
try
{
name_value_scanner s (rq.parameters (1024));
@@ -67,8 +69,7 @@ handle (request& rq, response& rs)
// character (that is otherwise forbidden in a class name) to the plus
// character.
//
- string& cn (params.class_name ());
- replace (cn.begin (), cn.end (), ' ', '+');
+ replace (selected_class.begin (), selected_class.end (), ' ', '+');
}
catch (const cli::exception& e)
{
@@ -89,11 +90,11 @@ handle (request& rq, response& rs)
<< DIV_HEADER (options_->logo (), options_->menu (), root, tenant)
<< DIV(ID="content");
- auto url = [&root] (const string& cls)
+ auto url = [&root, this] (const string& cls)
{
- string r (root.string () + "?build-configs");
+ string r (tenant_dir (root, tenant).string () + "?build-configs");
- if (cls != "all")
+ if (!cls.empty ())
{
r += '=';
@@ -120,34 +121,44 @@ handle (request& rq, response& rs)
//
if (params.page () == 0)
{
- const strings& cls (build_conf_->classes);
- const map<string, string>& im (build_conf_->class_inheritance_map);
+ const strings& cls (target_conf_->classes);
+ const map<string, string>& im (target_conf_->class_inheritance_map);
s << DIV(ID="filter-heading") << "Build Configuration Classes" << ~DIV
<< P(ID="filter");
for (auto b (cls.begin ()), i (b), e (cls.end ()); i != e; ++i)
{
- if (i != b)
- s << ' ';
-
+ // Skip the 'hidden' class.
+ //
const string& c (*i);
- print_class_name (c, c == params.class_name ());
- // Append the base class, if present.
- //
- auto j (im.find (c));
- if (j != im.end ())
+ if (c != "hidden")
{
- s << ':';
- print_class_name (j->second);
+ // Note that here we rely on the fact that the first class in the list
+ // can never be 'hidden' (is always 'all').
+ //
+ if (i != b)
+ s << ' ';
+
+ print_class_name (c, c == selected_class);
+
+ // Append the base class, if present.
+ //
+ auto j (im.find (c));
+ if (j != im.end ())
+ {
+ s << ':';
+ print_class_name (j->second);
+ }
}
}
s << ~P;
}
- // Print build configurations that belong to the selected class.
+ // Print build configurations that belong to the selected class (all
+ // configurations if no class is selected) and are not hidden.
//
// We will calculate the total configuration count and cache configurations
// for printing (skipping an appropriate number of them for page number
@@ -155,14 +166,15 @@ handle (request& rq, response& rs)
// before printing the configurations.
//
size_t count (0);
- vector<const build_config*> configs;
+ vector<const build_target_config*> configs;
configs.reserve (page_configs);
size_t skip (page * page_configs);
size_t print (page_configs);
- for (const build_config& c: *build_conf_)
+ for (const build_target_config& c: *target_conf_)
{
- if (belongs (c, params.class_name ()))
+ if ((selected_class.empty () || belongs (c, selected_class)) &&
+ !belongs (c, "hidden"))
{
if (skip != 0)
--skip;
@@ -185,7 +197,7 @@ handle (request& rq, response& rs)
// Enclose the subsequent tables to be able to use nth-child CSS selector.
//
s << DIV;
- for (const build_config* c: configs)
+ for (const build_target_config* c: configs)
{
s << TABLE(CLASS="proplist config")
<< TBODY
@@ -217,7 +229,7 @@ handle (request& rq, response& rs)
count,
page_configs,
options_->build_config_pages (),
- url (params.class_name ()))
+ url (selected_class))
<< ~DIV
<< ~BODY
<< ~HTML;
diff --git a/mod/mod-build-force.cxx b/mod/mod-build-force.cxx
index bd172e3..bdae356 100644
--- a/mod/mod-build-force.cxx
+++ b/mod/mod-build-force.cxx
@@ -3,8 +3,6 @@
#include <mod/mod-build-force.hxx>
-#include <algorithm> // replace()
-
#include <odb/database.hxx>
#include <odb/transaction.hxx>
@@ -12,23 +10,32 @@
#include <libbrep/build.hxx>
#include <libbrep/build-odb.hxx>
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
#include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>
using namespace std;
-using namespace bbot;
using namespace brep::cli;
using namespace odb::core;
+brep::build_force::
+build_force (const tenant_service_map& tsm)
+ : tenant_service_map_ (tsm)
+{
+}
+
// While currently the user-defined copy constructor is not required (we don't
// need to deep copy nullptr's), it is a good idea to keep the placeholder
// ready for less trivial cases.
//
brep::build_force::
-build_force (const build_force& r)
+build_force (const build_force& r, const tenant_service_map& tsm)
: database_module (r),
build_config_module (r),
- options_ (r.initialized_ ? r.options_ : nullptr)
+ options_ (r.initialized_ ? r.options_ : nullptr),
+ tenant_service_map_ (tsm)
{
}
@@ -115,10 +122,26 @@ handle (request& rq, response& rs)
version package_version (parse_version (params.version (),
"package version"));
- string& config (params.configuration ());
+ target_triplet target;
+
+ try
+ {
+ target = target_triplet (params.target ());
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_argument (string ("invalid target: ") + e.what ());
+ }
+
+ string& target_config (params.target_config ());
+
+ if (target_config.empty ())
+ throw invalid_argument ("no target configuration name");
- if (config.empty ())
- throw invalid_argument ("no configuration name");
+ string& package_config (params.package_config ());
+
+ if (package_config.empty ())
+ throw invalid_argument ("no package configuration name");
string& toolchain_name (params.toolchain_name ());
@@ -129,7 +152,9 @@ handle (request& rq, response& rs)
"toolchain version"));
id = build_id (package_id (move (tenant), move (p), package_version),
- move (config),
+ move (target),
+ move (target_config),
+ move (package_config),
move (toolchain_name),
toolchain_version);
}
@@ -149,42 +174,137 @@ handle (request& rq, response& rs)
// Make sure the build configuration still exists.
//
- if (build_conf_map_->find (id.configuration.c_str ()) ==
- build_conf_map_->end ())
- config_expired ("no configuration");
+ if (target_conf_map_->find (
+ build_target_config_id {id.target,
+ id.target_config_name}) ==
+ target_conf_map_->end ())
+ config_expired ("no target configuration");
// Load the package build configuration (if present), set the force flag and
// update the object's persistent state.
//
+ // If the incomplete package build is being forced to rebuild and the
+ // tenant_service_build_queued callback is associated with the package
+ // tenant, then stash the state, the build object, and the callback pointer
+ // and calculate the hints for the subsequent service `queued` notification.
+ //
+ const tenant_service_build_queued* tsq (nullptr);
+ optional<pair<tenant_service, shared_ptr<build>>> tss;
+ tenant_service_build_queued::build_queued_hints qhs;
+
+ connection_ptr conn (build_db_->connection ());
{
- transaction t (build_db_->begin ());
+ transaction t (conn->begin ());
package_build pb;
+ shared_ptr<build> b;
+
if (!build_db_->query_one<package_build> (
- query<package_build>::build::id == id, pb))
+ query<package_build>::build::id == id, pb) ||
+ (b = move (pb.build))->state == build_state::queued)
config_expired ("no package build");
- shared_ptr<build> b (pb.build);
force_state force (b->state == build_state::built
? force_state::forced
: force_state::forcing);
if (b->force != force)
{
+ // Log the force rebuild with the warning severity, truncating the
+ // reason if too long.
+ //
+ diag_record dr (warn);
+ dr << "force rebuild for ";
+
+ if (!b->tenant.empty ())
+ dr << b->tenant << ' ';
+
+ dr << b->package_name << '/' << b->package_version << ' '
+ << b->target_config_name << '/' << b->target << ' '
+ << b->package_config_name << ' '
+ << b->toolchain_name << '-' << b->toolchain_version
+ << " (state: " << to_string (b->state) << ' ' << to_string (b->force)
+ << "): ";
+
+ if (reason.size () < 50)
+ dr << reason;
+ else
+ dr << string (reason, 0, 50) << "...";
+
b->force = force;
build_db_->update (b);
- l1 ([&]{trace << "force rebuild for "
- << b->tenant << ' '
- << b->package_name << '/' << b->package_version << ' '
- << b->configuration << ' '
- << b->toolchain_name << '-' << b->toolchain_version
- << ": " << reason;});
+ if (force == force_state::forcing)
+ {
+ shared_ptr<build_tenant> t (build_db_->load<build_tenant> (b->tenant));
+
+ if (t->service)
+ {
+ auto i (tenant_service_map_.find (t->service->type));
+
+ if (i != tenant_service_map_.end ())
+ {
+ tsq = dynamic_cast<const tenant_service_build_queued*> (
+ i->second.get ());
+
+ // If we ought to call the
+ // tenant_service_build_queued::build_queued() callback, then also
+ // set the package tenant's queued timestamp to the current time
+ // to prevent the notifications race (see tenant::queued_timestamp
+ // for details).
+ //
+ if (tsq != nullptr)
+ {
+ // Calculate the tenant service hints.
+ //
+ buildable_package_count tpc (
+ build_db_->query_value<buildable_package_count> (
+ query<buildable_package_count>::build_tenant::id == t->id));
+
+ shared_ptr<build_package> p (
+ build_db_->load<build_package> (b->id.package));
+
+ qhs = tenant_service_build_queued::build_queued_hints {
+ tpc == 1, p->configs.size () == 1};
+
+ // Set the package tenant's queued timestamp.
+ //
+ t->queued_timestamp = system_clock::now ();
+ build_db_->update (t);
+
+ tss = make_pair (move (*t->service), move (b));
+ }
+ }
+ }
+ }
}
t.commit ();
}
+ // If the incomplete package build is being forced to rebuild and the
+ // tenant-associated third-party service needs to be notified about the
+ // queued builds, then call the tenant_service_build_queued::build_queued()
+ // callback function and update the service state, if requested.
+ //
+ if (tsq != nullptr)
+ {
+ assert (tss); // Wouldn't be here otherwise.
+
+ const tenant_service& ss (tss->first);
+ build& b (*tss->second);
+
+ vector<build> qbs;
+ qbs.push_back (move (b));
+
+ if (auto f = tsq->build_queued (ss,
+ qbs,
+ build_state::building,
+ qhs,
+ log_writer_))
+ update_tenant_service_state (conn, qbs.back ().tenant, f);
+ }
+
// We have all the data, so don't buffer the response content.
//
ostream& os (rs.content (200, "text/plain;charset=utf-8", false));
diff --git a/mod/mod-build-force.hxx b/mod/mod-build-force.hxx
index 22df383..ea9c141 100644
--- a/mod/mod-build-force.hxx
+++ b/mod/mod-build-force.hxx
@@ -8,6 +8,7 @@
#include <libbrep/utility.hxx>
#include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>
#include <mod/database-module.hxx>
#include <mod/build-config-module.hxx>
@@ -16,13 +17,13 @@ namespace brep
class build_force: public database_module, private build_config_module
{
public:
- build_force () = default;
+ explicit
+ build_force (const tenant_service_map&);
// Create a shallow copy (handling instance) if initialized and a deep
// copy (context exemplar) otherwise.
//
- explicit
- build_force (const build_force&);
+ build_force (const build_force&, const tenant_service_map&);
virtual bool
handle (request&, response&);
@@ -39,6 +40,7 @@ namespace brep
private:
shared_ptr<options::build_force> options_;
+ const tenant_service_map& tenant_service_map_;
};
}
diff --git a/mod/mod-build-log.cxx b/mod/mod-build-log.cxx
index 3032e52..c8e803b 100644
--- a/mod/mod-build-log.cxx
+++ b/mod/mod-build-log.cxx
@@ -3,12 +3,10 @@
#include <mod/mod-build-log.hxx>
-#include <algorithm> // find_if()
-
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <libbutl/timestamp.mxx> // to_stream()
+#include <libbutl/timestamp.hxx> // to_stream()
#include <web/server/module.hxx>
@@ -18,7 +16,6 @@
#include <mod/module-options.hxx>
using namespace std;
-using namespace bbot;
using namespace brep::cli;
using namespace odb::core;
@@ -68,7 +65,7 @@ handle (request& rq, response& rs)
//
// Note that the URL path must be in the following form:
//
- // <pkg-name>/<pkg-version>/log/<cfg-name>/<toolchain-name>/<toolchain-version>[/<operation>]
+ // <pkg-name>/<pkg-version>/log/<cfg-name>/<target>/<toolchain-name>/<toolchain-version>[/<operation>]
//
// Also note that the presence of the first 3 components is guaranteed by
// the repository_root module.
@@ -124,12 +121,33 @@ handle (request& rq, response& rs)
assert (i != lpath.end () && *i == "log");
if (++i == lpath.end ())
- throw invalid_argument ("no configuration name");
+ throw invalid_argument ("no target");
+
+ target_triplet target;
+ try
+ {
+ target = target_triplet (*i++);
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_argument (string ("invalid target: ") + e.what ());
+ }
+
+ if (i == lpath.end ())
+ throw invalid_argument ("no target configuration name");
- string config (*i++);
+ string target_config (*i++);
- if (config.empty ())
- throw invalid_argument ("empty configuration name");
+ if (target_config.empty ())
+ throw invalid_argument ("empty target configuration name");
+
+ if (i == lpath.end ())
+ throw invalid_argument ("no package configuration name");
+
+ string package_config (*i++);
+
+ if (package_config.empty ())
+ throw invalid_argument ("empty package configuration name");
if (i == lpath.end ())
throw invalid_argument ("no toolchain name");
@@ -145,7 +163,9 @@ handle (request& rq, response& rs)
version toolchain_version (parse_version (*i++, "toolchain version"));
id = build_id (package_id (tenant, move (name), package_version),
- move (config),
+ move (target),
+ move (target_config),
+ move (package_config),
move (toolchain_name),
toolchain_version);
@@ -182,7 +202,7 @@ handle (request& rq, response& rs)
auto config_expired = [&trace, &lpath, this] (const string& d)
{
l2 ([&]{trace << "package build configuration for " << lpath
- << (!tenant.empty () ? "(" + tenant + ")" : "")
+ << (!tenant.empty () ? '(' + tenant + ')' : "")
<< " expired: " << d;});
throw invalid_request (404, "package build configuration expired: " + d);
@@ -190,9 +210,11 @@ handle (request& rq, response& rs)
// Make sure the build configuration still exists.
//
- if (build_conf_map_->find (id.configuration.c_str ()) ==
- build_conf_map_->end ())
- config_expired ("no configuration");
+ if (target_conf_map_->find (
+ build_target_config_id {id.target,
+ id.target_config_name}) ==
+ target_conf_map_->end ())
+ config_expired ("no target configuration");
// Load the package build configuration (if present).
//
@@ -205,11 +227,16 @@ handle (request& rq, response& rs)
query<package_build>::build::id == id, pb))
config_expired ("no package build");
- b = pb.build;
+ b = move (pb.build);
if (b->state != build_state::built)
+ {
config_expired ("state is " + to_string (b->state));
+ }
else
+ {
build_db_->load (*b, b->results_section);
+ build_db_->load (*b, b->auxiliary_machines_section);
+ }
t.commit ();
}
@@ -228,15 +255,20 @@ handle (request& rq, response& rs)
if (!b->tenant.empty ())
os << options_->tenant_name () << ": " << b->tenant << endl << endl;
- os << "package: " << b->package_name << endl
- << "version: " << b->package_version << endl
- << "toolchain: " << b->toolchain_name << '-' << b->toolchain_version
- << endl
- << "config: " << b->configuration << endl
- << "machine: " << b->machine << " (" << b->machine_summary << ")"
- << endl
- << "target: " << b->target.string () << endl
- << "timestamp: ";
+ os << "package: " << b->package_name << endl
+ << "version: " << b->package_version << endl
+ << "toolchain: " << b->toolchain_name << '-'
+ << b->toolchain_version << endl
+ << "target: " << b->target << endl
+ << "target config: " << b->target_config_name << endl
+ << "package config: " << b->package_config_name << endl
+ << "build machine: " << b->machine.name << " -- "
+ << b->machine.summary << endl;
+
+ for (const build_machine& m: b->auxiliary_machines)
+ os << "auxiliary machine: " << m.name << " -- " << m.summary << endl;
+
+ os << "timestamp: ";
butl::to_stream (os,
b->timestamp,
diff --git a/mod/mod-build-result.cxx b/mod/mod-build-result.cxx
index 734ea5c..ccce17f 100644
--- a/mod/mod-build-result.cxx
+++ b/mod/mod-build-result.cxx
@@ -6,12 +6,8 @@
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <libbutl/openssl.mxx>
-#include <libbutl/sendmail.mxx>
-#include <libbutl/fdstream.mxx>
-#include <libbutl/process-io.mxx>
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
#include <libbbot/manifest.hxx>
@@ -19,11 +15,12 @@
#include <libbrep/build.hxx>
#include <libbrep/build-odb.hxx>
-#include <libbrep/package.hxx>
-#include <libbrep/package-odb.hxx>
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
-#include <mod/build.hxx> // *_url()
+#include <mod/build.hxx> // send_notification_email()
#include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>
using namespace std;
using namespace butl;
@@ -31,15 +28,21 @@ using namespace bbot;
using namespace brep::cli;
using namespace odb::core;
+brep::build_result::
+build_result (const tenant_service_map& tsm)
+ : tenant_service_map_ (tsm)
+{
+}
+
// While currently the user-defined copy constructor is not required (we don't
// need to deep copy nullptr's), it is a good idea to keep the placeholder
// ready for less trivial cases.
//
brep::build_result::
-build_result (const build_result& r)
- : database_module (r),
- build_config_module (r),
- options_ (r.initialized_ ? r.options_ : nullptr)
+build_result (const build_result& r, const tenant_service_map& tsm)
+ : build_result_module (r),
+ options_ (r.initialized_ ? r.options_ : nullptr),
+ tenant_service_map_ (tsm)
{
}
@@ -51,16 +54,8 @@ init (scanner& s)
options_ = make_shared<options::build_result> (
s, unknown_mode::fail, unknown_mode::fail);
- database_module::init (static_cast<const options::package_db&> (*options_),
- options_->package_db_retry ());
-
if (options_->build_config_specified ())
- {
- database_module::init (static_cast<const options::build_db&> (*options_),
- options_->build_db_retry ());
-
- build_config_module::init (*options_);
- }
+ build_result_module::init (*options_, *options_);
if (options_->root ().empty ())
options_->root (dir_path ("/"));
@@ -108,120 +103,23 @@ handle (request& rq, response&)
throw invalid_request (400, e.what ());
}
- // Parse the task response session to obtain the build id and the timestamp,
- // and to make sure the session matches tenant and the result manifest's
- // package name, and version.
+ // Parse the task response session and make sure the session matches tenant
+ // and the result manifest's package name, and version.
//
- build_id id;
- timestamp session_timestamp;
+ parse_session_result session;
+ const build_id& id (session.id);
try
{
- const string& s (rqm.session);
-
- size_t p (s.find ('/')); // End of tenant.
-
- if (p == string::npos)
- throw invalid_argument ("no package name");
-
- if (tenant.compare (0, tenant.size (), s, 0, p) != 0)
- throw invalid_argument ("tenant mismatch");
-
- size_t b (p + 1); // Start of package name.
- p = s.find ('/', b); // End of package name.
-
- if (p == b)
- throw invalid_argument ("empty package name");
-
- if (p == string::npos)
- throw invalid_argument ("no package version");
-
- package_name& name (rqm.result.name);
- {
- const string& n (name.string ());
- if (n.compare (0, n.size (), s, b, p - b) != 0)
- throw invalid_argument ("package name mismatch");
- }
-
- b = p + 1; // Start of version.
- p = s.find ('/', b); // End of version.
-
- if (p == string::npos)
- throw invalid_argument ("no configuration name");
-
- auto parse_version = [&s, &b, &p] (const char* what) -> version
- {
- // Intercept exception handling to add the parsing error attribution.
- //
- try
- {
- return brep::version (string (s, b, p - b));
- }
- catch (const invalid_argument& e)
- {
- throw invalid_argument (string ("invalid ") + what + ": " + e.what ());
- }
- };
+ // Note: also verifies that the tenant matches the session.
+ //
+ session = parse_session (rqm.session);
- version package_version (parse_version ("package version"));
+ if (rqm.result.name != id.package.name)
+ throw invalid_argument ("package name mismatch");
- if (package_version != rqm.result.version)
+ if (rqm.result.version != session.package_version)
throw invalid_argument ("package version mismatch");
-
- b = p + 1; // Start of configuration name.
- p = s.find ('/', b); // End of configuration name.
-
- if (p == string::npos)
- throw invalid_argument ("no toolchain name");
-
- string config (s, b, p - b);
-
- if (config.empty ())
- throw invalid_argument ("empty configuration name");
-
- b = p + 1; // Start of toolchain name.
- p = s.find ('/', b); // End of toolchain name.
-
- if (p == string::npos)
- throw invalid_argument ("no toolchain version");
-
- string toolchain_name (s, b, p - b);
-
- if (toolchain_name.empty ())
- throw invalid_argument ("empty toolchain name");
-
- b = p + 1; // Start of toolchain version.
- p = s.find ('/', b); // End of toolchain version.
-
- if (p == string::npos)
- throw invalid_argument ("no timestamp");
-
- version toolchain_version (parse_version ("toolchain version"));
-
- id = build_id (package_id (move (tenant), move (name), package_version),
- move (config),
- move (toolchain_name),
- toolchain_version);
-
- try
- {
- size_t tsn;
- string ts (s, p + 1);
-
- session_timestamp = timestamp (
- chrono::duration_cast<timestamp::duration> (
- chrono::nanoseconds (stoull (ts, &tsn))));
-
- if (tsn != ts.size ())
- throw invalid_argument ("trailing junk");
- }
- // Handle invalid_argument or out_of_range (both derive from logic_error),
- // that can be thrown by stoull().
- //
- catch (const logic_error& e)
- {
- throw invalid_argument (string ("invalid timestamp: ") + e.what ());
- }
}
catch (const invalid_argument& e)
{
@@ -233,52 +131,42 @@ handle (request& rq, response&)
// if the session is valid. The thinking is that this is a problem with the
// controller's setup (expires too fast), not with the agent's.
//
- auto warn_expired = [&rqm, &warn] (const string& d)
+ // Note, though, that there can be quite a common situation when a build
+ // machine is suspended by the bbot agent due to the build timeout. In this
+ // case the task result request may arrive anytime later (after the issue is
+ // investigated, etc) with the abort or abnormal status. By that arrival
+ // time a new build task may already be issued/completed for this package
+ // build configuration or this configuration may even be gone (brep has been
+ // reconfigured, package has gone, etc). We will log no warning in this
+ // case, assuming that such an expiration is not a problem with the
+ // controller's setup.
+ //
+ shared_ptr<build> b;
+ result_status rs (rqm.result.status);
+
+ auto warn_expired = [&rqm, &warn, &b, &session, rs] (const string& d)
{
- warn << "session '" << rqm.session << "' expired: " << d;
+ if (!((b == nullptr || b->timestamp > session.timestamp) &&
+ (rs == result_status::abort || rs == result_status::abnormal)))
+ warn << "session '" << rqm.session << "' expired: " << d;
};
// Make sure the build configuration still exists.
//
- const bbot::build_config* cfg;
+ const build_target_config* tc;
{
- auto i (build_conf_map_->find (id.configuration.c_str ()));
+ auto i (target_conf_map_->find (
+ build_target_config_id {id.target, id.target_config_name}));
- if (i == build_conf_map_->end ())
+ if (i == target_conf_map_->end ())
{
warn_expired ("no build configuration");
return true;
}
- cfg = i->second;
- }
-
- // Load the built package (if present).
- //
- // The only way not to deal with 2 databases simultaneously is to pull
- // another bunch of the package fields into the build_package foreign
- // object, which is a pain (see build_package.hxx for details). Doesn't seem
- // worth it here: email members are really secondary and we don't need to
- // switch transactions back and forth.
- //
- shared_ptr<package> pkg;
- {
- transaction t (package_db_->begin ());
- pkg = package_db_->find<package> (id.package);
- t.commit ();
- }
-
- if (pkg == nullptr)
- {
- warn_expired ("no package");
- return true;
+ tc = i->second;
}
- auto print_args = [&trace, this] (const char* args[], size_t n)
- {
- l2 ([&]{trace << process_args {args, n};});
- };
-
// Load and update the package build configuration (if present).
//
// NULL if the package build doesn't exist or is not updated for any reason
@@ -287,241 +175,389 @@ handle (request& rq, response&)
//
shared_ptr<build> bld;
- optional<result_status> prev_status;
+ // The built package configuration.
+ //
+ // Not NULL if bld is not NULL.
+ //
+ shared_ptr<build_package> pkg;
+ build_package_config* cfg (nullptr);
+
+ // Don't send email to the build-email address for the success-to-success
+ // status change, unless the build was forced.
+ //
bool build_notify (false);
bool unforced (true);
+ // If the package is built (result status differs from interrupt, etc) and
+ // the package tenant has a third-party service state associated with it,
+ // then check if the tenant_service_build_built callback is registered for
+ // the type of the associated service. If it is, then stash the state, the
+ // build object, and the callback pointer for the subsequent service `built`
+ // notification. Note that we send this notification for the skip result as
+ // well, since it is semantically equivalent to the previous build result
+ // with the actual build process being optimized out.
+ //
+ // If the package build is interrupted and the tenant_service_build_queued
+ // callback is associated with the package tenant, then stash the state, the
+ // build object, and the callback pointer and calculate the hints for the
+ // subsequent service `queued` notification.
+ //
+ const tenant_service_build_built* tsb (nullptr);
+ const tenant_service_build_queued* tsq (nullptr);
+ optional<pair<tenant_service, shared_ptr<build>>> tss;
+ tenant_service_build_queued::build_queued_hints qhs;
+
+ // Note that if the session authentication fails (probably due to the
+ // authentication settings change), then we log this case with the warning
+ // severity and respond with the 200 HTTP code as if the challenge is
+ // valid. The thinking is that we shouldn't alarm a law-abaiding agent and
+ // shouldn't provide any information to a malicious one.
+ //
+ connection_ptr conn (build_db_->connection ());
{
- transaction t (build_db_->begin ());
+ transaction t (conn->begin ());
package_build pb;
- shared_ptr<build> b;
+
+ auto build_timestamp = [&b] ()
+ {
+ return to_string (
+ chrono::duration_cast<std::chrono::nanoseconds> (
+ b->timestamp.time_since_epoch ()).count ());
+ };
+
if (!build_db_->query_one<package_build> (
query<package_build>::build::id == id, pb))
+ {
warn_expired ("no package build");
+ }
else if ((b = move (pb.build))->state != build_state::building)
- warn_expired ("package configuration state is " + to_string (b->state));
- else if (b->timestamp != session_timestamp)
- warn_expired ("non-matching timestamp");
- else
{
- // Check the challenge.
- //
- // If the challenge doesn't match expectations (probably due to the
- // authentication settings change), then we log this case with the
- // warning severity and respond with the 200 HTTP code as if the
- // challenge is valid. The thinking is that we shouldn't alarm a
- // law-abaiding agent and shouldn't provide any information to a
- // malicious one.
- //
- auto warn_auth = [&rqm, &warn] (const string& d)
+ warn_expired ("package configuration state is " + to_string (b->state) +
+ ", force state " + to_string (b->force) +
+ ", timestamp " + build_timestamp ());
+ }
+ else if (b->timestamp != session.timestamp)
+ {
+ warn_expired ("non-matching timestamp " + build_timestamp ());
+ }
+ else if (authenticate_session (*options_, rqm.challenge, *b, rqm.session))
+ {
+ const tenant_service_base* ts (nullptr);
+
+ shared_ptr<build_tenant> t (build_db_->load<build_tenant> (b->tenant));
+
+ if (t->service)
{
- warn << "session '" << rqm.session << "' authentication failed: " << d;
- };
+ auto i (tenant_service_map_.find (t->service->type));
- bool auth (false);
+ if (i != tenant_service_map_.end ())
+ ts = i->second.get ();
+ }
- // Must both be present or absent.
+ // If the build is interrupted, then revert it to the original built
+ // state if this is a rebuild. Otherwise (initial build), turn the build
+ // into the queued state if the tenant_service_build_queued callback is
+ // registered for the package tenant and delete it from the database
+ // otherwise.
+ //
+ // Note that if the tenant_service_build_queued callback is registered,
+ // we always send the `queued` notification for the interrupted build,
+ // even when we reverse it to the original built state. We could also
+ // turn the build into the queued state in this case, but it feels that
+ // there is no harm in keeping the previous build information available
+ // for the user.
//
- if (!b->agent_challenge != !rqm.challenge)
- warn_auth (rqm.challenge
- ? "unexpected challenge"
- : "challenge is expected");
- else if (bot_agent_key_map_ == nullptr) // Authentication is disabled.
- auth = true;
- else if (!b->agent_challenge) // Authentication is recently enabled.
- warn_auth ("challenge is required now");
- else
+ if (rs == result_status::interrupt)
{
- assert (b->agent_fingerprint && rqm.challenge);
- auto i (bot_agent_key_map_->find (*b->agent_fingerprint));
-
- // The agent's key is recently replaced.
+ // Schedule the `queued` notification, if the
+ // tenant_service_build_queued callback is registered for the tenant.
//
- if (i == bot_agent_key_map_->end ())
- warn_auth ("agent's public key not found");
- else
+ tsq = dynamic_cast<const tenant_service_build_queued*> (ts);
+
+ if (b->status) // Is this a rebuild?
{
- try
- {
- openssl os (print_args,
- path ("-"), fdstream_mode::text, 2,
- process_env (options_->openssl (),
- options_->openssl_envvar ()),
- "rsautl",
- options_->openssl_option (),
- "-verify", "-pubin", "-inkey", i->second);
-
- for (const auto& c: *rqm.challenge)
- os.out.put (c); // Sets badbit on failure.
-
- os.out.close ();
-
- string s;
- getline (os.in, s);
-
- bool v (os.in.eof ());
- os.in.close ();
-
- if (os.wait () && v)
- {
- auth = s == *b->agent_challenge;
-
- if (!auth)
- warn_auth ("challenge mismatched");
- }
- else // The signature is presumably meaningless.
- warn_auth ("unable to verify challenge");
- }
- catch (const system_error& e)
+ b->state = build_state::built;
+
+ // Keep the force rebuild indication. Note that the forcing state is
+ // only valid for the building state.
+ //
+ if (b->force == force_state::forcing)
+ b->force = force_state::forced;
+
+ // Cleanup the interactive build login information.
+ //
+ b->interactive = nullopt;
+
+ // Cleanup the authentication data.
+ //
+ b->agent_fingerprint = nullopt;
+ b->agent_challenge = nullopt;
+
+ // Note that we are unable to restore the pre-rebuild timestamp
+ // since it has been overwritten when the build task was issued.
+ // That, however, feels ok and we just keep it unchanged.
+ //
+ // Moreover, we actually use the fact that the build's timestamp is
+ // greater then its soft_timestamp as an indication that the build
+ // object represents the interrupted rebuild (see the build_task
+ // handler for details).
+ //
+ // @@ Actually, we also unable to restore the pre-rebuild machine
+ // and auxiliary machines, which are also displayed in the build
+ // log and may potentially be confusing. Should we drop them from
+ // the log in this case or replace with the "machine: unknown"
+ // record?
+
+ build_db_->update (b);
+ }
+ else // Initial build.
+ {
+ if (tsq != nullptr)
{
- fail << "unable to verify challenge: " << e;
+ // Since this is not a rebuild, there are no operation results and
+ // thus we don't need to load the results section to erase results
+ // from the database.
+ //
+ assert (b->results.empty ());
+
+ *b = build (move (b->tenant),
+ move (b->package_name),
+ move (b->package_version),
+ move (b->target),
+ move (b->target_config_name),
+ move (b->package_config_name),
+ move (b->toolchain_name),
+ move (b->toolchain_version));
+
+ build_db_->update (b);
}
+ else
+ build_db_->erase (b);
}
- }
- if (auth)
+ // If we ought to call the tenant_service_build_queued::build_queued()
+ // callback, then also set the package tenant's queued timestamp to
+ // the current time to prevent the notifications race (see
+ // tenant::queued_timestamp for details).
+ //
+ if (tsq != nullptr)
+ {
+ // Calculate the tenant service hints.
+ //
+ buildable_package_count tpc (
+ build_db_->query_value<buildable_package_count> (
+ query<buildable_package_count>::build_tenant::id == t->id));
+
+ shared_ptr<build_package> p (
+ build_db_->load<build_package> (b->id.package));
+
+ qhs = tenant_service_build_queued::build_queued_hints {
+ tpc == 1, p->configs.size () == 1};
+
+ // Set the package tenant's queued timestamp.
+ //
+ t->queued_timestamp = system_clock::now ();
+ build_db_->update (t);
+ }
+ }
+ else // Regular or skip build result.
{
- unforced = b->force == force_state::unforced;
+ // Schedule the `built` notification, if the
+ // tenant_service_build_built callback is registered for the tenant.
+ //
+ tsb = dynamic_cast<const tenant_service_build_built*> (ts);
- // Don't send email to the build-email address for the
- // success-to-success status change, unless the build was forced.
+ // Verify the result status/checksums.
//
- build_notify = !(rqm.result.status == result_status::success &&
- b->status &&
- *b->status == rqm.result.status &&
- unforced);
+ // Specifically, if the result status is skip, then it can only be in
+ // response to the soft rebuild task (all checksums are present in the
+ // build object) and the result checksums must match the build object
+ // checksums. On verification failure respond with the bad request
+ // HTTP code (400).
+ //
+ if (rs == result_status::skip)
+ {
+ if (!b->agent_checksum ||
+ !b->worker_checksum ||
+ !b->dependency_checksum)
+ throw invalid_request (400, "unexpected skip result status");
+
+ // Can only be absent for initial build, in which case the
+ // checksums are also absent and we would end up with the above
+ // 400 response.
+ //
+ assert (b->status);
+
+ // Verify that the result checksum matches the build checksum and
+ // throw invalid_request(400) if that's not the case.
+ //
+ auto verify = [] (const string& build_checksum,
+ const optional<string>& result_checksum,
+ const char* what)
+ {
+ if (!result_checksum)
+ throw invalid_request (
+ 400,
+ string (what) + " checksum is expected for skip result status");
+
+ if (*result_checksum != build_checksum)
+ throw invalid_request (
+ 400,
+ string (what) + " checksum '" + build_checksum +
+ "' is expected instead of '" + *result_checksum +
+ "' for skip result status");
+ };
+
+ verify (*b->agent_checksum, rqm.agent_checksum, "agent");
+
+ verify (*b->worker_checksum,
+ rqm.result.worker_checksum,
+ "worker");
+
+ verify (*b->dependency_checksum,
+ rqm.result.dependency_checksum,
+ "dependency");
+ }
- prev_status = move (b->status);
+ unforced = (b->force == force_state::unforced);
+
+ build_notify = !(rs == result_status::success &&
+ b->status &&
+ *b->status == rs &&
+ unforced);
b->state = build_state::built;
- b->status = rqm.result.status;
b->force = force_state::unforced;
+ // Cleanup the interactive build login information.
+ //
+ b->interactive = nullopt;
+
// Cleanup the authentication data.
//
b->agent_fingerprint = nullopt;
b->agent_challenge = nullopt;
- // Mark the section as loaded, so results are updated.
- //
- b->results_section.load ();
- b->results = move (rqm.result.results);
-
b->timestamp = system_clock::now ();
- b->completion_timestamp = b->timestamp;
+ b->soft_timestamp = b->timestamp;
+
+ // If the result status is other than skip, then save the status,
+ // results, and checksums and update the hard timestamp. Also stash
+ // the service notification information, if present.
+ //
+ if (rs != result_status::skip)
+ {
+ b->status = rs;
+ b->hard_timestamp = b->soft_timestamp;
+
+ // Mark the section as loaded, so results are updated.
+ //
+ b->results_section.load ();
+ b->results = move (rqm.result.results);
+
+ // Save the checksums.
+ //
+ b->agent_checksum = move (rqm.agent_checksum);
+ b->worker_checksum = move (rqm.result.worker_checksum);
+ b->dependency_checksum = move (rqm.result.dependency_checksum);
+ }
build_db_->update (b);
- shared_ptr<build_package> p (
- build_db_->load<build_package> (b->id.package));
+ pkg = build_db_->load<build_package> (b->id.package);
+ cfg = find (b->package_config_name, pkg->configs);
- if (belongs (*cfg, "all") &&
- !exclude (p->builds, p->constraints, *cfg))
- bld = move (b);
+ // The package configuration should be present (see mod-builds.cxx for
+ // details) but if it is not, let's log the warning.
+ //
+ if (cfg != nullptr)
+ {
+ // Don't send the build notification email if the task result is
+ // `skip`, the configuration is hidden, or is now excluded by the
+ // package.
+ //
+ if (rs != result_status::skip && !belongs (*tc, "hidden"))
+ {
+ build_db_->load (*pkg, pkg->constraints_section);
+
+ if (!exclude (*cfg, pkg->builds, pkg->constraints, *tc))
+ bld = b;
+ }
+ }
+ else
+ warn << "cannot find configuration '" << b->package_config_name
+ << "' for package " << pkg->id.name << '/' << pkg->version;
}
+
+ // If required, stash the service notification information.
+ //
+ if (tsb != nullptr || tsq != nullptr)
+ tss = make_pair (move (*t->service), move (b));
}
t.commit ();
}
- if (bld == nullptr)
- return true;
-
- string subj ((unforced ? "build " : "rebuild ") +
- to_string (*bld->status) + ": " +
- bld->package_name.string () + '/' +
- bld->package_version.string () + '/' +
- bld->configuration + '/' +
- bld->toolchain_name + '-' + bld->toolchain_version.string ());
+ // We either notify about the queued build or notify about the built package
+ // or don't notify at all.
+ //
+ assert (tsb == nullptr || tsq == nullptr);
- // Send notification emails to the interested parties.
+ // If the package build is interrupted and the tenant-associated third-party
+ // service needs to be notified about the queued builds, then call the
+ // tenant_service_build_queued::build_queued() callback function and update
+ // the service state, if requested.
//
- auto send_email = [&bld, &subj, &error, &trace, &print_args, this]
- (const string& to)
+ if (tsq != nullptr)
{
- try
- {
- l2 ([&]{trace << "email '" << subj << "' to " << to;});
-
- // Redirect the diagnostics to webserver error log.
- //
- // Note: if using this somewhere else, then need to factor out all this
- // exit status handling code.
- //
- sendmail sm (print_args,
- 2,
- options_->email (),
- subj,
- {to});
-
- if (bld->results.empty ())
- sm.out << "No operation results available." << endl;
- else
- {
- const string& host (options_->host ());
- const dir_path& root (options_->root ());
-
- ostream& os (sm.out);
-
- assert (bld->status);
- os << "combined: " << *bld->status << endl << endl
- << " " << build_log_url (host, root, *bld) << endl << endl;
+ assert (tss); // Wouldn't be here otherwise.
- for (const auto& r: bld->results)
- os << r.operation << ": " << r.status << endl << endl
- << " " << build_log_url (host, root, *bld, &r.operation)
- << endl << endl;
-
- os << "Force rebuild (enter the reason, use '+' instead of spaces):"
- << endl << endl
- << " " << build_force_url (host, root, *bld) << endl;
- }
+ const tenant_service& ss (tss->first);
- sm.out.close ();
+ vector<build> qbs;
+ qbs.push_back (move (*tss->second));
- if (!sm.wait ())
- error << "sendmail " << *sm.exit;
- }
- // Handle process_error and io_error (both derive from system_error).
- //
- catch (const system_error& e)
- {
- error << "sendmail error: " << e;
- }
- };
+ if (auto f = tsq->build_queued (ss,
+ qbs,
+ build_state::building,
+ qhs,
+ log_writer_))
+ update_tenant_service_state (conn, qbs.back ().tenant, f);
+ }
- // Don't send the build notification email if the empty package build email
- // is specified.
+ // If a third-party service needs to be notified about the built package,
+ // then call the tenant_service_build_built::build_built() callback function
+ // and update the service state, if requested.
//
- optional<email>& build_email (pkg->build_email);
- if (build_notify && (!build_email || !build_email->empty ()))
+ if (tsb != nullptr)
{
- // If none of the package build-* addresses is specified, then the build
- // email address is assumed to be the same as the package email address,
- // if specified, otherwise as the project email address, if specified,
- // otherwise the notification email is not sent.
- //
- optional<email> to;
+ assert (tss); // Wouldn't be here otherwise.
- if (build_email)
- to = move (build_email);
- else if (!pkg->build_warning_email && !pkg->build_error_email)
- to = move (pkg->package_email ? pkg->package_email : pkg->email);
+ const tenant_service& ss (tss->first);
+ const build& b (*tss->second);
- if (to)
- send_email (*to);
+ if (auto f = tsb->build_built (ss, b, log_writer_))
+ update_tenant_service_state (conn, b.tenant, f);
}
- assert (bld->status);
-
- // Send the build warning/error notification emails, if requested.
- //
- if (pkg->build_warning_email && *bld->status >= result_status::warning)
- send_email (*pkg->build_warning_email);
-
- if (pkg->build_error_email && *bld->status >= result_status::error)
- send_email (*pkg->build_error_email);
+ if (bld != nullptr)
+ {
+ // Don't sent the notification email for success-to-success status change,
+ // etc.
+ //
+ if (!build_notify)
+ (cfg->email ? cfg->email : pkg->build_email) = email ();
+
+ send_notification_email (*options_,
+ conn,
+ *bld,
+ *pkg,
+ *cfg,
+ unforced ? "build" : "rebuild",
+ error,
+ verb_ >= 2 ? &trace : nullptr);
+ }
return true;
}
diff --git a/mod/mod-build-result.hxx b/mod/mod-build-result.hxx
index 71a60f9..96449d5 100644
--- a/mod/mod-build-result.hxx
+++ b/mod/mod-build-result.hxx
@@ -8,21 +8,21 @@
#include <libbrep/utility.hxx>
#include <mod/module-options.hxx>
-#include <mod/database-module.hxx>
-#include <mod/build-config-module.hxx>
+#include <mod/tenant-service.hxx>
+#include <mod/build-result-module.hxx>
namespace brep
{
- class build_result: public database_module, private build_config_module
+ class build_result: public build_result_module
{
public:
- build_result () = default;
+ explicit
+ build_result (const tenant_service_map&);
// Create a shallow copy (handling instance) if initialized and a deep
// copy (context exemplar) otherwise.
//
- explicit
- build_result (const build_result&);
+ build_result (const build_result&, const tenant_service_map&);
virtual bool
handle (request&, response&);
@@ -36,6 +36,7 @@ namespace brep
private:
shared_ptr<options::build_result> options_;
+ const tenant_service_map& tenant_service_map_;
};
}
diff --git a/mod/mod-build-task.cxx b/mod/mod-build-task.cxx
index 04b2a36..773d041 100644
--- a/mod/mod-build-task.cxx
+++ b/mod/mod-build-task.cxx
@@ -4,23 +4,26 @@
#include <mod/mod-build-task.hxx>
#include <map>
+#include <regex>
#include <chrono>
+#include <random>
#include <odb/database.hxx>
#include <odb/transaction.hxx>
#include <odb/schema-catalog.hxx>
-#include <libbutl/sha256.mxx>
-#include <libbutl/utility.mxx> // compare_c_string
-#include <libbutl/openssl.mxx>
-#include <libbutl/fdstream.mxx> // nullfd
-#include <libbutl/process-io.mxx>
-#include <libbutl/path-pattern.mxx>
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/ft/lang.hxx> // thread_local
+
+#include <libbutl/regex.hxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/openssl.hxx>
+#include <libbutl/fdstream.hxx> // nullfd
+#include <libbutl/process-io.hxx>
+#include <libbutl/path-pattern.hxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
#include <libbbot/manifest.hxx>
-#include <libbbot/build-config.hxx>
#include <web/server/module.hxx>
@@ -29,7 +32,9 @@
#include <libbrep/build-package.hxx>
#include <libbrep/build-package-odb.hxx>
+#include <mod/build.hxx> // send_notification_email()
#include <mod/module-options.hxx>
+#include <mod/build-target-config.hxx>
using namespace std;
using namespace butl;
@@ -37,15 +42,40 @@ using namespace bbot;
using namespace brep::cli;
using namespace odb::core;
+static thread_local mt19937 rand_gen (random_device {} ());
+
+// Generate a random number in the specified range (max value is included).
+//
+static inline size_t
+rand (size_t min_val, size_t max_val)
+{
+ // Note that size_t is not whitelisted as a type the
+ // uniform_int_distribution class template can be instantiated with.
+ //
+ return min_val == max_val
+ ? min_val
+ : static_cast<size_t> (
+ uniform_int_distribution<unsigned long long> (
+ static_cast<unsigned long long> (min_val),
+ static_cast<unsigned long long> (max_val)) (rand_gen));
+}
+
+brep::build_task::
+build_task (const tenant_service_map& tsm)
+ : tenant_service_map_ (tsm)
+{
+}
+
// While currently the user-defined copy constructor is not required (we don't
// need to deep copy nullptr's), it is a good idea to keep the placeholder
// ready for less trivial cases.
//
brep::build_task::
-build_task (const build_task& r)
+build_task (const build_task& r, const tenant_service_map& tsm)
: database_module (r),
build_config_module (r),
- options_ (r.initialized_ ? r.options_ : nullptr)
+ options_ (r.initialized_ ? r.options_ : nullptr),
+ tenant_service_map_ (tsm)
{
}
@@ -59,13 +89,23 @@ init (scanner& s)
if (options_->build_config_specified ())
{
- // Verify that build-alt-rebuild-{start,stop} are both either specified or
- // not.
+ // Verify that build-alt-*-rebuild-{start,stop} are both either specified
+ // or not.
//
- if (options_->build_alt_rebuild_start_specified () !=
- options_->build_alt_rebuild_stop_specified ())
- fail << "build-alt-rebuild-start and build-alt-rebuild-stop "
- << "configuration options must both be either specified or not";
+ auto bad_alt = [&fail] (const char* what)
+ {
+ fail << "build-alt-" << what << "-rebuild-start and build-alt-" << what
+ << "-rebuild-stop configuration options must both be either "
+ << "specified or not";
+ };
+
+ if (options_->build_alt_soft_rebuild_start_specified () !=
+ options_->build_alt_soft_rebuild_stop_specified ())
+ bad_alt ("soft");
+
+ if (options_->build_alt_hard_rebuild_start_specified () !=
+ options_->build_alt_hard_rebuild_stop_specified ())
+ bad_alt ("hard");
database_module::init (*options_, options_->build_db_retry ());
@@ -86,6 +126,53 @@ init (scanner& s)
options_->root (dir_path ("/"));
}
+// Skip tenants with the freshly queued packages from the consideration (see
+// tenant::queued_timestamp for the details on the service notifications race
+// prevention).
+//
+template <typename T>
+static inline query<T>
+package_query (brep::params::build_task& params,
+ interactive_mode imode,
+ uint64_t queued_expiration_ns)
+{
+ using namespace brep;
+ using query = query<T>;
+
+ query q (!query::build_tenant::archived);
+
+ // Filter by repositories canonical names (if requested).
+ //
+ const vector<string>& rp (params.repository ());
+
+ if (!rp.empty ())
+ q = q &&
+ query::build_repository::id.canonical_name.in_range (rp.begin (),
+ rp.end ());
+
+ // If the interactive mode is false or true, then filter out the respective
+ // packages.
+ //
+ switch (imode)
+ {
+ case interactive_mode::false_:
+ {
+ q = q && query::build_tenant::interactive.is_null ();
+ break;
+ }
+ case interactive_mode::true_:
+ {
+ q = q && query::build_tenant::interactive.is_not_null ();
+ break;
+ }
+ case interactive_mode::both: break;
+ }
+
+ return q &&
+ (query::build_tenant::queued_timestamp.is_null () ||
+ query::build_tenant::queued_timestamp < queued_expiration_ns);
+}
+
bool brep::build_task::
handle (request& rq, response& rs)
{
@@ -142,139 +229,239 @@ handle (request& rq, response& rs)
agent_fp = move (tqm.fingerprint);
}
- task_response_manifest tsm;
+ // The resulting task manifest and the related build, package, and
+ // configuration objects. Note that the latter 3 are only meaningful if the
+ // the task manifest is present.
+ //
+ task_response_manifest task_response;
+ shared_ptr<build> task_build;
+ shared_ptr<build_package> task_package;
+ const build_package_config* task_config;
+
+ auto serialize_task_response_manifest = [&task_response, &rs] ()
+ {
+ // @@ Probably it would be a good idea to also send some cache control
+ // headers to avoid caching by HTTP proxies. That would require
+ // extension of the web::response interface.
+ //
+
+ manifest_serializer s (rs.content (200, "text/manifest;charset=utf-8"),
+ "task_response_manifest");
+ task_response.serialize (s);
+ };
- // Map build configurations to machines that are capable of building them.
- // The first matching machine is selected for each configuration. Also
- // create the configuration name list for use in database queries.
+ interactive_mode imode (tqm.effective_interactive_mode ());
+
+ // Restict the interactive mode (specified by the task request manifest) if
+ // the interactive parameter is specified and is other than "both". If
+ // values specified by the parameter and manifest are incompatible (false vs
+ // true), then just bail out responding with the manifest with an empty
+ // session.
+ //
+ if (params.interactive () != interactive_mode::both)
+ {
+ if (imode != interactive_mode::both)
+ {
+ if (params.interactive () != imode)
+ {
+ serialize_task_response_manifest ();
+ return true;
+ }
+ }
+ else
+ imode = params.interactive (); // Can only change both to true or false.
+ }
+
+ // Map build target configurations to machines that are capable of building
+ // them. The first matching machine is selected for each configuration.
//
struct config_machine
{
- const build_config* config;
+ const build_target_config* config;
machine_header_manifest* machine;
};
- using config_machines = map<const char*, config_machine, compare_c_string>;
+ using config_machines = map<build_target_config_id, config_machine>;
- cstrings cfg_names;
- config_machines cfg_machines;
+ config_machines conf_machines;
- for (const auto& c: *build_conf_)
+ for (const build_target_config& c: *target_conf_)
{
- for (auto& m: tqm.machines)
+ for (machine_header_manifest& m: tqm.machines)
{
- // The same story as in exclude() from build-config.cxx.
- //
+ if (m.effective_role () == machine_role::build)
try
{
+ // The same story as in exclude() from build-target-config.cxx.
+ //
if (path_match (dash_components_to_path (m.name),
dash_components_to_path (c.machine_pattern),
dir_path () /* start */,
- path_match_flags::match_absent) &&
- cfg_machines.insert (
- make_pair (c.name.c_str (), config_machine ({&c, &m}))).second)
- cfg_names.push_back (c.name.c_str ());
+ path_match_flags::match_absent))
+ {
+ conf_machines.emplace (build_target_config_id {c.target, c.name},
+ config_machine {&c, &m});
+ break;
+ }
}
catch (const invalid_path&) {}
}
}
- // Go through packages until we find one that has no build configuration
- // present in the database, or is in the building state but expired
- // (collectively called unbuilt). If such a package configuration is found
- // then put it into the building state, set the current timestamp and respond
- // with the task for building this package configuration.
+ // Collect the auxiliary configurations/machines available for the build.
+ //
+ struct auxiliary_config_machine
+ {
+ string config;
+ const machine_header_manifest* machine;
+ };
+
+ vector<auxiliary_config_machine> auxiliary_config_machines;
+
+ for (const machine_header_manifest& m: tqm.machines)
+ {
+ if (m.effective_role () == machine_role::auxiliary)
+ {
+ // Derive the auxiliary configuration name by stripping the first
+ // (architecture) component from the machine name.
+ //
+ size_t p (m.name.find ('-'));
+
+ if (p == string::npos || p == 0 || p == m.name.size () - 1)
+ throw invalid_request (400,
+ (string ("no ") +
+ (p == 0 ? "architecture" : "OS") +
+ " component in machine name '" + m.name + "'"));
+
+ auxiliary_config_machines.push_back (
+ auxiliary_config_machine {string (m.name, p + 1), &m});
+ }
+ }
+
+ // Go through package build configurations until we find one that has no
+ // build target configuration present in the database, or is in the building
+ // state but expired (collectively called unbuilt). If such a target
+ // configuration is found then put it into the building state, set the
+ // current timestamp and respond with the task for building this package
+ // configuration.
//
// While trying to find a non-built package configuration we will also
- // collect the list of the built package configurations which it's time to
- // rebuild. So if no unbuilt package is found, we will pickup one to
- // rebuild. The rebuild preference is given in the following order: the
- // greater force state, the greater overall status, the lower timestamp.
+ // collect the list of the built configurations which it's time to
+ // rebuild. So if no unbuilt package configuration is found, we will pickup
+ // one to rebuild. The rebuild preference is given in the following order:
+ // the greater force state, the greater overall status, the lower timestamp.
//
- if (!cfg_machines.empty ())
+ if (!conf_machines.empty ())
{
vector<shared_ptr<build>> rebuilds;
- // Create the task response manifest. The package must have the internal
- // repository loaded.
+ // Create the task response manifest. Must be called inside the build db
+ // transaction.
//
- auto task = [this] (shared_ptr<build>&& b,
- shared_ptr<build_package>&& p,
+ auto task = [this] (const build& b,
+ const build_package& p,
+ const build_package_config& pc,
+ small_vector<bpkg::test_dependency, 1>&& tests,
+ vector<auxiliary_machine>&& ams,
+ optional<string>&& interactive,
const config_machine& cm) -> task_response_manifest
{
uint64_t ts (
chrono::duration_cast<std::chrono::nanoseconds> (
- b->timestamp.time_since_epoch ()).count ());
-
- string session (b->tenant + '/' +
- b->package_name.string () + '/' +
- b->package_version.string () + '/' +
- b->configuration + '/' +
- b->toolchain_name + '/' +
- b->toolchain_version.string () + '/' +
+ b.timestamp.time_since_epoch ()).count ());
+
+ string session (b.tenant + '/' +
+ b.package_name.string () + '/' +
+ b.package_version.string () + '/' +
+ b.target.string () + '/' +
+ b.target_config_name + '/' +
+ b.package_config_name + '/' +
+ b.toolchain_name + '/' +
+ b.toolchain_version.string () + '/' +
to_string (ts));
- string result_url (options_->host () +
- tenant_dir (options_->root (), b->tenant).string () +
- "?build-result");
+ string tenant (tenant_dir (options_->root (), b.tenant).string ());
+ string result_url (options_->host () + tenant + "?build-result");
- lazy_shared_ptr<build_repository> r (p->internal_repository);
+ assert (transaction::has_current ());
+
+ assert (p.internal ()); // The package is expected to be buildable.
+
+ shared_ptr<build_repository> r (p.internal_repository.load ());
strings fps;
if (r->certificate_fingerprint)
fps.emplace_back (move (*r->certificate_fingerprint));
- // Exclude external test packages which exclude the task build
- // configuration.
- //
- small_vector<package, 1> tes;
+ const package_name& pn (p.id.name);
- for (const build_test_dependency& td: p->tests)
- {
- // Don't exclude unresolved external tests.
- //
- // Note that this may result in the build task failure. However,
- // silently excluding such tests could end up with missed software
- // bugs which feels much worse.
- //
- if (td.package != nullptr)
- {
- shared_ptr<build_package> p (td.package.load ());
+ bool module_pkg (pn.string ().compare (0, 10, "libbuild2-") == 0);
- // Use the `all` class as a least restrictive default underlying
- // build class set. Note that we should only apply the explicit
- // build restrictions to the external test packages (think about
- // the `builds: all` and `builds: -windows` manifest values for
- // the primary and external test packages, respectively).
- //
- if (exclude (p->builds,
- p->constraints,
- *cm.config,
- nullptr /* reason */,
- true /* default_all_ucs */))
- tes.push_back (package {move (p->id.name), move (p->version)});
- }
- }
-
- task_manifest task (move (b->package_name),
- move (b->package_version),
+ // Note that the auxiliary environment is crafted by the bbot agent
+ // after the auxiliary machines are booted.
+ //
+ task_manifest task (pn,
+ p.version,
move (r->location),
move (fps),
- move (tes),
+ p.requirements,
+ move (tests),
+ b.dependency_checksum,
cm.machine->name,
+ move (ams),
cm.config->target,
cm.config->environment,
+ nullopt /* auxiliary_environment */,
cm.config->args,
- cm.config->warning_regexes);
+ pc.arguments,
+ belongs (*cm.config, module_pkg ? "build2" : "host"),
+ cm.config->warning_regexes,
+ move (interactive),
+ b.worker_checksum);
+
+ // Collect the build artifacts upload URLs, skipping those which are
+ // excluded with the upload-*-exclude configuration options.
+ //
+ vector<upload_url> upload_urls;
+
+ for (const auto& ud: options_->upload_data ())
+ {
+ const string& t (ud.first);
+
+ auto exclude = [&t] (const multimap<string, string>& mm,
+ const string& v)
+ {
+ auto range (mm.equal_range (t));
+
+ for (auto i (range.first); i != range.second; ++i)
+ {
+ if (i->second == v)
+ return true;
+ }
+
+ return false;
+ };
+
+ if (!exclude (options_->upload_toolchain_exclude (),
+ b.toolchain_name) &&
+ !exclude (options_->upload_repository_exclude (),
+ r->canonical_name))
+ {
+ upload_urls.emplace_back (options_->host () + tenant + "?upload=" + t,
+ t);
+ }
+ }
return task_response_manifest (move (session),
- move (b->agent_challenge),
+ b.agent_challenge,
move (result_url),
+ move (upload_urls),
+ b.agent_checksum,
move (task));
};
- // Calculate the build (building state) or rebuild (built state) expiration
- // time for package configurations
+ // Calculate the build/rebuild (building/built state) and the `queued`
+ // notifications expiration time for package configurations.
//
timestamp now (system_clock::now ());
@@ -298,44 +485,98 @@ handle (request& rq, response& rs)
timestamp forced_rebuild_expiration (
expiration (options_->build_forced_rebuild_timeout ()));
- timestamp normal_rebuild_expiration;
+ uint64_t queued_expiration_ns (
+ expiration_ns (options_->build_queued_timeout ()));
- if (options_->build_alt_rebuild_start_specified ())
+ // Calculate the soft/hard rebuild expiration time, based on the
+ // respective build-{soft,hard}-rebuild-timeout and
+ // build-alt-{soft,hard}-rebuild-{start,stop,timeout} configuration
+ // options.
+ //
+ // If normal_timeout is zero, then return timestamp_unknown to indicate
+ // 'never expire'. Note that this value is less than any build timestamp
+ // value, including timestamp_nonexistent.
+ //
+ // NOTE: there is a similar code in monitor/monitor.cxx.
+ //
+ auto build_expiration = [&now] (
+ const optional<pair<duration, duration>>& alt_interval,
+ optional<size_t> alt_timeout,
+ size_t normal_timeout)
{
- const duration& start (options_->build_alt_rebuild_start ());
- const duration& stop (options_->build_alt_rebuild_stop ());
-
- duration dt (daytime (now));
+ if (normal_timeout == 0)
+ return timestamp_unknown;
- // Note that if the stop time is less than the start time then the
- // interval extends through the midnight.
- //
- bool alt_timeout (start <= stop
- ? dt >= start && dt < stop
- : dt >= start || dt < stop);
+ timestamp r;
+ chrono::seconds nt (normal_timeout);
- // If we out of the alternative rebuild timeout interval, then fall back
- // to using the normal rebuild timeout.
- //
- if (alt_timeout)
+ if (alt_interval)
{
- if (!options_->build_alt_rebuild_timeout_specified ())
+ const duration& start (alt_interval->first);
+ const duration& stop (alt_interval->second);
+
+ duration dt (daytime (now));
+
+ // Note that if the stop time is less than the start time then the
+ // interval extends through the midnight.
+ //
+ bool use_alt_timeout (start <= stop
+ ? dt >= start && dt < stop
+ : dt >= start || dt < stop);
+
+ // If we out of the alternative rebuild timeout interval, then fall
+ // back to using the normal rebuild timeout.
+ //
+ if (use_alt_timeout)
{
- duration interval_len (start <= stop
- ? stop - start
- : (24h - start) + stop);
+ // Calculate the alternative timeout, unless it is specified
+ // explicitly.
+ //
+ duration t;
- normal_rebuild_expiration = now - interval_len;
+ if (!alt_timeout)
+ {
+ t = start <= stop ? (stop - start) : ((24h - start) + stop);
+
+ // If the normal rebuild timeout is greater than 24 hours, then
+ // increase the default alternative timeout by (normal - 24h) (see
+ // build-alt-soft-rebuild-timeout configuration option for
+ // details).
+ //
+ if (nt > 24h)
+ t += nt - 24h;
+ }
+ else
+ t = chrono::seconds (*alt_timeout);
+
+ r = now - t;
}
- else
- normal_rebuild_expiration =
- expiration (options_->build_alt_rebuild_timeout ());
}
- }
- if (normal_rebuild_expiration == timestamp_nonexistent)
- normal_rebuild_expiration =
- expiration (options_->build_normal_rebuild_timeout ());
+ return r != timestamp_nonexistent ? r : (now - nt);
+ };
+
+ timestamp soft_rebuild_expiration (
+ build_expiration (
+ (options_->build_alt_soft_rebuild_start_specified ()
+ ? make_pair (options_->build_alt_soft_rebuild_start (),
+ options_->build_alt_soft_rebuild_stop ())
+ : optional<pair<duration, duration>> ()),
+ (options_->build_alt_soft_rebuild_timeout_specified ()
+ ? options_->build_alt_soft_rebuild_timeout ()
+ : optional<size_t> ()),
+ options_->build_soft_rebuild_timeout ()));
+
+ timestamp hard_rebuild_expiration (
+ build_expiration (
+ (options_->build_alt_hard_rebuild_start_specified ()
+ ? make_pair (options_->build_alt_hard_rebuild_start (),
+ options_->build_alt_hard_rebuild_stop ())
+ : optional<pair<duration, duration>> ()),
+ (options_->build_alt_hard_rebuild_timeout_specified ()
+ ? options_->build_alt_hard_rebuild_timeout ()
+ : optional<size_t> ()),
+ options_->build_hard_rebuild_timeout ()));
// Return the challenge (nonce) if brep is configured to authenticate bbot
// agents. Return nullopt otherwise.
@@ -397,6 +638,7 @@ handle (request& rq, response& rs)
// Convert butl::standard_version type to brep::version.
//
brep::version toolchain_version (tqm.toolchain_version.string ());
+ string& toolchain_name (tqm.toolchain_name);
// Prepare the buildable package prepared query.
//
@@ -417,354 +659,1659 @@ handle (request& rq, response& rs)
using pkg_query = query<buildable_package>;
using prep_pkg_query = prepared_query<buildable_package>;
- // Exclude archived tenants.
+ pkg_query pq (package_query<buildable_package> (params,
+ imode,
+ queued_expiration_ns));
+
+ // Transform (in-place) the interactive login information into the actual
+ // login command, if specified in the manifest and the transformation
+ // regexes are specified in the configuration.
//
- pkg_query pq (!pkg_query::build_tenant::archived);
+ if (tqm.interactive_login &&
+ options_->build_interactive_login_specified ())
+ {
+ optional<string> lc;
+ string l (tqm.agent + ' ' + *tqm.interactive_login);
+
+ // Use the first matching regex for the transformation.
+ //
+ for (const pair<regex, string>& rf: options_->build_interactive_login ())
+ {
+ pair<string, bool> r (regex_replace_match (l, rf.first, rf.second));
- // Filter by repositories canonical names (if requested).
+ if (r.second)
+ {
+ lc = move (r.first);
+ break;
+ }
+ }
+
+ if (!lc)
+ throw invalid_request (400, "unable to match login info '" + l + '\'');
+
+ tqm.interactive_login = move (lc);
+ }
+
+ // In the random package ordering mode iterate over the packages list by
+ // starting from the random offset and wrapping around when reaching the
+ // end.
+ //
+ // Note, however, that since there can be some packages which are already
+ // built for all configurations and are not archived yet, picking an
+ // unbuilt package this way may not work as desired. Think of the
+ // following case with 5 packages in 3 non-archived tenants:
+ //
+ // 0: A - unbuilt, tenant 1
+ // 1: B - built, tenant 2
+ // 2: C - built, tenant 2
+ // 3: D - built, tenant 2
+ // 4: E - unbuilt, tenant 3
+ //
+ // If we just pick a random starting offset in the [0, 4] range, then we
+ // will build A package with probability 0.2 and E with probability 0.8.
//
- const vector<string>& rp (params.repository ());
+ // To fix that we will only try to build a package from a tenant that the
+ // random starting offset refers to. Failed that, we will randomly pick
+ // new starting offset and retry. To make sure we don't retry indefinitely
+ // when there are no more packages to build (and also for the sake of
+ // optimization; see below), we will track positions of packages which we
+ // (unsuccessfully) have already tried to build and skip them while
+ // generating the random starting offsets and while iterating over
+ // packages.
+ //
+ // Also note that since we iterate over packages in chunks, each queried
+ // in a separate transaction, the number of packages may potentially
+ // increase or decrease while iterating over them. Thus, to keep things
+ // consistent, we may need to update our tried positions tracking state
+ // accordingly (not to cycle, not to refer to an entry out of the list
+ // boundaries, etc). Generally, regardless whether the number of packages
+ // has changed or not, the offsets and position statuses may now refer to
+ // some different packages. The only sensible thing we can do in such
+ // cases (without trying to detect this situation and restart from
+ // scratch) is to serve the request and issue some build task, if
+ // possible.
+ //
+ bool random (options_->build_package_order () == build_order::random);
+ size_t start_offset (0);
- if (!rp.empty ())
- pq = pq &&
- pkg_query::build_repository::id.canonical_name.in_range (rp.begin (),
- rp.end ());
+ // List of "tried to build" package statuses. True entries denote
+ // positions of packages which we have tried to build. Initially all
+ // entries are false.
+ //
+ vector<bool> tried_positions;
- // Specify the portion.
+ // Number of false entries in the above vector. Used merely as an
+ // optimization to bail out.
//
- size_t offset (0);
+ size_t untried_positions_count (0);
- pq += "ORDER BY" +
- pkg_query::build_package::id.tenant + "," +
- pkg_query::build_package::id.name +
- order_by_version (pkg_query::build_package::id.version, false) +
- "OFFSET" + pkg_query::_ref (offset) + "LIMIT 50";
+ // Return a random position of a package that we have not yet tried to
+ // build, if present, and nullopt otherwise.
+ //
+ auto rand_position = [&tried_positions,
+ &untried_positions_count] () -> optional<size_t>
+ {
+ assert (untried_positions_count <= tried_positions.size ());
- connection_ptr conn (build_db_->connection ());
+ if (untried_positions_count == 0)
+ return nullopt;
- prep_pkg_query pkg_prep_query (
- conn->prepare_query<buildable_package> (
- "mod-build-task-package-query", pq));
+ size_t r;
+ while (tried_positions[r = rand (0, tried_positions.size () - 1)]) ;
+ return r;
+ };
- // Prepare the build prepared query.
+ // Mark the package at specified position as tried to build. Assume that
+ // it is not yet been tried to build.
//
- // Note that we can not query the database for configurations that a
- // package was not built with, as the database contains only those package
- // configurations that have already been acted upon (initially empty).
+ auto position_tried = [&tried_positions,
+ &untried_positions_count] (size_t i)
+ {
+ assert (i < tried_positions.size () &&
+ !tried_positions[i] &&
+ untried_positions_count != 0);
+
+ tried_positions[i] = true;
+ --untried_positions_count;
+ };
+
+ // Resize the tried positions list and update the untried positions
+ // counter accordingly if the package number has changed.
//
- // This is why we query the database for package configurations that
- // should not be built (in the built state, or in the building state and
- // not expired). Having such a list we will select the first build
- // configuration that is not in the list (if available) for the response.
+ // For simplicity, assume that packages are added/removed to/from the end
+ // of the list. Note that misguessing in such a rare cases are possible
+ // but not harmful (see above for the reasoning).
//
- using bld_query = query<build>;
- using prep_bld_query = prepared_query<build>;
+ auto resize_tried_positions = [&tried_positions, &untried_positions_count]
+ (size_t n)
+ {
+ if (n > tried_positions.size ()) // Packages added?
+ {
+ untried_positions_count += n - tried_positions.size ();
+ tried_positions.resize (n, false);
+ }
+ else if (n < tried_positions.size ()) // Packages removed?
+ {
+ for (size_t i (n); i != tried_positions.size (); ++i)
+ {
+ if (!tried_positions[i])
+ {
+ assert (untried_positions_count != 0);
+ --untried_positions_count;
+ }
+ }
- package_id id;
+ tried_positions.resize (n);
+ }
+ else
+ {
+ // Not supposed to be called if the number of packages didn't change.
+ //
+ assert (false);
+ }
+ };
- bld_query bq (
- equal<build> (bld_query::id.package, id) &&
+ if (random)
+ {
+ using query = query<buildable_package_count>;
- bld_query::id.configuration.in_range (cfg_names.begin (),
- cfg_names.end ()) &&
+ query q (package_query<buildable_package_count> (params,
+ imode,
+ queued_expiration_ns));
- bld_query::id.toolchain_name == tqm.toolchain_name &&
+ transaction t (build_db_->begin ());
- compare_version_eq (bld_query::id.toolchain_version,
- canonical_version (toolchain_version),
- true /* revision */) &&
+ // If there are any non-archived interactive build tenants, then the
+ // chosen randomization approach doesn't really work since interactive
+ // tenants must be preferred over non-interactive ones, which is
+ // achieved by proper ordering of the package query result (see below).
+ // Thus, we just disable randomization if there are any interactive
+ // tenants.
+ //
+ // But shouldn't we randomize the order between packages in multiple
+ // interactive tenants? Given that such a tenant may only contain a
+ // single package and can only be built in a single configuration that
+ // is probably not important. However, we may assume that the
+ // randomization still happens naturally due to the random nature of the
+ // tenant id, which is used as a primary sorting criteria (see below).
+ //
+ size_t interactive_package_count (
+ build_db_->query_value<buildable_package_count> (
+ q && query::build_tenant::interactive.is_not_null ()));
- (bld_query::state == "built" ||
- (bld_query::force == "forcing" &&
- bld_query::timestamp > forced_result_expiration_ns) ||
- (bld_query::force != "forcing" && // Unforced or forced.
- bld_query::timestamp > normal_result_expiration_ns)));
+ if (interactive_package_count == 0)
+ {
+ untried_positions_count =
+ build_db_->query_value<buildable_package_count> (q);
+ }
+ else
+ random = false;
- prep_bld_query bld_prep_query (
- conn->prepare_query<build> ("mod-build-task-build-query", bq));
+ t.commit ();
- while (tsm.session.empty ())
+ if (untried_positions_count != 0)
+ {
+ tried_positions.resize (untried_positions_count, false);
+
+ optional<size_t> so (rand_position ());
+ assert (so); // Wouldn't be here otherwise.
+ start_offset = *so;
+ }
+ }
+
+ if (!random || !tried_positions.empty ())
{
- transaction t (conn->begin ());
+ // Specify the portion.
+ //
+ size_t offset (start_offset);
+ size_t limit (50);
- // Query (and cache) buildable packages.
+ pq += "ORDER BY";
+
+ // If the interactive mode is both, then order the packages so that ones
+ // from the interactive build tenants appear first.
//
- auto packages (pkg_prep_query.execute ());
+ if (imode == interactive_mode::both)
+ pq += pkg_query::build_tenant::interactive + "NULLS LAST,";
+
+ pq += pkg_query::build_package::id.tenant + "," +
+ pkg_query::build_package::id.name +
+ order_by_version (pkg_query::build_package::id.version, false) +
+ "OFFSET" + pkg_query::_ref (offset) +
+ "LIMIT" + pkg_query::_ref (limit);
+
+ connection_ptr conn (build_db_->connection ());
+
+ prep_pkg_query pkg_prep_query (
+ conn->prepare_query<buildable_package> (
+ "mod-build-task-package-query", pq));
- // Bail out if there is nothing left.
+ // Prepare the build prepared query.
+ //
+ // Note that we can not query the database for configurations that a
+ // package was not built with, as the database contains only those build
+ // configurations that have already been acted upon (initially empty).
+ //
+ // This is why we query the database for configurations that should not
+ // be built (in the built state, or in the building state and not
+ // expired). Having such a list we will select the first build
+ // configuration that is not in the list (if available) for the
+ // response.
+ //
+ using bld_query = query<build>;
+ using prep_bld_query = prepared_query<build>;
+
+ package_id id;
+ string pkg_config;
+
+ bld_query sq (false);
+ for (const auto& cm: conf_machines)
+ sq = sq || (bld_query::id.target == cm.first.target &&
+ bld_query::id.target_config_name == cm.first.config);
+
+ bld_query bq (
+ equal<build> (bld_query::id.package, id) &&
+ bld_query::id.package_config_name == bld_query::_ref (pkg_config) &&
+ sq &&
+ bld_query::id.toolchain_name == toolchain_name &&
+
+ compare_version_eq (bld_query::id.toolchain_version,
+ canonical_version (toolchain_version),
+ true /* revision */) &&
+
+ (bld_query::state == "built" ||
+ (bld_query::state == "building" &&
+ ((bld_query::force == "forcing" &&
+ bld_query::timestamp > forced_result_expiration_ns) ||
+ (bld_query::force != "forcing" && // Unforced or forced.
+ bld_query::timestamp > normal_result_expiration_ns)))));
+
+ prep_bld_query bld_prep_query (
+ conn->prepare_query<build> ("mod-build-task-build-query", bq));
+
+ // Return true if a package needs to be rebuilt.
//
- if (packages.empty ())
+ auto needs_rebuild = [&forced_rebuild_expiration,
+ &soft_rebuild_expiration,
+ &hard_rebuild_expiration] (const build& b)
{
- t.commit ();
- break;
- }
+ assert (b.state == build_state::built);
+
+ return (b.force == force_state::forced &&
+ b.timestamp <= forced_rebuild_expiration) ||
+ b.soft_timestamp <= soft_rebuild_expiration ||
+ b.hard_timestamp <= hard_rebuild_expiration;
+ };
- offset += packages.size ();
+ // Convert a build to the hard rebuild, resetting the agent checksum.
+ //
+ // Note that since the checksums are hierarchical, the agent checksum
+ // reset will trigger resets of the "subordinate" checksums up to the
+ // dependency checksum and so the package will be rebuilt.
+ //
+ // Also note that we keep the previous build task result and status
+ // intact since we may still need to revert the build into the built
+ // state if the task execution is interrupted.
+ //
+ auto convert_to_hard = [] (const shared_ptr<build>& b)
+ {
+ b->agent_checksum = nullopt;
+ };
+
+ // Return SHA256 checksum of the controller logic and the configuration
+ // target, environment, arguments, and warning-detecting regular
+ // expressions.
+ //
+ auto controller_checksum = [] (const build_target_config& c)
+ {
+ sha256 cs ("1"); // Hash the logic version.
+
+ cs.append (c.target.string ());
+ cs.append (c.environment ? *c.environment : "");
+
+ for (const string& a: c.args)
+ cs.append (a);
+
+ for (const string& re: c.warning_regexes)
+ cs.append (re);
+
+ return string (cs.string ());
+ };
+
+ // Return the machine id as a machine checksum.
+ //
+ // Note that we don't include auxiliary machine ids into this checksum
+ // since a different machine will most likely get picked for a pattern.
+ // And we view all auxiliary machines that match a pattern as equal for
+ // testing purposes (in other words, pattern is not the way to get
+ // coverage).
+ //
+ auto machine_checksum = [] (const machine_header_manifest& m)
+ {
+ return m.id;
+ };
- // Iterate over packages until we find one that needs building.
+ // Tenant that the start offset refers to.
+ //
+ optional<string> start_tenant;
+
+ // If the build task is created and the tenant of the being built
+ // package has a third-party service state associated with it, then
+ // check if the tenant_service_build_building and/or
+ // tenant_service_build_queued callbacks are registered for the type of
+ // the associated service. If they are, then stash the state, the build
+ // object, and the callback pointers for the subsequent service
+ // notifications.
+ //
+ // Also, if the tenant_service_build_queued callback is registered, then
+ // create, persist, and stash the queued build objects for all the
+ // unbuilt by the current toolchain and not yet queued configurations of
+ // the package the build task is created for and calculate the hints.
+ // Note that for the task build, we need to make sure that the
+ // third-party service receives the `queued` notification prior to the
+ // `building` notification (see mod/tenant-service.hxx for valid
+ // transitions). The `queued` notification is assumed to be already sent
+ // for the build if the respective object exists and any of the
+ // following is true for it:
+ //
+ // - It is in the queued state (initial_state is build_state::queued).
+ //
+ // - It is a user-forced rebuild of an incomplete build
+ // (rebuild_forced_build is true).
+ //
+ // - It is a rebuild of an interrupted rebuild (rebuild_forced_build is
+ // true).
+ //
+ const tenant_service_build_building* tsb (nullptr);
+ const tenant_service_build_queued* tsq (nullptr);
+ optional<pair<tenant_service, shared_ptr<build>>> tss;
+ vector<build> qbs;
+ tenant_service_build_queued::build_queued_hints qhs;
+ optional<build_state> initial_state;
+ bool rebuild_forced_build (false);
+ bool rebuild_interrupted_rebuild (false);
+
+ // Create, persist, and return the queued build objects for all the
+ // unbuilt by the current toolchain and not yet queued configurations of
+ // the specified package.
//
- for (auto& bp: packages)
+ // Note that the build object argument is only used for the toolchain
+ // information retrieval. Also note that the package constraints section
+ // is expected to be loaded.
+ //
+ auto queue_builds = [this] (const build_package& p, const build& b)
{
- id = move (bp.id);
+ assert (p.constraints_section.loaded ());
- // Iterate through the package configurations and erase those that
- // don't need building from the build configuration map. All those
- // configurations that remained can be built. We will take the first
- // one, if present.
+ // Query the existing build ids and stash them into the set.
//
- // Also save the built package configurations for which it's time to be
- // rebuilt.
+ set<build_id> existing_builds;
+
+ using query = query<package_build_id>;
+
+ query q (query::build::id.package == p.id &&
+ query::build::id.toolchain_name == b.toolchain_name &&
+ compare_version_eq (query::build::id.toolchain_version,
+ b.id.toolchain_version,
+ true /* revision */));
+
+ for (build_id& id: build_db_->query<package_build_id> (q))
+ existing_builds.emplace (move (id));
+
+ // Go through all the potential package builds and queue those which
+ // are not in the existing builds set.
//
- config_machines configs (cfg_machines); // Make a copy for this pkg.
- auto pkg_builds (bld_prep_query.execute ());
+ vector<build> r;
- for (auto i (pkg_builds.begin ()); i != pkg_builds.end (); ++i)
+ for (const build_package_config& pc: p.configs)
{
- auto j (configs.find (i->id.configuration.c_str ()));
+ for (const build_target_config& tc: *target_conf_)
+ {
+ if (!exclude (pc, p.builds, p.constraints, tc))
+ {
+ build_id id (p.id,
+ tc.target, tc.name,
+ pc.name,
+ b.toolchain_name, b.toolchain_version);
+
+ if (existing_builds.find (id) == existing_builds.end ())
+ {
+ r.emplace_back (move (id.package.tenant),
+ move (id.package.name),
+ p.version,
+ move (id.target),
+ move (id.target_config_name),
+ move (id.package_config_name),
+ move (id.toolchain_name),
+ b.toolchain_version);
+
+ // @@ TODO Persist the whole vector of builds with a single
+ // operation if/when bulk operations support is added
+ // for objects with containers.
+ //
+ build_db_->persist (r.back ());
+ }
+ }
+ }
+ }
+
+ return r;
+ };
+
+ auto queue_hints = [this] (const build_package& p)
+ {
+ buildable_package_count tpc (
+ build_db_->query_value<buildable_package_count> (
+ query<buildable_package_count>::build_tenant::id == p.id.tenant));
- // Outdated configurations are already excluded with the database
- // query.
+ return tenant_service_build_queued::build_queued_hints {
+ tpc == 1, p.configs.size () == 1};
+ };
+
+ // Collect the auxiliary machines required for testing of the specified
+ // package configuration and the external test packages, if present for
+ // the specified target configuration (task_auxiliary_machines),
+ // together with the auxiliary machines information that needs to be
+ // persisted in the database as a part of the build object
+ // (build_auxiliary_machines, which is parallel to
+ // task_auxiliary_machines). While at it collect the involved test
+ // dependencies. Return nullopt if any auxiliary configuration patterns
+ // may not be resolved to the auxiliary machines (no matching
+ // configuration, auxiliary machines RAM limit is exceeded, etc).
+ //
+ // Note that if the same auxiliary environment name is used for multiple
+ // packages (for example, for the main and tests packages or for the
+ // tests and examples packages, etc), then a shared auxiliary machine is
+ // used for all these packages. In this case all the respective
+ // configuration patterns must match the configuration derived from this
+ // machine name. If they don't, then return nullopt. The thinking here
+ // is that on the next task request a machine whose derived
+ // configuration matches all the patterns can potentially be picked.
+ //
+ struct collect_auxiliaries_result
+ {
+ vector<auxiliary_machine> task_auxiliary_machines;
+ vector<build_machine> build_auxiliary_machines;
+ small_vector<bpkg::test_dependency, 1> tests;
+ };
+
+ auto collect_auxiliaries = [&tqm, &auxiliary_config_machines, this]
+ (const shared_ptr<build_package>& p,
+ const build_package_config& pc,
+ const build_target_config& tc)
+ -> optional<collect_auxiliaries_result>
+ {
+ // The list of the picked build auxiliary machines together with the
+ // environment names they have been picked for.
+ //
+ vector<pair<auxiliary_config_machine, string>> picked_machines;
+
+ // Try to randomly pick the auxiliary machine that matches the
+ // specified pattern and which can be supplied with the minimum
+ // required RAM, if specified. Return false if such a machine is not
+ // available. If a machine is already picked for the specified
+ // environment name, then return true if the machine's configuration
+ // matches the specified pattern and false otherwise.
+ //
+ auto pick_machine =
+ [&tqm,
+ &picked_machines,
+ used_ram = uint64_t (0),
+ available_machines = auxiliary_config_machines]
+ (const build_auxiliary& ba) mutable -> bool
+ {
+ vector<size_t> ams; // Indexes of the available matching machines.
+ optional<uint64_t> ar (tqm.auxiliary_ram);
+
+ // If the machine configuration name pattern (which is legal) or any
+ // of the machine configuration names (illegal) are invalid paths,
+ // then we assume we cannot pick the machine.
+ //
+ try
+ {
+ // The same story as in exclude() from build-target-config.cxx.
+ //
+ auto match = [pattern = dash_components_to_path (ba.config)]
+ (const string& config)
+ {
+ return path_match (dash_components_to_path (config),
+ pattern,
+ dir_path () /* start */,
+ path_match_flags::match_absent);
+ };
+
+ // Check if a machine is already picked for the specified
+ // environment name.
+ //
+ for (const auto& m: picked_machines)
+ {
+ if (m.second == ba.environment_name)
+ return match (m.first.config);
+ }
+
+ // Collect the matching machines from the list of the available
+ // machines and bail out if there are none.
+ //
+ for (size_t i (0); i != available_machines.size (); ++i)
+ {
+ const auxiliary_config_machine& m (available_machines[i]);
+ optional<uint64_t> mr (m.machine->ram_minimum);
+
+ if (match (m.config) && (!mr || !ar || used_ram + *mr <= *ar))
+ ams.push_back (i);
+ }
+
+ if (ams.empty ())
+ return false;
+ }
+ catch (const invalid_path&)
+ {
+ return false;
+ }
+
+ // Pick the matching machine randomly.
+ //
+ size_t i (ams[rand (0, ams.size () - 1)]);
+ auxiliary_config_machine& cm (available_machines[i]);
+
+ // Bump the used RAM.
+ //
+ if (optional<uint64_t> r = cm.machine->ram_minimum)
+ used_ram += *r;
+
+ // Move out the picked machine from the available machines list.
//
- assert (j != configs.end ());
- configs.erase (j);
+ picked_machines.emplace_back (move (cm), ba.environment_name);
+ available_machines.erase (available_machines.begin () + i);
+ return true;
+ };
- if (i->state == build_state::built)
+ // Collect auxiliary machines for the main package build configuration.
+ //
+ for (const build_auxiliary& ba:
+ pc.effective_auxiliaries (p->auxiliaries))
+ {
+ if (!pick_machine (ba))
+ return nullopt; // No matched auxiliary machine.
+ }
+
+ // Collect the test packages and the auxiliary machines for their
+ // default build configurations. Exclude external test packages which
+ // exclude the current target configuration.
+ //
+ small_vector<bpkg::test_dependency, 1> tests;
+
+ build_db_->load (*p, p->requirements_tests_section);
+
+ for (const build_test_dependency& td: p->tests)
+ {
+ // Don't exclude unresolved external tests.
+ //
+ // Note that this may result in the build task failure. However,
+ // silently excluding such tests could end up with missed software
+ // bugs which feels much worse.
+ //
+ if (td.package != nullptr)
{
- assert (i->force != force_state::forcing);
+ shared_ptr<build_package> tp (td.package.load ());
+
+ // Try to use the test package configuration named the same as the
+          // current configuration of the main package. If there is no
+          // such configuration, then fall back to using the default
+ // configuration (which must exist). If the selected test package
+ // configuration excludes the current target configuration, then
+ // exclude this external test package from the build task.
+ //
+ // Note that potentially the selected test package configuration
+ // may contain some (bpkg) arguments associated, but we currently
+ // don't provide build bot worker with such information. This,
+          // however, is probably too far-fetched so let's keep it simple
+ // for now.
+ //
+ const build_package_config* tpc (find (pc.name, tp->configs));
+
+ if (tpc == nullptr)
+ {
+ tpc = find ("default", tp->configs);
- if (i->timestamp <= (i->force == force_state::forced
- ? forced_rebuild_expiration
- : normal_rebuild_expiration))
- rebuilds.emplace_back (i.load ());
+ assert (tpc != nullptr); // Must always be present.
+ }
+
+ // Use the `all` class as a least restrictive default underlying
+ // build class set. Note that we should only apply the explicit
+ // build restrictions to the external test packages (think about
+ // the `builds: all` and `builds: -windows` manifest values for
+ // the primary and external test packages, respectively).
+ //
+ build_db_->load (*tp, tp->constraints_section);
+
+ if (exclude (*tpc,
+ tp->builds,
+ tp->constraints,
+ tc,
+ nullptr /* reason */,
+ true /* default_all_ucs */))
+ continue;
+
+ for (const build_auxiliary& ba:
+ tpc->effective_auxiliaries (tp->auxiliaries))
+ {
+ if (!pick_machine (ba))
+ return nullopt; // No matched auxiliary machine.
+ }
}
+
+ tests.emplace_back (td.name,
+ td.type,
+ td.buildtime,
+ td.constraint,
+ td.enable,
+ td.reflect);
+ }
+
+ vector<auxiliary_machine> tms;
+ vector<build_machine> bms;
+
+ tms.reserve (picked_machines.size ());
+ bms.reserve (picked_machines.size ());
+
+ for (pair<auxiliary_config_machine, string>& pm: picked_machines)
+ {
+ const machine_header_manifest& m (*pm.first.machine);
+ tms.push_back (auxiliary_machine {m.name, move (pm.second)});
+ bms.push_back (build_machine {m.name, m.summary});
+ }
+
+ return collect_auxiliaries_result {
+ move (tms), move (bms), move (tests)};
+ };
+
+ // While at it, collect the aborted for various reasons builds
+ // (interactive builds in multiple configurations, builds with too many
+ // auxiliary machines, etc) to send the notification emails at the end
+ // of the request handling.
+ //
+ struct aborted_build
+ {
+ shared_ptr<build> b;
+ shared_ptr<build_package> p;
+ const build_package_config* pc;
+ const char* what;
+ };
+ vector<aborted_build> aborted_builds;
+
+ // Note: is only used for crafting of the notification email subjects.
+ //
+ bool unforced (true);
+
+ for (bool done (false); !task_response.task && !done; )
+ {
+ transaction tr (conn->begin ());
+
+ // We need to be careful in the random package ordering mode not to
+ // miss the end after having wrapped around.
+ //
+ done = (start_offset != 0 &&
+ offset < start_offset &&
+ offset + limit >= start_offset);
+
+ if (done)
+ limit = start_offset - offset;
+
+ // Query (and cache) buildable packages.
+ //
+ auto packages (pkg_prep_query.execute ());
+
+ size_t chunk_size (packages.size ());
+ size_t next_offset (offset + chunk_size);
+
+ // If we are in the random package ordering mode, then also check if
+ // the package number has changed and, if that's the case, resize the
+ // tried positions list accordingly.
+ //
+ if (random &&
+ (next_offset > tried_positions.size () ||
+ (next_offset < tried_positions.size () && chunk_size < limit)))
+ {
+ resize_tried_positions (next_offset);
}
- if (!configs.empty ())
+ // Bail out if there is nothing left, unless we need to wrap around in
+ // the random package ordering mode.
+ //
+ if (chunk_size == 0)
+ {
+ tr.commit ();
+
+ if (start_offset != 0 && offset >= start_offset)
+ offset = 0;
+ else
+ done = true;
+
+ continue;
+ }
+
+ size_t position (offset); // Current package position.
+ offset = next_offset;
+
+ // Iterate over packages until we find one that needs building or have
+ // to bail out in the random package ordering mode for some reason (no
+ // more untried positions, need to restart, etc).
+ //
+ // Note that it is not uncommon for the sequentially examined packages
+ // to belong to the same tenant (single tenant mode, etc). Thus, we
+ // will cache the loaded tenant objects.
+ //
+ shared_ptr<build_tenant> t;
+
+ for (auto& bp: packages)
{
- // Find the first build configuration that is not excluded by the
- // package.
+ shared_ptr<build_package>& p (bp.package);
+
+ id = p->id;
+
+ // Reset the tenant cache if the current package belongs to a
+ // different tenant.
+ //
+ if (t != nullptr && t->id != id.tenant)
+ t = nullptr;
+
+ // If we are in the random package ordering mode, then cache the
+ // tenant the start offset refers to, if not cached yet, and check
+ // if we are still iterating over packages from this tenant
+ // otherwise. If the latter is not the case, then restart from a new
+ // random untried offset, if present, and bail out otherwise.
//
- shared_ptr<build_package> p (build_db_->load<build_package> (id));
+ if (random)
+ {
+ if (!start_tenant)
+ {
+ start_tenant = id.tenant;
+ }
+ else if (*start_tenant != id.tenant)
+ {
+ if (optional<size_t> so = rand_position ())
+ {
+ start_offset = *so;
+ offset = start_offset;
+ start_tenant = nullopt;
+ limit = 50;
+ done = false;
+ }
+ else
+ done = true;
+
+ break;
+ }
- auto i (configs.begin ());
- auto e (configs.end ());
+ size_t pos (position++);
+
+ // Should have been resized, if required.
+ //
+ assert (pos < tried_positions.size ());
- for (;
- i != e &&
- exclude (p->builds, p->constraints, *i->second.config);
- ++i) ;
+ // Skip the position if it has already been tried.
+ //
+ if (tried_positions[pos])
+ continue;
+
+ position_tried (pos);
+ }
- if (i != e)
+ // Note that a request to interactively build a package in multiple
+        // configurations is more likely a mistake than a deliberate choice.
+ // Thus, for the interactive tenant let's check if the package can
+ // be built in multiple configurations. If that's the case then we
+ // will put all the potential builds into the aborted state and
+ // continue iterating looking for another package. Otherwise, just
+ // proceed for this package normally.
+ //
+ // It also feels like a good idea to archive an interactive tenant
+ // after a build object is created for it, regardless if the build
+ // task is issued or not. This way we make sure that an interactive
+ // build is never performed multiple times for such a tenant for any
+ // reason (multiple toolchains, buildtab change, etc). Note that the
+ // build result will still be accepted for an archived build.
+ //
+ if (bp.interactive)
{
- config_machine& cm (i->second);
- machine_header_manifest& mh (*cm.machine);
+ // Note that the tenant can be archived via some other package on
+ // some previous iteration. Skip the package if that's the case.
+ //
+ // Also note that if bp.archived is false, then we need to
+ // (re-)load the tenant object to re-check the archived flag.
+ //
+ if (!bp.archived)
+ {
+ if (t == nullptr)
+ t = build_db_->load<build_tenant> (id.tenant);
+
+ bp.archived = t->archived;
+ }
- build_id bid (move (id),
- cm.config->name,
- move (tqm.toolchain_name),
- toolchain_version);
+ if (bp.archived)
+ continue;
- shared_ptr<build> b (build_db_->find<build> (bid));
- optional<string> cl (challenge ());
+ assert (t != nullptr); // Wouldn't be here otherwise.
- // If build configuration doesn't exist then create the new one
- // and persist. Otherwise put it into the building state, refresh
- // the timestamp and update.
+ // Collect the potential build configurations as all combinations
+ // of the tenant's packages build configurations and the
+ // non-excluded (by the packages) build target
+ // configurations. Note that here we ignore the machines from the
+ // task request.
//
- if (b == nullptr)
+ struct build_config
{
- b = make_shared<build> (move (bid.package.tenant),
- move (bid.package.name),
- move (bp.version),
- move (bid.configuration),
- move (bid.toolchain_name),
- move (toolchain_version),
- move (agent_fp),
- move (cl),
- mh.name,
- move (mh.summary),
- cm.config->target);
-
- build_db_->persist (b);
+ shared_ptr<build_package> p;
+ const build_package_config* pc;
+ const build_target_config* tc;
+ };
+
+ small_vector<build_config, 1> build_configs;
+
+ // Note that we don't bother creating a prepared query here, since
+          // it's highly unlikely to encounter multiple interactive tenants
+ // per task request. Given that we archive such tenants
+ // immediately, as a common case there will be none.
+ //
+ pkg_query pq (pkg_query::build_tenant::id == id.tenant);
+ for (auto& tp: build_db_->query<buildable_package> (pq))
+ {
+ shared_ptr<build_package>& p (tp.package);
+
+ build_db_->load (*p, p->constraints_section);
+
+ for (build_package_config& pc: p->configs)
+ {
+ for (const auto& tc: *target_conf_)
+ {
+ if (!exclude (pc, p->builds, p->constraints, tc))
+ build_configs.push_back (build_config {p, &pc, &tc});
+ }
+ }
}
- else
+
+ // If multiple build configurations are collected, then abort all
+ // the potential builds and continue iterating over the packages.
+ //
+ if (build_configs.size () > 1)
{
- // The package configuration is in the building state, and there
- // are no results.
+ // Abort the builds.
//
- // Note that in both cases we keep the status intact to be able
- // to compare it with the final one in the result request
- // handling in order to decide if to send the notification
- // email. The same is true for the forced flag (in the sense
- // that we don't set the force state to unforced).
+ for (build_config& c: build_configs)
+ {
+ shared_ptr<build_package>& p (c.p);
+ const string& pc (c.pc->name);
+ const build_target_config& tc (*c.tc);
+
+ build_id bid (p->id,
+ tc.target,
+ tc.name,
+ pc,
+ toolchain_name,
+ toolchain_version);
+
+ // Can there be any existing builds for such a tenant? Doesn't
+ // seem so, unless due to some manual intervention into the
+ // database. Anyway, let's just leave such a build alone.
+ //
+ shared_ptr<build> b (build_db_->find<build> (bid));
+
+ if (b == nullptr)
+ {
+ b = make_shared<build> (move (bid.package.tenant),
+ move (bid.package.name),
+ p->version,
+ move (bid.target),
+ move (bid.target_config_name),
+ move (bid.package_config_name),
+ move (bid.toolchain_name),
+ toolchain_version,
+ result_status::abort,
+ operation_results ({
+ operation_result {
+ "configure",
+ result_status::abort,
+ "error: multiple configurations "
+ "for interactive build\n"}}),
+ build_machine {
+ "brep", "build task module"});
+
+ build_db_->persist (b);
+
+ // Schedule the build notification email.
+ //
+ aborted_builds.push_back (aborted_build {
+ move (b), move (p), c.pc, "build"});
+ }
+ }
+
+ // Archive the tenant.
//
- // Load the section to assert the above statement.
- //
- build_db_->load (*b, b->results_section);
+ t->archived = true;
+ build_db_->update (t);
+
+ continue; // Skip the package.
+ }
+ }
+
+ // If true, then the package is (being) built for some
+ // configurations.
+ //
+ // Note that since we only query the built and forced rebuild
+ // objects there can be false negatives.
+ //
+ bool package_built (false);
- assert (b->state == build_state::building &&
- b->results.empty ());
+ for (const build_package_config& pc: p->configs)
+ {
+ pkg_config = pc.name;
+
+ // Iterate through the built configurations and erase them from the
+ // build configuration map. All those configurations that remained
+ // can be built. We will take the first one, if present.
+ //
+ // Also save the built configurations for which it's time to be
+ // rebuilt.
+ //
+ config_machines configs (conf_machines); // Make copy for this pkg.
+ auto pkg_builds (bld_prep_query.execute ());
- b->state = build_state::building;
+ if (!package_built && !pkg_builds.empty ())
+ package_built = true;
- // Switch the force state not to reissue the task after the
- // forced rebuild timeout. Note that the result handler will
- // still recognize that the rebuild was forced.
+ for (auto i (pkg_builds.begin ()); i != pkg_builds.end (); ++i)
+ {
+ auto j (
+ configs.find (build_target_config_id {
+ i->id.target, i->id.target_config_name}));
+
+ // Outdated configurations are already excluded with the
+ // database query.
//
- if (b->force == force_state::forcing)
- b->force = force_state::forced;
+ assert (j != configs.end ());
+ configs.erase (j);
- b->agent_fingerprint = move (agent_fp);
- b->agent_challenge = move (cl);
- b->machine = mh.name;
- b->machine_summary = move (mh.summary);
- b->target = cm.config->target;
- b->timestamp = system_clock::now ();
+ if (i->state == build_state::built)
+ {
+ assert (i->force != force_state::forcing);
- build_db_->update (b);
+ if (needs_rebuild (*i))
+ rebuilds.emplace_back (i.load ());
+ }
}
- // Finally, prepare the task response manifest.
- //
- // We iterate over buildable packages.
+ if (!configs.empty ())
+ {
+ // Find the first build configuration that is not excluded by
+ // the package configuration and for which all the requested
+ // auxiliary machines can be provided.
+ //
+ auto i (configs.begin ());
+ auto e (configs.end ());
+
+ build_db_->load (*p, p->constraints_section);
+
+ optional<collect_auxiliaries_result> aux;
+ for (; i != e; ++i)
+ {
+ const build_target_config& tc (*i->second.config);
+
+ if (!exclude (pc, p->builds, p->constraints, tc) &&
+ (aux = collect_auxiliaries (p, pc, tc)))
+ break;
+ }
+
+ if (i != e)
+ {
+ config_machine& cm (i->second);
+ machine_header_manifest& mh (*cm.machine);
+
+ build_id bid (move (id),
+ cm.config->target,
+ cm.config->name,
+ move (pkg_config),
+ move (toolchain_name),
+ toolchain_version);
+
+ shared_ptr<build> b (build_db_->find<build> (bid));
+ optional<string> cl (challenge ());
+
+ // Move the interactive build login information into the build
+                // object, if the package is to be built interactively.
+ //
+ optional<string> login (bp.interactive
+ ? move (tqm.interactive_login)
+ : nullopt);
+
+ // If build configuration doesn't exist then create the new
+ // one and persist. Otherwise put it into the building state,
+ // refresh the timestamp and update.
+ //
+ if (b == nullptr)
+ {
+ b = make_shared<build> (move (bid.package.tenant),
+ move (bid.package.name),
+ p->version,
+ move (bid.target),
+ move (bid.target_config_name),
+ move (bid.package_config_name),
+ move (bid.toolchain_name),
+ move (toolchain_version),
+ move (login),
+ move (agent_fp),
+ move (cl),
+ build_machine {
+ mh.name, move (mh.summary)},
+ move (aux->build_auxiliary_machines),
+ controller_checksum (*cm.config),
+ machine_checksum (*cm.machine));
+
+ build_db_->persist (b);
+ }
+ else
+ {
+ // The build configuration is in the building or queued
+ // state.
+ //
+ // Note that in both the building and built cases we keep
+ // the status intact to be able to compare it with the final
+ // one in the result request handling in order to decide if
+ // to send the notification email or to revert it to the
+ // built state if interrupted. The same is true for the
+ // forced flag (in the sense that we don't set the force
+ // state to unforced).
+ //
+ assert (b->state != build_state::built);
+
+ initial_state = b->state;
+
+ b->state = build_state::building;
+ b->interactive = move (login);
+
+ unforced = (b->force == force_state::unforced);
+
+ // Switch the force state not to reissue the task after the
+ // forced rebuild timeout. Note that the result handler will
+ // still recognize that the rebuild was forced.
+ //
+ if (b->force == force_state::forcing)
+ {
+ b->force = force_state::forced;
+ rebuild_forced_build = true;
+ }
+
+ b->agent_fingerprint = move (agent_fp);
+ b->agent_challenge = move (cl);
+ b->machine = build_machine {mh.name, move (mh.summary)};
+
+ // Mark the section as loaded, so auxiliary_machines are
+ // updated.
+ //
+ b->auxiliary_machines_section.load ();
+
+ b->auxiliary_machines =
+ move (aux->build_auxiliary_machines);
+
+ string ccs (controller_checksum (*cm.config));
+ string mcs (machine_checksum (*cm.machine));
+
+ // Issue the hard rebuild if it is forced or the
+ // configuration or machine has changed.
+ //
+ if (b->hard_timestamp <= hard_rebuild_expiration ||
+ b->force == force_state::forced ||
+ b->controller_checksum != ccs ||
+ b->machine_checksum != mcs)
+ convert_to_hard (b);
+
+ b->controller_checksum = move (ccs);
+ b->machine_checksum = move (mcs);
+
+ b->timestamp = system_clock::now ();
+
+ build_db_->update (b);
+ }
+
+ if (t == nullptr)
+ t = build_db_->load<build_tenant> (b->tenant);
+
+ // Archive an interactive tenant.
+ //
+ if (bp.interactive)
+ {
+ t->archived = true;
+ build_db_->update (t);
+ }
+
+ // Finally, stash the service notification information, if
+ // present, and prepare the task response manifest.
+ //
+ if (t->service)
+ {
+ auto i (tenant_service_map_.find (t->service->type));
+
+ if (i != tenant_service_map_.end ())
+ {
+ const tenant_service_base* s (i->second.get ());
+
+ tsb = dynamic_cast<const tenant_service_build_building*> (s);
+ tsq = dynamic_cast<const tenant_service_build_queued*> (s);
+
+ if (tsq != nullptr)
+ {
+ qbs = queue_builds (*p, *b);
+
+ // If we ought to call the
+ // tenant_service_build_queued::build_queued() callback,
+ // then also set the package tenant's queued timestamp
+ // to the current time to prevent the notifications race
+ // (see tenant::queued_timestamp for details).
+ //
+ if (!qbs.empty () ||
+ !initial_state ||
+ (*initial_state != build_state::queued &&
+ !rebuild_forced_build))
+ {
+ qhs = queue_hints (*p);
+
+ t->queued_timestamp = system_clock::now ();
+ build_db_->update (t);
+ }
+ }
+
+ if (tsb != nullptr || tsq != nullptr)
+ tss = make_pair (*t->service, b);
+ }
+ }
+
+ task_response = task (*b,
+ *p,
+ pc,
+ move (aux->tests),
+ move (aux->task_auxiliary_machines),
+ move (bp.interactive),
+ cm);
+
+ task_build = move (b);
+ task_package = move (p);
+ task_config = &pc;
+
+ package_built = true;
+
+ break; // Bail out from the package configurations loop.
+ }
+ }
+ }
+
+ // If the task manifest is prepared, then bail out from the package
+ // loop, commit the transaction and respond. Otherwise, stash the
+ // build toolchain into the tenant, unless it is already stashed or
+ // the current package already has some configurations (being)
+ // built.
+ //
+ if (!task_response.task)
+ {
+ // Note that since there can be false negatives for the
+ // package_built flag (see above), there can be redundant tenant
+ // queries which, however, seems harmless (query uses the primary
+ // key and the object memory footprint is small).
//
- assert (p->internal ());
+ if (!package_built)
+ {
+ if (t == nullptr)
+ t = build_db_->load<build_tenant> (p->id.tenant);
- p->internal_repository.load ();
+ if (!t->toolchain)
+ {
+ t->toolchain = build_toolchain {toolchain_name,
+ toolchain_version};
- tsm = task (move (b), move (p), cm);
+ build_db_->update (t);
+ }
+ }
}
+ else
+ break;
}
- // If the task response manifest is prepared, then bail out from the
- // package loop, commit the transaction and respond.
- //
- if (!tsm.session.empty ())
- break;
+ tr.commit ();
}
- t.commit ();
- }
-
- // If we don't have an unbuilt package, then let's see if we have a
- // package to rebuild.
- //
- if (tsm.session.empty () && !rebuilds.empty ())
- {
- // Sort the package configuration rebuild list with the following sort
- // priority:
+ // If we don't have an unbuilt package, then let's see if we have a
+ // build configuration to rebuild.
//
- // 1: force state
- // 2: overall status
- // 3: timestamp (less is preferred)
+ if (!task_response.task && !rebuilds.empty ())
+ {
+ // Sort the configuration rebuild list with the following sort
+ // priority:
+ //
+ // 1: force state
+ // 2: overall status
+ // 3: timestamp (less is preferred)
+ //
+ auto cmp = [] (const shared_ptr<build>& x, const shared_ptr<build>& y)
+ {
+ if (x->force != y->force)
+ return x->force > y->force; // Forced goes first.
+
+ assert (x->status && y->status); // Both built.
+
+ if (x->status != y->status)
+ return x->status > y->status; // Larger status goes first.
+
+ // Older build completion goes first.
+ //
+ // Note that a completed build can have the state change timestamp
+ // (timestamp member) newer than the completion timestamp
+ // (soft_timestamp member) if the build was interrupted.
+ //
+ return x->soft_timestamp < y->soft_timestamp;
+ };
+
+ sort (rebuilds.begin (), rebuilds.end (), cmp);
+
+ optional<string> cl (challenge ());
+
+ // Pick the first build configuration from the ordered list.
+ //
+ // Note that the configurations and packages may not match the
+ // required criteria anymore (as we have committed the database
+ // transactions that were used to collect this data) so we recheck. If
+ // we find one that matches then put it into the building state,
+ // refresh the timestamp and update. Note that we don't amend the
+ // status and the force state to have them available in the result
+ // request handling (see above).
+ //
+ for (auto& b: rebuilds)
+ {
+ try
+ {
+ transaction t (conn->begin ());
+
+ b = build_db_->find<build> (b->id);
+
+ if (b != nullptr &&
+ b->state == build_state::built &&
+ needs_rebuild (*b))
+ {
+ auto i (conf_machines.find (
+ build_target_config_id {
+ b->target, b->target_config_name}));
+
+ // Only actual package configurations are loaded (see above).
+ //
+ assert (i != conf_machines.end ());
+ const config_machine& cm (i->second);
+
+ // Rebuild the package configuration if still present, is
+ // buildable, doesn't exclude the target configuration, can be
+ // provided with all the requested auxiliary machines, and
+ // matches the request's interactive mode.
+ //
+              // Note that while a change of the latter seems rather far-fetched,
+ // let's check it for good measure.
+ //
+ shared_ptr<build_package> p (
+ build_db_->find<build_package> (b->id.package));
+
+ shared_ptr<build_tenant> t (
+ p != nullptr
+ ? build_db_->load<build_tenant> (p->id.tenant)
+ : nullptr);
+
+ build_package_config* pc (p != nullptr
+ ? find (b->package_config_name,
+ p->configs)
+ : nullptr);
+
+ if (pc != nullptr &&
+ p->buildable &&
+ (imode == interactive_mode::both ||
+ (t->interactive.has_value () ==
+ (imode == interactive_mode::true_))))
+ {
+ build_db_->load (*p, p->constraints_section);
+
+ const build_target_config& tc (*cm.config);
+
+ optional<collect_auxiliaries_result> aux;
+ if (!exclude (*pc, p->builds, p->constraints, tc) &&
+ (aux = collect_auxiliaries (p, *pc, tc)))
+ {
+ assert (b->status);
+
+ initial_state = build_state::built;
+
+ rebuild_interrupted_rebuild =
+ (b->timestamp > b->soft_timestamp);
+
+ b->state = build_state::building;
+
+ // Save the interactive build login information into the
+                // build object, if the package is to be built interactively.
+ //
+ // Can't move from, as may need it on the next iteration.
+ //
+ b->interactive = t->interactive
+ ? tqm.interactive_login
+ : nullopt;
+
+ unforced = (b->force == force_state::unforced);
+
+ // Can't move from, as may need them on the next iteration.
+ //
+ b->agent_fingerprint = agent_fp;
+ b->agent_challenge = cl;
+
+ const machine_header_manifest& mh (*cm.machine);
+ b->machine = build_machine {mh.name, mh.summary};
+
+ // Mark the section as loaded, so auxiliary_machines are
+ // updated.
+ //
+ b->auxiliary_machines_section.load ();
+
+ b->auxiliary_machines =
+ move (aux->build_auxiliary_machines);
+
+ // Issue the hard rebuild if the timeout expired, rebuild is
+ // forced, or the configuration or machine has changed.
+ //
+ // Note that we never reset the build status (see above for
+ // the reasoning).
+ //
+ string ccs (controller_checksum (*cm.config));
+ string mcs (machine_checksum (*cm.machine));
+
+ if (b->hard_timestamp <= hard_rebuild_expiration ||
+ b->force == force_state::forced ||
+ b->controller_checksum != ccs ||
+ b->machine_checksum != mcs)
+ convert_to_hard (b);
+
+ b->controller_checksum = move (ccs);
+ b->machine_checksum = move (mcs);
+
+ b->timestamp = system_clock::now ();
+
+ build_db_->update (b);
+
+ // Stash the service notification information, if present,
+ // and prepare the task response manifest.
+ //
+ if (t->service)
+ {
+ auto i (tenant_service_map_.find (t->service->type));
+
+ if (i != tenant_service_map_.end ())
+ {
+ const tenant_service_base* s (i->second.get ());
+
+ tsb = dynamic_cast<const tenant_service_build_building*> (s);
+ tsq = dynamic_cast<const tenant_service_build_queued*> (s);
+
+ if (tsq != nullptr)
+ {
+ qbs = queue_builds (*p, *b);
+
+ // If we ought to call the
+ // tenant_service_build_queued::build_queued()
+ // callback, then also set the package tenant's queued
+ // timestamp to the current time to prevent the
+ // notifications race (see tenant::queued_timestamp
+ // for details).
+ //
+ if (!qbs.empty () || !rebuild_interrupted_rebuild)
+ {
+ qhs = queue_hints (*p);
+
+ t->queued_timestamp = system_clock::now ();
+ build_db_->update (t);
+ }
+ }
+
+ if (tsb != nullptr || tsq != nullptr)
+ tss = make_pair (move (*t->service), b);
+ }
+ }
+
+ task_response = task (*b,
+ *p,
+ *pc,
+ move (aux->tests),
+ move (aux->task_auxiliary_machines),
+ move (t->interactive),
+ cm);
+
+ task_build = move (b);
+ task_package = move (p);
+ task_config = pc;
+ }
+ }
+ }
+
+ t.commit ();
+ }
+ catch (const odb::deadlock&)
+ {
+ // Just try with the next rebuild. But first, reset the task
+ // manifest and the session that we may have prepared.
+ //
+ task_response = task_response_manifest ();
+ }
+
+ // If the task manifest is prepared, then bail out from the package
+ // configuration rebuilds loop and respond.
+ //
+ if (task_response.task)
+ break;
+ }
+ }
+
+ // If the tenant-associated third-party service needs to be notified
+ // about the queued builds, then call the
+ // tenant_service_build_queued::build_queued() callback function and
+ // update the service state, if requested.
//
- auto cmp = [] (const shared_ptr<build>& x, const shared_ptr<build>& y)
+ if (tsq != nullptr)
{
- if (x->force != y->force)
- return x->force > y->force; // Forced goes first.
+ assert (tss); // Wouldn't be here otherwise.
- assert (x->status && y->status); // Both built.
+ const tenant_service& ss (tss->first);
- if (x->status != y->status)
- return x->status > y->status; // Larger status goes first.
+ // If the task build has no initial state (is just created), then
+ // temporarily move it into the list of the queued builds until the
+ // `queued` notification is delivered. Afterwards, restore it so that
+ // the `building` notification can also be sent.
+ //
+ build& b (*tss->second);
+ bool restore_build (false);
- return x->timestamp < y->timestamp; // Older goes first.
- };
+ if (!initial_state)
+ {
+ qbs.push_back (move (b));
+ restore_build = true;
+ }
- sort (rebuilds.begin (), rebuilds.end (), cmp);
+ if (!qbs.empty ())
+ {
+ if (auto f = tsq->build_queued (ss,
+ qbs,
+ nullopt /* initial_state */,
+ qhs,
+ log_writer_))
+ update_tenant_service_state (conn, qbs.back ().tenant, f);
+ }
- optional<string> cl (challenge ());
+ // Send the `queued` notification for the task build, unless it is
+ // already sent, and update the service state, if requested.
+ //
+ if (initial_state &&
+ *initial_state != build_state::queued &&
+ !rebuild_interrupted_rebuild &&
+ !rebuild_forced_build)
+ {
+ qbs.clear ();
+ qbs.push_back (move (b));
+ restore_build = true;
+
+ if (auto f = tsq->build_queued (ss,
+ qbs,
+ initial_state,
+ qhs,
+ log_writer_))
+ update_tenant_service_state (conn, qbs.back ().tenant, f);
+ }
- // Pick the first package configuration from the ordered list.
+ if (restore_build)
+ b = move (qbs.back ());
+ }
+
+ // If a third-party service needs to be notified about the package
+ // build, then call the tenant_service_build_built::build_building()
+ // callback function and, if requested, update the tenant-associated
+ // service state.
//
- // Note that the configurations and packages may not match the required
- // criteria anymore (as we have committed the database transactions that
- // were used to collect this data) so we recheck. If we find one that
- // matches then put it into the building state, refresh the timestamp and
- // update. Note that we don't amend the status and the force state to
- // have them available in the result request handling (see above).
+ if (tsb != nullptr)
+ {
+ assert (tss); // Wouldn't be here otherwise.
+
+ const tenant_service& ss (tss->first);
+ const build& b (*tss->second);
+
+ if (auto f = tsb->build_building (ss, b, log_writer_))
+ update_tenant_service_state (conn, b.tenant, f);
+ }
+
+ // If the task manifest is prepared, then check that the number of the
+ // build auxiliary machines is less than 10. If that's not the case,
+ // then turn the build into the built state with the abort status.
//
- for (auto& b: rebuilds)
+ if (task_response.task &&
+ task_response.task->auxiliary_machines.size () > 9)
{
- try
+ // Respond with the no-task manifest.
+ //
+ task_response = task_response_manifest ();
+
+ // If the package tenant has a third-party service state associated
+ // with it, then check if the tenant_service_build_built callback is
+ // registered for the type of the associated service. If it is, then
+ // stash the state, the build object, and the callback pointer for the
+ // subsequent service `built` notification.
+ //
+ const tenant_service_build_built* tsb (nullptr);
+ optional<pair<tenant_service, shared_ptr<build>>> tss;
{
- transaction t (build_db_->begin ());
+ transaction t (conn->begin ());
- b = build_db_->find<build> (b->id);
+ shared_ptr<build> b (build_db_->find<build> (task_build->id));
- if (b != nullptr && b->state == build_state::built &&
- b->timestamp <= (b->force == force_state::forced
- ? forced_rebuild_expiration
- : normal_rebuild_expiration))
+ // For good measure, check that the build object is in the building
+ // state and has not been updated.
+ //
+ if (b->state == build_state::building &&
+ b->timestamp == task_build->timestamp)
{
- auto i (cfg_machines.find (b->id.configuration.c_str ()));
+ b->state = build_state::built;
+ b->status = result_status::abort;
+ b->force = force_state::unforced;
- // Only actual package configurations are loaded (see above).
+ // Cleanup the interactive build login information.
//
- assert (i != cfg_machines.end ());
- const config_machine& cm (i->second);
+ b->interactive = nullopt;
- // Rebuild the package if still present, is buildable and doesn't
- // exclude the configuration.
+ // Cleanup the authentication data.
//
- shared_ptr<build_package> p (
- build_db_->find<build_package> (b->id.package));
-
- if (p != nullptr &&
- p->internal () &&
- !exclude (p->builds, p->constraints, *cm.config))
- {
- assert (b->status);
+ b->agent_fingerprint = nullopt;
+ b->agent_challenge = nullopt;
- b->state = build_state::building;
+ b->timestamp = system_clock::now ();
+ b->soft_timestamp = b->timestamp;
+ b->hard_timestamp = b->soft_timestamp;
- // Can't move from, as may need them on the next iteration.
- //
- b->agent_fingerprint = agent_fp;
- b->agent_challenge = cl;
-
- const machine_header_manifest& mh (*cm.machine);
- b->machine = mh.name;
- b->machine_summary = mh.summary;
-
- b->target = cm.config->target;
+ // Mark the section as loaded, so results are updated.
+ //
+ b->results_section.load ();
- // Mark the section as loaded, so results are updated.
- //
- b->results_section.load ();
- b->results.clear ();
+ b->results = operation_results ({
+ operation_result {
+ "configure",
+ result_status::abort,
+ "error: not more than 9 auxiliary machines are allowed"}});
- b->timestamp = system_clock::now ();
+ b->agent_checksum = nullopt;
+ b->worker_checksum = nullopt;
+ b->dependency_checksum = nullopt;
- build_db_->update (b);
+ build_db_->update (b);
- p->internal_repository.load ();
+ // Schedule the `built` notification, if the
+ // tenant_service_build_built callback is registered for the
+ // tenant.
+ //
+ shared_ptr<build_tenant> t (
+ build_db_->load<build_tenant> (b->tenant));
- tsm = task (move (b), move (p), cm);
+ if (t->service)
+ {
+ auto i (tenant_service_map_.find (t->service->type));
+
+ if (i != tenant_service_map_.end ())
+ {
+ tsb = dynamic_cast<const tenant_service_build_built*> (
+ i->second.get ());
+
+ // If required, stash the service notification information.
+ //
+ if (tsb != nullptr)
+ tss = make_pair (move (*t->service), b);
+ }
}
+
+ // Schedule the build notification email.
+ //
+ aborted_builds.push_back (
+ aborted_build {move (b),
+ move (task_package),
+ task_config,
+ unforced ? "build" : "rebuild"});
}
t.commit ();
}
- catch (const odb::deadlock&) {} // Just try with the next rebuild.
- // If the task response manifest is prepared, then bail out from the
- // package configuration rebuilds loop and respond.
+ // If a third-party service needs to be notified about the built
+ // package, then call the tenant_service_build_built::build_built()
+ // callback function and update the service state, if requested.
//
- if (!tsm.session.empty ())
- break;
+ if (tsb != nullptr)
+ {
+ assert (tss); // Wouldn't be here otherwise.
+
+ const tenant_service& ss (tss->first);
+ const build& b (*tss->second);
+
+ if (auto f = tsb->build_built (ss, b, log_writer_))
+ update_tenant_service_state (conn, b.tenant, f);
+ }
}
+
+ // Send notification emails for all the aborted builds.
+ //
+ for (const aborted_build& ab: aborted_builds)
+ send_notification_email (*options_,
+ conn,
+ *ab.b,
+ *ab.p,
+ *ab.pc,
+ ab.what,
+ error,
+ verb_ >= 2 ? &trace : nullptr);
}
}
- // @@ Probably it would be a good idea to also send some cache control
- // headers to avoid caching by HTTP proxies. That would require extension
- // of the web::response interface.
- //
-
- manifest_serializer s (rs.content (200, "text/manifest;charset=utf-8"),
- "task_response_manifest");
- tsm.serialize (s);
-
+ serialize_task_response_manifest ();
return true;
}
diff --git a/mod/mod-build-task.hxx b/mod/mod-build-task.hxx
index 7875db1..d0b3d44 100644
--- a/mod/mod-build-task.hxx
+++ b/mod/mod-build-task.hxx
@@ -8,6 +8,7 @@
#include <libbrep/utility.hxx>
#include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>
#include <mod/database-module.hxx>
#include <mod/build-config-module.hxx>
@@ -16,13 +17,13 @@ namespace brep
class build_task: public database_module, private build_config_module
{
public:
- build_task () = default;
+ explicit
+ build_task (const tenant_service_map&);
// Create a shallow copy (handling instance) if initialized and a deep
// copy (context exemplar) otherwise.
//
- explicit
- build_task (const build_task&);
+ build_task (const build_task&, const tenant_service_map&);
virtual bool
handle (request&, response&);
@@ -36,6 +37,7 @@ namespace brep
private:
shared_ptr<options::build_task> options_;
+ const tenant_service_map& tenant_service_map_;
};
}
diff --git a/mod/mod-builds.cxx b/mod/mod-builds.cxx
index ab9e93e..30562f3 100644
--- a/mod/mod-builds.cxx
+++ b/mod/mod-builds.cxx
@@ -4,15 +4,15 @@
#include <mod/mod-builds.hxx>
#include <set>
-#include <algorithm> // find_if()
#include <libstudxml/serializer.hxx>
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <libbutl/timestamp.mxx> // to_string()
-#include <libbutl/path-pattern.mxx>
+#include <libbutl/utility.hxx> // compare_c_string
+#include <libbutl/timestamp.hxx> // to_string()
+#include <libbutl/path-pattern.hxx>
#include <libbbot/manifest.hxx> // to_result_status(), to_string(result_status)
@@ -31,7 +31,6 @@
using namespace std;
using namespace butl;
-using namespace bbot;
using namespace web;
using namespace odb::core;
using namespace brep::cli;
@@ -133,28 +132,32 @@ match (const C qc, const string& pattern)
return qc + "SIMILAR TO" + query<T>::_val (transform (pattern));
}
+// If tenant is absent, then query builds from all the public tenants.
+//
template <typename T>
static inline query<T>
-build_query (const brep::cstrings* configs,
+build_query (const brep::vector<brep::build_target_config_id>* config_ids,
const brep::params::builds& params,
- const brep::optional<brep::string>& tenant,
- const brep::optional<bool>& archived)
+ const brep::optional<brep::string>& tenant)
{
using namespace brep;
using query = query<T>;
using qb = typename query::build;
-
- query q (configs != nullptr
- ? qb::id.configuration.in_range (configs->begin (), configs->end ())
- : query (true));
+ using qt = typename query::build_tenant;
const auto& pid (qb::id.package);
- if (tenant)
- q = q && pid.tenant == *tenant;
+ query q (tenant ? pid.tenant == *tenant : !qt::private_);
- if (archived)
- q = q && query::build_tenant::archived == *archived;
+ if (config_ids != nullptr)
+ {
+ query sq (false);
+ for (const auto& id: *config_ids)
+ sq = sq || (qb::id.target == id.target &&
+ qb::id.target_config_name == id.config);
+
+ q = q && sq;
+ }
// Note that there is no error reported if the filter parameters parsing
// fails. Instead, it is considered that no package builds match such a
@@ -173,7 +176,7 @@ build_query (const brep::cstrings* configs,
{
// May throw invalid_argument.
//
- version v (params.version (), false /* fold_zero_revision */);
+ version v (params.version (), version::none);
q = q && compare_version_eq (pid.version,
canonical_version (v),
@@ -182,11 +185,11 @@ build_query (const brep::cstrings* configs,
// Build toolchain name/version.
//
- const string& tc (params.toolchain ());
+ const string& th (params.toolchain ());
- if (tc != "*")
+ if (th != "*")
{
- size_t p (tc.find ('-'));
+ size_t p (th.find ('-'));
if (p == string::npos) // Invalid format.
throw invalid_argument ("");
@@ -194,8 +197,8 @@ build_query (const brep::cstrings* configs,
// the exact version revision, so an absent and zero revisions have the
// same semantics and the zero revision is folded.
//
- string tn (tc, 0, p);
- version tv (string (tc, p + 1)); // May throw invalid_argument.
+ string tn (th, 0, p);
+ version tv (string (th, p + 1)); // May throw invalid_argument.
q = q &&
qb::id.toolchain_name == tn &&
@@ -204,38 +207,44 @@ build_query (const brep::cstrings* configs,
true /* revision */);
}
- // Build configuration name.
+ // Build target.
//
- if (!params.configuration ().empty ())
- q = q && match<T> (qb::id.configuration, params.configuration ());
+ if (!params.target ().empty ())
+ q = q && match<T> (qb::id.target, params.target ());
- // Build machine name.
+ // Build target configuration name.
//
- if (!params.machine ().empty ())
- q = q && match<T> (qb::machine, params.machine ());
+ if (!params.target_config ().empty ())
+ q = q && match<T> (qb::id.target_config_name, params.target_config ());
- // Build target.
+ // Build package configuration name.
//
- if (!params.target ().empty ())
- q = q && match<T> (qb::target, params.target ());
+ if (!params.package_config ().empty ())
+ q = q && match<T> (qb::id.package_config_name, params.package_config ());
// Build result.
//
const string& rs (params.result ());
+ bool add_state (true);
if (rs != "*")
{
if (rs == "pending")
+ {
q = q && qb::force != "unforced";
+ }
else if (rs == "building")
+ {
q = q && qb::state == "building";
+ add_state = false;
+ }
else
{
query sq (qb::status == rs);
// May throw invalid_argument.
//
- result_status st (to_result_status (rs));
+ result_status st (bbot::to_result_status (rs));
if (st != result_status::success)
{
@@ -256,8 +265,12 @@ build_query (const brep::cstrings* configs,
// well (rebuild).
//
q = q && qb::state == "built" && sq;
+ add_state = false;
}
}
+
+ if (add_state)
+ q = q && qb::state != "queued";
}
catch (const invalid_argument&)
{
@@ -267,23 +280,19 @@ build_query (const brep::cstrings* configs,
return q;
}
+// If tenant is absent, then query packages from all the public tenants.
+//
template <typename T>
static inline query<T>
package_query (const brep::params::builds& params,
- const brep::optional<brep::string>& tenant,
- const brep::optional<bool>& archived)
+ const brep::optional<brep::string>& tenant)
{
using namespace brep;
using query = query<T>;
using qp = typename query::build_package;
+ using qt = typename query::build_tenant;
- query q (true);
-
- if (tenant)
- q = q && qp::id.tenant == *tenant;
-
- if (archived)
- q = q && query::build_tenant::archived == *archived;
+ query q (tenant ? qp::id.tenant == *tenant : !qt::private_);
// Note that there is no error reported if the filter parameters parsing
// fails. Instead, it is considered that no packages match such a query.
@@ -301,7 +310,7 @@ package_query (const brep::params::builds& params,
{
// May throw invalid_argument.
//
- version v (params.version (), false /* fold_zero_revision */);
+ version v (params.version (), version::none);
q = q && compare_version_eq (qp::id.version,
canonical_version (v),
@@ -355,11 +364,6 @@ handle (request& rq, response& rs)
throw invalid_request (400, e.what ());
}
- // Override the name parameter for the old URL (see options.cli for details).
- //
- if (params.name_legacy_specified ())
- params.name (params.name_legacy ());
-
const char* title ("Builds");
xml::serializer s (rs.content (), title);
@@ -383,14 +387,17 @@ handle (request& rq, response& rs)
<< DIV(ID="content");
// If the tenant is empty then we are in the global view and will display
- // builds from all the tenants.
+ // builds from all the public tenants.
//
optional<string> tn;
if (!tenant.empty ())
tn = tenant;
- // Return the list of distinct toolchain name/version pairs. The build db
- // transaction must be started.
+ // Return the list of distinct toolchain name/version pairs. If no builds
+ // are present for the tenant, then fallback to the toolchain recorded in
+ // the tenant object, if present.
+ //
+ // Note: the build db transaction must be started.
//
using toolchains = vector<pair<string, version>>;
@@ -406,11 +413,19 @@ handle (request& rq, response& rs)
false /* first */)))
r.emplace_back (move (t.name), move (t.version));
+ if (r.empty ())
+ {
+ shared_ptr<build_tenant> t (build_db_->find<build_tenant> (tenant));
+
+ if (t != nullptr && t->toolchain)
+ r.emplace_back (t->toolchain->name, t->toolchain->version);
+ }
+
return r;
};
auto print_form = [&s, &params, this] (const toolchains& toolchains,
- size_t build_count)
+ optional<size_t> build_count)
{
// Print the package builds filter form on the first page only.
//
@@ -421,16 +436,16 @@ handle (request& rq, response& rs)
// the selected toolchain is still present in the database. Otherwise
// fallback to the * wildcard selection.
//
- string ctc ("*");
+ string cth ("*");
vector<pair<string, string>> toolchain_opts ({{"*", "*"}});
{
for (const auto& t: toolchains)
{
- string tc (t.first + '-' + t.second.string ());
- toolchain_opts.emplace_back (tc, tc);
+ string th (t.first + '-' + t.second.string ());
+ toolchain_opts.emplace_back (th, th);
- if (tc == params.toolchain ())
- ctc = move (tc);
+ if (th == params.toolchain ())
+ cth = move (th);
}
}
@@ -446,28 +461,42 @@ handle (request& rq, response& rs)
<< TBODY
<< TR_INPUT ("name", "builds", params.name (), "*", true)
<< TR_INPUT ("version", "pv", params.version (), "*")
- << TR_SELECT ("toolchain", "tc", ctc, toolchain_opts)
+ << TR_SELECT ("toolchain", "th", cth, toolchain_opts)
+ << TR_INPUT ("target", "tg", params.target (), "*")
- << TR(CLASS="config")
- << TH << "config" << ~TH
+ << TR(CLASS="tgt-config")
+ << TH << "tgt config" << ~TH
<< TD
<< *INPUT(TYPE="text",
- NAME="cf",
- VALUE=params.configuration (),
+ NAME="tc",
+ VALUE=params.target_config (),
PLACEHOLDER="*",
- LIST="configs")
- << DATALIST(ID="configs")
+ LIST="target-configs")
+ << DATALIST(ID="target-configs")
<< *OPTION(VALUE="*");
- for (const auto& c: *build_conf_names_)
- s << *OPTION(VALUE=c);
+ // Print unique config names from the target config map.
+ //
+ set<const char*, butl::compare_c_string> conf_names;
+ for (const auto& c: *target_conf_map_)
+ {
+ if (conf_names.insert (c.first.config.get ().c_str ()).second)
+ s << *OPTION(VALUE=c.first.config.get ());
+ }
s << ~DATALIST
<< ~TD
<< ~TR
- << TR_INPUT ("machine", "mn", params.machine (), "*")
- << TR_INPUT ("target", "tg", params.target (), "*")
+ << TR(CLASS="pkg-config")
+ << TH << "pkg config" << ~TH
+ << TD
+ << *INPUT(TYPE="text",
+ NAME="pc",
+ VALUE=params.package_config (),
+ PLACEHOLDER="*")
+ << ~TD
+ << ~TR
<< TR_SELECT ("result", "rs", params.result (), build_results)
<< ~TBODY
<< ~TABLE
@@ -489,26 +518,25 @@ handle (request& rq, response& rs)
s << DIV_COUNTER (build_count, "Build", "Builds");
};
+ const string& tgt (params.target ());
+ const string& tgt_cfg (params.target_config ());
+ const string& pkg_cfg (params.package_config ());
+
// We will not display hidden configurations, unless the configuration is
// specified explicitly.
//
- bool exclude_hidden (params.configuration ().empty () ||
- path_pattern (params.configuration ()));
+ bool exclude_hidden (tgt_cfg.empty () || path_pattern (tgt_cfg));
- cstrings conf_names;
+ vector<build_target_config_id> conf_ids;
+ conf_ids.reserve (target_conf_map_->size ());
- if (exclude_hidden)
+ for (const auto& c: *target_conf_map_)
{
- for (const auto& c: *build_conf_map_)
- {
- if (belongs (*c.second, "all"))
- conf_names.push_back (c.first);
- }
+ if (!exclude_hidden || !belongs (*c.second, "hidden"))
+ conf_ids.push_back (c.first);
}
- else
- conf_names = *build_conf_names_;
- size_t count;
+ optional<size_t> count;
size_t page (params.page ());
if (params.result () != "unbuilt") // Print package build configurations.
@@ -523,37 +551,22 @@ handle (request& rq, response& rs)
// printing the builds.
//
count = 0;
- vector<shared_ptr<build>> builds;
+ vector<package_build> builds;
builds.reserve (page_configs);
- // Prepare the package build prepared query.
+ // Prepare the package build query.
//
using query = query<package_build>;
- using prep_query = prepared_query<package_build>;
- query q (build_query<package_build> (
- &conf_names, params, tn, nullopt /* archived */));
-
- // Specify the portion. Note that we will be querying builds in chunks,
- // not to hold locks for too long.
- //
- // Also note that for each build we also load the corresponding
- // package. Nevertheless, we use a fairly large portion to speed-up the
- // builds traversal but also cache the package objects (see below).
- //
- size_t offset (0);
+ query q (build_query<package_build> (&conf_ids, params, tn));
// Print package build configurations ordered by the timestamp (later goes
// first).
//
- q += "ORDER BY" + query::build::timestamp + "DESC" +
- "OFFSET" + query::_ref (offset) + "LIMIT 500";
+ q += "ORDER BY" + query::build::timestamp + "DESC";
connection_ptr conn (build_db_->connection ());
- prep_query pq (
- conn->prepare_query<package_build> ("mod-builds-query", q));
-
// Note that we can't skip the proper number of builds in the database
// query for a page numbers greater than one. So we will query builds from
// the very beginning and skip the appropriate number of them while
@@ -569,81 +582,101 @@ handle (request& rq, response& rs)
//
session sn;
- for (bool ne (true); ne; )
+ transaction t (conn->begin ());
+
+ // For some reason PostgreSQL (as of 9.4) picks the nested loop join
+ // strategy for the below package_build query, which executes quite slow
+ // even for reasonably small number of builds. Thus, we just discourage
+ // PostgreSQL from using this strategy in the current transaction.
+ //
+ // @@ TMP Re-check for the later PostgreSQL versions if we can drop this
+ // hint. If drop, then also grep for other places where this hint
+ // is used.
+ //
+ conn->execute ("SET LOCAL enable_nestloop=off");
+
+ // Iterate over builds and cache build objects that should be printed.
+ // Skip the appropriate number of them (for page number greater than
+ // one).
+ //
+ for (auto& pb: build_db_->query<package_build> (q))
{
- transaction t (conn->begin ());
+ shared_ptr<build>& b (pb.build);
+
+ auto i (
+ target_conf_map_->find (
+ build_target_config_id {b->target, b->target_config_name}));
- // Query package builds (and cache the result).
+ assert (i != target_conf_map_->end ());
+
+ // Match the target configuration against the package build
+ // configuration expressions/constraints.
//
- auto bs (pq.execute ());
+ shared_ptr<build_package> p (
+ build_db_->load<build_package> (b->id.package));
+
+ const build_package_config* pc (find (b->package_config_name,
+ p->configs));
- if ((ne = !bs.empty ()))
+ // The package configuration should be present since the configurations
+ // set cannot change if the package version doesn't change. If that's
+ // not the case, then the database has probably been manually amended.
+ // In this case let's just skip such a build as if it excluded and log
+ // the warning.
+ //
+ if (pc == nullptr)
{
- offset += bs.size ();
+ warn << "cannot find configuration '" << b->package_config_name
+ << "' for package " << p->id.name << '/' << p->version;
- // Iterate over builds and cache build objects that should be printed.
- // Skip the appropriate number of them (for page number greater than
- // one).
- //
- for (auto& pb: bs)
- {
- shared_ptr<build>& b (pb.build);
+ continue;
+ }
- auto i (build_conf_map_->find (b->configuration.c_str ()));
- assert (i != build_conf_map_->end ());
+ if (!p->constraints_section.loaded ())
+ build_db_->load (*p, p->constraints_section);
- // Match the configuration against the package build
- // expressions/constraints.
+ if (!exclude (*pc, p->builds, p->constraints, *i->second))
+ {
+ if (skip != 0)
+ --skip;
+ else if (print != 0)
+ {
+ // As we query builds in multiple transactions we may see the same
+ // build multiple times. Let's skip the duplicates. Note: we don't
+ // increment the counter in this case.
//
- shared_ptr<build_package> p (
- build_db_->load<build_package> (b->id.package));
-
- if (!exclude (p->builds, p->constraints, *i->second))
+ if (find_if (builds.begin (), builds.end (),
+ [&b] (const package_build& pb)
+ {
+ return b->id == pb.build->id;
+ }) != builds.end ())
+ continue;
+
+ if (b->state == build_state::built)
{
- if (skip != 0)
- --skip;
- else if (print != 0)
- {
- // As we query builds in multiple transactions we may see the
- // same build multiple times. Let's skip the duplicates. Note:
- // we don't increment the counter in this case.
- //
- if (find_if (builds.begin (),
- builds.end (),
- [&b] (const shared_ptr<build>& pb)
- {
- return b->id == pb->id;
- }) != builds.end ())
- continue;
-
- if (b->state == build_state::built)
- {
- build_db_->load (*b, b->results_section);
+ build_db_->load (*b, b->results_section);
- // Let's clear unneeded result logs for builds being cached.
- //
- for (operation_result& r: b->results)
- r.log.clear ();
- }
-
- builds.push_back (move (b));
+ // Let's clear unneeded result logs for builds being cached.
+ //
+ for (operation_result& r: b->results)
+ r.log.clear ();
+ }
- --print;
- }
+ builds.push_back (move (pb));
- ++count;
- }
+ --print;
}
+
+ ++(*count);
}
+ }
- // Print the filter form after the build count is calculated. Note:
- // query_toolchains() must be called inside the build db transaction.
- //
- else
- print_form (query_toolchains (), count);
+ // Print the filter form after the build count is calculated. Note:
+ // query_toolchains() must be called inside the build db transaction.
+ //
+ print_form (query_toolchains (), count);
- t.commit ();
- }
+ t.commit ();
// Finally, print the cached package build configurations.
//
@@ -652,34 +685,43 @@ handle (request& rq, response& rs)
// Enclose the subsequent tables to be able to use nth-child CSS selector.
//
s << DIV;
- for (const shared_ptr<build>& pb: builds)
+ for (const package_build& pb: builds)
{
- const build& b (*pb);
+ const build& b (*pb.build);
string ts (butl::to_string (b.timestamp,
"%Y-%m-%d %H:%M:%S %Z",
true /* special */,
true /* local */) +
- " (" + butl::to_string (now - b.timestamp, false) + " ago)");
+ " (" + butl::to_string (now - b.timestamp, false) + " ago");
+
+ if (pb.archived)
+ ts += ", archived";
+
+ ts += ')';
s << TABLE(CLASS="proplist build")
<< TBODY
- << TR_NAME (b.package_name, string (), root, b.tenant)
+ << TR_NAME (b.package_name, root, b.tenant)
<< TR_VERSION (b.package_name, b.package_version, root, b.tenant)
<< TR_VALUE ("toolchain",
b.toolchain_name + '-' +
b.toolchain_version.string ())
- << TR_VALUE ("config", b.configuration)
- << TR_VALUE ("machine", b.machine)
<< TR_VALUE ("target", b.target.string ())
- << TR_VALUE ("timestamp", ts)
- << TR_BUILD_RESULT (b, host, root);
+ << TR_VALUE ("tgt config", b.target_config_name)
+ << TR_VALUE ("pkg config", b.package_config_name)
+ << TR_VALUE ("timestamp", ts);
+
+ if (b.interactive) // Note: can only be present for the building state.
+ s << TR_VALUE ("login", *b.interactive);
+
+ s << TR_BUILD_RESULT (b, pb.archived, host, root);
// In the global view mode add the tenant builds link. Note that the
// global view (and the link) makes sense only in the multi-tenant mode.
//
if (!tn && !b.tenant.empty ())
- s << TR_TENANT (tenant_name, "builds", root, b.tenant);
+ s << TR_TENANT (tenant_name, "builds", root, b.tenant);
s << ~TBODY
<< ~TABLE;
@@ -689,47 +731,73 @@ handle (request& rq, response& rs)
else // Print unbuilt package configurations.
{
// Parameters to use for package build configurations queries. Note that
- // we cleanup the machine and the result filter arguments, as they are
- // irrelevant for unbuilt configurations.
+ // we cleanup the result filter argument, as it is irrelevant for unbuilt
+ // configurations.
//
params::builds bld_params (params);
- bld_params.machine ().clear ();
bld_params.result () = "*";
- // Query toolchains, filter build configurations and toolchains, and
- // create the set of configuration/toolchain combinations, that we will
- // print for packages. Also calculate the number of unbuilt package
- // configurations.
+ // Query toolchains, filter build target configurations and toolchains,
+ // and create the set of target configuration/toolchain combinations, that
+ // we will print for package configurations. Also calculate the number of
+ // unbuilt package configurations.
//
toolchains toolchains;
- // Note that config_toolchains contains shallow references to the
- // toolchain names and versions.
+ // Target configuration/toolchain combination.
//
- set<config_toolchain> config_toolchains;
+ // Note: all members are the shallow references.
+ //
+ struct target_config_toolchain
+ {
+ const butl::target_triplet& target;
+ const string& target_config;
+ const string& toolchain_name;
+ const bpkg::version& toolchain_version;
+ };
+
+ // Cache the build package objects that would otherwise be loaded twice:
+ // first time during calculating the builds count and then during printing
+ // the builds. Note that the build package is a subset of the package
+ // object and normally has a small memory footprint.
+ //
+ // @@ TMP It feels that we can try to combine the mentioned steps and
+ // improve the performance a bit. We won't need the session in this
+ // case.
+ //
+ session sn;
+
+ connection_ptr conn (build_db_->connection ());
+ transaction t (conn->begin ());
+
+ // Discourage PostgreSQL from using the nested loop join strategy in the
+ // current transaction (see above for details).
+ //
+ conn->execute ("SET LOCAL enable_nestloop=off");
+
+ vector<target_config_toolchain> config_toolchains;
{
- transaction t (build_db_->begin ());
toolchains = query_toolchains ();
- string tc_name;
- version tc_version;
- const string& tc (params.toolchain ());
+ string th_name;
+ version th_version;
+ const string& th (params.toolchain ());
- if (tc != "*")
+ if (th != "*")
try
{
- size_t p (tc.find ('-'));
+ size_t p (th.find ('-'));
if (p == string::npos) // Invalid format.
throw invalid_argument ("");
- tc_name.assign (tc, 0, p);
+ th_name.assign (th, 0, p);
// May throw invalid_argument.
//
// Note that an absent and zero revisions have the same semantics,
// so the zero revision is folded (see above for details).
//
- tc_version = version (string (tc, p + 1));
+ th_version = version (string (th, p + 1));
}
catch (const invalid_argument&)
{
@@ -739,63 +807,63 @@ handle (request& rq, response& rs)
throw invalid_request (400, "invalid toolchain");
}
- const string& pc (params.configuration ());
- const string& tg (params.target ());
- vector<const build_config*> configs;
+ vector<const build_target_config*> target_configs;
- for (const auto& c: *build_conf_)
+ for (const auto& c: *target_conf_)
{
- if ((pc.empty () || path_match (c.name, pc)) && // Filter by name.
+ // Filter by name.
+ //
+ if ((tgt_cfg.empty () || path_match (c.name, tgt_cfg)) &&
// Filter by target.
//
- (tg.empty () || path_match (c.target.string (), tg)) &&
+ (tgt.empty () || path_match (c.target.string (), tgt)) &&
- (!exclude_hidden || belongs (c, "all"))) // Filter hidden.
+ (!exclude_hidden || !belongs (c, "hidden"))) // Filter hidden.
{
- configs.push_back (&c);
+ target_configs.push_back (&c);
for (const auto& t: toolchains)
{
// Filter by toolchain.
//
- if (tc == "*" || (t.first == tc_name && t.second == tc_version))
- config_toolchains.insert ({c.name, t.first, t.second});
+ if (th == "*" || (t.first == th_name && t.second == th_version))
+ config_toolchains.push_back (
+ target_config_toolchain {c.target, c.name, t.first, t.second});
}
}
}
- // Calculate the number of unbuilt package configurations as a
- // difference between the maximum possible number of unbuilt
- // configurations and the number of existing package builds.
- //
- // Note that we also need to deduct the package-excluded configurations
- // count from the maximum possible number of unbuilt configurations. The
- // only way to achieve this is to traverse through the packages and
- // match their build expressions/constraints against our configurations.
- //
- // Also note that some existing builds can now be excluded by packages
- // due to the build configuration target or class set change. We should
- // deduct such builds count from the number of existing package builds.
- //
- size_t nmax (
- config_toolchains.size () *
- build_db_->query_value<buildable_package_count> (
- package_query<buildable_package_count> (
- params, tn, false /* archived */)));
-
- size_t ncur = build_db_->query_value<package_build_count> (
- build_query<package_build_count> (
- &conf_names, bld_params, tn, false /* archived */));
-
- // From now we will be using specific package name and version for each
- // build database query.
- //
- bld_params.name ().clear ();
- bld_params.version ().clear ();
-
if (!config_toolchains.empty ())
{
+ // Calculate the number of unbuilt package configurations as a
+ // difference between the possible number of unbuilt configurations
+ // and the number of existing package builds.
+ //
+ // Note that some existing builds can now be excluded by package
+ // configurations due to the build target configuration class set
+ // change. We should deduct such builds count from the number of
+ // existing package configurations builds.
+ //
+ // The only way to calculate both numbers is to traverse through the
+ // package configurations and match their build
+ // expressions/constraints against our target configurations.
+ //
+ size_t npos (0);
+
+ size_t ncur (build_db_->query_value<package_build_count> (
+ build_query<package_build_count> (&conf_ids, bld_params, tn)));
+
+ // From now we will be using specific values for the below filters for
+ // each build database query. Note that the toolchain is the only
+ // filter left in bld_params.
+ //
+ bld_params.name ().clear ();
+ bld_params.version ().clear ();
+ bld_params.target ().clear ();
+ bld_params.target_config ().clear ();
+ bld_params.package_config ().clear ();
+
// Prepare the build count prepared query.
//
// For each package-excluded configuration we will query the number of
@@ -805,58 +873,82 @@ handle (request& rq, response& rs)
using prep_bld_query = prepared_query<package_build_count>;
package_id id;
- string config;
+ target_triplet target;
+ string target_config_name;
+ string package_config_name;
const auto& bid (bld_query::build::id);
- bld_query bq (equal<package_build_count> (bid.package, id) &&
- bid.configuration == bld_query::_ref (config) &&
+ bld_query bq (
+ equal<package_build_count> (bid.package, id) &&
+ bid.target == bld_query::_ref (target) &&
+ bid.target_config_name == bld_query::_ref (target_config_name) &&
+ bid.package_config_name == bld_query::_ref (package_config_name) &&
// Note that the query already constrains configurations via the
- // configuration name and the tenant via the build package id.
+ // configuration name and target.
+ //
+ // Also note that while the query already constrains the tenant via
+ // the build package id, we still need to pass the tenant not to
+ // erroneously filter out the private tenants.
//
- build_query<package_build_count> (nullptr /* configs */,
+ build_query<package_build_count> (nullptr /* config_ids */,
bld_params,
- nullopt /* tenant */,
- false /* archived */));
+ tn));
prep_bld_query bld_prep_query (
build_db_->prepare_query<package_build_count> (
"mod-builds-build-count-query", bq));
- size_t nt (tc == "*" ? toolchains.size () : 1);
+ // Number of possible builds per package configuration.
+ //
+ size_t nt (th == "*" ? toolchains.size () : 1);
// The number of packages can potentially be large, and we may
// implement some caching in the future. However, the caching will not
// be easy as the cached values depend on the filter form parameters.
//
query<buildable_package> q (
- package_query<buildable_package> (
- params, tn, false /* archived */));
+ package_query<buildable_package> (params, tn));
for (auto& bp: build_db_->query<buildable_package> (q))
{
- id = move (bp.id);
+ shared_ptr<build_package>& p (bp.package);
- shared_ptr<build_package> p (build_db_->load<build_package> (id));
+ id = p->id;
- for (const auto& c: configs)
+ // Note: load the constrains section lazily.
+ //
+ for (const build_package_config& c: p->configs)
{
- if (exclude (p->builds, p->constraints, *c))
+ // Filter by package config name.
+ //
+ if (pkg_cfg.empty () || path_match (c.name, pkg_cfg))
{
- nmax -= nt;
-
- config = c->name;
- ncur -= bld_prep_query.execute_value ();
+ for (const auto& tc: target_configs)
+ {
+ if (!p->constraints_section.loaded ())
+ build_db_->load (*p, p->constraints_section);
+
+ if (exclude (c, p->builds, p->constraints, *tc))
+ {
+ target = tc->target;
+ target_config_name = tc->name;
+ package_config_name = c.name;
+ ncur -= bld_prep_query.execute_value ();
+ }
+ else
+ npos += nt;
+ }
}
}
}
- }
- assert (nmax >= ncur);
- count = nmax - ncur;
-
- t.commit ();
+ assert (npos >= ncur);
+ count = npos - ncur;
+ }
+ else
+ count = nullopt; // Unknown count.
}
// Print the filter form.
@@ -870,9 +962,11 @@ handle (request& rq, response& rs)
// 3: package tenant
// 4: toolchain name
// 5: toolchain version (descending)
- // 6: configuration name
+ // 6: target
+ // 7: target configuration name
+ // 8: package configuration name
//
- // Prepare the build package prepared query.
+ // Prepare the build package query.
//
// Note that we can't skip the proper number of packages in the database
// query for a page numbers greater than one. So we will query packages
@@ -887,28 +981,14 @@ handle (request& rq, response& rs)
// URL query parameter. Alternatively, we can invent the page number cap.
//
using pkg_query = query<buildable_package>;
- using prep_pkg_query = prepared_query<buildable_package>;
-
- pkg_query pq (
- package_query<buildable_package> (params, tn, false /* archived */));
- // Specify the portion. Note that we will still be querying packages in
- // chunks, not to hold locks for too long. For each package we will query
- // its builds, so let's keep the portion small.
- //
- size_t offset (0);
+ pkg_query pq (package_query<buildable_package> (params, tn));
pq += "ORDER BY" +
pkg_query::build_package::id.name +
order_by_version_desc (pkg_query::build_package::id.version,
false /* first */) + "," +
- pkg_query::build_package::id.tenant +
- "OFFSET" + pkg_query::_ref (offset) + "LIMIT 50";
-
- connection_ptr conn (build_db_->connection ());
-
- prep_pkg_query pkg_prep_query (
- conn->prepare_query<buildable_package> ("mod-builds-package-query", pq));
+ pkg_query::build_package::id.tenant;
// Prepare the build prepared query.
//
@@ -922,14 +1002,13 @@ handle (request& rq, response& rs)
package_id id;
- bld_query bq (
- equal<package_build> (bld_query::build::id.package, id) &&
+ bld_query bq (equal<package_build> (bld_query::build::id.package, id) &&
- // Note that the query already constrains the tenant via the build
- // package id.
- //
- build_query<package_build> (
- &conf_names, bld_params, nullopt /* tenant */, false /* archived */));
+ // Note that while the query already constrains the tenant
+ // via the build package id, we still need to pass the
+ // tenant not to erroneously filter out the private tenants.
+ //
+ build_query<package_build> (&conf_ids, bld_params, tn));
prep_bld_query bld_prep_query (
conn->prepare_query<package_build> ("mod-builds-build-query", bq));
@@ -940,99 +1019,115 @@ handle (request& rq, response& rs)
// Enclose the subsequent tables to be able to use nth-child CSS selector.
//
s << DIV;
- while (print != 0)
- {
- transaction t (conn->begin ());
- // Query (and cache) buildable packages.
- //
- auto packages (pkg_prep_query.execute ());
+ // Query (and cache) buildable packages.
+ //
+ auto packages (build_db_->query<buildable_package> (pq));
- if (packages.empty ())
- print = 0;
- else
+ if (packages.empty ())
+ print = 0;
+ else
+ {
+ // Iterate over packages and print unbuilt configurations. Skip the
+ // appropriate number of them first (for page number greater than one).
+ //
+ for (auto& bp: packages)
{
- offset += packages.size ();
+ shared_ptr<build_package>& p (bp.package);
- // Iterate over packages and print unbuilt configurations. Skip the
- // appropriate number of them first (for page number greater than one).
+ id = p->id;
+
+ // Copy configuration/toolchain combinations for this package,
+ // skipping excluded configurations.
//
- for (auto& p: packages)
- {
- id = move (p.id);
+ set<config_toolchain> unbuilt_configs;
- // Copy configuration/toolchain combinations for this package,
- // skipping excluded configurations.
+ // Load the constrains section lazily.
+ //
+ for (const build_package_config& pc: p->configs)
+ {
+ // Filter by package config name.
//
- set<config_toolchain> unbuilt_configs;
+ if (pkg_cfg.empty () || path_match (pc.name, pkg_cfg))
{
- shared_ptr<build_package> p (build_db_->load<build_package> (id));
-
- for (const auto& ct: config_toolchains)
+ for (const target_config_toolchain& ct: config_toolchains)
{
- auto i (build_conf_map_->find (ct.configuration.c_str ()));
- assert (i != build_conf_map_->end ());
-
- if (!exclude (p->builds, p->constraints, *i->second))
- unbuilt_configs.insert (ct);
+ auto i (
+ target_conf_map_->find (
+ build_target_config_id {ct.target, ct.target_config}));
+
+ assert (i != target_conf_map_->end ());
+
+ if (!p->constraints_section.loaded ())
+ build_db_->load (*p, p->constraints_section);
+
+ if (!exclude (pc, p->builds, p->constraints, *i->second))
+ unbuilt_configs.insert (
+ config_toolchain {ct.target,
+ ct.target_config,
+ pc.name,
+ ct.toolchain_name,
+ ct.toolchain_version});
}
}
+ }
- // Iterate through the package configuration builds and erase them
- // from the unbuilt configurations set.
- //
- for (const auto& pb: bld_prep_query.execute ())
- {
- const build& b (*pb.build);
+ // Iterate through the package configuration builds and erase them
+ // from the unbuilt configurations set.
+ //
+ for (const auto& pb: bld_prep_query.execute ())
+ {
+ const build& b (*pb.build);
- unbuilt_configs.erase ({
- b.id.configuration, b.toolchain_name, b.toolchain_version});
- }
+ unbuilt_configs.erase (config_toolchain {b.target,
+ b.target_config_name,
+ b.package_config_name,
+ b.toolchain_name,
+ b.toolchain_version});
+ }
- // Print unbuilt package configurations.
- //
- for (const auto& ct: unbuilt_configs)
+ // Print unbuilt package configurations.
+ //
+ for (const auto& ct: unbuilt_configs)
+ {
+ if (skip != 0)
{
- if (skip != 0)
- {
- --skip;
- continue;
- }
-
- auto i (build_conf_map_->find (ct.configuration.c_str ()));
- assert (i != build_conf_map_->end ());
-
- s << TABLE(CLASS="proplist build")
- << TBODY
- << TR_NAME (id.name, string (), root, id.tenant)
- << TR_VERSION (id.name, p.version, root, id.tenant)
- << TR_VALUE ("toolchain",
- string (ct.toolchain_name) + '-' +
- ct.toolchain_version.string ())
- << TR_VALUE ("config", ct.configuration)
- << TR_VALUE ("target", i->second->target.string ());
-
- // In the global view mode add the tenant builds link. Note that
- // the global view (and the link) makes sense only in the
- // multi-tenant mode.
- //
- if (!tn && !id.tenant.empty ())
- s << TR_TENANT (tenant_name, "builds", root, id.tenant);
+ --skip;
+ continue;
+ }
- s << ~TBODY
- << ~TABLE;
+ s << TABLE(CLASS="proplist build")
+ << TBODY
+ << TR_NAME (id.name, root, id.tenant)
+ << TR_VERSION (id.name, p->version, root, id.tenant)
+ << TR_VALUE ("toolchain",
+ string (ct.toolchain_name) + '-' +
+ ct.toolchain_version.string ())
+ << TR_VALUE ("target", ct.target.string ())
+ << TR_VALUE ("tgt config", ct.target_config)
+ << TR_VALUE ("pkg config", ct.package_config);
+
+ // In the global view mode add the tenant builds link. Note that
+ // the global view (and the link) makes sense only in the
+ // multi-tenant mode.
+ //
+ if (!tn && !id.tenant.empty ())
+ s << TR_TENANT (tenant_name, "builds", root, id.tenant);
- if (--print == 0) // Bail out the configuration loop.
- break;
- }
+ s << ~TBODY
+ << ~TABLE;
- if (print == 0) // Bail out the package loop.
+ if (--print == 0) // Bail out the configuration loop.
break;
}
- }
- t.commit ();
+ if (print == 0) // Bail out the package loop.
+ break;
+ }
}
+
+ t.commit ();
+
s << ~DIV;
}
@@ -1058,13 +1153,17 @@ handle (request& rq, response& rs)
};
add_filter ("pv", params.version ());
- add_filter ("tc", params.toolchain (), "*");
- add_filter ("cf", params.configuration ());
- add_filter ("mn", params.machine ());
- add_filter ("tg", params.target ());
+ add_filter ("th", params.toolchain (), "*");
+ add_filter ("tg", tgt);
+ add_filter ("tc", tgt_cfg);
+ add_filter ("pc", pkg_cfg);
add_filter ("rs", params.result (), "*");
- s << DIV_PAGER (page, count, page_configs, options_->build_pages (), u)
+ s << DIV_PAGER (page,
+ count ? *count : 0,
+ page_configs,
+ options_->build_pages (),
+ u)
<< ~DIV
<< ~BODY
<< ~HTML;
diff --git a/mod/mod-ci.cxx b/mod/mod-ci.cxx
index d2da93f..5974d45 100644
--- a/mod/mod-ci.cxx
+++ b/mod/mod-ci.cxx
@@ -3,18 +3,11 @@
#include <mod/mod-ci.hxx>
-#include <ostream>
-
-#include <libbutl/uuid.hxx>
-#include <libbutl/sendmail.mxx>
-#include <libbutl/fdstream.mxx>
-#include <libbutl/timestamp.mxx>
-#include <libbutl/filesystem.mxx>
-#include <libbutl/process-io.mxx> // operator<<(ostream, process_args)
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
-
-#include <libbpkg/manifest.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
+
+#include <libbpkg/manifest.hxx> // package_manifest
#include <libbpkg/package-name.hxx>
#include <web/server/module.hxx>
@@ -23,20 +16,35 @@
#include <mod/page.hxx>
#include <mod/module-options.hxx>
-#include <mod/external-handler.hxx>
using namespace std;
using namespace butl;
using namespace web;
using namespace brep::cli;
+#ifdef BREP_CI_TENANT_SERVICE
+brep::ci::
+ci (tenant_service_map& tsm)
+ : tenant_service_map_ (tsm)
+{
+}
+#endif
+
brep::ci::
+#ifdef BREP_CI_TENANT_SERVICE
+ci (const ci& r, tenant_service_map& tsm)
+#else
ci (const ci& r)
+#endif
: handler (r),
+ ci_start (r),
options_ (r.initialized_ ? r.options_ : nullptr),
form_ (r.initialized_ || r.form_ == nullptr
? r.form_
: make_shared<xhtml::fragment> (*r.form_))
+#ifdef BREP_CI_TENANT_SERVICE
+ , tenant_service_map_ (tsm)
+#endif
{
}
@@ -45,22 +53,25 @@ init (scanner& s)
{
HANDLER_DIAG;
+#ifdef BREP_CI_TENANT_SERVICE
+ {
+ shared_ptr<tenant_service_base> ts (
+ dynamic_pointer_cast<tenant_service_base> (shared_from_this ()));
+
+ assert (ts != nullptr); // By definition.
+
+ tenant_service_map_["ci"] = move (ts);
+ }
+#endif
+
options_ = make_shared<options::ci> (
s, unknown_mode::fail, unknown_mode::fail);
- // Verify that the CI request handling is setup properly, if configured.
+ // Prepare for the CI requests handling, if configured.
//
if (options_->ci_data_specified ())
{
- // Verify the data directory satisfies the requirements.
- //
- const dir_path& d (options_->ci_data ());
-
- if (d.relative ())
- fail << "ci-data directory path must be absolute";
-
- if (!dir_exists (d))
- fail << "ci-data directory '" << d << "' does not exist";
+ ci_start::init (make_shared<options::ci_start> (*options_));
// Parse XHTML5 form file, if configured.
//
@@ -87,10 +98,6 @@ init (scanner& s)
fail << "unable to read ci-form file '" << ci_form << "': " << e;
}
}
-
- if (options_->ci_handler_specified () &&
- options_->ci_handler ().relative ())
- fail << "ci-handler path must be absolute";
}
if (options_->root ().empty ())
@@ -130,9 +137,8 @@ handle (request& rq, response& rs)
//
// return respond_error (); // Request is handled with an error.
//
- string request_id; // Will be set later.
- auto respond_manifest = [&rs, &request_id] (status_code status,
- const string& message) -> bool
+ auto respond_manifest = [&rs] (status_code status,
+ const string& message) -> bool
{
serializer s (rs.content (status, "text/manifest;charset=utf-8"),
"response");
@@ -140,10 +146,6 @@ handle (request& rq, response& rs)
s.next ("", "1"); // Start of manifest.
s.next ("status", to_string (status));
s.next ("message", message);
-
- if (!request_id.empty ())
- s.next ("reference", request_id);
-
s.next ("", ""); // End of manifest.
return true;
};
@@ -234,9 +236,11 @@ handle (request& rq, response& rs)
if (rl.empty () || rl.local ())
return respond_manifest (400, "invalid repository location");
- // Verify the package name[/version] arguments.
+ // Parse the package name[/version] arguments.
//
- for (const string& s: params.package())
+ vector<package> packages;
+
+ for (const string& s: params.package ())
{
// Let's skip the potentially unfilled package form fields.
//
@@ -245,18 +249,21 @@ handle (request& rq, response& rs)
try
{
+ package pkg;
size_t p (s.find ('/'));
if (p != string::npos)
{
- package_name (string (s, 0, p));
+ pkg.name = package_name (string (s, 0, p));
// Not to confuse with module::version.
//
- bpkg::version (string (s, p + 1));
+ pkg.version = bpkg::version (string (s, p + 1));
}
else
- package_name p (s); // Not to confuse with the s variable declaration.
+ pkg.name = package_name (s);
+
+ packages.push_back (move (pkg));
}
catch (const invalid_argument&)
{
@@ -265,31 +272,49 @@ handle (request& rq, response& rs)
}
// Verify that unknown parameter values satisfy the requirements (contain
- // only UTF-8 encoded graphic characters plus '\t', '\r', and '\n').
+ // only UTF-8 encoded graphic characters plus '\t', '\r', and '\n') and
+ // stash them.
//
// Actually, the expected ones must satisfy too, so check them as well.
//
- string what;
- for (const name_value& nv: rps)
+ vector<pair<string, string>> custom_request;
{
- if (nv.value &&
- !utf8 (*nv.value, what, codepoint_types::graphic, U"\n\r\t"))
- return respond_manifest (400,
- "invalid parameter " + nv.name + ": " + what);
+ string what;
+ for (const name_value& nv: rps)
+ {
+ if (nv.value &&
+ !utf8 (*nv.value, what, codepoint_types::graphic, U"\n\r\t"))
+ return respond_manifest (400,
+ "invalid parameter " + nv.name + ": " + what);
+
+ const string& n (nv.name);
+
+ if (n != "repository" &&
+ n != "_" &&
+ n != "package" &&
+ n != "overrides" &&
+ n != "interactive" &&
+ n != "simulate")
+ custom_request.emplace_back (n, nv.value ? *nv.value : "");
+ }
}
// Parse and validate overrides, if present.
//
- vector<manifest_name_value> overrides;
+ vector<pair<string, string>> overrides;
if (params.overrides_specified ())
try
{
istream& is (rq.open_upload ("overrides"));
parser mp (is, "overrides");
- overrides = parse_manifest (mp);
+ vector<manifest_name_value> ovrs (parse_manifest (mp));
+
+ package_manifest::validate_overrides (ovrs, mp.name ());
- package_manifest::validate_overrides (overrides, mp.name ());
+ overrides.reserve (ovrs.size ());
+ for (manifest_name_value& nv: ovrs)
+ overrides.emplace_back (move (nv.name), move (nv.value));
}
// Note that invalid_argument (thrown by open_upload() function call) can
// mean both no overrides upload or multiple overrides uploads.
@@ -310,381 +335,141 @@ handle (request& rq, response& rs)
return respond_error ();
}
- try
- {
- // Note that from now on the result manifest we respond with will contain
- // the reference value.
- //
- request_id = uuid::generate ().string ();
- }
- catch (const system_error& e)
- {
- error << "unable to generate request id: " << e;
- return respond_error ();
- }
-
- // Create the submission data directory.
+ // Stash the User-Agent HTTP header and the client IP address.
//
- dir_path dd (options_->ci_data () / dir_path (request_id));
-
- try
+ optional<string> client_ip;
+ optional<string> user_agent;
+ for (const name_value& h: rq.headers ())
{
- // It's highly unlikely but still possible that the directory already
- // exists. This can only happen if the generated uuid is not unique.
- //
- if (try_mkdir (dd) == mkdir_status::already_exists)
- throw_generic_error (EEXIST);
+ if (icasecmp (h.name, ":Client-IP") == 0)
+ client_ip = h.value;
+ else if (icasecmp (h.name, "User-Agent") == 0)
+ user_agent = h.value;
}
- catch (const system_error& e)
- {
- error << "unable to create directory '" << dd << "': " << e;
- return respond_error ();
- }
-
- auto_rmdir ddr (dd);
-
- // Serialize the CI request manifest to a stream. On the serialization error
- // respond to the client with the manifest containing the bad request (400)
- // code and return false, on the stream error pass through the io_error
- // exception, otherwise return true.
- //
- timestamp ts (system_clock::now ());
-
- auto rqm = [&request_id,
- &rl,
- &ts,
- &simulate,
- &rq,
- &rps,
- &params,
- &respond_manifest]
- (ostream& os, bool long_lines = false) -> bool
- {
- try
- {
- serializer s (os, "request", long_lines);
- // Serialize the submission manifest header.
- //
- s.next ("", "1"); // Start of manifest.
- s.next ("id", request_id);
- s.next ("repository", rl.string ());
-
- for (const string& p: params.package ())
- {
- if (!p.empty ()) // Skip empty package names (see above for details).
- s.next ("package", p);
- }
-
- s.next ("timestamp",
- butl::to_string (ts,
- "%Y-%m-%dT%H:%M:%SZ",
- false /* special */,
- false /* local */));
-
- if (!simulate.empty ())
- s.next ("simulate", simulate);
-
- // Serialize the User-Agent HTTP header and the client IP address.
- //
- optional<string> ip;
- optional<string> ua;
- for (const name_value& h: rq.headers ())
- {
- if (icasecmp (h.name, ":Client-IP") == 0)
- ip = h.value;
- else if (icasecmp (h.name, "User-Agent") == 0)
- ua = h.value;
- }
-
- if (ip)
- s.next ("client-ip", *ip);
-
- if (ua)
- s.next ("user-agent", *ua);
-
- // Serialize the request parameters.
- //
- // Note that the serializer constraints the parameter names (can't start
- // with '#', can't contain ':' and the whitespaces, etc.).
- //
- for (const name_value& nv: rps)
- {
- const string& n (nv.name);
-
- if (n != "repository" &&
- n != "_" &&
- n != "package" &&
- n != "overrides" &&
- n != "simulate")
- s.next (n, nv.value ? *nv.value : "");
- }
-
- s.next ("", ""); // End of manifest.
- return true;
- }
- catch (const serialization& e)
- {
- respond_manifest (400, string ("invalid parameter: ") + e.what ());
- return false;
- }
- };
-
- // Serialize the CI request manifest to the submission directory.
- //
- path rqf (dd / "request.manifest");
+ optional<start_result> r (start (error,
+ warn,
+ verb_ ? &trace : nullptr,
+#ifdef BREP_CI_TENANT_SERVICE
+ tenant_service ("", "ci"),
+#else
+ nullopt /* service */,
+#endif
+ rl,
+ packages,
+ client_ip,
+ user_agent,
+ (params.interactive_specified ()
+ ? params.interactive ()
+ : optional<string> ()),
+ (!simulate.empty ()
+ ? simulate
+ : optional<string> ()),
+ custom_request,
+ overrides));
+
+ if (!r)
+ return respond_error (); // The diagnostics is already issued.
try
{
- ofdstream os (rqf);
- bool r (rqm (os));
- os.close ();
-
- if (!r)
- return true; // The client is already responded with the manifest.
- }
- catch (const io_error& e)
- {
- error << "unable to write to '" << rqf << "': " << e;
- return respond_error ();
+ serialize_manifest (*r,
+ rs.content (r->status, "text/manifest;charset=utf-8"));
}
-
- // Serialize the CI overrides manifest to a stream. On the stream error pass
- // through the io_error exception.
- //
- // Note that it can't throw the serialization exception as the override
- // manifest is parsed from the stream and so verified.
- //
- auto ovm = [&overrides] (ostream& os, bool long_lines = false)
+ catch (const serialization& e)
{
- try
- {
- serializer s (os, "overrides", long_lines);
- serialize_manifest (s, overrides);
- }
- catch (const serialization&) {assert (false);} // See above.
- };
+ error << "ref " << r->reference << ": unable to serialize handler's "
+ << "output: " << e;
- // Serialize the CI overrides manifest to the submission directory.
- //
- path ovf (dd / "overrides.manifest");
-
- if (!overrides.empty ())
- try
- {
- ofdstream os (ovf);
- ovm (os);
- os.close ();
- }
- catch (const io_error& e)
- {
- error << "unable to write to '" << ovf << "': " << e;
return respond_error ();
}
- // Given that the submission data is now successfully persisted we are no
- // longer in charge of removing it, except for the cases when the submission
- // handler terminates with an error (see below for details).
- //
- ddr.cancel ();
-
- // If the handler terminates with non-zero exit status or specifies 5XX
- // (HTTP server error) submission result manifest status value, then we
- // stash the submission data directory for troubleshooting. Otherwise, if
- // it's the 4XX (HTTP client error) status value, then we remove the
- // directory.
- //
- // Note that leaving the directory in place in case of a submission error
- // would have prevent the user from re-submitting until we research the
- // issue and manually remove the directory.
- //
- auto stash_submit_dir = [&dd, error] ()
- {
- if (dir_exists (dd))
- try
- {
- mvdir (dd, dir_path (dd + ".fail"));
- }
- catch (const system_error& e)
- {
- // Not much we can do here. Let's just log the issue and bail out
- // leaving the directory in place.
- //
- error << "unable to rename directory '" << dd << "': " << e;
- }
- };
-
- // Run the submission handler, if specified, reading the result manifest
- // from its stdout and caching it as a name/value pair list for later use
- // (forwarding to the client, sending via email, etc.). Otherwise, create
- // implied result manifest.
- //
- status_code sc;
- vector<manifest_name_value> rvs;
-
- if (options_->ci_handler_specified ())
- {
- using namespace external_handler;
-
- optional<result_manifest> r (run (options_->ci_handler (),
- options_->ci_handler_argument (),
- dd,
- options_->ci_handler_timeout (),
- error,
- warn,
- verb_ ? &trace : nullptr));
- if (!r)
- {
- stash_submit_dir ();
- return respond_error (); // The diagnostics is already issued.
- }
-
- sc = r->status;
- rvs = move (r->values);
- }
- else // Create the implied result manifest.
- {
- sc = 200;
-
- auto add = [&rvs] (string n, string v)
- {
- manifest_name_value nv {
- move (n), move (v),
- 0 /* name_line */, 0 /* name_column */,
- 0 /* value_line */, 0 /* value_column */,
- 0 /* start_pos */, 0 /* colon_pos */, 0 /* end_pos */};
-
- rvs.emplace_back (move (nv));
- };
-
- add ("status", "200");
- add ("message", "CI request is queued");
- add ("reference", request_id);
- }
-
- assert (!rvs.empty ()); // Produced by the handler or is implied.
-
- // Serialize the submission result manifest to a stream. On the
- // serialization error log the error description and return false, on the
- // stream error pass through the io_error exception, otherwise return true.
- //
- auto rsm = [&rvs, &error, &request_id] (ostream& os,
- bool long_lines = false) -> bool
- {
- try
- {
- serializer s (os, "result", long_lines);
- serialize_manifest (s, rvs);
- return true;
- }
- catch (const serialization& e)
- {
- error << "ref " << request_id << ": unable to serialize handler's "
- << "output: " << e;
- return false;
- }
- };
-
- // If the submission data directory still exists then perform an appropriate
- // action on it, depending on the submission result status. Note that the
- // handler could move or remove the directory.
- //
- if (dir_exists (dd))
- {
- // Remove the directory if the client error is detected.
- //
- if (sc >= 400 && sc < 500)
- rmdir_r (dd);
-
- // Otherwise, save the result manifest, into the directory. Also stash the
- // directory for troubleshooting in case of the server error.
- //
- else
- {
- path rsf (dd / "result.manifest");
-
- try
- {
- ofdstream os (rsf);
-
- // Not being able to stash the result manifest is not a reason to
- // claim the submission failed. The error is logged nevertheless.
- //
- rsm (os);
-
- os.close ();
- }
- catch (const io_error& e)
- {
- // Not fatal (see above).
- //
- error << "unable to write to '" << rsf << "': " << e;
- }
-
- if (sc >= 500 && sc < 600)
- stash_submit_dir ();
- }
- }
-
- // Send email, if configured, and the CI request submission is not simulated.
- // Use the long lines manifest serialization mode for the convenience of
- // copying/clicking URLs they contain.
- //
- // Note that we don't consider the email sending failure to be a submission
- // failure as the submission data is successfully persisted and the handler
- // is successfully executed, if configured. One can argue that email can be
- // essential for the submission processing and missing it would result in
- // the incomplete submission. In this case it's natural to assume that the
- // web server error log is monitored and the email sending failure will be
- // noticed.
- //
- if (options_->ci_email_specified () && simulate.empty ())
- try
- {
- // Redirect the diagnostics to the web server error log.
- //
- sendmail sm ([&trace, this] (const char* args[], size_t n)
- {
- l2 ([&]{trace << process_args {args, n};});
- },
- 2 /* stderr */,
- options_->email (),
- "CI request submission (" + request_id + ")",
- {options_->ci_email ()});
-
- // Write the CI request manifest.
- //
- bool r (rqm (sm.out, true /* long_lines */));
- assert (r); // The serialization succeeded once, so can't fail now.
-
- // Write the CI overrides manifest.
- //
- sm.out << "\n\n";
-
- ovm (sm.out, true /* long_lines */);
-
- // Write the CI result manifest.
- //
- sm.out << "\n\n";
-
- // We don't care about the result (see above).
- //
- rsm (sm.out, true /* long_lines */);
-
- sm.out.close ();
+ return true;
+}
- if (!sm.wait ())
- error << "sendmail " << *sm.exit;
- }
- // Handle process_error and io_error (both derive from system_error).
- //
- catch (const system_error& e)
- {
- error << "sendmail error: " << e;
- }
+#ifdef BREP_CI_TENANT_SERVICE
+function<optional<string> (const brep::tenant_service&)> brep::ci::
+build_queued (const tenant_service&,
+ const vector<build>& bs,
+ optional<build_state> initial_state,
+ const build_queued_hints& hints,
+ const diag_epilogue& log_writer) const noexcept
+{
+ NOTIFICATION_DIAG (log_writer);
+
+ l2 ([&]{trace << "initial_state: "
+ << (initial_state ? to_string (*initial_state) : "none")
+ << ", hints "
+ << hints.single_package_version << ' '
+ << hints.single_package_config;});
+
+ return [&bs, initial_state] (const tenant_service& ts)
+ {
+ optional<string> r (ts.data);
+
+ for (const build& b: bs)
+ {
+ string s ((!initial_state
+ ? "queued "
+ : "queued " + to_string (*initial_state) + ' ') +
+ b.package_name.string () + '/' +
+ b.package_version.string () + '/' +
+ b.target.string () + '/' +
+ b.target_config_name + '/' +
+ b.package_config_name + '/' +
+ b.toolchain_name + '/' +
+ b.toolchain_version.string ());
+
+ if (r)
+ {
+ *r += ", ";
+ *r += s;
+ }
+ else
+ r = move (s);
+ }
+
+ return r;
+ };
+}
- if (!rsm (rs.content (sc, "text/manifest;charset=utf-8")))
- return respond_error (); // The error description is already logged.
+function<optional<string> (const brep::tenant_service&)> brep::ci::
+build_building (const tenant_service&,
+ const build& b,
+ const diag_epilogue&) const noexcept
+{
+ return [&b] (const tenant_service& ts)
+ {
+ string s ("building " +
+ b.package_name.string () + '/' +
+ b.package_version.string () + '/' +
+ b.target.string () + '/' +
+ b.target_config_name + '/' +
+ b.package_config_name + '/' +
+ b.toolchain_name + '/' +
+ b.toolchain_version.string ());
+
+ return ts.data ? *ts.data + ", " + s : s;
+ };
+}
- return true;
+function<optional<string> (const brep::tenant_service&)> brep::ci::
+build_built (const tenant_service&,
+ const build& b,
+ const diag_epilogue&) const noexcept
+{
+ return [&b] (const tenant_service& ts)
+ {
+ string s ("built " +
+ b.package_name.string () + '/' +
+ b.package_version.string () + '/' +
+ b.target.string () + '/' +
+ b.target_config_name + '/' +
+ b.package_config_name + '/' +
+ b.toolchain_name + '/' +
+ b.toolchain_version.string ());
+
+ return ts.data ? *ts.data + ", " + s : s;
+ };
}
+#endif
diff --git a/mod/mod-ci.hxx b/mod/mod-ci.hxx
index 431f53b..1e2ee15 100644
--- a/mod/mod-ci.hxx
+++ b/mod/mod-ci.hxx
@@ -9,14 +9,39 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
+#include <libbrep/build.hxx>
+#include <libbrep/common.hxx> // tenant_service
+
#include <mod/module.hxx>
#include <mod/module-options.hxx>
+#include <mod/ci-common.hxx>
+
+#ifdef BREP_CI_TENANT_SERVICE
+# include <mod/tenant-service.hxx>
+#endif
+
namespace brep
{
- class ci: public handler
+ class ci: public handler,
+ private ci_start
+#ifdef BREP_CI_TENANT_SERVICE
+ , public tenant_service_build_queued,
+ public tenant_service_build_building,
+ public tenant_service_build_built
+#endif
{
public:
+
+#ifdef BREP_CI_TENANT_SERVICE
+ explicit
+ ci (tenant_service_map&);
+
+ // Create a shallow copy (handling instance) if initialized and a deep
+ // copy (context exemplar) otherwise.
+ //
+ ci (const ci&, tenant_service_map&);
+#else
ci () = default;
// Create a shallow copy (handling instance) if initialized and a deep
@@ -24,20 +49,44 @@ namespace brep
//
explicit
ci (const ci&);
+#endif
virtual bool
- handle (request&, response&);
+ handle (request&, response&) override;
virtual const cli::options&
- cli_options () const {return options::ci::description ();}
+ cli_options () const override {return options::ci::description ();}
+
+#ifdef BREP_CI_TENANT_SERVICE
+ virtual function<optional<string> (const tenant_service&)>
+ build_queued (const tenant_service&,
+ const vector<build>&,
+ optional<build_state> initial_state,
+ const build_queued_hints&,
+ const diag_epilogue& log_writer) const noexcept override;
+
+ virtual function<optional<string> (const tenant_service&)>
+ build_building (const tenant_service&,
+ const build&,
+ const diag_epilogue& log_writer) const noexcept override;
+
+ virtual function<optional<string> (const tenant_service&)>
+ build_built (const tenant_service&,
+ const build&,
+ const diag_epilogue& log_writer) const noexcept override;
+#endif
private:
virtual void
- init (cli::scanner&);
+ init (cli::scanner&) override;
private:
shared_ptr<options::ci> options_;
shared_ptr<web::xhtml::fragment> form_;
+
+#ifdef BREP_CI_TENANT_SERVICE
+ tenant_service_map& tenant_service_map_;
+#endif
};
}
diff --git a/mod/mod-package-details.cxx b/mod/mod-package-details.cxx
index e0bd1ef..fcd50da 100644
--- a/mod/mod-package-details.cxx
+++ b/mod/mod-package-details.cxx
@@ -183,20 +183,20 @@ handle (request& rq, response& rs)
//
s << H2 << pkg->summary << ~H2;
- if (const optional<string>& d = pkg->description)
+ if (const optional<typed_text>& d = pkg->package_description
+ ? pkg->package_description
+ : pkg->description)
{
const string id ("description");
const string what (name.string () + " description");
s << (full
? DIV_TEXT (*d,
- *pkg->description_type,
true /* strip_title */,
id,
what,
error)
: DIV_TEXT (*d,
- *pkg->description_type,
true /* strip_title */,
options_->package_description (),
url (!full, squery, page, id),
@@ -227,7 +227,7 @@ handle (request& rq, response& rs)
<< ~TABLE;
}
- auto pkg_count (
+ size_t pkg_count (
package_db_->query_value<package_count> (
search_params<package_count> (squery, tenant, name)));
@@ -265,23 +265,12 @@ handle (request& rq, response& rs)
assert (p->internal ());
- // @@ Shouldn't we make package repository name to be a link to the proper
- // place of the About page, describing corresponding repository?
- // Yes, I think that's sounds reasonable.
- // Or maybe it can be something more valuable like a link to the
- // repository package search page ?
- //
- // @@ In most cases package location will be the same for all versions
- // of the same package. Shouldn't we put package location to the
- // package summary part and display it here only if it differs
- // from the one in the summary ?
- //
- // Hm, I am not so sure about this. Consider: stable/testing/unstable.
+ const repository_location& rl (p->internal_repository.load ()->location);
+
+ // @@ Maybe the repository link can be something more valuable like a link
+ // to the repository package search page ?
//
- s << TR_REPOSITORY (
- p->internal_repository.object_id ().canonical_name,
- root,
- tenant)
+ s << TR_REPOSITORY (rl, root, tenant)
<< TR_DEPENDS (p->dependencies, root, tenant)
<< TR_REQUIRES (p->requirements)
<< ~TBODY
diff --git a/mod/mod-package-version-details.cxx b/mod/mod-package-version-details.cxx
index a7682ec..51b21c6 100644
--- a/mod/mod-package-version-details.cxx
+++ b/mod/mod-package-version-details.cxx
@@ -9,6 +9,8 @@
#include <odb/database.hxx>
#include <odb/transaction.hxx>
+#include <libbutl/filesystem.hxx> // dir_iterator, dir_entry
+
#include <web/server/module.hxx>
#include <web/server/mime-url-encoding.hxx>
@@ -47,6 +49,12 @@ init (scanner& s)
options_ = make_shared<options::package_version_details> (
s, unknown_mode::fail, unknown_mode::fail);
+ // Verify that the bindist-url option is specified when necessary.
+ //
+ if (options_->bindist_root_specified () &&
+ !options_->bindist_url_specified ())
+ fail << "bindist-url must be specified if bindist-root is specified";
+
database_module::init (static_cast<const options::package_db&> (*options_),
options_->package_db_retry ());
@@ -152,7 +160,7 @@ handle (request& rq, response& rs)
const string& name (pkg->name.string ());
- const string title (name + " " + sver);
+ const string title (name + ' ' + sver);
xml::serializer s (rs.content (), title);
s << HTML
@@ -181,20 +189,20 @@ handle (request& rq, response& rs)
s << H2 << pkg->summary << ~H2;
- if (const optional<string>& d = pkg->description)
+ if (const optional<typed_text>& d = pkg->package_description
+ ? pkg->package_description
+ : pkg->description)
{
const string id ("description");
const string what (title + " description");
s << (full
- ? DIV_TEXT (*d, *
- pkg->description_type,
+ ? DIV_TEXT (*d,
true /* strip_title */,
id,
what,
error)
: DIV_TEXT (*d,
- *pkg->description_type,
true /* strip_title */,
options_->package_description (),
url (!full, id),
@@ -214,14 +222,13 @@ handle (request& rq, response& rs)
<< TR_PRIORITY (pkg->priority)
<< TR_LICENSES (pkg->license_alternatives)
- << TR_REPOSITORY (rl.canonical_name (), root, tenant)
- << TR_LOCATION (rl);
+ << TR_REPOSITORY (rl, root, tenant);
if (rl.type () == repository_type::pkg)
{
assert (pkg->location);
- s << TR_LINK (rl.url ().string () + "/" + pkg->location->string (),
+ s << TR_LINK (rl.url ().string () + '/' + pkg->location->string (),
pkg->location->leaf ().string (),
"download");
}
@@ -293,7 +300,7 @@ handle (request& rq, response& rs)
if (dcon)
s << ' '
- << A(HREF=u + "/" + p->version.string ()) << *dcon << ~A;
+ << A(HREF=u + '/' + p->version.string ()) << *dcon << ~A;
}
else if (p->internal ())
{
@@ -321,31 +328,51 @@ handle (request& rq, response& rs)
<< TABLE(CLASS="proplist", ID="depends")
<< TBODY;
- for (const auto& da: ds)
+ for (const auto& das: ds)
{
s << TR(CLASS="depends")
<< TH;
- if (da.conditional)
- s << "?";
-
- if (da.buildtime)
- s << "*";
+ if (das.buildtime)
+ s << '*';
s << ~TH
<< TD
<< SPAN(CLASS="value");
- for (const auto& d: da)
+ for (const auto& da: das)
{
- if (&d != &da[0])
+ if (&da != &das[0])
s << " | ";
- print_dependency (d);
+ // Should we enclose multiple dependencies into curly braces as in the
+ // manifest? Somehow feels redundant here, since there can't be any
+ // ambiguity (dependency group version constraint is already punched
+ // into the specific dependencies without constraints).
+ //
+ for (const dependency& d: da)
+ {
+ if (&d != &da[0])
+ s << ' ';
+
+ print_dependency (d);
+ }
+
+ if (da.enable)
+ {
+ s << " ? (";
+
+ if (full)
+ s << *da.enable;
+ else
+ s << "...";
+
+ s << ')';
+ }
}
s << ~SPAN
- << SPAN_COMMENT (da.comment)
+ << SPAN_COMMENT (das.comment)
<< ~TD
<< ~TR;
}
@@ -361,34 +388,59 @@ handle (request& rq, response& rs)
<< TABLE(CLASS="proplist", ID="requires")
<< TBODY;
- for (const auto& ra: rm)
+ for (const requirement_alternatives& ras: rm)
{
s << TR(CLASS="requires")
<< TH;
- if (ra.conditional)
- s << "?";
-
- if (ra.buildtime)
- s << "*";
-
- if (ra.conditional || ra.buildtime)
- s << " ";
+ if (ras.buildtime)
+ s << '*';
s << ~TH
<< TD
<< SPAN(CLASS="value");
- for (const auto& r: ra)
+ for (const requirement_alternative& ra: ras)
{
- if (&r != &ra[0])
+ if (&ra != &ras[0])
s << " | ";
- s << r;
+ // Should we enclose multiple requirement ids into curly braces as in
+ // the manifest? Somehow feels redundant here, since there can't be
+ // any ambiguity (requirement group version constraint is already
+ // punched into the specific requirements without constraints).
+ //
+ for (const string& r: ra)
+ {
+ if (&r != &ra[0])
+ s << ' ';
+
+ s << r;
+ }
+
+ if (ra.enable)
+ {
+ if (!ra.simple () || !ra[0].empty ())
+ s << ' ';
+
+ s << '?';
+
+ if (!ra.enable->empty ())
+ {
+ s << " (";
+
+ if (full)
+ s << *ra.enable;
+ else
+ s << "...";
+
+ s << ')';
+ }
+ }
}
s << ~SPAN
- << SPAN_COMMENT (ra.comment)
+ << SPAN_COMMENT (ras.comment)
<< ~TD
<< ~TR;
}
@@ -401,7 +453,10 @@ handle (request& rq, response& rs)
//
// Print test dependencies of the specific type.
//
- auto print_tests = [&pkg, &s, &print_dependency] (test_dependency_type dt)
+ auto print_tests = [&pkg,
+ &s,
+ &print_dependency,
+ full] (test_dependency_type dt)
{
string id;
@@ -429,11 +484,31 @@ handle (request& rq, response& rs)
}
s << TR(CLASS=id)
+ << TH;
+
+ if (td.buildtime)
+ s << '*';
+
+ s << ~TH
<< TD
<< SPAN(CLASS="value");
print_dependency (td);
+ if (td.enable || td.reflect)
+ {
+ if (full)
+ {
+ if (td.enable)
+ s << " ? (" << *td.enable << ')';
+
+ if (td.reflect)
+ s << ' ' << *td.reflect;
+ }
+ else
+ s << " ...";
+ }
+
s << ~SPAN
<< ~TD
<< ~TR;
@@ -459,34 +534,203 @@ handle (request& rq, response& rs)
{
package_db_->load (*pkg, pkg->build_section);
- // If the package has a singe build configuration class expression with
- // exactly one underlying class and the class is none, then we just drop
- // the page builds section altogether.
+ // If all package build configurations have a single effective build
+ // configuration class expression with exactly one underlying class and
+ // the class is none, then we just drop the page builds section
+ // altogether.
//
- if (pkg->builds.size () == 1)
+ builds = false;
+
+ for (const build_package_config& pc: pkg->build_configs)
{
- const build_class_expr& be (pkg->builds[0]);
+ const build_class_exprs& exprs (pc.effective_builds (pkg->builds));
- builds = be.underlying_classes.size () != 1 ||
- be.underlying_classes[0] != "none";
+ if (exprs.size () != 1 ||
+ exprs[0].underlying_classes.size () != 1 ||
+ exprs[0].underlying_classes[0] != "none")
+ {
+ builds = true;
+ break;
+ }
}
}
- bool archived (package_db_->load<brep::tenant> (tenant)->archived);
+ shared_ptr<brep::tenant> tn (package_db_->load<brep::tenant> (tenant));
t.commit ();
- if (builds)
+ // Display the binary distribution packages for this tenant, package, and
+ // version, if present. Print the archive distributions last.
+ //
+ if (options_->bindist_root_specified ())
{
- using bbot::build_config;
+ // Collect all the available package configurations by iterating over the
+ // <distribution> and <os-release> subdirectories and the <package-config>
+ // symlinks in the following filesystem hierarchy:
+ //
+ // [<tenant>/]<distribution>/<os-release>/<project>/<package>/<version>/<package-config>
+ //
+ // Note that it is possible that new directories and symlinks are created
+ // and/or removed while we iterate over the filesystem entries in the
+ // above hierarchy, which may result in system_error exceptions. If that
+ // happens, we just ignore such exceptions, trying to collect what we can.
+ //
+ const dir_path& br (options_->bindist_root ());
+
+ dir_path d (br);
+ if (!tenant.empty ())
+ d /= tenant;
+
+ // Note that distribution and os_release are simple paths and the
+ // config_symlink and config_dir are relative to the bindist root
+ // directory.
+ //
+ struct bindist_config
+ {
+ dir_path distribution; // debian, fedora, archive
+ dir_path os_release; // fedora37, windows10
+ path symlink; // .../x86_64, .../x86_64-release
+ dir_path directory; // .../x86_64-2023-05-11T10:13:43Z
+
+ bool
+ operator< (const bindist_config& v)
+ {
+ if (int r = distribution.compare (v.distribution))
+ return distribution.string () == "archive" ? false :
+ v.distribution.string () == "archive" ? true :
+ r < 0;
+
+ if (int r = os_release.compare (v.os_release))
+ return r < 0;
+
+ return symlink < v.symlink;
+ }
+ };
+
+ vector<bindist_config> configs;
+
+ if (dir_exists (d))
+ try
+ {
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::ignore_dangling))
+ {
+ if (de.type () != entry_type::directory)
+ continue;
+
+ // Distribution directory.
+ //
+ dir_path dd (path_cast<dir_path> (de.path ()));
+
+ try
+ {
+ dir_path fdd (d / dd);
+
+ for (const dir_entry& re:
+ dir_iterator (fdd, dir_iterator::ignore_dangling))
+ {
+ if (re.type () != entry_type::directory)
+ continue;
+
+ // OS release directory.
+ //
+ dir_path rd (path_cast<dir_path> (re.path ()));
+
+ // Package version directory.
+ //
+ dir_path vd (fdd /
+ rd /
+ dir_path (pkg->project.string ()) /
+ dir_path (pn.string ()) /
+ dir_path (sver));
+
+ try
+ {
+ for (const dir_entry& ce:
+ dir_iterator (vd, dir_iterator::ignore_dangling))
+ {
+ if (ce.ltype () != entry_type::symlink)
+ continue;
+
+ // Skip the "hidden" symlinks which may potentially be used by
+ // the upload handlers until they expose the finalized upload
+ // directory.
+ //
+ const path& cl (ce.path ());
+ if (cl.string () [0] == '.')
+ continue;
+
+ try
+ {
+ path fcl (vd / cl);
+ dir_path cd (path_cast<dir_path> (followsymlink (fcl)));
+
+ if (cd.sub (br))
+ configs.push_back (
+ bindist_config {dd, rd, fcl.leaf (br), cd.leaf (br)});
+ }
+ catch (const system_error&) {}
+ }
+ }
+ catch (const system_error&) {}
+ }
+ }
+ catch (const system_error&) {}
+ }
+ }
+ catch (const system_error&) {}
+
+ // Sort and print collected package configurations, if any.
+ //
+ if (!configs.empty ())
+ {
+ sort (configs.begin (), configs.end ());
+
+ s << H3 << "Binaries" << ~H3
+ << TABLE(ID="binaries")
+ << TBODY;
+
+ for (const bindist_config& c: configs)
+ {
+ s << TR(CLASS="binaries")
+ << TD << SPAN(CLASS="value") << c.distribution << ~SPAN << ~TD
+ << TD << SPAN(CLASS="value") << c.os_release << ~SPAN << ~TD
+ << TD
+ << SPAN(CLASS="value")
+ << A
+ << HREF
+ << options_->bindist_url () << '/' << c.symlink
+ << ~HREF
+ << c.symlink.leaf ()
+ << ~A
+ << " ("
+ << A
+ << HREF
+ << options_->bindist_url () << '/' << c.directory
+ << ~HREF
+ << "snapshot"
+ << ~A
+ << ")"
+ << ~SPAN
+ << ~TD
+ << ~TR;
+ }
+
+ s << ~TBODY
+ << ~TABLE;
+ }
+ }
+
+ if (builds)
+ {
s << H3 << "Builds" << ~H3
<< DIV(ID="builds");
- auto exclude = [&pkg, this] (const build_config& cfg,
- string* reason = nullptr)
+ auto exclude = [&pkg, this] (const build_package_config& pc,
+ const build_target_config& tc,
+ string* rs = nullptr)
{
- return this->exclude (pkg->builds, pkg->build_constraints, cfg, reason);
+ return this->exclude (pc, pkg->builds, pkg->build_constraints, tc, rs);
};
timestamp now (system_clock::now ());
@@ -498,13 +742,7 @@ handle (request& rq, response& rs)
// Query toolchains seen for the package tenant to produce a list of the
// unbuilt configuration/toolchain combinations.
//
- // Note that it only make sense to print those unbuilt configurations that
- // may still be built. That's why we leave the toolchains list empty if
- // the package tenant is achieved.
- //
vector<pair<string, version>> toolchains;
-
- if (!archived)
{
using query = query<toolchain>;
@@ -515,49 +753,73 @@ handle (request& rq, response& rs)
"ORDER BY" + query::build::id.toolchain_name +
order_by_version_desc (query::build::id.toolchain_version,
false /* first */)))
+ {
toolchains.emplace_back (move (t.name), move (t.version));
+ }
}
- // Collect configuration names and unbuilt configurations, skipping those
- // that are hidden or excluded by the package.
+ // Compose the configuration filtering sub-query and collect unbuilt
+ // target configurations, skipping those that are hidden or excluded by
+ // the package configurations.
//
- cstrings conf_names;
+ using query = query<build>;
+
+ query sq (false);
set<config_toolchain> unbuilt_configs;
- for (const auto& c: *build_conf_map_)
+ for (const build_package_config& pc: pkg->build_configs)
{
- const build_config& cfg (*c.second);
-
- if (belongs (cfg, "all") && !exclude (cfg))
+ for (const auto& bc: *target_conf_map_)
{
- conf_names.push_back (c.first);
+ const build_target_config& tc (*bc.second);
- // Note: we will erase built configurations from the unbuilt
- // configurations set later (see below).
- //
- for (const auto& t: toolchains)
- unbuilt_configs.insert ({cfg.name, t.first, t.second});
+ if (!belongs (tc, "hidden") && !exclude (pc, tc))
+ {
+ const build_target_config_id& id (bc.first);
+
+ sq = sq || (query::id.target == id.target &&
+ query::id.target_config_name == id.config &&
+ query::id.package_config_name == pc.name);
+
+ // Note: we will erase built configurations from the unbuilt
+ // configurations set later (see below).
+ //
+ for (const auto& t: toolchains)
+ unbuilt_configs.insert (config_toolchain {tc.target,
+ tc.name,
+ pc.name,
+ t.first,
+ t.second});
+ }
}
}
- // Print the package built configurations in the time-descending order.
+ // Let's not print the package configuration row if the default
+ // configuration is the only one.
//
- using query = query<build>;
+ bool ppc (pkg->build_configs.size () != 1); // Note: can't be empty.
+ // Print the package built configurations in the time-descending order.
+ //
for (auto& b: build_db_->query<build> (
- (query::id.package == pkg->id &&
-
- query::id.configuration.in_range (conf_names.begin (),
- conf_names.end ())) +
-
+ (query::id.package == pkg->id && query::state != "queued" && sq) +
"ORDER BY" + query::timestamp + "DESC"))
{
string ts (butl::to_string (b.timestamp,
"%Y-%m-%d %H:%M:%S %Z",
true /* special */,
true /* local */) +
- " (" + butl::to_string (now - b.timestamp, false) + " ago)");
+ " (" + butl::to_string (now - b.timestamp, false) + " ago");
+
+ if (tn->archived)
+ ts += ", archived";
+ ts += ')';
+
+ // @@ Note that here we also load result logs which we don't need.
+ // Probably we should invent some table view to only load operation
+ // names and statuses.
+ //
if (b.state == build_state::built)
build_db_->load (b, b.results_section);
@@ -566,19 +828,29 @@ handle (request& rq, response& rs)
<< TR_VALUE ("toolchain",
b.toolchain_name + '-' +
b.toolchain_version.string ())
- << TR_VALUE ("config",
- b.configuration + " / " + b.target.string ())
- << TR_VALUE ("timestamp", ts)
- << TR_BUILD_RESULT (b, host, root)
+ << TR_VALUE ("target", b.target.string ())
+ << TR_VALUE ("tgt config", b.target_config_name);
+
+ if (ppc)
+ s << TR_VALUE ("pkg config", b.package_config_name);
+
+ s << TR_VALUE ("timestamp", ts);
+
+ if (b.interactive) // Note: can only be present for the building state.
+ s << TR_VALUE ("login", *b.interactive);
+
+ s << TR_BUILD_RESULT (b, tn->archived, host, root)
<< ~TBODY
<< ~TABLE;
// While at it, erase the built configuration from the unbuilt
// configurations set.
//
- unbuilt_configs.erase ({b.id.configuration,
- b.toolchain_name,
- b.toolchain_version});
+ unbuilt_configs.erase (config_toolchain {b.target,
+ b.target_config_name,
+ b.package_config_name,
+ b.toolchain_name,
+ b.toolchain_version});
}
// Print the package unbuilt configurations with the following sort
@@ -586,42 +858,57 @@ handle (request& rq, response& rs)
//
// 1: toolchain name
// 2: toolchain version (descending)
- // 3: configuration name
+ // 3: target
+ // 4: target configuration name
+ // 5: package configuration name
//
for (const auto& ct: unbuilt_configs)
{
- auto i (build_conf_map_->find (ct.configuration.c_str ()));
- assert (i != build_conf_map_->end ());
-
s << TABLE(CLASS="proplist build")
<< TBODY
<< TR_VALUE ("toolchain",
ct.toolchain_name + '-' +
ct.toolchain_version.string ())
- << TR_VALUE ("config",
- ct.configuration + " / " +
- i->second->target.string ())
- << TR_VALUE ("result", "unbuilt")
+ << TR_VALUE ("target", ct.target.string ())
+ << TR_VALUE ("tgt config", ct.target_config);
+
+ if (ppc)
+ s << TR_VALUE ("pkg config", ct.package_config);
+
+ s << TR_VALUE ("result", "unbuilt")
<< ~TBODY
<< ~TABLE;
}
- // Print the package build exclusions that belong to the 'default' class.
+ // Print the package build exclusions that belong to the 'default' class,
+ // unless the package is built interactively (normally for a single
+ // configuration).
//
- for (const auto& c: *build_conf_)
+ if (!tn->interactive)
{
- string reason;
- if (belongs (c, "default") && exclude (c, &reason))
+ for (const build_package_config& pc: pkg->build_configs)
{
- s << TABLE(CLASS="proplist build")
- << TBODY
- << TR_VALUE ("config", c.name + " / " + c.target.string ())
- << TR_VALUE ("result",
- !reason.empty ()
- ? "excluded (" + reason + ')'
- : "excluded")
- << ~TBODY
- << ~TABLE;
+ for (const auto& tc: *target_conf_)
+ {
+ string reason;
+ if (belongs (tc, "default") && exclude (pc, tc, &reason))
+ {
+ s << TABLE(CLASS="proplist build")
+ << TBODY
+ << TR_VALUE ("target", tc.target.string ())
+ << TR_VALUE ("tgt config", tc.name);
+
+ if (ppc)
+ s << TR_VALUE ("pkg config", pc.name);
+
+ s << TR_VALUE ("result",
+ !reason.empty ()
+ ? "excluded (" + reason + ')'
+ : "excluded")
+ << ~TBODY
+ << ~TABLE;
+ }
+ }
}
}
@@ -630,19 +917,25 @@ handle (request& rq, response& rs)
s << ~DIV;
}
- const string& ch (pkg->changes);
-
- if (!ch.empty ())
+ if (const optional<typed_text>& c = pkg->changes)
{
const string id ("changes");
+ const string what (title + " changes");
s << H3 << "Changes" << ~H3
<< (full
- ? PRE_TEXT (ch, id)
- : PRE_TEXT (ch,
+ ? DIV_TEXT (*c,
+ false /* strip_title */,
+ id,
+ what,
+ error)
+ : DIV_TEXT (*c,
+ false /* strip_title */,
options_->package_changes (),
- url (!full, "changes"),
- id));
+ url (!full, id),
+ id,
+ what,
+ error));
}
s << ~DIV
diff --git a/mod/mod-packages.cxx b/mod/mod-packages.cxx
index 65c7c5b..6026024 100644
--- a/mod/mod-packages.cxx
+++ b/mod/mod-packages.cxx
@@ -49,8 +49,8 @@ init (scanner& s)
options_->root (dir_path ("/"));
// Check that the database 'package' schema matches the current one. It's
- // enough to perform the check in just a single module implementation (and we
- // don't do in the dispatcher because it doesn't use the database).
+ // enough to perform the check in just a single module implementation (and
+ // we don't do in the dispatcher because it doesn't use the database).
//
// Note that the failure can be reported by each web server worker process.
// While it could be tempting to move the check to the
@@ -136,8 +136,18 @@ handle (request& rq, response& rs)
<< DIV_HEADER (options_->logo (), options_->menu (), root, tenant)
<< DIV(ID="content");
+ // On the first page print the search page description, if specified.
+ //
+ if (page == 0)
+ {
+ const web::xhtml::fragment& desc (options_->search_description ());
+
+ if (!desc.empty ())
+ s << DIV(ID="search-description") << desc << ~DIV;
+ }
+
// If the tenant is empty then we are in the global view and will display
- // packages from all the tenants.
+ // packages from all the public tenants.
//
optional<string> tn;
if (!tenant.empty ())
@@ -146,7 +156,7 @@ handle (request& rq, response& rs)
session sn;
transaction t (package_db_->begin ());
- auto pkg_count (
+ size_t pkg_count (
package_db_->query_value<latest_package_count> (
search_param<latest_package_count> (squery, tn)));
@@ -167,11 +177,10 @@ handle (request& rq, response& rs)
s << TABLE(CLASS="proplist package")
<< TBODY
- << TR_NAME (p->name, equery, root, p->tenant)
+ << TR_NAME (p->name, root, p->tenant)
<< TR_SUMMARY (p->summary)
<< TR_LICENSE (p->license_alternatives)
- << TR_DEPENDS (p->dependencies, root, p->tenant)
- << TR_REQUIRES (p->requirements);
+ << TR_DEPENDS (p->dependencies, root, p->tenant);
// In the global view mode add the tenant packages link. Note that the
// global view (and the link) makes sense only in the multi-tenant mode.
diff --git a/mod/mod-repository-details.cxx b/mod/mod-repository-details.cxx
index 813b738..082903b 100644
--- a/mod/mod-repository-details.cxx
+++ b/mod/mod-repository-details.cxx
@@ -3,14 +3,12 @@
#include <mod/mod-repository-details.hxx>
-#include <algorithm> // max()
-
#include <libstudxml/serializer.hxx>
#include <odb/database.hxx>
#include <odb/transaction.hxx>
-#include <libbutl/timestamp.mxx> // to_string()
+#include <libbutl/timestamp.hxx> // to_string()
#include <web/server/module.hxx>
#include <web/server/mime-url-encoding.hxx>
@@ -100,7 +98,7 @@ handle (request& rq, response& rs)
//
string id (html_id (r.canonical_name));
s << H1(ID=id)
- << A(HREF="#" + web::mime_url_encode (id, false))
+ << A(HREF='#' + web::mime_url_encode (id, false))
<< r.display_name
<< ~A
<< ~H1;
diff --git a/mod/mod-repository-root.cxx b/mod/mod-repository-root.cxx
index 02d6c93..34b4007 100644
--- a/mod/mod-repository-root.cxx
+++ b/mod/mod-repository-root.cxx
@@ -8,7 +8,6 @@
#include <cmark-gfm-core-extensions.h>
#include <sstream>
-#include <algorithm> // find()
#include <web/server/module.hxx>
@@ -17,6 +16,7 @@
#include <mod/mod-ci.hxx>
#include <mod/mod-submit.hxx>
+#include <mod/mod-upload.hxx>
#include <mod/mod-builds.hxx>
#include <mod/mod-packages.hxx>
#include <mod/mod-build-log.hxx>
@@ -108,24 +108,42 @@ namespace brep
//
repository_root::
repository_root ()
- : packages_ (make_shared<packages> ()),
+ :
+ //
+ // Only create and populate the tenant service map in the exemplar
+ // passing a reference to it to all the sub-handler exemplars. Note
+ // that we dispatch the tenant service callbacks to the exemplar
+ // without creating a new instance for each callback (thus the
+ // callbacks are const).
+ //
+ tenant_service_map_ (make_shared<tenant_service_map> ()),
+ packages_ (make_shared<packages> ()),
package_details_ (make_shared<package_details> ()),
package_version_details_ (make_shared<package_version_details> ()),
repository_details_ (make_shared<repository_details> ()),
- build_task_ (make_shared<build_task> ()),
- build_result_ (make_shared<build_result> ()),
- build_force_ (make_shared<build_force> ()),
+ build_task_ (make_shared<build_task> (*tenant_service_map_)),
+ build_result_ (make_shared<build_result> (*tenant_service_map_)),
+ build_force_ (make_shared<build_force> (*tenant_service_map_)),
build_log_ (make_shared<build_log> ()),
builds_ (make_shared<builds> ()),
build_configs_ (make_shared<build_configs> ()),
submit_ (make_shared<submit> ()),
- ci_ (make_shared<ci> ())
+#ifdef BREP_CI_TENANT_SERVICE
+ ci_ (make_shared<ci> (*tenant_service_map_)),
+#else
+ ci_ (make_shared<ci> ()),
+#endif
+ upload_ (make_shared<upload> ())
{
}
repository_root::
repository_root (const repository_root& r)
: handler (r),
+ tenant_service_map_ (
+ r.initialized_
+ ? r.tenant_service_map_
+ : make_shared<tenant_service_map> ()),
//
// Deep/shallow-copy sub-handlers depending on whether this is an
// exemplar/handler.
@@ -150,15 +168,15 @@ namespace brep
build_task_ (
r.initialized_
? r.build_task_
- : make_shared<build_task> (*r.build_task_)),
+ : make_shared<build_task> (*r.build_task_, *tenant_service_map_)),
build_result_ (
r.initialized_
? r.build_result_
- : make_shared<build_result> (*r.build_result_)),
+ : make_shared<build_result> (*r.build_result_, *tenant_service_map_)),
build_force_ (
r.initialized_
? r.build_force_
- : make_shared<build_force> (*r.build_force_)),
+ : make_shared<build_force> (*r.build_force_, *tenant_service_map_)),
build_log_ (
r.initialized_
? r.build_log_
@@ -178,7 +196,15 @@ namespace brep
ci_ (
r.initialized_
? r.ci_
+#ifdef BREP_CI_TENANT_SERVICE
+ : make_shared<ci> (*r.ci_, *tenant_service_map_)),
+#else
: make_shared<ci> (*r.ci_)),
+#endif
+ upload_ (
+ r.initialized_
+ ? r.upload_
+ : make_shared<upload> (*r.upload_)),
options_ (
r.initialized_
? r.options_
@@ -205,6 +231,7 @@ namespace brep
append (r, build_configs_->options ());
append (r, submit_->options ());
append (r, ci_->options ());
+ append (r, upload_->options ());
return r;
}
@@ -250,6 +277,7 @@ namespace brep
sub_init (*build_configs_, "build_configs");
sub_init (*submit_, "submit");
sub_init (*ci_, "ci");
+ sub_init (*upload_, "upload");
// Parse own configuration options.
//
@@ -445,6 +473,13 @@ namespace brep
return handle ("ci", param);
}
+ else if (func == "upload")
+ {
+ if (handler_ == nullptr)
+ handler_.reset (new upload (*upload_));
+
+ return handle ("upload", param);
+ }
else
return nullopt;
};
diff --git a/mod/mod-repository-root.hxx b/mod/mod-repository-root.hxx
index 9e28797..aa60fda 100644
--- a/mod/mod-repository-root.hxx
+++ b/mod/mod-repository-root.hxx
@@ -9,6 +9,7 @@
#include <mod/module.hxx>
#include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>
namespace brep
{
@@ -24,6 +25,7 @@ namespace brep
class build_configs;
class submit;
class ci;
+ class upload;
class repository_root: public handler
{
@@ -58,6 +60,8 @@ namespace brep
version ();
private:
+ shared_ptr<tenant_service_map> tenant_service_map_;
+
shared_ptr<packages> packages_;
shared_ptr<package_details> package_details_;
shared_ptr<package_version_details> package_version_details_;
@@ -70,6 +74,7 @@ namespace brep
shared_ptr<build_configs> build_configs_;
shared_ptr<submit> submit_;
shared_ptr<ci> ci_;
+ shared_ptr<upload> upload_;
shared_ptr<options::repository_root> options_;
diff --git a/mod/mod-submit.cxx b/mod/mod-submit.cxx
index 9c93a36..5ee358a 100644
--- a/mod/mod-submit.cxx
+++ b/mod/mod-submit.cxx
@@ -5,14 +5,14 @@
#include <ostream>
-#include <libbutl/sha256.mxx>
-#include <libbutl/sendmail.mxx>
-#include <libbutl/fdstream.mxx>
-#include <libbutl/timestamp.mxx>
-#include <libbutl/filesystem.mxx>
-#include <libbutl/process-io.mxx> // operator<<(ostream, process_args)
-#include <libbutl/manifest-types.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/sendmail.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/timestamp.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
+#include <libbutl/manifest-types.hxx>
+#include <libbutl/manifest-serializer.hxx>
#include <web/server/module.hxx>
@@ -163,7 +163,7 @@ handle (request& rq, response& rs)
if (!options_->submit_data_specified ())
return respond_manifest (404, "submission disabled");
- // Parse the request form data and verifying the submission size limit.
+ // Parse the request form data and verify the submission size limit.
//
// Note that if it is exceeded, then there are parameters and this is the
// submission rather than the form request, and so we respond with the
@@ -292,8 +292,8 @@ handle (request& rq, response& rs)
// However, using the abbreviated checksum can be helpful for
// troubleshooting.
//
- td = dir_path (options_->submit_temp () /
- dir_path (path::traits_type::temp_name (ref)));
+ td = options_->submit_temp () /
+ dir_path (path::traits_type::temp_name (ref));
// It's highly unlikely but still possible that the temporary directory
// already exists. This can only happen due to the unclean web server
@@ -553,7 +553,7 @@ handle (request& rq, response& rs)
// Run the submission handler, if specified, reading the result manifest
// from its stdout and caching it as a name/value pair list for later use
- // (forwarding to the client, sending via email, etc.). Otherwise, create
+ // (forwarding to the client, sending via email, etc). Otherwise, create
// implied result manifest.
//
status_code sc;
@@ -683,7 +683,7 @@ handle (request& rq, response& rs)
sendmail sm (print_args,
2 /* stderr */,
options_->email (),
- "new package submission " + a.string () + " (" + ref + ")",
+ "new package submission " + a.string () + " (" + ref + ')',
{options_->submit_email ()});
// Write the submission request manifest.
diff --git a/mod/mod-upload.cxx b/mod/mod-upload.cxx
new file mode 100644
index 0000000..9f8b9de
--- /dev/null
+++ b/mod/mod-upload.cxx
@@ -0,0 +1,763 @@
+// file : mod/mod-upload.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <mod/mod-upload.hxx>
+
+#include <odb/database.hxx>
+#include <odb/transaction.hxx>
+
+#include <libbutl/uuid.hxx>
+#include <libbutl/base64.hxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/sendmail.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/timestamp.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
+#include <libbutl/manifest-types.hxx>
+#include <libbutl/manifest-serializer.hxx>
+
+#include <web/server/module.hxx>
+
+#include <libbrep/build.hxx>
+#include <libbrep/build-odb.hxx>
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
+
+#include <mod/module-options.hxx>
+#include <mod/external-handler.hxx>
+
+using namespace std;
+using namespace butl;
+using namespace brep::cli;
+using namespace odb::core;
+
+// While currently the user-defined copy constructor is not required (we don't
+// need to deep copy nullptr's), it is a good idea to keep the placeholder
+// ready for less trivial cases.
+//
+brep::upload::
+upload (const upload& r)
+ : build_result_module (r),
+ options_ (r.initialized_ ? r.options_ : nullptr)
+{
+}
+
+void brep::upload::
+init (scanner& s)
+{
+ HANDLER_DIAG;
+
+ options_ = make_shared<options::upload> (
+ s, unknown_mode::fail, unknown_mode::fail);
+
+ // Verify that the upload handling is set up properly, if configured.
+ //
+ for (const auto& ud: options_->upload_data ())
+ {
+ const string& t (ud.first);
+
+ if (t.empty ())
+ fail << "empty upload type in upload-data configuration option";
+
+ if (ud.second.relative ())
+ fail << t << " upload-data path '" << ud.second << "' is relative";
+
+ if (!dir_exists (ud.second))
+ fail << t << " upload-data directory '" << ud.second
+ << "' does not exist";
+
+ const map<string, path>& uh (options_->upload_handler ());
+ auto i (uh.find (t));
+
+ if (i != uh.end () && i->second.relative ())
+ fail << t << " upload-handler path '" << i->second << "' is relative";
+ }
+
+ if (options_->upload_data_specified ())
+ {
+ if (!options_->build_config_specified ())
+ fail << "upload functionality is enabled but package building "
+ << "functionality is disabled";
+
+ build_result_module::init (*options_, *options_);
+ }
+}
+
+bool brep::upload::
+handle (request& rq, response& rs)
+{
+ using brep::version; // Not to confuse with module::version.
+
+ using serializer = manifest_serializer;
+ using serialization = manifest_serialization;
+
+ HANDLER_DIAG;
+
+ // We will respond with the manifest to the upload protocol violations and
+ // with a plain text message on the internal errors. In the latter case we
+ // will always respond with the same neutral message for security reasons,
+ // logging the error details. Note that descriptions of exceptions caught by
+ // the web server are returned to the client (see web/module.hxx for
+ // details), and we want to avoid this when there is a danger of exposing
+ // sensitive data.
+ //
+ // Also we will pass through exceptions thrown by the underlying API, unless
+ // we need to handle them or add details for the description, in which case
+ // we will fall back to one of the above mentioned response methods.
+ //
+ // Note that both respond_manifest() and respond_error() are normally called
+ // right before the end of the request handling. They both always return
+ // true to allow bailing out with a single line, for example:
+ //
+ // return respond_error (); // Request is handled with an error.
+ //
+ string request_id; // Will be set later.
+ auto respond_manifest = [&rs, &request_id] (status_code status,
+ const string& message) -> bool
+ {
+ serializer s (rs.content (status, "text/manifest;charset=utf-8"),
+ "response");
+
+ s.next ("", "1"); // Start of manifest.
+ s.next ("status", to_string (status));
+ s.next ("message", message);
+
+ if (!request_id.empty ())
+ s.next ("reference", request_id);
+
+ s.next ("", ""); // End of manifest.
+ return true;
+ };
+
+ auto respond_error = [&rs] (status_code status = 500) -> bool
+ {
+ rs.content (status, "text/plain;charset=utf-8")
+ << "upload handling failed" << endl;
+
+ return true;
+ };
+
+ // Check if the upload functionality is enabled.
+ //
+ // Note that this is not an upload protocol violation but it feels right to
+ // respond with the manifest, to help the client a bit.
+ //
+ if (!options_->upload_data_specified ())
+ return respond_manifest (404, "upload disabled");
+
+ // Parse the request data and verify the upload size limit.
+ //
+ // Note that the size limit is upload type-specific. Thus, first, we need to
+ // determine the upload type which we expect to be specified in the URL as a
+ // value of the upload parameter.
+ //
+ string type;
+ dir_path dir;
+
+ try
+ {
+ name_value_scanner s (rq.parameters (0 /* limit */, true /* url_only */));
+
+ // We only expect the upload=<type> parameter in URL.
+ //
+ params::upload params (
+ params::upload (s, unknown_mode::fail, unknown_mode::fail));
+
+ type = move (params.type ());
+
+ if (type.empty ())
+ return respond_manifest (400, "upload type expected");
+
+ // Check if this upload type is enabled. While at it, cache the upload
+ // data directory path.
+ //
+ const map<string, dir_path>& ud (options_->upload_data ());
+ auto i (ud.find (type));
+
+ if (i == ud.end ())
+ return respond_manifest (404, type + " upload disabled");
+
+ dir = i->second;
+ }
+ catch (const cli::exception&)
+ {
+ return respond_manifest (400, "invalid parameter");
+ }
+
+ try
+ {
+ const map<string, size_t>& us (options_->upload_max_size ());
+ auto i (us.find (type));
+ rq.parameters (i != us.end () ? i->second : 10485760); // 10M by default.
+ }
+ catch (const invalid_request& e)
+ {
+ if (e.status == 413) // Payload too large?
+ return respond_manifest (e.status, type + " upload size exceeds limit");
+
+ throw;
+ }
+
+ // The request parameters are now parsed and the limit doesn't really matter.
+ //
+ const name_values& rps (rq.parameters (0 /* limit */));
+
+ // Verify the upload parameters we expect. The unknown ones will be
+ // serialized to the upload manifest.
+ //
+ params::upload params;
+
+ try
+ {
+ name_value_scanner s (rps);
+ params = params::upload (s, unknown_mode::skip, unknown_mode::skip);
+ }
+ catch (const cli::exception&)
+ {
+ return respond_manifest (400, "invalid parameter");
+ }
+
+ const string& session (params.session ());
+ const string& instance (params.instance ());
+ const string& archive (params.archive ());
+ const string& sha256sum (params.sha256sum ());
+
+ if (session.empty ())
+ return respond_manifest (400, "upload session expected");
+
+ optional<vector<char>> challenge;
+
+ if (params.challenge_specified ())
+ try
+ {
+ challenge = base64_decode (params.challenge ());
+ }
+ catch (const invalid_argument&)
+ {
+ return respond_manifest (400, "invalid challenge");
+ }
+
+ if (instance.empty ())
+ return respond_manifest (400, "upload instance expected");
+
+ if (archive.empty ())
+ return respond_manifest (400, "upload archive expected");
+
+ if (sha256sum.empty ())
+ return respond_manifest (400, "upload archive checksum expected");
+
+ if (sha256sum.size () != 64)
+ return respond_manifest (400, "invalid upload archive checksum");
+
+ // Verify that unknown parameter values satisfy the requirements (contain
+ // only UTF-8 encoded graphic characters plus '\t', '\r', and '\n').
+ //
+ // Actually, the expected ones must satisfy too, so check them as well.
+ //
+ string what;
+ for (const name_value& nv: rps)
+ {
+ if (nv.value &&
+ !utf8 (*nv.value, what, codepoint_types::graphic, U"\n\r\t"))
+ return respond_manifest (400,
+ "invalid parameter " + nv.name + ": " + what);
+ }
+
+ parse_session_result sess;
+
+ try
+ {
+ sess = parse_session (session);
+ }
+ catch (const invalid_argument& e)
+ {
+ return respond_manifest (400, string ("invalid session: ") + e.what ());
+ }
+
+ // If the session expired (no such configuration, etc) then, similar to the
+ // build result module, we log this case with the warning severity and
+ // respond with manifest with the 200 status as if the session is valid (see
+ // the build result module for the reasoning).
+ //
+ auto warn_expired = [&session, &warn] (const string& d)
+ {
+ warn << "session '" << session << "' expired: " << d;
+ };
+
+ const build_id& id (sess.id);
+
+ // Make sure the build configuration still exists.
+ //
+ const build_target_config* tc;
+ {
+ auto i (target_conf_map_->find (
+ build_target_config_id {id.target, id.target_config_name}));
+
+ if (i == target_conf_map_->end ())
+ {
+ warn_expired ("no build configuration");
+ return respond_manifest (200, type + " upload is queued");
+ }
+
+ tc = i->second;
+ }
+
+ // Note that if the session authentication fails (probably due to the
+ // authentication settings change), then we log this case with the warning
+ // severity and respond with manifest with the 200 status as if the
+ // challenge is valid (see the build result module for the reasoning).
+ //
+ shared_ptr<build> bld;
+ shared_ptr<build_package> pkg;
+ shared_ptr<build_repository> rep;
+ {
+ transaction t (build_db_->begin ());
+
+ package_build pb;
+ shared_ptr<build> b;
+ if (!build_db_->query_one<package_build> (
+ query<package_build>::build::id == id, pb))
+ {
+ warn_expired ("no package build");
+ }
+ else if ((b = move (pb.build))->state != build_state::building)
+ {
+ warn_expired ("package configuration state is " + to_string (b->state));
+ }
+ else if (b->timestamp != sess.timestamp)
+ {
+ warn_expired ("non-matching timestamp");
+ }
+ else if (authenticate_session (*options_, challenge, *b, session))
+ {
+ bld = move (b);
+ pkg = build_db_->load<build_package> (id.package);
+ rep = pkg->internal_repository.load ();
+ }
+
+ t.commit ();
+ }
+
+ // Note that from now on the result manifest we respond with will contain
+ // the reference value.
+ //
+ try
+ {
+ request_id = uuid::generate ().string ();
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to generate request id: " << e;
+ return respond_error ();
+ }
+
+ if (bld == nullptr)
+ return respond_manifest (200, type + " upload is queued");
+
+ // Create the upload data directory.
+ //
+ dir_path dd (dir / dir_path (request_id));
+
+ try
+ {
+ // It's highly unlikely but still possible that the directory already
+ // exists. This can only happen if the generated uuid is not unique.
+ //
+ if (try_mkdir (dd) == mkdir_status::already_exists)
+ throw_generic_error (EEXIST);
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to create directory '" << dd << "': " << e;
+ return respond_error ();
+ }
+
+ auto_rmdir ddr (dd);
+
+ // Save the package archive into the temporary directory and verify its
+ // checksum.
+ //
+ // Note that the archive file name can potentially contain a directory path in
+ // the POSIX form, so let's strip it if that's the case.
+ //
+ path a;
+ path af;
+
+ try
+ {
+ size_t n (archive.find_last_of ('/'));
+ a = path (n != string::npos ? string (archive, n + 1) : archive);
+ af = dd / a;
+ }
+ catch (const invalid_path&)
+ {
+ return respond_manifest (400, "invalid package archive name");
+ }
+
+ try
+ {
+ istream& is (rq.open_upload ("archive"));
+
+ // Note that istream::read() sets failbit if unable to read the requested
+ // number of bytes.
+ //
+ is.exceptions (istream::badbit);
+
+ sha256 sha;
+ char buf[8192];
+ ofdstream os (af, fdopen_mode::binary);
+
+ while (!eof (is))
+ {
+ is.read (buf, sizeof (buf));
+
+ if (size_t n = is.gcount ())
+ {
+ sha.append (buf, n);
+ os.write (buf, n);
+ }
+ }
+
+ os.close ();
+
+ // Respond with the unprocessable entity (422) code for the archive
+ // checksum mismatch.
+ //
+ if (sha.string () != sha256sum)
+ return respond_manifest (422, "upload archive checksum mismatch");
+ }
+ // Note that invalid_argument (thrown by open_upload() function call) can
+ // mean both no archive upload or multiple archive uploads.
+ //
+ catch (const invalid_argument&)
+ {
+ return respond_manifest (400, "archive upload expected");
+ }
+ catch (const io_error& e)
+ {
+ error << "unable to write package archive '" << af << "': " << e;
+ return respond_error ();
+ }
+
+ // Serialize the upload request manifest to a stream. On the serialization
+ // error respond to the client with the manifest containing the bad request
+ // (400) code and return false, on the stream error pass through the
+ // io_error exception, otherwise return true.
+ //
+ timestamp ts (system_clock::now ());
+
+ auto rqm = [&request_id,
+ &ts,
+ &rps,
+ &session,
+ &instance,
+ &a,
+ &sha256sum,
+ &id,
+ &bld,
+ &pkg,
+ &rep,
+ &tc,
+ &sess,
+ &respond_manifest,
+ this] (ostream& os, bool long_lines = false) -> bool
+ {
+ try
+ {
+ serializer s (os, "request", long_lines);
+
+ // Serialize the upload manifest header.
+ //
+ s.next ("", "1"); // Start of manifest.
+ s.next ("id", request_id);
+ s.next ("session", session);
+ s.next ("instance", instance);
+ s.next ("archive", a.string ());
+ s.next ("sha256sum", sha256sum);
+
+ s.next ("timestamp",
+ butl::to_string (ts,
+ "%Y-%m-%dT%H:%M:%SZ",
+ false /* special */,
+ false /* local */));
+
+ s.next ("name", id.package.name.string ());
+ s.next ("version", pkg->version.string ());
+ s.next ("project", pkg->project.string ());
+ s.next ("target-config", tc->name);
+ s.next ("package-config", id.package_config_name);
+ s.next ("target", tc->target.string ());
+
+ if (!tenant.empty ())
+ s.next ("tenant", tenant);
+
+ s.next ("toolchain-name", id.toolchain_name);
+ s.next ("toolchain-version", sess.toolchain_version.string ());
+ s.next ("repository-name", rep->canonical_name);
+
+ s.next ("machine-name", bld->machine.name);
+ s.next ("machine-summary", bld->machine.summary);
+
+ // Serialize the request parameters.
+ //
+ // Note that the serializer constrains the parameter names (can't start
+ // with '#', can't contain ':' and the whitespaces, etc.).
+ //
+ for (const name_value& nv: rps)
+ {
+ // Note that the upload parameter is renamed to '_' by the root
+ // handler (see the request_proxy class for details).
+ //
+ const string& n (nv.name);
+ if (n != "_" &&
+ n != "session" &&
+ n != "challenge" &&
+ n != "instance" &&
+ n != "archive" &&
+ n != "sha256sum")
+ s.next (n, nv.value ? *nv.value : "");
+ }
+
+ s.next ("", ""); // End of manifest.
+ return true;
+ }
+ catch (const serialization& e)
+ {
+ respond_manifest (400, string ("invalid parameter: ") + e.what ());
+ return false;
+ }
+ };
+
+ // Serialize the upload request manifest to the upload directory.
+ //
+ path rqf (dd / "request.manifest");
+
+ try
+ {
+ ofdstream os (rqf);
+ bool r (rqm (os));
+ os.close ();
+
+ if (!r)
+ return true; // The client is already responded with the manifest.
+ }
+ catch (const io_error& e)
+ {
+ error << "unable to write to '" << rqf << "': " << e;
+ return respond_error ();
+ }
+
+ // Given that the upload data is now successfully persisted we are no longer
+ // in charge of removing it, except for the cases when the upload
+ // handler terminates with an error (see below for details).
+ //
+ ddr.cancel ();
+
+ // If the handler terminates with non-zero exit status or specifies 5XX
+ // (HTTP server error) upload result manifest status value, then we stash
+ // the upload data directory for troubleshooting. Otherwise, if it's the 4XX
+ // (HTTP client error) status value, then we remove the directory.
+ //
+ auto stash_upload_dir = [&dd, error] ()
+ {
+ if (dir_exists (dd))
+ try
+ {
+ mvdir (dd, dir_path (dd + ".fail"));
+ }
+ catch (const system_error& e)
+ {
+ // Not much we can do here. Let's just log the issue and bail out
+ // leaving the directory in place.
+ //
+ error << "unable to rename directory '" << dd << "': " << e;
+ }
+ };
+
+ // Run the upload handler, if specified, reading the result manifest from
+ // its stdout and caching it as a name/value pair list for later use
+ // (forwarding to the client, sending via email, etc). Otherwise, create
+ // implied result manifest.
+ //
+ status_code sc;
+ vector<manifest_name_value> rvs;
+
+ const map<string, path>& uh (options_->upload_handler ());
+ auto hi (uh.find (type));
+
+ if (hi != uh.end ())
+ {
+ auto range (options_->upload_handler_argument ().equal_range (type));
+
+ strings args;
+ for (auto i (range.first); i != range.second; ++i)
+ args.push_back (i->second);
+
+ const map<string, size_t>& ht (options_->upload_handler_timeout ());
+ auto i (ht.find (type));
+
+ optional<external_handler::result_manifest> r (
+ external_handler::run (hi->second,
+ args,
+ dd,
+ i != ht.end () ? i->second : 0,
+ error,
+ warn,
+ verb_ ? &trace : nullptr));
+
+ if (!r)
+ {
+ stash_upload_dir ();
+ return respond_error (); // The diagnostics is already issued.
+ }
+
+ sc = r->status;
+ rvs = move (r->values);
+ }
+ else // Create the implied result manifest.
+ {
+ sc = 200;
+
+ auto add = [&rvs] (string n, string v)
+ {
+ manifest_name_value nv {
+ move (n), move (v),
+ 0 /* name_line */, 0 /* name_column */,
+ 0 /* value_line */, 0 /* value_column */,
+ 0 /* start_pos */, 0 /* colon_pos */, 0 /* end_pos */};
+
+ rvs.emplace_back (move (nv));
+ };
+
+ add ("status", "200");
+ add ("message", type + " upload is queued");
+ add ("reference", request_id);
+ }
+
+ assert (!rvs.empty ()); // Produced by the handler or is implied.
+
+ // Serialize the upload result manifest to a stream. On the serialization
+ // error log the error description and return false, on the stream error
+ // pass through the io_error exception, otherwise return true.
+ //
+ auto rsm = [&rvs,
+ &error,
+ &request_id,
+ &type] (ostream& os, bool long_lines = false) -> bool
+ {
+ try
+ {
+ serializer s (os, "result", long_lines);
+ serialize_manifest (s, rvs);
+ return true;
+ }
+ catch (const serialization& e)
+ {
+ error << "ref " << request_id << ": unable to serialize " << type
+ << " upload handler's output: " << e;
+ return false;
+ }
+ };
+
+ // If the upload data directory still exists then perform an appropriate
+ // action on it, depending on the upload result status. Note that the
+ // handler could move or remove the directory.
+ //
+ if (dir_exists (dd))
+ {
+ // Remove the directory if the client error is detected.
+ //
+ if (sc >= 400 && sc < 500)
+ {
+ rmdir_r (dd);
+ }
+ //
+ // Otherwise, save the result manifest into the directory. Also stash the
+ // directory for troubleshooting in case of the server error.
+ //
+ else
+ {
+ path rsf (dd / "result.manifest");
+
+ try
+ {
+ ofdstream os (rsf);
+
+ // Not being able to stash the result manifest is not a reason to
+ // claim the upload failed. The error is logged nevertheless.
+ //
+ rsm (os);
+
+ os.close ();
+ }
+ catch (const io_error& e)
+ {
+ // Not fatal (see above).
+ //
+ error << "unable to write to '" << rsf << "': " << e;
+ }
+
+ if (sc >= 500 && sc < 600)
+ stash_upload_dir ();
+ }
+ }
+
+ // Send email, if configured. Use the long lines manifest serialization mode
+ // for the convenience of copying/clicking URLs they contain.
+ //
+ // Note that we don't consider the email sending failure to be an upload
+ // failure as the upload data is successfully persisted and the handler is
+ // successfully executed, if configured. One can argue that email can be
+ // essential for the upload processing and missing it would result in the
+ // incomplete upload. In this case it's natural to assume that the web
+ // server error log is monitored and the email sending failure will be
+ // noticed.
+ //
+ const map<string, string>& ue (options_->upload_email ());
+ auto ei (ue.find (type));
+
+ if (ei != ue.end ())
+ try
+ {
+ // Redirect the diagnostics to the web server error log.
+ //
+ sendmail sm ([&trace, this] (const char* args[], size_t n)
+ {
+ l2 ([&]{trace << process_args {args, n};});
+ },
+ 2 /* stderr */,
+ options_->email (),
+ type + " upload (" + request_id + ')',
+ {ei->second});
+
+ // Write the upload request manifest.
+ //
+ bool r (rqm (sm.out, true /* long_lines */));
+ assert (r); // The serialization succeeded once, so can't fail now.
+
+ // Write the upload result manifest.
+ //
+ sm.out << "\n\n";
+
+ // We don't care about the result (see above).
+ //
+ rsm (sm.out, true /* long_lines */);
+
+ sm.out.close ();
+
+ if (!sm.wait ())
+ error << "sendmail " << *sm.exit;
+ }
+ // Handle process_error and io_error (both derive from system_error).
+ //
+ catch (const system_error& e)
+ {
+ error << "sendmail error: " << e;
+ }
+
+ if (!rsm (rs.content (sc, "text/manifest;charset=utf-8")))
+ return respond_error (); // The error description is already logged.
+
+ return true;
+}
diff --git a/mod/mod-upload.hxx b/mod/mod-upload.hxx
new file mode 100644
index 0000000..6cc723b
--- /dev/null
+++ b/mod/mod-upload.hxx
@@ -0,0 +1,41 @@
+// file : mod/mod-upload.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_MOD_UPLOAD_HXX
+#define MOD_MOD_UPLOAD_HXX
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <mod/module-options.hxx>
+#include <mod/build-result-module.hxx>
+
+namespace brep
+{
+ class upload: public build_result_module
+ {
+ public:
+ upload () = default;
+
+ // Create a shallow copy (handling instance) if initialized and a deep
+ // copy (context exemplar) otherwise.
+ //
+ explicit
+ upload (const upload&);
+
+ virtual bool
+ handle (request&, response&);
+
+ virtual const cli::options&
+ cli_options () const {return options::upload::description ();}
+
+ private:
+ virtual void
+ init (cli::scanner&);
+
+ private:
+ shared_ptr<options::upload> options_;
+ };
+}
+
+#endif // MOD_MOD_UPLOAD_HXX
diff --git a/mod/module.cli b/mod/module.cli
index b59158a..a107ffe 100644
--- a/mod/module.cli
+++ b/mod/module.cli
@@ -1,7 +1,11 @@
// file : mod/options.cli -*- C++ -*-
// license : MIT; see accompanying LICENSE file
+include <map>;
+include <regex>;
+
include <libbpkg/manifest.hxx>; // repository_location
+include <libbbot/manifest.hxx>; // interactive_mode
include <web/xhtml/fragment.hxx>;
@@ -17,7 +21,7 @@ namespace brep
{
// Option groups.
//
- class handler
+ class repository_email
{
string email
{
@@ -25,7 +29,10 @@ namespace brep
"Repository email. This email is used for the \cb{From:} header in
emails send by \cb{brep} (for example, build failure notifications)."
}
+ };
+ class repository_url
+ {
string host
{
"<host>",
@@ -44,7 +51,29 @@ namespace brep
Specify '\cb{/}' to use the web server root
(\cb{http://example.org/})."
}
+ };
+
+ class build_email_notification: repository_email, repository_url
+ {
+ std::map<string, build_email> build-toolchain-email
+ {
+ "<name>=<mode>",
+ "Enable or disable package build notification emails. The valid <mode>
+ values are \cb{none}, \cb{latest}, and \cb{all}. If \cb{all} is
+ specified for a toolchain name, then emails are sent according to the
+ \cb{build-*email} package manifest values when all versions of a
+ package are built with this toolchain. If \cb{latest} is specified,
+ then for this toolchain name the emails are only sent for the latest
+ version of a package. If \cb{none} is specified, then no emails are
+ sent for this toolchain name. By default the \cb{latest} mode is
+ assumed. Repeat this option to enable/disable emails for multiple
+ toolchains. See \l{bpkg#manifest-package Package Manifest} for
+ details on \cb{build-*email} values."
+ }
+ };
+ class handler
+ {
string tenant-name = "tenant"
{
"<name>",
@@ -102,14 +131,14 @@ namespace brep
{
"<user>",
"Package database login user name. If not specified, then operating
- system (login) name is used. See also \c{package-db-role}."
+ system (login) name is used. See also \cb{package-db-role}."
}
string package-db-role = "brep"
{
"<user>",
"Package database execution user name. If not empty then the login
- user will be switched (with \c{SET ROLE}) to this user prior to
+ user will be switched (with \cb{SET ROLE}) to this user prior to
executing any statements. If not specified, then \cb{brep} is used."
}
@@ -193,45 +222,96 @@ namespace brep
be specified in seconds. Default is 10 minutes."
}
- size_t build-normal-rebuild-timeout = 86400
+ size_t build-soft-rebuild-timeout = 86400
{
"<seconds>",
- "Time to wait before considering a package for a normal rebuild. Must
- be specified in seconds. Default is 24 hours."
+ "Time to wait before considering a package for a soft rebuild (only to
+ be performed if the build environment or any of the package
+ dependencies have changed). Must be specified in seconds. The special
+ zero value disables soft rebuilds. Default is 24 hours."
}
- size_t build-alt-rebuild-timeout
+ size_t build-alt-soft-rebuild-timeout
{
"<seconds>",
- "Alternative package rebuild timeout to use instead of the normal
- rebuild timeout (see \cb{build-normal-rebuild-timeout} for details)
+ "Alternative package soft rebuild timeout to use instead of the soft
+ rebuild timeout (see \cb{build-soft-rebuild-timeout} for details)
during the time interval specified with the
- \cb{build-alt-rebuild-start} and \cb{build-alt-rebuild-stop} options.
- Must be specified in seconds. Default is the time interval length."
+ \cb{build-alt-soft-rebuild-start} and
+ \cb{build-alt-soft-rebuild-stop} options. Must be specified in
+ seconds. Default is the time interval length plus
+ \c{(\b{build-soft-rebuild-timeout} - 24h)} if soft rebuild timeout
+ is greater than 24 hours (thus the rebuild is only triggered within
+ the last 24 hours of the \cb{build-soft-rebuild-timeout} expiration)."
}
- duration build-alt-rebuild-start
+ duration build-alt-soft-rebuild-start
{
"<hours>:<minutes>",
- "The start time of the alternative package rebuild timeout (see
- \cb{build-alt-rebuild-timeout} for details). Must be specified as
- a time of day in the local timezone. The \cb{build-alt-rebuild-start}
- and \cb{build-alt-rebuild-stop} options must be either both specified
- or absent. If unspecified, then no alternative rebuild timeout will
- be used."
+ "The start time of the alternative package soft rebuild timeout (see
+ \cb{build-alt-soft-rebuild-timeout} for details). Must be specified
+ as a time of day in the local timezone. The
+ \cb{build-alt-soft-rebuild-start} and
+ \cb{build-alt-soft-rebuild-stop} options must be either both
+ specified or absent. If unspecified, then no alternative rebuild
+ timeout will be used."
}
- duration build-alt-rebuild-stop
+ duration build-alt-soft-rebuild-stop
{
"<hours>:<minutes>",
- "The end time of the alternative package rebuild timeout (see
- \cb{build-alt-rebuild-timeout} for details). Must be specified as
- a time of day in the local timezone. If it is less than the
- \cb{build-alt-rebuild-start} option value, then the time interval
- extends through midnight. The \cb{build-alt-rebuild-start} and
- \cb{build-alt-rebuild-stop} options must be either both specified or
- absent. If unspecified, then no alternative rebuild timeout will be
- used."
+ "The end time of the alternative package soft rebuild timeout (see
+ \cb{build-alt-soft-rebuild-timeout} for details). Must be specified
+ as a time of day in the local timezone. If it is less than the
+ \cb{build-alt-soft-rebuild-start} option value, then the time
+ interval extends through midnight. The
+ \cb{build-alt-soft-rebuild-start} and
+ \cb{build-alt-soft-rebuild-stop} options must be either both
+ specified or absent. If unspecified, then no alternative rebuild
+ timeout will be used."
+ }
+
+ size_t build-hard-rebuild-timeout = 604800
+ {
+ "<seconds>",
+ "Time to wait before considering a package for a hard rebuild (to be
+ performed unconditionally). Must be specified in seconds. The special
+ zero value disables hard rebuilds. Default is 7 days."
+ }
+
+ size_t build-alt-hard-rebuild-timeout
+ {
+ "<seconds>",
+ "Alternative package hard rebuild timeout. The semantics is the
+ same as for the \cb{build-alt-soft-rebuild-timeout} option but
+ for the \cb{build-hard-rebuild-timeout} option."
+ }
+
+ duration build-alt-hard-rebuild-start
+ {
+ "<hours>:<minutes>",
+ "The start time of the alternative package hard rebuild timeout (see
+ \cb{build-alt-hard-rebuild-timeout} for details). The semantics is
+ the same as for the \cb{build-alt-soft-rebuild-start} option but
+ for the \cb{build-hard-rebuild-timeout} option."
+ }
+
+ duration build-alt-hard-rebuild-stop
+ {
+ "<hours>:<minutes>",
+ "The end time of the alternative package hard rebuild timeout (see
+ \cb{build-alt-hard-rebuild-timeout} for details). The semantics is
+ the same as for the \cb{build-alt-soft-rebuild-stop} option but
+ for the \cb{build-hard-rebuild-timeout} option."
+ }
+
+ size_t build-queued-timeout = 30
+ {
+ "<seconds>",
+ "Time to wait before assuming the \cb{queued} notifications are
+ delivered for package CI requests submitted via third-party services
+ (GitHub, etc). During this time a package is not considered for a
+ build. Must be specified in seconds. Default is 30 seconds."
}
};
@@ -241,14 +321,14 @@ namespace brep
{
"<user>",
"Build database login user name. If not specified, then operating
- system (login) name is used. See also \c{build-db-role}."
+ system (login) name is used. See also \cb{build-db-role}."
}
string build-db-role = "brep"
{
"<user>",
"Build database execution user name. If not empty then the login
- user will be switched (with \c{SET ROLE}) to this user prior to
+ user will be switched (with \cb{SET ROLE}) to this user prior to
executing any statements. If not specified, then \cb{brep} is used."
}
@@ -298,6 +378,82 @@ namespace brep
}
};
+ class build_upload
+ {
+ std::map<string, dir_path> upload-data
+ {
+ "<type>=<dir>",
+ "The directory to save upload data to for the specified upload type.
+ If unspecified, the build artifacts upload functionality will be
+ disabled for this type. See \l{brep The \cb{build2} Repository
+ Interface Manual} for more information on build artifacts upload.
+
+ Note that the directory path must be absolute and the directory
+ itself must exist and have read, write, and execute permissions
+ granted to the user that runs the web server."
+ }
+
+ std::map<string, size_t> upload-max-size
+ {
+ "<type>=<bytes>",
+ "The maximum size of the upload data accepted for the specified upload
+ type. Note that currently the entire upload request is read into
+ memory. The default is 10M."
+ }
+
+ std::map<string, string> upload-email
+ {
+ "<type>=<email>",
+ "The build artifacts upload email. If specified, the upload request
+ and result manifests will be sent to this address. See \l{brep The
+ \cb{build2} Repository Interface Manual} for more information."
+ }
+
+ std::map<string, path> upload-handler
+ {
+ "<type>=<path>",
+ "The handler program to be executed on build artifacts upload of the
+ specified type. The handler is executed as part of the HTTP request
+ and is passed additional arguments that can be specified with
+ \cb{upload-handler-argument} followed by the absolute path to the
+ upload directory (\cb{upload-data}). See \l{brep The \cb{build2}
+ Repository Interface Manual} for more information. Note that the
+ program path must be absolute."
+ }
+
+ std::multimap<string, string> upload-handler-argument
+ {
+ "<type>=<arg>",
+ "Additional arguments to be passed to the upload handler program for
+ the specified upload type (see \cb{upload-handler} for details).
+ Repeat this option to specify multiple arguments."
+ }
+
+ std::map<string, size_t> upload-handler-timeout
+ {
+ "<type>=<seconds>",
+ "The upload handler program timeout in seconds for the specified
+ upload type. If specified and the handler does not exit in the
+ allotted time, then it is killed and its termination is treated as
+ abnormal."
+ }
+
+ std::multimap<string, string> upload-toolchain-exclude
+ {
+ "<type>=<name>",
+ "Disable upload of the specified type for the specified toolchain
+ name. Repeat this option to disable uploads for multiple toolchains."
+ }
+
+ std::multimap<string, string> upload-repository-exclude
+ {
+ "<type>=<name>",
+ "Disable upload of the specified type for packages from the repository
+ with the specified canonical name. Repeat this option to disable
+ uploads for multiple repositories."
+ }
+ };
+
class page
{
web::xhtml::fragment logo
@@ -307,7 +463,7 @@ namespace brep
edge. The value is treated as an XHTML5 fragment."
}
- vector<page_menu> menu;
+ vector<page_menu> menu
{
"<label=link>",
"Web page menu. Each entry is displayed in the page header in the
@@ -341,7 +497,7 @@ namespace brep
The default is 500 (~ 80 characters * 6 lines)."
}
- uint16_t package-changes = 5000;
+ uint16_t package-changes = 5000
{
"<len>",
"Number of package changes characters to display in brief pages. The
@@ -352,7 +508,7 @@ namespace brep
// Handler options.
//
- class packages: search, package_db, page, handler
+ class packages: search, package_db, page, repository_url, handler
{
string search-title = "Packages"
{
@@ -360,24 +516,63 @@ namespace brep
"Package search page title. It is placed inside XHTML5 <title>
element."
}
+
+ web::xhtml::fragment search-description
+ {
+ "<xhtml>",
+ "Package search page description. If specified, it is displayed
+ before the search form on the first page only. The value is
+ treated as an XHTML5 fragment."
+ }
};
- class package_details: package, search, package_db, page, handler
+ class package_details: package, package_db,
+ search,
+ page,
+ repository_url,
+ handler
{
};
class package_version_details: package, package_db,
build, build_db,
page,
+ repository_url,
handler
{
+ dir_path bindist-root
+ {
+ "<dir>",
+ "The root directory where the uploaded binary distribution packages
+ are saved to under the following directory hierarchy:
+
+ \
+ [<tenant>/]<distribution>/<os-release>/<project>/<package>/<version>/<package-config>
+ \
+
+ The package configuration directory symlinks that match these paths
+ are mapped to web URLs based on the \cb{bindist-url} value and
+ displayed on the package version details page. If this option is
+ specified, then \cb{bindist-url} must be specified as well."
+ }
+
+ string bindist-url
+ {
+ "<url>",
+ "The root URL of the directory specified with the \cb{bindist-root}
+ option. This option must be specified if \cb{bindist-root} is
+ specified."
+ }
};
- class repository_details: package_db, page, handler
+ class repository_details: package_db, page, repository_url, handler
{
};
- class build_task: build, build_db, handler
+ class build_task: build, build_db,
+ build_upload,
+ build_email_notification,
+ handler
{
size_t build-task-request-max-size = 102400
{
@@ -394,11 +589,35 @@ namespace brep
"Time to wait before considering the expected task result lost. Must be
specified in seconds. The default is 3 hours."
}
+
+ vector<pair<std::regex, string>> build-interactive-login
+ {
+ "</regex/replacement/>",
+ "Regular expressions for transforming the interactive build login
+ information, for example, into the actual command that can be used
+ by the user. The regular expressions are matched against the
+ \"<agent>\ <interactive-login>\" string containing the respective
+ task request manifest values. The first matching expression is used
+ for the transformation. If no expression matches, then the task
+ request is considered invalid, unless no expressions are specified.
+ Repeat this option to specify multiple expressions."
+ }
+
+ build_order build-package-order = build_order::stable
+ {
+ "<order>",
+ "Order in which packages are considered for build. The valid <order>
+ values are \cb{stable} and \cb{random}. If not specified, then
+ \cb{stable} is assumed. Note that interactive builds are always
+ preferred."
+ }
};
- class build_result: build, package_db, build_db, handler
+ class build_result: build, build_db,
+ build_email_notification,
+ handler
{
- size_t build-result-request-max-size = 10240000
+ size_t build-result-request-max-size = 10485760
{
"<bytes>",
"The maximum size of the build result manifest accepted. Note that the
@@ -408,7 +627,7 @@ namespace brep
}
};
- class build_log: build, build_db, handler
+ class build_log: build, build_db, repository_url, handler
{
};
@@ -416,7 +635,7 @@ namespace brep
{
};
- class builds: build, build_db, page, handler
+ class builds: build, build_db, page, repository_url, handler
{
uint16_t build-page-entries = 20
{
@@ -431,7 +650,7 @@ namespace brep
}
};
- class build_configs: build, page, handler
+ class build_configs: build, page, repository_url, handler
{
uint16_t build-config-page-entries = 20
{
@@ -446,7 +665,7 @@ namespace brep
}
};
- class submit: page, handler
+ class submit: page, repository_email, repository_url, handler
{
dir_path submit-data
{
@@ -526,7 +745,7 @@ namespace brep
}
};
- class ci: page, handler
+ class ci_start: repository_email
{
dir_path ci-data
{
@@ -541,15 +760,6 @@ namespace brep
granted to the user that runs the web server."
}
- path ci-form
- {
- "<file>",
- "The package CI form fragment. If specified, then its contents are
- treated as an XHTML5 fragment that is inserted into the <body>
- element of the CI page. If unspecified, then no CI page will be
- displayed. Note that the file path must be absolute."
- }
-
string ci-email
{
"<email>",
@@ -586,14 +796,44 @@ namespace brep
}
};
- class repository_root: handler
+ class ci_cancel
+ {
+ };
+
+ class ci: ci_start, page, repository_url, handler
+ {
+ // Classic CI-specific options.
+ //
+
+ path ci-form
+ {
+ "<file>",
+ "The package CI form fragment. If specified, then its contents are
+ treated as an XHTML5 fragment that is inserted into the <body>
+ element of the CI page. If unspecified, then no CI page will be
+ displayed. Note that the file path must be absolute."
+ }
+ };
+
+ class ci_github: ci_start, ci_cancel, build_db, handler
+ {
+ // GitHub CI-specific options (e.g., request timeout when invoking
+ // GitHub APIs).
+ //
+ };
+
+ class upload: build, build_db, build_upload, repository_email, handler
+ {
+ };
+
+ class repository_root: repository_url, handler
{
string root-global-view = "packages"
{
"<service>",
"The default view to display for the global repository root. The
- <service> argument is one of the supported services (\c{packages},
- \c{builds}, \c{submit}, \c{ci}, etc). The default service is
+ <service> argument is one of the supported services (\cb{packages},
+ \cb{builds}, \cb{submit}, \cb{ci}, etc). The default service is
packages."
}
@@ -601,8 +841,8 @@ namespace brep
{
"<service>",
"The default view to display for the tenant repository root. The
- <service> argument is one of the supported services (\c{packages},
- \c{builds}, \c{submit}, \c{ci}, etc). The default service is
+ <service> argument is one of the supported services (\cb{packages},
+ \cb{builds}, \cb{submit}, \cb{ci}, etc). The default service is
packages."
}
};
@@ -659,9 +899,14 @@ namespace brep
class build_task
{
- // Package repository canonical name (note: including pkg: type).
+ // Only consider packages from repositories with these canonical names
+ // (note: including pkg: type).
//
vector<string> repository | r;
+
+ // Only consider tenants with this interactive build mode.
+ //
+ bbot::interactive_mode interactive = bbot::interactive_mode::both;
};
class build_result
@@ -694,9 +939,17 @@ namespace brep
//
string version | pv;
+ // Package build target.
+ //
+ string target | tg;
+
+ // Target build configuration.
+ //
+ string target_config | tc;
+
// Package build configuration.
//
- string configuration | cf;
+ string package_config | pc;
// Toolchain name.
//
@@ -730,13 +983,10 @@ namespace brep
//
// https://cppget.org/?builds=bbot
//
- // To support the already distributed URLs the name_legacy (pn) parameter
- // overrides the name (builds) parameter, if present. Note that the
- // builds parameter is renamed to '_' by the root handler (see the
- // request_proxy class for details).
+ // Note that the builds parameter is renamed to '_' by the root handler
+ // (see the request_proxy class for details).
//
string name | _;
- string name_legacy | pn;
// Package version. If empty or *, then no version constraint is applied.
// Otherwise the build package version must match the value exactly.
@@ -747,22 +997,22 @@ namespace brep
// toolchain constraint is applied. Otherwise the build toolchain name
// and version must match the value exactly.
//
- string toolchain | tc = "*";
+ string toolchain | th = "*";
- // Package build configuration name wildcard. An empty value is treated
- // the same way as *.
+ // Package build target wildcard. An empty value is treated the same way
+ // as *.
//
- string configuration | cf;
+ string target | tg;
- // Package build machine name wildcard. An empty value is treated the
- // same way as *.
+ // Package build target configuration name wildcard. An empty value is
+ // treated the same way as *.
//
- string machine | mn;
+ string target_config | tc;
- // Package build target wildcard. An empty value is treated the same way
- // as *.
+ // Package build package configuration name wildcard. An empty value is
+ // treated the same way as *.
//
- string target | tg;
+ string package_config | pc;
// Package build result. If *, then no build result constraint is
// applied. Otherwise the value is supposed to be the one of the
@@ -775,10 +1025,13 @@ namespace brep
class build_configs
{
+ // By default, display all build configurations except those which
+ // belong to the 'hidden' class.
+ //
// Note that the build-configs parameter is renamed to '_' by the root
// handler (see the request_proxy class for details).
//
- string class_name | _ = "all";
+ string class_name | _;
// Display build configurations list starting from this page.
//
@@ -837,9 +1090,53 @@ namespace brep
//
string overrides;
+ // Interactive build execution breakpoint.
+ //
+ string interactive;
+
// Submission simulation outcome.
//
string simulate;
};
+
+ // Parameters other than challenge must be all present.
+ //
+ // Note also that besides these parameters there can be others. We don't
+ // recognize their semantics and just save them to the upload request
+ // manifest.
+ //
+ class upload
+ {
+ // Upload type.
+ //
+ // Note that the upload parameter is renamed to '_' by the root handler
+ // (see the request_proxy class for details).
+ //
+ string type | _;
+
+ // Session id as returned by brep in the task response.
+ //
+ string session;
+
+ // Answer to the private key challenge as posed by brep in the task
+ // response. It must be present only if the challenge value was present
+ // in the task response.
+ //
+ string challenge;
+
+ // Upload instance name.
+ //
+ string instance;
+
+ // Package archive file name. Must be <input type="file"/>.
+ //
+ // Note that it can potentially be not just a name but a file path.
+ //
+ string archive;
+
+ // Package archive file SHA256 checksum.
+ //
+ string sha256sum;
+ };
}
}
diff --git a/mod/module.cxx b/mod/module.cxx
index 06799d7..c8d0595 100644
--- a/mod/module.cxx
+++ b/mod/module.cxx
@@ -241,23 +241,46 @@ namespace brep
initialized_ = m.initialized_;
}
-// For function func declared like this:
-// using B = std::string (*)(int);
-// using A = B (*)(int,int);
-// A func(B (*)(char),B (*)(wchar_t));
-// __PRETTY_FUNCTION__ looks like this:
-// virtual std::string (* (* brep::search::func(std::string (* (*)(char))(int)
-// ,std::string (* (*)(wchar_t))(int)) const)(int, int))(int)
-//
+ // Here are examples of __PRETTY_FUNCTION__ for some function declarations:
+ //
+ // 1) virtual bool brep::search::handle (web::request&, web::response&);
+ //
+ // virtual bool brep::search::handle(web::request&, web::response&)
+ //
+ // 2) using B = std::string (*) (int);
+ // virtual B brep::search::func ();
+ //
+ // virtual std::string (* brep::search::func())(int)
+ //
+ // 3) using B = std::string (*) (int);
+ // using A = B (*) (int,int);
+ // virtual A brep::search::func (B (*) (char), B (*) (wchar_t));
+ //
+ // virtual std::string (* (* brep::search::func(std::string (* (*)(char))(int), std::string (* (*)(wchar_t))(int)))(int, int))(int)
+ //
+ // 4) using X = std::function<butl::optional<std::string> (int)> (*) (std::function<butl::optional<std::string> (long)>);
+ // X brep::search::func (std::function<butl::optional<std::string> (char)> (*) (std::function<butl::optional<std::string> (wchar_t)>));
+ //
+ // std::function<std::optional<std::__cxx11::basic_string<char> >(int)> (* brep::search::func(std::function<std::optional<std::__cxx11::basic_string<char> >(char)> (*)(std::function<std::optional<std::__cxx11::basic_string<char> >(wchar_t)>)))(std::function<std::optional<std::__cxx11::basic_string<char> >(long int)>)
+ //
+ // 5) using X = std::function<butl::optional<std::string> (int)> (*) (std::function<butl::optional<std::string> (long)>);
+ // using Y = X (*) (int);
+ // Y brep::search::func (const char*);
+ //
+ // std::function<std::optional<std::__cxx11::basic_string<char> >(int)> (* (* brep::search::func(const char*))(int))(std::function<std::optional<std::__cxx11::basic_string<char> >(long int)>)
+ //
string handler::
func_name (const char* pretty_name)
{
- const char* e (strchr (pretty_name, ')'));
+ // Position at the last ')' character, which is either the end of the
+ // function's arguments list or the returned function type argument list.
+ //
+ const char* e (strrchr (pretty_name, ')'));
if (e && e > pretty_name)
{
- // Position e at last matching '(' which is the beginning of the
- // argument list..
+ // Position e at the matching '(' character which is the beginning of
+ // the mentioned argument list.
//
size_t d (1);
@@ -273,11 +296,15 @@ namespace brep
if (!d && e > pretty_name)
{
- // Position e at the character following the function name.
+ // Position e at the character which follows the function name.
//
- while (e > pretty_name &&
- (*e != '(' || *(e - 1) == ' ' || *(e - 1) == ')'))
- --e;
+ // Specifically, go further to the left and stop at the '(' character
+ // which is preceded by the character other than ' ', ')', or '>'.
+ //
+ for (char c;
+ e > pretty_name &&
+ !(*e == '(' && (c = *(e - 1)) != ' ' && c != ')' && c != '>');
+ --e) ;
if (e > pretty_name)
{
@@ -406,4 +433,10 @@ namespace brep
else
throw cli::eos_reached ();
}
+
+ size_t handler::name_value_scanner::
+ position ()
+ {
+ return (i_ - name_values_.begin ()) * 2 + (name_ ? 0 : 1);
+ }
}
diff --git a/mod/module.hxx b/mod/module.hxx
index b3ed67b..f3e062e 100644
--- a/mod/module.hxx
+++ b/mod/module.hxx
@@ -135,6 +135,9 @@ namespace brep
virtual void
skip ();
+ virtual std::size_t
+ position ();
+
private:
const name_values& name_values_;
name_values::const_iterator i_;
@@ -191,7 +194,7 @@ namespace brep
log* log_ {nullptr}; // Diagnostics backend provided by the web server.
private:
- // Extract function name from a __PRETTY_FUNCTION__.
+ // Extract the fully-qualified function name from a __PRETTY_FUNCTION__.
// Throw invalid_argument if fail to parse.
//
static string
diff --git a/mod/options-types.hxx b/mod/options-types.hxx
index 4aa573f..f2b059b 100644
--- a/mod/options-types.hxx
+++ b/mod/options-types.hxx
@@ -25,6 +25,19 @@ namespace brep
page_menu () = default;
page_menu (string b, string l): label (move (b)), link (move (l)) {}
};
+
+ enum class build_order
+ {
+ stable,
+ random
+ };
+
+ enum class build_email
+ {
+ none,
+ latest, // Only send emails for the latest package versions.
+ all
+ };
}
#endif // MOD_OPTIONS_TYPES_HXX
diff --git a/mod/page.cxx b/mod/page.cxx
index 1e317f0..bc2e42d 100644
--- a/mod/page.cxx
+++ b/mod/page.cxx
@@ -7,10 +7,10 @@
#include <cmark-gfm-extension_api.h>
#include <set>
-#include <ios> // hex, uppercase, right
+#include <ios> // hex, uppercase, right
#include <sstream>
-#include <iomanip> // setw(), setfill()
-#include <algorithm> // min(), find()
+#include <iomanip> // setw(), setfill()
+#include <iterator> // back_inserter()
#include <libstudxml/serializer.hxx>
@@ -36,6 +36,20 @@ using namespace web::xhtml;
//
namespace brep
{
+ static inline string
+ label_to_class (const string& label)
+ {
+ if (label.find (' ') == string::npos)
+ return label;
+
+ string r;
+ transform (label.begin (), label.end (),
+ back_inserter (r),
+ [] (char c) {return c != ' ' ? c : '-';});
+
+ return r;
+ }
+
// CSS_LINKS
//
static const dir_path css_path ("@");
@@ -123,9 +137,17 @@ namespace brep
void DIV_COUNTER::
operator() (serializer& s) const
{
- s << DIV(ID="count")
- << count_ << " "
- << (count_ % 10 == 1 && count_ % 100 != 11 ? singular_ : plural_)
+ s << DIV(ID="count");
+
+ if (count_)
+ s << *count_;
+ else
+ s << '?';
+
+ s << ' '
+ << (count_ && *count_ % 10 == 1 && *count_ % 100 != 11
+ ? singular_
+ : plural_)
<< ~DIV;
}
@@ -134,7 +156,8 @@ namespace brep
void TR_VALUE::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD << SPAN(CLASS="value") << value_ << ~SPAN << ~TD
<< ~TR;
@@ -145,7 +168,8 @@ namespace brep
void TR_INPUT::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD
<< INPUT(TYPE="text", NAME=name_);
@@ -169,7 +193,8 @@ namespace brep
void TR_SELECT::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD
<< SELECT(NAME=name_);
@@ -220,15 +245,9 @@ namespace brep
<< A
<< HREF
<< tenant_dir (root_, tenant_) /
- path (mime_url_encode (name_.string (), false));
-
- // Propagate search criteria to the package details page.
- //
- if (!query_.empty ())
- s << "?q=" << query_;
-
- s << ~HREF
- << name_
+ path (mime_url_encode (name_.string (), false))
+ << ~HREF
+ << name_
<< ~A
<< ~SPAN
<< ~TD
@@ -416,47 +435,75 @@ namespace brep
if (!dependencies_.empty ())
s << "; ";
- for (const auto& d: dependencies_)
+ for (const dependency_alternatives& das: dependencies_)
{
- if (&d != &dependencies_[0])
+ if (&das != &dependencies_[0])
s << ", ";
- if (d.conditional)
- s << "?";
-
- if (d.buildtime)
+ if (das.buildtime)
s << "*";
- // Suppress package name duplicates.
+ // Suppress dependency alternative duplicates, like in
+ // `{foo bar} < 1.1 | {foo bar} > 1.5`.
+ //
+ // Return the dependency package name space-separated list.
//
- set<package_name> names;
- for (const auto& da: d)
- names.emplace (da.name);
+ auto deps_list = [] (const dependency_alternative& da)
+ {
+ string r;
+ for (const dependency& d: da)
+ {
+ if (!r.empty ())
+ r += ' ';
+
+ r += d.name.string ();
+ }
- bool mult (names.size () > 1);
+ return r;
+ };
+
+ set<string> alternatives;
+ for (const dependency_alternative& da: das)
+ alternatives.insert (deps_list (da));
+
+ // Note that we may end up with a single package name in parentheses, if
+ // its duplicates were suppressed. This, however, may be helpful,
+ // indicating that there are some alternatives for the package.
+ //
+ bool mult (das.size () > 1 ||
+ (das.size () == 1 && das[0].size () > 1));
if (mult)
- s << "(";
+ s << '(';
bool first (true);
- for (const auto& da: d)
+ for (const dependency_alternative& da: das)
{
- const package_name& n (da.name);
- if (names.find (n) != names.end ())
- {
- names.erase (n);
+ auto i (alternatives.find (deps_list (da)));
- if (first)
- first = false;
- else
- s << " | ";
+ if (i == alternatives.end ())
+ continue;
+
+ alternatives.erase (i);
+
+ if (!first)
+ s << " | ";
+ else
+ first = false;
+
+ for (const dependency& d: da)
+ {
+ if (&d != &da[0])
+ s << ' ';
// Try to display the dependency as a link if it is resolved.
// Otherwise display it as plain text.
//
- if (da.package != nullptr)
+ const package_name& n (d.name);
+
+ if (d.package != nullptr)
{
- shared_ptr<package> p (da.package.load ());
+ shared_ptr<package> p (d.package.load ());
assert (p->internal () || !p->other_repositories.empty ());
shared_ptr<repository> r (
@@ -479,10 +526,13 @@ namespace brep
else
s << n;
}
+
+ if (da.enable)
+ s << " ?";
}
if (mult)
- s << ")";
+ s << ')';
}
s << ~SPAN
@@ -507,25 +557,25 @@ namespace brep
<< SPAN(CLASS="value")
<< requirements_.size () << "; ";
- for (const auto& r: requirements_)
+ for (const auto& ras: requirements_)
{
- if (&r != &requirements_[0])
+ if (&ras != &requirements_[0])
s << ", ";
- if (r.conditional)
- s << "?";
+ if (ras.buildtime)
+ s << '*';
- if (r.buildtime)
- s << "*";
-
- if (r.empty ())
+ // If this is a simple requirement without id, then print the comment
+ // first word.
+ //
+ if (ras.simple () && ras[0][0].empty ())
{
- // If there is no requirement alternatives specified, then print the
- // comment first word.
- //
- const auto& c (r.comment);
+ const auto& c (ras.comment);
if (!c.empty ())
{
+ if (ras[0].enable)
+ s << "? ";
+
auto n (c.find (' '));
s << string (c, 0, n);
@@ -535,21 +585,31 @@ namespace brep
}
else
{
- bool mult (r.size () > 1);
+ bool mult (ras.size () > 1 ||
+ (ras.size () == 1 && ras[0].size () > 1));
if (mult)
- s << "(";
+ s << '(';
- for (const auto& ra: r)
+ for (const auto& ra: ras)
{
- if (&ra != &r[0])
+ if (&ra != &ras[0])
s << " | ";
- s << ra;
+ for (const string& r: ra)
+ {
+ if (&r != &ra[0])
+ s << ' ';
+
+ s << r;
+ }
+
+ if (ra.enable)
+ s << " ?";
}
if (mult)
- s << ")";
+ s << ')';
}
}
@@ -563,7 +623,8 @@ namespace brep
void TR_URL::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD
<< SPAN(CLASS="value");
@@ -593,7 +654,8 @@ namespace brep
void TR_EMAIL::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD
<< SPAN(CLASS="value")
@@ -643,32 +705,22 @@ namespace brep
<< A
<< HREF
<< tenant_dir (root_, tenant_) << "?about#"
- << mime_url_encode (html_id (name_), false)
+ << mime_url_encode (html_id (location_.canonical_name ()), false)
<< ~HREF
- << name_
+ << location_
<< ~A
<< ~SPAN
<< ~TD
<< ~TR;
}
- // TR_LOCATION
- //
- void TR_LOCATION::
- operator() (serializer& s) const
- {
- s << TR(CLASS="location")
- << TH << "location" << ~TH
- << TD << SPAN(CLASS="value") << location_ << ~SPAN << ~TD
- << ~TR;
- }
-
// TR_LINK
//
void TR_LINK::
operator() (serializer& s) const
{
- s << TR(CLASS=label_)
+ string c (label_to_class (label_));
+ s << TR(CLASS=c)
<< TH << label_ << ~TH
<< TD
<< SPAN(CLASS="value") << A(HREF=url_) << text_ << ~A << ~SPAN
@@ -697,8 +749,24 @@ namespace brep
<< TD
<< SPAN(CLASS="value");
+ // Print the ' | ' separator if this is not the first item and reset the
+ // `first` flag to false otherwise.
+ //
+ bool first (true);
+ auto separate = [&s, &first] ()
+ {
+ if (first)
+ first = false;
+ else
+ s << " | ";
+ };
+
if (build_.state == build_state::building)
- s << SPAN(CLASS="building") << "building" << ~SPAN << " | ";
+ {
+ separate ();
+
+ s << SPAN(CLASS="building") << "building" << ~SPAN;
+ }
else
{
// If no unsuccessful operation results available, then print the
@@ -711,7 +779,10 @@ namespace brep
if (build_.results.empty () || *build_.status == result_status::success)
{
assert (build_.status);
- s << SPAN_BUILD_RESULT_STATUS (*build_.status) << " | ";
+
+ separate ();
+
+ s << SPAN_BUILD_RESULT_STATUS (*build_.status);
}
if (!build_.results.empty ())
@@ -719,6 +790,9 @@ namespace brep
for (const auto& r: build_.results)
{
if (r.status != result_status::success)
+ {
+ separate ();
+
s << SPAN_BUILD_RESULT_STATUS (r.status) << " ("
<< A
<< HREF
@@ -726,26 +800,33 @@ namespace brep
<< ~HREF
<< r.operation
<< ~A
- << ") | ";
+ << ")";
+ }
}
+ separate ();
+
s << A
<< HREF << build_log_url (host_, root_, build_) << ~HREF
<< "log"
- << ~A
- << " | ";
+ << ~A;
}
}
- if (build_.force == (build_.state == build_state::building
- ? force_state::forcing
- : force_state::forced))
- s << SPAN(CLASS="pending") << "pending" << ~SPAN;
- else
- s << A
- << HREF << build_force_url (host_, root_, build_) << ~HREF
- << "rebuild"
- << ~A;
+ if (!archived_)
+ {
+ separate ();
+
+ if (build_.force == (build_.state == build_state::building
+ ? force_state::forcing
+ : force_state::forced))
+ s << SPAN(CLASS="pending") << "pending" << ~SPAN;
+ else
+ s << A
+ << HREF << build_force_url (host_, root_, build_) << ~HREF
+ << "rebuild"
+ << ~A;
+ }
s << ~SPAN
<< ~TD
@@ -873,14 +954,16 @@ namespace brep
void DIV_TEXT::
operator() (serializer& s) const
{
- switch (type_)
+ const string& t (text_.text);
+
+ switch (text_.type)
{
case text_type::plain:
{
// To keep things regular we wrap the preformatted text into <div>.
//
s << DIV(ID=id_, CLASS="plain");
- serialize_pre_text (s, text_, length_, url_, "" /* id */);
+ serialize_pre_text (s, t, length_, url_, "" /* id */);
s << ~DIV;
break;
}
@@ -900,9 +983,9 @@ namespace brep
// calls to fail is the inability to allocate memory. Unfortunately,
// instead of reporting the failure to the caller, the API issues
// diagnostics to stderr and aborts the process. Let's decrease the
- // probability of such an event by limiting the text size to 64K.
+ // probability of such an event by limiting the text size to 1M.
//
- if (text_.size () > 64 * 1024)
+ if (t.size () > 1024 * 1024)
{
print_error (what_ + " is too long");
return;
@@ -914,37 +997,38 @@ namespace brep
{
// Parse Markdown into the AST.
//
+ // Note that the footnotes extension needs to be enabled via the
+ // CMARK_OPT_FOOTNOTES flag rather than the
+ // cmark_parser_attach_syntax_extension() function call.
+ //
unique_ptr<cmark_parser, void (*)(cmark_parser*)> parser (
- cmark_parser_new (CMARK_OPT_DEFAULT | CMARK_OPT_VALIDATE_UTF8),
+ cmark_parser_new (CMARK_OPT_DEFAULT |
+ CMARK_OPT_FOOTNOTES |
+ CMARK_OPT_VALIDATE_UTF8),
[] (cmark_parser* p) {cmark_parser_free (p);});
// Enable GitHub extensions in the parser, if requested.
//
- if (type_ == text_type::github_mark)
+ if (text_.type == text_type::github_mark)
{
auto add = [&parser] (const char* ext)
- {
- cmark_syntax_extension* e (
- cmark_find_syntax_extension (ext));
+ {
+ cmark_syntax_extension* e (
+ cmark_find_syntax_extension (ext));
- // Built-in extension is only expected.
- //
- assert (e != nullptr);
+ // Built-in extension is only expected.
+ //
+ assert (e != nullptr);
- cmark_parser_attach_syntax_extension (parser.get (), e);
- };
+ cmark_parser_attach_syntax_extension (parser.get (), e);
+ };
add ("table");
add ("strikethrough");
add ("autolink");
-
- // Somehow feels unsafe (there are some nasty warnings when
- // upstream's tasklist.c is compiled), so let's disable for now.
- //
- // add ("tasklist");
}
- cmark_parser_feed (parser.get (), text_.c_str (), text_.size ());
+ cmark_parser_feed (parser.get (), t.c_str (), t.size ());
unique_ptr<cmark_node, void (*)(cmark_node*)> doc (
cmark_parser_finish (parser.get ()),
diff --git a/mod/page.hxx b/mod/page.hxx
index cc9840e..7329e2d 100644
--- a/mod/page.hxx
+++ b/mod/page.hxx
@@ -82,21 +82,24 @@ namespace brep
// Generate counter element.
//
- // It could be redunant to distinguish between singular and plural word forms
- // if it wouldn't be so cheap in English, and phrase '1 Packages' wouldn't
- // look that ugly.
+ // If the count argument is nullopt, then it is assumed that the count is
+ // unknown and the '?' character is printed instead of the number.
+ //
+ // Note that it could be redundant to distinguish between singular and plural
+ // word forms if it wouldn't be so cheap in English, and phrase '1 Packages'
+ // wouldn't look that ugly.
//
class DIV_COUNTER
{
public:
- DIV_COUNTER (size_t c, const char* s, const char* p)
+ DIV_COUNTER (optional<size_t> c, const char* s, const char* p)
: count_ (c), singular_ (s), plural_ (p) {}
void
operator() (xml::serializer&) const;
private:
- size_t count_;
+ optional<size_t> count_;
const char* singular_;
const char* plural_;
};
@@ -193,24 +196,19 @@ namespace brep
const string& tenant_;
};
- // Generate package name element with an optional search criteria. The
- // search string should be url-encoded, if specified.
+ // Generate package name element.
//
class TR_NAME
{
public:
- TR_NAME (const package_name& n,
- const string& q,
- const dir_path& r,
- const string& t)
- : name_ (n), query_ (q), root_ (r), tenant_ (t) {}
+ TR_NAME (const package_name& n, const dir_path& r, const string& t)
+ : name_ (n), root_ (r), tenant_ (t) {}
void
operator() (xml::serializer&) const;
private:
const package_name& name_;
- const string& query_;
const dir_path& root_;
const string& tenant_;
};
@@ -424,32 +422,20 @@ namespace brep
class TR_REPOSITORY
{
public:
- TR_REPOSITORY (const string& n, const dir_path& r, const string& t)
- : name_ (n), root_ (r), tenant_ (t) {}
+ TR_REPOSITORY (const repository_location& l,
+ const dir_path& r,
+ const string& t)
+ : location_ (l), root_ (r), tenant_ (t) {}
void
operator() (xml::serializer&) const;
private:
- const string& name_;
+ const repository_location& location_;
const dir_path& root_;
const string& tenant_;
};
- // Generate repository location element.
- //
- class TR_LOCATION
- {
- public:
- TR_LOCATION (const repository_location& l): location_ (l) {}
-
- void
- operator() (xml::serializer&) const;
-
- private:
- const repository_location& location_;
- };
-
// Generate link element.
//
class TR_LINK
@@ -486,14 +472,23 @@ namespace brep
class TR_BUILD_RESULT
{
public:
- TR_BUILD_RESULT (const build& b, const string& h, const dir_path& r):
- build_ (b), host_ (h), root_ (r) {}
+ TR_BUILD_RESULT (const build& b,
+ bool a,
+ const string& h,
+ const dir_path& r):
+ build_ (b), archived_ (a), host_ (h), root_ (r)
+ {
+ // We don't expect a queued build to ever be displayed.
+ //
+ assert (build_.state != build_state::queued);
+ }
void
operator() (xml::serializer&) const;
private:
const build& build_;
+ bool archived_;
const string& host_;
const dir_path& root_;
};
@@ -599,16 +594,14 @@ namespace brep
public:
// Generate a full text element.
//
- DIV_TEXT (const string& t,
- text_type tp,
+ DIV_TEXT (const typed_text& t,
bool st,
const string& id,
const string& what,
const basic_mark& diag)
: text_ (t),
- type_ (tp),
strip_title_ (st),
- length_ (t.size ()),
+ length_ (t.text.size ()),
url_ (nullptr),
id_ (id),
what_ (what),
@@ -618,8 +611,7 @@ namespace brep
// Generate a brief text element.
//
- DIV_TEXT (const string& t,
- text_type tp,
+ DIV_TEXT (const typed_text& t,
bool st,
size_t l,
const string& u,
@@ -627,7 +619,6 @@ namespace brep
const string& what,
const basic_mark& diag)
: text_ (t),
- type_ (tp),
strip_title_ (st),
length_ (l),
url_ (&u),
@@ -641,8 +632,7 @@ namespace brep
operator() (xml::serializer&) const;
private:
- const string& text_;
- text_type type_;
+ const typed_text& text_;
bool strip_title_;
size_t length_;
const string* url_; // Full page url.
diff --git a/mod/tenant-service.hxx b/mod/tenant-service.hxx
new file mode 100644
index 0000000..9205f76
--- /dev/null
+++ b/mod/tenant-service.hxx
@@ -0,0 +1,155 @@
+// file : mod/tenant-service.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_TENANT_SERVICE_HXX
+#define MOD_TENANT_SERVICE_HXX
+
+#include <map>
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <libbrep/build.hxx>
+
+#include <mod/diagnostics.hxx>
+
+namespace brep
+{
+ class tenant_service_base
+ {
+ public:
+ virtual ~tenant_service_base () = default;
+ };
+
+ // Possible build notifications:
+ //
+ // queued
+ // building
+ // built
+ //
+ // Possible transitions:
+ //
+ // -> queued
+ // queued -> building
+ // building -> queued (interrupted & re-queued due to higher priority task)
+ // building -> built
+ // built -> queued (periodic or user-forced rebuild)
+ //
+ // While the implementation tries to make sure the notifications arrive in
+ // the correct order, this is currently done by imposing delays (some
+ // natural, such as building->built, and some artificial, such as
+ // queued->building). As a result, it is unlikely but possible to be notified
+ // about the state transitions in the wrong order, especially if the
+ // notifications take a long time. To minimize the chance of this happening,
+ // the service implementation should strive to batch the queued state
+ // notifications (of which there could be hundreds) in a single request if
+ // at all possible. Also, if supported by the third-party API, it makes
+ // sense for the implementation to protect against overwriting later states
+ // with earlier. For example, if it's possible to place a condition on a
+ // notification, it makes sense to only set the state to queued if none of
+ // the later states (e.g., building) are already in effect.
+ //
+ // Note also that it's possible for the build to get deleted at any stage
+ // without any further notifications. This can happen, for example, due to
+ // data retention timeout or because the build configuration (buildtab
+ // entry) is no longer present. There is no explicit `deleted` transition
+ // notification because such situations (i.e., when a notification sequence
+ // is abandoned half way) are not expected to arise ordinarily in a
+ // properly-configured brep instance. And the third-party service is
+ // expected to deal with them using some overall timeout/expiration
+ // mechanism which it presumably has.
+ //
+ // Each build notification is in its own interface since a service may not
+ // be interested in all of them while computing the information to pass is
+ // expensive.
+
+ class tenant_service_build_queued: public virtual tenant_service_base
+ {
+ public:
+ // If the returned function is not NULL, it is called to update the
+ // service data. It should return the new data or nullopt if no update is
+ // necessary. Note: tenant_service::data passed to the callback and to the
+ // returned function may not be the same. Also, the returned function may
+ // be called multiple times (on transaction retries).
+ //
+ // The passed initial_state indicates the logical initial state and is
+ // either absent, `building` (interrupted), or `built` (rebuild). Note
+ // that all the passed build objects are for the same package version and
+ // have the same initial state.
+ //
+ // The implementation of this and the below functions should normally not
+ // need to make any decisions based on the passed build::state. Rather,
+ // the function name suffix (_queued, _building, _built) signify the
+ // logical end state.
+ //
+ // The build_queued_hints can be used to omit certain components from the
+ // build id. If single_package_version is true, then this tenant contains
+ // a single (non-test) package version and this package name and package
+ // version can be omitted. If single_package_config is true, then the
+ // package version being built only has the default package configuration
+ // and thus it can be omitted.
+ //
+ struct build_queued_hints
+ {
+ bool single_package_version;
+ bool single_package_config;
+ };
+
+ virtual function<optional<string> (const tenant_service&)>
+ build_queued (const tenant_service&,
+ const vector<build>&,
+ optional<build_state> initial_state,
+ const build_queued_hints&,
+ const diag_epilogue& log_writer) const noexcept = 0;
+ };
+
+ class tenant_service_build_building: public virtual tenant_service_base
+ {
+ public:
+ virtual function<optional<string> (const tenant_service&)>
+ build_building (const tenant_service&,
+ const build&,
+ const diag_epilogue& log_writer) const noexcept = 0;
+ };
+
+ class tenant_service_build_built: public virtual tenant_service_base
+ {
+ public:
+ virtual function<optional<string> (const tenant_service&)>
+ build_built (const tenant_service&,
+ const build&,
+ const diag_epilogue& log_writer) const noexcept = 0;
+ };
+
+ // Map of service type (tenant_service::type) to service.
+ //
+ using tenant_service_map = std::map<string, shared_ptr<tenant_service_base>>;
+
+ // Every notification callback function that needs to produce any
+ // diagnostics shall begin with:
+ //
+ // NOTIFICATION_DIAG (log_writer);
+ //
+ // This will instantiate the error, warn, info, and trace diagnostics
+ // streams with the function's name.
+ //
+ // Note that a callback function is not expected to throw any exceptions.
+ // This is, in particular, why this macro doesn't instantiate the fail
+ // diagnostics stream.
+ //
+#define NOTIFICATION_DIAG(log_writer) \
+ const basic_mark error (severity::error, \
+ log_writer, \
+ __PRETTY_FUNCTION__); \
+ const basic_mark warn (severity::warning, \
+ log_writer, \
+ __PRETTY_FUNCTION__); \
+ const basic_mark info (severity::info, \
+ log_writer, \
+ __PRETTY_FUNCTION__); \
+ const basic_mark trace (severity::trace, \
+ log_writer, \
+ __PRETTY_FUNCTION__)
+}
+
+#endif // MOD_TENANT_SERVICE_HXX
diff --git a/mod/types-parsers.cxx b/mod/types-parsers.cxx
index dc21e97..f135608 100644
--- a/mod/types-parsers.cxx
+++ b/mod/types-parsers.cxx
@@ -3,12 +3,17 @@
#include <mod/types-parsers.hxx>
-#include <libbutl/timestamp.mxx> // from_string()
+#include <sstream>
+
+#include <libbutl/regex.hxx>
+#include <libbutl/timestamp.hxx> // from_string()
#include <mod/module-options.hxx>
using namespace std;
+using namespace butl;
using namespace bpkg;
+using namespace bbot;
using namespace web::xhtml;
namespace brep
@@ -75,9 +80,9 @@ namespace brep
string t ("1970-01-01 ");
t += v;
- x = butl::from_string (t.c_str (),
- "%Y-%m-%d %H:%M",
- false /* local */).time_since_epoch ();
+ x = from_string (t.c_str (),
+ "%Y-%m-%d %H:%M",
+ false /* local */).time_since_epoch ();
return;
}
catch (const invalid_argument&) {}
@@ -110,6 +115,29 @@ namespace brep
}
}
+ // Parse interactive_mode.
+ //
+ void parser<interactive_mode>::
+ parse (interactive_mode& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const string v (s.next ());
+
+ try
+ {
+ x = to_interactive_mode (v);
+ }
+ catch (const invalid_argument&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
// Parse page_form.
//
void parser<page_form>::
@@ -176,10 +204,84 @@ namespace brep
{
x = fragment (v, o);
}
- catch (const xml::parsing&)
+ catch (const xml::parsing& e)
{
- throw invalid_value (o, v);
+ throw invalid_value (o, v, e.what ());
}
}
+
+ // Parse the '/regex/replacement/' string into the regex/replacement pair.
+ //
+ void parser<pair<std::regex, string>>::
+ parse (pair<std::regex, string>& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ x = regex_replace_parse (v);
+ }
+ catch (const invalid_argument& e)
+ {
+ throw invalid_value (o, v, e.what ());
+ }
+ catch (const regex_error& e)
+ {
+ // Sanitize the description.
+ //
+ ostringstream os;
+ os << e;
+
+ throw invalid_value (o, v, os.str ());
+ }
+ }
+
+ // Parse build_order.
+ //
+ void parser<build_order>::
+ parse (build_order& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const string v (s.next ());
+ if (v == "stable")
+ x = build_order::stable;
+ else if (v == "random")
+ x = build_order::random;
+ else
+ throw invalid_value (o, v);
+ }
+
+ // Parse build_email.
+ //
+ void parser<build_email>::
+ parse (build_email& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const string v (s.next ());
+ if (v == "none")
+ x = build_email::none;
+ else if (v == "latest")
+ x = build_email::latest;
+ else if (v == "all")
+ x = build_email::all;
+ else
+ throw invalid_value (o, v);
+ }
}
}
diff --git a/mod/types-parsers.hxx b/mod/types-parsers.hxx
index 6b851eb..d48ae0b 100644
--- a/mod/types-parsers.hxx
+++ b/mod/types-parsers.hxx
@@ -7,7 +7,10 @@
#ifndef MOD_TYPES_PARSERS_HXX
#define MOD_TYPES_PARSERS_HXX
+#include <regex>
+
#include <libbpkg/manifest.hxx> // repository_location
+#include <libbbot/manifest.hxx> // interactive_mode
#include <web/xhtml/fragment.hxx>
@@ -56,6 +59,13 @@ namespace brep
};
template <>
+ struct parser<bbot::interactive_mode>
+ {
+ static void
+ parse (bbot::interactive_mode&, bool&, scanner&);
+ };
+
+ template <>
struct parser<page_form>
{
static void
@@ -75,6 +85,27 @@ namespace brep
static void
parse (web::xhtml::fragment&, bool&, scanner&);
};
+
+ template <>
+ struct parser<pair<std::regex, string>>
+ {
+ static void
+ parse (pair<std::regex, string>&, bool&, scanner&);
+ };
+
+ template <>
+ struct parser<build_order>
+ {
+ static void
+ parse (build_order&, bool&, scanner&);
+ };
+
+ template <>
+ struct parser<build_email>
+ {
+ static void
+ parse (build_email&, bool&, scanner&);
+ };
}
}