diff options
Diffstat (limited to 'mod')
-rw-r--r-- | mod/build-config-module.hxx | 3 | ||||
-rw-r--r-- | mod/build-result-module.cxx | 145 | ||||
-rw-r--r-- | mod/build-target-config.cxx | 8 | ||||
-rw-r--r-- | mod/build-target-config.hxx | 23 | ||||
-rw-r--r-- | mod/database-module.cxx | 10 | ||||
-rw-r--r-- | mod/database-module.hxx | 5 | ||||
-rw-r--r-- | mod/mod-build-task.cxx | 466 | ||||
-rw-r--r-- | mod/mod-builds.cxx | 29 | ||||
-rw-r--r-- | mod/mod-package-details.cxx | 2 | ||||
-rw-r--r-- | mod/mod-package-version-details.cxx | 8 | ||||
-rw-r--r-- | mod/mod-packages.cxx | 2 | ||||
-rw-r--r-- | mod/page.cxx | 14 | ||||
-rw-r--r-- | mod/page.hxx | 13 |
13 files changed, 491 insertions, 237 deletions
diff --git a/mod/build-config-module.hxx b/mod/build-config-module.hxx index 78661c3..c1630b0 100644 --- a/mod/build-config-module.hxx +++ b/mod/build-config-module.hxx @@ -36,8 +36,9 @@ namespace brep void init (const options::build&); + template <typename K> bool - exclude (const build_package_config& pc, + exclude (const build_package_config_template<K>& pc, const build_class_exprs& common_builds, const build_constraints& common_constraints, const build_target_config& tc, diff --git a/mod/build-result-module.cxx b/mod/build-result-module.cxx index 68fbe4c..9ac1390 100644 --- a/mod/build-result-module.cxx +++ b/mod/build-result-module.cxx @@ -3,11 +3,16 @@ #include <mod/build-result-module.hxx> +#include <odb/database.hxx> + #include <libbutl/openssl.hxx> #include <libbutl/fdstream.hxx> #include <libbutl/process-io.hxx> #include <libbutl/semantic-version.hxx> +#include <libbrep/build-package.hxx> +#include <libbrep/build-package-odb.hxx> + namespace brep { using namespace std; @@ -230,54 +235,112 @@ namespace brep else { assert (b.agent_fingerprint && challenge); - auto i (bot_agent_key_map_->find (*b.agent_fingerprint)); - // The agent's key is recently replaced. + auto auth = [&challenge, + &b, + &o, + &fail, &trace, + &warn_auth, + this] (const path& key) + { + bool r (false); + + try + { + openssl os ([&trace, this] (const char* args[], size_t n) + { + l2 ([&]{trace << process_args {args, n};}); + }, + path ("-"), fdstream_mode::text, 2, + process_env (o.openssl (), o.openssl_envvar ()), + use_openssl_pkeyutl_ ? "pkeyutl" : "rsautl", + o.openssl_option (), + use_openssl_pkeyutl_ ? "-verifyrecover" : "-verify", + "-pubin", + "-inkey", key); + + for (const auto& c: *challenge) + os.out.put (c); // Sets badbit on failure. 
+ + os.out.close (); + + string s; + getline (os.in, s); + + bool v (os.in.eof ()); + os.in.close (); + + if (os.wait () && v) + { + r = (s == *b.agent_challenge); + + if (!r) + warn_auth ("challenge mismatched"); + } + else // The signature is presumably meaningless. + warn_auth ("unable to verify challenge"); + } + catch (const system_error& e) + { + fail << "unable to verify challenge: " << e; + } + + return r; + }; + + const string& fp (*b.agent_fingerprint); + auto i (bot_agent_key_map_->find (fp)); + + // Note that it is possible that the default vs custom bot + // classification has changed since the task request time. It feels that + // there is nothing wrong with that and we will handle that + // automatically. // - if (i == bot_agent_key_map_->end ()) + if (i != bot_agent_key_map_->end ()) // Default bot? { - warn_auth ("agent's public key not found"); + r = auth (i->second); } - else - try + else // Custom bot. { - openssl os ([&trace, this] (const char* args[], size_t n) - { - l2 ([&]{trace << process_args {args, n};}); - }, - path ("-"), fdstream_mode::text, 2, - process_env (o.openssl (), o.openssl_envvar ()), - use_openssl_pkeyutl_ ? "pkeyutl" : "rsautl", - o.openssl_option (), - use_openssl_pkeyutl_ ? "-verifyrecover" : "-verify", - "-pubin", - "-inkey", - i->second); - - for (const auto& c: *challenge) - os.out.put (c); // Sets badbit on failure. - - os.out.close (); - - string s; - getline (os.in, s); - - bool v (os.in.eof ()); - os.in.close (); - - if (os.wait () && v) - { - r = (s == *b.agent_challenge); + shared_ptr<build_public_key> k ( + build_db_->find<build_public_key> (public_key_id (b.tenant, fp))); - if (!r) - warn_auth ("challenge mismatched"); + if (k != nullptr) + { + // Temporarily save the key data to disk (note that it's the + // challenge which is passed via stdin to openssl). Hopefully /tmp + // is using tmpfs. 
+ // + auto_rmfile arm; + + try + { + arm = auto_rmfile (path::temp_path ("brep-custom-bot-key")); + } + catch (const system_error& e) + { + fail << "unable to obtain temporary file: " << e; + } + + try + { + ofdstream os (arm.path); + os << *k; + os.close (); + } + catch (const io_error& e) + { + fail << "unable to write to '" << arm.path << "': " << e; + } + + r = auth (arm.path); + } + else + { + // The agent's key is recently replaced. + // + warn_auth ("agent's public key not found"); } - else // The signature is presumably meaningless. - warn_auth ("unable to verify challenge"); - } - catch (const system_error& e) - { - fail << "unable to verify challenge: " << e; } } diff --git a/mod/build-target-config.cxx b/mod/build-target-config.cxx index a30cf07..a30e281 100644 --- a/mod/build-target-config.cxx +++ b/mod/build-target-config.cxx @@ -21,17 +21,13 @@ namespace brep {"all"}, '+', "All."); bool - exclude (const build_package_config& pc, - const build_class_exprs& cbs, - const build_constraints& ccs, + exclude (const build_class_exprs& exprs, + const build_constraints& constrs, const build_target_config& tc, const map<string, string>& class_inheritance_map, string* reason, bool default_all_ucs) { - const build_class_exprs& exprs (pc.effective_builds (cbs)); - const build_constraints& constrs (pc.effective_constraints (ccs)); - // Save the first sentence of the reason, lower-case the first letter if // the beginning looks like a word (all subsequent characters until a // whitespace are lower-case letters). diff --git a/mod/build-target-config.hxx b/mod/build-target-config.hxx index 180ca80..60d159c 100644 --- a/mod/build-target-config.hxx +++ b/mod/build-target-config.hxx @@ -32,14 +32,31 @@ namespace brep // configuration set needlessly). 
// bool - exclude (const build_package_config&, - const build_class_exprs& common_builds, - const build_constraints& common_constraints, + exclude (const build_class_exprs& builds, + const build_constraints& constraints, const build_target_config&, const std::map<string, string>& class_inheritance_map, string* reason = nullptr, bool default_all_ucs = false); + template <typename K> + inline bool + exclude (const build_package_config_template<K>& pc, + const build_class_exprs& common_builds, + const build_constraints& common_constraints, + const build_target_config& tc, + const std::map<string, string>& class_inheritance_map, + string* reason = nullptr, + bool default_all_ucs = false) + { + return exclude (pc.effective_builds (common_builds), + pc.effective_constraints (common_constraints), + tc, + class_inheritance_map, + reason, + default_all_ucs); + } + // Convert dash-separated components (target, build target configuration // name, machine name) or a pattern thereof into a path, replacing dashes // with slashes (directory separators), `**` with `*/**/*`, and appending diff --git a/mod/database-module.cxx b/mod/database-module.cxx index 07babc6..bbb3e59 100644 --- a/mod/database-module.cxx +++ b/mod/database-module.cxx @@ -76,7 +76,7 @@ namespace brep throw; } - void database_module:: + optional<string> database_module:: update_tenant_service_state ( const connection_ptr& conn, const string& tid, @@ -88,6 +88,8 @@ namespace brep // assert (build_db_ != nullptr); + optional<string> r; + for (size_t retry (retry_);; ) { try @@ -104,6 +106,8 @@ namespace brep { s.data = move (*data); build_db_->update (t); + + r = move (s.data); } } @@ -121,7 +125,11 @@ namespace brep HANDLER_DIAG; l1 ([&]{trace << e << "; " << retry + 1 << " tenant service " << "state update retries left";}); + + r = nullopt; // Prepare for the next iteration. 
} } + + return r; } } diff --git a/mod/database-module.hxx b/mod/database-module.hxx index 910cb35..298afbf 100644 --- a/mod/database-module.hxx +++ b/mod/database-module.hxx @@ -57,7 +57,8 @@ namespace brep // Update the tenant-associated service state if the specified // notification callback-returned function (expected to be not NULL) - // returns the new state data. + // returns the new state data. Return the service state data, if updated, + // and nullopt otherwise. // // Specifically, start the database transaction, query the service state, // and call the callback-returned function on this state. If this call @@ -65,7 +66,7 @@ namespace brep // state with this data and persist the change. Repeat all the above steps // on the recoverable database failures (deadlocks, etc). // - void + optional<string> update_tenant_service_state ( const odb::core::connection_ptr&, const string& tid, diff --git a/mod/mod-build-task.cxx b/mod/mod-build-task.cxx index 4dd1e3d..07aff8d 100644 --- a/mod/mod-build-task.cxx +++ b/mod/mod-build-task.cxx @@ -44,6 +44,28 @@ using namespace odb::core; static thread_local mt19937 rand_gen (random_device {} ()); +// The challenge (nonce) is randomly generated for every build task if brep is +// configured to authenticate bbot agents. +// +// Nonce generator must guarantee a probabilistically insignificant chance +// of repeating a previously generated value. The common approach is to use +// counters or random number generators (alone or in combination), that +// produce values of the sufficient length. 64-bit non-repeating and +// 512-bit random numbers are considered to be more than sufficient for +// most practical purposes. +// +// We will produce the challenge as the sha256sum of the 512-bit random +// number and the 64-bit current timestamp combination. The latter is +// not really a non-repeating counter and can't be used alone. However +// adding it is a good and cheap uniqueness improvement. 
+// +// Note that since generating a challenge is not exactly cheap/fast, we will +// generate it in advance for every task request, out of the database +// transaction, and will cache it if it turns out that it wasn't used (no +// package configuration to (re-)build, etc). +// +static thread_local optional<string> challenge; + // Generate a random number in the specified range (max value is included). // static inline size_t @@ -132,7 +154,8 @@ init (scanner& s) // template <typename T> static inline query<T> -package_query (brep::params::build_task& params, +package_query (bool custom_bot, + brep::params::build_task& params, interactive_mode imode, uint64_t queued_expiration_ns) { @@ -141,9 +164,39 @@ package_query (brep::params::build_task& params, query q (!query::build_tenant::archived); + if (custom_bot) + { + // Note that we could potentially only query the packages which refer to + // this custom bot key in one of their build configurations. For that we + // would need to additionally join the current query tables with the bot + // fingerprint-containing build_package_bot_keys and + // build_package_config_bot_keys tables and use the SELECT DISTINCT + // clause. The problem is that we also use the ORDER BY clause and in this + // case PostgreSQL requires all the ORDER BY clause expressions to also be + // present in the SELECT DISTINCT clause and fails with the 'for SELECT + // DISTINCT, ORDER BY expressions must appear in select list' error if + // that's not the case. Also note that in the ODB-generated code the + // 'build_package.project::TEXT' expression in the SELECT DISTINCT clause + // (see the CITEXT type mapping for details in libbrep/common.hxx) would + // not match the 'build_package.name' expression in the ORDER BY clause + // and so we will end up with the mentioned error. One (hackish) way to + // fix that would be to add a dummy member of the string type for the + // build_package.name column. 
This all sounds quite hairy at the moment + // and it also feels that this can potentially pessimize querying the + // packages built with the default bots only. Thus let's keep it simple + // for now and filter packages by the bot fingerprint at the program + // level. + // + q = q && (query::build_package::custom_bot.is_null () || + query::build_package::custom_bot); + } + else + q = q && (query::build_package::custom_bot.is_null () || + !query::build_package::custom_bot); + // Filter by repositories canonical names (if requested). // - const vector<string>& rp (params.repository ()); + const strings& rp (params.repository ()); if (!rp.empty ()) q = q && @@ -213,25 +266,33 @@ handle (request& rq, response& rs) throw invalid_request (400, e.what ()); } - // Obtain the agent's public key fingerprint if requested. If the fingerprint - // is requested but is not present in the request or is unknown, then respond - // with 401 HTTP code (unauthorized). + // Obtain the agent's public key fingerprint if requested. If the + // fingerprint is requested but is not present in the request, then respond + // with 401 HTTP code (unauthorized). If a key with the specified + // fingerprint is not present in the build bot agent keys directory, then + // assume that this is a custom build bot. + // + // Note that if the agent authentication is not configured (the agent keys + // directory is not specified), then the bot can never be custom and its + // fingerprint is ignored, if present. // optional<string> agent_fp; + bool custom_bot (false); if (bot_agent_key_map_ != nullptr) { - if (!tqm.fingerprint || - bot_agent_key_map_->find (*tqm.fingerprint) == - bot_agent_key_map_->end ()) + if (!tqm.fingerprint) throw invalid_request (401, "unauthorized"); agent_fp = move (tqm.fingerprint); + + custom_bot = (bot_agent_key_map_->find (*agent_fp) == + bot_agent_key_map_->end ()); } // The resulting task manifest and the related build, package, and // configuration objects. 
Note that the latter 3 are only meaningful if the - // session in the task manifest is not empty. + // the task manifest is present. // task_response_manifest task_response; shared_ptr<build> task_build; @@ -578,63 +639,6 @@ handle (request& rq, response& rs) : optional<size_t> ()), options_->build_hard_rebuild_timeout ())); - // Return the challenge (nonce) if brep is configured to authenticate bbot - // agents. Return nullopt otherwise. - // - // Nonce generator must guarantee a probabilistically insignificant chance - // of repeating a previously generated value. The common approach is to use - // counters or random number generators (alone or in combination), that - // produce values of the sufficient length. 64-bit non-repeating and - // 512-bit random numbers are considered to be more than sufficient for - // most practical purposes. - // - // We will produce the challenge as the sha256sum of the 512-bit random - // number and the 64-bit current timestamp combination. The latter is - // not really a non-repeating counter and can't be used alone. However - // adding it is a good and cheap uniqueness improvement. 
- // - auto challenge = [&agent_fp, &now, &fail, &trace, this] () - { - optional<string> r; - - if (agent_fp) - { - try - { - auto print_args = [&trace, this] (const char* args[], size_t n) - { - l2 ([&]{trace << process_args {args, n};}); - }; - - openssl os (print_args, - nullfd, path ("-"), 2, - process_env (options_->openssl (), - options_->openssl_envvar ()), - "rand", - options_->openssl_option (), 64); - - vector<char> nonce (os.in.read_binary ()); - os.in.close (); - - if (!os.wait () || nonce.size () != 64) - fail << "unable to generate nonce"; - - uint64_t t (chrono::duration_cast<chrono::nanoseconds> ( - now.time_since_epoch ()).count ()); - - sha256 cs (nonce.data (), nonce.size ()); - cs.append (&t, sizeof (t)); - r = cs.string (); - } - catch (const system_error& e) - { - fail << "unable to generate nonce: " << e; - } - } - - return r; - }; - // Convert butl::standard_version type to brep::version. // brep::version toolchain_version (tqm.toolchain_version.string ()); @@ -659,7 +663,8 @@ handle (request& rq, response& rs) using pkg_query = query<buildable_package>; using prep_pkg_query = prepared_query<buildable_package>; - pkg_query pq (package_query<buildable_package> (params, + pkg_query pq (package_query<buildable_package> (custom_bot, + params, imode, queued_expiration_ns)); @@ -815,7 +820,8 @@ handle (request& rq, response& rs) { using query = query<buildable_package_count>; - query q (package_query<buildable_package_count> (params, + query q (package_query<buildable_package_count> (custom_bot, + params, imode, queued_expiration_ns)); @@ -1241,7 +1247,8 @@ handle (request& rq, response& rs) // small_vector<bpkg::test_dependency, 1> tests; - build_db_->load (*p, p->requirements_tests_section); + if (!p->requirements_tests_section.loaded ()) + build_db_->load (*p, p->requirements_tests_section); for (const build_test_dependency& td: p->tests) { @@ -1257,13 +1264,12 @@ handle (request& rq, response& rs) // Try to use the test package configuration 
named the same as the // current configuration of the main package. If there is no such - // a configuration or it excludes the current target - // configuration, then fallback to using the default configuration - // (which must exist). If in this case the default configuration - // excludes the current target configuration, then exclude this - // external test package from the build task. + // a configuration, then fallback to using the default + // configuration (which must exist). If the selected test package + // configuration excludes the current target configuration, then + // exclude this external test package from the build task. // - // Note that potentially the test package default configuration + // Note that potentially the selected test package configuration // may contain some (bpkg) arguments associated, but we currently // don't provide build bot worker with such information. This, // however, is probably too far fetched so let's keep it simple @@ -1271,6 +1277,13 @@ handle (request& rq, response& rs) // const build_package_config* tpc (find (pc.name, tp->configs)); + if (tpc == nullptr) + { + tpc = find ("default", tp->configs); + + assert (tpc != nullptr); // Must always be present. + } + // Use the `all` class as a least restrictive default underlying // build class set. Note that we should only apply the explicit // build restrictions to the external test packages (think about @@ -1279,32 +1292,15 @@ handle (request& rq, response& rs) // build_db_->load (*tp, tp->constraints_section); - if (tpc == nullptr || - exclude (*tpc, + if (exclude (*tpc, tp->builds, tp->constraints, tc, nullptr /* reason */, true /* default_all_ucs */)) - { - // If the current package configuration is "default", then we - // cannot fallback and just exclude the test package outright. - // - if (pc.name == "default") - continue; - - tpc = find ("default", tp->configs); - - assert (tpc != nullptr); // Must always be present. 
+ continue; - if (exclude (*tpc, - tp->builds, - tp->constraints, - tc, - nullptr /* reason */, - true /* default_all_ucs */)) - continue; - } + build_db_->load (*tp, tp->auxiliaries_section); for (const build_auxiliary& ba: tpc->effective_auxiliaries (tp->auxiliaries)) @@ -1325,20 +1321,56 @@ handle (request& rq, response& rs) vector<auxiliary_machine> tms; vector<build_machine> bms; - tms.reserve (picked_machines.size ()); - bms.reserve (picked_machines.size ()); - - for (pair<auxiliary_config_machine, string>& pm: picked_machines) + if (size_t n = picked_machines.size ()) { - const machine_header_manifest& m (*pm.first.machine); - tms.push_back (auxiliary_machine {m.name, move (pm.second)}); - bms.push_back (build_machine {m.name, m.summary}); + tms.reserve (n); + bms.reserve (n); + + for (pair<auxiliary_config_machine, string>& pm: picked_machines) + { + const machine_header_manifest& m (*pm.first.machine); + tms.push_back (auxiliary_machine {m.name, move (pm.second)}); + bms.push_back (build_machine {m.name, m.summary}); + } } return collect_auxiliaries_result { move (tms), move (bms), move (tests)}; }; + if (agent_fp && !challenge) + try + { + auto print_args = [&trace, this] (const char* args[], size_t n) + { + l2 ([&]{trace << process_args {args, n};}); + }; + + openssl os (print_args, + nullfd, path ("-"), 2, + process_env (options_->openssl (), + options_->openssl_envvar ()), + "rand", + options_->openssl_option (), 64); + + vector<char> nonce (os.in.read_binary ()); + os.in.close (); + + if (!os.wait () || nonce.size () != 64) + fail << "unable to generate nonce"; + + uint64_t t (chrono::duration_cast<chrono::nanoseconds> ( + now.time_since_epoch ()).count ()); + + sha256 cs (nonce.data (), nonce.size ()); + cs.append (&t, sizeof (t)); + challenge = cs.string (); + } + catch (const system_error& e) + { + fail << "unable to generate nonce: " << e; + } + // While at it, collect the aborted for various reasons builds // (interactive builds in multiple 
configurations, builds with too many // auxiliary machines, etc) to send the notification emails at the end @@ -1357,9 +1389,9 @@ handle (request& rq, response& rs) // bool unforced (true); - for (bool done (false); task_response.session.empty () && !done; ) + for (bool done (false); !task_response.task && !done; ) { - transaction t (conn->begin ()); + transaction tr (conn->begin ()); // We need to be careful in the random package ordering mode not to // miss the end after having wrapped around. @@ -1394,7 +1426,7 @@ handle (request& rq, response& rs) // if (chunk_size == 0) { - t.commit (); + tr.commit (); if (start_offset != 0 && offset >= start_offset) offset = 0; @@ -1411,12 +1443,24 @@ handle (request& rq, response& rs) // to bail out in the random package ordering mode for some reason (no // more untried positions, need to restart, etc). // + // Note that it is not uncommon for the sequentially examined packages + // to belong to the same tenant (single tenant mode, etc). Thus, we + // will cache the loaded tenant objects. + // + shared_ptr<build_tenant> t; + for (auto& bp: packages) { shared_ptr<build_package>& p (bp.package); id = p->id; + // Reset the tenant cache if the current package belongs to a + // different tenant. + // + if (t != nullptr && t->id != id.tenant) + t = nullptr; + // If we are in the random package ordering mode, then cache the // tenant the start offset refers to, if not cached yet, and check // if we are still iterating over packages from this tenant @@ -1476,8 +1520,6 @@ handle (request& rq, response& rs) // if (bp.interactive) { - shared_ptr<build_tenant> t; - // Note that the tenant can be archived via some other package on // some previous iteration. Skip the package if that's the case. 
// @@ -1486,7 +1528,9 @@ handle (request& rq, response& rs) // if (!bp.archived) { - t = build_db_->load<build_tenant> (id.tenant); + if (t == nullptr) + t = build_db_->load<build_tenant> (id.tenant); + bp.archived = t->archived; } @@ -1596,8 +1640,48 @@ handle (request& rq, response& rs) } } + // If true, then the package is (being) built for some + // configurations. + // + // Note that since we only query the built and forced rebuild + // objects there can be false negatives. + // + bool package_built (false); + + build_db_->load (*p, p->bot_keys_section); + for (const build_package_config& pc: p->configs) { + // If this is a custom bot, then skip this configuration if it + // doesn't contain this bot's public key in its custom bot keys + // list. Otherwise (this is a default bot), skip this + // configuration if its custom bot keys list is not empty. + // + { + const build_package_bot_keys& bks ( + pc.effective_bot_keys (p->bot_keys)); + + if (custom_bot) + { + assert (agent_fp); // Wouldn't be here otherwise. + + if (find_if ( + bks.begin (), bks.end (), + [&agent_fp] (const lazy_shared_ptr<build_public_key>& k) + { + return k.object_id ().fingerprint == *agent_fp; + }) == bks.end ()) + { + continue; + } + } + else + { + if (!bks.empty ()) + continue; + } + } + pkg_config = pc.name; // Iterate through the built configurations and erase them from the @@ -1610,6 +1694,9 @@ handle (request& rq, response& rs) config_machines configs (conf_machines); // Make copy for this pkg. auto pkg_builds (bld_prep_query.execute ()); + if (!package_built && !pkg_builds.empty ()) + package_built = true; + for (auto i (pkg_builds.begin ()); i != pkg_builds.end (); ++i) { auto j ( @@ -1637,35 +1724,38 @@ handle (request& rq, response& rs) // the package configuration and for which all the requested // auxiliary machines can be provided. 
// - auto i (configs.begin ()); - auto e (configs.end ()); + const config_machine* cm (nullptr); + optional<collect_auxiliaries_result> aux; build_db_->load (*p, p->constraints_section); - optional<collect_auxiliaries_result> aux; - for (; i != e; ++i) + for (auto i (configs.begin ()), e (configs.end ()); i != e; ++i) { - const build_target_config& tc (*i->second.config); + cm = &i->second; + const build_target_config& tc (*cm->config); - if (!exclude (pc, p->builds, p->constraints, tc) && - (aux = collect_auxiliaries (p, pc, tc))) - break; + if (!exclude (pc, p->builds, p->constraints, tc)) + { + if (!p->auxiliaries_section.loaded ()) + build_db_->load (*p, p->auxiliaries_section); + + if ((aux = collect_auxiliaries (p, pc, tc))) + break; + } } - if (i != e) + if (aux) { - config_machine& cm (i->second); - machine_header_manifest& mh (*cm.machine); + machine_header_manifest& mh (*cm->machine); build_id bid (move (id), - cm.config->target, - cm.config->name, + cm->config->target, + cm->config->name, move (pkg_config), move (toolchain_name), toolchain_version); shared_ptr<build> b (build_db_->find<build> (bid)); - optional<string> cl (challenge ()); // Move the interactive build login information into the build // object, if the package to be built interactively. 
@@ -1690,12 +1780,14 @@ handle (request& rq, response& rs) move (toolchain_version), move (login), move (agent_fp), - move (cl), + move (challenge), build_machine { mh.name, move (mh.summary)}, move (aux->build_auxiliary_machines), - controller_checksum (*cm.config), - machine_checksum (*cm.machine)); + controller_checksum (*cm->config), + machine_checksum (*cm->machine)); + + challenge = nullopt; build_db_->persist (b); } @@ -1732,7 +1824,10 @@ handle (request& rq, response& rs) } b->agent_fingerprint = move (agent_fp); - b->agent_challenge = move (cl); + + b->agent_challenge = move (challenge); + challenge = nullopt; + b->machine = build_machine {mh.name, move (mh.summary)}; // Mark the section as loaded, so auxiliary_machines are @@ -1743,8 +1838,8 @@ handle (request& rq, response& rs) b->auxiliary_machines = move (aux->build_auxiliary_machines); - string ccs (controller_checksum (*cm.config)); - string mcs (machine_checksum (*cm.machine)); + string ccs (controller_checksum (*cm->config)); + string mcs (machine_checksum (*cm->machine)); // Issue the hard rebuild if it is forced or the // configuration or machine has changed. @@ -1763,8 +1858,8 @@ handle (request& rq, response& rs) build_db_->update (b); } - shared_ptr<build_tenant> t ( - build_db_->load<build_tenant> (b->tenant)); + if (t == nullptr) + t = build_db_->load<build_tenant> (b->tenant); // Archive an interactive tenant. // @@ -1811,7 +1906,7 @@ handle (request& rq, response& rs) } if (tsb != nullptr || tsq != nullptr) - tss = make_pair (move (*t->service), b); + tss = make_pair (*t->service, b); } } @@ -1821,31 +1916,57 @@ handle (request& rq, response& rs) move (aux->tests), move (aux->task_auxiliary_machines), move (bp.interactive), - cm); + *cm); task_build = move (b); task_package = move (p); task_config = &pc; + package_built = true; + break; // Bail out from the package configurations loop. 
} } } - // If the task response manifest is prepared, then bail out from the - // package loop, commit the transaction and respond. + // If the task manifest is prepared, then bail out from the package + // loop, commit the transaction and respond. Otherwise, stash the + // build toolchain into the tenant, unless it is already stashed or + // the current package already has some configurations (being) + // built. // - if (!task_response.session.empty ()) + if (!task_response.task) + { + // Note that since there can be false negatives for the + // package_built flag (see above), there can be redundant tenant + // queries which, however, seems harmless (query uses the primary + // key and the object memory footprint is small). + // + if (!package_built) + { + if (t == nullptr) + t = build_db_->load<build_tenant> (p->id.tenant); + + if (!t->toolchain) + { + t->toolchain = build_toolchain {toolchain_name, + toolchain_version}; + + build_db_->update (t); + } + } + } + else break; } - t.commit (); + tr.commit (); } // If we don't have an unbuilt package, then let's see if we have a // build configuration to rebuild. // - if (task_response.session.empty () && !rebuilds.empty ()) + if (!task_response.task && !rebuilds.empty ()) { // Sort the configuration rebuild list with the following sort // priority: @@ -1875,8 +1996,6 @@ handle (request& rq, response& rs) sort (rebuilds.begin (), rebuilds.end (), cmp); - optional<string> cl (challenge ()); - // Pick the first build configuration from the ordered list. 
// // Note that the configurations and packages may not match the @@ -1935,13 +2054,17 @@ handle (request& rq, response& rs) (t->interactive.has_value () == (imode == interactive_mode::true_)))) { + const build_target_config& tc (*cm.config); + build_db_->load (*p, p->constraints_section); - const build_target_config& tc (*cm.config); + if (exclude (*pc, p->builds, p->constraints, tc)) + continue; + + build_db_->load (*p, p->auxiliaries_section); - optional<collect_auxiliaries_result> aux; - if (!exclude (*pc, p->builds, p->constraints, tc) && - (aux = collect_auxiliaries (p, *pc, tc))) + if (optional<collect_auxiliaries_result> aux = + collect_auxiliaries (p, *pc, tc)) { assert (b->status); @@ -1963,10 +2086,10 @@ handle (request& rq, response& rs) unforced = (b->force == force_state::unforced); - // Can't move from, as may need them on the next iteration. - // - b->agent_fingerprint = agent_fp; - b->agent_challenge = cl; + b->agent_fingerprint = move (agent_fp); + + b->agent_challenge = move (challenge); + challenge = nullopt; const machine_header_manifest& mh (*cm.machine); b->machine = build_machine {mh.name, mh.summary}; @@ -2059,16 +2182,19 @@ handle (request& rq, response& rs) } catch (const odb::deadlock&) { - // Just try with the next rebuild. But first, reset the task - // response manifest that we may have prepared. + // Just try with the next rebuild. But first, restore the agent's + // fingerprint and challenge and reset the task manifest and the + // session that we may have prepared. // + agent_fp = move (b->agent_fingerprint); + challenge = move (b->agent_challenge); task_response = task_response_manifest (); } - // If the task response manifest is prepared, then bail out from the - // package configuration rebuilds loop and respond. + // If the task manifest is prepared, then bail out from the package + // configuration rebuilds loop and respond. 
// - if (!task_response.session.empty ()) + if (task_response.task) break; } } @@ -2082,7 +2208,7 @@ handle (request& rq, response& rs) { assert (tss); // Wouldn't be here otherwise. - const tenant_service& ss (tss->first); + tenant_service& ss (tss->first); // If the task build has no initial state (is just created), then // temporarily move it into the list of the queued builds until the @@ -2105,7 +2231,11 @@ handle (request& rq, response& rs) nullopt /* initial_state */, qhs, log_writer_)) - update_tenant_service_state (conn, qbs.back ().tenant, f); + { + if (optional<string> data = + update_tenant_service_state (conn, qbs.back ().tenant, f)) + ss.data = move (data); + } } // Send the `queued` notification for the task build, unless it is @@ -2125,7 +2255,11 @@ handle (request& rq, response& rs) initial_state, qhs, log_writer_)) - update_tenant_service_state (conn, qbs.back ().tenant, f); + { + if (optional<string> data = + update_tenant_service_state (conn, qbs.back ().tenant, f)) + ss.data = move (data); + } } if (restore_build) @@ -2141,16 +2275,20 @@ handle (request& rq, response& rs) { assert (tss); // Wouldn't be here otherwise. - const tenant_service& ss (tss->first); + tenant_service& ss (tss->first); const build& b (*tss->second); if (auto f = tsb->build_building (ss, b, log_writer_)) - update_tenant_service_state (conn, b.tenant, f); + { + if (optional<string> data = + update_tenant_service_state (conn, b.tenant, f)) + ss.data = move (data); + } } - // If the task response manifest is prepared, then check that the number - // of the build auxiliary machines is less than 10. If that's not the - // case, then turn the build into the built state with the abort status. + // If the task manifest is prepared, then check that the number of the + // build auxiliary machines is less than 10. If that's not the case, + // then turn the build into the built state with the abort status. 
// if (task_response.task && task_response.task->auxiliary_machines.size () > 9) @@ -2254,11 +2392,15 @@ handle (request& rq, response& rs) { assert (tss); // Wouldn't be here otherwise. - const tenant_service& ss (tss->first); + tenant_service& ss (tss->first); const build& b (*tss->second); if (auto f = tsb->build_built (ss, b, log_writer_)) - update_tenant_service_state (conn, b.tenant, f); + { + if (optional<string> data = + update_tenant_service_state (conn, b.tenant, f)) + ss.data = move (data); + } } } diff --git a/mod/mod-builds.cxx b/mod/mod-builds.cxx index b0de618..30562f3 100644 --- a/mod/mod-builds.cxx +++ b/mod/mod-builds.cxx @@ -393,8 +393,11 @@ handle (request& rq, response& rs) if (!tenant.empty ()) tn = tenant; - // Return the list of distinct toolchain name/version pairs. The build db - // transaction must be started. + // Return the list of distinct toolchain name/version pairs. If no builds + // are present for the tenant, then fallback to the toolchain recorded in + // the tenant object, if present. + // + // Note: the build db transaction must be started. // using toolchains = vector<pair<string, version>>; @@ -410,11 +413,19 @@ handle (request& rq, response& rs) false /* first */))) r.emplace_back (move (t.name), move (t.version)); + if (r.empty ()) + { + shared_ptr<build_tenant> t (build_db_->find<build_tenant> (tenant)); + + if (t != nullptr && t->toolchain) + r.emplace_back (t->toolchain->name, t->toolchain->version); + } + return r; }; auto print_form = [&s, ¶ms, this] (const toolchains& toolchains, - size_t build_count) + optional<size_t> build_count) { // Print the package builds filter form on the first page only. // @@ -525,7 +536,7 @@ handle (request& rq, response& rs) conf_ids.push_back (c.first); } - size_t count; + optional<size_t> count; size_t page (params.page ()); if (params.result () != "unbuilt") // Print package build configurations. 
@@ -656,7 +667,7 @@ handle (request& rq, response& rs) --print; } - ++count; + ++(*count); } } @@ -937,7 +948,7 @@ handle (request& rq, response& rs) count = npos - ncur; } else - count = 0; + count = nullopt; // Unknown count. } // Print the filter form. @@ -1148,7 +1159,11 @@ handle (request& rq, response& rs) add_filter ("pc", pkg_cfg); add_filter ("rs", params.result (), "*"); - s << DIV_PAGER (page, count, page_configs, options_->build_pages (), u) + s << DIV_PAGER (page, + count ? *count : 0, + page_configs, + options_->build_pages (), + u) << ~DIV << ~BODY << ~HTML; diff --git a/mod/mod-package-details.cxx b/mod/mod-package-details.cxx index 5cf0759..fcd50da 100644 --- a/mod/mod-package-details.cxx +++ b/mod/mod-package-details.cxx @@ -227,7 +227,7 @@ handle (request& rq, response& rs) << ~TABLE; } - auto pkg_count ( + size_t pkg_count ( package_db_->query_value<package_count> ( search_params<package_count> (squery, tenant, name))); diff --git a/mod/mod-package-version-details.cxx b/mod/mod-package-version-details.cxx index 51b21c6..91923e5 100644 --- a/mod/mod-package-version-details.cxx +++ b/mod/mod-package-version-details.cxx @@ -541,7 +541,7 @@ handle (request& rq, response& rs) // builds = false; - for (const build_package_config& pc: pkg->build_configs) + for (const package_build_config& pc: pkg->build_configs) { const build_class_exprs& exprs (pc.effective_builds (pkg->builds)); @@ -726,7 +726,7 @@ handle (request& rq, response& rs) s << H3 << "Builds" << ~H3 << DIV(ID="builds"); - auto exclude = [&pkg, this] (const build_package_config& pc, + auto exclude = [&pkg, this] (const package_build_config& pc, const build_target_config& tc, string* rs = nullptr) { @@ -767,7 +767,7 @@ handle (request& rq, response& rs) query sq (false); set<config_toolchain> unbuilt_configs; - for (const build_package_config& pc: pkg->build_configs) + for (const package_build_config& pc: pkg->build_configs) { for (const auto& bc: *target_conf_map_) { @@ -886,7 +886,7 @@ 
handle (request& rq, response& rs) // if (!tn->interactive) { - for (const build_package_config& pc: pkg->build_configs) + for (const package_build_config& pc: pkg->build_configs) { for (const auto& tc: *target_conf_) { diff --git a/mod/mod-packages.cxx b/mod/mod-packages.cxx index 914a841..6026024 100644 --- a/mod/mod-packages.cxx +++ b/mod/mod-packages.cxx @@ -156,7 +156,7 @@ handle (request& rq, response& rs) session sn; transaction t (package_db_->begin ()); - auto pkg_count ( + size_t pkg_count ( package_db_->query_value<latest_package_count> ( search_param<latest_package_count> (squery, tn))); diff --git a/mod/page.cxx b/mod/page.cxx index 5483183..bc2e42d 100644 --- a/mod/page.cxx +++ b/mod/page.cxx @@ -137,9 +137,17 @@ namespace brep void DIV_COUNTER:: operator() (serializer& s) const { - s << DIV(ID="count") - << count_ << " " - << (count_ % 10 == 1 && count_ % 100 != 11 ? singular_ : plural_) + s << DIV(ID="count"); + + if (count_) + s << *count_; + else + s << '?'; + + s << ' ' + << (count_ && *count_ % 10 == 1 && *count_ % 100 != 11 + ? singular_ + : plural_) << ~DIV; } diff --git a/mod/page.hxx b/mod/page.hxx index cac2b8b..7329e2d 100644 --- a/mod/page.hxx +++ b/mod/page.hxx @@ -82,21 +82,24 @@ namespace brep // Generate counter element. // - // It could be redunant to distinguish between singular and plural word forms - // if it wouldn't be so cheap in English, and phrase '1 Packages' wouldn't - // look that ugly. + // If the count argument is nullopt, then it is assumed that the count is + // unknown and the '?' character is printed instead of the number. + // + // Note that it could be redundant to distinguish between singular and plural + // word forms if it wouldn't be so cheap in English, and phrase '1 Packages' + // wouldn't look that ugly. 
// class DIV_COUNTER { public: - DIV_COUNTER (size_t c, const char* s, const char* p) + DIV_COUNTER (optional<size_t> c, const char* s, const char* p) : count_ (c), singular_ (s), plural_ (p) {} void operator() (xml::serializer&) const; private: - size_t count_; + optional<size_t> count_; const char* singular_; const char* plural_; }; |