aboutsummaryrefslogtreecommitdiff
path: root/mod/mod-build-task.cxx
diff options
context:
space:
mode:
Diffstat (limited to 'mod/mod-build-task.cxx')
-rw-r--r--mod/mod-build-task.cxx466
1 files changed, 304 insertions, 162 deletions
diff --git a/mod/mod-build-task.cxx b/mod/mod-build-task.cxx
index 4dd1e3d..07aff8d 100644
--- a/mod/mod-build-task.cxx
+++ b/mod/mod-build-task.cxx
@@ -44,6 +44,28 @@ using namespace odb::core;
static thread_local mt19937 rand_gen (random_device {} ());
+// The challenge (nonce) is randomly generated for every build task if brep is
+// configured to authenticate bbot agents.
+//
+// Nonce generator must guarantee a probabilistically insignificant chance
+// of repeating a previously generated value. The common approach is to use
+// counters or random number generators (alone or in combination) that
+// produce values of sufficient length. 64-bit non-repeating and
+// 512-bit random numbers are considered to be more than sufficient for
+// most practical purposes.
+//
+// We will produce the challenge as the sha256sum of the 512-bit random
+// number and the 64-bit current timestamp combination. The latter is
+// not really a non-repeating counter and can't be used alone. However
+// adding it is a good and cheap uniqueness improvement.
+//
+// Note that since generating a challenge is not exactly cheap/fast, we will
+// generate it in advance for every task request, outside the database
+// transaction, and will cache it if it turns out that it wasn't used (no
+// package configuration to (re-)build, etc).
+//
+static thread_local optional<string> challenge;
+
// Generate a random number in the specified range (max value is included).
//
static inline size_t
@@ -132,7 +154,8 @@ init (scanner& s)
//
template <typename T>
static inline query<T>
-package_query (brep::params::build_task& params,
+package_query (bool custom_bot,
+ brep::params::build_task& params,
interactive_mode imode,
uint64_t queued_expiration_ns)
{
@@ -141,9 +164,39 @@ package_query (brep::params::build_task& params,
query q (!query::build_tenant::archived);
+ if (custom_bot)
+ {
+ // Note that we could potentially only query the packages which refer to
+ // this custom bot key in one of their build configurations. For that we
+ // would need to additionally join the current query tables with the bot
+ // fingerprint-containing build_package_bot_keys and
+ // build_package_config_bot_keys tables and use the SELECT DISTINCT
+ // clause. The problem is that we also use the ORDER BY clause and in this
+ // case PostgreSQL requires all the ORDER BY clause expressions to also be
+ // present in the SELECT DISTINCT clause and fails with the 'for SELECT
+ // DISTINCT, ORDER BY expressions must appear in select list' error if
+ // that's not the case. Also note that in the ODB-generated code the
+ // 'build_package.project::TEXT' expression in the SELECT DISTINCT clause
+ // (see the CITEXT type mapping for details in libbrep/common.hxx) would
+ // not match the 'build_package.name' expression in the ORDER BY clause
+ // and so we will end up with the mentioned error. One (hackish) way to
+ // fix that would be to add a dummy member of the string type for the
+ // build_package.name column. This all sounds quite hairy at the moment
+ // and it also feels that this can potentially pessimize querying the
+ // packages built with the default bots only. Thus let's keep it simple
+ // for now and filter packages by the bot fingerprint at the program
+ // level.
+ //
+ q = q && (query::build_package::custom_bot.is_null () ||
+ query::build_package::custom_bot);
+ }
+ else
+ q = q && (query::build_package::custom_bot.is_null () ||
+ !query::build_package::custom_bot);
+
// Filter by repositories canonical names (if requested).
//
- const vector<string>& rp (params.repository ());
+ const strings& rp (params.repository ());
if (!rp.empty ())
q = q &&
@@ -213,25 +266,33 @@ handle (request& rq, response& rs)
throw invalid_request (400, e.what ());
}
- // Obtain the agent's public key fingerprint if requested. If the fingerprint
- // is requested but is not present in the request or is unknown, then respond
- // with 401 HTTP code (unauthorized).
+ // Obtain the agent's public key fingerprint if requested. If the
+ // fingerprint is requested but is not present in the request, then respond
+ // with 401 HTTP code (unauthorized). If a key with the specified
+ // fingerprint is not present in the build bot agent keys directory, then
+ // assume that this is a custom build bot.
+ //
+ // Note that if the agent authentication is not configured (the agent keys
+ // directory is not specified), then the bot can never be custom and its
+ // fingerprint is ignored, if present.
//
optional<string> agent_fp;
+ bool custom_bot (false);
if (bot_agent_key_map_ != nullptr)
{
- if (!tqm.fingerprint ||
- bot_agent_key_map_->find (*tqm.fingerprint) ==
- bot_agent_key_map_->end ())
+ if (!tqm.fingerprint)
throw invalid_request (401, "unauthorized");
agent_fp = move (tqm.fingerprint);
+
+ custom_bot = (bot_agent_key_map_->find (*agent_fp) ==
+ bot_agent_key_map_->end ());
}
// The resulting task manifest and the related build, package, and
// configuration objects. Note that the latter 3 are only meaningful if the
- // session in the task manifest is not empty.
+ // task manifest is present.
//
task_response_manifest task_response;
shared_ptr<build> task_build;
@@ -578,63 +639,6 @@ handle (request& rq, response& rs)
: optional<size_t> ()),
options_->build_hard_rebuild_timeout ()));
- // Return the challenge (nonce) if brep is configured to authenticate bbot
- // agents. Return nullopt otherwise.
- //
- // Nonce generator must guarantee a probabilistically insignificant chance
- // of repeating a previously generated value. The common approach is to use
- // counters or random number generators (alone or in combination), that
- // produce values of the sufficient length. 64-bit non-repeating and
- // 512-bit random numbers are considered to be more than sufficient for
- // most practical purposes.
- //
- // We will produce the challenge as the sha256sum of the 512-bit random
- // number and the 64-bit current timestamp combination. The latter is
- // not really a non-repeating counter and can't be used alone. However
- // adding it is a good and cheap uniqueness improvement.
- //
- auto challenge = [&agent_fp, &now, &fail, &trace, this] ()
- {
- optional<string> r;
-
- if (agent_fp)
- {
- try
- {
- auto print_args = [&trace, this] (const char* args[], size_t n)
- {
- l2 ([&]{trace << process_args {args, n};});
- };
-
- openssl os (print_args,
- nullfd, path ("-"), 2,
- process_env (options_->openssl (),
- options_->openssl_envvar ()),
- "rand",
- options_->openssl_option (), 64);
-
- vector<char> nonce (os.in.read_binary ());
- os.in.close ();
-
- if (!os.wait () || nonce.size () != 64)
- fail << "unable to generate nonce";
-
- uint64_t t (chrono::duration_cast<chrono::nanoseconds> (
- now.time_since_epoch ()).count ());
-
- sha256 cs (nonce.data (), nonce.size ());
- cs.append (&t, sizeof (t));
- r = cs.string ();
- }
- catch (const system_error& e)
- {
- fail << "unable to generate nonce: " << e;
- }
- }
-
- return r;
- };
-
// Convert butl::standard_version type to brep::version.
//
brep::version toolchain_version (tqm.toolchain_version.string ());
@@ -659,7 +663,8 @@ handle (request& rq, response& rs)
using pkg_query = query<buildable_package>;
using prep_pkg_query = prepared_query<buildable_package>;
- pkg_query pq (package_query<buildable_package> (params,
+ pkg_query pq (package_query<buildable_package> (custom_bot,
+ params,
imode,
queued_expiration_ns));
@@ -815,7 +820,8 @@ handle (request& rq, response& rs)
{
using query = query<buildable_package_count>;
- query q (package_query<buildable_package_count> (params,
+ query q (package_query<buildable_package_count> (custom_bot,
+ params,
imode,
queued_expiration_ns));
@@ -1241,7 +1247,8 @@ handle (request& rq, response& rs)
//
small_vector<bpkg::test_dependency, 1> tests;
- build_db_->load (*p, p->requirements_tests_section);
+ if (!p->requirements_tests_section.loaded ())
+ build_db_->load (*p, p->requirements_tests_section);
for (const build_test_dependency& td: p->tests)
{
@@ -1257,13 +1264,12 @@ handle (request& rq, response& rs)
// Try to use the test package configuration named the same as the
// current configuration of the main package. If there is no such
- // a configuration or it excludes the current target
- // configuration, then fallback to using the default configuration
- // (which must exist). If in this case the default configuration
- // excludes the current target configuration, then exclude this
- // external test package from the build task.
+ // configuration, then fall back to using the default
+ // configuration (which must exist). If the selected test package
+ // configuration excludes the current target configuration, then
+ // exclude this external test package from the build task.
//
- // Note that potentially the test package default configuration
+ // Note that potentially the selected test package configuration
// may contain some (bpkg) arguments associated, but we currently
// don't provide build bot worker with such information. This,
// however, is probably too far fetched so let's keep it simple
@@ -1271,6 +1277,13 @@ handle (request& rq, response& rs)
//
const build_package_config* tpc (find (pc.name, tp->configs));
+ if (tpc == nullptr)
+ {
+ tpc = find ("default", tp->configs);
+
+ assert (tpc != nullptr); // Must always be present.
+ }
+
// Use the `all` class as a least restrictive default underlying
// build class set. Note that we should only apply the explicit
// build restrictions to the external test packages (think about
@@ -1279,32 +1292,15 @@ handle (request& rq, response& rs)
//
build_db_->load (*tp, tp->constraints_section);
- if (tpc == nullptr ||
- exclude (*tpc,
+ if (exclude (*tpc,
tp->builds,
tp->constraints,
tc,
nullptr /* reason */,
true /* default_all_ucs */))
- {
- // If the current package configuration is "default", then we
- // cannot fallback and just exclude the test package outright.
- //
- if (pc.name == "default")
- continue;
-
- tpc = find ("default", tp->configs);
-
- assert (tpc != nullptr); // Must always be present.
+ continue;
- if (exclude (*tpc,
- tp->builds,
- tp->constraints,
- tc,
- nullptr /* reason */,
- true /* default_all_ucs */))
- continue;
- }
+ build_db_->load (*tp, tp->auxiliaries_section);
for (const build_auxiliary& ba:
tpc->effective_auxiliaries (tp->auxiliaries))
@@ -1325,20 +1321,56 @@ handle (request& rq, response& rs)
vector<auxiliary_machine> tms;
vector<build_machine> bms;
- tms.reserve (picked_machines.size ());
- bms.reserve (picked_machines.size ());
-
- for (pair<auxiliary_config_machine, string>& pm: picked_machines)
+ if (size_t n = picked_machines.size ())
{
- const machine_header_manifest& m (*pm.first.machine);
- tms.push_back (auxiliary_machine {m.name, move (pm.second)});
- bms.push_back (build_machine {m.name, m.summary});
+ tms.reserve (n);
+ bms.reserve (n);
+
+ for (pair<auxiliary_config_machine, string>& pm: picked_machines)
+ {
+ const machine_header_manifest& m (*pm.first.machine);
+ tms.push_back (auxiliary_machine {m.name, move (pm.second)});
+ bms.push_back (build_machine {m.name, m.summary});
+ }
}
return collect_auxiliaries_result {
move (tms), move (bms), move (tests)};
};
+ if (agent_fp && !challenge)
+ try
+ {
+ auto print_args = [&trace, this] (const char* args[], size_t n)
+ {
+ l2 ([&]{trace << process_args {args, n};});
+ };
+
+ openssl os (print_args,
+ nullfd, path ("-"), 2,
+ process_env (options_->openssl (),
+ options_->openssl_envvar ()),
+ "rand",
+ options_->openssl_option (), 64);
+
+ vector<char> nonce (os.in.read_binary ());
+ os.in.close ();
+
+ if (!os.wait () || nonce.size () != 64)
+ fail << "unable to generate nonce";
+
+ uint64_t t (chrono::duration_cast<chrono::nanoseconds> (
+ now.time_since_epoch ()).count ());
+
+ sha256 cs (nonce.data (), nonce.size ());
+ cs.append (&t, sizeof (t));
+ challenge = cs.string ();
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to generate nonce: " << e;
+ }
+
// While at it, collect the aborted for various reasons builds
// (interactive builds in multiple configurations, builds with too many
// auxiliary machines, etc) to send the notification emails at the end
@@ -1357,9 +1389,9 @@ handle (request& rq, response& rs)
//
bool unforced (true);
- for (bool done (false); task_response.session.empty () && !done; )
+ for (bool done (false); !task_response.task && !done; )
{
- transaction t (conn->begin ());
+ transaction tr (conn->begin ());
// We need to be careful in the random package ordering mode not to
// miss the end after having wrapped around.
@@ -1394,7 +1426,7 @@ handle (request& rq, response& rs)
//
if (chunk_size == 0)
{
- t.commit ();
+ tr.commit ();
if (start_offset != 0 && offset >= start_offset)
offset = 0;
@@ -1411,12 +1443,24 @@ handle (request& rq, response& rs)
// to bail out in the random package ordering mode for some reason (no
// more untried positions, need to restart, etc).
//
+ // Note that it is not uncommon for the sequentially examined packages
+ // to belong to the same tenant (single tenant mode, etc). Thus, we
+ // will cache the loaded tenant objects.
+ //
+ shared_ptr<build_tenant> t;
+
for (auto& bp: packages)
{
shared_ptr<build_package>& p (bp.package);
id = p->id;
+ // Reset the tenant cache if the current package belongs to a
+ // different tenant.
+ //
+ if (t != nullptr && t->id != id.tenant)
+ t = nullptr;
+
// If we are in the random package ordering mode, then cache the
// tenant the start offset refers to, if not cached yet, and check
// if we are still iterating over packages from this tenant
@@ -1476,8 +1520,6 @@ handle (request& rq, response& rs)
//
if (bp.interactive)
{
- shared_ptr<build_tenant> t;
-
// Note that the tenant can be archived via some other package on
// some previous iteration. Skip the package if that's the case.
//
@@ -1486,7 +1528,9 @@ handle (request& rq, response& rs)
//
if (!bp.archived)
{
- t = build_db_->load<build_tenant> (id.tenant);
+ if (t == nullptr)
+ t = build_db_->load<build_tenant> (id.tenant);
+
bp.archived = t->archived;
}
@@ -1596,8 +1640,48 @@ handle (request& rq, response& rs)
}
}
+ // If true, then the package is (being) built for some
+ // configurations.
+ //
+ // Note that since we only query the built and forced rebuild
+ // objects there can be false negatives.
+ //
+ bool package_built (false);
+
+ build_db_->load (*p, p->bot_keys_section);
+
for (const build_package_config& pc: p->configs)
{
+ // If this is a custom bot, then skip this configuration if it
+ // doesn't contain this bot's public key in its custom bot keys
+ // list. Otherwise (this is a default bot), skip this
+ // configuration if its custom bot keys list is not empty.
+ //
+ {
+ const build_package_bot_keys& bks (
+ pc.effective_bot_keys (p->bot_keys));
+
+ if (custom_bot)
+ {
+ assert (agent_fp); // Wouldn't be here otherwise.
+
+ if (find_if (
+ bks.begin (), bks.end (),
+ [&agent_fp] (const lazy_shared_ptr<build_public_key>& k)
+ {
+ return k.object_id ().fingerprint == *agent_fp;
+ }) == bks.end ())
+ {
+ continue;
+ }
+ }
+ else
+ {
+ if (!bks.empty ())
+ continue;
+ }
+ }
+
pkg_config = pc.name;
// Iterate through the built configurations and erase them from the
@@ -1610,6 +1694,9 @@ handle (request& rq, response& rs)
config_machines configs (conf_machines); // Make copy for this pkg.
auto pkg_builds (bld_prep_query.execute ());
+ if (!package_built && !pkg_builds.empty ())
+ package_built = true;
+
for (auto i (pkg_builds.begin ()); i != pkg_builds.end (); ++i)
{
auto j (
@@ -1637,35 +1724,38 @@ handle (request& rq, response& rs)
// the package configuration and for which all the requested
// auxiliary machines can be provided.
//
- auto i (configs.begin ());
- auto e (configs.end ());
+ const config_machine* cm (nullptr);
+ optional<collect_auxiliaries_result> aux;
build_db_->load (*p, p->constraints_section);
- optional<collect_auxiliaries_result> aux;
- for (; i != e; ++i)
+ for (auto i (configs.begin ()), e (configs.end ()); i != e; ++i)
{
- const build_target_config& tc (*i->second.config);
+ cm = &i->second;
+ const build_target_config& tc (*cm->config);
- if (!exclude (pc, p->builds, p->constraints, tc) &&
- (aux = collect_auxiliaries (p, pc, tc)))
- break;
+ if (!exclude (pc, p->builds, p->constraints, tc))
+ {
+ if (!p->auxiliaries_section.loaded ())
+ build_db_->load (*p, p->auxiliaries_section);
+
+ if ((aux = collect_auxiliaries (p, pc, tc)))
+ break;
+ }
}
- if (i != e)
+ if (aux)
{
- config_machine& cm (i->second);
- machine_header_manifest& mh (*cm.machine);
+ machine_header_manifest& mh (*cm->machine);
build_id bid (move (id),
- cm.config->target,
- cm.config->name,
+ cm->config->target,
+ cm->config->name,
move (pkg_config),
move (toolchain_name),
toolchain_version);
shared_ptr<build> b (build_db_->find<build> (bid));
- optional<string> cl (challenge ());
// Move the interactive build login information into the build
// object, if the package to be built interactively.
@@ -1690,12 +1780,14 @@ handle (request& rq, response& rs)
move (toolchain_version),
move (login),
move (agent_fp),
- move (cl),
+ move (challenge),
build_machine {
mh.name, move (mh.summary)},
move (aux->build_auxiliary_machines),
- controller_checksum (*cm.config),
- machine_checksum (*cm.machine));
+ controller_checksum (*cm->config),
+ machine_checksum (*cm->machine));
+
+ challenge = nullopt;
build_db_->persist (b);
}
@@ -1732,7 +1824,10 @@ handle (request& rq, response& rs)
}
b->agent_fingerprint = move (agent_fp);
- b->agent_challenge = move (cl);
+
+ b->agent_challenge = move (challenge);
+ challenge = nullopt;
+
b->machine = build_machine {mh.name, move (mh.summary)};
// Mark the section as loaded, so auxiliary_machines are
@@ -1743,8 +1838,8 @@ handle (request& rq, response& rs)
b->auxiliary_machines =
move (aux->build_auxiliary_machines);
- string ccs (controller_checksum (*cm.config));
- string mcs (machine_checksum (*cm.machine));
+ string ccs (controller_checksum (*cm->config));
+ string mcs (machine_checksum (*cm->machine));
// Issue the hard rebuild if it is forced or the
// configuration or machine has changed.
@@ -1763,8 +1858,8 @@ handle (request& rq, response& rs)
build_db_->update (b);
}
- shared_ptr<build_tenant> t (
- build_db_->load<build_tenant> (b->tenant));
+ if (t == nullptr)
+ t = build_db_->load<build_tenant> (b->tenant);
// Archive an interactive tenant.
//
@@ -1811,7 +1906,7 @@ handle (request& rq, response& rs)
}
if (tsb != nullptr || tsq != nullptr)
- tss = make_pair (move (*t->service), b);
+ tss = make_pair (*t->service, b);
}
}
@@ -1821,31 +1916,57 @@ handle (request& rq, response& rs)
move (aux->tests),
move (aux->task_auxiliary_machines),
move (bp.interactive),
- cm);
+ *cm);
task_build = move (b);
task_package = move (p);
task_config = &pc;
+ package_built = true;
+
break; // Bail out from the package configurations loop.
}
}
}
- // If the task response manifest is prepared, then bail out from the
- // package loop, commit the transaction and respond.
+ // If the task manifest is prepared, then bail out from the package
+ // loop, commit the transaction and respond. Otherwise, stash the
+ // build toolchain into the tenant, unless it is already stashed or
+ // the current package already has some configurations (being)
+ // built.
//
- if (!task_response.session.empty ())
+ if (!task_response.task)
+ {
+ // Note that since there can be false negatives for the
+ // package_built flag (see above), there can be redundant tenant
+ // queries which, however, seems harmless (query uses the primary
+ // key and the object memory footprint is small).
+ //
+ if (!package_built)
+ {
+ if (t == nullptr)
+ t = build_db_->load<build_tenant> (p->id.tenant);
+
+ if (!t->toolchain)
+ {
+ t->toolchain = build_toolchain {toolchain_name,
+ toolchain_version};
+
+ build_db_->update (t);
+ }
+ }
+ }
+ else
break;
}
- t.commit ();
+ tr.commit ();
}
// If we don't have an unbuilt package, then let's see if we have a
// build configuration to rebuild.
//
- if (task_response.session.empty () && !rebuilds.empty ())
+ if (!task_response.task && !rebuilds.empty ())
{
// Sort the configuration rebuild list with the following sort
// priority:
@@ -1875,8 +1996,6 @@ handle (request& rq, response& rs)
sort (rebuilds.begin (), rebuilds.end (), cmp);
- optional<string> cl (challenge ());
-
// Pick the first build configuration from the ordered list.
//
// Note that the configurations and packages may not match the
@@ -1935,13 +2054,17 @@ handle (request& rq, response& rs)
(t->interactive.has_value () ==
(imode == interactive_mode::true_))))
{
+ const build_target_config& tc (*cm.config);
+
build_db_->load (*p, p->constraints_section);
- const build_target_config& tc (*cm.config);
+ if (exclude (*pc, p->builds, p->constraints, tc))
+ continue;
+
+ build_db_->load (*p, p->auxiliaries_section);
- optional<collect_auxiliaries_result> aux;
- if (!exclude (*pc, p->builds, p->constraints, tc) &&
- (aux = collect_auxiliaries (p, *pc, tc)))
+ if (optional<collect_auxiliaries_result> aux =
+ collect_auxiliaries (p, *pc, tc))
{
assert (b->status);
@@ -1963,10 +2086,10 @@ handle (request& rq, response& rs)
unforced = (b->force == force_state::unforced);
- // Can't move from, as may need them on the next iteration.
- //
- b->agent_fingerprint = agent_fp;
- b->agent_challenge = cl;
+ b->agent_fingerprint = move (agent_fp);
+
+ b->agent_challenge = move (challenge);
+ challenge = nullopt;
const machine_header_manifest& mh (*cm.machine);
b->machine = build_machine {mh.name, mh.summary};
@@ -2059,16 +2182,19 @@ handle (request& rq, response& rs)
}
catch (const odb::deadlock&)
{
- // Just try with the next rebuild. But first, reset the task
- // response manifest that we may have prepared.
+ // Just try with the next rebuild. But first, restore the agent's
+ // fingerprint and challenge and reset the task manifest and the
+ // session that we may have prepared.
//
+ agent_fp = move (b->agent_fingerprint);
+ challenge = move (b->agent_challenge);
task_response = task_response_manifest ();
}
- // If the task response manifest is prepared, then bail out from the
- // package configuration rebuilds loop and respond.
+ // If the task manifest is prepared, then bail out from the package
+ // configuration rebuilds loop and respond.
//
- if (!task_response.session.empty ())
+ if (task_response.task)
break;
}
}
@@ -2082,7 +2208,7 @@ handle (request& rq, response& rs)
{
assert (tss); // Wouldn't be here otherwise.
- const tenant_service& ss (tss->first);
+ tenant_service& ss (tss->first);
// If the task build has no initial state (is just created), then
// temporarily move it into the list of the queued builds until the
@@ -2105,7 +2231,11 @@ handle (request& rq, response& rs)
nullopt /* initial_state */,
qhs,
log_writer_))
- update_tenant_service_state (conn, qbs.back ().tenant, f);
+ {
+ if (optional<string> data =
+ update_tenant_service_state (conn, qbs.back ().tenant, f))
+ ss.data = move (data);
+ }
}
// Send the `queued` notification for the task build, unless it is
@@ -2125,7 +2255,11 @@ handle (request& rq, response& rs)
initial_state,
qhs,
log_writer_))
- update_tenant_service_state (conn, qbs.back ().tenant, f);
+ {
+ if (optional<string> data =
+ update_tenant_service_state (conn, qbs.back ().tenant, f))
+ ss.data = move (data);
+ }
}
if (restore_build)
@@ -2141,16 +2275,20 @@ handle (request& rq, response& rs)
{
assert (tss); // Wouldn't be here otherwise.
- const tenant_service& ss (tss->first);
+ tenant_service& ss (tss->first);
const build& b (*tss->second);
if (auto f = tsb->build_building (ss, b, log_writer_))
- update_tenant_service_state (conn, b.tenant, f);
+ {
+ if (optional<string> data =
+ update_tenant_service_state (conn, b.tenant, f))
+ ss.data = move (data);
+ }
}
- // If the task response manifest is prepared, then check that the number
- // of the build auxiliary machines is less than 10. If that's not the
- // case, then turn the build into the built state with the abort status.
+ // If the task manifest is prepared, then check that the number of the
+ // build auxiliary machines is less than 10. If that's not the case,
+ // then turn the build into the built state with the abort status.
//
if (task_response.task &&
task_response.task->auxiliary_machines.size () > 9)
@@ -2254,11 +2392,15 @@ handle (request& rq, response& rs)
{
assert (tss); // Wouldn't be here otherwise.
- const tenant_service& ss (tss->first);
+ tenant_service& ss (tss->first);
const build& b (*tss->second);
if (auto f = tsb->build_built (ss, b, log_writer_))
- update_tenant_service_state (conn, b.tenant, f);
+ {
+ if (optional<string> data =
+ update_tenant_service_state (conn, b.tenant, f))
+ ss.data = move (data);
+ }
}
}