aboutsummaryrefslogtreecommitdiff
path: root/mod/mod-ci-github.cxx
diff options
context:
space:
mode:
Diffstat (limited to 'mod/mod-ci-github.cxx')
-rw-r--r--mod/mod-ci-github.cxx981
1 file changed, 921 insertions, 60 deletions
diff --git a/mod/mod-ci-github.cxx b/mod/mod-ci-github.cxx
index a99e516..d829ff6 100644
--- a/mod/mod-ci-github.cxx
+++ b/mod/mod-ci-github.cxx
@@ -32,9 +32,7 @@
// Will need to extract a few more fields from check_runs, but the layout
// is very similar to that of check_suite.
//
-// - Pull requests. Handle
-//
-// - Choose strong webhook secret
+// - Choose strong webhook secret (when deploying).
//
// - Check that delivery UUID has not been received before (replay attack).
//
@@ -71,7 +69,7 @@ namespace brep
ci_github::
ci_github (const ci_github& r, tenant_service_map& tsm)
- : handler (r),
+ : database_module (r),
ci_start (r),
options_ (r.initialized_ ? r.options_ : nullptr),
tenant_service_map_ (tsm)
@@ -95,9 +93,12 @@ namespace brep
// Prepare for the CI requests handling, if configured.
//
- if (options_->ci_github_app_webhook_secret_specified ())
+ if (options_->build_config_specified () &&
+ options_->ci_github_app_webhook_secret_specified ())
{
ci_start::init (make_shared<options::ci_start> (*options_));
+
+ database_module::init (*options_, options_->build_db_retry ());
}
}
@@ -108,13 +109,14 @@ namespace brep
HANDLER_DIAG;
- if (!options_->ci_github_app_webhook_secret_specified ())
- throw invalid_request (404, "GitHub CI request submission disabled");
+ if (build_db_ == nullptr)
+ throw invalid_request (501, "GitHub CI submission not implemented");
// Process headers.
//
string event; // Webhook event.
string hmac; // Received HMAC.
+ try
{
bool content_type (false);
@@ -181,6 +183,11 @@ namespace brep
if (hmac.empty ())
throw invalid_request (400, "missing x-hub-signature-256 header");
}
+ catch (const invalid_request& e)
+ {
+ error << "request header error: " << e.content;
+ throw;
+ }
// Read the entire request body into a buffer because we need to compute
// an HMAC over it and then parse it as JSON. The alternative of reading
@@ -274,8 +281,6 @@ namespace brep
// @@ There is also the check_run event (re-requested by user, either
// individual check run or all the failed check runs).
//
- // @@ There is also the pull_request event. Probably need to handle.
- //
if (event == "check_suite")
{
gh_check_suite_event cs;
@@ -301,10 +306,8 @@ namespace brep
}
else if (cs.action == "rerequested")
{
- // Someone manually requested to re-run the check runs in this check
- // suite. Treat as a new request.
- //
- // @@ This is probably broken.
+ // Someone manually requested to re-run all the check runs in this
+ // check suite. Treat as a new request.
//
return handle_check_suite_request (move (cs), warning_success);
}
@@ -331,9 +334,46 @@ namespace brep
}
else if (event == "pull_request")
{
- // @@ TODO
+ gh_pull_request_event pr;
+ try
+ {
+ json::parser p (body.data (), body.size (), "pull_request event");
+
+ pr = gh_pull_request_event (p);
+ }
+ catch (const json::invalid_json_input& e)
+ {
+ string m ("malformed JSON in " + e.name + " request body");
+
+ error << m << ", line: " << e.line << ", column: " << e.column
+ << ", byte offset: " << e.position << ", error: " << e;
+
+ throw invalid_request (400, move (m));
+ }
- throw invalid_request (501, "pull request events not implemented yet");
+ if (pr.action == "opened" || pr.action == "synchronize")
+ {
+ // opened
+ // A pull request was opened.
+ //
+ // synchronize
+ // A pull request's head branch was updated from the base branch or
+ // new commits were pushed to the head branch. (Note that there is
+ // no equivalent event for the base branch. That case gets handled
+ // in handle_check_suite_request() instead.)
+ //
+ // Note that both cases are handled the same: we start a new CI
+ // request which will be reported on the new commit id.
+ //
+ return handle_pull_request (move (pr), warning_success);
+ }
+ else
+ {
+ // Ignore the remaining actions by sending a 200 response with empty
+ // body.
+ //
+ return true;
+ }
}
else
{
@@ -345,6 +385,51 @@ namespace brep
}
}
+ // Let's capitalize the synthetic check run names to make them easier to
+ // distinguish from the regular ones.
+ //
+ static string merge_check_run_name ("MERGE-COMMIT");
+ static string conclusion_check_run_name ("CONCLUSION");
+
+ // Return the colored circle corresponding to a result_status.
+ //
+ static string
+ circle (result_status rs)
+ {
+ switch (rs)
+ {
+ case result_status::success: return "\U0001F7E2"; // Green circle.
+ case result_status::warning: return "\U0001F7E0"; // Orange circle.
+ case result_status::error:
+ case result_status::abort:
+ case result_status::abnormal: return "\U0001F534"; // Red circle.
+
+ // Valid values we should never encounter.
+ //
+ case result_status::skip:
+ case result_status::interrupt:
+ throw invalid_argument ("unexpected result_status value: " +
+ to_string (rs));
+ }
+
+ return ""; // Should never reach.
+ }
+
+ // Make a check run summary from a CI start_result.
+ //
+ static string
+ to_check_run_summary (const optional<ci_start::start_result>& r)
+ {
+ string s;
+
+ s = "```\n";
+ if (r) s += r->message;
+ else s += "Internal service error";
+ s += "\n```";
+
+ return s;
+ }
+
bool ci_github::
handle_check_suite_request (gh_check_suite_event cs, bool warning_success)
{
@@ -366,20 +451,128 @@ namespace brep
l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
- // Submit the CI request.
+ service_data sd (warning_success,
+ iat->token,
+ iat->expires_at,
+ cs.installation.id,
+ move (cs.repository.node_id),
+ move (cs.check_suite.head_sha));
+
+ // Create the conclusion check run.
+ //
+ {
+ check_run cr;
+ cr.name = conclusion_check_run_name;
+
+ if (gq_create_check_run (error,
+ cr,
+ iat->token,
+ sd.repository_node_id,
+ sd.report_sha,
+ nullopt /* details_url */,
+ build_state::building))
+ {
+ l3 ([&]{trace << "created check_run { " << cr << " }";});
+
+ sd.conclusion_node_id = move (cr.node_id);
+ }
+ else
+ {
+ // We could try to carry on in this case by either updating or
+ // creating this conclusion check run later. But let's not complicate
+ // things for now.
+ //
+ fail << "check suite " << cs.check_suite.node_id
+ << ": unable to create conclusion check run";
+ }
+ }
+
+ // The merge commits of any open pull requests with this branch as base
+ // branch will now be out of date, and thus so will be their CI builds and
+ // associated check runs (and, no, GitHub does not invalidate those CI
+ // results automatically; see below).
+ //
+ // Unfortunately GitHub does not provide a webhook for PR base branch
+ // updates (as it does for PR head branch updates) so we have to handle it
+ // here. We do so by fetching the open pull requests with this branch as
+ // base branch and then recreating the CI requests (cancel existing,
+ // create new) for each pull request.
+ //
+ // If we fail to recreate any of the PR CI requests, they and their check
+ // runs will be left reflecting outdated merge commits. If the new merge
+ // commit failed to be generated (merge conflicts) the PR will not be
+ // mergeable which is not entirely catastrophic. But on the other hand, if
+ // all of the existing CI request's check runs have already succeeded and
+ // the new merge commit succeeds (no conflicts) but contains logic errors,
+ // then a user would be able to merge a broken PR.
+ //
+ // Regardless of the nature of the error, we have to let the check suite
+ // handling code proceed so we only issue diagnostics. Note also that we
+ // want to run this code as early as possible to minimize the window of
+ // the user seeing misleading CI results.
+ //
+ if (cs.action == "requested")
+ {
+ // Fetch open pull requests with the check suite's head branch as base
+ // branch.
+ //
+ optional<vector<gh_pull_request>> prs (
+ gq_fetch_open_pull_requests (error,
+ iat->token,
+ sd.repository_node_id,
+ cs.check_suite.head_branch));
+
+ if (prs)
+ {
+ // Recreate each PR's CI request.
+ //
+ for (gh_pull_request& pr: *prs)
+ {
+ service_data prsd (sd.warning_success,
+ sd.installation_access.token,
+ sd.installation_access.expires_at,
+ sd.installation_id,
+ sd.repository_node_id,
+ pr.head_sha,
+ cs.repository.clone_url,
+ pr.number);
+
+ // Cancel the existing CI request and create a new unloaded CI
+ // request. After this call we will start getting the
+ // build_unloaded() notifications until (1) we load the request, (2)
+ // we cancel it, or (3) it gets archived after some timeout.
+ //
+ if (!create_pull_request_ci (error, warn, trace,
+ prsd, pr.node_id,
+ true /* cancel_first */))
+ {
+ error << "pull request " << pr.node_id
+ << ": unable to create unloaded CI request";
+ }
+ }
+ }
+ else
+ {
+ error << "unable to fetch open pull requests with base branch "
+ << cs.check_suite.head_branch;
+ }
+ }
+ // Cancel existing CI request if this check suite is being re-run.
+ //
+ else if (cs.action == "rerequested")
+ {
+ const string& nid (cs.check_suite.node_id);
+
+ if (!cancel (error, warn, &trace, *build_db_, "ci-github", nid))
+ error << "check suite " << nid << " (re-requested): unable to cancel";
+ }
+
+ // Start CI for the check suite.
//
repository_location rl (cs.repository.clone_url + '#' +
cs.check_suite.head_branch,
repository_type::git);
- string sd (service_data (warning_success,
- move (iat->token),
- iat->expires_at,
- cs.installation.id,
- move (cs.repository.node_id),
- move (cs.check_suite.head_sha))
- .json ());
-
// @@ What happens if we call this function with an already existing
// node_id (e.g., replay attack). See the UUID header above.
//
@@ -387,20 +580,577 @@ namespace brep
start (error,
warn,
verb_ ? &trace : nullptr,
- tenant_service (move (cs.check_suite.node_id),
- "ci-github",
- move (sd)),
+ tenant_service (cs.check_suite.node_id, "ci-github", sd.json ()),
move (rl),
vector<package> {},
nullopt, /* client_ip */
nullopt /* user_agent */));
- if (!r)
- fail << "unable to submit CI request";
+ if (!r || r->status != 200)
+ {
+ // Update the conclusion check run with failure.
+ //
+ result_status rs (result_status::error);
+
+ optional<gq_built_result> br (
+ gq_built_result (gh_to_conclusion (rs, sd.warning_success),
+ circle (rs) + ' ' + ucase (to_string (rs)),
+ to_check_run_summary (r)));
+
+ check_run cr;
+
+ // Set some fields for display purposes.
+ //
+ cr.node_id = *sd.conclusion_node_id;
+ cr.name = conclusion_check_run_name;
+
+ if (gq_update_check_run (error,
+ cr,
+ iat->token,
+ sd.repository_node_id,
+ *sd.conclusion_node_id,
+ nullopt /* details_url */,
+ build_state::built,
+ move (br)))
+ {
+ l3 ([&]{trace << "updated check_run { " << cr << " }";});
+ }
+ else
+ {
+ fail << "check suite " << cs.check_suite.node_id
+ << ": unable to update conclusion check_run "
+ << *sd.conclusion_node_id;
+ }
+ }
+
+ return true;
+ }
+
+ // High-level description of pull request (PR) handling
+ //
+ // - Some GitHub pull request terminology:
+ //
+ // - Fork and pull model: Pull requests are created in a forked
+ // repository. Thus the head and base repositories are different.
+ //
+ // - Shared repository model: The pull request head and base branches are
+ // in the same repository. For example, from a feature branch onto
+ // master.
+ //
+ // - CI the merge commit but add check runs to the pull request head commit
+ //
+ // Most of the major CI integrations build the merge commit instead of the
+ // PR head commit.
+ //
+ // Adding the check runs to the PR head commit is recommended by the
+ // following blog posts by a GitHub employee who is one of the best
+ // sources on these topics:
+ // https://www.kenmuse.com/blog/shared-commits-and-github-checks/ and
+ // https://www.kenmuse.com/blog/creating-github-checks/.
+ //
+ // Do not add any check runs to the merge commit because:
+ //
+ // - The PR head commit is the only commit that can be relied upon to
+ // exist throughout the PR's lifetime. The merge commit, on the other
+ // hand, can change during the PR process. When that happens the PR will
+ // look for check runs on the new merge commit, effectively discarding
+ // the ones we had before.
+ //
+ // - Although some of the GitHub documentation makes it sound like they
+ // expect check runs to be added to both the PR head commit and the
+ // merge commit, the PR UI does not react to the merge commit's check
+ // runs consistently. It actually seems to be quite broken.
+ //
+ // The only thing it seems to do reliably is blocking the PR merge if
+ // the merge commit's check runs are not successful (i.e., overriding the
+ // PR head commit's check runs). But the UI looks quite messed up
+ // generally in this state.
+ //
+ // Note that, in the case of a PR from a forked repository (the so-called
+ // "fork and pull" model), GitHub will copy the PR head commit from the
+ // head repository (the forked one) into the base repository. So the check
+ // runs must always be added to the base repository, whether the PR is
+ // from within the same repository or from a forked repository. The merge
+ // and head commits will be at refs/pull/<PR-number>/{merge,head}.
+ //
+ // - New commits are added to PR head branch
+ //
+ // @@ TODO In this case we will end up creating two sets of check runs on
+ // the same commit (pull_request.head.sha and
+ // check_suite.head_sha). It's not obvious which to prefer but I'm
+ // thinking the pull request is more important because in most
+ // development models it represents something that is more likely to
+ // end up in an important branch (as opposed to the head of a feature
+ // branch).
+ //
+ // Note that in these two cases we are building a different commit (the
+ // head commit vs the merge commit). So it's not clear how we can have
+ // a single check_suite result represent both.
+ //
+ // Possible solution: ignore all check_suites with non-empty
+ // pull_requests[].
+ //
+ // => check_suite(requested, PR_head) [only in shared repo model]
+ //
+ // Note: check_suite.pull_requests[] will contain all PRs with this
+ // branch as head.
+ //
+ // Note: check_suite.pull_requests[i].head.sha will be the new, updated
+ // PR head sha.
+ //
+ // => pull_request(synchronize)
+ //
+ // Note: The check_suite and pull_request can arrive in any order.
+ //
+ // - New commits are added to PR base branch
+ //
+ // Note: In this case pull_request.base.sha does not change, but the merge
+ // commit will be updated to include the new commits to the base branch.
+ //
+ // - @@ TODO? PR base branch changed (to a different branch)
+ //
+ // => pull_request(edited)
+ //
+ // - PR closed @@ TODO
+ //
+ // => pull_request(closed)
+ //
+ // Cancel CI?
+ //
+ // - PR merged @@ TODO
+ //
+ // => pull_request(merged)
+ //
+ // => check_suite(PR_base)
+ //
+ // Probably wouldn't want to CI the base again because the PR CI would've
+ // done the equivalent already.
+ //
+ bool ci_github::
+ handle_pull_request (gh_pull_request_event pr, bool warning_success)
+ {
+ HANDLER_DIAG;
+
+ l3 ([&]{trace << "pull_request event { " << pr << " }";});
+
+ // While we don't need the installation access token in this request,
+ // let's obtain it to flush out any permission issues early. Also, it is
+ // valid for an hour so we will most likely make use of it.
+ //
+ optional<string> jwt (generate_jwt (trace, error));
+ if (!jwt)
+ throw server_error ();
+
+ optional<gh_installation_access_token> iat (
+ obtain_installation_access_token (pr.installation.id,
+ move (*jwt),
+ error));
+ if (!iat)
+ throw server_error ();
+
+ l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+
+ service_data sd (warning_success,
+ move (iat->token),
+ iat->expires_at,
+ pr.installation.id,
+ move (pr.repository.node_id),
+ pr.pull_request.head_sha,
+ pr.repository.clone_url,
+ pr.pull_request.number);
+
+ // Create unloaded CI request. Cancel the existing CI request first if the
+ // head branch has been updated (action is `synchronize`).
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications until (1) we load the request, (2) we cancel it, or (3)
+ // it gets archived after some timeout.
+ //
+ bool cancel_first (pr.action == "synchronize");
+
+ if (!create_pull_request_ci (error, warn, trace,
+ sd, pr.pull_request.node_id,
+ cancel_first))
+ {
+ fail << "pull request " << pr.pull_request.node_id
+ << ": unable to create unloaded CI request";
+ }
return true;
}
+ // Note: only handles pull requests (not check suites).
+ //
+ function<optional<string> (const tenant_service&)> ci_github::
+ build_unloaded (tenant_service&& ts,
+ const diag_epilogue& log_writer) const noexcept
+ {
+ NOTIFICATION_DIAG (log_writer);
+
+ service_data sd;
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "failed to parse service data: " << e;
+ return nullptr;
+ }
+
+ // Get a new installation access token if the current one has expired.
+ //
+ const gh_installation_access_token* iat (nullptr);
+ optional<gh_installation_access_token> new_iat;
+
+ if (system_clock::now () > sd.installation_access.expires_at)
+ {
+ if (optional<string> jwt = generate_jwt (trace, error))
+ {
+ new_iat = obtain_installation_access_token (sd.installation_id,
+ move (*jwt),
+ error);
+ if (new_iat)
+ iat = &*new_iat;
+ }
+ }
+ else
+ iat = &sd.installation_access;
+
+ if (iat == nullptr)
+ return nullptr; // Try again on the next call.
+
+ auto make_iat_updater = [&new_iat, &error] ()
+ {
+ function<optional<string> (const tenant_service&)> r;
+
+ if (new_iat)
+ {
+ r = [&error,
+ iat = move (new_iat)] (const tenant_service& ts)
+ -> optional<string>
+ {
+ // NOTE: this lambda may be called repeatedly (e.g., due to
+ // transaction being aborted) and so should not move out of its
+ // captures.
+
+ service_data sd;
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "failed to parse service data: " << e;
+ return nullopt;
+ }
+
+ sd.installation_access = *iat;
+
+ return sd.json ();
+ };
+ }
+
+ return r;
+ };
+
+ // Create a synthetic check run with an in-progress state. Return the
+ // check run on success or nullopt on failure.
+ //
+ auto create_synthetic_cr = [iat,
+ &sd,
+ &error] (string name) -> optional<check_run>
+ {
+ check_run cr;
+ cr.name = move (name);
+
+ if (gq_create_check_run (error,
+ cr,
+ iat->token,
+ sd.repository_node_id,
+ sd.report_sha,
+ nullopt /* details_url */,
+ build_state::building))
+ {
+ return cr;
+ }
+ else
+ return nullopt;
+ };
+
+ // Update a synthetic check run with success or failure. Return the check
+ // run on success or nullopt on failure.
+ //
+ auto update_synthetic_cr = [iat,
+ &sd,
+ &error] (const string& node_id,
+ const string& name,
+ result_status rs,
+ string summary) -> optional<check_run>
+ {
+ assert (!node_id.empty ());
+
+ optional<gq_built_result> br (
+ gq_built_result (gh_to_conclusion (rs, sd.warning_success),
+ circle (rs) + ' ' + ucase (to_string (rs)),
+ move (summary)));
+
+ check_run cr;
+ cr.name = name; // For display purposes only.
+
+ if (gq_update_check_run (error,
+ cr,
+ iat->token,
+ sd.repository_node_id,
+ node_id,
+ nullopt /* details_url */,
+ build_state::built,
+ move (br)))
+ {
+ return cr;
+ }
+ else
+ return nullopt;
+ };
+
+ // Synthetic merge check run node ID. Empty until created on the first
+ // call or retrieved from service data on subsequent calls.
+ //
+ string merge_node_id;
+
+ // True if this is the first call (or the merge commit couldn't be created
+ // on the first call, in which case we just re-try by treating it as a
+ // first call).
+ //
+ bool first (!sd.merge_node_id);
+
+ // If this is the first call, (re)create the synthetic merge check run as
+ // soon as possible to make sure the previous check suite, if any, is no
+ // longer completed.
+ //
+ // Note that there is still a window between receipt of the pull_request
+ // event and the first bot/worker asking for a task, which could be
+ // substantial. We could probably (also) try to (re)create the merge
+ // check run in the webhook. @@ Maybe/later.
+ //
+ if (first)
+ {
+ if (auto cr = create_synthetic_cr (merge_check_run_name))
+ {
+ l3 ([&]{trace << "created check_run { " << *cr << " }";});
+
+ merge_node_id = move (*cr->node_id);
+ }
+ else
+ return make_iat_updater (); // Try again on the next call.
+ }
+ else
+ merge_node_id = *sd.merge_node_id;
+
+ // Start/check PR mergeability.
+ //
+ optional<string> mc (
+ gq_pull_request_mergeable (error, iat->token, ts.id)); // Merge commit.
+
+ if (!mc || mc->empty ())
+ {
+ if (!mc) // No merge commit yet.
+ {
+ // If this is a subsequent notification and there is no merge commit,
+ // then there is nothing to do.
+ //
+ if (!first)
+ return make_iat_updater ();
+
+ // Fall through to update service data.
+ }
+ else // Not mergeable.
+ {
+ // If the commit is not mergeable, cancel the CI request and fail the
+ // merge check run.
+ //
+ // Note that it feels like in this case we don't really need to create a
+ // failed synthetic conclusion check run since the PR cannot be merged
+ // anyway.
+
+ if (auto cr = update_synthetic_cr (
+ merge_node_id,
+ merge_check_run_name,
+ result_status::error,
+ "GitHub is unable to create test merge commit"))
+ {
+ l3 ([&]{trace << "updated check_run { " << *cr << " }";});
+
+ // Cancel the CI request.
+ //
+ // Ignore failure because this CI request may have been cancelled
+ // elsewhere due to an update to the PR base or head branches.
+ //
+ if (!cancel (error, warn, &trace, *build_db_, ts.type, ts.id))
+ l3 ([&]{trace << "CI for PR " << ts.id << " already cancelled";});
+
+ return nullptr; // No need to update service data in this case.
+ }
+ else
+ {
+ // Don't cancel the CI request if the merge check run update failed
+ // so that we can try again on the next call.
+
+ if (!first)
+ return make_iat_updater ();
+
+ // Fall through to update service data.
+ }
+ }
+
+ // This is a first notification, so record the merge check run in the
+ // service data.
+ //
+ return [&error,
+ iat = move (new_iat),
+ mni = move (merge_node_id)] (const tenant_service& ts)
+ -> optional<string>
+ {
+ // NOTE: this lambda may be called repeatedly (e.g., due to
+ // transaction being aborted) and so should not move out of its
+ // captures.
+
+ service_data sd;
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "failed to parse service data: " << e;
+ return nullopt;
+ }
+
+ if (iat)
+ sd.installation_access = *iat;
+
+ sd.merge_node_id = mni;
+
+ return sd.json ();
+ };
+ }
+
+ // If we are here, then it means we have a merge commit that we can load.
+ //
+ // Note that this can still be the first call (first=true).
+ //
+
+ // As a first step, (re)create the synthetic conclusion check run and then
+ // change the merge check run state to success. Do it in this order so
+ // that the check suite does not become completed.
+
+ // Synthetic conclusion check run node ID. Empty until created on the
+ // "second" call or retrieved from service data on subsequent calls.
+ //
+ string conclusion_node_id;
+
+ // True if this is the first call after the merge commit became available,
+ // which we will call the "second" call (or we couldn't create the
+ // conclusion check run on the first such call, in which case we just
+ // re-try by treating it as a "second" call).
+ //
+ bool second (!sd.conclusion_node_id);
+
+ if (second)
+ {
+ if (auto cr = create_synthetic_cr (conclusion_check_run_name))
+ {
+ l3 ([&]{trace << "created check_run { " << *cr << " }";});
+
+ conclusion_node_id = move (*cr->node_id);
+ }
+ }
+ else
+ conclusion_node_id = *sd.conclusion_node_id;
+
+ if (!conclusion_node_id.empty ()) // Conclusion check run was created.
+ {
+ // Update merge check run to successful.
+ //
+ if (auto cr = update_synthetic_cr (merge_node_id,
+ merge_check_run_name,
+ result_status::success,
+ "GitHub created test merge commit"))
+ {
+ l3 ([&]{trace << "updated check_run { " << *cr << " }";});
+
+ // Load the CI request.
+ //
+ // Example repository URL fragment:
+ //
+ // #pull/28/merge@1b6c9a361086ed93e6f1e67189e82d52de91c49b
+ //
+ repository_location rl (*sd.repository_clone_url + "#pull/" +
+ to_string (*sd.pr_number) + "/merge@" + *mc,
+ repository_type::git);
+
+ optional<start_result> r (
+ load (error, warn, &trace, *build_db_, move (ts), rl));
+
+ if (!r || r->status != 200)
+ {
+ if (auto cr = update_synthetic_cr (conclusion_node_id,
+ conclusion_check_run_name,
+ result_status::error,
+ to_check_run_summary (r)))
+ {
+ l3 ([&]{trace << "updated check_run { " << *cr << " }";});
+ }
+ else
+ {
+ // Nothing really we can do in this case since we will not receive
+ // any further notifications.
+ }
+
+ return nullptr; // No need to update service data in this case.
+ }
+ }
+ else
+ {
+ // Don't load the CI request if the merge check run update failed so
+ // that we can try again on the next call.
+ }
+ }
+
+ return [&error,
+ iat = move (new_iat),
+ mni = (first ? move (merge_node_id) : string ()),
+ cni = (second ? move (conclusion_node_id) : string ())]
+ (const tenant_service& ts) -> optional<string>
+ {
+ // NOTE: this lambda may be called repeatedly (e.g., due to
+ // transaction being aborted) and so should not move out of its
+ // captures.
+
+ service_data sd;
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "failed to parse service data: " << e;
+ return nullopt;
+ }
+
+ if (iat)
+ sd.installation_access = *iat;
+
+ if (!mni.empty ())
+ sd.merge_node_id = mni;
+
+ if (!cni.empty ())
+ sd.conclusion_node_id = cni;
+
+ return sd.json ();
+ };
+ }
+
// Build state change notifications (see tenant-services.hxx for
// background). Mapping our state transitions to GitHub pose multiple
// problems:
@@ -535,7 +1285,10 @@ namespace brep
//
warn << "check run " << bid << ": unexpected rebuild";
}
- else {} // Ignore interrupted.
+ else
+ {
+ // Ignore interrupted.
+ }
}
else
{
@@ -584,7 +1337,7 @@ namespace brep
if (gq_create_check_runs (error,
crs,
iat->token,
- sd.repository_node_id, sd.head_sha,
+ sd.repository_node_id, sd.report_sha,
build_state::queued))
{
for (const check_run& cr: crs)
@@ -678,7 +1431,10 @@ namespace brep
cr = move (*scr);
cr->state_synced = false;
}
- else {} // Network error during queued notification, ignore.
+ else
+ {
+ // Network error during queued notification, ignore.
+ }
}
else
warn << "check run " << bid << ": out of order building "
@@ -802,11 +1558,41 @@ namespace brep
return nullptr;
}
+ // Absent if have any unbuilt check runs.
+ //
+ optional<result_status> conclusion (*b.status);
+
check_run cr; // Updated check run.
{
string bid (gh_check_run_name (b)); // Full Build ID.
- if (check_run* scr = sd.find_check_run (bid))
+ optional<check_run> scr;
+ for (check_run& cr: sd.check_runs)
+ {
+ if (cr.build_id == bid)
+ {
+ assert (!scr);
+ scr = move (cr);
+ }
+ else
+ {
+ if (cr.state == build_state::built)
+ {
+ if (conclusion)
+ {
+ assert (cr.status);
+ *conclusion |= *cr.status;
+ }
+ }
+ else
+ conclusion = nullopt;
+ }
+
+ if (scr && !conclusion.has_value ())
+ break;
+ }
+
+ if (scr)
{
if (scr->state != build_state::building)
{
@@ -819,6 +1605,8 @@ namespace brep
if (scr->state == build_state::built)
return nullptr;
+ // Don't move from scr because we search sd.check_runs below.
+ //
cr = move (*scr);
}
else
@@ -858,29 +1646,6 @@ namespace brep
//
if (iat != nullptr)
{
- // Return the colored circle corresponding to a result_status.
- //
- auto circle = [] (result_status rs) -> string
- {
- switch (rs)
- {
- case result_status::success: return "\U0001F7E2"; // Green circle.
- case result_status::warning: return "\U0001F7E0"; // Orange circle.
- case result_status::error:
- case result_status::abort:
- case result_status::abnormal: return "\U0001F534"; // Red circle.
-
- // Valid values we should never encounter.
- //
- case result_status::skip:
- case result_status::interrupt:
- throw invalid_argument ("unexpected result_status value: " +
- to_string (rs));
- }
-
- return ""; // Should never reach.
- };
-
// Prepare the check run's summary field (the build information in an
// XHTML table).
//
@@ -912,9 +1677,9 @@ namespace brep
// Serialize a result row (colored circle, result text, log URL) for
// an operation and result_status.
//
- auto tr_result = [this, circle, &b] (xml::serializer& s,
- const string& op,
- result_status rs)
+ auto tr_result = [this, &b] (xml::serializer& s,
+ const string& op,
+ result_status rs)
{
// The log URL.
//
@@ -1015,7 +1780,7 @@ namespace brep
cr,
iat->token,
sd.repository_node_id,
- sd.head_sha,
+ sd.report_sha,
details_url (b),
build_state::built,
move (br)))
@@ -1025,6 +1790,71 @@ namespace brep
l3 ([&]{trace << "created check_run { " << cr << " }";});
}
}
+
+ if (cr.state == build_state::built)
+ {
+ // Check run was created/updated successfully to built.
+ //
+ // @@ TMP Feels like this should also be done inside
+ // gq_{create,update}_check_run() -- where cr.state is set if the
+ // create/update succeeds -- but I think we didn't want to pass a
+ // result_status into a gq_ function because converting to a GitHub
+ // conclusion/title/summary is reasonably complicated.
+ //
+ // @@@ We need to redo that code:
+ //
+ // - Pass the vector of check runs with new state (and status) set.
+ // - Update synchronized flag inside those functions.
+ // - Update the state to built if it's already built on GitHub --
+ // but then what do we set the status to?
+ // - Maybe signal in return value (optional<bool>?) that there is
+ // a discrepancy.
+ //
+ cr.status = b.status;
+
+ // Update the conclusion check run if all check runs are now built.
+ //
+ if (conclusion)
+ {
+ assert (sd.conclusion_node_id);
+
+ // Update the conclusion check run with success.
+ //
+ result_status rs (*conclusion);
+
+ optional<gq_built_result> br (
+ gq_built_result (gh_to_conclusion (rs, sd.warning_success),
+ circle (rs) + ' ' + ucase (to_string (rs)),
+ "All configurations are built"));
+
+ check_run cr;
+
+ // Set some fields for display purposes.
+ //
+ cr.node_id = *sd.conclusion_node_id;
+ cr.name = conclusion_check_run_name;
+
+ if (gq_update_check_run (error,
+ cr,
+ iat->token,
+ sd.repository_node_id,
+ *sd.conclusion_node_id,
+ nullopt /* details_url */,
+ build_state::built,
+ move (br)))
+ {
+ l3 ([&]{trace << "updated check_run { " << cr << " }";});
+ }
+ else
+ {
+ // Nothing we can do here except log the error.
+ //
+ error << "check suite " << ts.id
+ << ": unable to update conclusion check run "
+ << *sd.conclusion_node_id;
+ }
+ }
+ }
}
return [iat = move (new_iat),
@@ -1072,6 +1902,37 @@ namespace brep
};
}
+ bool ci_github::
+ create_pull_request_ci (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark& trace,
+ const service_data& sd,
+ const string& nid,
+ bool cf) const
+ {
+ // Cancel the existing CI request if asked to do so. Ignore failure
+ // because the request may already have been cancelled for other reasons.
+ //
+ if (cf)
+ {
+ if (!cancel (error, warn, &trace, *build_db_, "ci-github", nid))
+ l3 ([&] {trace << "unable to cancel CI for pull request " << nid;});
+ }
+
+ // Create a new unloaded CI request.
+ //
+ tenant_service ts (nid, "ci-github", sd.json ());
+
+ // Note: use no delay since we need to (re)create the synthetic merge
+ // check run as soon as possible.
+ //
+ return create (error, warn, &trace,
+ *build_db_, move (ts),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */)
+ .has_value ();
+ }
+
string ci_github::
details_url (const build& b) const
{