-rw-r--r--  INSTALL                                |  228
-rw-r--r--  INSTALL-CI-DEV                         |    2
-rw-r--r--  INSTALL-DEV                            |    1
-rw-r--r--  INSTALL-GITHUB-DEV                     |  135
-rw-r--r--  INSTALL-PROXY                          |    2
-rw-r--r--  LICENSE                                |    2
-rw-r--r--  NEWS                                   |   18
-rw-r--r--  brep/handler/submit/submit-git.in      |   21
-rw-r--r--  clean/clean.cxx                        |  163
-rwxr-xr-x  doc/cli.sh                             |    2
-rw-r--r--  doc/manual.cli                         |  232
-rw-r--r--  etc/brep-module.conf                   |   31
-rw-r--r--  etc/private/install/brep-module.conf   |   38
-rw-r--r--  etc/systemd/brep-load.service          |    1
-rw-r--r--  libbrep/build-extra.sql                |    1
-rw-r--r--  libbrep/build-package.hxx              |   82
-rw-r--r--  libbrep/build.hxx                      |    4
-rw-r--r--  libbrep/build.xml                      |  131
-rw-r--r--  libbrep/common.hxx                     |  155
-rw-r--r--  libbrep/package.cxx                    |    2
-rw-r--r--  libbrep/package.hxx                    |  137
-rw-r--r--  libbrep/package.xml                    |  529
-rw-r--r--  libbrep/review-manifest.cxx            |  220
-rw-r--r--  libbrep/review-manifest.hxx            |   80
-rw-r--r--  load/.gitignore                        |    1
-rw-r--r--  load/buildfile                         |    6
-rw-r--r--  load/load-with-metadata.in             |  147
-rw-r--r--  load/load.cli                          |   27
-rw-r--r--  load/load.cxx                          |  404
-rw-r--r--  load/types-parsers.cxx                 |    7
-rw-r--r--  load/types-parsers.hxx                 |    7
-rw-r--r--  manifest                               |   20
-rw-r--r--  migrate/migrate.cxx                    |   10
-rw-r--r--  mod/ci-common.cxx                      |  497
-rw-r--r--  mod/ci-common.hxx                      |  161
-rw-r--r--  mod/database-module.cxx                |   28
-rw-r--r--  mod/database-module.hxx                |   14
-rw-r--r--  mod/hmac.cxx                           |   13
-rw-r--r--  mod/mod-advanced-search.cxx            |  387
-rw-r--r--  mod/mod-advanced-search.hxx            |   41
-rw-r--r--  mod/mod-build-force.cxx                |   11
-rw-r--r--  mod/mod-build-result.cxx               |   38
-rw-r--r--  mod/mod-build-task.cxx                 |   69
-rw-r--r--  mod/mod-builds.cxx                     |   69
-rw-r--r--  mod/mod-ci-github-gh.cxx               |  422
-rw-r--r--  mod/mod-ci-github-gh.hxx               |  241
-rw-r--r--  mod/mod-ci-github-gq.cxx               |  517
-rw-r--r--  mod/mod-ci-github-gq.hxx               |  109
-rw-r--r--  mod/mod-ci-github-service-data.cxx     |  179
-rw-r--r--  mod/mod-ci-github-service-data.hxx     |   91
-rw-r--r--  mod/mod-ci-github.cxx                  | 2509
-rw-r--r--  mod/mod-ci-github.hxx                  |   82
-rw-r--r--  mod/mod-ci.cxx                         |   34
-rw-r--r--  mod/mod-ci.hxx                         |   24
-rw-r--r--  mod/mod-package-details.cxx            |   22
-rw-r--r--  mod/mod-package-version-details.cxx    |   16
-rw-r--r--  mod/mod-repository-root.cxx            |   31
-rw-r--r--  mod/mod-repository-root.hxx            |    2
-rw-r--r--  mod/module.cli                         |  107
-rw-r--r--  mod/page.cxx                           |  103
-rw-r--r--  mod/page.hxx                           |   43
-rw-r--r--  mod/tenant-service.hxx                 |   44
-rw-r--r--  mod/utility.cxx                        |   69
-rw-r--r--  mod/utility.hxx                        |    7
-rw-r--r--  tests/ci/ci-load.testscript            |   24
-rw-r--r--  tests/load/1/basics/packages.manifest  |    4
-rw-r--r--  tests/load/1/misc/packages.manifest    |   20
-rw-r--r--  tests/load/1/staging/packages.manifest |   12
-rw-r--r--  tests/load/1/testing/packages.manifest |    8
-rw-r--r--  tests/manifest/buildfile               |    6
-rw-r--r--  tests/manifest/driver.cxx              |   59
-rw-r--r--  tests/manifest/review.testscript       |  171
-rw-r--r--  tests/submit/submit-dir.testscript     |    3
-rw-r--r--  tests/submit/submit-git.testscript     |    6
-rw-r--r--  tests/submit/submit-pub.testscript     |    6
-rw-r--r--  www/advanced-search-body.css           |   98
-rw-r--r--  www/advanced-search.css                |    3
-rw-r--r--  www/advanced-search.scss               |    3
-rw-r--r--  www/package-details-body.css           |   19
-rw-r--r--  www/package-version-details-body.css   |   30

80 files changed, 7023 insertions(+), 2275 deletions(-)
diff --git a/INSTALL b/INSTALL
index 79d698f..7986b84 100644
--- a/INSTALL
+++ b/INSTALL
@@ -1,7 +1,8 @@
-This guide shows how to install and configure brep on a "deployment" machine as
-opposed to a "development" one (see INSTALL-DEV for the latter). Here we assume
-you are using a systemd-based distribution. If not, then you will need to
-replace systemctl commands with the equivalent init.d ones.
+This guide describes how to install and configure brep on a "deployment"
+machine as opposed to a "development" one (see INSTALL-DEV for the
+latter). Here we assume you are using a systemd-based distribution. If not,
+then you will need to replace systemctl commands with the equivalent init.d
+ones.
The below instructions include steps for setting up brep as the build2 build
bot controller, package submission, and CI request services. All these
@@ -233,6 +234,61 @@ $ psql -d brep_build -c 'SELECT DISTINCT name FROM build_package'
$ cp install/share/brep/etc/brep-module.conf config/
$ edit config/brep-module.conf # Adjust default values if required.
+See the following sub-sections for details on configuring various optional
+brep functionality.
+
+Once the brep module configuration is ready, the next step is to enable
+it in the Apache2 configuration file. Here we assume you have set up an
+appropriate Apache2 virtual server. Open the corresponding Apache2 .conf
+file and add the contents of brep/etc/brep-apache2.conf into the
+<VirtualHost> section.
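+
+For example, the result could look like this (a sketch with an illustrative
+server name and path; the contents of brep-apache2.conf can equivalently be
+pulled in with the Include directive, as shown):
+
+  <VirtualHost *:443>
+    ServerName ci.example.org
+    ...
+    Include /home/brep/install/share/brep/etc/brep-apache2.conf
+  </VirtualHost>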
+
+The output content types of the brep module are application/xhtml+xml,
+text/manifest and text/plain. If you would like to make sure they get
+compressed (along with linked CSS), also add the following lines:
+
+ # Compress brep output (xhtml+xml) and CSS.
+ #
+ AddOutputFilterByType DEFLATE application/xhtml+xml
+ AddOutputFilterByType DEFLATE text/manifest
+ AddOutputFilterByType DEFLATE text/plain
+ AddOutputFilterByType DEFLATE text/css
+
+Then restart Apache2:
+
+$ sudo systemctl restart apache2
+
+To verify, visit the repository root. To troubleshoot, see Apache logs.
+
+Now that Apache2 loads the brep module which requires PostgreSQL, it is a good
+idea to make the Apache2 service depend on PostgreSQL so that they are started
+in proper order. Here is how we can do it with systemd (with newer versions
+you can use 'systemctl edit' instead of mkdir and cat):
+
+# mkdir -p /etc/systemd/system/apache2.service.d/
+# cat >/etc/systemd/system/apache2.service.d/postgresql.conf
+[Unit]
+Requires=postgresql.service
+After=postgresql.service
+^D
+
+# mkdir -p /etc/systemd/system/postgresql.service.d/
+# cat >/etc/systemd/system/postgresql.service.d/apache2.conf
+[Unit]
+Wants=apache2.service
+^D
+
+# systemctl daemon-reload
+# systemctl cat apache2 # Verify override is listed.
+# systemctl cat postgresql # Verify override is listed.
+# systemctl stop postgresql
+# systemctl status apache2 # Verify stopped.
+# systemctl start postgresql
+# systemctl status apache2 # Verify started.
+
+
+6.1 Enabling build bot controller functionality
+
To enable the build2 build bot controller functionality you will need to set
the build-config option in brep-module.conf. To also enable the build
artifacts upload functionality you will need to specify the upload-data
@@ -250,6 +306,9 @@ $ setfacl -m g:www-data:rwx /home/brep/bindist-data
For sample upload handler implementations see brep/handler/upload/.
+
+6.2 Enabling package submission functionality
+
To enable the package submission functionality you will need to specify the
submit-data and submit-temp directories in brep-module.conf. Note that these
directories must exist and have read, write, and execute permissions granted
@@ -272,6 +331,9 @@ $ edit config/submit.xhtml # Add custom form fields, adjust CSS style, etc.
For sample submission handler implementations see brep/handler/submit/.
+
+6.3 Enabling CI request functionality
+
To enable the CI request functionality you will need to specify the ci-data
directory in brep-module.conf. Note that this directory must exist and have
read, write, and execute permissions granted to the www-data user. This, for
@@ -291,52 +353,119 @@ $ edit config/ci.xhtml # Add custom form fields, adjust CSS style, etc.
For sample CI request handler implementations see brep/handler/ci/.
-Here we assume you have setup an appropriate Apache2 virtual server. Open the
-corresponding Apache2 .conf file and add the contents of
-brep/etc/brep-apache2.conf into the <VirtualHost> section.
-The output content types of the brep module are application/xhtml+xml,
-text/manifest and text/plain. If you would like to make sure they get
-compressed (along with linked CSS), also add the following lines:
+6.4 Enabling GitHub CI integration
- # Compress brep output (xhtml+xml) and CSS.
- #
- AddOutputFilterByType DEFLATE application/xhtml+xml
- AddOutputFilterByType DEFLATE text/manifest
- AddOutputFilterByType DEFLATE text/plain
- AddOutputFilterByType DEFLATE text/css
+6.4.1 Background
-Restart Apache2:
+The GitHub CI integration has one user-configurable setting:
+warning=<success|failure> (whether or not to fail on warnings).
-$ sudo systemctl restart apache2
+In order not to have to support repository configuration files, a deployment
+will consist of two registered GitHub Apps with the same webhook URL (i.e.,
+the same brep instance) but different query parameters: one with
+warning=success and the other with warning=failure. The App id is passed (as a
+query parameter) so that we know which private key to use (the key cannot be
+shared between Apps).
-To verify, visit the repository root. To troubleshoot, see Apache logs.
+We will call the warning=success App the "Default App" and the warning=failure
+App the "Werror App".
-Now that Apache2 loads the brep module which requires PostgreSQL, it is a good
-idea to make the Apache2 service depend on PostgreSQL so that they are started
-in proper order. Here is how we can do it with systemd (with newer versions
-you can use 'systemctl edit' instead of mkdir and cat):
+6.4.2 Create the GitHub Apps
-# mkdir -p /etc/systemd/system/apache2.service.d/
-# cat >/etc/systemd/system/apache2.service.d/postgresql.conf
-[Unit]
-Requires=postgresql.service
-After=postgresql.service
-^D
+To create a GitHub App under the <org> organization, visit
+https://github.com/organizations/<org>/settings/apps (Settings -> Developer
+settings -> GitHub Apps). Then click on New GitHub App.
-# mkdir -p /etc/systemd/system/postgresql.service.d/
-# cat >/etc/systemd/system/postgresql.service.d/apache2.conf
-[Unit]
-Wants=apache2.service
-^D
+App names (note: 34 character limit):
-# systemctl daemon-reload
-# systemctl cat apache2 # Verify override is listed.
-# systemctl cat postgresql # Verify override is listed.
-# systemctl stop postgresql
-# systemctl status apache2 # Verify stopped.
-# systemctl start postgresql
-# systemctl status apache2 # Verify started.
+ Default App: "<org> CI"
+ Werror App: "<org> CI - warnings as errors"
+
+App description:
+
+ Default App: "Trigger <org> CI on branch push and pull request."
+ Werror App: "Trigger <org> CI on branch push and pull request. Warnings are
+ treated as errors."
+
+App homepage:
+
+ https://ci.<org>.org/
+
+Skip the "Identifying and authorizing users" and "Post installation" sections.
+
+Leave webhooks active.
+
+Webhook URL:
+
+ Default App: https://ci.<org>.org/?ci-github&app-id=XXX&warning=success
+ Werror App: https://ci.<org>.org/?ci-github&app-id=XXX&warning=failure
+
+Note that the App id only becomes available once the App has been registered,
+so we will update it in both URLs later.
+
+Webhook secret: Use the same random 64-character string for both Apps.
+
+ echo `tr -dc -- A-Za-z0-9 </dev/urandom | head -c 64`
+
+Note that GitHub says only that the secret should be "a random string with
+high entropy." However, many sources say 32 bytes should be secure enough
+for HMAC-SHA256, while others recommend 64 bytes for maximal security
+at an insignificant performance cost. (Keys longer than 64 bytes are hashed to
+match the internal block size and are therefore not recommended.)
+
+Repository permissions:
+ - Checks: RW
+ - Contents: RO (for Push events)
+ - Metadata (mandatory): RO
+ - Pull requests: RO
+
+Subscribed events:
+ - Check suite
+ - Pull request
+ - Push
+
+Note that GitHub Apps with write access to the "Checks" permission are
+automatically subscribed to check_suite(requested|rerequested) and check_run
+events, so there is no need to subscribe to them explicitly. However, in
+order to receive check_suite(completed) events, which we need, one does have
+to subscribe to Check suite.
+
+Select "Any account" under "Where can this GitHub App be installed?".
+
+Click "Create GitHub App".
+
+When the page reloads (should be the General tab), note the App id and replace
+the XXX in the webhook URL with it.
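+
+For example, if the Default App's id turns out to be 12345 (the value assumed
+in the configuration section below), the Default App's webhook URL becomes:
+
+  https://ci.<org>.org/?ci-github&app-id=12345&warning=success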
+
+Still in the General tab, scroll to Private keys and generate a private key.
+The file will be downloaded by the browser.
+
+@@ TODO Logo
+@@ TODO Create Marketplace listing
+
+6.4.3 Configure brep
+
+Assume the following configuration values:
+
+- Webhook secret: abcdefg
+- Default App id: 12345
+- Werror App id: 67890
+
+In brep-module.conf:
+
+Set the webhook secret from the GitHub App settings:
+
+ ci-github-app-webhook-secret abcdefg
+
+Associate each GitHub App id with the App's private key:
+
+ ci-github-app-id-private-key 12345=path/to/default-app-private-key.pem
+ ci-github-app-id-private-key 67890=path/to/werror-app-private-key.pem
+
+Now brep should be ready to handle the webhook event requests triggered by
+branch pushes and pull requests in repositories into which one of these Apps
+has been installed.
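+
+As a quick smoke test, one can hand-deliver a signed webhook request with
+curl (a sketch; the ping event and payload are illustrative and brep's exact
+response to them may vary, but a request with an invalid signature should be
+rejected):
+
+  body='{"zen":"smoke test"}'
+  sig=$(printf '%s' "$body" | openssl dgst -sha256 -hmac abcdefg | \
+        sed 's/^.* //')
+
+  curl -i -H 'X-GitHub-Event: ping' \
+       -H "X-Hub-Signature-256: sha256=$sig" \
+       -H 'Content-Type: application/json' \
+       --data "$body" \
+       'https://ci.<org>.org/?ci-github&app-id=12345&warning=success'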
7. Optimize CSS
@@ -407,6 +536,23 @@ PATH=/usr/local/bin:/bin:/usr/bin
Note that here we assume that bpkg (which is executed by brep-load) is in one
of the PATH's directories (usually /usr/local/bin).
+To additionally load the package metadata (reviews, etc) into the database,
+brep-load-with-metadata wrapper script can be used instead of brep-load
+directly. In this case, the package git repository that contains the owners/
+metadata directory should be pre-cloned (read-only and shallowly) as follows:
+
+$ git init public-metadata
+$ cd public-metadata
+$ git remote add origin <repository-url>
+$ git config core.sparsecheckout true
+$ echo "owners/" > .git/info/sparse-checkout
+$ git pull --depth=1 origin master
+
+And the above crontab brep-load entry needs to be replaced with the following
+version:
+
+$HOME/install/bin/brep-load-with-metadata --timeout 60 /home/brep/public-metadata $HOME/config/loadtab
+
8.b Setup Periodic Loader, Cleaner, and Monitor Execution with systemd
diff --git a/INSTALL-CI-DEV b/INSTALL-CI-DEV
index b8502d8..c1bd8ec 100644
--- a/INSTALL-CI-DEV
+++ b/INSTALL-CI-DEV
@@ -1,4 +1,4 @@
-This guide shows how to configure the brep module for serving the CI and
+This guide describes how to configure the brep module for serving the CI and
build2 build bot requests and how to smoke-test it.
Note that during the testing both the user and CI submission handler (executed
diff --git a/INSTALL-DEV b/INSTALL-DEV
index f023962..c197b7b 100644
--- a/INSTALL-DEV
+++ b/INSTALL-DEV
@@ -1,6 +1,7 @@
The goal of this setup is to run the brep Apache2 modules from the development
build while still being as close to the real deployment as possible. To this
end, we use default, system-wide installations of both Apache2 and Postgres.
+See also INSTALL-CI-DEV and INSTALL-GITHUB-DEV.
In the below instructions replace <user> with your login and www-data with the
user under which Apache2 is running (See the "User" directive in the Apache2
diff --git a/INSTALL-GITHUB-DEV b/INSTALL-GITHUB-DEV
index 45f4b9b..602b65d 100644
--- a/INSTALL-GITHUB-DEV
+++ b/INSTALL-GITHUB-DEV
@@ -1,4 +1,4 @@
-This document explains how to get GitHub webhooks (a notification that an
+This guide describes how to get GitHub webhooks (a notification that an
event such as a push has occurred on a repository) delivered to a
locally-running instance of brep (currently to initiate a CI job).
@@ -22,33 +22,67 @@ instance. This is achieved by setting the GitHub app's webhook URL to that of
the webhook proxy smee.io (as recommended by GitHub) and connecting it to our
local brep instance via the locally-run smee client (a Node application).
+0.0 User configuration
+
+This GitHub CI integration only has one user-configurable option:
+warning=<success|failure> (whether or not to fail on warnings).
+
+In order not to have to support repository configuration files the live
+deployment will consist of two registered GitHub apps with the same webhook
+URL (i.e., the same brep instance) but different query parameters: one with
+warning=success and the other with warning=failure. The app id is passed so
+that we know which private key to use (the key cannot be shared between apps).
+
+Note, however, that only a single GitHub app is required during development.
+
1. Follow the instructions in INSTALL-DEV to get brep set up.
-2. Register the GitHub app
+2. Set up the webhook proxy
-GitHub doc: Registering a GitHub App (note that that doc is a bit out of date)
-https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app
+Go to https://smee.io/ and start a new channel. Note the webhook proxy URL,
+which will look something like
+
+ https://smee.io/7stvNqVgyQRlIhbY
-Skip the steps marked "optional" and leave authorization-related settings at
-their defaults.
+This will be used in the GitHub app's webhook URL below.
-@@ TODO Update authentication-related info once better understood.
+3. Register the GitHub app
+
+GitHub reference: Registering a GitHub App (note: somewhat out of date)
+https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app
-At this stage the only settings important to us are:
+At this stage the only settings we need to update are:
- App name
-- Webhook URL (updated later -- leave webhooks deactivated for now)
+- Homepage URL (https://build2.org)
+- Webhook
+ - URL: set to the webhook proxy URL
+ - Secret (e.g. "deadbeef")
+ - Leave SSL verification enabled
- Repository permissions
- Checks: RW
+ - Metadata (mandatory): RO
- Pull requests: RO
- - Contents: RO
- - Metadata: RO
+ - Contents: RO (for Push events)
- Subscribed events
- Check suite
- - Check run
- Pull request
+ - Push
-3. Install the GitHub app
+ Note that GitHub apps with write access to the "Checks" permission are
+ automatically subscribed to check_suite(requested|rerequested) and check_run
+ events, so there is no need to subscribe to them explicitly. However, in
+ order to receive check_suite(completed) events, which we need, one does
+ have to subscribe to check_suite.
+
+Click "Create GitHub App" button. When the page reloads:
+
+- Note the app id (e.g. 12345).
+- Append "?app-id=12345&warning=failure" to the webhook URL (see the
+  resulting URL below).
+- Scroll to Private keys and generate a private key. The file will be
+ downloaded by the browser.
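+
+For example, with the webhook proxy URL and app id above, the resulting
+webhook URL would be:
+
+  https://smee.io/7stvNqVgyQRlIhbY?app-id=12345&warning=failure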
+
+4. Install the GitHub app
GitHub doc: Installing your own GitHub App
https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app
@@ -56,14 +90,20 @@ https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app
It would probably make sense to install it to your own user account and
restrict its access to a test repository.
-4. Forward GitHub webhooks to a local brep instance
+5. Configure brep
-Go to https://smee.io/ and start a new channel. Note the webhook proxy URL,
-which will look something like
+In brep-module.conf:
- https://smee.io/7stvNqVgyQRlIhbY
+- Set the webhook secret from the GitHub app settings:
-Set the GitHub app's webhook URL to this proxy URL.
+ ci-github-app-webhook-secret "deadbeef"
+
+- Associate the GitHub app id with the path of the private key downloaded
+ above:
+
+ ci-github-app-id-private-key 12345=path/to/private-key.pem
+
+6. Forward GitHub webhooks to a local brep instance
Install the smee client:
@@ -79,12 +119,53 @@ GitHub CI endpoint's URL with --target:
Trigger a webhook delivery from GitHub by pushing a commit to a repository the
GitHub app is installed in. You should see the webhook delivery on the smee.io
-channel page and the smee client will also print something to terminal.
-
-Any webhook delivery can be redelivered by clicking a button on the smee.io
-channel page (or the app's advanced settings page on GitHub) so no need to
-repeatedly push to the repository.
-
-You can also see the HTTP headers and JSON payload of delivered webhooks on
-both the GitHub app's advanced settings page and the smee.io channel page, but
-smee.io's presentation is much better. (There's also wireshark of course.)
+channel page.
+
+A webhook can be redelivered from the smee.io channel page or the app's
+advanced settings page on GitHub so no need to repeatedly push to the
+repository.
+
+Both the smee.io channel and the GitHub app's advanced settings show the JSON
+payloads of delivered webhooks. smee.io's presentation is better but the
+GitHub app page also shows the HTTP headers. Wireshark might be better in both
+aspects but can't redeliver webhooks.
+
+7. Test scenarios
+
+- Branch push (BP).
+
+ - Success (observe check run state transitions). Test with 2 build configs.
+ - Failure (observe check run state transitions).
+ - Push new commit to branch.
+ - Re-requested check suite.
+ - Re-requested check run (observe check run state transitions).
+ - Re-requested check run but tenant archived.
+ - Cancel previous check suite on forced push.
+ - Cancel previous check suite on branch delete.
+ - Head commit shared with another BP.
+ - Cancel previous check suite on forced push with shared previous commit.
+ - Cancel previous check suite on branch delete with shared previous commit.
+
+- Pull request (PR).
+
+ - Local PR.
+
+ - Success.
+ - Failure.
+ - Push new commit to head.
+ - Re-requested check suite.
+ - Re-requested check run.
+ - Head shared with BP (pull_request is received after check_suite).
+ - Not mergeable.
+ - Head behind base.
+ - Head commit has changed while testing mergeability.
+
+ - Remote PR.
+
+ - Success.
+ - Failure.
+ - Push new commit to head.
+ - Cancel previous check suite on head move.
+ - Re-requested check suite.
+ - Re-requested check run.
+ - Head shared with another remote PR.
diff --git a/INSTALL-PROXY b/INSTALL-PROXY
index 418846a..2a3f3f4 100644
--- a/INSTALL-PROXY
+++ b/INSTALL-PROXY
@@ -1,4 +1,4 @@
-This guide shows how to configure the Apache2-based HTTP proxy server for
+This guide describes how to configure the Apache2-based HTTP proxy server for
proxying HTTP(S) requests and caching the responses.
Note that for security reasons most clients (curl, wget, etc) perform HTTPS
diff --git a/LICENSE b/LICENSE
index 5a25163..a1e9de0 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2014-2024 the build2 authors (see the AUTHORS and LEGAL files).
+Copyright (c) 2014-2025 the build2 authors (see the AUTHORS and LEGAL files).
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/NEWS b/NEWS
index 5382c22..2c3f631 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,21 @@
+Version 0.17.0
+
+ * Support for auxiliary machines/configurations.
+
+ * Support for tenant-associated service notifications. These can be used,
+ for example, for third-party CI UI integration (such as GitHub).
+
+ * Support for canceling CI requests.
+
+ * Support for custom build bots.
+
+ * The build-toolchain-email configuration option can now be used to specify
+ per-toolchain values.
+
+ * New search-description configuration option.
+
+ * New --ignore-unresolv-tests, --ignore-unresolv-cond loader options.
+
Version 0.16.0
* Note: brep_build database schema migration from version 18 is unsupported.
diff --git a/brep/handler/submit/submit-git.in b/brep/handler/submit/submit-git.in
index c882b84..f4e36a3 100644
--- a/brep/handler/submit/submit-git.in
+++ b/brep/handler/submit/submit-git.in
@@ -618,9 +618,24 @@ for i in {1..11}; do
check_connectivity "$control" "$git_timeout" ""
- if ! run_silent git "${git_http_timeout[@]}" clone $gqo $gvo --depth 1 \
---single-branch --branch "build2-control" "$control" "$ctl_dir" >&2; then
- exit_with_manifest 422 "failed to git-clone $control"
+ cmd=(git "${git_http_timeout[@]}" clone $gqo $gvo --depth 1 \
+--single-branch --branch "build2-control" "$control" "$ctl_dir")
+
+ trace_cmd "${cmd[@]}"
+
+ # Let's add the git-clone error message to the response, turning it into
+ # an info. This way the user may potentially see the following
+ # bdep-publish diagnostics:
+ #
+ # error: failed to git-clone build2-control branch of https://example.com/foo/bar
+ # info: Could not find remote branch build2-control to clone.
+ # info: Remote branch build2-control not found in upstream origin
+ # info: reference: 8589b4484f36
+ #
+ if ! e="$("${cmd[@]}" 2>&1)"; then
+ e="$(sed -E -e 's/^(error|fatal|warning):/ info:/' <<<"$e")"
+ e="failed to git-clone build2-control branch of $control"$'\n'"$e"
+ exit_with_manifest 422 "$e"
fi
if [ ! -f "$ctl_dir/submit/${sha256sum:0:16}" ]; then
diff --git a/clean/clean.cxx b/clean/clean.cxx
index 828ae4b..80c688b 100644
--- a/clean/clean.cxx
+++ b/clean/clean.cxx
@@ -313,76 +313,129 @@ namespace brep
prep_pkg_query pkg_prep_query (
conn->prepare_query<build_package_version> ("package-query", pq));
+ // On a recoverable database error we will retry querying/traversing the
+ // builds in the current chunk, up to 10 times. If we still end up with a
+ // recoverable database error after that, then just skip this chunk of
+ // builds.
+ //
+ const size_t max_retries (10);
+ size_t retry (max_retries);
+
+ // If we fail to erase some builds due to a recoverable database error
+ // and no builds are erased during this run, then we terminate with the
+ // exit code 3 (recoverable database error).
+ //
+ bool erased (false);
+ optional<string> re;
+
for (bool ne (true); ne; )
{
- transaction t (conn->begin ());
-
- // Query builds.
- //
- auto builds (bld_prep_query.execute ());
+ size_t n (0);
- if ((ne = !builds.empty ()))
+ try
{
- for (const auto& b: builds)
- {
- auto i (timeouts.find (b.toolchain_name));
-
- timestamp et (i != timeouts.end ()
- ? i->second
- : default_timeout);
-
- // Note that we don't consider the case when both the configuration
- // and the package still exist but the package now excludes the
- // configuration (configuration is now of the legacy class instead
- // of the default class, etc). Should we handle this case and
- // re-implement in a way brep-monitor does it? Probably not since
- // the described situation is not very common and storing some extra
- // builds which sooner or later will be wiped out due to the timeout
- // is harmless. The current implementation, however, is simpler and
- // consumes less resources in runtime (doesn't load build package
- // objects, etc).
- //
- bool cleanup (
- // Check that the build is not stale.
- //
- b.timestamp <= et ||
+ transaction t (conn->begin ());
+
+ // Query builds.
+ //
+ auto builds (bld_prep_query.execute ());
- // Check that the build configuration is still present.
+ n = builds.size ();
+
+ size_t not_erased (0);
+
+ if ((ne = (n != 0)))
+ {
+ for (const auto& b: builds)
+ {
+ auto i (timeouts.find (b.toolchain_name));
+
+ timestamp et (i != timeouts.end ()
+ ? i->second
+ : default_timeout);
+
+ // Note that we don't consider the case when both the
+ // configuration and the package still exist but the package now
+ // excludes the configuration (configuration is now of the legacy
+ // class instead of the default class, etc). Should we handle this
+ // case and re-implement in a way brep-monitor does it? Probably
+ // not since the described situation is not very common and
+ // storing some extra builds which sooner or later will be wiped
+ // out due to the timeout is harmless. The current implementation,
+ // however, is simpler and consumes less resources in runtime
+ // (doesn't load build package objects, etc).
//
- // Note that we unable to detect configuration changes and rely on
- // periodic rebuilds to take care of that.
+ bool cleanup (
+ // Check that the build is not stale.
+ //
+ b.timestamp <= et ||
+
+ // Check that the build configuration is still present.
+ //
+ // Note that we are unable to detect configuration changes and rely
+ // on periodic rebuilds to take care of that.
+ //
+ configs_set.find (
+ build_target_config_id {
+ b.target, b.target_config_name}) == configs_set.end ());
+
+ // Check that the build package still exists.
//
- configs_set.find (
- build_target_config_id {b.target,
- b.target_config_name}) ==
- configs_set.end ());
-
- // Check that the build package still exists.
- //
- if (!cleanup)
- {
- if (tnt != b.tenant || pkg_name != b.package_name)
+ if (!cleanup)
{
- tnt = b.tenant;
- pkg_name = b.package_name;
- package_versions.clear ();
-
- for (auto& p: pkg_prep_query.execute ())
- package_versions.emplace (move (p.version));
+ if (tnt != b.tenant || pkg_name != b.package_name)
+ {
+ tnt = b.tenant;
+ pkg_name = b.package_name;
+ package_versions.clear ();
+
+ for (auto& p: pkg_prep_query.execute ())
+ package_versions.emplace (move (p.version));
+ }
+
+ cleanup = package_versions.find (b.package_version) ==
+ package_versions.end ();
}
- cleanup = package_versions.find (b.package_version) ==
- package_versions.end ();
+ if (cleanup)
+ db.erase (b);
+ else
+ ++not_erased;
}
+ }
+
+ t.commit ();
+
+ if (!erased)
+ erased = (not_erased != n);
- if (cleanup)
- db.erase (b);
- else
- ++offset;
+ offset += not_erased;
+ retry = max_retries;
+ }
+ catch (const recoverable& e)
+ {
+ // Re-iterate over the current builds chunk, unless there are no more
+ // attempts left. In the latter case stash the error message, if not
+ // stashed yet, and skip the current builds chunk.
+ //
+ if (retry-- == 0)
+ {
+ offset += n;
+ retry = max_retries;
+
+ if (!re)
+ re = e.what ();
}
+
+ tnt = "";
+ pkg_name = package_name ();
+ package_versions.clear ();
}
+ }
- t.commit ();
+ if (re && !erased)
+ {
+ cerr << "recoverable database error: " << *re << endl;
+ return 3;
}
return 0;
diff --git a/doc/cli.sh b/doc/cli.sh
index 3c23f49..773fe1c 100755
--- a/doc/cli.sh
+++ b/doc/cli.sh
@@ -1,6 +1,6 @@
#! /usr/bin/env bash
-version=0.17.0-a.0.z
+version=0.18.0-a.0.z
trap 'exit 1' ERR
set -o errtrace # Trap in functions.
diff --git a/doc/manual.cli b/doc/manual.cli
index 9b85ae6..a550e5a 100644
--- a/doc/manual.cli
+++ b/doc/manual.cli
@@ -596,4 +596,236 @@ message: <string>
[reference]: <string>
\
+
+\h1#package-review|Package Review Submission|
+
+\h#package-review-manifest|Package Review Manifest|
+
+The package review manifest files are per version/revision and are normally
+stored on the filesystem along with other package metadata (like ownership
+information). Under the metadata root directory, a review manifest file has
+the following path:
+
+\
+<project>/<package>/<version>/reviews.manifest
+\
+
+For example:
+
+\
+hello/libhello/1.2.3+2/reviews.manifest
+\
+
+Note that review manifests are normally not removed when the corresponding
+package archive is removed (for example, as a result of a replacement with a
+revision) because reviews for subsequent versions may refer to review results
+of previous versions (see below).
+
+The package review file is a manifest list with each manifest containing
+the below values in an unspecified order:
+
+\
+reviewed-by: <string>
+result-<name>: pass|fail|unchanged
+[base-version]: <version>
+[details-url]: <url>
+\
+
+For example:
+
+\
+reviewed-by: John Doe <john@example.org>
+result-build: fail
+details-url: https://github.com/build2-packaging/hello/issues/1
+\
+
+The \c{reviewed-by} value identifies the reviewer. For example, a deployment
+policy may require a real name and email address when submitting a review.
+
+The \c{result-<name>} values specify the review results for various aspects of
+the package. At least one result value must be present and duplicates for the
+same aspect name are not allowed. For example, a deployment may define the
+following aspect names: \c{build} (build system), \c{code} (implementation
+source code), \c{test} (tests), \c{doc} (documentation).
+
+The \c{result-<name>} value must be one of \c{pass} (the review passed),
+\c{fail} (the review failed), and \c{unchanged} (the aspect in question hasn't
+changed compared to the previous version, which is identified with the
+\c{base-version} value; see below).
+
+The \c{base-version} value identifies the previous version on which this
+review is based. The idea here is that when reviewing a new revision, a patch
+version, or even a minor version, it is often easier to review the difference
+between the two versions than to review everything from scratch. In such
+cases, if some aspects haven't changed since the previous version, then their
+results can be specified as \c{unchanged}. The \c{base-version} value must be
+present if at least one \c{result-<name>} value is \c{unchanged}.
+
+The \c{details-url} value specifies a URL that contains the details of the
+review (issues identified, etc). It can only be absent if none of the
+\c{result-<name>} values are \c{fail} (a failed review needs an explanation
+of why it failed).
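+
+For example, a review of a new revision that only re-examines the package's
+build system, with the remaining results carried over from the base version
+(the values are illustrative):
+
+\
+reviewed-by: Jane Doe <jane@example.org>
+result-build: pass
+result-code: unchanged
+result-test: unchanged
+base-version: 1.2.3+1
+\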
+
+
+\h1#github-ci|GitHub CI Integration|
+
+This chapter describes the integration of the \l{#ci Package CI} functionality
+with GitHub.
+
+\h#github-ci-background|GitHub CI Background|
+
+The GitHub CI model has a number of limitations that are important to
+understand in order to use the provided integration correctly. To understand
+the limitations, however, we first need to understand how the integration
+works, at least at the high level.
+
+GitHub supports integration of third-party CI services into the repository
+workflow by allowing such third-party services to register for events (called
+\i{web hooks} in the GitHub terminology).
+
+\N|This mechanism should not be confused with GitHub Actions, which is a GitHub
+built-in CI service. As far as we understand, it uses ad hoc integration
+rather than the same integration mechanism as available to third-party CI
+services.|
+
+While there are many repository workflow events, for CI the only relevant ones
+are:
+
+\ol|
+
+\li|\i{Branch push} (BP), which is triggered when a new commit is pushed
+to a branch in your repository.|
+
+\li|\i{Pull request} (PR), which is triggered when a new pull request is
+created on your repository. It is also triggered when new commits are added
+to the existing PR.||
+
+\N|Another relevant event is \i{Merge queue}. However, merge queues are
+not yet supported by this integration.|
+
+In response to these events the third-party CI service is expected to start a
+number of CI jobs (called \i{checks} in the GitHub terminology) and then
+report their progress and results back to GitHub to be shown to the user,
+and, in case of PRs, to prevent them from being merged in case the result
+is unsuccessful.
+
+Let's examine in more detail what exactly happens in case of a branch push and
+a pull request.
+
+The branch push (BP) case is pretty straightforward: when you push a new
+commit to a branch in your repository, this commit is CI'ed by the third-party
+service and the result is associated with this commit. If you push another
+commit, the process repeats and you get a new set of CI results associated
+with the new commit. The important point here is that the CI results for each
+commit are associated with that commit id (called \i{head sha} in the GitHub
+terminology).
+
+The pull request (PR) case is more complicated: the aim of a PR is to merge
+one or more commits from one branch (called \i{head branch} in the GitHub
+terminology) to another branch (called \i{base branch} in the GitHub
+terminology). If the base branch can be fast-forwarded to the head commit of
+the head branch, then we can CI this head commit and the result will be
+representative of the merge. However, if base cannot be fast-forwarded, then a
+general merge of the two branches must be performed, with potential conflict
+resolution, etc. And in this case the CI result for the head commit may not
+necessarily represent the result of the merge.
+
+To support the general case (when the base branch cannot be fast-forwarded)
+GitHub creates a tentative merge commit (called \i{test merge commit} in the
+GitHub terminology) and expects the CI service to test that commit rather than
+the head commit (this is what most of the major CI integrations do). See
+\l{https://www.kenmuse.com/blog/the-many-shas-of-a-github-pull-request/ The
+Many SHAs of a GitHub Pull Request} for additional details.
+
+While the PR case is more complicated, so far everything makes sense. But that
+ends once we understand what GitHub associates the CI result with in case of a
+PR. Since the CI service is expected to test the merge commit, it would make
+sense to associate the result of this test with the merge commit. Instead,
+GitHub expects the CI service to report it as associated with the head commit!
+
+This strange decision by GitHub, which we will refer to as \"head sharing\",
+has two serious consequences for trusting CI results when making decisions
+about merging PRs.
+
+Firstly, if the branch push and/or several pull requests share the same head
+commit, then they will share the CI result, regardless of the state of the
+PRs' base branches. Or, to put it another way, in the GitHub model there is a
+single CI result per head commit that is shared by all the BPs and PRs with
+this head commit.
+
+Secondly, if the base branch of a PR moves, the CI result associated with the
+PR does not get invalidated (because the PR head hasn't changed).
+
+Let's consider a representative example of each case showing how the GitHub
+behavior can lead us to making wrong decisions. But before we do that, one
+last bit of terminology: we will distinguish between \i{local PRs}, those with
+the head branch from the same repository, and \i{remote PRs}, those with the
+head branch belonging to another user/organization (called \i{forked PR} in
+the GitHub terminology).
+
+The first representative example is a feature branch: we develop a feature in
+a branch of our repository and once it is ready, we create a local PR to merge
+it to the \c{master}/\c{main} branch. We typically go through the PR instead
+of merging our branch directly in order to have the changes reviewed by
+someone else. In this scenario, the head commit of our feature branch and of
+the PR we created will be the same, which means our PR will share the CI
+result with the feature branch push, which is presumably successful. This can
+lead us to merging the PR based on this result even though the merge commit of
+the PR may not have the same contents as the head commit of the result. For
+example, we may have forgotten to rebase our feature branch on the base branch
+(\c{master}/\c{main} in our example) before creating the PR and the base
+branch has moved while we developed the feature. Or the review may have taken
+some time and the base branch likewise has moved in the meantime. In both
+these cases while the changes to the base branch may not render our head
+commit unmergeable (for example, due to conflicts), they may render our
+changes uncompilable or otherwise buggy once merged.
+
+The second representative example is a single remote PR: someone creates a PR
+with a feature or bugfix from their fork of our repository. There is no
+corresponding branch push for this PR's head commit in our repository so it
+sounds like there is only one place (the PR) where the CI result, if
+associated with this head commit, will be reported in our repository and so
+the head sharing should not be an issue, right? While it's true that
+\i{spatial} sharing, that is between BP and/or several PRs, is not an issue in
+this case, \i{temporal} sharing still is. Specifically, if the base branch
+moves before we examine the PR, we again may end up merging it based on the CI
+results that are not representative of the merge commit.
+
+Hopefully you see the underlying theme by now: the only way to ensure
+correctness in the GitHub CI model is to make sure the PR's head and merge
+commits are the same, which is only the case when the PR base branch can be
+fast-forwarded to head.
+
+Thankfully, GitHub provides a branch protection rule that prevents merging of
+a PR with the head branch behind base (we will refer to it as the
+\i{head-behind-base} protection). Enabling of this protection rule is a
+prerequisite for this CI integration to work correctly.
+
+Note, however, that even with the head-behind-base protection enabled, some of
+the GitHub behavior can be counter-intuitive.
+
+For one, GitHub does not prevent the CI build from starting if this protection
+rule is violated. While this integration checks the result of this protection
+rule and does not start the build if the head is behind, the CI result may
+already be available (if this head is shared with a branch push and/or another
+PR), in which case GitHub will show it. So you may end up with a violated
+head-behind-base protection but with a successful CI result.
+
+Another surprising consequence of the head sharing is the instantaneous
+availability of the CI result, which may look suspicious. For example, if you
+create a PR from a local feature branch, you may immediately see the
+successful CI result because it is the same as for the branch push
+to the feature branch.
+
+Finally note that the GitHub CI model is quite wasteful of CI resources in
+general and the head sharing makes this problem even worse. Specifically,
+GitHub CI builds every commit indiscriminately, regardless of what was
+changed. So a minor tweak to \c{README.md} will trigger a full rebuild even
+though nothing that needs building has changed. The head sharing issue makes
+the situation worse because the CI integration cannot easily cancel an
+in-progress build when a new commit is added to a PR because the result could
+be shared with a branch push or another PR. Nevertheless, this integration
+will attempt to cancel a stale build of a remote PR provided it's not
+(currently) shared.
+
"
diff --git a/etc/brep-module.conf b/etc/brep-module.conf
index dce7f9f..fd6ba67 100644
--- a/etc/brep-module.conf
+++ b/etc/brep-module.conf
@@ -37,6 +37,7 @@ menu Packages=
# menu Configs=?build-configs
# menu Submit=?submit
# menu CI=?ci
+# menu Advanced Search=?advanced-search
menu About=?about
@@ -232,9 +233,9 @@ menu About=?about
# The maximum size of the build result manifest accepted. Note that the HTTP
# POST request body is cached to retry database transactions in the face of
-# recoverable failures (deadlock, loss of connection, etc). Default is 10M.
+# recoverable failures (deadlock, loss of connection, etc). Default is 15M.
#
-# build-result-request-max-size 10485760
+# build-result-request-max-size 15728640
# Enable or disable package build notification emails in the <name>=<mode>
@@ -297,6 +298,19 @@ menu About=?about
# bindist-url
+# The base URL for the reviews manifest files. If this option is specified,
+# then the review information is displayed on the package version details
+# page.
+#
+# The complete URL is formed by adding the following path to the base:
+#
+# <project>/<package>/<version>/reviews.manifest
+#
+# Note that no separator is added between the base and this path.
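+#
+# For example (the base URL below is illustrative), with:
+#
+#   reviews-url https://example.org/metadata/
+#
+# the review manifest URL for package libhello 1.2.3+2 in project hello
+# would be:
+#
+#   https://example.org/metadata/hello/libhello/1.2.3+2/reviews.manifest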
+#
+# reviews-url
+
+
# The openssl program to be used for crypto operations. You can also specify
# additional options that should be passed to the openssl program with
# openssl-option. If the openssl program is not explicitly specified, then brep
@@ -439,21 +453,16 @@ menu About=?about
# ci-handler-timeout
-# The GitHub App ID. Found in the app's settings on GitHub.
-#
-# ci-github-app-id
-
-
# The GitHub App's configured webhook secret. If not set, then the GitHub CI
-# service is disabled.
+# service is disabled. Note: make sure to choose a strong (random) secret.
#
# ci-github-app-webhook-secret
-# The private key used during GitHub API authentication. Created in the GitHub
-# App's settings.
+# The private key used during GitHub API authentication for the specified
+# GitHub App ID. Both values are found in the GitHub App's settings.
#
-# ci-github-app-private-key
+# ci-github-app-id-private-key <id>=<path>
# The number of seconds a JWT (authentication token) should be valid for. The
diff --git a/etc/private/install/brep-module.conf b/etc/private/install/brep-module.conf
index bfaa8f6..2545a87 100644
--- a/etc/private/install/brep-module.conf
+++ b/etc/private/install/brep-module.conf
@@ -37,6 +37,7 @@ menu Packages=
# menu Configs=?build-configs
menu Submit=?submit
# menu CI=?ci
+# menu Advanced Search=?advanced-search
menu About=?about
@@ -232,9 +233,9 @@ menu About=?about
# The maximum size of the build result manifest accepted. Note that the HTTP
# POST request body is cached to retry database transactions in the face of
-# recoverable failures (deadlock, loss of connection, etc). Default is 10M.
+# recoverable failures (deadlock, loss of connection, etc). Default is 15M.
#
-# build-result-request-max-size 10485760
+# build-result-request-max-size 15728640
# Enable or disable package build notification emails in the <name>=<mode>
@@ -297,6 +298,19 @@ menu About=?about
# bindist-url
+# The base URL for the reviews manifest files. If this option is specified,
+# then the review information is displayed on the package version details
+# page.
+#
+# The complete URL is formed by adding the following path to the base:
+#
+# <project>/<package>/<version>/reviews.manifest
+#
+# Note that no separator is added between the base and this path.
+#
+# reviews-url
+
+
# The openssl program to be used for crypto operations. You can also specify
# additional options that should be passed to the openssl program with
# openssl-option. If the openssl program is not explicitly specified, then brep
@@ -447,6 +461,26 @@ submit-handler-timeout 120
# ci-handler-timeout
+# The GitHub App's configured webhook secret. If not set, then the GitHub CI
+# service is disabled. Note that the path must be absolute. Note: make sure to
+# choose a strong (random) secret.
+#
+# ci-github-app-webhook-secret <path>
+
+
+# The private key used during GitHub API authentication for the specified
+# GitHub App ID. Both values are found in the GitHub App's settings. Note that
+# the paths must be absolute.
+#
+# ci-github-app-id-private-key <id>=<path>
+
+
+# The number of seconds a JWT (authentication token) should be valid for. The
+# maximum allowed by GitHub is 10 minutes.
+#
+# ci-github-jwt-validity-period 600
+
+
# The directory to save upload data to for the specified upload type. If
# unspecified, the build artifacts upload functionality will be disabled for
# this type.
diff --git a/etc/systemd/brep-load.service b/etc/systemd/brep-load.service
index f9d4e31..7b38225 100644
--- a/etc/systemd/brep-load.service
+++ b/etc/systemd/brep-load.service
@@ -6,6 +6,7 @@ Type=oneshot
#User=brep
#Group=brep
ExecStart=/home/brep/install/bin/brep-load /home/brep/config/loadtab
+#ExecStart=/home/brep/install/bin/brep-load-with-metadata --timeout 60 /home/brep/public-metadata /home/brep/config/loadtab
[Install]
WantedBy=default.target
diff --git a/libbrep/build-extra.sql b/libbrep/build-extra.sql
index 0c0f010..3134fbb 100644
--- a/libbrep/build-extra.sql
+++ b/libbrep/build-extra.sql
@@ -50,6 +50,7 @@ CREATE FOREIGN TABLE build_tenant (
archived BOOLEAN NOT NULL,
service_id TEXT NULL,
service_type TEXT NULL,
+ service_ref_count BIGINT NULL,
service_data TEXT NULL,
unloaded_timestamp BIGINT NULL,
unloaded_notify_interval BIGINT NULL,
diff --git a/libbrep/build-package.hxx b/libbrep/build-package.hxx
index 13645eb..f022a0d 100644
--- a/libbrep/build-package.hxx
+++ b/libbrep/build-package.hxx
@@ -42,6 +42,10 @@ namespace brep
unloaded_timestamp (t),
unloaded_notify_interval (n) {}
+ // Create tenant for subsequent loading or incremental building.
+ //
+ build_tenant () = default;
+
string id;
bool private_ = false;
@@ -57,10 +61,6 @@ namespace brep
// Database mapping.
//
#pragma db member(id) id
-
- private:
- friend class odb::access;
- build_tenant () = default;
};
// Foreign object that is mapped to a subset of the repository object.
@@ -288,50 +288,48 @@ namespace brep
//
#pragma db member(configs) id_column("") value_column("config_")
- #pragma db member(config_builds) \
- virtual(build_class_exprs_map) \
- after(configs) \
- get(odb::nested_get ( \
- brep::build_package_config_builds (this.configs))) \
- set(brep::build_package_config_builds bs; \
- odb::nested_set (bs, std::move (?)); \
- move (bs).to_configs (this.configs)) \
- id_column("") key_column("") value_column("") \
+ #pragma db member(config_builds) \
+ virtual(build_class_exprs_map) \
+ after(configs) \
+ get(odb::nested_get (this.configs, \
+ &brep::build_package_config::builds)) \
+ set(odb::nested_set (this.configs, \
+ &brep::build_package_config::builds, \
+ std::move (?))) \
+ id_column("") key_column("") value_column("") \
section(constraints_section)
- #pragma db member(config_constraints) \
- virtual(build_constraints_map) \
- after(config_builds) \
- get(odb::nested_get ( \
- brep::build_package_config_constraints (this.configs))) \
- set(brep::build_package_config_constraints cs; \
- odb::nested_set (cs, std::move (?)); \
- move (cs).to_configs (this.configs)) \
- id_column("") key_column("") value_column("") \
+ #pragma db member(config_constraints) \
+ virtual(build_constraints_map) \
+ after(config_builds) \
+ get(odb::nested_get (this.configs, \
+ &brep::build_package_config::constraints)) \
+ set(odb::nested_set (this.configs, \
+ &brep::build_package_config::constraints, \
+ std::move (?))) \
+ id_column("") key_column("") value_column("") \
section(constraints_section)
- #pragma db member(config_auxiliaries) \
- virtual(build_auxiliaries_map) \
- after(config_constraints) \
- get(odb::nested_get ( \
- brep::build_package_config_auxiliaries (this.configs))) \
- set(brep::build_package_config_auxiliaries as; \
- odb::nested_set (as, std::move (?)); \
- move (as).to_configs (this.configs)) \
- id_column("") key_column("") value_column("") \
+ #pragma db member(config_auxiliaries) \
+ virtual(build_auxiliaries_map) \
+ after(config_constraints) \
+ get(odb::nested_get (this.configs, \
+ &brep::build_package_config::auxiliaries)) \
+ set(odb::nested_set (this.configs, \
+ &brep::build_package_config::auxiliaries, \
+ std::move (?))) \
+ id_column("") key_column("") value_column("") \
section(auxiliaries_section)
- #pragma db member(config_bot_keys) \
- virtual(build_package_bot_keys_map) \
- after(config_auxiliaries) \
- get(odb::nested_get ( \
- brep::build_package_config_bot_keys< \
- lazy_shared_ptr<brep::build_public_key>> (this.configs))) \
- set(brep::build_package_config_bot_keys< \
- lazy_shared_ptr<brep::build_public_key>> bks; \
- odb::nested_set (bks, std::move (?)); \
- move (bks).to_configs (this.configs)) \
- id_column("") key_column("") value_column("key_") \
+ #pragma db member(config_bot_keys) \
+ virtual(build_package_bot_keys_map) \
+ after(config_auxiliaries) \
+ get(odb::nested_get (this.configs, \
+ &brep::build_package_config::bot_keys)) \
+ set(odb::nested_set (this.configs, \
+ &brep::build_package_config::bot_keys, \
+ std::move (?))) \
+ id_column("") key_column("") value_column("key_") \
section(bot_keys_section)
#pragma db member(constraints_section) load(lazy) update(always)
diff --git a/libbrep/build.hxx b/libbrep/build.hxx
index 55fd42b..5ebbb0c 100644
--- a/libbrep/build.hxx
+++ b/libbrep/build.hxx
@@ -26,9 +26,9 @@
// Used by the data migration entries.
//
-#define LIBBREP_BUILD_SCHEMA_VERSION_BASE 20
+#define LIBBREP_BUILD_SCHEMA_VERSION_BASE 28
-#pragma db model version(LIBBREP_BUILD_SCHEMA_VERSION_BASE, 28, closed)
+#pragma db model version(LIBBREP_BUILD_SCHEMA_VERSION_BASE, 29, closed)
// We have to keep these mappings at the global scope instead of inside the
// brep namespace because they need to be also effective in the bbot namespace
diff --git a/libbrep/build.xml b/libbrep/build.xml
index 90b4b4f..284db49 100644
--- a/libbrep/build.xml
+++ b/libbrep/build.xml
@@ -1,12 +1,63 @@
<changelog xmlns="http://www.codesynthesis.com/xmlns/odb/changelog" database="pgsql" schema-name="build" version="1">
- <changeset version="28"/>
+ <changeset version="29"/>
- <changeset version="27"/>
-
- <changeset version="26"/>
-
- <changeset version="25">
- <add-table name="build_auxiliary_machines" kind="container">
+ <model version="28">
+ <table name="build" kind="object">
+ <column name="package_tenant" type="TEXT" null="false"/>
+ <column name="package_name" type="CITEXT" null="false"/>
+ <column name="package_version_epoch" type="INTEGER" null="false"/>
+ <column name="package_version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="package_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="package_version_revision" type="INTEGER" null="false"/>
+ <column name="target" type="TEXT" null="false"/>
+ <column name="target_config_name" type="TEXT" null="false"/>
+ <column name="package_config_name" type="TEXT" null="false"/>
+ <column name="toolchain_name" type="TEXT" null="false"/>
+ <column name="toolchain_version_epoch" type="INTEGER" null="false"/>
+ <column name="toolchain_version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="toolchain_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="toolchain_version_revision" type="INTEGER" null="false"/>
+ <column name="package_version_upstream" type="TEXT" null="false"/>
+ <column name="package_version_release" type="TEXT" null="true"/>
+ <column name="toolchain_version_upstream" type="TEXT" null="false"/>
+ <column name="toolchain_version_release" type="TEXT" null="true"/>
+ <column name="state" type="TEXT" null="false"/>
+ <column name="interactive" type="TEXT" null="true"/>
+ <column name="timestamp" type="BIGINT" null="false"/>
+ <column name="force" type="TEXT" null="false"/>
+ <column name="status" type="TEXT" null="true"/>
+ <column name="soft_timestamp" type="BIGINT" null="false"/>
+ <column name="hard_timestamp" type="BIGINT" null="false"/>
+ <column name="agent_fingerprint" type="TEXT" null="true"/>
+ <column name="agent_challenge" type="TEXT" null="true"/>
+ <column name="controller_checksum" type="TEXT" null="false"/>
+ <column name="machine_checksum" type="TEXT" null="false"/>
+ <column name="agent_checksum" type="TEXT" null="true"/>
+ <column name="worker_checksum" type="TEXT" null="true"/>
+ <column name="dependency_checksum" type="TEXT" null="true"/>
+ <column name="machine" type="TEXT" null="false"/>
+ <column name="machine_summary" type="TEXT" null="false"/>
+ <primary-key>
+ <column name="package_tenant"/>
+ <column name="package_name"/>
+ <column name="package_version_epoch"/>
+ <column name="package_version_canonical_upstream"/>
+ <column name="package_version_canonical_release"/>
+ <column name="package_version_revision"/>
+ <column name="target"/>
+ <column name="target_config_name"/>
+ <column name="package_config_name"/>
+ <column name="toolchain_name"/>
+ <column name="toolchain_version_epoch"/>
+ <column name="toolchain_version_canonical_upstream"/>
+ <column name="toolchain_version_canonical_release"/>
+ <column name="toolchain_version_revision"/>
+ </primary-key>
+ <index name="build_timestamp_i">
+ <column name="timestamp"/>
+ </index>
+ </table>
+ <table name="build_auxiliary_machines" kind="container">
<column name="package_tenant" type="TEXT" null="false"/>
<column name="package_name" type="CITEXT" null="false"/>
<column name="package_version_epoch" type="INTEGER" null="false"/>
@@ -75,72 +126,6 @@
<index name="build_auxiliary_machines_index_i">
<column name="index"/>
</index>
- </add-table>
- </changeset>
-
- <changeset version="24"/>
-
- <changeset version="23"/>
-
- <changeset version="22"/>
-
- <changeset version="21"/>
-
- <model version="20">
- <table name="build" kind="object">
- <column name="package_tenant" type="TEXT" null="false"/>
- <column name="package_name" type="CITEXT" null="false"/>
- <column name="package_version_epoch" type="INTEGER" null="false"/>
- <column name="package_version_canonical_upstream" type="TEXT" null="false"/>
- <column name="package_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
- <column name="package_version_revision" type="INTEGER" null="false"/>
- <column name="target" type="TEXT" null="false"/>
- <column name="target_config_name" type="TEXT" null="false"/>
- <column name="package_config_name" type="TEXT" null="false"/>
- <column name="toolchain_name" type="TEXT" null="false"/>
- <column name="toolchain_version_epoch" type="INTEGER" null="false"/>
- <column name="toolchain_version_canonical_upstream" type="TEXT" null="false"/>
- <column name="toolchain_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
- <column name="toolchain_version_revision" type="INTEGER" null="false"/>
- <column name="package_version_upstream" type="TEXT" null="false"/>
- <column name="package_version_release" type="TEXT" null="true"/>
- <column name="toolchain_version_upstream" type="TEXT" null="false"/>
- <column name="toolchain_version_release" type="TEXT" null="true"/>
- <column name="state" type="TEXT" null="false"/>
- <column name="interactive" type="TEXT" null="true"/>
- <column name="timestamp" type="BIGINT" null="false"/>
- <column name="force" type="TEXT" null="false"/>
- <column name="status" type="TEXT" null="true"/>
- <column name="soft_timestamp" type="BIGINT" null="false"/>
- <column name="hard_timestamp" type="BIGINT" null="false"/>
- <column name="agent_fingerprint" type="TEXT" null="true"/>
- <column name="agent_challenge" type="TEXT" null="true"/>
- <column name="machine" type="TEXT" null="false"/>
- <column name="machine_summary" type="TEXT" null="false"/>
- <column name="controller_checksum" type="TEXT" null="false"/>
- <column name="machine_checksum" type="TEXT" null="false"/>
- <column name="agent_checksum" type="TEXT" null="true"/>
- <column name="worker_checksum" type="TEXT" null="true"/>
- <column name="dependency_checksum" type="TEXT" null="true"/>
- <primary-key>
- <column name="package_tenant"/>
- <column name="package_name"/>
- <column name="package_version_epoch"/>
- <column name="package_version_canonical_upstream"/>
- <column name="package_version_canonical_release"/>
- <column name="package_version_revision"/>
- <column name="target"/>
- <column name="target_config_name"/>
- <column name="package_config_name"/>
- <column name="toolchain_name"/>
- <column name="toolchain_version_epoch"/>
- <column name="toolchain_version_canonical_upstream"/>
- <column name="toolchain_version_canonical_release"/>
- <column name="toolchain_version_revision"/>
- </primary-key>
- <index name="build_timestamp_i">
- <column name="timestamp"/>
- </index>
</table>
<table name="build_results" kind="container">
<column name="package_tenant" type="TEXT" null="false"/>
diff --git a/libbrep/common.hxx b/libbrep/common.hxx
index 4be9ce9..22302f3 100644
--- a/libbrep/common.hxx
+++ b/libbrep/common.hxx
@@ -421,11 +421,10 @@ namespace brep
return i != cs.end () ? &*i : nullptr;
}
- // Note that ODB doesn't support containers of value types which contain
- // containers. Thus, we will persist/load
- // build_package_config_template<K>::{builds,constraint,auxiliaries,bot_keys}
- // via the separate nested containers using the adapter classes.
- //
+ // Note that build_package_configs_template<K> is a container of the value
+ // type build_package_config_template<K>, which contains multiple
+ // containers: builds, constraints, auxiliaries, bot_keys. We will
+ // persist/load each of them via a separate virtual container.
// build_package_config_template<K>::builds
//
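For background, such a virtual container mapping flattens each config's inner container into a single table keyed by an {outer config index, inner element index} pair (the config_index/index columns below). A minimal plain-C++ sketch of the flattening idea only (hypothetical names, none of the actual ODB machinery):

  #include <map>
  #include <string>
  #include <vector>
  #include <cstddef>
  #include <utility>

  struct config {std::vector<std::string> builds;};

  // Flatten the configs' builds members into one map keyed by
  // {config_index, index}, mirroring how a virtual nested container is
  // persisted as a single table.
  //
  static std::map<std::pair<std::size_t, std::size_t>, std::string>
  flatten (const std::vector<config>& cs)
  {
    std::map<std::pair<std::size_t, std::size_t>, std::string> r;

    for (std::size_t i (0); i != cs.size (); ++i)
      for (std::size_t j (0); j != cs[i].builds.size (); ++j)
        r.emplace (std::make_pair (i, j), cs[i].builds[j]);

    return r;
  }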
@@ -436,39 +435,6 @@ namespace brep
#pragma db member(build_class_expr_key::outer) column("config_index")
#pragma db member(build_class_expr_key::inner) column("index")
- // Adapter for build_package_config_template<K>::builds.
- //
- // Note: 1 as for build_package_configs_template.
- //
- class build_package_config_builds: public small_vector<build_class_exprs, 1>
- {
- public:
- build_package_config_builds () = default;
-
- template <typename K>
- explicit
- build_package_config_builds (const build_package_configs_template<K>& cs)
- {
- reserve (cs.size ());
- for (const build_package_config_template<K>& c: cs)
- push_back (c.builds);
- }
-
- template <typename K>
- void
- to_configs (build_package_configs_template<K>& cs) &&
- {
- // Note that the empty trailing entries will be missing (see ODB's
- // nested-container.hxx for details).
- //
- assert (size () <= cs.size ());
-
- auto i (cs.begin ());
- for (build_class_exprs& ces: *this)
- i++->builds = move (ces);
- }
- };
-
// build_package_config_template<K>::constraints
//
using build_constraint_key = odb::nested_key<build_constraints>;
@@ -478,41 +444,6 @@ namespace brep
#pragma db member(build_constraint_key::outer) column("config_index")
#pragma db member(build_constraint_key::inner) column("index")
- // Adapter for build_package_config_template<K>::constraints.
- //
- // Note: 1 as for build_package_configs_template.
- //
- class build_package_config_constraints:
- public small_vector<build_constraints, 1>
- {
- public:
- build_package_config_constraints () = default;
-
- template <typename K>
- explicit
- build_package_config_constraints (
- const build_package_configs_template<K>& cs)
- {
- reserve (cs.size ());
- for (const build_package_config_template<K>& c: cs)
- push_back (c.constraints);
- }
-
- template <typename K>
- void
- to_configs (build_package_configs_template<K>& cs) &&
- {
- // Note that the empty trailing entries will be missing (see ODB's
- // nested-container.hxx for details).
- //
- assert (size () <= cs.size ());
-
- auto i (cs.begin ());
- for (build_constraints& bcs: *this)
- i++->constraints = move (bcs);
- }
- };
-
// build_package_config_template<K>::auxiliaries
//
using build_auxiliary_key = odb::nested_key<build_auxiliaries>;
@@ -522,74 +453,11 @@ namespace brep
#pragma db member(build_auxiliary_key::outer) column("config_index")
#pragma db member(build_auxiliary_key::inner) column("index")
- // Adapter for build_package_config_template<K>::auxiliaries.
- //
- // Note: 1 as for build_package_configs_template.
- //
- class build_package_config_auxiliaries:
- public small_vector<build_auxiliaries, 1>
- {
- public:
- build_package_config_auxiliaries () = default;
-
- template <typename K>
- explicit
- build_package_config_auxiliaries (
- const build_package_configs_template<K>& cs)
- {
- reserve (cs.size ());
- for (const build_package_config_template<K>& c: cs)
- push_back (c.auxiliaries);
- }
-
- template <typename K>
- void
- to_configs (build_package_configs_template<K>& cs) &&
- {
- // Note that the empty trailing entries will be missing (see ODB's
- // nested-container.hxx for details).
- //
- assert (size () <= cs.size ());
-
- auto i (cs.begin ());
- for (build_auxiliaries& bas: *this)
- i++->auxiliaries = move (bas);
- }
- };
-
// build_package_config_template<K>::bot_keys
//
- // Adapter for build_package_config_template<K>::bot_keys.
- //
- // Note: 1 as for build_package_configs_template.
- //
- template <typename K>
- class build_package_config_bot_keys: public small_vector<vector<K>, 1>
- {
- public:
- build_package_config_bot_keys () = default;
-
- explicit
- build_package_config_bot_keys (const build_package_configs_template<K>& cs)
- {
- this->reserve (cs.size ());
- for (const build_package_config_template<K>& c: cs)
- this->push_back (c.bot_keys);
- }
-
- void
- to_configs (build_package_configs_template<K>& cs) &&
- {
- // Note that the empty trailing entries will be missing (see ODB's
- // nested-container.hxx for details).
- //
- assert (this->size () <= cs.size ());
-
- auto i (cs.begin ());
- for (vector<K>& bks: *this)
- i++->bot_keys = move (bks);
- }
- };
+ // Note that for this container the nested container support types (*_key,
+ // *_map, etc) are specific to the package object type and are thus defined
+ // in the package.hxx and build-package.hxx headers, respectively.
// The primary reason why a package is unbuildable by the build bot
// controller service.
@@ -673,17 +541,24 @@ namespace brep
// Third-party service state which may optionally be associated with a
// tenant (see also mod/tenant-service.hxx for background).
//
+ // Note that the {id, type} pair must be unique.
+ //
+ // The reference count is used to keep track of the number of attempts to
+ // create a duplicate tenant with this {id, type} (see ci_start::create()
+ // for details).
+ //
#pragma db value
struct tenant_service
{
string id;
string type;
+ uint64_t ref_count;
optional<string> data;
tenant_service () = default;
tenant_service (string i, string t, optional<string> d = nullopt)
- : id (move (i)), type (move (t)), data (move (d)) {}
+ : id (move (i)), type (move (t)), ref_count (1), data (move (d)) {}
};
// Version comparison operators.
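To illustrate the new reference count, a hypothetical sketch only (the real bookkeeping lives in ci_start::create(), which is not part of this hunk):

  #include <libbrep/common.hxx>

  // The tenant_service constructor initializes ref_count to 1; each
  // subsequent attempt to create a tenant with the same {id, type} bumps
  // the count instead of creating a duplicate.
  //
  void
  note_duplicate_attempt (brep::tenant_service& s)
  {
    ++s.ref_count;
  }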
diff --git a/libbrep/package.cxx b/libbrep/package.cxx
index 4eb6fe8..391a583 100644
--- a/libbrep/package.cxx
+++ b/libbrep/package.cxx
@@ -84,6 +84,7 @@ namespace brep
build_auxiliaries_type ac,
package_build_bot_keys bk,
package_build_configs bcs,
+ optional<reviews_summary> rvs,
optional<path> lc,
optional<string> fr,
optional<string> sh,
@@ -119,6 +120,7 @@ namespace brep
build_auxiliaries (move (ac)),
build_bot_keys (move (bk)),
build_configs (move (bcs)),
+ reviews (move (rvs)),
internal_repository (move (rp)),
location (move (lc)),
fragment (move (fr)),
diff --git a/libbrep/package.hxx b/libbrep/package.hxx
index 76c5836..2714d10 100644
--- a/libbrep/package.hxx
+++ b/libbrep/package.hxx
@@ -18,9 +18,9 @@
// Used by the data migration entries.
//
-#define LIBBREP_PACKAGE_SCHEMA_VERSION_BASE 27
+#define LIBBREP_PACKAGE_SCHEMA_VERSION_BASE 34
-#pragma db model version(LIBBREP_PACKAGE_SCHEMA_VERSION_BASE, 34, closed)
+#pragma db model version(LIBBREP_PACKAGE_SCHEMA_VERSION_BASE, 36, closed)
namespace brep
{
@@ -224,9 +224,8 @@ namespace brep
// certificate
//
#pragma db value
- class certificate
+ struct certificate
{
- public:
string fingerprint; // SHA256 fingerprint. Note: foreign-mapped in build.
string name; // CN component of Subject.
string organization; // O component of Subject.
@@ -536,6 +535,35 @@ namespace brep
#pragma db member(package_build_bot_key_key::outer) column("config_index")
#pragma db member(package_build_bot_key_key::inner) column("index")
+ // Number of passed and failed reviews and the path to the reviews.manifest
+ // file this information comes from. The path is relative to the root of the
+ // package metadata directory.
+ //
+ #pragma db value
+ struct reviews_summary
+ {
+ // Must not both be zero.
+ //
+ size_t pass;
+ size_t fail;
+
+ path manifest_file;
+ };
+
+ inline bool
+ operator== (const reviews_summary& x, const reviews_summary& y)
+ {
+ return x.pass == y.pass &&
+ x.fail == y.fail &&
+ x.manifest_file == y.manifest_file;
+ }
+
+ inline bool
+ operator!= (const reviews_summary& x, const reviews_summary& y)
+ {
+ return !(x == y);
+ }
+
// Tweak package_id mapping to include a constraint (this only affects the
// database schema).
//
@@ -589,6 +617,7 @@ namespace brep
build_auxiliaries_type,
package_build_bot_keys,
package_build_configs,
+ optional<reviews_summary>,
optional<path> location,
optional<string> fragment,
optional<string> sha256sum,
@@ -691,6 +720,9 @@ namespace brep
//
odb::section build_section;
+ optional<reviews_summary> reviews;
+ odb::section reviews_section;
+
// Note that it is foreign-mapped in build.
//
lazy_shared_ptr<repository_type> internal_repository;
@@ -870,54 +902,55 @@ namespace brep
#pragma db member(build_configs) id_column("") value_column("config_") \
section(build_section)
- #pragma db member(build_config_builds) \
- virtual(build_class_exprs_map) \
- after(build_configs) \
- get(odb::nested_get ( \
- brep::build_package_config_builds (this.build_configs))) \
- set(brep::build_package_config_builds bs; \
- odb::nested_set (bs, std::move (?)); \
- move (bs).to_configs (this.build_configs)) \
- id_column("") key_column("") value_column("") \
+ #pragma db member(build_config_builds) \
+ virtual(build_class_exprs_map) \
+ after(build_configs) \
+ get(odb::nested_get (this.build_configs, \
+ &brep::package_build_config::builds)) \
+ set(odb::nested_set (this.build_configs, \
+ &brep::package_build_config::builds, \
+ std::move (?))) \
+ id_column("") key_column("") value_column("") \
section(build_section)
- #pragma db member(build_config_constraints) \
- virtual(build_constraints_map) \
- after(build_config_builds) \
- get(odb::nested_get ( \
- brep::build_package_config_constraints (this.build_configs))) \
- set(brep::build_package_config_constraints cs; \
- odb::nested_set (cs, std::move (?)); \
- move (cs).to_configs (this.build_configs)) \
- id_column("") key_column("") value_column("") \
+ #pragma db member(build_config_constraints) \
+ virtual(build_constraints_map) \
+ after(build_config_builds) \
+ get(odb::nested_get (this.build_configs, \
+ &brep::package_build_config::constraints)) \
+ set(odb::nested_set (this.build_configs, \
+ &brep::package_build_config::constraints, \
+ std::move (?))) \
+ id_column("") key_column("") value_column("") \
section(build_section)
- #pragma db member(build_config_auxiliaries) \
- virtual(build_auxiliaries_map) \
- after(build_config_constraints) \
- get(odb::nested_get ( \
- brep::build_package_config_auxiliaries (this.build_configs))) \
- set(brep::build_package_config_auxiliaries as; \
- odb::nested_set (as, std::move (?)); \
- move (as).to_configs (this.build_configs)) \
- id_column("") key_column("") value_column("") \
+ #pragma db member(build_config_auxiliaries) \
+ virtual(build_auxiliaries_map) \
+ after(build_config_constraints) \
+ get(odb::nested_get (this.build_configs, \
+ &brep::package_build_config::auxiliaries)) \
+ set(odb::nested_set (this.build_configs, \
+ &brep::package_build_config::auxiliaries, \
+ std::move (?))) \
+ id_column("") key_column("") value_column("") \
section(unused_section)
- #pragma db member(build_config_bot_keys) \
- virtual(package_build_bot_keys_map) \
- after(build_config_auxiliaries) \
- get(odb::nested_get ( \
- brep::build_package_config_bot_keys< \
- lazy_shared_ptr<brep::public_key>> (this.build_configs))) \
- set(brep::build_package_config_bot_keys< \
- lazy_shared_ptr<brep::public_key>> bks; \
- odb::nested_set (bks, std::move (?)); \
- move (bks).to_configs (this.build_configs)) \
- id_column("") key_column("") value_column("key_") value_not_null \
+ #pragma db member(build_config_bot_keys) \
+ virtual(package_build_bot_keys_map) \
+ after(build_config_auxiliaries) \
+ get(odb::nested_get (this.build_configs, \
+ &brep::package_build_config::bot_keys)) \
+ set(odb::nested_set (this.build_configs, \
+ &brep::package_build_config::bot_keys, \
+ std::move (?))) \
+ id_column("") key_column("") value_column("key_") value_not_null \
section(unused_section)
- #pragma db member(build_section) load(lazy) update(always)
- #pragma db member(unused_section) load(lazy) update(manual)
+ #pragma db member(reviews) section(reviews_section)
+
+ #pragma db member(build_section) load(lazy) update(always)
+ #pragma db member(reviews_section) load(lazy) update(always)
+ #pragma db member(unused_section) load(lazy) update(manual)
// other_repositories
//
@@ -948,6 +981,20 @@ namespace brep
search_text (const weighted_text&) {}
};
+ // Packages count.
+ //
+ #pragma db view object(package)
+ struct package_count
+ {
+ size_t result;
+
+ operator size_t () const {return result;}
+
+ // Database mapping.
+ //
+ #pragma db member(result) column("count(" + package::id.tenant + ")")
+ };
+
// Package search query matching rank.
//
#pragma db view query("/*CALL*/ SELECT * FROM search_latest_packages(?)")
@@ -974,7 +1021,7 @@ namespace brep
};
#pragma db view query("/*CALL*/ SELECT count(*) FROM search_packages(?)")
- struct package_count
+ struct package_search_count
{
size_t result;
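As a usage note for the new package_count view added above: single-row ODB views like this can be queried directly for their value. A minimal sketch (assuming an open database and an active transaction; not part of this diff):

  #include <cstddef>

  #include <odb/database.hxx>

  #include <libbrep/package.hxx>
  #include <libbrep/package-odb.hxx>

  // Count the packages via the package_count view. The view's
  // operator size_t converts the single-row result to a plain number.
  //
  std::size_t
  count_packages (odb::database& db)
  {
    return db.query_value<brep::package_count> ();
  }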
diff --git a/libbrep/package.xml b/libbrep/package.xml
index f33119e..ac48ec4 100644
--- a/libbrep/package.xml
+++ b/libbrep/package.xml
@@ -1,295 +1,51 @@
<changelog xmlns="http://www.codesynthesis.com/xmlns/odb/changelog" database="pgsql" schema-name="package" version="1">
- <changeset version="34">
+ <changeset version="36">
<alter-table name="tenant">
- <add-column name="unloaded_timestamp" type="BIGINT" null="true"/>
- <add-column name="unloaded_notify_interval" type="BIGINT" null="true"/>
- <add-index name="tenant_unloaded_timestamp_i">
- <column name="unloaded_timestamp"/>
- </add-index>
+ <add-column name="service_ref_count" type="BIGINT" null="true"/>
</alter-table>
</changeset>
- <changeset version="33">
- <add-table name="public_key" kind="object">
- <column name="tenant" type="TEXT" null="false"/>
- <column name="fingerprint" type="TEXT" null="false"/>
- <column name="data" type="TEXT" null="false"/>
- <primary-key>
- <column name="tenant"/>
- <column name="fingerprint"/>
- </primary-key>
- <foreign-key name="tenant_fk" deferrable="DEFERRED">
- <column name="tenant"/>
- <references table="tenant">
- <column name="id"/>
- </references>
- </foreign-key>
- </add-table>
+ <changeset version="35">
<alter-table name="package">
- <add-column name="custom_bot" type="BOOLEAN" null="true"/>
- </alter-table>
- <add-table name="package_build_bot_keys" kind="container">
- <column name="tenant" type="TEXT" null="false"/>
- <column name="name" type="CITEXT" null="false"/>
- <column name="version_epoch" type="INTEGER" null="false"/>
- <column name="version_canonical_upstream" type="TEXT" null="false"/>
- <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
- <column name="version_revision" type="INTEGER" null="false"/>
- <column name="index" type="BIGINT" null="false"/>
- <column name="key_tenant" type="TEXT" null="false"/>
- <column name="key_fingerprint" type="TEXT" null="false"/>
- <foreign-key name="tenant_fk" deferrable="DEFERRED">
- <column name="tenant"/>
- <references table="tenant">
- <column name="id"/>
- </references>
- </foreign-key>
- <foreign-key name="object_id_fk" on-delete="CASCADE">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- <references table="package">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- </references>
- </foreign-key>
- <index name="package_build_bot_keys_object_id_i">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- </index>
- <index name="package_build_bot_keys_index_i">
- <column name="index"/>
- </index>
- <foreign-key name="key_tenant_fk" deferrable="DEFERRED">
- <column name="key_tenant"/>
- <references table="tenant">
- <column name="id"/>
- </references>
- </foreign-key>
- <foreign-key name="key_fk" deferrable="DEFERRED">
- <column name="key_tenant"/>
- <column name="key_fingerprint"/>
- <references table="public_key">
- <column name="tenant"/>
- <column name="fingerprint"/>
- </references>
- </foreign-key>
- </add-table>
- <add-table name="package_build_config_bot_keys" kind="container">
- <column name="tenant" type="TEXT" null="false"/>
- <column name="name" type="CITEXT" null="false"/>
- <column name="version_epoch" type="INTEGER" null="false"/>
- <column name="version_canonical_upstream" type="TEXT" null="false"/>
- <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
- <column name="version_revision" type="INTEGER" null="false"/>
- <column name="config_index" type="BIGINT" null="false"/>
- <column name="index" type="BIGINT" null="false"/>
- <column name="key_tenant" type="TEXT" null="false"/>
- <column name="key_fingerprint" type="TEXT" null="false"/>
- <foreign-key name="tenant_fk" deferrable="DEFERRED">
- <column name="tenant"/>
- <references table="tenant">
- <column name="id"/>
- </references>
- </foreign-key>
- <foreign-key name="object_id_fk" on-delete="CASCADE">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- <references table="package">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- </references>
- </foreign-key>
- <index name="package_build_config_bot_keys_object_id_i">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- </index>
- <foreign-key name="key_tenant_fk" deferrable="DEFERRED">
- <column name="key_tenant"/>
- <references table="tenant">
- <column name="id"/>
- </references>
- </foreign-key>
- <foreign-key name="key_fk" deferrable="DEFERRED">
- <column name="key_tenant"/>
- <column name="key_fingerprint"/>
- <references table="public_key">
- <column name="tenant"/>
- <column name="fingerprint"/>
- </references>
- </foreign-key>
- </add-table>
- </changeset>
-
- <changeset version="32">
- <alter-table name="tenant">
- <add-column name="build_toolchain_name" type="TEXT" null="true"/>
- <add-column name="build_toolchain_version_epoch" type="INTEGER" null="true"/>
- <add-column name="build_toolchain_version_canonical_upstream" type="TEXT" null="true"/>
- <add-column name="build_toolchain_version_canonical_release" type="TEXT" null="true"/>
- <add-column name="build_toolchain_version_revision" type="INTEGER" null="true"/>
- <add-column name="build_toolchain_version_upstream" type="TEXT" null="true"/>
- <add-column name="build_toolchain_version_release" type="TEXT" null="true"/>
- </alter-table>
- </changeset>
-
- <changeset version="31">
- <add-table name="package_build_auxiliaries" kind="container">
- <column name="tenant" type="TEXT" null="false"/>
- <column name="name" type="CITEXT" null="false"/>
- <column name="version_epoch" type="INTEGER" null="false"/>
- <column name="version_canonical_upstream" type="TEXT" null="false"/>
- <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
- <column name="version_revision" type="INTEGER" null="false"/>
- <column name="index" type="BIGINT" null="false"/>
- <column name="environment_name" type="TEXT" null="false"/>
- <column name="config" type="TEXT" null="false"/>
- <column name="comment" type="TEXT" null="false"/>
- <foreign-key name="tenant_fk" deferrable="DEFERRED">
- <column name="tenant"/>
- <references table="tenant">
- <column name="id"/>
- </references>
- </foreign-key>
- <foreign-key name="object_id_fk" on-delete="CASCADE">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- <references table="package">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- </references>
- </foreign-key>
- <index name="package_build_auxiliaries_object_id_i">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- </index>
- <index name="package_build_auxiliaries_index_i">
- <column name="index"/>
- </index>
- </add-table>
- <add-table name="package_build_config_auxiliaries" kind="container">
- <column name="tenant" type="TEXT" null="false"/>
- <column name="name" type="CITEXT" null="false"/>
- <column name="version_epoch" type="INTEGER" null="false"/>
- <column name="version_canonical_upstream" type="TEXT" null="false"/>
- <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
- <column name="version_revision" type="INTEGER" null="false"/>
- <column name="config_index" type="BIGINT" null="false"/>
- <column name="index" type="BIGINT" null="false"/>
- <column name="environment_name" type="TEXT" null="false"/>
- <column name="config" type="TEXT" null="false"/>
- <column name="comment" type="TEXT" null="false"/>
- <foreign-key name="tenant_fk" deferrable="DEFERRED">
- <column name="tenant"/>
- <references table="tenant">
- <column name="id"/>
- </references>
- </foreign-key>
- <foreign-key name="object_id_fk" on-delete="CASCADE">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- <references table="package">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- </references>
- </foreign-key>
- <index name="package_build_config_auxiliaries_object_id_i">
- <column name="tenant"/>
- <column name="name"/>
- <column name="version_epoch"/>
- <column name="version_canonical_upstream"/>
- <column name="version_canonical_release"/>
- <column name="version_revision"/>
- </index>
- </add-table>
- </changeset>
-
- <changeset version="30">
- <alter-table name="tenant">
- <add-column name="service_id" type="TEXT" null="true"/>
- <add-column name="service_type" type="TEXT" null="true"/>
- <add-column name="service_data" type="TEXT" null="true"/>
- <add-column name="queued_timestamp" type="BIGINT" null="true"/>
- <add-index name="tenant_service_i" type="UNIQUE">
- <column name="service_id"/>
- <column name="service_type"/>
- </add-index>
- <add-index name="tenant_service_id_i">
- <column name="service_id"/>
- </add-index>
- </alter-table>
- </changeset>
-
- <changeset version="29">
- <alter-table name="package_tests">
- <add-column name="test_enable" type="TEXT" null="true"/>
- </alter-table>
- </changeset>
-
- <changeset version="28">
- <alter-table name="package_build_configs">
- <add-column name="config_email" type="TEXT" null="true"/>
- <add-column name="config_email_comment" type="TEXT" null="true"/>
- <add-column name="config_warning_email" type="TEXT" null="true"/>
- <add-column name="config_warning_email_comment" type="TEXT" null="true"/>
- <add-column name="config_error_email" type="TEXT" null="true"/>
- <add-column name="config_error_email_comment" type="TEXT" null="true"/>
+ <add-column name="reviews_pass" type="BIGINT" null="true"/>
+ <add-column name="reviews_fail" type="BIGINT" null="true"/>
+ <add-column name="reviews_manifest_file" type="TEXT" null="true"/>
</alter-table>
</changeset>
- <model version="27">
+ <model version="34">
<table name="tenant" kind="object">
<column name="id" type="TEXT" null="false"/>
<column name="private" type="BOOLEAN" null="false"/>
<column name="interactive" type="TEXT" null="true"/>
<column name="creation_timestamp" type="BIGINT" null="false"/>
<column name="archived" type="BOOLEAN" null="false"/>
+ <column name="service_id" type="TEXT" null="true"/>
+ <column name="service_type" type="TEXT" null="true"/>
+ <column name="service_data" type="TEXT" null="true"/>
+ <column name="unloaded_timestamp" type="BIGINT" null="true"/>
+ <column name="unloaded_notify_interval" type="BIGINT" null="true"/>
+ <column name="queued_timestamp" type="BIGINT" null="true"/>
+ <column name="build_toolchain_name" type="TEXT" null="true"/>
+ <column name="build_toolchain_version_epoch" type="INTEGER" null="true"/>
+ <column name="build_toolchain_version_canonical_upstream" type="TEXT" null="true"/>
+ <column name="build_toolchain_version_canonical_release" type="TEXT" null="true"/>
+ <column name="build_toolchain_version_revision" type="INTEGER" null="true"/>
+ <column name="build_toolchain_version_upstream" type="TEXT" null="true"/>
+ <column name="build_toolchain_version_release" type="TEXT" null="true"/>
<primary-key>
<column name="id"/>
</primary-key>
+ <index name="tenant_service_i" type="UNIQUE">
+ <column name="service_id"/>
+ <column name="service_type"/>
+ </index>
+ <index name="tenant_service_id_i">
+ <column name="service_id"/>
+ </index>
+ <index name="tenant_unloaded_timestamp_i">
+ <column name="unloaded_timestamp"/>
+ </index>
</table>
<table name="repository" kind="object">
<column name="tenant" type="TEXT" null="false"/>
@@ -409,6 +165,21 @@
</references>
</foreign-key>
</table>
+ <table name="public_key" kind="object">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="fingerprint" type="TEXT" null="false"/>
+ <column name="data" type="TEXT" null="false"/>
+ <primary-key>
+ <column name="tenant"/>
+ <column name="fingerprint"/>
+ </primary-key>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ </table>
<table name="package" kind="object">
<column name="tenant" type="TEXT" null="false"/>
<column name="name" type="CITEXT" null="false"/>
@@ -454,6 +225,7 @@
<column name="sha256sum" type="TEXT" null="true"/>
<column name="buildable" type="BOOLEAN" null="false"/>
<column name="unbuildable_reason" type="TEXT" null="true"/>
+ <column name="custom_bot" type="BOOLEAN" null="true"/>
<column name="search_index" type="tsvector" null="true"/>
<primary-key>
<column name="tenant"/>
@@ -989,6 +761,7 @@
<column name="test_package_version_revision" type="INTEGER" null="true"/>
<column name="test_type" type="TEXT" null="false"/>
<column name="test_buildtime" type="BOOLEAN" null="false"/>
+ <column name="test_enable" type="TEXT" null="true"/>
<column name="test_reflect" type="TEXT" null="true"/>
<foreign-key name="tenant_fk" deferrable="DEFERRED">
<column name="tenant"/>
@@ -1136,6 +909,109 @@
<column name="index"/>
</index>
</table>
+ <table name="package_build_auxiliaries" kind="container">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="name" type="CITEXT" null="false"/>
+ <column name="version_epoch" type="INTEGER" null="false"/>
+ <column name="version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="version_revision" type="INTEGER" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
+ <column name="environment_name" type="TEXT" null="false"/>
+ <column name="config" type="TEXT" null="false"/>
+ <column name="comment" type="TEXT" null="false"/>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <references table="package">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </references>
+ </foreign-key>
+ <index name="package_build_auxiliaries_object_id_i">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </index>
+ <index name="package_build_auxiliaries_index_i">
+ <column name="index"/>
+ </index>
+ </table>
+ <table name="package_build_bot_keys" kind="container">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="name" type="CITEXT" null="false"/>
+ <column name="version_epoch" type="INTEGER" null="false"/>
+ <column name="version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="version_revision" type="INTEGER" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
+ <column name="key_tenant" type="TEXT" null="false"/>
+ <column name="key_fingerprint" type="TEXT" null="false"/>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <references table="package">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </references>
+ </foreign-key>
+ <index name="package_build_bot_keys_object_id_i">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </index>
+ <index name="package_build_bot_keys_index_i">
+ <column name="index"/>
+ </index>
+ <foreign-key name="key_tenant_fk" deferrable="DEFERRED">
+ <column name="key_tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="key_fk" deferrable="DEFERRED">
+ <column name="key_tenant"/>
+ <column name="key_fingerprint"/>
+ <references table="public_key">
+ <column name="tenant"/>
+ <column name="fingerprint"/>
+ </references>
+ </foreign-key>
+ </table>
<table name="package_build_configs" kind="container">
<column name="tenant" type="TEXT" null="false"/>
<column name="name" type="CITEXT" null="false"/>
@@ -1147,6 +1023,12 @@
<column name="config_name" type="TEXT" null="false"/>
<column name="config_arguments" type="TEXT" null="false"/>
<column name="config_comment" type="TEXT" null="false"/>
+ <column name="config_email" type="TEXT" null="true"/>
+ <column name="config_email_comment" type="TEXT" null="true"/>
+ <column name="config_warning_email" type="TEXT" null="true"/>
+ <column name="config_warning_email_comment" type="TEXT" null="true"/>
+ <column name="config_error_email" type="TEXT" null="true"/>
+ <column name="config_error_email_comment" type="TEXT" null="true"/>
<foreign-key name="tenant_fk" deferrable="DEFERRED">
<column name="tenant"/>
<references table="tenant">
@@ -1267,6 +1149,105 @@
<column name="version_revision"/>
</index>
</table>
+ <table name="package_build_config_auxiliaries" kind="container">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="name" type="CITEXT" null="false"/>
+ <column name="version_epoch" type="INTEGER" null="false"/>
+ <column name="version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="version_revision" type="INTEGER" null="false"/>
+ <column name="config_index" type="BIGINT" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
+ <column name="environment_name" type="TEXT" null="false"/>
+ <column name="config" type="TEXT" null="false"/>
+ <column name="comment" type="TEXT" null="false"/>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <references table="package">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </references>
+ </foreign-key>
+ <index name="package_build_config_auxiliaries_object_id_i">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </index>
+ </table>
+ <table name="package_build_config_bot_keys" kind="container">
+ <column name="tenant" type="TEXT" null="false"/>
+ <column name="name" type="CITEXT" null="false"/>
+ <column name="version_epoch" type="INTEGER" null="false"/>
+ <column name="version_canonical_upstream" type="TEXT" null="false"/>
+ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/>
+ <column name="version_revision" type="INTEGER" null="false"/>
+ <column name="config_index" type="BIGINT" null="false"/>
+ <column name="index" type="BIGINT" null="false"/>
+ <column name="key_tenant" type="TEXT" null="false"/>
+ <column name="key_fingerprint" type="TEXT" null="false"/>
+ <foreign-key name="tenant_fk" deferrable="DEFERRED">
+ <column name="tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="object_id_fk" on-delete="CASCADE">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ <references table="package">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </references>
+ </foreign-key>
+ <index name="package_build_config_bot_keys_object_id_i">
+ <column name="tenant"/>
+ <column name="name"/>
+ <column name="version_epoch"/>
+ <column name="version_canonical_upstream"/>
+ <column name="version_canonical_release"/>
+ <column name="version_revision"/>
+ </index>
+ <foreign-key name="key_tenant_fk" deferrable="DEFERRED">
+ <column name="key_tenant"/>
+ <references table="tenant">
+ <column name="id"/>
+ </references>
+ </foreign-key>
+ <foreign-key name="key_fk" deferrable="DEFERRED">
+ <column name="key_tenant"/>
+ <column name="key_fingerprint"/>
+ <references table="public_key">
+ <column name="tenant"/>
+ <column name="fingerprint"/>
+ </references>
+ </foreign-key>
+ </table>
<table name="package_other_repositories" kind="container">
<column name="tenant" type="TEXT" null="false"/>
<column name="name" type="CITEXT" null="false"/>
diff --git a/libbrep/review-manifest.cxx b/libbrep/review-manifest.cxx
new file mode 100644
index 0000000..3592e69
--- /dev/null
+++ b/libbrep/review-manifest.cxx
@@ -0,0 +1,220 @@
+// file : libbrep/review-manifest.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbrep/review-manifest.hxx>
+
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
+
+using namespace std;
+using namespace butl;
+
+namespace brep
+{
+ using parser = manifest_parser;
+ using parsing = manifest_parsing;
+ using serializer = manifest_serializer;
+ using serialization = manifest_serialization;
+ using name_value = manifest_name_value;
+
+ // review_result
+ //
+ string
+ to_string (review_result r)
+ {
+ switch (r)
+ {
+ case review_result::pass: return "pass";
+ case review_result::fail: return "fail";
+ case review_result::unchanged: return "unchanged";
+ }
+
+ assert (false);
+ return string ();
+ }
+
+ review_result
+ to_review_result (const string& r)
+ {
+ if (r == "pass") return review_result::pass;
+ else if (r == "fail") return review_result::fail;
+ else if (r == "unchanged") return review_result::unchanged;
+ else throw invalid_argument ("invalid review result '" + r + '\'');
+ }
+
+ // review_manifest
+ //
+ review_manifest::
+ review_manifest (parser& p, bool iu)
+ : review_manifest (p, p.next (), iu)
+ {
+ // Make sure this is the end.
+ //
+ name_value nv (p.next ());
+ if (!nv.empty ())
+ throw parsing (p.name (), nv.name_line, nv.name_column,
+ "single review manifest expected");
+ }
+
+ review_manifest::
+ review_manifest (parser& p, name_value nv, bool iu)
+ {
+ auto bad_name ([&p, &nv](const string& d) {
+ throw parsing (p.name (), nv.name_line, nv.name_column, d);});
+
+ auto bad_value ([&p, &nv](const string& d) {
+ throw parsing (p.name (), nv.value_line, nv.value_column, d);});
+
+ // Make sure this is the start and we support the version.
+ //
+ if (!nv.name.empty ())
+ throw parsing (p.name (), nv.name_line, nv.name_column,
+ "start of review manifest expected");
+
+ if (nv.value != "1")
+ throw parsing (p.name (), nv.value_line, nv.value_column,
+ "unsupported format version");
+
+ bool need_base (false);
+ bool need_details (false);
+
+ for (nv = p.next (); !nv.empty (); nv = p.next ())
+ {
+ string& n (nv.name);
+ string& v (nv.value);
+
+ if (n == "reviewed-by")
+ {
+ if (!reviewed_by.empty ())
+ bad_name ("reviewer redefinition");
+
+ if (v.empty ())
+ bad_value ("empty reviewer");
+
+ reviewed_by = move (v);
+ }
+ else if (n.size () > 7 && n.compare (0, 7, "result-") == 0)
+ {
+ string name (n, 7, n.size () - 7);
+
+ if (find_if (results.begin (), results.end (),
+ [&name] (const review_aspect& r)
+ {
+ return name == r.name;
+ }) != results.end ())
+ bad_name (name + " review result redefinition");
+
+ try
+ {
+ review_result r (to_review_result (v));
+
+ if (r == review_result::fail)
+ need_details = true;
+
+ if (r == review_result::unchanged)
+ need_base = true;
+
+ results.push_back (review_aspect {move (name), r});
+ }
+ catch (const invalid_argument& e)
+ {
+ bad_value (e.what ());
+ }
+ }
+ else if (n == "base-version")
+ {
+ if (base_version)
+ bad_name ("base version redefinition");
+
+ try
+ {
+ base_version = bpkg::version (v);
+ }
+ catch (const invalid_argument& e)
+ {
+ bad_value (e.what ());
+ }
+ }
+ else if (n == "details-url")
+ {
+ if (details_url)
+ bad_name ("details url redefinition");
+
+ try
+ {
+ details_url = url (v);
+ }
+ catch (const invalid_argument& e)
+ {
+ bad_value (e.what ());
+ }
+ }
+ else if (!iu)
+ bad_name ("unknown name '" + n + "' in review manifest");
+ }
+
+ // Verify all non-optional values were specified.
+ //
+ if (reviewed_by.empty ())
+ bad_value ("no reviewer specified");
+
+ if (results.empty ())
+ bad_value ("no result specified");
+
+ if (!base_version && need_base)
+ bad_value ("no base version specified");
+
+ if (!details_url && need_details)
+ bad_value ("no details url specified");
+ }
+
+ void review_manifest::
+ serialize (serializer& s) const
+ {
+ // @@ Should we check that all non-optional values are specified and all
+ // values are valid?
+ //
+ s.next ("", "1"); // Start of manifest.
+
+ auto bad_value ([&s](const string& d) {
+ throw serialization (s.name (), d);});
+
+ if (reviewed_by.empty ())
+ bad_value ("empty reviewer");
+
+ s.next ("reviewed-by", reviewed_by);
+
+ for (const review_aspect& r: results)
+ s.next ("result-" + r.name, to_string (r.result));
+
+ if (base_version)
+ s.next ("base-version", base_version->string ());
+
+ if (details_url)
+ s.next ("details-url", details_url->string ());
+
+ s.next ("", ""); // End of manifest.
+ }
+
+ // review_manifests
+ //
+ review_manifests::
+ review_manifests (parser& p, bool iu)
+ {
+ // Parse review manifests.
+ //
+ for (name_value nv (p.next ()); !nv.empty (); nv = p.next ())
+ emplace_back (p, move (nv), iu);
+ }
+
+ void review_manifests::
+ serialize (serializer& s) const
+ {
+ // Serialize review manifests.
+ //
+ for (const review_manifest& m: *this)
+ m.serialize (s);
+
+ s.next ("", ""); // End of stream.
+ }
+}
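For illustration, a reviews.manifest accepted by the parser above could look like this (hypothetical values; note that a fail result makes details-url mandatory and an unchanged result makes base-version mandatory):

  : 1
  reviewed-by: John Doe <john@example.org>
  result-code: pass
  result-build: fail
  details-url: https://example.org/reviews/libhello/1

Multiple such manifests can appear in a single file, which is what the review_manifests constructor parses.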
diff --git a/libbrep/review-manifest.hxx b/libbrep/review-manifest.hxx
new file mode 100644
index 0000000..260fdec
--- /dev/null
+++ b/libbrep/review-manifest.hxx
@@ -0,0 +1,80 @@
+// file : libbrep/review-manifest.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBREP_REVIEW_MANIFEST_HXX
+#define LIBBREP_REVIEW_MANIFEST_HXX
+
+#include <libbutl/manifest-forward.hxx>
+
+#include <libbpkg/manifest.hxx>
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+namespace brep
+{
+ enum class review_result: uint8_t
+ {
+ pass,
+ fail,
+ unchanged
+ };
+
+ string
+ to_string (review_result);
+
+ review_result
+ to_review_result (const string&); // May throw invalid_argument.
+
+ inline ostream&
+ operator<< (ostream& os, review_result r)
+ {
+ return os << to_string (r);
+ }
+
+ struct review_aspect
+ {
+ string name; // code, build, test, doc, etc
+ review_result result;
+ };
+
+ class review_manifest
+ {
+ public:
+ string reviewed_by;
+ vector<review_aspect> results;
+ optional<bpkg::version> base_version;
+ optional<url> details_url;
+
+ review_manifest (string r,
+ vector<review_aspect> rs,
+ optional<bpkg::version> bv,
+ optional<url> u)
+ : reviewed_by (move (r)),
+ results (move (rs)),
+ base_version (move (bv)),
+ details_url (move (u)) {}
+
+ public:
+ review_manifest () = default;
+ review_manifest (butl::manifest_parser&, bool ignore_unknown = false);
+ review_manifest (butl::manifest_parser&,
+ butl::manifest_name_value start,
+ bool ignore_unknown = false);
+
+ void
+ serialize (butl::manifest_serializer&) const;
+ };
+
+ class review_manifests: public vector<review_manifest>
+ {
+ public:
+ review_manifests () = default;
+ review_manifests (butl::manifest_parser&, bool ignore_unknown = false);
+
+ void
+ serialize (butl::manifest_serializer&) const;
+ };
+}
+
+#endif // LIBBREP_REVIEW_MANIFEST_HXX
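A minimal usage sketch for this interface (assumed file name; error handling omitted for brevity):

  #include <iostream>

  #include <libbutl/fdstream.hxx>
  #include <libbutl/manifest-parser.hxx>

  #include <libbrep/review-manifest.hxx>

  int
  main ()
  {
    butl::ifdstream ifs ("reviews.manifest");
    butl::manifest_parser mp (ifs, "reviews.manifest");

    // Parse all the review manifests in the file.
    //
    brep::review_manifests ms (mp, true /* ignore_unknown */);

    for (const brep::review_manifest& m: ms)
      std::cout << m.reviewed_by << std::endl;
  }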
diff --git a/load/.gitignore b/load/.gitignore
index 035e847..314cf8f 100644
--- a/load/.gitignore
+++ b/load/.gitignore
@@ -1,2 +1,3 @@
*-options.?xx
brep-load
+brep-load-with-metadata
diff --git a/load/buildfile b/load/buildfile
index 4278f20..51b374d 100644
--- a/load/buildfile
+++ b/load/buildfile
@@ -6,11 +6,17 @@ import libs += libodb-pgsql%lib{odb-pgsql}
import libs += libbutl%lib{butl}
import libs += libbpkg%lib{bpkg}
+import mods = bpkg-util%bash{utility}
+
include ../libbrep/
+./: exe{brep-load} exe{brep-load-with-metadata}
+
exe{brep-load}: {hxx ixx cxx}{* -load-options} {hxx ixx cxx}{load-options} \
../libbrep/lib{brep} $libs
+exe{brep-load-with-metadata}: in{load-with-metadata} $mods
+
# Build options.
#
obj{load}: cxx.poptions += -DBREP_COPYRIGHT=\"$copyright\"
diff --git a/load/load-with-metadata.in b/load/load-with-metadata.in
new file mode 100644
index 0000000..01ccd16
--- /dev/null
+++ b/load/load-with-metadata.in
@@ -0,0 +1,147 @@
+#!/usr/bin/env bash
+
+# file : load/load-with-metadata.in
+# license : MIT; see accompanying LICENSE file
+
+# A wrapper around brep-load which pulls the package metadata from a git
+# repository and then runs brep-load, passing the metadata directory to it.
+#
+# Specifically, pull a pre-cloned (read-only) git repository with the contents
+# of an archive-based bpkg repository. Then run brep-load with the `--metadata
+# <dir>/owners` option, adding the --metadata-changed option if the current
+# snapshot of the repository has not yet been processed by brep-load, and
+# forwarding any further arguments to brep-load.
+#
+# --timeout <seconds>
+#
+# Git operation timeout. Specifically, the operation will be aborted if
+# there is no network activity for the specified time. Default is 60
+# seconds. Note that currently the git timeout is only supported for the
+# http(s) transport.
+#
+# --brep-load <path>
+#
+#   The brep-load program to be used. This should be the path to the
+#   brep-load executable. By default, brep-load from the script's own
+#   directory is used, if present, and plain brep-load otherwise.
+#
+# Note also that this script maintains the <dir>.load file which contains the
+# last successfully processed commit.
+#
+usage="usage: $0 [<options>] <dir> [<brep-load-args>]"
+
+owd="$(pwd)"
+trap "{ cd '$owd'; exit 1; }" ERR
+set -o errtrace # Trap in functions and subshells.
+set -o pipefail # Fail if any pipeline command fails.
+shopt -s lastpipe # Execute last pipeline command in the current shell.
+shopt -s nullglob # Expand no-match globs to nothing rather than themselves.
+
+@import bpkg-util/utility@ # check_git_connectivity()
+
+# The script's own options.
+#
+timeout=60
+brep_load=
+
+while [[ "$#" -gt 0 ]]; do
+ case "$1" in
+ --timeout)
+ shift
+ timeout="$1"
+ shift || true
+ ;;
+ --brep-load)
+ shift
+ brep_load="$1"
+ shift || true
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+# The repository directory.
+#
+repo_dir="${1%/}"
+
+# Validate options and arguments.
+#
+if [[ -z "$repo_dir" ]]; then
+ error "$usage"
+fi
+
+if [[ ! -d "$repo_dir" ]]; then
+ error "'$repo_dir' does not exist or is not a directory"
+fi
+
+shift # repo_dir
+
+# If the brep-load path is not specified, then use the brep-load program from
+# the script's directory, if present. Otherwise, fall back to the plain
+# 'brep-load' name (resolved via PATH).
+#
+if [[ -z "$brep_load" ]]; then
+ brep_load="$(dirname "$(realpath "${BASH_SOURCE[0]}")")/brep-load"
+
+ if [[ ! -x "$brep_load" ]]; then
+ brep_load=brep-load
+ fi
+fi
+
+# Make sure the commit file is present.
+#
+load_commit="$repo_dir.load"
+touch "$load_commit"
+
+# Pull the repository shallowly.
+#
+if ! remote_url="$(git -C "$repo_dir" config --get remote.origin.url)"; then
+ error "'$repo_dir' is not a git repository"
+fi
+
+# Save the remote repository name and branch the current commit has been
+# fetched from, separated by a space. For example, 'origin master'.
+#
+# Note that if the commit belongs to multiple repositories/branches, then we
+# extract the first such pair from the git-log output.
+#
+refs=$(git -C "$repo_dir" log -1 --format=%D)
+repo_branch="$(sed -n -E 's%^[^/]+ ([^/ ]+)/([^/ ,]+).*$%\1 \2%p' <<<"$refs")"
+
+if [[ -z "$repo_branch" ]]; then
+ error "unable to extract repository and branch from '$refs'"
+fi
+
+# Git doesn't support a connection timeout option. The options we use are
+# just an approximation that, in particular, doesn't cover establishing the
+# connection. To work around this problem, before running a git command that
+# communicates with the remote repository we manually check connectivity to
+# it.
+#
+check_git_connectivity "$remote_url" "$timeout"
+
+# Fail if no network activity happens during the time specified.
+#
+# Note: keep $repo_branch expansion unquoted.
+#
+git -c http.lowSpeedLimit=1 -c "http.lowSpeedTime=$timeout" \
+ -C "$repo_dir" fetch -q --depth=1 $repo_branch
+
+git -C "$repo_dir" reset -q --hard FETCH_HEAD
+
+# Match the HEAD commit id to the one stored in the file. If it matches, then
+# nothing has changed in the repository since it was last processed by
+# brep-load and we should not pass the --metadata-changed option to brep-load.
+#
+commit="$(git -C "$repo_dir" rev-parse HEAD)"
+pc="$(cat "$load_commit")"
+
+loader_options=(--metadata "$repo_dir/owners")
+
+if [[ "$commit" != "$pc" ]]; then
+ loader_options+=(--metadata-changed)
+fi
+
+"$brep_load" "${loader_options[@]}" "$@"
+
+echo "$commit" >"$load_commit"
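A hypothetical invocation (example paths only): the first argument is the pre-cloned metadata repository directory and the remaining arguments are forwarded to brep-load:

  brep-load-with-metadata --timeout 30 /var/brep/package-metadata /etc/brep/loadtab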
diff --git a/load/load.cli b/load/load.cli
index bda186a..fbdfbd8 100644
--- a/load/load.cli
+++ b/load/load.cli
@@ -126,6 +126,29 @@ class options
\cb{--service-id} option to be specified."
};
+ brep::dir_path --metadata
+ {
+ "<dir>",
+ "Directory where the package metadata manifest files are located. If
+ specified, then (re-)load the metadata if the package information is also
+ (re-)loaded or update it if the \cb{--metadata-changed} option is
+ specified.
+
+ The subdirectory hierarchy under this directory is expected to be in the
+ following form:
+
+ \
+ <project>/<package>/<version>/
+ \
+ "
+ }
+
+ bool --metadata-changed
+ {
+ "Update the package metadata even if the package information is not
+ reloaded."
+ };
+
brep::path --overrides-file
{
"<file>",
@@ -186,7 +209,7 @@ class options
this option to specify multiple package manager options."
}
- brep::path openssl = "openssl"
+ brep::path --openssl = "openssl"
{
"<path>",
"The openssl program to be used for crypto operations. You can also
@@ -195,7 +218,7 @@ class options
specified, then \cb{brep-load} will use \cb{openssl} by default."
}
- brep::strings openssl-option
+ brep::strings --openssl-option
{
"<opt>",
"Additional option to be passed to the openssl program (see \cb{openssl}
diff --git a/load/load.cxx b/load/load.cxx
index 2b2cd56..f79b606 100644
--- a/load/load.cxx
+++ b/load/load.cxx
@@ -3,6 +3,7 @@
#include <signal.h> // signal()
+#include <map>
#include <cerrno>
#include <chrono>
#include <thread> // this_thread::sleep_for()
@@ -31,6 +32,7 @@
#include <libbrep/package.hxx>
#include <libbrep/package-odb.hxx>
#include <libbrep/database-lock.hxx>
+#include <libbrep/review-manifest.hxx>
#include <load/load-options.hxx>
#include <load/options-types.hxx>
@@ -56,6 +58,7 @@ static const char* help_info (
static const path packages ("packages.manifest");
static const path repositories ("repositories.manifest");
+static const path reviews ("reviews.manifest");
// Retry executing bpkg on recoverable errors for about 10 seconds.
//
@@ -362,6 +365,56 @@ repository_info (const options& lo, const string& rl, const cstrings& options)
}
}
+// Map of package versions to their metadata information in the form it is
+// stored in the database (reviews summary, etc).
+//
+// This map is filled by recursively traversing the metadata directory and
+// parsing the encountered metadata manifest files (reviews.manifest, etc; see
+// --metadata option for background on metadata). Afterwards, this map is used
+// as a data source for the package objects being persisted/updated.
+//
+struct package_version_key
+{
+ package_name name;
+ brep::version version;
+
+ package_version_key (package_name n, brep::version v)
+ : name (move (n)), version (move (v)) {}
+
+ bool
+ operator< (const package_version_key& k) const
+ {
+ if (int r = name.compare (k.name))
+ return r < 0;
+
+ return version < k.version;
+ }
+};
+
+class package_version_metadata
+{
+public:
+ // Extracted from the package metadata directory. Must match the respective
+ // package manifest information.
+ //
+ package_name project;
+
+ optional<reviews_summary> reviews;
+
+ // The directory in which the metadata manifest files are located. It has
+ // the <project>/<package>/<version> form and is only used for diagnostics.
+ //
+ dir_path
+ directory () const
+ {
+ assert (reviews); // At least one kind of metadata must be present.
+ return reviews->manifest_file.directory ();
+ }
+};
+
+using package_version_metadata_map = std::map<package_version_key,
+ package_version_metadata>;
+
// Load the repository packages from the packages.manifest file and persist
// the repository. Should be called once per repository.
//
@@ -372,7 +425,8 @@ load_packages (const options& lo,
database& db,
bool ignore_unknown,
const manifest_name_values& overrides,
- const string& overrides_name)
+ const string& overrides_name,
+ optional<package_version_metadata_map>& metadata)
{
// packages_timestamp other than timestamp_nonexistent signals the
// repository packages are already loaded.
@@ -728,6 +782,31 @@ load_packages (const options& lo,
keys_to_objects (move (pm.build_configs[i].bot_keys));
}
+ optional<reviews_summary> rvs;
+
+ if (metadata)
+ {
+ auto i (metadata->find (package_version_key {pm.name, pm.version}));
+
+ if (i != metadata->end ())
+ {
+ package_version_metadata& md (i->second);
+
+ if (md.project != project)
+ {
+ cerr << "error: project '" << project << "' of package "
+ << pm.name << ' ' << pm.version << " doesn't match "
+ << "metadata directory path "
+ << lo.metadata () / md.directory ();
+
+ throw failed ();
+ }
+
+ if (md.reviews)
+ rvs = move (md.reviews);
+ }
+ }
+
p = make_shared<package> (
move (pm.name),
move (pm.version),
@@ -758,6 +837,7 @@ load_packages (const options& lo,
move (pm.build_auxiliaries),
move (bot_keys),
move (build_configs),
+ move (rvs),
move (pm.location),
move (pm.fragment),
move (pm.sha256sum),
@@ -1153,13 +1233,16 @@ load_repositories (const options& lo,
// We don't apply overrides to the external packages.
//
+ optional<package_version_metadata_map> metadata;
+
load_packages (lo,
pr,
!pr->cache_location.empty () ? pr->cache_location : cl,
db,
ignore_unknown,
manifest_name_values () /* overrides */,
- "" /* overrides_name */);
+ "" /* overrides_name */,
+ metadata);
load_repositories (lo,
pr,
@@ -1778,6 +1861,11 @@ try
}
}
+ // Note: the interactive tenant implies private.
+ //
+ if (ops.interactive_specified ())
+ ops.private_ (true);
+
// Parse and validate overrides, if specified.
//
// Note that here we make sure that the overrides manifest is valid.
@@ -1818,34 +1906,236 @@ try
ops.db_port (),
"options='-c default_transaction_isolation=serializable'");
+ // Load the description of all the internal repositories from the
+ // configuration file.
+ //
+ internal_repositories irs (load_repositories (path (argv[1])));
+
// Prevent several brep utility instances from updating the package database
// simultaneously.
//
database_lock l (db);
- transaction t (db.begin ());
-
- // Check that the package database schema matches the current one.
+ // Check that the package database schema matches the current one and
+ // whether the package information needs to be (re-)loaded.
//
- const string ds ("package");
- if (schema_catalog::current_version (db, ds) != db.schema_version (ds))
+ bool load_pkgs;
{
- cerr << "error: package database schema differs from the current one"
- << endl << " info: use brep-migrate to migrate the database" << endl;
- throw failed ();
+ transaction t (db.begin ());
+
+ // Check the database schema match.
+ //
+ const string ds ("package");
+
+ if (schema_catalog::current_version (db, ds) != db.schema_version (ds))
+ {
+ cerr << "error: package database schema differs from the current one"
+ << endl << " info: use brep-migrate to migrate the database" << endl;
+ throw failed ();
+ }
+
+ load_pkgs = (ops.force () || changed (tnt, irs, db));
+
+ t.commit ();
}
- // Note: the interactive tenant implies private.
+ // Check if the package version metadata needs to be (re-)loaded and, if
+ // that's the case, stash it in memory.
//
- if (ops.interactive_specified ())
- ops.private_ (true);
+ optional<package_version_metadata_map> metadata;
+ if (ops.metadata_specified () && (load_pkgs || ops.metadata_changed ()))
+ {
+ metadata = package_version_metadata_map ();
- // Load the description of all the internal repositories from the
- // configuration file.
+ const dir_path& d (ops.metadata ());
+
+ // The first-level subdirectories are the package projects.
+ //
+ try
+ {
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::no_follow))
+ {
+ const string& n (e.path ().string ());
+
+ if (e.type () != entry_type::directory || n[0] == '.')
+ continue;
+
+ package_name project;
+
+ try
+ {
+ project = package_name (n);
+ }
+ catch (const invalid_argument& e)
+ {
+ cerr << "error: name of subdirectory '" << n << "' in " << d
+ << " is not a project name: " << e << endl;
+ throw failed ();
+ }
+
+ // The second-level subdirectories are the package names.
+ //
+ dir_path pd (d / path_cast<dir_path> (e.path ()));
+
+ try
+ {
+ for (const dir_entry& e: dir_iterator (pd, dir_iterator::no_follow))
+ {
+ const string& n (e.path ().string ());
+
+ if (e.type () != entry_type::directory || n[0] == '.')
+ continue;
+
+ package_name name;
+
+ try
+ {
+ name = package_name (n);
+ }
+ catch (const invalid_argument& e)
+ {
+ cerr << "error: name of subdirectory '" << n << "' in " << pd
+ << " is not a package name: " << e << endl;
+ throw failed ();
+ }
+
+ // The third-level subdirectories are the package versions.
+ //
+ dir_path vd (pd / path_cast<dir_path> (e.path ()));
+
+ try
+ {
+ for (const dir_entry& e: dir_iterator (vd,
+ dir_iterator::no_follow))
+ {
+ const string& n (e.path ().string ());
+
+ if (e.type () != entry_type::directory || n[0] == '.')
+ continue;
+
+ version ver;
+
+ try
+ {
+ ver = version (n);
+ }
+ catch (const invalid_argument& e)
+ {
+ cerr << "error: name of subdirectory '" << n << "' in " << vd
+ << " is not a package version: " << e << endl;
+ throw failed ();
+ }
+
+ dir_path md (vd / path_cast<dir_path> (e.path ()));
+
+ // Parse the reviews.manifest file, if present.
+ //
+ // Note that, semantically, an absent manifest file and an
+ // empty manifest list are equivalent and both result in an
+ // absent reviews summary.
+ //
+ optional<reviews_summary> rs;
+ {
+ path rf (md / reviews);
+
+ try
+ {
+ if (file_exists (rf))
+ {
+ ifdstream ifs (rf);
+ manifest_parser mp (ifs, rf.string ());
+
+ // Count the passed and failed reviews.
+ //
+ size_t ps (0);
+ size_t fl (0);
+
+ for (review_manifest& m:
+ review_manifests (mp, ops.ignore_unknown ()))
+ {
+ bool fail (false);
+
+ for (const review_aspect& r: m.results)
+ {
+ switch (r.result)
+ {
+ case review_result::fail: fail = true; break;
+
+ case review_result::unchanged:
+ {
+ cerr << "error: unsupported review result "
+ << "'unchanged' in " << rf << endl;
+ throw failed ();
+ }
+
+ case review_result::pass: break; // Noop
+ }
+ }
+
+ ++(fail ? fl : ps);
+ }
+
+ if (ps + fl != 0)
+ rs = reviews_summary {ps, fl, rf.relative (d)};
+ }
+ }
+ catch (const manifest_parsing& e)
+ {
+ cerr << "error: unable to parse reviews: " << e << endl;
+ throw failed ();
+ }
+ catch (const io_error& e)
+ {
+ cerr << "error: unable to read " << rf << ": " << e << endl;
+ throw failed ();
+ }
+ catch (const system_error& e)
+ {
+ cerr << "error: unable to stat " << rf << ": " << e << endl;
+ throw failed ();
+ }
+ }
+
+ // Add the package version metadata to the map if any kind of
+ // metadata is present.
+ //
+ if (rs)
+ {
+ (*metadata)[package_version_key {name, move (ver)}] =
+ package_version_metadata {project, move (rs)};
+ }
+ }
+ }
+ catch (const system_error& e)
+ {
+ cerr << "error: unable to iterate over " << vd << ": " << e
+ << endl;
+ throw failed ();
+ }
+ }
+ }
+ catch (const system_error& e)
+ {
+ cerr << "error: unable to iterate over " << pd << ": " << e << endl;
+ throw failed ();
+ }
+ }
+ }
+ catch (const system_error& e)
+ {
+ cerr << "error: unable to iterate over " << d << ": " << e << endl;
+ throw failed ();
+ }
+ }
+
+ // Bail out if neither package information nor metadata needs to be loaded.
//
- internal_repositories irs (load_repositories (path (argv[1])));
+ if (!load_pkgs && !metadata)
+ return 0;
+
+ transaction t (db.begin ());
- if (ops.force () || changed (tnt, irs, db))
+ if (load_pkgs)
{
shared_ptr<tenant> t; // Not NULL in the --existing-tenant mode.
@@ -2015,7 +2305,8 @@ try
db,
ops.ignore_unknown (),
overrides,
- ops.overrides_file ().string ());
+ ops.overrides_file ().string (),
+ metadata);
}
// On the second pass over the internal repositories we load their
@@ -2070,6 +2361,83 @@ try
}
}
}
+ else if (metadata)
+ {
+ // Iterate over the packages which contain metadata and apply any
+ // changes. Erase the matched entries from the metadata map, so that at
+ // the end only the newly added metadata is left in the map.
+ //
+ using query = query<package>;
+
+ for (package& p: db.query<package> (query::reviews.pass.is_not_null ()))
+ {
+ bool u (false);
+ auto i (metadata->find (package_version_key {p.name, p.version}));
+
+ if (i == metadata->end ())
+ {
+ // Mark the section as loaded, so the reviews summary is updated.
+ //
+ p.reviews_section.load ();
+ p.reviews = nullopt;
+ u = true;
+ }
+ else
+ {
+ package_version_metadata& md (i->second);
+
+ if (md.project != p.project)
+ {
+ cerr << "error: project '" << p.project << "' of package "
+ << p.name << ' ' << p.version << " doesn't match metadata "
+ << "directory path " << ops.metadata () / md.directory ();
+
+ throw failed ();
+ }
+
+ db.load (p, p.reviews_section);
+
+ if (p.reviews != md.reviews)
+ {
+ p.reviews = move (md.reviews);
+ u = true;
+ }
+
+ metadata->erase (i);
+ }
+
+ if (u)
+ db.update (p);
+ }
+
+ // Add the newly added metadata to the packages.
+ //
+ for (auto& m: *metadata)
+ {
+ if (shared_ptr<package> p =
+ db.find<package> (package_id (tnt, m.first.name, m.first.version)))
+ {
+ package_version_metadata& md (m.second);
+
+ if (m.second.project != p->project)
+ {
+ cerr << "error: project '" << p->project << "' of package "
+ << p->name << ' ' << p->version << " doesn't match metadata "
+ << "directory path " << ops.metadata () / md.directory ();
+
+ throw failed ();
+ }
+
+ // Mark the section as loaded, so the reviews summary is updated.
+ //
+ p->reviews_section.load ();
+ p->reviews = move (md.reviews);
+
+ db.update (p);
+ }
+ }
+ }
t.commit ();
return 0;
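
For orientation, the metadata scan above assumes a three-level directory
layout under the metadata directory, with the reviews manifest at the
bottom. A hypothetical example (names illustrative):

  metadata/                     # metadata directory (d)
    hello/                      # project
      libhello/                 # package name
        1.2.3/                  # package version
          reviews.manifest

Note also that the stored summary counts manifests rather than individual
results: a manifest with at least one fail result counts as failed,
otherwise as passed. So two manifests, one of them containing a fail
result, would yield reviews_summary {1 /* pass */, 1 /* fail */,
"hello/libhello/1.2.3/reviews.manifest"}, the path being relative to the
metadata directory (rf.relative (d) above).
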
diff --git a/load/types-parsers.cxx b/load/types-parsers.cxx
index a18330d..4f031df 100644
--- a/load/types-parsers.cxx
+++ b/load/types-parsers.cxx
@@ -40,6 +40,13 @@ namespace cli
parse_path (x, s);
}
+ void parser<dir_path>::
+ parse (dir_path& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ parse_path (x, s);
+ }
+
void parser<ignore_unresolved_conditional_dependencies>::
parse (ignore_unresolved_conditional_dependencies& x, bool& xs, scanner& s)
{
diff --git a/load/types-parsers.hxx b/load/types-parsers.hxx
index fcf5113..b79cca4 100644
--- a/load/types-parsers.hxx
+++ b/load/types-parsers.hxx
@@ -26,6 +26,13 @@ namespace cli
};
template <>
+ struct parser<brep::dir_path>
+ {
+ static void
+ parse (brep::dir_path&, bool&, scanner&);
+ };
+
+ template <>
struct parser<brep::ignore_unresolved_conditional_dependencies>
{
static void
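
The new parser<dir_path> specialization presumably backs the dir_path-typed
metadata directory option whose accessors (ops.metadata (),
ops.metadata_specified (), ops.metadata_changed ()) are used in load.cxx
above; the option spelling itself is defined in load.cli and not shown in
this diff. A hypothetical invocation sketch, assuming the option is named
--metadata:

  $ brep-load --metadata /var/brep/metadata /etc/brep/loadtab
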
diff --git a/manifest b/manifest
index 0dea967..720f35e 100644
--- a/manifest
+++ b/manifest
@@ -1,6 +1,6 @@
: 1
name: brep
-version: 0.17.0-a.0.z
+version: 0.18.0-a.0.z
project: build2
summary: build2 package repository web interface
license: MIT
@@ -16,8 +16,8 @@ build-warning-email: builds@build2.org
requires: c++14
requires: postgresql >= 9.0
requires: apache2 ; Including development files (httpd.h header, etc).
-depends: * build2 >= 0.16.0-
-depends: * bpkg >= 0.16.0-
+depends: * build2 >= 0.16.0
+depends: * bpkg >= 0.16.0
# @@ DEP Should probably become conditional dependency.
#requires: ? cli ; Only required if changing .cli files.
depends: libapr1
@@ -25,13 +25,13 @@ depends: libapreq2
depends: libcmark-gfm == 0.29.0-a.4
depends: libcmark-gfm-extensions == 0.29.0-a.4
depends: libstudxml ^1.1.0-b.10
-depends: libodb [2.5.0-b.26.1 2.5.0-b.27)
-depends: libodb-pgsql [2.5.0-b.26.1 2.5.0-b.27)
-depends: libbutl [0.17.0-a.0.1 0.17.0-a.1)
-depends: libbpkg [0.17.0-a.0.1 0.17.0-a.1)
-depends: libbbot [0.17.0-a.0.1 0.17.0-a.1)
-depends: libbutl.bash [0.17.0-a.0.1 0.17.0-a.1)
-depends: bpkg-util [0.17.0-a.0.1 0.17.0-a.1)
+depends: libodb ^2.5.0-b.27
+depends: libodb-pgsql ^2.5.0-b.27
+depends: libbutl [0.18.0-a.0.1 0.18.0-a.1)
+depends: libbpkg [0.18.0-a.0.1 0.18.0-a.1)
+depends: libbbot [0.18.0-a.0.1 0.18.0-a.1)
+depends: libbutl.bash [0.18.0-a.0.1 0.18.0-a.1)
+depends: bpkg-util [0.18.0-a.0.1 0.18.0-a.1)
# This package depends on platform-specific implementation libraries that
# are (currently) not packaged and need to come from the system package
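
For reference, the two constraint notations mixed above differ only in
form: the bracketed notation spells out both endpoints of a half-open
range, while the caret notation is shorthand for a compatible-version
range. Approximately (semantics as defined by the build2 package manifest
format):

  depends: libbutl [0.18.0-a.0.1 0.18.0-a.1)  # 0.18.0-a.0.1 <= v < 0.18.0-a.1
  depends: libodb ^2.5.0-b.27                 # roughly [2.5.0-b.27 3.0.0-)
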
diff --git a/migrate/migrate.cxx b/migrate/migrate.cxx
index 090fcac..095e6a3 100644
--- a/migrate/migrate.cxx
+++ b/migrate/migrate.cxx
@@ -208,7 +208,6 @@ create (database& db, bool extra_only) const
// Register the data migration functions for the package database schema.
//
-#if 0
template <schema_version v>
using package_migration_entry_base =
data_migration_entry<v, LIBBREP_PACKAGE_SCHEMA_VERSION_BASE>;
@@ -220,11 +219,14 @@ struct package_migration_entry: package_migration_entry_base<v>
: package_migration_entry_base<v> (f, "package") {}
};
-static const package_migration_entry<26>
-package_migrate_v26 ([] (database& db)
+static const package_migration_entry<36>
+package_migrate_v36 ([] (database& db)
{
+ // Set the reference count to 1 for tenant-associated services.
+ //
+ db.execute ("UPDATE tenant SET service_ref_count = 1 "
+ "WHERE service_id IS NOT NULL");
});
-#endif
// Register the data migration functions for the build database schema.
//
diff --git a/mod/ci-common.cxx b/mod/ci-common.cxx
index c0ef89f..e720914 100644
--- a/mod/ci-common.cxx
+++ b/mod/ci-common.cxx
@@ -14,6 +14,8 @@
#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
#include <libbutl/manifest-serializer.hxx>
+#include <libbrep/build.hxx>
+#include <libbrep/build-odb.hxx>
#include <libbrep/build-package.hxx>
#include <libbrep/build-package-odb.hxx>
@@ -534,77 +536,175 @@ namespace brep
s.next ("", ""); // End of manifest.
}
- optional<string> ci_start::
+ optional<pair<string, ci_start::duplicate_tenant_result>> ci_start::
create (const basic_mark& error,
const basic_mark&,
const basic_mark* trace,
odb::core::database& db,
+ size_t retry,
tenant_service&& service,
duration notify_interval,
- duration notify_delay) const
+ duration notify_delay,
+ duplicate_tenant_mode mode) const
{
using namespace odb::core;
- // Generate the request id.
- //
- string request_id;
+ assert (mode == duplicate_tenant_mode::fail || !service.id.empty ());
+ assert (!transaction::has_current ());
- try
- {
- request_id = uuid::generate ().string ();
- }
- catch (const system_error& e)
- {
- error << "unable to generate request id: " << e;
- return nullopt;
- }
+ build_tenant t;
- // Use the generated request id if the tenant service id is not specified.
+ // Set the reference count to 1 for the `created` result.
//
- if (service.id.empty ())
- service.id = request_id;
+ duplicate_tenant_result r (duplicate_tenant_result::created);
+ service.ref_count = 1;
- build_tenant t (move (request_id),
- move (service),
- system_clock::now () - notify_interval + notify_delay,
- notify_interval);
+ for (string request_id;;)
{
- assert (!transaction::has_current ());
+ try
+ {
+ transaction tr (db.begin ());
- transaction tr (db.begin ());
+ // Unless we are in the 'fail on duplicate' mode, check if this
+ // service type/id pair is already in use and, if that's the case,
+ // either ignore it or reassign this service to a new tenant,
+ // canceling the old one.
+ //
+ if (mode != duplicate_tenant_mode::fail)
+ {
+ using query = query<build_tenant>;
+
+ shared_ptr<build_tenant> t (
+ db.query_one<build_tenant> (query::service.id == service.id &&
+ query::service.type == service.type));
+ if (t != nullptr)
+ {
+ // Reduce the replace_archived mode to the replace or ignore mode.
+ //
+ if (mode == duplicate_tenant_mode::replace_archived)
+ {
+ mode = (t->archived
+ ? duplicate_tenant_mode::replace
+ : duplicate_tenant_mode::ignore);
+ }
+
+ // Shouldn't be here otherwise.
+ //
+ assert (t->service);
+
+ // Bail out in the ignore mode and cancel the tenant in the
+ // replace mode.
+ //
+ if (mode == duplicate_tenant_mode::ignore)
+ {
+ // Increment the reference count for the `ignored` result.
+ //
+ ++(t->service->ref_count);
+
+ db.update (t);
+ tr.commit ();
+
+ return make_pair (move (t->id), duplicate_tenant_result::ignored);
+ }
+
+ assert (mode == duplicate_tenant_mode::replace);
+
+ // Preserve the current reference count for the `replaced` result.
+ //
+ service.ref_count = t->service->ref_count;
+
+ if (t->unloaded_timestamp)
+ {
+ db.erase (t);
+ }
+ else
+ {
+ t->service = nullopt;
+ t->archived = true;
+ db.update (t);
+ }
+
+ r = duplicate_tenant_result::replaced;
+ }
+ }
- // Note that in contrast to brep-load, we know that the tenant id is
- // unique and thus we don't try to remove a tenant with such an id.
- // There is also not much reason to assume that we may have switched
- // from the single-tenant mode here and remove the respective tenant,
- // unless we are in the tenant-service functionality development mode.
- //
+ // Generate the request id.
+ //
+ if (request_id.empty ())
+ try
+ {
+ request_id = uuid::generate ().string ();
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to generate request id: " << e;
+ return nullopt;
+ }
+
+ // Use the generated request id if the tenant service id is not
+ // specified.
+ //
+ if (service.id.empty ())
+ service.id = request_id;
+
+ t = build_tenant (move (request_id),
+ move (service),
+ system_clock::now () - notify_interval + notify_delay,
+ notify_interval);
+
+ // Note that in contrast to brep-load, we know that the tenant id is
+ // unique and thus we don't try to remove a tenant with such an id.
+ // There is also not much reason to assume that we may have switched
+ // from the single-tenant mode here and remove the respective tenant,
+ // unless we are in the tenant-service functionality development mode.
+ //
#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
- cstrings ts ({""});
+ cstrings ts ({""});
- db.erase_query<build_package> (
- query<build_package>::id.tenant.in_range (ts.begin (), ts.end ()));
+ db.erase_query<build_package> (
+ query<build_package>::id.tenant.in_range (ts.begin (), ts.end ()));
- db.erase_query<build_repository> (
- query<build_repository>::id.tenant.in_range (ts.begin (), ts.end ()));
+ db.erase_query<build_repository> (
+ query<build_repository>::id.tenant.in_range (ts.begin (), ts.end ()));
- db.erase_query<build_public_key> (
- query<build_public_key>::id.tenant.in_range (ts.begin (), ts.end ()));
+ db.erase_query<build_public_key> (
+ query<build_public_key>::id.tenant.in_range (ts.begin (), ts.end ()));
- db.erase_query<build_tenant> (
- query<build_tenant>::id.in_range (ts.begin (), ts.end ()));
+ db.erase_query<build_tenant> (
+ query<build_tenant>::id.in_range (ts.begin (), ts.end ()));
#endif
- db.persist (t);
+ db.persist (t);
- tr.commit ();
- }
+ tr.commit ();
- if (trace != nullptr)
- *trace << "unloaded CI request " << t.id << " for service "
- << t.service->id << ' ' << t.service->type << " is created";
+ if (trace != nullptr)
+ *trace << "unloaded CI request " << t.id << " for service "
+ << t.service->id << ' ' << t.service->type << " is created";
+
+ // Bail out if we have successfully erased, updated, or persisted the
+ // tenant object.
+ //
+ break;
+ }
+ catch (const odb::recoverable& e)
+ {
+ // If no more retries are left, don't re-throw odb::recoverable, so that
+ // the upper level doesn't retry.
+ //
+ if (retry-- == 0)
+ throw runtime_error (e.what ());
+
+ // Prepare for the next iteration.
+ //
+ request_id = move (t.id);
+ service = move (*t.service);
+ service.ref_count = 1;
+ r = duplicate_tenant_result::created;
+ }
+ }
- return move (t.id);
+ return make_pair (move (t.id), r);
}
optional<ci_start::start_result> ci_start::
@@ -612,51 +712,69 @@ namespace brep
const basic_mark& warn,
const basic_mark* trace,
odb::core::database& db,
+ size_t retry,
tenant_service&& service,
const repository_location& repository) const
{
using namespace odb::core;
string request_id;
+
+ for (;;)
{
- assert (!transaction::has_current ());
+ try
+ {
+ assert (!transaction::has_current ());
- transaction tr (db.begin ());
+ transaction tr (db.begin ());
- using query = query<build_tenant>;
+ using query = query<build_tenant>;
- shared_ptr<build_tenant> t (
- db.query_one<build_tenant> (query::service.id == service.id &&
- query::service.type == service.type));
+ shared_ptr<build_tenant> t (
+ db.query_one<build_tenant> (query::service.id == service.id &&
+ query::service.type == service.type));
- if (t == nullptr)
- {
- error << "unable to find tenant for service " << service.id << ' '
- << service.type;
+ if (t == nullptr)
+ {
+ error << "unable to find tenant for service " << service.id << ' '
+ << service.type;
- return nullopt;
- }
- else if (t->archived)
- {
- error << "tenant " << t->id << " for service " << service.id << ' '
- << service.type << " is already archived";
+ return nullopt;
+ }
+ else if (t->archived)
+ {
+ error << "tenant " << t->id << " for service " << service.id << ' '
+ << service.type << " is already archived";
- return nullopt;
- }
- else if (!t->unloaded_timestamp)
- {
- error << "tenant " << t->id << " for service " << service.id << ' '
- << service.type << " is already loaded";
+ return nullopt;
+ }
+ else if (!t->unloaded_timestamp)
+ {
+ error << "tenant " << t->id << " for service " << service.id << ' '
+ << service.type << " is already loaded";
- return nullopt;
- }
+ return nullopt;
+ }
- t->unloaded_timestamp = nullopt;
- db.update (t);
+ t->unloaded_timestamp = nullopt;
+ db.update (t);
- tr.commit ();
+ tr.commit ();
- request_id = move (t->id);
+ request_id = move (t->id);
+
+ // Bail out if we have successfully updated the tenant object.
+ //
+ break;
+ }
+ catch (const odb::recoverable& e)
+ {
+ // If no more retries are left, don't re-throw odb::recoverable, so that
+ // the upper level doesn't retry.
+ //
+ if (retry-- == 0)
+ throw runtime_error (e.what ());
+ }
}
assert (options_ != nullptr); // Shouldn't be called otherwise.
@@ -690,33 +808,85 @@ namespace brep
const basic_mark&,
const basic_mark* trace,
odb::core::database& db,
+ size_t retry,
const string& type,
- const string& id) const
+ const string& id,
+ bool ref_count) const
{
using namespace odb::core;
assert (!transaction::has_current ());
- transaction tr (db.begin ());
+ optional<tenant_service> r;
- using query = query<build_tenant>;
+ for (;;)
+ {
+ try
+ {
+ transaction tr (db.begin ());
- shared_ptr<build_tenant> t (
- db.query_one<build_tenant> (query::service.id == id &&
- query::service.type == type));
- if (t == nullptr)
- return nullopt;
+ using query = query<build_tenant>;
- optional<tenant_service> r (move (t->service));
- t->service = nullopt;
- t->archived = true;
- db.update (t);
+ shared_ptr<build_tenant> t (
+ db.query_one<build_tenant> (query::service.id == id &&
+ query::service.type == type));
+ if (t == nullptr)
+ return nullopt;
- tr.commit ();
+ // Shouldn't be here otherwise.
+ //
+ assert (t->service && t->service->ref_count != 0);
- if (trace != nullptr)
- *trace << "CI request " << t->id << " for service " << id << ' ' << type
- << " is canceled";
+ bool cancel (!ref_count || --(t->service->ref_count) == 0);
+
+ if (cancel)
+ {
+ // Move out the service state before it is dropped from the tenant.
+ //
+ r = move (t->service);
+
+ if (t->unloaded_timestamp)
+ {
+ db.erase (t);
+ }
+ else
+ {
+ t->service = nullopt;
+ t->archived = true;
+ db.update (t);
+ }
+
+ if (trace != nullptr)
+ *trace << "CI request " << t->id << " for service " << id << ' '
+ << type << " is canceled";
+ }
+ else
+ {
+ db.update (t); // Update the service reference count.
+
+ // Move out the service state after the tenant is updated.
+ //
+ r = move (t->service);
+ }
+
+ tr.commit ();
+
+ // Bail out if we have successfully updated or erased the tenant
+ // object.
+ //
+ break;
+ }
+ catch (const odb::recoverable& e)
+ {
+ // If no more retries are left, don't re-throw odb::recoverable, so that
+ // the upper level doesn't retry.
+ //
+ if (retry-- == 0)
+ throw runtime_error (e.what ());
+
+ r = nullopt; // Prepare for the next iteration.
+ }
+ }
return r;
}
@@ -727,26 +897,50 @@ namespace brep
const basic_mark* trace,
const string& reason,
odb::core::database& db,
+ size_t retry,
const string& tid) const
{
using namespace odb::core;
assert (!transaction::has_current ());
- transaction tr (db.begin ());
+ for (;;)
+ {
+ try
+ {
+ transaction tr (db.begin ());
- shared_ptr<build_tenant> t (db.find<build_tenant> (tid));
+ shared_ptr<build_tenant> t (db.find<build_tenant> (tid));
- if (t == nullptr)
- return false;
+ if (t == nullptr)
+ return false;
- if (!t->archived)
- {
- t->archived = true;
- db.update (t);
- }
+ if (t->unloaded_timestamp)
+ {
+ db.erase (t);
+ }
+ else if (!t->archived)
+ {
+ t->archived = true;
+ db.update (t);
+ }
- tr.commit ();
+ tr.commit ();
+
+ // Bail out if we have successfully updated or erased the tenant
+ // object.
+ //
+ break;
+ }
+ catch (const odb::recoverable& e)
+ {
+ // If no more retries are left, don't re-throw odb::recoverable, so that
+ // the upper level doesn't retry.
+ //
+ if (retry-- == 0)
+ throw runtime_error (e.what ());
+ }
+ }
if (trace != nullptr)
*trace << "CI request " << tid << " is canceled: "
@@ -756,4 +950,109 @@ namespace brep
return true;
}
+
+ optional<build_state> ci_start::
+ rebuild (odb::core::database& db,
+ size_t retry,
+ const build_id& id,
+ function<optional<string> (const string& tenant_id,
+ const tenant_service&,
+ build_state)> uf) const
+ {
+ using namespace odb::core;
+
+ build_state s;
+
+ for (;;)
+ {
+ try
+ {
+ // NOTE: don't forget to update build_force::handle() if changing
+ // anything here.
+ //
+ transaction t (db.begin ());
+
+ package_build pb;
+ if (!db.query_one<package_build> (query<package_build>::build::id == id,
+ pb) ||
+ pb.archived)
+ {
+ return nullopt;
+ }
+
+ const shared_ptr<build>& b (pb.build);
+ s = b->state;
+
+ if (s != build_state::queued)
+ {
+ force_state force (s == build_state::built
+ ? force_state::forced
+ : force_state::forcing);
+
+ if (b->force != force)
+ {
+ b->force = force;
+ db.update (b);
+ }
+
+ if (uf != nullptr)
+ {
+ shared_ptr<build_tenant> t (db.load<build_tenant> (b->tenant));
+
+ assert (t->service);
+
+ tenant_service& ts (*t->service);
+
+ if (optional<string> data = uf (t->id, ts, s))
+ {
+ ts.data = move (*data);
+ db.update (t);
+ }
+ }
+ }
+
+ t.commit ();
+
+ // Bail out if we have successfully updated the build and tenant
+ // objects.
+ //
+ break;
+ }
+ catch (const odb::recoverable& e)
+ {
+ // If no more retries are left, don't re-throw odb::recoverable, so that
+ // the upper level doesn't retry.
+ //
+ if (retry-- == 0)
+ throw runtime_error (e.what ());
+ }
+ }
+
+ return s;
+ }
+
+ optional<ci_start::tenant_data> ci_start::
+ find (odb::core::database& db,
+ const string& type,
+ const string& id) const
+ {
+ using namespace odb::core;
+
+ assert (!transaction::has_current ());
+
+ transaction tr (db.begin ());
+
+ using query = query<build_tenant>;
+
+ shared_ptr<build_tenant> t (
+ db.query_one<build_tenant> (query::service.id == id &&
+ query::service.type == type));
+
+ tr.commit ();
+
+ if (t == nullptr || !t->service)
+ return nullopt;
+
+ return tenant_data {move (t->id), move (*t->service), t->archived};
+ }
}
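
The create(), load(), cancel(), and rebuild() implementations above all
follow the same retry discipline. A minimal distilled sketch of that
pattern (the helper below is illustrative and not part of brep):

  #include <odb/database.hxx>
  #include <odb/exceptions.hxx>
  #include <odb/transaction.hxx>

  #include <stdexcept>

  // Run a transactional action, retrying on recoverable database failures
  // (deadlocks, loss of connection, etc) and throwing runtime_error once
  // the retries are exhausted.
  //
  template <typename F>
  auto
  retry_transaction (odb::core::database& db, size_t retry, F f)
    -> decltype (f ())
  {
    for (;;)
    {
      try
      {
        odb::core::transaction t (db.begin ());
        auto r (f ());
        t.commit ();
        return r;
      }
      catch (const odb::recoverable& e)
      {
        if (retry-- == 0)
          throw std::runtime_error (e.what ());
      }
    }
  }
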
diff --git a/mod/ci-common.hxx b/mod/ci-common.hxx
index 6a07154..a38ac54 100644
--- a/mod/ci-common.hxx
+++ b/mod/ci-common.hxx
@@ -9,6 +9,7 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
+#include <libbrep/build.hxx>
#include <libbrep/common.hxx>
#include <mod/diagnostics.hxx>
@@ -40,6 +41,10 @@ namespace brep
// Note that the inability to generate the reference is an internal
// error. Thus, it is not optional.
//
+ // Note that if the CI request information is persisted to the database
+ // (which, depending on the CI request handler, may not be the case), then
+ // the reference is assumed to be the respective tenant id.
+ //
struct start_result
{
uint16_t status;
@@ -48,8 +53,13 @@ namespace brep
vector<pair<string, string>> custom_result;
};
- // In the optional service information, if id is empty, then the generated
- // reference is used instead.
+ // In the optional tenant service information, if service id is empty,
+ // then the generated tenant id is used instead.
+ //
+ // Note that if the tenant service is specified, then the CI request
+ // information is expected to be persisted to the database and thus
+ // start_result::reference denotes the tenant id in this case (see above
+ // for details).
//
optional<start_result>
start (const basic_mark& error,
@@ -65,27 +75,63 @@ namespace brep
const vector<pair<string, string>>& custom_request = {},
const vector<pair<string, string>>& overrides = {}) const;
- // Create an unloaded CI request returning start_result::reference on
- // success and nullopt on an internal error. Such a request is not started
- // until loaded with the load() function below. Configure the time
- // interval between the build_unloaded() notifications for the being
- // created tenant and set the initial delay for the first notification.
- // See also the build_unloaded() tenant services notification.
+ // Create an unloaded CI request returning tenant id on success and
+ // nullopt on an internal error. Such a request is not started until
+ // loaded with the load() function below. Configure the time interval
+ // between the build_unloaded() notifications for the tenant being created
+ // and set the initial delay for the first notification. See also the
+ // build_unloaded() tenant services notification.
+ //
+ // The duplicate_tenant_mode argument specifies the behavior in case of
+ // a duplicate tenant_service type/id pair. The default is to fail by
+ // throwing an exception. Alternatively, this can be ignored or the
+ // previous tenant can be canceled (thus freeing the type/id pair; see
+ // below) and a new tenant with the same type/id created. In both these
+ // modes (ignore and replace), the second half of the returned pair
+ // indicates whether there was a duplicate. If there were, then for the
+ // ignore mode the returned tenant id corresponds to the old tenant and
+ // for the replace mode -- to the new tenant.
+ //
+ // The replace_archived mode is a variant of replace that replaces if the
+ // tenant is already archived and ignores it otherwise (with the result
+ // having the same semantics as in the replace and ignore modes).
+ //
+ // Note also that the duplicate_tenant_mode::replace modes are not the
+ // same as separate calls to cancel() and then to create() since the
+ // latter would happen in two separate transactions and would thus be racy.
+ //
+ // Finally note that only duplicate_tenant_mode::fail can be used if the
+ // service id is empty.
+ //
+ // The tenant reference count is set to 1 if the result is `created`,
+ // incremented if the result is `ignored`, and preserved if the result is
+ // `replaced`.
+ //
+ // Repeat the attempts on recoverable database failures (deadlocks, etc)
+ // and throw runtime_error if no more retries are left.
//
// Note: should be called out of the database transaction.
//
- optional<string>
+ enum class duplicate_tenant_mode {fail, ignore, replace, replace_archived};
+ enum class duplicate_tenant_result {created, ignored, replaced};
+
+ optional<pair<string, duplicate_tenant_result>>
create (const basic_mark& error,
const basic_mark& warn,
const basic_mark* trace,
odb::core::database&,
+ size_t retry,
tenant_service&&,
duration notify_interval,
- duration notify_delay) const;
+ duration notify_delay,
+ duplicate_tenant_mode = duplicate_tenant_mode::fail) const;
// Load (and start) previously created (as unloaded) CI request. Similarly
// to the start() function, return nullopt on an internal error.
//
+ // Repeat the attempts on recoverable database failures (deadlocks, etc)
+ // and throw runtime_error if no more retries are left.
+ //
// Note that tenant_service::id is used to identify the CI request tenant.
//
// Note: should be called out of the database transaction.
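
To make the create() contract above concrete, a hypothetical caller sketch
(the ci, db, retry, and diagnostics names as well as the service values are
illustrative):

  tenant_service s;
  s.id = "pr-123";
  s.type = "github";
  s.data = "{...}";

  optional<pair<string, ci_start::duplicate_tenant_result>> r (
    ci.create (error, warn, &trace,
               db,
               retry,
               move (s),
               chrono::seconds (60) /* notify_interval */,
               chrono::seconds (10) /* notify_delay */,
               ci_start::duplicate_tenant_mode::replace_archived));

  if (r && r->second == ci_start::duplicate_tenant_result::ignored)
  {
    // An unarchived tenant with this type/id already exists; r->first is
    // its (old) tenant id and its reference count was incremented.
  }
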
@@ -95,12 +141,27 @@ namespace brep
const basic_mark& warn,
const basic_mark* trace,
odb::core::database&,
+ size_t retry,
tenant_service&&,
const repository_location& repository) const;
// Cancel previously created or started CI request. Return the service
// state or nullopt if there is no tenant for such a type/id pair.
//
+ // Specifically, this function clears the tenant service state (thus
+ // allowing the same service type/id pair to be reused in another tenant)
+ // and archives the tenant, unless the tenant is unloaded, in which case
+ // it is dropped. Note that the latter allows using unloaded tenants as a
+ // relatively cheap asynchronous execution mechanism.
+ //
+ // If ref_count is true, then decrement the tenant reference count and
+ // only cancel the CI request if it becomes 0. In this mode the caller can
+ // determine if the request was actually canceled by checking if the
+ // reference count in the returned service state is 0.
+ //
+ // Repeat the attempts on recoverable database failures (deadlocks, etc)
+ // and throw runtime_error if no more retries are left.
+ //
// Note: should be called out of the database transaction.
//
optional<tenant_service>
@@ -108,13 +169,23 @@ namespace brep
const basic_mark& warn,
const basic_mark* trace,
odb::core::database&,
+ size_t retry,
const string& type,
- const string& id) const;
+ const string& id,
+ bool ref_count = false) const;
// Cancel previously created or started CI request. Return false if there
// is no tenant for the specified tenant id. Note that the reason argument
// is only used for tracing.
//
+ // Similarly to above, this function archives the tenant, unless the
+ // tenant is unloaded, in which case it is dropped. Note, however, that
+ // this version does not touch the service state (use the above version if
+ // you want to clear it).
+ //
+ // Repeat the attempts on recoverable database failures (deadlocks, etc)
+ // and throw runtime_error if no more retries are left.
+ //
// Note: should be called out of the database transaction.
//
bool
@@ -123,8 +194,76 @@ namespace brep
const basic_mark* trace,
const string& reason,
odb::core::database&,
+ size_t retry,
const string& tenant_id) const;
+ // Schedule a re-build of the package build and return the build object's
+ // current state.
+ //
+ // Specifically:
+ //
+ // - If the build has expired (build or package object doesn't exist or
+ // the package is archived or is not buildable anymore, etc), then do
+ // nothing and return nullopt.
+ //
+ // Note, however, that this function doesn't check if the build
+ // configuration still exists in the buildtab. It is assumed that the
+ // caller has already checked for that, if necessary (see
+ // build_force::handle() for an example of such a check). If not, then
+ // a re-build will be scheduled and later cleaned up by the cleaner
+ // (without notifications).
+ //
+ // - Otherwise, if the build object is in the queued state, then do
+ // nothing and return build_state::queued. It is assumed that a build
+ // object in such a state is already about to be built.
+ //
+ // - Otherwise (the build object is in the building or built state),
+ // schedule the object for the rebuild and return the current state.
+ //
+ // Note that in contrast to the build-force handler, this function doesn't
+ // send the build_queued() notification to the tenant-associated service
+ // if the object is in the building state (which is done as soon as
+ // possible to avoid races). Instead, it is assumed the service will
+ // perform any equivalent actions directly based on the returned state.
+ //
+ // The last argument, if not NULL, is called to update the service data
+ // associated with the tenant to which this build object belongs. It has
+ // the same semantics as the returned function in the tenant service
+ // callbacks (see tenant_service_build_queued). Note that it is only
+ // called if the rebuild was actually scheduled, that is, the current
+ // state is building or built.
+ //
+ // Repeat the attempts on recoverable database failures (deadlocks, etc)
+ // and throw runtime_error if no more retries are left.
+ //
+ // Note: should be called out of the database transaction.
+ //
+ optional<build_state>
+ rebuild (odb::core::database&,
+ size_t retry,
+ const build_id&,
+ function<optional<string> (const string& tenant_id,
+ const tenant_service&,
+ build_state)> = nullptr) const;
+
+ // Find the tenant given the tenant service type and id and return the
+ // associated data plus the indication of whether the tenant is archived,
+ // or nullopt if there is no such tenant.
+ //
+ // Note: should be called out of the database transaction.
+ //
+ struct tenant_data
+ {
+ string tenant_id;
+ tenant_service service;
+ bool archived;
+ };
+
+ optional<tenant_data>
+ find (odb::core::database&,
+ const string& type,
+ const string& id) const;
+
// Helpers.
//
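
And a corresponding hypothetical sketch for rebuild(), reacting to the
three documented outcomes (names illustrative):

  optional<build_state> s (
    ci.rebuild (db, retry, id,
                [] (const string& /*tenant_id*/,
                    const tenant_service& ts,
                    build_state) -> optional<string>
                {
                  return ts.data; // Or updated service data to persist.
                }));

  if (!s)
  {
    // The build has expired; nothing was scheduled.
  }
  else if (*s == build_state::queued)
  {
    // The build is already about to be built; nothing was scheduled.
  }
  else
  {
    // building or built: a re-build has been scheduled.
  }
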
diff --git a/mod/database-module.cxx b/mod/database-module.cxx
index bbb3e59..629e393 100644
--- a/mod/database-module.cxx
+++ b/mod/database-module.cxx
@@ -79,8 +79,10 @@ namespace brep
optional<string> database_module::
update_tenant_service_state (
const connection_ptr& conn,
- const string& tid,
- const function<optional<string> (const tenant_service&)>& f)
+ const string& type,
+ const string& id,
+ const function<optional<string> (const string& tenant_id,
+ const tenant_service&)>& f)
{
assert (f != nullptr); // Shouldn't be called otherwise.
@@ -96,13 +98,21 @@ namespace brep
{
transaction tr (conn->begin ());
- shared_ptr<build_tenant> t (build_db_->find<build_tenant> (tid));
+ using query = query<build_tenant>;
- if (t != nullptr && t->service)
+ shared_ptr<build_tenant> t (
+ build_db_->query_one<build_tenant> (query::service.id == id &&
+ query::service.type == type));
+
+ if (t != nullptr)
{
+ // Shouldn't be here otherwise.
+ //
+ assert (t->service);
+
tenant_service& s (*t->service);
- if (optional<string> data = f (s))
+ if (optional<string> data = f (t->id, s))
{
s.data = move (*data);
build_db_->update (t);
@@ -119,10 +129,14 @@ namespace brep
}
catch (const odb::recoverable& e)
{
+ HANDLER_DIAG;
+
+ // If no more retries are left, don't re-throw odb::recoverable, so that
+ // the upper level doesn't retry.
+ //
if (retry-- == 0)
- throw;
+ fail << e << "; no tenant service state update retries left";
- HANDLER_DIAG;
l1 ([&]{trace << e << "; " << retry + 1 << " tenant service "
<< "state update retries left";});
diff --git a/mod/database-module.hxx b/mod/database-module.hxx
index 298afbf..76f13d4 100644
--- a/mod/database-module.hxx
+++ b/mod/database-module.hxx
@@ -61,16 +61,18 @@ namespace brep
// and nullopt otherwise.
//
// Specifically, start the database transaction, query the service state,
- // and call the callback-returned function on this state. If this call
- // returns the data string (rather than nullopt), then update the service
- // state with this data and persist the change. Repeat all the above steps
- // on the recoverable database failures (deadlocks, etc).
+ // and, if present, call the callback-returned function on this state. If
+ // this call returns the data string (rather than nullopt), then update
+ // the service state with this data and persist the change. Repeat all the
+ // above steps on the recoverable database failures (deadlocks, etc).
//
optional<string>
update_tenant_service_state (
const odb::core::connection_ptr&,
- const string& tid,
- const function<optional<string> (const tenant_service&)>&);
+ const string& type,
+ const string& id,
+ const function<optional<string> (const string& tenant_id,
+ const tenant_service&)>&);
protected:
size_t retry_ = 0; // Max of all retries.
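
A hypothetical use of the reworked interface (the conn, service_id, and
type values are illustrative): the callback now also receives the tenant
id, which the service implementation can, for example, embed into the new
data.

  optional<string> d (
    update_tenant_service_state (
      conn,
      "github" /* type */, service_id,
      [] (const string& tenant_id, const tenant_service& s)
        -> optional<string>
      {
        // Return the new data to persist or nullopt to keep it unchanged.
        //
        return s.data + ';' + tenant_id;
      }));
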
diff --git a/mod/hmac.cxx b/mod/hmac.cxx
index 1a78b4c..cfb0e23 100644
--- a/mod/hmac.cxx
+++ b/mod/hmac.cxx
@@ -16,6 +16,12 @@ compute_hmac (const options::openssl_options& o,
// To compute an HMAC over stdin with the key <secret>:
//
+ // openssl dgst -sha256 -hmac <secret>
+ //
+ // Note that since openssl 3.0 the `mac` command is the preferred method
+ // for generating HMACs. For future reference, the equivalent command
+ // would be:
+ //
// openssl mac -digest SHA256 -macopt "key:<secret>" HMAC
//
// Note that here we assume both output and diagnostics will fit into pipe
@@ -25,10 +31,9 @@ compute_hmac (const options::openssl_options& o,
path ("-"), // Write output to openssl::in.
process::pipe (errp.in.get (), move (errp.out)),
process_env (o.openssl (), o.openssl_envvar ()),
- "mac", o.openssl_option (),
- "-digest", "SHA256",
- "-macopt", string ("key:") + k,
- "HMAC");
+ "dgst", o.openssl_option (),
+ "-sha256",
+ "-hmac", k);
ifdstream err (move (errp.in));
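
To sanity-check the switch, the two commands can be compared by hand: both
should produce the same digest for the same input and key (the output
framing varies between openssl versions; digest values elided):

  $ echo -n 'payload' | openssl dgst -sha256 -hmac 'secret'
  SHA2-256(stdin)= <hex digest>

  $ echo -n 'payload' | openssl mac -digest SHA256 -macopt key:secret HMAC
  <HEX DIGEST>
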
diff --git a/mod/mod-advanced-search.cxx b/mod/mod-advanced-search.cxx
new file mode 100644
index 0000000..23d5430
--- /dev/null
+++ b/mod/mod-advanced-search.cxx
@@ -0,0 +1,387 @@
+// file : mod/mod-advanced-search.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <mod/mod-advanced-search.hxx>
+
+#include <libstudxml/serializer.hxx>
+
+#include <odb/database.hxx>
+#include <odb/transaction.hxx>
+
+#include <web/server/module.hxx>
+#include <web/server/mime-url-encoding.hxx>
+
+#include <web/xhtml/serialization.hxx>
+
+#include <libbrep/package.hxx>
+#include <libbrep/package-odb.hxx>
+
+#include <mod/page.hxx>
+#include <mod/utility.hxx> // wildcard_to_similar_to_pattern()
+#include <mod/module-options.hxx>
+
+using namespace std;
+using namespace butl;
+using namespace web;
+using namespace odb::core;
+using namespace brep::cli;
+
+// While currently the user-defined copy constructor is not required (we don't
+// need to deep copy nullptr's), it is a good idea to keep the placeholder
+// ready for less trivial cases.
+//
+brep::advanced_search::
+advanced_search (const advanced_search& r)
+ : database_module (r),
+ options_ (r.initialized_ ? r.options_ : nullptr)
+{
+}
+
+void brep::advanced_search::
+init (scanner& s)
+{
+ HANDLER_DIAG;
+
+ options_ = make_shared<options::advanced_search> (
+ s, unknown_mode::fail, unknown_mode::fail);
+
+ database_module::init (*options_, options_->package_db_retry ());
+
+ if (options_->root ().empty ())
+ options_->root (dir_path ("/"));
+}
+
+template <typename T, typename C>
+static inline query<T>
+match (const C qc, const string& pattern)
+{
+ return qc +
+ "SIMILAR TO" +
+ query<T>::_val (brep::wildcard_to_similar_to_pattern (pattern));
+}
+
+template <typename T>
+static inline query<T>
+package_query (const brep::params::advanced_search& params)
+{
+ using namespace brep;
+ using query = query<T>;
+
+ query q (query::internal_repository.canonical_name.is_not_null ());
+
+ // Note that no error is reported if parsing of the filter parameters
+ // fails. Instead, it is considered that no packages match such a
+ // query.
+ //
+ try
+ {
+ // Package name.
+ //
+ if (!params.name ().empty ())
+ q = q && match<T> (query::id.name, params.name ());
+
+ // Package version.
+ //
+ if (!params.version ().empty () && params.version () != "*")
+ {
+ // May throw invalid_argument.
+ //
+ version v (params.version (), version::none);
+
+ q = q && compare_version_eq (query::id.version,
+ canonical_version (v),
+ v.revision.has_value ());
+ }
+
+ // Package project.
+ //
+ if (!params.project ().empty ())
+ q = q && match<T> (query::project, params.project ());
+
+ // Package repository.
+ //
+ const string& rp (params.repository ());
+
+ if (rp != "*")
+ q = q && query::internal_repository.canonical_name == rp;
+
+ // Reviews.
+ //
+ const string& rs (params.reviews ());
+
+ if (rs != "*")
+ {
+ if (rs == "reviewed")
+ q = q && query::reviews.pass.is_not_null ();
+ else if (rs == "unreviewed")
+ q = q && query::reviews.pass.is_null ();
+ else
+ throw invalid_argument ("");
+ }
+ }
+ catch (const invalid_argument&)
+ {
+ return query (false);
+ }
+
+ return q;
+}
+
+static const vector<pair<string, string>> reviews ({
+ {"*", "*"},
+ {"reviewed", "reviewed"},
+ {"unreviewed", "unreviewed"}});
+
+bool brep::advanced_search::
+handle (request& rq, response& rs)
+{
+ using namespace web::xhtml;
+
+ HANDLER_DIAG;
+
+ // Note that while we could potentially support the multi-tenant mode, that
+ // would require inventing the package/tenant view to filter out the
+ // private tenants from the search. This doesn't seem of much use at the
+ // moment.
+ // Thus, let's keep it simple for now and just respond with the 501 status
+ // code (not implemented) if such a mode is detected.
+ //
+ // NOTE: don't forget to update TR_PROJECT::operator() when/if this mode is
+ // supported.
+ //
+ if (!tenant.empty ())
+ throw invalid_request (501, "not implemented");
+
+ const size_t res_page (options_->search_page_entries ());
+ const dir_path& root (options_->root ());
+
+ params::advanced_search params;
+
+ try
+ {
+ name_value_scanner s (rq.parameters (8 * 1024));
+ params = params::advanced_search (s,
+ unknown_mode::fail,
+ unknown_mode::fail);
+ }
+ catch (const cli::exception& e)
+ {
+ throw invalid_request (400, e.what ());
+ }
+
+ const char* title ("Advanced Package Search");
+
+ xml::serializer s (rs.content (), title);
+
+ s << HTML
+ << HEAD
+ << TITLE << title << ~TITLE
+ << CSS_LINKS (path ("advanced-search.css"), root)
+ << ~HEAD
+ << BODY
+ << DIV_HEADER (options_->logo (), options_->menu (), root, tenant)
+ << DIV(ID="content");
+
+ transaction t (package_db_->begin ());
+
+ size_t count (
+ package_db_->query_value<package_count> (
+ package_query<package_count> (params)));
+
+ // Load the internal repositories as the canonical name/location pairs,
+ // sorting them in the same way as on the About page.
+ //
+ vector<pair<string, string>> repos ({{"*", "*"}});
+ {
+ using query = query<repository>;
+
+ for (repository& r:
+ package_db_->query<repository> (
+ (query::internal && query::id.tenant == tenant) +
+ "ORDER BY" + query::priority))
+ {
+ repos.emplace_back (move (r.id.canonical_name), r.location.string ());
+ }
+ }
+
+ // Print the package filter form on the first page only.
+ //
+ size_t page (params.page ());
+
+ if (page == 0)
+ {
+ // The 'action' attribute is optional in HTML5. While the standard
+ // doesn't specify browser behavior explicitly for the case the
+ // attribute is omitted, the only reasonable behavior is to default it
+ // to the current document URL.
+ //
+ s << FORM
+ << TABLE(ID="filter", CLASS="proplist")
+ << TBODY
+ << TR_INPUT ("name", "advanced-search", params.name (), "*", true)
+ << TR_INPUT ("version", "pv", params.version (), "*")
+ << TR_INPUT ("project", "pr", params.project (), "*")
+ << TR_SELECT ("repository", "rp", params.repository (), repos);
+
+ if (options_->reviews_url_specified ())
+ s << TR_SELECT ("reviews", "rv", params.reviews (), reviews);
+
+ s << ~TBODY
+ << ~TABLE
+ << TABLE(CLASS="form-table")
+ << TBODY
+ << TR
+ << TD(ID="package-version-count")
+ << DIV_COUNTER (count, "Package Version", "Package Versions")
+ << ~TD
+ << TD(ID="filter-btn")
+ << *INPUT(TYPE="submit", VALUE="Filter")
+ << ~TD
+ << ~TR
+ << ~TBODY
+ << ~TABLE
+ << ~FORM;
+ }
+ else
+ s << DIV_COUNTER (count, "Package Version", "Package Versions");
+
+ using query = query<package>;
+
+ // Note that we query an additional package version which we will not
+ // display, but will use to check if it belongs to the same package and/or
+ // project as the last displayed package version. If that's the case we will
+ // display the '...' mark(s) at the end of the page, indicating that there
+ // are more package versions from this package/project on the next page(s).
+ //
+ query q (package_query<package> (params) +
+ "ORDER BY tenant, project, name, version_epoch DESC, "
+ "version_canonical_upstream DESC, version_canonical_release DESC, "
+ "version_revision DESC" +
+ "OFFSET" + to_string (page * res_page) +
+ "LIMIT" + to_string (res_page + 1));
+
+ package_name prj;
+ package_name pkg;
+ size_t n (0);
+
+ for (package& p: package_db_->query<package> (q))
+ {
+ if (!p.id.tenant.empty ())
+ throw invalid_request (501, "not implemented");
+
+ if (n++ == res_page)
+ {
+ if (p.project == prj)
+ {
+ s << ~DIV; // 'versions' class.
+
+ if (p.name == pkg)
+ s << DIV(ID="package-break") << "..." << ~DIV;
+
+ s << DIV(ID="project-break") << "..." << ~DIV;
+
+ // Make sure we don't serialize ~DIV(CLASS="versions") twice (see
+ // below).
+ //
+ pkg = package_name ();
+ }
+
+ break;
+ }
+
+ if (p.project != prj)
+ {
+ if (!pkg.empty ())
+ s << ~DIV; // 'versions' class.
+
+ prj = move (p.project);
+ pkg = package_name ();
+
+ s << TABLE(CLASS="proplist project")
+ << TBODY
+ << TR_PROJECT (prj, root, tenant)
+ << ~TBODY
+ << ~TABLE;
+ }
+
+ if (p.name != pkg)
+ {
+ if (!pkg.empty ())
+ s << ~DIV; // 'versions' class.
+
+ pkg = move (p.name);
+
+ s << TABLE(CLASS="proplist package")
+ << TBODY
+ << TR_NAME (pkg, root, p.tenant)
+ << TR_SUMMARY (p.summary)
+ << TR_LICENSE (p.license_alternatives)
+ << ~TBODY
+ << ~TABLE
+ << DIV(CLASS="versions");
+ }
+
+ s << TABLE(CLASS="proplist version")
+ << TBODY
+ << TR_VERSION (pkg, p.version, root, tenant, p.upstream_version);
+
+ assert (p.internal ());
+
+ const repository_location& rl (p.internal_repository.load ()->location);
+
+ s << TR_REPOSITORY (rl, root, tenant)
+ << TR_DEPENDS (p.dependencies, root, tenant)
+ << TR_REQUIRES (p.requirements);
+
+ if (options_->reviews_url_specified ())
+ {
+ package_db_->load (p, p.reviews_section);
+
+ s << TR_REVIEWS_SUMMARY (p.reviews, options_->reviews_url ());
+ }
+
+ s << ~TBODY
+ << ~TABLE;
+ }
+
+ if (!pkg.empty ())
+ s << ~DIV; // 'versions' class.
+
+ t.commit ();
+
+ string u (root.string () + "?advanced-search");
+
+ if (!params.name ().empty ())
+ {
+ u += '=';
+ u += mime_url_encode (params.name ());
+ }
+
+ auto add_filter = [&u] (const char* pn,
+ const string& pv,
+ const char* def = "")
+ {
+ if (pv != def)
+ {
+ u += '&';
+ u += pn;
+ u += '=';
+ u += mime_url_encode (pv);
+ }
+ };
+
+ add_filter ("pv", params.version ());
+ add_filter ("pr", params.project ());
+ add_filter ("rp", params.repository (), "*");
+ add_filter ("rv", params.reviews (), "*");
+
+ s << DIV_PAGER (page,
+ count,
+ res_page,
+ options_->search_pages (),
+ u)
+ << ~DIV
+ << ~BODY
+ << ~HTML;
+
+ return true;
+}
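
The match() helper above delegates the pattern translation to
brep::wildcard_to_similar_to_pattern() (mod/utility.hxx), which is the
transform() function moved out of mod-builds.cxx (see below). As implied
by that code, the translation on a few sample patterns:

  "hello*"   ->  "hello%"      (star matches any character run)
  "lib??"    ->  "lib__"       (question mark matches one character)
  "x[!0-9]"  ->  "x[^0-9]"     (inverse bracket expression)
  "a+b(c)"   ->  "a\+b\(c\)"   (SIMILAR TO metacharacters are escaped)
  ""         ->  "%"           (empty pattern matches everything)
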
diff --git a/mod/mod-advanced-search.hxx b/mod/mod-advanced-search.hxx
new file mode 100644
index 0000000..4ab4d42
--- /dev/null
+++ b/mod/mod-advanced-search.hxx
@@ -0,0 +1,41 @@
+// file : mod/mod-advanced-search.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_MOD_ADVANCED_SEARCH_HXX
+#define MOD_MOD_ADVANCED_SEARCH_HXX
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <mod/module-options.hxx>
+#include <mod/database-module.hxx>
+
+namespace brep
+{
+ class advanced_search: public database_module
+ {
+ public:
+ advanced_search () = default;
+
+ // Create a shallow copy (handling instance) if initialized and a deep
+ // copy (context exemplar) otherwise.
+ //
+ explicit
+ advanced_search (const advanced_search&);
+
+ virtual bool
+ handle (request&, response&);
+
+ virtual const cli::options&
+ cli_options () const {return options::advanced_search::description ();}
+
+ private:
+ virtual void
+ init (cli::scanner&);
+
+ private:
+ shared_ptr<options::advanced_search> options_;
+ };
+}
+
+#endif // MOD_MOD_ADVANCED_SEARCH_HXX
diff --git a/mod/mod-build-force.cxx b/mod/mod-build-force.cxx
index ea921e9..d37674f 100644
--- a/mod/mod-build-force.cxx
+++ b/mod/mod-build-force.cxx
@@ -198,6 +198,9 @@ handle (request& rq, response& rs)
//
connection_ptr conn (build_db_->connection ());
+ // NOTE: don't forget to update ci_start::rebuild() if changing anything
+ // here.
+ //
{
transaction t (conn->begin ());
@@ -206,8 +209,11 @@ handle (request& rq, response& rs)
if (!build_db_->query_one<package_build> (
query<package_build>::build::id == id, pb) ||
+ pb.archived ||
(b = move (pb.build))->state == build_state::queued)
+ {
config_expired ("no package build");
+ }
force_state force (b->state == build_state::built
? force_state::forced
@@ -308,14 +314,15 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsq->build_queued (ss,
+ if (auto f = tsq->build_queued (qbs.back ().tenant,
+ ss,
qbs,
build_state::building,
qhs,
log_writer_))
{
conn = build_db_->connection ();
- update_tenant_service_state (conn, qbs.back ().tenant, f);
+ update_tenant_service_state (conn, ss.type, ss.id, f);
}
}
diff --git a/mod/mod-build-result.cxx b/mod/mod-build-result.cxx
index 3ba18e1..cc058b5 100644
--- a/mod/mod-build-result.cxx
+++ b/mod/mod-build-result.cxx
@@ -248,16 +248,27 @@ handle (request& rq, response&)
}
else if (authenticate_session (*options_, rqm.challenge, *b, rqm.session))
{
+ // If the build is not in the `forcing` state, then retrieve the tenant
+ // service callback, if present, for subsequent notification (`queued`
+ // for the interrupted build and `built` otherwise; see below). Note
+ // that for the `forcing` state the service already assumes the `queued`
+ // state (see build_force::handle() and ci_start::rebuild() for
+ // details).
+ //
const tenant_service_base* ts (nullptr);
+ shared_ptr<build_tenant> t;
- shared_ptr<build_tenant> t (build_db_->load<build_tenant> (b->tenant));
-
- if (t->service)
+ if (b->force != force_state::forcing)
{
- auto i (tenant_service_map_.find (t->service->type));
+ t = build_db_->load<build_tenant> (b->tenant);
- if (i != tenant_service_map_.end ())
- ts = i->second.get ();
+ if (t->service)
+ {
+ auto i (tenant_service_map_.find (t->service->type));
+
+ if (i != tenant_service_map_.end ())
+ ts = i->second.get ();
+ }
}
// If the build is interrupted, then revert it to the original built
@@ -348,6 +359,8 @@ handle (request& rq, response&)
//
if (tsq != nullptr)
{
+ assert (t != nullptr);
+
// Calculate the tenant service hints.
//
buildable_package_count tpc (
@@ -498,7 +511,11 @@ handle (request& rq, response&)
// If required, stash the service notification information.
//
if (tsb != nullptr || tsq != nullptr)
+ {
+ assert (t != nullptr);
+
tss = make_pair (move (*t->service), move (b));
+ }
}
t.commit ();
@@ -528,14 +545,15 @@ handle (request& rq, response&)
//
conn.reset ();
- if (auto f = tsq->build_queued (ss,
+ if (auto f = tsq->build_queued (qbs.back ().tenant,
+ ss,
qbs,
build_state::building,
qhs,
log_writer_))
{
conn = build_db_->connection ();
- update_tenant_service_state (conn, qbs.back ().tenant, f);
+ update_tenant_service_state (conn, ss.type, ss.id, f);
}
}
@@ -555,10 +573,10 @@ handle (request& rq, response&)
//
conn.reset ();
- if (auto f = tsb->build_built (ss, b, log_writer_))
+ if (auto f = tsb->build_built (b.tenant, ss, b, log_writer_))
{
conn = build_db_->connection ();
- update_tenant_service_state (conn, b.tenant, f);
+ update_tenant_service_state (conn, ss.type, ss.id, f);
}
}
diff --git a/mod/mod-build-task.cxx b/mod/mod-build-task.cxx
index 6be77f6..c8b1bb2 100644
--- a/mod/mod-build-task.cxx
+++ b/mod/mod-build-task.cxx
@@ -203,6 +203,41 @@ package_query (bool custom_bot,
query::build_repository::id.canonical_name.in_range (rp.begin (),
rp.end ());
+ // Filter by the types of services associated with the tenants, where the
+ // empty type denotes tenants without an associated service.
+ //
+ if (params.tenant_service_type_specified ())
+ {
+ cstrings ts;
+ bool et (false);
+
+ for (const string& t: params.tenant_service_type ())
+ {
+ if (!t.empty ())
+ ts.push_back (t.c_str ());
+ else
+ et = true;
+ }
+
+ if (!ts.empty () && et)
+ {
+ q = q &&
+ (query::build_tenant::service.type.in_range (ts.begin (), ts.end ()) ||
+ query::build_tenant::service.type.is_null ());
+ }
+ else if (!ts.empty ())
+ {
+ q = q && query::build_tenant::service.type.in_range (ts.begin (),
+ ts.end ());
+ }
+ else
+ {
+ assert (et);
+
+ q = q && query::build_tenant::service.type.is_null ();
+ }
+ }
+
// If the interactive mode is false or true, then filter out the respective
// packages.
//
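
As an example of the resulting predicate (the parameter spelling and column
names are illustrative), requesting the service types "github" and ""
(empty) in the task request would translate to roughly:

  ... AND (service_type IN ('github') OR service_type IS NULL)

That is, tenants serviced by github plus tenants without an associated
service.
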
@@ -464,10 +499,14 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsu->build_unloaded (move (*t->service), log_writer_))
+ tenant_service& ts (*t->service);
+ string type (ts.type);
+ string id (ts.id);
+
+ if (auto f = tsu->build_unloaded (t->id, move (ts), log_writer_))
{
conn = build_db_->connection ();
- update_tenant_service_state (conn, t->id, f);
+ update_tenant_service_state (conn, type, id, f);
}
}
}
@@ -1804,7 +1843,8 @@ handle (request& rq, response& rs)
const config_machine* cm (nullptr);
optional<collect_auxiliaries_result> aux;
- build_db_->load (*p, p->constraints_section);
+ if (!p->constraints_section.loaded ())
+ build_db_->load (*p, p->constraints_section);
for (auto i (configs.begin ()), e (configs.end ()); i != e; ++i)
{
@@ -2263,6 +2303,11 @@ handle (request& rq, response& rs)
// fingerprint and challenge and reset the task manifest and the
// session that we may have prepared.
//
+ if (task_build != nullptr)
+ b = move (task_build);
+
+ assert (b != nullptr); // Wouldn't be here otherwise.
+
agent_fp = move (b->agent_fingerprint);
challenge = move (b->agent_challenge);
task_response = task_response_manifest ();
@@ -2309,7 +2354,8 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsq->build_queued (ss,
+ if (auto f = tsq->build_queued (qbs.back ().tenant,
+ ss,
qbs,
nullopt /* initial_state */,
qhs,
@@ -2318,7 +2364,7 @@ handle (request& rq, response& rs)
conn = build_db_->connection ();
if (optional<string> data =
- update_tenant_service_state (conn, qbs.back ().tenant, f))
+ update_tenant_service_state (conn, ss.type, ss.id, f))
ss.data = move (data);
}
}
@@ -2341,7 +2387,8 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsq->build_queued (ss,
+ if (auto f = tsq->build_queued (qbs.back ().tenant,
+ ss,
qbs,
initial_state,
qhs,
@@ -2350,7 +2397,7 @@ handle (request& rq, response& rs)
conn = build_db_->connection ();
if (optional<string> data =
- update_tenant_service_state (conn, qbs.back ().tenant, f))
+ update_tenant_service_state (conn, ss.type, ss.id, f))
ss.data = move (data);
}
}
@@ -2377,12 +2424,12 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsb->build_building (ss, b, log_writer_))
+ if (auto f = tsb->build_building (b.tenant, ss, b, log_writer_))
{
conn = build_db_->connection ();
if (optional<string> data =
- update_tenant_service_state (conn, b.tenant, f))
+ update_tenant_service_state (conn, ss.type, ss.id, f))
ss.data = move (data);
}
}
@@ -2505,12 +2552,12 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsb->build_built (ss, b, log_writer_))
+ if (auto f = tsb->build_built (b.tenant, ss, b, log_writer_))
{
conn = build_db_->connection ();
if (optional<string> data =
- update_tenant_service_state (conn, b.tenant, f))
+ update_tenant_service_state (conn, ss.type, ss.id, f))
ss.data = move (data);
}
}
diff --git a/mod/mod-builds.cxx b/mod/mod-builds.cxx
index 81d4649..0155c2e 100644
--- a/mod/mod-builds.cxx
+++ b/mod/mod-builds.cxx
@@ -27,6 +27,7 @@
#include <libbrep/build-package-odb.hxx>
#include <mod/page.hxx>
+#include <mod/utility.hxx> // wildcard_to_similar_to_pattern()
#include <mod/module-options.hxx>
using namespace std;
@@ -63,71 +64,13 @@ init (scanner& s)
options_->root (dir_path ("/"));
}
-// Transform the wildcard to the SIMILAR TO-pattern.
-//
-static string
-transform (const string& pattern)
-{
- if (pattern.empty ())
- return "%";
-
- string r;
- for (const path_pattern_term& pt: path_pattern_iterator (pattern))
- {
- switch (pt.type)
- {
- case path_pattern_term_type::question: r += '_'; break;
- case path_pattern_term_type::star: r += '%'; break;
- case path_pattern_term_type::bracket:
- {
- // Copy the bracket expression translating the inverse character, if
- // present.
- //
- size_t n (r.size ());
- r.append (pt.begin, pt.end);
-
- if (r[n + 1] == '!') // ...[!... ?
- r[n + 1] = '^';
-
- break;
- }
- case path_pattern_term_type::literal:
- {
- char c (get_literal (pt));
-
- // Escape the special characters.
- //
- // Note that '.' is not a special character for SIMILAR TO.
- //
- switch (c)
- {
- case '\\':
- case '%':
- case '_':
- case '|':
- case '+':
- case '{':
- case '}':
- case '(':
- case ')':
- case '[':
- case ']': r += '\\'; break;
- }
-
- r += c;
- break;
- }
- }
- }
-
- return r;
-}
-
template <typename T, typename C>
static inline query<T>
match (const C qc, const string& pattern)
{
- return qc + "SIMILAR TO" + query<T>::_val (transform (pattern));
+ return qc +
+ "SIMILAR TO" +
+ query<T>::_val (brep::wildcard_to_similar_to_pattern (pattern));
}
// If tenant is absent, then query builds from all the public tenants.
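
The removed transform() function moved to brep::wildcard_to_similar_to_pattern() in mod/utility.cxx so that it can be shared (see the new include above). For illustration, a simplified sketch of the translation rules: '?' becomes '_', '*' becomes '%', and SIMILAR TO metacharacters are escaped. Bracket expressions, which the real implementation copies while rewriting [!...] to [^...], are omitted here for brevity.

#include <cassert>
#include <string>

static std::string
to_similar_to (const std::string& pattern)
{
  if (pattern.empty ())
    return "%";

  std::string r;
  for (char c: pattern)
  {
    switch (c)
    {
    case '?': r += '_'; break;
    case '*': r += '%'; break;
    default:
      {
        // Escape the SIMILAR TO metacharacters. Note that '.' is not
        // special for SIMILAR TO.
        //
        switch (c)
        {
        case '\\': case '%': case '_': case '|': case '+':
        case '{': case '}': case '(': case ')':
        case '[': case ']': r += '\\'; break;
        }

        r += c;
      }
    }
  }
  return r;
}

int main ()
{
  assert (to_similar_to ("lib*")    == "lib%");
  assert (to_similar_to ("odb-2.?") == "odb-2._");
  assert (to_similar_to ("50%")     == "50\\%");
}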
@@ -450,9 +393,7 @@ handle (request& rq, response& rs)
// The 'action' attribute is optional in HTML5. While the standard
// doesn't specify browser behavior explicitly for the case the
// attribute is omitted, the only reasonable behavior is to default it
- // to the current document URL. Note that we specify the function name
- // using the "hidden" <input/> element since the action url must not
- // contain the query part.
+ // to the current document URL.
//
s << FORM
<< TABLE(ID="filter", CLASS="proplist")
diff --git a/mod/mod-ci-github-gh.cxx b/mod/mod-ci-github-gh.cxx
index 4ad8d32..2e886ac 100644
--- a/mod/mod-ci-github-gh.cxx
+++ b/mod/mod-ci-github-gh.cxx
@@ -7,8 +7,16 @@
namespace brep
{
- // Return the GitHub check run status corresponding to a build_state. Throw
- // invalid_argument if the build_state value was invalid.
+ [[noreturn]] static void
+ throw_json (const json::parser& p, const string& m)
+ {
+ throw json::invalid_json_input (
+ p.input_name,
+ p.line (), p.column (), p.position (),
+ m);
+ }
+
+ // Return the GitHub check run status corresponding to a build_state.
//
string
gh_to_status (build_state st)
@@ -103,10 +111,7 @@ namespace brep
[[noreturn]] static void
missing_member (const json::parser& p, const char* o, const char* m)
{
- throw json::invalid_json_input (
- p.input_name,
- p.line (), p.column (), p.position (),
- o + string (" object is missing member '") + m + '\'');
+ throw_json (p, o + string (" object is missing member '") + m + '\'');
}
using event = json::event;
@@ -129,9 +134,14 @@ namespace brep
return p.name () == s ? (v = true) : false;
};
- if (c (ni, "node_id")) node_id = p.next_expect_string ();
- else if (c (hb, "head_branch")) head_branch = p.next_expect_string ();
- else if (c (hs, "head_sha")) head_sha = p.next_expect_string ();
+ if (c (ni, "node_id")) node_id = p.next_expect_string ();
+ else if (c (hb, "head_branch"))
+ {
+ string* v (p.next_expect_string_null ());
+ if (v != nullptr)
+ head_branch = *v;
+ }
+ else if (c (hs, "head_sha")) head_sha = p.next_expect_string ();
else p.next_expect_value_skip ();
}
@@ -144,12 +154,99 @@ namespace brep
operator<< (ostream& os, const gh_check_suite& cs)
{
os << "node_id: " << cs.node_id
- << ", head_branch: " << cs.head_branch
+ << ", head_branch: " << (cs.head_branch ? *cs.head_branch : "null")
<< ", head_sha: " << cs.head_sha;
return os;
}
+ // gh_check_suite_ex
+ //
+ gh_check_suite_ex::
+ gh_check_suite_ex (json::parser& p)
+ {
+ p.next_expect (event::begin_object);
+
+ bool ni (false), hb (false), hs (false), cc (false), co (false),
+ ap (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ auto c = [&p] (bool& v, const char* s)
+ {
+ return p.name () == s ? (v = true) : false;
+ };
+
+ if (c (ni, "node_id")) node_id = p.next_expect_string ();
+ else if (c (hb, "head_branch"))
+ {
+ string* v (p.next_expect_string_null ());
+ if (v != nullptr)
+ head_branch = *v;
+ }
+ else if (c (hs, "head_sha")) head_sha = p.next_expect_string ();
+ else if (c (cc, "latest_check_runs_count"))
+ check_runs_count = p.next_expect_number <size_t> ();
+ else if (c (co, "conclusion"))
+ {
+ string* v (p.next_expect_string_null ());
+ if (v != nullptr)
+ conclusion = *v;
+ }
+ else if (c (ap, "app"))
+ {
+ p.next_expect (event::begin_object);
+
+ bool ai (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ if (c (ai, "id"))
+ {
+ // Note: unlike the check_run webhook's app.id, the check_suite
+ // one can be null. It's unclear under what circumstances, but it
+ // shouldn't happen unless something is broken.
+ //
+ string* v (p.next_expect_number_null ());
+
+ if (v == nullptr)
+ throw_json (p, "check_suite.app.id is null");
+
+ app_id = *v;
+ }
+ else p.next_expect_value_skip ();
+ }
+
+ if (!ai) missing_member (p, "gh_check_suite_ex.app", "id");
+ }
+ else p.next_expect_value_skip ();
+ }
+
+ if (!ni) missing_member (p, "gh_check_suite_ex", "node_id");
+ if (!hb) missing_member (p, "gh_check_suite_ex", "head_branch");
+ if (!hs) missing_member (p, "gh_check_suite_ex", "head_sha");
+ if (!cc) missing_member (p, "gh_check_suite_ex", "latest_check_runs_count");
+ if (!co) missing_member (p, "gh_check_suite_ex", "conclusion");
+ if (!ap) missing_member (p, "gh_check_suite_ex", "app");
+ }
+
+ ostream&
+ operator<< (ostream& os, const gh_check_suite_ex& cs)
+ {
+ os << "node_id: " << cs.node_id
+ << ", head_branch: " << (cs.head_branch ? *cs.head_branch : "null")
+ << ", head_sha: " << cs.head_sha
+ << ", latest_check_runs_count: " << cs.check_runs_count
+ << ", conclusion: " << (cs.conclusion ? *cs.conclusion : "null")
+ << ", app_id: " << cs.app_id;
+
+ return os;
+ }
+
// gh_check_run
//
gh_check_run::
@@ -157,16 +254,80 @@ namespace brep
{
p.next_expect (event::begin_object);
- // We always ask for this exact set of fields to be returned in GraphQL
- // requests.
+ bool ni (false), nm (false), st (false);
+
+ // Skip unknown/uninteresting members.
//
- node_id = p.next_expect_member_string ("id");
- name = p.next_expect_member_string ("name");
- status = p.next_expect_member_string ("status");
+ while (p.next_expect (event::name, event::end_object))
+ {
+ auto c = [&p] (bool& v, const char* s)
+ {
+ return p.name () == s ? (v = true) : false;
+ };
- p.next_expect (event::end_object);
+ if (c (ni, "node_id")) node_id = p.next_expect_string ();
+ else if (c (nm, "name")) name = p.next_expect_string ();
+ else if (c (st, "status")) status = p.next_expect_string ();
+ else p.next_expect_value_skip ();
+ }
+
+ if (!ni) missing_member (p, "gh_check_run", "node_id");
+ if (!nm) missing_member (p, "gh_check_run", "name");
+ if (!st) missing_member (p, "gh_check_run", "status");
}
+ // gh_check_run_ex
+ //
+ gh_check_run_ex::
+ gh_check_run_ex (json::parser& p)
+ {
+ p.next_expect (event::begin_object);
+
+ bool ni (false), nm (false), st (false), du (false), cs (false),
+ ap (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ auto c = [&p] (bool& v, const char* s)
+ {
+ return p.name () == s ? (v = true) : false;
+ };
+
+ if (c (ni, "node_id")) node_id = p.next_expect_string ();
+ else if (c (nm, "name")) name = p.next_expect_string ();
+ else if (c (st, "status")) status = p.next_expect_string ();
+ else if (c (du, "details_url")) details_url = p.next_expect_string ();
+ else if (c (cs, "check_suite")) check_suite = gh_check_suite (p);
+ else if (c (ap, "app"))
+ {
+ p.next_expect (event::begin_object);
+
+ bool ai (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ if (c (ai, "id")) app_id = p.next_expect_number ();
+ else p.next_expect_value_skip ();
+ }
+
+ if (!ai) missing_member (p, "gh_check_run_ex.app", "id");
+ }
+ else p.next_expect_value_skip ();
+ }
+
+ if (!ni) missing_member (p, "gh_check_run_ex", "node_id");
+ if (!nm) missing_member (p, "gh_check_run_ex", "name");
+ if (!st) missing_member (p, "gh_check_run_ex", "status");
+ if (!du) missing_member (p, "gh_check_run_ex", "details_url");
+ if (!cs) missing_member (p, "gh_check_run_ex", "check_suite");
+ if (!ap) missing_member (p, "gh_check_run_ex", "app");
+ }
+
+
ostream&
operator<< (ostream& os, const gh_check_run& cr)
{
@@ -177,13 +338,23 @@ namespace brep
return os;
}
+ ostream&
+ operator<< (ostream& os, const gh_check_run_ex& cr)
+ {
+ os << static_cast<const gh_check_run&> (cr)
+ << ", details_url: " << cr.details_url
+ << ", check_suite: { " << cr.check_suite << " }"
+ << ", app_id: " << cr.app_id;
+
+ return os;
+ }
+
gh_pull_request::
gh_pull_request (json::parser& p)
{
p.next_expect (event::begin_object);
- bool ni (false), nu (false), st (false), ma (false), ms (false),
- bs (false), hd (false);
+ bool ni (false), nu (false), bs (false), hd (false);
// Skip unknown/uninteresting members.
//
@@ -196,58 +367,71 @@ namespace brep
if (c (ni, "node_id")) node_id = p.next_expect_string ();
else if (c (nu, "number")) number = p.next_expect_number<unsigned int> ();
- else if (c (st, "state")) state = p.next_expect_string ();
- else if (c (ma, "mergeable")) mergeable = p.next_expect_boolean_null<bool> ();
- else if (c (ms, "merge_commit_sha"))
- {
- string* v (p.next_expect_string_null ());
- if (v != nullptr)
- merge_commit_sha = *v;
- }
else if (c (bs, "base"))
{
p.next_expect (event::begin_object);
- bool l (false), r (false), s (false);
+ bool r (false), s (false), rp (false), fn (false);
while (p.next_expect (event::name, event::end_object))
{
- if (c (l, "label")) base_label = p.next_expect_string ();
- else if (c (r, "ref")) base_ref = p.next_expect_string ();
+ if (c (r, "ref")) base_ref = p.next_expect_string ();
else if (c (s, "sha")) base_sha = p.next_expect_string ();
+ else if (c (rp, "repo"))
+ {
+ p.next_expect (event::begin_object);
+
+ while (p.next_expect (event::name, event::end_object))
+ {
+ if (c (fn, "full_name"))
+ base_path = p.next_expect_string ();
+ else
+ p.next_expect_value_skip ();
+ }
+ }
else p.next_expect_value_skip ();
}
- if (!l) missing_member (p, "gh_pull_request.base", "label");
- if (!r) missing_member (p, "gh_pull_request.base", "ref");
- if (!s) missing_member (p, "gh_pull_request.base", "sha");
+ if (!r) missing_member (p, "gh_pull_request.base", "ref");
+ if (!s) missing_member (p, "gh_pull_request.base", "sha");
+ if (!rp) missing_member (p, "gh_pull_request.base", "repo");
+ if (!fn) missing_member (p, "gh_pull_request.base.repo", "full_name");
}
else if (c (hd, "head"))
{
p.next_expect (event::begin_object);
- bool l (false), r (false), s (false);
+ bool r (false), s (false), rp (false), fn (false);
while (p.next_expect (event::name, event::end_object))
{
- if (c (l, "label")) head_label = p.next_expect_string ();
- else if (c (r, "ref")) head_ref = p.next_expect_string ();
+ if (c (r, "ref")) head_ref = p.next_expect_string ();
else if (c (s, "sha")) head_sha = p.next_expect_string ();
+ else if (c (rp, "repo"))
+ {
+ p.next_expect (event::begin_object);
+
+ while (p.next_expect (event::name, event::end_object))
+ {
+ if (c (fn, "full_name"))
+ head_path = p.next_expect_string ();
+ else
+ p.next_expect_value_skip ();
+ }
+ }
else p.next_expect_value_skip ();
}
- if (!l) missing_member (p, "gh_pull_request.head", "label");
- if (!r) missing_member (p, "gh_pull_request.head", "ref");
- if (!s) missing_member (p, "gh_pull_request.head", "sha");
+ if (!r) missing_member (p, "gh_pull_request.head", "ref");
+ if (!s) missing_member (p, "gh_pull_request.head", "sha");
+ if (!rp) missing_member (p, "gh_pull_request.head", "repo");
+ if (!fn) missing_member (p, "gh_pull_request.head.repo", "full_name");
}
else p.next_expect_value_skip ();
}
if (!ni) missing_member (p, "gh_pull_request", "node_id");
if (!nu) missing_member (p, "gh_pull_request", "number");
- if (!st) missing_member (p, "gh_pull_request", "state");
- if (!ma) missing_member (p, "gh_pull_request", "mergeable");
- if (!ms) missing_member (p, "gh_pull_request", "merge_commit_sha");
if (!bs) missing_member (p, "gh_pull_request", "base");
if (!hd) missing_member (p, "gh_pull_request", "head");
}
@@ -257,23 +441,17 @@ namespace brep
{
os << "node_id: " << pr.node_id
<< ", number: " << pr.number
- << ", state: " << pr.state
- << ", mergeable: " << (pr.mergeable
- ? *pr.mergeable
- ? "true"
- : "false"
- : "null")
- << ", merge_commit_sha:" << pr.merge_commit_sha
<< ", base: { "
- << "label: " << pr.base_label
+ << "path: " << pr.base_path
<< ", ref: " << pr.base_ref
<< ", sha: " << pr.base_sha
<< " }"
<< ", head: { "
- << "label: " << pr.head_label
+ << "path: " << pr.head_path
<< ", ref: " << pr.head_ref
<< ", sha: " << pr.head_sha
- << " }";
+ << " }"
+ << ", app_id: " << pr.app_id;
return os;
}
@@ -285,7 +463,7 @@ namespace brep
{
p.next_expect (event::begin_object);
- bool ni (false), nm (false), fn (false), db (false), cu (false);
+ bool ni (false), fn (false), cu (false);
// Skip unknown/uninteresting members.
//
@@ -297,17 +475,13 @@ namespace brep
};
if (c (ni, "node_id")) node_id = p.next_expect_string ();
- else if (c (nm, "name")) name = p.next_expect_string ();
- else if (c (fn, "full_name")) full_name = p.next_expect_string ();
- else if (c (db, "default_branch")) default_branch = p.next_expect_string ();
+ else if (c (fn, "full_name")) path = p.next_expect_string ();
else if (c (cu, "clone_url")) clone_url = p.next_expect_string ();
else p.next_expect_value_skip ();
}
if (!ni) missing_member (p, "gh_repository", "node_id");
- if (!nm) missing_member (p, "gh_repository", "name");
if (!fn) missing_member (p, "gh_repository", "full_name");
- if (!db) missing_member (p, "gh_repository", "default_branch");
if (!cu) missing_member (p, "gh_repository", "clone_url");
}
@@ -315,9 +489,7 @@ namespace brep
operator<< (ostream& os, const gh_repository& rep)
{
os << "node_id: " << rep.node_id
- << ", name: " << rep.name
- << ", full_name: " << rep.full_name
- << ", default_branch: " << rep.default_branch
+ << ", path: " << rep.path
<< ", clone_url: " << rep.clone_url;
return os;
@@ -341,7 +513,7 @@ namespace brep
return p.name () == s ? (v = true) : false;
};
- if (c (i, "id")) id = p.next_expect_number<uint64_t> ();
+ if (c (i, "id")) id = p.next_expect_number ();
else p.next_expect_value_skip ();
}
@@ -375,7 +547,7 @@ namespace brep
};
if (c (ac, "action")) action = p.next_expect_string ();
- else if (c (cs, "check_suite")) check_suite = gh_check_suite (p);
+ else if (c (cs, "check_suite")) check_suite = gh_check_suite_ex (p);
else if (c (rp, "repository")) repository = gh_repository (p);
else if (c (in, "installation")) installation = gh_installation (p);
else p.next_expect_value_skip ();
@@ -398,6 +570,48 @@ namespace brep
return os;
}
+ // gh_check_run_event
+ //
+ gh_check_run_event::
+ gh_check_run_event (json::parser& p)
+ {
+ p.next_expect (event::begin_object);
+
+ bool ac (false), cs (false), rp (false), in (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ auto c = [&p] (bool& v, const char* s)
+ {
+ return p.name () == s ? (v = true) : false;
+ };
+
+ if (c (ac, "action")) action = p.next_expect_string ();
+ else if (c (cs, "check_run")) check_run = gh_check_run_ex (p);
+ else if (c (rp, "repository")) repository = gh_repository (p);
+ else if (c (in, "installation")) installation = gh_installation (p);
+ else p.next_expect_value_skip ();
+ }
+
+ if (!ac) missing_member (p, "gh_check_run_event", "action");
+ if (!cs) missing_member (p, "gh_check_run_event", "check_run");
+ if (!rp) missing_member (p, "gh_check_run_event", "repository");
+ if (!in) missing_member (p, "gh_check_run_event", "installation");
+ }
+
+ ostream&
+ operator<< (ostream& os, const gh_check_run_event& cr)
+ {
+ os << "action: " << cr.action;
+ os << ", check_run { " << cr.check_run << " }";
+ os << ", repository { " << cr.repository << " }";
+ os << ", installation { " << cr.installation << " }";
+
+ return os;
+ }
+
// gh_pull_request_event
//
gh_pull_request_event::
@@ -405,7 +619,7 @@ namespace brep
{
p.next_expect (event::begin_object);
- bool ac (false), pr (false), rp (false), in (false);
+ bool ac (false), pr (false), bf (false), rp (false), in (false);
// Skip unknown/uninteresting members.
//
@@ -418,6 +632,7 @@ namespace brep
if (c (ac, "action")) action = p.next_expect_string ();
else if (c (pr, "pull_request")) pull_request = gh_pull_request (p);
+ else if (c (bf, "before")) before = p.next_expect_string ();
else if (c (rp, "repository")) repository = gh_repository (p);
else if (c (in, "installation")) installation = gh_installation (p);
else p.next_expect_value_skip ();
@@ -434,12 +649,65 @@ namespace brep
{
os << "action: " << pr.action;
os << ", pull_request { " << pr.pull_request << " }";
+ os << ", before: " << (pr.before ? *pr.before : "null");
os << ", repository { " << pr.repository << " }";
os << ", installation { " << pr.installation << " }";
return os;
}
+ // gh_push_event
+ //
+ gh_push_event::
+ gh_push_event (json::parser& p)
+ {
+ p.next_expect (event::begin_object);
+
+ bool rf (false), bf (false), af (false), fd (false), dl (false),
+ rp (false), in (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ auto c = [&p] (bool& v, const char* s)
+ {
+ return p.name () == s ? (v = true) : false;
+ };
+
+ if (c (rf, "ref")) ref = p.next_expect_string ();
+ else if (c (bf, "before")) before = p.next_expect_string ();
+ else if (c (af, "after")) after = p.next_expect_string ();
+ else if (c (fd, "forced")) forced = p.next_expect_boolean<bool> ();
+ else if (c (dl, "deleted")) deleted = p.next_expect_boolean<bool> ();
+ else if (c (rp, "repository")) repository = gh_repository (p);
+ else if (c (in, "installation")) installation = gh_installation (p);
+ else p.next_expect_value_skip ();
+ }
+
+ if (!rf) missing_member (p, "gh_push_event", "ref");
+ if (!bf) missing_member (p, "gh_push_event", "before");
+ if (!af) missing_member (p, "gh_push_event", "after");
+ if (!fd) missing_member (p, "gh_push_event", "forced");
+ if (!dl) missing_member (p, "gh_push_event", "deleted");
+ if (!rp) missing_member (p, "gh_push_event", "repository");
+ if (!in) missing_member (p, "gh_push_event", "installation");
+ }
+
+ ostream&
+ operator<< (ostream& os, const gh_push_event& p)
+ {
+ os << "ref: " << p.ref
+ << ", before: " << p.before
+ << ", after: " << p.after
+ << ", forced: " << p.forced
+ << ", deleted: " << p.deleted
+ << ", repository { " << p.repository << " }"
+ << ", installation { " << p.installation << " }";
+
+ return os;
+ }
+
// gh_installation_access_token
//
// Example JSON:
@@ -467,7 +735,29 @@ namespace brep
};
if (c (tk, "token")) token = p.next_expect_string ();
- else if (c (ea, "expires_at")) expires_at = gh_from_iso8601 (p.next_expect_string ());
+ else if (c (ea, "expires_at"))
+ {
+ string v (p.next_expect_string ());
+
+ try
+ {
+ expires_at = gh_from_iso8601 (v);
+ }
+ catch (const invalid_argument& e)
+ {
+ throw_json (p,
+ "invalid IAT expires_at value '" + v +
+ "': " + e.what ());
+ }
+ catch (const system_error& e)
+ {
+ // Translate for simplicity.
+ //
+ throw_json (p,
+ "unable to convert IAT expires_at value '" + v +
+ "': " + e.what ());
+ }
+ }
else p.next_expect_value_skip ();
}
@@ -502,6 +792,8 @@ namespace brep
timestamp
gh_from_iso8601 (const string& s)
{
- return butl::from_string (s.c_str (), "%Y-%m-%dT%TZ", false /* local */);
+ return butl::from_string (s.c_str (),
+ "%Y-%m-%dT%TZ",
+ false /* local */);
}
}
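
All of the event constructors above use the same member-tracking parse loop: iterate over the object's members, record which expected ones were seen, skip the rest, and report any that are missing. A self-contained sketch of the pattern follows; the <libbutl/json/parser.hxx> header path and the parser constructor signature are assumptions, while the next_expect() calls mirror the code above.

#include <iostream>
#include <sstream>
#include <string>

#include <libbutl/json/parser.hxx>

int main ()
{
  using namespace butl::json;

  std::istringstream is ("{\"node_id\": \"abc\", \"extra\": 1}");
  parser p (is, "example" /* input_name */);

  p.next_expect (event::begin_object);

  std::string node_id;
  bool ni (false);

  // Skip unknown/uninteresting members, as in the constructors above.
  //
  while (p.next_expect (event::name, event::end_object))
  {
    if (p.name () == "node_id")
    {
      ni = true;
      node_id = p.next_expect_string ();
    }
    else
      p.next_expect_value_skip ();
  }

  if (!ni) // Mirrors missing_member().
    throw invalid_json_input (p.input_name,
                              p.line (), p.column (), p.position (),
                              "object is missing member 'node_id'");

  std::cout << node_id << '\n'; // Prints "abc".
}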
diff --git a/mod/mod-ci-github-gh.hxx b/mod/mod-ci-github-gh.hxx
index 2b77aeb..91f5bfe 100644
--- a/mod/mod-ci-github-gh.hxx
+++ b/mod/mod-ci-github-gh.hxx
@@ -25,27 +25,31 @@ namespace brep
// GitHub request/response types (all start with gh_).
//
- // Note that the GitHub REST and GraphQL APIs use different ID types and
- // values. In the REST API they are usually integers (but sometimes
- // strings!) whereas in GraphQL they are always strings (note:
- // base64-encoded and opaque, not just the REST ID value as a string).
+ // Note that the GitHub REST and GraphQL APIs use different id types and
+ // values. In the REST API they are usually integers (but check the API
+ // reference for the object in question) whereas in GraphQL they are always
+ // strings (note: base64-encoded and opaque, not just the REST id value as a
+ // string).
//
- // In both APIs the ID field is called `id`, but REST responses and webhook
- // events also contain the corresponding GraphQL object's ID in the
+ // In both APIs the id field is called `id`, but REST responses and webhook
+ // events also contain the corresponding GraphQL object's id in the
// `node_id` field.
//
- // In the structures below we always use the RESP API/webhook names for ID
- // fields. I.e., `id` always refers to the REST/webhook ID, and `node_id`
- // always refers to the GraphQL ID.
+ // The GraphQL API's ids are called "global node ids" by GitHub. We refer to
+ // them simply as node ids and we use them almost exclusively (over the
+ // REST/webhook ids).
+ //
+ // In the structures below, `id` always refers to the REST/webhook id and
+ // `node_id` always refers to the node id.
//
namespace json = butl::json;
- // The "check_suite" object within a check_suite webhook event request.
+ // The check_suite member of a check_run webhook event (gh_check_run_event).
//
struct gh_check_suite
{
string node_id;
- string head_branch;
+ optional<string> head_branch;
string head_sha;
explicit
@@ -54,6 +58,26 @@ namespace brep
gh_check_suite () = default;
};
+ // The check_suite member of a check_suite webhook event
+ // (gh_check_suite_event).
+ //
+ struct gh_check_suite_ex: gh_check_suite
+ {
+ size_t check_runs_count;
+ optional<string> conclusion;
+
+ string app_id;
+
+ explicit
+ gh_check_suite_ex (json::parser&);
+
+ gh_check_suite_ex () = default;
+ };
+
+ // The check_run object returned in response to GraphQL requests
+  // (e.g. create or update check run). Note that we specify the set of
+ // members to return in the GraphQL request.
+ //
struct gh_check_run
{
string node_id;
@@ -66,66 +90,59 @@ namespace brep
gh_check_run () = default;
};
+ // The check_run member of a check_run webhook event (gh_check_run_event).
+ //
+ struct gh_check_run_ex: gh_check_run
+ {
+ string details_url;
+ gh_check_suite check_suite;
+
+ string app_id;
+
+ explicit
+ gh_check_run_ex (json::parser&);
+
+ gh_check_run_ex () = default;
+ };
+
+ // The pull_request member of a pull_request webhook event
+ // (gh_pull_request_event).
+ //
struct gh_pull_request
{
string node_id;
unsigned int number;
- string state; // "open" or "closed".
-
- // If absent then the result of the test merge commit is not yet
- // available. If true then `merge_commit_sha` contains the commit ID of
- // the merge commit. If false then `merge_commit_sha` is either empty or
- // no longer valid.
- //
- optional<bool> mergeable;
- string merge_commit_sha;
-
- // @@ TODO Remove label if unused.
- string base_label; // Name distinguishing the base from the head.
+ string base_path; // Repository path (<org>/<repo>) under github.com.
string base_ref;
string base_sha;
- // @@ TODO Remove label if unused.
- string head_label; // Name distinguishing the head from the base.
+ string head_path;
string head_ref;
string head_sha;
+ // Note: not received from GitHub but set from the app-id webhook query
+ // parameter instead.
+ //
+ // For some reason, unlike the check_suite and check_run webhooks, the
+ // pull_request webhook does not contain the app id. For the sake of
+ // simplicity we emulate check_suite and check_run by storing the app-id
+ // webhook query parameter here.
+ //
+ string app_id;
+
explicit
gh_pull_request (json::parser&);
gh_pull_request () = default;
};
- // Return the GitHub check run status corresponding to a build_state.
- //
- string
- gh_to_status (build_state st);
-
- // Return the build_state corresponding to a GitHub check run status
- // string. Throw invalid_argument if the passed status was invalid.
- //
- build_state
- gh_from_status (const string&);
-
- // If warning_success is true, then map result_status::warning to SUCCESS
- // and to FAILURE otherwise.
+ // The repository member of various webhook events.
//
- string
- gh_to_conclusion (result_status, bool warning_success);
-
- // Create a check_run name from a build. If the second argument is not
- // NULL, return an abbreviated id if possible.
- //
- string
- gh_check_run_name (const build&, const build_queued_hints* = nullptr);
-
struct gh_repository
{
string node_id;
- string name;
- string full_name;
- string default_branch;
+ string path; // Repository path (<org>/<repo>) under github.com.
string clone_url;
explicit
@@ -134,9 +151,11 @@ namespace brep
gh_repository () = default;
};
+ // The installation member of various webhook events.
+ //
struct gh_installation
{
- uint64_t id; // Note: used for installation access token (REST API).
+ string id; // Note: used for installation access token (REST API).
explicit
gh_installation (json::parser&);
@@ -144,12 +163,12 @@ namespace brep
gh_installation () = default;
};
- // The check_suite webhook event request.
+ // The check_suite webhook event.
//
struct gh_check_suite_event
{
string action;
- gh_check_suite check_suite;
+ gh_check_suite_ex check_suite;
gh_repository repository;
gh_installation installation;
@@ -159,11 +178,34 @@ namespace brep
gh_check_suite_event () = default;
};
+ // The check_run webhook event.
+ //
+ struct gh_check_run_event
+ {
+ string action;
+ gh_check_run_ex check_run;
+ gh_repository repository;
+ gh_installation installation;
+
+ explicit
+ gh_check_run_event (json::parser&);
+
+ gh_check_run_event () = default;
+ };
+
+ // The pull_request webhook event.
+ //
struct gh_pull_request_event
{
string action;
gh_pull_request pull_request;
+
+ // The SHA of the previous commit on the head branch before the current
+ // one. Only present if action is "synchronize".
+ //
+ optional<string> before;
+
gh_repository repository;
gh_installation installation;
@@ -173,6 +215,58 @@ namespace brep
gh_pull_request_event () = default;
};
+ // The push webhook event.
+ //
+ struct gh_push_event
+ {
+ // The full git ref that was pushed. Example: refs/heads/main or
+ // refs/tags/v3.14.1.
+ //
+ string ref;
+
+ // The SHA of the most recent commit on ref before the push.
+ //
+ // The GitHub API reference says this member is always present and
+ // non-null. Testing shows that an absent before commit is represented by
+ // a value of "0000000000000000000000000000000000000000".
+ //
+ string before;
+
+ // The SHA of the most recent commit on ref after the push.
+ //
+ string after;
+
+ // True if this was a forced push of the ref. I.e., history was
+ // overwritten.
+ //
+ bool forced;
+
+ // True if this was a branch deletion.
+ //
+ bool deleted;
+
+ gh_repository repository;
+ gh_installation installation;
+
+ // Note: not received from GitHub but set from the app-id webhook query
+ // parameter instead.
+ //
+ // For some reason, unlike the check_suite and check_run webhooks, the
+ // push webhook does not contain the app id. For the sake of simplicity we
+ // emulate check_suite and check_run by storing the app-id webhook query
+ // parameter here.
+ //
+ string app_id;
+
+ explicit
+ gh_push_event (json::parser&);
+
+ gh_push_event () = default;
+ };
+
+ // Installation access token (IAT) returned when we authenticate as a GitHub
+ // app installation.
+ //
struct gh_installation_access_token
{
string token;
@@ -186,9 +280,41 @@ namespace brep
gh_installation_access_token () = default;
};
+ // Return the GitHub check run status corresponding to a build_state.
+ //
+ string
+ gh_to_status (build_state);
+
+ // Return the build_state corresponding to a GitHub check run status
+ // string. Throw invalid_argument if the passed status was invalid.
+ //
+ build_state
+ gh_from_status (const string&);
+
+  // If warning_success is true, then map result_status::warning to `SUCCESS`;
+  // otherwise map it to `FAILURE`.
+ //
+ // Throw invalid_argument in case of unsupported result_status value
+ // (currently skip, interrupt).
+ //
+ string
+ gh_to_conclusion (result_status, bool warning_success);
+
+ // Create a check_run name from a build. If the second argument is not
+ // NULL, return an abbreviated id if possible.
+ //
+ string
+ gh_check_run_name (const build&, const build_queued_hints* = nullptr);
+
+ // Throw system_error if the conversion fails due to underlying operating
+ // system errors.
+ //
string
gh_to_iso8601 (timestamp);
+  // Throw invalid_argument if the conversion fails due to an invalid
+  // argument and system_error if due to underlying operating system errors.
+ //
timestamp
gh_from_iso8601 (const string&);
@@ -196,6 +322,9 @@ namespace brep
operator<< (ostream&, const gh_check_suite&);
ostream&
+ operator<< (ostream&, const gh_check_suite_ex&);
+
+ ostream&
operator<< (ostream&, const gh_check_run&);
ostream&
@@ -211,9 +340,15 @@ namespace brep
operator<< (ostream&, const gh_check_suite_event&);
ostream&
+ operator<< (ostream&, const gh_check_run_event&);
+
+ ostream&
operator<< (ostream&, const gh_pull_request_event&);
ostream&
+ operator<< (ostream&, const gh_push_event&);
+
+ ostream&
operator<< (ostream&, const gh_installation_access_token&);
}
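
As a small illustration of the gh_push_event::before convention described above, a handler can detect a newly created ref by comparing against the all-zeros commit id. The new_ref() helper below is illustrative, not part of brep.

#include <cassert>
#include <string>

static bool
new_ref (const std::string& before)
{
  // An absent before commit is represented by forty zeros.
  //
  return before == std::string (40, '0');
}

int main ()
{
  assert (new_ref ("0000000000000000000000000000000000000000"));
  assert (!new_ref ("626d25b318aad27bc0005277afefe3e8d6b2d434"));
}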
diff --git a/mod/mod-ci-github-gq.cxx b/mod/mod-ci-github-gq.cxx
index bcf9e55..db69f0c 100644
--- a/mod/mod-ci-github-gq.cxx
+++ b/mod/mod-ci-github-gq.cxx
@@ -17,9 +17,11 @@ namespace brep
// bottom).
//
static const string& gq_name (const string&);
+ static string gq_name (string&&);
static string gq_str (const string&);
static string gq_bool (bool);
static const string& gq_enum (const string&);
+ static string gq_enum (string&&);
[[noreturn]] static void
throw_json (json::parser& p, const string& m)
@@ -163,19 +165,21 @@ namespace brep
// Parse a response to a check_run GraphQL mutation such as `createCheckRun`
// or `updateCheckRun`.
//
+ // Throw invalid_json_input.
+ //
// Example response (only the part we need to parse here):
//
// {
// "cr0": {
// "checkRun": {
- // "id": "CR_kwDOLc8CoM8AAAAFQ5GqPg",
+ // "node_id": "CR_kwDOLc8CoM8AAAAFQ5GqPg",
// "name": "libb2/0.98.1+2/x86_64-linux-gnu/linux_debian_12-gcc_13.1-O3/default/dev/0.17.0-a.1",
// "status": "QUEUED"
// }
// },
// "cr1": {
// "checkRun": {
- // "id": "CR_kwDOLc8CoM8AAAAFQ5GqhQ",
+ // "node_id": "CR_kwDOLc8CoM8AAAAFQ5GqhQ",
// "name": "libb2/0.98.1+2/x86_64-linux-gnu/linux_debian_12-gcc_13.1/default/dev/0.17.0-a.1",
// "status": "QUEUED"
// }
@@ -219,16 +223,17 @@ namespace brep
return r;
}
- // Send a GraphQL mutation request `rq` that operates on one or more check
- // runs. Update the check runs in `crs` with the new state and the node ID
- // if unset. Return false and issue diagnostics if the request failed.
+ // Send a GraphQL mutation request `rq` that creates or updates one or more
+ // check runs. The requested build state is taken from each check_run
+ // object. Update the check runs in `crs` with the new data (state, node ID
+ // if unset, and state_synced). Return false and issue diagnostics if the
+ // request failed.
//
static bool
gq_mutate_check_runs (const basic_mark& error,
vector<check_run>& crs,
const string& iat,
- string rq,
- build_state st) noexcept
+ string rq)
{
vector<gh_check_run> rcrs;
@@ -241,7 +246,8 @@ namespace brep
{
vector<gh_check_run> check_runs; // Received check runs.
- resp (json::parser& p): check_runs (gq_parse_response_check_runs (p)) {}
+ resp (json::parser& p)
+ : check_runs (gq_parse_response_check_runs (p)) {}
resp () = default;
} rs;
@@ -263,25 +269,27 @@ namespace brep
//
const gh_check_run& rcr (rcrs[i]); // Received check run.
+ build_state st (crs[i].state); // Requested state.
build_state rst (gh_from_status (rcr.status)); // Received state.
- if (rst != build_state::built && rst != st)
+ // Note that GitHub won't allow us to change a built check run to
+ // any other state (but all other transitions are allowed).
+ //
+ if (rst != st && rst != build_state::built)
{
error << "unexpected check_run status: received '" << rcr.status
<< "' but expected '" << gh_to_status (st) << '\'';
return false; // Fail because something is clearly very wrong.
}
- else
- {
- check_run& cr (crs[i]);
- if (!cr.node_id)
- cr.node_id = move (rcr.node_id);
+ check_run& cr (crs[i]);
- cr.state = gh_from_status (rcr.status);
- cr.state_synced = true;
- }
+ if (!cr.node_id)
+ cr.node_id = move (rcr.node_id);
+
+ cr.state = rst;
+ cr.state_synced = (rst == st);
}
return true;
@@ -290,10 +298,10 @@ namespace brep
error << "unexpected number of check_run objects in response";
}
else
- error << "failed to update check run: error HTTP response status "
+ error << "failed to mutate check runs: error HTTP response status "
<< sc;
}
- catch (const json::invalid_json_input& e)
+ catch (const json::invalid_json_input& e) // struct resp (via github_post())
{
// Note: e.name is the GitHub API endpoint.
//
@@ -301,16 +309,16 @@ namespace brep
<< e.line << ", column: " << e.column << ", byte offset: "
<< e.position << ", error: " << e;
}
- catch (const invalid_argument& e)
+ catch (const invalid_argument& e) // github_post()
{
error << "malformed header(s) in response: " << e;
}
- catch (const system_error& e)
+ catch (const system_error& e) // github_post()
{
error << "unable to mutate check runs (errno=" << e.code () << "): "
<< e.what ();
}
- catch (const runtime_error& e) // From gq_parse_response_check_runs().
+ catch (const runtime_error& e) // gq_parse_response_check_runs()
{
// GitHub response contained error(s) (could be ours or theirs at this
// point).
@@ -343,25 +351,23 @@ namespace brep
// Serialize `createCheckRun` mutations for one or more builds to GraphQL.
//
- // The conclusion argument (`co`) is required if the build_state is built
- // because GitHub does not allow a check run status of completed without a
- // conclusion.
+ // The check run parameters (names, build states, details_urls, etc.) are
+ // taken from each object in `crs`.
+ //
+ // Note that build results are not supported because we never create
+ // multiple check runs in the built state.
//
  // A check run's details URL can be empty for queued but not for the
  // other states.
//
+ // Throw invalid_argument if any of the observed check run members are not
+ // valid GraphQL values (string, enum, etc).
+ //
static string
gq_mutation_create_check_runs (const string& ri, // Repository ID
const string& hs, // Head SHA
- const optional<string>& du, // Details URL.
- const vector<check_run>& crs,
- const string& st, // Check run status.
- optional<gq_built_result> br = nullopt)
+ const vector<check_run>& crs)
{
- // Ensure details URL is non-empty if present.
- //
- assert (!du || !du->empty ());
-
ostringstream os;
os << "mutation {" << '\n';
@@ -370,33 +376,34 @@ namespace brep
//
for (size_t i (0); i != crs.size (); ++i)
{
+ const check_run& cr (crs[i]);
+
+ assert (cr.state != build_state::built); // Not supported.
+
+ // Ensure details URL is non-empty if present.
+ //
+ assert (!cr.details_url || !cr.details_url->empty ());
+
string al ("cr" + to_string (i)); // Field alias.
os << gq_name (al) << ":createCheckRun(input: {" << '\n'
- << " name: " << gq_str (crs[i].name) << '\n'
+ << " name: " << gq_str (cr.name) << '\n'
<< " repositoryId: " << gq_str (ri) << '\n'
<< " headSha: " << gq_str (hs) << '\n'
- << " status: " << gq_enum (st);
- if (du)
- {
- os << '\n';
- os << " detailsUrl: " << gq_str (*du);
- }
- if (br)
+ << " status: " << gq_enum (gh_to_status (cr.state));
+ if (cr.details_url)
{
os << '\n';
- os << " conclusion: " << gq_enum (br->conclusion) << '\n'
- << " output: {" << '\n'
- << " title: " << gq_str (br->title) << '\n'
- << " summary: " << gq_str (br->summary) << '\n'
- << " }";
+ os << " detailsUrl: " << gq_str (*cr.details_url);
}
os << "})" << '\n'
- // Specify the selection set (fields to be returned).
+ // Specify the selection set (fields to be returned). Note that we
+ // rename `id` to `node_id` (using a field alias) for consistency with
+ // webhook events and REST API responses.
//
<< "{" << '\n'
<< " checkRun {" << '\n'
- << " id" << '\n'
+ << " node_id: id" << '\n'
<< " name" << '\n'
<< " status" << '\n'
<< " }" << '\n'
@@ -408,12 +415,83 @@ namespace brep
return os.str ();
}
+ // Serialize a `createCheckRun` mutation for a build to GraphQL.
+ //
+ // The build result argument (`br`) is required if the build_state is built
+ // because GitHub does not allow a check run status of completed without a
+ // conclusion.
+ //
+ // The details URL argument (`du`) can be empty for queued but not for the
+ // other states.
+ //
+ // Throw invalid_argument if any of the arguments or observed check run
+ // members are not valid GraphQL values (string, enum, etc).
+ //
+ static string
+ gq_mutation_create_check_run (const string& ri, // Repository ID
+ const string& hs, // Head SHA
+ const optional<string>& du, // Details URL.
+ const check_run& cr,
+ const string& st, // Check run status.
+ optional<gq_built_result> br = nullopt)
+ {
+ // Ensure details URL is non-empty if present.
+ //
+ assert (!du || !du->empty ());
+
+ ostringstream os;
+
+ os << "mutation {" << '\n';
+
+ // Serialize a `createCheckRun` for the build.
+ //
+ os << gq_name ("cr0") << ":createCheckRun(input: {" << '\n'
+ << " name: " << gq_str (cr.name) << '\n'
+ << " repositoryId: " << gq_str (ri) << '\n'
+ << " headSha: " << gq_str (hs) << '\n'
+ << " status: " << gq_enum (st);
+ if (du)
+ {
+ os << '\n';
+ os << " detailsUrl: " << gq_str (*du);
+ }
+ if (br)
+ {
+ os << '\n';
+ os << " conclusion: " << gq_enum (br->conclusion) << '\n'
+ << " output: {" << '\n'
+ << " title: " << gq_str (br->title) << '\n'
+ << " summary: " << gq_str (br->summary) << '\n'
+ << " }";
+ }
+ os << "})" << '\n'
+ // Specify the selection set (fields to be returned). Note that we
+ // rename `id` to `node_id` (using a field alias) for consistency with
+ // webhook events and REST API responses.
+ //
+ << "{" << '\n'
+ << " checkRun {" << '\n'
+ << " node_id: id" << '\n'
+ << " name" << '\n'
+ << " status" << '\n'
+ << " }" << '\n'
+ << "}" << '\n';
+
+ os << "}" << '\n';
+
+ return os.str ();
+ }
+
+
// Serialize an `updateCheckRun` mutation for one build to GraphQL.
//
// The `co` (conclusion) argument is required if the build_state is built
// because GitHub does not allow updating a check run to completed without a
// conclusion.
//
+ // Throw invalid_argument if any of the arguments are invalid values (of
+ // GraphQL types or otherwise).
+ //
static string
gq_mutation_update_check_run (const string& ri, // Repository ID.
const string& ni, // Node ID.
@@ -435,8 +513,19 @@ namespace brep
<< " status: " << gq_enum (st);
if (sa)
{
- os << '\n';
- os << " startedAt: " << gq_str (gh_to_iso8601 (*sa));
+ try
+ {
+ os << '\n';
+ os << " startedAt: " << gq_str (gh_to_iso8601 (*sa));
+ }
+ catch (const system_error& e)
+ {
+ // Translate for simplicity.
+ //
+ throw invalid_argument ("unable to convert started_at value " +
+ to_string (system_clock::to_time_t (*sa)) +
+ ": " + e.what ());
+ }
}
if (du)
{
@@ -453,11 +542,13 @@ namespace brep
<< " }";
}
os << "})" << '\n'
- // Specify the selection set (fields to be returned).
+ // Specify the selection set (fields to be returned). Note that we
+ // rename `id` to `node_id` (using a field alias) for consistency with
+ // webhook events and REST API responses.
//
<< "{" << '\n'
<< " checkRun {" << '\n'
- << " id" << '\n'
+ << " node_id: id" << '\n'
<< " name" << '\n'
<< " status" << '\n'
<< " }" << '\n'
@@ -472,23 +563,19 @@ namespace brep
vector<check_run>& crs,
const string& iat,
const string& rid,
- const string& hs,
- build_state st)
+ const string& hs)
{
// No support for result_status so state cannot be built.
//
- assert (st != build_state::built);
+#ifndef NDEBUG
+ for (const check_run& cr: crs)
+ assert (cr.state != build_state::built);
+#endif
- // Empty details URL because it's not available until building.
- //
string rq (
- gq_serialize_request (gq_mutation_create_check_runs (rid,
- hs,
- nullopt,
- crs,
- gh_to_status (st))));
+ gq_serialize_request (gq_mutation_create_check_runs (rid, hs, crs)));
- return gq_mutate_check_runs (error, crs, iat, move (rq), st);
+ return gq_mutate_check_runs (error, crs, iat, move (rq));
}
bool
@@ -505,18 +592,19 @@ namespace brep
//
assert (st != build_state::built || br);
- vector<check_run> crs {move (cr)};
-
string rq (
gq_serialize_request (
- gq_mutation_create_check_runs (rid,
- hs,
- du,
- crs,
- gh_to_status (st),
- move (br))));
+ gq_mutation_create_check_run (rid,
+ hs,
+ du,
+ cr,
+ gh_to_status (st),
+ move (br))));
+
+ vector<check_run> crs {move (cr)};
+ crs[0].state = st;
- bool r (gq_mutate_check_runs (error, crs, iat, move (rq), st));
+ bool r (gq_mutate_check_runs (error, crs, iat, move (rq)));
cr = move (crs[0]);
@@ -554,8 +642,9 @@ namespace brep
move (br))));
vector<check_run> crs {move (cr)};
+ crs[0].state = st;
- bool r (gq_mutate_check_runs (error, crs, iat, move (rq), st));
+ bool r (gq_mutate_check_runs (error, crs, iat, move (rq)));
cr = move (crs[0]);
@@ -564,6 +653,8 @@ namespace brep
// Serialize a GraphQL query that fetches a pull request from GitHub.
//
+ // Throw invalid_argument if the node id is not a valid GraphQL string.
+ //
static string
gq_query_pr_mergeability (const string& nid)
{
@@ -572,7 +663,10 @@ namespace brep
os << "query {" << '\n'
<< " node(id:" << gq_str (nid) << ") {" << '\n'
<< " ... on PullRequest {" << '\n'
- << " mergeable potentialMergeCommit { oid }" << '\n'
+ << " headRefOid" << '\n'
+ << " mergeStateStatus" << '\n'
+ << " mergeable" << '\n'
+ << " potentialMergeCommit { oid }" << '\n'
<< " }" << '\n'
<< " }" << '\n'
<< "}" << '\n';
@@ -580,11 +674,13 @@ namespace brep
return os.str ();
}
- optional<string>
- gq_pull_request_mergeable (const basic_mark& error,
- const string& iat,
- const string& nid)
+ optional<gq_pr_pre_check_info>
+ gq_fetch_pull_request_pre_check_info (const basic_mark& error,
+ const string& iat,
+ const string& nid)
{
+ // Let invalid_argument from gq_query_pr_mergeability() propagate.
+ //
string rq (gq_serialize_request (gq_query_pr_mergeability (nid)));
try
@@ -604,7 +700,7 @@ namespace brep
// The response value. Absent if the merge commit is still being
// generated.
//
- optional<string> value;
+ optional<gq_pr_pre_check_info> r;
resp (json::parser& p)
{
@@ -618,30 +714,42 @@ namespace brep
{
found = true;
+ string hs (p.next_expect_member_string ("headRefOid"));
+ string ms (p.next_expect_member_string ("mergeStateStatus"));
string ma (p.next_expect_member_string ("mergeable"));
- if (ma == "MERGEABLE")
+ if (ms == "BEHIND")
+ {
+ // The PR head branch is not up to date with the PR base
+ // branch.
+ //
+ // Note that we can only get here if the head-not-behind
+ // protection rule is active on the PR base branch.
+ //
+ r = {move (hs), true, nullopt};
+ }
+ else if (ma == "MERGEABLE")
{
p.next_expect_member_object ("potentialMergeCommit");
string oid (p.next_expect_member_string ("oid"));
p.next_expect (event::end_object);
- value = move (oid);
+ r = {move (hs), false, move (oid)};
}
else
{
if (ma == "CONFLICTING")
- value = "";
- if (ma == "UNKNOWN")
- ; // Still being generated; leave value absent.
+ r = {move (hs), false, nullopt};
+ else if (ma == "UNKNOWN")
+ ; // Still being generated; leave r absent.
else
- {
- parse_error = "unexpected mergeable value '" + ma + '\'';
-
- // Carry on as if it were UNKNOWN.
- }
+ throw_json (p, "unexpected mergeable value '" + ma + '\'');
+ }
- // Skip the merge commit ID (which should be null).
+ if (!r || !r->merge_commit_sha)
+ {
+ // Skip the merge commit ID if it has not yet been extracted
+ // (in which case it should be null).
//
p.next_expect_name ("potentialMergeCommit");
p.next_expect_value_skip ();
@@ -669,13 +777,13 @@ namespace brep
else if (!rs.parse_error.empty ())
error << rs.parse_error;
- return rs.value;
+ return rs.r;
}
else
error << "failed to fetch pull request: error HTTP response status "
<< sc;
}
- catch (const json::invalid_json_input& e)
+ catch (const json::invalid_json_input& e) // struct resp (via github_post())
{
// Note: e.name is the GitHub API endpoint.
//
@@ -683,16 +791,16 @@ namespace brep
<< e.line << ", column: " << e.column << ", byte offset: "
<< e.position << ", error: " << e;
}
- catch (const invalid_argument& e)
+ catch (const invalid_argument& e) // github_post()
{
error << "malformed header(s) in response: " << e;
}
- catch (const system_error& e)
+ catch (const system_error& e) // github_post()
{
error << "unable to fetch pull request (errno=" << e.code () << "): "
<< e.what ();
}
- catch (const runtime_error& e) // From response type's parsing constructor.
+ catch (const runtime_error& e) // struct resp
{
// GitHub response contained error(s) (could be ours or theirs at this
// point).
@@ -703,206 +811,6 @@ namespace brep
return nullopt;
}
- // Serialize a GraphQL query that fetches the last 100 (the maximum per
- // page) open pull requests with the specified base branch from the
- // repository with the specified node ID.
- //
- // @@ TMP Should we support more/less than 100?
- //
- // Doing more (or even 100) could waste a lot of CI resources on
- // re-testing stale PRs. Maybe we should create a failed synthetic
- // conclusion check run asking the user to re-run the CI manually if/when
- // needed.
- //
- // Note that we cannot request more than 100 at a time (will need to
- // do multiple requests with paging, etc).
- //
- // Also, maybe we should limit the result to "fresh" PRs, e.g., those
- // that have been "touched" in the last week.
- //
- // Example query:
- //
- // query {
- // node(id:"R_kgDOLc8CoA")
- // {
- // ... on Repository {
- // pullRequests (last:100 states:OPEN baseRefName:"master") {
- // edges {
- // node {
- // id
- // number
- // headRefOid
- // }
- // }
- // }
- // }
- // }
- // }
- //
- static string
- gq_query_fetch_open_pull_requests (const string& rid, const string& br)
- {
- ostringstream os;
-
- os << "query {" << '\n'
- << " node(id:" << gq_str (rid) << ") {" << '\n'
- << " ... on Repository {" << '\n'
- << " pullRequests (last:100" << '\n'
- << " states:" << gq_enum ("OPEN") << '\n'
- << " baseRefName:" << gq_str (br) << '\n'
- << " ) {" << '\n'
- << " totalCount" << '\n'
- << " edges { node { id number headRefOid } }" << '\n'
- << " }" << '\n'
- << " }" << '\n'
- << " }" << '\n'
- << "}" << '\n';
-
- return os.str ();
- }
-
- optional<vector<gh_pull_request>>
- gq_fetch_open_pull_requests (const basic_mark& error,
- const string& iat,
- const string& nid,
- const string& br)
- {
- string rq (
- gq_serialize_request (gq_query_fetch_open_pull_requests (nid, br)));
-
- try
- {
- // Response parser.
- //
- // Example response (only the part we need to parse here):
- //
- // {
- // "node": {
- // "pullRequests": {
- // "totalCount": 2,
- // "edges": [
- // {
- // "node": {
- // "id": "PR_kwDOLc8CoM5vRS0y",
- // "number": 7,
- // "headRefOid": "cf72888be9484d6946a1340264e7abf18d31cc92"
- // }
- // },
- // {
- // "node": {
- // "id": "PR_kwDOLc8CoM5vRzHs",
- // "number": 8,
- // "headRefOid": "626d25b318aad27bc0005277afefe3e8d6b2d434"
- // }
- // }
- // ]
- // }
- // }
- // }
- //
- struct resp
- {
- bool found = false;
-
- vector<gh_pull_request> pull_requests;
-
- resp (json::parser& p)
- {
- using event = json::event;
-
- auto parse_data = [this] (json::parser& p)
- {
- p.next_expect (event::begin_object);
-
- if (p.next_expect_member_object_null ("node"))
- {
- found = true;
-
- p.next_expect_member_object ("pullRequests");
-
- uint16_t n (p.next_expect_member_number<uint16_t> ("totalCount"));
-
- p.next_expect_member_array ("edges");
- for (size_t i (0); i != n; ++i)
- {
- p.next_expect (event::begin_object); // edges[i]
-
- p.next_expect_member_object ("node");
- {
- gh_pull_request pr;
- pr.node_id = p.next_expect_member_string ("id");
- pr.number = p.next_expect_member_number<unsigned int> ("number");
- pr.head_sha = p.next_expect_member_string ("headRefOid");
- pull_requests.push_back (move (pr));
- }
- p.next_expect (event::end_object); // node
-
- p.next_expect (event::end_object); // edges[i]
- }
- p.next_expect (event::end_array); // edges
-
- p.next_expect (event::end_object); // pullRequests
- p.next_expect (event::end_object); // node
- }
-
- p.next_expect (event::end_object);
- };
-
- gq_parse_response (p, move (parse_data));
- }
-
- resp () = default;
- } rs;
-
- uint16_t sc (github_post (rs,
- "graphql", // API Endpoint.
- strings {"Authorization: Bearer " + iat},
- move (rq)));
-
- if (sc == 200)
- {
- if (!rs.found)
- {
- error << "repository '" << nid << "' not found";
-
- return nullopt;
- }
-
- return rs.pull_requests;
- }
- else
- error << "failed to fetch repository pull requests: "
- << "error HTTP response status " << sc;
- }
- catch (const json::invalid_json_input& e)
- {
- // Note: e.name is the GitHub API endpoint.
- //
- error << "malformed JSON in response from " << e.name << ", line: "
- << e.line << ", column: " << e.column << ", byte offset: "
- << e.position << ", error: " << e;
- }
- catch (const invalid_argument& e)
- {
- error << "malformed header(s) in response: " << e;
- }
- catch (const system_error& e)
- {
- error << "unable to fetch repository pull requests (errno=" << e.code ()
- << "): " << e.what ();
- }
- catch (const runtime_error& e) // From response type's parsing constructor.
- {
- // GitHub response contained error(s) (could be ours or theirs at this
- // point).
- //
- error << "unable to fetch repository pull requests: " << e;
- }
-
- return nullopt;
- }
-
-
// GraphQL serialization functions.
//
// The GraphQL spec:
@@ -919,8 +827,6 @@ namespace brep
//
// Return the name or throw invalid_argument if it is invalid.
//
- // @@ TODO: dangerous API.
- //
static const string&
gq_name (const string& v)
{
@@ -939,6 +845,13 @@ namespace brep
return v;
}
+ static string
+ gq_name (string&& v)
+ {
+ gq_name (v);
+ return move (v);
+ }
+
// Serialize a string to GraphQL.
//
// Return the serialized string or throw invalid_argument if the string is
@@ -993,8 +906,6 @@ namespace brep
//
// Return the enum value or throw invalid_argument if it is invalid.
//
- // @@ TODO: dangerous API.
- //
static const string&
gq_enum (const string& v)
{
@@ -1003,4 +914,12 @@ namespace brep
return gq_name (v);
}
+
+ static string
+ gq_enum (string&& v)
+ {
+ gq_enum (v);
+ return move (v);
+ }
+
}
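
The two `@@ TODO: dangerous API` notes were removable because the new string&& overloads of gq_name() and gq_enum() close a lifetime hole: the const string& overloads return a reference to their argument, so storing the result of a call with a temporary (as in gq_enum (gh_to_status (cr.state)) above) beyond the full expression would dangle. A sketch of the idiom with an illustrative check() function:

#include <stdexcept>
#include <string>
#include <utility>

// Validate a GraphQL name/enum value. The lvalue overload returns a
// reference to its argument (cheap, but dangerous with temporaries);
// the rvalue overload validates and returns an owned value.
//
static const std::string&
check (const std::string& v)
{
  if (v.empty ())
    throw std::invalid_argument ("empty GraphQL value");

  return v;
}

static std::string
check (std::string&& v)
{
  check (v);            // Validate via the lvalue overload.
  return std::move (v); // Safe: the caller receives a value, not a reference.
}

int main ()
{
  // OK: the temporary's value is moved out before it is destroyed.
  //
  std::string s (check (std::string ("QUEUED")));

  // With only the const& overload, binding the result of
  // check (std::string ("QUEUED")) to a const std::string& and using it
  // after the full expression would leave a dangling reference.

  return s.empty ();
}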
diff --git a/mod/mod-ci-github-gq.hxx b/mod/mod-ci-github-gq.hxx
index 439f7b7..50950d4 100644
--- a/mod/mod-ci-github-gq.hxx
+++ b/mod/mod-ci-github-gq.hxx
@@ -19,24 +19,32 @@ namespace brep
// GraphQL functions (all start with gq_).
//
- // Create a new check run on GitHub for each build. Update `check_runs` with
- // the new states and node IDs. Return false and issue diagnostics if the
- // request failed.
+ // Create a new check run on GitHub for each build with the build state,
+ // name, and details_url taken from each check_run object. Update
+ // `check_runs` with the new data (node id and state_synced). Return false
+ // and issue diagnostics if the request failed.
//
- // Note: no details_url yet since there will be no entry in the build result
- // search page until the task starts building.
+ // Throw invalid_argument if the passed data is invalid, missing, or
+ // inconsistent.
+ //
+ // Note that creating a check_run named `foo` will effectively replace any
+ // existing check_runs with that name. They will still exist on the GitHub
+ // servers but GitHub will only consider the latest one (for display in the
+ // UI or in determining the mergeability of a PR).
//
bool
gq_create_check_runs (const basic_mark& error,
vector<check_run>& check_runs,
const string& installation_access_token,
const string& repository_id,
- const string& head_sha,
- build_state);
+ const string& head_sha);
// Create a new check run on GitHub for a build. Update `cr` with the new
- // state and the node ID. Return false and issue diagnostics if the request
- // failed.
+ // data (node id, state, and state_synced). Return false and issue
+ // diagnostics if the request failed.
+ //
+ // Throw invalid_argument if the passed data is invalid, missing, or
+ // inconsistent.
//
// If the details_url is absent GitHub will use the app's homepage.
//
@@ -61,11 +69,15 @@ namespace brep
build_state,
optional<gq_built_result> = nullopt);
- // Update a check run on GitHub.
+ // Update a check run on GitHub. Update `cr` with the new data (state and
+ // state_synced). Return false and issue diagnostics if the request failed.
+ //
+ // Throw invalid_argument if the passed data is invalid, missing, or
+ // inconsistent.
//
- // Send a GraphQL request that updates an existing check run. Update `cr`
- // with the new state. Return false and issue diagnostics if the request
- // failed.
+ // Note that GitHub allows any state transitions except from built (but
+ // built to built is allowed). The latter case is signalled by setting the
+ // check_run state_synced member to false and the state member to built.
//
// If the details_url is absent GitHub will use the app's homepage.
//
@@ -83,51 +95,50 @@ namespace brep
build_state,
optional<gq_built_result> = nullopt);
- // Fetch a pull request's mergeability from GitHub and return it in first,
- // or absent if the merge commit is still being generated.
+ // Fetch pre-check information for a pull request from GitHub. This
+  // information is used to decide whether or not to CI the PR and comprises
+  // the PR's head commit SHA, whether its head branch is behind its base
+  // branch, and its mergeability and test merge commit SHA.
//
- // Return false in second and issue diagnostics if the request failed.
- //
- struct gq_pr_mergeability
- {
- // True if the pull request is auto-mergeable; false if it would create
- // conflicts.
- //
- bool mergeable;
-
- // The ID of the test merge commit. Empty if mergeable is false.
- //
- string merge_commit_id;
- };
-
-
- // Fetch a pull request's mergeability from GitHub. Return absent value if
- // the merge commit is still being generated. Return empty string if the
- // pull request is not auto-mergeable. Otherwise return the test merge
- // commit id.
+ // Return absent value if the merge commit is still being generated (which
+ // means PR head branch behindness is not yet known either). See the
+  // gq_pr_pre_check_info struct's member comments for non-absent return value
+ // semantics.
//
// Issue diagnostics and return absent if the request failed (which means it
// will be treated by the caller as still being generated).
//
+ // Throw invalid_argument if the node id is invalid.
+ //
// Note that the first request causes GitHub to start preparing the test
// merge commit.
//
- optional<string>
- gq_pull_request_mergeable (const basic_mark& error,
- const string& installation_access_token,
- const string& node_id);
-
- // Fetch the last 100 open pull requests with the specified base branch from
- // the repository with the specified node ID.
- //
- // Issue diagnostics and return nullopt if the repository was not found or
- // an error occurred.
+ // For details regarding the test merge commit and how to check/poll for PR
+ // mergeability see
+ // https://docs.github.com/en/rest/pulls/pulls?#get-a-pull-request and
+ // https://docs.github.com/en/rest/guides/using-the-rest-api-to-interact-with-your-git-database?#checking-mergeability-of-pull-requests
//
- optional<vector<gh_pull_request>>
- gq_fetch_open_pull_requests (const basic_mark& error,
- const string& installation_access_token,
- const string& repository_node_id,
- const string& base_branch);
+ struct gq_pr_pre_check_info
+ {
+ // The PR head commit id.
+ //
+ string head_sha;
+
+ // True if the PR's head branch is behind its base branch.
+ //
+ bool behind;
+
+ // The commit id of the test merge commit. Absent if behind or the PR is
+ // not auto-mergeable.
+ //
+ optional<string> merge_commit_sha;
+ };
+
+ optional<gq_pr_pre_check_info>
+ gq_fetch_pull_request_pre_check_info (
+ const basic_mark& error,
+ const string& installation_access_token,
+ const string& node_id);
}
#endif // MOD_MOD_CI_GITHUB_GQ_HXX
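
To make the return value semantics concrete, here is a sketch of how a caller might act on gq_fetch_pull_request_pre_check_info()'s result as documented above. The decide() helper and decision names are illustrative, not the actual brep handler logic; the struct mirrors the declaration above.

#include <optional>
#include <string>

struct gq_pr_pre_check_info
{
  std::string head_sha;
  bool behind;
  std::optional<std::string> merge_commit_sha;
};

enum class pr_decision {retry, skip_behind, skip_conflict, ci};

static pr_decision
decide (const std::optional<gq_pr_pre_check_info>& r)
{
  if (!r)
    return pr_decision::retry;         // Merge commit still being generated.

  if (r->behind)
    return pr_decision::skip_behind;   // Head branch is behind its base.

  if (!r->merge_commit_sha)
    return pr_decision::skip_conflict; // Not auto-mergeable.

  return pr_decision::ci;              // CI the test merge commit.
}

int main ()
{
  return decide (std::nullopt) == pr_decision::retry ? 0 : 1;
}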
diff --git a/mod/mod-ci-github-service-data.cxx b/mod/mod-ci-github-service-data.cxx
index 7ae0b4f..4598302 100644
--- a/mod/mod-ci-github-service-data.cxx
+++ b/mod/mod-ci-github-service-data.cxx
@@ -10,6 +10,15 @@ namespace brep
{
using event = json::event;
+ [[noreturn]] static void
+ throw_json (json::parser& p, const string& m)
+ {
+ throw json::invalid_json_input (
+ p.input_name,
+ p.line (), p.column (), p.position (),
+ m);
+ }
+
service_data::
service_data (const string& json)
{
@@ -26,35 +35,40 @@ namespace brep
to_string (version));
}
+ {
+ string v (p.next_expect_member_string ("kind"));
+
+ if (v == "local") kind = local;
+ else if (v == "remote") kind = remote;
+ else
+ throw_json (p, "invalid service data kind: '" + v + '\'');
+ }
+
+ pre_check = p.next_expect_member_boolean<bool> ("pre_check");
+ re_request = p.next_expect_member_boolean<bool> ("re_request");
+
warning_success = p.next_expect_member_boolean<bool> ("warning_success");
- // Installation access token.
+ // Installation access token (IAT).
//
- p.next_expect_member_object ("installation_access");
- installation_access.token = p.next_expect_member_string ("token");
- installation_access.expires_at =
- gh_from_iso8601 (p.next_expect_member_string ("expires_at"));
- p.next_expect (event::end_object);
+ p.next_expect_name ("installation_access");
+ installation_access = gh_installation_access_token (p);
- installation_id =
- p.next_expect_member_number<uint64_t> ("installation_id");
+ app_id = p.next_expect_member_string ("app_id");
+ installation_id = p.next_expect_member_string ("installation_id");
repository_node_id = p.next_expect_member_string ("repository_node_id");
+ repository_clone_url = p.next_expect_member_string ("repository_clone_url");
{
- string* s (p.next_expect_member_string_null ("repository_clone_url"));
+ string* s (p.next_expect_member_string_null ("pr_node_id"));
if (s != nullptr)
- repository_clone_url = *s;
+ pr_node_id = *s;
}
pr_number = p.next_expect_member_number_null<uint32_t> ("pr_number");
- {
- string* s (p.next_expect_member_string_null ("merge_node_id"));
- if (s != nullptr)
- merge_node_id = *s;
- }
-
+ check_sha = p.next_expect_member_string ("check_sha");
report_sha = p.next_expect_member_string ("report_sha");
p.next_expect_member_array ("check_runs");
@@ -70,21 +84,49 @@ namespace brep
nid = *v;
}
- build_state s (to_build_state (p.next_expect_member_string ("state")));
+ build_state s;
+ try
+ {
+ s = to_build_state (p.next_expect_member_string ("state"));
+ }
+ catch (const invalid_argument& e)
+ {
+ throw_json (p, e.what ());
+ }
+
bool ss (p.next_expect_member_boolean<bool> ("state_synced"));
optional<result_status> rs;
{
string* v (p.next_expect_member_string_null ("status"));
if (v != nullptr)
- rs = bbot::to_result_status (*v);
+ {
+ try
+ {
+ rs = bbot::to_result_status (*v);
+ }
+ catch (const invalid_argument& e)
+ {
+ throw_json (p, e.what ());
+ }
+ assert (s == build_state::built);
+ }
}
- check_runs.emplace_back (move (bid), move (nm), move (nid), s, ss, rs);
+ check_runs.push_back (
+ check_run {move (bid),
+ move (nm),
+ move (nid),
+ s,
+ ss,
+ rs,
+ nullopt /* details_url */});
p.next_expect (event::end_object);
}
+ completed = p.next_expect_member_boolean<bool> ("completed");
+
{
string* s (p.next_expect_member_string_null ("conclusion_node_id"));
if (s != nullptr)
@@ -94,37 +136,63 @@ namespace brep
p.next_expect (event::end_object);
}
+ // check_suite constructor.
+ //
service_data::
service_data (bool ws,
string iat_tok,
timestamp iat_ea,
- uint64_t iid,
+ string aid,
+ string iid,
string rid,
+ string rcu,
+ kind_type k,
+ bool pc,
+ bool rr,
+ string cs,
string rs)
- : warning_success (ws),
+ : kind (k), pre_check (pc), re_request (rr),
+ warning_success (ws),
installation_access (move (iat_tok), iat_ea),
- installation_id (iid),
+ app_id (move (aid)),
+ installation_id (move (iid)),
repository_node_id (move (rid)),
- report_sha (move (rs))
+ repository_clone_url (move (rcu)),
+ check_sha (move (cs)),
+ report_sha (move (rs)),
+ completed (false)
{
}
+ // pull_request constructor.
+ //
service_data::
service_data (bool ws,
string iat_tok,
timestamp iat_ea,
- uint64_t iid,
+ string aid,
+ string iid,
string rid,
- string rs,
string rcu,
+ kind_type k,
+ bool pc,
+ bool rr,
+ string cs,
+ string rs,
+ string pid,
uint32_t prn)
- : warning_success (ws),
+ : kind (k), pre_check (pc), re_request (rr),
+ warning_success (ws),
installation_access (move (iat_tok), iat_ea),
- installation_id (iid),
+ app_id (move (aid)),
+ installation_id (move (iid)),
repository_node_id (move (rid)),
repository_clone_url (move (rcu)),
+ pr_node_id (move (pid)),
pr_number (prn),
- report_sha (move (rs))
+ check_sha (move (cs)),
+ report_sha (move (rs)),
+ completed (false)
{
}
@@ -138,21 +206,52 @@ namespace brep
s.member ("version", 1);
+ s.member_name ("kind");
+ switch (kind)
+ {
+ case local: s.value ("local"); break;
+ case remote: s.value ("remote"); break;
+ }
+
+ s.member ("pre_check", pre_check);
+ s.member ("re_request", re_request);
+
s.member ("warning_success", warning_success);
- // Installation access token.
+ // Installation access token (IAT).
//
s.member_begin_object ("installation_access");
s.member ("token", installation_access.token);
- s.member ("expires_at", gh_to_iso8601 (installation_access.expires_at));
+
+ // IAT expires_at timestamp.
+ //
+ {
+ string v;
+ try
+ {
+ v = gh_to_iso8601 (installation_access.expires_at);
+ }
+ catch (const system_error& e)
+ {
+ // Translate for simplicity.
+ //
+ throw invalid_argument ("unable to convert IAT expires_at value " +
+ to_string (system_clock::to_time_t (
+ installation_access.expires_at)));
+ }
+ s.member ("expires_at", move (v));
+ }
+
s.end_object ();
+ s.member ("app_id", app_id);
s.member ("installation_id", installation_id);
s.member ("repository_node_id", repository_node_id);
+ s.member ("repository_clone_url", repository_clone_url);
- s.member_name ("repository_clone_url");
- if (repository_clone_url)
- s.value (*repository_clone_url);
+ s.member_name ("pr_node_id");
+ if (pr_node_id)
+ s.value (*pr_node_id);
else
s.value (nullptr);
@@ -162,12 +261,7 @@ namespace brep
else
s.value (nullptr);
- s.member_name ("merge_node_id");
- if (merge_node_id)
- s.value (*merge_node_id);
- else
- s.value (nullptr);
-
+ s.member ("check_sha", check_sha);
s.member ("report_sha", report_sha);
s.member_begin_array ("check_runs");
@@ -188,7 +282,10 @@ namespace brep
s.member_name ("status");
if (cr.status)
- s.value (to_string (*cr.status));
+ {
+ assert (cr.state == build_state::built);
+ s.value (to_string (*cr.status)); // Doesn't throw.
+ }
else
s.value (nullptr);
@@ -196,6 +293,8 @@ namespace brep
}
s.end_array ();
+ s.member ("completed", completed);
+
s.member_name ("conclusion_node_id");
if (conclusion_node_id)
s.value (*conclusion_node_id);
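Taken together, the parsing and serialization code above implies serialized
service data along these lines (a sketch: all values are made up, and the
check_runs element layout follows the check run parsing code, parts of which
fall outside these hunks):

  {
    "version": 1,
    "kind": "remote",
    "pre_check": false,
    "re_request": false,
    "warning_success": true,
    "installation_access": {"token": "...", "expires_at": "..."},
    "app_id": "...",
    "installation_id": "...",
    "repository_node_id": "...",
    "repository_clone_url": "https://github.com/...",
    "pr_node_id": "...",
    "pr_number": 123,
    "check_sha": "...",
    "report_sha": "...",
    "check_runs": [...],
    "completed": false,
    "conclusion_node_id": null
  }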
diff --git a/mod/mod-ci-github-service-data.hxx b/mod/mod-ci-github-service-data.hxx
index 5573d90..50bb49d 100644
--- a/mod/mod-ci-github-service-data.hxx
+++ b/mod/mod-ci-github-service-data.hxx
@@ -31,7 +31,12 @@ namespace brep
build_state state;
bool state_synced;
- optional<result_status> status; // Only present if state is built.
+ optional<result_status> status; // Only if state is built & synced.
+
+ // Note: never serialized (only used to pass information to the GraphQL
+ // functions).
+ //
+ optional<string> details_url;
string
state_string () const
@@ -43,12 +48,39 @@ namespace brep
}
};
+ // We have two kinds of service data that correspond to the following two
+ // typical scenarios (until/unless we add support for merge queues):
+ //
+ // 1. Branch push (via check_suite) plus zero or more local PRs (via
+ // pull_request) that share the same head commit id.
+ //
+ // 2. One or more remote PRs (via pull_request) that share the same head
+ // commit id (from a repository in another organization).
+ //
+ // Plus, for PRs, the service data may be in the pre-check phase while we
+ // are in the process of requesting the test merge commit and making sure it
+ // can be created and is not behind base. We do all this before we actually
+ // create the CI tenant.
+ //
+ // Note that the above two cases are typical but not the only possible
+ // scenarios. Specifically, it is possible to have a mixture of all three
+ // kinds (branch push, local PR, and remote PR) since the same head commit
+ // id can be present in both local and remote branches. There is no way to
+ // handle this case perfectly and we do the best we can (see
+ // build_unloaded_pre_check() for details).
+ //
struct service_data
{
// The data schema version. Note: must be first member in the object.
//
uint64_t version = 1;
+ // Kind and phase.
+ //
+ enum kind_type {local, remote /*, queue */} kind;
+ bool pre_check;
+ bool re_request; // Re-requested (rebuild).
+
// Check suite settings.
//
bool warning_success; // See gh_to_conclusion().
@@ -57,32 +89,42 @@ namespace brep
//
gh_installation_access_token installation_access;
- uint64_t installation_id;
+ string app_id;
+ string installation_id;
string repository_node_id; // GitHub-internal opaque repository id.
+ string repository_clone_url;
+
// The following two are only used for pull requests.
//
- // @@ TODO/LATER: maybe put them in a struct?
+ // @@ TODO/LATER: maybe put them in a struct, if more members?
//
- optional<string> repository_clone_url;
+ optional<string> pr_node_id;
optional<uint32_t> pr_number;
- // The GitHub ID of the synthetic PR merge check run or absent if it
- // hasn't been created yet.
+ // The commit ID the branch push or pull request (and its check runs) are
+ // building. This will be the head commit for the branch push as well as
+ // local pull requests and the test merge commit for remote pull requests.
//
- optional<string> merge_node_id;
+ string check_sha;
- // The commit ID the check suite or pull request (and its check runs) are
+ // The commit ID the branch push or pull request (and its check runs) are
// reporting to. Note that in the case of a pull request this will be the
- // head commit (`pull_request.head.sha`) as opposed to the merge commit.
+ // head commit (`pull_request.head.sha`) as opposed to the test merge
+ // commit.
//
string report_sha;
vector<check_run> check_runs;
+ // Flag indicating that all the elements in check_runs are built and this
+ // check suite is completed.
+ //
+ bool completed;
+
// The GitHub ID of the synthetic conclusion check run or absent if it
- // hasn't been created yet. See also merge_node_id above.
+ // hasn't been created yet.
//
optional<string> conclusion_node_id;
@@ -96,16 +138,28 @@ namespace brep
//
// Throw invalid_argument if the schema version is not supported.
//
+ // Throw invalid_argument (invalid_json_input) in case of malformed JSON
+ // or any invalid values.
+ //
explicit
service_data (const string& json);
// The check_suite constructor.
//
+ // Note that check_sha and report_sha are both the SHA of the
+ // check_suite's head commit.
+ //
service_data (bool warning_success,
string iat_token,
timestamp iat_expires_at,
- uint64_t installation_id,
+ string app_id,
+ string installation_id,
string repository_node_id,
+ string repository_clone_url,
+ kind_type kind,
+ bool pre_check,
+ bool re_request,
+ string check_sha,
string report_sha);
// The pull_request constructor.
@@ -113,16 +167,27 @@ namespace brep
service_data (bool warning_success,
string iat_token,
timestamp iat_expires_at,
- uint64_t installation_id,
+ string app_id,
+ string installation_id,
string repository_node_id,
- string report_sha,
string repository_clone_url,
+ kind_type kind,
+ bool pre_check,
+ bool re_request,
+ string check_sha,
+ string report_sha,
+ string pr_node_id,
uint32_t pr_number);
service_data () = default;
// Serialize to JSON.
//
+ // Throw invalid_argument if any values are invalid.
+ //
+ // May also throw invalid_json_output but that would be a programming
+ // error.
+ //
string
json () const;
};
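As a usage illustration, a branch push would create its service data with the
check_suite constructor roughly as follows (a sketch with assumed variable
names; see handle_branch_push() in mod-ci-github.cxx below for the actual
code):

  service_data sd (true /* warning_success */,
                   iat.token,
                   iat.expires_at,
                   app_id,
                   installation_id,
                   repository_node_id,
                   repository_clone_url,
                   service_data::local,
                   false /* pre_check */,
                   false /* re_request */,
                   head_sha /* check_sha */,
                   head_sha /* report_sha */);

  string s (sd.json ()); // Throws invalid_argument on invalid values.

  service_data sd2 (s);  // Throws invalid_argument (invalid_json_input) on
                         // malformed JSON.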
diff --git a/mod/mod-ci-github.cxx b/mod/mod-ci-github.cxx
index 0ddc75d..e008314 100644
--- a/mod/mod-ci-github.cxx
+++ b/mod/mod-ci-github.cxx
@@ -19,24 +19,6 @@
#include <stdexcept>
-// @@ Remaining TODOs
-//
-// - Rerequested checks
-//
-// - check_suite (action: rerequested): received when user re-runs all
-// checks.
-//
-// - check_run (action: rerequested): received when user re-runs a
-// specific check or all failed checks.
-//
-// Will need to extract a few more fields from check_runs, but the layout
-// is very similar to that of check_suite.
-//
-// - Choose strong webhook secret (when deploying).
-//
-// - Check that delivery UUID has not been received before (replay attack).
-//
-
// Resources:
//
// Creating an App:
@@ -79,6 +61,8 @@ namespace brep
void ci_github::
init (scanner& s)
{
+ HANDLER_DIAG;
+
{
shared_ptr<tenant_service_base> ts (
dynamic_pointer_cast<tenant_service_base> (shared_from_this ()));
@@ -93,12 +77,46 @@ namespace brep
// Prepare for the CI requests handling, if configured.
//
+ // @@ TMP Shouldn't we be checking options_->ci_data_specified () like
+ // mod-ci does?
+ //
if (options_->build_config_specified () &&
options_->ci_github_app_webhook_secret_specified ())
{
+ if (!options_->ci_github_app_id_private_key_specified ())
+ fail << "no app id/private key mappings configured";
+
+ for (const auto& pr: options_->ci_github_app_id_private_key ())
+ {
+ if (pr.second.relative ())
+ fail << "ci-github-app-id-private-key paths must be absolute";
+ }
+
ci_start::init (make_shared<options::ci_start> (*options_));
database_module::init (*options_, options_->build_db_retry ());
+
+ // Read the webhook secret from the configured path.
+ //
+ {
+ const path& p (options_->ci_github_app_webhook_secret ());
+
+ if (p.relative ())
+ fail << "ci-github-app-webhook-secret path must be absolute";
+
+ try
+ {
+ ifdstream is (p);
+ getline (is, webhook_secret_);
+
+ if (webhook_secret_.empty ())
+ fail << "empty webhook secret read from " << p;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read webhook secret file " << p << ": " << e;
+ }
+ }
}
}
@@ -220,10 +238,10 @@ namespace brep
//
try
{
- string h (
- compute_hmac (*options_,
- body.data (), body.size (),
- options_->ci_github_app_webhook_secret ().c_str ()));
+ string h (compute_hmac (*options_,
+ body.data (),
+ body.size (),
+ webhook_secret_.c_str ()));
if (!icasecmp (h, hmac))
{
@@ -239,33 +257,48 @@ namespace brep
fail << "unable to compute request HMAC: " << e;
}
- // Process the `warning` webhook request query parameter.
+ // Process the `app-id` and `warning` webhook request query parameters.
//
+ string app_id;
bool warning_success;
{
const name_values& rps (rq.parameters (1024, true /* url_only */));
- auto i (find_if (rps.begin (), rps.end (),
- [] (auto&& rp) {return rp.name == "warning";}));
+ bool ai (false), wa (false);
- if (i == rps.end ())
- throw invalid_request (400,
- "missing 'warning' webhook query parameter");
+ auto badreq = [] (const string& m)
+ {
+ throw invalid_request (400, m);
+ };
+
+ for (const name_value& rp: rps)
+ {
+ if (rp.name == "app-id")
+ {
+ if (!rp.value)
+ badreq ("missing 'app-id' webhook query parameter value");
- if (!i->value)
- throw invalid_request (
- 400, "missing 'warning' webhook query parameter value");
+ ai = true;
+ app_id = *rp.value;
+ }
+ else if (rp.name == "warning")
+ {
+ if (!rp.value)
+ badreq ("missing 'warning' webhook query parameter value");
- const string& v (*i->value);
+ wa = true;
+ const string& v (*rp.value);
- if (v == "success") warning_success = true;
- else if (v == "failure") warning_success = false;
- else
- {
- throw invalid_request (
- 400,
- "invalid 'warning' webhook query parameter value: '" + v + '\'');
+ if (v == "success") warning_success = true;
+ else if (v == "failure") warning_success = false;
+ else
+ badreq ("invalid 'warning' webhook query parameter value: '" + v +
+ '\'');
+ }
}
+
+ if (!ai) badreq ("missing 'app-id' webhook query parameter");
+ if (!wa) badreq ("missing 'warning' webhook query parameter");
}
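In other words, the GitHub App's webhook URL is now expected to carry both
query parameters, along these lines (hypothetical host and app id; the exact
endpoint form depends on the deployment):

  https://ci.example.org/?ci-github&app-id=12345&warning=failure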
// There is a webhook event (specified in the x-github-event header) and
@@ -275,11 +308,8 @@ namespace brep
// Note: "GitHub continues to add new event types and new actions to
// existing event types." As a result we ignore known actions that we are
// not interested in and log and ignore unknown actions. The thinking here
- // is that we want be "notified" of new actions at which point we can decide
- // whether to ignore them or to handle.
- //
- // @@ There is also check_run even (re-requested by user, either
- // individual check run or all the failed check runs).
+ // is that we want to be "notified" of new actions at which point we can
+ // decide whether to ignore them or to handle.
//
if (event == "check_suite")
{
@@ -300,34 +330,91 @@ namespace brep
throw invalid_request (400, move (m));
}
+ if (cs.check_suite.app_id != app_id)
+ {
+ fail << "webhook check_suite app.id " << cs.check_suite.app_id
+ << " does not match app-id query parameter " << app_id;
+ }
+
if (cs.action == "requested")
{
- return handle_check_suite_request (move (cs), warning_success);
+ // Branch pushes are handled in handle_branch_push() so ignore this
+ // event.
+ //
+ return true;
}
else if (cs.action == "rerequested")
{
// Someone manually requested to re-run all the check runs in this
// check suite. Treat as a new request.
//
- return handle_check_suite_request (move (cs), warning_success);
+ return handle_check_suite_rerequest (move (cs), warning_success);
}
else if (cs.action == "completed")
{
// GitHub thinks that "all the check runs in this check suite have
- // completed and a conclusion is available". Looks like this one we
- // ignore?
+ // completed and a conclusion is available". Check with our own
+ // bookkeeping and log an error if there is a mismatch.
//
- // What if our bookkeeping says otherwise? But then we can't even
- // access the service data easily here. @@ TODO: maybe/later.
+ return handle_check_suite_completed (move (cs), warning_success);
+ }
+ else
+ {
+ // Ignore unknown actions by sending a 200 response with empty body
+ // but also log as an error since we want to notice new actions.
//
+ error << "unknown action '" << cs.action << "' in check_suite event";
+
return true;
}
+ }
+ else if (event == "check_run")
+ {
+ gh_check_run_event cr;
+ try
+ {
+ json::parser p (body.data (), body.size (), "check_run event");
+
+ cr = gh_check_run_event (p);
+ }
+ catch (const json::invalid_json_input& e)
+ {
+ string m ("malformed JSON in " + e.name + " request body");
+
+ error << m << ", line: " << e.line << ", column: " << e.column
+ << ", byte offset: " << e.position << ", error: " << e;
+
+ throw invalid_request (400, move (m));
+ }
+
+ if (cr.check_run.app_id != app_id)
+ {
+ fail << "webhook check_run app.id " << cr.check_run.app_id
+ << " does not match app-id query parameter " << app_id;
+ }
+
+ if (cr.action == "rerequested")
+ {
+ // Someone manually requested to re-run a specific check run.
+ //
+ return handle_check_run_rerequest (move (cr), warning_success);
+ }
+#if 0
+ // It looks like we shouldn't be receiving these since we are not
+ // subscribed to them.
+ //
+ else if (cr.action == "created" ||
+ cr.action == "completed" ||
+ cr.action == "requested_action")
+ {
+ }
+#endif
else
{
// Ignore unknown actions by sending a 200 response with empty body
// but also log as an error since we want to notice new actions.
//
- error << "unknown action '" << cs.action << "' in check_suite event";
+ error << "unknown action '" << cr.action << "' in check_run event";
return true;
}
@@ -351,7 +438,16 @@ namespace brep
throw invalid_request (400, move (m));
}
- if (pr.action == "opened" || pr.action == "synchronize")
+ // Store the app-id webhook query parameter in the gh_pull_request_event
+ // object (see gh_pull_request for an explanation).
+ //
+ // When we receive the other webhooks we do check that the app ids in
+ // the payload and query match but here we have to assume it is valid.
+ //
+ pr.pull_request.app_id = app_id;
+
+ if (pr.action == "opened" ||
+ pr.action == "synchronize")
{
// opened
// A pull request was opened.
@@ -359,22 +455,113 @@ namespace brep
// synchronize
// A pull request's head branch was updated from the base branch or
// new commits were pushed to the head branch. (Note that there is
- // no equivalent event for the base branch. That case gets handled
- // in handle_check_suite_request() instead.)
+ // no equivalent event for the base branch.)
//
- // Note that both cases are handled the same: we start a new CI
+ // Note that both cases are handled similarly: we start a new CI
// request which will be reported on the new commit id.
//
return handle_pull_request (move (pr), warning_success);
}
+ else if (pr.action == "edited")
+ {
+ // PR base branch changed (to a different branch) besides other
+ // irrelevant changes (title, body, etc).
+ //
+ // This is in a sense a special case of the base branch moving. In
+ // that case we don't do anything (due to the head sharing problem),
+ // relying instead on the branch protection rule. So it makes sense
+ // to do the same here.
+ //
+ return true;
+ }
+ else if (pr.action == "closed")
+ {
+ // PR has been closed (as merged or not; see merged member). Also
+ // apparently received if base branch is deleted (and the same
+ // for head branch). See also the reopened event below.
+ //
+ // While it may seem natural to cancel the CI for the closed PR, it
+ // might actually be useful to have a completed CI record. GitHub
+ // doesn't prevent us from publishing CI results for the closed PR
+ // (even if both base and head branches were deleted). And if such a
+ // PR is reopened, the CI results remain.
+ //
+ return true;
+ }
+ else if (pr.action == "reopened")
+ {
+ // Previously closed PR has been reopened.
+ //
+ // Since we don't cancel the CI for a closed PR, there is nothing
+ // to do if it is reopened.
+ //
+ return true;
+ }
+ else if (pr.action == "assigned" ||
+ pr.action == "auto_merge_disabled" ||
+ pr.action == "auto_merge_enabled" ||
+ pr.action == "converted_to_draft" ||
+ pr.action == "demilestoned" ||
+ pr.action == "dequeued" ||
+ pr.action == "enqueued" ||
+ pr.action == "labeled" ||
+ pr.action == "locked" ||
+ pr.action == "milestoned" ||
+ pr.action == "ready_for_review" ||
+ pr.action == "review_request_removed" ||
+ pr.action == "review_requested" ||
+ pr.action == "unassigned" ||
+ pr.action == "unlabeled" ||
+ pr.action == "unlocked")
+ {
+ // These have no relation to CI.
+ //
+ return true;
+ }
else
{
- // Ignore the remaining actions by sending a 200 response with empty
- // body.
+ // Ignore unknown actions by sending a 200 response with empty body
+ // but also log as an error since we want to notice new actions.
//
+ error << "unknown action '" << pr.action << "' in pull_request event";
+
return true;
}
}
+ else if (event == "push")
+ {
+ // Push events are triggered by branch pushes, branch creation, and
+ // branch deletion.
+ //
+ gh_push_event ps;
+ try
+ {
+ json::parser p (body.data (), body.size (), "push event");
+
+ ps = gh_push_event (p);
+ }
+ catch (const json::invalid_json_input& e)
+ {
+ string m ("malformed JSON in " + e.name + " request body");
+
+ error << m << ", line: " << e.line << ", column: " << e.column
+ << ", byte offset: " << e.position << ", error: " << e;
+
+ throw invalid_request (400, move (m));
+ }
+
+ // Store the app-id webhook query parameter in the gh_push_event
+ // object (see gh_push_event for an explanation).
+ //
+ // When we receive the other webhooks we do check that the app ids in
+ // the payload and query match but here we have to assume it is valid.
+ //
+ ps.app_id = app_id;
+
+ // Note that the push request event has no action.
+ //
+ return handle_branch_push (move (ps), warning_success);
+ }
else
{
// Log to investigate.
@@ -385,12 +572,543 @@ namespace brep
}
}
- // Let's capitalize the synthetic check run names to make them easier to
- // distinguish from the regular ones.
+ // Let's capitalize the synthetic conclusion check run name to make it
+ // easier to distinguish from the regular ones.
//
- static string merge_check_run_name ("MERGE-COMMIT");
static string conclusion_check_run_name ("CONCLUSION");
+ bool ci_github::
+ handle_branch_push (gh_push_event ps, bool warning_success)
+ {
+ HANDLER_DIAG;
+
+ l3 ([&]{trace << "push event { " << ps << " }";});
+
+ // Cancel the CI tenant associated with the overwritten/deleted previous
+ // head commit if this is a forced push or a branch deletion.
+ //
+ if (ps.forced || ps.deleted)
+ {
+ // Service id that will uniquely identify the CI tenant.
+ //
+ string sid (ps.repository.node_id + ':' + ps.before);
+
+ // Note that it's possible this commit still exists in another branch, so
+ // we do a refcount-aware cancel.
+ //
+ if (optional<tenant_service> ts = cancel (error, warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ "ci-github", sid,
+ true /* ref_count */))
+ {
+ l3 ([&]{trace << (ps.forced ? "forced push " + ps.after + " to "
+ : "deletion of ")
+ << ps.ref << ": attempted to cancel CI of previous"
+ << " head commit with tenant_service id " << sid
+ << " (ref_count: " << ts->ref_count << ')';});
+ }
+ else
+ {
+ // It's possible that there was no CI for the previous commit for
+ // various reasons (e.g., CI was not enabled).
+ //
+ l3 ([&]{trace << (ps.forced ? "forced push " + ps.after + " to "
+ : "deletion of ")
+ << ps.ref << ": failed to cancel CI of previous"
+ << " head commit with tenant_service id " << sid;});
+ }
+ }
+
+ if (ps.deleted)
+ return true; // Do nothing further if this was a branch deletion.
+
+ // While we don't need the installation access token in this request,
+ // let's obtain it to flush out any permission issues early. Also, it is
+ // valid for an hour so we will most likely make use of it.
+ //
+ optional<string> jwt (generate_jwt (ps.app_id, trace, error));
+ if (!jwt)
+ throw server_error ();
+
+ optional<gh_installation_access_token> iat (
+ obtain_installation_access_token (ps.installation.id,
+ move (*jwt),
+ error));
+ if (!iat)
+ throw server_error ();
+
+ l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+
+ // While it would have been nice to cancel the CIs of PRs with this branch
+ // as base in order not to waste resources, there are complications:
+ // Firstly, we can only do this for remote PRs (since local PRs will most
+ // likely share the result with the branch push). Secondly, we want to do
+ // our best even if the head-behind-base branch protection rule is not
+ // enabled, in which case it would be good to complete the CI. So
+ // maybe/later. See also the head case in handle_pull_request(), where we
+ // do cancel remote PRs that are not shared.
+
+ // Service id that uniquely identifies the CI tenant.
+ //
+ string sid (ps.repository.node_id + ':' + ps.after);
+
+ service_data sd (warning_success,
+ iat->token,
+ iat->expires_at,
+ ps.app_id,
+ ps.installation.id,
+ move (ps.repository.node_id),
+ move (ps.repository.clone_url),
+ service_data::local,
+ false /* pre_check */,
+ false /* re_requested */,
+ ps.after /* check_sha */,
+ ps.after /* report_sha */);
+
+ // Create an unloaded CI tenant, doing nothing if one already exists
+ // (which could've been created by handle_pull_request() or by us as a
+ // result of a push to another branch). Note that the tenant's reference
+ // count is incremented in all cases.
+ //
+ // Note: use no delay since we need to (re)create the synthetic conclusion
+ // check run as soon as possible.
+ //
+ // Note that we use the create() API instead of start() since duplicate
+ // management is not available in start().
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications until (1) we load the tenant, (2) we cancel it, or (3)
+ // it gets archived after some timeout.
+ //
+ if (!create (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ tenant_service (sid, "ci-github", sd.json ()),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */,
+ duplicate_tenant_mode::ignore))
+ {
+ fail << "push " + ps.after + " to " + ps.ref
+ << ": unable to create unloaded CI tenant";
+ }
+
+ return true;
+ }
+
+ // Miscellaneous pull request facts
+ //
+ // - Although some of the GitHub documentation makes it sound like they
+ // expect check runs to be added to both the PR head commit and the merge
+ // commit, the PR UI does not react to the merge commit's check runs
+ // consistently. It actually seems to be quite broken. The only thing it
+ // does seem to do reliably is blocking the PR merge if the merge commit's
+ // check runs are not successful (i.e., overriding the PR head commit's
+ // check runs). But the UI looks quite messed up generally in this state.
+ //
+ // - When new commits are added to a PR base branch, pull_request.base.sha
+ // does not change, but the test merge commit will be updated to include
+ // the new commits to the base branch.
+ //
+ // - When new commits are added to a PR head branch, pull_request.head.sha
+ // gets updated with the head commit's SHA and check_suite.pull_requests[]
+ // will contain all PRs with this branch as head.
+ //
+ bool ci_github::
+ handle_pull_request (gh_pull_request_event pr, bool warning_success)
+ {
+ HANDLER_DIAG;
+
+ l3 ([&]{trace << "pull_request event { " << pr << " }";});
+
+ // While we don't need the installation access token in this request,
+ // let's obtain it to flush out any permission issues early. Also, it is
+ // valid for an hour so we will most likely make use of it.
+ //
+ optional<string> jwt (generate_jwt (pr.pull_request.app_id, trace, error));
+ if (!jwt)
+ throw server_error ();
+
+ optional<gh_installation_access_token> iat (
+ obtain_installation_access_token (pr.installation.id,
+ move (*jwt),
+ error));
+ if (!iat)
+ throw server_error ();
+
+ l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+
+ // Distinguish between local and remote PRs by comparing the head and base
+ // repositories' paths.
+ //
+ service_data::kind_type kind (
+ pr.pull_request.head_path == pr.pull_request.base_path
+ ? service_data::local
+ : service_data::remote);
+
+ // Note that similar to the branch push case above, while it would have
+ // been nice to cancel the previous CI job once the PR head moves (the
+ // "synchronize" event), due to the head sharing problem the previous CI
+ // job might actually still be relevant (in both local and remote PR
+ // cases). So we only do it for the remote PRs and only if the head is not
+ // shared (via tenant reference counting).
+ //
+ if (kind == service_data::remote && pr.action == "synchronize")
+ {
+ if (pr.before)
+ {
+ // Service id that will uniquely identify the CI tenant.
+ //
+ string sid (pr.repository.node_id + ':' + *pr.before);
+
+ if (optional<tenant_service> ts = cancel (error, warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ "ci-github", sid,
+ true /* ref_count */))
+ {
+ l3 ([&]{trace << "pull request " << pr.pull_request.node_id
+ << ": attempted to cancel CI of previous head commit"
+ << " (ref_count: " << ts->ref_count << ')';});
+ }
+ else
+ {
+ // It's possible that there was no CI for the previous commit for
+ // various reasons (e.g., CI was not enabled).
+ //
+ l3 ([&]{trace << "pull request " << pr.pull_request.node_id
+ << ": failed to cancel CI of previous head commit "
+ << "with tenant_service id " << sid;});
+ }
+ }
+ else
+ {
+ error << "pull request " << pr.pull_request.node_id
+ << ": before commit is missing in synchronize event";
+ }
+ }
+
+ // Note: for remote PRs the check_sha will be set later, in
+ // build_unloaded_pre_check(), to test merge commit id.
+ //
+ string check_sha (kind == service_data::local
+ ? pr.pull_request.head_sha
+ : "");
+
+ // Note that PR rebuilds (re-requested) are handled by
+ // handle_check_suite_rerequest().
+ //
+ // Note that, in the case of a remote PR, GitHub will copy the PR head
+ // commit from the head (forked) repository into the base repository. So
+ // the check runs must always be added to the base repository, whether the
+ // PR is local or remote. The head commit refs are located at
+ // refs/pull/<PR-number>/head.
+ //
+ service_data sd (warning_success,
+ move (iat->token),
+ iat->expires_at,
+ pr.pull_request.app_id,
+ pr.installation.id,
+ move (pr.repository.node_id),
+ move (pr.repository.clone_url),
+ kind, true /* pre_check */, false /* re_request */,
+ move (check_sha),
+ move (pr.pull_request.head_sha) /* report_sha */,
+ pr.pull_request.node_id,
+ pr.pull_request.number);
+
+ // Create an unloaded CI tenant for the pre-check phase (during which we
+ // wait for the PR's merge commit and behindness to become available).
+ //
+ // Create with an empty service id so that the generated tenant id is used
+ // instead during the pre-check phase (so as not to clash with a proper
+ // service id for this head commit, potentially created in
+ // handle_branch_push() or as another PR).
+ //
+ tenant_service ts ("", "ci-github", sd.json ());
+
+ // Note: use no delay since we need to start the actual CI (which in turn
+ // (re)creates the synthetic conclusion check run) as soon as possible.
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications -- which will be routed to build_unloaded_pre_check() --
+ // until we cancel the tenant or it gets archived after some timeout.
+ // (Note that we never actually load this request, we always cancel it;
+ // see build_unloaded_pre_check() for details.)
+ //
+ if (!create (error,
+ warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ move (ts),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */))
+ {
+ fail << "pull request " << pr.pull_request.node_id
+ << ": unable to create unloaded pre-check tenant";
+ }
+
+ return true;
+ }
+
+ bool ci_github::
+ handle_check_suite_rerequest (gh_check_suite_event cs, bool warning_success)
+ {
+ HANDLER_DIAG;
+
+ l3 ([&]{trace << "check_suite event { " << cs << " }";});
+
+ assert (cs.action == "rerequested");
+
+ // While we don't need the installation access token in this request,
+ // let's obtain it to flush out any permission issues early. Also, it is
+ // valid for an hour so we will most likely make use of it.
+ //
+ optional<string> jwt (generate_jwt (cs.check_suite.app_id, trace, error));
+ if (!jwt)
+ throw server_error ();
+
+ optional<gh_installation_access_token> iat (
+ obtain_installation_access_token (cs.installation.id,
+ move (*jwt),
+ error));
+ if (!iat)
+ throw server_error ();
+
+ l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+
+ // Service id that uniquely identifies the CI tenant.
+ //
+ string sid (cs.repository.node_id + ':' + cs.check_suite.head_sha);
+
+ // If the user requests a rebuild of the (entire) PR, then this manifests
+ // as the check_suite rather than pull_request event. Specifically:
+ //
+ // - For a local PR, this event is shared with the branch push and all we
+ // need to do is restart the CI for the head commit.
+ //
+ // - For a remote PR, this event will have no gh_check_suite::head_branch.
+ // In this case we need to load the existing service data for this head
+ // commit, extract the test merge commit, and restart the CI for that.
+ //
+ // Note that it's possible the base branch has moved in the meantime and
+ // ideally we would want to re-request the test merge commit, etc.
+ // However, this will only be necessary if the user does not follow our
+ // recommendation of enabling the head-behind-base protection. And it
+ // seems all this extra complexity would not be warranted.
+ //
+ string check_sha;
+ service_data::kind_type kind;
+
+ if (!cs.check_suite.head_branch)
+ {
+ // Rebuild of remote PR.
+ //
+ kind = service_data::remote;
+
+ if (optional<tenant_data> d = find (*build_db_, "ci-github", sid))
+ {
+ tenant_service& ts (d->service);
+
+ try
+ {
+ service_data sd (*ts.data);
+ check_sha = move (sd.check_sha); // Test merge commit.
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "failed to parse service data: " << e;
+ }
+ }
+ else
+ {
+ error << "check suite " << cs.check_suite.node_id
+ << " for remote pull request:"
+ << " re-requested but tenant_service with id " << sid
+ << " did not exist";
+ return true;
+ }
+ }
+ else
+ {
+ // Rebuild of branch push or local PR.
+ //
+ kind = service_data::local;
+ check_sha = cs.check_suite.head_sha;
+ }
+
+ service_data sd (warning_success,
+ iat->token,
+ iat->expires_at,
+ cs.check_suite.app_id,
+ cs.installation.id,
+ move (cs.repository.node_id),
+ move (cs.repository.clone_url),
+ kind, false /* pre_check */, true /* re_requested */,
+ move (check_sha),
+ move (cs.check_suite.head_sha) /* report_sha */);
+
+ // Replace the existing CI tenant if it exists.
+ //
+ // Note that the GitHub UI does not allow re-running the entire check suite
+ // until all the check runs are completed.
+ //
+
+ // Create an unloaded CI tenant.
+ //
+ // Note: use no delay since we need to (re)create the synthetic conclusion
+ // check run as soon as possible.
+ //
+ // Note that we use the create() API instead of start() since duplicate
+ // management is not available in start().
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications until (1) we load the tenant, (2) we cancel it, or (3)
+ // it gets archived after some timeout.
+ //
+ auto pr (create (error,
+ warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ tenant_service (sid, "ci-github", sd.json ()),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */,
+ duplicate_tenant_mode::replace));
+
+ if (!pr)
+ {
+ fail << "check suite " << cs.check_suite.node_id
+ << ": unable to create unloaded CI tenant";
+ }
+
+ if (pr->second == duplicate_tenant_result::created)
+ {
+ error << "check suite " << cs.check_suite.node_id
+ << ": re-requested but tenant_service with id " << sid
+ << " did not exist";
+ return true;
+ }
+
+ return true;
+ }
+
+ bool ci_github::
+ handle_check_suite_completed (gh_check_suite_event cs, bool warning_success)
+ {
+ // The plan is as follows:
+ //
+ // 1. Load the service data.
+ //
+ // 2. Verify it is completed.
+ //
+ // 3. Verify the check run counts match.
+ //
+ // 4. Verify (like in build_built()) that all the check runs are
+ // completed.
+ //
+ // 5. Verify the result matches what GitHub thinks it is.
+
+ HANDLER_DIAG;
+
+ l3 ([&]{trace << "check_suite event { " << cs << " }";});
+
+ // Service id that uniquely identifies the CI tenant.
+ //
+ string sid (cs.repository.node_id + ':' + cs.check_suite.head_sha);
+
+ // The common log entry subject.
+ //
+ string sub ("check suite " + cs.check_suite.node_id + '/' + sid);
+
+ // Load the service data.
+ //
+ service_data sd;
+
+ if (optional<tenant_data> d = find (*build_db_, "ci-github", sid))
+ {
+ try
+ {
+ sd = service_data (*d->service.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "failed to parse service data: " << e;
+ }
+ }
+ else
+ {
+ error << sub << ": tenant_service does not exist";
+ return true;
+ }
+
+ // Verify the completed flag and the number of check runs.
+ //
+ if (!sd.completed)
+ {
+ error << sub << " service data complete flag is false";
+ return true;
+ }
+
+ // Received count will be one higher because we don't store the conclusion
+ // check run.
+ //
+ size_t check_runs_count (sd.check_runs.size () + 1);
+
+ if (check_runs_count == 1)
+ {
+ error << sub << ": no check runs in service data";
+ return true;
+ }
+
+ if (cs.check_suite.check_runs_count != check_runs_count)
+ {
+ error << sub << ": check runs count " << cs.check_suite.check_runs_count
+ << " does not match service data count " << check_runs_count;
+ return true;
+ }
+
+ // Verify that all the check runs are built and compute the summary
+ // conclusion.
+ //
+ result_status conclusion (result_status::success);
+
+ for (const check_run& cr: sd.check_runs)
+ {
+ if (cr.state == build_state::built)
+ {
+ assert (cr.status.has_value ());
+ conclusion |= *cr.status;
+ }
+ else
+ {
+ error << sub << ": unbuilt check run in service data";
+ return true;
+ }
+ }
+
+ // Verify the conclusion.
+ //
+ if (!cs.check_suite.conclusion)
+ {
+ error << sub << ": absent conclusion in completed check suite";
+ return true;
+ }
+
+ // Note that the case mismatch is due to GraphQL (gh_conclusion())
+ // requiring uppercase conclusion values while the received webhook values
+ // are lower case.
+ //
+ string gh_conclusion (gh_to_conclusion (conclusion, warning_success));
+
+ if (icasecmp (*cs.check_suite.conclusion, gh_conclusion) != 0)
+ {
+ error << sub << ": conclusion " << *cs.check_suite.conclusion
+ << " does not match service data conclusion " << gh_conclusion;
+ return true;
+ }
+
+ return true;
+ }
+
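To summarize the verification above, the summary conclusion folds the
individual check run statuses with result_status's |= operator and is then
compared to GitHub's verdict case-insensitively (a condensed restatement of
the code above; the uppercase/lowercase mismatch is per the case note):

  result_status conclusion (result_status::success);

  for (const check_run& cr: sd.check_runs)
    conclusion |= *cr.status; // Worst status wins.

  // E.g., GraphQL-style "FAILURE" vs webhook-style "failure".
  //
  bool match (
    icasecmp (*cs.check_suite.conclusion,
              gh_to_conclusion (conclusion, warning_success)) == 0);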
// Return the colored circle corresponding to a result_status.
//
static string
@@ -430,361 +1148,513 @@ namespace brep
return s;
}
+ // Create a gq_built_result.
+ //
+ // Throw invalid_argument in case of invalid result_status.
+ //
+ static gq_built_result
+ make_built_result (result_status rs, bool warning_success, string message)
+ {
+ return {gh_to_conclusion (rs, warning_success),
+ circle (rs) + ' ' + ucase (to_string (rs)),
+ move (message)};
+ }
+
+ // Parse a check run details URL into a build_id.
+ //
+ // Return nullopt if the URL is invalid.
+ //
+ static optional<build_id>
+ parse_details_url (const string& details_url);
+
+ // Note that GitHub always posts a message to their GUI saying "You have
+ // successfully requested <check_run_name> be rerun", regardless of what
+ // HTTP status code we respond with. However, we do return error status codes
+ // when there is no better option (like failing the conclusion) in case they
+ // start handling them someday.
+ //
bool ci_github::
- handle_check_suite_request (gh_check_suite_event cs, bool warning_success)
+ handle_check_run_rerequest (const gh_check_run_event& cr,
+ bool warning_success)
{
HANDLER_DIAG;
- l3 ([&]{trace << "check_suite event { " << cs << " }";});
+ l3 ([&]{trace << "check_run event { " << cr << " }";});
- optional<string> jwt (generate_jwt (trace, error));
- if (!jwt)
- throw server_error ();
+ // The overall plan is as follows:
+ //
+ // 1. Load service data.
+ //
+ // 2. If the tenant is archived, then fail (re-create) both the check run
+ // and the conclusion with appropriate diagnostics.
+ //
+ // 3. If the check run is in the queued state, then do nothing.
+ //
+ // 4. Re-create the check run in the queued state and the conclusion in
+ // the building state. Note: do in a single request to make sure we
+ // either "win" or "loose" the potential race for both (important
+ // for #7).
+ //
+ // 5. Call the rebuild() function to attempt to schedule a rebuild. Pass
+ // the update function that does the following (if called):
+ //
+ // a. Save new node ids.
+ //
+ // b. Update the check run state (may also not exist).
+ //
+ // c. Clear the completed flag if true.
+ //
+ // 6. If the result of rebuild() indicates the tenant is archived, then
+ // fail (update) both the check run and conclusion with appropriate
+ // diagnostics.
+ //
+ // 7. If original state is queued (no rebuild was scheduled), then fail
+ // (update) both the check run and the conclusion.
+ //
+ // Note that while conceptually we are updating existing check runs, in
+ // practice we have to re-create them as new check runs in order to replace
+ // the existing ones because GitHub does not allow transitioning out of the
+ // built state.
- optional<gh_installation_access_token> iat (
- obtain_installation_access_token (cs.installation.id,
+ // Get a new installation access token.
+ //
+ auto get_iat = [this, &trace, &error, &cr] ()
+ -> optional<gh_installation_access_token>
+ {
+ optional<string> jwt (generate_jwt (cr.check_run.app_id, trace, error));
+ if (!jwt)
+ return nullopt;
+
+ optional<gh_installation_access_token> iat (
+ obtain_installation_access_token (cr.installation.id,
move (*jwt),
error));
- if (!iat)
- throw server_error ();
+ if (iat)
+ l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
- l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+ return iat;
+ };
- service_data sd (warning_success,
- iat->token,
- iat->expires_at,
- cs.installation.id,
- move (cs.repository.node_id),
- move (cs.check_suite.head_sha));
+ const string& repo_node_id (cr.repository.node_id);
+ const string& head_sha (cr.check_run.check_suite.head_sha);
+
+ // Prepare the build and conclusion check runs. They are sent to GitHub in
+ // a single request (unless something goes wrong) so store them together
+ // from the outset.
+ //
+ vector<check_run> check_runs (2);
+ check_run& bcr (check_runs[0]); // Build check run
+ check_run& ccr (check_runs[1]); // Conclusion check run
+
+ ccr.name = conclusion_check_run_name;
- // Create the conclusion check run.
+ const gh_installation_access_token* iat (nullptr);
+ optional<gh_installation_access_token> new_iat;
+
+ // Load the service data, failing the check runs if the tenant has been
+ // archived.
//
+ service_data sd;
+ string tenant_id;
{
- check_run cr;
- cr.name = conclusion_check_run_name;
+ // Service id that uniquely identifies the CI tenant.
+ //
+ string sid (repo_node_id + ':' + head_sha);
- if (gq_create_check_run (error,
- cr,
- iat->token,
- sd.repository_node_id,
- sd.report_sha,
- nullopt /* details_url */,
- build_state::building))
+ optional<tenant_data> d (find (*build_db_, "ci-github", sid));
+ if (!d)
{
- l3 ([&]{trace << "created check_run { " << cr << " }";});
+ // No such tenant.
+ //
+ fail << "check run " << cr.check_run.node_id
+ << " re-requested but tenant_service with id " << sid
+ << " does not exist";
+ }
+
+ tenant_service& ts (d->service);
- sd.conclusion_node_id = move (cr.node_id);
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "failed to parse service data: " << e;
+ }
+
+ if (!sd.conclusion_node_id)
+ fail << "no conclusion node id for check run " << cr.check_run.node_id;
+
+ tenant_id = d->tenant_id;
+
+ // Get a new IAT if the one from the service data has expired.
+ //
+ if (system_clock::now () > sd.installation_access.expires_at)
+ {
+ if ((new_iat = get_iat ()))
+ iat = &*new_iat;
+ else
+ throw server_error ();
}
else
+ iat = &sd.installation_access;
+
+ if (d->archived) // Tenant is archived
{
- // We could try to carry on in this case by either updating or
- // creating this conclusion check run later. But let's not complicate
- // things for now.
+ // Fail (update) the check runs.
+ //
+ gq_built_result br (
+ make_built_result (
+ result_status::error, warning_success,
+ "Unable to rebuild individual configuration: build has "
+ "been archived"));
+
+ // Try to update the conclusion check run even if the first update
+ // fails.
+ //
+ bool f (false); // Failed.
+
+ if (gq_update_check_run (error, bcr, iat->token,
+ repo_node_id, cr.check_run.node_id,
+ nullopt /* details_url */,
+ build_state::built, br))
+ {
+ l3 ([&]{trace << "updated check_run { " << bcr << " }";});
+ }
+ else
+ {
+ error << "check_run " << cr.check_run.node_id
+ << ": unable to update check run";
+ f = true;
+ }
+
+ if (gq_update_check_run (error, ccr, iat->token,
+ repo_node_id, *sd.conclusion_node_id,
+ nullopt /* details_url */,
+ build_state::built, move (br)))
+ {
+ l3 ([&]{trace << "updated conclusion check_run { " << ccr << " }";});
+ }
+ else
+ {
+ error << "check_run " << cr.check_run.node_id
+ << ": unable to update conclusion check run";
+ f = true;
+ }
+
+ // Fail the handler if either of the check runs could not be
+ // updated.
//
- fail << "check suite " << cs.check_suite.node_id
- << ": unable to create conclusion check run";
+ if (f)
+ throw server_error ();
+
+ return true;
}
}
- // The merge commits of any open pull requests with this branch as base
- // branch will now be out of date, and thus so will be their CI builds and
- // associated check runs (and, no, GitHub does not invalidate those CI
- // results automatically; see below).
- //
- // Unfortunately GitHub does not provide a webhook for PR base branch
- // updates (as it does for PR head branch updates) so we have to handle it
- // here. We do so by fetching the open pull requests with this branch as
- // base branch and then recreating the CI requests (cancel existing,
- // create new) for each pull request.
- //
- // If we fail to recreate any of the PR CI requests, they and their check
- // runs will be left reflecting outdated merge commits. If the new merge
- // commit failed to be generated (merge conflicts) the PR will not be
- // mergeable which is not entirely catastrophic. But on the other hand, if
- // all of the existing CI request's check runs have already succeeded and
- // the new merge commit succeeds (no conflicts) with logic errors then a
- // user would be able to merge a broken PR.
+ // Ignore the re-request if it is for the conclusion check run.
//
- // Regardless of the nature of the error, we have to let the check suite
- // handling code proceed so we only issue diagnostics. Note also that we
- // want to run this code as early as possible to minimize the window of
- // the user seeing misleading CI results.
+ // Expect that if the user selects re-run all failed checks we will
+ // receive multiple check runs, one of which will be the conclusion. And
+ // if we fail it while it happens to arrive last, then we will end up in
+ // the wrong overall state (real check run is building while conclusion is
+ // failed). It seems the best we can do is to ignore it: if the user did
+ // request a rebuild of the conclusion check run explicitly, there will be
+ // no change, which is not ideal but is still an indication that this
+ // operation is not supported.
//
- if (cs.action == "requested")
+ if (cr.check_run.name == conclusion_check_run_name)
{
- // Fetch open pull requests with the check suite's head branch as base
- // branch.
- //
- optional<vector<gh_pull_request>> prs (
- gq_fetch_open_pull_requests (error,
- iat->token,
- sd.repository_node_id,
- cs.check_suite.head_branch));
+ l3 ([&]{trace << "re-requested conclusion check_run";});
+
+#if 0
+ if (!sd.conclusion_node_id)
+ fail << "no conclusion node id for check run " << cr.check_run.node_id;
+
+ gq_built_result br (
+ make_built_result (result_status::error, warning_success,
+ "Conclusion check run cannot be rebuilt"));
- if (prs)
+ // Fail (update) the conclusion check run.
+ //
+ if (gq_update_check_run (error, ccr, iat->token,
+ repo_node_id, *sd.conclusion_node_id,
+ nullopt /* details_url */,
+ build_state::built, move (br)))
{
- // Recreate each PR's CI request.
- //
- for (gh_pull_request& pr: *prs)
- {
- service_data prsd (sd.warning_success,
- sd.installation_access.token,
- sd.installation_access.expires_at,
- sd.installation_id,
- sd.repository_node_id,
- pr.head_sha,
- cs.repository.clone_url,
- pr.number);
-
- // Cancel the existing CI request and create a new unloaded CI
- // request. After this call we will start getting the
- // build_unloaded() notifications until (1) we load the request, (2)
- // we cancel it, or (3) it gets archived after some timeout.
- //
- if (!create_pull_request_ci (error, warn, trace,
- prsd, pr.node_id,
- true /* cancel_first */))
- {
- error << "pull request " << pr.node_id
- << ": unable to create unloaded CI request";
- }
- }
+ l3 ([&]{trace << "updated conclusion check_run { " << ccr << " }";});
}
else
{
- error << "unable to fetch open pull requests with base branch "
- << cs.check_suite.head_branch;
+ fail << "check run " << cr.check_run.node_id
+ << ": unable to update conclusion check run "
+ << *sd.conclusion_node_id;
}
+#endif
+
+ return true;
}
- // Cancel existing CI request if this check suite is being re-run.
+
+ // Parse the check_run's details_url to extract build id.
+ //
+ // While this is a bit hackish, there doesn't seem to be a better way
+ // (like associating custom data with a check run). Note that the GitHub
+ // UI only allows rebuilding completed check runs, so the details URL
+ // should be there.
//
- else if (cs.action == "rerequested")
+ optional<build_id> bid (parse_details_url (cr.check_run.details_url));
+ if (!bid)
{
- const string& nid (cs.check_suite.node_id);
+ fail << "check run " << cr.check_run.node_id
+ << ": failed to extract build id from details_url";
+ }
- if (!cancel (error, warn, &trace, *build_db_, "ci-github", nid))
- error << "check suite " << nid << " (re-requested): unable to cancel";
+ // Initialize the check run (`bcr`) with state from the service data.
+ //
+ {
+ // Search for the check run in the service data.
+ //
+ // Note that we look by name in case node id got replaced by a racing
+ // re-request (in which case we ignore this request).
+ //
+ auto i (find_if (sd.check_runs.begin (), sd.check_runs.end (),
+ [&cr] (const check_run& scr)
+ {
+ return scr.name == cr.check_run.name;
+ }));
+
+ if (i == sd.check_runs.end ())
+ fail << "check_run " << cr.check_run.node_id
+ << " (" << cr.check_run.name << "): "
+ << "re-requested but does not exist in service data";
+
+ // Do nothing if node ids don't match.
+ //
+ if (i->node_id && *i->node_id != cr.check_run.node_id)
+ {
+ l3 ([&]{trace << "check_run " << cr.check_run.node_id
+ << " (" << cr.check_run.name << "): "
+ << "node id has changed in service data";});
+ return true;
+ }
+
+ // Do nothing if the build is already queued.
+ //
+ if (i->state == build_state::queued)
+ {
+ l3 ([&]{trace << "ignoring already-queued check run";});
+ return true;
+ }
+
+ bcr.name = i->name;
+ bcr.build_id = i->build_id;
+ bcr.state = i->state;
}
- // Start CI for the check suite.
+ // Transition the build and conclusion check runs out of the built state
+ // (or any other state) by re-creating them.
//
- repository_location rl (cs.repository.clone_url + '#' +
- cs.check_suite.head_branch,
- repository_type::git);
+ bcr.state = build_state::queued;
+ bcr.state_synced = false;
+ bcr.details_url = cr.check_run.details_url;
+
+ ccr.state = build_state::building;
+ ccr.state_synced = false;
+
+ if (gq_create_check_runs (error, check_runs, iat->token,
+ repo_node_id, head_sha))
+ {
+ assert (bcr.state == build_state::queued);
+ assert (ccr.state == build_state::building);
+
+ l3 ([&]{trace << "created check_run { " << bcr << " }";});
+ l3 ([&]{trace << "created conclusion check_run { " << ccr << " }";});
+ }
+ else
+ {
+ fail << "check run " << cr.check_run.node_id
+ << ": unable to re-create build and conclusion check runs";
+ }
- // @@ What happens if we call this functions with an already existing
- // node_id (e.g., replay attack). See the UUID header above.
+ // Request the rebuild and update service data.
//
- optional<start_result> r (
- start (error,
- warn,
- verb_ ? &trace : nullptr,
- tenant_service (cs.check_suite.node_id, "ci-github", sd.json ()),
- move (rl),
- vector<package> {},
- nullopt, /* client_ip */
- nullopt /* user_agent */));
+ bool race (false);
- if (!r || r->status != 200)
+ // Callback function called by rebuild() to update the service data (but
+ // only if the build is actually restarted).
+ //
+ auto update_sd = [&error, &new_iat, &race,
+ tenant_id = move (tenant_id),
+ &cr, &bcr, &ccr] (const string& ti,
+ const tenant_service& ts,
+ build_state) -> optional<string>
{
- // Update the conclusion check run with failure.
- //
- result_status rs (result_status::error);
+ // NOTE: this lambda may be called repeatedly (e.g., due to transaction
+ // being aborted) and so should not move out of its captures.
- optional<gq_built_result> br (
- gq_built_result (gh_to_conclusion (rs, sd.warning_success),
- circle (rs) + ' ' + ucase (to_string (rs)),
- to_check_run_summary (r)));
+ race = false; // Reset.
- check_run cr;
+ if (tenant_id != ti)
+ {
+ // The tenant got replaced since we loaded it but we managed to
+ // trigger a rebuild in the new tenant. Who knows whose check runs are
+ // visible, so let's fail ours similarly to the cases below.
+ //
+ race = true;
+ return nullopt;
+ }
- // Set some fields for display purposes.
- //
- cr.node_id = *sd.conclusion_node_id;
- cr.name = conclusion_check_run_name;
+ service_data sd;
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "failed to parse service data: " << e;
+ return nullopt;
+ }
- if (gq_update_check_run (error,
- cr,
- iat->token,
- sd.repository_node_id,
- *sd.conclusion_node_id,
- nullopt /* details_url */,
- build_state::built,
- move (br)))
+ // Note that we again look by name in case node id got replaced by a
+ // racing re-request. In this case, however, it's impossible to decide
+ // who won that race, so let's fail the check suite to be on the safe
+ // side (in a sense, similar to the rebuild() returning queued below).
+ //
+ auto i (find_if (
+ sd.check_runs.begin (), sd.check_runs.end (),
+ [&cr] (const check_run& scr)
+ {
+ return scr.name == cr.check_run.name;
+ }));
+
+ if (i == sd.check_runs.end ())
{
- l3 ([&]{trace << "updated check_run { " << cr << " }";});
+ error << "check_run " << cr.check_run.node_id
+ << " (" << cr.check_run.name << "): "
+ << "re-requested but does not exist in service data";
+ return nullopt;
}
- else
+
+ if (i->node_id && *i->node_id != cr.check_run.node_id)
{
- fail << "check suite " << cs.check_suite.node_id
- << ": unable to update conclusion check_run "
- << *sd.conclusion_node_id;
+ // Keep the old conclusion node id to make sure any further state
+ // transitions are ignored. A bit of a hack.
+ //
+ race = true;
+ return nullopt;
}
- }
- return true;
- }
+ *i = bcr; // Update with new node_id, state, state_synced.
- // High-level description of pull request (PR) handling
- //
- // - Some GitHub pull request terminology:
- //
- // - Fork and pull model: Pull requests are created in a forked
- // repository. Thus the head and base repositories are different.
- //
- // - Shared repository model: The pull request head and base branches are
- // in the same repository. For example, from a feature branch onto
- // master.
- //
- // - CI the merge commit but add check runs to the pull request head commit
- //
- // Most of the major CI integrations build the merge commit instead of the
- // PR head commit.
- //
- // Adding the check runs to the PR head commit is recommended by the
- // following blog posts by a GitHub employee who is one of the best
- // sources on these topics:
- // https://www.kenmuse.com/blog/shared-commits-and-github-checks/ and
- // https://www.kenmuse.com/blog/creating-github-checks/.
- //
- // Do not add any check runs to the merge commit because:
- //
- // - The PR head commit is the only commit that can be relied upon to
- // exist throughout the PR's lifetime. The merge commit, on the other
- // hand, can change during the PR process. When that happens the PR will
- // look for check runs on the new merge commit, effectively discarding
- // the ones we had before.
- //
- // - Although some of the GitHub documentation makes it sound like they
- // expect check runs to be added to both the PR head commit and the
- // merge commit, the PR UI does not react to the merge commit's check
- // runs consistently. It actually seems to be quite broken.
- //
- // The only thing it seems to do reliably is blocking the PR merge if
- // the merge commit's check runs are not successful (i.e, overriding the
- // PR head commit's check runs). But the UI looks quite messed up
- // generally in this state.
- //
- // Note that, in the case of a PR from a forked repository (the so-called
- // "fork and pull" model), GitHub will copy the PR head commit from the
- // head repository (the forked one) into the base repository. So the check
- // runs must always be added to the base repository, whether the PR is
- // from within the same repository or from a forked repository. The merge
- // and head commits will be at refs/pull/<PR-number>/{merge,head}.
- //
- // - New commits are added to PR head branch
- //
- // @@ TODO In this case we will end up creating two sets of check runs on
- // the same commit (pull_request.head.sha and
- // check_suite.head_sha). It's not obvious which to prefer but I'm
- // thinking the pull request is more important because in most
- // development models it represents something that is more likely to
- // end up in an important branch (as opposed to the head of a feature
- // branch).
- //
- // Note that in these two cases we are building different commit (the
- // head commit vs merge commit). So it's not clear how we can have
- // a single check_suite result represent both?
- //
- // Possible solution: ignore all check_suites with non-empty
- // pull_requests[].
- //
- // => check_suite(requested, PR_head) [only in shared repo model]
- //
- // Note: check_suite.pull_requests[] will contain all PRs with this
- // branch as head.
- //
- // Note: check_suite.pull_requests[i].head.sha will be the new, updated
- // PR head sha.
- //
- // => pull_request(synchronize)
- //
- // Note: The check_suite and pull_request can arrive in any order.
- //
- // - New commits are added to PR base branch
- //
- // Note: In this case pull_request.base.sha does not change, but the merge
- // commit will be updated to include the new commits to the base branch.
- //
- // - @@ TODO? PR base branch changed (to a different branch)
- //
- // => pull_request(edited)
- //
- // - PR closed @@ TODO
- //
- // => pull_request(closed)
- //
- // Cancel CI?
- //
- // - PR merged @@ TODO
- //
- // => pull_request(merged)
- //
- // => check_suite(PR_base)
- //
- // Probably wouldn't want to CI the base again because the PR CI would've
- // done the equivalent already.
- //
- bool ci_github::
- handle_pull_request (gh_pull_request_event pr, bool warning_success)
- {
- HANDLER_DIAG;
+ sd.conclusion_node_id = ccr.node_id;
+ sd.completed = false;
- l3 ([&]{trace << "pull_request event { " << pr << " }";});
+ // Save the IAT if we created a new one.
+ //
+ if (new_iat)
+ sd.installation_access = *new_iat;
- // While we don't need the installation access token in this request,
- // let's obtain it to flush out any permission issues early. Also, it is
- // valid for an hour so we will most likely make use of it.
- //
- optional<string> jwt (generate_jwt (trace, error));
- if (!jwt)
- throw server_error ();
+ return sd.json ();
+ };
- optional<gh_installation_access_token> iat (
- obtain_installation_access_token (pr.installation.id,
- move (*jwt),
- error));
- if (!iat)
- throw server_error ();
+ optional<build_state> bs (rebuild (*build_db_, retry_, *bid, update_sd));
- l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+ // If the build has been archived or re-enqueued since we loaded the
+ // service data, fail (by updating) both the build check run and the
+ // conclusion check run. Otherwise the build has been successfully
+ // re-enqueued so do nothing further.
+ //
+ if (!race && bs && *bs != build_state::queued)
+ return true;
- service_data sd (warning_success,
- move (iat->token),
- iat->expires_at,
- pr.installation.id,
- move (pr.repository.node_id),
- pr.pull_request.head_sha,
- pr.repository.clone_url,
- pr.pull_request.number);
+ gq_built_result br; // Built result for both check runs.
- // Create unloaded CI request. Cancel the existing CI request first if the
- // head branch has been updated (action is `synchronize`).
+ if (race || bs) // Race or re-enqueued.
+ {
+ // The re-enqueued case: this build has been re-enqueued since we first
+ // loaded the service data. This could happen if the user clicked
+ // "re-run" multiple times and another handler won the rebuild() race.
+ //
+    // However, the winner of the check runs race cannot be determined.
+    //
+    // Best case, the other handler won the check runs race as well and
+    // thus everything will proceed normally. Our check runs will be
+    // invisible and disregarded.
+    //
+    // Worst case, we won the check runs race and the other handler's check
+    // runs -- the ones that will be updated by the build_*() notifications
+    // -- are no longer visible, leaving things quite broken.
+ //
+ // Either way, we fail our check runs. In the best case scenario it
+ // will have no effect; in the worst case scenario it lets the user
+ // know something has gone wrong.
+ //
+ br = make_built_result (result_status::error, warning_success,
+ "Unable to rebuild, try again");
+ }
+ else // Archived.
+ {
+ // The build has expired since we loaded the service data. Most likely
+ // the tenant has been archived.
+ //
+ br = make_built_result (
+ result_status::error, warning_success,
+ "Unable to rebuild individual configuration: build has been archived");
+ }
+
+ // Try to update the conclusion check run even if the first update fails.
//
- // After this call we will start getting the build_unloaded()
- // notifications until (1) we load the request, (2) we cancel it, or (3)
- // it gets archived after some timeout.
+ bool f (false); // Failed.
+
+ // Fail the build check run.
//
- bool cancel_first (pr.action == "synchronize");
+ if (gq_update_check_run (error, bcr, iat->token,
+ repo_node_id, *bcr.node_id,
+ nullopt /* details_url */,
+ build_state::built, br))
+ {
+ l3 ([&]{trace << "updated check_run { " << bcr << " }";});
+ }
+ else
+ {
+ error << "check run " << cr.check_run.node_id
+ << ": unable to update (replacement) check run "
+ << *bcr.node_id;
+ f = true;
+ }
- if (!create_pull_request_ci (error, warn, trace,
- sd, pr.pull_request.node_id,
- cancel_first))
+ // Fail the conclusion check run.
+ //
+ if (gq_update_check_run (error, ccr, iat->token,
+ repo_node_id, *ccr.node_id,
+ nullopt /* details_url */,
+ build_state::built, move (br)))
{
- fail << "pull request " << pr.pull_request.node_id
- << ": unable to create unloaded CI request";
+ l3 ([&]{trace << "updated conclusion check_run { " << ccr << " }";});
}
+ else
+ {
+ error << "check run " << cr.check_run.node_id
+ << ": unable to update conclusion check run " << *ccr.node_id;
+ f = true;
+ }
+
+ // Fail the handler if either of the check runs could not be updated.
+ //
+ if (f)
+ throw server_error ();
return true;
}
- // Note: only handles pull requests (not check suites).
- //
- function<optional<string> (const tenant_service&)> ci_github::
- build_unloaded (tenant_service&& ts,
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
+ build_unloaded (const string& ti,
+ tenant_service&& ts,
const diag_epilogue& log_writer) const noexcept
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
service_data sd;
@@ -798,6 +1668,240 @@ namespace brep
return nullptr;
}
+ return sd.pre_check
+ ? build_unloaded_pre_check (move (ts), move (sd), log_writer)
+ : build_unloaded_load (ti, move (ts), move (sd), log_writer);
+ }
+
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
+ build_unloaded_pre_check (tenant_service&& ts,
+ service_data&& sd,
+ const diag_epilogue& log_writer) const noexcept
+ try
+ {
+ // NOTE: this function is noexcept and should not throw.
+ //
+ // In a few places where invalid_argument is unlikely to be thrown and/or
+  // would indicate that things are seriously broken, we let it propagate
+  // to the function catch block where the pre-check tenant will be
+  // canceled (otherwise we could end up in an infinite loop, e.g., because
+  // the problematic arguments won't change).
+
+ NOTIFICATION_DIAG (log_writer);
+
+ // We get here for PRs only (but both local and remote). The overall
+ // plan is as follows:
+ //
+ // 1. Ask for the mergeability/behind status/test merge commit.
+ //
+ // 2. If not ready, get called again.
+ //
+ // 3. If not mergeable, behind, or different head (head changed while
+ // waiting for merge commit and thus differs from what's in the
+ // service_data), cancel the pre-check tenant and do nothing.
+ //
+ // 4. Otherwise, create an unloaded CI tenant and cancel ourselves. Note
+ // that all re-requested cases are handled elsewhere.
+ //
+  // Note that in the mixed local/remote case, whether we CI the head
+  // commit or the test merge commit is racy and there is nothing we can do
+  // about it (the purely local case can get "upgraded" to mixed after we
+  // have started the CI job).
+ //
+
+ // Request PR pre-check info (triggering the generation of the test merge
+  // commit on GitHub's side).
+ //
+ // Let unlikely invalid_argument propagate (see above).
+ //
+ optional<gq_pr_pre_check_info> pc (
+ gq_fetch_pull_request_pre_check_info (error,
+ sd.installation_access.token,
+ *sd.pr_node_id));
+
+ if (!pc)
+ {
+ // Test merge commit not available yet: get called again to retry.
+ //
+ return nullptr;
+ }
+
+ // Create the CI tenant if nothing is wrong, otherwise issue diagnostics.
+ //
+ if (pc->behind)
+ {
+ l3 ([&]{trace << "ignoring pull request " << *sd.pr_node_id
+ << ": head is behind base";});
+ }
+ else if (!pc->merge_commit_sha)
+ {
+ l3 ([&]{trace << "ignoring pull request " << *sd.pr_node_id
+ << ": not auto-mergeable";});
+ }
+ else if (pc->head_sha != sd.report_sha)
+ {
+ l3 ([&]{trace << "ignoring pull request " << *sd.pr_node_id
+ << ": head commit has changed";});
+ }
+ else
+ {
+ // Create the CI tenant by reusing the pre-check service data.
+ //
+ sd.pre_check = false;
+
+ // Set the service data's check_sha if this is a remote PR. The test
+ // merge commit refs are located at refs/pull/<PR-number>/merge.
+ //
+ if (sd.kind == service_data::remote)
+ sd.check_sha = *pc->merge_commit_sha;
+
+ // Service id that will uniquely identify the CI tenant.
+ //
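+      // For example (hypothetical node id and commit sha):
+      //
+      //   R_kgDOLKZwqg:1b6c9a361086ed93e6f1e67189e82d52de91c49b
+      //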
+ string sid (sd.repository_node_id + ':' + sd.report_sha);
+
+ // Create an unloaded CI tenant, doing nothing if one already exists
+ // (which could've been created by a head branch push or another PR
+ // sharing the same head commit). Note that the tenant's reference count
+ // is incremented in all cases.
+ //
+ // Note: use no delay since we need to (re)create the synthetic
+ // conclusion check run as soon as possible.
+ //
+ // Note that we use the create() API instead of start() since duplicate
+ // management is not available in start().
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications until (1) we load the tenant, (2) we cancel it, or (3)
+ // it gets archived after some timeout.
+ //
+ try
+ {
+ if (auto pr = create (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ tenant_service (sid, "ci-github", sd.json ()),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */,
+ duplicate_tenant_mode::ignore))
+ {
+ if (pr->second == duplicate_tenant_result::ignored)
+ {
+ // This PR is sharing a head commit with something else.
+ //
+ // If this is a local PR then it's probably the branch push, which
+ // is expected, so do nothing.
+ //
+          // If this is a remote PR then it could be anything (branch push,
+          // local PR, or another remote PR), which in turn means the CI
+          // result may end up being for the head commit rather than the
+          // merge commit. There is nothing we can do about it on our side
+          // (the user can enable the head-behind-base protection on their
+          // side).
+ //
+ if (sd.kind == service_data::remote)
+ {
+ l3 ([&]{trace << "remote pull request " << *sd.pr_node_id
+ << ": CI tenant already exists for " << sid;});
+ }
+ }
+ }
+ else
+ {
+ error << "pull request " << *sd.pr_node_id
+ << ": failed to create unloaded CI tenant "
+ << "with tenant_service id " << sid;
+
+ // Fall through to cancel.
+ }
+ }
+ catch (const runtime_error& e) // Database retries exhausted.
+ {
+ error << "pull request " << *sd.pr_node_id
+ << ": failed to create unloaded CI tenant "
+ << "with tenant_service id " << sid
+ << ": " << e.what ();
+
+ // Fall through to cancel.
+ }
+ }
+
+ // Cancel the pre-check tenant.
+ //
+ try
+ {
+ if (!cancel (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ ts.type,
+ ts.id))
+ {
+ // Should never happen (no such tenant).
+ //
+ error << "pull request " << *sd.pr_node_id
+ << ": failed to cancel pre-check tenant with tenant_service id "
+ << ts.id;
+ }
+ }
+ catch (const runtime_error& e) // Database retries exhausted.
+ {
+ error << "pull request " << *sd.pr_node_id
+ << ": failed to cancel pre-check tenant with tenant_service id "
+ << ts.id << ": " << e.what ();
+ }
+
+ return nullptr;
+ }
+ catch (const std::exception& e)
+ {
+ NOTIFICATION_DIAG (log_writer);
+ error << "pull request " << *sd.pr_node_id
+ << ": unhandled exception: " << e.what ();
+
+ // Cancel the pre-check tenant otherwise we could end up in an infinite
+ // loop (see top of function).
+ //
+ try
+ {
+ if (cancel (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ ts.type,
+ ts.id))
+ l3 ([&]{trace << "canceled pre-check tenant " << ts.id;});
+ }
+ catch (const runtime_error& e) // Database retries exhausted.
+ {
+ l3 ([&]{trace << "failed to cancel pre-check tenant " << ts.id << ": "
+ << e.what ();});
+ }
+
+ return nullptr;
+ }
+
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
+ build_unloaded_load (const string& tenant_id,
+ tenant_service&& ts,
+ service_data&& sd,
+ const diag_epilogue& log_writer) const noexcept
+ try
+ {
+ // NOTE: this function is noexcept and should not throw.
+ //
+ // In a few places where invalid_argument is unlikely to be thrown and/or
+  // would indicate that things are seriously broken, we let it propagate
+  // to the function catch block where the tenant will be canceled
+  // (otherwise we could end up in an infinite loop, e.g., because the
+  // problematic arguments won't change).
+
+ NOTIFICATION_DIAG (log_writer);
+
+ // Load the tenant, which is essentially the same for both branch push and
+ // PR. The overall plan is as follows:
+ //
+ // - Create synthetic conclusion check run with the in-progress state. If
+ // unable to, get called again to re-try.
+ //
+ // - Load the tenant. If unable to, fail the conclusion check run.
+ //
+ // - Update service data.
+ //
+
// Get a new installation access token if the current one has expired.
//
const gh_installation_access_token* iat (nullptr);
@@ -805,7 +1909,7 @@ namespace brep
if (system_clock::now () > sd.installation_access.expires_at)
{
- if (optional<string> jwt = generate_jwt (trace, error))
+ if (optional<string> jwt = generate_jwt (sd.app_id, trace, error))
{
new_iat = obtain_installation_access_token (sd.installation_id,
move (*jwt),
@@ -820,40 +1924,6 @@ namespace brep
if (iat == nullptr)
return nullptr; // Try again on the next call.
- auto make_iat_updater = [&new_iat, &error] ()
- {
- function<optional<string> (const tenant_service&)> r;
-
- if (new_iat)
- {
- r = [&error,
- iat = move (new_iat)] (const tenant_service& ts)
- -> optional<string>
- {
- // NOTE: this lambda may be called repeatedly (e.g., due to
- // transaction being aborted) and so should not move out of its
- // captures.
-
- service_data sd;
- try
- {
- sd = service_data (*ts.data);
- }
- catch (const invalid_argument& e)
- {
- error << "failed to parse service data: " << e;
- return nullopt;
- }
-
- sd.installation_access = *iat;
-
- return sd.json ();
- };
- }
-
- return r;
- };
-
// Create a synthetic check run with an in-progress state. Return the
// check run on success or nullopt on failure.
//
@@ -864,6 +1934,8 @@ namespace brep
check_run cr;
cr.name = move (name);
+ // Let unlikely invalid_argument propagate (see above).
+ //
if (gq_create_check_run (error,
cr,
iat->token,
@@ -890,14 +1962,16 @@ namespace brep
{
assert (!node_id.empty ());
- optional<gq_built_result> br (
- gq_built_result (gh_to_conclusion (rs, sd.warning_success),
- circle (rs) + ' ' + ucase (to_string (rs)),
- move (summary)));
+ // Let unlikely invalid_argument propagate (see above).
+ //
+ gq_built_result br (
+ make_built_result (rs, sd.warning_success, move (summary)));
check_run cr;
cr.name = name; // For display purposes only.
+ // Let unlikely invalid_argument propagate (see above).
+ //
if (gq_update_check_run (error,
cr,
iat->token,
@@ -907,194 +1981,73 @@ namespace brep
build_state::built,
move (br)))
{
+ assert (cr.state == build_state::built);
return cr;
}
else
return nullopt;
};
- // Synthetic merge check run node ID. Empty until created on the first
- // call or retrieved from service data on subsequent calls.
+ // (Re)create the synthetic conclusion check run first in order to convert
+ // a potentially completed check suite to building as early as possible.
//
- string merge_node_id;
-
- // True if this is the first call (or the merge commit couldn't be created
- // on the first call, in which case we just re-try by treating it as a
- // first call).
+ // Note that there is a window between receipt of a check_suite or
+ // pull_request event and the first bot/worker asking for a task, which
+ // could be substantial. We could probably (also) try to (re)create the
+    // conclusion check run in the webhook handler. @@ Maybe/later.
//
- bool first (!sd.merge_node_id);
+ string conclusion_node_id; // Conclusion check run node ID.
- // If this is the first call, (re)create the synthetic merge check run as
- // soon as possible to make sure the previous check suite, if any, is no
- // longer completed.
- //
- // Note that there is still a window between receipt of the pull_request
- // event and the first bot/worker asking for a task, which could be
- // substantial. We could probably (also) try to (re)create the merge
- // checkrun in the webhook. @@ Maybe/later.
- //
- if (first)
+ if (!sd.conclusion_node_id)
{
- if (auto cr = create_synthetic_cr (merge_check_run_name))
+ if (auto cr = create_synthetic_cr (conclusion_check_run_name))
{
l3 ([&]{trace << "created check_run { " << *cr << " }";});
- merge_node_id = move (*cr->node_id);
+ conclusion_node_id = move (*cr->node_id);
}
- else
- return make_iat_updater (); // Try again on the next call.
}
- else
- merge_node_id = *sd.merge_node_id;
- // Start/check PR mergeability.
- //
- optional<string> mc (
- gq_pull_request_mergeable (error, iat->token, ts.id)); // Merge commit.
+ const string& effective_conclusion_node_id (
+ sd.conclusion_node_id
+ ? *sd.conclusion_node_id
+ : conclusion_node_id);
- if (!mc || mc->empty ())
+ // Load the CI tenant if the conclusion check run was created.
+ //
+ if (!effective_conclusion_node_id.empty ())
{
- if (!mc) // No merge commit yet.
- {
- // If this is a subsequent notification and there is no merge commit,
- // then there is nothing to do.
- //
- if (!first)
- return make_iat_updater ();
+ string ru; // Repository URL.
- // Fall through to update service data.
- }
- else // Not mergeable.
+ // CI the test merge commit for remote PRs and the head commit for
+ // everything else (branch push or local PRs).
+ //
+ if (sd.kind == service_data::remote)
{
- // If the commit is not mergeable, cancel the CI request and fail the
- // merge check run.
+ // E.g. #pull/28/merge@1b6c9a361086ed93e6f1e67189e82d52de91c49b
//
- // Note that it feels like in this case we don't really need to create a
- // failed synthetic conclusion check run since the PR cannot be merged
- // anyway.
-
- if (auto cr = update_synthetic_cr (
- merge_node_id,
- merge_check_run_name,
- result_status::error,
- "GitHub is unable to create test merge commit"))
- {
- l3 ([&]{trace << "updated check_run { " << *cr << " }";});
-
- // Cancel the CI request.
- //
- // Ignore failure because this CI request may have been cancelled
- // elsewhere due to an update to the PR base or head branches.
- //
- if (!cancel (error, warn, &trace, *build_db_, ts.type, ts.id))
- l3 ([&]{trace << "CI for PR " << ts.id << " already cancelled";});
-
- return nullptr; // No need to update service data in this case.
- }
- else
- {
- // Don't cancel the CI request if the merge check run update failed
- // so that we can try again on the next call.
-
- if (!first)
- return make_iat_updater ();
-
- // Fall through to update service data.
- }
+ ru = sd.repository_clone_url + "#pull/" + to_string (*sd.pr_number) +
+ "/merge@" + sd.check_sha;
}
+ else
+ ru = sd.repository_clone_url + '#' + sd.check_sha;
- // This is a first notification, so record the merge check run in the
- // service data.
+ // Let unlikely invalid_argument propagate (see above).
//
- return [&error,
- iat = move (new_iat),
- mni = move (merge_node_id)] (const tenant_service& ts)
- -> optional<string>
- {
- // NOTE: this lambda may be called repeatedly (e.g., due to
- // transaction being aborted) and so should not move out of its
- // captures.
-
- service_data sd;
- try
- {
- sd = service_data (*ts.data);
- }
- catch (const invalid_argument& e)
- {
- error << "failed to parse service data: " << e;
- return nullopt;
- }
+ repository_location rl (move (ru), repository_type::git);
- if (iat)
- sd.installation_access = *iat;
-
- sd.merge_node_id = mni;
-
- return sd.json ();
- };
- }
-
- // If we are here, then it means we have a merge commit that we can load.
- //
- // Note that this can still be the first call (first=true).
- //
-
- // As a first step, (re)create the synthetic conclusion check run and then
- // change the merge check run state to success. Do it in this order so
- // that the check suite does not become completed.
-
- // Synthetic conclusion check run node ID. Empty until created on the
- // "second" call or retrieved from service data on subsequent calls.
- //
- string conclusion_node_id;
-
- // True if this is the first call after the merge commit became available,
- // which we will call the "second" call (or we couldn't create the
- // conclusion check run on the first such call, in which case we just
- // re-try by treating it as a "second" call).
- //
- bool second (!sd.conclusion_node_id);
-
- if (second)
- {
- if (auto cr = create_synthetic_cr (conclusion_check_run_name))
- {
- l3 ([&]{trace << "created check_run { " << *cr << " }";});
-
- conclusion_node_id = move (*cr->node_id);
- }
- }
- else
- conclusion_node_id = *sd.conclusion_node_id;
-
- if (!conclusion_node_id.empty ()) // Conclusion check run was created.
- {
- // Update merge check run to successful.
- //
- if (auto cr = update_synthetic_cr (merge_node_id,
- merge_check_run_name,
- result_status::success,
- "GitHub created test merge commit"))
+ try
{
- l3 ([&]{trace << "updated check_run { " << *cr << " }";});
-
- // Load the CI request.
- //
- // Example repository URL fragment:
- //
- // #pull/28/merge@1b6c9a361086ed93e6f1e67189e82d52de91c49b
- //
- repository_location rl (*sd.repository_clone_url + "#pull/" +
- to_string (*sd.pr_number) + "/merge@" + *mc,
- repository_type::git);
-
- optional<start_result> r (
- load (error, warn, &trace, *build_db_, move (ts), rl));
+ optional<start_result> r (load (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ move (ts),
+ move (rl)));
if (!r || r->status != 200)
{
- if (auto cr = update_synthetic_cr (conclusion_node_id,
+ // Let unlikely invalid_argument propagate (see above).
+ //
+ if (auto cr = update_synthetic_cr (effective_conclusion_node_id,
conclusion_check_run_name,
result_status::error,
to_check_run_summary (r)))
@@ -1104,29 +2057,40 @@ namespace brep
else
{
// Nothing really we can do in this case since we will not receive
- // any further notifications.
+ // any further notifications. Log the error as a last resort.
+
+ error << "failed to load CI tenant " << ts.id
+ << " and unable to update conclusion";
}
return nullptr; // No need to update service data in this case.
}
}
- else
+ catch (const runtime_error& e) // Database retries exhausted.
{
- // Don't load the CI request if the merge check run update failed so
- // that we can try again on the next call.
+ error << "failed to load CI tenant " << ts.id << ": " << e.what ();
+
+ // Fall through to retry on next call.
}
}
+ if (!new_iat && conclusion_node_id.empty ())
+ return nullptr; // Nothing to save (but potentially retry on next call).
+
return [&error,
+ tenant_id,
iat = move (new_iat),
- mni = (first ? move (merge_node_id) : string ()),
- cni = (second ? move (conclusion_node_id) : string ())]
- (const tenant_service& ts) -> optional<string>
+ cni = move (conclusion_node_id)]
+ (const string& ti,
+ const tenant_service& ts) -> optional<string>
{
// NOTE: this lambda may be called repeatedly (e.g., due to
// transaction being aborted) and so should not move out of its
// captures.
+ if (tenant_id != ti)
+ return nullopt; // Do nothing if the tenant has been replaced.
+
service_data sd;
try
{
@@ -1141,15 +2105,34 @@ namespace brep
if (iat)
sd.installation_access = *iat;
- if (!mni.empty ())
- sd.merge_node_id = mni;
-
if (!cni.empty ())
sd.conclusion_node_id = cni;
return sd.json ();
};
}
+ catch (const std::exception& e)
+ {
+ NOTIFICATION_DIAG (log_writer);
+ error << "CI tenant " << ts.id << ": unhandled exception: " << e.what ();
+
+ // Cancel the tenant otherwise we could end up in an infinite loop (see
+ // top of function).
+ //
+ try
+ {
+ if (cancel (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_, ts.type, ts.id))
+ l3 ([&]{trace << "canceled CI tenant " << ts.id;});
+ }
+ catch (const runtime_error& e) // Database retries exhausted.
+ {
+ l3 ([&]{trace << "failed to cancel CI tenant " << ts.id
+ << ": " << e.what ();});
+ }
+
+ return nullptr;
+ }
// Build state change notifications (see tenant-services.hxx for
  // background). Mapping our state transitions to GitHub poses multiple
@@ -1160,7 +2143,9 @@ namespace brep
// them when notifying GitHub. The first is not important (we expect the
// state to go back to building shortly). The second should normally not
// happen and would mean that a completed check suite may go back on its
- // conclusion (which would be pretty confusing for the user).
+ // conclusion (which would be pretty confusing for the user). Note that
+ // the ->queued state transition of a check run rebuild triggered by
+ // us is handled directly in handle_check_run_rerequest().
//
// So, for GitHub notifications, we only have the following linear
// transition sequence:
@@ -1237,13 +2222,17 @@ namespace brep
// if we have node_id, then we update, otherwise, we create (potentially
// overriding the check run created previously).
//
- function<optional<string> (const tenant_service&)> ci_github::
- build_queued (const tenant_service& ts,
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
+ build_queued (const string& tenant_id,
+ const tenant_service& ts,
const vector<build>& builds,
optional<build_state> istate,
const build_queued_hints& hs,
const diag_epilogue& log_writer) const noexcept
+ try
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
service_data sd;
@@ -1257,6 +2246,13 @@ namespace brep
return nullptr;
}
+ // Ignore attempts to add new builds to a completed check suite. This can
+ // happen, for example, if a new build configuration is added before
+ // the tenant is archived.
+ //
+ if (sd.completed)
+ return nullptr;
+
// The builds for which we will be creating check runs.
//
vector<reference_wrapper<const build>> bs;
@@ -1266,7 +2262,7 @@ namespace brep
//
for (const build& b: builds)
{
- string bid (gh_check_run_name (b)); // Full build ID.
+ string bid (gh_check_run_name (b)); // Full build id.
if (const check_run* scr = sd.find_check_run (bid))
{
@@ -1288,6 +2284,8 @@ namespace brep
else
{
// Ignore interrupted.
+ //
+ assert (*istate == build_state::building);
}
}
else
@@ -1296,11 +2294,14 @@ namespace brep
//
bs.push_back (b);
- crs.emplace_back (move (bid),
- gh_check_run_name (b, &hs),
- nullopt, /* node_id */
- build_state::queued,
- false /* state_synced */);
+ crs.push_back (
+ check_run {move (bid),
+ gh_check_run_name (b, &hs),
+ nullopt, /* node_id */
+ build_state::queued,
+ false /* state_synced */,
+ nullopt /* status */,
+ nullopt /* details_url */});
}
}
@@ -1314,7 +2315,7 @@ namespace brep
if (system_clock::now () > sd.installation_access.expires_at)
{
- if (optional<string> jwt = generate_jwt (trace, error))
+ if (optional<string> jwt = generate_jwt (sd.app_id, trace, error))
{
new_iat = obtain_installation_access_token (sd.installation_id,
move (*jwt),
@@ -1332,31 +2333,39 @@ namespace brep
//
if (iat != nullptr)
{
- // Create a check_run for each build.
+ // Create a check_run for each build as a single request.
+ //
+ // Let unlikely invalid_argument propagate.
//
if (gq_create_check_runs (error,
crs,
iat->token,
- sd.repository_node_id, sd.report_sha,
- build_state::queued))
+ sd.repository_node_id, sd.report_sha))
{
for (const check_run& cr: crs)
{
+ // We can only create a check run in the queued state.
+ //
assert (cr.state == build_state::queued);
l3 ([&]{trace << "created check_run { " << cr << " }";});
}
}
}
- return [bs = move (bs),
+ return [tenant_id,
+ bs = move (bs),
iat = move (new_iat),
crs = move (crs),
error = move (error),
- warn = move (warn)] (const tenant_service& ts) -> optional<string>
+ warn = move (warn)] (const string& ti,
+ const tenant_service& ts) -> optional<string>
{
// NOTE: this lambda may be called repeatedly (e.g., due to transaction
// being aborted) and so should not move out of its captures.
+ if (tenant_id != ti)
+ return nullopt; // Do nothing if the tenant has been replaced.
+
service_data sd;
try
{
@@ -1396,12 +2405,24 @@ namespace brep
return sd.json ();
};
}
+ catch (const std::exception& e)
+ {
+ NOTIFICATION_DIAG (log_writer);
+
+ error << "CI tenant " << ts.id << ": unhandled exception: " << e.what ();
- function<optional<string> (const tenant_service&)> ci_github::
- build_building (const tenant_service& ts,
+ return nullptr;
+ }
+
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
+ build_building (const string& tenant_id,
+ const tenant_service& ts,
const build& b,
const diag_epilogue& log_writer) const noexcept
+ try
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
service_data sd;
@@ -1415,14 +2436,19 @@ namespace brep
return nullptr;
}
+ // Similar to build_queued(), ignore attempts to add new builds to a
+ // completed check suite.
+ //
+ if (sd.completed)
+ return nullptr;
+
optional<check_run> cr; // Updated check run.
- string bid (gh_check_run_name (b)); // Full Build ID.
+ string bid (gh_check_run_name (b)); // Full build id.
if (check_run* scr = sd.find_check_run (bid)) // Stored check run.
{
// Update the check run if it exists on GitHub and the queued
- // notification succeeded and updated the service data, otherwise do
- // nothing.
+ // notification updated the service data, otherwise do nothing.
//
if (scr->state == build_state::queued)
{
@@ -1454,7 +2480,7 @@ namespace brep
if (system_clock::now () > sd.installation_access.expires_at)
{
- if (optional<string> jwt = generate_jwt (trace, error))
+ if (optional<string> jwt = generate_jwt (sd.app_id, trace, error))
{
new_iat = obtain_installation_access_token (sd.installation_id,
move (*jwt),
@@ -1472,6 +2498,8 @@ namespace brep
//
if (iat != nullptr)
{
+ // Let unlikely invalid_argument propagate.
+ //
if (gq_update_check_run (error,
*cr,
iat->token,
@@ -1487,24 +2515,27 @@ namespace brep
if (cr->state == build_state::built)
{
warn << "check run " << bid << ": already in built state on GitHub";
-
return nullptr;
}
assert (cr->state == build_state::building);
-
l3 ([&]{trace << "updated check_run { " << *cr << " }";});
}
}
- return [iat = move (new_iat),
+ return [tenant_id,
+ iat = move (new_iat),
cr = move (*cr),
error = move (error),
- warn = move (warn)] (const tenant_service& ts) -> optional<string>
+ warn = move (warn)] (const string& ti,
+ const tenant_service& ts) -> optional<string>
{
// NOTE: this lambda may be called repeatedly (e.g., due to transaction
// being aborted) and so should not move out of its captures.
+ if (tenant_id != ti)
+ return nullopt; // Do nothing if the tenant has been replaced.
+
service_data sd;
try
{
@@ -1539,14 +2570,31 @@ namespace brep
return sd.json ();
};
}
+ catch (const std::exception& e)
+ {
+ NOTIFICATION_DIAG (log_writer);
+
+ string bid (gh_check_run_name (b)); // Full build id.
- function<optional<string> (const tenant_service&)> ci_github::
- build_built (const tenant_service& ts,
+ error << "check run " << bid << ": unhandled exception: " << e.what();
+
+ return nullptr;
+ }
+
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
+ build_built (const string& tenant_id,
+ const tenant_service& ts,
const build& b,
const diag_epilogue& log_writer) const noexcept
+ try
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
+ // @@ TODO Include ts.id in diagnostics? Check run build ids alone seem
+    //    kind of meaningless. Log lines get pretty long this way, however.
+
service_data sd;
try
{
@@ -1558,13 +2606,23 @@ namespace brep
return nullptr;
}
- // Absent if have any unbuilt check runs.
+ // Similar to build_queued(), ignore attempts to add new builds to a
+ // completed check suite.
+ //
+ if (sd.completed)
+ return nullptr;
+
+ // Here we need to update the state of this check run and, if there are no
+ // more unbuilt ones, update the synthetic conclusion check run and mark
+ // the check suite as completed.
+ //
+ // Absent means we still have unbuilt check runs.
//
optional<result_status> conclusion (*b.status);
check_run cr; // Updated check run.
{
- string bid (gh_check_run_name (b)); // Full Build ID.
+ string bid (gh_check_run_name (b)); // Full build id.
optional<check_run> scr;
for (check_run& cr: sd.check_runs)
@@ -1578,6 +2636,8 @@ namespace brep
{
if (cr.state == build_state::built)
{
+ assert (cr.status);
+
if (conclusion)
*conclusion |= *cr.status;
}
@@ -1602,8 +2662,6 @@ namespace brep
if (scr->state == build_state::built)
return nullptr;
- // Don't move from scr because we search sd.check_runs below.
- //
cr = move (*scr);
}
else
@@ -1611,6 +2669,9 @@ namespace brep
warn << "check run " << bid << ": out of order built notification; "
<< "no check run state in service data";
+ // Note that we have no hints here and so have to use the full build
+      // id for the name.
+ //
cr.build_id = move (bid);
cr.name = cr.build_id;
}
@@ -1625,7 +2686,7 @@ namespace brep
if (system_clock::now () > sd.installation_access.expires_at)
{
- if (optional<string> jwt = generate_jwt (trace, error))
+ if (optional<string> jwt = generate_jwt (sd.app_id, trace, error))
{
new_iat = obtain_installation_access_token (sd.installation_id,
move (*jwt),
@@ -1637,6 +2698,8 @@ namespace brep
else
iat = &sd.installation_access;
+ bool completed (false);
+
// Note: we treat the failure to obtain the installation access token the
// same as the failure to notify GitHub (state is updated but not marked
// synced).
@@ -1650,6 +2713,11 @@ namespace brep
{
using namespace web::xhtml;
+ // Note: let all serialization exceptions propagate. The XML
+ // serialization code can throw bad_alloc or xml::serialization in
+ // case of I/O failures, but we're serializing to a string stream so
+ // both exceptions are unlikely.
+ //
ostringstream os;
xml::serializer s (os, "check_run_summary");
@@ -1740,14 +2808,13 @@ namespace brep
sm = os.str ();
}
- gq_built_result br (gh_to_conclusion (*b.status, sd.warning_success),
- circle (*b.status) + ' ' +
- ucase (to_string (*b.status)),
- move (sm));
+ gq_built_result br (
+ make_built_result (*b.status, sd.warning_success, move (sm)));
if (cr.node_id)
{
- // Update existing check run to built.
+ // Update existing check run to built. Let unlikely invalid_argument
+ // propagate.
//
if (gq_update_check_run (error,
cr,
@@ -1759,16 +2826,15 @@ namespace brep
move (br)))
{
assert (cr.state == build_state::built);
-
l3 ([&]{trace << "updated check_run { " << cr << " }";});
}
}
else
{
- // Create new check run.
+ // Create new check run. Let unlikely invalid_argument propagate.
//
// Note that we don't have build hints so will be creating this check
- // run with the full build ID as name. In the unlikely event that an
+      // run with the full build id as the name. In the unlikely event that an
// out of order build_queued() were to run before we've saved this
      // check run to the service data, it will create another check run with
// the shortened name which will never get to the built state.
@@ -1783,20 +2849,14 @@ namespace brep
move (br)))
{
assert (cr.state == build_state::built);
-
l3 ([&]{trace << "created check_run { " << cr << " }";});
}
}
- if (cr.state == build_state::built)
+ if (cr.state_synced)
{
- // Check run was created/updated successfully to built.
- //
- // @@ TMP Feels like this should also be done inside
- // gq_{create,update}_check_run() -- where cr.state is set if the
- // create/update succeeds -- but I think we didn't want to pass a
- // result_status into a gq_ function because converting to a GitHub
- // conclusion/title/summary is reasonably complicated.
+      // Check run was created/updated successfully to built (with the
+      // status we specified).
//
cr.status = b.status;
@@ -1806,14 +2866,11 @@ namespace brep
{
assert (sd.conclusion_node_id);
- // Update the conclusion check run with success.
- //
result_status rs (*conclusion);
- optional<gq_built_result> br (
- gq_built_result (gh_to_conclusion (rs, sd.warning_success),
- circle (rs) + ' ' + ucase (to_string (rs)),
- "All configurations are built"));
+ gq_built_result br (
+ make_built_result (rs, sd.warning_success,
+ "All configurations are built"));
check_run cr;
@@ -1822,6 +2879,8 @@ namespace brep
cr.node_id = *sd.conclusion_node_id;
cr.name = conclusion_check_run_name;
+ // Let unlikely invalid_argument propagate.
+ //
if (gq_update_check_run (error,
cr,
iat->token,
@@ -1831,28 +2890,37 @@ namespace brep
build_state::built,
move (br)))
{
- l3 ([&]{trace << "updated check_run { " << cr << " }";});
+ assert (cr.state == build_state::built);
+ l3 ([&]{trace << "updated conclusion check_run { " << cr << " }";});
}
else
{
// Nothing we can do here except log the error.
//
- error << "check suite " << ts.id
+ error << "tenant_service id " << ts.id
<< ": unable to update conclusion check run "
<< *sd.conclusion_node_id;
}
+
+ completed = true;
}
}
}
- return [iat = move (new_iat),
+ return [tenant_id,
+ iat = move (new_iat),
cr = move (cr),
+ completed = completed,
error = move (error),
- warn = move (warn)] (const tenant_service& ts) -> optional<string>
+ warn = move (warn)] (const string& ti,
+ const tenant_service& ts) -> optional<string>
{
// NOTE: this lambda may be called repeatedly (e.g., due to transaction
// being aborted) and so should not move out of its captures.
+ if (tenant_id != ti)
+ return nullopt; // Do nothing if the tenant has been replaced.
+
service_data sd;
try
{
@@ -1867,63 +2935,77 @@ namespace brep
if (iat)
sd.installation_access = *iat;
- if (check_run* scr = sd.find_check_run (cr.build_id))
+ // Only update the check_run state in service data if it matches the
+ // state (specifically, status) on GitHub.
+ //
+ if (cr.state_synced)
{
- // This will most commonly generate a duplicate warning (see above).
- // We could save the old state and only warn if it differs but let's
- // not complicate things for now.
- //
+ if (check_run* scr = sd.find_check_run (cr.build_id))
+ {
+ // This will most commonly generate a duplicate warning (see above).
+ // We could save the old state and only warn if it differs but let's
+ // not complicate things for now.
+ //
#if 0
- if (scr->state != build_state::building)
+ if (scr->state != build_state::building)
+ {
+ warn << "check run " << cr.build_id << ": out of order built "
+ << "notification service data update; existing state: "
+ << scr->state_string ();
+ }
+#endif
+ *scr = cr; // Note: also updates node id if created.
+ }
+ else
+ sd.check_runs.push_back (cr);
+
+ if (bool c = completed)
{
- warn << "check run " << cr.build_id << ": out of order built "
- << "notification service data update; existing state: "
- << scr->state_string ();
+ // Note that this can be racy: while we calculated the completed
+ // value based on the snapshot of the service data, it could have
+ // been changed (e.g., by handle_check_run_rerequest()). So we
+ // re-calculate it based on the check run states and only update if
+ // it matches. Otherwise, we log an error.
+ //
+ for (const check_run& scr: sd.check_runs)
+ {
+ if (scr.state != build_state::built)
+ {
+ string sid (sd.repository_node_id + ':' + sd.report_sha);
+
+ error << "tenant_service id " << sid
+ << ": out of order built notification service data update; "
+ << "check suite is no longer complete";
+
+ c = false;
+ break;
+ }
+ }
+
+ if (c)
+ sd.completed = true;
}
-#endif
- *scr = cr;
}
- else
- sd.check_runs.push_back (cr);
return sd.json ();
};
}
-
- bool ci_github::
- create_pull_request_ci (const basic_mark& error,
- const basic_mark& warn,
- const basic_mark& trace,
- const service_data& sd,
- const string& nid,
- bool cf) const
+ catch (const std::exception& e)
{
- // Cancel the existing CI request if asked to do so. Ignore failure
- // because the request may already have been cancelled for other reasons.
- //
- if (cf)
- {
- if (!cancel (error, warn, &trace, *build_db_, "ci-github", nid))
- l3 ([&] {trace << "unable to cancel CI for pull request " << nid;});
- }
+ NOTIFICATION_DIAG (log_writer);
- // Create a new unloaded CI request.
- //
- tenant_service ts (nid, "ci-github", sd.json ());
+ string bid (gh_check_run_name (b)); // Full build id.
- // Note: use no delay since we need to (re)create the synthetic merge
- // check run as soon as possible.
- //
- return create (error, warn, &trace,
- *build_db_, move (ts),
- chrono::seconds (30) /* interval */,
- chrono::seconds (0) /* delay */)
- .has_value ();
+ error << "check run " << bid << ": unhandled exception: " << e.what();
+
+ return nullptr;
}
string ci_github::
details_url (const build& b) const
{
+ // This code is based on build_force_url() in mod/build.cxx.
+ //
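+    // The resulting URL looks along these lines (hypothetical values):
+    //
+    //   https://ci.example.org/@d2586f57-21dc-40b7-beb2-6517ad7917dd
+    //     ?builds=libfoo&pv=1.2.3&tg=x86_64-linux-gnu&tc=linux_debian_12
+    //     &pc=default&th=public-0.17.0
+    //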
return options_->host () +
"/@" + b.tenant +
"?builds=" + mime_url_encode (b.package_name.string ()) +
@@ -1931,23 +3013,130 @@ namespace brep
"&tg=" + mime_url_encode (b.target.string ()) +
"&tc=" + mime_url_encode (b.target_config_name) +
"&pc=" + mime_url_encode (b.package_config_name) +
- "&th=" + mime_url_encode (b.toolchain_version.string ());
+ "&th=" + mime_url_encode (b.toolchain_name) + '-' +
+ b.toolchain_version.string ();
+ }
+
+ static optional<build_id>
+ parse_details_url (const string& details_url)
+ try
+ {
+ // See details_url() above for an idea of what the URL looks like.
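+    //
+    // Note that this is essentially the inverse of details_url() above: for
+    // a well-formed URL it should recover the build_id the URL was built
+    // from.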
+
+ url u (details_url);
+
+ build_id r;
+
+ // Extract the tenant from the URL path.
+ //
+ // Example path: @d2586f57-21dc-40b7-beb2-6517ad7917dd
+ //
+ if (!u.path || u.path->size () != 37 || (*u.path)[0] != '@')
+ return nullopt;
+
+ r.package.tenant = u.path->substr (1);
+
+ // Extract the rest of the build_id members from the URL query.
+ //
+ if (!u.query)
+ return nullopt;
+
+ bool pn (false), pv (false), tg (false), tc (false), pc (false),
+ th (false);
+
+ // This URL query parsing code is based on
+ // web::apache::request::parse_url_parameters().
+ //
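+    // For example, given the (hypothetical) query `builds=libfoo&pv=1.2.3`,
+    // on the first iteration qp points at `builds=...`, vp at the `=`, and
+    // ep at the `&`; the name is decoded from the [qp, vp) range and the
+    // value is taken from between the `=` and the `&`.
+    //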
+ for (const char* qp (u.query->c_str ()); qp != nullptr; )
+ {
+ const char* vp (strchr (qp, '='));
+ const char* ep (strchr (qp, '&'));
+
+ if (vp == nullptr || (ep != nullptr && ep < vp))
+ return nullopt; // Missing value.
+
+ string n (mime_url_decode (qp, vp)); // Name.
+
+ ++vp; // Skip '='
+
+ const char* ve (ep != nullptr ? ep : vp + strlen (vp)); // Value end.
+
+ // Get the value as-is or URL-decode it.
+ //
+ auto rawval = [vp, ve] () { return string (vp, ve); };
+ auto decval = [vp, ve] () { return mime_url_decode (vp, ve); };
+
+ auto make_version = [] (string&& v)
+ {
+ return canonical_version (brep::version (move (v)));
+ };
+
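+      // If the parameter name matches s, set the corresponding "seen" flag
+      // and return true.
+      //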
+ auto c = [&n] (bool& b, const char* s)
+ {
+ return n == s ? (b = true) : false;
+ };
+
+ if (c (pn, "builds")) r.package.name = package_name (decval ());
+ else if (c (pv, "pv")) r.package.version = make_version (rawval ());
+ else if (c (tg, "tg")) r.target = target_triplet (decval ());
+ else if (c (tc, "tc")) r.target_config_name = decval ();
+ else if (c (pc, "pc")) r.package_config_name = decval ();
+ else if (c (th, "th"))
+ {
+ // Toolchain name and version. E.g. "public-0.17.0"
+
+ string v (rawval ());
+
+ // Note: parsing code based on mod/mod-builds.cxx.
+ //
+ size_t p (v.find_first_of ('-'));
+ if (p >= v.size () - 1)
+ return nullopt; // Invalid format.
+
+ r.toolchain_name = v.substr (0, p);
+ r.toolchain_version = make_version (v.substr (p + 1));
+ }
+
+ qp = ep != nullptr ? ep + 1 : nullptr;
+ }
+
+ if (!pn || !pv || !tg || !tc || !pc || !th)
+ return nullopt; // Fail if any query parameters are absent.
+
+ return r;
+ }
+ catch (const invalid_argument&) // Invalid url, brep::version, etc.
+ {
+ return nullopt;
}
optional<string> ci_github::
- generate_jwt (const basic_mark& trace,
+ generate_jwt (const string& app_id,
+ const basic_mark& trace,
const basic_mark& error) const
{
string jwt;
try
{
+ // Look up the private key path for the app id and fail if not found.
+ //
+ const map<string, dir_path>& pks (
+ options_->ci_github_app_id_private_key ());
+
+ auto pk (pks.find (app_id));
+ if (pk == pks.end ())
+ {
+ error << "unable to generate JWT: "
+ << "no private key configured for app id " << app_id;
+ return nullopt;
+ }
+
// Set token's "issued at" time 60 seconds in the past to combat clock
// drift (as recommended by GitHub).
//
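      // Presumably the resulting token carries the standard GitHub App JWT
      // claims, along these lines (hypothetical values):
      //
      //   {"iss": "12345", "iat": 1700000000, "exp": 1700000600}
      //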
jwt = brep::generate_jwt (
*options_,
- options_->ci_github_app_private_key (),
- to_string (options_->ci_github_app_id ()),
+ pk->second, app_id,
chrono::seconds (options_->ci_github_jwt_validity_period ()),
chrono::seconds (60));
@@ -2003,7 +3192,7 @@ namespace brep
// example.
//
optional<gh_installation_access_token> ci_github::
- obtain_installation_access_token (uint64_t iid,
+ obtain_installation_access_token (const string& iid,
string jwt,
const basic_mark& error) const
{
@@ -2012,7 +3201,7 @@ namespace brep
{
// API endpoint.
//
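    // E.g. app/installations/12345678/access_tokens (hypothetical
    // installation id).
    //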
- string ep ("app/installations/" + to_string (iid) + "/access_tokens");
+ string ep ("app/installations/" + iid + "/access_tokens");
uint16_t sc (
github_post (iat, ep, strings {"Authorization: Bearer " + jwt}));
@@ -2038,6 +3227,8 @@ namespace brep
//
iat.expires_at -= chrono::minutes (5);
}
+ // gh_installation_access_token (via github_post())
+ //
catch (const json::invalid_json_input& e)
{
// Note: e.name is the GitHub API endpoint.
@@ -2047,12 +3238,12 @@ namespace brep
<< e.position << ", error: " << e;
return nullopt;
}
- catch (const invalid_argument& e)
+ catch (const invalid_argument& e) // github_post()
{
error << "malformed header(s) in response: " << e;
return nullopt;
}
- catch (const system_error& e)
+ catch (const system_error& e) // github_post()
{
error << "unable to get installation access token (errno=" << e.code ()
<< "): " << e.what ();
diff --git a/mod/mod-ci-github.hxx b/mod/mod-ci-github.hxx
index 489aac7..1e5f24f 100644
--- a/mod/mod-ci-github.hxx
+++ b/mod/mod-ci-github.hxx
@@ -42,36 +42,53 @@ namespace brep
virtual const cli::options&
cli_options () const {return options::ci_github::description ();}
- virtual function<optional<string> (const tenant_service&)>
- build_unloaded (tenant_service&&,
+ virtual function<optional<string> (const string&, const tenant_service&)>
+ build_unloaded (const string& tenant_id,
+ tenant_service&&,
const diag_epilogue& log_writer) const noexcept override;
- virtual function<optional<string> (const tenant_service&)>
- build_queued (const tenant_service&,
+ function<optional<string> (const string&, const tenant_service&)>
+ build_unloaded_pre_check (tenant_service&&,
+ service_data&&,
+ const diag_epilogue&) const noexcept;
+
+ function<optional<string> (const string&, const tenant_service&)>
+ build_unloaded_load (const string& tenant_id,
+ tenant_service&&,
+ service_data&&,
+ const diag_epilogue&) const noexcept;
+
+ virtual function<optional<string> (const string&, const tenant_service&)>
+ build_queued (const string& tenant_id,
+ const tenant_service&,
const vector<build>&,
optional<build_state> initial_state,
const build_queued_hints&,
const diag_epilogue& log_writer) const noexcept override;
- virtual function<optional<string> (const tenant_service&)>
- build_building (const tenant_service&, const build&,
+ virtual function<optional<string> (const string&, const tenant_service&)>
+ build_building (const string& tenant_id,
+ const tenant_service&,
+ const build&,
const diag_epilogue& log_writer) const noexcept override;
- virtual function<optional<string> (const tenant_service&)>
- build_built (const tenant_service&, const build&,
+ virtual function<optional<string> (const string&, const tenant_service&)>
+ build_built (const string& tenant_id,
+ const tenant_service&,
+ const build&,
const diag_epilogue& log_writer) const noexcept override;
private:
virtual void
init (cli::scanner&);
- // Handle the check_suite event `requested` and `rerequested` actions.
+ // Handle push events (branch push).
//
// If warning_success is true, then map result_status::warning to SUCCESS
// and to FAILURE otherwise.
//
bool
- handle_check_suite_request (gh_check_suite_event, bool warning_success);
+ handle_branch_push (gh_push_event, bool warning_success);
// Handle the pull_request event `opened` and `synchronize` actions.
//
@@ -81,24 +98,29 @@ namespace brep
bool
handle_pull_request (gh_pull_request_event, bool warning_success);
- // Create an unloaded CI request for a pull request. If `cancel_first` is
- // true, cancel its existing CI request first.
+ // Handle the check_suite event `rerequested` action.
+ //
+ // If warning_success is true, then map result_status::warning to SUCCESS
+ // and to FAILURE otherwise.
+ //
+ bool
+ handle_check_suite_rerequest (gh_check_suite_event, bool warning_success);
+
+ // Handle the check_suite event `completed` action.
//
- // Return true if an unloaded CI request was created. Ignore failure to
- // cancel because the CI request may already have been cancelled for other
- // reasons.
+ // If warning_success is true, then map result_status::warning to SUCCESS
+ // and to FAILURE otherwise.
+ //
+ bool
+ handle_check_suite_completed (gh_check_suite_event, bool warning_success);
+
+ // Handle the check_run event `rerequested` action.
//
- // After this call we will start getting the build_unloaded()
- // notifications until (1) we load the request, (2) we cancel it, or (3)
- // it gets archived after some timeout.
+ // If warning_success is true, then map result_status::warning to SUCCESS
+ // and to FAILURE otherwise.
//
bool
- create_pull_request_ci (const basic_mark& error,
- const basic_mark& warn,
- const basic_mark& trace,
- const service_data&,
- const string& pull_request_node_id,
- bool cancel_first) const;
+ handle_check_run_rerequest (const gh_check_run_event&, bool warning_success);
// Build a check run details_url for a build.
//
@@ -106,12 +128,16 @@ namespace brep
details_url (const build&) const;
optional<string>
- generate_jwt (const basic_mark& trace, const basic_mark& error) const;
+ generate_jwt (const string& app_id,
+ const basic_mark& trace,
+ const basic_mark& error) const;
- // Authenticate to GitHub as an app installation.
+ // Authenticate to GitHub as an app installation. Return the installation
+ // access token (IAT). Issue diagnostics and return nullopt if something
+ // goes wrong.
//
optional<gh_installation_access_token>
- obtain_installation_access_token (uint64_t install_id,
+ obtain_installation_access_token (const string& install_id,
string jwt,
const basic_mark& error) const;
@@ -119,6 +145,8 @@ namespace brep
shared_ptr<options::ci_github> options_;
tenant_service_map& tenant_service_map_;
+
+ string webhook_secret_;
};
}
diff --git a/mod/mod-ci.cxx b/mod/mod-ci.cxx
index 8c47bc4..46fbf6a 100644
--- a/mod/mod-ci.cxx
+++ b/mod/mod-ci.cxx
@@ -422,8 +422,10 @@ handle (request& rq, response& rs)
}
#ifdef BREP_CI_TENANT_SERVICE
-function<optional<string> (const brep::tenant_service&)> brep::ci::
-build_queued (const tenant_service&,
+function<optional<string> (const string& tenant_id,
+ const brep::tenant_service&)> brep::ci::
+build_queued (const string& /*tenant_id*/,
+ const tenant_service&,
const vector<build>& bs,
optional<build_state> initial_state,
const build_queued_hints& hints,
@@ -437,7 +439,8 @@ build_queued (const tenant_service&,
<< hints.single_package_version << ' '
<< hints.single_package_config;});
- return [&bs, initial_state] (const tenant_service& ts)
+ return [&bs, initial_state] (const string& tenant_id,
+ const tenant_service& ts)
{
optional<string> r (ts.data);
@@ -446,6 +449,7 @@ build_queued (const tenant_service&,
string s ((!initial_state
? "queued "
: "queued " + to_string (*initial_state) + ' ') +
+ tenant_id + '/' +
b.package_name.string () + '/' +
b.package_version.string () + '/' +
b.target.string () + '/' +
@@ -467,14 +471,18 @@ build_queued (const tenant_service&,
};
}
-function<optional<string> (const brep::tenant_service&)> brep::ci::
-build_building (const tenant_service&,
+function<optional<string> (const string& tenant_id,
+ const brep::tenant_service&)> brep::ci::
+build_building (const string& /*tenant_id*/,
+ const tenant_service&,
const build& b,
const diag_epilogue&) const noexcept
{
- return [&b] (const tenant_service& ts)
+ return [&b] (const string& tenant_id,
+ const tenant_service& ts)
{
string s ("building " +
+ tenant_id + '/' +
b.package_name.string () + '/' +
b.package_version.string () + '/' +
b.target.string () + '/' +
@@ -487,14 +495,17 @@ build_building (const tenant_service&,
};
}
-function<optional<string> (const brep::tenant_service&)> brep::ci::
-build_built (const tenant_service&,
+function<optional<string> (const string& tenant_id,
+ const brep::tenant_service&)> brep::ci::
+build_built (const string& /*tenant_id*/,
+ const tenant_service&,
const build& b,
const diag_epilogue&) const noexcept
{
- return [&b] (const tenant_service& ts)
+ return [&b] (const string& tenant_id, const tenant_service& ts)
{
string s ("built " +
+ tenant_id + '/' +
b.package_name.string () + '/' +
b.package_version.string () + '/' +
b.target.string () + '/' +
@@ -590,7 +601,10 @@ handle (request& rq, response& rs)
if (tid.empty ())
throw invalid_request (400, "invalid CI request id");
- if (!cancel (error, warn, verb_ ? &trace : nullptr, reason, *build_db_, tid))
+ if (!cancel (error, warn, verb_ ? &trace : nullptr,
+ reason,
+ *build_db_, retry_,
+ tid))
throw invalid_request (400, "unknown CI request id");
// We have all the data, so don't buffer the response content.
diff --git a/mod/mod-ci.hxx b/mod/mod-ci.hxx
index e4a343c..132b5b0 100644
--- a/mod/mod-ci.hxx
+++ b/mod/mod-ci.hxx
@@ -71,26 +71,34 @@ namespace brep
cli_options () const override {return options::ci::description ();}
#ifdef BREP_CI_TENANT_SERVICE
- virtual function<optional<string> (const tenant_service&)>
- build_queued (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_queued (const string& tenant_id,
+ const tenant_service&,
const vector<build>&,
optional<build_state> initial_state,
const build_queued_hints&,
const diag_epilogue& log_writer) const noexcept override;
- virtual function<optional<string> (const tenant_service&)>
- build_building (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_building (const string& tenant_id,
+ const tenant_service&,
const build&,
const diag_epilogue& log_writer) const noexcept override;
- virtual function<optional<string> (const tenant_service&)>
- build_built (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_built (const string& tenant_id,
+ const tenant_service&,
const build&,
const diag_epilogue& log_writer) const noexcept override;
#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
- virtual function<optional<string> (const tenant_service&)>
- build_unloaded (tenant_service&&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_unloaded (const string& tenant_id,
+ tenant_service&&,
const diag_epilogue& log_writer) const noexcept override;
#endif
#endif
diff --git a/mod/mod-package-details.cxx b/mod/mod-package-details.cxx
index 1fb51da..ceb23c5 100644
--- a/mod/mod-package-details.cxx
+++ b/mod/mod-package-details.cxx
@@ -119,7 +119,7 @@ handle (request& rq, response& rs)
throw invalid_request (400, "invalid package name format");
}
- const package_name& name (pkg->name);
+ const package_name& name (pkg->name);
const string ename (mime_url_encode (name.string (), false));
auto url = [&ename] (bool f = false,
@@ -226,8 +226,8 @@ handle (request& rq, response& rs)
}
size_t pkg_count (
- package_db_->query_value<package_count> (
- search_params<package_count> (squery, tenant, name)));
+ package_db_->query_value<package_search_count> (
+ search_params<package_search_count> (squery, tenant, name)));
// Let's disable autofocus in the full page mode since clicking the full or
// more link the user most likely intends to read rather than search, while
@@ -244,8 +244,8 @@ handle (request& rq, response& rs)
search_params<package_search_rank> (squery, tenant, name) +
"ORDER BY rank DESC, version_epoch DESC, "
"version_canonical_upstream DESC, version_canonical_release DESC, "
- "version_revision DESC" +
- "OFFSET" + to_string (page * res_page) +
+ "version_revision DESC" +
+ "OFFSET" + to_string (page * res_page) +
"LIMIT" + to_string (res_page)))
{
shared_ptr<package> p (package_db_->load<package> (pr.id));
@@ -270,8 +270,16 @@ handle (request& rq, response& rs)
//
s << TR_REPOSITORY (rl, root, tenant)
<< TR_DEPENDS (p->dependencies, root, tenant)
- << TR_REQUIRES (p->requirements)
- << ~TBODY
+ << TR_REQUIRES (p->requirements);
+
+ if (options_->reviews_url_specified ())
+ {
+ package_db_->load (*p, p->reviews_section);
+
+ s << TR_REVIEWS_SUMMARY (p->reviews, options_->reviews_url ());
+ }
+
+ s << ~TBODY
<< ~TABLE;
}
s << ~DIV;
diff --git a/mod/mod-package-version-details.cxx b/mod/mod-package-version-details.cxx
index 91923e5..e28310c 100644
--- a/mod/mod-package-version-details.cxx
+++ b/mod/mod-package-version-details.cxx
@@ -528,6 +528,22 @@ handle (request& rq, response& rs)
print_tests (test_dependency_type::examples);
print_tests (test_dependency_type::benchmarks);
+ if (options_->reviews_url_specified ())
+ {
+ package_db_->load (*pkg, pkg->reviews_section);
+
+ const optional<reviews_summary>& rvs (pkg->reviews);
+ const string& u (options_->reviews_url ());
+
+ s << H3 << "Reviews" << ~H3
+ << TABLE(CLASS="proplist", ID="reviews")
+ << TBODY
+ << TR_REVIEWS_COUNTER (review_result::fail, rvs, u)
+ << TR_REVIEWS_COUNTER (review_result::pass, rvs, u)
+ << ~TBODY
+ << ~TABLE;
+ }
+
bool builds (build_db_ != nullptr && pkg->buildable);
if (builds)
diff --git a/mod/mod-repository-root.cxx b/mod/mod-repository-root.cxx
index 99e7219..b0d5e0e 100644
--- a/mod/mod-repository-root.cxx
+++ b/mod/mod-repository-root.cxx
@@ -26,6 +26,7 @@
#include <mod/mod-build-result.hxx>
#include <mod/mod-build-configs.hxx>
#include <mod/mod-package-details.hxx>
+#include <mod/mod-advanced-search.hxx>
#include <mod/mod-repository-details.hxx>
#include <mod/mod-package-version-details.hxx>
@@ -119,6 +120,7 @@ namespace brep
//
tenant_service_map_ (make_shared<tenant_service_map> ()),
packages_ (make_shared<packages> ()),
+ advanced_search_ (make_shared<advanced_search> ()),
package_details_ (make_shared<package_details> ()),
package_version_details_ (make_shared<package_version_details> ()),
repository_details_ (make_shared<repository_details> ()),
@@ -155,6 +157,10 @@ namespace brep
r.initialized_
? r.packages_
: make_shared<packages> (*r.packages_)),
+ advanced_search_ (
+ r.initialized_
+ ? r.advanced_search_
+ : make_shared<advanced_search> (*r.advanced_search_)),
package_details_ (
r.initialized_
? r.package_details_
@@ -231,6 +237,7 @@ namespace brep
{
option_descriptions r (handler::options ());
append (r, packages_->options ());
+ append (r, advanced_search_->options ());
append (r, package_details_->options ());
append (r, package_version_details_->options ());
append (r, repository_details_->options ());
@@ -279,6 +286,7 @@ namespace brep
// Initialize sub-handlers.
//
sub_init (*packages_, "packages");
+ sub_init (*advanced_search_, "advanced_search");
sub_init (*package_details_, "package_details");
sub_init (*package_version_details_, "package_version_details");
sub_init (*repository_details_, "repository_details");
@@ -312,13 +320,15 @@ namespace brep
//
auto verify = [&fail] (const string& v, const char* what)
{
- cstrings vs ({"packages",
- "builds",
- "build-configs",
- "about",
- "submit",
- "ci",
- "ci-github"});
+ cstrings vs ({
+ "packages",
+ "advanced-search",
+ "builds",
+ "build-configs",
+ "about",
+ "submit",
+ "ci",
+ "ci-github"});
if (find (vs.begin (), vs.end (), v) == vs.end ())
fail << what << " value '" << v << "' is invalid";
@@ -472,6 +482,13 @@ namespace brep
return handle ("packages", param);
}
+ else if (func == "advanced-search")
+ {
+ if (handler_ == nullptr)
+ handler_.reset (new advanced_search (*advanced_search_));
+
+ return handle ("advanced_search", param);
+ }
else if (func == "about")
{
if (handler_ == nullptr)
diff --git a/mod/mod-repository-root.hxx b/mod/mod-repository-root.hxx
index 31fde9b..38f6adc 100644
--- a/mod/mod-repository-root.hxx
+++ b/mod/mod-repository-root.hxx
@@ -14,6 +14,7 @@
namespace brep
{
class packages;
+ class advanced_search;
class package_details;
class package_version_details;
class repository_details;
@@ -65,6 +66,7 @@ namespace brep
shared_ptr<tenant_service_map> tenant_service_map_;
shared_ptr<packages> packages_;
+ shared_ptr<advanced_search> advanced_search_;
shared_ptr<package_details> package_details_;
shared_ptr<package_version_details> package_version_details_;
shared_ptr<repository_details> repository_details_;
diff --git a/mod/module.cli b/mod/module.cli
index d716c6e..ba2b986 100644
--- a/mod/module.cli
+++ b/mod/module.cli
@@ -454,6 +454,25 @@ namespace brep
}
};
+ class package_version_metadata
+ {
+ string reviews-url
+ {
+ "<url>",
+ "The base URL for the reviews manifest files. If this option is
+ specified, then the review information is displayed on the package
+ version details page.
+
+ The complete URL is formed by adding the following path to the base:
+
+ \
+ <project>/<package>/<version>/reviews.manifest
+ \
+
+ Note that no separator is added between the base and this path."
+ }
+ };
+
class page
{
web::xhtml::fragment logo
@@ -526,10 +545,20 @@ namespace brep
}
};
+ class advanced_search: package_db,
+ search,
+ page,
+ repository_url,
+ package_version_metadata,
+ handler
+ {
+ };
+
class package_details: package, package_db,
search,
page,
repository_url,
+ package_version_metadata,
handler
{
};
@@ -538,6 +567,7 @@ namespace brep
build, build_db,
page,
repository_url,
+ package_version_metadata,
handler
{
dir_path bindist-root
@@ -617,13 +647,13 @@ namespace brep
build_email_notification,
handler
{
- size_t build-result-request-max-size = 10485760
+ size_t build-result-request-max-size = 15728640
{
"<bytes>",
"The maximum size of the build result manifest accepted. Note that the
HTTP POST request body is cached to retry database transactions in the
face of recoverable failures (deadlock, loss of connection, etc). The
- default is 10M."
+ default is 15M."
}
};
@@ -820,24 +850,20 @@ namespace brep
// GitHub CI-specific options.
//
- size_t ci-github-app-id
+ path ci-github-app-webhook-secret
{
- "<id>",
- "The GitHub App ID. Found in the app's settings on GitHub."
- }
-
- string ci-github-app-webhook-secret
- {
- "<secret>",
+ "<path>",
"The GitHub App's configured webhook secret. If not set, then the
- GitHub CI service is disabled."
+      GitHub CI service is disabled. Note that the path must be absolute.
+      Make sure to choose a strong (random) secret."
}
- path ci-github-app-private-key
+ std::map<string, dir_path> ci-github-app-id-private-key
{
- "<path>",
- "The private key used during GitHub API authentication. Created in
- the GitHub App's settings."
+ "<id>=<path>",
+ "The private key used during GitHub API authentication for the
+      specified GitHub App ID. Both values are found in the GitHub App's
+ settings. Note that the paths must be absolute."
}
uint16_t ci-github-jwt-validity-period = 600
@@ -876,11 +902,11 @@ namespace brep
// Web handler HTTP request parameters.
//
+  // Use the parameters' long names in the C++ code and short aliases (if
+  // present) in the HTTP URL.
+ //
namespace params
{
- // Use parameters long names in the C++ code, short aliases (if present)
- // in HTTP URL.
- //
class packages
{
// Display package search result list starting from this page.
@@ -895,6 +921,44 @@ namespace brep
string q | _;
};
+ class advanced_search
+ {
+ // Display advanced package search result list starting from this page.
+ //
+ uint16_t page | p;
+
+ // Advanced package search filter options.
+ //
+
+ // Package name wildcard. An empty value is treated the same way as *.
+ //
+ // Note that the advanced-search parameter is renamed to '_' by the root
+ // handler (see the request_proxy class for details).
+ //
+ string name | _;
+
+ // Package version. If empty or *, then no version constraint is applied.
+ // Otherwise the package version must match the value exactly.
+ //
+ string version | pv;
+
+ // Package project wildcard. An empty value is treated the same way as *.
+ //
+ string project | pr;
+
+ // Package repositories. If *, then no repository constraint is applied.
+ // Otherwise the package repository canonical name must match the value
+ // exactly.
+ //
+ string repository | rp = "*";
+
+      // Package version reviews. If *, then no reviews-related constraint is
+      // applied. Otherwise the value must be one of the following statuses:
+      // reviewed or unreviewed.
+ //
+ string reviews | rv = "*";
+ };
+
class package_details
{
// Display package version search result list starting from this page.
@@ -933,6 +997,13 @@ namespace brep
// Only consider tenants with this interactive build mode.
//
bbot::interactive_mode interactive = bbot::interactive_mode::both;
+
+      // Only consider tenants that have third-party services of any of these
+      // types. The special empty type value denotes tenants without an
+      // associated service. If this parameter is absent, then consider all
+      // tenants regardless of the service type.
+ //
+ vector<string> tenant_service_type | t;
};
class build_result
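
For illustration, the new options could be set in brep-module.conf along
these lines (the app id, paths, and URL here are hypothetical):

  ci-github-app-webhook-secret /etc/brep/github-webhook-secret
  ci-github-app-id-private-key 12345=/etc/brep/github-app-12345-private-key.pem
  reviews-url https://example.org/reviews/

With this reviews-url the manifest for version 1.2.3 of package libhello in
project hello would be expected at
https://example.org/reviews/hello/libhello/1.2.3/reviews.manifest (note the
trailing slash in the base since no separator is added). An advanced search
restricted to unreviewed versions of project hello would then be requested
as <root>/?advanced-search&pr=hello&rv=unreviewed.
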
diff --git a/mod/page.cxx b/mod/page.cxx
index 177fb64..afeea0d 100644
--- a/mod/page.cxx
+++ b/mod/page.cxx
@@ -302,15 +302,26 @@ namespace brep
s << TR(CLASS="project")
<< TH << "project" << ~TH
<< TD
- << SPAN(CLASS="value")
- << A
- << HREF
- << tenant_dir (root_, tenant_) << "?packages="
- << mime_url_encode (project_.string ())
- << ~HREF
- << project_
- << ~A
- << ~SPAN
+ << SPAN(CLASS="value");
+
+  // Note that we currently don't support the advanced package search in the
+  // multi-tenant mode. Thus, in such a mode we print the project as plain
+  // text rather than as a link.
+ //
+ if (tenant_.empty ())
+ {
+ s << A
+ << HREF
+ << tenant_dir (root_, tenant_) << "?advanced-search&pr="
+ << mime_url_encode (project_.string ())
+ << ~HREF
+ << project_
+ << ~A;
+ }
+ else
+ s << project_;
+
+ s << ~SPAN
<< ~TD
<< ~TR;
}
@@ -618,6 +629,80 @@ namespace brep
<< ~TR;
}
+ // TR_REVIEWS_SUMMARY
+ //
+ void TR_REVIEWS_SUMMARY::
+ operator() (serializer& s) const
+ {
+ s << TR(CLASS="reviews")
+ << TH << "reviews" << ~TH
+ << TD
+ << SPAN(CLASS="value");
+
+ if (reviews_)
+ {
+ s << A
+ << HREF
+ << reviews_url_ << reviews_->manifest_file
+ << ~HREF;
+
+ if (reviews_->fail != 0)
+ s << SPAN(CLASS="fail") << '-' << reviews_->fail << ~SPAN;
+
+ if (reviews_->fail != 0 && reviews_->pass != 0)
+ s << '/';
+
+ if (reviews_->pass != 0)
+ s << SPAN(CLASS="pass") << '+' << reviews_->pass << ~SPAN;
+
+ s << ~A;
+ }
+ else
+ s << SPAN(CLASS="none") << 0 << ~SPAN;
+
+ s << ~SPAN
+ << ~TD
+ << ~TR;
+ }
+
+ // TR_REVIEWS_COUNTER
+ //
+ void TR_REVIEWS_COUNTER::
+ operator() (serializer& s) const
+ {
+ const char* l (result == review_result::fail ? "fail" : "pass");
+
+ s << TR(CLASS=l)
+ << TH << l << ~TH
+ << TD
+ << SPAN(CLASS="value");
+
+ if (reviews_)
+ {
+ size_t n (result == review_result::fail
+ ? reviews_->fail
+ : reviews_->pass);
+
+ if (n != 0)
+ {
+ s << A
+ << HREF
+ << reviews_url_ << reviews_->manifest_file
+ << ~HREF
+ << SPAN(CLASS=l) << n << ~SPAN
+ << ~A;
+ }
+ else
+ s << n;
+ }
+ else
+ s << SPAN(CLASS="none") << 0 << ~SPAN;
+
+ s << ~SPAN
+ << ~TD
+ << ~TR;
+ }
+
// TR_URL
//
void TR_URL::
diff --git a/mod/page.hxx b/mod/page.hxx
index 7329e2d..3455fe8 100644
--- a/mod/page.hxx
+++ b/mod/page.hxx
@@ -15,6 +15,7 @@
#include <libbrep/build.hxx>
#include <libbrep/package.hxx>
+#include <libbrep/review-manifest.hxx> // review_result
#include <mod/diagnostics.hxx>
#include <mod/options-types.hxx> // page_menu
@@ -371,6 +372,48 @@ namespace brep
const requirements& requirements_;
};
+  // Generate the package version reviews summary element.
+ //
+ class TR_REVIEWS_SUMMARY
+ {
+ public:
+ TR_REVIEWS_SUMMARY (const optional<reviews_summary>& rs, const string& u)
+ : reviews_ (rs), reviews_url_ (u) {}
+
+ void
+ operator() (xml::serializer&) const;
+
+ private:
+ const optional<reviews_summary>& reviews_;
+ const string& reviews_url_;
+ };
+
+  // Generate the package version reviews counter element. The passed review
+  // result denotes which kind of counter needs to be displayed and can only
+  // be fail or pass.
+ //
+ class TR_REVIEWS_COUNTER
+ {
+ public:
+ TR_REVIEWS_COUNTER (review_result r,
+ const optional<reviews_summary>& rs,
+ const string& u)
+ : result (r),
+ reviews_ (rs),
+ reviews_url_ (u)
+ {
+ assert (r == review_result::fail || r == review_result::pass);
+ }
+
+ void
+ operator() (xml::serializer&) const;
+
+ private:
+ review_result result;
+ const optional<reviews_summary>& reviews_;
+ const string& reviews_url_;
+ };
+
// Generate url element. Strip the `<scheme>://` prefix from the link text.
//
class TR_URL
diff --git a/mod/tenant-service.hxx b/mod/tenant-service.hxx
index c46cb7b..5564a56 100644
--- a/mod/tenant-service.hxx
+++ b/mod/tenant-service.hxx
@@ -50,7 +50,9 @@ namespace brep
// sense for the implementation to protect against overwriting later states
// with earlier. For example, if it's possible to place a condition on a
// notification, it makes sense to only set the state to queued if none of
- // the later states (e.g., building) are already in effect.
+ // the later states (e.g., building) are already in effect. See also
+ // ci_start::rebuild() for additional details on the build->queued
+ // transition.
//
// Note also that it's possible for the build to get deleted at any stage
// without any further notifications. This can happen, for example, due to
@@ -72,9 +74,11 @@ namespace brep
// If the returned function is not NULL, it is called to update the
// service data. It should return the new data or nullopt if no update is
// necessary. Note: tenant_service::data passed to the callback and to the
- // returned function may not be the same. Also, the returned function may
- // be called multiple times (on transaction retries). Note that the passed
- // log_writer is valid during the calls to the returned function.
+ // returned function may not be the same. Furthermore, tenant_ids may not
+ // be the same either, in case the tenant was replaced. Also, the returned
+ // function may be called multiple times (on transaction retries). Note
+ // that the passed log_writer is valid during the calls to the returned
+ // function.
//
// The passed initial_state indicates the logical initial state and is
// either absent, `building` (interrupted), or `built` (rebuild). Note
@@ -99,8 +103,10 @@ namespace brep
bool single_package_config;
};
- virtual function<optional<string> (const tenant_service&)>
- build_queued (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_queued (const string& tenant_id,
+ const tenant_service&,
const vector<build>&,
optional<build_state> initial_state,
const build_queued_hints&,
@@ -110,8 +116,10 @@ namespace brep
class tenant_service_build_building: public virtual tenant_service_base
{
public:
- virtual function<optional<string> (const tenant_service&)>
- build_building (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_building (const string& tenant_id,
+ const tenant_service&,
const build&,
const diag_epilogue& log_writer) const noexcept = 0;
};
@@ -119,25 +127,31 @@ namespace brep
class tenant_service_build_built: public virtual tenant_service_base
{
public:
- virtual function<optional<string> (const tenant_service&)>
- build_built (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_built (const string& tenant_id,
+ const tenant_service&,
const build&,
const diag_epilogue& log_writer) const noexcept = 0;
};
// This notification is only made on unloaded CI requests created with the
// ci_start::create() call and until they are loaded with ci_start::load()
- // or, alternatively, abandoned with ci_start::abandon().
+ // or, alternatively, abandoned with ci_start::cancel() (in which case the
+ // returned callback should be NULL).
//
// Note: make sure the implementation of this notification does not take
- // too long (currently 40 seconds) to avoid nested notifications. Note
- // also that the first notification is delayed (currently 10 seconds).
+ // longer than the notification_interval argument of ci_start::create() to
+ // avoid nested notifications. The first notification can be delayed with
+ // the notify_delay argument.
//
class tenant_service_build_unloaded: public virtual tenant_service_base
{
public:
- virtual function<optional<string> (const tenant_service&)>
- build_unloaded (tenant_service&&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_unloaded (const string& tenant_id,
+ tenant_service&&,
const diag_epilogue& log_writer) const noexcept = 0;
};
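
To make the revised interface concrete, the following is a minimal sketch of
a third-party service implementing the built notification (the my_service
class is hypothetical and assumes the brep namespace; it follows the same
pattern as the BREP_CI_TENANT_SERVICE implementation in mod-ci.cxx above):

  class my_service: public tenant_service_build_built
  {
  public:
    virtual function<optional<string> (const string& tenant_id,
                                       const tenant_service&)>
    build_built (const string& /*tenant_id*/,
                 const tenant_service&,
                 const build& b,
                 const diag_epilogue&) const noexcept override
    {
      // Note that the returned callback may be called multiple times (on
      // transaction retries) and that the tenant id it receives may differ
      // from the one passed above if the tenant was replaced.
      //
      return [&b] (const string& tenant_id, const tenant_service& ts)
      {
        optional<string> r (ts.data);

        // Update r based on tenant_id and b here, returning nullopt if no
        // update to the service data is necessary.

        return r;
      };
    }
  };
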
diff --git a/mod/utility.cxx b/mod/utility.cxx
new file mode 100644
index 0000000..5ca16a0
--- /dev/null
+++ b/mod/utility.cxx
@@ -0,0 +1,69 @@
+// file : mod/utility.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <mod/utility.hxx>
+
+#include <libbutl/path-pattern.hxx>
+
+namespace brep
+{
+ string
+ wildcard_to_similar_to_pattern (const string& wildcard)
+ {
+ using namespace butl;
+
+ if (wildcard.empty ())
+ return "%";
+
+ string r;
+ for (const path_pattern_term& pt: path_pattern_iterator (wildcard))
+ {
+ switch (pt.type)
+ {
+ case path_pattern_term_type::question: r += '_'; break;
+ case path_pattern_term_type::star: r += '%'; break;
+ case path_pattern_term_type::bracket:
+ {
+        // Copy the bracket expression, translating the inverse character,
+        // if present.
+ //
+ size_t n (r.size ());
+ r.append (pt.begin, pt.end);
+
+ if (r[n + 1] == '!') // ...[!... ?
+ r[n + 1] = '^';
+
+ break;
+ }
+ case path_pattern_term_type::literal:
+ {
+ char c (get_literal (pt));
+
+ // Escape the special characters.
+ //
+ // Note that '.' is not a special character for SIMILAR TO.
+ //
+ switch (c)
+ {
+ case '\\':
+ case '%':
+ case '_':
+ case '|':
+ case '+':
+ case '{':
+ case '}':
+ case '(':
+ case ')':
+ case '[':
+ case ']': r += '\\'; break;
+ }
+
+ r += c;
+ break;
+ }
+ }
+ }
+
+ return r;
+ }
+}
diff --git a/mod/utility.hxx b/mod/utility.hxx
index 43527ae..07fbf8b 100644
--- a/mod/utility.hxx
+++ b/mod/utility.hxx
@@ -19,6 +19,13 @@ namespace brep
? path_cast<dir_path> (dir / ('@' + tenant))
: dir;
}
+
+ // Transform the wildcard to the `SIMILAR TO` pattern.
+ //
+ // Note that the empty wildcard is transformed to the '%' pattern.
+ //
+ string
+ wildcard_to_similar_to_pattern (const string&);
}
#endif // MOD_UTILITY_HXX
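
As an illustration of the translation implemented above, here is what a few
representative calls would produce (a sketch derived from the term-by-term
logic in utility.cxx, not part of the changeset):

  #include <cassert>

  #include <mod/utility.hxx>

  int
  main ()
  {
    using brep::wildcard_to_similar_to_pattern;

    assert (wildcard_to_similar_to_pattern ("")       == "%");
    assert (wildcard_to_similar_to_pattern ("lib*")   == "lib%");
    assert (wildcard_to_similar_to_pattern ("lib?oo") == "lib_oo");
    assert (wildcard_to_similar_to_pattern ("[!a]*")  == "[^a]%");

    // SIMILAR TO special characters in literals are escaped.
    //
    assert (wildcard_to_similar_to_pattern ("100%") == "100\\%");
  }
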
diff --git a/tests/ci/ci-load.testscript b/tests/ci/ci-load.testscript
index eb9ba7c..ff75493 100644
--- a/tests/ci/ci-load.testscript
+++ b/tests/ci/ci-load.testscript
@@ -105,11 +105,13 @@
email: user@example.org
%depends: \\* build2 .+%
%depends: \\* bpkg .+%
- bootstrap-build:\\
+ bootstrap-build:
+ \\
project = libhello
%.+
\\
- root-build:\\
+ root-build:
+ \\
cxx.std = latest
%.+
\\
@@ -124,11 +126,13 @@
email: user@example.org
%depends: \\* build2 .+%
%depends: \\* bpkg .+%
- bootstrap-build:\\
+ bootstrap-build:
+ \\
project = hello
%.+
\\
- root-build:\\
+ root-build:
+ \\
cxx.std = latest
%.+
\\
@@ -164,11 +168,13 @@
email: user@example.org
%depends: \\* build2 .+%
%depends: \\* bpkg .+%
- bootstrap-build:\\
+ bootstrap-build:
+ \\
project = hello
%.+
\\
- root-build:\\
+ root-build:
+ \\
cxx.std = latest
%.+
\\
@@ -205,11 +211,13 @@
email: user@example.org
%depends: \\* build2 .+%
%depends: \\* bpkg .+%
- bootstrap-build:\\
+ bootstrap-build:
+ \\
project = libhello
%.+
\\
- root-build:\\
+ root-build:
+ \\
cxx.std = latest
%.+
\\
diff --git a/tests/load/1/basics/packages.manifest b/tests/load/1/basics/packages.manifest
index d422df5..95bfedb 100644
--- a/tests/load/1/basics/packages.manifest
+++ b/tests/load/1/basics/packages.manifest
@@ -7,5 +7,9 @@ summary: The Expat Library
license: MIT
url: http://www.example.com/expat/
email: expat-users@example.com
+bootstrap-build:
+\
+project = libexpat
+\
location: libexpat-5.1.tar.gz
sha256sum: 75ccba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93
diff --git a/tests/load/1/misc/packages.manifest b/tests/load/1/misc/packages.manifest
index 86620dd..bab35c4 100644
--- a/tests/load/1/misc/packages.manifest
+++ b/tests/load/1/misc/packages.manifest
@@ -16,6 +16,10 @@ depends: libmath >= 2.0.0
requires: linux | windows | macosx
changes: some changes
changes-type: text/plain
+bootstrap-build:
+\
+project = libbar
+\
location: libbar-2.4.0+3.tar.gz
sha256sum: 70ccba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93
:
@@ -25,6 +29,10 @@ summary: Foo Library
license: MIT
url: http://www.example.com/foo/
email: foo-users@example.com
+bootstrap-build:
+\
+project = libfoo
+\
location: libfoo-1.0.tar.gz
sha256sum: e89c6d746f8b1ea3ec58d294946d2f683d133438d2ac8c88549ba24c19627e76
:
@@ -34,6 +42,10 @@ summary: Foo
license: MIT
url: http://www.example.com/foo/
email: foo-users@example.com
+bootstrap-build:
+\
+project = libfoo
+\
location: libfoo-0.1.tar.gz
sha256sum: 72ccba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93
:
@@ -43,6 +55,10 @@ summary: Foo Library
license: MIT
url: http://www.example.com/foo/
email: foo-users@example.com
+bootstrap-build:
+\
+project = libfoo
+\
location: libfoo-1.2.4+1.tar.gz
sha256sum: 6692a487e0908598e36bdeb9c25ed1e4a35bb99587dbc475807d314fa0719ac6
:
@@ -52,5 +68,9 @@ summary: Foo Library
license: MIT
url: http://www.example.com/foo/
email: foo-users@example.com
+bootstrap-build:
+\
+project = libfoo
+\
location: libfoo-1.2.4+2.tar.gz
sha256sum: 74ccba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93
diff --git a/tests/load/1/staging/packages.manifest b/tests/load/1/staging/packages.manifest
index 97c252a..e42fe43 100644
--- a/tests/load/1/staging/packages.manifest
+++ b/tests/load/1/staging/packages.manifest
@@ -7,6 +7,10 @@ summary: The Expat Library
license: MIT
url: http://www.example.com/expat/
email: expat-users@example.com
+bootstrap-build:
+\
+project = libexpat
+\
location: libexpat-5.1.tar.gz
sha256sum: 75c0ba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93
:
@@ -16,6 +20,10 @@ summary: The Genx Library
license: MIT
url: http://www.example.com/genx/
email: genx-users@example.com
+bootstrap-build:
+\
+project = libgenx
+\
location: libgenx-1.0.tar.gz
sha256sum: 75c1ba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93
:
@@ -26,5 +34,9 @@ license: MIT
url: http://www.example.com/misc/
email: misc-users@example.com
depends: libexpat >= 5.0
+bootstrap-build:
+\
+project = libmisc
+\
location: libmisc-1.0.tar.gz
sha256sum: 75c2ba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93
diff --git a/tests/load/1/testing/packages.manifest b/tests/load/1/testing/packages.manifest
index 2d458f0..2606d0e 100644
--- a/tests/load/1/testing/packages.manifest
+++ b/tests/load/1/testing/packages.manifest
@@ -8,6 +8,10 @@ license: MIT
url: http://www.example.com/misc/
email: misc-users@example.com
depends: libexpat >= 5.0
+bootstrap-build:
+\
+project = libmisc
+\
location: libmisc-2.4.0.tar.gz
sha256sum: 75ccba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab94
:
@@ -18,5 +22,9 @@ license: MIT
url: http://www.example.com/misc/
email: misc-users@example.com
depends: libexpat >= 5.0
+bootstrap-build:
+\
+project = libmisc
+\
location: libmisc-2.3.0+1.tar.gz
sha256sum: 11ccba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab94
diff --git a/tests/manifest/buildfile b/tests/manifest/buildfile
new file mode 100644
index 0000000..e6f5a85
--- /dev/null
+++ b/tests/manifest/buildfile
@@ -0,0 +1,6 @@
+# file : tests/manifest/buildfile
+# license : MIT; see accompanying LICENSE file
+
+import libs = lib{brep}
+
+exe{driver}: {hxx cxx}{*} $libs testscript{*}
diff --git a/tests/manifest/driver.cxx b/tests/manifest/driver.cxx
new file mode 100644
index 0000000..5c70ea5
--- /dev/null
+++ b/tests/manifest/driver.cxx
@@ -0,0 +1,59 @@
+// file : tests/manifest/driver.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <ios> // ios_base::failbit, ios_base::badbit
+#include <iostream>
+
+#include <libbutl/utility.hxx> // operator<<(ostream,exception)
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>
+
+#include <libbrep/review-manifest.hxx>
+
+#undef NDEBUG
+#include <cassert>
+
+using namespace std;
+using namespace butl;
+using namespace brep;
+
+// Usage: argv[0] (-r | -rl)
+//
+// Read and parse a manifest from STDIN and serialize it to STDOUT. The
+// following options specify the manifest type.
+//
+// -r parse review manifest
+// -rl parse review manifest list
+//
+int
+main (int argc, char* argv[])
+try
+{
+ assert (argc == 2);
+ string opt (argv[1]);
+
+ cin.exceptions (ios_base::failbit | ios_base::badbit);
+ cout.exceptions (ios_base::failbit | ios_base::badbit);
+
+ manifest_parser p (cin, "stdin");
+ manifest_serializer s (cout, "stdout");
+
+ if (opt == "-r")
+ review_manifest (p).serialize (s);
+ else if (opt == "-rl")
+ review_manifests (p).serialize (s);
+ else
+ assert (false);
+
+ return 0;
+}
+catch (const manifest_parsing& e)
+{
+ cerr << e << endl;
+ return 1;
+}
+catch (const manifest_serialization& e)
+{
+ cerr << e << endl;
+ return 1;
+}
diff --git a/tests/manifest/review.testscript b/tests/manifest/review.testscript
new file mode 100644
index 0000000..e3daa66
--- /dev/null
+++ b/tests/manifest/review.testscript
@@ -0,0 +1,171 @@
+# file : tests/manifest/review.testscript
+# license : MIT; see accompanying LICENSE file
+
+: single-manifest
+:
+{
+ test.options += -r
+
+ : valid
+ :
+ : Roundtrip the review manifest.
+ :
+ {
+ $* <<EOF >>EOF
+ : 1
+ reviewed-by: John Doe <john@doe.com>
+ result-code: pass
+ result-build: fail
+ result-doc: unchanged
+ base-version: 1.0.2+3
+ details-url: https://example.com/issues/1
+ EOF
+ }
+
+ : unknown-name
+ :
+ {
+ $* <<EOI 2>"stdin:2:1: error: unknown name 'unknown-name' in review manifest" != 0
+ : 1
+ unknown-name: John Doe <john@doe.com>
+ EOI
+ }
+
+ : redefinition
+ :
+ {
+ : reviewed-by
+ :
+ {
+ $* <<EOI 2>"stdin:3:1: error: reviewer redefinition" != 0
+ : 1
+ reviewed-by: John Doe <john@doe.com>
+ reviewed-by: John Doe <john@doe.com>
+ EOI
+ }
+
+ : result-code
+ :
+ {
+ $* <<EOI 2>"stdin:3:1: error: code review result redefinition" != 0
+ : 1
+ result-code: pass
+ result-code: fail
+ EOI
+ }
+ }
+
+ : invalid
+ :
+ {
+ : reviewed-by-empty
+ :
+ {
+ $* <<EOI 2>"stdin:2:13: error: empty reviewer" != 0
+ : 1
+ reviewed-by:
+ EOI
+ }
+
+ : result-code
+ :
+ {
+ $* <<EOI 2>"stdin:2:14: error: invalid review result 'fails'" != 0
+ : 1
+ result-code: fails
+ EOI
+ }
+
+ : details-url
+ :
+ {
+ $* <<EOI 2>"stdin:2:13: error: empty URL" != 0
+ : 1
+ details-url:
+ EOI
+ }
+ }
+
+ : mandatory
+ :
+ {
+ : reviewed-by
+ :
+ {
+ $* <<EOI 2>"stdin:2:1: error: no reviewer specified" != 0
+ : 1
+ EOI
+ }
+
+ : no-result
+ :
+ {
+ $* <<EOI 2>"stdin:3:1: error: no result specified" != 0
+ : 1
+ reviewed-by: John Doe <john@doe.com>
+ EOI
+ }
+
+ : no-base-version
+ :
+ {
+ $* <<EOI 2>"stdin:4:1: error: no base version specified" != 0
+ : 1
+ reviewed-by: John Doe <john@doe.com>
+ result-code: unchanged
+ EOI
+ }
+
+ : no-details-url
+ :
+ {
+ $* <<EOI 2>"stdin:4:1: error: no details url specified" != 0
+ : 1
+ reviewed-by: John Doe <john@doe.com>
+ result-code: fail
+ EOI
+ }
+ }
+}
+
+: multiple-manifests
+:
+{
+ test.options += -rl
+
+ : valid-manifest-list
+ :
+ : Roundtrip the review manifests list.
+ :
+ {
+ $* <<EOF >>EOF
+ : 1
+ reviewed-by: John Doe <john@doe.com>
+ result-code: pass
+ :
+ reviewed-by: John Doe <john@doe.com>
+ result-build: pass
+ EOF
+ }
+
+ : empty-manifest-list
+ :
+ : Roundtrip the empty manifests list.
+ :
+ {
+ $* <:'' >:''
+ }
+
+ : no-details-url
+ :
+ {
+ $* <<EOI 2>"stdin:7:1: error: no details url specified" != 0
+ : 1
+ reviewed-by: John Doe <john@doe.com>
+ result-build: pass
+ :
+ reviewed-by: John Doe <john@doe.com>
+ result-code: fail
+ EOI
+ }
+}
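
Taken together with the reviews-url logic in module.cli, a complete
reviews.manifest served for a package version might look like this (the
reviewers and URL are hypothetical):

  : 1
  reviewed-by: Jane Doe <jane@example.org>
  result-code: fail
  details-url: https://example.org/issues/42
  :
  reviewed-by: John Doe <john@example.org>
  result-code: pass

As the tests above suggest, details-url is mandatory for a fail result while
base-version is only required when a result is unchanged.
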
diff --git a/tests/submit/submit-dir.testscript b/tests/submit/submit-dir.testscript
index 285710f..3bc908f 100644
--- a/tests/submit/submit-dir.testscript
+++ b/tests/submit/submit-dir.testscript
@@ -77,7 +77,8 @@
$* >>"EOO"
: 1
status: 400
- message:\\
+ message:
+ \\
package archive is not valid
gzip: libhello-0.1.0.tar.gz: not in gzip format
diff --git a/tests/submit/submit-git.testscript b/tests/submit/submit-git.testscript
index 5197afc..7093142 100644
--- a/tests/submit/submit-git.testscript
+++ b/tests/submit/submit-git.testscript
@@ -1064,7 +1064,11 @@ pkg_ctl="$prj_ctl/hello.git"
$* "file:///$~/tgt.git" $data_dir >>"EOO"
: 1
status: 422
- message: failed to git-clone http://example.com/path/rep.git
+ message:
+ \\
+ failed to git-clone build2-control branch of http://example.com/path/rep.git
+ info: repository 'http://example.com/path/rep.git/' not found
+ \\
reference: $checksum
EOO
}
diff --git a/tests/submit/submit-pub.testscript b/tests/submit/submit-pub.testscript
index 8c042a7..a846b82 100644
--- a/tests/submit/submit-pub.testscript
+++ b/tests/submit/submit-pub.testscript
@@ -182,7 +182,8 @@ clone_root_rep = [cmdline] cp --no-cleanup -r $root_rep ./ &pkg-1/*** &?pkg.lock
$* $~/brep-loader $~/pkg $~/$data_dir >>~"%EOO%"
: 1
status: 400
- message:\\
+ message:
+ \\
submitted archive is not a valid package
%.+
\\
@@ -203,7 +204,8 @@ clone_root_rep = [cmdline] cp --no-cleanup -r $root_rep ./ &pkg-1/*** &?pkg.lock
$* $~/brep-loader $~/pkg $~/$data_dir >>~"%EOO%"
: 1
status: 400
- message:\\
+ message:
+ \\
unable to add package to repository
%.+
\\
diff --git a/www/advanced-search-body.css b/www/advanced-search-body.css
new file mode 100644
index 0000000..47398ae
--- /dev/null
+++ b/www/advanced-search-body.css
@@ -0,0 +1,98 @@
+/*
+ * Filter form (based on proplist and form-table)
+ */
+#filter input, #filter select,
+#package-version-count, #package-version-count #count
+{
+ width: 100%;
+  margin: 0;
+}
+
+#filter-btn {padding-left: .4em;}
+
+/*
+ * Package version count.
+ */
+#count
+{
+ font-size: 1.32em;
+ line-height: 1.4em;
+ color: #555;
+
+ margin: 1.2em 0 0 0;
+}
+
+/*
+ * Project, Package, and Version tables.
+ */
+table.project
+{
+ margin-top: 1.7em;
+ margin-bottom: -1.2em;
+
+ padding-top: .4em;
+ padding-bottom: .4em;
+}
+
+.package, table.version, #filter
+{
+ margin-top: .8em;
+ margin-bottom: .8em;
+
+ padding-top: .4em;
+ padding-bottom: .4em;
+}
+
+.package, #project-break
+{
+ margin-left: -.4rem;
+ padding-left: 2.25rem;
+}
+
+table.version, #package-break
+{
+ margin-left: 4rem;
+ padding-left: .4rem;
+}
+
+table.version
+{
+ width: calc(100% + .8rem - 4.4rem);
+}
+
+table.version:nth-child(odd) {background-color: rgba(0, 0, 0, 0.07);}
+
+table.project th, .package th
+{
+ width: 5.5em;
+}
+
+table.version th, #filter th
+{
+ width: 7.3em;
+}
+
+table.project tr.project td .value,
+.package tr.name td .value,
+.package tr.summary td .value,
+.package tr.license td .value,
+table.version tr.version td .value,
+table.version tr.depends td .value,
+table.version tr.requires td .value,
+table.version tr.reviews td .value
+{
+ /* <code> style. */
+ font-family: monospace;
+ font-size: 0.94em;
+}
+
+table.version tr.reviews td .none {color: #fe7c04;}
+table.version tr.reviews td .fail {color: #ff0000;}
+table.version tr.reviews td .pass {color: #00bb00;}
+
+#package-break, #project-break
+{
+ /* <code> style. */
+ font-family: monospace;
+ font-weight: 500;
+}
diff --git a/www/advanced-search.css b/www/advanced-search.css
new file mode 100644
index 0000000..b594f97
--- /dev/null
+++ b/www/advanced-search.css
@@ -0,0 +1,3 @@
+@import url(common.css);
+@import url(brep-common.css);
+@import url(advanced-search-body.css);
diff --git a/www/advanced-search.scss b/www/advanced-search.scss
new file mode 100644
index 0000000..77cbe34
--- /dev/null
+++ b/www/advanced-search.scss
@@ -0,0 +1,3 @@
+@import "common";
+@import "brep-common";
+@import "advanced-search-body";
diff --git a/www/package-details-body.css b/www/package-details-body.css
index 1083c54..23bd8f1 100644
--- a/www/package-details-body.css
+++ b/www/package-details-body.css
@@ -118,8 +118,7 @@ h1, h2
}
#package th {width: 7.6em;}
-#package tr.topics td a,
-#package tr.project td a
+#package tr.topics td a
{
display: inline-block;
@@ -129,21 +128,18 @@ h1, h2
margin: 0 0.35em 0.1em 0;
}
-#package tr.topics td a:visited,
-#package tr.project td a:visited
+#package tr.topics td a:visited
{
color: #3870c0;
}
-#package tr.topics td a:hover,
-#package tr.project td a:hover
+#package tr.topics td a:hover
{
text-decoration: none;
background-color: #def;
}
-#package tr.topics td,
-#package tr.project td
+#package tr.topics td
{
padding: 0;
margin: 0 0 0 -0.5em;
@@ -185,7 +181,8 @@ table.version th {width: 7.6em;}
table.version tr.version td .value,
table.version tr.priority td .value,
table.version tr.depends td .value,
-table.version tr.requires td .value
+table.version tr.requires td .value,
+table.version tr.reviews td .value
{
/* <code> style. */
font-family: monospace;
@@ -195,3 +192,7 @@ table.version tr.requires td .value
table.version tr.priority td .security {color: #ff0000; font-weight: bold;}
table.version tr.priority td .high {color: #ff0000;}
table.version tr.priority td .medium {color: #fe7c04;}
+
+table.version tr.reviews td .none {color: #fe7c04;}
+table.version tr.reviews td .fail {color: #ff0000;}
+table.version tr.reviews td .pass {color: #00bb00;}
diff --git a/www/package-version-details-body.css b/www/package-version-details-body.css
index 1c41ed5..bd4d753 100644
--- a/www/package-version-details-body.css
+++ b/www/package-version-details-body.css
@@ -171,8 +171,7 @@ h1, h2, h3
}
#package th {width: 9.5em;}
-#package tr.topics td a,
-#package tr.project td a
+#package tr.topics td a
{
display: inline-block;
@@ -182,21 +181,18 @@ h1, h2, h3
margin: 0 0.35em 0.1em 0;
}
-#package tr.topics td a:visited,
-#package tr.project td a:visited
+#package tr.topics td a:visited
{
color: #3870c0;
}
-#package tr.topics td a:hover,
-#package tr.project td a:hover
+#package tr.topics td a:hover
{
text-decoration: none;
background-color: #def;
}
-#package tr.topics td,
-#package tr.project td
+#package tr.topics td
{
padding: 0;
margin: 0 0 0 -0.5em;
@@ -289,6 +285,24 @@ h1, h2, h3
}
/*
+ * Reviews.
+ */
+#reviews {margin-top: .4em; margin-bottom: 1em;}
+#reviews th {width: 3.7em; padding-left: 3.2em;}
+
+#reviews tr.fail td .value,
+#reviews tr.pass td .value
+{
+ /* <code> style. */
+ font-family: monospace;
+ font-size: 0.94em;
+}
+
+#reviews tr td .value .none {color: #fe7c04;}
+#reviews tr td .value .fail {color: #ff0000;}
+#reviews tr td .value .pass {color: #00bb00;}
+
+/*
* Binaries.
*/
#binaries