aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore6
-rw-r--r--INSTALL146
-rw-r--r--INSTALL.cli4
-rw-r--r--LICENSE2
-rw-r--r--NEWS520
-rw-r--r--bootstrap-clang.bat2
-rw-r--r--bootstrap-mingw.bat4
-rw-r--r--bootstrap-msvc.bat2
-rw-r--r--bootstrap.gmake10
-rwxr-xr-xbootstrap.sh2
-rw-r--r--build/root.build17
-rw-r--r--build2/b-options.cxx1707
-rw-r--r--build2/b-options.hxx722
-rw-r--r--build2/b-options.ixx582
-rw-r--r--build2/b.cxx1256
-rw-r--r--build2/buildfile42
-rw-r--r--build2/cli/target.hxx54
-rw-r--r--build2/types-parsers.cxx50
-rw-r--r--build2/types-parsers.hxx43
-rw-r--r--buildfile2
m---------config0
-rwxr-xr-xdoc/cli.sh12
-rw-r--r--doc/manual.cli1417
-rw-r--r--doc/testscript.cli490
-rw-r--r--libbuild2/action.hxx17
-rw-r--r--libbuild2/adhoc-rule-buildscript.cxx2507
-rw-r--r--libbuild2/adhoc-rule-buildscript.hxx43
-rw-r--r--libbuild2/adhoc-rule-cxx.cxx49
-rw-r--r--libbuild2/adhoc-rule-cxx.hxx7
-rw-r--r--libbuild2/adhoc-rule-regex-pattern.cxx190
-rw-r--r--libbuild2/adhoc-rule-regex-pattern.hxx12
-rw-r--r--libbuild2/algorithm.cxx2241
-rw-r--r--libbuild2/algorithm.hxx356
-rw-r--r--libbuild2/algorithm.ixx311
-rw-r--r--libbuild2/b-cmdline.cxx504
-rw-r--r--libbuild2/b-cmdline.hxx45
-rw-r--r--libbuild2/b-options.cxx1607
-rw-r--r--libbuild2/b-options.hxx366
-rw-r--r--libbuild2/b-options.ixx405
-rw-r--r--libbuild2/b.cli (renamed from build2/b.cli)379
-rw-r--r--libbuild2/bash/init.cxx15
-rw-r--r--libbuild2/bash/rule.cxx146
-rw-r--r--libbuild2/bash/rule.hxx16
-rw-r--r--libbuild2/bash/target.cxx2
-rw-r--r--libbuild2/bash/target.hxx7
-rw-r--r--libbuild2/bash/utility.hxx20
-rw-r--r--libbuild2/bin/def-rule.cxx229
-rw-r--r--libbuild2/bin/def-rule.hxx2
-rw-r--r--libbuild2/bin/guess.cxx73
-rw-r--r--libbuild2/bin/guess.hxx8
-rw-r--r--libbuild2/bin/init.cxx80
-rw-r--r--libbuild2/bin/rule.cxx149
-rw-r--r--libbuild2/bin/rule.hxx31
-rw-r--r--libbuild2/bin/target.cxx55
-rw-r--r--libbuild2/bin/target.hxx193
-rw-r--r--libbuild2/bin/utility.cxx5
-rw-r--r--libbuild2/build/script/builtin-options.cxx606
-rw-r--r--libbuild2/build/script/builtin-options.hxx284
-rw-r--r--libbuild2/build/script/builtin-options.ixx363
-rw-r--r--libbuild2/build/script/builtin.cli128
-rw-r--r--libbuild2/build/script/lexer+for-loop.test.testscript188
-rw-r--r--libbuild2/build/script/lexer.cxx48
-rw-r--r--libbuild2/build/script/lexer.hxx9
-rw-r--r--libbuild2/build/script/lexer.test.cxx1
-rw-r--r--libbuild2/build/script/parser+command-if.test.testscript2
-rw-r--r--libbuild2/build/script/parser+command-re-parse.test.testscript6
-rw-r--r--libbuild2/build/script/parser+diag.test.testscript106
-rw-r--r--libbuild2/build/script/parser+expansion.test.testscript6
-rw-r--r--libbuild2/build/script/parser+for.test.testscript656
-rw-r--r--libbuild2/build/script/parser+while.test.testscript133
-rw-r--r--libbuild2/build/script/parser.cxx2707
-rw-r--r--libbuild2/build/script/parser.hxx239
-rw-r--r--libbuild2/build/script/parser.test.cxx170
-rw-r--r--libbuild2/build/script/runner.cxx51
-rw-r--r--libbuild2/build/script/runner.hxx31
-rw-r--r--libbuild2/build/script/script.cxx70
-rw-r--r--libbuild2/build/script/script.hxx61
-rw-r--r--libbuild2/buildfile230
-rw-r--r--libbuild2/buildspec.cxx4
-rw-r--r--libbuild2/c/init.cxx109
-rw-r--r--libbuild2/c/init.hxx4
-rw-r--r--libbuild2/c/target.hxx2
-rw-r--r--libbuild2/cc/buildfile18
-rw-r--r--libbuild2/cc/common.cxx811
-rw-r--r--libbuild2/cc/common.hxx128
-rw-r--r--libbuild2/cc/common.txx19
-rw-r--r--libbuild2/cc/compile-rule.cxx2559
-rw-r--r--libbuild2/cc/compile-rule.hxx75
-rw-r--r--libbuild2/cc/functions.cxx154
-rw-r--r--libbuild2/cc/gcc.cxx215
-rw-r--r--libbuild2/cc/guess.cxx347
-rw-r--r--libbuild2/cc/guess.hxx3
-rw-r--r--libbuild2/cc/init.cxx42
-rw-r--r--libbuild2/cc/install-rule.cxx72
-rw-r--r--libbuild2/cc/install-rule.hxx12
-rw-r--r--libbuild2/cc/lexer+raw-string-literal.test.testscript2
-rw-r--r--libbuild2/cc/lexer.cxx6
-rw-r--r--libbuild2/cc/lexer.test.cxx1
-rw-r--r--libbuild2/cc/link-rule.cxx987
-rw-r--r--libbuild2/cc/link-rule.hxx37
-rw-r--r--libbuild2/cc/module.cxx87
-rw-r--r--libbuild2/cc/module.hxx37
-rw-r--r--libbuild2/cc/msvc.cxx158
-rw-r--r--libbuild2/cc/pkgconfig-libpkg-config.cxx271
-rw-r--r--libbuild2/cc/pkgconfig-libpkgconf.cxx355
-rw-r--r--libbuild2/cc/pkgconfig.cxx1593
-rw-r--r--libbuild2/cc/pkgconfig.hxx129
-rw-r--r--libbuild2/cc/target.cxx45
-rw-r--r--libbuild2/cc/target.hxx74
-rw-r--r--libbuild2/cc/types.cxx15
-rw-r--r--libbuild2/cc/types.hxx4
-rw-r--r--libbuild2/cc/utility.cxx57
-rw-r--r--libbuild2/cc/utility.hxx29
-rw-r--r--libbuild2/cc/windows-rpath.cxx27
-rw-r--r--libbuild2/cli/buildfile71
-rw-r--r--libbuild2/cli/export.hxx37
-rw-r--r--libbuild2/cli/init.cxx (renamed from build2/cli/init.cxx)36
-rw-r--r--libbuild2/cli/init.hxx (renamed from build2/cli/init.hxx)12
-rw-r--r--libbuild2/cli/module.hxx (renamed from build2/cli/module.hxx)10
-rw-r--r--libbuild2/cli/rule.cxx (renamed from build2/cli/rule.cxx)22
-rw-r--r--libbuild2/cli/rule.hxx (renamed from build2/cli/rule.hxx)15
-rw-r--r--libbuild2/cli/target.cxx (renamed from build2/cli/target.cxx)8
-rw-r--r--libbuild2/cli/target.hxx61
-rw-r--r--libbuild2/common-options.cxx809
-rw-r--r--libbuild2/common-options.hxx484
-rw-r--r--libbuild2/common-options.ixx312
-rw-r--r--libbuild2/common.cli9
-rw-r--r--libbuild2/config/functions.cxx57
-rw-r--r--libbuild2/config/init.cxx228
-rw-r--r--libbuild2/config/module.hxx2
-rw-r--r--libbuild2/config/operation.cxx119
-rw-r--r--libbuild2/config/operation.hxx6
-rw-r--r--libbuild2/config/types.hxx25
-rw-r--r--libbuild2/config/utility.cxx67
-rw-r--r--libbuild2/config/utility.hxx55
-rw-r--r--libbuild2/config/utility.txx4
-rw-r--r--libbuild2/context.cxx741
-rw-r--r--libbuild2/context.hxx371
-rw-r--r--libbuild2/context.ixx6
-rw-r--r--libbuild2/cxx/init.cxx79
-rw-r--r--libbuild2/cxx/init.hxx2
-rw-r--r--libbuild2/cxx/target.cxx28
-rw-r--r--libbuild2/cxx/target.hxx51
-rw-r--r--libbuild2/depdb.cxx228
-rw-r--r--libbuild2/depdb.hxx73
-rw-r--r--libbuild2/depdb.ixx6
-rw-r--r--libbuild2/diagnostics.cxx816
-rw-r--r--libbuild2/diagnostics.hxx673
-rw-r--r--libbuild2/diagnostics.ixx126
-rw-r--r--libbuild2/dist/init.cxx69
-rw-r--r--libbuild2/dist/module.hxx14
-rw-r--r--libbuild2/dist/operation.cxx736
-rw-r--r--libbuild2/dist/rule.cxx90
-rw-r--r--libbuild2/dist/rule.hxx22
-rw-r--r--libbuild2/dist/types.hxx41
-rw-r--r--libbuild2/dump.cxx1086
-rw-r--r--libbuild2/dump.hxx32
-rw-r--r--libbuild2/dyndep.cxx1104
-rw-r--r--libbuild2/dyndep.hxx304
-rw-r--r--libbuild2/file-cache.cxx2
-rw-r--r--libbuild2/file-cache.hxx19
-rw-r--r--libbuild2/file-cache.ixx24
-rw-r--r--libbuild2/file.cxx711
-rw-r--r--libbuild2/file.hxx134
-rw-r--r--libbuild2/file.ixx56
-rw-r--r--libbuild2/filesystem.cxx138
-rw-r--r--libbuild2/filesystem.hxx37
-rw-r--r--libbuild2/filesystem.txx30
-rw-r--r--libbuild2/forward.hxx5
-rw-r--r--libbuild2/function.cxx6
-rw-r--r--libbuild2/function.hxx12
-rw-r--r--libbuild2/function.test.cxx10
-rw-r--r--libbuild2/functions-bool.cxx23
-rw-r--r--libbuild2/functions-builtin.cxx34
-rw-r--r--libbuild2/functions-filesystem.cxx30
-rw-r--r--libbuild2/functions-integer.cxx181
-rw-r--r--libbuild2/functions-name.cxx226
-rw-r--r--libbuild2/functions-name.hxx30
-rw-r--r--libbuild2/functions-path.cxx365
-rw-r--r--libbuild2/functions-process.cxx71
-rw-r--r--libbuild2/functions-project-name.cxx13
-rw-r--r--libbuild2/functions-regex.cxx361
-rw-r--r--libbuild2/functions-string.cxx148
-rw-r--r--libbuild2/functions-target-triplet.cxx19
-rw-r--r--libbuild2/in/init.cxx27
-rw-r--r--libbuild2/in/rule.cxx438
-rw-r--r--libbuild2/in/rule.hxx92
-rw-r--r--libbuild2/in/target.cxx12
-rw-r--r--libbuild2/in/target.hxx7
-rw-r--r--libbuild2/install/functions.cxx116
-rw-r--r--libbuild2/install/init.cxx337
-rw-r--r--libbuild2/install/operation.cxx361
-rw-r--r--libbuild2/install/operation.hxx64
-rw-r--r--libbuild2/install/rule.cxx431
-rw-r--r--libbuild2/install/rule.hxx65
-rw-r--r--libbuild2/install/utility.cxx261
-rw-r--r--libbuild2/install/utility.hxx51
-rw-r--r--libbuild2/lexer.cxx190
-rw-r--r--libbuild2/lexer.hxx70
-rw-r--r--libbuild2/make-parser.cxx171
-rw-r--r--libbuild2/make-parser.hxx83
-rw-r--r--libbuild2/make-parser.test.cxx88
-rw-r--r--libbuild2/make-parser.test.testscript129
-rw-r--r--libbuild2/module.cxx333
-rw-r--r--libbuild2/module.hxx63
-rw-r--r--libbuild2/name.cxx54
-rw-r--r--libbuild2/name.hxx31
-rw-r--r--libbuild2/name.test.cxx16
-rw-r--r--libbuild2/operation.cxx864
-rw-r--r--libbuild2/operation.hxx139
-rw-r--r--libbuild2/options-types.hxx16
-rw-r--r--libbuild2/parser.cxx2736
-rw-r--r--libbuild2/parser.hxx243
-rw-r--r--libbuild2/prerequisite.cxx2
-rw-r--r--libbuild2/prerequisite.hxx18
-rw-r--r--libbuild2/recipe.cxx9
-rw-r--r--libbuild2/recipe.hxx20
-rw-r--r--libbuild2/rule-map.hxx58
-rw-r--r--libbuild2/rule.cxx91
-rw-r--r--libbuild2/rule.hxx112
-rw-r--r--libbuild2/scheduler.cxx45
-rw-r--r--libbuild2/scheduler.hxx68
-rw-r--r--libbuild2/scheduler.test.cxx1
-rw-r--r--libbuild2/scope.cxx73
-rw-r--r--libbuild2/scope.hxx166
-rw-r--r--libbuild2/scope.ixx31
-rw-r--r--libbuild2/script/builtin-options.cxx798
-rw-r--r--libbuild2/script/builtin-options.hxx450
-rw-r--r--libbuild2/script/builtin-options.ixx215
-rw-r--r--libbuild2/script/builtin.cli9
-rw-r--r--libbuild2/script/lexer.cxx11
-rw-r--r--libbuild2/script/lexer.hxx2
-rw-r--r--libbuild2/script/parser.cxx761
-rw-r--r--libbuild2/script/parser.hxx71
-rw-r--r--libbuild2/script/regex.cxx18
-rw-r--r--libbuild2/script/regex.hxx20
-rw-r--r--libbuild2/script/regex.test.cxx5
-rw-r--r--libbuild2/script/run.cxx1731
-rw-r--r--libbuild2/script/run.hxx51
-rw-r--r--libbuild2/script/script.cxx45
-rw-r--r--libbuild2/script/script.hxx49
-rw-r--r--libbuild2/search.cxx67
-rw-r--r--libbuild2/target-key.hxx25
-rw-r--r--libbuild2/target-state.hxx17
-rw-r--r--libbuild2/target-type.hxx79
-rw-r--r--libbuild2/target.cxx451
-rw-r--r--libbuild2/target.hxx870
-rw-r--r--libbuild2/target.ixx248
-rw-r--r--libbuild2/target.txx35
-rw-r--r--libbuild2/test/init.cxx26
-rw-r--r--libbuild2/test/operation.cxx15
-rw-r--r--libbuild2/test/rule.cxx512
-rw-r--r--libbuild2/test/rule.hxx8
-rw-r--r--libbuild2/test/script/lexer+for-loop.test.testscript231
-rw-r--r--libbuild2/test/script/lexer.cxx55
-rw-r--r--libbuild2/test/script/lexer.hxx13
-rw-r--r--libbuild2/test/script/lexer.test.cxx1
-rw-r--r--libbuild2/test/script/parser+command-if.test.testscript6
-rw-r--r--libbuild2/test/script/parser+command-re-parse.test.testscript2
-rw-r--r--libbuild2/test/script/parser+description.test.testscript4
-rw-r--r--libbuild2/test/script/parser+expansion.test.testscript2
-rw-r--r--libbuild2/test/script/parser+for.test.testscript1029
-rw-r--r--libbuild2/test/script/parser+while.test.testscript265
-rw-r--r--libbuild2/test/script/parser.cxx547
-rw-r--r--libbuild2/test/script/parser.hxx18
-rw-r--r--libbuild2/test/script/parser.test.cxx83
-rw-r--r--libbuild2/test/script/runner.cxx55
-rw-r--r--libbuild2/test/script/runner.hxx19
-rw-r--r--libbuild2/test/script/script.cxx117
-rw-r--r--libbuild2/test/script/script.hxx29
-rw-r--r--libbuild2/test/target.cxx2
-rw-r--r--libbuild2/test/target.hxx7
-rw-r--r--libbuild2/token.cxx21
-rw-r--r--libbuild2/token.hxx8
-rw-r--r--libbuild2/types-parsers.cxx153
-rw-r--r--libbuild2/types-parsers.hxx83
-rw-r--r--libbuild2/types.hxx109
-rw-r--r--libbuild2/types.ixx6
-rw-r--r--libbuild2/utility-installed.cxx4
-rw-r--r--libbuild2/utility-uninstalled.cxx6
-rw-r--r--libbuild2/utility.cxx452
-rw-r--r--libbuild2/utility.hxx535
-rw-r--r--libbuild2/utility.ixx177
-rw-r--r--libbuild2/utility.txx64
-rw-r--r--libbuild2/variable.cxx527
-rw-r--r--libbuild2/variable.hxx511
-rw-r--r--libbuild2/variable.ixx50
-rw-r--r--libbuild2/variable.txx32
-rw-r--r--libbuild2/version/init.cxx131
-rw-r--r--libbuild2/version/module.hxx1
-rw-r--r--libbuild2/version/rule.cxx65
-rw-r--r--libbuild2/version/rule.hxx11
-rw-r--r--libbuild2/version/snapshot-git.cxx19
-rw-r--r--libbuild2/version/snapshot.cxx4
-rw-r--r--manifest14
-rw-r--r--old-tests/variable/override/buildfile52
-rw-r--r--old-tests/variable/override/p/buildfile44
-rw-r--r--old-tests/variable/override/simple2
-rwxr-xr-xold-tests/variable/override/test.sh104
-rw-r--r--old-tests/variable/type-pattern-append/buildfile2
-rw-r--r--repositories.manifest6
-rw-r--r--tests/bash/testscript4
-rw-r--r--tests/build/root.build7
-rw-r--r--tests/cc/libu/testscript2
-rw-r--r--tests/cc/modules/common.testscript9
-rw-r--r--tests/cc/modules/modules.testscript2
-rw-r--r--tests/cc/preprocessed/testscript4
-rw-r--r--tests/dependency/recipe/testscript59
-rw-r--r--tests/directive/config.testscript36
-rw-r--r--tests/directive/parsing.testscript2
-rw-r--r--tests/directive/run.testscript3
-rw-r--r--tests/eval/qual.testscript5
-rw-r--r--tests/expansion/escape.testscript17
-rw-r--r--tests/function/builtin/testscript4
-rw-r--r--tests/function/integer/buildfile4
-rw-r--r--tests/function/integer/testscript41
-rw-r--r--tests/function/name/buildfile4
-rw-r--r--tests/function/name/testscript68
-rw-r--r--tests/function/path/testscript142
-rw-r--r--tests/function/regex/testscript116
-rw-r--r--tests/function/string/testscript31
-rw-r--r--tests/in/testscript29
-rw-r--r--tests/libbuild2/driver.cxx29
-rw-r--r--tests/loop/for.testscript14
-rw-r--r--tests/name/extension.testscript2
-rw-r--r--tests/name/pattern.testscript35
-rw-r--r--tests/recipe/buildscript/testscript1247
-rw-r--r--tests/recipe/cxx/testscript12
-rw-r--r--tests/test/script/builtin/sleep.testscript2
-rw-r--r--tests/test/script/common.testscript4
-rw-r--r--tests/test/script/runner/expr.testscript2
-rw-r--r--tests/test/script/runner/for.testscript502
-rw-r--r--tests/test/script/runner/pipe.testscript22
-rw-r--r--tests/test/script/runner/redirect.testscript4
-rw-r--r--tests/test/script/runner/set.testscript225
-rw-r--r--tests/test/script/runner/status.testscript8
-rw-r--r--tests/test/script/runner/timeout.testscript32
-rw-r--r--tests/test/script/runner/while.testscript16
-rw-r--r--tests/test/simple/generated/driver.cxx18
-rw-r--r--tests/test/simple/generated/testscript139
-rw-r--r--tests/value/concat.testscript42
-rw-r--r--tests/value/reverse.testscript55
-rw-r--r--tests/variable/override/testscript76
-rw-r--r--tests/variable/private/buildfile4
-rw-r--r--tests/variable/private/testscript46
-rw-r--r--tests/variable/target-specific/testscript14
-rw-r--r--tests/variable/target-type-pattern-specific/testscript19
347 files changed, 52341 insertions, 14843 deletions
diff --git a/.gitignore b/.gitignore
index 5a9e741..dfb9bab 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,10 +5,16 @@
*.d
*.t
*.i
+*.i.*
*.ii
+*.ii.*
*.o
*.obj
+*.gcm
+*.pcm
+*.ifc
*.so
+*.dylib
*.dll
*.a
*.lib
diff --git a/INSTALL b/INSTALL
new file mode 100644
index 0000000..5d2e225
--- /dev/null
+++ b/INSTALL
@@ -0,0 +1,146 @@
+Unless you specifically only need the build2 build system, you should install
+the entire build2 toolchain instead, either using the install script
+(https://build2.org/install.xhtml) or the build2-toolchain distribution.
+
+The instructions outlined below are essentially a summary of the first three
+steps of the manual bootstrap process described in build2-toolchain with a few
+extra examples that would primarily be useful for distribution packaging.
+
+Also, below we only show commands for UNIX-like operating systems. For other
+operating systems and for more details on each step, refer to the
+build2-toolchain installation documentation.
+
+build2 requires a C++14 compiler. GCC 4.9, Clang 3.7, and MSVC 14 (2015) Update
+3 or any later versions of these compilers are known to work. The build system
+is self-hosted, which means that unless you have obtained a pre-built binary
+from somewhere else, you will need to bootstrap it. To accomplish this, we use
+the bootstrap.sh shell script (or equivalent batch files for Windows) found in
+the root directory of the build2 distribution. On UNIX-like operating systems
+as well as on Windows with MinGW or Clang, a GNU make makefile called
+bootstrap.gmake can also be used with the major advantage over the script being
+support for parallel compilation and an out of tree build (see comments inside
+the makefile for more information).
+
+The following is the recommended sequence of steps:
+
+0. Prerequisites
+
+ Get libbutl (normally from the same place where you got build2) and place
+ it inside build2, so that you have:
+
+ build2-X.Y.Z
+ |
+ `-- libbutl-X.Y.Z
+
+1. Bootstrap, Phase 1
+
+ First, we build a minimal build system using bootstrap.sh (run bootstrap.sh
+ -h for options):
+
+ $ cd build2-X.Y.Z
+ $ ./bootstrap.sh g++
+
+ $ build2/b-boot --version
+
+ Alternatively, we can use the bootstrap.gmake makefile:
+
+ $ cd build2-X.Y.Z
+ $ make -f bootstrap.gmake -j 8 CXX=g++
+
+ $ build2/b-boot --version
+
+ If you would prefer to bootstrap out of source tree, this is supported by
+ the makefile (but not the script):
+
+ $ mkdir build2-boot
+ $ make -C build2-boot -f ../build2-X.Y.Z/bootstrap.gmake -j 8 CXX=g++
+
+ $ build2-boot/build2/b-boot --version
+
+2. Bootstrap, Phase 2
+
+ Then, we rebuild the build system with the result of Phase 1 linking
+ libraries statically.
+
+ $ build2/b-boot config.cxx=g++ config.bin.lib=static build2/exe{b}
+ $ mv build2/b build2/b-boot
+
+ $ build2/b-boot --version
+
+ Or, alternatively, for an out of source build:
+
+ $ build2-boot/build2/b-boot config.cxx=g++ config.bin.lib=static \
+ build2-X.Y.Z/build2/@build2-static/build2/exe{b}
+
+ $ build2-static/build2/b --version
+
+3. Build and Install
+
+ Finally, we configure, build, and optionally install the "final" version
+ using shared libraries:
+
+ $ build2/b-boot configure \
+ config.config.hermetic=true \
+ config.cxx=g++ \
+ config.cc.coptions=-O3 \
+ config.bin.rpath=/usr/local/lib \
+ config.install.root=/usr/local \
+ config.install.sudo=sudo
+
+ $ build2/b-boot
+
+ | The config.config.hermetic=true configuration variable in the first
+ | command makes sure the embedded ~host and ~build2 configurations include
+ | the current environment. This is especially important for ~build2 which
+ | is used to dynamically build and load ad hoc recipes and build system
+ | modules and must therefore match the environment that was used to build
+ | the build system itself.
+
+ If you are only interested in installing the result, then you can avoid
+ building tests by specifying the update-for-install operation in the last
+ command:
+
+ $ build2/b-boot update-for-install
+
+   On the other hand, if you are not planning to install the result, then
+ you can omit the config.install.* values as well as .rpath.
+
+ To install:
+
+ $ build2/b-boot install
+ $ which b
+ $ b --version
+
+ To uninstall:
+
+ $ b uninstall
+ $ which b
+
+ Or, alternatively, for an out of source build:
+
+ $ build2-static/build2/b configure: build2-X.Y.Z/@build2-shared/ \
+ config.config.hermetic=true \
+ config.cxx=g++ \
+ config.cc.coptions=-O3 \
+ config.bin.rpath=/usr/local/lib \
+ config.install.root=/usr/local \
+ config.install.sudo=sudo
+
+ $ build2-static/build2/b update-for-install: build2-shared/
+
+ $ build2-static/build2/b install: build2-shared/
+
+ $ b uninstall: build2-shared/
+
+ For distribution packaging it is often required to install "as if" into the
+ system directory (for example, /usr) but to copy the files somewhere else
+ (for example, /tmp/install/usr; aka the DESTDIR functionality). In build2
+ this can be achieved with the config.install.chroot configuration variable,
+ for example:
+
+ $ build2-static/build2/b configure: build2-X.Y.Z/@build2-shared/ \
+ config.config.hermetic=true \
+ config.cxx=g++ \
+ config.cc.coptions=-O3 \
+ config.install.root=/usr \
+ config.install.chroot=/tmp/install
diff --git a/INSTALL.cli b/INSTALL.cli
index dd29c17..9d20757 100644
--- a/INSTALL.cli
+++ b/INSTALL.cli
@@ -89,7 +89,7 @@ $ mv build2/b build2/b-boot
$ build2/b-boot --version
\
-Or, alternatively, for an out of tree build:
+Or, alternatively, for an out of source build:
\
$ build2-boot/build2/b-boot config.cxx=g++ config.bin.lib=static \
@@ -150,7 +150,7 @@ $ b uninstall
$ which b
\
-Or, alternatively, for an out of tree build:
+Or, alternatively, for an out of source build:
\
$ build2-static/build2/b configure: build2-X.Y.Z/@build2-shared/ \
diff --git a/LICENSE b/LICENSE
index 0ca6964..c3f1323 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2014-2021 the build2 authors (see the AUTHORS file).
+Copyright (c) 2014-2023 the build2 authors (see the AUTHORS file).
Copyright (c) Microsoft Corporation for the libbuild2/cc/msvc-setup.h file.
Permission is hereby granted, free of charge, to any person obtaining a copy
diff --git a/NEWS b/NEWS
index f14fb83..2bfb0e7 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,515 @@
+Version 0.16.0
+
+ * Support for Objective-C/C++ compilation.
+
+ Specifically, the c and cxx modules now provide the c.objc and cxx.objcxx
+ submodules which can be loaded in order to register the m{}/mm{} target
+ types and enable Objective-C/C++ compilation in the c and cxx compile
+ rules. Note that c.objc and cxx.objcxx must be loaded after the c and cxx
+ modules, respectively, and while the m{}/mm{} target types are registered
+ unconditionally, compilation is only enabled if the C/C++ compiler
+ supports Objective-C/C++ for this target platform. Typical usage:
+
+ # root.build
+ #
+ using cxx
+ using cxx.objcxx
+
+ # buildfile
+ #
+ lib{hello}: {hxx cxx}{*}
+ lib{hello}: mm{*}: include = ($cxx.target.class == 'macos')
+
+ Note also that while there is support for linking Objective-C/C++
+ executables and libraries, this is done using the C/C++ compiler driver
+ and no attempt to automatically link any necessary Objective-C runtime
+ (such as -lobjc) is made.
+
+ * Support for Assembler with C Preprocessor (.S) compilation.
+
+    Specifically, the c module now provides the c.as-cpp submodule which can
+ be loaded in order to register the S{} target type and enable Assembler
+ with C Preprocessor compilation in the c compile rule. For details, refer
+ to "Assembler with C Preprocessor Compilation" in the manual.
+
+ * Low verbosity diagnostics rework.
+
+ The low verbosity (level 1) rule diagnostics format has been adjusted to
+ include the output target where appropriate. The implementation has also
+ been redesigned to go through the uniform print_diag() API, including for
+ the `diag` pseudo-builtin in ad hoc recipes. Specifically, the `diag`
+ builtin now expects its arguments to be in one of the following two forms
+ (which correspond to the two forms of print_diag()):
+
+ diag <prog> <l-target> <comb> <r-target>...
+ diag <prog> <r-target>...
+
+ If the `diag` builtin is not specified, the default diagnostics is now
+ equivalent to, for update:
+
+ diag <prog> ($<[0]) -> $>
+
+ And for other operations:
+
+ diag <prog> $>
+
+ For details, see the print_diag() API description in diagnostics.hxx. See
+ also GH issue #40 for additional background/details.
+
+ * New include_arch installation location and the corresponding
+ config.install.include_arch configuration variable.
+
+ This location is meant for architecture-specific files, such as
+ configuration headers. By default it's the same as the standard include
+ location but can be configured by the user to a different value (for
+ example, /usr/include/x86_64-linux-gnu/) for platforms that support
+ multiple architectures from the same installation location. This is how
+ one would normally use it from a buildfile:
+
+ # The configuration header may contain target architecture-specific
+ # information so install it into include_arch/ instead of include/.
+ #
+ h{*}: install = include/libhello/
+ h{config}: install = include_arch/libhello/
+
+ * The in.substitution variable has been renamed to in.mode.
+
+ The original name is still recognized for backwards compatibility.
+
+ * Support for post hoc prerequisites.
+
+ Unlike normal and ad hoc prerequisites, a post hoc prerequisite is built
+ after the target, not before. It may also form a dependency cycle together
+ with normal/ad hoc prerequisites. In other words, all this form of
+ dependency guarantees is that a post hoc prerequisite will be built if its
+ dependent target is built.
+
+ A canonical example where this can be useful is a library with a plugin:
+ the plugin depends on the library while the library would like to make
+ sure the plugin is built whenever the library is built so that programs
+ that link the library can be executed without having to specify explicit
+ dependency on the plugin (at least for the dynamic linking case):
+
+ lib{hello}: ...
+ lib{hello-plugin}: ... lib{hello}
+ libs{hello}: libs{hello-plugin}: include = posthoc
+
+ Note that there is no guarantee that post hoc prerequisites will be built
+ before the dependents of the target "see" it as built. Rather, it is
+ guaranteed that post hoc prerequisites will be built before the end of the
+ overall build (more precisely, before the current operation completes).
+ As a result, post hoc prerequisites should not be relied upon if the
+ result (for example, a source code generator) is expected to be used
+ during build (more precisely, within the same operation).
+
+ Note also that the post hoc semantics is not the same as order-only in
+ GNU make. In fact, it is an even more "relaxed" form of dependency.
+ Specifically, while order-only prerequisite is guaranteed to be built
+ before the target, post hoc prerequisite is only guaranteed to be built
+ before the end of the overall build.
+
+Version 0.15.0
+
+ * Generated C/C++ headers and ad hoc sources are now updated during match.
+
+ Specifically, all headers as well as ad hoc headers and sources are now
+ treated by the cc::link_rule as if they had update=match unless explicit
+ update=execute is specified (see below on the update operation-specific
+ variable).
+
+ This change should be transparent to most projects. For background and
+ discussion of rare cases where you may wish to disable this, see:
+
+ https://github.com/build2/HOWTO/blob/master/entries/handle-auto-generated-headers.md
+
+ * Support for rule hints.
+
+ A rule hint is a target attribute, for example:
+
+ [rule_hint=cxx] exe{hello}: c{hello}
+
+ Rule hints can be used to resolve ambiguity when multiple rules match the
+ same target as well as to override an unambiguous match.
+
+ In cc::link_rule we now support "linking" libraries without any sources or
+ headers with a hint. This can be useful for creating "metadata libraries"
+ whose only purpose is to convey metadata (options to use and/or libraries
+ to link).
+
+ * UTF-8 is now the default input/source character set for C/C++ compilation.
+
+ Specifically, the cc module now passes the appropriate compiler option
+ (/utf-8 for MSVC and -finput-charset=UTF-8 for GCC and Clang) unless a
+ custom value is already specified (with /{source,execution}-charset for
+ MSVC and -finput-charset for GCC and Clang).
+
+ This change may trigger new compilation errors in your source code if
+ it's not valid UTF-8 (such errors most commonly point into comments).
+ For various ways to fix this, see:
+
+ https://github.com/build2/HOWTO/blob/master/entries/convert-source-files-to-utf8.md
+
+ * Project configuration variables are now non-nullable by default.
+
+ A project configuration variable with the NULL default value is naturally
+ assumed nullable, for example:
+
+ config [string] config.libhello.fallback_name ?= [null]
+
+ Otherwise, to make a project configuration nullable use the `null`
+ variable attribute, for example:
+
+ config [string, null] config.libhello.fallback_name ?= "World"
+
+ * New $relative(<path>, <dir-path>) function.
+
+ * New $root_directory(<path>) function.
+
+ * New $size() function to get the size of string, path, dir_path.
+
+ * New $size() function to get the size of a sequence (strings, paths, etc).
+
+ * New $sort() function to sort a sequence (strings, paths, etc).
+
+ The function has the following signature:
+
+ $sort(<sequence> [, <flags>])
+
+ The following flag is supported by all the overloads:
+
+ dedup - in addition to sorting also remove duplicates
+
+    Additionally, the strings overload also supports the following flag:
+
+ icase - sort ignoring case
+
+ Note that on case-insensitive filesystems the paths and dir_paths
+ overloads' order is case-insensitive.
+
+ * New $config.origin() function for querying configuration value origin.
+
+    Given a config.* variable name, this function returns one of `undefined`,
+ `default`, `buildfile`, or `override`.
+
+ * Recognition of -pthread as a special -l option in *.libs.
+
+ For background, see:
+
+ https://github.com/build2/HOWTO/blob/master/entries/link-pthread.md
+
+ * The bin.whole (whole archive) value is now saved in generated pkg-config
+ files.
+
+ * Ability to customize header and library search paths in generated
+ pkg-config files.
+
+ Specifically, {cc,c,cxx}.pkgconfig.{include,lib} variables specify header
+ (-I) and library (-L) search paths to use in the generated pkg-config
+ files instead of the default install.{include,lib}. Relative paths are
+ resolved as installation paths. For example:
+
+ lib{Qt6Core}: cxx.pkgconfig.include = include/qt6/
+
+ * Ability to save user metadata in C/C++ libraries, including in generated
+ pkg-config files.
+
+ For background and details, see:
+
+ https://github.com/build2/HOWTO/blob/master/entries/convey-additional-information-with-exe-lib.md
+
+ * Support for rule-specific search in immediate import.
+
+ We can now nominate a rule to perform the rule-specific search (if
+ required) using the rule_hint attribute. For example:
+
+ import! [metadata, rule_hint=cxx.link] lib = libhello%lib{hello}
+
+ * Support for dynamic dependencies in ad hoc recipes.
+
+ Specifically, the `depdb` builtin now has the new `dyndep` command that
+ can be used to extract dynamic dependencies from program output or a
+ file. For example, from program output:
+
+ obje{hello.o}: cxx{hello}
+ {{
+ s = $path($<[0])
+ o = $path($>)
+
+ poptions = $cxx.poptions $cc.poptions
+ coptions = $cc.coptions $cxx.coptions
+
+ depdb dyndep $poptions --what=header --default-type=h -- \
+ $cxx.path $poptions $coptions $cxx.mode -M -MG $s
+
+ diag c++ ($<[0])
+
+ $cxx.path $poptions $coptions $cxx.mode -o $o -c $s
+ }}
+
+ Or, alternatively, from a file:
+
+ t = $(o).t
+ depdb dyndep $poptions --what=header --default-type=h --file $t -- \
+ $cxx.path $poptions $coptions $cxx.mode -M -MG $s >$t
+
+ The above depdb-dyndep commands will run the C++ compiler with the -M -MG
+ options to extract the header dependency information, parse the resulting
+ make dependency declaration (either from stdout or from file) and enter
+ each header as a prerequisite of the obje{hello.o} target, as if they were
+ listed explicitly. It will also save this list of headers in the auxiliary
+ dependency database (hello.o.d file) in order to detect changes to these
+ headers on subsequent updates. The --what option specifies what to call
+ the dependencies being extracted in diagnostics. The --default-type option
+ specifies the default target type to use for a dependency if its file name
+ cannot be mapped to a target type.
+
+ The above depdb-dyndep variant extracts the dependencies ahead of the
+ compilation proper and will handle auto-generated headers (see the -MG
+ option for details) provided we pass the header search paths where they
+ could be generated with the -I options (passed as $poptions in the above
+ example).
+
+ If there can be no auto-generated dependencies or if they can all be
+ listed explicitly as static prerequisites, then we can use a variant of
+ the depdb-dyndep command that extracts the dependencies as a by-product of
+ compilation. In this mode only the --file input is supported. For example
+ (assuming hxx{config} is auto-generated):
+
+ obje{hello.o}: cxx{hello} hxx{config}
+ {{
+ s = $path($<[0])
+ o = $path($>)
+ t = $(o).t
+
+ poptions = $cxx.poptions $cc.poptions
+ coptions = $cc.coptions $cxx.coptions
+
+ depdb dyndep --byproduct --what=header --default-type=h --file $t
+
+ diag c++ ($<[0])
+
+ $cxx.path $poptions $coptions $cxx.mode -MD -MF $t -o $o -c $s
+ }}
+
+ Other options supported by the depdb-dyndep command:
+
+ --format <name>
+
+ Dependency format. Currently only the `make` dependency format is
+ supported and is the default.
+
+ --cwd <dir>
+
+ Working directory used to complete relative dependency paths. This
+ option is currently only valid in the --byproduct mode (in the normal
+ mode relative paths indicate non-existent files).
+
+ --adhoc
+
+ Treat dynamically discovered prerequisites as ad hoc (so they don't end
+ up in $<; only in the normal mode).
+
+ --drop-cycles
+
+ Drop prerequisites that are also targets. Only use this option if you
+ are sure such cycles are harmless, that is, the output is not affected
+ by such prerequisites' content.
+
+ --update-{include,exclude} <tgt>|<pat>
+
+ Prerequisite targets/patterns to include/exclude (from the static
+ prerequisite set) for update during match (those excluded will be
+ updated during execute). The order in which these options are specified
+ is significant with the first target/pattern that matches determining
+ the result. If only the --update-include options are specified, then
+ only the explicitly included prerequisites will be updated. Otherwise,
+ all prerequisites that are not explicitly excluded will be updated. If
+ none of these options is specified, then all the static prerequisites
+ are updated during match. Note also that these options do not apply to
+ ad hoc prerequisites which are always updated during match.
+
+ The common use-case for the --update-exclude option is to omit updating
+ a library which is only needed to extract exported preprocessor options.
+ Here is a typical pattern:
+
+ import libs = libhello%lib{hello}
+
+ libue{hello-meta}: $libs
+
+ obje{hello.o}: cxx{hello} libue{hello-meta}
+ {{
+ s = $path($<[0])
+ o = $path($>)
+
+ lib_poptions = $cxx.lib_poptions(libue{hello-meta}, obje)
+ depdb hash $lib_poptions
+
+ poptions = $cxx.poptions $cc.poptions $lib_poptions
+ coptions = $cc.coptions $cxx.coptions
+
+ depdb dyndep $poptions --what=header --default-type=h \
+ --update-exclude libue{hello-meta} -- \
+ $cxx.path $poptions $coptions $cxx.mode -M -MG $s
+
+ diag c++ ($<[0])
+
+ $cxx.path $poptions $coptions $cxx.mode -o $o -c $s
+ }}
+
+ As another example, sometimes we need to extract the "common interface"
+ preprocessor options that are independent of the library type (static
+ or shared). For example, the Qt moc compiler needs to "see" the C/C++
+ preprocessor options from imported libraries if they could affect its
+ input. Here is how we can implement this:
+
+ import libs = libhello%lib{hello}
+
+ libul{hello-meta}: $libs
+
+ cxx{hello-moc}: hxx{hello} libul{hello-meta} $moc
+ {{
+ s = $path($<[0])
+ o = $path($>[0])
+ t = $(o).t
+
+ lib_poptions = $cxx.lib_poptions(libul{hello-meta})
+ depdb hash $lib_poptions
+
+ depdb dyndep --byproduct --drop-cycles --what=header --default-type=h \
+ --update-exclude libul{hello-meta} --file $t
+
+ diag moc ($<[0])
+
+ $moc $cc.poptions $cxx.poptions $lib_poptions \
+ -f $leaf($s) --output-dep-file --dep-file-path $t -o $o $s
+ }}
+
+ Planned future improvements include support for the `lines` (list of
+ files, one per line) input format in addition to `make` and support for
+ dynamic targets in addition to prerequisites.
+
+ * Support for specifying custom ad hoc pattern rule names.
+
+ Besides improving diagnostics, this allows us to use such a name in the
+ rule hints, for example:
+
+ [rule_name=hello.link] exe{~'/(.*)/'}: obje{~'/\1/'}
+ {{
+ $cxx.path -o $path($>) $path($<[0])
+ }}
+
+ [rule_hint=hello] exe{hello}: obje{hello}
+
+ obje{hello}: c{hello-c}
+
+ * Ability to disfigure specific configuration variables.
+
+ The new config.config.disfigure variable can be used to specify the list
+ of variables to ignore when loading config.build (and any files specified
+ in config.config.load), letting them to take on the default values. For
+ example:
+
+ $ b configure config.config.disfigure=config.hello.fancy
+
+ Besides names, variables can also be specified as patterns in the
+ config.<prefix>.(*|**)[<suffix>] form where `*` matches single
+ component names (i.e., `foo` but not `foo.bar`), and `**` matches
+ single and multi-component names. Currently only single wildcard (`*` or
+ `**`) is supported. Additionally, a pattern in the config.<prefix>(*|**)
+ form (i.e., without `.` after <prefix>) matches config.<prefix>.(*|**)
+ plus config.<prefix> itself (but not config.<prefix>foo).
+
+ For example, to disfigure all the project configuration variables (while
+ preserving all the module configuration variables; note quoting to prevent
+ pattern expansion):
+
+ $ b config.config.disfigure="'config.hello**'"
+
+ * Ability to omit loading config.build.
+
+ If the new config.config.unload variable is set to true, then omit loading
+ the project's configuration from the config.build file. Note that the
+ configuration is still loaded from config.config.load if specified. Note
+ also that similar to config.config.load, only overrides specified on this
+ project's root scope and global scope are considered.
+
+ * Ability to match libul{} targets.
+
+ The bin.libul rule picks, matches, and unmatches (if possible) a member
+ for the purpose of making its metadata (for example, library's poptions,
+ if it's one of the cc libraries) available.
+
+ * Ability to get common interface options via ${c,cxx}.lib_poptions().
+
+ Specifically, the output target type may now be omitted for utility
+ libraries (libul{} and libu[eas]{}). In this case, only "common interface"
+ options will be returned for lib{} dependencies. This is primarily useful
+ for obtaining poptions to be passed to tools other than C/C++ compilers
+ (for example, Qt moc).
+
+ * Ability to control -I translation to -isystem or /external:I in
+ ${c,cxx}.lib_poptions().
+
+ See the function documentation for details.
+
+ * New `update` operation-specific variable.
+
+ This variable is similar to the already existing `clean` and `test`
+ variables but besides the standard `true` and `false` values, it can also
+ be set to `unmatch` (match but do not update) and `match` (update during
+ match) and `execute` (update during execute, as is normal; this value is
+ primarily useful if the rule has the `match` semantics by default).
+
+ Note that the unmatch (match but do not update) and match (update during
+ match) values are only supported by certain rules (and potentially only
+ for certain prerequisite types).
+
+ Additionally:
+
+ - All the operation-specific variables are now checked for `false` as an
+ override for the prerequisite-specific `include` variable. This can now
+ be used to disable a prerequisite for update, for example:
+
+ ./: exe{test}: update = false
+
+ - Ad hoc Buildscript recipes now support update=unmatch|match.
+
+ - The cc::link_rule now supports the `match` value for headers and ad hoc
+ prerequisites. This can be used to make sure all the library headers are
+ updated before matching any of its (or dependent's) object files.
+
+ * New build.mode global scope variable.
+
+ This variable signals the mode the build system may be running in. The two
+ core modes are `no-external-modules` (bootstrapping of external modules is
+ disabled, see --no-external-modules for details) and `normal` (normal
+ execution). Other build system drivers may invent additional modes (for
+ example, the bpkg `skeleton` mode; see "Package Build System Skeleton" in
+ the package manager manual for details).
+
+ * New cmdline value type for canned command lines.
+
+ The Testscript and Buildscript languages now use the special cmdline value
+ type for canned command lines. Specifically, the re-lexing after expansion
+ now only happens if the expanded value is of the cmdline type. See
+ "Lexical Structure" in the Testscript manual for details.
+
+ * The bash build system module now installs bash modules into
+ bin/<project>.bash/ instead of bin/<project>/ to avoid clashes.
+
+ * New --trace-{match,execute} options.
+
+ These options can be used to understand which dependency chain causes
+ matching or execution of a particular target. See b(1) for details.
+
+ * JSON format support for the --structured-result option and the info meta
+ operation.
+
+ See b(1) for details.
+
+ * Switch to using libpkg-config instead of libpkgconf for loading pkg-config
+ files.
+
Version 0.14.0
* Support for hermetic build configurations.
@@ -63,7 +575,7 @@ Version 0.14.0
For example, given the above rule and dependency declaration, the
effective dependency is going to be:
- <exe{hello} file{hello.map>: cxx{hello} hxx{hello} hxx{common}
+ <exe{hello} file{hello.map}>: cxx{hello} hxx{hello} hxx{common}
Similar to ad hoc recipes, ad hoc rules can be written in Buildscript or
C++.
@@ -1150,10 +1662,10 @@ Version 0.8.0
The alternative variable substitution symbol can be specified with the
in.symbol variable and lax (instead of the default strict) mode with
- in.substitution. For example:
+ in.mode. For example:
file{test}: in.symbol = '@'
- file{test}: in.substitution = lax
+ file{test}: in.mode = lax
* New 'bash' build system module that provides modularization support for
bash scripts. See the build system manual for all the details.
@@ -1328,7 +1840,7 @@ Version 0.7.0
* Support for forwarded configurations with target backlinking. See the
configure meta-operation discussion in b(1) for details.
- * Improvements to the in module (in.symbol, in.substitution={strict|lax}).
+ * Improvements to the in module (in.symbol, in.mode={strict|lax}).
* New $directory(), $base(), $leaf() and $extension() path functions.
diff --git a/bootstrap-clang.bat b/bootstrap-clang.bat
index 00302e9..5a06a70 100644
--- a/bootstrap-clang.bat
+++ b/bootstrap-clang.bat
@@ -88,7 +88,7 @@ rem worked around by passing an obscure internal option. Clang 9 doesn't
rem have this problem. To keep things simple, let's just suppress this
rem warning.
rem
-set "ops=-m64 -std=c++1y -D_MT -D_CRT_SECURE_NO_WARNINGS -Xlinker /ignore:4217"
+set "ops=-finput-charset=UTF-8 -m64 -std=c++1y -D_MT -D_CRT_SECURE_NO_WARNINGS -Xlinker /ignore:4217"
:ops_next
shift
if "_%1_" == "__" (
diff --git a/bootstrap-mingw.bat b/bootstrap-mingw.bat
index df7e677..5638659 100644
--- a/bootstrap-mingw.bat
+++ b/bootstrap-mingw.bat
@@ -83,7 +83,7 @@ rem
rem Note that for as long as we support GCC 4.9 we have to compile in the
rem C++14 mode since 4.9 doesn't recognize c++1z.
rem
-set "ops=-std=c++1y"
+set "ops=-finput-charset=UTF-8 -std=c++1y"
:ops_next
shift
if "_%1_" == "__" (
@@ -107,7 +107,7 @@ for %%d in (%src%) do (
)
echo on
-%cxx% -I%libbutl% -I. -DBUILD2_BOOTSTRAP -DBUILD2_HOST_TRIPLET=\"x86_64-w64-mingw32\" %ops% -o build2\b-boot.exe %r% -limagehlp
+%cxx% -I%libbutl% -I. -DBUILD2_BOOTSTRAP -DBUILD2_HOST_TRIPLET=\"x86_64-w64-mingw32\" %ops% -o build2\b-boot.exe %r% -pthread -limagehlp
@echo off
if errorlevel 1 goto error
diff --git a/bootstrap-msvc.bat b/bootstrap-msvc.bat
index 3d74427..6a6fcbc 100644
--- a/bootstrap-msvc.bat
+++ b/bootstrap-msvc.bat
@@ -111,7 +111,7 @@ set "src=%src% %libbutl%\libbutl"
rem Get the compile options.
rem
-set "ops=/nologo /EHsc /MT /MP"
+set "ops=/nologo /utf-8 /EHsc /MT /MP"
:ops_next
shift
if "_%1_" == "__" (
diff --git a/bootstrap.gmake b/bootstrap.gmake
index 1e0e8e2..a2c9779 100644
--- a/bootstrap.gmake
+++ b/bootstrap.gmake
@@ -51,7 +51,7 @@ ifeq ($(OS),Windows_NT)
ifneq ($(filter %-w64-mingw32,$(target)),)
host := x86_64-w64-mingw32
chost := $(host)
- override LIBS += -limagehlp
+ override LIBS += -pthread -limagehlp
else ifneq ($(filter %-windows-msvc,$(target)),)
host := x86_64-microsoft-win32-msvc
chost := $(host)
@@ -65,7 +65,7 @@ ifeq ($(OS),Windows_NT)
$(error unsupported target $(target))
endif
else
- override LIBS += -lpthread
+ override LIBS += -pthread
endif
# Remove all the built-in rules, enable second expansion, etc.
@@ -190,13 +190,13 @@ $(out_root)/build2/b-boot$(exe): $(build2_obj) $(libbuild2_obj) $(libbutl_obj)
$(CXX) -std=c++1y $(CXXFLAGS) $(LDFLAGS) -o $@ $^ $(LIBS)
$(out_root)/build2/%.b.o: $(src_root)/build2/%.cxx | $$(dir $$@).
- $(CXX) -I$(libbutl) -I$(src_root) -DBUILD2_BOOTSTRAP -DBUILD2_HOST_TRIPLET=\"$(chost)\" $(CPPFLAGS) -std=c++1y $(CXXFLAGS) -o $@ -c $<
+ $(CXX) -I$(libbutl) -I$(src_root) -DBUILD2_BOOTSTRAP -DBUILD2_HOST_TRIPLET=\"$(chost)\" $(CPPFLAGS) -finput-charset=UTF-8 -std=c++1y $(CXXFLAGS) -o $@ -c $<
$(out_root)/libbuild2/%.b.o: $(src_root)/libbuild2/%.cxx | $$(dir $$@).
- $(CXX) -I$(libbutl) -I$(src_root) -DBUILD2_BOOTSTRAP -DBUILD2_HOST_TRIPLET=\"$(chost)\" $(CPPFLAGS) -std=c++1y $(CXXFLAGS) -o $@ -c $<
+ $(CXX) -I$(libbutl) -I$(src_root) -DBUILD2_BOOTSTRAP -DBUILD2_HOST_TRIPLET=\"$(chost)\" $(CPPFLAGS) -finput-charset=UTF-8 -std=c++1y $(CXXFLAGS) -o $@ -c $<
$(libbutl_out)/%.b.o: $(libbutl)/libbutl/%.cxx | $$(dir $$@).
- $(CXX) -I$(libbutl) -DBUILD2_BOOTSTRAP $(CPPFLAGS) -std=c++1y $(CXXFLAGS) -o $@ -c $<
+ $(CXX) -I$(libbutl) -DBUILD2_BOOTSTRAP $(CPPFLAGS) -finput-charset=UTF-8 -std=c++1y $(CXXFLAGS) -o $@ -c $<
.PRECIOUS: %/.
%/. :
diff --git a/bootstrap.sh b/bootstrap.sh
index 14e52cf..9bd13b4 100755
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -147,4 +147,4 @@ done
# mode since 4.9 doesn't recognize c++1z.
#
set -x
-"$cxx" "-I$libbutl" -I. -DBUILD2_BOOTSTRAP '-DBUILD2_HOST_TRIPLET="'"$host"'"' -std=c++1y "$@" -o build2/b-boot $r -lpthread
+"$cxx" "-I$libbutl" -I. -DBUILD2_BOOTSTRAP '-DBUILD2_HOST_TRIPLET="'"$host"'"' -finput-charset=UTF-8 -std=c++1y "$@" -o build2/b-boot $r -lpthread
diff --git a/build/root.build b/build/root.build
index 4925c19..ffc1a0f 100644
--- a/build/root.build
+++ b/build/root.build
@@ -20,11 +20,18 @@ if ($cxx.target.system == 'win32-msvc')
cxx.poptions += -D_CRT_SECURE_NO_WARNINGS -D_SCL_SECURE_NO_WARNINGS
if ($cxx.class == 'msvc')
- cxx.coptions += /wd4251 /wd4275 /wd4800 /wd4819
+ cxx.coptions += /wd4251 /wd4275 /wd4800
elif ($cxx.id == 'gcc')
+{
cxx.coptions += -Wno-maybe-uninitialized -Wno-free-nonheap-object \
-Wno-stringop-overread # libbutl
+ if ($cxx.version.major >= 13)
+ cxx.coptions += -Wno-dangling-reference
+}
+elif ($cxx.id.type == 'clang' && $cxx.version.major >= 15)
+ cxx.coptions += -Wno-unqualified-std-cast-call
+
cxx.poptions =+ "-I$out_root" "-I$src_root"
# While we don't have any C sources to compile, we need to get the C compiler
@@ -49,3 +56,11 @@ using? cli
# Specify the test target for cross-testing.
#
test.target = $cxx.target
+
+# Temporary ability to build with now deprecated libpkgconf instead of
+# libpkg-config. Note that libpkgconf is known to have issues on Windows and
+# Mac OS so this should only be used on Linux and maybe BSDs. Also note that
+# we will only keep this until upstream (again) breaks backwards compatibility
+# at which point we will drop this support.
+#
+config [bool, config.report=false] config.build2.libpkgconf ?= false
diff --git a/build2/b-options.cxx b/build2/b-options.cxx
deleted file mode 100644
index 1c59231..0000000
--- a/build2/b-options.cxx
+++ /dev/null
@@ -1,1707 +0,0 @@
-// -*- C++ -*-
-//
-// This file was generated by CLI, a command line interface
-// compiler for C++.
-//
-
-// Begin prologue.
-//
-#include <build2/types-parsers.hxx>
-//
-// End prologue.
-
-#include <build2/b-options.hxx>
-
-#include <map>
-#include <set>
-#include <string>
-#include <vector>
-#include <utility>
-#include <ostream>
-#include <sstream>
-#include <cstring>
-#include <fstream>
-
-namespace build2
-{
- namespace cl
- {
- // unknown_option
- //
- unknown_option::
- ~unknown_option () throw ()
- {
- }
-
- void unknown_option::
- print (::std::ostream& os) const
- {
- os << "unknown option '" << option ().c_str () << "'";
- }
-
- const char* unknown_option::
- what () const throw ()
- {
- return "unknown option";
- }
-
- // unknown_argument
- //
- unknown_argument::
- ~unknown_argument () throw ()
- {
- }
-
- void unknown_argument::
- print (::std::ostream& os) const
- {
- os << "unknown argument '" << argument ().c_str () << "'";
- }
-
- const char* unknown_argument::
- what () const throw ()
- {
- return "unknown argument";
- }
-
- // missing_value
- //
- missing_value::
- ~missing_value () throw ()
- {
- }
-
- void missing_value::
- print (::std::ostream& os) const
- {
- os << "missing value for option '" << option ().c_str () << "'";
- }
-
- const char* missing_value::
- what () const throw ()
- {
- return "missing option value";
- }
-
- // invalid_value
- //
- invalid_value::
- ~invalid_value () throw ()
- {
- }
-
- void invalid_value::
- print (::std::ostream& os) const
- {
- os << "invalid value '" << value ().c_str () << "' for option '"
- << option ().c_str () << "'";
-
- if (!message ().empty ())
- os << ": " << message ().c_str ();
- }
-
- const char* invalid_value::
- what () const throw ()
- {
- return "invalid option value";
- }
-
- // eos_reached
- //
- void eos_reached::
- print (::std::ostream& os) const
- {
- os << what ();
- }
-
- const char* eos_reached::
- what () const throw ()
- {
- return "end of argument stream reached";
- }
-
- // file_io_failure
- //
- file_io_failure::
- ~file_io_failure () throw ()
- {
- }
-
- void file_io_failure::
- print (::std::ostream& os) const
- {
- os << "unable to open file '" << file ().c_str () << "' or read failure";
- }
-
- const char* file_io_failure::
- what () const throw ()
- {
- return "unable to open file or read failure";
- }
-
- // unmatched_quote
- //
- unmatched_quote::
- ~unmatched_quote () throw ()
- {
- }
-
- void unmatched_quote::
- print (::std::ostream& os) const
- {
- os << "unmatched quote in argument '" << argument ().c_str () << "'";
- }
-
- const char* unmatched_quote::
- what () const throw ()
- {
- return "unmatched quote";
- }
-
- // scanner
- //
- scanner::
- ~scanner ()
- {
- }
-
- // argv_scanner
- //
- bool argv_scanner::
- more ()
- {
- return i_ < argc_;
- }
-
- const char* argv_scanner::
- peek ()
- {
- if (i_ < argc_)
- return argv_[i_];
- else
- throw eos_reached ();
- }
-
- const char* argv_scanner::
- next ()
- {
- if (i_ < argc_)
- {
- const char* r (argv_[i_]);
-
- if (erase_)
- {
- for (int i (i_ + 1); i < argc_; ++i)
- argv_[i - 1] = argv_[i];
-
- --argc_;
- argv_[argc_] = 0;
- }
- else
- ++i_;
-
- ++start_position_;
- return r;
- }
- else
- throw eos_reached ();
- }
-
- void argv_scanner::
- skip ()
- {
- if (i_ < argc_)
- {
- ++i_;
- ++start_position_;
- }
- else
- throw eos_reached ();
- }
-
- std::size_t argv_scanner::
- position ()
- {
- return start_position_;
- }
-
- // argv_file_scanner
- //
- int argv_file_scanner::zero_argc_ = 0;
- std::string argv_file_scanner::empty_string_;
-
- bool argv_file_scanner::
- more ()
- {
- if (!args_.empty ())
- return true;
-
- while (base::more ())
- {
- // See if the next argument is the file option.
- //
- const char* a (base::peek ());
- const option_info* oi = 0;
- const char* ov = 0;
-
- if (!skip_)
- {
- if ((oi = find (a)) != 0)
- {
- base::next ();
-
- if (!base::more ())
- throw missing_value (a);
-
- ov = base::next ();
- }
- else if (std::strncmp (a, "-", 1) == 0)
- {
- if ((ov = std::strchr (a, '=')) != 0)
- {
- std::string o (a, 0, ov - a);
- if ((oi = find (o.c_str ())) != 0)
- {
- base::next ();
- ++ov;
- }
- }
- }
- }
-
- if (oi != 0)
- {
- if (oi->search_func != 0)
- {
- std::string f (oi->search_func (ov, oi->arg));
-
- if (!f.empty ())
- load (f);
- }
- else
- load (ov);
-
- if (!args_.empty ())
- return true;
- }
- else
- {
- if (!skip_)
- skip_ = (std::strcmp (a, "--") == 0);
-
- return true;
- }
- }
-
- return false;
- }
-
- const char* argv_file_scanner::
- peek ()
- {
- if (!more ())
- throw eos_reached ();
-
- return args_.empty () ? base::peek () : args_.front ().value.c_str ();
- }
-
- const std::string& argv_file_scanner::
- peek_file ()
- {
- if (!more ())
- throw eos_reached ();
-
- return args_.empty () ? empty_string_ : *args_.front ().file;
- }
-
- std::size_t argv_file_scanner::
- peek_line ()
- {
- if (!more ())
- throw eos_reached ();
-
- return args_.empty () ? 0 : args_.front ().line;
- }
-
- const char* argv_file_scanner::
- next ()
- {
- if (!more ())
- throw eos_reached ();
-
- if (args_.empty ())
- return base::next ();
- else
- {
- hold_[i_ == 0 ? ++i_ : --i_].swap (args_.front ().value);
- args_.pop_front ();
- ++start_position_;
- return hold_[i_].c_str ();
- }
- }
-
- void argv_file_scanner::
- skip ()
- {
- if (!more ())
- throw eos_reached ();
-
- if (args_.empty ())
- return base::skip ();
- else
- {
- args_.pop_front ();
- ++start_position_;
- }
- }
-
- const argv_file_scanner::option_info* argv_file_scanner::
- find (const char* a) const
- {
- for (std::size_t i (0); i < options_count_; ++i)
- if (std::strcmp (a, options_[i].option) == 0)
- return &options_[i];
-
- return 0;
- }
-
- std::size_t argv_file_scanner::
- position ()
- {
- return start_position_;
- }
-
- void argv_file_scanner::
- load (const std::string& file)
- {
- using namespace std;
-
- ifstream is (file.c_str ());
-
- if (!is.is_open ())
- throw file_io_failure (file);
-
- files_.push_back (file);
-
- arg a;
- a.file = &*files_.rbegin ();
-
- for (a.line = 1; !is.eof (); ++a.line)
- {
- string line;
- getline (is, line);
-
- if (is.fail () && !is.eof ())
- throw file_io_failure (file);
-
- string::size_type n (line.size ());
-
- // Trim the line from leading and trailing whitespaces.
- //
- if (n != 0)
- {
- const char* f (line.c_str ());
- const char* l (f + n);
-
- const char* of (f);
- while (f < l && (*f == ' ' || *f == '\t' || *f == '\r'))
- ++f;
-
- --l;
-
- const char* ol (l);
- while (l > f && (*l == ' ' || *l == '\t' || *l == '\r'))
- --l;
-
- if (f != of || l != ol)
- line = f <= l ? string (f, l - f + 1) : string ();
- }
-
- // Ignore empty lines, those that start with #.
- //
- if (line.empty () || line[0] == '#')
- continue;
-
- string::size_type p (string::npos);
- if (line.compare (0, 1, "-") == 0)
- {
- p = line.find (' ');
-
- string::size_type q (line.find ('='));
- if (q != string::npos && q < p)
- p = q;
- }
-
- string s1;
- if (p != string::npos)
- {
- s1.assign (line, 0, p);
-
- // Skip leading whitespaces in the argument.
- //
- if (line[p] == '=')
- ++p;
- else
- {
- n = line.size ();
- for (++p; p < n; ++p)
- {
- char c (line[p]);
- if (c != ' ' && c != '\t' && c != '\r')
- break;
- }
- }
- }
- else if (!skip_)
- skip_ = (line == "--");
-
- string s2 (line, p != string::npos ? p : 0);
-
- // If the string (which is an option value or argument) is
- // wrapped in quotes, remove them.
- //
- n = s2.size ();
- char cf (s2[0]), cl (s2[n - 1]);
-
- if (cf == '"' || cf == '\'' || cl == '"' || cl == '\'')
- {
- if (n == 1 || cf != cl)
- throw unmatched_quote (s2);
-
- s2 = string (s2, 1, n - 2);
- }
-
- if (!s1.empty ())
- {
- // See if this is another file option.
- //
- const option_info* oi;
- if (!skip_ && (oi = find (s1.c_str ())))
- {
- if (s2.empty ())
- throw missing_value (oi->option);
-
- if (oi->search_func != 0)
- {
- string f (oi->search_func (s2.c_str (), oi->arg));
- if (!f.empty ())
- load (f);
- }
- else
- {
- // If the path of the file being parsed is not simple and the
- // path of the file that needs to be loaded is relative, then
- // complete the latter using the former as a base.
- //
-#ifndef _WIN32
- string::size_type p (file.find_last_of ('/'));
- bool c (p != string::npos && s2[0] != '/');
-#else
- string::size_type p (file.find_last_of ("/\\"));
- bool c (p != string::npos && s2[1] != ':');
-#endif
- if (c)
- s2.insert (0, file, 0, p + 1);
-
- load (s2);
- }
-
- continue;
- }
-
- a.value = s1;
- args_.push_back (a);
- }
-
- a.value = s2;
- args_.push_back (a);
- }
- }
-
- template <typename X>
- struct parser
- {
- static void
- parse (X& x, bool& xs, scanner& s)
- {
- using namespace std;
-
- const char* o (s.next ());
- if (s.more ())
- {
- string v (s.next ());
- istringstream is (v);
- if (!(is >> x && is.peek () == istringstream::traits_type::eof ()))
- throw invalid_value (o, v);
- }
- else
- throw missing_value (o);
-
- xs = true;
- }
-
- static void
- merge (X& b, const X& a)
- {
- b = a;
- }
- };
-
- template <>
- struct parser<bool>
- {
- static void
- parse (bool& x, scanner& s)
- {
- s.next ();
- x = true;
- }
-
- static void
- merge (bool& b, const bool&)
- {
- b = true;
- }
- };
-
- template <>
- struct parser<std::string>
- {
- static void
- parse (std::string& x, bool& xs, scanner& s)
- {
- const char* o (s.next ());
-
- if (s.more ())
- x = s.next ();
- else
- throw missing_value (o);
-
- xs = true;
- }
-
- static void
- merge (std::string& b, const std::string& a)
- {
- b = a;
- }
- };
-
- template <typename X>
- struct parser<std::pair<X, std::size_t> >
- {
- static void
- parse (std::pair<X, std::size_t>& x, bool& xs, scanner& s)
- {
- x.second = s.position ();
- parser<X>::parse (x.first, xs, s);
- }
-
- static void
- merge (std::pair<X, std::size_t>& b, const std::pair<X, std::size_t>& a)
- {
- b = a;
- }
- };
-
- template <typename X>
- struct parser<std::vector<X> >
- {
- static void
- parse (std::vector<X>& c, bool& xs, scanner& s)
- {
- X x;
- bool dummy;
- parser<X>::parse (x, dummy, s);
- c.push_back (x);
- xs = true;
- }
-
- static void
- merge (std::vector<X>& b, const std::vector<X>& a)
- {
- b.insert (b.end (), a.begin (), a.end ());
- }
- };
-
- template <typename X, typename C>
- struct parser<std::set<X, C> >
- {
- static void
- parse (std::set<X, C>& c, bool& xs, scanner& s)
- {
- X x;
- bool dummy;
- parser<X>::parse (x, dummy, s);
- c.insert (x);
- xs = true;
- }
-
- static void
- merge (std::set<X, C>& b, const std::set<X, C>& a)
- {
- b.insert (a.begin (), a.end ());
- }
- };
-
- template <typename K, typename V, typename C>
- struct parser<std::map<K, V, C> >
- {
- static void
- parse (std::map<K, V, C>& m, bool& xs, scanner& s)
- {
- const char* o (s.next ());
-
- if (s.more ())
- {
- std::size_t pos (s.position ());
- std::string ov (s.next ());
- std::string::size_type p = ov.find ('=');
-
- K k = K ();
- V v = V ();
- std::string kstr (ov, 0, p);
- std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
-
- int ac (2);
- char* av[] =
- {
- const_cast<char*> (o),
- 0
- };
-
- bool dummy;
- if (!kstr.empty ())
- {
- av[1] = const_cast<char*> (kstr.c_str ());
- argv_scanner s (0, ac, av, false, pos);
- parser<K>::parse (k, dummy, s);
- }
-
- if (!vstr.empty ())
- {
- av[1] = const_cast<char*> (vstr.c_str ());
- argv_scanner s (0, ac, av, false, pos);
- parser<V>::parse (v, dummy, s);
- }
-
- m[k] = v;
- }
- else
- throw missing_value (o);
-
- xs = true;
- }
-
- static void
- merge (std::map<K, V, C>& b, const std::map<K, V, C>& a)
- {
- for (typename std::map<K, V, C>::const_iterator i (a.begin ());
- i != a.end ();
- ++i)
- b[i->first] = i->second;
- }
- };
-
- template <typename X, typename T, T X::*M>
- void
- thunk (X& x, scanner& s)
- {
- parser<T>::parse (x.*M, s);
- }
-
- template <typename X, typename T, T X::*M, bool X::*S>
- void
- thunk (X& x, scanner& s)
- {
- parser<T>::parse (x.*M, x.*S, s);
- }
- }
-}
-
-#include <map>
-#include <cstring>
-
-namespace build2
-{
- // options
- //
-
- options::
- options ()
- : build2_metadata_ (),
- build2_metadata_specified_ (false),
- v_ (),
- V_ (),
- quiet_ (),
- silent_ (),
- verbose_ (1),
- verbose_specified_ (false),
- stat_ (),
- dump_ (),
- dump_specified_ (false),
- progress_ (),
- no_progress_ (),
- jobs_ (),
- jobs_specified_ (false),
- max_jobs_ (),
- max_jobs_specified_ (false),
- queue_depth_ (4),
- queue_depth_specified_ (false),
- file_cache_ (),
- file_cache_specified_ (false),
- max_stack_ (),
- max_stack_specified_ (false),
- serial_stop_ (),
- dry_run_ (),
- match_only_ (),
- no_external_modules_ (),
- structured_result_ (),
- mtime_check_ (),
- no_mtime_check_ (),
- no_column_ (),
- no_line_ (),
- buildfile_ (),
- buildfile_specified_ (false),
- config_guess_ (),
- config_guess_specified_ (false),
- config_sub_ (),
- config_sub_specified_ (false),
- pager_ (),
- pager_specified_ (false),
- pager_option_ (),
- pager_option_specified_ (false),
- options_file_ (),
- options_file_specified_ (false),
- default_options_ (),
- default_options_specified_ (false),
- no_default_options_ (),
- help_ (),
- version_ ()
- {
- }
-
- bool options::
- parse (int& argc,
- char** argv,
- bool erase,
- ::build2::cl::unknown_mode opt,
- ::build2::cl::unknown_mode arg)
- {
- ::build2::cl::argv_scanner s (argc, argv, erase);
- bool r = _parse (s, opt, arg);
- return r;
- }
-
- bool options::
- parse (int start,
- int& argc,
- char** argv,
- bool erase,
- ::build2::cl::unknown_mode opt,
- ::build2::cl::unknown_mode arg)
- {
- ::build2::cl::argv_scanner s (start, argc, argv, erase);
- bool r = _parse (s, opt, arg);
- return r;
- }
-
- bool options::
- parse (int& argc,
- char** argv,
- int& end,
- bool erase,
- ::build2::cl::unknown_mode opt,
- ::build2::cl::unknown_mode arg)
- {
- ::build2::cl::argv_scanner s (argc, argv, erase);
- bool r = _parse (s, opt, arg);
- end = s.end ();
- return r;
- }
-
- bool options::
- parse (int start,
- int& argc,
- char** argv,
- int& end,
- bool erase,
- ::build2::cl::unknown_mode opt,
- ::build2::cl::unknown_mode arg)
- {
- ::build2::cl::argv_scanner s (start, argc, argv, erase);
- bool r = _parse (s, opt, arg);
- end = s.end ();
- return r;
- }
-
- bool options::
- parse (::build2::cl::scanner& s,
- ::build2::cl::unknown_mode opt,
- ::build2::cl::unknown_mode arg)
- {
- bool r = _parse (s, opt, arg);
- return r;
- }
-
- void options::
- merge (const options& a)
- {
- CLI_POTENTIALLY_UNUSED (a);
-
- if (a.build2_metadata_specified_)
- {
- ::build2::cl::parser< uint64_t>::merge (
- this->build2_metadata_, a.build2_metadata_);
- this->build2_metadata_specified_ = true;
- }
-
- if (a.v_)
- {
- ::build2::cl::parser< bool>::merge (
- this->v_, a.v_);
- }
-
- if (a.V_)
- {
- ::build2::cl::parser< bool>::merge (
- this->V_, a.V_);
- }
-
- if (a.quiet_)
- {
- ::build2::cl::parser< bool>::merge (
- this->quiet_, a.quiet_);
- }
-
- if (a.silent_)
- {
- ::build2::cl::parser< bool>::merge (
- this->silent_, a.silent_);
- }
-
- if (a.verbose_specified_)
- {
- ::build2::cl::parser< uint16_t>::merge (
- this->verbose_, a.verbose_);
- this->verbose_specified_ = true;
- }
-
- if (a.stat_)
- {
- ::build2::cl::parser< bool>::merge (
- this->stat_, a.stat_);
- }
-
- if (a.dump_specified_)
- {
- ::build2::cl::parser< std::set<string>>::merge (
- this->dump_, a.dump_);
- this->dump_specified_ = true;
- }
-
- if (a.progress_)
- {
- ::build2::cl::parser< bool>::merge (
- this->progress_, a.progress_);
- }
-
- if (a.no_progress_)
- {
- ::build2::cl::parser< bool>::merge (
- this->no_progress_, a.no_progress_);
- }
-
- if (a.jobs_specified_)
- {
- ::build2::cl::parser< size_t>::merge (
- this->jobs_, a.jobs_);
- this->jobs_specified_ = true;
- }
-
- if (a.max_jobs_specified_)
- {
- ::build2::cl::parser< size_t>::merge (
- this->max_jobs_, a.max_jobs_);
- this->max_jobs_specified_ = true;
- }
-
- if (a.queue_depth_specified_)
- {
- ::build2::cl::parser< size_t>::merge (
- this->queue_depth_, a.queue_depth_);
- this->queue_depth_specified_ = true;
- }
-
- if (a.file_cache_specified_)
- {
- ::build2::cl::parser< string>::merge (
- this->file_cache_, a.file_cache_);
- this->file_cache_specified_ = true;
- }
-
- if (a.max_stack_specified_)
- {
- ::build2::cl::parser< size_t>::merge (
- this->max_stack_, a.max_stack_);
- this->max_stack_specified_ = true;
- }
-
- if (a.serial_stop_)
- {
- ::build2::cl::parser< bool>::merge (
- this->serial_stop_, a.serial_stop_);
- }
-
- if (a.dry_run_)
- {
- ::build2::cl::parser< bool>::merge (
- this->dry_run_, a.dry_run_);
- }
-
- if (a.match_only_)
- {
- ::build2::cl::parser< bool>::merge (
- this->match_only_, a.match_only_);
- }
-
- if (a.no_external_modules_)
- {
- ::build2::cl::parser< bool>::merge (
- this->no_external_modules_, a.no_external_modules_);
- }
-
- if (a.structured_result_)
- {
- ::build2::cl::parser< bool>::merge (
- this->structured_result_, a.structured_result_);
- }
-
- if (a.mtime_check_)
- {
- ::build2::cl::parser< bool>::merge (
- this->mtime_check_, a.mtime_check_);
- }
-
- if (a.no_mtime_check_)
- {
- ::build2::cl::parser< bool>::merge (
- this->no_mtime_check_, a.no_mtime_check_);
- }
-
- if (a.no_column_)
- {
- ::build2::cl::parser< bool>::merge (
- this->no_column_, a.no_column_);
- }
-
- if (a.no_line_)
- {
- ::build2::cl::parser< bool>::merge (
- this->no_line_, a.no_line_);
- }
-
- if (a.buildfile_specified_)
- {
- ::build2::cl::parser< path>::merge (
- this->buildfile_, a.buildfile_);
- this->buildfile_specified_ = true;
- }
-
- if (a.config_guess_specified_)
- {
- ::build2::cl::parser< path>::merge (
- this->config_guess_, a.config_guess_);
- this->config_guess_specified_ = true;
- }
-
- if (a.config_sub_specified_)
- {
- ::build2::cl::parser< path>::merge (
- this->config_sub_, a.config_sub_);
- this->config_sub_specified_ = true;
- }
-
- if (a.pager_specified_)
- {
- ::build2::cl::parser< string>::merge (
- this->pager_, a.pager_);
- this->pager_specified_ = true;
- }
-
- if (a.pager_option_specified_)
- {
- ::build2::cl::parser< strings>::merge (
- this->pager_option_, a.pager_option_);
- this->pager_option_specified_ = true;
- }
-
- if (a.options_file_specified_)
- {
- ::build2::cl::parser< string>::merge (
- this->options_file_, a.options_file_);
- this->options_file_specified_ = true;
- }
-
- if (a.default_options_specified_)
- {
- ::build2::cl::parser< dir_path>::merge (
- this->default_options_, a.default_options_);
- this->default_options_specified_ = true;
- }
-
- if (a.no_default_options_)
- {
- ::build2::cl::parser< bool>::merge (
- this->no_default_options_, a.no_default_options_);
- }
-
- if (a.help_)
- {
- ::build2::cl::parser< bool>::merge (
- this->help_, a.help_);
- }
-
- if (a.version_)
- {
- ::build2::cl::parser< bool>::merge (
- this->version_, a.version_);
- }
- }
-
- ::build2::cl::usage_para options::
- print_usage (::std::ostream& os, ::build2::cl::usage_para p)
- {
- CLI_POTENTIALLY_UNUSED (os);
-
- if (p != ::build2::cl::usage_para::none)
- os << ::std::endl;
-
- os << "\033[1mOPTIONS\033[0m" << ::std::endl;
-
- os << std::endl
- << "\033[1m-v\033[0m Print actual commands being executed. This options is" << ::std::endl
- << " equivalent to \033[1m--verbose 2\033[0m." << ::std::endl;
-
- os << std::endl
- << "\033[1m-V\033[0m Print all underlying commands being executed. This" << ::std::endl
- << " options is equivalent to \033[1m--verbose 3\033[0m." << ::std::endl;
-
- os << std::endl
- << "\033[1m--quiet\033[0m|\033[1m-q\033[0m Run quietly, only printing error messages in most" << ::std::endl
- << " contexts. In certain contexts (for example, while" << ::std::endl
- << " updating build system modules) this verbosity level may" << ::std::endl
- << " be ignored. Use \033[1m--silent\033[0m to run quietly in all contexts." << ::std::endl
- << " This option is equivalent to \033[1m--verbose 0\033[0m." << ::std::endl;
-
- os << std::endl
- << "\033[1m--silent\033[0m Run quietly, only printing error messages in all" << ::std::endl
- << " contexts." << ::std::endl;
-
- os << std::endl
- << "\033[1m--verbose\033[0m \033[4mlevel\033[0m Set the diagnostics verbosity to \033[4mlevel\033[0m between 0 and 6." << ::std::endl
- << " Level 0 disables any non-error messages (but see the" << ::std::endl
- << " difference between \033[1m--quiet\033[0m and \033[1m--silent\033[0m) while level 6" << ::std::endl
- << " produces lots of information, with level 1 being the" << ::std::endl
- << " default. The following additional types of diagnostics" << ::std::endl
- << " are produced at each level:" << ::std::endl
- << ::std::endl
- << " 1. High-level information messages." << ::std::endl
- << " 2. Essential underlying commands being executed." << ::std::endl
- << " 3. All underlying commands being executed." << ::std::endl
- << " 4. Information that could be helpful to the user." << ::std::endl
- << " 5. Information that could be helpful to the developer." << ::std::endl
- << " 6. Even more detailed information." << ::std::endl;
-
- os << std::endl
- << "\033[1m--stat\033[0m Display build statistics." << ::std::endl;
-
- os << std::endl
- << "\033[1m--dump\033[0m \033[4mphase\033[0m Dump the build system state after the specified phase." << ::std::endl
- << " Valid \033[4mphase\033[0m values are \033[1mload\033[0m (after loading \033[1mbuildfiles\033[0m)" << ::std::endl
- << " and \033[1mmatch\033[0m (after matching rules to targets). Repeat this" << ::std::endl
- << " option to dump the state after multiple phases." << ::std::endl;
-
- os << std::endl
- << "\033[1m--progress\033[0m Display build progress. If printing to a terminal the" << ::std::endl
- << " progress is displayed by default for low verbosity" << ::std::endl
- << " levels. Use \033[1m--no-progress\033[0m to suppress." << ::std::endl;
-
- os << std::endl
- << "\033[1m--no-progress\033[0m Don't display build progress." << ::std::endl;
-
- os << std::endl
- << "\033[1m--jobs\033[0m|\033[1m-j\033[0m \033[4mnum\033[0m Number of active jobs to perform in parallel. This" << ::std::endl
- << " includes both the number of active threads inside the" << ::std::endl
- << " build system as well as the number of external commands" << ::std::endl
- << " (compilers, linkers, etc) started but not yet finished." << ::std::endl
- << " If this option is not specified or specified with the \033[1m0\033[0m" << ::std::endl
- << " value, then the number of available hardware threads is" << ::std::endl
- << " used." << ::std::endl;
-
- os << std::endl
- << "\033[1m--max-jobs\033[0m|\033[1m-J\033[0m \033[4mnum\033[0m Maximum number of jobs (threads) to create. The default" << ::std::endl
- << " is 8x the number of active jobs (\033[1m--jobs|j\033[0m) on 32-bit" << ::std::endl
- << " architectures and 32x on 64-bit. See the build system" << ::std::endl
- << " scheduler implementation for details." << ::std::endl;
-
- os << std::endl
- << "\033[1m--queue-depth\033[0m|\033[1m-Q\033[0m \033[4mnum\033[0m The queue depth as a multiplier over the number of active" << ::std::endl
- << " jobs. Normally we want a deeper queue if the jobs take" << ::std::endl
- << " long (for example, compilation) and shorter if they are" << ::std::endl
- << " quick (for example, simple tests). The default is 4. See" << ::std::endl
- << " the build system scheduler implementation for details." << ::std::endl;
-
- os << std::endl
- << "\033[1m--file-cache\033[0m \033[4mimpl\033[0m File cache implementation to use for intermediate build" << ::std::endl
- << " results. Valid values are \033[1mnoop\033[0m (no caching or" << ::std::endl
- << " compression) and \033[1msync-lz4\033[0m (no caching with synchronous" << ::std::endl
- << " LZ4 on-disk compression). If this option is not" << ::std::endl
- << " specified, then a suitable default implementation is used" << ::std::endl
- << " (currently \033[1msync-lz4\033[0m)." << ::std::endl;
-
- os << std::endl
- << "\033[1m--max-stack\033[0m \033[4mnum\033[0m The maximum stack size in KBytes to allow for newly" << ::std::endl
- << " created threads. For \033[4mpthreads\033[0m-based systems the driver" << ::std::endl
- << " queries the stack size of the main thread and uses the" << ::std::endl
- << " same size for creating additional threads. This allows" << ::std::endl
- << " adjusting the stack size using familiar mechanisms, such" << ::std::endl
- << " as \033[1mulimit\033[0m. Sometimes, however, the stack size of the main" << ::std::endl
- << " thread is excessively large. As a result, the driver" << ::std::endl
- << " checks if it is greater than a predefined limit (64MB on" << ::std::endl
- << " 64-bit systems and 32MB on 32-bit ones) and caps it to a" << ::std::endl
- << " more sensible value (8MB) if that's the case. This option" << ::std::endl
- << " allows you to override this check with the special zero" << ::std::endl
- << " value indicating that the main thread stack size should" << ::std::endl
- << " be used as is." << ::std::endl;
-
- os << std::endl
- << "\033[1m--serial-stop\033[0m|\033[1m-s\033[0m Run serially and stop at the first error. This mode is" << ::std::endl
- << " useful to investigate build failures that are caused by" << ::std::endl
- << " build system errors rather than compilation errors. Note" << ::std::endl
- << " that if you don't want to keep going but still want" << ::std::endl
- << " parallel execution, add \033[1m--jobs|-j\033[0m (for example \033[1m-j 0\033[0m for" << ::std::endl
- << " default concurrency)." << ::std::endl;
-
- os << std::endl
- << "\033[1m--dry-run\033[0m|\033[1m-n\033[0m Print commands without actually executing them. Note that" << ::std::endl
- << " commands that are required to create an accurate build" << ::std::endl
- << " state will still be executed and the extracted auxiliary" << ::std::endl
- << " dependency information saved. In other words, this is not" << ::std::endl
- << " the \033[4m\"don't touch the filesystem\"\033[0m mode but rather \033[4m\"do" << ::std::endl
- << " minimum amount of work to show what needs to be done\"\033[0m." << ::std::endl
- << " Note also that only the \033[1mperform\033[0m meta-operation supports" << ::std::endl
- << " this mode." << ::std::endl;
-
- os << std::endl
- << "\033[1m--match-only\033[0m Match the rules but do not execute the operation. This" << ::std::endl
- << " mode is primarily useful for profiling." << ::std::endl;
-
- os << std::endl
- << "\033[1m--no-external-modules\033[0m Don't load external modules during project bootstrap." << ::std::endl
- << " Note that this option can only be used with" << ::std::endl
- << " meta-operations that do not load the project's" << ::std::endl
- << " \033[1mbuildfiles\033[0m, such as \033[1minfo\033[0m." << ::std::endl;
-
- os << std::endl
- << "\033[1m--structured-result\033[0m Write the result of execution in a structured form. In" << ::std::endl
- << " this mode, instead of printing to \033[1mSTDERR\033[0m diagnostics" << ::std::endl
- << " messages about the outcome of executing actions on" << ::std::endl
- << " targets, the driver writes to \033[1mSTDOUT\033[0m a structured result" << ::std::endl
- << " description one line per the buildspec action/target" << ::std::endl
- << " pair. Each line has the following format:" << ::std::endl
- << ::std::endl
- << " \033[4mstate\033[0m \033[4mmeta-operation\033[0m \033[4moperation\033[0m \033[4mtarget\033[0m\033[0m" << ::std::endl
- << ::std::endl
- << " Where \033[4mstate\033[0m can be one of \033[1munchanged\033[0m, \033[1mchanged\033[0m, or \033[1mfailed\033[0m." << ::std::endl
- << " If the action is a pre or post operation, then the outer" << ::std::endl
- << " operation is specified in parenthesis. For example:" << ::std::endl
- << ::std::endl
- << " unchanged perform update(test) /tmp/dir{hello/}" << ::std::endl
- << " changed perform test /tmp/dir{hello/}" << ::std::endl
- << ::std::endl
- << " Note that only the \033[1mperform\033[0m meta-operation supports the" << ::std::endl
- << " structured result output." << ::std::endl;
-
- os << std::endl
- << "\033[1m--mtime-check\033[0m Perform file modification time sanity checks. These" << ::std::endl
- << " checks can be helpful in diagnosing spurious rebuilds and" << ::std::endl
- << " are enabled by default on Windows (which is known not to" << ::std::endl
- << " guarantee monotonically increasing mtimes) and for the" << ::std::endl
- << " staged version of the build system on other platforms." << ::std::endl
- << " Use \033[1m--no-mtime-check\033[0m to disable." << ::std::endl;
-
- os << std::endl
- << "\033[1m--no-mtime-check\033[0m Don't perform file modification time sanity checks. See" << ::std::endl
- << " \033[1m--mtime-check\033[0m for details." << ::std::endl;
-
- os << std::endl
- << "\033[1m--no-column\033[0m Don't print column numbers in diagnostics." << ::std::endl;
-
- os << std::endl
- << "\033[1m--no-line\033[0m Don't print line and column numbers in diagnostics." << ::std::endl;
-
- os << std::endl
- << "\033[1m--buildfile\033[0m \033[4mpath\033[0m The alternative file to read build information from. The" << ::std::endl
- << " default is \033[1mbuildfile\033[0m or \033[1mbuild2file\033[0m, depending on the" << ::std::endl
- << " project's build file/directory naming scheme. If \033[4mpath\033[0m is" << ::std::endl
- << " '\033[1m-\033[0m', then read from \033[1mSTDIN\033[0m. Note that this option only" << ::std::endl
- << " affects the files read as part of the buildspec" << ::std::endl
- << " processing. Specifically, it has no effect on the \033[1msource\033[0m" << ::std::endl
- << " and \033[1minclude\033[0m directives. As a result, this option is" << ::std::endl
- << " primarily intended for testing rather than changing the" << ::std::endl
- << " build file names in real projects." << ::std::endl;
-
- os << std::endl
- << "\033[1m--config-guess\033[0m \033[4mpath\033[0m The path to the \033[1mconfig.guess(1)\033[0m script that should be" << ::std::endl
- << " used to guess the host machine triplet. If this option is" << ::std::endl
- << " not specified, then \033[1mb\033[0m will fall back on to using the" << ::std::endl
- << " target it was built for as host." << ::std::endl;
-
- os << std::endl
- << "\033[1m--config-sub\033[0m \033[4mpath\033[0m The path to the \033[1mconfig.sub(1)\033[0m script that should be used" << ::std::endl
- << " to canonicalize machine triplets. If this option is not" << ::std::endl
- << " specified, then \033[1mb\033[0m will use its built-in canonicalization" << ::std::endl
- << " support which should be sufficient for commonly-used" << ::std::endl
- << " platforms." << ::std::endl;
-
- os << std::endl
- << "\033[1m--pager\033[0m \033[4mpath\033[0m The pager program to be used to show long text. Commonly" << ::std::endl
- << " used pager programs are \033[1mless\033[0m and \033[1mmore\033[0m. You can also" << ::std::endl
- << " specify additional options that should be passed to the" << ::std::endl
- << " pager program with \033[1m--pager-option\033[0m. If an empty string is" << ::std::endl
- << " specified as the pager program, then no pager will be" << ::std::endl
- << " used. If the pager program is not explicitly specified," << ::std::endl
- << " then \033[1mb\033[0m will try to use \033[1mless\033[0m. If it is not available, then" << ::std::endl
- << " no pager will be used." << ::std::endl;
-
- os << std::endl
- << "\033[1m--pager-option\033[0m \033[4mopt\033[0m Additional option to be passed to the pager program. See" << ::std::endl
- << " \033[1m--pager\033[0m for more information on the pager program. Repeat" << ::std::endl
- << " this option to specify multiple pager options." << ::std::endl;
-
- os << std::endl
- << "\033[1m--options-file\033[0m \033[4mfile\033[0m Read additional options from \033[4mfile\033[0m. Each option should" << ::std::endl
- << " appear on a separate line optionally followed by space or" << ::std::endl
- << " equal sign (\033[1m=\033[0m) and an option value. Empty lines and lines" << ::std::endl
- << " starting with \033[1m#\033[0m are ignored. Option values can be" << ::std::endl
- << " enclosed in double (\033[1m\"\033[0m) or single (\033[1m'\033[0m) quotes to preserve" << ::std::endl
- << " leading and trailing whitespaces as well as to specify" << ::std::endl
- << " empty values. If the value itself contains trailing or" << ::std::endl
- << " leading quotes, enclose it with an extra pair of quotes," << ::std::endl
- << " for example \033[1m'\"x\"'\033[0m. Non-leading and non-trailing quotes" << ::std::endl
- << " are interpreted as being part of the option value." << ::std::endl
- << ::std::endl
- << " The semantics of providing options in a file is" << ::std::endl
- << " equivalent to providing the same set of options in the" << ::std::endl
- << " same order on the command line at the point where the" << ::std::endl
- << " \033[1m--options-file\033[0m option is specified except that the shell" << ::std::endl
- << " escaping and quoting is not required. Repeat this option" << ::std::endl
- << " to specify more than one options file." << ::std::endl;
-
- os << std::endl
- << "\033[1m--default-options\033[0m \033[4mdir\033[0m The directory to load additional default options files" << ::std::endl
- << " from." << ::std::endl;
-
- os << std::endl
- << "\033[1m--no-default-options\033[0m Don't load default options files." << ::std::endl;
-
- os << std::endl
- << "\033[1m--help\033[0m Print usage information and exit." << ::std::endl;
-
- os << std::endl
- << "\033[1m--version\033[0m Print version and exit." << ::std::endl;
-
- p = ::build2::cl::usage_para::option;
-
- return p;
- }
-
- typedef
- std::map<std::string, void (*) (options&, ::build2::cl::scanner&)>
- _cli_options_map;
-
- static _cli_options_map _cli_options_map_;
-
- struct _cli_options_map_init
- {
- _cli_options_map_init ()
- {
- _cli_options_map_["--build2-metadata"] =
- &::build2::cl::thunk< options, uint64_t, &options::build2_metadata_,
- &options::build2_metadata_specified_ >;
- _cli_options_map_["-v"] =
- &::build2::cl::thunk< options, bool, &options::v_ >;
- _cli_options_map_["-V"] =
- &::build2::cl::thunk< options, bool, &options::V_ >;
- _cli_options_map_["--quiet"] =
- &::build2::cl::thunk< options, bool, &options::quiet_ >;
- _cli_options_map_["-q"] =
- &::build2::cl::thunk< options, bool, &options::quiet_ >;
- _cli_options_map_["--silent"] =
- &::build2::cl::thunk< options, bool, &options::silent_ >;
- _cli_options_map_["--verbose"] =
- &::build2::cl::thunk< options, uint16_t, &options::verbose_,
- &options::verbose_specified_ >;
- _cli_options_map_["--stat"] =
- &::build2::cl::thunk< options, bool, &options::stat_ >;
- _cli_options_map_["--dump"] =
- &::build2::cl::thunk< options, std::set<string>, &options::dump_,
- &options::dump_specified_ >;
- _cli_options_map_["--progress"] =
- &::build2::cl::thunk< options, bool, &options::progress_ >;
- _cli_options_map_["--no-progress"] =
- &::build2::cl::thunk< options, bool, &options::no_progress_ >;
- _cli_options_map_["--jobs"] =
- &::build2::cl::thunk< options, size_t, &options::jobs_,
- &options::jobs_specified_ >;
- _cli_options_map_["-j"] =
- &::build2::cl::thunk< options, size_t, &options::jobs_,
- &options::jobs_specified_ >;
- _cli_options_map_["--max-jobs"] =
- &::build2::cl::thunk< options, size_t, &options::max_jobs_,
- &options::max_jobs_specified_ >;
- _cli_options_map_["-J"] =
- &::build2::cl::thunk< options, size_t, &options::max_jobs_,
- &options::max_jobs_specified_ >;
- _cli_options_map_["--queue-depth"] =
- &::build2::cl::thunk< options, size_t, &options::queue_depth_,
- &options::queue_depth_specified_ >;
- _cli_options_map_["-Q"] =
- &::build2::cl::thunk< options, size_t, &options::queue_depth_,
- &options::queue_depth_specified_ >;
- _cli_options_map_["--file-cache"] =
- &::build2::cl::thunk< options, string, &options::file_cache_,
- &options::file_cache_specified_ >;
- _cli_options_map_["--max-stack"] =
- &::build2::cl::thunk< options, size_t, &options::max_stack_,
- &options::max_stack_specified_ >;
- _cli_options_map_["--serial-stop"] =
- &::build2::cl::thunk< options, bool, &options::serial_stop_ >;
- _cli_options_map_["-s"] =
- &::build2::cl::thunk< options, bool, &options::serial_stop_ >;
- _cli_options_map_["--dry-run"] =
- &::build2::cl::thunk< options, bool, &options::dry_run_ >;
- _cli_options_map_["-n"] =
- &::build2::cl::thunk< options, bool, &options::dry_run_ >;
- _cli_options_map_["--match-only"] =
- &::build2::cl::thunk< options, bool, &options::match_only_ >;
- _cli_options_map_["--no-external-modules"] =
- &::build2::cl::thunk< options, bool, &options::no_external_modules_ >;
- _cli_options_map_["--structured-result"] =
- &::build2::cl::thunk< options, bool, &options::structured_result_ >;
- _cli_options_map_["--mtime-check"] =
- &::build2::cl::thunk< options, bool, &options::mtime_check_ >;
- _cli_options_map_["--no-mtime-check"] =
- &::build2::cl::thunk< options, bool, &options::no_mtime_check_ >;
- _cli_options_map_["--no-column"] =
- &::build2::cl::thunk< options, bool, &options::no_column_ >;
- _cli_options_map_["--no-line"] =
- &::build2::cl::thunk< options, bool, &options::no_line_ >;
- _cli_options_map_["--buildfile"] =
- &::build2::cl::thunk< options, path, &options::buildfile_,
- &options::buildfile_specified_ >;
- _cli_options_map_["--config-guess"] =
- &::build2::cl::thunk< options, path, &options::config_guess_,
- &options::config_guess_specified_ >;
- _cli_options_map_["--config-sub"] =
- &::build2::cl::thunk< options, path, &options::config_sub_,
- &options::config_sub_specified_ >;
- _cli_options_map_["--pager"] =
- &::build2::cl::thunk< options, string, &options::pager_,
- &options::pager_specified_ >;
- _cli_options_map_["--pager-option"] =
- &::build2::cl::thunk< options, strings, &options::pager_option_,
- &options::pager_option_specified_ >;
- _cli_options_map_["--options-file"] =
- &::build2::cl::thunk< options, string, &options::options_file_,
- &options::options_file_specified_ >;
- _cli_options_map_["--default-options"] =
- &::build2::cl::thunk< options, dir_path, &options::default_options_,
- &options::default_options_specified_ >;
- _cli_options_map_["--no-default-options"] =
- &::build2::cl::thunk< options, bool, &options::no_default_options_ >;
- _cli_options_map_["--help"] =
- &::build2::cl::thunk< options, bool, &options::help_ >;
- _cli_options_map_["--version"] =
- &::build2::cl::thunk< options, bool, &options::version_ >;
- }
- };
-
- static _cli_options_map_init _cli_options_map_init_;
-
- bool options::
- _parse (const char* o, ::build2::cl::scanner& s)
- {
- _cli_options_map::const_iterator i (_cli_options_map_.find (o));
-
- if (i != _cli_options_map_.end ())
- {
- (*(i->second)) (*this, s);
- return true;
- }
-
- return false;
- }
-
- bool options::
- _parse (::build2::cl::scanner& s,
- ::build2::cl::unknown_mode opt_mode,
- ::build2::cl::unknown_mode arg_mode)
- {
- // Can't skip combined flags (--no-combined-flags).
- //
- assert (opt_mode != ::build2::cl::unknown_mode::skip);
-
- bool r = false;
- bool opt = true;
-
- while (s.more ())
- {
- const char* o = s.peek ();
-
- if (std::strcmp (o, "--") == 0)
- {
- opt = false;
- }
-
- if (opt)
- {
- if (_parse (o, s))
- {
- r = true;
- continue;
- }
-
- if (std::strncmp (o, "-", 1) == 0 && o[1] != '\0')
- {
- // Handle combined option values.
- //
- std::string co;
- if (const char* v = std::strchr (o, '='))
- {
- co.assign (o, 0, v - o);
- ++v;
-
- int ac (2);
- char* av[] =
- {
- const_cast<char*> (co.c_str ()),
- const_cast<char*> (v)
- };
-
- ::build2::cl::argv_scanner ns (0, ac, av);
-
- if (_parse (co.c_str (), ns))
- {
- // Parsed the option but not its value?
- //
- if (ns.end () != 2)
- throw ::build2::cl::invalid_value (co, v);
-
- s.next ();
- r = true;
- continue;
- }
- else
- {
- // Set the unknown option and fall through.
- //
- o = co.c_str ();
- }
- }
-
- // Handle combined flags.
- //
- char cf[3];
- {
- const char* p = o + 1;
- for (; *p != '\0'; ++p)
- {
- if (!((*p >= 'a' && *p <= 'z') ||
- (*p >= 'A' && *p <= 'Z') ||
- (*p >= '0' && *p <= '9')))
- break;
- }
-
- if (*p == '\0')
- {
- for (p = o + 1; *p != '\0'; ++p)
- {
- std::strcpy (cf, "-");
- cf[1] = *p;
- cf[2] = '\0';
-
- int ac (1);
- char* av[] =
- {
- cf
- };
-
- ::build2::cl::argv_scanner ns (0, ac, av);
-
- if (!_parse (cf, ns))
- break;
- }
-
- if (*p == '\0')
- {
- // All handled.
- //
- s.next ();
- r = true;
- continue;
- }
- else
- {
- // Set the unknown option and fall through.
- //
- o = cf;
- }
- }
- }
-
- switch (opt_mode)
- {
- case ::build2::cl::unknown_mode::skip:
- {
- s.skip ();
- r = true;
- continue;
- }
- case ::build2::cl::unknown_mode::stop:
- {
- break;
- }
- case ::build2::cl::unknown_mode::fail:
- {
- throw ::build2::cl::unknown_option (o);
- }
- }
-
- break;
- }
- }
-
- switch (arg_mode)
- {
- case ::build2::cl::unknown_mode::skip:
- {
- s.skip ();
- r = true;
- continue;
- }
- case ::build2::cl::unknown_mode::stop:
- {
- break;
- }
- case ::build2::cl::unknown_mode::fail:
- {
- throw ::build2::cl::unknown_argument (o);
- }
- }
-
- break;
- }
-
- return r;
- }
-}
-
-namespace build2
-{
- ::build2::cl::usage_para
- print_b_usage (::std::ostream& os, ::build2::cl::usage_para p)
- {
- CLI_POTENTIALLY_UNUSED (os);
-
- if (p != ::build2::cl::usage_para::none)
- os << ::std::endl;
-
- os << "\033[1mSYNOPSIS\033[0m" << ::std::endl
- << ::std::endl
- << "\033[1mb --help\033[0m" << ::std::endl
- << "\033[1mb --version\033[0m" << ::std::endl
- << "\033[1mb\033[0m [\033[4moptions\033[0m] [\033[4mvariables\033[0m] [\033[4mbuildspec\033[0m]\033[0m" << ::std::endl
- << ::std::endl
- << "\033[4mbuildspec\033[0m = \033[4mmeta-operation\033[0m\033[1m(\033[0m\033[4moperation\033[0m\033[1m(\033[0m\033[4mtarget\033[0m...[\033[1m,\033[0m\033[4mparameters\033[0m]\033[1m)\033[0m...\033[1m)\033[0m...\033[0m" << ::std::endl
- << ::std::endl
- << "\033[1mDESCRIPTION\033[0m" << ::std::endl
- << ::std::endl
- << "The \033[1mbuild2\033[0m build system driver executes a set of meta-operations on operations" << ::std::endl
- << "on targets according to the build specification, or \033[4mbuildspec\033[0m for short. This" << ::std::endl
- << "process can be controlled by specifying driver \033[4moptions\033[0m and build system" << ::std::endl
- << "\033[4mvariables\033[0m." << ::std::endl
- << ::std::endl
- << "Note that \033[4moptions\033[0m, \033[4mvariables\033[0m, and \033[4mbuildspec\033[0m fragments can be specified in any" << ::std::endl
- << "order. To avoid treating an argument that starts with \033[1m'-'\033[0m as an option, add the" << ::std::endl
- << "\033[1m'--'\033[0m separator. To avoid treating an argument that contains \033[1m'='\033[0m as a variable," << ::std::endl
- << "add the second \033[1m'--'\033[0m separator." << ::std::endl;
-
- p = ::build2::options::print_usage (os, ::build2::cl::usage_para::text);
-
- if (p != ::build2::cl::usage_para::none)
- os << ::std::endl;
-
- os << "\033[1mDEFAULT OPTIONS FILES\033[0m" << ::std::endl
- << ::std::endl
- << "Instead of having a separate config file format for tool configuration, the" << ::std::endl
- << "\033[1mbuild2\033[0m toolchain uses \033[4mdefault options files\033[0m which contain the same options as" << ::std::endl
- << "what can be specified on the command line. The default options files are like" << ::std::endl
- << "options files that one can specify with \033[1m--options-file\033[0m except that they are" << ::std::endl
- << "loaded by default." << ::std::endl
- << ::std::endl
- << "The default options files for the build system driver are called \033[1mb.options\033[0m and" << ::std::endl
- << "are searched for in the \033[1m.build2/\033[0m subdirectory of the home directory and in the" << ::std::endl
- << "system directory (for example, \033[1m/etc/build2/\033[0m) if configured. Note that besides" << ::std::endl
- << "options these files can also contain global variable overrides." << ::std::endl
- << ::std::endl
- << "Once the search is complete, the files are loaded in the reverse order, that" << ::std::endl
- << "is, beginning from the system directory (if any), followed by the home" << ::std::endl
- << "directory, and finishing off with the options specified on the command line. In" << ::std::endl
- << "other words, the files are loaded from the more generic to the more specific" << ::std::endl
- << "with the command line options having the ability to override any values" << ::std::endl
- << "specified in the default options files." << ::std::endl
- << ::std::endl
- << "If a default options file contains \033[1m--no-default-options\033[0m, then the search is" << ::std::endl
- << "stopped at the directory containing this file and no outer files are loaded. If" << ::std::endl
- << "this option is specified on the command line, then none of the default options" << ::std::endl
- << "files are searched for or loaded." << ::std::endl
- << ::std::endl
- << "An additional directory containing default options files can be specified with" << ::std::endl
- << "\033[1m--default-options\033[0m. Its configuration files are loaded after the home directory." << ::std::endl
- << ::std::endl
- << "The order in which default options files are loaded is traced at the verbosity" << ::std::endl
- << "level 3 (\033[1m-V\033[0m option) or higher." << ::std::endl
- << ::std::endl
- << "\033[1mEXIT STATUS\033[0m" << ::std::endl
- << ::std::endl
- << "Non-zero exit status is returned in case of an error." << ::std::endl;
-
- os << std::endl
- << "\033[1mENVIRONMENT\033[0m" << ::std::endl
- << ::std::endl
- << "The \033[1mHOME\033[0m environment variable is used to determine the user's home directory." << ::std::endl
- << "If it is not set, then \033[1mgetpwuid(3)\033[0m is used instead. This value is used to" << ::std::endl
- << "shorten paths printed in diagnostics by replacing the home directory with \033[1m~/\033[0m." << ::std::endl
- << "It is also made available to \033[1mbuildfile\033[0m's as the \033[1mbuild.home\033[0m variable." << ::std::endl
- << ::std::endl
- << "The \033[1mBUILD2_VAR_OVR\033[0m environment variable is used to propagate global variable" << ::std::endl
- << "overrides to nested build system driver invocations. Its value is a list of" << ::std::endl
- << "global variable assignments separated with newlines." << ::std::endl
- << ::std::endl
- << "The \033[1mBUILD2_DEF_OPT\033[0m environment variable is used to suppress loading of default" << ::std::endl
- << "options files in nested build system driver invocations. Its values are \033[1mfalse\033[0m" << ::std::endl
- << "or \033[1m0\033[0m to suppress and \033[1mtrue\033[0m or \033[1m1\033[0m to load." << ::std::endl;
-
- p = ::build2::cl::usage_para::text;
-
- return p;
- }
-}
-
-// Begin epilogue.
-//
-//
-// End epilogue.
-
diff --git a/build2/b-options.hxx b/build2/b-options.hxx
deleted file mode 100644
index a2f99f4..0000000
--- a/build2/b-options.hxx
+++ /dev/null
@@ -1,722 +0,0 @@
-// -*- C++ -*-
-//
-// This file was generated by CLI, a command line interface
-// compiler for C++.
-//
-
-#ifndef BUILD2_B_OPTIONS_HXX
-#define BUILD2_B_OPTIONS_HXX
-
-// Begin prologue.
-//
-//
-// End prologue.
-
-#include <list>
-#include <deque>
-#include <iosfwd>
-#include <string>
-#include <cstddef>
-#include <exception>
-
-#ifndef CLI_POTENTIALLY_UNUSED
-# if defined(_MSC_VER) || defined(__xlC__)
-# define CLI_POTENTIALLY_UNUSED(x) (void*)&x
-# else
-# define CLI_POTENTIALLY_UNUSED(x) (void)x
-# endif
-#endif
-
-namespace build2
-{
- namespace cl
- {
- class usage_para
- {
- public:
- enum value
- {
- none,
- text,
- option
- };
-
- usage_para (value);
-
- operator value () const
- {
- return v_;
- }
-
- private:
- value v_;
- };
-
- class unknown_mode
- {
- public:
- enum value
- {
- skip,
- stop,
- fail
- };
-
- unknown_mode (value);
-
- operator value () const
- {
- return v_;
- }
-
- private:
- value v_;
- };
-
- // Exceptions.
- //
-
- class exception: public std::exception
- {
- public:
- virtual void
- print (::std::ostream&) const = 0;
- };
-
- ::std::ostream&
- operator<< (::std::ostream&, const exception&);
-
- class unknown_option: public exception
- {
- public:
- virtual
- ~unknown_option () throw ();
-
- unknown_option (const std::string& option);
-
- const std::string&
- option () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- };
-
- class unknown_argument: public exception
- {
- public:
- virtual
- ~unknown_argument () throw ();
-
- unknown_argument (const std::string& argument);
-
- const std::string&
- argument () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string argument_;
- };
-
- class missing_value: public exception
- {
- public:
- virtual
- ~missing_value () throw ();
-
- missing_value (const std::string& option);
-
- const std::string&
- option () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- };
-
- class invalid_value: public exception
- {
- public:
- virtual
- ~invalid_value () throw ();
-
- invalid_value (const std::string& option,
- const std::string& value,
- const std::string& message = std::string ());
-
- const std::string&
- option () const;
-
- const std::string&
- value () const;
-
- const std::string&
- message () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- std::string value_;
- std::string message_;
- };
-
- class eos_reached: public exception
- {
- public:
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
- };
-
- class file_io_failure: public exception
- {
- public:
- virtual
- ~file_io_failure () throw ();
-
- file_io_failure (const std::string& file);
-
- const std::string&
- file () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string file_;
- };
-
- class unmatched_quote: public exception
- {
- public:
- virtual
- ~unmatched_quote () throw ();
-
- unmatched_quote (const std::string& argument);
-
- const std::string&
- argument () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string argument_;
- };
-
- // Command line argument scanner interface.
- //
- // The values returned by next() are guaranteed to be valid
- // for the two previous arguments up until a call to a third
- // peek() or next().
- //
- // The position() function returns a monotonically-increasing
- // number which, if stored, can later be used to determine the
- // relative position of the argument returned by the following
- // call to next(). Note that if multiple scanners are used to
- // extract arguments from multiple sources, then the end
- // position of the previous scanner should be used as the
- // start position of the next.
- //
- class scanner
- {
- public:
- virtual
- ~scanner ();
-
- virtual bool
- more () = 0;
-
- virtual const char*
- peek () = 0;
-
- virtual const char*
- next () = 0;
-
- virtual void
- skip () = 0;
-
- virtual std::size_t
- position () = 0;
- };
-
- class argv_scanner: public scanner
- {
- public:
- argv_scanner (int& argc,
- char** argv,
- bool erase = false,
- std::size_t start_position = 0);
-
- argv_scanner (int start,
- int& argc,
- char** argv,
- bool erase = false,
- std::size_t start_position = 0);
-
- int
- end () const;
-
- virtual bool
- more ();
-
- virtual const char*
- peek ();
-
- virtual const char*
- next ();
-
- virtual void
- skip ();
-
- virtual std::size_t
- position ();
-
- protected:
- std::size_t start_position_;
- int i_;
- int& argc_;
- char** argv_;
- bool erase_;
- };
-
- class argv_file_scanner: public argv_scanner
- {
- public:
- argv_file_scanner (int& argc,
- char** argv,
- const std::string& option,
- bool erase = false,
- std::size_t start_position = 0);
-
- argv_file_scanner (int start,
- int& argc,
- char** argv,
- const std::string& option,
- bool erase = false,
- std::size_t start_position = 0);
-
- argv_file_scanner (const std::string& file,
- const std::string& option,
- std::size_t start_position = 0);
-
- struct option_info
- {
- // If search_func is not NULL, it is called, with the arg
- // value as the second argument, to locate the options file.
- // If it returns an empty string, then the file is ignored.
- //
- const char* option;
- std::string (*search_func) (const char*, void* arg);
- void* arg;
- };
-
- argv_file_scanner (int& argc,
- char** argv,
- const option_info* options,
- std::size_t options_count,
- bool erase = false,
- std::size_t start_position = 0);
-
- argv_file_scanner (int start,
- int& argc,
- char** argv,
- const option_info* options,
- std::size_t options_count,
- bool erase = false,
- std::size_t start_position = 0);
-
- argv_file_scanner (const std::string& file,
- const option_info* options = 0,
- std::size_t options_count = 0,
- std::size_t start_position = 0);
-
- virtual bool
- more ();
-
- virtual const char*
- peek ();
-
- virtual const char*
- next ();
-
- virtual void
- skip ();
-
- virtual std::size_t
- position ();
-
- // Return the file path if the peeked at argument came from a file and
- // the empty string otherwise. The reference is guaranteed to be valid
- // till the end of the scanner lifetime.
- //
- const std::string&
- peek_file ();
-
- // Return the 1-based line number if the peeked at argument came from
- // a file and zero otherwise.
- //
- std::size_t
- peek_line ();
-
- private:
- const option_info*
- find (const char*) const;
-
- void
- load (const std::string& file);
-
- typedef argv_scanner base;
-
- const std::string option_;
- option_info option_info_;
- const option_info* options_;
- std::size_t options_count_;
-
- struct arg
- {
- std::string value;
- const std::string* file;
- std::size_t line;
- };
-
- std::deque<arg> args_;
- std::list<std::string> files_;
-
- // Circular buffer of two arguments.
- //
- std::string hold_[2];
- std::size_t i_;
-
- bool skip_;
-
- static int zero_argc_;
- static std::string empty_string_;
- };
-
- template <typename X>
- struct parser;
- }
-}
-
-#include <set>
-
-#include <libbuild2/types.hxx>
-
-namespace build2
-{
- class options
- {
- public:
- options ();
-
- // Return true if anything has been parsed.
- //
- bool
- parse (int& argc,
- char** argv,
- bool erase = false,
- ::build2::cl::unknown_mode option = ::build2::cl::unknown_mode::fail,
- ::build2::cl::unknown_mode argument = ::build2::cl::unknown_mode::stop);
-
- bool
- parse (int start,
- int& argc,
- char** argv,
- bool erase = false,
- ::build2::cl::unknown_mode option = ::build2::cl::unknown_mode::fail,
- ::build2::cl::unknown_mode argument = ::build2::cl::unknown_mode::stop);
-
- bool
- parse (int& argc,
- char** argv,
- int& end,
- bool erase = false,
- ::build2::cl::unknown_mode option = ::build2::cl::unknown_mode::fail,
- ::build2::cl::unknown_mode argument = ::build2::cl::unknown_mode::stop);
-
- bool
- parse (int start,
- int& argc,
- char** argv,
- int& end,
- bool erase = false,
- ::build2::cl::unknown_mode option = ::build2::cl::unknown_mode::fail,
- ::build2::cl::unknown_mode argument = ::build2::cl::unknown_mode::stop);
-
- bool
- parse (::build2::cl::scanner&,
- ::build2::cl::unknown_mode option = ::build2::cl::unknown_mode::fail,
- ::build2::cl::unknown_mode argument = ::build2::cl::unknown_mode::stop);
-
- // Merge options from the specified instance appending/overriding
- // them as if they appeared after options in this instance.
- //
- void
- merge (const options&);
-
- // Option accessors.
- //
- const uint64_t&
- build2_metadata () const;
-
- bool
- build2_metadata_specified () const;
-
- const bool&
- v () const;
-
- const bool&
- V () const;
-
- const bool&
- quiet () const;
-
- const bool&
- silent () const;
-
- const uint16_t&
- verbose () const;
-
- bool
- verbose_specified () const;
-
- const bool&
- stat () const;
-
- const std::set<string>&
- dump () const;
-
- bool
- dump_specified () const;
-
- const bool&
- progress () const;
-
- const bool&
- no_progress () const;
-
- const size_t&
- jobs () const;
-
- bool
- jobs_specified () const;
-
- const size_t&
- max_jobs () const;
-
- bool
- max_jobs_specified () const;
-
- const size_t&
- queue_depth () const;
-
- bool
- queue_depth_specified () const;
-
- const string&
- file_cache () const;
-
- bool
- file_cache_specified () const;
-
- const size_t&
- max_stack () const;
-
- bool
- max_stack_specified () const;
-
- const bool&
- serial_stop () const;
-
- const bool&
- dry_run () const;
-
- const bool&
- match_only () const;
-
- const bool&
- no_external_modules () const;
-
- const bool&
- structured_result () const;
-
- const bool&
- mtime_check () const;
-
- const bool&
- no_mtime_check () const;
-
- const bool&
- no_column () const;
-
- const bool&
- no_line () const;
-
- const path&
- buildfile () const;
-
- bool
- buildfile_specified () const;
-
- const path&
- config_guess () const;
-
- bool
- config_guess_specified () const;
-
- const path&
- config_sub () const;
-
- bool
- config_sub_specified () const;
-
- const string&
- pager () const;
-
- bool
- pager_specified () const;
-
- const strings&
- pager_option () const;
-
- bool
- pager_option_specified () const;
-
- const string&
- options_file () const;
-
- bool
- options_file_specified () const;
-
- const dir_path&
- default_options () const;
-
- bool
- default_options_specified () const;
-
- const bool&
- no_default_options () const;
-
- const bool&
- help () const;
-
- const bool&
- version () const;
-
- // Print usage information.
- //
- static ::build2::cl::usage_para
- print_usage (::std::ostream&,
- ::build2::cl::usage_para = ::build2::cl::usage_para::none);
-
- // Implementation details.
- //
- protected:
- bool
- _parse (const char*, ::build2::cl::scanner&);
-
- private:
- bool
- _parse (::build2::cl::scanner&,
- ::build2::cl::unknown_mode option,
- ::build2::cl::unknown_mode argument);
-
- public:
- uint64_t build2_metadata_;
- bool build2_metadata_specified_;
- bool v_;
- bool V_;
- bool quiet_;
- bool silent_;
- uint16_t verbose_;
- bool verbose_specified_;
- bool stat_;
- std::set<string> dump_;
- bool dump_specified_;
- bool progress_;
- bool no_progress_;
- size_t jobs_;
- bool jobs_specified_;
- size_t max_jobs_;
- bool max_jobs_specified_;
- size_t queue_depth_;
- bool queue_depth_specified_;
- string file_cache_;
- bool file_cache_specified_;
- size_t max_stack_;
- bool max_stack_specified_;
- bool serial_stop_;
- bool dry_run_;
- bool match_only_;
- bool no_external_modules_;
- bool structured_result_;
- bool mtime_check_;
- bool no_mtime_check_;
- bool no_column_;
- bool no_line_;
- path buildfile_;
- bool buildfile_specified_;
- path config_guess_;
- bool config_guess_specified_;
- path config_sub_;
- bool config_sub_specified_;
- string pager_;
- bool pager_specified_;
- strings pager_option_;
- bool pager_option_specified_;
- string options_file_;
- bool options_file_specified_;
- dir_path default_options_;
- bool default_options_specified_;
- bool no_default_options_;
- bool help_;
- bool version_;
- };
-}
-
-// Print page usage information.
-//
-namespace build2
-{
- ::build2::cl::usage_para
- print_b_usage (::std::ostream&,
- ::build2::cl::usage_para = ::build2::cl::usage_para::none);
-}
-
-#include <build2/b-options.ixx>
-
-// Begin epilogue.
-//
-//
-// End epilogue.
-
-#endif // BUILD2_B_OPTIONS_HXX
diff --git a/build2/b-options.ixx b/build2/b-options.ixx
deleted file mode 100644
index 104d4da..0000000
--- a/build2/b-options.ixx
+++ /dev/null
@@ -1,582 +0,0 @@
-// -*- C++ -*-
-//
-// This file was generated by CLI, a command line interface
-// compiler for C++.
-//
-
-// Begin prologue.
-//
-//
-// End prologue.
-
-#include <cassert>
-
-namespace build2
-{
- namespace cl
- {
- // usage_para
- //
- inline usage_para::
- usage_para (value v)
- : v_ (v)
- {
- }
-
- // unknown_mode
- //
- inline unknown_mode::
- unknown_mode (value v)
- : v_ (v)
- {
- }
-
- // exception
- //
- inline ::std::ostream&
- operator<< (::std::ostream& os, const exception& e)
- {
- e.print (os);
- return os;
- }
-
- // unknown_option
- //
- inline unknown_option::
- unknown_option (const std::string& option)
- : option_ (option)
- {
- }
-
- inline const std::string& unknown_option::
- option () const
- {
- return option_;
- }
-
- // unknown_argument
- //
- inline unknown_argument::
- unknown_argument (const std::string& argument)
- : argument_ (argument)
- {
- }
-
- inline const std::string& unknown_argument::
- argument () const
- {
- return argument_;
- }
-
- // missing_value
- //
- inline missing_value::
- missing_value (const std::string& option)
- : option_ (option)
- {
- }
-
- inline const std::string& missing_value::
- option () const
- {
- return option_;
- }
-
- // invalid_value
- //
- inline invalid_value::
- invalid_value (const std::string& option,
- const std::string& value,
- const std::string& message)
- : option_ (option),
- value_ (value),
- message_ (message)
- {
- }
-
- inline const std::string& invalid_value::
- option () const
- {
- return option_;
- }
-
- inline const std::string& invalid_value::
- value () const
- {
- return value_;
- }
-
- inline const std::string& invalid_value::
- message () const
- {
- return message_;
- }
-
- // file_io_failure
- //
- inline file_io_failure::
- file_io_failure (const std::string& file)
- : file_ (file)
- {
- }
-
- inline const std::string& file_io_failure::
- file () const
- {
- return file_;
- }
-
- // unmatched_quote
- //
- inline unmatched_quote::
- unmatched_quote (const std::string& argument)
- : argument_ (argument)
- {
- }
-
- inline const std::string& unmatched_quote::
- argument () const
- {
- return argument_;
- }
-
- // argv_scanner
- //
- inline argv_scanner::
- argv_scanner (int& argc,
- char** argv,
- bool erase,
- std::size_t sp)
- : start_position_ (sp + 1),
- i_ (1),
- argc_ (argc),
- argv_ (argv),
- erase_ (erase)
- {
- }
-
- inline argv_scanner::
- argv_scanner (int start,
- int& argc,
- char** argv,
- bool erase,
- std::size_t sp)
- : start_position_ (sp + static_cast<std::size_t> (start)),
- i_ (start),
- argc_ (argc),
- argv_ (argv),
- erase_ (erase)
- {
- }
-
- inline int argv_scanner::
- end () const
- {
- return i_;
- }
-
- // argv_file_scanner
- //
- inline argv_file_scanner::
- argv_file_scanner (int& argc,
- char** argv,
- const std::string& option,
- bool erase,
- std::size_t sp)
- : argv_scanner (argc, argv, erase, sp),
- option_ (option),
- options_ (&option_info_),
- options_count_ (1),
- i_ (1),
- skip_ (false)
- {
- option_info_.option = option_.c_str ();
- option_info_.search_func = 0;
- }
-
- inline argv_file_scanner::
- argv_file_scanner (int start,
- int& argc,
- char** argv,
- const std::string& option,
- bool erase,
- std::size_t sp)
- : argv_scanner (start, argc, argv, erase, sp),
- option_ (option),
- options_ (&option_info_),
- options_count_ (1),
- i_ (1),
- skip_ (false)
- {
- option_info_.option = option_.c_str ();
- option_info_.search_func = 0;
- }
-
- inline argv_file_scanner::
- argv_file_scanner (const std::string& file,
- const std::string& option,
- std::size_t sp)
- : argv_scanner (0, zero_argc_, 0, sp),
- option_ (option),
- options_ (&option_info_),
- options_count_ (1),
- i_ (1),
- skip_ (false)
- {
- option_info_.option = option_.c_str ();
- option_info_.search_func = 0;
-
- load (file);
- }
-
- inline argv_file_scanner::
- argv_file_scanner (int& argc,
- char** argv,
- const option_info* options,
- std::size_t options_count,
- bool erase,
- std::size_t sp)
- : argv_scanner (argc, argv, erase, sp),
- options_ (options),
- options_count_ (options_count),
- i_ (1),
- skip_ (false)
- {
- }
-
- inline argv_file_scanner::
- argv_file_scanner (int start,
- int& argc,
- char** argv,
- const option_info* options,
- std::size_t options_count,
- bool erase,
- std::size_t sp)
- : argv_scanner (start, argc, argv, erase, sp),
- options_ (options),
- options_count_ (options_count),
- i_ (1),
- skip_ (false)
- {
- }
-
- inline argv_file_scanner::
- argv_file_scanner (const std::string& file,
- const option_info* options,
- std::size_t options_count,
- std::size_t sp)
- : argv_scanner (0, zero_argc_, 0, sp),
- options_ (options),
- options_count_ (options_count),
- i_ (1),
- skip_ (false)
- {
- load (file);
- }
- }
-}
-
-namespace build2
-{
- // options
- //
-
- inline const uint64_t& options::
- build2_metadata () const
- {
- return this->build2_metadata_;
- }
-
- inline bool options::
- build2_metadata_specified () const
- {
- return this->build2_metadata_specified_;
- }
-
- inline const bool& options::
- v () const
- {
- return this->v_;
- }
-
- inline const bool& options::
- V () const
- {
- return this->V_;
- }
-
- inline const bool& options::
- quiet () const
- {
- return this->quiet_;
- }
-
- inline const bool& options::
- silent () const
- {
- return this->silent_;
- }
-
- inline const uint16_t& options::
- verbose () const
- {
- return this->verbose_;
- }
-
- inline bool options::
- verbose_specified () const
- {
- return this->verbose_specified_;
- }
-
- inline const bool& options::
- stat () const
- {
- return this->stat_;
- }
-
- inline const std::set<string>& options::
- dump () const
- {
- return this->dump_;
- }
-
- inline bool options::
- dump_specified () const
- {
- return this->dump_specified_;
- }
-
- inline const bool& options::
- progress () const
- {
- return this->progress_;
- }
-
- inline const bool& options::
- no_progress () const
- {
- return this->no_progress_;
- }
-
- inline const size_t& options::
- jobs () const
- {
- return this->jobs_;
- }
-
- inline bool options::
- jobs_specified () const
- {
- return this->jobs_specified_;
- }
-
- inline const size_t& options::
- max_jobs () const
- {
- return this->max_jobs_;
- }
-
- inline bool options::
- max_jobs_specified () const
- {
- return this->max_jobs_specified_;
- }
-
- inline const size_t& options::
- queue_depth () const
- {
- return this->queue_depth_;
- }
-
- inline bool options::
- queue_depth_specified () const
- {
- return this->queue_depth_specified_;
- }
-
- inline const string& options::
- file_cache () const
- {
- return this->file_cache_;
- }
-
- inline bool options::
- file_cache_specified () const
- {
- return this->file_cache_specified_;
- }
-
- inline const size_t& options::
- max_stack () const
- {
- return this->max_stack_;
- }
-
- inline bool options::
- max_stack_specified () const
- {
- return this->max_stack_specified_;
- }
-
- inline const bool& options::
- serial_stop () const
- {
- return this->serial_stop_;
- }
-
- inline const bool& options::
- dry_run () const
- {
- return this->dry_run_;
- }
-
- inline const bool& options::
- match_only () const
- {
- return this->match_only_;
- }
-
- inline const bool& options::
- no_external_modules () const
- {
- return this->no_external_modules_;
- }
-
- inline const bool& options::
- structured_result () const
- {
- return this->structured_result_;
- }
-
- inline const bool& options::
- mtime_check () const
- {
- return this->mtime_check_;
- }
-
- inline const bool& options::
- no_mtime_check () const
- {
- return this->no_mtime_check_;
- }
-
- inline const bool& options::
- no_column () const
- {
- return this->no_column_;
- }
-
- inline const bool& options::
- no_line () const
- {
- return this->no_line_;
- }
-
- inline const path& options::
- buildfile () const
- {
- return this->buildfile_;
- }
-
- inline bool options::
- buildfile_specified () const
- {
- return this->buildfile_specified_;
- }
-
- inline const path& options::
- config_guess () const
- {
- return this->config_guess_;
- }
-
- inline bool options::
- config_guess_specified () const
- {
- return this->config_guess_specified_;
- }
-
- inline const path& options::
- config_sub () const
- {
- return this->config_sub_;
- }
-
- inline bool options::
- config_sub_specified () const
- {
- return this->config_sub_specified_;
- }
-
- inline const string& options::
- pager () const
- {
- return this->pager_;
- }
-
- inline bool options::
- pager_specified () const
- {
- return this->pager_specified_;
- }
-
- inline const strings& options::
- pager_option () const
- {
- return this->pager_option_;
- }
-
- inline bool options::
- pager_option_specified () const
- {
- return this->pager_option_specified_;
- }
-
- inline const string& options::
- options_file () const
- {
- return this->options_file_;
- }
-
- inline bool options::
- options_file_specified () const
- {
- return this->options_file_specified_;
- }
-
- inline const dir_path& options::
- default_options () const
- {
- return this->default_options_;
- }
-
- inline bool options::
- default_options_specified () const
- {
- return this->default_options_specified_;
- }
-
- inline const bool& options::
- no_default_options () const
- {
- return this->no_default_options_;
- }
-
- inline const bool& options::
- help () const
- {
- return this->help_;
- }
-
- inline const bool& options::
- version () const
- {
- return this->version_;
- }
-}
-
-// Begin epilogue.
-//
-//
-// End epilogue.
diff --git a/build2/b.cxx b/build2/b.cxx
index 91d59a5..f0c9338 100644
--- a/build2/b.cxx
+++ b/build2/b.cxx
@@ -1,27 +1,18 @@
// file : build2/b.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef _WIN32
-# include <signal.h> // signal()
-#else
-# include <libbutl/win32-utility.hxx>
-#endif
-
-#ifdef __GLIBCXX__
-# include <locale>
-#endif
-
-#include <limits>
#include <sstream>
-#include <cstring> // strcmp(), strchr()
#include <typeinfo>
#include <iostream> // cout
#include <exception> // terminate(), set_terminate(), terminate_handler
#include <libbutl/pager.hxx>
-#include <libbutl/fdstream.hxx> // stderr_fd(), fdterm()
-#include <libbutl/backtrace.hxx> // backtrace()
-#include <libbutl/default-options.hxx>
+#include <libbutl/fdstream.hxx> // stderr_fd(), fdterm()
+#include <libbutl/backtrace.hxx> // backtrace()
+
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -44,7 +35,8 @@
#include <libbuild2/parser.hxx>
-#include <build2/b-options.hxx>
+#include <libbuild2/b-options.hxx>
+#include <libbuild2/b-cmdline.hxx>
// Build system modules.
//
@@ -62,8 +54,7 @@
#ifndef BUILD2_BOOTSTRAP
# include <libbuild2/bash/init.hxx>
-
-# include <build2/cli/init.hxx>
+# include <libbuild2/cli/init.hxx>
#endif
using namespace butl;
@@ -71,75 +62,161 @@ using namespace std;
namespace build2
{
- static options ops;
-
int
main (int argc, char* argv[]);
+#ifndef BUILD2_BOOTSTRAP
// Structured result printer (--structured-result mode).
//
class result_printer
{
public:
- result_printer (const action_targets& tgs): tgs_ (tgs) {}
+ result_printer (const b_options& ops,
+ const action_targets& tgs,
+ json::stream_serializer& js)
+ : ops_ (ops), tgs_ (tgs), json_serializer_ (js) {}
+
~result_printer ();
private:
+ void
+ print_lines ();
+
+ void
+ print_json ();
+
+ private:
+ const b_options& ops_;
const action_targets& tgs_;
+ json::stream_serializer& json_serializer_;
};
+ void result_printer::
+ print_lines ()
+ {
+ for (const action_target& at: tgs_)
+ {
+ if (at.state == target_state::unknown)
+ continue; // Not a target/no result.
+
+ const target& t (at.as<target> ());
+ context& ctx (t.ctx);
+
+ cout << at.state
+ << ' ' << ctx.current_mif->name
+ << ' ' << ctx.current_inner_oif->name;
+
+ if (ctx.current_outer_oif != nullptr)
+ cout << '(' << ctx.current_outer_oif->name << ')';
+
+ // There are two ways one may wish to identify the target of the
+ // operation: as something specific but inherently non-portable (say, a
+ // filesystem path, for example c:\tmp\foo.exe) or as something regular
+ // that can be used to refer to a target in a portable way (for example,
+ // c:\tmp\exe{foo}; note that the directory part is still not portable).
+ // Which one should we use is a good question. Let's go with the
+ // portable one for now and see how it goes (we can always add a format
+ // variant, e.g., --structured-result=lines-path). Note also that the
+ // json format includes both.
+
+ // Set the stream extension verbosity to 0 to suppress extension
+ // printing by default (this can still be overriden by the target type's
+ // print function as is the case for file{}, for example). And set the
+ // path verbosity to 1 to always print absolute.
+ //
+ stream_verbosity sv (stream_verb (cout));
+ stream_verb (cout, stream_verbosity (1, 0));
+
+ cout << ' ' << t << endl;
+
+ stream_verb (cout, sv);
+ }
+ }
+
+ void result_printer::
+ print_json ()
+ {
+ json::stream_serializer& s (json_serializer_);
+
+ for (const action_target& at: tgs_)
+ {
+ if (at.state == target_state::unknown)
+ continue; // Not a target/no result.
+
+ const target& t (at.as<target> ());
+ context& ctx (t.ctx);
+
+ s.begin_object ();
+
+ // Quoted target.
+ //
+ s.member_name ("target");
+ dump_quoted_target_name (s, t);
+
+ // Display target.
+ //
+ s.member_name ("display_target");
+ dump_display_target_name (s, t);
+
+ s.member ("target_type", t.type ().name, false /* check */);
+
+ if (t.is_a<dir> ())
+ s.member ("target_path", t.dir.string ());
+ else if (const auto* pt = t.is_a<path_target> ())
+ s.member ("target_path", pt->path ().string ());
+
+ s.member ("meta_operation", ctx.current_mif->name, false /* check */);
+ s.member ("operation", ctx.current_inner_oif->name, false /* check */);
+
+ if (ctx.current_outer_oif != nullptr)
+ s.member ("outer_operation",
+ ctx.current_outer_oif->name,
+ false /* check */);
+
+ s.member ("state", to_string (at.state), false /* check */);
+
+ s.end_object ();
+ }
+ }
+
result_printer::
~result_printer ()
{
// Let's do some sanity checking even when we are not in the structred
// output mode.
//
+#ifndef NDEBUG
for (const action_target& at: tgs_)
{
switch (at.state)
{
- case target_state::unknown: continue; // Not a target/no result.
+ case target_state::unknown:
case target_state::unchanged:
case target_state::changed:
case target_state::failed: break; // Valid states.
default: assert (false);
}
+ }
+#endif
- if (ops.structured_result ())
+ if (ops_.structured_result_specified ())
+ {
+ switch (ops_.structured_result ())
{
- const target& t (at.as<target> ());
- context& ctx (t.ctx);
-
- cout << at.state
- << ' ' << ctx.current_mif->name
- << ' ' << ctx.current_inner_oif->name;
-
- if (ctx.current_outer_oif != nullptr)
- cout << '(' << ctx.current_outer_oif->name << ')';
-
- // There are two ways one may wish to identify the target of the
- // operation: as something specific but inherently non-portable (say,
- // a filesystem path, for example c:\tmp\foo.exe) or as something
- // regular that can be used to refer to a target in a portable way
- // (for example, c:\tmp\exe{foo}; note that the directory part is
- // still not portable). Which one should we use is a good question.
- // Let's go with the portable one for now and see how it goes (we
- // can always add a format version, e.g., --structured-result=2).
-
- // Set the stream extension verbosity to 0 to suppress extension
- // printing by default (this can still be overriden by the target
- // type's print function as is the case for file{}, for example).
- // And set the path verbosity to 1 to always print absolute.
- //
- stream_verbosity sv (stream_verb (cout));
- stream_verb (cout, stream_verbosity (1, 0));
-
- cout << ' ' << t << endl;
-
- stream_verb (cout, sv);
+ case structured_result_format::lines:
+ {
+ print_lines ();
+ break;
+ }
+ case structured_result_format::json:
+ {
+ print_json ();
+ break;
+ }
}
}
}
+#endif
}
// Print backtrace if terminating due to an unhandled exception. Note that
@@ -172,445 +249,21 @@ main (int argc, char* argv[])
tracer trace ("main");
- int r (0);
-
- // This is a little hack to make out baseutils for Windows work when called
- // with absolute path. In a nutshell, MSYS2's exec*p() doesn't search in the
- // parent's executable directory, only in PATH. And since we are running
- // without a shell (that would read /etc/profile which sets PATH to some
- // sensible values), we are only getting Win32 PATH values. And MSYS2 /bin
- // is not one of them. So what we are going to do is add /bin at the end of
- // PATH (which will be passed as is by the MSYS2 machinery). This will make
- // MSYS2 search in /bin (where our baseutils live). And for everyone else
- // this should be harmless since it is not a valid Win32 path.
- //
-#ifdef _WIN32
- {
- string mp;
- if (optional<string> p = getenv ("PATH"))
- {
- mp = move (*p);
- mp += ';';
- }
- mp += "/bin";
-
- setenv ("PATH", mp);
- }
-#endif
-
- // A data race happens in the libstdc++ (as of GCC 7.2) implementation of
- // the ctype<char>::narrow() function (bug #77704). The issue is easily
- // triggered by the testscript runner that indirectly (via regex) uses
- // ctype<char> facet of the global locale (and can potentially be triggered
- // by other locale- aware code). We work around this by pre-initializing the
- // global locale facet internal cache.
- //
-#ifdef __GLIBCXX__
- {
- const ctype<char>& ct (use_facet<ctype<char>> (locale ()));
-
- for (size_t i (0); i != 256; ++i)
- ct.narrow (static_cast<char> (i), '\0');
- }
-#endif
-
- // On POSIX ignore SIGPIPE which is signaled to a pipe-writing process if
- // the pipe reading end is closed. Note that by default this signal
- // terminates a process. Also note that there is no way to disable this
- // behavior on a file descriptor basis or for the write() function call.
- //
-#ifndef _WIN32
- if (signal (SIGPIPE, SIG_IGN) == SIG_ERR)
- fail << "unable to ignore broken pipe (SIGPIPE) signal: "
- << system_error (errno, generic_category ()); // Sanitize.
-#endif
+ init_process ();
+ int r (0);
+ b_options ops;
scheduler sched;
- // Parse the command line.
+ // Statistics.
//
+ size_t phase_switch_contention (0);
+
try
{
- // Note that the diagnostics verbosity level can only be calculated after
- // default options are loaded and merged (see below). Thus, until then we
- // refer to the verbosity level specified on the command line.
- //
- auto verbosity = [] ()
- {
- uint16_t v (
- ops.verbose_specified ()
- ? ops.verbose ()
- : ops.V () ? 3 : ops.v () ? 2 : ops.quiet () || ops.silent () ? 0 : 1);
-
- if (ops.silent () && v != 0)
- fail << "specified with -v, -V, or --verbose verbosity level " << v
- << " is incompatible with --silent";
-
- return v;
- };
-
- // We want to be able to specify options, vars, and buildspecs in any
- // order (it is really handy to just add -v at the end of the command
- // line).
+ // Parse the command line.
//
- strings cmd_vars;
- string args;
- try
- {
- // Command line arguments starting position.
- //
- // We want the positions of the command line arguments to be after the
- // default options files. Normally that would be achieved by passing the
- // last position of the previous scanner to the next. The problem is
- // that we parse the command line arguments first (for good reasons).
- // Also the default options files parsing machinery needs the maximum
- // number of arguments to be specified and assigns the positions below
- // this value (see load_default_options() for details). So we are going
- // to "reserve" the first half of the size_t value range for the default
- // options positions and the second half for the command line arguments
- // positions.
- //
- size_t args_pos (numeric_limits<size_t>::max () / 2);
- cl::argv_file_scanner scan (argc, argv, "--options-file", args_pos);
-
- size_t argn (0); // Argument count.
- bool shortcut (false); // True if the shortcut syntax is used.
-
- for (bool opt (true), var (true); scan.more (); )
- {
- if (opt)
- {
- // Parse the next chunk of options until we reach an argument (or
- // eos).
- //
- if (ops.parse (scan) && !scan.more ())
- break;
-
- // If we see first "--", then we are done parsing options.
- //
- if (strcmp (scan.peek (), "--") == 0)
- {
- scan.next ();
- opt = false;
- continue;
- }
-
- // Fall through.
- }
-
- const char* s (scan.next ());
-
- // See if this is a command line variable. What if someone needs to
- // pass a buildspec that contains '='? One way to support this would
- // be to quote such a buildspec (e.g., "'/tmp/foo=bar/'"). Or invent
- // another separator. Or use a second "--". Actually, let's just do
- // the second "--".
- //
- if (var)
- {
- // If we see second "--", then we are also done parsing variables.
- //
- if (strcmp (s, "--") == 0)
- {
- var = false;
- continue;
- }
-
- if (const char* p = strchr (s, '=')) // Covers =, +=, and =+.
- {
- // Diagnose the empty variable name situation. Note that we don't
- // allow "partially broken down" assignments (as in foo =bar)
-          // since foo= bar would be ambiguous.
- //
- if (p == s || (p == s + 1 && *s == '+'))
- fail << "missing variable name in '" << s << "'";
-
- cmd_vars.push_back (s);
- continue;
- }
-
- // Handle the "broken down" variable assignments (i.e., foo = bar
- // instead of foo=bar).
- //
- if (scan.more ())
- {
- const char* a (scan.peek ());
-
- if (strcmp (a, "=" ) == 0 ||
- strcmp (a, "+=") == 0 ||
- strcmp (a, "=+") == 0)
- {
- string v (s);
- v += a;
-
- scan.next ();
-
- if (scan.more ())
- v += scan.next ();
-
- cmd_vars.push_back (move (v));
- continue;
- }
- }
-
- // Fall through.
- }
-
- // Merge all the individual buildspec arguments into a single string.
- // We use newlines to separate arguments so that line numbers in
- // diagnostics signify argument numbers. Clever, huh?
- //
- if (argn != 0)
- args += '\n';
-
- args += s;
-
- // See if we are using the shortcut syntax.
- //
- if (argn == 0 && args.back () == ':')
- {
- args.back () = '(';
- shortcut = true;
- }
-
- argn++;
- }
-
- // Add the closing parenthesis unless there wasn't anything in between
- // in which case pop the opening one.
- //
- if (shortcut)
- {
- if (argn == 1)
- args.pop_back ();
- else
- args += ')';
- }
-
- // Get/set an environment variable tracing the operation.
- //
- auto get_env = [&verbosity, &trace] (const char* nm)
- {
- optional<string> r (getenv (nm));
-
- if (verbosity () >= 5)
- {
- if (r)
- trace << nm << ": '" << *r << "'";
- else
- trace << nm << ": <NULL>";
- }
-
- return r;
- };
-
- auto set_env = [&verbosity, &trace] (const char* nm, const string& vl)
- {
- try
- {
- if (verbosity () >= 5)
- trace << "setting " << nm << "='" << vl << "'";
-
- setenv (nm, vl);
- }
- catch (const system_error& e)
- {
- // The variable value can potentially be long/multi-line, so let's
- // print it last.
- //
- fail << "unable to set environment variable " << nm << ": " << e <<
- info << "value: '" << vl << "'";
- }
- };
-
- // If the BUILD2_VAR_OVR environment variable is present, then parse its
- // value as a newline-separated global variable overrides and prepend
- // them to the overrides specified on the command line.
- //
- // Note that this means global overrides may not contain a newline.
-
- // Verify that the string is a valid global override. Uses the file name
- // and the options flag for diagnostics only.
- //
- auto verify_glb_ovr = [] (const string& v, const path_name& fn, bool opt)
- {
- size_t p (v.find ('=', 1));
- if (p == string::npos || v[0] != '!')
- {
- diag_record dr (fail (fn));
- dr << "expected " << (opt ? "option or " : "") << "global "
- << "variable override instead of '" << v << "'";
-
- if (p != string::npos)
- dr << info << "prefix variable assignment with '!'";
- }
-
- if (p == 1 || (p == 2 && v[1] == '+')) // '!=' or '!+=' ?
- fail (fn) << "missing variable name in '" << v << "'";
- };
-
- optional<string> env_ovr (get_env ("BUILD2_VAR_OVR"));
- if (env_ovr)
- {
- path_name fn ("<BUILD2_VAR_OVR>");
-
- auto i (cmd_vars.begin ());
- for (size_t b (0), e (0); next_word (*env_ovr, b, e, '\n', '\r'); )
- {
- // Extract the override from the current line, stripping the leading
- // and trailing spaces.
- //
- string s (*env_ovr, b, e - b);
- trim (s);
-
- // Verify and save the override, unless the line is empty.
- //
- if (!s.empty ())
- {
- verify_glb_ovr (s, fn, false /* opt */);
- i = cmd_vars.insert (i, move (s)) + 1;
- }
- }
- }
-
- // Load the default options files, unless --no-default-options is
- // specified on the command line or the BUILD2_DEF_OPT environment
- // variable is set to a value other than 'true' or '1'.
- //
- // If loaded, prepend the default global overrides to the variables
- // specified on the command line, unless BUILD2_VAR_OVR is set in which
- // case just ignore them.
- //
- optional<string> env_def (get_env ("BUILD2_DEF_OPT"));
-
- // False if --no-default-options is specified on the command line. Note
- // that we cache the flag since it can be overridden by a default
- // options file.
- //
- bool cmd_def (!ops.no_default_options ());
-
- if (cmd_def && (!env_def || *env_def == "true" || *env_def == "1"))
- try
- {
- optional<dir_path> extra;
- if (ops.default_options_specified ())
- extra = ops.default_options ();
-
- // Load default options files.
- //
- default_options<options> def_ops (
- load_default_options<options,
- cl::argv_file_scanner,
- cl::unknown_mode> (
- nullopt /* sys_dir */,
- path::home_directory (), // The home variable is not assigned yet.
- extra,
- default_options_files {{path ("b.options")},
- nullopt /* start */},
- [&trace, &verbosity] (const path& f, bool r, bool o)
- {
- if (verbosity () >= 3)
- {
- if (o)
- trace << "treating " << f << " as "
- << (r ? "remote" : "local");
- else
- trace << "loading " << (r ? "remote " : "local ") << f;
- }
- },
- "--options-file",
- args_pos,
- 1024,
- true /* args */));
-
- // Merge the default and command line options.
- //
- ops = merge_default_options (def_ops, ops);
-
- // Merge the default and command line global overrides, unless
- // BUILD2_VAR_OVR is already set (in which case we assume this has
- // already been done).
- //
- // Note that the "broken down" variable assignments occupying a single
- // line are naturally supported.
- //
- if (!env_ovr)
- cmd_vars =
- merge_default_arguments (
- def_ops,
- cmd_vars,
- [&verify_glb_ovr] (const default_options_entry<options>& e,
- const strings&)
- {
- path_name fn (e.file);
-
- // Verify that all arguments are global overrides.
- //
- for (const string& a: e.arguments)
- verify_glb_ovr (a, fn, true /* opt */);
- });
- }
- catch (const invalid_argument& e)
- {
- fail << "unable to load default options files: " << e;
- }
- catch (const pair<path, system_error>& e)
- {
- fail << "unable to load default options files: " << e.first << ": "
- << e.second;
- }
- catch (const system_error& e)
- {
- fail << "unable to obtain home directory: " << e;
- }
-
- // Verify and save the global overrides present in cmd_vars (default,
- // from the command line, etc), if any, into the BUILD2_VAR_OVR
- // environment variable.
- //
- if (!cmd_vars.empty ())
- {
- string ovr;
- for (const string& v: cmd_vars)
- {
- if (v[0] == '!')
- {
- if (v.find_first_of ("\n\r") != string::npos)
- fail << "newline in global variable override '" << v << "'";
-
- if (!ovr.empty ())
- ovr += '\n';
-
- ovr += v;
- }
- }
-
- // Optimize for the common case.
- //
- // Note: cmd_vars may contain non-global overrides.
- //
- if (!ovr.empty () && (!env_ovr || *env_ovr != ovr))
- set_env ("BUILD2_VAR_OVR", ovr);
- }
-
- // Propagate disabling of the default options files to the potential
- // nested invocations.
- //
- if (!cmd_def && (!env_def || *env_def != "0"))
- set_env ("BUILD2_DEF_OPT", "0");
-
- // Validate options.
- //
- if (ops.progress () && ops.no_progress ())
- fail << "both --progress and --no-progress specified";
-
- if (ops.mtime_check () && ops.no_mtime_check ())
- fail << "both --mtime-check and --no-mtime-check specified";
- }
- catch (const cl::exception& e)
- {
- fail << e;
- }
+ b_cmdline cmdl (parse_b_cmdline (trace, argc, argv, ops));
// Handle --build2-metadata (see also buildfile).
//
@@ -660,10 +313,10 @@ main (int argc, char* argv[])
// Initialize the diagnostics state.
//
- init_diag (verbosity (),
+ init_diag (cmdl.verbosity,
ops.silent (),
- (ops.progress () ? optional<bool> (true) :
- ops.no_progress () ? optional<bool> (false) : nullopt),
+ cmdl.progress,
+ cmdl.diag_color,
ops.no_line (),
ops.no_column (),
fdterm (stderr_fd ()));
@@ -694,36 +347,14 @@ main (int argc, char* argv[])
}
}
- // Initialize time conversion data that is used by localtime_r().
- //
-#ifndef _WIN32
- tzset ();
-#else
- _tzset ();
-#endif
-
// Initialize the global state.
//
init (&::terminate,
argv[0],
- (ops.mtime_check () ? optional<bool> (true) :
- ops.no_mtime_check () ? optional<bool> (false) : nullopt),
- (ops.config_sub_specified ()
- ? optional<path> (ops.config_sub ())
- : nullopt),
- (ops.config_guess_specified ()
- ? optional<path> (ops.config_guess ())
- : nullopt));
-
-#ifdef _WIN32
- // On Windows disable displaying error reporting dialog box for the
- // current and child processes unless we are in the stop mode. Failed that
- // we may have multiple dialog boxes popping up.
- //
- if (!ops.serial_stop ())
- SetErrorMode (SetErrorMode (0) | // Returns the current mode.
- SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
-#endif
+ ops.serial_stop (),
+ cmdl.mtime_check,
+ cmdl.config_sub,
+ cmdl.config_guess);
// Load builtin modules.
//
@@ -740,64 +371,20 @@ main (int argc, char* argv[])
load_builtin_module (&in::build2_in_load);
#ifndef BUILD2_BOOTSTRAP
- load_builtin_module (&cli::build2_cli_load);
load_builtin_module (&bash::build2_bash_load);
+ load_builtin_module (&cli::build2_cli_load);
#endif
// Start up the scheduler and allocate lock shards.
//
- size_t jobs (0);
-
- if (ops.jobs_specified ())
- jobs = ops.jobs ();
- else if (ops.serial_stop ())
- jobs = 1;
-
- if (jobs == 0)
- jobs = scheduler::hardware_concurrency ();
-
- if (jobs == 0)
- {
- warn << "unable to determine the number of hardware threads" <<
- info << "falling back to serial execution" <<
- info << "use --jobs|-j to override";
-
- jobs = 1;
- }
-
- size_t max_jobs (0);
-
- if (ops.max_jobs_specified ())
- {
- max_jobs = ops.max_jobs ();
-
- if (max_jobs != 0 && max_jobs < jobs)
- fail << "invalid --max-jobs|-J value";
- }
-
- sched.startup (jobs,
- 1,
- max_jobs,
- jobs * ops.queue_depth (),
- (ops.max_stack_specified ()
- ? optional<size_t> (ops.max_stack () * 1024)
- : nullopt));
+ sched.startup (cmdl.jobs,
+ 1 /* init_active */,
+ cmdl.max_jobs,
+ cmdl.jobs * ops.queue_depth (),
+ cmdl.max_stack);
global_mutexes mutexes (sched.shard_size ());
-
- bool fcache_comp (true);
- if (ops.file_cache_specified ())
- {
- const string& v (ops.file_cache ());
- if (v == "noop" || v == "none")
- fcache_comp = false;
- else if (v == "sync-lz4")
- fcache_comp = true;
- else
- fail << "invalid --file-cache value '" << v << "'";
- }
-
- file_cache fcache (fcache_comp);
+ file_cache fcache (cmdl.fcache_compress);
// Trace some overall environment information.
//
@@ -809,25 +396,45 @@ main (int argc, char* argv[])
trace << "home: " << home;
trace << "path: " << (p ? *p : "<NULL>");
trace << "type: " << (build_installed ? "installed" : "development");
- trace << "jobs: " << jobs;
+ trace << "jobs: " << cmdl.jobs;
}
// Set the build context before parsing the buildspec since it relies on
// the global scope being setup. We reset it for every meta-operation (see
// below).
//
- unique_ptr<context> ctx;
- auto new_context = [&ctx, &sched, &mutexes, &fcache, &cmd_vars]
+ unique_ptr<context> pctx;
+ auto new_context = [&ops, &cmdl,
+ &sched, &mutexes, &fcache,
+ &phase_switch_contention,
+ &pctx]
{
- ctx = nullptr; // Free first.
- ctx.reset (new context (sched,
- mutexes,
- fcache,
- ops.match_only (),
- ops.no_external_modules (),
- ops.dry_run (),
- !ops.serial_stop () /* keep_going */,
- cmd_vars));
+ if (pctx != nullptr)
+ {
+ phase_switch_contention += (pctx->phase_mutex.contention +
+ pctx->phase_mutex.contention_load);
+ pctx = nullptr; // Free first to reuse memory.
+ }
+
+ optional<match_only_level> mo;
+ if (ops.load_only ()) mo = match_only_level::alias;
+ else if (ops.match_only ()) mo = match_only_level::all;
+
+ pctx.reset (new context (sched,
+ mutexes,
+ fcache,
+ mo,
+ ops.no_external_modules (),
+ ops.dry_run (),
+ ops.no_diag_buffer (),
+ !ops.serial_stop () /* keep_going */,
+ cmdl.cmd_vars));
+
+ if (ops.trace_match_specified ())
+ pctx->trace_match = &ops.trace_match ();
+
+ if (ops.trace_execute_specified ())
+ pctx->trace_execute = &ops.trace_execute ();
};
new_context ();
@@ -835,17 +442,18 @@ main (int argc, char* argv[])
// Parse the buildspec.
//
buildspec bspec;
+ path_name bspec_name ("<buildspec>");
try
{
- istringstream is (args);
+ istringstream is (cmdl.buildspec);
is.exceptions (istringstream::failbit | istringstream::badbit);
- parser p (*ctx);
- bspec = p.parse_buildspec (is, path_name ("<buildspec>"));
+ parser p (*pctx);
+ bspec = p.parse_buildspec (is, bspec_name);
}
catch (const io_error&)
{
- fail << "unable to parse buildspec '" << args << "'";
+ fail << "unable to parse buildspec '" << cmdl.buildspec << "'";
}
l5 ([&]{trace << "buildspec: " << bspec;});
@@ -853,18 +461,194 @@ main (int argc, char* argv[])
if (bspec.empty ())
bspec.push_back (metaopspec ()); // Default meta-operation.
+ // The reserve values were picked experimentally. They allow building a
+ // sample application that depends on Qt and Boost without causing a
+ // rehash.
+ //
+ // Note: omit reserving anything for the info meta-operation since it
+ // won't be loading the buildfiles and needs to be as fast as possible.
+ //
+ bool mo_info (bspec.size () == 1 &&
+ bspec.front ().size () == 1 &&
+ (bspec.front ().name == "info" ||
+ (bspec.front ().name.empty () &&
+ bspec.front ().front ().name == "info")));
+
+ if (!mo_info)
+ {
+ // Note: also adjust in bpkg if adjusting here.
+ //
+ pctx->reserve (context::reserves {
+ 30000 /* targets */,
+ 1100 /* variables */});
+ }
+
+ bool load_only (ops.load_only ());
+
const path& buildfile (ops.buildfile_specified ()
? ops.buildfile ()
: empty_path);
bool dump_load (false);
bool dump_match (false);
- if (ops.dump_specified ())
+ bool dump_match_pre (false);
+ bool dump_match_post (false);
+ for (const string& p: ops.dump ())
{
- dump_load = ops.dump ().find ("load") != ops.dump ().end ();
- dump_match = ops.dump ().find ("match") != ops.dump ().end ();
+ if (p == "load") dump_load = true;
+ else if (p == "match") dump_match = true;
+ else if (p == "match-pre") dump_match_pre = true;
+ else if (p == "match-post") dump_match_post = true;
+ else fail << "unknown phase '" << p << "' specified with --dump";
}
+ dump_format dump_fmt (dump_format::buildfile);
+ if (ops.dump_format_specified ())
+ {
+ const string& f (ops.dump_format ());
+
+ if (f == "json-v0.1")
+ {
+#ifdef BUILD2_BOOTSTRAP
+ fail << "json dump not supported in bootstrap build system";
+#endif
+ dump_fmt = dump_format::json;
+ }
+ else if (f != "buildfile")
+ {
+ diag_record dr (fail);
+
+ dr << "unsupported format '" << f << "' specified with --dump-format";
+
+ if (f.compare (0, 4, "json") == 0)
+ dr << info << "supported json format version is json-v0.1";
+ }
+ }
+
+ auto dump = [&trace, &ops, dump_fmt] (context& ctx, optional<action> a)
+ {
+ const dir_paths& scopes (ops.dump_scope ());
+ const vector<pair<name, optional<name>>>& targets (ops.dump_target ());
+
+ if (scopes.empty () && targets.empty ())
+ build2::dump (ctx, a, dump_fmt);
+ else
+ {
+ auto comp_norm = [] (dir_path& d, const char* what)
+ {
+ try
+ {
+ if (d.relative ())
+ d.complete ();
+
+ d.normalize ();
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid path '" << e.path << "' specified with " << what;
+ }
+ };
+
+ // If exact is false then return any outer scope that contains this
+ // directory except for the global scope.
+ //
+ auto find_scope = [&ctx, &comp_norm] (dir_path& d,
+ bool exact,
+ const char* what) -> const scope*
+ {
+ comp_norm (d, what);
+
+ // This is always the output directory (specifically, see the target
+ // case below).
+ //
+ const scope& s (ctx.scopes.find_out (d));
+
+ return ((exact ? s.out_path () == d : s != ctx.global_scope)
+ ? &s
+ : nullptr);
+ };
+
+ // Dump scopes.
+ //
+ for (dir_path d: scopes)
+ {
+ const scope* s (find_scope (d, true, "--dump-scope"));
+
+ if (s == nullptr)
+ l5 ([&]{trace << "unknown target scope " << d
+ << " specified with --dump-scope";});
+
+ build2::dump (s, a, dump_fmt);
+ }
+
+ // Dump targets.
+ //
+ for (const pair<name, optional<name>>& p: targets)
+ {
+ const target* t (nullptr);
+
+ // Find the innermost known scope that contains this target. This
+ // is where we are going to resolve its type.
+ //
+ dir_path d (p.second ? p.second->dir : p.first.dir);
+
+ if (const scope* s = find_scope (d, false, "--dump-target"))
+ {
+ // Complete relative directories in names.
+ //
+ name n (p.first), o;
+
+ if (p.second)
+ {
+ comp_norm (n.dir, "--dump-target");
+ o.dir = move (d);
+ }
+ else
+ n.dir = move (d);
+
+ // Similar logic to parser::enter_target::find_target() as used by
+ // the dump directive. Except here we treat unknown target type as
+ // unknown target.
+ //
+ auto r (s->find_target_type (n, location ()));
+
+ if (r.first != nullptr)
+ {
+ t = ctx.targets.find (*r.first, // target type
+ n.dir,
+ o.dir,
+ n.value,
+ r.second, // extension
+ trace);
+
+ if (t == nullptr)
+ l5 ([&]
+ {
+ // @@ TODO: default_extension?
+ //
+ target::combine_name (n.value, r.second, false);
+ names ns {move (n)};
+ if (p.second)
+ ns.push_back (move (o));
+
+ trace << "unknown target " << ns
+ << " specified with --dump-target";
+ });
+ }
+ else
+ l5 ([&]{trace << "unknown target type '" << n.type << "' in "
+ << *s << " specified with --dump-target";});
+
+ }
+ else
+ l5 ([&]{trace << "unknown target scope " << d
+ << " specified with --dump-target";});
+
+ build2::dump (t, a, dump_fmt);
+ }
+ }
+ };
+
// If not NULL, then lifted points to the operation that has been "lifted"
    // to the meta-operation (see the logic below for details). Skip is the
// position of the next operation.
@@ -877,6 +661,20 @@ main (int argc, char* argv[])
//
bool dirty (false); // Already (re)set for the first run.
+#ifndef BUILD2_BOOTSTRAP
+ // Note that this constructor is cheap and so we rather call it always
+ // instead of resorting to dynamic allocations.
+ //
+ // Note also that we disable pretty-printing if there is also the JSON
+ // dump and thus we need to combine the two in the JSON Lines format.
+ //
+ json::stream_serializer js (cout, dump_fmt == dump_format::json ? 0 : 2);
+
+ if (ops.structured_result_specified () &&
+ ops.structured_result () == structured_result_format::json)
+ js.begin_array ();
+#endif
+
for (auto mit (bspec.begin ()); mit != bspec.end (); )
{
vector_view<opspec> opspecs;
@@ -918,8 +716,9 @@ main (int argc, char* argv[])
dirty = false;
}
- const path p ("<buildspec>");
- const location l (p, 0, 0); //@@ TODO
+ context& ctx (*pctx);
+
+ const location l (bspec_name, 0, 0); //@@ TODO (also bpkg::pkg_configure())
meta_operation_id mid (0); // Not yet translated.
const meta_operation_info* mif (nullptr);
@@ -932,25 +731,25 @@ main (int argc, char* argv[])
values& mparams (lifted == nullptr ? mit->params : lifted->params);
string mname (lifted == nullptr ? mit->name : lifted->name);
- ctx->current_mname = mname; // Set early.
+ ctx.current_mname = mname; // Set early.
if (!mname.empty ())
{
- if (meta_operation_id m = ctx->meta_operation_table.find (mname))
+ if (meta_operation_id m = ctx.meta_operation_table.find (mname))
{
// Can modify params, opspec, change meta-operation name.
//
- if (auto f = ctx->meta_operation_table[m].process)
- mname = ctx->current_mname = f (
- *ctx, mparams, opspecs, lifted != nullptr, l);
+ if (auto f = ctx.meta_operation_table[m].process)
+ mname = ctx.current_mname = f (
+ ctx, mparams, opspecs, lifted != nullptr, l);
}
}
// Expose early so can be used during bootstrap (with the same
// limitations as for pre-processing).
//
- scope& gs (ctx->global_scope.rw ());
- gs.assign (ctx->var_build_meta_operation) = mname;
+ scope& gs (ctx.global_scope.rw ());
+ gs.assign (ctx.var_build_meta_operation) = mname;
for (auto oit (opspecs.begin ()); oit != opspecs.end (); ++oit)
{
@@ -961,7 +760,7 @@ main (int argc, char* argv[])
const values& oparams (lifted == nullptr ? os.params : values ());
const string& oname (lifted == nullptr ? os.name : empty_string);
- ctx->current_oname = oname; // Set early.
+ ctx.current_oname = oname; // Set early.
if (lifted != nullptr)
lifted = nullptr; // Clear for the next iteration.
@@ -985,7 +784,7 @@ main (int argc, char* argv[])
&oname, &mname,
&os, &mit, &lifted, &skip, &l, &trace] ()
{
- meta_operation_id m (ctx->meta_operation_table.find (oname));
+ meta_operation_id m (ctx.meta_operation_table.find (oname));
if (m != 0)
{
@@ -1144,7 +943,7 @@ main (int argc, char* argv[])
// Handle a forwarded configuration. Note that if we've changed
// out_root then we also have to remap out_base.
//
- out_root = bootstrap_fwd (*ctx, src_root, altn);
+ out_root = bootstrap_fwd (ctx, src_root, altn);
if (src_root != out_root)
{
out_base = out_root / out_base.leaf (src_root);
@@ -1189,7 +988,7 @@ main (int argc, char* argv[])
// use to the bootstrap files (other than src-root.build, which,
// BTW, doesn't need to exist if src_root == out_root).
//
- scope& rs (*create_root (*ctx, out_root, src_root)->second.front ());
+ scope& rs (*create_root (ctx, out_root, src_root)->second.front ());
bool bstrapped (bootstrapped (rs));
@@ -1225,8 +1024,8 @@ main (int argc, char* argv[])
<< (forwarded ? "forwarded " : "specified ")
<< src_root;
- ctx->new_src_root = src_root;
- ctx->old_src_root = move (p);
+ ctx.new_src_root = src_root;
+ ctx.old_src_root = move (p);
p = src_root;
}
}
@@ -1248,8 +1047,13 @@ main (int argc, char* argv[])
// Now that we have src_root, load the src_root bootstrap file,
// if there is one.
//
+ // As an optimization, omit discovering subprojects for the info
+ // meta-operation if not needed.
+ //
bootstrap_pre (rs, altn);
- bootstrap_src (rs, altn);
+ bootstrap_src (rs, altn,
+ nullopt /* amalgamation */,
+ !mo_info || info_subprojects (mparams) /*subprojects*/);
// If this is a simple project, then implicitly load the test and
// install modules.
@@ -1269,7 +1073,7 @@ main (int argc, char* argv[])
// command line and import).
//
if (forwarded)
- rs.assign (ctx->var_forwarded) = true;
+ rs.assign (ctx.var_forwarded) = true;
// Sync local variable that are used below with actual values.
//
@@ -1321,8 +1125,8 @@ main (int argc, char* argv[])
// all be known. We store the combined action id in uint8_t;
// see <operation> for details.
//
- assert (ctx->operation_table.size () <= 128);
- assert (ctx->meta_operation_table.size () <= 128);
+ assert (ctx.operation_table.size () <= 128);
+ assert (ctx.meta_operation_table.size () <= 128);
// Since we now know all the names of meta-operations and
// operations, "lift" names that we assumed (from buildspec syntax)
@@ -1339,7 +1143,7 @@ main (int argc, char* argv[])
if (!mname.empty ())
{
- m = ctx->meta_operation_table.find (mname);
+ m = ctx.meta_operation_table.find (mname);
if (m == 0)
fail (l) << "unknown meta-operation " << mname;
@@ -1347,7 +1151,7 @@ main (int argc, char* argv[])
if (!oname.empty ())
{
- o = ctx->operation_table.find (oname);
+ o = ctx.operation_table.find (oname);
if (o == 0)
fail (l) << "unknown operation " << oname;
@@ -1370,7 +1174,7 @@ main (int argc, char* argv[])
if (mif == nullptr)
fail (l) << "target " << tn << " does not support meta-"
- << "operation " << ctx->meta_operation_table[m].name;
+ << "operation " << ctx.meta_operation_table[m].name;
}
//
// Otherwise, check that all the targets in a meta-operation
@@ -1383,7 +1187,7 @@ main (int argc, char* argv[])
if (mi == nullptr)
fail (l) << "target " << tn << " does not support meta-"
- << "operation " << ctx->meta_operation_table[mid].name;
+ << "operation " << ctx.meta_operation_table[mid].name;
if (mi != mif)
fail (l) << "different implementations of meta-operation "
@@ -1406,12 +1210,12 @@ main (int argc, char* argv[])
<< ", id " << static_cast<uint16_t> (mid);});
if (mif->meta_operation_pre != nullptr)
- mif->meta_operation_pre (mparams, l);
+ mif->meta_operation_pre (ctx, mparams, l);
else if (!mparams.empty ())
fail (l) << "unexpected parameters for meta-operation "
<< mif->name;
- ctx->current_meta_operation (*mif);
+ ctx.current_meta_operation (*mif);
dirty = true;
}
@@ -1427,7 +1231,7 @@ main (int argc, char* argv[])
if (r == nullptr)
fail (l) << "target " << tn << " does not support "
- << "operation " << ctx->operation_table[o];
+ << "operation " << ctx.operation_table[o];
return r;
};
@@ -1445,7 +1249,7 @@ main (int argc, char* argv[])
// Allow the meta-operation to translate the operation.
//
if (mif->operation_pre != nullptr)
- oid = mif->operation_pre (mparams, oif->id);
+ oid = mif->operation_pre (ctx, mparams, oif->id);
else // Otherwise translate default to update.
oid = (oif->id == default_id ? update_id : oif->id);
@@ -1466,24 +1270,38 @@ main (int argc, char* argv[])
if (oif->outer_id != 0)
outer_oif = lookup (oif->outer_id);
+ if (!oparams.empty ())
+ {
+ // Operation parameters belong to outer operation, if any.
+ //
+ auto* i (outer_oif != nullptr ? outer_oif : oif);
+
+ if (i->operation_pre == nullptr)
+ fail (l) << "unexpected parameters for operation " << i->name;
+ }
+
// Handle pre/post operations.
//
- if (oif->pre != nullptr)
+ if (auto po = oif->pre_operation)
{
- if ((orig_pre_oid = oif->pre (oparams, mid, l)) != 0)
+ if ((orig_pre_oid = po (
+ ctx,
+ outer_oif == nullptr ? oparams : values {},
+ mid,
+ l)) != 0)
{
assert (orig_pre_oid != default_id);
pre_oif = lookup (orig_pre_oid);
pre_oid = pre_oif->id; // De-alias.
}
}
- else if (!oparams.empty ())
- fail (l) << "unexpected parameters for operation "
- << oif->name;
- if (oif->post != nullptr)
+ if (auto po = oif->post_operation)
{
- if ((orig_post_oid = oif->post (oparams, mid)) != 0)
+ if ((orig_post_oid = po (
+ ctx,
+ outer_oif == nullptr ? oparams : values {},
+ mid)) != 0)
{
assert (orig_post_oid != default_id);
post_oif = lookup (orig_post_oid);
@@ -1504,7 +1322,7 @@ main (int argc, char* argv[])
if (r == nullptr)
fail (l) << "target " << tn << " does not support "
- << "operation " << ctx->operation_table[o];
+ << "operation " << ctx.operation_table[o];
if (r != i)
fail (l) << "different implementations of operation "
@@ -1529,6 +1347,9 @@ main (int argc, char* argv[])
// defined there (common with non-intrusive project conversions
// where everything is built from a single root buildfile).
//
+ // Note: we use find_plausible_buildfile() and not find_buildfile()
+ // to look in outer directories.
+ //
optional<path> bf (
find_buildfile (src_base, src_base, altn, buildfile));
@@ -1579,62 +1400,7 @@ main (int argc, char* argv[])
// boundaries (specifically, amalgamation) are only known after
// bootstrap.
//
- // The mildly tricky part here is to distinguish the situation where
- // we are bootstrapping the same project multiple times. The first
- // override that we set cannot already exist (because the override
- // variable names are unique) so if it is already set, then it can
- // only mean this project is already bootstrapped.
- //
- // This is further complicated by the project vs amalgamation logic
- // (we may have already done the amalgamation but not the project).
- // So we split it into two passes.
- //
- {
- auto& sm (ctx->scopes.rw ());
-
- for (const variable_override& o: ctx->var_overrides)
- {
- if (o.ovr.visibility != variable_visibility::global)
- continue;
-
- // If we have a directory, enter the scope, similar to how we do
- // it in the context ctor.
- //
- scope& s (
- o.dir
- ? *sm.insert_out ((out_base / *o.dir).normalize ())->second.front ()
- : *rs.weak_scope ());
-
- auto p (s.vars.insert (o.ovr));
-
- if (!p.second)
- break;
-
- value& v (p.first);
- v = o.val;
- }
-
- for (const variable_override& o: ctx->var_overrides)
- {
- // Ours is either project (%foo) or scope (/foo).
- //
- if (o.ovr.visibility == variable_visibility::global)
- continue;
-
- scope& s (
- o.dir
- ? *sm.insert_out ((out_base / *o.dir).normalize ())->second.front ()
- : rs);
-
- auto p (s.vars.insert (o.ovr));
-
- if (!p.second)
- break;
-
- value& v (p.first);
- v = o.val;
- }
- }
+ ctx.enter_project_overrides (rs, out_base, ctx.var_overrides);
ts.root_scope = &rs;
ts.out_base = move (out_base);
@@ -1649,6 +1415,9 @@ main (int argc, char* argv[])
break;
}
+ if (load_only && (mid != perform_id || oid != update_id))
+ fail << "--load-only requires perform(update) action";
+
// Now load the buildfiles and search the targets.
//
action_targets tgs;
@@ -1669,7 +1438,7 @@ main (int argc, char* argv[])
// building before we know how to for all the targets in this
// operation batch.
//
- const scope& bs (ctx->scopes.find_out (ts.out_base));
+ const scope& bs (ctx.scopes.find_out (ts.out_base));
// Find the target type and extract the extension.
//
@@ -1680,6 +1449,9 @@ main (int argc, char* argv[])
if (tt == nullptr)
fail (l) << "unknown target type " << tn.type;
+ if (load_only && !tt->is_a<alias> ())
+ fail << "--load-only requires alias target";
+
if (mif->search != nullptr)
{
// If the directory is relative, assume it is relative to work
@@ -1710,8 +1482,10 @@ main (int argc, char* argv[])
}
} // target
- if (dump_load)
- dump (*ctx);
+ // Delay until after match in the --load-only mode (see below).
+ //
+ if (dump_load && !load_only)
+ dump (ctx, nullopt /* action */);
// Finally, match the rules and perform the operation.
//
@@ -1721,28 +1495,42 @@ main (int argc, char* argv[])
<< ", id " << static_cast<uint16_t> (pre_oid);});
if (mif->operation_pre != nullptr)
- mif->operation_pre (mparams, pre_oid); // Cannot be translated.
+ mif->operation_pre (ctx, mparams, pre_oid); // Can't be translated.
- ctx->current_operation (*pre_oif, oif);
+ ctx.current_operation (*pre_oif, oif);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, oparams, false /* inner */, l);
+
+ if (pre_oif->operation_pre != nullptr)
+ pre_oif->operation_pre (ctx, {}, true /* inner */, l);
action a (mid, pre_oid, oid);
{
- result_printer p (tgs);
- uint16_t diag (ops.structured_result () ? 0 : 1);
+#ifndef BUILD2_BOOTSTRAP
+ result_printer p (ops, tgs, js);
+#endif
+ uint16_t diag (ops.structured_result_specified () ? 0 : 1);
if (mif->match != nullptr)
mif->match (mparams, a, tgs, diag, true /* progress */);
- if (dump_match)
- dump (*ctx, a);
+ if (dump_match_pre)
+ dump (ctx, a);
- if (mif->execute != nullptr && !ctx->match_only)
+ if (mif->execute != nullptr && !ctx.match_only)
mif->execute (mparams, a, tgs, diag, true /* progress */);
}
+ if (pre_oif->operation_post != nullptr)
+ pre_oif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, oparams, false /* inner */);
+
if (mif->operation_post != nullptr)
- mif->operation_post (mparams, pre_oid);
+ mif->operation_post (ctx, mparams, pre_oid);
l5 ([&]{trace << "end pre-operation batch " << pre_oif->name
<< ", id " << static_cast<uint16_t> (pre_oid);});
@@ -1750,24 +1538,43 @@ main (int argc, char* argv[])
tgs.reset ();
}
- ctx->current_operation (*oif, outer_oif);
+ ctx.current_operation (*oif, outer_oif);
+
+ if (outer_oif != nullptr && outer_oif->operation_pre != nullptr)
+ outer_oif->operation_pre (ctx, oparams, false /* inner */, l);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx,
+ outer_oif == nullptr ? oparams : values {},
+ true /* inner */,
+ l);
action a (mid, oid, oif->outer_id);
{
- result_printer p (tgs);
- uint16_t diag (ops.structured_result () ? 0 : 2);
+#ifndef BUILD2_BOOTSTRAP
+ result_printer p (ops, tgs, js);
+#endif
+ uint16_t diag (ops.structured_result_specified () ? 0 : 2);
if (mif->match != nullptr)
mif->match (mparams, a, tgs, diag, true /* progress */);
if (dump_match)
- dump (*ctx, a);
+ dump (ctx, a);
- if (mif->execute != nullptr && !ctx->match_only)
+ if (mif->execute != nullptr && !ctx.match_only)
mif->execute (mparams, a, tgs, diag, true /* progress */);
}
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx,
+ outer_oif == nullptr ? oparams : values {},
+ true /* inner */);
+
+ if (outer_oif != nullptr && outer_oif->operation_post != nullptr)
+ outer_oif->operation_post (ctx, oparams, false /* inner */);
+
if (post_oid != 0)
{
tgs.reset ();
@@ -1776,35 +1583,52 @@ main (int argc, char* argv[])
<< ", id " << static_cast<uint16_t> (post_oid);});
if (mif->operation_pre != nullptr)
- mif->operation_pre (mparams, post_oid); // Cannot be translated.
+ mif->operation_pre (ctx, mparams, post_oid); // Can't be translated.
- ctx->current_operation (*post_oif, oif);
+ ctx.current_operation (*post_oif, oif);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, oparams, false /* inner */, l);
+
+ if (post_oif->operation_pre != nullptr)
+ post_oif->operation_pre (ctx, {}, true /* inner */, l);
action a (mid, post_oid, oid);
{
- result_printer p (tgs);
- uint16_t diag (ops.structured_result () ? 0 : 1);
+#ifndef BUILD2_BOOTSTRAP
+ result_printer p (ops, tgs, js);
+#endif
+ uint16_t diag (ops.structured_result_specified () ? 0 : 1);
if (mif->match != nullptr)
mif->match (mparams, a, tgs, diag, true /* progress */);
- if (dump_match)
- dump (*ctx, a);
+ if (dump_match_post)
+ dump (ctx, a);
- if (mif->execute != nullptr && !ctx->match_only)
+ if (mif->execute != nullptr && !ctx.match_only)
mif->execute (mparams, a, tgs, diag, true /* progress */);
}
+ if (post_oif->operation_post != nullptr)
+ post_oif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, oparams, false /* inner */);
+
if (mif->operation_post != nullptr)
- mif->operation_post (mparams, post_oid);
+ mif->operation_post (ctx, mparams, post_oid);
l5 ([&]{trace << "end post-operation batch " << post_oif->name
<< ", id " << static_cast<uint16_t> (post_oid);});
}
+ if (dump_load && load_only)
+ dump (ctx, nullopt /* action */);
+
if (mif->operation_post != nullptr)
- mif->operation_post (mparams, oid);
+ mif->operation_post (ctx, mparams, oid);
l5 ([&]{trace << "end operation batch " << oif->name
<< ", id " << static_cast<uint16_t> (oid);});
@@ -1813,7 +1637,7 @@ main (int argc, char* argv[])
if (mid != 0)
{
if (mif->meta_operation_post != nullptr)
- mif->meta_operation_post (mparams);
+ mif->meta_operation_post (ctx, mparams);
l5 ([&]{trace << "end meta-operation batch " << mif->name
<< ", id " << static_cast<uint16_t> (mid);});
@@ -1822,6 +1646,18 @@ main (int argc, char* argv[])
if (lifted == nullptr && skip == 0)
++mit;
} // meta-operation
+
+#ifndef BUILD2_BOOTSTRAP
+ if (ops.structured_result_specified () &&
+ ops.structured_result () == structured_result_format::json)
+ {
+ js.end_array ();
+ cout << endl;
+ }
+#endif
+
+ phase_switch_contention += (pctx->phase_mutex.contention +
+ pctx->phase_mutex.contention_load);
}
catch (const failed&)
{
@@ -1843,16 +1679,18 @@ main (int argc, char* argv[])
{
text << '\n'
<< "build statistics:" << "\n\n"
- << " thread_max_active " << st.thread_max_active << '\n'
- << " thread_max_total " << st.thread_max_total << '\n'
- << " thread_helpers " << st.thread_helpers << '\n'
- << " thread_max_waiting " << st.thread_max_waiting << '\n'
+ << " thread_max_active " << st.thread_max_active << '\n'
+ << " thread_max_total " << st.thread_max_total << '\n'
+ << " thread_helpers " << st.thread_helpers << '\n'
+ << " thread_max_waiting " << st.thread_max_waiting << '\n'
+ << '\n'
+ << " task_queue_depth " << st.task_queue_depth << '\n'
+ << " task_queue_full " << st.task_queue_full << '\n'
<< '\n'
- << " task_queue_depth " << st.task_queue_depth << '\n'
- << " task_queue_full " << st.task_queue_full << '\n'
+ << " wait_queue_slots " << st.wait_queue_slots << '\n'
+ << " wait_queue_collisions " << st.wait_queue_collisions << '\n'
<< '\n'
- << " wait_queue_slots " << st.wait_queue_slots << '\n'
- << " wait_queue_collisions " << st.wait_queue_collisions << '\n';
+ << " phase_switch_contention " << phase_switch_contention << '\n';
}
return r;
diff --git a/build2/buildfile b/build2/buildfile
index 4d62fb5..0111ed2 100644
--- a/build2/buildfile
+++ b/build2/buildfile
@@ -5,16 +5,19 @@
#
libs = $libbutl
+# NOTE: don't forget to also update bpkg's buildfile if changing anything
+# here.
+#
include ../libbuild2/
libs += ../libbuild2/lib{build2}
-for m: bash bin c cc cxx in version
+for m: bash bin c cc cli cxx in version
{
include ../libbuild2/$m/
libs += ../libbuild2/$m/lib{build2-$m}
}
-exe{b}: {hxx ixx txx cxx}{** -b-options} {hxx ixx cxx}{b-options} $libs
+exe{b}: {hxx ixx txx cxx}{**} $libs
# Target metadata, see also --build2-metadata in b.cxx.
#
@@ -42,6 +45,8 @@ copyright = $process.run_regex( \
obj{b}: cxx.poptions += -DBUILD2_COPYRIGHT=\"$copyright\"
+# NOTE: remember to update bpkg buildfile if changing anything here.
+#
switch $cxx.target.class
{
case 'linux'
@@ -68,36 +73,3 @@ switch $cxx.target.class
: "-Wl,--stack,$stack_size")
}
}
-
-# Generated options parser.
-#
-if $cli.configured
-{
- cli.cxx{b-options}: cli{b}
-
- cli.options += --std c++11 -I $src_root --include-with-brackets \
---include-prefix build2 --guard-prefix BUILD2 \
---cxx-prologue "#include <build2/types-parsers.hxx>" \
---cli-namespace build2::cl --generate-file-scanner --keep-separator \
---generate-parse --generate-merge --generate-specifier
-
- # Usage options.
- #
- cli.options += --suppress-undocumented --long-usage --ansi-color \
---page-usage 'build2::print_$name$_' --option-length 21
-
- cli.cxx{*}:
- {
- # Include the generated cli files into the distribution and don't remove
- # them when cleaning in src (so that clean results in a state identical to
- # distributed).
- #
- dist = true
- clean = ($src_root != $out_root)
-
- # We keep the generated code in the repository so copy it back to src in
- # case of a forwarded configuration.
- #
- backlink = overwrite
- }
-}
diff --git a/build2/cli/target.hxx b/build2/cli/target.hxx
deleted file mode 100644
index 722bb5f..0000000
--- a/build2/cli/target.hxx
+++ /dev/null
@@ -1,54 +0,0 @@
-// file : build2/cli/target.hxx -*- C++ -*-
-// license : MIT; see accompanying LICENSE file
-
-#ifndef BUILD2_CLI_TARGET_HXX
-#define BUILD2_CLI_TARGET_HXX
-
-#include <libbuild2/types.hxx>
-#include <libbuild2/utility.hxx>
-
-#include <libbuild2/target.hxx>
-
-#include <libbuild2/cxx/target.hxx>
-
-namespace build2
-{
- namespace cli
- {
- class cli: public file
- {
- public:
- using file::file;
-
- public:
- static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
- };
-
- // Standard layout type compatible with group_view's const target*[3].
- //
- struct cli_cxx_members
- {
- const cxx::hxx* h = nullptr;
- const cxx::cxx* c = nullptr;
- const cxx::ixx* i = nullptr;
- };
-
- class cli_cxx: public mtime_target, public cli_cxx_members
- {
- public:
- using mtime_target::mtime_target;
-
- virtual group_view
- group_members (action) const override;
-
- public:
- static const target_type static_type;
-
- virtual const target_type&
- dynamic_type () const override {return static_type;}
- };
- }
-}
-
-#endif // BUILD2_CLI_TARGET_HXX
diff --git a/build2/types-parsers.cxx b/build2/types-parsers.cxx
deleted file mode 100644
index 3593143..0000000
--- a/build2/types-parsers.cxx
+++ /dev/null
@@ -1,50 +0,0 @@
-// file : build2/types-parsers.cxx -*- C++ -*-
-// license : MIT; see accompanying LICENSE file
-
-#include <build2/types-parsers.hxx>
-
-#include <build2/b-options.hxx> // build2::cl namespace
-
-namespace build2
-{
- namespace cl
- {
- template <typename T>
- static void
- parse_path (T& x, scanner& s)
- {
- const char* o (s.next ());
-
- if (!s.more ())
- throw missing_value (o);
-
- const char* v (s.next ());
-
- try
- {
- x = T (v);
-
- if (x.empty ())
- throw invalid_value (o, v);
- }
- catch (const invalid_path&)
- {
- throw invalid_value (o, v);
- }
- }
-
- void parser<path>::
- parse (path& x, bool& xs, scanner& s)
- {
- xs = true;
- parse_path (x, s);
- }
-
- void parser<dir_path>::
- parse (dir_path& x, bool& xs, scanner& s)
- {
- xs = true;
- parse_path (x, s);
- }
- }
-}
diff --git a/build2/types-parsers.hxx b/build2/types-parsers.hxx
deleted file mode 100644
index d39a096..0000000
--- a/build2/types-parsers.hxx
+++ /dev/null
@@ -1,43 +0,0 @@
-// file : build2/types-parsers.hxx -*- C++ -*-
-// license : MIT; see accompanying LICENSE file
-
-// CLI parsers, included into the generated source files.
-//
-
-#ifndef BUILD2_TYPES_PARSERS_HXX
-#define BUILD2_TYPES_PARSERS_HXX
-
-#include <libbuild2/types.hxx>
-
-namespace build2
-{
- namespace cl
- {
- class scanner;
-
- template <typename T>
- struct parser;
-
- template <>
- struct parser<path>
- {
- static void
- parse (path&, bool&, scanner&);
-
- static void
- merge (path& b, const path& a) {b = a;}
- };
-
- template <>
- struct parser<dir_path>
- {
- static void
- parse (dir_path&, bool&, scanner&);
-
- static void
- merge (dir_path& b, const dir_path& a) {b = a;}
- };
- }
-}
-
-#endif // BUILD2_TYPES_PARSERS_HXX
diff --git a/buildfile b/buildfile
index e10f1c0..c88c55b 100644
--- a/buildfile
+++ b/buildfile
@@ -1,7 +1,7 @@
# file : buildfile
# license : MIT; see accompanying LICENSE file
-./: {*/ -build/ -config/ -old-tests/} \
+./: {*/ -build/ -config/ -old-tests/ -doc/} \
doc{INSTALL NEWS README} legal{LICENSE AUTHORS} \
file{INSTALL.cli bootstrap* config.guess config.sub} \
manifest
diff --git a/config b/config
-Subproject 191bcb948f7191c36eefe634336f5fc5c0c4c2b
+Subproject 02ba26b218d3d3db6c56e014655faf463cefa98
diff --git a/doc/cli.sh b/doc/cli.sh
index d1fcb77..8d7d70a 100755
--- a/doc/cli.sh
+++ b/doc/cli.sh
@@ -1,6 +1,6 @@
#! /usr/bin/env bash
-version=0.14.0-a.0.z
+version=0.16.0-a.0.z
trap 'exit 1' ERR
set -o errtrace # Trap in functions.
@@ -47,7 +47,8 @@ function compile ()
--generate-html --html-suffix .xhtml \
--html-prologue-file man-prologue.xhtml \
--html-epilogue-file man-epilogue.xhtml \
-../build2/$n.cli
+--link-regex '%b(#.+)?%build2-build-system-manual.xhtml$1%' \
+../libbuild2/$n.cli
cli -I .. \
-v project="build2" \
@@ -55,10 +56,11 @@ function compile ()
-v date="$date" \
-v copyright="$copyright" \
--include-base-last "${o[@]}" \
---generate-man --man-suffix .1 \
+--generate-man --man-suffix .1 --ascii-tree \
--man-prologue-file man-prologue.1 \
--man-epilogue-file man-epilogue.1 \
-../build2/$n.cli
+--link-regex '%b(#.+)?%$1%' \
+../libbuild2/$n.cli
}
o="--output-prefix b-"
@@ -76,6 +78,8 @@ done
# Manuals.
#
+# @@ Note that we now have the --ascii-tree CLI option.
+#
function xhtml_to_ps () # <from> <to> [<html2ps-options>]
{
local from="$1"
diff --git a/doc/manual.cli b/doc/manual.cli
index 9d79259..85a6613 100644
--- a/doc/manual.cli
+++ b/doc/manual.cli
@@ -311,6 +311,16 @@ it searches for a target for the \c{cxx{hello\}} prerequisite. During this
search, the \c{extension} variable is looked up and its value is used to end
up with the \c{hello.cxx} file.
+\N|To resolve a rule match ambiguity or to override a default match \c{build2}
+uses \i{rule hints}. For example, if we wanted to link a C executable using the
+C++ link rule:
+
+\
+[rule_hint=cxx] exe{hello}: c{hello}
+\
+
+|
+
Here is our new dependency declaration again:
\
@@ -361,7 +371,7 @@ Nothing really new here: we've specified the default extension for the
prerequisites. If you have experience with other build systems, then
explicitly listing headers might seem strange to you. As will be discussed
later, in \c{build2} we have to explicitly list all the prerequisites of a
-target that should end up in a distribution of our project.
+target that should end up in a source distribution of our project.
\N|You don't have to list \i{all} headers that you include, only the ones
belonging to your project. Like all modern C/C++ build systems, \c{build2}
@@ -411,11 +421,11 @@ exe{hello}: {hxx cxx}{**}
development more pleasant and less error prone: you don't need to update your
\c{buildfile} every time you add, remove, or rename a source file and you
won't forget to explicitly list headers, a mistake that is often only detected
-when trying to build a distribution of a project. On the other hand, there is
-the possibility of including stray source files into your build without
-noticing. And, for more complex projects, name patterns can become fairly
-complex (see \l{#name-patterns Name Patterns} for details). Note also that on
-modern hardware the performance of wildcard searches hardly warrants a
+when trying to build a source distribution of a project. On the other hand,
+there is the possibility of including stray source files into your build
+without noticing. And, for more complex projects, name patterns can become
+fairly complex (see \l{#name-patterns Name Patterns} for details). Note also
+that on modern hardware the performance of wildcard searches hardly warrants a
consideration.
In our experience, when combined with modern version control systems like
@@ -448,7 +458,7 @@ invocation. In other words, expect an experience similar to a plain
\c{Makefile}.
One notable example where simple projects are handy is a \i{glue
-\c{buildfiles}} that \"pulls\" together several other projects, usually for
+\c{buildfile}} that \"pulls\" together several other projects, usually for
convenience of development. See \l{#intro-import Target Importation} for
details.|
@@ -587,7 +597,7 @@ configuration \i{persistent}. We will see an example of this shortly.
Next up are the \c{test}, \c{install}, and \c{dist} modules. As their names
suggest, they provide support for testing, installation and preparation of
-distributions. Specifically, the \c{test} module defines the \c{test}
+source distributions. Specifically, the \c{test} module defines the \c{test}
operation, the \c{install} module defines the \c{install} and \c{uninstall}
operations, and the \c{dist} module defines the \c{dist}
(meta-)operation. Again, we will try them out in a moment.
@@ -746,7 +756,7 @@ Let's take a look at a slightly more realistic root \c{buildfile}:
Here we have the customary \c{README.md} and \c{LICENSE} files as well as the
package \c{manifest}. Listing them as prerequisites achieves two things: they
will be installed if/when our project is installed and, as mentioned earlier,
-they will be included into the project distribution.
+they will be included into the project source distribution.
The \c{README.md} and \c{LICENSE} files use the \c{doc{\}} target type. We
could have used the generic \c{file{\}} but using the more precise \c{doc{\}}
@@ -1418,7 +1428,7 @@ if ($cc.class == 'gcc')
}
if ($c.target.class != 'windows')
- c.libs += -lpthread # only C
+ c.libs += -ldl # only C
\
Additionally, as we will see in \l{#intro-operations-config Configuring},
@@ -1634,6 +1644,15 @@ $ b update: hello/exe{hello} # Update specific target
$ b update: libhello/ tests/ # Update two targets.
\
+\N|If you are running \c{build2} from PowerShell, then you will need to use
+quoting when updating specific targets, for example:
+
+\
+$ b update: 'hello/exe{hello}'
+\
+
+|
+
Let's revisit \c{build/bootstrap.build} from our \c{hello} project:
\
@@ -1705,9 +1724,18 @@ $ b
...
\
-Let's take a look at \c{config.build}:
+To remove the persistent configuration we use the \c{disfigure}
+meta-operation:
+
+\
+$ b disfigure
+\
+
+Let's again configure our project and take a look at \c{config.build}:
\
+$ b configure config.cxx=clang++ config.cxx.coptions=-g
+
$ cat build/config.build
config.cxx = clang++
@@ -1742,6 +1770,15 @@ Any variable value specified on the command line overrides those specified in
the \c{buildfiles}. As a result, \c{config.cxx} was updated while the value of
\c{config.cxx.coptions} was preserved.
+\N|To revert a configuration variable to its default value, list its name in
+the special \c{config.config.disfigure} variable. For example:
+
+\
+$ b configure config.config.disfigure=config.cxx
+\
+
+|
+
Command line variable overrides are also handy to adjust the configuration for
a single build system invocation. For example, let's say we want to quickly
check that our project builds with optimization but without permanently
@@ -2275,36 +2312,40 @@ If the value of the \c{install} variable is not \c{false}, then it is normally
a relative path with the first path component being one of these names:
\
-name default override
----- ------- --------
-root config.install.root
+name default override
+---- ------- --------
+root config.install.root
-data_root root/ config.install.data_root
-exec_root root/ config.install.exec_root
+data_root root/ config.install.data_root
+exec_root root/ config.install.exec_root
-bin exec_root/bin/ config.install.bin
-sbin exec_root/sbin/ config.install.sbin
-lib exec_root/lib/ config.install.lib
-libexec exec_root/libexec/<project>/ config.install.libexec
-pkgconfig lib/pkgconfig/ config.install.pkgconfig
+bin exec_root/bin/ config.install.bin
+sbin exec_root/sbin/ config.install.sbin
+lib exec_root/lib/ config.install.lib
+libexec exec_root/libexec/<project>/ config.install.libexec
+pkgconfig lib/pkgconfig/ config.install.pkgconfig
-etc data_root/etc/ config.install.etc
-include data_root/include/ config.install.include
-share data_root/share/ config.install.share
-data share/<project>/ config.install.data
+etc data_root/etc/ config.install.etc
+include data_root/include/ config.install.include
+include_arch include/ config.install.include_arch
+share data_root/share/ config.install.share
+data share/<project>/ config.install.data
+buildfile share/build2/export/<project>/ config.install.buildfile
-doc share/doc/<project>/ config.install.doc
-legal doc/ config.install.legal
-man share/man/ config.install.man
-man<N> man/man<N>/ config.install.man<N>
+doc share/doc/<project>/ config.install.doc
+legal doc/ config.install.legal
+man share/man/ config.install.man
+man<N> man/man<N>/ config.install.man<N>
\
Let's see what's going on here: The default install directory tree is derived
from the \c{config.install.root} value but the location of each node in this
tree can be overridden by the user that installs our project using the
-corresponding \c{config.install.*} variables. In our \c{buildfiles}, in turn,
-we use the node names instead of actual directories. As an example, here is a
-\c{buildfile} fragment from the source directory of our \c{libhello} project:
+corresponding \c{config.install.*} variables (see the \l{#module-install
+\c{install}} module documentation for details on their meaning). In our
+\c{buildfiles}, in turn, we use the node names instead of actual
+directories. As an example, here is a \c{buildfile} fragment from the source
+directory of our \c{libhello} project:
\
hxx{*}:
@@ -2335,15 +2376,36 @@ the \c{details/} subdirectory with the \c{utility.hxx} header, then this
header would have been installed as
\c{.../include/libhello/details/utility.hxx}.
+\N|By default the generated \c{pkg-config} files will contain
+\c{install.include} and \c{install.lib} directories as header (\c{-I}) and
+library (\c{-L}) search paths, respectively. However, these can be customized
+with the \c{{c,cxx\}.pkgconfig.{include,lib\}} variables. For example,
+sometimes we may need to install headers into a subdirectory of the include
+directory but include them without this subdirectory:
+
+\
+# Install headers into hello/libhello/ subdirectory of, say,
+# /usr/include/ but include them as <libhello/*>.
+#
+hxx{*}:
+{
+ install = include/hello/libhello/
+ install.subdirs = true
+}
+
+lib{hello}: cxx.pkgconfig.include = include/hello/
+\
+
+|
\h2#intro-operations-dist|Distributing|
The last module that we load in our \c{bootstrap.build} is \c{dist} which
-provides support for the preparation of distributions and defines the \c{dist}
-meta-operation. Similar to \c{configure}, \c{dist} is a meta-operation rather
-than an operation because, conceptually, we are preparing a distribution for
-performing operations (like \c{update}, \c{test}) on targets rather than
-targets themselves.
+provides support for the preparation of source distributions and defines the
+\c{dist} meta-operation. Similar to \c{configure}, \c{dist} is a
+meta-operation rather than an operation because, conceptually, we are
+preparing a distribution for performing operations (like \c{update}, \c{test})
+on targets rather than targets themselves.
The preparation of a correct distribution requires that all the necessary
project files (sources, documentation, etc) be listed as prerequisites in the
@@ -2428,9 +2490,6 @@ from out. Here is a fragment from the \c{libhello} source directory
\
hxx{version}: in{version} $src_root/manifest
-{
- dist = true
-}
\
Our library provides the \c{version.hxx} header that the users can include to
@@ -2441,13 +2500,24 @@ minor, patch, etc) and then preprocesses the \c{in{\}} file substituting these
values (see the \l{#module-version \c{version}} module documentation for
details). The end result is an automatically maintained version header.
-One problem with auto-generated headers is that if one does not yet exist,
-then the compiler may still find it somewhere else. For example, we may have
-an older version of a library installed somewhere where the compiler searches
-for headers by default (for example, \c{/usr/local/include/}). To overcome
-this problem it is a good idea to ship pre-generated headers in our
-distributions. But since they are output targets, we have to explicitly
-request this with \c{dist=true}.
+Usually there is no need to include this header into the distribution since it
+will be automatically generated if and when necessary. However, we can if we
+need to. For example, we could be porting an existing project and its users
+could be expecting the version header to be shipped as part of the archive.
+Here is how we can achieve this:
+
+\
+hxx{version}: in{version} $src_root/manifest
+{
+ dist = true
+ clean = ($src_root != $out_root)
+}
+\
+
+Because this header is an output target, we have to explicitly request its
+distribution with \c{dist=true}. Notice that we have also disabled its
+cleaning for the in source build so that the \c{clean} operation results in a
+state identical to distributed.
\h#intro-import|Target Importation|
@@ -2563,6 +2633,16 @@ Subprojects and Amalgamations} for details on this subject).
subproject in \c{libhello}. The test imports \c{libhello} which is
automatically found as an amalgamation containing this subproject.|
+\N|To skip searching in subprojects/amalgamations and proceed directly to the
+rule-specific search (described below), specify the \c{config.import.*}
+variable with an empty value. For example:
+
+\
+$ b configure: ... config.import.libhello=
+\
+
+|
+
If the project being imported cannot be located using any of these methods,
then \c{import} falls back to the rule-specific search. That is, a rule that
matches the target may provide support for importing certain target types
@@ -2763,15 +2843,7 @@ impl_libs = # Implementation dependencies.
lib{hello}: {hxx ixx txx cxx}{** -version} hxx{version} \
$impl_libs $intf_libs
-# Include the generated version header into the distribution (so that
-# we don't pick up an installed one) and don't remove it when cleaning
-# in src (so that clean results in a state identical to distributed).
-#
hxx{version}: in{version} $src_root/manifest
-{
- dist = true
- clean = ($src_root != $out_root)
-}
# Build options.
#
@@ -3380,10 +3452,10 @@ details/ # Scope.
hxx{*}: install = false
}
-hxx{version}: # Target-specific.
+lib{hello}: # Target-specific.
{
- dist = true
- clean = ($src_root != $out_root)
+ cxx.export.poptions = \"-I$src_root\"
+ cxx.export.libs = $intf_libs
}
exe{test}: file{test.roundtrip}: # Prerequisite-specific.
@@ -3400,7 +3472,7 @@ example:
h{config}: in{config}
{
in.symbol = '@'
- in.substitution = lax
+ in.mode = lax
SYSTEM_NAME = $c.target.system
SYSTEM_PROCESSOR = $c.target.cpu
@@ -4054,7 +4126,7 @@ source subdirectory \c{buildfile} of an executable created with this option:
# Unit tests.
#
-exe{*.test}
+exe{*.test}:
{
test = true
install = false
@@ -4147,8 +4219,14 @@ specified for our source files.
\N|If you need to specify a name that does not have an extension, then end it
with a single dot. For example, for a header \c{utility} you would write
-\c{hxx{utility.\}}. If you need to specify a name with an actual trailing
-dot, then escape it with a double dot, for example, \c{hxx{utility..\}}.|
+\c{hxx{utility.\}}. If you need to specify a name with an actual trailing dot,
+then escape it with a double dot, for example, \c{hxx{utility..\}}.
+
+More generally, anywhere in a name, a double dot can be used to specify a dot
+that should not be considered the extension separator while a triple dot \-
+which should. For example, in \c{obja{foo.a.o\}} the extension is \c{.o} and
+if instead we wanted \c{.a.o} to be considered the extension, then we could
+rewrite it either as \c{obja{foo.a..o\}} or as \c{obja{foo...a.o\}}.|
The next couple of lines set target type/pattern-specific variables to treat
all unit test executables as tests that should not be installed:
@@ -4255,10 +4333,10 @@ latter is used to update generated source code (such as headers) that is
required to complete the match.|
Debugging issues in each phase requires different techniques. Let's start with
-the load phase. As mentioned in \l{#intro-lang Build Language}, \c{buildfiles}
-are processed linearly with directives executed and variables expanded as they
-are encountered. As we have already seen, to print a variable value we can use
-the \c{info} directive. For example:
+the load phase. As mentioned in \l{#intro-lang Buildfile Language},
+\c{buildfiles} are processed linearly with directives executed and variables
+expanded as they are encountered. As we have already seen, to print a variable
+value we can use the \c{info} directive. For example:
\
x = X
@@ -4388,12 +4466,12 @@ Instead of printing the entire scope, we can also print individual targets by
specifying one or more target names in \c{dump}. To make things more
interesting, let's convert our \c{hello} project to use a utility library,
similar to the unit testing setup (\l{#intro-unit-test Implementing Unit
-Testing}). We will also link to the \c{pthread} library to see an example of a
+Testing}). We will also link to the \c{dl} library to see an example of a
target-specific variable being dumped:
\
exe{hello}: libue{hello}: bin.whole = false
-exe{hello}: cxx.libs += -lpthread
+exe{hello}: cxx.libs += -ldl
libue{hello}: {hxx cxx}{**}
dump exe{hello}
@@ -4405,7 +4483,7 @@ The output will look along these lines:
buildfile:5:1: dump:
/tmp/hello/hello/exe{hello.?}:
{
- [strings] cxx.libs = -lpthread
+ [strings] cxx.libs = -ldl
}
/tmp/hello/hello/exe{hello.?}: /tmp/hello/hello/:libue{hello.?}:
{
@@ -4416,7 +4494,8 @@ buildfile:5:1: dump:
The output of \c{dump} might look familiar: in \l{#intro-dirs-scopes Output
Directories and Scopes} we've used the \c{--dump} option to print the entire
build state, which looks pretty similar. In fact, the \c{dump} directive uses
-the same mechanism but allows us to print individual scopes and targets.
+the same mechanism but allows us to print individual scopes and targets from
+within a \c{buildfile}.
There is, however, an important difference to keep in mind: \c{dump} prints
the state of a target or scope at the point in the \c{buildfile} load phase
@@ -4430,6 +4509,9 @@ a result, while the \c{dump} directive should be sufficient in most cases,
sometimes you may need to use the \c{--dump} option to examine the build state
just before rule execution.
+\N|It is possible to limit the output of \c{--dump} to specific scopes and/or
+targets with the \c{--dump-scope} and \c{--dump-target} options.|
+
Let's now move from state to behavior. As we already know, to see the
underlying commands executed by the build system we use the \c{-v} options
(which is equivalent to \c{--verbose\ 2}). Note, however, that these are
@@ -4506,6 +4588,25 @@ Higher verbosity levels result in more and more tracing statements being
printed. These include \c{buildfile} loading and parsing, prerequisite to
target resolution, as well as build system module and rule-specific logic.
+While the tracing statements can be helpful in understanding what is
+happening, they don't make it easy to see why things are happening a
+certain way. In particular, one question that is often encountered during
+build troubleshooting is which dependency chain causes matching or execution
+of a particular target. These questions can be answered with the help of
+the \c{--trace-match} and \c{--trace-execute} options. For example, if we
+want to understand what causes the update of \c{obje{hello\}} in the
+\c{hello} project above:
+
+\
+$ b -s --trace-execute 'obje{hello}'
+info: updating hello/obje{hello}
+ info: using rule cxx.compile
+ info: while updating hello/libue{hello}
+ info: while updating hello/exe{hello}
+ info: while updating dir{hello/}
+ info: while updating dir{./}
+\
+
Another useful diagnostics option is \c{--mtime-check}. When specified, the
build system performs a number of file modification time sanity checks that
can be helpful in diagnosing spurious rebuilds.
@@ -4550,15 +4651,20 @@ cross-compilation (specifically, inability to run tests).
As a result, we recommend using \i{expectation-based} configuration where your
project assumes a feature to be available if certain conditions are
-met. Examples of such conditions at the source code level include feature
-test macros, platform macros, runtime library macros, compiler macros, etc.,
-with the build system modules exposing some of the same information via
-variables to allow making similar decisions in \c{buildfiles}. Another
-alternative is to automatically adapt to missing features using more advanced
-techniques such as C++ SFINAE. And in situations where none of this is
-possible, we recommend delegating the decision to the user via a configuration
-value. Our experience with \c{build2} as well as those of other large
-cross-platform projects such as Boost show that this is a viable strategy.
+met. Examples of such conditions at the source code level include feature test
+macros, platform macros, runtime library macros, compiler macros, etc., with
+the build system modules exposing some of the same information via variables
+to allow making similar decisions in \c{buildfiles}. The standard
+pre-installed \l{https://github.com/build2/libbuild2-autoconf/ \c{autoconf}}
+build system module provides emulation of GNU \c{autoconf} using this
+approach.
+
+Another alternative is to automatically adapt to missing features using more
+advanced techniques such as C++ SFINAE. And in situations where none of this
+is possible, we recommend delegating the decision to the user via a
+configuration value. Our experience with \c{build2} as well as those of other
+large cross-platform projects such as Boost show that this is a viable
+strategy.
Having said that, \c{build2} does provide the ability to extract configuration
information from the environment (\c{$getenv()} function) or other tools
@@ -4766,13 +4872,31 @@ is user-defined, then the default value is not evaluated.
Note also that if the configuration value is not specified by the user and you
haven't provided the default, the variable will be undefined, not \c{null},
and, as a result, omitted from the persistent configuration
-(\c{build/config.build} file). However, \c{null} is a valid default value. It
-is traditionally used for \i{optional} configuration values. For example:
+(\c{build/config.build} file). In fact, unlike other variables, project
+configuration variables are by default not \i{nullable}. For example:
+
+\
+$ b configure config.libhello.fancy=[null]
+error: null value in non-nullable variable config.libhello.fancy
+\
+
+There are two ways to make \c{null} a valid value of a project configuration
+variable. Firstly, if the default value is \c{null}, then naturally the
+variable is assumed nullable. This is traditionally used for \i{optional}
+configuration values. For example:
\
config [string] config.libhello.fallback_name ?= [null]
\
+If we need a nullable configuration variable but with a non-\c{null} default
+value (or no default value at all), then we have to use the \c{null} variable
+attribute. For example:
+
+\
+config [string, null] config.libhello.fallback_name ?= \"World\"
+\
+
A common approach for representing a C/C++ enum-like value is to use
\c{string} as a type and pattern matching for validation. In fact, validation
and propagation can often be combined. For example, if our library needed to
@@ -4816,15 +4940,6 @@ if! $defined(config.libhello.database)
fail 'config.libhello.database must be specified'
\
-And if you want to also disallow \c{null} values, then the above check should
-be rewritten like this: \N{An undefined variable expands into a \c{null}
-value.}
-
-\
-if ($config.libhello.database == [null])
- fail 'config.libhello.database must be specified'
-\
-
If computing the default value is expensive or requires elaborate logic, then
the handling of a configuration variable can be broken down into two steps
along these lines:
@@ -4925,9 +5040,96 @@ $ b -v config.libhello.woptions=-Wno-extra
g++ ... -Wall -Wextra -Wno-extra -Werror ...
\
-While we have already seen some examples of how to propagate the configuration
-values to our source code, \l{#proj-config-propag Configuration Propagation}
-discusses this topic in more detail.
+If you do not plan to package your project, then the above rules are the only
+constraints you have. However, if your project is also a package, then other
+projects that use it as a dependency may have preferences and requirements
+regarding its configuration. And it becomes the job of the package manager
+(\c{bpkg}) to negotiate a suitable configuration between all the dependents of
+your project (see \l{bpkg#dep-config-negotiation Dependency Configuration
Negotiation} for details). This can be a difficult problem to solve optimally
+in a reasonable time and, to help the package manager come up with the best
+configuration quickly, you should follow the below additional rules and
+recommendations for configuration of packages (but which are also generally
+good ideas):
+
+\ol|
+
+\li|Prefer \c{bool} configuration variables. For example, if your project
+ supports a fixed number of backends, then provide a \c{bool} variable to
+ enable each rather than a single variable that lists all the backends to
+ be enabled.|
+
+\li|Avoid project configuration variable dependencies, that is, where the
+ default value of one variable depends on the value of another. But if you
+ do need such a dependency, make sure it is expressed using the original
+ \c{config.<project>.*} variables rather than any intermediate/computed
+ values. For example:
+
+ \
+ # Enable Y only if X is enabled.
+ #
+ config [bool] config.hello.x ?= false
+    config [bool] config.hello.y ?= $config.hello.x
+ \
+
+ |
+
+\li|Do not make project configuration variables conditional. In other words,
+ the set of configuration variables and their types should be a static
+ property of the project. If you do need to make a certain configuration
+ variable \"unavailable\" or \"disabled\" if certain conditions are met
+ (for example, on a certain platform or based on the value of another
+ configuration variable), then express this with a default value and/or a
+ check. For example:
+
+ \
+ windows = ($cxx.target.class == 'windows')
+
+ # Y should only be enabled if X is enabled and we are not on
+ # Windows.
+ #
+ config [bool] config.hello.x ?= false
+ config [bool] config.hello.y ?= ($config.hello.x && !$windows)
+
+    if $config.hello.y
+ {
+ assert $config.hello.x \"Y can only be enabled if X is enabled\"
+ assert (!$windows) \"Y cannot be enabled on Windows\"
+ }
+ \
+
+ |
+
+|
+
+Additionally, if you wish to factor some \c{config} directives into a separate
+file (for example, if you have a large number of them or you would like to
+share them with subprojects) and source it from your \c{build/root.build},
+then it is recommended that you place this file into the \c{build/config/}
+subdirectory, where the package manager expects to find such files (see
+\l{bpkg#package-skeleton Package Build System Skeleton} for background). For
+example:
+
+\
+# root.build
+#
+
+...
+
+source $src_root/build/config/common.build
+\
+
+\N|If you would prefer to keep such a file in a different location (for
+example, because it contains things other than \c{config} directives), then
+you will need to manually list it in your package's \c{manifest} file, see the
+\l{bpkg#manifest-package-build-file \c{build-file}} value for details.|
+
+Another effect of the \c{config} directive is to print the configuration
+variable in the project's configuration report. This functionality is
+discussed in the following section. While we have already seen some examples
+of how to propagate the configuration values to our source code,
+\l{#proj-config-propag Configuration Propagation} discusses this topic in more
+detail.
\h#proj-config-report|Configuration Report|
@@ -5311,10 +5513,21 @@ configuration header into two, one public and installed while the other
private.|
+
\h1#attributes|Attributes|
\N{This chapter is a work in progress and is incomplete.}
+The only currently recognized target attribute is \c{rule_hint} which
+specifies the rule hint. Rule hints can be used to resolve ambiguity when
+multiple rules match the same target as well as to override an unambiguous
+match. For example, the following rule hint makes sure our executable is
+linked with the C++ compiler even though it only has C sources:
+
+\
+[rule_hint=cxx] exe{hello}: c{hello}
+\
+
\h1#name-patterns|Name Patterns|
@@ -5365,7 +5578,8 @@ Note that some wildcard characters may have special meaning in certain
contexts. For instance, \c{[} at the beginning of a value will be interpreted
as the start of the attribute list while \c{?} and \c{[} in the eval context
are part of the ternary operator and value subscript, respectively. In such
-cases the wildcard character will need to be escaped, for example:
+cases the character will need to be escaped in order to be treated as a
+wildcard, for example:
\
x = \[1-9]-foo.txt
@@ -5454,7 +5668,7 @@ exe{hello}: cxx{+{f* b*} -{foo bar}}
This is particularly useful if you would like to list the names to include or
exclude in a variable. For example, this is how we can exclude certain files
from compilation but still include them as ordinary file prerequisites (so
-that they are still included into the distribution):
+that they are still included into the source distribution):
\
exc = foo.cxx bar.cxx
@@ -5479,17 +5693,25 @@ exe{hello}: cxx{+{$inc} -{$exc}}
One common situation that calls for exclusions is auto-generated source
code. Let's say we have auto-generated command line parser in \c{options.hxx}
-and \c{options.cxx}. Because of the in-tree builds, our name pattern may or
-may not find these files. Note, however, that we cannot just include them as
-non-pattern prerequisites. We also have to exclude them from the pattern match
-since otherwise we may end up with duplicate prerequisites. As a result, this
-is how we have to handle this case provided we want to continue using patterns
-to find other, non-generated source files:
+and \c{options.cxx}. Because of the in/out of source builds, our name pattern
+may or may not find these files. Note, however, that we cannot just include
+them as non-pattern prerequisites. We also have to exclude them from the
+pattern match since otherwise we may end up with duplicate prerequisites. As a
+result, this is how we have to handle this case provided we want to continue
+using patterns to find other, non-generated source files:
\
exe{hello}: {hxx cxx}{* -options} {hxx cxx}{options}
\
+If all our auto-generated source files have a common prefix or suffix, then we
+can exclude them wholesale with a pattern. For example, if all our generated
+files end with the `-options` suffix:
+
+\
+exe{hello}: {hxx cxx}{** -**-options} {hxx cxx}{foo-options bar-options}
+\
+
If the name pattern includes an absolute directory, then the pattern match is
performed in that directory and the generated names include absolute
directories as well. Otherwise, the pattern match is performed in the
@@ -5683,12 +5905,12 @@ does not break or produce incorrect results if the environment changes.
Instead, changes to the environment are detected and affected targets are
automatically rebuilt.
-The two use-cases where hermetic configurations are really useful are when we
-need to save an environment which is not generally available (for example, an
-environment of a Visual Studio development command prompt) or when our build
-results need to exactly match the specific configuration (for example, because
-parts of the overall result have already been built and installed, as is the
-case with build system modules).|
+The two use-cases where hermetic configurations are especially useful are when
+we need to save an environment which is not generally available (for example,
+an environment of a Visual Studio development command prompt) or when our
+build results need to exactly match the specific configuration (for example,
+because parts of the overall result have already been built and installed, as
+is the case with build system modules).|
If we now examine \c{config.build}, we will see something along these lines:
@@ -5919,30 +6141,54 @@ of the Introduction, the \c{install} module defines the following standard
installation locations:
\
-name default config.* override
----- ------- -----------------
-root install.root
+name default config.install.*
+ (c.i.*) override
+---- ------- ----------------
+root c.i.root
-data_root root/ install.data_root
-exec_root root/ install.exec_root
+data_root root/ c.i.data_root
+exec_root root/ c.i.exec_root
-bin exec_root/bin/ install.bin
-sbin exec_root/sbin/ install.sbin
-lib exec_root/lib/<private>/ install.lib
-libexec exec_root/libexec/<private>/<project>/ install.libexec
-pkgconfig lib/pkgconfig/ install.pkgconfig
+bin exec_root/bin/ c.i.bin
+sbin exec_root/sbin/ c.i.sbin
+lib exec_root/lib/<private>/ c.i.lib
+libexec exec_root/libexec/<private>/<project>/ c.i.libexec
+pkgconfig lib/pkgconfig/ c.i.pkgconfig
-etc data_root/etc/ install.etc
-include data_root/include/<private>/ install.include
-share data_root/share/ install.share
-data share/<private>/<project>/ install.data
+etc data_root/etc/ c.i.etc
+include data_root/include/<private>/ c.i.include
+include_arch include/ c.i.include_arch
+share data_root/share/ c.i.share
+data share/<private>/<project>/ c.i.data
+buildfile share/build2/export/<project>/ c.i.buildfile
-doc share/doc/<private>/<project>/ install.doc
-legal doc/ install.legal
-man share/man/ install.man
-man<N> man/man<N>/ install.man<N>
+doc share/doc/<private>/<project>/ c.i.doc
+legal doc/ c.i.legal
+man share/man/ c.i.man
+man<N> man/man<N>/ c.i.man<N>
\
+The \c{include_arch} location is meant for architecture-specific files, such
+as configuration headers. By default it's the same as \c{include} but can be
+configured by the user to a different value (for example,
+\c{/usr/include/x86_64-linux-gnu/}) for platforms that support multiple
+architectures from the same installation location. This is how one would
+normally use it from a \c{buildfile}:
+
+\
+# The configuration header may contain target architecture-specific
+# information so install it into include_arch/ instead of include/.
+#
+h{*}: install = include/libhello/
+h{config}: install = include_arch/libhello/
+\
+
+The \c{buildfile} location is meant for exported buildfiles that can be
+imported by other projects. If a project contains any \c{**.build} buildfiles
+in its \c{build/export/} directory (or \c{**.build2} and \c{build2/export/} in
+the alternative naming scheme), then they are automatically installed into
+this location (recreating subdirectories).
+
The \c{<project>}, \c{<version>}, and \c{<private>} substitutions in these
\c{config.install.*} values are replaced with the project name, version, and
private subdirectory, respectively. If either is empty, then the corresponding
@@ -5961,7 +6207,9 @@ The private installation subdirectory is specified with the
directory and may include multiple components. For example:
\
-$ b install config.install.root=/usr/local/ config.install.private=hello/
+$ b install \
+ config.install.root=/usr/local/ \
+ config.install.private=hello/
\
\N|If you are relying on your system's dynamic linker defaults to
@@ -5979,6 +6227,153 @@ $ b install \
|
+
+\h#install-reloc|Relocatable Installation|
+
+A relocatable installation can be moved to a directory other than its original
+installation location. Note that the installation should be moved as a whole
+preserving the directory structure under its root (\c{config.install.root}).
+To request a relocatable installation, set the \c{config.install.relocatable}
+variable to \c{true}. For example:
+
+\
+$ b install \
+ config.install.root=/tmp/install \
+ config.install.relocatable=true
+\
+
+A relocatable installation is achieved by using paths relative to one
+filesystem entry within the installation to locate another. Some examples
+include:
+
+\ul|
+
+\li|Paths specified in \c{config.bin.rpath} are made relative using the
+\c{$ORIGIN} (Linux, BSD) or \c{@loader_path} (Mac OS) mechanisms.|
+
+\li|Paths in the generated \c{pkg-config} files are made relative to the
+\c{${pcfiledir\}} built-in variable.|
+
+\li|Paths in the generated installation manifest (\c{config.install.manifest})
+are made relative to the location of the manifest file.||
+
+While these common aspects are handled automatically, if a project relies on
+knowing its installation location, then it will most likely need to add manual
+support for relocatable installations.
+
+As an example, consider an executable that supports loading plugins and
+requires the plugin installation directory to be embedded into the executable
+during the build. The common way to support relocatable installations for such
+cases is to embed a path relative to the executable and complete it at
+runtime, normally by resolving the executable's path and using its directory
+as a base.
+
+If you would like to always use the relative path, regardless of whether the
+installation is relocatable or not, then you can obtain the library
+installation directory relative to the executable installation directory like
+this:
+
+\
+plugin_dir = $install.resolve($install.lib, $install.bin)
+\
+
+Alternatively, if you would like to continue using absolute paths for
+non-relocatable installations, then you can use something like this:
+
+\
+plugin_dir = $install.resolve( \
+ $install.lib, \
+ ($install.relocatable ? $install.bin : [dir_path] ))
+\
+
+Finally, if you are unable to support relocatable installations, the correct
+way to handle this is to assert this fact in \c{root.build} of your project,
+for example:
+
+\
+assert (!$install.relocatable) 'relocatable installation not supported'
+\
+
+
+\h#install-filter|Installation Filtering|
+
+While project authors determine what gets installed at the \c{buildfile}
+level, the users of the project can further filter the installation using the
+\c{config.install.filter} variable.
+
+The value of this variable is a list of key-value pairs that specify the
+filesystem entries to include or exclude from the installation. For example,
+the following filters will omit installing headers and static libraries
+(notice the quoting of the wildcard).
+
+\
+$ b install config.install.filter='include/@false \"*.a\"@false'
+\
+
+The key in each pair is a file or directory path or a path wildcard pattern.
+If a key is relative and contains a directory component or is a directory,
+then it is treated relative to the corresponding \c{config.install.*}
+location. Otherwise (simple path, normally a pattern), it is matched against
+the leaf of any path. Note that if an absolute path is specified, it should be
+without the \c{config.install.chroot} prefix.
+
+The value in each pair is either \c{true} (include) or \c{false} (exclude).
+The filters are evaluated in the order specified and the first match that is
+found determines the outcome. If no match is found, the default is to
+include. For a directory, while \c{false} means exclude all the sub-paths
+inside this directory, \c{true} does not mean that all the sub-paths will be
+included wholesale. Rather, the matched component of the sub-path is treated
+as included with the rest of the components matched against the following
+sub-filters. For example:
+
+\
+$ b install config.install.filter='
+ include/x86_64-linux-gnu/@true
+ include/x86_64-linux-gnu/details/@false
+ include/@false'
+\
+
+The \c{true} or \c{false} value may be followed by comma and the \c{symlink}
+modifier to only apply to symlink filesystem entries. For example:
+
+\
+$ b config.install.filter='\"*.so\"@false,symlink'
+\
+
+A filter can be negated by specifying \c{!} as the first pair. For example:
+
+\
+$ b install config.install.filter='! include/@false \"*.a\"@false'
+\
+
+Note that the filtering mechanism only affects what gets physically copied to
+the installation directory without affecting what gets built for install or
+the view of what gets installed at the \c{buildfile} level. For example, given
+the \c{include/@false *.a@false} filters, static libraries will still be built
+(unless arranged not to with \c{config.bin.lib}) and the \c{pkg-config} files
+will still end up with \c{-I} options pointing to the header installation
+directory. Note also that this mechanism applies to both \c{install} and
+\c{uninstall} operations.
+
+\N|If you are familiar with the Debian or Fedora packaging, this mechanism is
+somewhat similar to (and can be used for a similar purpose as) the Debian's
+\c{.install} files and Fedora's \c{%files} spec file sections, which are used
+to split the installation into multiple binary packages.|
+
+As another example, the following filters will omit all the
+development-related files (headers, \c{pkg-config} files, static libraries,
+and shared library symlinks; assuming the platform uses the \c{.a}/\c{.so}
+extensions for the libraries):
+
+\
+$ b install config.install.filter='
+ include/@false
+ pkgconfig/@false
+ \"lib/*.a\"@false
+ \"lib/*.so\"@false,symlink'
+\
+
+
\h1#module-version|\c{version} Module|
A project can use any version format as long as it meets the package version
@@ -6249,7 +6644,7 @@ just not ordered correctly. As a result, we feel that the risks are justified
when the only alternative is manual version management (which is always an
option, nevertheless).
-When we prepare a distribution of a snapshot, the \c{version} module
+When we prepare a source distribution of a snapshot, the \c{version} module
automatically adjusts the package name to include the snapshot information as
well as patches the manifest file in the distribution with the snapshot number
and id (that is, replacing \c{.z} in the version value with the actual
@@ -6280,12 +6675,9 @@ for our \c{libhello} library. To accomplish this we add the \c{version.hxx.in}
template as well as something along these lines to our \c{buildfile}:
\
-lib{hello}: ... hxx{version}
+lib{hello}: {hxx cxx}{** -version} hxx{version}
hxx{version}: in{version} $src_root/file{manifest}
-{
- dist = true
-}
\
The header rule is a line-based preprocessor that substitutes fragments
@@ -6459,12 +6851,12 @@ config.c
config.cxx
cc.id
- c.target
- c.target.cpu
- c.target.vendor
- c.target.system
- c.target.version
- c.target.class
+ cc.target
+ cc.target.cpu
+ cc.target.vendor
+ cc.target.system
+ cc.target.version
+ cc.target.class
config.cc.poptions
cc.poptions
@@ -6651,7 +7043,7 @@ symbols for all the Windows targets/compilers using the following arrangement
\
lib{foo}: libul{foo}: {hxx cxx}{**} ...
-lib{foo}: def{foo}: include = ($cxx.target.system == 'win32-msvc')
+libs{foo}: def{foo}: include = ($cxx.target.system == 'win32-msvc')
def{foo}: libul{foo}
if ($cxx.target.system == 'mingw32')
@@ -6661,6 +7053,9 @@ if ($cxx.target.system == 'mingw32')
That is, we use the \c{.def} file approach for MSVC (including when building
with Clang) and the built-in support (\c{--export-all-symbols}) for MinGW.
+\N|You will likely also want to add the generated \c{.def} file (or the
+blanket \c{*.def}) to your \c{.gitignore} file.|
+
Note that it is also possible to use the \c{.def} file approach for MinGW. In
this case we need to explicitly load the \c{bin.def} module (which should be
done after loading \c{c} or \c{cxx}) and can use the following arrangement:
@@ -6677,7 +7072,7 @@ if ($cxx.target.class == 'windows')
\
lib{foo}: libul{foo}: {hxx cxx}{**} ...
-lib{foo}: def{foo}: include = ($cxx.target.class == 'windows')
+libs{foo}: def{foo}: include = ($cxx.target.class == 'windows')
def{foo}: libul{foo}
\
@@ -6917,6 +7312,119 @@ config.c.internal.scope
\
+\h#c-objc|Objective-C Compilation|
+
+The \c{c} module provides the \c{c.objc} submodule, which can be loaded in
+order to register the \c{m{\}} target type and enable Objective-C compilation
+in the \c{C} compile rule. Note that \c{c.objc} must be loaded after the \c{c}
+module and while the \c{m{\}} target type is registered unconditionally,
+compilation is only enabled if the C compiler supports Objective-C for the
+target platform. Typical usage:
+
+\
+# root.build
+#
+using c
+using c.objc
+\
+
+\
+# buildfile
+#
+lib{hello}: {h c}{*}
+lib{hello}: m{*}: include = ($c.target.class == 'macos')
+\
+
+Note also that while there is support for linking Objective-C executables and
+libraries, this is done using the C compiler driver and no attempt is made to
+automatically link any necessary Objective-C runtime library (such as
+\c{-lobjc}).
+
+
+\h#c-as-cpp|Assembler with C Preprocessor Compilation|
+
+The \c{c} module provides the \c{c.as-cpp} submodule, which can be loaded in
+order to register the \c{S{\}} target type and enable Assembler with C
+Preprocessor compilation in the \c{C} compile rule. Note that \c{c.as-cpp}
+must be loaded after the \c{c} module and while the \c{S{\}} target type is
+registered unconditionally, compilation is only enabled if the C compiler
+supports Assembler with C Preprocessor compilation.
+
+Typical usage:
+
+\
+# root.build
+#
+using c
+using c.as-cpp
+\
+
+\
+# buildfile
+#
+exe{hello}: {h c}{* -hello.c}
+
+# Use C implementation as a fallback if no assembler.
+#
+assembler = ($c.class == 'gcc' && $c.target.cpu == 'x86_64')
+
+exe{hello}: S{hello}: include = $assembler
+exe{hello}: c{hello}: include = (!$assembler)
+\
+
+\
+/* hello.S
+ */
+#ifndef HELLO_RESULT
+# define HELLO_RESULT 0
+#endif
+
+.text
+
+.global hello
+hello:
+ /* ... */
+ movq $HELLO_RESULT, %rax
+ ret
+
+#ifdef __ELF__
+.section .note.GNU-stack, \"\", @progbits
+#endif
+\
+
+The default file extension for the \c{S{\}} target type is \c{.S} (capital)
+but that can be customized using the standard mechanisms. For example:
+
+\
+# root.build
+#
+using c
+using c.as-cpp
+
+h{*}: extension = h
+c{*}: extension = c
+S{*}: extension = sx
+\
+
+Note that \c{*.coptions} are passed to the C compiler when compiling Assembler
+with C Preprocessor files because compile options may cause additional
+preprocessor macros to be defined. Plus, some of them (such as \c{-g}) are
+passed (potentially translated) to the underlying assembler. To pass
+additional options when compiling Assembler files use \c{c.poptions} and
+\c{c.coptions}. For example (continuing with the previous example):
+
+\
+if $assembler
+{
+ obj{hello}:
+ {
+ c.poptions += -DHELLO_RESULT=1
+ c.coptions += -Wa,--no-pad-sections
+ }
+}
+\
+
+
\h1#module-cxx|\c{cxx} Module|
\N{This chapter is a work in progress and is incomplete.}
@@ -7689,7 +8197,7 @@ header-like search mechanism (\c{-I} paths, etc.), an explicit list of
exported modules is provided for each library in its \c{.pc} (\c{pkg-config})
file.
-Specifically, the library's \c{.pc} file contains the \c{cxx_modules} variable
+Specifically, the library's \c{.pc} file contains the \c{cxx.modules} variable
that lists all the exported C++ modules in the \c{<name>=<path>} form with
\c{<name>} being the module's C++ name and \c{<path>} \- the module interface
file's absolute path. For example:
@@ -7700,15 +8208,15 @@ Version: 1.0.0
Cflags:
Libs: -L/usr/lib -lhello
-cxx_modules = hello.core=/usr/include/hello/core.mxx hello.extra=/usr/include/hello/extra.mxx
+cxx.modules = hello.core=/usr/include/hello/core.mxx hello.extra=/usr/include/hello/extra.mxx
\
Additional module properties are specified with variables in the
-\c{cxx_module_<property>.<name>} form, for example:
+\c{cxx.module_<property>.<name>} form, for example:
\
-cxx_module_symexport.hello.core = true
-cxx_module_preprocessed.hello.core = all
+cxx.module_symexport.hello.core = true
+cxx.module_preprocessed.hello.core = all
\
Currently, two properties are defined. The \c{symexport} property with the
@@ -8569,6 +9077,34 @@ macros may not be needed by all consumers. This way we can also keep the
header macro-only which means it can be included freely, in or out of module
purviews.
+\h#cxx-objcxx|Objective-C++ Compilation|
+
+The \c{cxx} module provides the \c{cxx.objcxx} submodule, which can be loaded
+in order to register the \c{mm{\}} target type and enable Objective-C++
+compilation in the \c{C++} compile rule. Note that \c{cxx.objcxx} must be
+loaded after the \c{cxx} module and while the \c{mm{\}} target type is
+registered unconditionally, compilation is only enabled if the C++ compiler
+supports Objective-C++ for the target platform. Typical usage:
+
+\
+# root.build
+#
+using cxx
+using cxx.objcxx
+\
+
+\
+# buildfile
+#
+lib{hello}: {hxx cxx}{*}
+lib{hello}: mm{*}: include = ($cxx.target.class == 'macos')
+\
+
+Note also that while there is support for linking Objective-C++ executables
+and libraries, this is done using the C++ compiler driver and no attempt is
+made to automatically link any necessary Objective-C runtime library (such as
+\c{-lobjc}).
+
\h1#module-in|\c{in} Module|
@@ -8636,13 +9172,13 @@ symbol is expected to start a substitution with unresolved (to a variable
value) names treated as errors. The double substitution symbol (for example,
\c{$$}) serves as an escape sequence.
-The substitution mode can be relaxed using the \c{in.substitution} variable.
-Its valid values are \c{strict} (default) and \c{lax}. In the lax mode a pair
-of substitution symbols is only treated as a substitution if what's between
-them looks like a build system variable name (that is, it doesn't contain
-spaces, etc). Everything else, including unterminated substitution symbols, is
-copied as is. Note also that in this mode the double substitution symbol is
-not treated as an escape sequence.
+The substitution mode can be relaxed using the \c{in.mode} variable. Its
+valid values are \c{strict} (default) and \c{lax}. In the lax mode a pair of
+substitution symbols is only treated as a substitution if what's between them
+looks like a build system variable name (that is, it doesn't contain spaces,
+etc). Everything else, including unterminated substitution symbols, is copied
+as is. Note also that in this mode the double substitution symbol is not
+treated as an escape sequence.
The lax mode is mostly useful when trying to reuse existing \c{.in} files from
other build systems, such as \c{autoconf}. Note, however, that the lax mode is
@@ -8655,7 +9191,7 @@ substitutions as is. For example:
h{config}: in{config} # config.h.in
{
in.symbol = '@'
- in.substitution = lax
+ in.mode = lax
CMAKE_SYSTEM_NAME = $c.target.system
CMAKE_SYSTEM_PROCESSOR = $c.target.cpu
@@ -8669,6 +9205,42 @@ target-specific variables. Typed variable values are converted to string
using the corresponding \c{builtin.string()} function overload before
substitution.
+While specifying substitution values as \c{buildfile} variables is usually
+natural, sometimes this may not be possible or convenient. Specifically, we
+may have substitution names that cannot be specified as \c{buildfile}
+variables, for example, because they start with an underscore (and are thus
+reserved) or because they refer to one of the predefined variables. Also, we
+may need to have different groups of substitution values for different cases,
+for example, for different platforms, and it would be convenient to pass such
+groups around as a single value.
+
+To support these requirements the substitution values can alternatively be
+specified as key-value pairs in the \c{in.substitutions} variable. Note that
+entries in this substitution map take precedence over the \c{buildfile}
+variables. For example:
+
+\
+/* config.h.in */
+
+#define _GNU_SOURCE @_GNU_SOURCE@
+#define _POSIX_SOURCE @_POSIX_SOURCE@
+\
+
+\
+# buildfile
+
+h{config}: in{config}
+{
+ in.symbol = '@'
+ in.mode = lax
+
+ in.substitutions = _GNU_SOURCE@0 _POSIX_SOURCE@1
+}
+\
+
+\N|In the above example, the \c{@} characters in \c{in.symbol} and
+\c{in.substitutions} are unrelated.|
+
Using an undefined variable in a substitution is an error. Using a \c{null}
value in a substitution is also an error unless the fallback value is
specified with the \c{in.null} variable. For example:
@@ -8682,11 +9254,21 @@ h{config}: in{config}
}
\
-A number of other build system modules, for example, \l{#module-version
-\c{version}} and \l{#module-bash \c{bash}}, are based on the \c{in} module and
-provide extended functionality. The \c{in} preprocessing rule matches any
-\c{file{\}}-based target that has the corresponding \c{in{\}} prerequisite
-provided none of the extended rules match.
+\N|To specify a \c{null} value using the \c{in.substitutions} mechanism omit
+the value, for example:
+
+\
+in.substitutions = _GNU_SOURCE
+\
+
+|
+
+A number of other build system modules, for example,
+\l{https://github.com/build2/libbuild2-autoconf/ \c{autoconf}},
+\l{#module-version \c{version}}, and \l{#module-bash \c{bash}}, are based on
+the \c{in} module and provide extended functionality. The \c{in} preprocessing
+rule matches any \c{file{\}}-based target that has the corresponding \c{in{\}}
+prerequisite provided none of the extended rules match.
\h1#module-bash|\c{bash} Module|
@@ -8739,11 +9321,12 @@ buildfiles.
The \c{say-hello.bash} module is \i{imported} by the \c{hello} script with the
\c{@import\ hello/say-hello@} substitution. The \i{import path}
-(\c{hello/say-hello} in our case) is a relative path to the module file within
-the project. Its first component (\c{hello} in our case) must be the project
-base name and the \c{.bash} module extension can be omitted. \N{The constraint
-placed on the first component of the import path is required to implement
-importation of installed modules, as discussed below.}
+(\c{hello/say-hello} in our case) is a path to the module file within the
+project. Its first component (\c{hello} in our case) must be both the project
+name and the top-level subdirectory within the project. The \c{.bash} module
+extension can be omitted. \N{The constraint placed on the first component of
+the import path is required to implement importation of installed modules, as
+discussed below.}
During preprocessing, the import substitution will be replaced with a
\c{source} builtin call and the import path resolved to one of the \c{bash{\}}
@@ -8762,11 +9345,12 @@ OS. The script, however, can provide a suitable implementation as a function.
See the \c{bash} module tests for a sample implementation of such a function.|
By default, \c{bash} modules are installed into a subdirectory of the \c{bin/}
-installation directory named as the project base name. For instance, in the
-above example, the script will be installed as \c{bin/hello} and the module as
-\c{bin/hello/say-hello.bash} with the script sourcing the module relative to
-the \c{bin/} directory. Note that currently it is assumed the script and all
-its modules are installed into the same \c{bin/} directory.
+installation directory named as the project name plus the \c{.bash} extension.
+For instance, in the above example, the script will be installed as
+\c{bin/hello} and the module as \c{bin/hello.bash/say-hello.bash} with the
+script sourcing the module relative to the \c{bin/} directory. Note that
+currently it is assumed the script and all its modules are installed into the
+same \c{bin/} directory.
Naturally, modules can import other modules and modules can be packaged into
\i{module libraries} and imported using the standard build system import
@@ -8833,8 +9417,9 @@ for example, \c{libhello}. If there is also a native library (that is, one
written in C/C++) that provides the same functionality (or the \c{bash}
library is a language binding for said library), then it is customary to add
the \c{.bash} extension to the \c{bash} library name, for example,
-\c{libhello.bash}. Note that in this case the project base name is
-\c{libhello}.
+\c{libhello.bash}. Note that in this case the top-level subdirectory within
+the project is expected to be called without the \c{bash} extension,
+for example, \c{libhello}.
Modules can be \i{private} or \i{public}. Private modules are implementation
details of a specific project and are not expected to be imported from other
@@ -8881,4 +9466,498 @@ corresponding \c{in{\}} and one or more \c{bash{\}} prerequisites as well as
\c{bash{\}} targets that have the corresponding \c{in{\}} prerequisite (if you
need to preprocess a script that does not depend on any modules, you can use
the \c{in} module's rule).
+
+
+\h1#json-dump|Appendix A \- JSON Dump Format|
+
+This appendix describes the machine-readable, JSON-based build system state
+dump format that can be requested with the \c{--dump-format=json-v0.1} build
+system driver option (see \l{b(1)} for details).
+
+The format is specified in terms of the serialized representation of C++
+\c{struct} instances. See \l{b.xhtml#json-output JSON OUTPUT} for details on
+the overall properties of this format and the semantics of the \c{struct}
+serialization.
+
+\N|This format is currently unstable (thus the temporary \c{-v0.1} suffix)
+and may be changed in ways other than as described in \l{b.xhtml#json-output
+JSON OUTPUT}. In case of such changes the format version will be incremented
+to allow detecting incompatibilities but no support for older versions is
+guaranteed.|
+
+The build system state can be dumped after the load phase (\c{--dump=load}),
+once the build state has been loaded, and/or after the match phase
+(\c{--dump=match}), after rules have been matched to targets to execute the
+desired action. The JSON format differs depending on the phase after which it
+is produced. After the load phase the format aims to describe the
+action-independent state, essentially as specified in the \c{buildfiles}.
+While after the match phase it aims to describe the state for executing the
+specified action, as determined by the rules that have been matched. The
+former state would be more appropriate, for example, for an IDE that tries to
+use \c{buildfiles} as project files. While the latter state could be used to
+determine the actual build graph for a certain action, for example, in order
+to infer which executable targets are considered tests by the \c{test}
+operation.
+
+While it's possible to dump the build state as a byproduct of executing an
+action (for example, performing an update), it's often desirable to only dump
+the build state and do it as quickly as possible. For such cases the
+recommended option combinations are as follows (see the \c{--load-only} and
+\c{--match-only} documentation for details):
+
+\
+$ b --load-only --dump=load --dump-format=json-v0.1 .../dir/
+
+$ b --match-only --dump=match --dump-format=json-v0.1 .../dir/
+$ b --match-only --dump=match --dump-format=json-v0.1 .../dir/type{name}
+\
+
+\N|Note that a match dump for a large project can produce a large amount of
+data, especially for the \c{update} operation (tens and even hundreds of
+megabytes is not uncommon). To reduce this size it is possible to limit the
+dump to specific scopes and/or targets with the \c{--dump-scope} and
+\c{--dump-target} options.|
+
+The complete dump (that is, not of a specific scope or target) is a tree of
+nested scope objects (see \l{#intro-dirs-scopes Output Directories and Scopes}
+for background). The scope object has the serialized representation of the
+following C++ \c{struct} \c{scope}. It is the same for both load and match
+dumps except for the type of the \c{targets} member:
+
+\
+struct scope
+{
+ string out_path;
+ optional<string> src_path;
+
+ vector<variable> variables; // Non-type/pattern scope variables.
+
+ vector<scope> scopes; // Immediate children.
+
+ vector<loaded_target|matched_target> targets;
+};
+\
+
+For example (parts of the output are omitted for brevity):
+
+\N|The actual output is produced unindented to reduce the size.|
+
+\
+$ cd /tmp
+$ bdep new hello
+$ cd hello
+$ bdep new -C @gcc cc
+$ b --load-only --dump=load --dump-format=json-v0.1
+{
+ \"out_path\": \"\",
+ \"variables\": [ ... ],
+ \"scopes\": [
+ {
+ \"out_path\": \"/tmp/hello-gcc\",
+ \"variables\": [ ... ],
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello\",
+ \"variables\": [ ... ],
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"variables\": [ ... ],
+ \"targets\": [ ... ]
+ }
+ ],
+ \"targets\": [ ... ]
+ }
+ ],
+ \"targets\": [ ... ]
+ }
+ ]
+}
+\
+
+The \c{out_path} member is relative to the parent scope. It is empty for the
+special global scope, which is the root of the tree. The \c{src_path} member
+is absent if it is the same as \c{out_path} (an in-source build or a scope
+outside of a project).
+
+\N|For the match dump, targets that have not been matched for the specified
+action are omitted.|
+
+In the load dump, the target object has the serialized representation of the
+following C++ \c{struct} \c{loaded_target}:
+
+\
+struct loaded_target
+{
+ string name; // Relative quoted/qualified name.
+ string display_name; // Relative display name.
+ string type; // Target type.
+ optional<string> group; // Absolute quoted/qualified group target.
+
+ vector<variable> variables; // Target variables.
+
+ vector<prerequisite> prerequisites;
+};
+\
+
+For example (continuing with the previous \c{hello} setup):
+
+\
+{
+ \"out_path\": \"\",
+ \"scopes\": [
+ {
+ \"out_path\": \"/tmp/hello-gcc\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"targets\": [
+ {
+ \"name\": \"exe{hello}\",
+ \"display_name\": \"exe{hello}\",
+ \"type\": \"exe\",
+ \"prerequisites\": [
+ {
+ \"name\": \"cxx{hello}\",
+ \"type\": \"cxx\"
+ },
+ {
+ \"name\": \"testscript{testscript}\",
+ \"type\": \"testscript\"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+\
+
+The target \c{name} member is the target name that is qualified with the
+extension (if applicable and known) and, if required, is quoted so that it can
+be passed back to the build system driver on the command line. The
+\c{display_name} member is unqualified and unquoted. Note that both the target
+\c{name} and \c{display_name} members are normally relative to the containing
+scope (if any).
+
+The prerequisite object has the serialized representation of the following C++
+\c{struct} \c{prerequisite}:
+
+\
+struct prerequisite
+{
+ string name; // Quoted/qualified name.
+ string type;
+ vector<variable> variables; // Prerequisite variables.
+};
+\
+
+The prerequisite \c{name} member is normally relative to the containing scope.
+
+In the match dump, the target object has the serialized representation of the
+following C++ \c{struct} \c{matched_target}:
+
+\
+struct matched_target
+{
+ string name;
+ string display_name;
+ string type;
+ optional<string> group;
+
+ optional<path> path; // Absent if not path target, not assigned.
+
+ vector<variable> variables;
+
+ optional<operation_state> outer_operation; // null if not matched.
+ operation_state inner_operation; // null if not matched.
+};
+\
+
+For example (outer scopes removed for brevity):
+
+\
+$ b --match-only --dump=match --dump-format=json-v0.1
+{
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"targets\": [
+ {
+ \"name\": \"/tmp/hello/hello/cxx{hello.cxx}@./\",
+ \"display_name\": \"/tmp/hello/hello/cxx{hello}@./\",
+ \"type\": \"cxx\",
+ \"path\": \"/tmp/hello/hello/hello.cxx\",
+ \"inner_operation\": {
+ \"rule\": \"build.file\",
+ \"state\": \"unchanged\"
+ }
+ },
+ {
+ \"name\": \"obje{hello.o}\",
+ \"display_name\": \"obje{hello}\",
+ \"type\": \"obje\",
+ \"group\": \"/tmp/hello-gcc/hello/hello/obj{hello}\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello.o\",
+ \"inner_operation\": {
+ \"rule\": \"cxx.compile\",
+ \"prerequisite_targets\": [
+ {
+ \"name\": \"/tmp/hello/hello/cxx{hello.cxx}@./\",
+ \"type\": \"cxx\"
+ },
+ {
+ \"name\": \"/usr/include/c++/12/h{iostream.}\",
+ \"type\": \"h\"
+ },
+ ...
+ ]
+ }
+ },
+ {
+ \"name\": \"exe{hello.}\",
+ \"display_name\": \"exe{hello}\",
+ \"type\": \"exe\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello\",
+ \"inner_operation\": {
+ \"rule\": \"cxx.link\",
+ \"prerequisite_targets\": [
+ {
+ \"name\": \"/tmp/hello-gcc/hello/hello/obje{hello.o}\",
+ \"type\": \"obje\"
+ }
+ ]
+ }
+ }
+ ]
+}
+\
+
+The first four members in \c{matched_target} have the same semantics as in
+\c{loaded_target}.
+
+The \c{outer_operation} member is only present if the action has an outer
+operation. For example, when performing \c{update-for-test}, \c{test} is the
+outer operation while \c{update} is the inner operation.
+
+The operation state object has the serialized representation of the following
+C++ \c{struct} \c{operation_state}:
+
+\
+struct operation_state
+{
+ string rule; // null if direct recipe match.
+
+ optional<string> state; // One of unchanged|changed|group.
+
+ vector<variable> variables; // Rule variables.
+
+ vector<prerequisite_target> prerequisite_targets;
+};
+\
+
+The \c{rule} member is the matched rule name. The \c{state} member is the
+target state, if known after match. The \c{prerequisite_targets} array is a
+subset of prerequisites resolved to targets that are in effect for this
+action. The matched rule may add additional targets, for example, dynamically
+extracted additional dependencies, like \c{/usr/include/c++/12/h{iostream.\}}
+in the above listing.
+
+The prerequisite target object has the serialized representation of the
+following C++ \c{struct} \c{prerequisite_target}:
+
+\
+struct prerequisite_target
+{
+ string name; // Absolute quoted/qualified target name.
+ string type;
+ bool adhoc;
+};
+\
+
+The \c{variables} array in the scope, target, prerequisite, and prerequisite
+target objects contains scope, target, prerequisite, and rule variables,
+respectively.
+
+The variable object has the serialized representation of the following C++
+\c{struct} \c{variable}:
+
+\
+struct variable
+{
+ string name;
+ optional<string> type;
+ json_value value; // null|boolean|number|string|object|array
+};
+\
+
+For example:
+
+\
+{
+ \"out_path\": \"\",
+ \"variables\": [
+ {
+ \"name\": \"build.show_progress\",
+ \"type\": \"bool\",
+ \"value\": true
+ },
+ {
+ \"name\": \"build.verbosity\",
+ \"type\": \"uint64\",
+ \"value\": 1
+ },
+ ...
+ ],
+ \"scopes\": [
+ {
+ \"out_path\": \"/tmp/hello-gcc\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"variables\": [
+ {
+ \"name\": \"out_base\",
+ \"type\": \"dir_path\",
+ \"value\": \"/tmp/hello-gcc/hello/hello\"
+ },
+ {
+ \"name\": \"src_base\",
+ \"type\": \"dir_path\",
+ \"value\": \"/tmp/hello/hello\"
+ },
+ {
+ \"name\": \"cxx.poptions\",
+ \"type\": \"strings\",
+ \"value\": [
+ \"-I/tmp/hello-gcc/hello\",
+ \"-I/tmp/hello\"
+ ]
+ },
+ {
+ \"name\": \"libs\",
+ \"value\": \"/tmp/hello-gcc/libhello/libhello/lib{hello}\"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+\
+
+The \c{type} member is absent if the variable value is untyped.
+
+The \c{value} member contains the variable value in a suitable JSON
+representation. Specifically:
+
+\ul|
+
+\li|\c{null} values are represented as JSON \c{null}.|
+
+\li|\c{bool} values are represented as JSON \c{boolean}.|
+
+\li|\c{int64} and \c{uint64} values are represented as JSON \c{number}.|
+
+\li|\c{string}, \c{path}, \c{dir_path} values are represented as JSON
+ \c{string}.|
+
+\li|Untyped simple name values are represented as JSON \c{string}.|
+
+\li|Pairs of above values are represented as JSON objects with the \c{first}
+ and \c{second} members corresponding to the pair elements.|
+
+\li|Untyped complex name values are serialized as target names and represented
+ as JSON \c{string}.|
+
+\li|Containers of above values are represented as JSON arrays corresponding to
+ the container elements.|
+
+\li|An empty value is represented as an empty JSON object if it's a typed
+ pair, as an empty JSON array if it's a typed container or is untyped, and
+ as an empty string otherwise.||
+
+One expected use-case for the match dump is to determine the set of targets
+for which a given action is applicable. For example, we may want to determine
+all the executables in a project that can be tested with the \c{test}
+operation in order to present this list to the user in an IDE plugin or
+some such. To further illuminate the problem, consider the following
+\c{buildfile} which declares a number of executable targets, some are
+tests and some are not:
+
+\
+exe{hello1}: ... testscript # Test because of testscript prerequisite.
+
+exe{hello2}: test = true # Test because of test=true.
+
+exe{hello3}: ... testscript # Not a test because of test=false.
+{
+ test = false
+}
+\
+
+As can be seen, trying to infer this information is not straightforward and
+doing so manually by examining prerequisites, variables, etc., while possible,
+will be complex and likely brittle. Instead, the recommended approach is to
+use the match dump and base the decision on the \c{state} target object
+member. Specifically, a rule which matched the target but determined that
+nothing needs to be done for this target, returns the special \c{noop}
+recipe. The \c{build2} core recognizes this situation and sets such target's
+state to \c{unchanged} during match. Here is what the match dump will look
+like for the above three executables:
+
+\
+$ b --match-only --dump=match --dump-format=json-v0.1 test
+{
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"targets\": [
+ {
+ \"name\": \"exe{hello1.}\",
+ \"display_name\": \"exe{hello1}\",
+ \"type\": \"exe\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello1\",
+ \"inner_operation\": {
+ \"rule\": \"test\"
+ }
+ },
+ {
+ \"name\": \"exe{hello2.}\",
+ \"display_name\": \"exe{hello2}\",
+ \"type\": \"exe\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello2\",
+ \"inner_operation\": {
+ \"rule\": \"test\"
+ }
+ },
+ {
+ \"name\": \"exe{hello3}\",
+ \"display_name\": \"exe{hello3}\",
+ \"type\": \"exe\",
+ \"inner_operation\": {
+ \"rule\": \"test\",
+ \"state\": \"unchanged\"
+ }
+ }
+ ]
+}
+\
+
"
diff --git a/doc/testscript.cli b/doc/testscript.cli
index 50f975d..20d9c2d 100644
--- a/doc/testscript.cli
+++ b/doc/testscript.cli
@@ -622,15 +622,27 @@ By convention, the testscript file should be called either \c{testscript} if
you only have one or have the \c{.testscript} extension, for example,
\c{basics.testscript}. The \c{test} module registers the \c{testscript{\}}
target type to be used for testscript files. We don't have to use explicit
-target type for the \c{testscript} file.
+target type for the \c{testscript} file. For example:
+
+\
+exe{hello}: testscript{basics advanced}
+\
A testscript prerequisite can be specified for any target. For example, if
-our directory contains a bunch of shell scripts that we want to test together,
+our directory contains a bunch of executables that we want to test together,
then it makes sense to specify the testscript prerequisite for the directory
target:
\
-./: testscript{basics}
+./: testscript
+\
+
+Similarly, the same testscript can be used to test multiple targets. For
+example:
+
+\
+exe{hello}: testscript{basics advanced}
+exe{hello-lite}: testscript{basics}
\
During variable lookup if a variable is not found in one of the testscript
@@ -741,6 +753,68 @@ Note also that these \c{test.*} variables only establish a convention. You
could also put everything into, say \c{test.arguments}, and it will still work
as expected.
+\N|The \c{test.redirects}, \c{test.cleanups}, and \c{$*} variables are of the
+special \c{cmdline} type, see \l{#lexical Lexical Structure} for details.|
+
+The special \c{test.*} variables make it fairly easy to arrange the testing of
+a single executable. What if we need to run multiple executables from a single
+testscript file? For example, we may have a pair of executables, such as
+\c{reader} and \c{writer}, that must be tested together. Or we may have a
+number of test executables that all require a common setup, for example,
+cryptographic key generation, which we would like not to repeat for each
+test. While it is possible to achieve this with target-specific variables
+similar to \c{test}, things will be less automatic. In particular, there
+will be no automatic translation of target names to paths and we will have
+to do it manually. For example:
+
+\
+# buildfile
+
+./: exe{reader}: cxx{reader} ...
+./: exe{writer}: cxx{writer} ...
+
+./: testscript
+{
+ reader = exe{reader}
+ writer = exe{writer}
+}
+\
+
+\
+# testscript
+
+# Translate targets to paths.
+#
+reader = $path($reader)
+writer = $path($writer)
+
+: pipe
+:
+$writer | $reader
+
+: file
+:
+$writer output;
+$reader output
+\
+
+\N|Strictly speaking, for local executables, there is no need to pass the
+target names from \c{buildfile} to \c{testscript} and instead we could just
+list them literally in \c{testscript}. In particular, this could be an
+attractive approach if we have a large number of such executables. For
+example:
+
+\
+# testscript
+
+$path(exe{test1}) : test1
+$path(exe{test2}) : test2
+$path(exe{test3}) : test3
+...
+\
+
+|
+
Another pre-defined variable is \c{test.target}. It is used to specify the
test target platform when cross-testing (for example, when running Windows
test on Linux under Wine). Normally, you would set it in your
@@ -964,6 +1038,9 @@ the teardown commands are executed sequentially and in the order specified.
Again, if any of them fail, the group execution is terminated and the group is
considered to have failed.
+\N|Currently, the only way to run several executables serially is to place
+them into a single compound test. See \l{#syntax-test Test} for details.|
+
As an example, consider the following version of \c{basics.testscript}:
\
@@ -1285,62 +1362,54 @@ here-document single-quoted here_line_single
here-document double-quoted here_line_double expansions
\
-Finally, unquoted expansions in command lines (test, setup, and teardown) are
-re-lexed in the \c{command_expansion} mode in order to recognize command line
-syntax tokens (redirects, pipes, etc). To illustrate why this re-lexing is
-necessary, consider the following example of a \"canned\" command line:
+Finally, unquoted expansions in command lines (test, setup, and teardown) of
+the special \c{cmdline} type are re-lexed in the \c{command_expansion} mode in
+order to recognize command line syntax tokens (redirects, pipes, etc). To
+illustrate this mechanism, consider the following example of a \"canned\"
+command line:
\
-x = echo >-
-$x foo
+cmd = [cmdline] echo >-
+$cmd foo
\
-The test command line token sequence will be \c{$}, \c{x}, \c{foo}. After the
-expansion we have \c{echo}, \c{>-}, \c{foo}, however, the second element
-(\c{>-}) is not (yet) recognized as a redirect. To recognize it we re-lex
-the result of the expansion.
+The test command line token sequence will be \c{$}, \c{cmd}, \c{foo}. After
+the expansion we have \c{echo}, \c{>-}, \c{foo}, however, the second element
+(\c{>-}) is not (yet) recognized as a redirect. To recognize it, the result of
+the expansion is re-lexed.
Note that besides the few command line syntax characters, re-lexing will also
\"consume\" quotes and escapes, for example:
\
-args = \"'foo'\" # 'foo'
-echo $args # echo foo
+cmd = [cmdline] echo \"'foo'\" # echo 'foo'
+$cmd # echo foo
\
To preserve quotes in this context we need to escape them:
\
-args = \"\\\\'foo\\\\'\" # \'foo\'
-echo $args # echo 'foo'
-\
-
-Alternatively, for a single value, we could quote the expansion (in order
-to suppress re-lexing; note, however, that quoting will also inhibit
-word-splitting):
-
-\
-arg = \"'foo'\" # 'foo'
-echo \"$arg\" # echo 'foo'
+cmd = [cmdline] echo \"\\\\'foo\\\\'\" # echo \'foo\'
+$cmd # echo 'foo'
\
To minimize unhelpful consumption of escape sequences (for example, in Windows
paths), re-lexing only performs the \i{effective escaping} for the \c{'\"\\}
characters. All other escape sequences are passed through uninterpreted. Note
-that this means there is no way to escape command line syntax characters. The
-recommendation is to use quoting except for passing literal quotes, for
-example:
+that this means there is no way to escape command line syntax characters in
+canned commands. The recommendation is to use quoting except for passing
+literal quotes, for example:
\
-args = \'&foo\' # '&foo'
-echo $args # echo &foo
+cmd = [cmdline] echo \'&foo\' # echo '&foo'
+$cmd # echo &foo
\
To make sure that a string is passed as is through both expansions use the
\i{doubled single-quoting} idiom, for example:
\
-filter = sed -e \''s/foo (bar|baz)/$&/'\'
+filter = [cmdline] sed -e \''s/foo (bar|baz)/$&/'\'
$* <<EOI | $filter >>EOO
...
EOI
@@ -1423,6 +1492,7 @@ while potentially spanning several physical lines. The \c{-line} suffix
here signifies a \i{logical line}, for example, a command line plus its
here-document fragments.
+
\h#syntax-grammar|Grammar|
The complete grammar of the Testscript language is presented next with the
@@ -1479,33 +1549,58 @@ test:
+(variable-line|command-like)
variable-like:
- variable-line|variable-if
+ variable-line|variable-flow
variable-line:
<variable-name> ('='|'+='|'=+') value-attributes? <value> ';'?
value-attributes: '[' <key-value-pairs> ']'
+variable-flow:
+ variable-if|variable-for|variable-while
+
variable-if:
('if'|'if!') command-line
- variable-if-body
+ variable-flow-body
*variable-elif
?variable-else
- 'end'
+ 'end' ';'?
variable-elif:
('elif'|'elif!') command-line
- variable-if-body
+ variable-flow-body
variable-else:
'else'
- variable-if-body
+ variable-flow-body
-variable-if-body:
+variable-flow-body:
*variable-like
+variable-for:
+ variable-for-args|variable-for-stream
+
+variable-for-args:
+ 'for' <variable-name> element-attributes? ':' \
+ value-attributes? <value>
+ variable-flow-body
+ 'end' ';'?
+
+element-attributes: value-attributes
+
+variable-for-stream:
+ (command-pipe '|')? \
+ 'for' (<opt>|stdin)* <variable-name> element-attributes? (stdin)*
+ variable-flow-body
+ 'end' ';'?
+
+variable-while:
+ 'while' command-line
+ variable-flow-body
+ 'end' ';'?
+
command-like:
- command-line|command-if
+ command-line|command-flow
command-line: command-expr (';'|(':' <text>))?
*here-document
@@ -1518,24 +1613,47 @@ command: <path>(' '+(<arg>|redirect|cleanup))* command-exit?
command-exit: ('=='|'!=') <exit-status>
+command-flow:
+ command-if|command-for|command-while
+
command-if:
('if'|'if!') command-line
- command-if-body
+ command-flow-body
*command-elif
?command-else
'end' (';'|(':' <text>))?
command-elif:
('elif'|'elif!') command-line
- command-if-body
+ command-flow-body
command-else:
'else'
- command-if-body
+ command-flow-body
-command-if-body:
+command-flow-body:
*(variable-line|command-like)
+command-for:
+ command-for-args|command-for-stream
+
+command-for-args:
+ 'for' <variable-name> element-attributes? ':' \
+ value-attributes? <value>
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
+command-for-stream:
+ (command-pipe '|')? \
+ 'for' (<opt>|stdin)* <variable-name> element-attributes? (stdin)*
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
+command-while:
+ 'while' command-line
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
redirect: stdin|stdout|stderr
stdin: '0'?(in-redirect)
@@ -1568,6 +1686,12 @@ description:
+(':' <text>)
\
+Note that the only purpose of having separate (from the command flow control
+constructs) variable-only flow control constructs is to remove the error-prone
+requirement of having to specify \c{+} and \c{-} prefixes in group
+setup/teardown.
+
+
\h#syntax-script|Script|
\
@@ -1578,6 +1702,7 @@ script:
A testscript file is an implicit group scope (see \l{#model Model and
Execution} for details).
+
\h#syntax-scope|Scope|
\
@@ -1627,6 +1752,7 @@ the scopes in an \c{if-else} chain are alternative implementations of the same
group/test (thus the single description). If at least one of them is a group
scope, then all the others are treated as groups as well.
+
\h#syntax-directive|Directive|
\
@@ -1640,7 +1766,7 @@ variable is assigned. You can, however, use variables assigned in the
buildfile. For example:
\
-include common-$(cxx.target.class).testscript
+.include common-$(cxx.target.class).testscript
\
\h2#syntax-directive-include|Include|
@@ -1659,6 +1785,7 @@ this scope should not be included again. The implementation is not required to
handle links when determining if two paths are to the same file. Relative
paths are assumed to be relative to the including testscript file.
+
\h#syntax-setup-teardown|Setup and Teardown|
\
@@ -1672,11 +1799,12 @@ setup-line: '+' command-like
tdown-line: '-' command-like
\
-Note that variable assignments (including \c{variable-if}) do not use the
+Note that variable assignments (including \c{variable-flow}) do not use the
\c{'+'} and \c{'-'} prefixes. A standalone (not part of a test) variable
assignment is automatically treated as a setup if no tests have yet been
encountered in this scope and as a teardown otherwise.
+
\h#syntax-test|Test|
\
@@ -1695,11 +1823,16 @@ cat <'verbose = true' >=$conf;
test1 $conf
\
+\N|As discussed in \l{#model Model and Execution}, tests are executed in
+parallel. Currently, the only way to run several executables serially is to
+place them into a single compound test.|
+
+
\h#syntax-variable|Variable|
\
variable-like:
- variable-line|variable-if
+ variable-line|variable-flow
variable-line:
<variable-name> ('='|'+='|'=+') value-attributes? <value> ';'?
@@ -1718,25 +1851,26 @@ echo $args # foo bar fox baz
The value can only be followed by \c{;} inside a test to signal the test
continuation.
+
\h#syntax-variable-if|Variable-If|
\
variable-if:
('if'|'if!') command-line
- variable-if-body
+ variable-flow-body
*variable-elif
?variable-else
- 'end'
+ 'end' ';'?
variable-elif:
('elif'|'elif!') command-line
- variable-if-body
+ variable-flow-body
variable-else:
'else'
- variable-if-body
+ variable-flow-body
-variable-if-body:
+variable-flow-body:
*variable-like
\
@@ -1760,15 +1894,90 @@ with a ternary operator is often more concise:
slash = ($cxx.target.class == 'windows' ? \\\\ : /)
\
-Note also that the only purpose of having a separate (from \c{command-if})
-variable-only if-block is to remove the error-prone requirement of having to
-specify \c{+} and \c{-} prefixes in group setup/teardown.
+
+\h#syntax-variable-for|Variable-For|
+
+\
+variable-for:
+ variable-for-args|variable-for-stream
+
+variable-for-args:
+ 'for' <variable-name> element-attributes? ':' \
+ value-attributes? <value>
+ variable-flow-body
+ 'end' ';'?
+
+variable-for-stream:
+ (command-pipe '|')? \
+ 'for' (<opt>|stdin)* <variable-name> element-attributes? (stdin)*
+ variable-flow-body
+ 'end' ';'?
+
+variable-flow-body:
+ *variable-like
+\
+
+A group of variables can be set in a loop while iterating over elements of a
+list. The iteration semantics is the same as in \c{command-for}. For example:
+
+\
+uvalues =
+for v: $values
+ uvalues += $string.ucase($v)
+end
+\
+
+Another example:
+
+\
+uvalues =
+cat values.txt | for -n v
+ uvalues += $string.ucase($v)
+end
+\
+
+Or using the \c{stdin} redirect:
+
+\
+uvalues =
+for -n v <=values.txt
+ uvalues += $string.ucase($v)
+end
+\
+
+
+\h#syntax-variable-while|Variable-While|
+
+\
+variable-while:
+ 'while' command-line
+ variable-flow-body
+ 'end' ';'?
+
+variable-flow-body:
+ *variable-like
+\
+
+A group of variables can be set in a loop while the condition evaluates to
+\c{true}. The condition \c{command-line} semantics is the same as in
+\c{scope-if}. For example:
+
+\
+uvalues =
+i = [uint64] 0
+n = $size($values)
+while ($i != $n)
+ uvalues += $string.ucase($values[$i])
+ i += 1
+end
+\
+
\h#syntax-command|Command|
\
command-like:
- command-line|command-if
+ command-line|command-flow
command-line: command-expr (';'|(':' <text>))?
*here-document
@@ -1783,7 +1992,7 @@ command-exit: ('=='|'!=') <exit-status>
\
A command line is a command expression. If it appears directly (as opposed to
-inside \c{command-if}) in a test, then it can be followed by \c{;} to signal
+inside \c{command-flow}) in a test, then it can be followed by \c{;} to signal
the test continuation or by \c{:} and the trailing description.
A command expression can combine several command pipes with logical AND and OR
@@ -1808,25 +2017,26 @@ to succeed (0 exit code). The logical result of executing a command is
therefore a boolean value which is used in the higher-level constructs (pipe
and expression).
+
\h#syntax-command-if|Command-If|
\
command-if:
('if'|'if!') command-line
- command-if-body
+ command-flow-body
*command-elif
?command-else
'end' (';'|(':' <text>))?
command-elif:
('elif'|'elif!') command-line
- command-if-body
+ command-flow-body
command-else:
'else'
- command-if-body
+ command-flow-body
-command-if-body:
+command-flow-body:
*(variable-line|command-like)
\
@@ -1846,6 +2056,108 @@ end;
test1 $foo
\
+
+\h#syntax-command-for|Command-For|
+
+\
+command-for:
+ command-for-args|command-for-stream
+
+command-for-args:
+ 'for' <variable-name> element-attributes? ':' \
+ value-attributes? <value>
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
+command-for-stream:
+ (command-pipe '|')? \
+ 'for' (<opt>|stdin)* <variable-name> element-attributes? (stdin)*
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
+command-flow-body:
+ *(variable-line|command-like)
+\
+
+A group of commands can be executed in a loop while iterating over elements of
+a list and setting the specified variable (called \i{loop variable}) to the
+corresponding element on each iteration. At the end of the iteration the loop
+variable contains the value of the last element, if any. Note that in a
+compound test, commands inside \c{command-for} must not end with
+\c{;}. Rather, \c{;} may follow \c{end}.
+
+The \c{for} loop has two forms: In the first form the list is specified as
+arguments. Similar to the \c{for} loop in the Buildfile language, it can
+contain variable expansions, function calls, evaluation contexts, and/or
+literal values. For example:
+
+\
+for v: $values
+ test1 $v
+end;
+test2
+\
+
+In the second form the list is read from the \c{stdin} input. The input data
+is split into elements either at whitespaces (default) or newlines, which can
+be controlled with the \c{-n|--newline} and \c{-w|--whitespace} options.
+Overall, this form supports the same set of options as the \l{#builtins-set
+\c{set}} pseudo-builtin. For example:
+
+\
+cat values.txt | for -n v
+ test1 $v
+end
+\
+
+Or using the \c{stdin} redirect:
+
+\
+for -n v <=values.txt
+ test1 $v
+end
+\
+
+Both forms can include value attributes enclosed in \c{[]} to be applied to
+each element, again similar to the \l{#builtins-set \c{set}} pseudo-builtin.
+
+
+\h#syntax-command-while|Command-While|
+
+\
+command-while:
+ 'while' command-line
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
+command-flow-body:
+ *(variable-line|command-like)
+\
+
+A group of commands can be executed in a loop while a condition evaluates to
+\c{true}. The condition \c{command-line} semantics is the same as in
+\c{scope-if}. Note that in a compound test, commands inside \c{command-while}
+must not end with \c{;}. Rather, \c{;} may follow \c{end}. For example:
+
+\
+i = [uint64] 0;
+n = $size($values);
+while ($i != $n)
+ test1 ($values[$i])
+ i += 1
+end;
+test2
+\
+
+Another example:
+
+\
+while test -f $file
+ test1 $file
+end
+\
+
+
\h#syntax-redirect|Redirect|
\
@@ -1974,6 +2286,7 @@ Similar to the input redirects, an output here-document redirect must be
specified literally on the command line. See \l{#syntax-here-document Here
Document} for details.
+
\h#syntax-here-document|Here-Document|
\
@@ -2536,6 +2849,56 @@ false
Do nothing and terminate normally with the 1 exit code (indicating failure).
+\h#builtins-find|\c{find}|
+
+\
+find <start-path>... [<expression>]
+\
+
+Search for filesystem entries in a filesystem hierarchy. Traverse filesystem
+hierarchies from each \i{start-path} specified on the command line, evaluate
+for each filesystem entry the boolean \i{expression} consisting of the
+options-like arguments called \i{primaries}, and print the filesystem entry
+path if it evaluates to \c{true}, one path per line. The primaries are
+combined into the expression with an implicit logical AND operator. The empty
+expression always evaluates to \c{true}.
+
+Note that the implementation deviates from POSIX in a number of ways. It only
+supports a small subset of primaries and doesn't support compound expressions,
+negations, logical OR and (explicit) AND operators, and the \c{-type} primary
+values other than \c{f}, \c{d}, and \c{l}. It, however, supports the
+\c{-mindepth} and \c{-maxdepth} primaries which are not specified by POSIX but
+are supported by the major \c{find} utility implementations.
+
+The following primaries are supported:
+
+\dl|
+
+\li|\n\c{-name <pattern>}
+
+ Evaluates to \c{true} if a filesystem entry base name matches the specified
+ wildcard pattern.|
+
+\li|\n\c{-type <type>}
+
+ Evaluates to \c{true} if a filesystem entry type matches the specified type:
+ \c{f} for a regular file, \c{d} for a directory, and \c{l} for a symbolic
+ link.|
+
+\li|\n\c{-mindepth <depth>}
+
+ Evaluates to \c{true} if a filesystem entry directory level is not less than
+ the specified depth. The level of the \i{start-path} entries specified on
+ the command line is 0.|
+
+\li|\n\c{-maxdepth <depth>}
+
+ Evaluates to \c{true} if a filesystem entry directory level is not greater
+ than the specified depth. The level of the \i{start-path} entries specified
+ on the command line is 0. Note that the implementation is smart enough not
+ to traverse a directory when the maximum depth is reached.||
+
+
\h#builtins-ln|\c{ln}|
\
@@ -2772,6 +3135,7 @@ are supported.
\U - Convert next characters until \E to the upper case.
\L - Convert next characters until \E to the lower case.
+ \n - Newline.
\\\\ - Literal backslash.
\
@@ -2782,7 +3146,7 @@ are supported.
\h#builtins-set|\c{set}|
\
-set [-e] [-n|-w] [<attr>] <var>
+set [-e] [-n|-w] <var> [<attr>]
\
Set variable from the \c{stdin} input.
@@ -2819,7 +3183,7 @@ If the \i{attr} argument is specified, then it must contain a list of value
attributes enclosed in \c{[]}, for example:
\
-sed -e 's/foo/bar/' input | set [string] x
+sed -e 's/foo/bar/' input | set x [string]
\
Note that this is also the only way to set a variable with a computed name,
@@ -2827,7 +3191,7 @@ for example:
\
foo = FOO
-set [null] $foo <-
+set $foo [null] <-
\
\dl|
diff --git a/libbuild2/action.hxx b/libbuild2/action.hxx
index e149574..85012ba 100644
--- a/libbuild2/action.hxx
+++ b/libbuild2/action.hxx
@@ -45,16 +45,17 @@ namespace build2
// inner rule. In particular, it should not replace or override the inner's
// logic.
//
- // While most of the relevant target state is duplicated, certain things are
- // shared among the inner/outer rules, such as the target data pad and the
- // group state. In particular, it is assumed the group state is always
- // determined by the inner rule (see resolve_members()).
+ // While most of the action-specific target state is duplicated (see
+ // target::opstate), certain things are shared among the inner/outer rules,
+ // such as the path, mtime, and group state. In particular, it is assumed
+ // the group state is always determined by the inner rule (see
+ // resolve_members()).
//
// Normally, an outer rule will be responsible for any additional, outer
// operation-specific work. Sometimes, however, the inner rule needs to
// customize its behavior. In this case the outer and inner rules must
- // communicate this explicitly (normally via the target's data pad) and
- // there is a number of restrictions to this approach. See
+ // communicate this explicitly (normally via the target's auxiliary data
+ // storage) and there are a number of restrictions to this approach. See
// cc::{link,install}_rule for details.
//
struct action
@@ -150,6 +151,7 @@ namespace build2
// Id constants for build-in and pre-defined meta/operations.
//
// Note: currently max 15 (see above).
+ // Note: update small_vector in meta_operations if adding more.
//
const meta_operation_id noop_id = 1; // nomop?
const meta_operation_id perform_id = 2;
@@ -164,6 +166,7 @@ namespace build2
// something here remember to update the man page.
//
// Note: currently max 15 (see above).
+ // Note: update small_vector in operations if adding more.
//
const operation_id default_id = 1; // Shall be first.
const operation_id update_id = 2; // Shall be second.
@@ -176,6 +179,8 @@ namespace build2
const operation_id uninstall_id = 7;
const operation_id update_for_install_id = 8; // update(for install) alias.
+ // Commonly-used action ids.
+ //
const action_id perform_update_id = (perform_id << 4) | update_id;
const action_id perform_clean_id = (perform_id << 4) | clean_id;
const action_id perform_test_id = (perform_id << 4) | test_id;
diff --git a/libbuild2/adhoc-rule-buildscript.cxx b/libbuild2/adhoc-rule-buildscript.cxx
index 61b4cb2..ab5706c 100644
--- a/libbuild2/adhoc-rule-buildscript.cxx
+++ b/libbuild2/adhoc-rule-buildscript.cxx
@@ -5,13 +5,17 @@
#include <sstream>
+#include <libbutl/filesystem.hxx> // try_rm_file()
+
#include <libbuild2/depdb.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/dyndep.hxx>
#include <libbuild2/context.hxx>
#include <libbuild2/algorithm.hxx>
#include <libbuild2/filesystem.hxx> // path_perms(), auto_rmfile
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/make-parser.hxx>
#include <libbuild2/parser.hxx> // attributes
@@ -22,6 +26,111 @@ using namespace std;
namespace build2
{
+ static inline void
+ hash_script_vars (sha256& cs,
+ const build::script::script& s,
+ const scope& bs,
+ const target& t,
+ names& storage)
+ {
+ auto& vp (bs.var_pool ());
+
+ for (const string& n: s.vars)
+ {
+ cs.append (n);
+
+ lookup l;
+
+ if (const variable* var = vp.find (n))
+ l = t[var];
+
+ cs.append (!l.defined () ? '\x1' : l->null ? '\x2' : '\x3');
+
+ if (l)
+ {
+ storage.clear ();
+ names_view ns (reverse (*l, storage, true /* reduce */));
+
+ for (const name& n: ns)
+ to_checksum (cs, n);
+ }
+ }
+ }
+
+ // How should we hash target and prerequisite sets ($> and $<)? We could
+ // hash them as target names (i.e., the same as the $>/< content) or as
+ // paths (only for path-based targets). While names feel more general, they
+ // are also more expensive to compute. And for path-based targets, path is
+ // generally a good proxy for the target name. Since the bulk of the ad hoc
+ // recipes will presumably be operating exclusively on path-based targets,
+ // let's do it both ways.
+ //
+ static inline void
+ hash_target (sha256& cs, const target& t, names& storage)
+ {
+ if (const path_target* pt = t.is_a<path_target> ())
+ cs.append (pt->path ().string ());
+ else
+ {
+ storage.clear ();
+ t.as_name (storage);
+ for (const name& n: storage)
+ to_checksum (cs, n);
+ }
+ };
+
+ // The script can reference a program in one of four ways:
+ //
+ // 1. As an (imported) target (e.g., $cli)
+ //
+ // 2. As a process_path_ex (e.g., $cxx.path).
+ //
+ // 3. As a builtin (e.g., sed)
+ //
+ // 4. As a program path/name.
+ //
+ // When it comes to change tracking, there is nothing we can do for (4) (the
+ // user can track its environment manually with depdb-env) and there is
+ // nothing to do for (3) (assuming builtin semantics is stable/backwards-
+ // compatible). The (2) case is handled automatically by hashing all the
+ // variable values referenced by the script (see below), which in case of
+ // process_path_ex includes the checksums (both executable and environment),
+ // if available.
+ //
+ // This leaves the (1) case, which itself splits into two sub-cases: the
+ // target comes with the dependency information (e.g., imported from a
+ // project via an export stub) or it does not (e.g., imported as installed).
+ // We don't need to do anything extra for the first sub-case since the
+ // target's state/mtime can be relied upon like any other prerequisite.
+ // Which cannot be said about the second sub-case, where we rely on a
+ // checksum that may be included as part of the target metadata.
+ //
+ // So what we are going to do is hash checksum metadata of every executable
+ // prerequisite target that has it (we do it here in order to include ad hoc
+ // prerequisites, which feels like the right thing to do; the user may mark
+ // tools as ad hoc in order to omit them from $<).
+ //
+ static inline void
+ hash_prerequisite_target (sha256& cs, sha256& exe_cs, sha256& env_cs,
+ const target& pt,
+ names& storage)
+ {
+ hash_target (cs, pt, storage);
+
+ if (const exe* et = pt.is_a<exe> ())
+ {
+ if (const string* c = et->lookup_metadata<string> ("checksum"))
+ {
+ exe_cs.append (*c);
+ }
+
+ if (const strings* e = et->lookup_metadata<strings> ("environment"))
+ {
+ hash_environment (env_cs, *e);
+ }
+ }
+ }
+
bool adhoc_buildscript_rule::
recipe_text (const scope& s,
const target_type& tt,
@@ -77,7 +186,7 @@ namespace build2
{
os << " [";
os << "diag=";
- to_stream (os, name (*script.diag_name), true /* quote */, '@');
+ to_stream (os, name (*script.diag_name), quote_mode::normal, '@');
os << ']';
}
}
@@ -92,11 +201,7 @@ namespace build2
os << ind << "depdb clear" << endl;
script::dump (os, ind, script.depdb_preamble);
-
- if (script.diag_line)
- {
- os << ind; script::dump (os, *script.diag_line, true /* newline */);
- }
+ script::dump (os, ind, script.diag_preamble);
script::dump (os, ind, script.body);
ind.resize (ind.size () - 2);
@@ -106,30 +211,76 @@ namespace build2
bool adhoc_buildscript_rule::
reverse_fallback (action a, const target_type& tt) const
{
- // We can provide clean for a file target if we are providing update.
+ // We can provide clean for a file or group target if we are providing
+ // update.
//
- return a == perform_clean_id && tt.is_a<file> () &&
- find (actions.begin (), actions.end (),
- perform_update_id) != actions.end ();
+ return (a == perform_clean_id &&
+ (tt.is_a<file> () || tt.is_a<group> ()) &&
+ find (actions.begin (), actions.end (),
+ perform_update_id) != actions.end ());
}
+ using dynamic_target = build::script::parser::dynamic_target;
+ using dynamic_targets = build::script::parser::dynamic_targets;
+
+ struct adhoc_buildscript_rule::match_data
+ {
+ match_data (action a, const target& t, const scope& bs, bool temp_dir)
+ : env (a, t, bs, temp_dir) {}
+
+ build::script::environment env;
+ build::script::default_runner run;
+
+ path dd;
+ dynamic_targets dyn_targets;
+
+ const scope* bs;
+ timestamp mt;
+ bool deferred_failure;
+ };
+
+ struct adhoc_buildscript_rule::match_data_byproduct
+ {
+ match_data_byproduct (action a, const target& t,
+ const scope& bs,
+ bool temp_dir)
+ : env (a, t, bs, temp_dir) {}
+
+ build::script::environment env;
+ build::script::default_runner run;
+
+ build::script::parser::dyndep_byproduct byp;
+
+ depdb::reopen_state dd;
+ size_t skip_count = 0;
+ size_t pts_n; // Number of static prerequisites in prerequisite_targets.
+
+ const scope* bs;
+ timestamp mt;
+ };
+
bool adhoc_buildscript_rule::
- match (action a, target& t, const string& h, match_extra& me) const
+ match (action a, target& xt, const string& h, match_extra& me) const
{
+ const target& t (xt); // See adhoc_rule::match().
+
// We pre-parsed the script with the assumption it will be used on a
- // non/file-based target. Note that this should not be possible with
- // patterns.
+ // non/file-based (or file group-based) target. Note that this should not
+ // be possible with patterns.
//
if (pattern == nullptr)
{
- if ((t.is_a<file> () != nullptr) != ttype->is_a<file> ())
- {
+ // Let's not allow mixing file/group.
+ //
+ if ((t.is_a<file> () != nullptr) == ttype->is_a<file> () ||
+ (t.is_a<group> () != nullptr) == ttype->is_a<group> ())
+ ;
+ else
fail (loc) << "incompatible target types used with shared recipe" <<
- info << "all targets must be file-based or non-file-based";
- }
+ info << "all targets must be file- or file group-based or non";
}
- return adhoc_rule::match (a, t, h, me);
+ return adhoc_rule::match (a, xt, h, me);
}
recipe adhoc_buildscript_rule::
@@ -142,13 +293,24 @@ namespace build2
apply (action a,
target& t,
match_extra& me,
- const optional<timestamp>& d) const
+ const optional<timestamp>& deadline) const
{
+ tracer trace ("adhoc_buildscript_rule::apply");
+
+ // Handle matching group members (see adhoc_rule::match() for background).
+ //
+ if (const group* g = t.group != nullptr ? t.group->is_a<group> () : nullptr)
+ {
+ match_sync (a, *g);
+ return group_recipe; // Execute the group's recipe.
+ }
+
// We don't support deadlines for any of these cases (see below).
//
- if (d && (a.outer () ||
- me.fallback ||
- (a == perform_update_id && t.is_a<file> ())))
+ if (deadline && (a.outer () ||
+ me.fallback ||
+ (a == perform_update_id &&
+ (t.is_a<file> () || t.is_a<group> ()))))
return empty_recipe;
// If this is an outer operation (e.g., update-for-test), then delegate to
@@ -157,22 +319,97 @@ namespace build2
if (a.outer ())
{
match_inner (a, t);
- return execute_inner;
+ return inner_recipe;
}
- // Inject pattern's ad hoc group members, if any.
+ context& ctx (t.ctx);
+ const scope& bs (t.base_scope ());
+
+ group* g (t.is_a<group> ()); // Explicit group.
+
+ // Inject pattern's ad hoc group members, if any (explicit group members
+ // are injected after reset below).
//
- if (pattern != nullptr)
- pattern->apply_adhoc_members (a, t, me);
+ if (g == nullptr && pattern != nullptr)
+ pattern->apply_group_members (a, t, bs, me);
- // Derive file names for the target and its ad hoc group members, if any.
+ // Derive file names for the target and its static/ad hoc group members,
+ // if any.
//
if (a == perform_update_id || a == perform_clean_id)
{
- for (target* m (&t); m != nullptr; m = m->adhoc_member)
+ if (g != nullptr)
+ {
+ g->reset_members (a); // See group::group_members() for background.
+
+ // Note that we rely on the fact that if the group has static members,
+ // then they always come first in members and the first static member
+ // is a file.
+ //
+ for (const target& m: g->static_members)
+ g->members.push_back (&m);
+
+ g->members_static = g->members.size ();
+
+ if (pattern != nullptr)
+ {
+ pattern->apply_group_members (a, *g, bs, me);
+ g->members_static = g->members.size ();
+ }
+
+ if (g->members_static == 0)
+ {
+ if (!script.depdb_dyndep_dyn_target)
+ fail << "group " << *g << " has no static or dynamic members";
+ }
+ else
+ {
+ if (!g->members.front ()->is_a<file> ())
+ {
+ // We use the first static member to derive depdb path, get mtime,
+ // etc. So it must be file-based.
+ //
+ fail << "first static member " << g->members.front ()
+ << " of group " << *g << " is not a file";
+ }
+
+ // Derive paths for all the static members.
+ //
+ for (const target* m: g->members)
+ if (auto* p = m->is_a<path_target> ())
+ p->derive_path ();
+ }
+ }
+ else
{
- if (auto* p = m->is_a<path_target> ())
- p->derive_path ();
+ for (target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (auto* p = m->is_a<path_target> ())
+ p->derive_path ();
+ }
+ }
+ }
+ else if (g != nullptr)
+ {
+ // This could be, for example, configure/dist update which could need a
+ // "representative sample" of members (in order to be able to match the
+ // rules). So add static members unless we already have something
+ // cached.
+ //
+ if (g->group_members (a).members == nullptr) // Note: not g->member.
+ {
+ g->reset_members (a);
+
+ for (const target& m: g->static_members)
+ g->members.push_back (&m);
+
+ g->members_static = g->members.size ();
+
+ if (pattern != nullptr)
+ {
+ pattern->apply_group_members (a, *g, bs, me);
+ g->members_static = g->members.size ();
+ }
}
}
@@ -181,197 +418,1293 @@ namespace build2
// We do it always instead of only if one of the targets is path-based in
// case the recipe creates temporary files or some such.
//
- inject_fsdir (a, t);
+ // Note that we disable the prerequisite search for fsdir{} because of the
+ // prerequisites injected by the pattern. So we have to handle this ad hoc
+ // below.
+ //
+ const fsdir* dir (inject_fsdir (a, t, false /* prereq */));
// Match prerequisites.
//
- match_prerequisite_members (a, t);
+ // This is essentially match_prerequisite_members() but with support
+ // for update=unmatch|match.
+ //
+ auto& pts (t.prerequisite_targets[a]);
+ {
+ // Re-create the clean semantics as in match_prerequisite_members().
+ //
+ bool clean (a.operation () == clean_id && !t.is_a<alias> ());
+
+ // Add target's prerequisites.
+ //
+ for (prerequisite_member p: group_prerequisite_members (a, t))
+ {
+ // Note that we have to recognize update=unmatch|match for *(update),
+ // not just perform(update). But only actually do anything about it
+ // for perform(update).
+ //
+ lookup l; // The `update` variable value, if any.
+ include_type pi (
+ include (a, t, p, a.operation () == update_id ? &l : nullptr));
+
+ // Use prerequisite_target::include to signal update during match or
+ // unmatch.
+ //
+ uintptr_t mask (0);
+ if (l)
+ {
+ const string& v (cast<string> (l));
+
+ if (v == "match")
+ {
+ if (a == perform_update_id)
+ mask = prerequisite_target::include_udm;
+ }
+ else if (v == "unmatch")
+ {
+ if (a == perform_update_id)
+ mask = include_unmatch;
+ }
+ else if (v != "false" && v != "true" && v != "execute")
+ {
+ fail << "unrecognized update variable value '" << v
+ << "' specified for prerequisite " << p.prerequisite;
+ }
+ }
+
+ // Skip excluded.
+ //
+ if (!pi)
+ continue;
+
+ const target& pt (p.search (t));
+
+ if (&pt == dir) // Don't add injected fsdir{} twice.
+ continue;
+
+ if (clean && !pt.in (*bs.root_scope ()))
+ continue;
+
+ prerequisite_target pto (&pt, pi);
+
+ if (mask != 0)
+ pto.include |= mask;
+
+ pts.push_back (move (pto));
+ }
+
+ // Inject pattern's prerequisites, if any.
+ //
+ if (pattern != nullptr)
+ pattern->apply_prerequisites (a, t, bs, me);
+
+ // Start asynchronous matching of prerequisites. Wait with unlocked
+ // phase to allow phase switching.
+ //
+ wait_guard wg (ctx, ctx.count_busy (), t[a].task_count, true);
+
+ for (const prerequisite_target& pt: pts)
+ {
+ if (pt.target == dir) // Don't match injected fsdir{} twice.
+ continue;
+
+ match_async (a, *pt.target, ctx.count_busy (), t[a].task_count);
+ }
+
+ wg.wait ();
+
+ // Finish matching all the targets that we have started.
+ //
+ for (prerequisite_target& pt: pts)
+ {
+ if (pt.target == dir) // See above.
+ continue;
+
+ // Handle update=unmatch.
+ //
+ unmatch um ((pt.include & include_unmatch) != 0
+ ? unmatch::safe
+ : unmatch::none);
+
+ pair<bool, target_state> mr (match_complete (a, *pt.target, um));
+
+ if (um != unmatch::none)
+ {
+ l6 ([&]{trace << "unmatch " << *pt.target << ": " << mr.first;});
+
+ // If we managed to unmatch, blank it out so that it's not executed,
+ // etc. Otherwise, convert it to ad hoc (we also automatically avoid
+ // hashing it, updating it during match in exec_depdb_dyndep(), and
+ // making us out of date in execute_update_prerequisites()).
+ //
+ // The hashing part is tricky: by not hashing it we won't detect the
+ // case where it was removed as a prerequisite altogether. The
+ // thinking is that it was added with update=unmatch to extract some
+ // information (e.g., poptions from a library) and those will be
+ // change-tracked.
+ //
+ if (mr.first)
+ pt.target = nullptr;
+ else
+ pt.include |= prerequisite_target::include_adhoc;
+ }
+ }
+ }
+
+ // Read the list of dynamic targets and, optionally, fsdir{} prerequisites
+ // from depdb, if exists (used in a few depdb-dyndep --dyn-target handling
+ // places below).
+ //
+ auto read_dyn_targets = [] (path ddp, bool fsdir)
+ -> pair<dynamic_targets, dir_paths>
+ {
+ depdb dd (move (ddp), true /* read_only */);
+
+ pair<dynamic_targets, dir_paths> r;
+ while (dd.reading ()) // Breakout loop.
+ {
+ string* l;
+ auto read = [&dd, &l] () -> bool
+ {
+ return (l = dd.read ()) != nullptr;
+ };
+
+ if (!read ()) // Rule id.
+ break;
+
+ // We can omit this for as long as we don't break our blank line
+ // anchors semantics.
+ //
+#if 0
+ if (*l != rule_id_)
+ fail << "unable to clean dynamic target group " << t
+ << " with old depdb";
+#endif
+
+ // Note that we cannot read out expected lines since there can be
+ // custom depdb builtins. So we use the blank lines as anchors to
+ // skip to the parts we need.
+ //
+ // Skip until the first blank that separated custom depdb entries from
+ // the prerequisites list.
+ {
+ bool g;
+ while ((g = read ()) && !l->empty ()) ;
+ if (!g)
+ break;
+ }
- // Inject pattern's prerequisites, if any.
+ // Next read the prerequisites, detecting fsdir{} entries if asked.
+ //
+ {
+ bool g;
+ while ((g = read ()) && !l->empty ())
+ {
+ if (fsdir)
+ {
+ path p (*l);
+ if (p.to_directory ())
+ r.second.push_back (path_cast<dir_path> (move (p)));
+ }
+ }
+
+ if (!g)
+ break;
+ }
+
+ // Read the dynamic target files. We should always end with a blank
+ // line.
+ //
+ for (;;)
+ {
+ if (!read () || l->empty ())
+ break;
+
+ // Split into type and path.
+ //
+ size_t p (l->find (' '));
+ if (p == string::npos || // Invalid format.
+ p == 0 || // Empty type.
+ p + 1 == l->size ()) // Empty path.
+ break;
+
+ r.first.push_back (
+ dynamic_target {string (*l, 0, p), path (*l, p + 1, string::npos)});
+ }
+
+ break;
+ }
+
+ return r;
+ };
+
+ // Target path to derive the depdb path, query mtime (if file), etc.
+ //
+ // To derive the depdb path for a group with at least one static member we
+ // use the path of the first member. For a group without any static
+ // members we use the group name with the target type name as the
+ // second-level extension.
//
- if (pattern != nullptr)
- pattern->apply_prerequisites (a, t, me);
+ auto target_path = [&t, g, p = path ()] () mutable -> const path&
+ {
+ return
+ g == nullptr ? t.as<file> ().path () :
+ g->members_static != 0 ? g->members.front ()->as<file> ().path () :
+ (p = g->dir / (g->name + '.' + g->type ().name));
+ };
// See if we are providing the standard clean as a fallback.
//
if (me.fallback)
- return &perform_clean_depdb;
+ {
+ // For depdb-dyndep --dyn-target use depdb to clean dynamic targets.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ {
+ // Note that only removing the relevant filesystem entries is not
+ // enough: we actually have to populate the group with members since
+ // this information could be used to clean derived targets (for
+ // example, object files). So we just do that and let the standard
+ // clean logic take care of them the same as static members.
+ //
+ // NOTE that this logic should be consistent with what we have in
+ // exec_depdb_dyndep().
+ //
+ using dyndep = dyndep_rule;
- if (a == perform_update_id && t.is_a<file> ())
+ function<dyndep::group_filter_func> filter;
+ if (g != nullptr)
+ {
+ filter = [] (mtime_target& g, const build2::file& m)
+ {
+ auto& ms (g.as<group> ().members);
+ return find (ms.begin (), ms.end (), &m) == ms.end ();
+ };
+ }
+
+ pair<dynamic_targets, dir_paths> p (
+ read_dyn_targets (target_path () + ".d", true));
+
+ for (dynamic_target& dt: p.first)
+ {
+ path& f (dt.path);
+
+ // Resolve target type. Clean it as file if unable to.
+ //
+ const target_type* tt (bs.find_target_type (dt.type));
+ if (tt == nullptr)
+ tt = &file::static_type;
+
+ if (g != nullptr)
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_group_member (a, bs, *g, move (f), *tt, filter));
+
+ if (r.second)
+ g->members.push_back (&r.first);
+ }
+ else
+ {
+ // Note that here we don't bother cleaning any old dynamic targets
+ // -- the more we can clean, the merrier.
+ //
+ dyndep::inject_adhoc_group_member (a, bs, t, move (f), *tt);
+ }
+ }
+
+ // Enter fsdir{} prerequisites.
+ //
+ // See the add lambda in exec_depdb_dyndep() for background.
+ //
+ for (dir_path& d: p.second)
+ {
+ dir_path o; string n; // For GCC 13 -Wdangling-reference.
+ const fsdir& dt (search<fsdir> (t,
+ move (d),
+ move (o),
+ move (n), nullptr, nullptr));
+ match_sync (a, dt);
+ pts.push_back (prerequisite_target (&dt, true /* adhoc */));
+ }
+ }
+
+ return g == nullptr ? perform_clean_file : perform_clean_group;
+ }
+
+ // If we have any update during match prerequisites, now is the time to
+ // update them.
+ //
+ // Note that we ignore the result and whether it renders us out of date,
+ // leaving it to the common execute logic in perform_update_*().
+ //
+ // Note also that update_during_match_prerequisites() spoils
+ // prerequisite_target::data.
+ //
+ if (a == perform_update_id)
+ update_during_match_prerequisites (trace, a, t);
+
+ // See if this is not update or not on a file/group-based target.
+ //
+ if (a != perform_update_id || !(g != nullptr || t.is_a<file> ()))
{
- return [this] (action a, const target& t)
+ // Make sure we get small object optimization.
+ //
+ if (deadline)
{
- return perform_update_file (a, t);
- };
+ return [dv = *deadline, this] (action a, const target& t)
+ {
+ return default_action (a, t, dv);
+ };
+ }
+ else
+ {
+ return [this] (action a, const target& t)
+ {
+ return default_action (a, t, nullopt);
+ };
+ }
}
- else
+
+ // This is a perform update on a file or group target.
+ //
+ // See if this is the simple case with only static dependencies.
+ //
+ if (!script.depdb_dyndep)
{
- return [d, this] (action a, const target& t)
+ return [this] (action a, const target& t)
{
- return default_action (a, t, d);
+ return perform_update_file_or_group (a, t);
};
}
- }
- target_state adhoc_buildscript_rule::
- perform_update_file (action a, const target& xt) const
- {
- tracer trace ("adhoc_buildscript_rule::perform_update_file");
+ // This is a perform update on a file or group target with extraction of
+ // dynamic dependency information either in the depdb preamble
+ // (depdb-dyndep without --byproduct) or as a byproduct of the recipe body
+ // execution (depdb-dyndep with --byproduct).
+ //
+ // For the former case, we may need to add additional prerequisites (or
+ // even target group members). We also have to save any such additional
+ // prerequisites in depdb so that we can check if any of them have changed
+ // on subsequent updates. So all this means that we have to take care of
+ // depdb here in apply() instead of perform_*() like we normally do. We
+ // also do things in slightly different order due to the restrictions
+ // imposed by the match phase.
+ //
+ // The latter case (depdb-dyndep --byproduct) is sort of a combination
+ // of the normal dyndep and the static case: we check the depdb during
+ // match but save after executing the recipe.
+ //
+ // Note that the C/C++ header dependency extraction is the canonical
+ // example and all this logic is based on the prior work in the cc module
+ // where you can often find more detailed rationale for some of the steps
+ // performed (like the fsdir update below).
- context& ctx (xt.ctx);
+ // Re-acquire fsdir{} specified by the user, similar to inject_fsdir()
+ // (which we have disabled; see above).
+ //
+ if (dir == nullptr)
+ {
+ for (const target* pt: pts)
+ {
+ if (pt != nullptr)
+ {
+ if (const fsdir* dt = pt->is_a<fsdir> ())
+ {
+ if (dt->dir == t.dir)
+ {
+ dir = dt;
+ break;
+ }
+ }
+ }
+ }
+ }
- const file& t (xt.as<file> ());
- const path& tp (t.path ());
+ if (dir != nullptr)
+ fsdir_rule::perform_update_direct (a, t);
- // How should we hash target and prerequisite sets ($> and $<)? We could
- // hash them as target names (i.e., the same as the $>/< content) or as
- // paths (only for path-based targets). While names feel more general,
- // they are also more expensive to compute. And for path-based targets,
- // path is generally a good proxy for the target name. Since the bulk of
- // the ad hoc recipes will presumably be operating exclusively on
- // path-based targets, let's do it both ways.
+ // Because the depdb preamble can access $<, we have to blank out all the
+ // ad hoc prerequisites. Since we will still need them later, we "move"
+ // them to the auxiliary data member in prerequisite_target (see
+ // execute_update_prerequisites() for details).
+ //
+ // @@ This actually messes up with updated_during_match() check. Could
+ // we not redo this so that we always keep p.target intact? Can't
+ // we just omit p.adhoc() targets from $<?
//
- auto hash_target = [ns = names ()] (sha256& cs, const target& t) mutable
+ for (prerequisite_target& p: pts)
{
- if (const path_target* pt = t.is_a<path_target> ())
- cs.append (pt->path ().string ());
- else
+ // Note that fsdir{} injected above is adhoc.
+ //
+ if (p.target != nullptr && p.adhoc ())
{
- ns.clear ();
- t.as_name (ns);
- for (const name& n: ns)
- to_checksum (cs, n);
+ p.data = reinterpret_cast<uintptr_t> (p.target);
+ p.target = nullptr;
}
- };
+ }
- // Update prerequisites and determine if any of them render this target
- // out-of-date.
+ const path& tp (target_path ());
+
+ // Note that while it's tempting to turn match_data* into recipes, some of
+ // their members are not movable. And in the end we will have the same
+ // result: one dynamic memory allocation.
//
- timestamp mt (t.load_mtime ());
- optional<target_state> ps;
+ unique_ptr<match_data> md;
+ unique_ptr<match_data_byproduct> mdb;
- sha256 prq_cs, exe_cs, env_cs;
+ dynamic_targets old_dyn_targets;
+
+ if (script.depdb_dyndep_byproduct)
+ {
+ mdb.reset (new match_data_byproduct (
+ a, t, bs, script.depdb_preamble_temp_dir));
+ }
+ else
{
- // This is essentially ps=execute_prerequisites(a, t, mt) which we
- // cannot use because we need to see ad hoc prerequisites.
+ md.reset (new match_data (a, t, bs, script.depdb_preamble_temp_dir));
+
+ // If the set of dynamic targets can change based on changes to the
+ // inputs (say, each entity, such as a type, in the input file gets its
+ // own output file), then we can end up with a large number of old
+ // output files laying around because they are not part of the new
+ // dynamic target set. So we try to clean them up based on the old depdb
+ // information, similar to how we do it for perform_clean above (except
+ // here we will just keep the list of old files).
//
- size_t busy (ctx.count_busy ());
- size_t exec (ctx.count_executed ());
+ // Note: do before opening depdb, which can start over-writing it.
+ //
+ // We also have to do this speculatively, without knowing whether we
+ // will need to update. Oh, well, being dynamic ain't free.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ old_dyn_targets = read_dyn_targets (tp + ".d", false).first;
+ }
+
+ depdb dd (tp + ".d");
+
+ // NOTE: see the "static dependencies" version (with comments) below.
+ //
+ // NOTE: We use blank lines as anchors to skip directly to certain entries
+ // (e.g., dynamic targets). So make sure none of the other entries
+ // can be blank (for example, see `depdb string` builtin).
+ //
+ // NOTE: KEEP IN SYNC WITH read_dyn_targets ABOVE!
+ //
+ if (dd.expect ("<ad hoc buildscript recipe> 1") != nullptr)
+ l4 ([&]{trace << "rule mismatch forcing update of " << t;});
- target_state rs (target_state::unchanged);
+ if (dd.expect (checksum) != nullptr)
+ l4 ([&]{trace << "recipe text change forcing update of " << t;});
+
+ if (!script.depdb_clear)
+ {
+ names storage;
- wait_guard wg (ctx, busy, t[a].task_count);
+ sha256 prq_cs, exe_cs, env_cs;
- for (const target*& pt: t.prerequisite_targets[a])
+ for (const prerequisite_target& p: pts)
{
- if (pt == nullptr) // Skipped.
- continue;
+ if (const target* pt =
+ (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr))
+ {
+ if ((p.include & include_unmatch) != 0) // Skip update=unmatch.
+ continue;
- target_state s (execute_async (a, *pt, busy, t[a].task_count));
+ hash_prerequisite_target (prq_cs, exe_cs, env_cs, *pt, storage);
+ }
+ }
+
+ {
+ sha256 cs;
+ hash_script_vars (cs, script, bs, t, storage);
+
+ if (dd.expect (cs.string ()) != nullptr)
+ l4 ([&]{trace << "recipe variable change forcing update of " << t;});
+ }
- if (s == target_state::postponed)
+ // Static targets and prerequisites (there can also be dynamic targets;
+ // see dyndep --dyn-target).
+ //
+ {
+ sha256 tcs;
+ if (g == nullptr)
{
- rs |= s;
- pt = nullptr;
+ // There is a nuance: in an operation batch (e.g., `b update
+ // update`) we will already have the dynamic targets as members on
+ // the subsequent operations and we need to make sure we don't treat
+ // them as static. Using target_decl to distinguish the two seems
+ // like a natural way.
+ //
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (m->decl == target_decl::real)
+ hash_target (tcs, *m, storage);
+ }
+ }
+ else
+ {
+ // Feels like there is not much sense in hashing the group itself.
+ //
+ for (const target* m: g->members)
+ hash_target (tcs, *m, storage);
}
+
+ if (dd.expect (tcs.string ()) != nullptr)
+ l4 ([&]{trace << "target set change forcing update of " << t;});
+
+ if (dd.expect (prq_cs.string ()) != nullptr)
+ l4 ([&]{trace << "prerequisite set change forcing update of " << t;});
}
- wg.wait ();
+ {
+ if (dd.expect (exe_cs.string ()) != nullptr)
+ l4 ([&]{trace << "program checksum change forcing update of " << t;});
+
+ if (dd.expect (env_cs.string ()) != nullptr)
+ l4 ([&]{trace << "environment change forcing update of " << t;});
+ }
+ }
+
+ // Get ready to run the depdb preamble.
+ //
+ build::script::environment& env (mdb != nullptr ? mdb->env : md->env);
+ build::script::default_runner& run (mdb != nullptr ? mdb->run : md->run);
+
+ run.enter (env, script.start_loc);
+
+ // Run the first half of the preamble (before depdb-dyndep).
+ //
+ {
+ build::script::parser p (ctx);
+ p.execute_depdb_preamble (a, bs, t, env, script, run, dd);
- bool e (mt == timestamp_nonexistent);
- for (prerequisite_target& p: t.prerequisite_targets[a])
+ // Write a blank line after the custom depdb entries and before
+ // prerequisites, which we use as an anchor (see read_dyn_targets
+ // above). We only do it for the new --dyn-target mode in order not to
+ // invalidate the existing depdb instances.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ dd.expect ("");
+ }
+
+ // Determine if we need to do an update based on the above checks.
+ //
+ bool update (false);
+ timestamp mt;
+
+ if (dd.writing ())
+ update = true;
+ else
+ {
+ if (g == nullptr)
{
- if (p == nullptr)
- continue;
+ const file& ft (t.as<file> ());
- const target& pt (*p.target);
+ if ((mt = ft.mtime ()) == timestamp_unknown)
+ ft.mtime (mt = mtime (tp)); // Cache.
+ }
+ else
+ {
+ // Use static member, old dynamic, or force update.
+ //
+ const path* p (
+ g->members_static != 0
+ ? &tp /* first static member path */
+ : (!old_dyn_targets.empty ()
+ ? &old_dyn_targets.front ().path
+ : nullptr));
+
+ if (p != nullptr)
+ mt = g->load_mtime (*p);
+ else
+ update = true;
+ }
+
+ if (!update)
+ update = dd.mtime > mt;
+ }
+
+ if (update)
+ mt = timestamp_nonexistent;
+
+ if (script.depdb_dyndep_byproduct)
+ {
+ // If we have the dynamic dependency information as byproduct of the
+ // recipe body, then do the first part: verify the entries in depdb
+ // unless we are already updating. Essentially, this is the `if(cache)`
+ // equivalent of the restart loop in exec_depdb_dyndep().
+
+ using dyndep = dyndep_rule;
+
+ // Update our prerequisite targets and extract the depdb-dyndep
+ // command's information (we may also execute some variable
+ // assignments).
+ //
+ // Do we really need to update our prerequisite targets in this case?
+ // While it may seem like we should be able to avoid it by triggering
+ // update on encountering any non-existent files in depdb, we may
+ // actually incorrectly "validate" some number of depdb entries while
+ // having an out-of-date main source file. We could probably avoid the
+ // update if we are already updating (or not: there is pre-generation
+ // to consider; see inject_existing_file() for details).
+ //
+ {
+ build::script::parser p (ctx);
+ mdb->byp = p.execute_depdb_preamble_dyndep_byproduct (
+ a, bs, t,
+ env, script, run,
+ dd, update, mt);
+ }
- ctx.sched.wait (exec, pt[a].task_count, scheduler::work_none);
+ mdb->pts_n = pts.size ();
- target_state s (pt.executed_state (a));
- rs |= s;
+ if (!update)
+ {
+ const auto& byp (mdb->byp);
+ const char* what (byp.what.c_str ());
+ const location& ll (byp.location);
+
+ function<dyndep::map_extension_func> map_ext (
+ [] (const scope& bs, const string& n, const string& e)
+ {
+ // NOTE: another version in exec_depdb_dyndep().
- // Compare our timestamp to this prerequisite's.
+ return dyndep::map_extension (bs, n, e, nullptr);
+ });
+
+ // Similar to exec_depdb_dyndep()::add() but only for cache=true and
+ // without support for generated files.
+ //
+ // Note that we have to update each file for the same reason as the
+ // main source file -- if any of them changed, then we must assume the
+ // subsequent entries are invalid.
//
- if (!e)
+ size_t& skip_count (mdb->skip_count);
+
+ auto add = [&trace, what,
+ a, &bs, &t, pts_n = mdb->pts_n,
+ &byp, &map_ext,
+ &skip_count, mt] (path fp) -> optional<bool>
{
- // If this is an mtime-based target, then compare timestamps.
+ if (const build2::file* ft = dyndep::enter_file (
+ trace, what,
+ a, bs, t,
+ fp, true /* cache */, true /* normalized */,
+ map_ext, *byp.default_type).first)
+ {
+ // Note: mark the injected prerequisite target as updated (see
+ // execute_update_prerequisites() for details).
+ //
+ if (optional<bool> u = dyndep::inject_existing_file (
+ trace, what,
+ a, t, pts_n,
+ *ft, mt,
+ false /* fail */,
+ false /* adhoc */,
+ 1 /* data */))
+ {
+ skip_count++;
+ return *u;
+ }
+ }
+
+ return nullopt;
+ };
+
+ auto df = make_diag_frame (
+ [&ll, &t] (const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info (ll) << "while extracting dynamic dependencies for "
+ << t;
+ });
+
+ while (!update)
+ {
+ // We should always end with a blank line.
+ //
+ string* l (dd.read ());
+
+ // If the line is invalid, run the compiler.
//
- if (const mtime_target* mpt = pt.is_a<mtime_target> ())
+ if (l == nullptr)
+ {
+ update = true;
+ break;
+ }
+
+ if (l->empty ()) // Done, nothing changed.
+ break;
+
+ if (optional<bool> r = add (path (move (*l))))
{
- if (mpt->newer (mt, s))
- e = true;
+ if (*r)
+ update = true;
}
else
{
- // Otherwise we assume the prerequisite is newer if it was
- // changed.
+ // Invalidate this line and trigger update.
//
- if (s == target_state::changed)
- e = true;
+ dd.write ();
+ update = true;
}
+
+ if (update)
+ l6 ([&]{trace << "restarting (cache)";});
}
+ }
- if (p.adhoc)
- p.target = nullptr; // Blank out.
+ // Note that in case of dry run we will have an incomplete (but valid)
+ // database which will be updated on the next non-dry run.
+ //
+ if (!update || ctx.dry_run_option)
+ dd.close (false /* mtime_check */);
+ else
+ mdb->dd = dd.close_to_reopen ();
- // As part of this loop calculate checksums that need to include ad
- // hoc prerequisites (unless the script tracks changes itself).
- //
- if (script.depdb_clear)
- continue;
+ // Pass on base scope and update/mtime.
+ //
+ mdb->bs = &bs;
+ mdb->mt = update ? timestamp_nonexistent : mt;
- hash_target (prq_cs, pt);
+ return [this, md = move (mdb)] (action a, const target& t)
+ {
+ return perform_update_file_or_group_dyndep_byproduct (a, t, *md);
+ };
+ }
+ else
+ {
+ // Run the second half of the preamble (depdb-dyndep commands) to update
+ // our prerequisite targets and extract dynamic dependencies (targets
+ // and prerequisites).
+ //
+ // Note that this should be the last update to depdb (the invalidation
+ // order semantics).
+ //
+ md->deferred_failure = false;
+ {
+ build::script::parser p (ctx);
+ p.execute_depdb_preamble_dyndep (a, bs, t,
+ env, script, run,
+ dd,
+ md->dyn_targets,
+ update,
+ mt,
+ md->deferred_failure);
+ }
- // The script can reference a program in one of four ways:
- //
- // 1. As an (imported) target (e.g., $cli)
- //
- // 2. As a process_path_ex (e.g., $cxx.path).
- //
- // 3. As a builtin (e.g., sed)
- //
- // 4. As a program path/name.
- //
- // When it comes to change tracking, there is nothing we can do for
- // (4) (the user can track its environment manually with depdb-env)
- // and there is nothing to do for (3) (assuming builtin semantics is
- // stable/backwards-compatible). The (2) case is handled automatically
- // by hashing all the variable values referenced by the script (see
- // below), which in case of process_path_ex includes the checksums
- // (both executable and environment), if available.
+ if (update && dd.reading () && !ctx.dry_run_option)
+ dd.touch = timestamp_unknown;
+
+ dd.close (false /* mtime_check */);
+
+ // Remove previous dynamic targets since their set may change with
+ // changes to the inputs.
+ //
+ // The dry-run mode complicates things: if we don't remove the old
+ // files, then that information will be gone (since we update depdb even
+ // in the dry-run mode). But if we remove everything in the dry-run
+ // mode, then we may also remove some of the current files, which would
+ // be incorrect. So let's always remove but only files that are not in
+ // the current set.
+ //
+ // Note that we used to do this in perform_update_file_or_group_dyndep()
+ // but that had a tricky issue: if we end up performing match but not
+ // execute (e.g., via the resolve_members() logic), then we will not
+ // cleanup old targets but lose this information (since the depdb has
+ // been updated). So now we do it here, which is a bit strange, but it
+ // sort of fits into that dry-run logic above. Note also that we do this
+ // unconditionally, update or not, since if everything is up to date,
+ // then old and new sets should be the same.
+ //
+ for (const dynamic_target& dt: old_dyn_targets)
+ {
+ const path& f (dt.path);
+
+ if (find_if (md->dyn_targets.begin (), md->dyn_targets.end (),
+ [&f] (const dynamic_target& dt)
+ {
+ return dt.path == f;
+ }) == md->dyn_targets.end ())
+ {
+ // This is an optimization so best effort.
+ //
+ if (optional<rmfile_status> s = butl::try_rmfile_ignore_error (f))
+ {
+ if (s == rmfile_status::success && verb >= 2)
+ text << "rm " << f;
+ }
+ }
+ }
+
+ // Pass on the base scope, depdb path, and update/mtime.
+ //
+ md->bs = &bs;
+ md->dd = move (dd.path);
+ md->mt = update ? timestamp_nonexistent : mt;
+
+ return [this, md = move (md)] (action a, const target& t)
+ {
+ return perform_update_file_or_group_dyndep (a, t, *md);
+ };
+ }
+ }
+
+ target_state adhoc_buildscript_rule::
+ perform_update_file_or_group_dyndep_byproduct (
+ action a, const target& t, match_data_byproduct& md) const
+ {
+ // Note: using shared function name among the three variants.
+ //
+ tracer trace (
+ "adhoc_buildscript_rule::perform_update_file_or_group_dyndep_byproduct");
+
+ context& ctx (t.ctx);
+
+ // For a group we use the first (for now static) member as a source of
+ // mtime.
+ //
+ // @@ TODO: expl: byproduct: Note that until we support dynamic targets in
+ // the byproduct mode, we verify there is at least one static member in
+ // apply() above. Once we do support this, we will need to verify after
+ // the dependency extraction below.
+ //
+ const group* g (t.is_a<group> ());
+
+ // Note that even if we've updated all our prerequisites in apply(), we
+ // still need to execute them here to keep the dependency counts straight.
+ //
+ optional<target_state> ps (execute_update_prerequisites (a, t, md.mt));
+
+ if (!ps)
+ md.mt = timestamp_nonexistent; // Update.
+
+ build::script::environment& env (md.env);
+ build::script::default_runner& run (md.run);
+
+ if (md.mt != timestamp_nonexistent)
+ {
+ run.leave (env, script.end_loc);
+ return *ps;
+ }
+
+ const scope& bs (*md.bs);
+
+ // Sequence start time for mtime checks below.
+ //
+ timestamp start (!ctx.dry_run && depdb::mtime_check ()
+ ? system_clock::now ()
+ : timestamp_unknown);
+
+ if (!ctx.dry_run || verb != 0)
+ {
+ if (g == nullptr)
+ execute_update_file (bs, a, t.as<file> (), env, run);
+ else
+ {
+ // Note: no dynamic members yet.
//
- // This leaves the (1) case, which itself splits into two sub-cases:
- // the target comes with the dependency information (e.g., imported
- // from a project via an export stub) or it does not (e.g., imported
- // as installed). We don't need to do anything extra for the first
- // sub-case since the target's state/mtime can be relied upon like any
- // other prerequisite. Which cannot be said about the second sub-case,
- // where we reply on checksum that may be included as part of the
- // target metadata.
+ execute_update_group (bs, a, *g, env, run);
+ }
+ }
+
+ // Extract the dynamic dependency information as byproduct of the recipe
+ // body. Essentially, this is the `if(!cache)` equivalent of the restart
+ // loop in exec_depdb_dyndep().
+ //
+ if (!ctx.dry_run)
+ {
+ using dyndep = dyndep_rule;
+ using dyndep_format = build::script::parser::dyndep_format;
+
+ depdb dd (move (md.dd));
+
+ const auto& byp (md.byp);
+ const location& ll (byp.location);
+ const char* what (byp.what.c_str ());
+ const path& file (byp.file);
+
+ env.clean ({build2::script::cleanup_type::always, file},
+ true /* implicit */);
+
+ function<dyndep::map_extension_func> map_ext (
+ [] (const scope& bs, const string& n, const string& e)
+ {
+ // NOTE: another version in exec_depdb_dyndep() and above.
+
+ return dyndep::map_extension (bs, n, e, nullptr);
+ });
+
+ // Analogous to exec_depdb_dyndep()::add() but only for cache=false.
+ // The semantics is quite different, however: instead of updating the
+ // dynamic prerequisites we verify they are not generated.
+ //
+ // Note that fp is expected to be absolute.
+ //
+ size_t skip (md.skip_count);
+ const auto& pts (t.prerequisite_targets[a]);
+
+ auto add = [&trace, what,
+ a, &bs, &t, g, &pts, pts_n = md.pts_n,
+ &byp, &map_ext, &dd, &skip] (path fp)
+ {
+ normalize_external (fp, what);
+
+ // Note that unless we take into account dynamic targets, the skip
+ // logic below falls apart since we neither see targets entered via
+ // prerequisites (skip static prerequisites) nor by the cache=true code
+ // above (skip depdb entries).
//
- // So what we are going to do is hash checksum metadata of every
- // executable prerequisite target that has it (we do it here in order
- // to include ad hoc prerequisites, which feels like the right thing
- // to do; the user may mark tools as ad hoc in order to omit them from
- // $<).
+ // If this turns out to be racy (which is the reason we would skip
+ // dynamic targets; see the find_file() implementation for details),
+ // then the only answer for now is to not use the byproduct mode.
//
- if (auto* et = pt.is_a<exe> ())
+ if (const build2::file* ft = dyndep::find_file (
+ trace, what,
+ a, bs, t,
+ fp, false /* cache */, true /* normalized */,
+ true /* dynamic */,
+ map_ext, *byp.default_type).first)
{
- if (auto* c = et->lookup_metadata<string> ("checksum"))
+ // Skip if this is one of the static prerequisites provided it was
+ // updated.
+ //
+ for (size_t i (0); i != pts_n; ++i)
+ {
+ const prerequisite_target& p (pts[i]);
+
+ if (const target* pt =
+ (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr))
+ {
+ if (ft == pt && (p.adhoc () || p.data == 1))
+ return;
+ }
+ }
+
+ // Skip if this is one of the targets (see the non-byproduct version
+ // for background).
+ //
+ if (byp.drop_cycles)
{
- exe_cs.append (*c);
+ if (g != nullptr)
+ {
+ auto& ms (g->members);
+ if (find (ms.begin (), ms.end (), ft) != ms.end ())
+ return;
+ }
+ else
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ return;
+ }
+ }
}
- if (auto* e = et->lookup_metadata<strings> ("environment"))
+ // Skip until where we left off.
+ //
+ if (skip != 0)
{
- hash_environment (env_cs, *e);
+ --skip;
+ return;
}
+
+ // Verify it has noop recipe.
+ //
+ // @@ Currently we will issue an imprecise diagnostics if this is
+ // a static prerequisite that was not updated (see above).
+ //
+ dyndep::verify_existing_file (trace, what, a, t, pts_n, *ft);
}
+
+ dd.write (fp);
+ };
+
+ auto df = make_diag_frame (
+ [&ll, &t] (const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info (ll) << "while extracting dynamic dependencies for "
+ << t;
+ });
+
+ ifdstream is (ifdstream::badbit);
+ try
+ {
+ is.open (file);
}
+ catch (const io_error& e)
+ {
+ fail (ll) << "unable to open file " << file << ": " << e;
+ }
+
+ location il (file, 1);
+
+ // The way we parse things is format-specific.
+ //
+ // Note: similar code in exec_depdb_dyndep(). Except here we just add
+ // the paths to depdb without entering them as targets.
+ //
+ switch (md.byp.format)
+ {
+ case dyndep_format::make:
+ {
+ using make_state = make_parser;
+ using make_type = make_parser::type;
+
+ make_parser make;
+
+ for (string l;; ++il.line) // Reuse the buffer.
+ {
+ if (eof (getline (is, l)))
+ {
+ if (make.state != make_state::end)
+ fail (il) << "incomplete make dependency declaration";
+
+ break;
+ }
+
+ size_t pos (0);
+ do
+ {
+ // Note that we don't really need a diag frame that prints the
+ // line being parsed since we are always parsing the file.
+ //
+ pair<make_type, path> r (make.next (l, pos, il));
+
+ if (r.second.empty ())
+ continue;
+
+ // Note: no support for dynamic targets in byproduct mode.
+ //
+ if (r.first == make_type::target)
+ continue;
+
+ path& f (r.second);
+
+ if (f.relative ())
+ {
+ if (!byp.cwd)
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in make dependency declaration" <<
+ info << "consider using --cwd to specify relative path "
+ << "base";
+
+ f = *byp.cwd / f;
+ }
+
+ add (move (f));
+ }
+ while (pos != l.size ());
+
+ if (make.state == make_state::end)
+ break;
+ }
+
+ break;
+ }
+ case dyndep_format::lines:
+ {
+ for (string l;; ++il.line) // Reuse the buffer.
+ {
+ if (eof (getline (is, l)))
+ break;
+
+ if (l.empty ())
+ fail (il) << "blank line in prerequisites list";
+
+ if (l.front () == ' ')
+ fail (il) << "non-existent prerequisite in --byproduct mode";
+
+ path f;
+ try
+ {
+ f = path (l);
+
+ // fsdir{} prerequisites only make sense with dynamic targets.
+ //
+ if (f.to_directory ())
+ throw invalid_path ("");
+
+ if (f.relative ())
+ {
+ if (!byp.cwd)
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --cwd to specify "
+ << "relative path base";
+
+ f = *byp.cwd / f;
+ }
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what << " prerequisite path '"
+ << l << "'";
+ }
+
+ add (move (f));
+ }
+
+ break;
+ }
+ }
+
+ // Add the terminating blank line.
+ //
+ dd.expect ("");
+ dd.close ();
+
+ //@@ TODO: expl: byproduct: verify have at least one member.
+
+ md.dd.path = move (dd.path); // For mtime check below.
+ }
+
+ run.leave (env, script.end_loc);
+
+ timestamp now (system_clock::now ());
+
+ if (!ctx.dry_run)
+ {
+ // Only now we know for sure there must be a member in the group.
+ //
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+
+ depdb::check_mtime (start, md.dd.path, ft.path (), now);
+ }
+
+ (g == nullptr
+ ? static_cast<const mtime_target&> (t.as<file> ())
+ : static_cast<const mtime_target&> (*g)).mtime (now);
+
+ return target_state::changed;
+ }
+
+ target_state adhoc_buildscript_rule::
+ perform_update_file_or_group_dyndep (
+ action a, const target& t, match_data& md) const
+ {
+ tracer trace (
+ "adhoc_buildscript_rule::perform_update_file_or_group_dyndep");
+
+ context& ctx (t.ctx);
+
+ // For a group we use the first (static or dynamic) member as a source of
+ // mtime. Note that in this case there must be at least one since we fail
+ // if we were unable to extract any dynamic members and there are no
+ // static (see exec_depdb_dyndep()).
+ //
+ const group* g (t.is_a<group> ());
+
+ // Note that even if we've updated all our prerequisites in apply(), we
+ // still need to execute them here to keep the dependency counts straight.
+ //
+ optional<target_state> ps (execute_update_prerequisites (a, t, md.mt));
+
+ if (!ps)
+ md.mt = timestamp_nonexistent; // Update.
+
+ build::script::environment& env (md.env);
+ build::script::default_runner& run (md.run);
+
+ // Force update in case of a deferred failure even if nothing changed.
+ //
+ if (md.mt != timestamp_nonexistent && !md.deferred_failure)
+ {
+ run.leave (env, script.end_loc);
+ return *ps;
+ }
+
+ // Sequence start time for mtime checks below.
+ //
+ timestamp start (!ctx.dry_run && depdb::mtime_check ()
+ ? system_clock::now ()
+ : timestamp_unknown);
+
+ if (!ctx.dry_run || verb != 0)
+ {
+ if (g == nullptr)
+ execute_update_file (
+ *md.bs, a, t.as<file> (), env, run, md.deferred_failure);
+ else
+ execute_update_group (*md.bs, a, *g, env, run, md.deferred_failure);
+ }
+
+ run.leave (env, script.end_loc);
+
+ timestamp now (system_clock::now ());
+
+ if (!ctx.dry_run)
+ {
+ // Note: in case of deferred failure we may not have any members.
+ //
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+ depdb::check_mtime (start, md.dd, ft.path (), now);
+ }
+
+ (g == nullptr
+ ? static_cast<const mtime_target&> (t)
+ : static_cast<const mtime_target&> (*g)).mtime (now);
+
+ return target_state::changed;
+ }
+
+ target_state adhoc_buildscript_rule::
+ perform_update_file_or_group (action a, const target& t) const
+ {
+ tracer trace ("adhoc_buildscript_rule::perform_update_file_or_group");
+
+ context& ctx (t.ctx);
+ const scope& bs (t.base_scope ());
+
+ // For a group we use the first (static) member to derive depdb path, as a
+ // source of mtime, etc. Note that in this case there must be a static
+ // member since in this version of perform_update we don't extract dynamic
+ // dependencies (see apply() details).
+ //
+ const group* g (t.is_a<group> ());
+
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+ const path& tp (ft.path ());
+
+ // Update prerequisites and determine if any of them render this target
+ // out-of-date.
+ //
+ timestamp mt (g == nullptr ? ft.load_mtime () : g->load_mtime (tp));
+
+ // This is essentially ps=execute_prerequisites(a, t, mt) which we
+ // cannot use because we need to see ad hoc prerequisites.
+ //
+ optional<target_state> ps (execute_update_prerequisites (a, t, mt));
+
+ // Calculate prerequisite checksums (that need to include ad hoc
+ // prerequisites) unless the script tracks changes itself.
+ //
+ names storage;
+ sha256 prq_cs, exe_cs, env_cs;
- if (!e)
- ps = rs;
+ if (!script.depdb_clear)
+ {
+ for (const prerequisite_target& p: t.prerequisite_targets[a])
+ {
+ if (const target* pt =
+ (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data)
+ : nullptr))
+ {
+ if ((p.include & include_unmatch) != 0) // Skip update=unmatch.
+ continue;
+
+ hash_prerequisite_target (prq_cs, exe_cs, env_cs, *pt, storage);
+ }
+ }
}
bool update (!ps);
@@ -379,6 +1712,8 @@ namespace build2
// We use depdb to track changes to the script itself, input/output file
// names, tools, etc.
//
+ // NOTE: see the "dynamic dependencies" version above.
+ //
depdb dd (tp + ".d");
// First should come the rule name/version.
@@ -411,77 +1746,62 @@ namespace build2
l4 ([&]{trace << "recipe text change forcing update of " << t;});
// Track the variables, targets, and prerequisites changes, unless the
- // script doesn't track the dependency changes itself.
- //
-
- // For each variable hash its name, undefined/null/non-null indicator,
- // and the value if non-null.
- //
- // Note that this excludes the special $< and $> variables which we
- // handle below.
- //
- // @@ TODO: maybe detect and decompose process_path_ex in order to
- // properly attribute checksum and environment changes?
+ // script tracks the dependency changes itself.
//
if (!script.depdb_clear)
{
- sha256 cs;
- names storage;
-
- for (const string& n: script.vars)
+ // For each variable hash its name, undefined/null/non-null indicator,
+ // and the value if non-null.
+ //
+ // Note that this excludes the special $< and $> variables which we
+ // handle below.
+ //
+ // @@ TODO: maybe detect and decompose process_path_ex in order to
+ // properly attribute checksum and environment changes?
+ //
{
- cs.append (n);
+ sha256 cs;
+ hash_script_vars (cs, script, bs, t, storage);
- lookup l;
-
- if (const variable* var = ctx.var_pool.find (n))
- l = t[var];
-
- cs.append (!l.defined () ? '\x1' : l->null ? '\x2' : '\x3');
+ if (dd.expect (cs.string ()) != nullptr)
+ l4 ([&]{trace << "recipe variable change forcing update of " << t;});
+ }
- if (l)
+ // Target and prerequisite sets ($> and $<).
+ //
+ {
+ sha256 tcs;
+ if (g == nullptr)
{
- storage.clear ();
- names_view ns (reverse (*l, storage));
-
- for (const name& n: ns)
- to_checksum (cs, n);
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ hash_target (tcs, *m, storage);
+ }
+ else
+ {
+ // Feels like there is not much sense in hashing the group itself.
+ //
+ for (const target* m: g->members)
+ hash_target (tcs, *m, storage);
}
- }
-
- if (dd.expect (cs.string ()) != nullptr)
- l4 ([&]{trace << "recipe variable change forcing update of " << t;});
- }
-
- // Target and prerequisite sets ($> and $<).
- //
- if (!script.depdb_clear)
- {
- sha256 tcs;
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
- hash_target (tcs, *m);
- if (dd.expect (tcs.string ()) != nullptr)
- l4 ([&]{trace << "target set change forcing update of " << t;});
+ if (dd.expect (tcs.string ()) != nullptr)
+ l4 ([&]{trace << "target set change forcing update of " << t;});
- if (dd.expect (prq_cs.string ()) != nullptr)
- l4 ([&]{trace << "prerequisite set change forcing update of " << t;});
- }
+ if (dd.expect (prq_cs.string ()) != nullptr)
+ l4 ([&]{trace << "prerequisite set change forcing update of " << t;});
+ }
- // Finally the programs and environment checksums.
- //
- if (!script.depdb_clear)
- {
- if (dd.expect (exe_cs.string ()) != nullptr)
- l4 ([&]{trace << "program checksum change forcing update of " << t;});
+ // Finally the programs and environment checksums.
+ //
+ {
+ if (dd.expect (exe_cs.string ()) != nullptr)
+ l4 ([&]{trace << "program checksum change forcing update of " << t;});
- if (dd.expect (env_cs.string ()) != nullptr)
- l4 ([&]{trace << "environment change forcing update of " << t;});
+ if (dd.expect (env_cs.string ()) != nullptr)
+ l4 ([&]{trace << "environment change forcing update of " << t;});
+ }
}
- const scope* bs (nullptr);
- const scope* rs (nullptr);
-
// Execute the custom dependency change tracking commands, if present.
//
// Note that we share the environment between the execute_depdb_preamble()
@@ -506,21 +1826,18 @@ namespace build2
}
}
- build::script::environment env (a, t, false /* temp_dir */);
- build::script::default_runner r;
+ build::script::environment env (a, t, bs, false /* temp_dir */);
+ build::script::default_runner run;
if (depdb_preamble)
{
- bs = &t.base_scope ();
- rs = bs->root_scope ();
-
if (script.depdb_preamble_temp_dir)
env.set_temp_dir_variable ();
build::script::parser p (ctx);
- r.enter (env, script.start_loc);
- p.execute_depdb_preamble (*rs, *bs, env, script, r, dd);
+ run.enter (env, script.start_loc);
+ p.execute_depdb_preamble (a, bs, t, env, script, run, dd);
}
// Update if depdb mismatch.
@@ -539,104 +1856,449 @@ namespace build2
// below).
//
if (depdb_preamble)
- r.leave (env, script.end_loc);
+ run.leave (env, script.end_loc);
return *ps;
}
+ bool r (false);
if (!ctx.dry_run || verb != 0)
{
- // Prepare to executing the script diag line and/or body.
- //
- // Note that it doesn't make much sense to use the temporary directory
- // variable ($~) in the 'diag' builtin call, so we postpone setting it
- // until the script body execution, that can potentially be omitted.
+ // Prepare to execute the script diag preamble and/or body.
//
- if (bs == nullptr)
+ r = g == nullptr
+ ? execute_update_file (bs, a, ft, env, run)
+ : execute_update_group (bs, a, *g, env, run);
+
+ if (r)
{
- bs = &t.base_scope ();
- rs = bs->root_scope ();
+ if (!ctx.dry_run)
+ dd.check_mtime (tp);
}
+ }
- build::script::parser p (ctx);
+ if (r || depdb_preamble)
+ run.leave (env, script.end_loc);
- if (verb == 1)
+ (g == nullptr
+ ? static_cast<const mtime_target&> (ft)
+ : static_cast<const mtime_target&> (*g)).mtime (system_clock::now ());
+
+ return target_state::changed;
+ }
+
+ // Update prerequisite targets.
+ //
+ // Each (non-NULL) prerequisite target should be in one of the following
+ // states:
+ //
+ // target adhoc data
+ // --------------------
+ // !NULL false 0 - normal prerequisite to be updated
+ // !NULL false 1 - normal prerequisite already updated
+ // !NULL true 0 - ad hoc prerequisite to be updated and blanked
+ // NULL true !NULL - ad hoc prerequisite already updated and blanked
+ //
+ // Note that we still execute already updated prerequisites to keep the
+ // dependency counts straight. But we don't consider them for the "renders
+ // us out-of-date" check assuming this has already been done.
+ //
+ // See also environment::set_special_variables().
+ //
+ // See also perform_execute() which has to deal with these shenanigans.
+ //
+ optional<target_state> adhoc_buildscript_rule::
+ execute_update_prerequisites (action a, const target& t, timestamp mt) const
+ {
+ context& ctx (t.ctx);
+
+ // This is essentially a customized execute_prerequisites(a, t, mt).
+ //
+ size_t busy (ctx.count_busy ());
+
+ target_state rs (target_state::unchanged);
+
+ wait_guard wg (ctx, busy, t[a].task_count);
+
+ auto& pts (t.prerequisite_targets[a]);
+
+ for (const prerequisite_target& p: pts)
+ {
+ if (const target* pt =
+ (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) : nullptr))
{
- if (script.diag_line)
- {
- text << p.execute_special (*rs, *bs, env, *script.diag_line);
- }
- else
+ target_state s (execute_async (a, *pt, busy, t[a].task_count));
+ assert (s != target_state::postponed);
+ }
+ }
+
+ wg.wait ();
+
+ bool e (mt == timestamp_nonexistent);
+ for (prerequisite_target& p: pts)
+ {
+ if (const target* pt =
+ (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) : nullptr))
+ {
+ target_state s (execute_complete (a, *pt));
+
+ if (p.data == 0)
{
- // @@ TODO (and below):
- //
- // - we are printing target, not source (like in most other places)
- //
- // - printing of ad hoc target group (the {hxx cxx}{foo} idea)
+ rs |= s;
+
+ // Compare our timestamp to this prerequisite's skipping
+ // update=unmatch.
//
- // - if we are printing prerequisites, should we print all of them
- // (including tools)?
+ if (!e && (p.include & include_unmatch) == 0)
+ {
+ // If this is an mtime-based target, then compare timestamps.
+ //
+ if (const mtime_target* mpt = pt->is_a<mtime_target> ())
+ {
+ if (mpt->newer (mt, s))
+ e = true;
+ }
+ else
+ {
+ // Otherwise we assume the prerequisite is newer if it was
+ // changed.
+ //
+ if (s == target_state::changed)
+ e = true;
+ }
+ }
+
+ // Blank out adhoc.
//
- text << *script.diag_name << ' ' << t;
+ if (p.adhoc ())
+ {
+ p.data = reinterpret_cast<uintptr_t> (p.target);
+ p.target = nullptr;
+ }
}
}
+ }
+
+ return e ? nullopt : optional<target_state> (rs);
+ }
+
+ // Return true if execute_diag_preamble() and/or execute_body() were called
+ // and thus the caller should call run.leave().
+ //
+ bool adhoc_buildscript_rule::
+ execute_update_file (const scope& bs,
+ action a, const file& t,
+ build::script::environment& env,
+ build::script::default_runner& run,
+ bool deferred_failure) const
+ {
+ // NOTE: similar to execute_update_group() below.
+ //
+ context& ctx (t.ctx);
+
+ const scope& rs (*bs.root_scope ());
- if (!ctx.dry_run || verb >= 2)
+ // Note that it doesn't make much sense to use the temporary directory
+ // variable ($~) in the 'diag' builtin call, so we postpone setting it
+ // until the script body execution, that can potentially be omitted.
+ //
+ build::script::parser p (ctx);
+
+ bool exec_body (!ctx.dry_run || verb >= 2);
+ bool exec_diag (!script.diag_preamble.empty () && (exec_body || verb == 1));
+ bool exec_depdb (!script.depdb_preamble.empty ());
+
+ if (script.diag_name)
+ {
+ if (verb == 1)
{
- // On failure remove the target files that may potentially exist but
- // be invalid.
+ // By default we print the first non-ad hoc prerequisite target as the
+ // "main" prerequisite, unless there isn't any or it's not file-based,
+ // in which case we fallback to the second form without the
+ // prerequisite. Potential future improvements:
//
- small_vector<auto_rmfile, 8> rms;
-
- if (!ctx.dry_run)
+ // - Somehow detect that the first prerequisite target is a tool being
+ // executed and fallback to the second form. It's tempting to just
+ // exclude all exe{} targets, but this could be a rule for something
+ // like strip.
+ //
+ const file* pt (nullptr);
+ for (const prerequisite_target& p: t.prerequisite_targets[a])
{
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ // See execute_update_prerequisites().
+ //
+ if (p.target != nullptr && !p.adhoc ())
{
- if (auto* f = m->is_a<file> ())
- rms.emplace_back (f->path ());
+ pt = p.target->is_a<file> ();
+ break;
}
}
- if (script.body_temp_dir && !script.depdb_preamble_temp_dir)
- env.set_temp_dir_variable ();
+ if (t.adhoc_member == nullptr)
+ {
+ if (pt != nullptr)
+ print_diag (script.diag_name->c_str (), *pt, t);
+ else
+ print_diag (script.diag_name->c_str (), t);
+ }
+ else
+ {
+ vector<target_key> ts;
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ ts.push_back (m->key ());
+
+ if (pt != nullptr)
+ print_diag (script.diag_name->c_str (), pt->key (), move (ts));
+ else
+ print_diag (script.diag_name->c_str (), move (ts));
+ }
+ }
+ }
+ else if (exec_diag)
+ {
+ if (script.diag_preamble_temp_dir && !script.depdb_preamble_temp_dir)
+ env.set_temp_dir_variable ();
- p.execute_body (*rs, *bs, env, script, r, !depdb_preamble);
+ pair<names, location> diag (
+ p.execute_diag_preamble (rs, bs,
+ env, script, run,
+ verb == 1 /* diag */,
+ !exec_depdb /* enter */,
+ false /* leave */));
- if (!ctx.dry_run)
+ if (verb == 1)
+ print_custom_diag (bs, move (diag.first), diag.second);
+ }
+
+ if (exec_body)
+ {
+ // On failure remove the target files that may potentially exist but
+ // be invalid.
+ //
+ small_vector<auto_rmfile, 8> rms;
+
+ if (!ctx.dry_run)
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
{
- // If this is an executable, let's be helpful to the user and set
- // the executable bit on POSIX.
- //
+ if (auto* f = m->is_a<file> ())
+ rms.emplace_back (f->path ());
+ }
+ }
+
+ if (script.body_temp_dir &&
+ !script.depdb_preamble_temp_dir &&
+ !script.diag_preamble_temp_dir)
+ env.set_temp_dir_variable ();
+
+ p.execute_body (rs, bs,
+ env, script, run,
+ !exec_depdb && !exec_diag /* enter */,
+ false /* leave */);
+
+ if (!ctx.dry_run)
+ {
+ if (deferred_failure)
+ fail << "expected error exit status from recipe body";
+
+ // If this is an executable, let's be helpful to the user and set
+ // the executable bit on POSIX.
+ //
#ifndef _WIN32
- auto chmod = [] (const path& p)
- {
- path_perms (p,
- (path_perms (p) |
- permissions::xu |
- permissions::xg |
- permissions::xo));
- };
+ auto chmod = [] (const path& p)
+ {
+ path_perms (p,
+ (path_perms (p) |
+ permissions::xu |
+ permissions::xg |
+ permissions::xo));
+ };
+
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (auto* p = m->is_a<exe> ())
+ chmod (p->path ());
+ }
+#endif
+ for (auto& rm: rms)
+ rm.cancel ();
+ }
+ }
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ return exec_diag || exec_body;
+ }
+
+ bool adhoc_buildscript_rule::
+ execute_update_group (const scope& bs,
+ action a, const group& g,
+ build::script::environment& env,
+ build::script::default_runner& run,
+ bool deferred_failure) const
+ {
+ // Note: similar to execute_update_file() above (see there for comments).
+ //
+ // NOTE: when called from perform_update_file_or_group_dyndep_byproduct(),
+ // the group does not contain dynamic members yet and thus could
+ // have no members at all.
+ //
+ context& ctx (g.ctx);
+
+ const scope& rs (*bs.root_scope ());
+
+ build::script::parser p (ctx);
+
+ bool exec_body (!ctx.dry_run || verb >= 2);
+ bool exec_diag (!script.diag_preamble.empty () && (exec_body || verb == 1));
+ bool exec_depdb (!script.depdb_preamble.empty ());
+
+ if (script.diag_name)
+ {
+ if (verb == 1)
+ {
+ const file* pt (nullptr);
+ for (const prerequisite_target& p: g.prerequisite_targets[a])
+ {
+ if (p.target != nullptr && !p.adhoc ())
{
- if (auto* p = m->is_a<exe> ())
- chmod (p->path ());
+ pt = p.target->is_a<file> ();
+ break;
}
-#endif
- dd.check_mtime (tp);
+ }
+
+ if (pt != nullptr)
+ print_diag (script.diag_name->c_str (), *pt, g);
+ else
+ print_diag (script.diag_name->c_str (), g);
+ }
+ }
+ else if (exec_diag)
+ {
+ if (script.diag_preamble_temp_dir && !script.depdb_preamble_temp_dir)
+ env.set_temp_dir_variable ();
+
+ pair<names, location> diag (
+ p.execute_diag_preamble (rs, bs,
+ env, script, run,
+ verb == 1 /* diag */,
+ !exec_depdb /* enter */,
+ false /* leave */));
+ if (verb == 1)
+ print_custom_diag (bs, move (diag.first), diag.second);
+ }
+
+ if (exec_body)
+ {
+ // On failure remove the target files that may potentially exist but
+ // be invalid.
+ //
+ // Note: we may leave dynamic members if we don't know about them yet.
+ // Feels natural enough.
+ //
+ small_vector<auto_rmfile, 8> rms;
- for (auto& rm: rms)
- rm.cancel ();
+ if (!ctx.dry_run)
+ {
+ for (const target* m: g.members)
+ {
+ if (auto* f = m->is_a<file> ())
+ rms.emplace_back (f->path ());
}
}
- else if (depdb_preamble)
- r.leave (env, script.end_loc);
+
+ if (script.body_temp_dir &&
+ !script.depdb_preamble_temp_dir &&
+ !script.diag_preamble_temp_dir)
+ env.set_temp_dir_variable ();
+
+ p.execute_body (rs, bs,
+ env, script, run,
+ !exec_depdb && !exec_diag /* enter */,
+ false /* leave */);
+
+ if (!ctx.dry_run)
+ {
+ if (deferred_failure)
+ fail << "expected error exit status from recipe body";
+
+ // @@ TODO: expl: byproduct
+ //
+ // Note: will not work for dynamic members if we don't know about them
+ // yet. Could probably fix by doing this later, after the dynamic
+ // dependency extraction.
+ //
+#ifndef _WIN32
+ auto chmod = [] (const path& p)
+ {
+ path_perms (p,
+ (path_perms (p) |
+ permissions::xu |
+ permissions::xg |
+ permissions::xo));
+ };
+
+ for (const target* m: g.members)
+ {
+ if (auto* p = m->is_a<exe> ())
+ chmod (p->path ());
+ }
+#endif
+ for (auto& rm: rms)
+ rm.cancel ();
+ }
}
- else if (depdb_preamble)
- r.leave (env, script.end_loc);
- t.mtime (system_clock::now ());
- return target_state::changed;
+ return exec_diag || exec_body;
+ }
+
+ target_state adhoc_buildscript_rule::
+ perform_clean_file (action a, const target& t)
+ {
+ // Besides .d (depdb) also clean .t which is customarily used as a
+ // temporary file, such as make dependency output in depdb-dyndep. In
+ // fact, initially the plan was to only clean it if we have dyndep but
+ // there is no reason it cannot be used for something else.
+ //
+ // Note that the main advantage of using this file over something in the
+ // temporary directory ($~) is that it's next to other output which makes
+ // it easier to examine during recipe troubleshooting.
+ //
+ // Finally, we print the entire ad hoc group at verbosity level 1, similar
+ // to the default update diagnostics.
+ //
+ // @@ TODO: .t may also be a temporary directory (and below).
+ //
+ return perform_clean_extra (a,
+ t.as<file> (),
+ {".d", ".t"},
+ {},
+ true /* show_adhoc_members */);
+ }
+
+ target_state adhoc_buildscript_rule::
+ perform_clean_group (action a, const target& xt)
+ {
+ const group& g (xt.as<group> ());
+
+ path d, t;
+ if (g.members_static != 0)
+ {
+ const path& p (g.members.front ()->as<file> ().path ());
+ d = p + ".d";
+ t = p + ".t";
+ }
+ else
+ {
+ // See target_path lambda in apply().
+ //
+ t = g.dir / (g.name + '.' + g.type ().name);
+ d = t + ".d";
+ t += ".t";
+ }
+
+ return perform_clean_group_extra (a, g, {d.string ().c_str (),
+ t.string ().c_str ()});
}
target_state adhoc_buildscript_rule::
@@ -655,30 +2317,303 @@ namespace build2
const scope& bs (t.base_scope ());
const scope& rs (*bs.root_scope ());
- build::script::environment e (a, t, script.body_temp_dir, deadline);
+ build::script::environment e (a, t, bs, false /* temp_dir */, deadline);
build::script::parser p (ctx);
+ build::script::default_runner r;
- if (verb == 1)
+ bool exec_body (!ctx.dry_run || verb >= 2);
+ bool exec_diag (!script.diag_preamble.empty () &&
+ (exec_body || verb == 1));
+
+ if (script.diag_name)
{
- if (script.diag_line)
+ if (verb == 1)
{
- text << p.execute_special (rs, bs, e, *script.diag_line);
- }
- else
- {
- // @@ TODO: as above
+ // For operations other than update (as well as for non-file
+ // targets), we default to the second form (without the
+ // prerequisite). Think test.
//
- text << *script.diag_name << ' ' << t;
+ if (t.adhoc_member == nullptr)
+ print_diag (script.diag_name->c_str (), t);
+ else
+ {
+ vector<target_key> ts;
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ ts.push_back (m->key ());
+
+ print_diag (script.diag_name->c_str (), move (ts));
+ }
}
}
+ else if (exec_diag)
+ {
+ if (script.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ pair<names, location> diag (
+ p.execute_diag_preamble (rs, bs,
+ e, script, r,
+ verb == 1 /* diag */,
+ true /* enter */,
+ !exec_body /* leave */));
+
+ if (verb == 1)
+ print_custom_diag (bs, move (diag.first), diag.second);
+ }
- if (!ctx.dry_run || verb >= 2)
+ if (exec_body)
{
- build::script::default_runner r;
- p.execute_body (rs, bs, e, script, r);
+ if (script.body_temp_dir && !script.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ p.execute_body (rs, bs, e, script, r, !exec_diag /* enter */);
}
}
return target_state::changed;
}
+
+ void adhoc_buildscript_rule::
+ print_custom_diag (const scope& bs, names&& ns, const location& l) const
+ {
+ // The straightforward thing to do would be to just print the diagnostics
+ // as specified by the user. But that will make some of the tidying up
+ // done by print_diag() unavailable to custom diagnostics. Things like
+ // omitting the out-qualification as well as compact printing of the
+ // groups. Also, in the future we may want to support colorization of the
+    // diagnostics, which will be difficult to achieve with such a "just print"
+ // approach.
+ //
+ // So instead we are going to parse the custom diagnostics, translate
+ // names back to targets (where appropriate), and call one of the
+ // print_diag() functions. Specifically, we expect the custom diagnostics
+ // to be in one of the following two forms (which correspond to the two
+    // forms of print_diag()):
+ //
+ // diag <prog> <l-target> <comb> <r-target>...
+ // diag <prog> <r-target>...
+ //
+ // And the way we are going to disambiguate this is by analyzing name
+ // types. Specifically, we expect <comb> to be a simple name that also
+ // does not contain any directory separators (so we can distinguish it
+ // from both target names as well as paths, which can be specified on
+ // either side). We will also recognize `-` as the special stdout path
+ // name (so <comb> cannot be `-`). Finally, <l-target> (but not
+ // <r-target>) can be a string (e.g., an argument) but that should not
+ // pose an ambiguity.
+ //
+ // With this approach, the way to re-create the default diagnostics would
+ // be:
+ //
+ // diag <prog> ($<[0]) -> $>
+ // diag <prog> $>
+ //
+ auto i (ns.begin ()), e (ns.end ());
+
+ // <prog>
+ //
+ if (i == e)
+ fail (l) << "missing program name in diag builtin";
+
+ if (!i->simple () || i->empty ())
+ fail (l) << "expected simple name as program name in diag builtin";
+
+ const char* prog (i->value.c_str ());
+ ++i;
+
+ // <l-target>
+ //
+ const target* l_t (nullptr);
+ path l_p;
+ string l_s;
+
+ auto parse_target = [&bs, &l, &i, &e] () -> const target&
+ {
+ name& n (*i++);
+ name o;
+
+ if (n.pair)
+ {
+ if (i == e)
+ fail (l) << "invalid target name pair in diag builtin";
+
+ o = move (*i++);
+ }
+
+ // Similar to to_target() in $target.*().
+ //
+ if (const target* r = search_existing (n, bs, o.dir))
+ return *r;
+
+ fail (l) << "target "
+ << (n.pair ? names {move (n), move (o)} : names {move (n)})
+ << " not found in diag builtin" << endf;
+ };
+
+ auto parse_first = [&l, &i, &e,
+ &parse_target] (const target*& t, path& p, string& s,
+ const char* after)
+ {
+ if (i == e)
+ fail (l) << "missing target after " << after << " in diag builtin";
+
+ try
+ {
+ if (i->typed ())
+ {
+ t = &parse_target ();
+ return; // i is already incremented.
+ }
+ else if (!i->dir.empty ())
+ {
+ p = move (i->dir);
+ p /= i->value;
+ }
+ else if (path_traits::find_separator (i->value) != string::npos)
+ {
+ p = path (move (i->value));
+ }
+ else if (!i->value.empty ())
+ {
+ s = move (i->value);
+ }
+ else
+ fail (l) << "expected target, path, or argument after "
+ << after << " in diag builtin";
+ }
+ catch (const invalid_path& e)
+ {
+ fail (l) << "invalid path '" << e.path << "' after "
+ << after << " in diag builtin";
+ }
+
+ ++i;
+ };
+
+ parse_first (l_t, l_p, l_s, "program name");
+
+ // Now detect which form it is.
+ //
+ if (i != e &&
+ i->simple () &&
+ !i->empty () &&
+ path_traits::find_separator (i->value) == string::npos)
+ {
+ // The first form.
+
+ // <comb>
+ //
+ const char* comb (i->value.c_str ());
+ ++i;
+
+ // <r-target>
+ //
+ const target* r_t (nullptr);
+ path r_p;
+ string r_s;
+
+ parse_first (r_t, r_p, r_s, "combiner");
+
+ path_name r_pn;
+
+ if (r_t != nullptr)
+ ;
+ else if (!r_p.empty ())
+ r_pn = path_name (&r_p);
+ else
+ {
+ if (r_s != "-")
+ fail (l) << "expected target or path instead of '" << r_s
+ << "' after combiner in diag builtin";
+
+ r_pn = path_name (move (r_s));
+ }
+
+ if (i == e)
+ {
+ if (r_t != nullptr)
+ {
+ if (l_t != nullptr) print_diag (prog, *l_t, *r_t, comb);
+ else if (!l_p.empty ()) print_diag (prog, l_p, *r_t, comb);
+ else print_diag (prog, l_s, *r_t, comb);
+ }
+ else
+ {
+ if (l_t != nullptr) print_diag (prog, *l_t, r_pn, comb);
+ else if (!l_p.empty ()) print_diag (prog, l_p, r_pn, comb);
+ else print_diag (prog, l_s, r_pn, comb);
+ }
+
+ return;
+ }
+
+ // We can only have multiple targets, not paths.
+ //
+ if (r_t == nullptr)
+ fail (l) << "unexpected name after path in diag builtin";
+
+ // <r-target>...
+ //
+ vector<target_key> r_ts {r_t->key ()};
+
+ do r_ts.push_back (parse_target ().key ()); while (i != e);
+
+ if (l_t != nullptr) print_diag (prog, l_t->key (), move (r_ts), comb);
+ else if (!l_p.empty ()) print_diag (prog, l_p, move (r_ts), comb);
+ else print_diag (prog, l_s, move (r_ts), comb);
+ }
+ else
+ {
+ // The second form.
+
+ // First "absorb" the l_* values as the first <r-target>.
+ //
+ const target* r_t (nullptr);
+ path_name r_pn;
+
+ if (l_t != nullptr)
+ r_t = l_t;
+ else if (!l_p.empty ())
+ r_pn = path_name (&l_p);
+ else
+ {
+ if (l_s != "-")
+ {
+ diag_record dr (fail (l));
+
+ dr << "expected target or path instead of '" << l_s
+ << "' after program name in diag builtin";
+
+ if (i != e)
+ dr << info << "alternatively, missing combiner after '"
+ << l_s << "'";
+ }
+
+ r_pn = path_name (move (l_s));
+ }
+
+ if (i == e)
+ {
+ if (r_t != nullptr)
+ print_diag (prog, *r_t);
+ else
+ print_diag (prog, r_pn);
+
+ return;
+ }
+
+ // We can only have multiple targets, not paths.
+ //
+ if (r_t == nullptr)
+ fail (l) << "unexpected name after path in diag builtin";
+
+ // <r-target>...
+ //
+ vector<target_key> r_ts {r_t->key ()};
+
+ do r_ts.push_back (parse_target ().key ()); while (i != e);
+
+ print_diag (prog, move (r_ts));
+ }
+ }
}
diff --git a/libbuild2/adhoc-rule-buildscript.hxx b/libbuild2/adhoc-rule-buildscript.hxx
index 7f9c10a..994b18c 100644
--- a/libbuild2/adhoc-rule-buildscript.hxx
+++ b/libbuild2/adhoc-rule-buildscript.hxx
@@ -36,7 +36,41 @@ namespace build2
const optional<timestamp>&) const override;
target_state
- perform_update_file (action, const target&) const;
+ perform_update_file_or_group (action, const target&) const;
+
+ struct match_data;
+ struct match_data_byproduct;
+
+ target_state
+ perform_update_file_or_group_dyndep (
+ action, const target&, match_data&) const;
+
+ target_state
+ perform_update_file_or_group_dyndep_byproduct (
+ action, const target&, match_data_byproduct&) const;
+
+ optional<target_state>
+ execute_update_prerequisites (action, const target&, timestamp) const;
+
+ bool
+ execute_update_file (const scope&,
+ action a, const file&,
+ build::script::environment&,
+ build::script::default_runner&,
+ bool deferred_failure = false) const;
+
+ bool
+ execute_update_group (const scope&,
+ action a, const group&,
+ build::script::environment&,
+ build::script::default_runner&,
+ bool deferred_failure = false) const;
+
+ static target_state
+ perform_clean_file (action, const target&);
+
+ static target_state
+ perform_clean_group (action, const target&);
target_state
default_action (action, const target&, const optional<timestamp>&) const;
@@ -56,9 +90,16 @@ namespace build2
virtual void
dump_text (ostream&, string&) const override;
+ void
+ print_custom_diag (const scope&, names&&, const location&) const;
+
public:
using script_type = build::script::script;
+ // The prerequisite_target::include bit that indicates update=unmatch.
+ //
+ static const uintptr_t include_unmatch = 0x100;
+
script_type script;
string checksum; // Script text hash.
const target_type* ttype; // First target/pattern type.
diff --git a/libbuild2/adhoc-rule-cxx.cxx b/libbuild2/adhoc-rule-cxx.cxx
index df6467f..ad19481 100644
--- a/libbuild2/adhoc-rule-cxx.cxx
+++ b/libbuild2/adhoc-rule-cxx.cxx
@@ -10,6 +10,7 @@
#include <libbuild2/target.hxx>
#include <libbuild2/context.hxx>
#include <libbuild2/algorithm.hxx>
+#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
using namespace butl;
@@ -19,7 +20,7 @@ namespace build2
// cxx_rule_v1
//
bool cxx_rule_v1::
- match (action, target&, const string&) const
+ match (action, target&) const
{
return true;
}
@@ -94,8 +95,10 @@ namespace build2
load_module_library (const path& lib, const string& sym, string& err);
bool adhoc_cxx_rule::
- match (action a, target& t, const string& hint, match_extra& me) const
+ match (action a, target& xt, const string& hint, match_extra& me) const
{
+ const target& t (xt); // See adhoc_rule::match() for background.
+
if (pattern != nullptr && !pattern->match (a, t, hint, me))
return false;
@@ -301,9 +304,9 @@ namespace build2
//
auto_thread_env penv (nullptr);
context& ctx (*t.ctx.module_context);
- scheduler::phase_guard pg (ctx.sched);
+ scheduler::phase_guard pg (*ctx.sched);
- const uint16_t verbosity (3); // Project creation command verbosity.
+ uint16_t verbosity (3); // Project creation command verbosity.
// Project and location signatures.
//
@@ -325,6 +328,17 @@ namespace build2
if (!create && (create = !check_sig (bf, psig)))
rmdir_r (ctx, pd, false, verbosity); // Never dry-run.
+ auto diag = [verbosity] (const path& f)
+ {
+ if (verb >= verbosity)
+ {
+ if (verb >= 2)
+ text << "cat >" << f;
+ else if (verb)
+ print_diag ("save", f);
+ }
+ };
+
path of;
ofdstream ofs;
@@ -355,8 +369,7 @@ namespace build2
//
of = path (pd / "rule.cxx");
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << of;
+ diag (of);
ofs.open (of);
@@ -376,6 +389,7 @@ namespace build2
<< "#include <libbuild2/depdb.hxx>" << '\n'
<< "#include <libbuild2/scope.hxx>" << '\n'
<< "#include <libbuild2/target.hxx>" << '\n'
+ << "#include <libbuild2/dyndep.hxx>" << '\n'
<< "#include <libbuild2/context.hxx>" << '\n'
<< "#include <libbuild2/variable.hxx>" << '\n'
<< "#include <libbuild2/algorithm.hxx>" << '\n'
@@ -485,8 +499,7 @@ namespace build2
//
of = bf;
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << of;
+ diag (of);
ofs.open (of);
@@ -558,8 +571,7 @@ namespace build2
entry_time et (file_time (of));
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << of;
+ diag (of);
ofs.open (of);
@@ -604,10 +616,10 @@ namespace build2
l = find_target ();
phase_switch mp (ctx, run_phase::match);
- if (build2::match (perform_update_id, *l) != target_state::unchanged)
+ if (match_sync (perform_update_id, *l) != target_state::unchanged)
{
phase_switch ep (ctx, run_phase::execute);
- execute (a, *l);
+ execute_sync (a, *l);
}
}
else
@@ -664,13 +676,24 @@ namespace build2
}
}
- return impl->match (a, t, hint, me);
+ return impl->match (a, xt, hint, me);
}
#endif // BUILD2_BOOTSTRAP || LIBBUILD2_STATIC_BUILD
recipe adhoc_cxx_rule::
apply (action a, target& t, match_extra& me) const
{
+ // Handle matching explicit group member (see adhoc_rule::match() for
+ // background).
+ //
+ if (const group* g = (t.group != nullptr
+ ? t.group->is_a<group> ()
+ : nullptr))
+ {
+ match_sync (a, *g);
+ return group_recipe; // Execute the group's recipe.
+ }
+
return impl.load (memory_order_relaxed)->apply (a, t, me);
}
}
diff --git a/libbuild2/adhoc-rule-cxx.hxx b/libbuild2/adhoc-rule-cxx.hxx
index 466c0e5..b563881 100644
--- a/libbuild2/adhoc-rule-cxx.hxx
+++ b/libbuild2/adhoc-rule-cxx.hxx
@@ -26,7 +26,7 @@ namespace build2
};
// Note that when used as part of a pattern, the implementation cannot use
- // the match_extra::buffer nor the target auxilary data storage.
+ // the match_extra::data() facility nor the target auxiliary data storage.
//
class LIBBUILD2_SYMEXPORT cxx_rule_v1: public cxx_rule
{
@@ -52,8 +52,11 @@ namespace build2
// Return true by default.
//
+ // Note: must treat target as const (unless known to match a non-group).
+ // See adhoc_rule::match() for background.
+ //
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
};
// Note: not exported.
diff --git a/libbuild2/adhoc-rule-regex-pattern.cxx b/libbuild2/adhoc-rule-regex-pattern.cxx
index 89a4766..257952f 100644
--- a/libbuild2/adhoc-rule-regex-pattern.cxx
+++ b/libbuild2/adhoc-rule-regex-pattern.cxx
@@ -126,10 +126,13 @@ namespace build2
}
bool adhoc_rule_regex_pattern::
- match (action a, target& t, const string&, match_extra& me) const
+ match (action a, const target& t, const string&, match_extra& me) const
{
tracer trace ("adhoc_rule_regex_pattern::match");
+ // Note: target may not be locked in which case we should not modify
+ // target or match_extra (see adhoc_rule::match() for background).
+
// The plan is as follows: First check the "type signature" of the target
// and its prerequisites (the primary target type has already been matched
// by the rule matching machinery). If there is a match, then concatenate
@@ -158,11 +161,23 @@ namespace build2
auto find_prereq = [a, &t] (const target_type& tt) -> optional<target_key>
{
// We use the standard logic that one would use in the rule::match()
- // implementation.
+ // implementation. Except we support the unmatch and match values in
+ // the update variable.
+ //
+ // Note: assuming group prerequisites are immutable (not locked).
//
for (prerequisite_member p: group_prerequisite_members (a, t))
{
- if (include (a, t, p) == include_type::normal && p.is_a (tt))
+ // Note that here we don't validate the update operation override
+ // value (since we may not match). Instead the rule does this in
+ // apply().
+ //
+ // Note: assuming include()'s use of target only relied on immutable
+ // data (not locked).
+ //
+ lookup l;
+ if (include (a, t, p, a.operation () == update_id ? &l : nullptr) ==
+ include_type::normal && p.is_a (tt))
return p.key ().tk;
}
return nullopt;
@@ -190,41 +205,44 @@ namespace build2
// iterators pointing to the string being matched. Which means this string
    // must be kept around until we are done with replacing the substitutions.
// In fact, we cannot even move it because this may invalidate the
- // iterators (e.g., in case of a small string optimization). So the plan
- // is to store the string in match_extra::buffer and regex_match_results
- // (which we can move) in the auxiliary data storage.
+ // iterators (e.g., in case of a small string optimization). We also
+ // cannot set the data ahead of time because we may not match. Plus,
+ // resorting to a dynamic memory allocation even if we don't match feels
+ // heavy-handed.
+ //
+ // So the plan is to store the string in match_extra::data() and
+ // regex_match_results (which we can move) in the auxiliary data storage.
//
- string& ns (me.buffer);
+ // Note: only cache if locked.
+ //
+ static_assert (sizeof (string) <= match_extra::data_size,
+ "match data too large");
+
+ string tmp;
+ string& ns (me.locked ? me.data (string ()) : tmp);
- auto append_name = [&ns, first = true] (const target_key& tk,
- const element& e) mutable
+ auto append_name = [&ns,
+ first = true,
+ storage = string ()] (const target_key& tk,
+ const element& e) mutable
{
if (!first)
ns += '/';
else
first = false;
- ns += *tk.name;
-
- // The same semantics as in variable_type_map::find().
- //
- if (tk.ext && !tk.ext->empty () &&
- (e.match_ext ||
- tk.type->fixed_extension == &target_extension_none ||
- tk.type->fixed_extension == &target_extension_must))
- {
- ns += '.';
- ns += *tk.ext;
- }
+ ns += tk.effective_name (storage, e.match_ext);
};
// Primary target (always a pattern).
//
auto te (targets_.end ()), ti (targets_.begin ());
- append_name (t.key (), *ti);
+ append_name (t.key (), *ti); // Immutable (not locked).
// Match ad hoc group members.
//
+ // Note: shouldn't be in effect for an explicit group (not locked).
+ //
while ((ti = find_if (ti + 1, te, pattern)) != te)
{
const target* at (find_adhoc_member (t, ti->type));
@@ -274,9 +292,8 @@ namespace build2
return false;
}
- static_assert (sizeof (regex_match_results) <= target::data_size,
- "insufficient space");
- t.data (move (mr));
+ if (me.locked)
+ t.data (a, move (mr));
return true;
}
@@ -302,9 +319,15 @@ namespace build2
}
void adhoc_rule_regex_pattern::
- apply_adhoc_members (action, target& t, match_extra&) const
+ apply_group_members (action a, target& t, const scope& bs,
+ match_extra&) const
{
- const auto& mr (t.data<regex_match_results> ());
+ if (targets_.size () == 1) // The group/primary target is always present.
+ return;
+
+ group* g (t.is_a<group> ());
+
+ const auto& mr (t.data<regex_match_results> (a));
for (auto i (targets_.begin () + 1); i != targets_.end (); ++i)
{
@@ -331,39 +354,99 @@ namespace build2
d.normalize ();
}
- // @@ TODO: currently this uses type as the ad hoc member identity.
- //
- add_adhoc_member (
- t,
- e.type,
- move (d),
- dir_path () /* out */,
- substitute (t, mr, e.name.value, "ad hoc target group member"));
+ string n (substitute (
+ t,
+ mr,
+ e.name.value,
+ (g != nullptr
+ ? "explicit target group member"
+ : "ad hoc target group member")));
+
+ // @@ TODO: what if name contains extension? Shouldn't we call
+ // split_name()?
+
+ if (g != nullptr)
+ {
+ auto& ms (g->members);
+
+ // These are conceptually static but they behave more like dynamic in
+ // that we likely need to insert the target, set its group, etc. In a
+ // sense, they are rule-static, but group-dynamic.
+ //
+ // Note: a custom version of the dyndep_rule::inject_group_member()
+ // logic.
+ //
+ auto l (search_new_locked (
+ bs.ctx,
+ e.type,
+ move (d),
+ dir_path (), // Always in out.
+ move (n),
+ nullptr /* ext */,
+ &bs));
+
+ const target& t (l.first); // Note: non-const only if have lock.
+
+ if (l.second)
+ {
+ l.first.group = g;
+ l.second.unlock ();
+ }
+ else
+ {
+ if (find (ms.begin (), ms.end (), &t) != ms.end ())
+ continue;
+
+ if (t.group != g) // Note: atomic.
+ {
+ // We can only update the group under lock.
+ //
+ target_lock tl (lock (a, t));
+
+ if (!tl)
+ fail << "group " << *g << " member " << t << " is already matched" <<
+ info << "static group members specified by pattern rules cannot "
+ << "be used as prerequisites directly, only via group";
+
+ if (t.group == nullptr)
+ tl.target->group = g;
+ else if (t.group != g)
+ {
+ fail << "group " << *g << " member " << t
+ << " is already member of group " << *t.group;
+ }
+ }
+ }
+
+ ms.push_back (&t);
+ }
+ else
+ {
+ // @@ TODO: currently this uses type as the ad hoc member identity.
+ // Use inject_adhoc_group_member() variant?
+ //
+ add_adhoc_member (
+ t,
+ e.type,
+ move (d),
+ dir_path (), // Always in out.
+ move (n));
+ }
}
}
void adhoc_rule_regex_pattern::
- apply_prerequisites (action a, target& t, match_extra&) const
+ apply_prerequisites (action a, target& t,
+ const scope& bs,
+ match_extra&) const
{
- const auto& mr (t.data<regex_match_results> ());
-
- // Resolve and cache target scope lazily.
- //
- auto base_scope = [&t, bs = (const scope*) nullptr] () mutable
- -> const scope&
- {
- if (bs == nullptr)
- bs = &t.base_scope ();
-
- return *bs;
- };
+ const auto& mr (t.data<regex_match_results> (a));
// Re-create the same clean semantics as in match_prerequisite_members().
//
bool clean (a.operation () == clean_id && !t.is_a<alias> ());
auto& pts (t.prerequisite_targets[a]);
- size_t start (pts.size ());
for (const element& e: prereqs_)
{
@@ -391,7 +474,7 @@ namespace build2
n = name (e.name.dir,
e.name.type,
substitute (t, mr, e.name.value, "prerequisite"));
- s = &base_scope ();
+ s = &bs;
}
else
{
@@ -401,18 +484,15 @@ namespace build2
const target& pt (search (t, move (n), *s, &e.type));
- if (clean && !pt.in (*base_scope ().root_scope ()))
+ if (clean && !pt.in (*bs.root_scope ()))
continue;
// @@ TODO: it could be handy to mark a prerequisite (e.g., a tool)
// ad hoc so that it doesn't interfere with the $< list. Also
- // clean=false.
+ // clean=false. Also update=match|unmatch.
//
pts.push_back (prerequisite_target (&pt, false /* adhoc */));
}
-
- if (start != pts.size ())
- match_members (a, t, pts, start);
}
void adhoc_rule_regex_pattern::
diff --git a/libbuild2/adhoc-rule-regex-pattern.hxx b/libbuild2/adhoc-rule-regex-pattern.hxx
index 4327e72..9cb7874 100644
--- a/libbuild2/adhoc-rule-regex-pattern.hxx
+++ b/libbuild2/adhoc-rule-regex-pattern.hxx
@@ -14,7 +14,7 @@ namespace build2
{
// Ad hoc rule regex pattern.
//
- // The name signature is stored in match_extra::buffer while the regex
+ // The name signature string is stored in match_extra::data while the regex
// match_results object -- in the target auxiliary data storage. Both must
// remain valid until after the apply_*() calls.
//
@@ -32,13 +32,17 @@ namespace build2
names&&, const location&);
virtual bool
- match (action, target&, const string&, match_extra&) const override;
+ match (action, const target&, const string&, match_extra&) const override;
virtual void
- apply_adhoc_members (action, target&, match_extra&) const override;
+ apply_group_members (action, target&,
+ const scope&,
+ match_extra&) const override;
virtual void
- apply_prerequisites (action, target&, match_extra&) const override;
+ apply_prerequisites (action, target&,
+ const scope&,
+ match_extra&) const override;
virtual void
dump (ostream&) const override;
diff --git a/libbuild2/algorithm.cxx b/libbuild2/algorithm.cxx
index 0370626..4db3d72 100644
--- a/libbuild2/algorithm.cxx
+++ b/libbuild2/algorithm.cxx
@@ -57,15 +57,21 @@ namespace build2
assert (t.ctx.phase == run_phase::match);
// If this is a project-qualified prerequisite, then this is import's
- // business.
+ // business (phase 2).
//
if (pk.proj)
- return import (t.ctx, pk);
+ return import2 (t.ctx, pk);
if (const target* pt = pk.tk.type->search (t, pk))
return *pt;
- return create_new_target (t.ctx, pk);
+ if (pk.tk.out->empty ())
+ return create_new_target (t.ctx, pk);
+
+ // If this is triggered, then you are probably not passing scope to
+ // search() (which leads to search_existing_file() being skipped).
+ //
+ fail << "no existing source file for prerequisite " << pk << endf;
}
pair<target&, ulock>
@@ -76,7 +82,13 @@ namespace build2
if (const target* pt = pk.tk.type->search (t, pk))
return {const_cast<target&> (*pt), ulock ()};
- return create_new_target_locked (t.ctx, pk);
+ if (pk.tk.out->empty ())
+ return create_new_target_locked (t.ctx, pk);
+
+ // If this is triggered, then you are probably not passing scope to
+ // search() (which leads to search_existing_file() being skipped).
+ //
+ fail << "no existing source file for prerequisite " << pk << endf;
}
const target*
@@ -88,7 +100,29 @@ namespace build2
}
const target&
- search (const target& t, name n, const scope& s, const target_type* tt)
+ search_new (context& ctx, const prerequisite_key& pk)
+ {
+ assert (ctx.phase == run_phase::load || ctx.phase == run_phase::match);
+
+ if (const target* pt = search_existing_target (ctx, pk))
+ return *pt;
+
+ return create_new_target (ctx, pk);
+ }
+
+ pair<target&, ulock>
+ search_new_locked (context& ctx, const prerequisite_key& pk)
+ {
+ assert (ctx.phase == run_phase::load || ctx.phase == run_phase::match);
+
+ if (const target* pt = search_existing_target (ctx, pk))
+ return {const_cast<target&> (*pt), ulock ()};
+
+ return create_new_target_locked (ctx, pk);
+ }
+
+ const target&
+ search (const target& t, name&& n, const scope& s, const target_type* tt)
{
assert (t.ctx.phase == run_phase::match);
@@ -234,7 +268,7 @@ namespace build2
fail << "dependency cycle detected involving target " << ct;
if (!wq)
- return target_lock {a, nullptr, e - b};
+ return target_lock {a, nullptr, e - b, false};
// We also unlock the phase for the duration of the wait. Why?
// Consider this scenario: we are trying to match a dir{} target whose
@@ -244,13 +278,13 @@ namespace build2
// unless we release the phase.
//
phase_unlock u (ct.ctx, true /* unlock */, true /* delay */);
- e = ctx.sched.wait (busy - 1, task_count, u, *wq);
+ e = ctx.sched->wait (busy - 1, task_count, u, *wq);
}
// We don't lock already applied or executed targets.
//
if (e >= appl)
- return target_lock {a, nullptr, e - b};
+ return target_lock {a, nullptr, e - b, false};
}
// We now have the lock. Analyze the old value and decide what to do.
@@ -259,7 +293,8 @@ namespace build2
target::opstate& s (t[a]);
size_t offset;
- if (e <= b)
+ bool first;
+ if ((first = (e <= b)))
{
// First lock for this operation.
//
@@ -276,7 +311,7 @@ namespace build2
offset == target::offset_matched);
}
- return target_lock {a, &t, offset};
+ return target_lock {a, &t, offset, first};
}
void
@@ -292,7 +327,7 @@ namespace build2
// this target.
//
task_count.store (offset + ctx.count_base (), memory_order_release);
- ctx.sched.resume (task_count);
+ ctx.sched->resume (task_count);
}
target&
@@ -310,106 +345,291 @@ namespace build2
if (*mp != nullptr) // Might already be there.
return **mp;
- pair<target&, ulock> r (
- t.ctx.targets.insert_locked (tt,
- move (dir),
- move (out),
- move (n),
- nullopt /* ext */,
- target_decl::implied,
- trace));
-
- assert (r.second);
+ target* m (nullptr);
+ {
+ pair<target&, ulock> r (
+ t.ctx.targets.insert_locked (tt,
+ move (dir),
+ move (out),
+ move (n),
+ nullopt /* ext */,
+ target_decl::implied,
+ trace,
+ true /* skip_find */));
+
+ if (r.second) // Inserted.
+ {
+ m = &r.first;
+ m->group = &t;
+ }
+ }
- target& m (r.first);
- *mp = &m;
- m.group = &t;
+ assert (m != nullptr);
+ *mp = m;
- return m;
+ return *m;
};
- // Return the matching rule or NULL if no match and try_match is true.
- //
- const rule_match*
- match_rule (action a, target& t, const rule* skip, bool try_match)
+ static bool
+ trace_target (const target& t, const vector<name>& ns)
{
- const scope& bs (t.base_scope ());
+ for (const name& n: ns)
+ {
+ if (n.untyped () || n.qualified () || n.pattern)
+ fail << "unsupported trace target name '" << n << "'" <<
+ info << "unqualified, typed, non-pattern name expected";
- // Match rules in project environment.
- //
- auto_project_env penv;
- if (const scope* rs = bs.root_scope ())
- penv = auto_project_env (*rs);
+ if (!n.dir.empty ())
+ {
+ if (n.dir.relative () || !n.dir.normalized ())
+ fail << "absolute and normalized trace target directory expected";
- match_extra& me (t[a].match_extra);
+ if (t.dir != n.dir)
+ continue;
+ }
- // First check for an ad hoc recipe.
- //
- // Note that a fallback recipe is preferred over a non-fallback rule.
+ if (n.type == t.type ().name && n.value == t.name)
+ return true;
+ }
+
+ return false;
+ }
+
+ void
+ set_rule_trace (target_lock& l, const rule_match* rm)
+ {
+ action a (l.action);
+ target& t (*l.target);
+
+ // Note: see similar code in execute_impl() for execute.
//
- if (!t.adhoc_recipes.empty ())
+ if (trace_target (t, *t.ctx.trace_match))
{
- auto df = make_diag_frame (
- [a, &t](const diag_record& dr)
- {
- if (verb != 0)
- dr << info << "while matching ad hoc recipe to " << diag_do (a, t);
- });
+ diag_record dr (info);
+
+ dr << "matching to " << diag_do (a, t);
- auto match = [a, &t, &me] (const adhoc_rule& r, bool fallback) -> bool
+ if (rm != nullptr)
{
- me.init (fallback);
+ const rule& r (rm->second);
+
+ if (const adhoc_rule* ar = dynamic_cast<const adhoc_rule*> (&r))
+ {
+ dr << info (ar->loc);
- if (auto* f = (a.outer ()
- ? t.ctx.current_outer_oif
- : t.ctx.current_inner_oif)->adhoc_match)
- return f (r, a, t, string () /* hint */, me);
+ if (ar->pattern != nullptr)
+ dr << "using ad hoc pattern rule ";
+ else
+ dr << "using ad hoc recipe ";
+ }
else
- return r.match (a, t, string () /* hint */, me);
- };
+ dr << info << "using rule ";
- // The action could be Y-for-X while the ad hoc recipes are always for
- // X. So strip the Y-for part for comparison (but not for the match()
- // calls; see below for the hairy inner/outer semantics details).
- //
- action ca (a.inner ()
- ? a
- : action (a.meta_operation (), a.outer_operation ()));
+ dr << rm->first;
+ }
+ else
+ dr << info << "using directly-assigned recipe";
+ }
+
+ t[a].rule = rm;
+ }
+ // Note: not static since also called by rule::sub_match().
+ //
+ const rule_match*
+ match_adhoc_recipe (action a, target& t, match_extra& me)
+ {
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching ad hoc recipe to " << diag_do (a, t);
+ });
+
+ auto match = [a, &t, &me] (const adhoc_rule& r, bool fallback) -> bool
+ {
+ me.init (fallback);
+
+ if (auto* f = (a.outer ()
+ ? t.ctx.current_outer_oif
+ : t.ctx.current_inner_oif)->adhoc_match)
+ return f (r, a, t, string () /* hint */, me);
+ else
+ return r.match (a, t, string () /* hint */, me);
+ };
+
+ // The action could be Y-for-X while the ad hoc recipes are always for
+ // X. So strip the Y-for part for comparison (but not for the match()
+ // calls; see below for the hairy inner/outer semantics details).
+ //
+ action ca (a.inner ()
+ ? a
+ : action (a.meta_operation (), a.outer_operation ()));
+
+ // If returned rule_match is NULL, then the second half indicates whether
+ // the rule was found (but did not match).
+ //
+ auto find_match = [&t, &match] (action ca) -> pair<const rule_match*, bool>
+ {
+ // Note that there can be at most one recipe for any action.
+ //
auto b (t.adhoc_recipes.begin ()), e (t.adhoc_recipes.end ());
auto i (find_if (
b, e,
- [&match, ca] (const shared_ptr<adhoc_rule>& r)
+ [ca] (const shared_ptr<adhoc_rule>& r)
{
auto& as (r->actions);
- return (find (as.begin (), as.end (), ca) != as.end () &&
- match (*r, false));
+ return find (as.begin (), as.end (), ca) != as.end ();
}));
- if (i == e)
+ bool f (i != e);
+ if (f)
+ {
+ if (!match (**i, false /* fallback */))
+ i = e;
+ }
+ else
{
// See if we have a fallback implementation.
//
// See the adhoc_rule::reverse_fallback() documentation for details on
// what's going on here.
//
+ // Note that it feels natural not to look for a fallback if a custom
+ // recipe was provided but did not match.
+ //
+ const target_type& tt (t.type ());
i = find_if (
b, e,
- [&match, ca, &t] (const shared_ptr<adhoc_rule>& r)
+ [ca, &tt] (const shared_ptr<adhoc_rule>& r)
{
- auto& as (r->actions);
-
- // Note that the rule could be there but not match (see above),
- // thus this extra check.
+ // Only the rule that provides the "forward" action can provide
+ // "reverse", so there can be at most one such rule.
//
- return (find (as.begin (), as.end (), ca) == as.end () &&
- r->reverse_fallback (ca, t.type ()) &&
- match (*r, true));
+ return r->reverse_fallback (ca, tt);
});
+
+ f = (i != e);
+ if (f)
+ {
+ if (!match (**i, true /* fallback */))
+ i = e;
+ }
+ }
+
+ return pair<const rule_match*, bool> (
+ i != e ? &(*i)->rule_match : nullptr,
+ f);
+ };
+
+ pair<const rule_match*, bool> r (find_match (ca));
+
+ // Provide the "add dist_* and configure_* actions for every perform_*
+ // action unless there is a custom one" semantics (see the equivalent ad
+ // hoc rule registration code in the parser for background).
+ //
+ // Note that handling this in the parser by adding the extra actions is
+ // difficult because we store recipe actions in the recipe itself (
+ // adhoc_rule::actions) and a recipe could be shared among multiple
+ // targets, some of which may provide a "custom one" as another recipe. On
+ // the other hand, handling it here is relatively straightforward.
+ //
+ if (r.first == nullptr && !r.second)
+ {
+ meta_operation_id mo (ca.meta_operation ());
+ if (mo == configure_id || mo == dist_id)
+ {
+ action pa (perform_id, ca.operation ());
+ r = find_match (pa);
+ }
+ }
+
+ return r.first;
+ }
+
+ // Return the matching rule or NULL if no match and try_match is true.
+ //
+ const rule_match*
+ match_rule (action a, target& t,
+ const rule* skip,
+ bool try_match,
+ match_extra* pme)
+ {
+ using fallback_rule = adhoc_rule_pattern::fallback_rule;
+
+ auto adhoc_rule_match = [] (const rule_match& r)
+ {
+ return dynamic_cast<const adhoc_rule*> (&r.second.get ());
+ };
+
+ auto fallback_rule_match = [] (const rule_match& r)
+ {
+ return dynamic_cast<const fallback_rule*> (&r.second.get ());
+ };
+
+ if (const target* g = t.group)
+ {
+ // If this is a group with dynamic members, then match it with the
+ // group's rule automatically. See dyndep_rule::inject_group_member()
+ // for background.
+ //
+ if ((g->type ().flags & target_type::flag::dyn_members) ==
+ target_type::flag::dyn_members)
+ {
+ if (g->matched (a, memory_order_acquire))
+ {
+ const rule_match* r (g->state[a].rule);
+ assert (r != nullptr); // Shouldn't happen with dyn_members.
+ return r;
+ }
+
+ // Assume static member and fall through.
}
- if (i != e)
- return &(*i)->rule_match;
+ // If this is a member of group-based target, then first try to find a
+ // matching ad hoc recipe/rule by matching (to an ad hoc recipe/rule) the
+ // group but applying to the member. See adhoc_rule::match() for
+ // background, including for why const_cast should be safe.
+ //
+    // To put it another way, if a group is matched by an ad hoc
+    // recipe/rule, then we want all the members to be matched to the same
+    // recipe/rule.
+ //
+ // Note that such a group is dyn_members so we would have tried the
+ // "already matched" case above.
+ //
+ if (g->is_a<group> ())
+ {
+ // We cannot init match_extra from the target if it's unlocked so use
+ // a temporary (it shouldn't be modified if unlocked).
+ //
+ match_extra me (false /* locked */);
+ if (const rule_match* r = match_rule (
+ a, const_cast<target&> (*g), skip, true /* try_match */, &me))
+ return r;
+
+ // Fall through to normal match of the member.
+ }
+ }
+
+ const scope& bs (t.base_scope ());
+
+ // Match rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ match_extra& me (pme == nullptr ? t[a].match_extra : *pme);
+
+ // First check for an ad hoc recipe.
+ //
+ // Note that a fallback recipe is preferred over a non-fallback rule.
+ //
+ if (!t.adhoc_recipes.empty ())
+ {
+ if (const rule_match* r = match_adhoc_recipe (a, t, me))
+ return r;
}
// If this is an outer operation (Y-for-X), then we look for rules
@@ -425,183 +645,210 @@ namespace build2
meta_operation_id mo (a.meta_operation ());
operation_id o (a.inner () ? a.operation () : a.outer_operation ());
- for (auto tt (&t.type ()); tt != nullptr; tt = tt->base)
+ // Our hint semantics applies regardless of the meta-operation. This works
+ // reasonably well except for the default/fallback rules provided by some
+ // meta-operations (e.g., dist, config), which naturally do not match the
+ // hint.
+ //
+ // The way we solve this problem is by trying a hint-less match as a
+ // fallback for non-perform meta-operations. @@ Ideally we would want to
+ // only consider such default/fallback rules, which we may do in the
+ // future (we could just decorate their names with some special marker,
+ // e.g., `dist.file.*` but that would be visible in diagnostics).
+ //
+ // It seems the only potential problem with this approach is the inability
+ // by the user to specify the hint for this specific meta-operation (e.g.,
+ // to resolve an ambiguity between two rules or override a matched rule),
+ // which seems quite remote at the moment. Maybe/later we can invent a
+ // syntax for that.
+ //
+ const string* hint;
+ for (bool retry (false);; retry = true)
{
- // Search scopes outwards, stopping at the project root.
- //
- for (const scope* s (&bs);
- s != nullptr;
- s = s->root () ? &s->global_scope () : s->parent_scope ())
- {
- const operation_rule_map* om (s->rules[mo]);
-
- if (om == nullptr)
- continue; // No entry for this meta-operation id.
+ hint = retry
+ ? &empty_string
+ : &t.find_hint (o); // MT-safe (target locked).
- // First try the map for the actual operation. If that doesn't yeld
- // anything, try the wildcard map.
+ for (auto tt (&t.type ()); tt != nullptr; tt = tt->base)
+ {
+ // Search scopes outwards, stopping at the project root. For retry
+ // only look in the root and global scopes.
//
- for (operation_id oi (o), oip (o); oip != 0; oip = oi, oi = 0)
+ for (const scope* s (retry ? bs.root_scope () : &bs);
+ s != nullptr;
+ s = s->root () ? &s->global_scope () : s->parent_scope ())
{
- const target_type_rule_map* ttm ((*om)[oi]);
-
- if (ttm == nullptr)
- continue; // No entry for this operation id.
-
- if (ttm->empty ())
- continue; // Empty map for this operation id.
-
- auto i (ttm->find (tt));
+ const operation_rule_map* om (s->rules[mo]);
- if (i == ttm->end () || i->second.empty ())
- continue; // No rules registered for this target type.
+ if (om == nullptr)
+ continue; // No entry for this meta-operation id.
- const auto& rules (i->second); // Hint map.
-
- // @@ TODO hint
- //
- // Different rules can be used for different operations (update vs
- // test is a good example). So, at some point, we will probably have
- // to support a list of hints or even an operation-hint map (e.g.,
- // 'hint=cxx test=foo' if cxx supports the test operation but we
- // want the foo rule instead). This is also the place where the
- // '{build clean}=cxx' construct (which we currently do not support)
- // can come handy.
- //
- // Also, ignore the hint (that is most likely ment for a different
- // operation) if this is a unique match.
+          // First try the map for the actual operation. If that doesn't yield
+          // anything, try the wildcard map.
//
- string hint;
- auto rs (rules.size () == 1
- ? make_pair (rules.begin (), rules.end ())
- : rules.find_sub (hint));
-
- for (auto i (rs.first); i != rs.second; ++i)
+ for (operation_id oi (o), oip (o); oip != 0; oip = oi, oi = 0)
{
- const rule_match* r (&*i);
+ const target_type_rule_map* ttm ((*om)[oi]);
- // In a somewhat hackish way we reuse operation wildcards to plumb
- // the ad hoc rule's reverse operation fallback logic.
- //
- // The difficulty is two-fold:
- //
- // 1. It's difficult to add the fallback flag to the rule map
- // because of rule_match which is used throughout.
- //
- // 2. Even if we could do that, we pass the reverse action to
- // reverse_fallback() rather than it returning (a list) of
- // reverse actions, which would be necessary to register them.
- //
- using fallback_rule = adhoc_rule_pattern::fallback_rule;
+ if (ttm == nullptr)
+ continue; // No entry for this operation id.
- auto find_fallback = [mo, o, tt] (const fallback_rule& fr)
- -> const rule_match*
- {
- for (const shared_ptr<adhoc_rule>& ar: fr.rules)
- if (ar->reverse_fallback (action (mo, o), *tt))
- return &ar->rule_match;
+ if (ttm->empty ())
+ continue; // Empty map for this operation id.
- return nullptr;
- };
+ auto i (ttm->find (tt));
- if (oi == 0)
- {
- if (auto* fr =
- dynamic_cast<const fallback_rule*> (&r->second.get ()))
- {
- if ((r = find_fallback (*fr)) == nullptr)
- continue;
- }
- }
+ if (i == ttm->end () || i->second.empty ())
+ continue; // No rules registered for this target type.
- const string& n (r->first);
- const rule& ru (r->second);
+ const auto& rules (i->second); // Name map.
- if (&ru == skip)
- continue;
+ // Filter against the hint, if any.
+ //
+ auto rs (hint->empty ()
+ ? make_pair (rules.begin (), rules.end ())
+ : rules.find_sub (*hint));
- me.init (oi == 0 /* fallback */);
+ for (auto i (rs.first); i != rs.second; ++i)
{
- auto df = make_diag_frame (
- [a, &t, &n](const diag_record& dr)
- {
- if (verb != 0)
- dr << info << "while matching rule " << n << " to "
- << diag_do (a, t);
- });
+ const rule_match* r (&*i);
- if (!ru.match (a, t, hint, me))
- continue;
- }
-
- // Do the ambiguity test.
- //
- bool ambig (false);
+ // In a somewhat hackish way we reuse operation wildcards to
+ // plumb the ad hoc rule's reverse operation fallback logic.
+ //
+ // The difficulty is two-fold:
+ //
+ // 1. It's difficult to add the fallback flag to the rule map
+ // because of rule_match which is used throughout.
+ //
+ // 2. Even if we could do that, we pass the reverse action to
+ // reverse_fallback() rather than it returning (a list) of
+ // reverse actions, which would be necessary to register them.
+ //
+ auto find_fallback = [mo, o, tt] (const fallback_rule& fr)
+ -> const rule_match*
+ {
+ for (const shared_ptr<adhoc_rule>& ar: fr.rules)
+ if (ar->reverse_fallback (action (mo, o), *tt))
+ return &ar->rule_match;
- diag_record dr;
- for (++i; i != rs.second; ++i)
- {
- const rule_match* r1 (&*i);
+ return nullptr;
+ };
if (oi == 0)
{
- if (auto* fr =
- dynamic_cast<const fallback_rule*> (&r1->second.get ()))
+ if (const fallback_rule* fr = fallback_rule_match (*r))
{
- if ((r1 = find_fallback (*fr)) == nullptr)
+ if ((r = find_fallback (*fr)) == nullptr)
continue;
}
}
- const string& n1 (r1->first);
- const rule& ru1 (r1->second);
+ // Skip non-ad hoc rules if the target is not locked (see
+ // above).
+ //
+ if (!me.locked && !adhoc_rule_match (*r))
+ continue;
+
+ const string& n (r->first);
+ const rule& ru (r->second);
+ if (&ru == skip)
+ continue;
+
+ me.init (oi == 0 /* fallback */);
{
auto df = make_diag_frame (
- [a, &t, &n1](const diag_record& dr)
+ [a, &t, &n](const diag_record& dr)
{
if (verb != 0)
- dr << info << "while matching rule " << n1 << " to "
+ dr << info << "while matching rule " << n << " to "
<< diag_do (a, t);
});
- // @@ TODO: this makes target state in match() undetermined
- // so need to fortify rules that modify anything in match
- // to clear things.
- //
- // @@ Can't we temporarily swap things out in target?
- //
- match_extra me1;
- me1.init (oi == 0);
- if (!ru1.match (a, t, hint, me1))
+ if (!ru.match (a, t, *hint, me))
continue;
}
- if (!ambig)
+ // Do the ambiguity test.
+ //
+ bool ambig (false);
+
+ diag_record dr;
+ for (++i; i != rs.second; ++i)
{
- dr << fail << "multiple rules matching " << diag_doing (a, t)
- << info << "rule " << n << " matches";
- ambig = true;
+ const rule_match* r1 (&*i);
+
+ if (oi == 0)
+ {
+ if (const fallback_rule* fr = fallback_rule_match (*r1))
+ {
+ if ((r1 = find_fallback (*fr)) == nullptr)
+ continue;
+ }
+ }
+
+ if (!me.locked && !adhoc_rule_match (*r1))
+ continue;
+
+ const string& n1 (r1->first);
+ const rule& ru1 (r1->second);
+
+ {
+ auto df = make_diag_frame (
+ [a, &t, &n1](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching rule " << n1 << " to "
+ << diag_do (a, t);
+ });
+
+ // @@ TODO: this makes target state in match() undetermined
+ // so need to fortify rules that modify anything in match
+ // to clear things.
+ //
+ // @@ Can't we temporarily swap things out in target?
+ //
+ match_extra me1 (me.locked, oi == 0 /* fallback */);
+ if (!ru1.match (a, t, *hint, me1))
+ continue;
+ }
+
+ if (!ambig)
+ {
+ dr << fail << "multiple rules matching " << diag_doing (a, t)
+ << info << "rule " << n << " matches";
+ ambig = true;
+ }
+
+ dr << info << "rule " << n1 << " also matches";
}
- dr << info << "rule " << n1 << " also matches";
+ if (!ambig)
+ return r;
+ else
+ dr << info << "use rule hint to disambiguate this match";
}
-
- if (!ambig)
- return r;
- else
- dr << info << "use rule hint to disambiguate this match";
}
}
}
+
+ if (mo == perform_id || hint->empty () || retry)
+ break;
}
me.free ();
if (!try_match)
{
- diag_record dr;
- dr << fail << "no rule to " << diag_do (a, t);
+ diag_record dr (fail);
+
+ if (hint->empty ())
+ dr << "no rule to ";
+ else
+ dr << "no rule with hint " << *hint << " to ";
+
+ dr << diag_do (a, t);
// Try to give some hints of the common causes.
//
@@ -695,6 +942,87 @@ namespace build2
return re;
}
+ // If anything goes wrong, set target state to failed and return false.
+ //
+ // Note: must be called while holding target_lock.
+ //
+ static bool
+ match_posthoc (action a, target& t)
+ {
+    // The plan is to, while holding the lock, search and collect all the post
+    // hoc prerequisites and add an entry to context::current_posthoc_targets.
+ // The actual matching happens as post-pass in the meta-operation's match
+ // function.
+ //
+ // While it may seem like we could do matching here by unlocking (or
+ // unstacking) the lock for this target, that will only work for simple
+ // cases. In particular, consider:
+ //
+ // lib{foo}: ...
+ // lib{plug}: ... lib{foo}
+ // libs{foo}: libs{plug}: include = posthoc
+ //
+ // The chain we will end up with:
+ //
+ // lib{foo}->libs{foo}=>libs{plug}->lib{foo}
+ //
+ // This will trip up the cycle detection for group lib{foo}, not for
+ // libs{foo}.
+ //
+ // In the end, matching (and execution) "inline" (i.e., as we match/
+ // execute the corresponding target) appears to be unworkable in the
+ // face of cycles.
+
+ // @@ Anything we need to do for group members (see through)? Feels quite
+ // far-fetched.
+ //
+ vector<const target*> pts;
+ try
+ {
+ for (const prerequisite& p: group_prerequisites (t))
+ {
+ // Note that we have to ignore any operation-specific values for
+ // non-posthoc prerequisites. See include_impl() for details.
+ //
+ lookup l;
+ if (include (a, t, p, &l) == include_type::posthoc)
+ {
+ if (l)
+ {
+ const string& v (cast<string> (l));
+
+ // The only valid values are true and false and the latter would
+ // have been translated to include_type::exclude.
+ //
+ if (v != "true")
+ {
+ fail << "unrecognized " << *l.var << " variable value "
+ << "'" << v << "' specified for prerequisite " << p;
+ }
+ }
+
+ pts.push_back (&search (t, p)); // May fail.
+ }
+ }
+ }
+ catch (const failed&)
+ {
+ t.state[a].state = target_state::failed;
+ return false;
+ }
+
+ if (!pts.empty ())
+ {
+ context& ctx (t.ctx);
+
+ mlock l (ctx.current_posthoc_targets_mutex);
+ ctx.current_posthoc_targets.push_back (
+ context::posthoc_target {a, t, move (pts)});
+ }
+
+ return true;
+ }
+
// If step is true then perform only one step of the match/apply sequence.
//
// If try_match is true, then indicate whether there is a rule match with
@@ -711,46 +1039,46 @@ namespace build2
target& t (*l.target);
target::opstate& s (t[a]);
- // Intercept and handle matching an ad hoc group member.
- //
- if (t.adhoc_group_member ())
+ try
{
- assert (!step);
+ // Intercept and handle matching an ad hoc group member.
+ //
+ if (t.adhoc_group_member ())
+ {
+ assert (!step);
- const target& g (*t.group);
+ const target& g (*t.group);
- // It feels natural to "convert" this call to the one for the group,
- // including the try_match part. Semantically, we want to achieve the
- // following:
- //
- // [try_]match (a, g);
- // match_recipe (l, group_recipe);
- //
- auto df = make_diag_frame (
- [a, &t](const diag_record& dr)
- {
- if (verb != 0)
- dr << info << "while matching group rule to " << diag_do (a, t);
- });
+ // It feels natural to "convert" this call to the one for the group,
+ // including the try_match part. Semantically, we want to achieve the
+ // following:
+ //
+ // [try_]match (a, g);
+ // match_recipe (l, group_recipe);
+ //
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching group rule to " << diag_do (a, t);
+ });
- pair<bool, target_state> r (match (a, g, 0, nullptr, try_match));
+ pair<bool, target_state> r (match_impl (a, g, 0, nullptr, try_match));
- if (r.first)
- {
- if (r.second != target_state::failed)
+ if (r.first)
{
- match_inc_dependens (a, g);
- match_recipe (l, group_recipe);
+ if (r.second != target_state::failed)
+ {
+ match_inc_dependents (a, g);
+ match_recipe (l, group_recipe);
+ }
}
- }
- else
- l.offset = target::offset_tried;
+ else
+ l.offset = target::offset_tried;
- return r; // Group state (must be consistent with matched_state()).
- }
+ return r; // Group state (must be consistent with matched_state()).
+ }
- try
- {
// Continue from where the target has been left off.
//
switch (l.offset)
@@ -769,9 +1097,9 @@ namespace build2
//
// Clear the rule-specific variables, resolved targets list, and the
- // data pad before calling match(). The rule is free to modify these
- // in its match() (provided that it matches) in order to, for
- // example, convey some information to apply().
+ // auxiliary data storage before calling match(). The rule is free
+ // to modify these in its match() (provided that it matches) in
+ // order to, for example, convey some information to apply().
//
clear_target (a, t);
@@ -785,7 +1113,7 @@ namespace build2
return make_pair (false, target_state::unknown);
}
- s.rule = r;
+ set_rule (l, r);
l.offset = target::offset_matched;
if (step)
@@ -825,11 +1153,11 @@ namespace build2
// the first half of the result.
//
pair<bool, target_state>
- match (action a,
- const target& ct,
- size_t start_count,
- atomic_count* task_count,
- bool try_match)
+ match_impl (action a,
+ const target& ct,
+ size_t start_count,
+ atomic_count* task_count,
+ bool try_match)
{
// If we are blocking then work our own queue one task at a time. The
// logic here is that we may have already queued other tasks before this
@@ -860,7 +1188,20 @@ namespace build2
return make_pair (false, target_state::unknown);
if (task_count == nullptr)
- return match_impl (l, false /* step */, try_match);
+ {
+ pair<bool, target_state> r (match_impl (l, false /*step*/, try_match));
+
+ if (r.first &&
+ r.second != target_state::failed &&
+ l.offset == target::offset_applied &&
+ ct.has_group_prerequisites ()) // Already matched.
+ {
+ if (!match_posthoc (a, *l.target))
+ r.second = target_state::failed;
+ }
+
+ return r;
+ }
// Pass "disassembled" lock since the scheduler queue doesn't support
// task destruction.
@@ -870,12 +1211,12 @@ namespace build2
// Also pass our diagnostics and lock stacks (this is safe since we
// expect the caller to wait for completion before unwinding its stack).
//
- if (ct.ctx.sched.async (
+ if (ct.ctx.sched->async (
start_count,
*task_count,
[a, try_match] (const diag_frame* ds,
const target_lock* ls,
- target& t, size_t offset)
+ target& t, size_t offset, bool first)
{
// Switch to caller's diag and lock stacks.
//
@@ -886,9 +1227,18 @@ namespace build2
{
phase_lock pl (t.ctx, run_phase::match); // Throws.
{
- target_lock l {a, &t, offset}; // Reassemble.
- match_impl (l, false /* step */, try_match);
- // Unlock within the match phase.
+ // Note: target_lock must be unlocked within the match phase.
+ //
+ target_lock l {a, &t, offset, first}; // Reassemble.
+
+ pair<bool, target_state> r (
+ match_impl (l, false /* step */, try_match));
+
+ if (r.first &&
+ r.second != target_state::failed &&
+ l.offset == target::offset_applied &&
+ t.has_group_prerequisites ()) // Already matched.
+ match_posthoc (a, t);
}
}
catch (const failed&) {} // Phase lock failure.
@@ -896,7 +1246,8 @@ namespace build2
diag_frame::stack (),
target_lock::stack (),
ref (*ld.target),
- ld.offset))
+ ld.offset,
+ ld.first))
return make_pair (true, target_state::postponed); // Queued.
// Matched synchronously, fall through.
@@ -914,9 +1265,13 @@ namespace build2
return ct.try_matched_state (a, false);
}
+ // Note: lock is a reference to avoid the stacking overhead.
+ //
static group_view
- resolve_members_impl (action a, const target& g, target_lock l)
+ resolve_members_impl (action a, const target& g, target_lock&& l)
{
+ assert (a.inner ());
+
// Note that we will be unlocked if the target is already applied.
//
group_view r;
@@ -930,9 +1285,11 @@ namespace build2
{
// Match (locked).
//
- if (match_impl (l, true).second == target_state::failed)
+ if (match_impl (l, true /* step */).second == target_state::failed)
throw failed ();
+ // Note: only matched so no call to match_posthoc().
+
if ((r = g.group_members (a)).members != nullptr)
break;
@@ -941,22 +1298,58 @@ namespace build2
// Fall through.
case target::offset_matched:
{
- // @@ Doing match without execute messes up our target_count. Does
- // not seem like it will be easy to fix (we don't know whether
- // someone else will execute this target).
- //
- // @@ What if we always do match & execute together? After all,
- // if a group can be resolved in apply(), then it can be
- // resolved in match()!
- //
-
// Apply (locked).
//
- if (match_impl (l, true).second == target_state::failed)
+ pair<bool, target_state> s (match_impl (l, true /* step */));
+
+ if (s.second != target_state::failed &&
+ g.has_group_prerequisites ()) // Already matched.
+ {
+ if (!match_posthoc (a, *l.target))
+ s.second = target_state::failed;
+ }
+
+ if (s.second == target_state::failed)
throw failed ();
if ((r = g.group_members (a)).members != nullptr)
+ {
+ // Doing match without execute messes up our target_count. There
+ // doesn't seem to be a clean way to solve this. Well, just always
+ // executing if we've done the match would have been clean but quite
+ // heavy-handed (it would be especially surprising if otherwise
+ // there is nothing else to do, which can happen, for example,
+ // during update-for-test when there are no tests to run).
+ //
+ // So our solution is as follows:
+ //
+ // 1. Keep track both of the targets that ended up in this situation
+ // (the target::resolve_counted flag) as well as their total
+ // count (the context::resolve_count member). Only do this if
+ // set_recipe() (called by match_impl()) would have incremented
+ // target_count.
+ //
+ // 2. If we happen to execute such a target (common case), then
+ // clear the flag and decrement the count.
+ //
+ // 3. When it's time to assert that target_count==0 (i.e., all the
+ // matched targets have been executed), check if resolve_count is
+ // 0. If it's not, then find every target with the flag set,
+ // pretend-execute it, and decrement both counts. See
+ // perform_execute() for further details on this step.
+ //
+ if (s.second != target_state::unchanged)
+ {
+ target::opstate& s (l.target->state[a]); // Inner.
+
+ if (!s.recipe_group_action)
+ {
+ s.resolve_counted = true;
+ g.ctx.resolve_count.fetch_add (1, memory_order_relaxed);
+ }
+ }
break;
+ }
// Unlock and to execute ...
//
@@ -967,15 +1360,19 @@ namespace build2
{
// Execute (unlocked).
//
- // Note that we use execute_direct() rather than execute() here to
- // sidestep the dependents count logic. In this context, this is by
- // definition the first attempt to execute this rule (otherwise we
- // would have already known the members list) and we really do need
+ // Note that we use execute_direct_sync() rather than execute_sync()
+ // here to sidestep the dependents count logic. In this context, this
+ // is by definition the first attempt to execute this rule (otherwise
+ // we would have already known the members list) and we really do need
// to execute it now.
//
+ // Note that while it might be tempting to decrement resolve_count
+ // here, there is no guarantee that we were the ones who have matched
+ // this target.
+ //
{
phase_switch ps (g.ctx, run_phase::execute);
- execute_direct (a, g);
+ execute_direct_sync (a, g);
}
r = g.group_members (a);
@@ -1021,10 +1418,29 @@ namespace build2
return r;
}
+ // Note: lock is a reference to avoid the stacking overhead.
+ //
void
- resolve_group_impl (action, const target&, target_lock l)
+ resolve_group_impl (action a, const target& t, target_lock&& l)
{
- match_impl (l, true /* step */, true /* try_match */);
+ assert (a.inner ());
+
+ pair<bool, target_state> r (
+ match_impl (l, true /* step */, true /* try_match */));
+
+ if (r.first &&
+ r.second != target_state::failed &&
+ l.offset == target::offset_applied &&
+ t.has_group_prerequisites ()) // Already matched.
+ {
+ if (!match_posthoc (a, *l.target))
+ r.second = target_state::failed;
+ }
+
+ l.unlock ();
+
+ if (r.first && r.second == target_state::failed)
+ throw failed ();
}
template <typename R, typename S>
@@ -1069,7 +1485,7 @@ namespace build2
for (size_t n (pts.size ()); i != n; ++i)
{
const target& pt (*pts[i]);
- match (a, pt);
+ match_complete (a, pt);
}
}
@@ -1089,9 +1505,8 @@ namespace build2
match_prerequisite_range (a, t, group_prerequisite_members (a, t), msm, s);
}
- template <typename T>
void
- match_members (action a, target& t, T const* ts, size_t n)
+ match_members (action a, const target& t, const target* const* ts, size_t n)
{
// Pretty much identical to match_prerequisite_range() except we don't
// search.
@@ -1119,22 +1534,52 @@ namespace build2
if (m == nullptr || marked (m))
continue;
- match (a, *m);
+ match_complete (a, *m);
}
}
- // Instantiate only for what we need.
- //
- template LIBBUILD2_SYMEXPORT void
- match_members<const target*> (action, target&,
- const target* const*, size_t);
+ void
+ match_members (action a,
+ const target& t,
+ prerequisite_targets& ts,
+ size_t s,
+ pair<uintptr_t, uintptr_t> imv)
+ {
+ size_t n (ts.size ());
+
+ wait_guard wg (t.ctx, t.ctx.count_busy (), t[a].task_count, true);
+
+ for (size_t i (s); i != n; ++i)
+ {
+ const prerequisite_target& pt (ts[i]);
+ const target* m (pt.target);
+
+ if (m == nullptr ||
+ marked (m) ||
+ (imv.first != 0 && (pt.include & imv.first) != imv.second))
+ continue;
+
+ match_async (a, *m, t.ctx.count_busy (), t[a].task_count);
+ }
+
+ wg.wait ();
- template LIBBUILD2_SYMEXPORT void
- match_members<prerequisite_target> (action, target&,
- prerequisite_target const*, size_t);
+ for (size_t i (s); i != n; ++i)
+ {
+ const prerequisite_target& pt (ts[i]);
+ const target* m (pt.target);
+
+ if (m == nullptr ||
+ marked (m) ||
+ (imv.first != 0 && (pt.include & imv.first) != imv.second))
+ continue;
+
+ match_complete (a, *m);
+ }
+ }
const fsdir*
- inject_fsdir (action a, target& t, bool parent)
+ inject_fsdir (action a, target& t, bool prereq, bool parent)
{
tracer trace ("inject_fsdir");
@@ -1163,7 +1608,7 @@ namespace build2
//
r = &search<fsdir> (t, d, dir_path (), string (), nullptr, nullptr);
}
- else
+ else if (prereq)
{
// See if one was mentioned explicitly.
//
@@ -1187,7 +1632,7 @@ namespace build2
// Make it ad hoc so that it doesn't end up in prerequisite_targets
// after execution.
//
- match (a, *r);
+ match_sync (a, *r);
t.prerequisite_targets[a].emplace_back (r, include_type::adhoc);
}
@@ -1301,11 +1746,26 @@ namespace build2
return ts;
}
- void
- update_backlink (const file& f, const path& l, bool changed, backlink_mode m)
+ static inline const char*
+ update_backlink_name (backlink_mode m, bool to_dir)
{
using mode = backlink_mode;
+ const char* r (nullptr);
+ switch (m)
+ {
+ case mode::link:
+ case mode::symbolic: r = verb >= 3 ? "ln -sf" : verb >= 2 ? "ln -s" : "ln"; break;
+ case mode::hard: r = verb >= 3 ? "ln -f" : "ln"; break;
+ case mode::copy:
+ case mode::overwrite: r = to_dir ? "cp -r" : "cp"; break;
+ }
+ return r;
+ }
+
+ void
+ update_backlink (const file& f, const path& l, bool changed, backlink_mode m)
+ {
const path& p (f.path ());
dir_path d (l.directory ());
@@ -1317,28 +1777,20 @@ namespace build2
// actually updated to signal to the user that the updated out target is
// now available in src.
//
- if (verb <= 2)
+ if (verb == 1 || verb == 2)
{
if (changed || !butl::entry_exists (l,
false /* follow_symlinks */,
true /* ignore_errors */))
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = verb >= 2 ? "ln -s" : "ln"; break;
- case mode::hard: c = "ln"; break;
- case mode::copy:
- case mode::overwrite: c = l.to_directory () ? "cp -r" : "cp"; break;
- }
+ const char* c (update_backlink_name (m, l.to_directory ()));
- // Note: 'ln foo/ bar/' means a different thing.
+ // Note: 'ln foo/ bar/' means a different thing (and below).
//
- if (verb >= 2)
+ if (verb == 2)
text << c << ' ' << p.string () << ' ' << l.string ();
else
- text << c << ' ' << f << " -> " << d;
+ print_diag (c, f, d);
}
}
@@ -1358,30 +1810,25 @@ namespace build2
{
// As above but with a slightly different diagnostics.
- using mode = backlink_mode;
-
dir_path d (l.directory ());
- if (verb <= 2)
+ if (verb == 1 || verb == 2)
{
if (changed || !butl::entry_exists (l,
false /* follow_symlinks */,
true /* ignore_errors */))
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = verb >= 2 ? "ln -s" : "ln"; break;
- case mode::hard: c = "ln"; break;
- case mode::copy:
- case mode::overwrite: c = l.to_directory () ? "cp -r" : "cp"; break;
- }
+ const char* c (update_backlink_name (m, l.to_directory ()));
+ // Note: 'ln foo/ bar/' means a different thing (and above) so strip
+ // trailing directory separator (but keep as path for relative).
+ //
if (verb >= 2)
text << c << ' ' << p.string () << ' ' << l.string ();
else
- text << c << ' ' << p.string () << " -> " << d;
+ print_diag (c,
+ p.to_directory () ? path (p.string ()) : p,
+ d);
}
}
@@ -1433,6 +1880,8 @@ namespace build2
const path& p, const path& l, backlink_mode om,
uint16_t verbosity)
{
+ assert (verbosity >= 2);
+
using mode = backlink_mode;
bool d (l.to_directory ());
@@ -1442,17 +1891,8 @@ namespace build2
{
if (verb >= verbosity)
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = "ln -sf"; break;
- case mode::hard: c = "ln -f"; break;
- case mode::copy:
- case mode::overwrite: c = d ? "cp -r" : "cp"; break;
- }
-
- text << c << ' ' << p.string () << ' ' << l.string ();
+ text << update_backlink_name (m, d) << ' ' << p.string () << ' '
+ << l.string ();
}
};
@@ -1514,8 +1954,7 @@ namespace build2
try_mkdir (to);
- for (const auto& de:
- dir_iterator (fr, false /* ignore_dangling */))
+ for (const auto& de: dir_iterator (fr, dir_iterator::no_follow))
{
path f (fr / de.path ());
path t (to / de.path ());
@@ -1568,6 +2007,11 @@ namespace build2
//
// Note that here the dry-run mode is handled by the filesystem functions.
+ // Note that if we ever need to support level 1 for some reason, maybe
+ // consider showing the target, for example, `unlink exe{hello} <- dir/`?
+ //
+ assert (v >= 2);
+
using mode = backlink_mode;
if (l.to_directory ())
@@ -1602,9 +2046,15 @@ namespace build2
struct backlink: auto_rm<path>
{
using path_type = build2::path;
+ using target_type = build2::target;
reference_wrapper<const path_type> target;
- backlink_mode mode;
+ backlink_mode mode;
+
+ // Ad hoc group-specific information for diagnostics (see below).
+ //
+ const target_type* member = nullptr;
+ bool print = true;
backlink (const path_type& t, path_type&& l, backlink_mode m, bool active)
: auto_rm<path_type> (move (l), active), target (t), mode (m)
@@ -1626,33 +2076,65 @@ namespace build2
};
// Normally (i.e., on sane platforms that don't have things like PDBs, etc)
- // there will be just one backlink so optimize for that.
+ // there will be just one or two backlinks so optimize for that.
//
- using backlinks = small_vector<backlink, 1>;
+ using backlinks = small_vector<backlink, 2>;
- static optional<backlink_mode>
- backlink_test (const target& t, const lookup& l)
+ static optional<pair<backlink_mode, bool>>
+ backlink_test (const target& t, const lookup& l, optional<backlink_mode> gm)
{
using mode = backlink_mode;
- optional<mode> r;
- const string& v (cast<string> (l));
+ const names& ns (cast<names> (l));
- if (v == "true") r = mode::link;
- else if (v == "symbolic") r = mode::symbolic;
- else if (v == "hard") r = mode::hard;
- else if (v == "copy") r = mode::copy;
- else if (v == "overwrite") r = mode::overwrite;
- else if (v != "false")
- fail << "invalid backlink variable value '" << v << "' "
+ if (ns.size () != 1 && ns.size () != 2)
+ {
+ fail << "invalid backlink variable value '" << ns << "' "
<< "specified for target " << t;
+ }
- return r;
+ optional<mode> m;
+ for (;;) // Breakout loop.
+ {
+ const name& n (ns.front ());
+
+ if (n.simple ())
+ {
+ const string& v (n.value);
+
+ if (v == "true") {m = mode::link; break;}
+ else if (v == "symbolic") {m = mode::symbolic; break;}
+ else if (v == "hard") {m = mode::hard; break;}
+ else if (v == "copy") {m = mode::copy; break;}
+ else if (v == "overwrite") {m = mode::overwrite; break;}
+ else if (v == "false") { break;}
+ else if (v == "group") {if ((m = gm)) break;}
+ }
+
+ fail << "invalid backlink variable value mode component '" << n << "' "
+ << "specified for target " << t << endf;
+ }
+
+ bool np (false); // "not print"
+ if (ns.size () == 2)
+ {
+ const name& n (ns.back ());
+
+ if (n.simple () && (n.value == "true" || (np = (n.value == "false"))))
+ ;
+ else
+ fail << "invalid backlink variable value print component '" << n
+ << "' specified for target " << t;
+ }
+
+ return m ? optional<pair<mode, bool>> (make_pair (*m, !np)) : nullopt;
}
static optional<backlink_mode>
backlink_test (action a, target& t)
{
+ using mode = backlink_mode;
+
context& ctx (t.ctx);
// Note: the order of these checks is from the least to most expensive.
@@ -1662,9 +2144,20 @@ namespace build2
if (a.outer () || (a != perform_update_id && a != perform_clean_id))
return nullopt;
- // Only file-based targets in the out tree can be backlinked.
+ // Only targets in the out tree can be backlinked.
+ //
+ if (!t.out.empty ())
+ return nullopt;
+
+ // Only file-based targets or groups containing file-based targets can be
+ // backlinked. Note that we don't do the "file-based" check of the latter
+  // case here since they can still be excluded. So instead we are prepared
+ // to handle the empty backlinks list.
//
- if (!t.out.empty () || !t.is_a<file> ())
+ // @@ Potentially members could only be resolved in execute. I guess we
+ // don't support backlink for such groups at the moment.
+ //
+ if (!t.is_a<file> () && t.group_members (a).members == nullptr)
return nullopt;
// Neither an out-of-project nor in-src configuration can be forwarded.
@@ -1688,7 +2181,13 @@ namespace build2
if (!l.defined ())
l = ctx.global_scope.lookup (*ctx.var_backlink, t.key ());
- return l ? backlink_test (t, l) : nullopt;
+ optional<pair<mode, bool>> r (l ? backlink_test (t, l, nullopt) : nullopt);
+
+ if (r && !r->second)
+ fail << "backlink variable value print component cannot be false "
+ << "for primary target " << t;
+
+ return r ? optional<mode> (r->first) : nullopt;
}
static backlinks
@@ -1696,58 +2195,104 @@ namespace build2
{
using mode = backlink_mode;
+ context& ctx (t.ctx);
const scope& s (t.base_scope ());
backlinks bls;
- auto add = [&bls, &s] (const path& p, mode m)
+ auto add = [&bls, &s] (const path& p,
+ mode m,
+ const target* mt = nullptr,
+ bool print = true)
{
bls.emplace_back (p,
s.src_path () / p.leaf (s.out_path ()),
m,
!s.ctx.dry_run /* active */);
+
+ if (mt != nullptr)
+ {
+ backlink& bl (bls.back ());
+ bl.member = mt;
+ bl.print = print;
+ }
};
- // First the target itself.
+ // Check for a custom backlink mode for this member. If none, then
+ // inherit the one from the group (so if the user asked to copy
+ // .exe, we will also copy .pdb).
//
- add (t.as<file> ().path (), m);
+    // Note that we want to avoid group or tt/pattern-spec lookup. And
+ // since this is an ad hoc member (which means it was either declared
+ // in the buildfile or added by the rule), we assume that the value,
+ // if any, will be set as a target or rule-specific variable.
+ //
+ auto member_mode = [a, m, &ctx] (const target& mt)
+ -> optional<pair<mode, bool>>
+ {
+ lookup l (mt.state[a].vars[ctx.var_backlink]);
+
+ if (!l)
+ l = mt.vars[ctx.var_backlink];
+
+ return l ? backlink_test (mt, l, m) : make_pair (m, true);
+ };
- // Then ad hoc group file/fsdir members, if any.
+ // @@ Currently we don't handle the following cases:
//
- for (const target* mt (t.adhoc_member);
- mt != nullptr;
- mt = mt->adhoc_member)
+ // 1. File-based explicit groups.
+ //
+ // 2. Ad hoc subgroups in explicit groups.
+ //
+ // Note: see also the corresponding code in backlink_update_post().
+ //
+ if (file* f = t.is_a<file> ())
{
- const path* p (nullptr);
+ // First the target itself.
+ //
+ add (f->path (), m, f, true); // Note: always printed.
- if (const file* f = mt->is_a<file> ())
+ // Then ad hoc group file/fsdir members, if any.
+ //
+ for (const target* mt (t.adhoc_member);
+ mt != nullptr;
+ mt = mt->adhoc_member)
{
- p = &f->path ();
+ const path* p (nullptr);
- if (p->empty ()) // The "trust me, it's somewhere" case.
- p = nullptr;
- }
- else if (const fsdir* d = mt->is_a<fsdir> ())
- p = &d->dir;
+ if (const file* f = mt->is_a<file> ())
+ {
+ p = &f->path ();
- if (p != nullptr)
- {
- // Check for a custom backlink mode for this member. If none, then
- // inherit the one from the group (so if the user asked to copy .exe,
- // we will also copy .pdb).
- //
- // Note that we want to avoid group or tt/patter-spec lookup. And
- // since this is an ad hoc member (which means it was either declared
- // in the buildfile or added by the rule), we assume that the value,
- // if any, will be set as a rule-specific variable (since setting it
- // as a target-specific wouldn't be MT-safe). @@ Don't think this
- // applies to declared ad hoc members.
- //
- lookup l (mt->state[a].vars[t.ctx.var_backlink]);
+ if (p->empty ()) // The "trust me, it's somewhere" case.
+ p = nullptr;
+ }
+ else if (const fsdir* d = mt->is_a<fsdir> ())
+ p = &d->dir;
- optional<mode> bm (l ? backlink_test (*mt, l) : m);
+ if (p != nullptr)
+ {
+ if (optional<pair<mode, bool>> m = member_mode (*mt))
+ add (*p, m->first, mt, m->second);
+ }
+ }
+ }
+ else
+ {
+ // Explicit group.
+ //
+ group_view gv (t.group_members (a));
+ assert (gv.members != nullptr);
- if (bm)
- add (*p, *bm);
+ for (size_t i (0); i != gv.count; ++i)
+ {
+ if (const target* mt = gv.members[i])
+ {
+ if (const file* f = mt->is_a<file> ())
+ {
+ if (optional<pair<mode, bool>> m = member_mode (*mt))
+ add (f->path (), m->first);
+ }
+ }
}
}
@@ -1761,29 +2306,89 @@ namespace build2
}
static void
- backlink_update_post (target& t, target_state ts, backlinks& bls)
+ backlink_update_post (target& t, target_state ts,
+ backlink_mode m, backlinks& bls)
{
if (ts == target_state::failed)
return; // Let auto rm clean things up.
- // Make backlinks.
- //
- for (auto b (bls.begin ()), i (b); i != bls.end (); ++i)
+ context& ctx (t.ctx);
+
+ file* ft (t.is_a<file> ());
+
+ if (ft != nullptr && bls.size () == 1)
{
- const backlink& bl (*i);
+ // Single file-based target.
+ //
+ const backlink& bl (bls.front ());
- if (i == b)
- update_backlink (t.as<file> (),
- bl.path,
- ts == target_state::changed,
- bl.mode);
- else
- update_backlink (t.ctx, bl.target, bl.path, bl.mode);
+ update_backlink (*ft,
+ bl.path,
+ ts == target_state::changed,
+ bl.mode);
+ }
+ else
+ {
+ // Explicit or ad hoc group.
+ //
+ // What we have below is a custom variant of update_backlink(file).
+ //
+ dir_path d (bls.front ().path.directory ());
+
+ // First print the verbosity level 1 diagnostics. Level 2 and higher are
+ // handled by the update_backlink() calls below.
+ //
+ if (verb == 1)
+ {
+ bool changed (ts == target_state::changed);
+
+ if (!changed)
+ {
+ for (const backlink& bl: bls)
+ {
+ changed = !butl::entry_exists (bl.path,
+ false /* follow_symlinks */,
+ true /* ignore_errors */);
+ if (changed)
+ break;
+ }
+ }
+
+ if (changed)
+ {
+ const char* c (update_backlink_name (m, false /* to_dir */));
+
+ // For explicit groups we only print the group target. For ad hoc
+ // groups we print all the members except those explicitly excluded.
+ //
+ if (ft == nullptr)
+ print_diag (c, t, d);
+ else
+ {
+ vector<target_key> tks;
+ tks.reserve (bls.size ());
+
+ for (const backlink& bl: bls)
+ if (bl.print)
+ tks.push_back (bl.member->key ());
+
+ print_diag (c, move (tks), d);
+ }
+ }
+ }
+
+ if (!exists (d))
+ mkdir_p (d, 2 /* verbosity */);
+
+ // Make backlinks.
+ //
+ for (const backlink& bl: bls)
+ update_backlink (ctx, bl.target, bl.path, bl.mode, 2 /* verbosity */);
}
// Cancel removal.
//
- if (!t.ctx.dry_run)
+ if (!ctx.dry_run)
{
for (backlink& bl: bls)
bl.cancel ();
@@ -1824,15 +2429,57 @@ namespace build2
// which is ok since such targets are probably not interesting for
// backlinking.
//
+ // Note also that for group members (both ad hoc and non) backlinking
+ // is handled when updating/cleaning the group.
+ //
backlinks bls;
- optional<backlink_mode> blm (backlink_test (a, t));
+ optional<backlink_mode> blm;
- if (blm)
+ if (t.group == nullptr) // Matched so must be already resolved.
{
- if (a == perform_update_id)
- bls = backlink_update_pre (a, t, *blm);
+ blm = backlink_test (a, t);
+
+ if (blm)
+ {
+ if (a == perform_update_id)
+ {
+ bls = backlink_update_pre (a, t, *blm);
+ if (bls.empty ())
+ blm = nullopt;
+ }
+ else
+ backlink_clean_pre (a, t, *blm);
+ }
+ }
+
+ // Note: see similar code in set_rule_trace() for match.
+ //
+ if (ctx.trace_execute != nullptr && trace_target (t, *ctx.trace_execute))
+ {
+ diag_record dr (info);
+
+ dr << diag_doing (a, t);
+
+ if (s.rule != nullptr)
+ {
+ const rule& r (s.rule->second);
+
+ if (const adhoc_rule* ar = dynamic_cast<const adhoc_rule*> (&r))
+ {
+ dr << info (ar->loc);
+
+ if (ar->pattern != nullptr)
+ dr << "using ad hoc pattern rule ";
+ else
+ dr << "using ad hoc recipe ";
+ }
+ else
+ dr << info << "using rule ";
+
+ dr << s.rule->first;
+ }
else
- backlink_clean_pre (a, t, *blm);
+ dr << info << "using directly-assigned recipe";
}
ts = execute_recipe (a, t, s.recipe);
@@ -1840,7 +2487,7 @@ namespace build2
if (blm)
{
if (a == perform_update_id)
- backlink_update_post (t, ts, bls);
+ backlink_update_post (t, ts, *blm, bls);
}
}
catch (const failed&)
@@ -1851,13 +2498,30 @@ namespace build2
ts = s.state = target_state::failed;
}
+ // Clear the recipe to release any associated memory. Note that
+ // s.recipe_group_action may be used further (see, for example,
+ // group_state()) and should retain its value.
+ //
+ //
+ if (!s.recipe_keep)
+ s.recipe = nullptr;
+
// Decrement the target count (see set_recipe() for details).
//
- if (a.inner ())
+ // Note that here we cannot rely on s.state being group because of the
+    // postponement logic (see execute_recipe() for details).
+ //
+ if (a.inner () && !s.recipe_group_action)
{
- recipe_function** f (s.recipe.target<recipe_function*> ());
- if (f == nullptr || *f != &group_action)
- ctx.target_count.fetch_sub (1, memory_order_relaxed);
+ // See resolve_members_impl() for background.
+ //
+ if (s.resolve_counted)
+ {
+ s.resolve_counted = false;
+ ctx.resolve_count.fetch_sub (1, memory_order_relaxed);
+ }
+
+ ctx.target_count.fetch_sub (1, memory_order_relaxed);
}
// Decrement the task count (to count_executed) and wake up any threads
@@ -1867,17 +2531,19 @@ namespace build2
target::offset_busy - target::offset_executed,
memory_order_release));
assert (tc == ctx.count_busy ());
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
return ts;
}
target_state
- execute (action a,
- const target& ct,
- size_t start_count,
- atomic_count* task_count)
+ execute_impl (action a,
+ const target& ct,
+ size_t start_count,
+ atomic_count* task_count)
{
+ // NOTE: see also pretend_execute lambda in perform_execute().
+
target& t (const_cast<target&> (ct)); // MT-aware.
target::opstate& s (t[a]);
@@ -1888,7 +2554,6 @@ namespace build2
size_t gd (ctx.dependency_count.fetch_sub (1, memory_order_relaxed));
size_t td (s.dependents.fetch_sub (1, memory_order_release));
assert (td != 0 && gd != 0);
- td--;
// Handle the "last" execution mode.
//
@@ -1911,7 +2576,7 @@ namespace build2
// thread. For other threads the state will still be unknown (until they
// try to execute it).
//
- if (ctx.current_mode == execution_mode::last && td != 0)
+ if (ctx.current_mode == execution_mode::last && --td != 0)
return target_state::postponed;
// Try to atomically change applied to busy.
@@ -1921,6 +2586,7 @@ namespace build2
size_t exec (ctx.count_executed ());
size_t busy (ctx.count_busy ());
+ optional<target_state> r;
if (s.task_count.compare_exchange_strong (
tc,
busy,
@@ -1933,32 +2599,35 @@ namespace build2
{
// There could still be scope operations.
//
- if (t.is_a<dir> ())
- execute_recipe (a, t, nullptr /* recipe */);
+ r = t.is_a<dir> ()
+ ? execute_recipe (a, t, nullptr /* recipe */)
+ : s.state;
s.task_count.store (exec, memory_order_release);
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
}
else
{
if (task_count == nullptr)
- return execute_impl (a, t);
-
- // Pass our diagnostics stack (this is safe since we expect the
- // caller to wait for completion before unwinding its diag stack).
- //
- if (ctx.sched.async (start_count,
- *task_count,
- [a] (const diag_frame* ds, target& t)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (a, t);
- },
- diag_frame::stack (),
- ref (t)))
- return target_state::unknown; // Queued.
-
- // Executed synchronously, fall through.
+ r = execute_impl (a, t);
+ else
+ {
+ // Pass our diagnostics stack (this is safe since we expect the
+ // caller to wait for completion before unwinding its diag stack).
+ //
+ if (ctx.sched->async (start_count,
+ *task_count,
+ [a] (const diag_frame* ds, target& t)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (a, t);
+ },
+ diag_frame::stack (),
+ ref (t)))
+ return target_state::unknown; // Queued.
+
+ // Executed synchronously, fall through.
+ }
}
}
else
@@ -1969,24 +2638,28 @@ namespace build2
else assert (tc == exec);
}
- return t.executed_state (a, false);
+ return r ? *r : t.executed_state (a, false /* fail */);
}
target_state
- execute_direct (action a, const target& ct)
+ execute_direct_impl (action a,
+ const target& ct,
+ size_t start_count,
+ atomic_count* task_count)
{
context& ctx (ct.ctx);
target& t (const_cast<target&> (ct)); // MT-aware.
target::opstate& s (t[a]);
- // Similar logic to match() above except we execute synchronously.
+ // Similar logic to execute_impl() above.
//
size_t tc (ctx.count_applied ());
size_t exec (ctx.count_executed ());
size_t busy (ctx.count_busy ());
+ optional<target_state> r;
if (s.task_count.compare_exchange_strong (
tc,
busy,
@@ -1994,33 +2667,247 @@ namespace build2
memory_order_acquire)) // Synchronize on failure.
{
if (s.state == target_state::unknown)
- execute_impl (a, t);
+ {
+ if (task_count == nullptr)
+ r = execute_impl (a, t);
+ else
+ {
+ if (ctx.sched->async (start_count,
+ *task_count,
+ [a] (const diag_frame* ds, target& t)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (a, t);
+ },
+ diag_frame::stack (),
+ ref (t)))
+ return target_state::unknown; // Queued.
+
+ // Executed synchronously, fall through.
+ }
+ }
else
{
assert (s.state == target_state::unchanged ||
s.state == target_state::failed);
- if (s.state == target_state::unchanged)
- {
- if (t.is_a<dir> ())
- execute_recipe (a, t, nullptr /* recipe */);
- }
+ r = s.state == target_state::unchanged && t.is_a<dir> ()
+ ? execute_recipe (a, t, nullptr /* recipe */)
+ : s.state;
s.task_count.store (exec, memory_order_release);
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
}
}
else
{
- // If the target is busy, wait for it.
+ // Either busy or already executed.
+ //
+ if (tc >= busy) return target_state::busy;
+ else assert (tc == exec);
+ }
+
+ return r ? *r : t.executed_state (a, false /* fail */);
+ }
+
+ bool
+ update_during_match (tracer& trace, action a, const target& t, timestamp ts)
+ {
+ assert (a == perform_update_id);
+
+ // Note: this function is used to make sure header dependencies are up to
+ // date (and which is where it originated).
+ //
+ // There would normally be a lot of headers for every source file (think
+ // all the system headers) and just calling execute_direct_sync() on all
+ // of them can get expensive. At the same time, most of these headers are
+ // existing files that we will never be updating (again, system headers,
+ // for example) and the rule that will match them is the fallback
+ // file_rule. That rule has an optimization: it returns noop_recipe (which
+ // causes the target state to be automatically set to unchanged) if the
+ // file is known to be up to date. So we do the update "smartly".
+ //
+ // Also, now that we do header pre-generation by default, there is a good
+ // chance the header has already been updated. So we also detect that and
+ // avoid switching the phase.
+ //
+ const path_target* pt (t.is_a<path_target> ());
+
+ if (pt == nullptr)
+ ts = timestamp_unknown;
+
+ target_state os (t.matched_state (a));
+
+ if (os == target_state::unchanged)
+ {
+ if (ts == timestamp_unknown)
+ return false;
+ else
+ {
+ // We expect the timestamp to be known (i.e., existing file).
//
- if (tc >= busy)
- ctx.sched.wait (exec, s.task_count, scheduler::work_none);
- else
- assert (tc == exec);
+ timestamp mt (pt->mtime ());
+ assert (mt != timestamp_unknown);
+ return mt > ts;
+ }
+ }
+ else
+ {
+ // We only want to return true if our call to execute() actually caused
+ // an update. In particular, the target could already have been in
+ // target_state::changed because of the dynamic dependency extraction
+ // run for some other target.
+ //
+ target_state ns;
+ if (os != target_state::changed)
+ {
+ phase_switch ps (t.ctx, run_phase::execute);
+ ns = execute_direct_sync (a, t);
+ }
+ else
+ ns = os;
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "updated " << t
+ << "; old state " << os
+ << "; new state " << ns;});
+ return true;
+ }
+ else
+ return ts != timestamp_unknown ? pt->newer (ts, ns) : false;
+ }
+ }
+
+ bool
+ update_during_match_prerequisites (tracer& trace,
+ action a, target& t,
+ uintptr_t mask)
+ {
+ prerequisite_targets& pts (t.prerequisite_targets[a]);
+
+    // On the first pass detect and handle unchanged targets. Note that we
+ // have to do it in a separate pass since we cannot call matched_state()
+ // once we've switched the phase.
+ //
+ size_t n (0);
+
+ for (prerequisite_target& p: pts)
+ {
+ if ((p.include & mask) != 0)
+ {
+ if (p.target != nullptr)
+ {
+ const target& pt (*p.target);
+
+ target_state os (pt.matched_state (a));
+
+ if (os != target_state::unchanged)
+ {
+ ++n;
+ p.data = static_cast<uintptr_t> (os);
+ continue;
+ }
+ }
+
+ p.data = 0;
+ }
+ }
+
+ // If all unchanged, we are done.
+ //
+ if (n == 0)
+ return false;
+
+ // Provide additional information on what's going on.
+ //
+ auto df = make_diag_frame (
+ [&t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while updating during match prerequisites of "
+ << "target " << t;
+ });
+
+ context& ctx (t.ctx);
+
+ phase_switch ps (ctx, run_phase::execute);
+
+ bool r (false);
+
+ // @@ Maybe we should optimize for n == 1? Maybe we should just call
+ // smarter update_during_match() in this case?
+ //
+#if 0
+ for (prerequisite_target& p: pts)
+ {
+ if ((p.include & mask) != 0 && p.data != 0)
+ {
+ const target& pt (*p.target);
+
+ target_state os (static_cast<target_state> (p.data));
+ target_state ns (execute_direct_sync (a, pt));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "updated " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
+ }
+#else
+
+ // Start asynchronous execution of prerequisites. Similar logic to
+ // straight_execute_members().
+ //
+ // Note that the target's task count is expected to be busy (since this
+ // function is called during match). And there don't seem to be any
+ // problems in using it for execute.
+ //
+ atomic_count& tc (t[a].task_count);
+
+ size_t busy (ctx.count_busy ());
+
+ wait_guard wg (ctx, busy, tc);
+
+ for (prerequisite_target& p: pts)
+ {
+ if ((p.include & mask) != 0 && p.data != 0)
+ {
+ execute_direct_async (a, *p.target, busy, tc);
+ }
+ }
+
+ wg.wait ();
+
+ // Finish execution and process the result.
+ //
+ for (prerequisite_target& p: pts)
+ {
+ if ((p.include & mask) != 0 && p.data != 0)
+ {
+ const target& pt (*p.target);
+ target_state ns (execute_complete (a, pt));
+ target_state os (static_cast<target_state> (p.data));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "updated " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
}
+#endif
- return t.executed_state (a);
+ return r;
}
static inline void
@@ -2031,7 +2918,7 @@ namespace build2
static inline void
blank_adhoc_member (prerequisite_target& pt)
{
- if (pt.adhoc)
+ if (pt.adhoc ())
pt.target = nullptr;
}
@@ -2043,7 +2930,6 @@ namespace build2
target_state r (target_state::unchanged);
size_t busy (ctx.count_busy ());
- size_t exec (ctx.count_executed ());
// Start asynchronous execution of prerequisites.
//
@@ -2078,12 +2964,7 @@ namespace build2
continue;
const target& mt (*ts[i]);
-
- // If the target is still busy, wait for its completion.
- //
- ctx.sched.wait (exec, mt[a].task_count, scheduler::work_none);
-
- r |= mt.executed_state (a);
+ r |= execute_complete (a, mt);
blank_adhoc_member (ts[i]);
}
@@ -2101,7 +2982,6 @@ namespace build2
target_state r (target_state::unchanged);
size_t busy (ctx.count_busy ());
- size_t exec (ctx.count_executed ());
wait_guard wg (ctx, busy, tc);
@@ -2130,10 +3010,7 @@ namespace build2
continue;
const target& mt (*ts[i]);
-
- ctx.sched.wait (exec, mt[a].task_count, scheduler::work_none);
-
- r |= mt.executed_state (a);
+ r |= execute_complete (a, mt);
blank_adhoc_member (ts[i]);
}
@@ -2165,12 +3042,11 @@ namespace build2
const timestamp& mt, const execute_filter& ef,
size_t n)
{
- context& ctx (t.ctx);
+ assert (a == perform_update_id);
- assert (ctx.current_mode == execution_mode::first);
+ context& ctx (t.ctx);
size_t busy (ctx.count_busy ());
- size_t exec (ctx.count_executed ());
auto& pts (t.prerequisite_targets[a]);
@@ -2202,7 +3078,7 @@ namespace build2
wg.wait ();
bool e (mt == timestamp_nonexistent);
- const target* rt (tt != nullptr ? nullptr : &t);
+ const target* rt (nullptr);
for (size_t i (0); i != n; ++i)
{
@@ -2212,15 +3088,102 @@ namespace build2
continue;
const target& pt (*p.target);
+ target_state s (execute_complete (a, pt));
+ rs |= s;
+
+ // Should we compare the timestamp to this target's?
+ //
+ if (!e && (p.adhoc () || !ef || ef (pt, i)))
+ {
+ // If this is an mtime-based target, then compare timestamps.
+ //
+ if (const mtime_target* mpt = pt.is_a<mtime_target> ())
+ {
+ if (mpt->newer (mt, s))
+ e = true;
+ }
+ else
+ {
+ // Otherwise we assume the prerequisite is newer if it was changed.
+ //
+ if (s == target_state::changed)
+ e = true;
+ }
+ }
- ctx.sched.wait (exec, pt[a].task_count, scheduler::work_none);
+ if (p.adhoc ())
+ p.target = nullptr; // Blank out.
+ else if (tt != nullptr)
+ {
+ if (rt == nullptr && pt.is_a (*tt))
+ rt = &pt;
+ }
+ }
+
+ assert (tt == nullptr || rt != nullptr);
- target_state s (pt.executed_state (a));
+ return pair<optional<target_state>, const target*> (
+ e ? optional<target_state> () : rs, rt);
+ }
+
+ pair<optional<target_state>, const target*>
+ reverse_execute_prerequisites (const target_type* tt,
+ action a, const target& t,
+ const timestamp& mt, const execute_filter& ef,
+ size_t n)
+ {
+ assert (a == perform_update_id);
+
+ context& ctx (t.ctx);
+
+ size_t busy (ctx.count_busy ());
+
+ auto& pts (t.prerequisite_targets[a]);
+
+ if (n == 0)
+ n = pts.size ();
+
+ // Pretty much as reverse_execute_members() but hairier.
+ //
+ target_state rs (target_state::unchanged);
+
+ wait_guard wg (ctx, busy, t[a].task_count);
+
+ for (size_t i (n); i != 0; )
+ {
+ const target*& pt (pts[--i]);
+
+ if (pt == nullptr) // Skipped.
+ continue;
+
+ target_state s (execute_async (a, *pt, busy, t[a].task_count));
+
+ if (s == target_state::postponed)
+ {
+ rs |= s;
+ pt = nullptr;
+ }
+ }
+
+ wg.wait ();
+
+ bool e (mt == timestamp_nonexistent);
+ const target* rt (nullptr);
+
+ for (size_t i (n); i != 0; )
+ {
+ prerequisite_target& p (pts[--i]);
+
+ if (p == nullptr)
+ continue;
+
+ const target& pt (*p.target);
+ target_state s (execute_complete (a, pt));
rs |= s;
// Should we compare the timestamp to this target's?
//
- if (!e && (p.adhoc || !ef || ef (pt, i)))
+ if (!e && (p.adhoc () || !ef || ef (pt, i)))
{
// If this is an mtime-based target, then compare timestamps.
//
@@ -2238,26 +3201,27 @@ namespace build2
}
}
- if (p.adhoc)
+ if (p.adhoc ())
p.target = nullptr; // Blank out.
- else
+ else if (tt != nullptr)
{
- if (rt == nullptr && pt.is_a (*tt))
+ // Note that here we need the last one.
+ //
+ if (pt.is_a (*tt))
rt = &pt;
}
}
- assert (rt != nullptr);
+ assert (tt == nullptr || rt != nullptr);
return pair<optional<target_state>, const target*> (
- e ? optional<target_state> () : rs,
- tt != nullptr ? rt : nullptr);
+ e ? optional<target_state> () : rs, rt);
}
target_state
noop_action (action a, const target& t)
{
- text << "noop action triggered for " << diag_doing (a, t);
+ error << "noop action triggered for " << diag_doing (a, t);
assert (false); // We shouldn't be called (see set_recipe()).
return target_state::unchanged;
}
@@ -2271,10 +3235,13 @@ namespace build2
//
const target& g (*t.group);
- target_state gs (execute (a, g));
+ // This is execute_sync(a, t, false) but that saves a call to
+ // executed_state() (which we don't need).
+ //
+ target_state gs (execute_impl (a, g, 0, nullptr));
if (gs == target_state::busy)
- ctx.sched.wait (ctx.count_executed (),
+ ctx.sched->wait (ctx.count_executed (),
g[a].task_count,
scheduler::work_none);
@@ -2302,102 +3269,105 @@ namespace build2
return execute_prerequisites (a, t);
}
- target_state
- perform_clean_extra (action a, const file& ft,
- const clean_extras& extras,
- const clean_adhoc_extras& adhoc_extras)
+ static target_state
+ clean_extra (context& ctx,
+ const path& fp,
+ const clean_extras& es,
+ path& ep, bool& ed)
{
- // Clean the extras first and don't print the commands at verbosity level
- // below 3. Note the first extra file/directory that actually got removed
- // for diagnostics below.
- //
- // Note that dry-run is taken care of by the filesystem functions.
- //
- target_state er (target_state::unchanged);
- bool ed (false);
- path ep;
+ assert (!fp.empty ()); // Must be assigned.
- context& ctx (ft.ctx);
+ target_state er (target_state::unchanged);
- auto clean_extra = [&er, &ed, &ep, &ctx] (const file& f,
- const path* fp,
- const clean_extras& es)
+ for (const char* e: es)
{
- for (const char* e: es)
- {
- size_t n;
- if (e == nullptr || (n = strlen (e)) == 0)
- continue;
+ size_t n;
+ if (e == nullptr || (n = strlen (e)) == 0)
+ continue;
- path p;
- bool d;
+ path p;
+ bool d;
- if (path::traits_type::absolute (e))
- {
- p = path (e);
- d = p.to_directory ();
- }
- else
- {
- if ((d = (e[n - 1] == '/')))
- --n;
+ if (path::traits_type::absolute (e))
+ {
+ p = path (e);
+ d = p.to_directory ();
+ }
+ else
+ {
+ if ((d = (e[n - 1] == '/')))
+ --n;
- if (fp == nullptr)
- {
- fp = &f.path ();
- assert (!fp->empty ()); // Must be assigned.
- }
+ p = fp;
+ for (; *e == '-'; ++e)
+ p = p.base ();
- p = *fp;
- for (; *e == '-'; ++e)
- p = p.base ();
+ p.append (e, n);
+ }
- p.append (e, n);
- }
+ target_state r (target_state::unchanged);
- target_state r (target_state::unchanged);
+ if (d)
+ {
+ dir_path dp (path_cast<dir_path> (p));
- if (d)
+ switch (rmdir_r (ctx, dp, true, 3))
{
- dir_path dp (path_cast<dir_path> (p));
-
- switch (rmdir_r (ctx, dp, true, 3))
+ case rmdir_status::success:
{
- case rmdir_status::success:
- {
- r = target_state::changed;
- break;
- }
- case rmdir_status::not_empty:
- {
- if (verb >= 3)
- text << dp << " is current working directory, not removing";
- break;
- }
- case rmdir_status::not_exist:
+ r = target_state::changed;
break;
}
+ case rmdir_status::not_empty:
+ {
+ if (verb >= 3)
+ info << dp << " is current working directory, not removing";
+ break;
+ }
+ case rmdir_status::not_exist:
+ break;
}
- else
- {
- if (rmfile (ctx, p, 3))
- r = target_state::changed;
- }
-
- if (r == target_state::changed && ep.empty ())
- {
- ed = d;
- ep = move (p);
- }
+ }
+ else
+ {
+ if (rmfile (ctx, p, 3))
+ r = target_state::changed;
+ }
- er |= r;
+ if (r == target_state::changed && ep.empty ())
+ {
+ ed = d;
+ ep = move (p);
}
- };
+
+ er |= r;
+ }
+
+ return er;
+ }
+
+ target_state
+ perform_clean_extra (action a, const file& ft,
+ const clean_extras& extras,
+ const clean_adhoc_extras& adhoc_extras,
+ bool show_adhoc)
+ {
+ context& ctx (ft.ctx);
+
+ // Clean the extras first and don't print the commands at verbosity level
+ // below 3. Note the first extra file/directory that actually got removed
+ // for diagnostics below.
+ //
+ // Note that dry-run is taken care of by the filesystem functions.
+ //
+ target_state er (target_state::unchanged);
+ bool ed (false);
+ path ep;
const path& fp (ft.path ());
if (!fp.empty () && !extras.empty ())
- clean_extra (ft, nullptr, extras);
+ er |= clean_extra (ctx, fp, extras, ep, ed);
target_state tr (target_state::unchanged);
@@ -2412,6 +3382,12 @@ namespace build2
// Now clean the ad hoc group file members, if any.
//
+ // While at it, also collect the group target keys if we are showing
+ // the members. But only those that exist (since we don't want to
+ // print any diagnostics if none of them exist).
+ //
+ vector<target_key> tks;
+
for (const target* m (ft.adhoc_member);
m != nullptr;
m = m->adhoc_member)
@@ -2432,7 +3408,7 @@ namespace build2
}));
if (i != adhoc_extras.end ())
- clean_extra (*mf, mp, i->extras);
+ er |= clean_extra (ctx, *mp, i->extras, ep, ed);
}
if (!clean)
@@ -2452,18 +3428,38 @@ namespace build2
? target_state::changed
: target_state::unchanged);
- if (r == target_state::changed && ep.empty ())
- ep = *mp;
-
- er |= r;
+ if (r == target_state::changed)
+ {
+ if (show_adhoc && verb == 1)
+ tks.push_back (mf->key ());
+ else if (ep.empty ())
+ {
+ ep = *mp;
+ er |= r;
+ }
+ }
}
}
// Now clean the primary target and its prerequisited in the reverse order
// of update: first remove the file, then clean the prerequisites.
//
- if (clean && !fp.empty () && rmfile (fp, ft))
- tr = target_state::changed;
+ if (clean && !fp.empty ())
+ {
+ if (show_adhoc && verb == 1 && !tks.empty ())
+ {
+ if (rmfile (fp, ft, 2 /* verbosity */))
+ tks.insert (tks.begin (), ft.key ());
+
+ print_diag ("rm", move (tks));
+ tr = target_state::changed;
+ }
+ else
+ {
+ if (rmfile (fp, ft))
+ tr = target_state::changed;
+ }
+ }
// Update timestamp in case there are operations after us that could use
// the information.
@@ -2483,10 +3479,20 @@ namespace build2
{
if (verb > (ctx.current_diag_noise ? 0 : 1) && verb < 3)
{
- if (ed)
- text << "rm -r " << path_cast<dir_path> (ep);
- else
- text << "rm " << ep;
+ if (verb >= 2)
+ {
+ if (ed)
+ text << "rm -r " << path_cast<dir_path> (ep);
+ else
+ text << "rm " << ep;
+ }
+ else if (verb)
+ {
+ if (ed)
+ print_diag ("rm -r", path_cast<dir_path> (ep));
+ else
+ print_diag ("rm", ep);
+ }
}
}
@@ -2499,29 +3505,19 @@ namespace build2
}
target_state
- perform_clean (action a, const target& t)
+ perform_clean_group_extra (action a, const mtime_target& g,
+ const clean_extras& extras)
{
- const file& f (t.as<file> ());
- assert (!f.path ().empty ());
- return perform_clean_extra (a, f, {});
- }
+ context& ctx (g.ctx);
- target_state
- perform_clean_depdb (action a, const target& t)
- {
- const file& f (t.as<file> ());
- assert (!f.path ().empty ());
- return perform_clean_extra (a, f, {".d"});
- }
+ target_state er (target_state::unchanged);
+ bool ed (false);
+ path ep;
- target_state
- perform_clean_group (action a, const target& xg)
- {
- const mtime_target& g (xg.as<mtime_target> ());
+ if (!extras.empty ())
+ er |= clean_extra (ctx, g.dir / path (g.name), extras, ep, ed);
- // Similar logic to perform_clean_extra() above.
- //
- target_state r (target_state::unchanged);
+ target_state tr (target_state::unchanged);
if (cast_true<bool> (g[g.ctx.var_clean]))
{
@@ -2529,54 +3525,93 @@ namespace build2
{
if (const target* m = gv.members[gv.count - 1])
{
- if (rmfile (m->as<file> ().path (), *m))
- r |= target_state::changed;
+ // Note that at the verbosity level 1 we don't show the removal of
+ // each group member. This is consistent with what is normally shown
+ // during update.
+ //
+ if (rmfile (m->as<file> ().path (), *m, 2 /* verbosity */))
+ tr |= target_state::changed;
}
}
+
+ if (tr == target_state::changed && verb == 1)
+ print_diag ("rm", g);
}
g.mtime (timestamp_nonexistent);
- r |= reverse_execute_prerequisites (a, g);
- return r;
+ if (tr != target_state::changed && er == target_state::changed)
+ {
+ if (verb > (ctx.current_diag_noise ? 0 : 1) && verb < 3)
+ {
+ if (verb >= 2)
+ {
+ if (ed)
+ text << "rm -r " << path_cast<dir_path> (ep);
+ else
+ text << "rm " << ep;
+ }
+ else if (verb)
+ {
+ if (ed)
+ print_diag ("rm -r", path_cast<dir_path> (ep));
+ else
+ print_diag ("rm", ep);
+ }
+ }
+ }
+
+ tr |= reverse_execute_prerequisites (a, g);
+
+ tr |= er;
+ return tr;
}
target_state
- perform_clean_group_depdb (action a, const target& g)
+ perform_clean (action a, const target& t)
{
- context& ctx (g.ctx);
+ const file& f (t.as<file> ());
+ assert (!f.path ().empty ());
+ return perform_clean_extra (a, f, {});
+ }
- // The same twisted target state merging logic as in perform_clean_extra().
- //
- target_state er (target_state::unchanged);
- path ep;
+ target_state
+ perform_clean_depdb (action a, const target& t)
+ {
+ const file& f (t.as<file> ());
+ assert (!f.path ().empty ());
+ return perform_clean_extra (a, f, {".d"});
+ }
- group_view gv (g.group_members (a));
- if (gv.count != 0)
+ target_state
+ perform_clean_group (action a, const target& t)
+ {
+ return perform_clean_group_extra (a, t.as<mtime_target> (), {});
+ }
+
+ target_state
+ perform_clean_group_depdb (action a, const target& t)
+ {
+ path d;
+ clean_extras extras;
{
- for (size_t i (0); i != gv.count; ++i)
+ group_view gv (t.group_members (a));
+ if (gv.count != 0)
{
- if (const target* m = gv.members[i])
+ for (size_t i (0); i != gv.count; ++i)
{
- ep = m->as<file> ().path () + ".d";
- break;
+ if (const target* m = gv.members[i])
+ {
+ d = m->as<file> ().path () + ".d";
+ break;
+ }
}
- }
-
- assert (!ep.empty ());
- if (rmfile (ctx, ep, 3))
- er = target_state::changed;
- }
-
- target_state tr (perform_clean_group (a, g));
- if (tr != target_state::changed && er == target_state::changed)
- {
- if (verb > (ctx.current_diag_noise ? 0 : 1) && verb < 3)
- text << "rm " << ep;
+ assert (!d.empty ());
+ extras.push_back (d.string ().c_str ());
+ }
}
- tr |= er;
- return tr;
+ return perform_clean_group_extra (a, t.as<mtime_target> (), extras);
}
}
diff --git a/libbuild2/algorithm.hxx b/libbuild2/algorithm.hxx
index 73705d8..8bdf737 100644
--- a/libbuild2/algorithm.hxx
+++ b/libbuild2/algorithm.hxx
@@ -17,7 +17,7 @@
namespace build2
{
// The default prerequisite search implementation. It first calls the
- // prerequisite-type-specific search function. If that doesn't yeld
+ // prerequisite-type-specific search function. If that doesn't yield
// anything, it creates a new target.
//
LIBBUILD2_SYMEXPORT const target&
@@ -45,20 +45,32 @@ namespace build2
LIBBUILD2_SYMEXPORT pair<target&, ulock>
search_locked (const target&, const prerequisite_key&);
- // Note that unlike the above version, this one can be called during the
- // load and execute phases.
+ // As above but this one can be called during the load and execute phases.
//
LIBBUILD2_SYMEXPORT const target*
search_existing (context&, const prerequisite_key&);
+ // First search for an existing target and if that doesn't yield anything,
+ // creates a new target, bypassing any prerequisite-type-specific search.
+ // Can be called during the load and match phases but only on project-
+ // unqualified prerequisites. This version is suitable for cases where you
+ // know the target is in out and cannot be possibly found in src.
+ //
+ LIBBUILD2_SYMEXPORT const target&
+ search_new (context&, const prerequisite_key&);
+
+ // As above but return the lock if the target was newly created.
+ //
+ LIBBUILD2_SYMEXPORT pair<target&, ulock>
+ search_new_locked (context&, const prerequisite_key&);
+
// Uniform search interface for prerequisite/prerequisite_member.
//
inline const target&
search (const target& t, const prerequisite_member& p) {return p.search (t);}
- // As above but override the target type. Useful for searching for
- // target group members where we need to search for a different
- // target type.
+ // As above but override the target type. Useful for searching for target
+ // group members where we need to search for a different target type.
//
const target&
search (const target&, const target_type&, const prerequisite_key&);
@@ -66,6 +78,15 @@ namespace build2
pair<target&, ulock>
search_locked (const target&, const target_type&, const prerequisite_key&);
+ const target*
+ search_existing (context&, const target_type&, const prerequisite_key&);
+
+ const target&
+ search_new (context&, const target_type&, const prerequisite_key&);
+
+ pair<target&, ulock>
+ search_new_locked (context&, const target_type&, const prerequisite_key&);
+
// As above but specify the prerequisite to search as individual key
// components. Scope can be NULL if the directory is absolute.
//
@@ -85,8 +106,8 @@ namespace build2
const dir_path& dir,
const dir_path& out,
const string& name,
- const string* ext = nullptr, // NULL means unspecified.
- const scope* = nullptr); // NULL means dir is absolute.
+ const string* ext = nullptr,
+ const scope* = nullptr);
const target*
search_existing (context&,
@@ -98,6 +119,24 @@ namespace build2
const scope* = nullptr,
const optional<project_name>& proj = nullopt);
+ const target&
+ search_new (context&,
+ const target_type&,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext = nullptr,
+ const scope* = nullptr);
+
+ pair<target&, ulock>
+ search_new_locked (context&,
+ const target_type&,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext = nullptr,
+ const scope* = nullptr);
+
// As above but specify the target type as template argument.
//
template <typename T>
@@ -109,6 +148,15 @@ namespace build2
const string* ext = nullptr,
const scope* = nullptr);
+ template <typename T>
+ const T*
+ search_existing (context&,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext = nullptr,
+ const scope* = nullptr);
+
// Search for a target identified by the name. The semantics is "as if" we
// first created a prerequisite based on this name in exactly the same way
// as the parser would and then searched based on this prerequisite. If the
@@ -116,10 +164,10 @@ namespace build2
// argument.
//
LIBBUILD2_SYMEXPORT const target&
- search (const target&, name, const scope&, const target_type* = nullptr);
+ search (const target&, name&&, const scope&, const target_type* = nullptr);
- // Return NULL for unknown target types. Note that unlike the above version,
- // these ones can be called during the load and execute phases.
+ // Note: returns NULL for unknown target types. Note that unlike the above
+ // version, these ones can be called during the load and execute phases.
//
LIBBUILD2_SYMEXPORT const target*
search_existing (const name&,
@@ -143,17 +191,20 @@ namespace build2
action_type action;
target_type* target = nullptr;
size_t offset = 0;
+ bool first;
explicit operator bool () const {return target != nullptr;}
+ // Note: achieved offset is preserved.
+ //
void
unlock ();
// Movable-only type with move-assignment only to NULL lock.
//
target_lock () = default;
- target_lock (target_lock&&);
- target_lock& operator= (target_lock&&);
+ target_lock (target_lock&&) noexcept;
+ target_lock& operator= (target_lock&&) noexcept;
target_lock (const target_lock&) = delete;
target_lock& operator= (const target_lock&) = delete;
@@ -161,13 +212,14 @@ namespace build2
// Implementation details.
//
~target_lock ();
- target_lock (action_type, target_type*, size_t);
+ target_lock (action_type, target_type*, size_t, bool);
struct data
{
action_type action;
target_type* target;
size_t offset;
+ bool first;
};
data
@@ -295,17 +347,18 @@ namespace build2
}
// Match and apply a rule to the action/target with ambiguity detection.
- // Increment the target's dependents count, which means that you should call
- // this function with the intent to also call execute(). Return the target
- // state translating target_state::failed to the failed exception unless
- // instructed otherwise.
- //
- // The try_match() version doesn't issue diagnostics if there is no rule
- // match (but fails as match() for all other errors, like rule ambiguity,
- // inability to apply, etc). The first half of the result indicated whether
- // there was a rule match.
- //
- // The unmatch argument allows optimizations that avoid calling execute().
+ // This is the synchronous match implementation that waits for completion
+ // if the target is already being matched. Increment the target's dependents
+ // count, which means that you should call this function with the intent to
+ // also call execute*(). Translating target_state::failed to the failed
+ // exception unless instructed otherwise.
+ //
+ // The try_match_sync() version doesn't issue diagnostics if there is no
+ // rule match (but fails as match_sync() for all other errors, like rule
+ // ambiguity, inability to apply, etc). The first half of the result
+ // indicated whether there was a rule match.
+ //
+ // The unmatch argument allows optimizations that avoid calling execute*().
// If it is unmatch::unchanged then only unmatch the target if it is known
// to be unchanged after match. If it is unmatch::safe, then unmatch the
// target if it is safe (this includes unchanged or if we know that someone
@@ -315,18 +368,25 @@ namespace build2
enum class unmatch {none, unchanged, safe};
target_state
- match (action, const target&, bool fail = true);
+ match_sync (action, const target&, bool fail = true);
pair<bool, target_state>
- try_match (action, const target&, bool fail = true);
+ try_match_sync (action, const target&, bool fail = true);
pair<bool, target_state>
- match (action, const target&, unmatch);
+ match_sync (action, const target&, unmatch);
+
+ // As above but without incrementing the target's dependents count. Should
+ // be executed with execute_direct_*().
+ //
+ target_state
+ match_direct_sync (action, const target&, bool fail = true);
// Start asynchronous match. Return target_state::postponed if the
- // asynchrounous operation has been started and target_state::busy if the
- // target has already been busy. Regardless of the result, match() must be
- // called in order to complete the operation (except target_state::failed).
+ // asynchronous operation has been started and target_state::busy if the
+ // target has already been busy. Regardless of the result, match_complete()
+ // must be called in order to complete the operation (except if the result
+ // is target_state::failed), which has the result semantics of match_sync().
//
// If fail is false, then return target_state::failed if the target match
// failed. Otherwise, throw the failed exception if keep_going is false and
@@ -337,6 +397,12 @@ namespace build2
size_t start_count, atomic_count& task_count,
bool fail = true);
+ target_state
+ match_complete (action, const target&, bool fail = true);
+
+ pair<bool, target_state>
+ match_complete (action, const target&, unmatch);
+
// Apply the specified recipe directly and without incrementing the
// dependency counts. The target must be locked.
//
@@ -352,15 +418,20 @@ namespace build2
// Match a "delegate rule" from withing another rules' apply() function
// avoiding recursive matches (thus the third argument). Unless try_match is
// true, fail if no rule is found. Otherwise return empty recipe. Note that
- // unlike match(), this function does not increment the dependents count and
- // the two rules must coordinate who is using the target's data pad and/or
- // prerequisite_targets. See also the companion execute_delegate().
+ // unlike match(), this function does not increment the dependents count.
+ // See also the companion execute_delegate().
//
recipe
match_delegate (action, target&, const rule&, bool try_match = false);
- // Match a rule for the inner operation from withing the outer rule's
- // apply() function. See also the companion execute_inner().
+ // Incrementing the dependency counts of the specified target.
+ //
+ void
+ match_inc_dependents (action, const target&);
+
+ // Match (synchronously) a rule for the inner operation from within the
+ // outer rule's apply() function. See also the companion execute_inner()
+ // and inner_recipe.
//
target_state
match_inner (action, const target&);
@@ -423,27 +494,26 @@ namespace build2
// dependencies. Similar in semantics to match_prerequisites(). Any marked
// target pointers are skipped.
//
- // T can only be const target* or prerequisite_target.
- //
- template <typename T>
- void
- match_members (action, target&, T const*, size_t);
+ LIBBUILD2_SYMEXPORT void
+ match_members (action, const target&, const target* const*, size_t);
template <size_t N>
inline void
- match_members (action a, target& t, const target* (&ts)[N])
+ match_members (action a, const target& t, const target* (&ts)[N])
{
match_members (a, t, ts, N);
}
- inline void
- match_members (action a,
- target& t,
- prerequisite_targets& ts,
- size_t start = 0)
- {
- match_members (a, t, ts.data () + start, ts.size () - start);
- }
+ // As above plus if the include mask (first) and value (second) are
+ // specified, then only match prerequisites that satisfy the
+ // ((prerequisite_target::include & mask) == value) condition.
+ //
+ LIBBUILD2_SYMEXPORT void
+ match_members (action,
+ const target&,
+ prerequisite_targets&,
+ size_t start = 0,
+ pair<uintptr_t, uintptr_t> include = {0, 0});
// Unless already known, match, and, if necessary, execute the group in
// order to resolve its members list. Note that even after that the member's
@@ -473,8 +543,9 @@ namespace build2
resolve_group (action, const target&);
// Inject a target as a "prerequisite target" (note: not a prerequisite) of
- // another target. Specifically, first match the prerequisite target and
- // then add it to the back of the dependent target's prerequisite_targets.
+ // another target. Specifically, match (synchronously) the prerequisite
+ // target and then add it to the back of the dependent target's
+ // prerequisite_targets.
//
void
inject (action, target&, const target& prereq);
@@ -486,56 +557,56 @@ namespace build2
// the injected target or NULL. Normally this function is called from the
// rule's apply() function.
//
- // As an extension, this function will also search for an existing fsdir{}
- // prerequisite for the directory and if one exists, return that (even if
- // the target is in src tree). This can be used, for example, to place
- // output into an otherwise non-existent directory.
+ // As an extension, unless prereq is false, this function will also search
+ // for an existing fsdir{} prerequisite for the directory and if one exists,
+ // return that (even if the target is in src tree). This can be used, for
+ // example, to place output into an otherwise non-existent directory.
//
LIBBUILD2_SYMEXPORT const fsdir*
- inject_fsdir (action, target&, bool parent = true);
+ inject_fsdir (action, target&, bool prereq = true, bool parent = true);
// Execute the action on target, assuming a rule has been matched and the
// recipe for this action has been set. This is the synchrounous executor
- // implementation (but may still return target_state::busy if the target
- // is already being executed). Decrements the dependents count.
- //
- // Note: does not translate target_state::failed to the failed exception.
+ // implementation that waits for completion if the target is already being
+ // executed. Translate target_state::failed to the failed exception unless
+ // fail is false.
//
target_state
- execute (action, const target&);
-
- // As above but wait for completion if the target is busy and translate
- // target_state::failed to the failed exception.
- //
- target_state
- execute_wait (action, const target&);
+ execute_sync (action, const target&, bool fail = true);
// As above but start asynchronous execution. Return target_state::unknown
// if the asynchrounous execution has been started and target_state::busy if
// the target has already been busy.
//
- // If fail is false, then return target_state::failed if the target match
- // failed. Otherwise, throw the failed exception if keep_going is false and
- // return target_state::failed otherwise.
+ // If fail is false, then return target_state::failed if the target
+ // execution failed. Otherwise, throw the failed exception if keep_going is
+ // false and return target_state::failed otherwise. Regardless of the
+ // result, execute_complete() must be called in order to complete the
+ // operation (except if the result is target_state::failed), which has the
+ // result semantics of execute_sync().
//
target_state
execute_async (action, const target&,
size_t start_count, atomic_count& task_count,
bool fail = true);
- // Execute the recipe obtained with match_delegate(). Note that the target's
- // state is neither checked nor updated by this function. In other words,
- // the appropriate usage is to call this function from another recipe and to
- // factor the obtained state into the one returned.
+ target_state
+ execute_complete (action, const target&);
+
+ // Execute (synchronously) the recipe obtained with match_delegate(). Note
+ // that the target's state is neither checked nor updated by this function.
+ // In other words, the appropriate usage is to call this function from
+ // another recipe and to factor the obtained state into the one returned.
//
target_state
execute_delegate (const recipe&, action, const target&);
- // Execute the inner operation matched with match_inner(). Note that the
- // returned target state is for the inner operation. The appropriate usage
- // is to call this function from the outer operation's recipe and to factor
- // the obtained state into the one returned (similar to how we do it for
- // prerequisites).
+ // Execute (synchronously) the inner operation matched with match_inner().
+ // Note that the returned target state is for the inner operation. The
+ // appropriate usage is to call this function from the outer operation's
+ // recipe and to factor the obtained state into the one returned (similar to
+ // how we do it for prerequisites). Or, if factoring is not needed, simply
+ // return inner_recipe as outer recipe.
//
// Note: waits for the completion if the target is busy and translates
// target_state::failed to the failed exception.
@@ -548,11 +619,43 @@ namespace build2
// relationship (so no dependents count is decremented) and execution order
// (so this function never returns the postponed target state).
//
- // Note: waits for the completion if the target is busy and translates
- // target_state::failed to the failed exception.
+ // The first version waits for the completion if the target is busy and
+ // translates target_state::failed to the failed exception.
//
- LIBBUILD2_SYMEXPORT target_state
- execute_direct (action, const target&);
+ target_state
+ execute_direct_sync (action, const target&, bool fail = true);
+
+ target_state
+ execute_direct_async (action, const target&,
+ size_t start_count, atomic_count& task_count,
+ bool fail = true);
+
+ // Update the target during the match phase (by switching the phase and
+ // calling execute_direct()). Return true if the target has changed or, if
+ // the passed timestamp is not timestamp_unknown, it is older than the
+ // target.
+ //
+ // Note that such a target must still be updated normally during the execute
+ // phase in order to keep the dependency counts straight (at which point the
+ // target state/timestamp will be re-incorporated into the result).
+ //
+ LIBBUILD2_SYMEXPORT bool
+ update_during_match (tracer&,
+ action, const target&,
+ timestamp = timestamp_unknown);
+
+ // As above, but update all the targets in prerequisite_targets that have
+ // the specified mask in prerequisite_target::include. Return true if any of
+ // them have changed.
+ //
+ // Note that this function spoils prerequisite_target::data (which is used
+ // for temporary storage). But it resets data to 0 once done.
+ //
+ LIBBUILD2_SYMEXPORT bool
+ update_during_match_prerequisites (
+ tracer&,
+ action, target&,
+ uintptr_t mask = prerequisite_target::include_udm);
// The default prerequisite execute implementation. Call execute_async() on
// each non-ignored (non-NULL) prerequisite target in a loop and then wait
@@ -615,8 +718,8 @@ namespace build2
// case if they are up to something tricky (like recursively linking liba{}
// prerequisites).
//
- // Note that because we use mtime, this function should normally only be
- // used in the perform_update action (which is straight).
+ // Note that because we use mtime, this function can only be used for the
+ // perform_update action.
//
using execute_filter = function<bool (const target&, size_t pos)>;
@@ -626,6 +729,18 @@ namespace build2
const execute_filter& = nullptr,
size_t count = 0);
+ // As above, but execute prerequisites in reverse.
+ //
+ // Sometimes it may be advantageous to execute prerequisites in reverse, for
+ // example, to have more immediate incremental compilation or more accurate
+ // progress. See cc::link_rule for background.
+ //
+ optional<target_state>
+ reverse_execute_prerequisites (action, const target&,
+ const timestamp&,
+ const execute_filter& = nullptr,
+ size_t count = 0);
+
// Another version of the above that does two extra things for the caller:
// it determines whether the action needs to be executed on the target based
// on the passed timestamp and finds a prerequisite of the specified type
@@ -690,8 +805,9 @@ namespace build2
// Call straight or reverse depending on the current mode.
//
+ template <typename T>
target_state
- execute_members (action, const target&, const target*[], size_t);
+ execute_members (action, const target&, T[], size_t);
template <size_t N>
inline target_state
@@ -731,8 +847,8 @@ namespace build2
LIBBUILD2_SYMEXPORT target_state
group_action (action, const target&);
- // Standard perform(clean) action implementation for the file target
- // (or derived).
+ // Standard perform(clean) action implementation for the file target (or
+ // derived). Note: also cleans ad hoc group members, if any.
//
LIBBUILD2_SYMEXPORT target_state
perform_clean (action, const target&);
@@ -742,8 +858,8 @@ namespace build2
LIBBUILD2_SYMEXPORT target_state
perform_clean_depdb (action, const target&);
- // As above but clean the target group. The group should be an mtime_target
- // and members should be files.
+ // As above but clean the (non-ad hoc) target group. The group should be an
+ // mtime_target and members should be files.
//
LIBBUILD2_SYMEXPORT target_state
perform_clean_group (action, const target&);
@@ -754,21 +870,22 @@ namespace build2
LIBBUILD2_SYMEXPORT target_state
perform_clean_group_depdb (action, const target&);
- // Helper for custom perform(clean) implementations that cleans extra files
- // and directories (recursively) specified as a list of either absolute
- // paths or "path derivation directives". The directive string can be NULL,
- // or empty in which case it is ignored. If the last character in a
- // directive is '/', then the resulting path is treated as a directory
- // rather than a file. The directive can start with zero or more '-'
- // characters which indicate the number of extensions that should be
- // stripped before the new extension (if any) is added (so if you want to
- // strip the extension, specify just "-"). For example:
+ // Helpers for custom perform(clean) implementations that, besides the
+ // target and group members, can also clean extra files and directories
+ // (recursively) specified as a list of either absolute paths or "path
+ // derivation directives". The directive string can be NULL, or empty in
+ // which case it is ignored. If the last character in a directive is '/',
+ // then the resulting path is treated as a directory rather than a file. The
+ // directive can start with zero or more '-' characters which indicate the
+ // number of extensions that should be stripped before the new extension (if
+ // any) is added (so if you want to strip the extension, specify just
+ // "-"). For example:
//
// perform_clean_extra (a, t, {".d", ".dlls/", "-.dll"});
//
// The extra files/directories are removed first in the specified order
- // followed by the ad hoc group member, then target itself, and, finally,
- // the prerequisites in the reverse order.
+ // followed by the group member, then target itself, and, finally, the
+ // prerequisites in the reverse order.
//
// You can also clean extra files derived from ad hoc group members that are
// "indexed" using their target types (see add/find_adhoc_member() for
@@ -787,21 +904,46 @@ namespace build2
using clean_adhoc_extras = small_vector<clean_adhoc_extra, 2>;
+ // If show_adhoc_members is true, then print the entire ad hoc group instead
+ // of just the primary member at verbosity level 1 (see print_diag() for
+ // details). Note that the default is false because normally a rule
+ // implemented in C++ would only use an ad hoc group for subordinate members
+ // (.pdb, etc) and would use a dedicated target group type if the members
+ // are equal.
+ //
LIBBUILD2_SYMEXPORT target_state
perform_clean_extra (action, const file&,
const clean_extras&,
- const clean_adhoc_extras& = {});
+ const clean_adhoc_extras& = {},
+ bool show_adhoc_members = false);
inline target_state
perform_clean_extra (action a, const file& f,
- initializer_list<const char*> e)
+ initializer_list<const char*> e,
+ bool show_adhoc_members = false)
{
- return perform_clean_extra (a, f, clean_extras (e));
+ return perform_clean_extra (a, f, clean_extras (e), {}, show_adhoc_members);
+ }
+
+ // Similar to perform_clean_group() but with extras similar to
+ // perform_clean_extra(). Note that the extras are derived from the group
+ // "path" (g.dir / g.name).
+ //
+ LIBBUILD2_SYMEXPORT target_state
+ perform_clean_group_extra (action, const mtime_target&, const clean_extras&);
+
+ inline target_state
+ perform_clean_group_extra (action a, const mtime_target& g,
+ initializer_list<const char*> e)
+ {
+ return perform_clean_group_extra (a, g, clean_extras (e));
}
// Update/clean a backlink issuing appropriate diagnostics at appropriate
// levels depending on the overload and the changed argument.
//
+ // Note that these functions assume (target.leaf() == link.leaf ()).
+ //
enum class backlink_mode
{
link, // Make a symbolic link if possible, hard otherwise.
@@ -824,6 +966,8 @@ namespace build2
bool changed,
backlink_mode = backlink_mode::link);
+ // Note: verbosity should be 2 or greater.
+ //
LIBBUILD2_SYMEXPORT void
update_backlink (context&,
const path& target,
@@ -831,6 +975,8 @@ namespace build2
backlink_mode = backlink_mode::link,
uint16_t verbosity = 3);
+ // Note: verbosity should be 2 or greater.
+ //
LIBBUILD2_SYMEXPORT void
clean_backlink (context&,
const path& link,
diff --git a/libbuild2/algorithm.ixx b/libbuild2/algorithm.ixx
index 24d9e5b..9f1b70f 100644
--- a/libbuild2/algorithm.ixx
+++ b/libbuild2/algorithm.ixx
@@ -45,6 +45,39 @@ namespace build2
k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
}
+ inline const target*
+ search_exsiting (context& ctx,
+ const target_type& tt,
+ const prerequisite_key& k)
+ {
+ return search_existing (
+ ctx,
+ prerequisite_key {
+ k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
+ }
+
+ inline const target&
+ search_new (context& ctx,
+ const target_type& tt,
+ const prerequisite_key& k)
+ {
+ return search_new (
+ ctx,
+ prerequisite_key {
+ k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
+ }
+
+ inline pair<target&, ulock>
+ search_new_locked (context& ctx,
+ const target_type& tt,
+ const prerequisite_key& k)
+ {
+ return search_new_locked (
+ ctx,
+ prerequisite_key {
+ k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
+ }
+
inline const target&
search (const target& t,
const target_type& type,
@@ -110,6 +143,48 @@ namespace build2
scope});
}
+ inline const target&
+ search_new (context& ctx,
+ const target_type& type,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext,
+ const scope* scope)
+ {
+ return search_new (
+ ctx,
+ prerequisite_key {
+ nullopt,
+ {
+ &type,
+ &dir, &out, &name,
+ ext != nullptr ? optional<string> (*ext) : nullopt
+ },
+ scope});
+ }
+
+ inline pair<target&, ulock>
+ search_new_locked (context& ctx,
+ const target_type& type,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext,
+ const scope* scope)
+ {
+ return search_new_locked (
+ ctx,
+ prerequisite_key {
+ nullopt,
+ {
+ &type,
+ &dir, &out, &name,
+ ext != nullptr ? optional<string> (*ext) : nullopt
+ },
+ scope});
+ }
+
template <typename T>
inline const T&
search (const target& t,
@@ -123,6 +198,21 @@ namespace build2
t, T::static_type, dir, out, name, ext, scope).template as<T> ();
}
+ template <typename T>
+ inline const T*
+ search_existing (context& ctx,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext,
+ const scope* scope)
+ {
+ const target* r (
+ search_existing (
+ ctx, T::static_type, dir, out, name, ext, scope));
+ return r != nullptr ? &r->template as<T> () : nullptr;
+ }
+
LIBBUILD2_SYMEXPORT target_lock
lock_impl (action, const target&, optional<scheduler::work_queue>);
@@ -130,8 +220,8 @@ namespace build2
unlock_impl (action, target&, size_t);
inline target_lock::
- target_lock (action_type a, target_type* t, size_t o)
- : action (a), target (t), offset (o)
+ target_lock (action_type a, target_type* t, size_t o, bool f)
+ : action (a), target (t), offset (o), first (f)
{
if (target != nullptr)
prev = stack (this);
@@ -143,7 +233,8 @@ namespace build2
if (target != nullptr && prev != this)
{
const target_lock* cur (stack (prev));
- assert (cur == this);
+ if (cur != this) // NDEBUG
+ assert (cur == this);
prev = this;
}
}
@@ -158,7 +249,8 @@ namespace build2
if (prev != this)
{
const target_lock* cur (stack (prev));
- assert (cur == this);
+ if (cur != this) // NDEBUG
+ assert (cur == this);
}
target = nullptr;
@@ -168,14 +260,15 @@ namespace build2
inline auto target_lock::
release () -> data
{
- data r {action, target, offset};
+ data r {action, target, offset, first};
if (target != nullptr)
{
if (prev != this)
{
const target_lock* cur (stack (prev));
- assert (cur == this);
+ if (cur != this) // NDEBUG
+ assert (cur == this);
}
target = nullptr;
@@ -191,7 +284,7 @@ namespace build2
}
inline target_lock::
- target_lock (target_lock&& x)
+ target_lock (target_lock&& x) noexcept
: action (x.action), target (x.target), offset (x.offset)
{
if (target != nullptr)
@@ -199,7 +292,8 @@ namespace build2
if (x.prev != &x)
{
const target_lock* cur (stack (this));
- assert (cur == &x);
+ if (cur != &x) // NDEBUG
+ assert (cur == &x);
prev = x.prev;
}
else
@@ -210,7 +304,7 @@ namespace build2
}
inline target_lock& target_lock::
- operator= (target_lock&& x)
+ operator= (target_lock&& x) noexcept
{
if (this != &x)
{
@@ -225,7 +319,8 @@ namespace build2
if (x.prev != &x)
{
const target_lock* cur (stack (this));
- assert (cur == &x);
+ if (cur != &x) // NDEBUG
+ assert (cur == &x);
prev = x.prev;
}
else
@@ -297,48 +392,66 @@ namespace build2
}
LIBBUILD2_SYMEXPORT const rule_match*
- match_rule (action, target&, const rule* skip, bool try_match = false);
+ match_rule (action, target&,
+ const rule* skip,
+ bool try_match = false,
+ match_extra* = nullptr);
LIBBUILD2_SYMEXPORT recipe
apply_impl (action, target&, const rule_match&);
LIBBUILD2_SYMEXPORT pair<bool, target_state>
- match (action, const target&, size_t, atomic_count*, bool try_match = false);
+ match_impl (action, const target&,
+ size_t, atomic_count*,
+ bool try_match = false);
inline void
- match_inc_dependens (action a, const target& t)
+ match_inc_dependents (action a, const target& t)
{
t.ctx.dependency_count.fetch_add (1, memory_order_relaxed);
t[a].dependents.fetch_add (1, memory_order_release);
}
inline target_state
- match (action a, const target& t, bool fail)
+ match_sync (action a, const target& t, bool fail)
{
assert (t.ctx.phase == run_phase::match);
- target_state r (match (a, t, 0, nullptr).second);
+ target_state r (match_impl (a, t, 0, nullptr).second);
if (r != target_state::failed)
- match_inc_dependens (a, t);
+ match_inc_dependents (a, t);
else if (fail)
throw failed ();
return r;
}
+ inline target_state
+ match_direct_sync (action a, const target& t, bool fail)
+ {
+ assert (t.ctx.phase == run_phase::match);
+
+ target_state r (match_impl (a, t, 0, nullptr).second);
+
+ if (r == target_state::failed && fail)
+ throw failed ();
+
+ return r;
+ }
+
inline pair<bool, target_state>
- try_match (action a, const target& t, bool fail)
+ try_match_sync (action a, const target& t, bool fail)
{
assert (t.ctx.phase == run_phase::match);
pair<bool, target_state> r (
- match (a, t, 0, nullptr, true /* try_match */));
+ match_impl (a, t, 0, nullptr, true /* try_match */));
if (r.first)
{
if (r.second != target_state::failed)
- match_inc_dependens (a, t);
+ match_inc_dependents (a, t);
else if (fail)
throw failed ();
}
@@ -347,11 +460,11 @@ namespace build2
}
inline pair<bool, target_state>
- match (action a, const target& t, unmatch um)
+ match_sync (action a, const target& t, unmatch um)
{
assert (t.ctx.phase == run_phase::match);
- target_state s (match (a, t, 0, nullptr).second);
+ target_state s (match_impl (a, t, 0, nullptr).second);
if (s == target_state::failed)
throw failed ();
@@ -385,7 +498,7 @@ namespace build2
}
}
- match_inc_dependens (a, t);
+ match_inc_dependents (a, t);
return make_pair (false, s);;
}
@@ -397,23 +510,49 @@ namespace build2
context& ctx (t.ctx);
assert (ctx.phase == run_phase::match);
- target_state r (match (a, t, sc, &tc).second);
+ target_state r (match_impl (a, t, sc, &tc).second);
- if (fail && !ctx.keep_going && r == target_state::failed)
+ if (r == target_state::failed && fail && !ctx.keep_going)
throw failed ();
return r;
}
+ inline target_state
+ match_complete (action a, const target& t, bool fail)
+ {
+ return match_sync (a, t, fail);
+ }
+
+ inline pair<bool, target_state>
+ match_complete (action a, const target& t, unmatch um)
+ {
+ return match_sync (a, t, um);
+ }
+
// Clear rule match-specific target data.
//
inline void
clear_target (action a, target& t)
{
- t[a].vars.clear ();
+ target::opstate& s (t.state[a]);
+ s.recipe = nullptr;
+ s.recipe_keep = false;
+ s.resolve_counted = false;
+ s.vars.clear ();
t.prerequisite_targets[a].clear ();
- if (a.inner ())
- t.clear_data ();
+ }
+
+ LIBBUILD2_SYMEXPORT void
+ set_rule_trace (target_lock&, const rule_match*);
+
+ inline void
+ set_rule (target_lock& l, const rule_match* r)
+ {
+ if (l.target->ctx.trace_match == nullptr)
+ (*l.target)[l.action].rule = r;
+ else
+ set_rule_trace (l, r);
}
inline void
@@ -423,6 +562,7 @@ namespace build2
target::opstate& s (t[l.action]);
s.recipe = move (r);
+ s.recipe_group_action = false;
// If this is a noop recipe, then mark the target unchanged to allow for
// some optimizations.
@@ -448,9 +588,11 @@ namespace build2
// likely. The alternative (trying to "merge" the count keeping track of
// whether inner and/or outer is noop) gets hairy rather quickly.
//
- if (l.action.inner ())
+ if (f != nullptr && *f == &group_action)
+ s.recipe_group_action = true;
+ else
{
- if (f == nullptr || *f != &group_action)
+ if (l.action.inner ())
t.ctx.target_count.fetch_add (1, memory_order_relaxed);
}
}
@@ -464,7 +606,7 @@ namespace build2
l.target->ctx.phase == run_phase::match);
clear_target (l.action, *l.target);
- (*l.target)[l.action].rule = nullptr; // No rule.
+ set_rule (l, nullptr); // No rule.
set_recipe (l, move (r));
l.offset = target::offset_applied;
}
@@ -477,7 +619,7 @@ namespace build2
l.target->ctx.phase == run_phase::match);
clear_target (l.action, *l.target);
- (*l.target)[l.action].rule = &r;
+ set_rule (l, &r);
l.offset = target::offset_matched;
}
@@ -499,18 +641,18 @@ namespace build2
// In a sense this is like any other dependency.
//
assert (a.outer ());
- return match (a.inner_action (), t);
+ return match_sync (a.inner_action (), t);
}
inline pair<bool, target_state>
match_inner (action a, const target& t, unmatch um)
{
assert (a.outer ());
- return match (a.inner_action (), t, um);
+ return match_sync (a.inner_action (), t, um);
}
LIBBUILD2_SYMEXPORT void
- resolve_group_impl (action, const target&, target_lock);
+ resolve_group_impl (action, const target&, target_lock&&);
inline const target*
resolve_group (action a, const target& t)
@@ -544,7 +686,7 @@ namespace build2
inline void
inject (action a, target& t, const target& p)
{
- match (a, p);
+ match_sync (a, p);
t.prerequisite_targets[a].emplace_back (&p);
}
@@ -608,23 +750,26 @@ namespace build2
}
LIBBUILD2_SYMEXPORT target_state
- execute (action, const target&, size_t, atomic_count*);
+ execute_impl (action, const target&, size_t, atomic_count*);
inline target_state
- execute (action a, const target& t)
+ execute_sync (action a, const target& t, bool fail)
{
- return execute (a, t, 0, nullptr);
- }
+ target_state r (execute_impl (a, t, 0, nullptr));
- inline target_state
- execute_wait (action a, const target& t)
- {
- if (execute (a, t) == target_state::busy)
- t.ctx.sched.wait (t.ctx.count_executed (),
+ if (r == target_state::busy)
+ {
+ t.ctx.sched->wait (t.ctx.count_executed (),
t[a].task_count,
scheduler::work_none);
- return t.executed_state (a);
+ r = t.executed_state (a, false);
+ }
+
+ if (r == target_state::failed && fail)
+ throw failed ();
+
+ return r;
}
inline target_state
@@ -632,9 +777,62 @@ namespace build2
size_t sc, atomic_count& tc,
bool fail)
{
- target_state r (execute (a, t, sc, &tc));
+ target_state r (execute_impl (a, t, sc, &tc));
+
+ if (r == target_state::failed && fail && !t.ctx.keep_going)
+ throw failed ();
+
+ return r;
+ }
+
+ inline target_state
+ execute_complete (action a, const target& t)
+ {
+ // Note: standard operation execute() sidesteps this and calls
+ // executed_state() directly.
+
+ context& ctx (t.ctx);
+
+ // If the target is still busy, wait for its completion.
+ //
+ ctx.sched->wait (ctx.count_executed (),
+ t[a].task_count,
+ scheduler::work_none);
+
+ return t.executed_state (a);
+ }
+
+ LIBBUILD2_SYMEXPORT target_state
+ execute_direct_impl (action, const target&, size_t, atomic_count*);
+
+ inline target_state
+ execute_direct_sync (action a, const target& t, bool fail)
+ {
+ target_state r (execute_direct_impl (a, t, 0, nullptr));
+
+ if (r == target_state::busy)
+ {
+ t.ctx.sched->wait (t.ctx.count_executed (),
+ t[a].task_count,
+ scheduler::work_none);
+
+ r = t.executed_state (a, false);
+ }
+
+ if (r == target_state::failed && fail)
+ throw failed ();
+
+ return r;
+ }
+
+ inline target_state
+ execute_direct_async (action a, const target& t,
+ size_t sc, atomic_count& tc,
+ bool fail)
+ {
+ target_state r (execute_direct_impl (a, t, sc, &tc));
- if (fail && !t.ctx.keep_going && r == target_state::failed)
+ if (r == target_state::failed && fail && !t.ctx.keep_going)
throw failed ();
return r;
@@ -650,7 +848,7 @@ namespace build2
execute_inner (action a, const target& t)
{
assert (a.outer ());
- return execute_wait (a.inner_action (), t);
+ return execute_sync (a.inner_action (), t);
}
inline target_state
@@ -726,6 +924,12 @@ namespace build2
const timestamp&, const execute_filter&,
size_t);
+ LIBBUILD2_SYMEXPORT pair<optional<target_state>, const target*>
+ reverse_execute_prerequisites (const target_type*,
+ action, const target&,
+ const timestamp&, const execute_filter&,
+ size_t);
+
inline optional<target_state>
execute_prerequisites (action a, const target& t,
const timestamp& mt, const execute_filter& ef,
@@ -734,6 +938,14 @@ namespace build2
return execute_prerequisites (nullptr, a, t, mt, ef, n).first;
}
+ inline optional<target_state>
+ reverse_execute_prerequisites (action a, const target& t,
+ const timestamp& mt, const execute_filter& ef,
+ size_t n)
+ {
+ return reverse_execute_prerequisites (nullptr, a, t, mt, ef, n).first;
+ }
+
template <typename T>
inline pair<optional<target_state>, const T&>
execute_prerequisites (action a, const target& t,
@@ -767,8 +979,9 @@ namespace build2
p.first, static_cast<const T&> (p.second));
}
+ template <typename T>
inline target_state
- execute_members (action a, const target& t, const target* ts[], size_t n)
+ execute_members (action a, const target& t, T ts[], size_t n)
{
return t.ctx.current_mode == execution_mode::first
? straight_execute_members (a, t, ts, n, 0)
diff --git a/libbuild2/b-cmdline.cxx b/libbuild2/b-cmdline.cxx
new file mode 100644
index 0000000..77ad087
--- /dev/null
+++ b/libbuild2/b-cmdline.cxx
@@ -0,0 +1,504 @@
+// file : libbuild2/b-cmdline.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/b-cmdline.hxx>
+
+#include <limits>
+#include <cstring> // strcmp(), strchr()
+
+#include <libbutl/default-options.hxx>
+
+#include <libbuild2/b-options.hxx>
+#include <libbuild2/scheduler.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+using namespace std;
+using namespace butl;
+
+namespace cli = build2::build::cli;
+
+namespace build2
+{
+ b_cmdline
+ parse_b_cmdline (tracer& trace,
+ int argc, char* argv[],
+ b_options& ops,
+ uint16_t def_verb,
+ size_t def_jobs)
+ {
+ // Note that the diagnostics verbosity level can only be calculated after
+ // default options are loaded and merged (see below). Thus, until then we
+ // refer to the verbosity level specified on the command line.
+ //
+ auto verbosity = [&ops, def_verb] ()
+ {
+ uint16_t v (
+ ops.verbose_specified ()
+ ? ops.verbose ()
+ : (ops.V () ? 3 :
+ ops.v () ? 2 :
+ ops.quiet () || ops.silent () ? 0 : def_verb));
+ return v;
+ };
+
+ b_cmdline r;
+
+ // We want to be able to specify options, vars, and buildspecs in any
+ // order (it is really handy to just add -v at the end of the command
+ // line).
+ //
+ try
+ {
+ // Command line arguments starting position.
+ //
+ // We want the positions of the command line arguments to be after the
+ // default options files. Normally that would be achieved by passing the
+ // last position of the previous scanner to the next. The problem is
+ // that we parse the command line arguments first (for good reasons).
+ // Also the default options files parsing machinery needs the maximum
+ // number of arguments to be specified and assigns the positions below
+ // this value (see load_default_options() for details). So we are going
+ // to "reserve" the first half of the size_t value range for the default
+ // options positions and the second half for the command line arguments
+ // positions.
+ //
+ size_t args_pos (numeric_limits<size_t>::max () / 2);
+ cli::argv_file_scanner scan (argc, argv, "--options-file", args_pos);
+
+ size_t argn (0); // Argument count.
+ bool shortcut (false); // True if the shortcut syntax is used.
+
+ for (bool opt (true), var (true); scan.more (); )
+ {
+ if (opt)
+ {
+ // Parse the next chunk of options until we reach an argument (or
+ // eos).
+ //
+ if (ops.parse (scan) && !scan.more ())
+ break;
+
+ // If we see first "--", then we are done parsing options.
+ //
+ if (strcmp (scan.peek (), "--") == 0)
+ {
+ scan.next ();
+ opt = false;
+ continue;
+ }
+
+ // Fall through.
+ }
+
+ const char* s (scan.next ());
+
+ // See if this is a command line variable. What if someone needs to
+ // pass a buildspec that contains '='? One way to support this would
+ // be to quote such a buildspec (e.g., "'/tmp/foo=bar/'"). Or invent
+ // another separator. Or use a second "--". Actually, let's just do
+ // the second "--".
+ //
+ if (var)
+ {
+ // If we see second "--", then we are also done parsing variables.
+ //
+ if (strcmp (s, "--") == 0)
+ {
+ var = false;
+ continue;
+ }
+
+ if (const char* p = strchr (s, '=')) // Covers =, +=, and =+.
+ {
+ // Diagnose the empty variable name situation. Note that we don't
+ // allow "partially broken down" assignments (as in foo =bar)
+ // since foo= bar would be ambiguous.
+ //
+ if (p == s || (p == s + 1 && *s == '+'))
+ fail << "missing variable name in '" << s << "'";
+
+ r.cmd_vars.push_back (s);
+ continue;
+ }
+
+ // Handle the "broken down" variable assignments (i.e., foo = bar
+ // instead of foo=bar).
+ //
+ if (scan.more ())
+ {
+ const char* a (scan.peek ());
+
+ if (strcmp (a, "=" ) == 0 ||
+ strcmp (a, "+=") == 0 ||
+ strcmp (a, "=+") == 0)
+ {
+ string v (s);
+ v += a;
+
+ scan.next ();
+
+ if (scan.more ())
+ v += scan.next ();
+
+ r.cmd_vars.push_back (move (v));
+ continue;
+ }
+ }
+
+ // Fall through.
+ }
+
+ // Merge all the individual buildspec arguments into a single string.
+ // We use newlines to separate arguments so that line numbers in
+ // diagnostics signify argument numbers. Clever, huh?
+ //
+ if (argn != 0)
+ r.buildspec += '\n';
+
+ r.buildspec += s;
+
+ // See if we are using the shortcut syntax.
+ //
+ if (argn == 0 && r.buildspec.back () == ':')
+ {
+ r.buildspec.back () = '(';
+ shortcut = true;
+ }
+
+ argn++;
+ }
+
+ // Add the closing parenthesis unless there wasn't anything in between
+ // in which case pop the opening one.
+ //
+ if (shortcut)
+ {
+ if (argn == 1)
+ r.buildspec.pop_back ();
+ else
+ r.buildspec += ')';
+ }
+
+ // Get/set an environment variable tracing the operation.
+ //
+ auto get_env = [&verbosity, &trace] (const char* nm)
+ {
+ optional<string> r (getenv (nm));
+
+ if (verbosity () >= 5)
+ {
+ if (r)
+ trace << nm << ": '" << *r << "'";
+ else
+ trace << nm << ": <NULL>";
+ }
+
+ return r;
+ };
+
+ auto set_env = [&verbosity, &trace] (const char* nm, const string& vl)
+ {
+ try
+ {
+ if (verbosity () >= 5)
+ trace << "setting " << nm << "='" << vl << "'";
+
+ setenv (nm, vl);
+ }
+ catch (const system_error& e)
+ {
+ // The variable value can potentially be long/multi-line, so let's
+ // print it last.
+ //
+ fail << "unable to set environment variable " << nm << ": " << e <<
+ info << "value: '" << vl << "'";
+ }
+ };
+
+ // If the BUILD2_VAR_OVR environment variable is present, then parse its
+ // value as a newline-separated global variable overrides and prepend
+ // them to the overrides specified on the command line.
+ //
+ // Note that this means global overrides may not contain a newline.
+
+ // Verify that the string is a valid global override. Uses the file name
+ // and the options flag for diagnostics only.
+ //
+ auto verify_glb_ovr = [] (const string& v, const path_name& fn, bool opt)
+ {
+ size_t p (v.find ('=', 1));
+ if (p == string::npos || v[0] != '!')
+ {
+ diag_record dr (fail (fn));
+ dr << "expected " << (opt ? "option or " : "") << "global "
+ << "variable override instead of '" << v << "'";
+
+ if (p != string::npos)
+ dr << info << "prefix variable assignment with '!'";
+ }
+
+ if (p == 1 || (p == 2 && v[1] == '+')) // '!=' or '!+=' ?
+ fail (fn) << "missing variable name in '" << v << "'";
+ };
+
+ optional<string> env_ovr (get_env ("BUILD2_VAR_OVR"));
+ if (env_ovr)
+ {
+ path_name fn ("<BUILD2_VAR_OVR>");
+
+ auto i (r.cmd_vars.begin ());
+ for (size_t b (0), e (0); next_word (*env_ovr, b, e, '\n', '\r'); )
+ {
+ // Extract the override from the current line, stripping the leading
+ // and trailing spaces.
+ //
+ string s (*env_ovr, b, e - b);
+ trim (s);
+
+ // Verify and save the override, unless the line is empty.
+ //
+ if (!s.empty ())
+ {
+ verify_glb_ovr (s, fn, false /* opt */);
+ i = r.cmd_vars.insert (i, move (s)) + 1;
+ }
+ }
+ }
+
+ // Load the default options files, unless --no-default-options is
+ // specified on the command line or the BUILD2_DEF_OPT environment
+ // variable is set to a value other than 'true' or '1'.
+ //
+ // If loaded, prepend the default global overrides to the variables
+ // specified on the command line, unless BUILD2_VAR_OVR is set in which
+ // case just ignore them.
+ //
+ optional<string> env_def (get_env ("BUILD2_DEF_OPT"));
+
+ // False if --no-default-options is specified on the command line. Note
+ // that we cache the flag since it can be overridden by a default
+ // options file.
+ //
+ bool cmd_def (!ops.no_default_options ());
+
+ if (cmd_def && (!env_def || *env_def == "true" || *env_def == "1"))
+ try
+ {
+ optional<dir_path> extra;
+ if (ops.default_options_specified ())
+ {
+ extra = ops.default_options ();
+
+ // Note that load_default_options() expects absolute and normalized
+ // directory.
+ //
+ try
+ {
+ if (extra->relative ())
+ extra->complete ();
+
+ extra->normalize ();
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid --default-options value " << e.path;
+ }
+ }
+
+ // Load default options files.
+ //
+ default_options<b_options> def_ops (
+ load_default_options<b_options,
+ cli::argv_file_scanner,
+ cli::unknown_mode> (
+ nullopt /* sys_dir */,
+ path::home_directory (), // The home variable is not assigned yet.
+ extra,
+ default_options_files {{path ("b.options")},
+ nullopt /* start */},
+ [&trace, &verbosity] (const path& f, bool r, bool o)
+ {
+ if (verbosity () >= 3)
+ {
+ if (o)
+ trace << "treating " << f << " as "
+ << (r ? "remote" : "local");
+ else
+ trace << "loading " << (r ? "remote " : "local ") << f;
+ }
+ },
+ "--options-file",
+ args_pos,
+ 1024,
+ true /* args */));
+
+ // Merge the default and command line options.
+ //
+ ops = merge_default_options (def_ops, ops);
+
+ // Merge the default and command line global overrides, unless
+ // BUILD2_VAR_OVR is already set (in which case we assume this has
+ // already been done).
+ //
+ // Note that the "broken down" variable assignments occupying a single
+ // line are naturally supported.
+ //
+ if (!env_ovr)
+ r.cmd_vars =
+ merge_default_arguments (
+ def_ops,
+ r.cmd_vars,
+ [&verify_glb_ovr] (const default_options_entry<b_options>& e,
+ const strings&)
+ {
+ path_name fn (e.file);
+
+ // Verify that all arguments are global overrides.
+ //
+ for (const string& a: e.arguments)
+ verify_glb_ovr (a, fn, true /* opt */);
+ });
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "unable to load default options files: " << e;
+ }
+ catch (const pair<path, system_error>& e)
+ {
+ fail << "unable to load default options files: " << e.first << ": "
+ << e.second;
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to obtain home directory: " << e;
+ }
+
+ // Verify and save the global overrides present in cmd_vars (default,
+ // from the command line, etc), if any, into the BUILD2_VAR_OVR
+ // environment variable.
+ //
+ if (!r.cmd_vars.empty ())
+ {
+ string ovr;
+ for (const string& v: r.cmd_vars)
+ {
+ if (v[0] == '!')
+ {
+ if (v.find_first_of ("\n\r") != string::npos)
+ fail << "newline in global variable override '" << v << "'";
+
+ if (!ovr.empty ())
+ ovr += '\n';
+
+ ovr += v;
+ }
+ }
+
+ // Optimize for the common case.
+ //
+ // Note: cmd_vars may contain non-global overrides.
+ //
+ if (!ovr.empty () && (!env_ovr || *env_ovr != ovr))
+ set_env ("BUILD2_VAR_OVR", ovr);
+ }
+
+ // Propagate disabling of the default options files to the potential
+ // nested invocations.
+ //
+ if (!cmd_def && (!env_def || *env_def != "0"))
+ set_env ("BUILD2_DEF_OPT", "0");
+
+ // Validate options.
+ //
+ if (ops.progress () && ops.no_progress ())
+ fail << "both --progress and --no-progress specified";
+
+ if (ops.diag_color () && ops.no_diag_color ())
+ fail << "both --diag-color and --no-diag-color specified";
+
+ if (ops.mtime_check () && ops.no_mtime_check ())
+ fail << "both --mtime-check and --no-mtime-check specified";
+
+ if (ops.match_only () && ops.load_only ())
+ fail << "both --match-only and --load-only specified";
+ }
+ catch (const cli::exception& e)
+ {
+ fail << e;
+ }
+
+ if (ops.help () || ops.version ())
+ return r;
+
+ r.verbosity = verbosity ();
+
+ if (ops.silent () && r.verbosity != 0)
+ fail << "specified with -v, -V, or --verbose verbosity level "
+ << r.verbosity << " is incompatible with --silent";
+
+ r.progress = (ops.progress () ? optional<bool> (true) :
+ ops.no_progress () ? optional<bool> (false) : nullopt);
+
+ r.diag_color = (ops.diag_color () ? optional<bool> (true) :
+ ops.no_diag_color () ? optional<bool> (false) : nullopt);
+
+ r.mtime_check = (ops.mtime_check () ? optional<bool> (true) :
+ ops.no_mtime_check () ? optional<bool> (false) : nullopt);
+
+
+ r.config_sub = (ops.config_sub_specified ()
+ ? optional<path> (ops.config_sub ())
+ : nullopt);
+
+ r.config_guess = (ops.config_guess_specified ()
+ ? optional<path> (ops.config_guess ())
+ : nullopt);
+
+ if (ops.jobs_specified ())
+ r.jobs = ops.jobs ();
+ else if (ops.serial_stop ())
+ r.jobs = 1;
+
+ if (def_jobs != 0)
+ r.jobs = def_jobs;
+ else
+ {
+ if (r.jobs == 0)
+ r.jobs = scheduler::hardware_concurrency ();
+
+ if (r.jobs == 0)
+ {
+ warn << "unable to determine the number of hardware threads" <<
+ info << "falling back to serial execution" <<
+ info << "use --jobs|-j to override";
+
+ r.jobs = 1;
+ }
+ }
+
+ if (ops.max_jobs_specified ())
+ {
+ r.max_jobs = ops.max_jobs ();
+
+ if (r.max_jobs != 0 && r.max_jobs < r.jobs)
+ fail << "invalid --max-jobs|-J value";
+ }
+
+ r.max_stack = (ops.max_stack_specified ()
+ ? optional<size_t> (ops.max_stack () * 1024)
+ : nullopt);
+
+ if (ops.file_cache_specified ())
+ {
+ const string& v (ops.file_cache ());
+ if (v == "noop" || v == "none")
+ r.fcache_compress = false;
+ else if (v == "sync-lz4")
+ r.fcache_compress = true;
+ else
+ fail << "invalid --file-cache value '" << v << "'";
+ }
+
+ return r;
+ }
+}
diff --git a/libbuild2/b-cmdline.hxx b/libbuild2/b-cmdline.hxx
new file mode 100644
index 0000000..8ccbb20
--- /dev/null
+++ b/libbuild2/b-cmdline.hxx
@@ -0,0 +1,45 @@
+// file : libbuild2/b-cmdline.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_B_CMDLINE_HXX
+#define LIBBUILD2_B_CMDLINE_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/forward.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/b-options.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+#include <libbuild2/export.hxx>
+
+namespace build2
+{
+ struct b_cmdline
+ {
+ strings cmd_vars;
+ string buildspec;
+
+  // Processed/merged option values (unless --help or --version specified).
+ //
+ uint16_t verbosity = 1;
+ optional<bool> progress;
+ optional<bool> diag_color;
+ optional<bool> mtime_check;
+ optional<path> config_sub;
+ optional<path> config_guess;
+ size_t jobs = 0;
+ size_t max_jobs = 0;
+ optional<size_t> max_stack;
+ bool fcache_compress = true;
+ };
+
+ LIBBUILD2_SYMEXPORT b_cmdline
+ parse_b_cmdline (tracer&,
+ int argc, char* argv[],
+ b_options&,
+ uint16_t default_verbosity = 1,
+ size_t default_jobs = 0);
+}
+
+#endif // LIBBUILD2_B_CMDLINE_HXX
diff --git a/libbuild2/b-options.cxx b/libbuild2/b-options.cxx
new file mode 100644
index 0000000..c107b44
--- /dev/null
+++ b/libbuild2/b-options.cxx
@@ -0,0 +1,1607 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+#include <libbuild2/types-parsers.hxx>
+//
+// End prologue.
+
+#include <libbuild2/b-options.hxx>
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+#include <utility>
+#include <ostream>
+#include <sstream>
+#include <cstring>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ template <typename X>
+ struct parser
+ {
+ static void
+ parse (X& x, bool& xs, scanner& s)
+ {
+ using namespace std;
+
+ const char* o (s.next ());
+ if (s.more ())
+ {
+ string v (s.next ());
+ istringstream is (v);
+ if (!(is >> x && is.peek () == istringstream::traits_type::eof ()))
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (X& b, const X& a)
+ {
+ b = a;
+ }
+ };
+
+ template <>
+ struct parser<bool>
+ {
+ static void
+ parse (bool& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (bool& b, const bool&)
+ {
+ b = true;
+ }
+ };
+
+ template <>
+ struct parser<std::string>
+ {
+ static void
+ parse (std::string& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ x = s.next ();
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (std::string& b, const std::string& a)
+ {
+ b = a;
+ }
+ };
+
+ template <typename X>
+ struct parser<std::pair<X, std::size_t> >
+ {
+ static void
+ parse (std::pair<X, std::size_t>& x, bool& xs, scanner& s)
+ {
+ x.second = s.position ();
+ parser<X>::parse (x.first, xs, s);
+ }
+
+ static void
+ merge (std::pair<X, std::size_t>& b, const std::pair<X, std::size_t>& a)
+ {
+ b = a;
+ }
+ };
+
+ template <typename X>
+ struct parser<std::vector<X> >
+ {
+ static void
+ parse (std::vector<X>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.push_back (x);
+ xs = true;
+ }
+
+ static void
+ merge (std::vector<X>& b, const std::vector<X>& a)
+ {
+ b.insert (b.end (), a.begin (), a.end ());
+ }
+ };
+
+ template <typename X, typename C>
+ struct parser<std::set<X, C> >
+ {
+ static void
+ parse (std::set<X, C>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.insert (x);
+ xs = true;
+ }
+
+ static void
+ merge (std::set<X, C>& b, const std::set<X, C>& a)
+ {
+ b.insert (a.begin (), a.end ());
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::map<K, V, C> >
+ {
+ static void
+ parse (std::map<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m[k] = v;
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (std::map<K, V, C>& b, const std::map<K, V, C>& a)
+ {
+ for (typename std::map<K, V, C>::const_iterator i (a.begin ());
+ i != a.end ();
+ ++i)
+ b[i->first] = i->second;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (std::multimap<K, V, C>& b, const std::multimap<K, V, C>& a)
+ {
+ for (typename std::multimap<K, V, C>::const_iterator i (a.begin ());
+ i != a.end ();
+ ++i)
+ b.insert (typename std::multimap<K, V, C>::value_type (i->first,
+ i->second));
+ }
+ };
+
+ template <typename X, typename T, T X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, s);
+ }
+
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
+ template <typename X, typename T, T X::*M, bool X::*S>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, x.*S, s);
+ }
+ }
+ }
+}
+
+#include <map>
+
+namespace build2
+{
+ // b_options
+ //
+
+ b_options::
+ b_options ()
+ : build2_metadata_ (),
+ build2_metadata_specified_ (false),
+ v_ (),
+ V_ (),
+ quiet_ (),
+ silent_ (),
+ verbose_ (1),
+ verbose_specified_ (false),
+ stat_ (),
+ progress_ (),
+ no_progress_ (),
+ diag_color_ (),
+ no_diag_color_ (),
+ jobs_ (),
+ jobs_specified_ (false),
+ max_jobs_ (),
+ max_jobs_specified_ (false),
+ queue_depth_ (4),
+ queue_depth_specified_ (false),
+ file_cache_ (),
+ file_cache_specified_ (false),
+ max_stack_ (),
+ max_stack_specified_ (false),
+ serial_stop_ (),
+ dry_run_ (),
+ no_diag_buffer_ (),
+ match_only_ (),
+ load_only_ (),
+ no_external_modules_ (),
+ structured_result_ (),
+ structured_result_specified_ (false),
+ mtime_check_ (),
+ no_mtime_check_ (),
+ dump_ (),
+ dump_specified_ (false),
+ dump_format_ (),
+ dump_format_specified_ (false),
+ dump_scope_ (),
+ dump_scope_specified_ (false),
+ dump_target_ (),
+ dump_target_specified_ (false),
+ trace_match_ (),
+ trace_match_specified_ (false),
+ trace_execute_ (),
+ trace_execute_specified_ (false),
+ no_column_ (),
+ no_line_ (),
+ buildfile_ (),
+ buildfile_specified_ (false),
+ config_guess_ (),
+ config_guess_specified_ (false),
+ config_sub_ (),
+ config_sub_specified_ (false),
+ pager_ (),
+ pager_specified_ (false),
+ pager_option_ (),
+ pager_option_specified_ (false),
+ options_file_ (),
+ options_file_specified_ (false),
+ default_options_ (),
+ default_options_specified_ (false),
+ no_default_options_ (),
+ help_ (),
+ version_ ()
+ {
+ }
+
+ bool b_options::
+ parse (int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ bool b_options::
+ parse (int start,
+ int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ bool b_options::
+ parse (int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ end = s.end ();
+ return r;
+ }
+
+ bool b_options::
+ parse (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ end = s.end ();
+ return r;
+ }
+
+ bool b_options::
+ parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ void b_options::
+ merge (const b_options& a)
+ {
+ CLI_POTENTIALLY_UNUSED (a);
+
+ if (a.build2_metadata_specified_)
+ {
+ ::build2::build::cli::parser< uint64_t>::merge (
+ this->build2_metadata_, a.build2_metadata_);
+ this->build2_metadata_specified_ = true;
+ }
+
+ if (a.v_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->v_, a.v_);
+ }
+
+ if (a.V_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->V_, a.V_);
+ }
+
+ if (a.quiet_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->quiet_, a.quiet_);
+ }
+
+ if (a.silent_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->silent_, a.silent_);
+ }
+
+ if (a.verbose_specified_)
+ {
+ ::build2::build::cli::parser< uint16_t>::merge (
+ this->verbose_, a.verbose_);
+ this->verbose_specified_ = true;
+ }
+
+ if (a.stat_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->stat_, a.stat_);
+ }
+
+ if (a.progress_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->progress_, a.progress_);
+ }
+
+ if (a.no_progress_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_progress_, a.no_progress_);
+ }
+
+ if (a.diag_color_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->diag_color_, a.diag_color_);
+ }
+
+ if (a.no_diag_color_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_diag_color_, a.no_diag_color_);
+ }
+
+ if (a.jobs_specified_)
+ {
+ ::build2::build::cli::parser< size_t>::merge (
+ this->jobs_, a.jobs_);
+ this->jobs_specified_ = true;
+ }
+
+ if (a.max_jobs_specified_)
+ {
+ ::build2::build::cli::parser< size_t>::merge (
+ this->max_jobs_, a.max_jobs_);
+ this->max_jobs_specified_ = true;
+ }
+
+ if (a.queue_depth_specified_)
+ {
+ ::build2::build::cli::parser< size_t>::merge (
+ this->queue_depth_, a.queue_depth_);
+ this->queue_depth_specified_ = true;
+ }
+
+ if (a.file_cache_specified_)
+ {
+ ::build2::build::cli::parser< string>::merge (
+ this->file_cache_, a.file_cache_);
+ this->file_cache_specified_ = true;
+ }
+
+ if (a.max_stack_specified_)
+ {
+ ::build2::build::cli::parser< size_t>::merge (
+ this->max_stack_, a.max_stack_);
+ this->max_stack_specified_ = true;
+ }
+
+ if (a.serial_stop_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->serial_stop_, a.serial_stop_);
+ }
+
+ if (a.dry_run_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->dry_run_, a.dry_run_);
+ }
+
+ if (a.no_diag_buffer_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_diag_buffer_, a.no_diag_buffer_);
+ }
+
+ if (a.match_only_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->match_only_, a.match_only_);
+ }
+
+ if (a.load_only_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->load_only_, a.load_only_);
+ }
+
+ if (a.no_external_modules_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_external_modules_, a.no_external_modules_);
+ }
+
+ if (a.structured_result_specified_)
+ {
+ ::build2::build::cli::parser< structured_result_format>::merge (
+ this->structured_result_, a.structured_result_);
+ this->structured_result_specified_ = true;
+ }
+
+ if (a.mtime_check_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->mtime_check_, a.mtime_check_);
+ }
+
+ if (a.no_mtime_check_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_mtime_check_, a.no_mtime_check_);
+ }
+
+ if (a.dump_specified_)
+ {
+ ::build2::build::cli::parser< strings>::merge (
+ this->dump_, a.dump_);
+ this->dump_specified_ = true;
+ }
+
+ if (a.dump_format_specified_)
+ {
+ ::build2::build::cli::parser< string>::merge (
+ this->dump_format_, a.dump_format_);
+ this->dump_format_specified_ = true;
+ }
+
+ if (a.dump_scope_specified_)
+ {
+ ::build2::build::cli::parser< dir_paths>::merge (
+ this->dump_scope_, a.dump_scope_);
+ this->dump_scope_specified_ = true;
+ }
+
+ if (a.dump_target_specified_)
+ {
+ ::build2::build::cli::parser< vector<pair<name, optional<name>>>>::merge (
+ this->dump_target_, a.dump_target_);
+ this->dump_target_specified_ = true;
+ }
+
+ if (a.trace_match_specified_)
+ {
+ ::build2::build::cli::parser< vector<name>>::merge (
+ this->trace_match_, a.trace_match_);
+ this->trace_match_specified_ = true;
+ }
+
+ if (a.trace_execute_specified_)
+ {
+ ::build2::build::cli::parser< vector<name>>::merge (
+ this->trace_execute_, a.trace_execute_);
+ this->trace_execute_specified_ = true;
+ }
+
+ if (a.no_column_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_column_, a.no_column_);
+ }
+
+ if (a.no_line_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_line_, a.no_line_);
+ }
+
+ if (a.buildfile_specified_)
+ {
+ ::build2::build::cli::parser< path>::merge (
+ this->buildfile_, a.buildfile_);
+ this->buildfile_specified_ = true;
+ }
+
+ if (a.config_guess_specified_)
+ {
+ ::build2::build::cli::parser< path>::merge (
+ this->config_guess_, a.config_guess_);
+ this->config_guess_specified_ = true;
+ }
+
+ if (a.config_sub_specified_)
+ {
+ ::build2::build::cli::parser< path>::merge (
+ this->config_sub_, a.config_sub_);
+ this->config_sub_specified_ = true;
+ }
+
+ if (a.pager_specified_)
+ {
+ ::build2::build::cli::parser< string>::merge (
+ this->pager_, a.pager_);
+ this->pager_specified_ = true;
+ }
+
+ if (a.pager_option_specified_)
+ {
+ ::build2::build::cli::parser< strings>::merge (
+ this->pager_option_, a.pager_option_);
+ this->pager_option_specified_ = true;
+ }
+
+ if (a.options_file_specified_)
+ {
+ ::build2::build::cli::parser< string>::merge (
+ this->options_file_, a.options_file_);
+ this->options_file_specified_ = true;
+ }
+
+ if (a.default_options_specified_)
+ {
+ ::build2::build::cli::parser< dir_path>::merge (
+ this->default_options_, a.default_options_);
+ this->default_options_specified_ = true;
+ }
+
+ if (a.no_default_options_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_default_options_, a.no_default_options_);
+ }
+
+ if (a.help_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->help_, a.help_);
+ }
+
+ if (a.version_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->version_, a.version_);
+ }
+ }
+
+ ::build2::build::cli::usage_para b_options::
+ print_usage (::std::ostream& os, ::build2::build::cli::usage_para p)
+ {
+ CLI_POTENTIALLY_UNUSED (os);
+
+ if (p != ::build2::build::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mOPTIONS\033[0m" << ::std::endl;
+
+ os << std::endl
+ << "\033[1m-v\033[0m Print actual commands being executed. This options is" << ::std::endl
+ << " equivalent to \033[1m--verbose 2\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m-V\033[0m Print all underlying commands being executed. This" << ::std::endl
+ << " options is equivalent to \033[1m--verbose 3\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--quiet\033[0m|\033[1m-q\033[0m Run quietly, only printing error messages in most" << ::std::endl
+ << " contexts. In certain contexts (for example, while" << ::std::endl
+ << " updating build system modules) this verbosity level may" << ::std::endl
+ << " be ignored. Use \033[1m--silent\033[0m to run quietly in all" << ::std::endl
+ << " contexts. This option is equivalent to \033[1m--verbose 0\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--silent\033[0m Run quietly, only printing error messages in all" << ::std::endl
+ << " contexts." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--verbose\033[0m \033[4mlevel\033[0m Set the diagnostics verbosity to \033[4mlevel\033[0m between 0 and 6." << ::std::endl
+ << " Level 0 disables any non-error messages (but see the" << ::std::endl
+ << " difference between \033[1m--quiet\033[0m and \033[1m--silent\033[0m) while level 6" << ::std::endl
+ << " produces lots of information, with level 1 being the" << ::std::endl
+ << " default. The following additional types of diagnostics" << ::std::endl
+ << " are produced at each level:" << ::std::endl
+ << ::std::endl
+ << " 1. High-level information messages." << ::std::endl
+ << " 2. Essential underlying commands being executed." << ::std::endl
+ << " 3. All underlying commands being executed." << ::std::endl
+ << " 4. Information that could be helpful to the user." << ::std::endl
+ << " 5. Information that could be helpful to the developer." << ::std::endl
+ << " 6. Even more detailed information." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--stat\033[0m Display build statistics." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--progress\033[0m Display build progress. If printing to a terminal the" << ::std::endl
+ << " progress is displayed by default for low verbosity" << ::std::endl
+ << " levels. Use \033[1m--no-progress\033[0m to suppress." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-progress\033[0m Don't display build progress." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--diag-color\033[0m Use color in diagnostics. If printing to a terminal the" << ::std::endl
+ << " color is used by default provided the terminal is not" << ::std::endl
+ << " dumb. Use \033[1m--no-diag-color\033[0m to suppress." << ::std::endl
+ << ::std::endl
+ << " This option affects the diagnostics printed by the" << ::std::endl
+ << " build system itself. Some rules may also choose to" << ::std::endl
+ << " propagate its value to tools (such as compilers) that" << ::std::endl
+ << " they invoke." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-diag-color\033[0m Don't use color in diagnostics." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--jobs\033[0m|\033[1m-j\033[0m \033[4mnum\033[0m Number of active jobs to perform in parallel. This" << ::std::endl
+ << " includes both the number of active threads inside the" << ::std::endl
+ << " build system as well as the number of external commands" << ::std::endl
+ << " (compilers, linkers, etc) started but not yet finished." << ::std::endl
+ << " If this option is not specified or specified with the" << ::std::endl
+ << " \033[1m0\033[0m value, then the number of available hardware threads" << ::std::endl
+ << " is used." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--max-jobs\033[0m|\033[1m-J\033[0m \033[4mnum\033[0m Maximum number of jobs (threads) to create. The default" << ::std::endl
+ << " is 8x the number of active jobs (\033[1m--jobs|j\033[0m) on 32-bit" << ::std::endl
+ << " architectures and 32x on 64-bit. See the build system" << ::std::endl
+ << " scheduler implementation for details." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--queue-depth\033[0m|\033[1m-Q\033[0m \033[4mnum\033[0m The queue depth as a multiplier over the number of" << ::std::endl
+ << " active jobs. Normally we want a deeper queue if the" << ::std::endl
+ << " jobs take long (for example, compilation) and shorter" << ::std::endl
+ << " if they are quick (for example, simple tests). The" << ::std::endl
+ << " default is 4. See the build system scheduler" << ::std::endl
+ << " implementation for details." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--file-cache\033[0m \033[4mimpl\033[0m File cache implementation to use for intermediate build" << ::std::endl
+ << " results. Valid values are \033[1mnoop\033[0m (no caching or" << ::std::endl
+ << " compression) and \033[1msync-lz4\033[0m (no caching with synchronous" << ::std::endl
+ << " LZ4 on-disk compression). If this option is not" << ::std::endl
+ << " specified, then a suitable default implementation is" << ::std::endl
+ << " used (currently \033[1msync-lz4\033[0m)." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--max-stack\033[0m \033[4mnum\033[0m The maximum stack size in KBytes to allow for newly" << ::std::endl
+ << " created threads. For \033[4mpthreads\033[0m-based systems the driver" << ::std::endl
+ << " queries the stack size of the main thread and uses the" << ::std::endl
+ << " same size for creating additional threads. This allows" << ::std::endl
+ << " adjusting the stack size using familiar mechanisms," << ::std::endl
+ << " such as \033[1mulimit\033[0m. Sometimes, however, the stack size of" << ::std::endl
+ << " the main thread is excessively large. As a result, the" << ::std::endl
+ << " driver checks if it is greater than a predefined limit" << ::std::endl
+ << " (64MB on 64-bit systems and 32MB on 32-bit ones) and" << ::std::endl
+ << " caps it to a more sensible value (8MB) if that's the" << ::std::endl
+ << " case. This option allows you to override this check" << ::std::endl
+ << " with the special zero value indicating that the main" << ::std::endl
+ << " thread stack size should be used as is." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--serial-stop\033[0m|\033[1m-s\033[0m Run serially and stop at the first error. This mode is" << ::std::endl
+ << " useful to investigate build failures that are caused by" << ::std::endl
+ << " build system errors rather than compilation errors." << ::std::endl
+ << " Note that if you don't want to keep going but still" << ::std::endl
+ << " want parallel execution, add \033[1m--jobs|-j\033[0m (for example \033[1m-j" << ::std::endl
+ << " 0\033[0m for default concurrency). Note also that during" << ::std::endl
+ << " serial execution there is no diagnostics buffering and" << ::std::endl
+ << " child process' \033[1mstderr\033[0m is a terminal (unless redirected;" << ::std::endl
+ << " see \033[1m--no-diag-buffer\033[0m for details)." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dry-run\033[0m|\033[1m-n\033[0m Print commands without actually executing them. Note" << ::std::endl
+ << " that commands that are required to create an accurate" << ::std::endl
+ << " build state will still be executed and the extracted" << ::std::endl
+ << " auxiliary dependency information saved. In other words," << ::std::endl
+ << " this is not the \033[4m\"don't touch the filesystem\"\033[0m mode but" << ::std::endl
+ << " rather \033[4m\"do minimum amount of work to show what needs to" << ::std::endl
+ << " be done\"\033[0m. Note also that only the \033[1mperform\033[0m" << ::std::endl
+ << " meta-operation supports this mode." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-diag-buffer\033[0m Do not buffer diagnostics from child processes. By" << ::std::endl
+ << " default, unless running serially, such diagnostics is" << ::std::endl
+ << " buffered and printed all at once after each child exits" << ::std::endl
+ << " in order to prevent interleaving. However, this can" << ::std::endl
+ << " have side-effects since the child process' \033[1mstderr\033[0m is no" << ::std::endl
+ << " longer a terminal. Most notably, the use of color in" << ::std::endl
+ << " diagnostics may be disabled by some programs. On the" << ::std::endl
+ << " other hand, depending on the platform and programs" << ::std::endl
+ << " invoked, the interleaving diagnostics may not break" << ::std::endl
+ << " lines and thus could be tolerable." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--match-only\033[0m Match the rules without executing the operation. This" << ::std::endl
+ << " mode is primarily useful for profiling and dumping the" << ::std::endl
+ << " build system state." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--load-only\033[0m Match the rules only to \033[1malias{}\033[0m targets ignoring other" << ::std::endl
+ << " targets and without executing the operation. In" << ::std::endl
+ << " particular, this has the effect of loading all the" << ::std::endl
+ << " subdirectory \033[1mbuildfiles\033[0m that are not explicitly" << ::std::endl
+ << " included. Note that this option can only be used with" << ::std::endl
+ << " the \033[1mperform(update)\033[0m action on an \033[1malias{}\033[0m target," << ::std::endl
+ << " usually \033[1mdir{}\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-external-modules\033[0m Don't load external modules during project bootstrap." << ::std::endl
+ << " Note that this option can only be used with" << ::std::endl
+ << " meta-operations that do not load the project's" << ::std::endl
+ << " \033[1mbuildfiles\033[0m, such as \033[1minfo\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--structured-result\033[0m \033[4mfmt\033[0m Write the result of execution in a structured form. In" << ::std::endl
+ << " this mode, instead of printing to \033[1mstderr\033[0m diagnostics" << ::std::endl
+ << " messages about the outcome of executing actions on" << ::std::endl
+ << " targets, the driver writes to \033[1mstdout\033[0m a machine-readable" << ::std::endl
+ << " result description in the specified format. Valid" << ::std::endl
+ << " values for this option are \033[1mlines\033[0m and \033[1mjson\033[0m. Note that" << ::std::endl
+ << " currently only the \033[1mperform\033[0m meta-operation supports the" << ::std::endl
+ << " structured result output." << ::std::endl
+ << ::std::endl
+ << " If the output format is \033[1mlines\033[0m, then the result is" << ::std::endl
+ << " written one line per the buildspec action/target pair." << ::std::endl
+ << " Each line has the following form:" << ::std::endl
+ << ::std::endl
+ << " \033[4mstate\033[0m \033[4mmeta-operation\033[0m \033[4moperation\033[0m \033[4mtarget\033[0m\033[0m" << ::std::endl
+ << ::std::endl
+ << " Where \033[4mstate\033[0m can be one of \033[1munchanged\033[0m, \033[1mchanged\033[0m, or" << ::std::endl
+ << " \033[1mfailed\033[0m. If the action is a pre or post operation, then" << ::std::endl
+ << " the outer operation is specified in parenthesis. For" << ::std::endl
+ << " example:" << ::std::endl
+ << ::std::endl
+ << " unchanged perform update(test)" << ::std::endl
+ << " /tmp/hello/hello/exe{hello}" << ::std::endl
+ << " changed perform test /tmp/hello/hello/exe{hello}" << ::std::endl
+ << ::std::endl
+ << " If the output format is \033[1mjson\033[0m, then the output is a JSON" << ::std::endl
+ << " array of objects which are the serialized" << ::std::endl
+ << " representation of the following C++ \033[1mstruct\033[0m" << ::std::endl
+ << " \033[1mtarget_action_result\033[0m:" << ::std::endl
+ << ::std::endl
+ << " struct target_action_result" << ::std::endl
+ << " {" << ::std::endl
+ << " string target;" << ::std::endl
+ << " string display_target;" << ::std::endl
+ << " string target_type;" << ::std::endl
+ << " optional<string> target_path;" << ::std::endl
+ << " string meta_operation;" << ::std::endl
+ << " string operation;" << ::std::endl
+ << " optional<string> outer_operation;" << ::std::endl
+ << " string state;" << ::std::endl
+ << " };" << ::std::endl
+ << ::std::endl
+ << " For example:" << ::std::endl
+ << ::std::endl
+ << " [" << ::std::endl
+ << " {" << ::std::endl
+ << " \"target\": \"/tmp/hello/hello/exe{hello.}\"," << ::std::endl
+ << " \"display_target\": \"/tmp/hello/hello/exe{hello}\"," << ::std::endl
+ << " \"target_type\": \"exe\"," << ::std::endl
+ << " \"target_path\": \"/tmp/hello/hello/hello\"," << ::std::endl
+ << " \"meta_operation\": \"perform\"," << ::std::endl
+ << " \"operation\": \"update\"," << ::std::endl
+ << " \"outer_operation\": \"test\"," << ::std::endl
+ << " \"state\": \"unchanged\"" << ::std::endl
+ << " }," << ::std::endl
+ << " {" << ::std::endl
+ << " \"target\": \"/tmp/hello/hello/exe{hello.}\"," << ::std::endl
+ << " \"display_target\": \"/tmp/hello/hello/exe{hello}\"," << ::std::endl
+ << " \"target_type\": \"exe\"," << ::std::endl
+ << " \"target_path\": \"/tmp/hello/hello/hello\"," << ::std::endl
+ << " \"meta_operation\": \"perform\"," << ::std::endl
+ << " \"operation\": \"test\"," << ::std::endl
+ << " \"state\": \"changed\"" << ::std::endl
+ << " }" << ::std::endl
+ << " ]" << ::std::endl
+ << ::std::endl
+ << " See the JSON OUTPUT section below for details on the" << ::std::endl
+ << " overall properties of this format and the semantics of" << ::std::endl
+ << " the \033[1mstruct\033[0m serialization." << ::std::endl
+ << ::std::endl
+ << " The \033[1mtarget\033[0m member is the target name that is qualified" << ::std::endl
+ << " with the extension (if applicable) and, if required, is" << ::std::endl
+ << " quoted so that it can be passed back to the build" << ::std::endl
+ << " system driver on the command line. The \033[1mdisplay_target\033[0m" << ::std::endl
+ << " member is the unqualified and unquoted \"display\" target" << ::std::endl
+ << " name, the same as in the \033[1mlines\033[0m format. The \033[1mtarget_type\033[0m" << ::std::endl
+ << " member is the type of target. The \033[1mtarget_path\033[0m member" << ::std::endl
+ << " is an absolute path to the target if the target type is" << ::std::endl
+ << " path-based or \033[1mdir\033[0m." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--mtime-check\033[0m Perform file modification time sanity checks. These" << ::std::endl
+ << " checks can be helpful in diagnosing spurious rebuilds" << ::std::endl
+ << " and are enabled by default on Windows (which is known" << ::std::endl
+ << " not to guarantee monotonically increasing mtimes) and" << ::std::endl
+ << " for the staged version of the build system on other" << ::std::endl
+ << " platforms. Use \033[1m--no-mtime-check\033[0m to disable." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-mtime-check\033[0m Don't perform file modification time sanity checks. See" << ::std::endl
+ << " \033[1m--mtime-check\033[0m for details." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump\033[0m \033[4mphase\033[0m Dump the build system state after the specified phase." << ::std::endl
+ << " Valid \033[4mphase\033[0m values are \033[1mload\033[0m (after loading \033[1mbuildfiles\033[0m)" << ::std::endl
+ << " and \033[1mmatch\033[0m (after matching rules to targets). The \033[1mmatch\033[0m" << ::std::endl
+ << " value also has the \033[1mmatch-pre\033[0m and \033[1mmatch-post\033[0m variants to" << ::std::endl
+ << " dump the state for the pre/post-operations (\033[1mmatch\033[0m dumps" << ::std::endl
+ << " the main operation only). Repeat this option to dump" << ::std::endl
+ << " the state after multiple phases/variants. By default" << ::std::endl
+ << " the entire build state is dumped but this behavior can" << ::std::endl
+ << " be altered with the \033[1m--dump-scope\033[0m and \033[1m--dump-target\033[0m" << ::std::endl
+ << " options. See also the \033[1m--match-only\033[0m and \033[1m--load-only\033[0m" << ::std::endl
+ << " options." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-format\033[0m \033[4mformat\033[0m Representation format and output stream to use when" << ::std::endl
+ << " dumping the build system state. Valid values for this" << ::std::endl
+ << " option are \033[1mbuildfile\033[0m (a human-readable, Buildfile-like" << ::std::endl
+ << " format written to \033[1mstderr\033[0m; this is the default), and" << ::std::endl
+ << " \033[1mjson-v0.1\033[0m (machine-readable, JSON-based format written" << ::std::endl
+ << " to \033[1mstdout\033[0m). For details on the \033[1mbuildfile\033[0m format, see" << ::std::endl
+ << " Diagnostics and Debugging (b#intro-diag-debug). For" << ::std::endl
+ << " details on the \033[1mjson-v0.1\033[0m format, see the JSON OUTPUT" << ::std::endl
+ << " section below (overall properties) and JSON Dump Format" << ::std::endl
+ << " (b#json-dump) (format specifics). Note that the JSON" << ::std::endl
+ << " format is currently unstable (thus the temporary \033[1m-v0.1\033[0m" << ::std::endl
+ << " suffix)." << ::std::endl
+ << ::std::endl
+ << " Note that because it's possible to end up with multiple" << ::std::endl
+ << " dumps (for example, by specifying the \033[1m--dump-scope\033[0m" << ::std::endl
+ << " and/or \033[1m--dump-target\033[0m options multiple times), the JSON" << ::std::endl
+ << " output is in the \"JSON Lines\" form, that is, without" << ::std::endl
+ << " pretty-printing and with the top-level JSON objects" << ::std::endl
+ << " delimited by newlines. Note also that if the JSON dump" << ::std::endl
+ << " output is combined with \033[1m--structured-result=json\033[0m, then" << ::std::endl
+ << " the structured result is the last line." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-scope\033[0m \033[4mdir\033[0m Dump the build system state for the specified scope" << ::std::endl
+ << " only. Repeat this option to dump the state of multiple" << ::std::endl
+ << " scopes." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-target\033[0m \033[4mtarget\033[0m Dump the build system state for the specified target" << ::std::endl
+ << " only. Repeat this option to dump the state of multiple" << ::std::endl
+ << " targets." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--trace-match\033[0m \033[4mtarget\033[0m Trace rule matching for the specified target. This is" << ::std::endl
+ << " primarily useful during troubleshooting. Repeat this" << ::std::endl
+ << " option to trace multiple targets." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--trace-execute\033[0m \033[4mtarget\033[0m Trace rule execution for the specified target. This is" << ::std::endl
+ << " primarily useful during troubleshooting. Repeat this" << ::std::endl
+ << " option to trace multiple targets." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-column\033[0m Don't print column numbers in diagnostics." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-line\033[0m Don't print line and column numbers in diagnostics." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--buildfile\033[0m \033[4mpath\033[0m The alternative file to read build information from." << ::std::endl
+ << " The default is \033[1mbuildfile\033[0m or \033[1mbuild2file\033[0m, depending on" << ::std::endl
+ << " the project's build file/directory naming scheme. If" << ::std::endl
+ << " \033[4mpath\033[0m is '\033[1m-\033[0m', then read from \033[1mstdin\033[0m. Note that this" << ::std::endl
+ << " option only affects the files read as part of the" << ::std::endl
+ << " buildspec processing. Specifically, it has no effect on" << ::std::endl
+ << " the \033[1msource\033[0m and \033[1minclude\033[0m directives. As a result, this" << ::std::endl
+ << " option is primarily intended for testing rather than" << ::std::endl
+ << " changing the build file names in real projects." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--config-guess\033[0m \033[4mpath\033[0m The path to the \033[1mconfig.guess(1)\033[0m script that should be" << ::std::endl
+ << " used to guess the host machine triplet. If this option" << ::std::endl
+ << " is not specified, then \033[1mb\033[0m will fall back on to using the" << ::std::endl
+ << " target it was built for as host." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--config-sub\033[0m \033[4mpath\033[0m The path to the \033[1mconfig.sub(1)\033[0m script that should be" << ::std::endl
+ << " used to canonicalize machine triplets. If this option" << ::std::endl
+ << " is not specified, then \033[1mb\033[0m will use its built-in" << ::std::endl
+ << " canonicalization support which should be sufficient for" << ::std::endl
+ << " commonly-used platforms." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--pager\033[0m \033[4mpath\033[0m The pager program to be used to show long text." << ::std::endl
+ << " Commonly used pager programs are \033[1mless\033[0m and \033[1mmore\033[0m. You can" << ::std::endl
+ << " also specify additional options that should be passed" << ::std::endl
+ << " to the pager program with \033[1m--pager-option\033[0m. If an empty" << ::std::endl
+ << " string is specified as the pager program, then no pager" << ::std::endl
+ << " will be used. If the pager program is not explicitly" << ::std::endl
+ << " specified, then \033[1mb\033[0m will try to use \033[1mless\033[0m. If it is not" << ::std::endl
+ << " available, then no pager will be used." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--pager-option\033[0m \033[4mopt\033[0m Additional option to be passed to the pager program." << ::std::endl
+ << " See \033[1m--pager\033[0m for more information on the pager program." << ::std::endl
+ << " Repeat this option to specify multiple pager options." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--options-file\033[0m \033[4mfile\033[0m Read additional options from \033[4mfile\033[0m. Each option should" << ::std::endl
+ << " appear on a separate line optionally followed by space" << ::std::endl
+ << " or equal sign (\033[1m=\033[0m) and an option value. Empty lines and" << ::std::endl
+ << " lines starting with \033[1m#\033[0m are ignored. Option values can be" << ::std::endl
+ << " enclosed in double (\033[1m\"\033[0m) or single (\033[1m'\033[0m) quotes to preserve" << ::std::endl
+ << " leading and trailing whitespaces as well as to specify" << ::std::endl
+ << " empty values. If the value itself contains trailing or" << ::std::endl
+ << " leading quotes, enclose it with an extra pair of" << ::std::endl
+ << " quotes, for example \033[1m'\"x\"'\033[0m. Non-leading and non-trailing" << ::std::endl
+ << " quotes are interpreted as being part of the option" << ::std::endl
+ << " value." << ::std::endl
+ << ::std::endl
+ << " The semantics of providing options in a file is" << ::std::endl
+ << " equivalent to providing the same set of options in the" << ::std::endl
+ << " same order on the command line at the point where the" << ::std::endl
+ << " \033[1m--options-file\033[0m option is specified except that the" << ::std::endl
+ << " shell escaping and quoting is not required. Repeat this" << ::std::endl
+ << " option to specify more than one options file." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--default-options\033[0m \033[4mdir\033[0m The directory to load additional default options files" << ::std::endl
+ << " from." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-default-options\033[0m Don't load default options files." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--help\033[0m Print usage information and exit." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--version\033[0m Print version and exit." << ::std::endl;
+
+ p = ::build2::build::cli::usage_para::option;
+
+ return p;
+ }
+
+ typedef
+ std::map<std::string, void (*) (b_options&, ::build2::build::cli::scanner&)>
+ _cli_b_options_map;
+
+ static _cli_b_options_map _cli_b_options_map_;
+
+ struct _cli_b_options_map_init
+ {
+ _cli_b_options_map_init ()
+ {
+ _cli_b_options_map_["--build2-metadata"] =
+ &::build2::build::cli::thunk< b_options, uint64_t, &b_options::build2_metadata_,
+ &b_options::build2_metadata_specified_ >;
+ _cli_b_options_map_["-v"] =
+ &::build2::build::cli::thunk< b_options, &b_options::v_ >;
+ _cli_b_options_map_["-V"] =
+ &::build2::build::cli::thunk< b_options, &b_options::V_ >;
+ _cli_b_options_map_["--quiet"] =
+ &::build2::build::cli::thunk< b_options, &b_options::quiet_ >;
+ _cli_b_options_map_["-q"] =
+ &::build2::build::cli::thunk< b_options, &b_options::quiet_ >;
+ _cli_b_options_map_["--silent"] =
+ &::build2::build::cli::thunk< b_options, &b_options::silent_ >;
+ _cli_b_options_map_["--verbose"] =
+ &::build2::build::cli::thunk< b_options, uint16_t, &b_options::verbose_,
+ &b_options::verbose_specified_ >;
+ _cli_b_options_map_["--stat"] =
+ &::build2::build::cli::thunk< b_options, &b_options::stat_ >;
+ _cli_b_options_map_["--progress"] =
+ &::build2::build::cli::thunk< b_options, &b_options::progress_ >;
+ _cli_b_options_map_["--no-progress"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_progress_ >;
+ _cli_b_options_map_["--diag-color"] =
+ &::build2::build::cli::thunk< b_options, &b_options::diag_color_ >;
+ _cli_b_options_map_["--no-diag-color"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_diag_color_ >;
+ _cli_b_options_map_["--jobs"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::jobs_,
+ &b_options::jobs_specified_ >;
+ _cli_b_options_map_["-j"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::jobs_,
+ &b_options::jobs_specified_ >;
+ _cli_b_options_map_["--max-jobs"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::max_jobs_,
+ &b_options::max_jobs_specified_ >;
+ _cli_b_options_map_["-J"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::max_jobs_,
+ &b_options::max_jobs_specified_ >;
+ _cli_b_options_map_["--queue-depth"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::queue_depth_,
+ &b_options::queue_depth_specified_ >;
+ _cli_b_options_map_["-Q"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::queue_depth_,
+ &b_options::queue_depth_specified_ >;
+ _cli_b_options_map_["--file-cache"] =
+ &::build2::build::cli::thunk< b_options, string, &b_options::file_cache_,
+ &b_options::file_cache_specified_ >;
+ _cli_b_options_map_["--max-stack"] =
+ &::build2::build::cli::thunk< b_options, size_t, &b_options::max_stack_,
+ &b_options::max_stack_specified_ >;
+ _cli_b_options_map_["--serial-stop"] =
+ &::build2::build::cli::thunk< b_options, &b_options::serial_stop_ >;
+ _cli_b_options_map_["-s"] =
+ &::build2::build::cli::thunk< b_options, &b_options::serial_stop_ >;
+ _cli_b_options_map_["--dry-run"] =
+ &::build2::build::cli::thunk< b_options, &b_options::dry_run_ >;
+ _cli_b_options_map_["-n"] =
+ &::build2::build::cli::thunk< b_options, &b_options::dry_run_ >;
+ _cli_b_options_map_["--no-diag-buffer"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_diag_buffer_ >;
+ _cli_b_options_map_["--match-only"] =
+ &::build2::build::cli::thunk< b_options, &b_options::match_only_ >;
+ _cli_b_options_map_["--load-only"] =
+ &::build2::build::cli::thunk< b_options, &b_options::load_only_ >;
+ _cli_b_options_map_["--no-external-modules"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_external_modules_ >;
+ _cli_b_options_map_["--structured-result"] =
+ &::build2::build::cli::thunk< b_options, structured_result_format, &b_options::structured_result_,
+ &b_options::structured_result_specified_ >;
+ _cli_b_options_map_["--mtime-check"] =
+ &::build2::build::cli::thunk< b_options, &b_options::mtime_check_ >;
+ _cli_b_options_map_["--no-mtime-check"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_mtime_check_ >;
+ _cli_b_options_map_["--dump"] =
+ &::build2::build::cli::thunk< b_options, strings, &b_options::dump_,
+ &b_options::dump_specified_ >;
+ _cli_b_options_map_["--dump-format"] =
+ &::build2::build::cli::thunk< b_options, string, &b_options::dump_format_,
+ &b_options::dump_format_specified_ >;
+ _cli_b_options_map_["--dump-scope"] =
+ &::build2::build::cli::thunk< b_options, dir_paths, &b_options::dump_scope_,
+ &b_options::dump_scope_specified_ >;
+ _cli_b_options_map_["--dump-target"] =
+ &::build2::build::cli::thunk< b_options, vector<pair<name, optional<name>>>, &b_options::dump_target_,
+ &b_options::dump_target_specified_ >;
+ _cli_b_options_map_["--trace-match"] =
+ &::build2::build::cli::thunk< b_options, vector<name>, &b_options::trace_match_,
+ &b_options::trace_match_specified_ >;
+ _cli_b_options_map_["--trace-execute"] =
+ &::build2::build::cli::thunk< b_options, vector<name>, &b_options::trace_execute_,
+ &b_options::trace_execute_specified_ >;
+ _cli_b_options_map_["--no-column"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_column_ >;
+ _cli_b_options_map_["--no-line"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_line_ >;
+ _cli_b_options_map_["--buildfile"] =
+ &::build2::build::cli::thunk< b_options, path, &b_options::buildfile_,
+ &b_options::buildfile_specified_ >;
+ _cli_b_options_map_["--config-guess"] =
+ &::build2::build::cli::thunk< b_options, path, &b_options::config_guess_,
+ &b_options::config_guess_specified_ >;
+ _cli_b_options_map_["--config-sub"] =
+ &::build2::build::cli::thunk< b_options, path, &b_options::config_sub_,
+ &b_options::config_sub_specified_ >;
+ _cli_b_options_map_["--pager"] =
+ &::build2::build::cli::thunk< b_options, string, &b_options::pager_,
+ &b_options::pager_specified_ >;
+ _cli_b_options_map_["--pager-option"] =
+ &::build2::build::cli::thunk< b_options, strings, &b_options::pager_option_,
+ &b_options::pager_option_specified_ >;
+ _cli_b_options_map_["--options-file"] =
+ &::build2::build::cli::thunk< b_options, string, &b_options::options_file_,
+ &b_options::options_file_specified_ >;
+ _cli_b_options_map_["--default-options"] =
+ &::build2::build::cli::thunk< b_options, dir_path, &b_options::default_options_,
+ &b_options::default_options_specified_ >;
+ _cli_b_options_map_["--no-default-options"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_default_options_ >;
+ _cli_b_options_map_["--help"] =
+ &::build2::build::cli::thunk< b_options, &b_options::help_ >;
+ _cli_b_options_map_["--version"] =
+ &::build2::build::cli::thunk< b_options, &b_options::version_ >;
+ }
+ };
+
+ static _cli_b_options_map_init _cli_b_options_map_init_;
+
+ bool b_options::
+ _parse (const char* o, ::build2::build::cli::scanner& s)
+ {
+ _cli_b_options_map::const_iterator i (_cli_b_options_map_.find (o));
+
+ if (i != _cli_b_options_map_.end ())
+ {
+ (*(i->second)) (*this, s);
+ return true;
+ }
+
+ return false;
+ }
+
+ bool b_options::
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
+ {
+ // Can't skip combined flags (--no-combined-flags).
+ //
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
+
+ bool r = false;
+ bool opt = true;
+
+ while (s.more ())
+ {
+ const char* o = s.peek ();
+
+ if (std::strcmp (o, "--") == 0)
+ {
+ opt = false;
+ }
+
+ if (opt)
+ {
+ if (_parse (o, s))
+ {
+ r = true;
+ continue;
+ }
+
+ if (std::strncmp (o, "-", 1) == 0 && o[1] != '\0')
+ {
+ // Handle combined option values.
+ //
+ std::string co;
+ if (const char* v = std::strchr (o, '='))
+ {
+ co.assign (o, 0, v - o);
+ ++v;
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (co.c_str ()),
+ const_cast<char*> (v)
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (_parse (co.c_str (), ns))
+ {
+ // Parsed the option but not its value?
+ //
+ if (ns.end () != 2)
+ throw ::build2::build::cli::invalid_value (co, v);
+
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = co.c_str ();
+ }
+ }
+
+ // Handle combined flags.
+ //
+ char cf[3];
+ {
+ const char* p = o + 1;
+ for (; *p != '\0'; ++p)
+ {
+ if (!((*p >= 'a' && *p <= 'z') ||
+ (*p >= 'A' && *p <= 'Z') ||
+ (*p >= '0' && *p <= '9')))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ for (p = o + 1; *p != '\0'; ++p)
+ {
+ std::strcpy (cf, "-");
+ cf[1] = *p;
+ cf[2] = '\0';
+
+ int ac (1);
+ char* av[] =
+ {
+ cf
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (!_parse (cf, ns))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ // All handled.
+ //
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = cf;
+ }
+ }
+ }
+
+ switch (opt_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_option (o);
+ }
+ }
+
+ break;
+ }
+ }
+
+ switch (arg_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_argument (o);
+ }
+ }
+
+ break;
+ }
+
+ return r;
+ }
+}
+
+namespace build2
+{
+ ::build2::build::cli::usage_para
+ print_b_usage (::std::ostream& os, ::build2::build::cli::usage_para p)
+ {
+ CLI_POTENTIALLY_UNUSED (os);
+
+ if (p != ::build2::build::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mSYNOPSIS\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[1mb --help\033[0m" << ::std::endl
+ << "\033[1mb --version\033[0m" << ::std::endl
+ << "\033[1mb\033[0m [\033[4moptions\033[0m] [\033[4mvariables\033[0m] [\033[4mbuildspec\033[0m]\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[4mbuildspec\033[0m = \033[4mmeta-operation\033[0m\033[1m(\033[0m\033[4moperation\033[0m\033[1m(\033[0m\033[4mtarget\033[0m...[\033[1m,\033[0m\033[4mparameters\033[0m]\033[1m)\033[0m...\033[1m)\033[0m...\033[0m" << ::std::endl
+ << ::std::endl
+ << "\033[1mDESCRIPTION\033[0m" << ::std::endl
+ << ::std::endl
+ << "The \033[1mbuild2\033[0m build system driver executes a set of meta-operations on operations" << ::std::endl
+ << "on targets according to the build specification, or \033[4mbuildspec\033[0m for short. This" << ::std::endl
+ << "process can be controlled by specifying driver \033[4moptions\033[0m and build system" << ::std::endl
+ << "\033[4mvariables\033[0m." << ::std::endl
+ << ::std::endl
+ << "Note that \033[4moptions\033[0m, \033[4mvariables\033[0m, and \033[4mbuildspec\033[0m fragments can be specified in any" << ::std::endl
+ << "order. To avoid treating an argument that starts with \033[1m'-'\033[0m as an option, add the" << ::std::endl
+ << "\033[1m'--'\033[0m separator. To avoid treating an argument that contains \033[1m'='\033[0m as a variable," << ::std::endl
+ << "add the second \033[1m'--'\033[0m separator." << ::std::endl;
+
+ p = ::build2::b_options::print_usage (os, ::build2::build::cli::usage_para::text);
+
+ if (p != ::build2::build::cli::usage_para::none)
+ os << ::std::endl;
+
+ os << "\033[1mDEFAULT OPTIONS FILES\033[0m" << ::std::endl
+ << ::std::endl
+ << "Instead of having a separate config file format for tool configuration, the" << ::std::endl
+ << "\033[1mbuild2\033[0m toolchain uses \033[4mdefault options files\033[0m which contain the same options as" << ::std::endl
+ << "what can be specified on the command line. The default options files are like" << ::std::endl
+ << "options files that one can specify with \033[1m--options-file\033[0m except that they are" << ::std::endl
+ << "loaded by default." << ::std::endl
+ << ::std::endl
+ << "The default options files for the build system driver are called \033[1mb.options\033[0m and" << ::std::endl
+ << "are searched for in the \033[1m.build2/\033[0m subdirectory of the home directory and in the" << ::std::endl
+ << "system directory (for example, \033[1m/etc/build2/\033[0m) if configured. Note that besides" << ::std::endl
+ << "options these files can also contain global variable overrides." << ::std::endl
+ << ::std::endl
+ << "Once the search is complete, the files are loaded in the reverse order, that" << ::std::endl
+ << "is, beginning from the system directory (if any), followed by the home" << ::std::endl
+ << "directory, and finishing off with the options specified on the command line. In" << ::std::endl
+ << "other words, the files are loaded from the more generic to the more specific" << ::std::endl
+ << "with the command line options having the ability to override any values" << ::std::endl
+ << "specified in the default options files." << ::std::endl
+ << ::std::endl
+ << "If a default options file contains \033[1m--no-default-options\033[0m, then the search is" << ::std::endl
+ << "stopped at the directory containing this file and no outer files are loaded. If" << ::std::endl
+ << "this option is specified on the command line, then none of the default options" << ::std::endl
+ << "files are searched for or loaded." << ::std::endl
+ << ::std::endl
+ << "An additional directory containing default options files can be specified with" << ::std::endl
+ << "\033[1m--default-options\033[0m. Its configuration files are loaded after the home directory." << ::std::endl
+ << ::std::endl
+ << "The order in which default options files are loaded is traced at the verbosity" << ::std::endl
+ << "level 3 (\033[1m-V\033[0m option) or higher." << ::std::endl
+ << ::std::endl
+ << "\033[1mJSON OUTPUT\033[0m" << ::std::endl
+ << ::std::endl
+ << "Commands that support the JSON output specify their formats as a serialized" << ::std::endl
+ << "representation of a C++ \033[1mstruct\033[0m or an array thereof. For example:" << ::std::endl
+ << ::std::endl
+ << "struct package" << ::std::endl
+ << "{" << ::std::endl
+ << " string name;" << ::std::endl
+ << "};" << ::std::endl
+ << ::std::endl
+ << "struct configuration" << ::std::endl
+ << "{" << ::std::endl
+ << " uint64_t id;" << ::std::endl
+ << " string path;" << ::std::endl
+ << " optional<string> name;" << ::std::endl
+ << " bool default;" << ::std::endl
+ << " vector<package> packages;" << ::std::endl
+ << "};" << ::std::endl
+ << ::std::endl
+ << "An example of the serialized JSON representation of \033[1mstruct\033[0m \033[1mconfiguration\033[0m:" << ::std::endl
+ << ::std::endl
+ << "{" << ::std::endl
+ << " \"id\": 1," << ::std::endl
+ << " \"path\": \"/tmp/hello-gcc\"," << ::std::endl
+ << " \"name\": \"gcc\"," << ::std::endl
+ << " \"default\": true," << ::std::endl
+ << " \"packages\": [" << ::std::endl
+ << " {" << ::std::endl
+ << " \"name\": \"hello\"" << ::std::endl
+ << " }" << ::std::endl
+ << " ]" << ::std::endl
+ << "}" << ::std::endl
+ << ::std::endl
+ << "This sections provides details on the overall properties of such formats and" << ::std::endl
+ << "the semantics of the \033[1mstruct\033[0m serialization." << ::std::endl
+ << ::std::endl
+ << "The order of members in a JSON object is fixed as specified in the" << ::std::endl
+ << "corresponding \033[1mstruct\033[0m. While new members may be added in the future (and should" << ::std::endl
+ << "be ignored by older consumers), the semantics of the existing members" << ::std::endl
+ << "(including whether the top-level entry is an object or array) may not change." << ::std::endl
+ << ::std::endl
+ << "An object member is required unless its type is \033[1moptional<>\033[0m, \033[1mbool\033[0m, or \033[1mvector<>\033[0m" << ::std::endl
+ << "(array). For \033[1mbool\033[0m members absent means \033[1mfalse\033[0m. For \033[1mvector<>\033[0m members absent means" << ::std::endl
+ << "empty. An empty top-level array is always present." << ::std::endl
+ << ::std::endl
+ << "For example, the following JSON text is a possible serialization of the above" << ::std::endl
+ << "\033[1mstruct\033[0m \033[1mconfiguration\033[0m:" << ::std::endl
+ << ::std::endl
+ << "{" << ::std::endl
+ << " \"id\": 1," << ::std::endl
+ << " \"path\": \"/tmp/hello-gcc\"" << ::std::endl
+ << "}" << ::std::endl
+ << ::std::endl
+ << "\033[1mEXIT STATUS\033[0m" << ::std::endl
+ << ::std::endl
+ << "Non-zero exit status is returned in case of an error." << ::std::endl;
+
+ os << std::endl
+ << "\033[1mENVIRONMENT\033[0m" << ::std::endl
+ << ::std::endl
+ << "The \033[1mHOME\033[0m environment variable is used to determine the user's home directory." << ::std::endl
+ << "If it is not set, then \033[1mgetpwuid(3)\033[0m is used instead. This value is used to" << ::std::endl
+ << "shorten paths printed in diagnostics by replacing the home directory with \033[1m~/\033[0m." << ::std::endl
+ << "It is also made available to \033[1mbuildfile\033[0m's as the \033[1mbuild.home\033[0m variable." << ::std::endl
+ << ::std::endl
+ << "The \033[1mBUILD2_VAR_OVR\033[0m environment variable is used to propagate global variable" << ::std::endl
+ << "overrides to nested build system driver invocations. Its value is a list of" << ::std::endl
+ << "global variable assignments separated with newlines." << ::std::endl
+ << ::std::endl
+ << "The \033[1mBUILD2_DEF_OPT\033[0m environment variable is used to suppress loading of default" << ::std::endl
+ << "options files in nested build system driver invocations. Its values are \033[1mfalse\033[0m" << ::std::endl
+ << "or \033[1m0\033[0m to suppress and \033[1mtrue\033[0m or \033[1m1\033[0m to load." << ::std::endl;
+
+ p = ::build2::build::cli::usage_para::text;
+
+ return p;
+ }
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
diff --git a/libbuild2/b-options.hxx b/libbuild2/b-options.hxx
new file mode 100644
index 0000000..48dd35f
--- /dev/null
+++ b/libbuild2/b-options.hxx
@@ -0,0 +1,366 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+#ifndef LIBBUILD2_B_OPTIONS_HXX
+#define LIBBUILD2_B_OPTIONS_HXX
+
+// Begin prologue.
+//
+#include <libbuild2/export.hxx>
+//
+// End prologue.
+
+#include <libbuild2/common-options.hxx>
+
+namespace build2
+{
+ class LIBBUILD2_SYMEXPORT b_options
+ {
+ public:
+ b_options ();
+
+ // Return true if anything has been parsed.
+ //
+ bool
+ parse (int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (int start,
+ int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ // Merge options from the specified instance appending/overriding
+ // them as if they appeared after options in this instance.
+ //
+ void
+ merge (const b_options&);
+
+ // Option accessors.
+ //
+ const uint64_t&
+ build2_metadata () const;
+
+ bool
+ build2_metadata_specified () const;
+
+ const bool&
+ v () const;
+
+ const bool&
+ V () const;
+
+ const bool&
+ quiet () const;
+
+ const bool&
+ silent () const;
+
+ const uint16_t&
+ verbose () const;
+
+ bool
+ verbose_specified () const;
+
+ const bool&
+ stat () const;
+
+ const bool&
+ progress () const;
+
+ const bool&
+ no_progress () const;
+
+ const bool&
+ diag_color () const;
+
+ const bool&
+ no_diag_color () const;
+
+ const size_t&
+ jobs () const;
+
+ bool
+ jobs_specified () const;
+
+ const size_t&
+ max_jobs () const;
+
+ bool
+ max_jobs_specified () const;
+
+ const size_t&
+ queue_depth () const;
+
+ bool
+ queue_depth_specified () const;
+
+ const string&
+ file_cache () const;
+
+ bool
+ file_cache_specified () const;
+
+ const size_t&
+ max_stack () const;
+
+ bool
+ max_stack_specified () const;
+
+ const bool&
+ serial_stop () const;
+
+ const bool&
+ dry_run () const;
+
+ const bool&
+ no_diag_buffer () const;
+
+ const bool&
+ match_only () const;
+
+ const bool&
+ load_only () const;
+
+ const bool&
+ no_external_modules () const;
+
+ const structured_result_format&
+ structured_result () const;
+
+ bool
+ structured_result_specified () const;
+
+ const bool&
+ mtime_check () const;
+
+ const bool&
+ no_mtime_check () const;
+
+ const strings&
+ dump () const;
+
+ bool
+ dump_specified () const;
+
+ const string&
+ dump_format () const;
+
+ bool
+ dump_format_specified () const;
+
+ const dir_paths&
+ dump_scope () const;
+
+ bool
+ dump_scope_specified () const;
+
+ const vector<pair<name, optional<name>>>&
+ dump_target () const;
+
+ bool
+ dump_target_specified () const;
+
+ const vector<name>&
+ trace_match () const;
+
+ bool
+ trace_match_specified () const;
+
+ const vector<name>&
+ trace_execute () const;
+
+ bool
+ trace_execute_specified () const;
+
+ const bool&
+ no_column () const;
+
+ const bool&
+ no_line () const;
+
+ const path&
+ buildfile () const;
+
+ bool
+ buildfile_specified () const;
+
+ const path&
+ config_guess () const;
+
+ bool
+ config_guess_specified () const;
+
+ const path&
+ config_sub () const;
+
+ bool
+ config_sub_specified () const;
+
+ const string&
+ pager () const;
+
+ bool
+ pager_specified () const;
+
+ const strings&
+ pager_option () const;
+
+ bool
+ pager_option_specified () const;
+
+ const string&
+ options_file () const;
+
+ bool
+ options_file_specified () const;
+
+ const dir_path&
+ default_options () const;
+
+ bool
+ default_options_specified () const;
+
+ const bool&
+ no_default_options () const;
+
+ const bool&
+ help () const;
+
+ const bool&
+ version () const;
+
+ // Print usage information.
+ //
+ static ::build2::build::cli::usage_para
+ print_usage (::std::ostream&,
+ ::build2::build::cli::usage_para = ::build2::build::cli::usage_para::none);
+
+ // Implementation details.
+ //
+ protected:
+ bool
+ _parse (const char*, ::build2::build::cli::scanner&);
+
+ private:
+ bool
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
+
+ public:
+ uint64_t build2_metadata_;
+ bool build2_metadata_specified_;
+ bool v_;
+ bool V_;
+ bool quiet_;
+ bool silent_;
+ uint16_t verbose_;
+ bool verbose_specified_;
+ bool stat_;
+ bool progress_;
+ bool no_progress_;
+ bool diag_color_;
+ bool no_diag_color_;
+ size_t jobs_;
+ bool jobs_specified_;
+ size_t max_jobs_;
+ bool max_jobs_specified_;
+ size_t queue_depth_;
+ bool queue_depth_specified_;
+ string file_cache_;
+ bool file_cache_specified_;
+ size_t max_stack_;
+ bool max_stack_specified_;
+ bool serial_stop_;
+ bool dry_run_;
+ bool no_diag_buffer_;
+ bool match_only_;
+ bool load_only_;
+ bool no_external_modules_;
+ structured_result_format structured_result_;
+ bool structured_result_specified_;
+ bool mtime_check_;
+ bool no_mtime_check_;
+ strings dump_;
+ bool dump_specified_;
+ string dump_format_;
+ bool dump_format_specified_;
+ dir_paths dump_scope_;
+ bool dump_scope_specified_;
+ vector<pair<name, optional<name>>> dump_target_;
+ bool dump_target_specified_;
+ vector<name> trace_match_;
+ bool trace_match_specified_;
+ vector<name> trace_execute_;
+ bool trace_execute_specified_;
+ bool no_column_;
+ bool no_line_;
+ path buildfile_;
+ bool buildfile_specified_;
+ path config_guess_;
+ bool config_guess_specified_;
+ path config_sub_;
+ bool config_sub_specified_;
+ string pager_;
+ bool pager_specified_;
+ strings pager_option_;
+ bool pager_option_specified_;
+ string options_file_;
+ bool options_file_specified_;
+ dir_path default_options_;
+ bool default_options_specified_;
+ bool no_default_options_;
+ bool help_;
+ bool version_;
+ };
+}
+
+// Print page usage information.
+//
+namespace build2
+{
+ LIBBUILD2_SYMEXPORT ::build2::build::cli::usage_para
+ print_b_usage (::std::ostream&,
+ ::build2::build::cli::usage_para = ::build2::build::cli::usage_para::none);
+}
+
+#include <libbuild2/b-options.ixx>
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
+#endif // LIBBUILD2_B_OPTIONS_HXX
diff --git a/libbuild2/b-options.ixx b/libbuild2/b-options.ixx
new file mode 100644
index 0000000..34b0d39
--- /dev/null
+++ b/libbuild2/b-options.ixx
@@ -0,0 +1,405 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+namespace build2
+{
+ // b_options
+ //
+
+ inline const uint64_t& b_options::
+ build2_metadata () const
+ {
+ return this->build2_metadata_;
+ }
+
+ inline bool b_options::
+ build2_metadata_specified () const
+ {
+ return this->build2_metadata_specified_;
+ }
+
+ inline const bool& b_options::
+ v () const
+ {
+ return this->v_;
+ }
+
+ inline const bool& b_options::
+ V () const
+ {
+ return this->V_;
+ }
+
+ inline const bool& b_options::
+ quiet () const
+ {
+ return this->quiet_;
+ }
+
+ inline const bool& b_options::
+ silent () const
+ {
+ return this->silent_;
+ }
+
+ inline const uint16_t& b_options::
+ verbose () const
+ {
+ return this->verbose_;
+ }
+
+ inline bool b_options::
+ verbose_specified () const
+ {
+ return this->verbose_specified_;
+ }
+
+ inline const bool& b_options::
+ stat () const
+ {
+ return this->stat_;
+ }
+
+ inline const bool& b_options::
+ progress () const
+ {
+ return this->progress_;
+ }
+
+ inline const bool& b_options::
+ no_progress () const
+ {
+ return this->no_progress_;
+ }
+
+ inline const bool& b_options::
+ diag_color () const
+ {
+ return this->diag_color_;
+ }
+
+ inline const bool& b_options::
+ no_diag_color () const
+ {
+ return this->no_diag_color_;
+ }
+
+ inline const size_t& b_options::
+ jobs () const
+ {
+ return this->jobs_;
+ }
+
+ inline bool b_options::
+ jobs_specified () const
+ {
+ return this->jobs_specified_;
+ }
+
+ inline const size_t& b_options::
+ max_jobs () const
+ {
+ return this->max_jobs_;
+ }
+
+ inline bool b_options::
+ max_jobs_specified () const
+ {
+ return this->max_jobs_specified_;
+ }
+
+ inline const size_t& b_options::
+ queue_depth () const
+ {
+ return this->queue_depth_;
+ }
+
+ inline bool b_options::
+ queue_depth_specified () const
+ {
+ return this->queue_depth_specified_;
+ }
+
+ inline const string& b_options::
+ file_cache () const
+ {
+ return this->file_cache_;
+ }
+
+ inline bool b_options::
+ file_cache_specified () const
+ {
+ return this->file_cache_specified_;
+ }
+
+ inline const size_t& b_options::
+ max_stack () const
+ {
+ return this->max_stack_;
+ }
+
+ inline bool b_options::
+ max_stack_specified () const
+ {
+ return this->max_stack_specified_;
+ }
+
+ inline const bool& b_options::
+ serial_stop () const
+ {
+ return this->serial_stop_;
+ }
+
+ inline const bool& b_options::
+ dry_run () const
+ {
+ return this->dry_run_;
+ }
+
+ inline const bool& b_options::
+ no_diag_buffer () const
+ {
+ return this->no_diag_buffer_;
+ }
+
+ inline const bool& b_options::
+ match_only () const
+ {
+ return this->match_only_;
+ }
+
+ inline const bool& b_options::
+ load_only () const
+ {
+ return this->load_only_;
+ }
+
+ inline const bool& b_options::
+ no_external_modules () const
+ {
+ return this->no_external_modules_;
+ }
+
+ inline const structured_result_format& b_options::
+ structured_result () const
+ {
+ return this->structured_result_;
+ }
+
+ inline bool b_options::
+ structured_result_specified () const
+ {
+ return this->structured_result_specified_;
+ }
+
+ inline const bool& b_options::
+ mtime_check () const
+ {
+ return this->mtime_check_;
+ }
+
+ inline const bool& b_options::
+ no_mtime_check () const
+ {
+ return this->no_mtime_check_;
+ }
+
+ inline const strings& b_options::
+ dump () const
+ {
+ return this->dump_;
+ }
+
+ inline bool b_options::
+ dump_specified () const
+ {
+ return this->dump_specified_;
+ }
+
+ inline const string& b_options::
+ dump_format () const
+ {
+ return this->dump_format_;
+ }
+
+ inline bool b_options::
+ dump_format_specified () const
+ {
+ return this->dump_format_specified_;
+ }
+
+ inline const dir_paths& b_options::
+ dump_scope () const
+ {
+ return this->dump_scope_;
+ }
+
+ inline bool b_options::
+ dump_scope_specified () const
+ {
+ return this->dump_scope_specified_;
+ }
+
+ inline const vector<pair<name, optional<name>>>& b_options::
+ dump_target () const
+ {
+ return this->dump_target_;
+ }
+
+ inline bool b_options::
+ dump_target_specified () const
+ {
+ return this->dump_target_specified_;
+ }
+
+ inline const vector<name>& b_options::
+ trace_match () const
+ {
+ return this->trace_match_;
+ }
+
+ inline bool b_options::
+ trace_match_specified () const
+ {
+ return this->trace_match_specified_;
+ }
+
+ inline const vector<name>& b_options::
+ trace_execute () const
+ {
+ return this->trace_execute_;
+ }
+
+ inline bool b_options::
+ trace_execute_specified () const
+ {
+ return this->trace_execute_specified_;
+ }
+
+ inline const bool& b_options::
+ no_column () const
+ {
+ return this->no_column_;
+ }
+
+ inline const bool& b_options::
+ no_line () const
+ {
+ return this->no_line_;
+ }
+
+ inline const path& b_options::
+ buildfile () const
+ {
+ return this->buildfile_;
+ }
+
+ inline bool b_options::
+ buildfile_specified () const
+ {
+ return this->buildfile_specified_;
+ }
+
+ inline const path& b_options::
+ config_guess () const
+ {
+ return this->config_guess_;
+ }
+
+ inline bool b_options::
+ config_guess_specified () const
+ {
+ return this->config_guess_specified_;
+ }
+
+ inline const path& b_options::
+ config_sub () const
+ {
+ return this->config_sub_;
+ }
+
+ inline bool b_options::
+ config_sub_specified () const
+ {
+ return this->config_sub_specified_;
+ }
+
+ inline const string& b_options::
+ pager () const
+ {
+ return this->pager_;
+ }
+
+ inline bool b_options::
+ pager_specified () const
+ {
+ return this->pager_specified_;
+ }
+
+ inline const strings& b_options::
+ pager_option () const
+ {
+ return this->pager_option_;
+ }
+
+ inline bool b_options::
+ pager_option_specified () const
+ {
+ return this->pager_option_specified_;
+ }
+
+ inline const string& b_options::
+ options_file () const
+ {
+ return this->options_file_;
+ }
+
+ inline bool b_options::
+ options_file_specified () const
+ {
+ return this->options_file_specified_;
+ }
+
+ inline const dir_path& b_options::
+ default_options () const
+ {
+ return this->default_options_;
+ }
+
+ inline bool b_options::
+ default_options_specified () const
+ {
+ return this->default_options_specified_;
+ }
+
+ inline const bool& b_options::
+ no_default_options () const
+ {
+ return this->no_default_options_;
+ }
+
+ inline const bool& b_options::
+ help () const
+ {
+ return this->help_;
+ }
+
+ inline const bool& b_options::
+ version () const
+ {
+ return this->version_;
+ }
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
diff --git a/build2/b.cli b/libbuild2/b.cli
index 112db2b..f58b869 100644
--- a/build2/b.cli
+++ b/libbuild2/b.cli
@@ -1,8 +1,7 @@
-// file : build2/b.cli
+// file : libbuild2/b.cli
// license : MIT; see accompanying LICENSE file
-include <set>;
-include <libbuild2/types.hxx>;
+include <libbuild2/common.cli>;
"\section=1"
"\name=b"
@@ -316,14 +315,114 @@ namespace build2
\li|\cb{info}
Print basic information (name, version, source and output
- directories, etc) about one or more projects to \cb{STDOUT},
+ directories, etc) about one or more projects to \cb{stdout},
separating multiple projects with a blank line. Each project is
- identified by its root directory target. For example:
+ identified by its root directory target. For example (some output
+ is omitted):
\
$ b info: libfoo/ libbar/
+ project: libfoo
+ version: 1.0.0
+ src_root: /tmp/libfoo
+ out_root: /tmp/libfoo
+ subprojects: @tests
+
+ project: libbar
+ version: 2.0.0
+ src_root: /tmp/libbar
+ out_root: /tmp/libbar-out
+ subprojects: @tests
\
+ To omit discovering and printing subprojects information, use the
+ \cb{no_subprojects} parameter, for example:
+
+ \
+ $ b info: libfoo/,no_subprojects
+ \
+
+ To instead print this information in the JSON format, use the
+ \cb{json} parameter, for example:
+
+ \
+ $ b info: libfoo/,json
+ \
+
+ In this case the output is a JSON array of objects which are the
+ serialized representation of the following C++ \cb{struct}
+ \cb{project_info}:
+
+ \
+ struct subproject
+ {
+ string path;
+ optional<string> name;
+ };
+
+ struct project_info
+ {
+ optional<string> project;
+ optional<string> version;
+ optional<string> summary;
+ optional<string> url;
+ string src_root;
+ string out_root;
+ optional<string> amalgamation;
+ vector<subproject> subprojects;
+ vector<string> operations;
+ vector<string> meta_operations;
+ vector<string> modules;
+ };
+ \
+
+ For example:
+
+ \
+ [
+ {
+ \"project\": \"libfoo\",
+ \"version\": \"1.0.0\",
+ \"summary\": \"libfoo C++ library\",
+ \"src_root\": \"/tmp/libfoo\",
+ \"out_root\": \"/tmp/gcc-debug/libfoo\",
+ \"amalgamation\": \"..\",
+ \"subprojects\": [
+ {
+ \"path\": \"tests\"
+ }
+ ],
+ \"operations\": [
+ \"update\",
+ \"clean\",
+ \"test\",
+ \"update-for-test\",
+ \"install\",
+ \"uninstall\",
+ \"update-for-install\"
+ ],
+ \"meta-operations\": [
+ \"perform\",
+ \"configure\",
+ \"disfigure\",
+ \"dist\",
+ \"info\"
+ ],
+ \"modules\": [
+ \"version\",
+ \"config\",
+ \"test\",
+ \"install\",
+ \"dist\"
+ ]
+ }
+ ]
+ \
+
+ See the JSON OUTPUT section below for details on the overall
+ properties of this format and the semantics of the \cb{struct}
+ serialization.
+
||
The build system has the following built-in and pre-defined operations:
@@ -379,13 +478,13 @@ namespace build2
\
$ b config.install.root=c:\projects\install
- $ b \"config.install.root='c:\Program Files (x86)\test\'\"
+ $ b \"config.install.root='c:\Program Files\test\'\"
$ b 'config.cxx.poptions=-DFOO_STR=\"foo\"'
\
"
}
- class options
+ class b_options
{
"\h#options|OPTIONS|"
@@ -445,15 +544,6 @@ namespace build2
"Display build statistics."
}
- std::set<string> --dump
- {
- "<phase>",
- "Dump the build system state after the specified phase. Valid <phase>
- values are \cb{load} (after loading \cb{buildfiles}) and \cb{match}
- (after matching rules to targets). Repeat this option to dump the
- state after multiple phases."
- }
-
bool --progress
{
"Display build progress. If printing to a terminal the progress is
@@ -466,6 +556,22 @@ namespace build2
"Don't display build progress."
}
+ bool --diag-color
+ {
+ "Use color in diagnostics. If printing to a terminal the color is used
+ by default provided the terminal is not dumb. Use \cb{--no-diag-color}
+ to suppress.
+
+ This option affects the diagnostics printed by the build system itself.
+ Some rules may also choose to propagate its value to tools (such as
+ compilers) that they invoke."
+ }
+
+ bool --no-diag-color
+ {
+ "Don't use color in diagnostics."
+ }
+
size_t --jobs|-j
{
"<num>",
@@ -525,7 +631,10 @@ namespace build2
investigate build failures that are caused by build system errors
rather than compilation errors. Note that if you don't want to keep
going but still want parallel execution, add \cb{--jobs|-j} (for
- example \cb{-j\ 0} for default concurrency)."
+ example \cb{-j\ 0} for default concurrency). Note also that during
+ serial execution there is no diagnostics buffering and child
+ process' \cb{stderr} is a terminal (unless redirected; see
+ \cb{--no-diag-buffer} for details)."
}
bool --dry-run|-n
@@ -539,10 +648,33 @@ namespace build2
this mode."
}
+ bool --no-diag-buffer
+ {
+ "Do not buffer diagnostics from child processes. By default, unless
+ running serially, such diagnostics is buffered and printed all at
+ once after each child exits in order to prevent interleaving.
+ However, this can have side-effects since the child process'
+ \cb{stderr} is no longer a terminal. Most notably, the use of
+ color in diagnostics may be disabled by some programs. On the
+ other hand, depending on the platform and programs invoked, the
+ interleaving diagnostics may not break lines and thus could be
+ tolerable."
+ }
+
bool --match-only
{
- "Match the rules but do not execute the operation. This mode is primarily
- useful for profiling."
+ "Match the rules without executing the operation. This mode is primarily
+ useful for profiling and dumping the build system state."
+ }
+
+ bool --load-only
+ {
+ "Match the rules only to \cb{alias{\}} targets ignoring other targets
+ and without executing the operation. In particular, this has the
+ effect of loading all the subdirectory \cb{buildfiles} that are not
+ explicitly included. Note that this option can only be used with the
+ \cb{perform(update)} action on an \cb{alias{\}} target, usually
+ \cb{dir{\}}."
}
bool --no-external-modules
@@ -552,13 +684,20 @@ namespace build2
project's \cb{buildfiles}, such as \cb{info}."
}
- bool --structured-result
+ structured_result_format --structured-result
{
+ "<fmt>",
+
"Write the result of execution in a structured form. In this mode,
- instead of printing to \cb{STDERR} diagnostics messages about the
+ instead of printing to \cb{stderr} diagnostics messages about the
outcome of executing actions on targets, the driver writes to
- \cb{STDOUT} a structured result description one line per the
- buildspec action/target pair. Each line has the following format:
+ \cb{stdout} a machine-readable result description in the specified
+ format. Valid values for this option are \cb{lines} and \cb{json}.
+ Note that currently only the \cb{perform} meta-operation supports
+ the structured result output.
+
+ If the output format is \cb{lines}, then the result is written one line
+ per the buildspec action/target pair. Each line has the following form:
\c{\i{state} \i{meta-operation} \i{operation} \i{target}}
@@ -567,12 +706,66 @@ namespace build2
outer operation is specified in parenthesis. For example:
\
- unchanged perform update(test) /tmp/dir{hello/}
- changed perform test /tmp/dir{hello/}
+ unchanged perform update(test) /tmp/hello/hello/exe{hello}
+ changed perform test /tmp/hello/hello/exe{hello}
\
- Note that only the \cb{perform} meta-operation supports the structured
- result output.
+ If the output format is \cb{json}, then the output is a JSON array of
+ objects which are the serialized representation of the following C++
+ \cb{struct} \cb{target_action_result}:
+
+ \
+ struct target_action_result
+ {
+ string target;
+ string display_target;
+ string target_type;
+ optional<string> target_path;
+ string meta_operation;
+ string operation;
+ optional<string> outer_operation;
+ string state;
+ };
+ \
+
+ For example:
+
+ \
+ [
+ {
+ \"target\": \"/tmp/hello/hello/exe{hello.}\",
+ \"display_target\": \"/tmp/hello/hello/exe{hello}\",
+ \"target_type\": \"exe\",
+ \"target_path\": \"/tmp/hello/hello/hello\",
+ \"meta_operation\": \"perform\",
+ \"operation\": \"update\",
+ \"outer_operation\": \"test\",
+ \"state\": \"unchanged\"
+ },
+ {
+ \"target\": \"/tmp/hello/hello/exe{hello.}\",
+ \"display_target\": \"/tmp/hello/hello/exe{hello}\",
+ \"target_type\": \"exe\",
+ \"target_path\": \"/tmp/hello/hello/hello\",
+ \"meta_operation\": \"perform\",
+ \"operation\": \"test\",
+ \"state\": \"changed\"
+ }
+ ]
+ \
+
+ See the JSON OUTPUT section below for details on the overall
+ properties of this format and the semantics of the \cb{struct}
+ serialization.
+
+ The \cb{target} member is the target name that is qualified with the
+ extension (if applicable) and, if required, is quoted so that it can be
+ passed back to the build system driver on the command line. The
+ \cb{display_target} member is the unqualified and unquoted \"display\"
+ target name, the same as in the \cb{lines} format. The \cb{target_type}
+ member is the type of target. The \cb{target_path} member is an
+ absolute path to the target if the target type is path-based or
+ \cb{dir}.
"
}
@@ -591,6 +784,73 @@ namespace build2
\cb{--mtime-check} for details."
}
+ strings --dump
+ {
+ "<phase>",
+ "Dump the build system state after the specified phase. Valid <phase>
+ values are \cb{load} (after loading \cb{buildfiles}) and \cb{match}
+ (after matching rules to targets). The \cb{match} value also has the
+ \cb{match-pre} and \cb{match-post} variants to dump the state for the
+ pre/post-operations (\cb{match} dumps the main operation only). Repeat
+ this option to dump the state after multiple phases/variants. By
+ default the entire build state is dumped but this behavior can be
+ altered with the \cb{--dump-scope} and \cb{--dump-target} options.
+ See also the \cb{--match-only} and \cb{--load-only} options."
+ }
+
+ string --dump-format
+ {
+ // NOTE: fix all references to json-v0.1, including the manual.
+ //
+ "<format>",
+ "Representation format and output stream to use when dumping the build
+ system state. Valid values for this option are \cb{buildfile} (a
+ human-readable, Buildfile-like format written to \cb{stderr}; this is
+ the default), and \cb{json-v0.1} (machine-readable, JSON-based format
+ written to \cb{stdout}). For details on the \cb{buildfile} format, see
+ \l{b#intro-diag-debug Diagnostics and Debugging}. For details on the
+ \cb{json-v0.1} format, see the JSON OUTPUT section below (overall
+ properties) and \l{b#json-dump JSON Dump Format} (format specifics).
+ Note that the JSON format is currently unstable (thus the temporary
+ \cb{-v0.1} suffix).
+
+ Note that because it's possible to end up with multiple dumps (for
+ example, by specifying the \cb{--dump-scope} and/or \cb{--dump-target}
+ options multiple times), the JSON output is in the \"JSON Lines\" form,
+ that is, without pretty-printing and with the top-level JSON objects
+ delimited by newlines. Note also that if the JSON dump output is
+ combined with \cb{--structured-result=json}, then the structured
+ result is the last line."
+ }
+
+ dir_paths --dump-scope
+ {
+ "<dir>",
+ "Dump the build system state for the specified scope only. Repeat this
+ option to dump the state of multiple scopes."
+ }
+
+ vector<pair<name, optional<name>>> --dump-target
+ {
+ "<target>",
+ "Dump the build system state for the specified target only. Repeat this
+ option to dump the state of multiple targets."
+ }
+
+ vector<name> --trace-match
+ {
+ "<target>",
+ "Trace rule matching for the specified target. This is primarily useful
+ during troubleshooting. Repeat this option to trace multiple targets."
+ }
+
+ vector<name> --trace-execute
+ {
+ "<target>",
+ "Trace rule execution for the specified target. This is primarily useful
+ during troubleshooting. Repeat this option to trace multiple targets."
+ }
+
bool --no-column
{
"Don't print column numbers in diagnostics."
@@ -607,7 +867,7 @@ namespace build2
"The alternative file to read build information from. The default is
\cb{buildfile} or \cb{build2file}, depending on the project's build
file/directory naming scheme. If <path> is '\cb{-}', then read from
- \cb{STDIN}. Note that this option only affects the files read as part
+ \cb{stdin}. Note that this option only affects the files read as part
of the buildspec processing. Specifically, it has no effect on the
\cb{source} and \cb{include} directives. As a result, this option is
primarily intended for testing rather than changing the build file
@@ -723,6 +983,69 @@ namespace build2
The order in which default options files are loaded is traced at the
verbosity level 3 (\cb{-V} option) or higher.
+ \h#json-output|JSON OUTPUT|
+
+ Commands that support the JSON output specify their formats as a
+ serialized representation of a C++ \cb{struct} or an array thereof. For
+ example:
+
+ \
+ struct package
+ {
+ string name;
+ };
+
+ struct configuration
+ {
+ uint64_t id;
+ string path;
+ optional<string> name;
+ bool default;
+ vector<package> packages;
+ };
+ \
+
+ An example of the serialized JSON representation of \cb{struct}
+ \cb{configuration}:
+
+ \
+ {
+ \"id\": 1,
+ \"path\": \"/tmp/hello-gcc\",
+ \"name\": \"gcc\",
+ \"default\": true,
+ \"packages\": [
+ {
+ \"name\": \"hello\"
+ }
+ ]
+ }
+ \
+
+  This section provides details on the overall properties of such formats
+ and the semantics of the \cb{struct} serialization.
+
+ The order of members in a JSON object is fixed as specified in the
+ corresponding \cb{struct}. While new members may be added in the
+ future (and should be ignored by older consumers), the semantics of the
+ existing members (including whether the top-level entry is an object or
+ array) may not change.
+
+ An object member is required unless its type is \cb{optional<>},
+ \cb{bool}, or \cb{vector<>} (array). For \cb{bool} members absent means
+ \cb{false}. For \cb{vector<>} members absent means empty. An empty
+ top-level array is always present.
+
+ For example, the following JSON text is a possible serialization of
+ the above \cb{struct} \cb{configuration}:
+
+ \
+ {
+ \"id\": 1,
+ \"path\": \"/tmp/hello-gcc\"
+ }
+ \
+
\h|EXIT STATUS|
Non-zero exit status is returned in case of an error.
diff --git a/libbuild2/bash/init.cxx b/libbuild2/bash/init.cxx
index cf5307f..88c88ba 100644
--- a/libbuild2/bash/init.cxx
+++ b/libbuild2/bash/init.cxx
@@ -20,7 +20,7 @@ namespace build2
namespace bash
{
static const in_rule in_rule_;
- static const install_rule install_rule_ (in_rule_);
+ static const install_rule install_rule_ (in_rule_, "bash.in");
bool
init (scope& rs,
@@ -48,14 +48,13 @@ namespace build2
{
using namespace install;
- // Install into bin/<project>/ by default stripping the .bash
- // extension from <project> if present.
+ // Install bash{} into bin/<project>.bash/ by default.
//
const project_name& p (project (rs));
if (!p.empty ())
{
- install_path<bash> (bs, dir_path ("bin") /= project_base (p));
+ install_path<bash> (bs, dir_path ("bin") /= modules_install_dir (p));
install_mode<bash> (bs, "644");
}
}
@@ -72,11 +71,11 @@ namespace build2
if (install_loaded)
{
- bs.insert_rule<exe> (perform_install_id, "bash.install", install_rule_);
- bs.insert_rule<exe> (perform_uninstall_id, "bash.uninstall", install_rule_);
+ bs.insert_rule<exe> (perform_install_id, "bash.install", install_rule_);
+ bs.insert_rule<exe> (perform_uninstall_id, "bash.install", install_rule_);
- bs.insert_rule<bash> (perform_install_id, "bash.install", install_rule_);
- bs.insert_rule<bash> (perform_uninstall_id, "bash.uninstall", install_rule_);
+ bs.insert_rule<bash> (perform_install_id, "bash.install", install_rule_);
+ bs.insert_rule<bash> (perform_uninstall_id, "bash.install", install_rule_);
}
return true;
diff --git a/libbuild2/bash/rule.cxx b/libbuild2/bash/rule.cxx
index e0391e3..29c6a2a 100644
--- a/libbuild2/bash/rule.cxx
+++ b/libbuild2/bash/rule.cxx
@@ -26,6 +26,9 @@ namespace build2
struct match_data
{
+ explicit
+ match_data (const in_rule& r): rule (r) {}
+
// The "for install" condition is signalled to us by install_rule when
// it is matched for the update operation. It also verifies that if we
// have already been executed, then it was for install.
@@ -33,24 +36,43 @@ namespace build2
// See cc::link_rule for a discussion of some subtleties in this logic.
//
optional<bool> for_install;
+
+ const in_rule& rule;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ // Unless the outer install rule signalled that this is update for
+ // install, signal back that we've performed plain update.
+ //
+ if (!for_install)
+ for_install = false;
+
+ //@@ TODO: need to verify all the modules we depend on are compatible
+ // with our for_install value, similar to cc::link_rule's
+ // append_libraries() (and which is the other half of the check
+ // in install_rule).
+
+ return rule.perform_update (a, t);
+ }
};
- static_assert (sizeof (match_data) <= target::data_size,
- "insufficient space");
+ static_assert (sizeof (match_data) <= target::small_data_size,
+ "match data requires dynamic allocation");
// in_rule
//
bool in_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t, const string& hint, match_extra&) const
{
tracer trace ("bash::in_rule::match");
- // Note that for bash{} we match even if the target does not depend on
- // any modules (while it could have been handled by the in module, that
- // would require loading it).
+ // Note that for bash{} and for exe{} with hint we match even if the
+ // target does not depend on any modules (while it could have been
+ // handled by the in module, that would require loading it).
//
- bool fi (false); // Found in.
- bool fm (t.is_a<bash> ()); // Found module.
+ bool fi (false); // Found in.
+ bool fm (!hint.empty () || t.is_a<bash> ()); // Found module.
for (prerequisite_member p: group_prerequisite_members (a, t))
{
if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
@@ -64,39 +86,26 @@ namespace build2
l4 ([&]{trace << "no in file prerequisite for target " << t;});
if (!fm)
- l4 ([&]{trace << "no bash module prerequisite for target " << t;});
+ l4 ([&]{trace << "no bash module prerequisite or hint for target "
+ << t;});
- return (fi && fm);
+ return fi && fm;
}
recipe in_rule::
apply (action a, target& t) const
{
- // Note that for-install is signalled by install_rule and therefore
- // can only be relied upon during execute.
- //
- t.data (match_data ());
-
- return rule::apply (a, t);
- }
-
- target_state in_rule::
- perform_update (action a, const target& t) const
- {
- // Unless the outer install rule signalled that this is update for
- // install, signal back that we've performed plain update.
- //
- match_data& md (t.data<match_data> ());
+ recipe r (rule::apply (a, t));
- if (!md.for_install)
- md.for_install = false;
-
- //@@ TODO: need to verify all the modules we depend on are compatible
- // with our for_install value, similar to cc::link_rule's
- // append_libraries() (and which is the other half of the check
- // in install_rule).
+ if (a == perform_update_id)
+ {
+ // Note that for-install is signalled by install_rule and therefore
+ // can only be relied upon during execute.
+ //
+ return match_data (*this);
+ }
- return rule::perform_update (a, t);
+ return r;
}
prerequisite_target in_rule::
@@ -126,7 +135,7 @@ namespace build2
// apply).
//
string ext (p.ext ? *p.ext : "bash");
- path ip (dir_path (project_base (*p.proj)) / p.dir / p.name);
+ path ip (dir_path (modules_install_dir (*p.proj)) / p.dir / p.name);
if (!ext.empty ())
{
@@ -161,6 +170,9 @@ namespace build2
if (mt != timestamp_nonexistent)
{
+ // @@ Do we actually need _locked(), isn't path_mtime()
+ // atomic?
+ //
auto rp (t.ctx.targets.insert_locked (bash::static_type,
ap.directory (),
dir_path () /* out */,
@@ -200,12 +212,16 @@ namespace build2
action a,
const target& t,
const string& n,
+ optional<uint64_t> flags,
bool strict,
+ const substitution_map* smap,
const optional<string>& null) const
{
+ assert (!flags);
+
return n.compare (0, 6, "import") == 0 && (n[6] == ' ' || n[6] == '\t')
? substitute_import (l, a, t, trim (string (n, 7)))
- : rule::substitute (l, a, t, n, strict, null);
+ : rule::substitute (l, a, t, n, nullopt, strict, smap, null);
}
string in_rule::
@@ -214,21 +230,45 @@ namespace build2
const target& t,
const string& n) const
{
- // Derive (relative) import path from the import name.
+ // Derive (relative) import path from the import name. And derive import
+ // installed path from that by adding the .bash extension to the first
+ // component.
//
- path ip;
+ path ip, iip;
+ project_name pn;
try
{
ip = path (n);
- if (ip.empty () || ip.absolute ())
+ if (ip.empty () || ip.simple () || ip.absolute ())
throw invalid_path (n);
if (ip.extension_cstring () == nullptr)
ip += ".bash";
ip.normalize ();
+
+ auto b (ip.begin ()), e (ip.end ());
+
+ try
+ {
+ pn = project_name (*b);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid import path '" << n << "': " << e.what ();
+ }
+
+ char s (b++.separator ());
+
+ iip = path (modules_install_dir (pn) + s) / path (b, e);
+
+ // Strip the .bash extension from the project name in this path to
+ // be able to compare it to paths inside the project (see below).
+ //
+ if (pn.extension () == "bash")
+ ip = path (pn.base ("bash") + s) / path (b, e);
}
catch (const invalid_path&)
{
@@ -240,7 +280,7 @@ namespace build2
const path* ap (nullptr);
for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
- if (pt.adhoc || pt.target == nullptr)
+ if (pt.target == nullptr || pt.adhoc ())
continue;
if (const bash* b = pt.target->is_a<bash> ())
@@ -258,19 +298,19 @@ namespace build2
//
// But we still do a simple match first since it can quickly weed
// out candidates that cannot possibly match.
- //
- if (!pp.sup (ip))
- continue;
- // See if this is import-installed target (refer to search() for
- // details).
+ // See if this is import-installed target (refer to search() above
+ // for details).
//
if (size_t n = pt.data)
{
+ if (!pp.sup (iip))
+ continue;
+
// Both are normalized so we can compare the "tails".
//
const string& ps (pp.string ());
- const string& is (ip.string ());
+ const string& is (iip.string ());
if (path::traits_type::compare (
ps.c_str () + ps.size () - n, n,
@@ -285,6 +325,9 @@ namespace build2
if (const scope* rs = b->base_scope ().root_scope ())
{
+ if (!pp.sup (ip) || project (*rs) != pn)
+ continue;
+
const dir_path& d (pp.sub (rs->src_path ())
? rs->src_path ()
: rs->out_path ());
@@ -305,7 +348,7 @@ namespace build2
if (ap == nullptr)
fail (l) << "unable to resolve import path " << ip;
- match_data& md (t.data<match_data> ());
+ match_data& md (t.data<match_data> (a));
assert (md.for_install);
if (*md.for_install)
@@ -363,7 +406,7 @@ namespace build2
"source \"$(dirname"
" \"$(readlink -f"
" \"${BASH_SOURCE[0]}\")\")/"
- + ip.string () + "\"";
+ + iip.string () + '"';
}
else
{
@@ -384,7 +427,7 @@ namespace build2
return
"source \"$(dirname"
" \"${BASH_SOURCE[0]}\")/"
- + o + ip.string () + "\"";
+ + o + iip.string () + '"';
}
}
else
@@ -394,12 +437,13 @@ namespace build2
// install_rule
//
bool install_rule::
- match (action a, target& t, const string& hint) const
+ match (action a, target& t) const
{
// We only want to handle installation if we are also the ones building
// this target. So first run in's match().
//
- return in_.match (a, t, hint) && file_rule::match (a, t, "");
+ return in_.sub_match (in_name_, update_id, a, t) &&
+ file_rule::match (a, t);
}
recipe install_rule::
@@ -415,7 +459,7 @@ namespace build2
// Signal to the in rule that this is update for install. And if the
// update has already been executed, verify it was done for install.
//
- auto& md (t.data<match_data> ());
+ auto& md (t.data<match_data> (a.inner_action ()));
if (md.for_install)
{
diff --git a/libbuild2/bash/rule.hxx b/libbuild2/bash/rule.hxx
index c54b07c..444d176 100644
--- a/libbuild2/bash/rule.hxx
+++ b/libbuild2/bash/rule.hxx
@@ -29,17 +29,16 @@ namespace build2
class LIBBUILD2_BASH_SYMEXPORT in_rule: public in::rule
{
public:
- in_rule (): rule ("bash.in 1", "bash.in", '@', false /* strict */) {}
+ in_rule (): rule ("bash.in 1", "bash", '@', false /* strict */) {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&, const string&, match_extra&) const override;
+
+ using in::rule::match; // Make Clang happy.
virtual recipe
apply (action, target&) const override;
- virtual target_state
- perform_update (action, const target&) const override;
-
virtual prerequisite_target
search (action,
const target&,
@@ -51,7 +50,9 @@ namespace build2
action a,
const target&,
const string&,
+ optional<uint64_t>,
bool,
+ const substitution_map*,
const optional<string>&) const override;
string
@@ -67,16 +68,17 @@ namespace build2
class LIBBUILD2_BASH_SYMEXPORT install_rule: public install::file_rule
{
public:
- install_rule (const in_rule& in): in_ (in) {}
+ install_rule (const in_rule& r, const char* n): in_ (r), in_name_ (n) {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
protected:
const in_rule& in_;
+ const string in_name_;
};
}
}
diff --git a/libbuild2/bash/target.cxx b/libbuild2/bash/target.cxx
index 6fa7cf4..5240fed 100644
--- a/libbuild2/bash/target.cxx
+++ b/libbuild2/bash/target.cxx
@@ -23,7 +23,7 @@ namespace build2
&target_pattern_var<bash_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
}
}
diff --git a/libbuild2/bash/target.hxx b/libbuild2/bash/target.hxx
index f0af967..ad926bd 100644
--- a/libbuild2/bash/target.hxx
+++ b/libbuild2/bash/target.hxx
@@ -21,11 +21,14 @@ namespace build2
class LIBBUILD2_BASH_SYMEXPORT bash: public file
{
public:
- using file::file;
+ bash (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
diff --git a/libbuild2/bash/utility.hxx b/libbuild2/bash/utility.hxx
index 087fc38..e5e4377 100644
--- a/libbuild2/bash/utility.hxx
+++ b/libbuild2/bash/utility.hxx
@@ -11,14 +11,26 @@ namespace build2
{
namespace bash
{
- // Strip the .bash extension from the project name.
+ // Return the bash{} modules installation directory under bin/.
//
- // Note that the result may not be a valid project name.
+ // Note that we used to install into bin/<project>/ but that has a good
+ // chance of clashing with the project's executable. Adding the .bash
+ // extension feels like a good idea since in our model the executables
+ // should not use the .bash extension (only modules) and therefore are
+ // unlikely to clash with this name.
+ //
+ // One drawback of this approach is that in case of a project like
+ // libbutl.bash we now have different module directories inside the
+ // project (libbutl/) and when installed (libbutl.bash/). Also, the
+ // installation directory will be shared with the libbutl project but
+ // that's probably ok (and we had the same issue before).
//
inline string
- project_base (const project_name& pn)
+ modules_install_dir (const project_name& pn)
{
- return pn.base ("bash");
+ // Strip the .bash extension if present not to duplicate it.
+ //
+ return pn.base ("bash") + ".bash";
}
}
}
diff --git a/libbuild2/bin/def-rule.cxx b/libbuild2/bin/def-rule.cxx
index ab31fde..0998c89 100644
--- a/libbuild2/bin/def-rule.cxx
+++ b/libbuild2/bin/def-rule.cxx
@@ -7,6 +7,7 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
#include <libbuild2/algorithm.hxx>
+#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/bin/target.hxx>
@@ -16,17 +17,26 @@ namespace build2
{
namespace bin
{
+ // In C global uninitialized data becomes a "common symbol" (an equivalent
+ // definition compiled as C++ results in a BSS symbol) which allows some
+ // archaic merging of multiple such definitions during linking (see GNU ld
+ // --warn-common for background). Note that this merging may happen with
+ // other data symbol types, not just common.
+ //
struct symbols
{
set<string> d; // data
set<string> r; // read-only data
set<string> b; // uninitialized data (BSS)
+ set<string> c; // common uninitialized data
set<string> t; // text (code)
};
static void
- read_dumpbin (istream& is, symbols& syms)
+ read_dumpbin (diag_buffer& dbuf, ifdstream& is, symbols& syms)
{
+ // Note: io_error is handled by the caller.
+
// Lines that describe symbols look like:
//
// 0 1 2 3 4 5 6
@@ -62,29 +72,29 @@ namespace build2
// B44 00000000 SECT4 notype Static | .rdata$r
// AA2 00000000 SECT5 notype Static | .bss
//
-
- // Map of read-only (.rdata, .xdata) and uninitialized (.bss) sections
- // to their types (R and B, respectively). If a section is not found in
- // this map, then it's assumed to be normal data (.data).
+ // Note that an UNDEF data symbol with non-zero OFFSET is a "common
+ // symbol", equivalent to the nm `C` type.
//
- map<string, char> sections;
-
- string l;
- while (!eof (getline (is, l)))
+ // We keep a map of read-only (.rdata, .xdata) and uninitialized (.bss)
+ // sections to their types (R and B, respectively). If a section is not
+ // found in this map, then it's assumed to be normal data (.data).
+ //
+ auto parse_line = [&syms,
+ secs = map<string, char> ()] (const string& l) mutable
{
size_t b (0), e (0), n;
// IDX (note that it can be more than 3 characters).
//
if (next_word (l, b, e) == 0)
- continue;
+ return;
// OFFSET (always 8 characters).
//
n = next_word (l, b, e);
if (n != 8)
- continue;
+ return;
string off (l, b, n);
@@ -92,8 +102,8 @@ namespace build2
//
n = next_word (l, b, e);
- if (n == 0 || l.compare (b, n, "UNDEF") == 0)
- continue;
+ if (n == 0)
+ return;
string sec (l, b, n);
@@ -102,23 +112,23 @@ namespace build2
n = next_word (l, b, e);
if (l.compare (b, n, "notype") != 0)
- continue;
+ return;
- bool d;
+ bool dat;
if (l[e] == ' ' && l[e + 1] == '(' && l[e + 2] == ')')
{
e += 3;
- d = false;
+ dat = false;
}
else
- d = true;
+ dat = true;
// VISIBILITY
//
n = next_word (l, b, e);
if (n == 0)
- continue;
+ return;
string vis (l, b, n);
@@ -127,20 +137,24 @@ namespace build2
n = next_word (l, b, e);
if (n != 1 || l[b] != '|')
- continue;
+ return;
// SYMNAME
//
n = next_word (l, b, e);
if (n == 0)
- continue;
+ return;
string s (l, b, n);
// See if this is the section type symbol.
//
- if (d && off == "00000000" && vis == "Static" && s[0] == '.')
+ if (dat &&
+ off == "00000000" &&
+ sec != "UNDEF" &&
+ vis == "Static" &&
+ s[0] == '.')
{
auto cmp = [&s] (const char* n, size_t l)
{
@@ -148,43 +162,88 @@ namespace build2
};
if (cmp (".rdata", 6) ||
- cmp (".xdata", 6)) sections.emplace (move (sec), 'R');
- else if (cmp (".bss", 4)) sections.emplace (move (sec), 'B');
+ cmp (".xdata", 6)) secs.emplace (move (sec), 'R');
+ else if (cmp (".bss", 4)) secs.emplace (move (sec), 'B');
- continue;
+ return;
}
// We can only export extern symbols.
//
if (vis != "External")
- continue;
+ return;
- if (d)
+ if (dat)
{
- auto i (sections.find (sec));
- switch (i == sections.end () ? 'D' : i->second)
+ if (sec != "UNDEF")
{
- case 'D': syms.d.insert (move (s)); break;
- case 'R': syms.r.insert (move (s)); break;
- case 'B': syms.b.insert (move (s)); break;
+ auto i (secs.find (sec));
+ switch (i == secs.end () ? 'D' : i->second)
+ {
+ case 'D': syms.d.insert (move (s)); break;
+ case 'R': syms.r.insert (move (s)); break;
+ case 'B': syms.b.insert (move (s)); break;
+ }
+ }
+ else
+ {
+ if (off != "00000000")
+ syms.c.insert (move (s));
}
}
else
- syms.t.insert (move (s));
+ {
+ if (sec != "UNDEF")
+ syms.t.insert (move (s));
+ }
+ };
+
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically get an
+ // inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
+
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
+ {
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ ist.fd = nullfd;
+ else
+ {
+ parse_line (l);
+ l.clear ();
+ }
+
+ continue;
+ }
+
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
}
}
static void
- read_posix_nm (istream& is, symbols& syms)
+ read_posix_nm (diag_buffer& dbuf, ifdstream& is, symbols& syms)
{
+ // Note: io_error is handled by the caller.
+
// Lines that describe symbols look like:
//
// <NAME> <TYPE> <VALUE> <SIZE>
//
// The types that we are interested in are T, D, R, and B.
//
- string l;
- while (!eof (getline (is, l)))
+ auto parse_line = [&syms] (const string& l)
{
size_t b (0), e (0), n;
@@ -193,7 +252,7 @@ namespace build2
n = next_word (l, b, e);
if (n == 0)
- continue;
+ return;
string s (l, b, n);
@@ -202,15 +261,50 @@ namespace build2
n = next_word (l, b, e);
if (n != 1)
- continue;
+ return;
switch (l[b])
{
case 'D': syms.d.insert (move (s)); break;
case 'R': syms.r.insert (move (s)); break;
case 'B': syms.b.insert (move (s)); break;
+ case 'c':
+ case 'C': syms.c.insert (move (s)); break;
case 'T': syms.t.insert (move (s)); break;
}
+ };
+
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically get an
+ // inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
+
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
+ {
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ ist.fd = nullfd;
+ else
+ {
+ parse_line (l);
+ l.clear ();
+ }
+
+ continue;
+ }
+
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
}
}
@@ -311,6 +405,13 @@ namespace build2
if (const char* v = filter (s))
os << " " << v << " DATA\n";
+ // For common symbols, only write extern C.
+ //
+ for (const string& s: syms.c)
+ if (extern_c (s))
+ if (const char* v = filter (s))
+ os << " " << v << " DATA\n";
+
// Read-only data contains an especially large number of various
// special symbols. Instead of trying to filter them out case by case,
// we will try to recognize C/C++ identifiers plus the special symbols
@@ -386,6 +487,10 @@ namespace build2
if (const char* v = filter (s))
os << " " << v << " DATA\n";
+ for (const string& s: syms.c)
+ if (const char* v = filter (s))
+ os << " " << v << " DATA\n";
+
// Read-only data contains an especially large number of various
// special symbols. Instead of trying to filter them out case by case,
// we will try to recognize C/C++ identifiers plus the special symbols
@@ -411,7 +516,7 @@ namespace build2
}
bool def_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t) const
{
tracer trace ("bin::def_rule::match");
@@ -615,8 +720,12 @@ namespace build2
const char*& arg (*(args.end () - 2));
+ // We could print the prerequisite if it's a single obj{}/libu{} (with
+ // the latter being the common case). But it doesn't feel like that's
+ // worth the variability and the associated possibility of confusion.
+ //
if (verb == 1)
- text << "def " << t;
+ print_diag ("def", t);
// Extract symbols from each object file.
//
@@ -636,22 +745,37 @@ namespace build2
// Both dumpbin.exe and nm send their output to stdout. While nm sends
// diagnostics to stderr, dumpbin sends it to stdout together with the
- // output.
+ // output. To keep things uniform we will buffer stderr in both cases.
//
- process pr (run_start (nm,
- args,
- 0 /* stdin */,
- -1 /* stdout */));
+ process pr (
+ run_start (nm,
+ args,
+ 0 /* stdin */,
+ -1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+
+ // Note that while we read both streams until eof in the normal
+ // circumstances, we cannot use fdstream_mode::skip for the exception
+ // case on both of them: we may end up being blocked trying to read
+ // one stream while the process may be blocked writing to the other.
+ // So in case of an exception we only skip the diagnostics and close
+ // stdout hard. The latter should happen first so the order of the
+ // dbuf/is variables is important.
+ //
+ diag_buffer dbuf (ctx, args[0], pr, (fdstream_mode::non_blocking |
+ fdstream_mode::skip));
+
bool io (false);
try
{
- ifdstream is (
- move (pr.in_ofd), fdstream_mode::skip, ifdstream::badbit);
+ ifdstream is (move (pr.in_ofd),
+ fdstream_mode::non_blocking,
+ ifdstream::badbit);
if (lid == "msvc" || nid == "msvc")
- read_dumpbin (is, syms);
+ read_dumpbin (dbuf, is, syms);
else
- read_posix_nm (is, syms);
+ read_posix_nm (dbuf, is, syms);
is.close ();
}
@@ -663,16 +787,17 @@ namespace build2
io = true;
}
- if (!run_finish_code (args.data (), pr) || io)
+ if (!run_finish_code (dbuf, args, pr, 1 /* verbosity */) || io)
fail << "unable to extract symbols from " << arg;
}
- /*
+#if 0
for (const string& s: syms.d) text << "D " << s;
for (const string& s: syms.r) text << "R " << s;
for (const string& s: syms.b) text << "B " << s;
+ for (const string& s: syms.c) text << "C " << s;
for (const string& s: syms.t) text << "T " << s;
- */
+#endif
if (verb >= 3)
text << "cat >" << tp;
@@ -712,6 +837,6 @@ namespace build2
return target_state::changed;
}
- const string def_rule::rule_id_ {"bin.def 1"};
+ const string def_rule::rule_id_ {"bin.def 2"};
}
}
diff --git a/libbuild2/bin/def-rule.hxx b/libbuild2/bin/def-rule.hxx
index 32423a0..acdf841 100644
--- a/libbuild2/bin/def-rule.hxx
+++ b/libbuild2/bin/def-rule.hxx
@@ -24,7 +24,7 @@ namespace build2
def_rule () {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
diff --git a/libbuild2/bin/guess.cxx b/libbuild2/bin/guess.cxx
index 905bd0a..e9759b8 100644
--- a/libbuild2/bin/guess.cxx
+++ b/libbuild2/bin/guess.cxx
@@ -34,9 +34,12 @@ namespace build2
// Return 0-version if the version is invalid.
//
static inline semantic_version
- parse_version (const string& s, size_t p = 0, const char* bs = ".-+~ ")
+ parse_version (const string& s, size_t p = 0,
+ semantic_version::flags f = semantic_version::allow_omit_patch |
+ semantic_version::allow_build,
+ const char* bs = ".-+~ ")
{
- optional<semantic_version> v (parse_semantic_version (s, p, bs));
+ optional<semantic_version> v (parse_semantic_version (s, p, f, bs));
return v ? *v : semantic_version ();
}
@@ -89,7 +92,7 @@ namespace build2
static global_cache<ar_info> ar_cache;
const ar_info&
- guess_ar (const path& ar, const path* rl, const char* paths)
+ guess_ar (context& ctx, const path& ar, const path* rl, const char* paths)
{
tracer trace ("bin::guess_ar");
@@ -177,7 +180,11 @@ namespace build2
// "LLVM version 3.5.2"
// "LLVM version 5.0.0"
//
- if (l.compare (0, 13, "LLVM version ") == 0)
+ // But it can also be prefixed with some stuff, for example:
+ //
+ // "Debian LLVM version 14.0.6"
+ //
+ if (l.find ("LLVM version ") != string::npos)
{
semantic_version v (parse_version (l, l.rfind (' ') + 1));
return guess_result ("llvm", move (l), move (v));
@@ -227,7 +234,11 @@ namespace build2
// (yes, it goes to stdout) but that seems harmless.
//
sha256 cs;
- arr = run<guess_result> (3, are, "--version", f, false, false, &cs);
+ arr = run<guess_result> (ctx,
+ 3,
+ are, "--version",
+ f,
+ false , false, &cs);
if (!arr.empty ())
arr.checksum = cs.string ();
@@ -247,10 +258,10 @@ namespace build2
: guess_result ();
};
- // Redirect STDERR to STDOUT and ignore exit status.
+ // Redirect stderr to stdout and ignore exit status.
//
sha256 cs;
- arr = run<guess_result> (3, are, f, false, true, &cs);
+ arr = run<guess_result> (ctx, 3, are, f, false, true, &cs);
if (!arr.empty ())
{
@@ -280,7 +291,7 @@ namespace build2
// "LLVM version ".
//
- if (l.compare (0, 13, "LLVM version ") == 0)
+ if (l.find ("LLVM version ") != string::npos)
return guess_result ("llvm", move (l), semantic_version ());
// On FreeBSD we get "ranlib" rather than "BSD ranlib" for some
@@ -293,7 +304,11 @@ namespace build2
};
sha256 cs;
- rlr = run<guess_result> (3, rle, "--version", f, false, false, &cs);
+ rlr = run<guess_result> (ctx,
+ 3,
+ rle, "--version",
+ f,
+ false, false, &cs);
if (!rlr.empty ())
rlr.checksum = cs.string ();
@@ -310,10 +325,10 @@ namespace build2
: guess_result ();
};
- // Redirect STDERR to STDOUT and ignore exit status.
+ // Redirect stderr to stdout and ignore exit status.
//
sha256 cs;
- rlr = run<guess_result> (3, rle, f, false, true, &cs);
+ rlr = run<guess_result> (ctx, 3, rle, f, false, true, &cs);
if (!rlr.empty ())
{
@@ -378,7 +393,7 @@ namespace build2
static global_cache<ld_info> ld_cache;
const ld_info&
- guess_ld (const path& ld, const char* paths)
+ guess_ld (context& ctx, const path& ld, const char* paths)
{
tracer trace ("bin::guess_ld");
@@ -437,17 +452,22 @@ namespace build2
string id;
optional<semantic_version> ver;
+ size_t p;
+
// Microsoft link.exe output starts with "Microsoft (R) ".
//
if (l.compare (0, 14, "Microsoft (R) ") == 0)
{
id = "msvc";
}
- // LLD prints a line in the form "LLD X.Y.Z ...".
+ // LLD prints a line in the form "LLD X.Y.Z ...". But it can also
+ // be prefixed with some stuff, for example:
//
- else if (l.compare (0, 4, "LLD ") == 0)
+ // Debian LLD 14.0.6 (compatible with GNU linkers)
+ //
+ else if ((p = l.find ("LLD ")) != string::npos)
{
- ver = parse_version (l, 4);
+ ver = parse_version (l, p + 4);
// The only way to distinguish between various LLD drivers is via
// their name. Handle potential prefixes (say a target) and
@@ -485,12 +505,12 @@ namespace build2
: guess_result (move (id), move (l), move (ver)));
};
- // Redirect STDERR to STDOUT and ignore exit status. Note that in case
+ // Redirect stderr to stdout and ignore exit status. Note that in case
// of link.exe we will hash the diagnostics (yes, it goes to stdout)
// but that seems harmless.
//
sha256 cs;
- r = run<guess_result> (3, env, "--version", f, false, true, &cs);
+ r = run<guess_result> (ctx, 3, env, "--version", f, false, true, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -521,7 +541,7 @@ namespace build2
};
sha256 cs;
- r = run<guess_result> (3, env, "-v", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "-v", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -548,7 +568,7 @@ namespace build2
// option.
//
sha256 cs;
- r = run<guess_result> (3, env, "-version", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "-version", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -586,7 +606,7 @@ namespace build2
static global_cache<rc_info> rc_cache;
const rc_info&
- guess_rc (const path& rc, const char* paths)
+ guess_rc (context& ctx, const path& rc, const char* paths)
{
tracer trace ("bin::guess_rc");
@@ -642,7 +662,7 @@ namespace build2
// option.
//
sha256 cs;
- r = run<guess_result> (3, env, "--version", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "--version", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -675,7 +695,7 @@ namespace build2
};
sha256 cs;
- r = run<guess_result> (3, env, "/?", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "/?", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -703,7 +723,7 @@ namespace build2
static global_cache<nm_info> nm_cache;
const nm_info&
- guess_nm (const path& nm, const char* paths)
+ guess_nm (context& ctx, const path& nm, const char* paths)
{
tracer trace ("bin::guess_nm");
@@ -764,7 +784,10 @@ namespace build2
// LLVM nm --version output has a line that starts with
// "LLVM version" followed by a version.
//
- if (l.compare (0, 13, "LLVM version ") == 0)
+ // But let's assume it can be prefixed with some stuff like the rest
+ // of the LLVM tools (see above).
+ //
+ if (l.find ("LLVM version ") != string::npos)
return guess_result ("llvm", move (l), semantic_version ());
if (l.compare (0, 14, "Microsoft (R) ") == 0)
@@ -784,7 +807,7 @@ namespace build2
// option.
//
sha256 cs;
- r = run<guess_result> (3, env, "--version", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "--version", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
diff --git a/libbuild2/bin/guess.hxx b/libbuild2/bin/guess.hxx
index 52c0e1b..7dc7b33 100644
--- a/libbuild2/bin/guess.hxx
+++ b/libbuild2/bin/guess.hxx
@@ -54,7 +54,7 @@ namespace build2
// attemplated and the returned ranlib_* members will be left empty.
//
const ar_info&
- guess_ar (const path& ar, const path* ranlib, const char* paths);
+ guess_ar (context&, const path& ar, const path* ranlib, const char* paths);
// ld information.
//
@@ -100,7 +100,7 @@ namespace build2
};
const ld_info&
- guess_ld (const path& ld, const char* paths);
+ guess_ld (context&, const path& ld, const char* paths);
// rc information.
//
@@ -132,7 +132,7 @@ namespace build2
};
const rc_info&
- guess_rc (const path& rc, const char* paths);
+ guess_rc (context&, const path& rc, const char* paths);
// nm information.
//
@@ -166,7 +166,7 @@ namespace build2
};
const nm_info&
- guess_nm (const path& nm, const char* paths);
+ guess_nm (context&, const path& nm, const char* paths);
}
}
diff --git a/libbuild2/bin/init.cxx b/libbuild2/bin/init.cxx
index ab3980a..05a9c60 100644
--- a/libbuild2/bin/init.cxx
+++ b/libbuild2/bin/init.cxx
@@ -54,11 +54,14 @@ namespace build2
// Enter variables.
//
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
+
// Target is a string and not target_triplet because it can be
// specified by the user.
//
- auto& vp (rs.var_pool ());
-
vp.insert<string> ("config.bin.target");
vp.insert<string> ("config.bin.pattern");
@@ -76,6 +79,9 @@ namespace build2
// example, addition of rpaths for prerequisite libraries (see the cc
// module for an example). Default is true.
//
+ // Note also that a rule may need to make rpath relative if
+ // install.relocatable is true.
+ //
vp.insert<dir_paths> ("config.bin.rpath");
vp.insert<bool> ("config.bin.rpath.auto");
@@ -104,12 +110,12 @@ namespace build2
// Link whole archive. Note: with target visibility.
//
// The lookup semantics is as follows: we first look for a prerequisite-
- // specific value, then for a target-specific value in the library being
- // linked, and then for target type/pattern-specific value starting from
- // the scope of the target being linked-to. In that final lookup we do
- // not look in the target being linked-to itself since that is used to
- // indicate how this target should be linked to other targets. For
- // example:
+ // specific value, then for a target-specific value in the prerequisite
+ // library, and then for target type/pattern-specific value starting
+ // from the scope of the target being linked. In that final lookup we do
+ // not look in the target being linked itself since that is used to
+ // indicate how this target should be used as a prerequisite of other
+ // targets. For example:
//
// exe{test}: liba{foo}
// liba{foo}: libua{foo1 foo2}
@@ -195,6 +201,8 @@ namespace build2
//
const target_triplet* tgt (nullptr);
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (ctx.var_pool["config.bin.target"]);
// We first see if the value was specified via the configuration
@@ -231,9 +239,9 @@ namespace build2
//
if (!hint && config_sub)
{
- s = run<string> (3,
- *config_sub,
- s.c_str (),
+ s = run<string> (ctx,
+ 3,
+ *config_sub, s.c_str (),
[] (string& l, bool) {return move (l);});
l5 ([&]{trace << "config.sub target: '" << s << "'";});
}
@@ -272,6 +280,8 @@ namespace build2
//
const string* pat (nullptr);
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (ctx.var_pool["config.bin.pattern"]);
// We first see if the value was specified via the configuration
@@ -547,7 +557,7 @@ namespace build2
&target_pattern_fix<wasm_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
&file_search,
- false /* see_through */}));
+ target_type::flag::none}));
if (install_loaded)
{
@@ -578,8 +588,6 @@ namespace build2
// Similar to alias.
//
-
- //@@ outer
r.insert<lib> (perform_id, 0, "bin.lib", lib_);
r.insert<lib> (configure_id, 0, "bin.lib", lib_);
@@ -600,6 +608,18 @@ namespace build2
if (rs.find_module ("dist"))
{
+      // Note that without custom dist rules in setups along the following
+ // lines the source file will be unreachable by dist:
+ //
+ // lib{foo}: obj{foo}
+ // obja{foo}: cxx{foo}
+ // objs{foo}: cxx{foo}
+ //
+ r.insert<obj> (dist_id, 0, "bin.obj", obj_);
+ r.insert<bmi> (dist_id, 0, "bin.bmi", obj_);
+ r.insert<hbmi> (dist_id, 0, "bin.hbmi", obj_);
+ r.insert<libul> (dist_id, 0, "bin.libul", libul_);
+
r.insert<lib> (dist_id, 0, "bin.lib", lib_);
}
}
@@ -626,7 +646,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.ar");
vp.insert<path> ("config.bin.ranlib");
@@ -684,7 +707,7 @@ namespace build2
nullptr,
config::save_default_commented)));
- const ar_info& ari (guess_ar (ar, ranlib, pat.paths));
+ const ar_info& ari (guess_ar (rs.ctx, ar, ranlib, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -800,7 +823,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.ld");
}
@@ -832,7 +858,7 @@ namespace build2
path (apply_pattern (ld_d, pat.pattern)),
config::save_default_commented)));
- const ld_info& ldi (guess_ld (ld, pat.paths));
+ const ld_info& ldi (guess_ld (rs.ctx, ld, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -927,7 +953,7 @@ namespace build2
&target_pattern_fix<pdb_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
&file_search,
- false /* see_through */}));
+ target_type::flag::none}));
if (cast_false<bool> (rs["install.loaded"]))
{
@@ -958,7 +984,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.rc");
}
@@ -990,7 +1019,7 @@ namespace build2
path (apply_pattern (rc_d, pat.pattern)),
config::save_default_commented)));
- const rc_info& rci (guess_rc (rc, pat.paths));
+ const rc_info& rci (guess_rc (rs.ctx, rc, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -1057,7 +1086,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.nm");
}
@@ -1099,7 +1131,7 @@ namespace build2
path (apply_pattern (nm_d, pat.pattern)),
config::save_default_commented)));
- const nm_info& nmi (guess_nm (nm, pat.paths));
+ const nm_info& nmi (guess_nm (rs.ctx, nm, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -1185,7 +1217,6 @@ namespace build2
{"bin.vars", nullptr, vars_init},
{"bin.config", nullptr, config_init},
- {"bin", nullptr, init},
{"bin.ar.config", nullptr, ar_config_init},
{"bin.ar", nullptr, ar_init},
{"bin.ld.config", nullptr, ld_config_init},
@@ -1195,6 +1226,7 @@ namespace build2
{"bin.nm.config", nullptr, nm_config_init},
{"bin.nm", nullptr, nm_init},
{"bin.def", nullptr, def_init},
+ {"bin", nullptr, init},
{nullptr, nullptr, nullptr}
};
diff --git a/libbuild2/bin/rule.cxx b/libbuild2/bin/rule.cxx
index 021a768..c7147bf 100644
--- a/libbuild2/bin/rule.cxx
+++ b/libbuild2/bin/rule.cxx
@@ -17,12 +17,30 @@ namespace build2
{
namespace bin
{
+ // Search for an existing (declared real) member and match it if found.
+ //
+ static void
+ dist_match (action a, target& t, const target_type& tt)
+ {
+ if (const target* m = search_existing (t.ctx, tt, t.dir, t.out, t.name))
+ {
+ // Only a real target declaration can have prerequisites (which is
+ // the reason we are doing this).
+ //
+ if (m->decl == target_decl::real)
+ match_sync (a, *m);
+ }
+ }
+
// obj_rule
//
bool obj_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t) const
{
- const char* n (t.dynamic_type ().name); // Ignore derived type.
+ if (a.meta_operation () == dist_id)
+ return true;
+
+ const char* n (t.dynamic_type->name); // Ignore derived type.
fail << diag_doing (a, t) << " target group" <<
info << "explicitly select " << n << "e{}, " << n << "a{}, or "
@@ -30,27 +48,142 @@ namespace build2
}
recipe obj_rule::
- apply (action, target&) const {return empty_recipe;}
+ apply (action a, target& t) const
+ {
+ // We only get here for dist.
+ //
+ const target_type* ett (nullptr);
+ const target_type* att (nullptr);
+ const target_type* stt (nullptr);
+
+ if (t.is_a<obj> ())
+ {
+ ett = &obje::static_type;
+ att = &obja::static_type;
+ stt = &objs::static_type;
+ }
+ else if (t.is_a<bmi> ())
+ {
+ ett = &bmie::static_type;
+ att = &bmia::static_type;
+ stt = &bmis::static_type;
+ }
+ else if (t.is_a<hbmi> ())
+ {
+ ett = &hbmie::static_type;
+ att = &hbmia::static_type;
+ stt = &hbmis::static_type;
+ }
+ else
+ assert (false);
+
+ dist_match (a, t, *ett);
+ dist_match (a, t, *att);
+ dist_match (a, t, *stt);
+
+ // Delegate to the default dist rule to match prerequisites.
+ //
+ return dist::rule::apply (a, t);
+ }
// libul_rule
//
bool libul_rule::
- match (action a, target& t, const string&) const
+ match (action, target&) const
{
- fail << diag_doing (a, t) << " target group" <<
- info << "explicitly select libua{} or libus{} member" << endf;
+ return true;
}
recipe libul_rule::
- apply (action, target&) const {return empty_recipe;}
+ apply (action a, target& t) const
+ {
+ if (a.meta_operation () == dist_id)
+ {
+ dist_match (a, t, libua::static_type);
+ dist_match (a, t, libus::static_type);
+
+ // Delegate to the default dist rule to match prerequisites.
+ //
+ return dist::rule::apply (a, t);
+ }
+
+ // Pick one of the members. First looking for the one already matched.
+ //
+ const target* m (nullptr);
+
+ const libus* ls (nullptr);
+ {
+ ls = search_existing<libus> (t.ctx, t.dir, t.out, t.name);
+
+ if (ls != nullptr && ls->matched (a))
+ m = ls;
+ }
+
+ const libua* la (nullptr);
+ if (m == nullptr)
+ {
+ la = search_existing<libua> (t.ctx, t.dir, t.out, t.name);
+
+ if (la != nullptr && la->matched (a))
+ m = la;
+ }
+
+ if (m == nullptr)
+ {
+ const scope& bs (t.base_scope ());
+
+ lmembers lm (link_members (*bs.root_scope ()));
+
+ if (lm.s && lm.a)
+ {
+ // Use the bin.exe.lib order as a heuristics to pick the library
+ // (i.e., the most likely utility library to be built is the one
+ // most likely to be linked).
+ //
+ lorder lo (link_order (bs, otype::e));
+
+ (lo == lorder::s_a || lo == lorder::s ? lm.a : lm.s) = false;
+ }
+
+ if (lm.s)
+ m = ls != nullptr ? ls : &search<libus> (t, t.dir, t.out, t.name);
+ else
+ m = la != nullptr ? la : &search<libua> (t, t.dir, t.out, t.name);
+ }
+
+ // Save the member we picked in case others (e.g., $x.lib_poptions())
+ // need this information.
+ //
+ t.prerequisite_targets[a].push_back (m);
+
+ if (match_sync (a, *m, unmatch::safe).first)
+ return noop_recipe;
+
+ return [] (action a, const target& t)
+ {
+ const target* m (t.prerequisite_targets[a].back ());
+
+ // For update always return unchanged so we are consistent whether we
+ // managed to unmatch or now. Note that for clean we may get postponed
+ // so let's return the actual target state.
+ //
+ target_state r (execute_sync (a, *m));
+ return a == perform_update_id ? target_state::unchanged : r;
+ };
+ }
// lib_rule
//
// The whole logic is pretty much as if we had our two group members as
// our prerequisites.
//
+ // Note also that unlike the obj and libul rules above, we don't need to
+ // delegate to the default dist rule since any group prerequisites will be
+ // matched by one of the members (the key difference here is that unlike
+ // those rules, we insert and match members unconditionally).
+ //
bool lib_rule::
- match (action a, target& xt, const string&) const
+ match (action a, target& xt) const
{
lib& t (xt.as<lib> ());
diff --git a/libbuild2/bin/rule.hxx b/libbuild2/bin/rule.hxx
index ffb975d..9dd1d14 100644
--- a/libbuild2/bin/rule.hxx
+++ b/libbuild2/bin/rule.hxx
@@ -9,6 +9,8 @@
#include <libbuild2/rule.hxx>
+#include <libbuild2/dist/rule.hxx>
+
#include <libbuild2/bin/export.hxx>
namespace build2
@@ -18,28 +20,41 @@ namespace build2
// "Fail rule" for obj{} and [h]bmi{} that issues diagnostics if someone
// tries to build these groups directly.
//
- class obj_rule: public simple_rule
+ // Note that for dist it acts as a pass-through to all existing (declared)
+ // members.
+ //
+ class obj_rule: public dist::rule
{
public:
obj_rule () {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
};
- // "Fail rule" for libul{} that issues diagnostics if someone tries to
- // build this group directly.
+ // This rule picks, matches, and unmatches (if possible) a member for the
+ // purpose of making its metadata (for example, library's poptions, if
+ // it's one of the cc libraries) available.
+ //
+ // The underlying idea here is that someone else (e.g., cc::link_rule)
+ // makes a more informed choice and we piggy back on that decision,
+ // falling back to making our own based on bin.lib and bin.exe.lib. Note
+ // that for update this rule always returns target_state::unchanged.
//
- class libul_rule: public simple_rule
+ // Note also that for dist it acts as a pass-through to all existing
+ // (declared) members.
+ //
+ class libul_rule: public dist::rule
{
public:
+ explicit
libul_rule () {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -47,13 +62,15 @@ namespace build2
// Pass-through to group members rule, similar to alias.
//
+ // Note that for dist it always passes to both members.
+ //
class LIBBUILD2_BIN_SYMEXPORT lib_rule: public simple_rule
{
public:
lib_rule () {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
diff --git a/libbuild2/bin/target.cxx b/libbuild2/bin/target.cxx
index bf701c9..38572ef 100644
--- a/libbuild2/bin/target.cxx
+++ b/libbuild2/bin/target.cxx
@@ -21,7 +21,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
const target_type bmix::static_type
@@ -34,7 +34,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
const target_type hbmix::static_type
@@ -47,7 +47,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
const target_type libx::static_type
@@ -60,7 +60,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::member_hint // Use untyped hint for group members.
};
const target_type libux::static_type
@@ -73,7 +73,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
// Note that we link groups during the load phase since this is often
@@ -108,7 +108,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type bmie::static_type
@@ -121,7 +121,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type hbmie::static_type
@@ -134,7 +134,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type obja::static_type
@@ -147,7 +147,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type bmia::static_type
@@ -160,7 +160,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type hbmia::static_type
@@ -173,7 +173,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type objs::static_type
@@ -186,7 +186,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type bmis::static_type
@@ -199,7 +199,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type hbmis::static_type
@@ -212,7 +212,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type libue::static_type
@@ -225,7 +225,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type libua::static_type
@@ -238,7 +238,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
const target_type libus::static_type
@@ -251,7 +251,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&target_search, // Note: not _file(); don't look for an existing file.
- false
+ target_type::flag::none
};
// obj{}, [h]bmi{}, and libu{} group factory.
@@ -292,7 +292,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::member_hint // Use untyped hint for group members.
};
const target_type bmi::static_type
@@ -305,7 +305,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::member_hint // Use untyped hint for group members.
};
const target_type hbmi::static_type
@@ -318,7 +318,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::member_hint // Use untyped hint for group members.
};
// The same as g_factory() but without E.
@@ -352,7 +352,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::member_hint // Use untyped hint for group members.
};
// What extensions should we use? At the outset, this is platform-
@@ -375,7 +375,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
const target_type libs::static_type
@@ -388,7 +388,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
// lib
@@ -435,7 +435,10 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false // Note: not see-through ("alternatives" group).
+
+ // Note: not see-through ("alternatives" group).
+ //
+ target_type::flag::member_hint // Use untyped hint for group members.
};
// libi
@@ -450,7 +453,7 @@ namespace build2
&target_pattern_var<nullptr>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
// def
@@ -467,7 +470,7 @@ namespace build2
&target_pattern_fix<def_ext>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
}
}
diff --git a/libbuild2/bin/target.hxx b/libbuild2/bin/target.hxx
index f8d2dd0..9685e39 100644
--- a/libbuild2/bin/target.hxx
+++ b/libbuild2/bin/target.hxx
@@ -22,7 +22,11 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT objx: public file
{
public:
- using file::file;
+ objx (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
@@ -31,41 +35,55 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT obje: public objx
{
public:
- using objx::objx;
+ obje (context& c, dir_path d, dir_path o, string n)
+ : objx (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT obja: public objx
{
public:
- using objx::objx;
+ obja (context& c, dir_path d, dir_path o, string n)
+ : objx (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT objs: public objx
{
public:
- using objx::objx;
+ objs (context& c, dir_path d, dir_path o, string n)
+ : objx (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
+ // Note: this is a "choice" target group.
+ //
class LIBBUILD2_BIN_SYMEXPORT obj: public target
{
public:
- using target::target;
+ obj (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Binary module interface (BMI).
@@ -100,7 +118,11 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT bmix: public file
{
public:
- using file::file;
+ bmix (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
@@ -111,7 +133,11 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT hbmix: public bmix
{
public:
- using bmix::bmix;
+ hbmix (context& c, dir_path d, dir_path o, string n)
+ : bmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
@@ -120,84 +146,111 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT bmie: public bmix
{
public:
- using bmix::bmix;
+ bmie (context& c, dir_path d, dir_path o, string n)
+ : bmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT hbmie: public hbmix
{
public:
- using hbmix::hbmix;
+ hbmie (context& c, dir_path d, dir_path o, string n)
+ : hbmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT bmia: public bmix
{
public:
- using bmix::bmix;
+ bmia (context& c, dir_path d, dir_path o, string n)
+ : bmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT hbmia: public hbmix
{
public:
- using hbmix::hbmix;
+ hbmia (context& c, dir_path d, dir_path o, string n)
+ : hbmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT bmis: public bmix
{
public:
- using bmix::bmix;
+ bmis (context& c, dir_path d, dir_path o, string n)
+ : bmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT hbmis: public hbmix
{
public:
- using hbmix::hbmix;
+ hbmis (context& c, dir_path d, dir_path o, string n)
+ : hbmix (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
+ // Note: this is a "choice" target group (similar to obj{}).
+ //
class LIBBUILD2_BIN_SYMEXPORT bmi: public target
{
public:
- using target::target;
+ bmi (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
+ // Note: this is a "choice" target group (similar to bmi{} and obj{}).
+ //
class LIBBUILD2_BIN_SYMEXPORT hbmi: public target
{
public:
- using target::target;
+ hbmi (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
-
// Common base for lib{} and libul{} groups.
//
// Use mtime_target as a base for the "trust me it exists" functionality
@@ -207,7 +260,11 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT libx: public mtime_target
{
public:
- using mtime_target::mtime_target;
+ libx (context& c, dir_path d, dir_path o, string n)
+ : mtime_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
@@ -240,7 +297,11 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT libux: public file
{
public:
- using file::file;
+ libux (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
@@ -249,41 +310,58 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT libue: public libux
{
public:
- using libux::libux;
+ libue (context& c, dir_path d, dir_path o, string n)
+ : libux (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT libua: public libux
{
public:
- using libux::libux;
+ libua (context& c, dir_path d, dir_path o, string n)
+ : libux (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT libus: public libux
{
public:
- using libux::libux;
+ libus (context& c, dir_path d, dir_path o, string n)
+ : libux (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
+ // Note: this is a "choice" target group.
+ //
+ // @@ Ideally this shouldn't derive from mtime_target (via libx). Maybe
+ // get rid of libx?
+ //
class LIBBUILD2_BIN_SYMEXPORT libul: public libx
{
public:
- using libx::libx;
+ libul (context& c, dir_path d, dir_path o, string n)
+ : libx (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// The lib{} target group.
@@ -291,23 +369,27 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT liba: public file
{
public:
- using file::file;
+ liba (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_BIN_SYMEXPORT libs: public file
{
public:
- using file::file;
+ libs (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
-
- virtual const target_type&
- dynamic_type () const override {return static_type;}
};
// Standard layout type compatible with group_view's const target*[2].
@@ -321,16 +403,17 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT lib: public libx, public lib_members
{
public:
- using libx::libx;
+ lib (context& c, dir_path d, dir_path o, string n)
+ : libx (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
virtual group_view
group_members (action) const override;
public:
static const target_type static_type;
-
- virtual const target_type&
- dynamic_type () const override {return static_type;}
};
// Windows import library.
@@ -338,11 +421,14 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT libi: public file
{
public:
- using file::file;
+ libi (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Windows module definition (.def).
@@ -350,11 +436,14 @@ namespace build2
class LIBBUILD2_BIN_SYMEXPORT def: public file
{
public:
- using file::file;
+ def (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
diff --git a/libbuild2/bin/utility.cxx b/libbuild2/bin/utility.cxx
index cb06287..2a87bbd 100644
--- a/libbuild2/bin/utility.cxx
+++ b/libbuild2/bin/utility.cxx
@@ -57,6 +57,11 @@ namespace build2
// prefer static over shared since it could be faster (but I am sure
// someone will probably want this configurable).
//
+ // Maybe we should use the bin.exe.lib order as a heuristics (i.e.,
+ // the most likely utility library to be built is the one most likely
+ // to be linked)? Will need the variables rs-only, similar to
+ // bin.lib, which probably is a good thing. See also libul_rule.
+ //
if (li.type == otype::e)
{
// Utility libraries are project-local which means the primarily
diff --git a/libbuild2/build/script/builtin-options.cxx b/libbuild2/build/script/builtin-options.cxx
new file mode 100644
index 0000000..dba3c59
--- /dev/null
+++ b/libbuild2/build/script/builtin-options.cxx
@@ -0,0 +1,606 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+#include <libbuild2/types-parsers.hxx>
+//
+// End prologue.
+
+#include <libbuild2/build/script/builtin-options.hxx>
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+#include <utility>
+#include <ostream>
+#include <sstream>
+#include <cstring>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ template <typename X>
+ struct parser
+ {
+ static void
+ parse (X& x, bool& xs, scanner& s)
+ {
+ using namespace std;
+
+ const char* o (s.next ());
+ if (s.more ())
+ {
+ string v (s.next ());
+ istringstream is (v);
+ if (!(is >> x && is.peek () == istringstream::traits_type::eof ()))
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<bool>
+ {
+ static void
+ parse (bool& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<std::string>
+ {
+ static void
+ parse (std::string& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ x = s.next ();
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X>
+ struct parser<std::pair<X, std::size_t> >
+ {
+ static void
+ parse (std::pair<X, std::size_t>& x, bool& xs, scanner& s)
+ {
+ x.second = s.position ();
+ parser<X>::parse (x.first, xs, s);
+ }
+ };
+
+ template <typename X>
+ struct parser<std::vector<X> >
+ {
+ static void
+ parse (std::vector<X>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.push_back (x);
+ xs = true;
+ }
+ };
+
+ template <typename X, typename C>
+ struct parser<std::set<X, C> >
+ {
+ static void
+ parse (std::set<X, C>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.insert (x);
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::map<K, V, C> >
+ {
+ static void
+ parse (std::map<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m[k] = v;
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X, typename T, T X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, s);
+ }
+
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
+ template <typename X, typename T, T X::*M, bool X::*S>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, x.*S, s);
+ }
+ }
+ }
+}
+
+#include <map>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace script
+ {
+ // depdb_dyndep_options
+ //
+
+ depdb_dyndep_options::
+ depdb_dyndep_options ()
+ : file_ (),
+ file_specified_ (false),
+ format_ (),
+ format_specified_ (false),
+ what_ (),
+ what_specified_ (false),
+ include_path_ (),
+ include_path_specified_ (false),
+ default_type_ (),
+ default_type_specified_ (false),
+ adhoc_ (),
+ cwd_ (),
+ cwd_specified_ (false),
+ drop_cycles_ (),
+ target_what_ (),
+ target_what_specified_ (false),
+ target_default_type_ (),
+ target_default_type_specified_ (false),
+ target_extension_type_ (),
+ target_extension_type_specified_ (false),
+ target_cwd_ (),
+ target_cwd_specified_ (false)
+ {
+ }
+
+ bool depdb_dyndep_options::
+ parse (int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ bool depdb_dyndep_options::
+ parse (int start,
+ int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ bool depdb_dyndep_options::
+ parse (int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ end = s.end ();
+ return r;
+ }
+
+ bool depdb_dyndep_options::
+ parse (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ bool r = _parse (s, opt, arg);
+ end = s.end ();
+ return r;
+ }
+
+ bool depdb_dyndep_options::
+ parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ {
+ bool r = _parse (s, opt, arg);
+ return r;
+ }
+
+ typedef
+ std::map<std::string, void (*) (depdb_dyndep_options&, ::build2::build::cli::scanner&)>
+ _cli_depdb_dyndep_options_map;
+
+ static _cli_depdb_dyndep_options_map _cli_depdb_dyndep_options_map_;
+
+ struct _cli_depdb_dyndep_options_map_init
+ {
+ _cli_depdb_dyndep_options_map_init ()
+ {
+ _cli_depdb_dyndep_options_map_["--file"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, path, &depdb_dyndep_options::file_,
+ &depdb_dyndep_options::file_specified_ >;
+ _cli_depdb_dyndep_options_map_["--format"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::format_,
+ &depdb_dyndep_options::format_specified_ >;
+ _cli_depdb_dyndep_options_map_["--what"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::what_,
+ &depdb_dyndep_options::what_specified_ >;
+ _cli_depdb_dyndep_options_map_["--include-path"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, dir_paths, &depdb_dyndep_options::include_path_,
+ &depdb_dyndep_options::include_path_specified_ >;
+ _cli_depdb_dyndep_options_map_["-I"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, dir_paths, &depdb_dyndep_options::include_path_,
+ &depdb_dyndep_options::include_path_specified_ >;
+ _cli_depdb_dyndep_options_map_["--default-type"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::default_type_,
+ &depdb_dyndep_options::default_type_specified_ >;
+ _cli_depdb_dyndep_options_map_["--adhoc"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, &depdb_dyndep_options::adhoc_ >;
+ _cli_depdb_dyndep_options_map_["--cwd"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, dir_path, &depdb_dyndep_options::cwd_,
+ &depdb_dyndep_options::cwd_specified_ >;
+ _cli_depdb_dyndep_options_map_["--drop-cycles"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, &depdb_dyndep_options::drop_cycles_ >;
+ _cli_depdb_dyndep_options_map_["--target-what"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::target_what_,
+ &depdb_dyndep_options::target_what_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-default-type"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::target_default_type_,
+ &depdb_dyndep_options::target_default_type_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-extension-type"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, map<string, string>, &depdb_dyndep_options::target_extension_type_,
+ &depdb_dyndep_options::target_extension_type_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-cwd"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, dir_path, &depdb_dyndep_options::target_cwd_,
+ &depdb_dyndep_options::target_cwd_specified_ >;
+ }
+ };
+
+ static _cli_depdb_dyndep_options_map_init _cli_depdb_dyndep_options_map_init_;
+
+ bool depdb_dyndep_options::
+ _parse (const char* o, ::build2::build::cli::scanner& s)
+ {
+ _cli_depdb_dyndep_options_map::const_iterator i (_cli_depdb_dyndep_options_map_.find (o));
+
+ if (i != _cli_depdb_dyndep_options_map_.end ())
+ {
+ (*(i->second)) (*this, s);
+ return true;
+ }
+
+ return false;
+ }
+
+ bool depdb_dyndep_options::
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
+ {
+ // Can't skip combined flags (--no-combined-flags).
+ //
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
+
+ bool r = false;
+ bool opt = true;
+
+ while (s.more ())
+ {
+ const char* o = s.peek ();
+
+ if (std::strcmp (o, "--") == 0)
+ {
+ opt = false;
+ s.skip ();
+ r = true;
+ continue;
+ }
+
+ if (opt)
+ {
+ if (_parse (o, s))
+ {
+ r = true;
+ continue;
+ }
+
+ if (std::strncmp (o, "-", 1) == 0 && o[1] != '\0')
+ {
+ // Handle combined option values.
+ //
+ std::string co;
+ if (const char* v = std::strchr (o, '='))
+ {
+ co.assign (o, 0, v - o);
+ ++v;
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (co.c_str ()),
+ const_cast<char*> (v)
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (_parse (co.c_str (), ns))
+ {
+ // Parsed the option but not its value?
+ //
+ if (ns.end () != 2)
+ throw ::build2::build::cli::invalid_value (co, v);
+
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = co.c_str ();
+ }
+ }
+
+ // Handle combined flags.
+ //
+ char cf[3];
+ {
+ const char* p = o + 1;
+ for (; *p != '\0'; ++p)
+ {
+ if (!((*p >= 'a' && *p <= 'z') ||
+ (*p >= 'A' && *p <= 'Z') ||
+ (*p >= '0' && *p <= '9')))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ for (p = o + 1; *p != '\0'; ++p)
+ {
+ std::strcpy (cf, "-");
+ cf[1] = *p;
+ cf[2] = '\0';
+
+ int ac (1);
+ char* av[] =
+ {
+ cf
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (!_parse (cf, ns))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ // All handled.
+ //
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = cf;
+ }
+ }
+ }
+
+ switch (opt_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_option (o);
+ }
+ }
+
+ break;
+ }
+ }
+
+ switch (arg_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_argument (o);
+ }
+ }
+
+ break;
+ }
+
+ return r;
+ }
+ }
+ }
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
diff --git a/libbuild2/build/script/builtin-options.hxx b/libbuild2/build/script/builtin-options.hxx
new file mode 100644
index 0000000..a8c3440
--- /dev/null
+++ b/libbuild2/build/script/builtin-options.hxx
@@ -0,0 +1,284 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+#ifndef LIBBUILD2_BUILD_SCRIPT_BUILTIN_OPTIONS_HXX
+#define LIBBUILD2_BUILD_SCRIPT_BUILTIN_OPTIONS_HXX
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+#include <libbuild2/common-options.hxx>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace script
+ {
+ class depdb_dyndep_options
+ {
+ public:
+ depdb_dyndep_options ();
+
+ // Return true if anything has been parsed.
+ //
+ bool
+ parse (int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (int start,
+ int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ bool
+ parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ // Option accessors and modifiers.
+ //
+ const path&
+ file () const;
+
+ path&
+ file ();
+
+ void
+ file (const path&);
+
+ bool
+ file_specified () const;
+
+ void
+ file_specified (bool);
+
+ const string&
+ format () const;
+
+ string&
+ format ();
+
+ void
+ format (const string&);
+
+ bool
+ format_specified () const;
+
+ void
+ format_specified (bool);
+
+ const string&
+ what () const;
+
+ string&
+ what ();
+
+ void
+ what (const string&);
+
+ bool
+ what_specified () const;
+
+ void
+ what_specified (bool);
+
+ const dir_paths&
+ include_path () const;
+
+ dir_paths&
+ include_path ();
+
+ void
+ include_path (const dir_paths&);
+
+ bool
+ include_path_specified () const;
+
+ void
+ include_path_specified (bool);
+
+ const string&
+ default_type () const;
+
+ string&
+ default_type ();
+
+ void
+ default_type (const string&);
+
+ bool
+ default_type_specified () const;
+
+ void
+ default_type_specified (bool);
+
+ const bool&
+ adhoc () const;
+
+ bool&
+ adhoc ();
+
+ void
+ adhoc (const bool&);
+
+ const dir_path&
+ cwd () const;
+
+ dir_path&
+ cwd ();
+
+ void
+ cwd (const dir_path&);
+
+ bool
+ cwd_specified () const;
+
+ void
+ cwd_specified (bool);
+
+ const bool&
+ drop_cycles () const;
+
+ bool&
+ drop_cycles ();
+
+ void
+ drop_cycles (const bool&);
+
+ const string&
+ target_what () const;
+
+ string&
+ target_what ();
+
+ void
+ target_what (const string&);
+
+ bool
+ target_what_specified () const;
+
+ void
+ target_what_specified (bool);
+
+ const string&
+ target_default_type () const;
+
+ string&
+ target_default_type ();
+
+ void
+ target_default_type (const string&);
+
+ bool
+ target_default_type_specified () const;
+
+ void
+ target_default_type_specified (bool);
+
+ const map<string, string>&
+ target_extension_type () const;
+
+ map<string, string>&
+ target_extension_type ();
+
+ void
+ target_extension_type (const map<string, string>&);
+
+ bool
+ target_extension_type_specified () const;
+
+ void
+ target_extension_type_specified (bool);
+
+ const dir_path&
+ target_cwd () const;
+
+ dir_path&
+ target_cwd ();
+
+ void
+ target_cwd (const dir_path&);
+
+ bool
+ target_cwd_specified () const;
+
+ void
+ target_cwd_specified (bool);
+
+ // Implementation details.
+ //
+ protected:
+ bool
+ _parse (const char*, ::build2::build::cli::scanner&);
+
+ private:
+ bool
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
+
+ public:
+ path file_;
+ bool file_specified_;
+ string format_;
+ bool format_specified_;
+ string what_;
+ bool what_specified_;
+ dir_paths include_path_;
+ bool include_path_specified_;
+ string default_type_;
+ bool default_type_specified_;
+ bool adhoc_;
+ dir_path cwd_;
+ bool cwd_specified_;
+ bool drop_cycles_;
+ string target_what_;
+ bool target_what_specified_;
+ string target_default_type_;
+ bool target_default_type_specified_;
+ map<string, string> target_extension_type_;
+ bool target_extension_type_specified_;
+ dir_path target_cwd_;
+ bool target_cwd_specified_;
+ };
+ }
+ }
+}
+
+#include <libbuild2/build/script/builtin-options.ixx>
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
+#endif // LIBBUILD2_BUILD_SCRIPT_BUILTIN_OPTIONS_HXX
diff --git a/libbuild2/build/script/builtin-options.ixx b/libbuild2/build/script/builtin-options.ixx
new file mode 100644
index 0000000..20847c2
--- /dev/null
+++ b/libbuild2/build/script/builtin-options.ixx
@@ -0,0 +1,363 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+namespace build2
+{
+ namespace build
+ {
+ namespace script
+ {
+ // depdb_dyndep_options
+ //
+
+ inline const path& depdb_dyndep_options::
+ file () const
+ {
+ return this->file_;
+ }
+
+ inline path& depdb_dyndep_options::
+ file ()
+ {
+ return this->file_;
+ }
+
+ inline void depdb_dyndep_options::
+ file (const path& x)
+ {
+ this->file_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ file_specified () const
+ {
+ return this->file_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ file_specified (bool x)
+ {
+ this->file_specified_ = x;
+ }
+
+ inline const string& depdb_dyndep_options::
+ format () const
+ {
+ return this->format_;
+ }
+
+ inline string& depdb_dyndep_options::
+ format ()
+ {
+ return this->format_;
+ }
+
+ inline void depdb_dyndep_options::
+ format (const string& x)
+ {
+ this->format_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ format_specified () const
+ {
+ return this->format_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ format_specified (bool x)
+ {
+ this->format_specified_ = x;
+ }
+
+ inline const string& depdb_dyndep_options::
+ what () const
+ {
+ return this->what_;
+ }
+
+ inline string& depdb_dyndep_options::
+ what ()
+ {
+ return this->what_;
+ }
+
+ inline void depdb_dyndep_options::
+ what (const string& x)
+ {
+ this->what_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ what_specified () const
+ {
+ return this->what_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ what_specified (bool x)
+ {
+ this->what_specified_ = x;
+ }
+
+ inline const dir_paths& depdb_dyndep_options::
+ include_path () const
+ {
+ return this->include_path_;
+ }
+
+ inline dir_paths& depdb_dyndep_options::
+ include_path ()
+ {
+ return this->include_path_;
+ }
+
+ inline void depdb_dyndep_options::
+ include_path (const dir_paths& x)
+ {
+ this->include_path_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ include_path_specified () const
+ {
+ return this->include_path_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ include_path_specified (bool x)
+ {
+ this->include_path_specified_ = x;
+ }
+
+ inline const string& depdb_dyndep_options::
+ default_type () const
+ {
+ return this->default_type_;
+ }
+
+ inline string& depdb_dyndep_options::
+ default_type ()
+ {
+ return this->default_type_;
+ }
+
+ inline void depdb_dyndep_options::
+ default_type (const string& x)
+ {
+ this->default_type_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ default_type_specified () const
+ {
+ return this->default_type_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ default_type_specified (bool x)
+ {
+ this->default_type_specified_ = x;
+ }
+
+ inline const bool& depdb_dyndep_options::
+ adhoc () const
+ {
+ return this->adhoc_;
+ }
+
+ inline bool& depdb_dyndep_options::
+ adhoc ()
+ {
+ return this->adhoc_;
+ }
+
+ inline void depdb_dyndep_options::
+ adhoc (const bool& x)
+ {
+ this->adhoc_ = x;
+ }
+
+ inline const dir_path& depdb_dyndep_options::
+ cwd () const
+ {
+ return this->cwd_;
+ }
+
+ inline dir_path& depdb_dyndep_options::
+ cwd ()
+ {
+ return this->cwd_;
+ }
+
+ inline void depdb_dyndep_options::
+ cwd (const dir_path& x)
+ {
+ this->cwd_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ cwd_specified () const
+ {
+ return this->cwd_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ cwd_specified (bool x)
+ {
+ this->cwd_specified_ = x;
+ }
+
+ inline const bool& depdb_dyndep_options::
+ drop_cycles () const
+ {
+ return this->drop_cycles_;
+ }
+
+ inline bool& depdb_dyndep_options::
+ drop_cycles ()
+ {
+ return this->drop_cycles_;
+ }
+
+ inline void depdb_dyndep_options::
+ drop_cycles (const bool& x)
+ {
+ this->drop_cycles_ = x;
+ }
+
+ inline const string& depdb_dyndep_options::
+ target_what () const
+ {
+ return this->target_what_;
+ }
+
+ inline string& depdb_dyndep_options::
+ target_what ()
+ {
+ return this->target_what_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_what (const string& x)
+ {
+ this->target_what_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_what_specified () const
+ {
+ return this->target_what_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_what_specified (bool x)
+ {
+ this->target_what_specified_ = x;
+ }
+
+ inline const string& depdb_dyndep_options::
+ target_default_type () const
+ {
+ return this->target_default_type_;
+ }
+
+ inline string& depdb_dyndep_options::
+ target_default_type ()
+ {
+ return this->target_default_type_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_default_type (const string& x)
+ {
+ this->target_default_type_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_default_type_specified () const
+ {
+ return this->target_default_type_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_default_type_specified (bool x)
+ {
+ this->target_default_type_specified_ = x;
+ }
+
+ inline const map<string, string>& depdb_dyndep_options::
+ target_extension_type () const
+ {
+ return this->target_extension_type_;
+ }
+
+ inline map<string, string>& depdb_dyndep_options::
+ target_extension_type ()
+ {
+ return this->target_extension_type_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_extension_type (const map<string, string>& x)
+ {
+ this->target_extension_type_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_extension_type_specified () const
+ {
+ return this->target_extension_type_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_extension_type_specified (bool x)
+ {
+ this->target_extension_type_specified_ = x;
+ }
+
+ inline const dir_path& depdb_dyndep_options::
+ target_cwd () const
+ {
+ return this->target_cwd_;
+ }
+
+ inline dir_path& depdb_dyndep_options::
+ target_cwd ()
+ {
+ return this->target_cwd_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_cwd (const dir_path& x)
+ {
+ this->target_cwd_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_cwd_specified () const
+ {
+ return this->target_cwd_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_cwd_specified (bool x)
+ {
+ this->target_cwd_specified_ = x;
+ }
+ }
+ }
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
diff --git a/libbuild2/build/script/builtin.cli b/libbuild2/build/script/builtin.cli
new file mode 100644
index 0000000..5aea034
--- /dev/null
+++ b/libbuild2/build/script/builtin.cli
@@ -0,0 +1,128 @@
+// file : libbuild2/build/script/builtin.cli
+// license : MIT; see accompanying LICENSE file
+
+include <libbuild2/common.cli>;
+
+// Note that options in this file are undocumented because we generate neither
+// the usage printing code nor man pages. Instead, they are documented in the
+// manual.
+//
+namespace build2
+{
+ namespace build
+ {
+ namespace script
+ {
+ // Pseudo-builtin options.
+ //
+ class depdb_dyndep_options
+ {
+ // Note that --byproduct or --dyn-target, if any, must be the first
+ // option and is handled ad hoc.
+ //
+ // Similarly, --update-{include,exclude} are handled ad hoc and must
+ // be literals, similar to the -- separator. They specify prerequisite
+ // targets/patterns to include/exclude (from the static prerequisite
+ // set) for update during match (those excluded will be updated during
+ // execute). The order in which these options are specified is
+ // significant with the first target/pattern that matches determining
+ // the result. If only the --update-include options are specified,
+ // then only the explicitly included prerequisites will be updated.
+ // Otherwise, all prerequisites that are not explicitly excluded will
+ // be updated. If none of these options is specified, then all the
+ // static prerequisites are updated during match. Note also that these
+ // options do not apply to ad hoc prerequisites which are always
+ // updated during match.
+ //
+ // Note that in the future we may extend --cwd support to the non-
+ // byproduct mode where it will also have the `env --cwd` semantics
+ // (thus the matching name). Note that it will also be incompatible
+ // with support for generated files (and thus -I) at least in the make
+ // format where we use relative paths for non-existent files.
+ //
+      // Currently supported dependency formats (--format) are `make`
+ // (default) and `lines`.
+ //
+ // The `make` format is the make dependency declaration in the
+ // `<target>...: [<prerequisite>...]` form. In the non-byproduct mode
+ // a relative prerequisite path is considered non-existent.
+ //
+ // The `lines` format lists targets and/or prerequisites one per line.
+ // If the --dyn-target option is specified then the target list is
+ // expected to come first separated from the prerequisites list with a
+ // blank line. If there are no prerequisites, then the blank line can
+ // be omitted. If the --dyn-target option is not specified, then all
+ // lines are treated as prerequisites and there should be no blank
+ // lines. In the non-byproduct mode a prerequisite line that starts
+ // with a leading space is considered a non-existent prerequisite.
+ // Currently only relative non-existent prerequisites are supported.
+ // Finally, in this mode, if the prerequisite is syntactically a
+ // directory (that is, it ends with a trailing directory separator),
+ // then it is added as fsdir{}. This can be used to handle situations
+ // where the dynamic targets are placed into subdirectories.
+ //
+ // Note on naming: whenever we (may) have two options, one for target
+ // and the other for prerequisite, we omit "prerequisite" as that's
+ // what we extract by default and most commonly. For example:
+ //
+ // --what --target-what
+ // --default-type --target-default-type
+ //
+ path --file; // Read from file rather than stdin.
+
+ string --format; // Dependency format: `make` (default),
+ // or `lines`.
+
+ // Dynamic dependency extraction options.
+ //
+ string --what; // Prerequisite kind, e.g., "header".
+
+ dir_paths --include-path|-I; // Search paths for generated
+ // prerequisites.
+
+ string --default-type; // Default prerequisite type to use if
+ // none could be derived from extension.
+
+ bool --adhoc; // Treat dynamically discovered
+ // prerequisites as ad hoc (so they
+ // don't end up in $<; only in the
+ // normal mode).
+
+ dir_path --cwd; // Builtin's working directory used
+ // to complete relative paths of
+ // prerequisites (only in --byproduct
+ // mode, lines format for existing
+ // paths).
+
+ bool --drop-cycles; // Drop prerequisites that are also
+ // targets. Only use if you are sure
+ // such cycles are harmless, that is,
+ // the output is not affected by such
+ // prerequisites' content.
+
+ // Dynamic target extraction options.
+ //
+ // This functionality is enabled with the --dyn-target option. Only
+ // the make format is supported, where the listed targets are added as
+ // ad hoc group members (unless already specified as static members).
+ // This functionality is not available in the byproduct mode.
+ //
+ string --target-what; // Target kind, e.g., "source".
+
+ string --target-default-type; // Default target type to use if none
+ // could be derived from extension.
+
+ map<string, string> // Extension to target type mapping in
+ --target-extension-type; // the <ext>=<type> form, for example,
+ // h=hxx. This mapping is considered
+ // before attempting to automatically
+ // map the extension and so can be used
+ // to resolve ambiguities.
+
+ dir_path --target-cwd; // Builtin's working directory used to
+ // complete relative paths of targets.
+
+ };
+ }
+ }
+}
diff --git a/libbuild2/build/script/lexer+for-loop.test.testscript b/libbuild2/build/script/lexer+for-loop.test.testscript
new file mode 100644
index 0000000..3f8e6b5
--- /dev/null
+++ b/libbuild2/build/script/lexer+for-loop.test.testscript
@@ -0,0 +1,188 @@
+# file : libbuild2/build/script/lexer+for-loop.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+test.arguments = for-loop
+
+: redirect
+:
+{
+ : pass
+ :
+ $* <"cmd <| 1>|" >>EOO
+ 'cmd'
+ <|
+ '1'
+ >|
+ <newline>
+ EOO
+
+ : null
+ :
+ $* <"cmd <- 1>-" >>EOO
+ 'cmd'
+ <-
+ '1'
+ >-
+ <newline>
+ EOO
+
+ : trace
+ :
+ $* <"cmd 1>!" >>EOO
+ 'cmd'
+ '1'
+ >!
+ <newline>
+ EOO
+
+ : merge
+ :
+ $* <"cmd 1>&2" >>EOO
+ 'cmd'
+ '1'
+ >&
+ '2'
+ <newline>
+ EOO
+
+ : str
+ :
+ $* <"cmd <<<=a 1>>>?b" >>EOO
+ 'cmd'
+ <<<=
+ 'a'
+ '1'
+ >>>?
+ 'b'
+ <newline>
+ EOO
+
+ : str-nn
+ :
+ $* <"cmd <<<=:a 1>>>?:b" >>EOO
+ 'cmd'
+ <<<=:
+ 'a'
+ '1'
+ >>>?:
+ 'b'
+ <newline>
+ EOO
+
+ : str-nn-alias
+ :
+ $* <"cmd <<<:a 1>>>?:b" >>EOO
+ 'cmd'
+ <<<:
+ 'a'
+ '1'
+ >>>?:
+ 'b'
+ <newline>
+ EOO
+
+ : doc
+ :
+ $* <"cmd <<EOI 1>>EOO" >>EOO
+ 'cmd'
+ <<
+ 'EOI'
+ '1'
+ >>
+ 'EOO'
+ <newline>
+ EOO
+
+ : doc-nn
+ :
+ $* <"cmd <<:EOI 1>>?:EOO" >>EOO
+ 'cmd'
+ <<:
+ 'EOI'
+ '1'
+ >>?:
+ 'EOO'
+ <newline>
+ EOO
+
+ : file-cmp
+ :
+ $* <"cmd <=in >?out 2>?err" >>EOO
+ 'cmd'
+ <=
+ 'in'
+ >?
+ 'out'
+ '2'
+ >?
+ 'err'
+ <newline>
+ EOO
+
+ : file-write
+ :
+ $* <"cmd >=out 2>+err" >>EOO
+ 'cmd'
+ >=
+ 'out'
+ '2'
+ >+
+ 'err'
+ <newline>
+ EOO
+}
+
+: cleanup
+:
+{
+ : always
+ :
+ $* <"cmd &file" >>EOO
+ 'cmd'
+ &
+ 'file'
+ <newline>
+ EOO
+
+ : maybe
+ :
+ $* <"cmd &?file" >>EOO
+ 'cmd'
+ &?
+ 'file'
+ <newline>
+ EOO
+
+ : never
+ :
+ $* <"cmd &!file" >>EOO
+ 'cmd'
+ &!
+ 'file'
+ <newline>
+ EOO
+}
+
+: for
+:
+{
+ : form-1
+ :
+ $* <"for x: a" >>EOO
+ 'for'
+ 'x'
+ :
+ 'a'
+ <newline>
+ EOO
+
+ : form-3
+ :
+ $* <"for <<<a x" >>EOO
+ 'for'
+ <<<
+ 'a'
+ 'x'
+ <newline>
+ EOO
+}
diff --git a/libbuild2/build/script/lexer.cxx b/libbuild2/build/script/lexer.cxx
index d849ac9..e0d87fe 100644
--- a/libbuild2/build/script/lexer.cxx
+++ b/libbuild2/build/script/lexer.cxx
@@ -35,10 +35,7 @@ namespace build2
bool q (true); // quotes
if (!esc)
- {
- assert (!state_.empty ());
- esc = state_.top ().escapes;
- }
+ esc = current_state ().escapes;
switch (m)
{
@@ -78,6 +75,19 @@ namespace build2
s2 = " ";
break;
}
+ case lexer_mode::for_loop:
+ {
+ // Leading tokens of the for-loop. Like command_line but
+ // recognizes colon as a separator and lsbrace like value.
+ //
+ // Note that while sensing the form of the for-loop (`for x:...`
+ // vs `for x <...`) we need to make sure that the pre-parsed token
+ // types are valid for the execution phase.
+ //
+ s1 = ":=!|&<> $(#\t\n";
+ s2 = " == ";
+ break;
+ }
default:
{
// Recognize special variable names ($>, $<, $~).
@@ -94,7 +104,7 @@ namespace build2
}
assert (ps == '\0');
- state_.push (
+ mode_impl (
state {m, data, nullopt, false, false, ps, s, n, q, *esc, s1, s2});
}
@@ -103,12 +113,13 @@ namespace build2
{
token r;
- switch (state_.top ().mode)
+ switch (mode ())
{
case lexer_mode::command_line:
case lexer_mode::first_token:
case lexer_mode::second_token:
case lexer_mode::variable_line:
+ case lexer_mode::for_loop:
r = next_line ();
break;
default: return base_lexer::next ();
@@ -128,7 +139,7 @@ namespace build2
xchar c (get ());
uint64_t ln (c.line), cn (c.column);
- state st (state_.top ()); // Make copy (see first/second_token).
+ state st (current_state ()); // Make copy (see first/second_token).
lexer_mode m (st.mode);
auto make_token = [&sep, ln, cn] (type t)
@@ -141,9 +152,10 @@ namespace build2
//
if (st.lsbrace)
{
- assert (m == lexer_mode::variable_line);
+ assert (m == lexer_mode::variable_line ||
+ m == lexer_mode::for_loop);
- state_.top ().lsbrace = false; // Note: st is a copy.
+ current_state ().lsbrace = false; // Note: st is a copy.
if (c == '[' && (!st.lsbrace_unsep || !sep))
return make_token (type::lsbrace);
@@ -156,7 +168,7 @@ namespace build2
// we push any new mode (e.g., double quote).
//
if (m == lexer_mode::first_token || m == lexer_mode::second_token)
- state_.pop ();
+ expire_mode ();
// NOTE: remember to update mode() if adding new special characters.
@@ -167,7 +179,7 @@ namespace build2
// Expire variable value mode at the end of the line.
//
if (m == lexer_mode::variable_line)
- state_.pop ();
+ expire_mode ();
sep = true; // Treat newline as always separated.
return make_token (type::newline);
@@ -179,11 +191,20 @@ namespace build2
case '(': return make_token (type::lparen);
}
+ if (m == lexer_mode::for_loop)
+ {
+ switch (c)
+ {
+ case ':': return make_token (type::colon);
+ }
+ }
+
// Command line operator/separators.
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -205,7 +226,8 @@ namespace build2
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
if (optional<token> t = next_cmd_op (c, sep))
return move (*t);
diff --git a/libbuild2/build/script/lexer.hxx b/libbuild2/build/script/lexer.hxx
index 646d3b9..3f51493 100644
--- a/libbuild2/build/script/lexer.hxx
+++ b/libbuild2/build/script/lexer.hxx
@@ -24,9 +24,10 @@ namespace build2
enum
{
command_line = base_type::value_next,
- first_token, // Expires at the end of the token.
- second_token, // Expires at the end of the token.
- variable_line // Expires at the end of the line.
+ first_token, // Expires at the end of the token.
+ second_token, // Expires at the end of the token.
+ variable_line, // Expires at the end of the line.
+ for_loop // Used for sensing the for-loop leading tokens.
};
lexer_mode () = default;
@@ -67,6 +68,8 @@ namespace build2
static redirect_aliases_type redirect_aliases;
private:
+ using build2::script::lexer::mode; // Getter.
+
token
next_line ();
};
diff --git a/libbuild2/build/script/lexer.test.cxx b/libbuild2/build/script/lexer.test.cxx
index e496f94..d8733ba 100644
--- a/libbuild2/build/script/lexer.test.cxx
+++ b/libbuild2/build/script/lexer.test.cxx
@@ -35,6 +35,7 @@ namespace build2
else if (s == "second-token") m = lexer_mode::second_token;
else if (s == "variable-line") m = lexer_mode::variable_line;
else if (s == "variable") m = lexer_mode::variable;
+ else if (s == "for-loop") m = lexer_mode::for_loop;
else assert (false);
}
diff --git a/libbuild2/build/script/parser+command-if.test.testscript b/libbuild2/build/script/parser+command-if.test.testscript
index a18a885..8b19186 100644
--- a/libbuild2/build/script/parser+command-if.test.testscript
+++ b/libbuild2/build/script/parser+command-if.test.testscript
@@ -279,7 +279,7 @@
cmd
end
EOI
- buildfile:12:1: error: 'end' without preceding 'if'
+ buildfile:12:1: error: 'end' without preceding 'if', 'for', or 'while'
EOE
: before
diff --git a/libbuild2/build/script/parser+command-re-parse.test.testscript b/libbuild2/build/script/parser+command-re-parse.test.testscript
index 56e05b5..3dbdc16 100644
--- a/libbuild2/build/script/parser+command-re-parse.test.testscript
+++ b/libbuild2/build/script/parser+command-re-parse.test.testscript
@@ -1,18 +1,14 @@
# file : libbuild2/build/script/parser+command-re-parse.test.testscript
# license : MIT; see accompanying LICENSE file
-# @@ TMP
-#
-#\
: double-quote
:
$* <<EOI >>EOO
-x = [cmd_line] cmd \">-\" "'<-'"
+x = [cmdline] cmd \">-\" "'<-'"
$x
EOI
cmd '>-' '<-'
EOO
-#\
: literal-re-parse
:
diff --git a/libbuild2/build/script/parser+diag.test.testscript b/libbuild2/build/script/parser+diag.test.testscript
index 30eb859..504c9a4 100644
--- a/libbuild2/build/script/parser+diag.test.testscript
+++ b/libbuild2/build/script/parser+diag.test.testscript
@@ -19,17 +19,99 @@ $* <<EOI >>EOO
name: echo
EOO
-: diag
+: name-operation
:
-$* <<EOI >>~%EOO%
- echo abc
- cat abc
- diag copy >= $>
- cp <- $>
+$* <<EOI >>EOO
+ a = 'b'
EOI
- %diag: copy >= .+file\{driver\.\}%
+ name: update
EOO
+: preamble
+:
+{
+ : disambiguate
+ :
+ $* <<EOI >>~%EOO%
+ echo abc | set v
+ cat abc | set v
+ diag copy >= $>
+ cp <- $>
+ EOI
+ echo abc | set v
+ cat abc | set v
+ %diag: copy >= .+file\{driver\.\}%
+ EOO
+
+ : name
+ :
+ $* <<EOI >>EOO
+ n = foo
+ diag copy $n
+ cp $n $>
+ EOI
+ diag: copy foo
+ EOO
+
+ : quoted
+ :
+ $* <<EOI >'diag: foo'
+ f = foo
+ diag "$f"
+ EOI
+
+ : quoted-eval
+ :
+ $* <<EOI >'diag: foo'
+ f = foo
+ diag "($f)"
+ EOI
+
+ : temp_dir
+ :
+ {
+ test.options += -t
+
+ : no
+ :
+ $* <<EOI >false
+ f = foo
+ diag $f
+ f = $~/f
+ foo "$f"
+ EOI
+
+ : no-depdb
+ :
+ $* <<EOI >false
+ f = $~/f
+ depdb hash "$f"
+ diag $f
+ f = $~/f
+ foo "$f"
+ EOI
+
+ : yes
+ :
+ $* <<EOI >true
+ f = $~/f
+ diag $f
+ foo $f
+ EOI
+
+ : yes-depdb
+ :
+ $* <<EOI >true
+ f = $~/f
+ depdb hash "$f"
+ f = $~/t
+ diag $f
+ f = $~/f
+ foo "$f"
+ EOI
+ }
+}
+
: ambiguity
:
{
@@ -67,16 +149,6 @@ $* <<EOI >>~%EOO%
info: consider specifying it explicitly with the 'diag' recipe attribute
info: or provide custom low-verbosity diagnostics with the 'diag' builtin
EOE
-
- : none
- :
- $* <<EOI 2>>EOE != 0
- a = 'b'
- EOI
- buildfile:11:1: error: unable to deduce low-verbosity script diagnostics name
- info: consider specifying it explicitly with the 'diag' recipe attribute
- info: or provide custom low-verbosity diagnostics with the 'diag' builtin
- EOE
}
: inside-if
diff --git a/libbuild2/build/script/parser+expansion.test.testscript b/libbuild2/build/script/parser+expansion.test.testscript
index 086ec8f..eb99ae2 100644
--- a/libbuild2/build/script/parser+expansion.test.testscript
+++ b/libbuild2/build/script/parser+expansion.test.testscript
@@ -24,19 +24,15 @@ EOI
buildfile:12:5: info: while parsing string 'xy'a bc'
EOE
-# @@ TMP
-#
-#\
: invalid-redirect
:
$* <<EOI 2>>EOE != 0
-x = [cmd_line] "1>&a"
+x = [cmdline] "1>&a"
cmd $x
EOI
<string>:1:4: error: stdout merge redirect file descriptor must be 2
buildfile:12:5: info: while parsing string '1>&a'
EOE
-#\
: expansion-re-parse
:
diff --git a/libbuild2/build/script/parser+for.test.testscript b/libbuild2/build/script/parser+for.test.testscript
new file mode 100644
index 0000000..847b253
--- /dev/null
+++ b/libbuild2/build/script/parser+for.test.testscript
@@ -0,0 +1,656 @@
+# file : libbuild2/build/script/parser+for.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: form-1
+:
+: for x: ...
+:
+{
+ : for
+ :
+ {
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for
+ cmd
+ end
+ EOI
+ buildfile:11:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for x: a b
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : null
+ :
+ $* <<EOI >:''
+ for x: [null]
+ cmd $x
+ end
+ EOI
+
+ : empty
+ :
+ $* <<EOI >:''
+ for x:
+ cmd $x
+ end
+ EOI
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x: $vs
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : typed-values
+ :
+ $* <<EOI >>~%EOO%
+ for x: [dir_paths] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem-value
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: [strings] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : defined-var
+ :
+ $* <<EOI >>EOO
+ x = x
+
+ for x: a b
+ cmd $x
+ end
+
+ cmd $x
+ EOI
+ cmd a
+ cmd b
+ cmd b
+ EOO
+ }
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for x: a b
+ cmd1 $x # 1
+ if ($x == "a") # 2
+ cmd2 # 3
+ for y: x y
+ cmd3 # 4
+ end
+ else
+ cmd4 # 5
+ end
+ cmd5 # 6
+ end
+ cmd6 # 7
+ EOI
+ cmd1 a # 1 i1
+ ? true # 2 i1
+ cmd2 # 3 i1
+ cmd3 # 4 i1 i1
+ cmd3 # 4 i1 i2
+ cmd5 # 6 i1
+ cmd1 b # 1 i2
+ ? false # 2 i2
+ cmd4 # 5 i2
+ cmd5 # 6 i2
+ cmd6 # 7
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ EOI
+ buildfile:12:1: error: expected closing 'end'
+ EOE
+ }
+}
+
+: form-2
+:
+: ... | for x
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x != 0
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x | echo x
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x|echo x
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x && echo x
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&&echo x
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x | for x
+ cmd
+ end
+ EOI
+ buildfile:11:24: error: command expression involving for-loop
+ EOE
+
+ : expression-before-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x|for x
+ cmd
+ end
+ EOI
+ buildfile:11:22: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x &f
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&f
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x >a
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x>a
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: output redirect in for-loop
+ EOE
+
+ : stdin-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x <a
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: stdin is both piped and redirected
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for
+ cmd
+ end
+ EOI
+ buildfile:11:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ echo $vs | for x
+ cmd $x
+ end
+ EOI
+ echo a b | for x
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x [dir_path]
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x [dir_path]
+ EOO
+ }
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ echo 'a b' | for x # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ echo x y | for y # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end
+ cmd6 # 9
+ EOI
+ echo 'a b' | for x # 1
+ cmd6 # 9
+ EOO
+ }
+}
+
+: form-3
+:
+: for x <...
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a != 0
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a | echo x
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x|echo x
+ cmd
+ end
+ EOI
+ buildfile:11:9: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a && echo x
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&&echo x
+ cmd
+ end
+ EOI
+ buildfile:11:9: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && for x <a
+ cmd
+ end
+ EOI
+ buildfile:11:15: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a &f
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: cleanup in for-loop
+ EOE
+
+ : cleanup-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for &f x <a
+ cmd
+ end
+ EOI
+ buildfile:11:5: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&f
+ cmd
+ end
+ EOI
+ buildfile:11:9: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ for x >a
+ cmd
+ end
+ EOI
+ buildfile:11:7: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for >a x
+ cmd
+ end
+ EOI
+ buildfile:11:5: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for x>a
+ cmd
+ end
+ EOI
+ buildfile:11:6: error: output redirect in for-loop
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a
+ cmd
+ end
+ EOI
+ buildfile:11:1: error: for: missing variable name
+ EOE
+
+ : quoted-opt
+ :
+ $* <<EOI >>EOO
+ o = -w
+ for "$o" x <'a b'
+ cmd $x
+ end
+ for "($o)" x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ for -w x <'a b'
+ EOO
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for -w x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x <$vs
+ cmd $x
+ end
+ EOI
+ for x b <a
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ for -w x [dir_path] <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x [dir_path] <'a b'
+ EOO
+ }
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for -w x <'a b' # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ for -w y <'x y' # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end
+ cmd6 # 9
+ EOI
+ for -w x <'a b' # 1
+ cmd6 # 9
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ EOI
+ buildfile:12:1: error: expected closing 'end'
+ EOE
+ }
+}
diff --git a/libbuild2/build/script/parser+while.test.testscript b/libbuild2/build/script/parser+while.test.testscript
new file mode 100644
index 0000000..5587291
--- /dev/null
+++ b/libbuild2/build/script/parser+while.test.testscript
@@ -0,0 +1,133 @@
+# file : libbuild2/build/script/parser+while.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: while
+:
+{
+ : true
+ :
+ $* <<EOI >>EOO
+ while ($v != "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? true
+ cmd ''
+ ? true
+ cmd a
+ ? false
+ EOO
+
+ : false
+ :
+ $* <<EOI >>EOO
+ while ($v == "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? false
+ EOO
+
+ : without-command
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd
+ end
+ EOI
+ buildfile:11:6: error: missing program
+ EOE
+}
+
+: end
+:
+{
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ while true
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+}
+
+: elif
+:
+{
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ while false
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+}
+
+: nested
+:
+{
+ $* -l -r <<EOI >>EOO
+ while ($v != "aa") # 1
+ cmd1 "$v" # 2
+ if ($v == "a") # 3
+ cmd2 # 4
+ while ($v2 != "$v") # 5
+ cmd3 # 6
+ v2=$v
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ v = "$(v)a"
+ end
+ EOI
+ ? true # 1 i1
+ cmd1 '' # 2 i1
+ ? false # 3 i1
+ cmd4 # 7 i1
+ cmd5 # 8 i1
+ ? true # 1 i2
+ cmd1 a # 2 i2
+ ? true # 3 i2
+ cmd2 # 4 i2
+ ? true # 5 i2 i1
+ cmd3 # 6 i2 i1
+ ? false # 5 i2 i2
+ cmd5 # 8 i2
+ ? false # 1 i3
+ EOO
+}
+
+: contained
+:
+{
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ EOI
+ buildfile:12:1: error: expected closing 'end'
+ EOE
+}
+
+: var
+:
+$* <<EOI >>EOO
+while ($v1 != "a")
+ v1 = "$(v1)a"
+ v2 = "$v1"
+end
+cmd $v1
+EOI
+? true
+? false
+cmd a
+EOO
diff --git a/libbuild2/build/script/parser.cxx b/libbuild2/build/script/parser.cxx
index 217fa11..d449f4b 100644
--- a/libbuild2/build/script/parser.cxx
+++ b/libbuild2/build/script/parser.cxx
@@ -3,13 +3,25 @@
#include <libbuild2/build/script/parser.hxx>
+#include <cstring> // strcmp()
+#include <sstream>
+
#include <libbutl/builtin.hxx>
+#include <libbutl/path-pattern.hxx>
+#include <libbuild2/depdb.hxx>
+#include <libbuild2/dyndep.hxx>
#include <libbuild2/function.hxx>
#include <libbuild2/algorithm.hxx>
+#include <libbuild2/make-parser.hxx>
+
+#include <libbuild2/adhoc-rule-buildscript.hxx>
+
+#include <libbuild2/script/run.hxx>
#include <libbuild2/build/script/lexer.hxx>
#include <libbuild2/build/script/runner.hxx>
+#include <libbuild2/build/script/builtin-options.hxx>
using namespace std;
using namespace butl;
@@ -49,7 +61,7 @@ namespace build2
pbase_ = scope_->src_path_;
- file_based_ = tt.is_a<file> ();
+ file_based_ = tt.is_a<file> () || tt.is_a<group> ();
perform_update_ = find (as.begin (), as.end (), perform_update_id) !=
as.end ();
@@ -81,15 +93,31 @@ namespace build2
info << "consider using 'depdb' builtin to track its result "
<< "changes";
- // Diagnose absent/ambigous script name.
+ // Diagnose computed variable exansions.
+ //
+ if (computed_var_)
+ fail (*computed_var_)
+ << "expansion of computed variable is only allowed in depdb "
+ << "preamble" <<
+ info << "consider using 'depdb' builtin to track its value "
+ << "changes";
+
+ // Diagnose absent/ambiguous script name. But try to deduce an absent
+ // name from the script operation first.
//
{
diag_record dr;
- if (!diag_name_ && !diag_line_)
+ if (!diag_name_ && diag_preamble_.empty ())
{
- dr << fail (s.start_loc)
- << "unable to deduce low-verbosity script diagnostics name";
+ if (as.size () == 1)
+ {
+ diag_name_ = make_pair (ctx->operation_table[as[0].operation ()],
+ location ());
+ }
+ else
+ dr << fail (s.start_loc)
+ << "unable to deduce low-verbosity script diagnostics name";
}
else if (diag_name2_)
{
@@ -115,16 +143,23 @@ namespace build2
// Save the script name or custom diagnostics line.
//
- assert (diag_name_.has_value () != diag_line_.has_value ());
+ assert (diag_name_.has_value () == diag_preamble_.empty ());
if (diag_name_)
s.diag_name = move (diag_name_->first);
else
- s.diag_line = move (diag_line_->first);
+ s.diag_preamble = move (diag_preamble_);
// Save the custom dependency change tracking lines, if present.
//
s.depdb_clear = depdb_clear_.has_value ();
+ s.depdb_value = depdb_value_;
+ if (depdb_dyndep_)
+ {
+ s.depdb_dyndep = depdb_dyndep_->second;
+ s.depdb_dyndep_byproduct = depdb_dyndep_byproduct_;
+ s.depdb_dyndep_dyn_target = depdb_dyndep_dyn_target_;
+ }
s.depdb_preamble = move (depdb_preamble_);
return s;
@@ -166,13 +201,27 @@ namespace build2
}
}
+ // Parse a logical line, handling the flow control constructs
+ // recursively.
+ //
+ // If the flow control construct type is specified, then this line is
+ // assumed to belong to such a construct.
+ //
void parser::
- pre_parse_line (token& t, type& tt, bool if_line)
+ pre_parse_line (token& t, type& tt, optional<line_type> fct)
{
+ // enter: next token is peeked at (type in tt)
+ // leave: newline
+
+ assert (!fct ||
+ *fct == line_type::cmd_if ||
+ *fct == line_type::cmd_while ||
+ *fct == line_type::cmd_for_stream ||
+ *fct == line_type::cmd_for_args);
+
// Determine the line type/start token.
//
- line_type lt (
- pre_parse_line_start (t, tt, lexer_mode::second_token));
+ line_type lt (pre_parse_line_start (t, tt, lexer_mode::second_token));
line ln;
@@ -205,22 +254,148 @@ namespace build2
break;
}
+ //
+ // See pre_parse_line_start() for details.
+ //
+ case line_type::cmd_for_args: assert (false); break;
+ case line_type::cmd_for_stream:
+ {
+ // First we need to sense the next few tokens and detect which
+ // form of the loop we are dealing with, the first (for x: ...)
+ // or the third (x <...) one. Note that the second form (... | for
+ // x) is handled separately.
+ //
+ // If the next token doesn't look like a variable name, then this
+ // is the third form. Otherwise, if colon follows the variable
+ // name, potentially after the attributes, then this is the first
+ // form and the third form otherwise.
+ //
+ // Note that for the third form we will need to pass the 'for'
+ // token as a program name to the command expression parsing
+ // function since it will be gone from the token stream by that
+ // time. Thus, we save it. We also need to make sure the sensing
+ // always leaves the variable name token in t/tt.
+ //
+ // Note also that in this model it won't be possible to support
+ // options in the first form.
+ //
+ token pt (t);
+ assert (pt.type == type::word && pt.value == "for");
+
+ mode (lexer_mode::for_loop);
+ next (t, tt);
+
+ // Note that we also consider special variable names (those that
+ // don't clash with the command line elements like redirects, etc)
+ // to later fail gracefully.
+ //
+ string& n (t.value);
+
+ if (tt == type::word && t.qtype == quote_type::unquoted &&
+ (n[0] == '_' || alpha (n[0]) || // Variable.
+ n == "~")) // Special variable.
+ {
+ // Detect patterns analogous to parse_variable_name() (so we
+ // diagnose `for x[string]: ...`).
+ //
+ if (n.find_first_of ("[*?") != string::npos)
+ fail (t) << "expected variable name instead of " << n;
+
+ if (special_variable (n))
+ fail (t) << "attempt to set '" << n << "' special variable";
+
+ // Parse out the element attributes, if present.
+ //
+ if (lexer_->peek_char ().first == '[')
+ {
+ // Save the variable name token before the attributes parsing
+ // and restore it afterwards. Also make sure that the token
+ // which follows the attributes stays in the stream.
+ //
+ token vt (move (t));
+ next_with_attributes (t, tt);
+
+ attributes_push (t, tt,
+ true /* standalone */,
+ false /* next_token */);
+
+ t = move (vt);
+ tt = t.type;
+ }
+
+ if (lexer_->peek_char ().first == ':')
+ lt = line_type::cmd_for_args;
+ }
+
+ if (lt == line_type::cmd_for_stream) // for x <...
+ {
+ // At this point t/tt contains the variable name token. Now
+ // pre-parse the command expression in the command_line lexer
+ // mode starting from this position and also passing the 'for'
+ // token as a program name.
+ //
+ // Note that the fact that the potential attributes are already
+ // parsed doesn't affect the command expression pre-parsing.
+ // Also note that they will be available during the execution
+ // phase being replayed.
+ //
+ expire_mode (); // Expire the for-loop lexer mode.
+
+ parse_command_expr_result r (
+ parse_command_expr (t, tt,
+ lexer::redirect_aliases,
+ move (pt)));
+
+ assert (r.for_loop);
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t;
+
+ parse_here_documents (t, tt, r);
+ }
+ else // for x: ...
+ {
+ next (t, tt);
+
+ assert (tt == type::colon);
+
+ expire_mode (); // Expire the for-loop lexer mode.
+
+ // Parse the value similar to the var line type (see above).
+ //
+ mode (lexer_mode::variable_line);
+ parse_variable_line (t, tt);
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t << " after for";
+ }
+
+ ln.var = nullptr;
+ ++level_;
+ break;
+ }
case line_type::cmd_elif:
case line_type::cmd_elifn:
case line_type::cmd_else:
- case line_type::cmd_end:
{
- if (!if_line)
- {
+ if (!fct || *fct != line_type::cmd_if)
fail (t) << lt << " without preceding 'if'";
- }
+ }
+ // Fall through.
+ case line_type::cmd_end:
+ {
+ if (!fct)
+ fail (t) << lt << " without preceding 'if', 'for', or 'while'";
}
// Fall through.
case line_type::cmd_if:
case line_type::cmd_ifn:
+ case line_type::cmd_while:
next (t, tt); // Skip to start of command.
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
+ if (lt == line_type::cmd_if ||
+ lt == line_type::cmd_ifn ||
+ lt == line_type::cmd_while)
++level_;
else if (lt == line_type::cmd_end)
--level_;
@@ -228,15 +403,24 @@ namespace build2
// Fall through.
case line_type::cmd:
{
- pair<command_expr, here_docs> p;
+ parse_command_expr_result r;
if (lt != line_type::cmd_else && lt != line_type::cmd_end)
- p = parse_command_expr (t, tt, lexer::redirect_aliases);
+ r = parse_command_expr (t, tt, lexer::redirect_aliases);
+
+ if (r.for_loop)
+ {
+ lt = line_type::cmd_for_stream;
+ ln.var = nullptr;
+
+ ++level_;
+ }
if (tt != type::newline)
fail (t) << "expected newline instead of " << t;
- parse_here_documents (t, tt, p);
+ parse_here_documents (t, tt, r);
+
break;
}
}
@@ -254,12 +438,67 @@ namespace build2
*save_line_ = move (ln);
}
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
+ switch (lt)
{
- tt = peek (lexer_mode::first_token);
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ {
+ tt = peek (lexer_mode::first_token);
+
+ pre_parse_if_else (t, tt);
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ tt = peek (lexer_mode::first_token);
+
+ pre_parse_loop (t, tt, lt);
+ break;
+ }
+ default: break;
+ }
+ }
+
+ // Pre-parse the flow control construct block line.
+ //
+ void parser::
+ pre_parse_block_line (token& t, type& tt, line_type bt)
+ {
+ // enter: peeked first token of the line (type in tt)
+ // leave: newline
+
+ const location ll (get_location (peeked ()));
+
+ if (tt == type::eos)
+ fail (ll) << "expected closing 'end'";
- pre_parse_if_else (t, tt);
+ line_type fct; // Flow control type the block type relates to.
+
+ switch (bt)
+ {
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_else:
+ {
+ fct = line_type::cmd_if;
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ fct = bt;
+ break;
+ }
+ default: assert(false);
}
+
+ pre_parse_line (t, tt, fct);
+ assert (tt == type::newline);
}
void parser::
@@ -268,8 +507,7 @@ namespace build2
// enter: peeked first token of next line (type in tt)
// leave: newline
- // Parse lines until we see closing 'end'. Nested if-else blocks are
- // handled recursively.
+ // Parse lines until we see closing 'end'.
//
for (line_type bt (line_type::cmd_if); // Current block.
;
@@ -277,25 +515,21 @@ namespace build2
{
const location ll (get_location (peeked ()));
- if (tt == type::eos)
- fail (ll) << "expected closing 'end'";
-
// Parse one line. Note that this one line can still be multiple
- // lines in case of if-else. In this case we want to view it as
- // cmd_if, not cmd_end. Thus remember the start position of the
- // next logical line.
+ // lines in case of a flow control construct. In this case we want
+ // to view it as cmd_if, not cmd_end. Thus remember the start
+ // position of the next logical line.
//
size_t i (script_->body.size ());
- pre_parse_line (t, tt, true /* if_line */);
- assert (tt == type::newline);
+ pre_parse_block_line (t, tt, bt);
line_type lt (script_->body[i].type);
// First take care of 'end'.
//
if (lt == line_type::cmd_end)
- return;
+ break;
// Check if-else block sequencing.
//
@@ -319,6 +553,29 @@ namespace build2
}
}
+ void parser::
+ pre_parse_loop (token& t, type& tt, line_type lt)
+ {
+ // enter: peeked first token of next line (type in tt)
+ // leave: newline
+
+ assert (lt == line_type::cmd_while ||
+ lt == line_type::cmd_for_stream ||
+ lt == line_type::cmd_for_args);
+
+ // Parse lines until we see closing 'end'.
+ //
+ for (;; tt = peek (lexer_mode::first_token))
+ {
+ size_t i (script_->body.size ());
+
+ pre_parse_block_line (t, tt, lt);
+
+ if (script_->body[i].type == line_type::cmd_end)
+ break;
+ }
+ }
+
command_expr parser::
parse_command_line (token& t, type& tt)
{
@@ -329,12 +586,12 @@ namespace build2
//
assert (!pre_parse_);
- pair<command_expr, here_docs> p (
+ parse_command_expr_result pr (
parse_command_expr (t, tt, lexer::redirect_aliases));
assert (tt == type::newline);
- parse_here_documents (t, tt, p);
+ parse_here_documents (t, tt, pr);
assert (tt == type::newline);
// @@ Note that currently running programs via a runner (e.g., see
@@ -347,7 +604,7 @@ namespace build2
// passed to the environment constructor, similar to passing the
// script deadline.
//
- return move (p.first);
+ return move (pr.expr);
}
//
@@ -408,6 +665,12 @@ namespace build2
fail (l) << "'" << v << "' call via 'env' builtin";
};
+ auto diag_loc = [this] ()
+ {
+ assert (!diag_preamble_.empty ());
+ return diag_preamble_.back ().tokens[0].location ();
+ };
+
if (v == "diag")
{
verify ();
@@ -424,24 +687,41 @@ namespace build2
}
else // Custom diagnostics.
{
- assert (diag_line_);
-
fail (l) << "multiple 'diag' builtin calls" <<
- info (diag_line_->second) << "previous call is here";
+ info (diag_loc ()) << "previous call is here";
}
}
- // Instruct the parser to save the diag builtin line separately
- // from the script lines, when it is fully parsed. Note that it
- // will be executed prior to the script body execution to obtain
- // the custom diagnostics.
+ // Move the script body to the end of the diag preamble.
//
- diag_line_ = make_pair (line (), l);
- save_line_ = &diag_line_->first;
- diag_weight_ = 4;
+ // Note that we move into the preamble whatever is there and delay
+ // the check until the execution (see the depdb preamble
+ // collecting for the reasoning).
+ //
+ lines& ls (script_->body);
+ diag_preamble_.insert (diag_preamble_.end (),
+ make_move_iterator (ls.begin ()),
+ make_move_iterator (ls.end ()));
+ ls.clear ();
+
+ // Also move the body_temp_dir flag, if it is true.
+ //
+ if (script_->body_temp_dir)
+ {
+ script_->diag_preamble_temp_dir = true;
+ script_->body_temp_dir = false;
+ }
+
+ // Similar to the depdb preamble collection, instruct the parser
+ // to save the depdb builtin line separately from the script
+ // lines.
+ //
+ diag_preamble_.push_back (line ());
+ save_line_ = &diag_preamble_.back ();
- diag_name_ = nullopt;
- diag_name2_ = nullopt;
+ diag_weight_ = 4;
+ diag_name_ = nullopt;
+ diag_name2_ = nullopt;
// Note that the rest of the line contains the builtin argument to
// be printed, thus we parse it in the value lexer mode.
@@ -463,17 +743,16 @@ namespace build2
{
if (a != perform_update_id)
fail (l) << "'depdb' builtin cannot be used to "
- << ctx.meta_operation_table[a.meta_operation ()].name
- << ' ' << ctx.operation_table[a.operation ()];
+ << ctx->meta_operation_table[a.meta_operation ()].name
+ << ' ' << ctx->operation_table[a.operation ()];
}
if (!file_based_)
- fail (l) << "'depdb' builtin can only be used for file-based "
- << "targets";
+ fail (l) << "'depdb' builtin can only be used for file- or "
+ << "file group-based targets";
- if (diag_line_)
- fail (diag_line_->second)
- << "'diag' builtin call before 'depdb' call" <<
+ if (!diag_preamble_.empty ())
+ fail (diag_loc ()) << "'diag' builtin call before 'depdb' call" <<
info (l) << "'depdb' call is here";
// Note that the rest of the line contains the builtin command
@@ -487,7 +766,11 @@ namespace build2
next (t, tt);
if (tt != type::word ||
- (v != "clear" && v != "hash" && v != "string" && v != "env"))
+ (v != "clear" &&
+ v != "hash" &&
+ v != "string" &&
+ v != "env" &&
+ v != "dyndep"))
{
fail (get_location (t))
<< "expected 'depdb' builtin command instead of " << t;
@@ -527,12 +810,49 @@ namespace build2
// the referenced variable list, since it won't be used.
//
depdb_clear_ = l;
- save_line_ = nullptr;
+ save_line_ = nullptr;
script_->vars.clear ();
}
else
{
+ // Verify depdb-dyndep is last and detect the byproduct flavor.
+ //
+ if (v == "dyndep")
+ {
+ // Note that for now we do not allow multiple dyndep calls.
+ // But we may wan to relax this later (though alternating
+ // targets with prerequisites in depdb may be tricky -- maybe
+ // still only allow additional targets in the first call).
+ //
+ if (!depdb_dyndep_)
+ depdb_dyndep_ = make_pair (l, depdb_preamble_.size ());
+ else
+ fail (l) << "multiple 'depdb dyndep' calls" <<
+ info (depdb_dyndep_->first) << "previous call is here";
+
+ if (peek () == type::word)
+ {
+ const string& v (peeked ().value);
+
+ // Note: --byproduct and --dyn-target are mutually
+ // exclusive.
+ //
+ if (v == "--byproduct")
+ depdb_dyndep_byproduct_ = true;
+ else if (v == "--dyn-target")
+ depdb_dyndep_dyn_target_ = true;
+ }
+ }
+ else
+ {
+ if (depdb_dyndep_)
+ fail (l) << "'depdb " << v << "' after 'depdb dyndep'" <<
+ info (depdb_dyndep_->first) << "'depdb dyndep' call is here";
+ }
+
+ depdb_value_ = depdb_value_ || (v == "string" || v == "hash");
+
// Move the script body to the end of the depdb preamble.
//
// Note that at this (pre-parsing) stage we cannot evaluate if
@@ -557,10 +877,12 @@ namespace build2
script_->body_temp_dir = false;
}
- // Reset the impure function call info since it's valid for the
- // depdb preamble.
+ // Reset the impure function call and computed variable
+ // expansion tracking since both are valid for the depdb
+ // preamble.
//
impure_func_ = nullopt;
+ computed_var_ = nullopt;
// Instruct the parser to save the depdb builtin line separately
// from the script lines, when it is fully parsed. Note that the
@@ -612,10 +934,48 @@ namespace build2
//
// This is also the reason why we add a diag frame.
//
+ // The problem turned out to be worse than originally thought: we
+ // may call a function (for example, as part of if) with invalid
+ // arguments. And this could happen in the depdb preamble, which
+ // means we cannot fix this by moving the depdb builtin (which must
+ // come after the preamble). So let's peek at what's ahead and omit
+ // the expansion if it's anything iffy, namely, eval context or
+ // function call.
+ //
+ bool skip_diag (false);
if (pre_parse_ && diag_weight_ != 4)
{
- pre_parse_ = false; // Make parse_names() perform expansions.
- pre_parse_suspended_ = true;
+ // Based on the buildfile expansion parsing logic.
+ //
+ if (tt == type::lparen) // Evaluation context.
+ skip_diag = true;
+ else if (tt == type::dollar)
+ {
+ type ptt (peek (lexer_mode::variable));
+
+ if (!peeked ().separated)
+ {
+ if (ptt == type::lparen)
+ {
+ // While strictly speaking this can also be a function call,
+ // this is highly unusual and we will assume it's a variable
+ // expansion.
+ }
+ else if (ptt == type::word)
+ {
+ pair<char, bool> r (lexer_->peek_char ());
+
+ if (r.first == '(' && !r.second) // Function call.
+ skip_diag = true;
+ }
+ }
+ }
+
+ if (!skip_diag)
+ {
+ pre_parse_ = false; // Make parse_names() perform expansions.
+ pre_parse_suspended_ = true;
+ }
}
auto df = make_diag_frame (
@@ -643,7 +1003,7 @@ namespace build2
pre_parse_ = true;
}
- if (pre_parse_ && diag_weight_ == 4)
+ if (pre_parse_ && (diag_weight_ == 4 || skip_diag))
return nullopt;
}
@@ -665,6 +1025,19 @@ namespace build2
return nullopt;
}
+ // If this is a value of the special cmdline type, then only do
+ // certain tests below if the value is not quoted and doesn't contain
+ // any characters that would be consumed by re-lexing.
+ //
+ // This is somewhat of a hack but handling this properly would not
+ // only require unquoting but also keeping track of which special
+ // characters were quoted (and thus should be treated literally) and
+ // which were not (and thus should act as separators, etc).
+ //
+ bool qs (pr.type != nullptr &&
+ pr.type->is_a<cmdline> () &&
+ need_cmdline_relex (ns[0].value));
+
// We have to handle process_path[_ex] and executable target. The
// process_path[_ex] we may have to recognize syntactically because
// of the loss of type, for example:
@@ -698,10 +1071,14 @@ namespace build2
pp_vt = pr.type;
ns.clear ();
}
- else if (ns[0].file ())
+ else if (ns[0].file () && !qs)
{
// Find the end of the value.
//
+ // Note that here we ignore the whole cmdline issue (see above)
+ // for the further values assuming that they are unquoted and
+ // don't contain any special characters.
+ //
auto b (ns.begin ());
auto i (value_traits<process_path_ex>::find_end (ns));
@@ -768,40 +1145,43 @@ namespace build2
//
else if (!ns[0].simple ())
{
- if (const target* t = search_existing (
- ns[0], *scope_, ns[0].pair ? ns[1].dir : empty_dir_path))
+ if (!qs)
{
- if (const auto* et = t->is_a<exe> ())
+ if (const target* t = search_existing (
+ ns[0], *scope_, ns[0].pair ? ns[1].dir : empty_dir_path))
{
- if (pre_parse_)
+ if (const auto* et = t->is_a<exe> ())
{
- if (auto* n = et->lookup_metadata<string> ("name"))
+ if (pre_parse_)
{
- set_diag (*n, 3);
- return nullopt;
+ if (auto* n = et->lookup_metadata<string> ("name"))
+ {
+ set_diag (*n, 3);
+ return nullopt;
+ }
+ // Fall through.
}
- // Fall through.
- }
- else
- {
- process_path pp (et->process_path ());
+ else
+ {
+ process_path pp (et->process_path ());
- if (pp.empty ())
- fail (l) << "target " << *et << " is out of date" <<
- info << "consider specifying it as a prerequisite of "
- << environment_->target;
+ if (pp.empty ())
+ fail (l) << "target " << *et << " is out of date" <<
+ info << "consider specifying it as a prerequisite of "
+ << environment_->target;
- ns.erase (ns.begin (), ns.begin () + (ns[0].pair ? 2 : 1));
- return optional<process_path> (move (pp));
+ ns.erase (ns.begin (), ns.begin () + (ns[0].pair ? 2 : 1));
+ return optional<process_path> (move (pp));
+ }
}
- }
- if (pre_parse_)
- {
- diag_record dr (fail (l));
- dr << "unable to deduce low-verbosity script diagnostics name "
- << "from target " << *t;
- suggest_diag (dr);
+ if (pre_parse_)
+ {
+ diag_record dr (fail (l));
+ dr << "unable to deduce low-verbosity script diagnostics name "
+ << "from target " << *t;
+ suggest_diag (dr);
+ }
}
}
@@ -819,26 +1199,29 @@ namespace build2
{
// If we are here, the name is simple and is not part of a pair.
//
- string& v (ns[0].value);
+ if (!qs)
+ {
+ string& v (ns[0].value);
- // Try to interpret the name as a builtin.
- //
- const builtin_info* bi (builtins.find (v));
+ // Try to interpret the name as a builtin.
+ //
+ const builtin_info* bi (builtins.find (v));
- if (bi != nullptr)
- {
- set_diag (move (v), bi->weight);
- return nullopt;
- }
- //
- // Try to interpret the name as a pseudo-builtin.
- //
- // Note that both of them has the zero weight and cannot be picked
- // up as a script name.
- //
- else if (v == "set" || v == "exit")
- {
- return nullopt;
+ if (bi != nullptr)
+ {
+ set_diag (move (v), bi->weight);
+ return nullopt;
+ }
+ //
+ // Try to interpret the name as a pseudo-builtin.
+ //
+ // Note that both of them has the zero weight and cannot be picked
+ // up as a script name.
+ //
+ else if (v == "set" || v == "exit")
+ {
+ return nullopt;
+ }
}
diag_record dr (fail (l));
@@ -863,8 +1246,9 @@ namespace build2
// Note that we rely on "small function object" optimization here.
//
auto exec_cmd = [this] (token& t, build2::script::token_type& tt,
- size_t li,
+ const iteration_index* ii, size_t li,
bool single,
+ const function<command_function>& cf,
const location& ll)
{
// We use the 0 index to signal that this is the only command.
@@ -875,7 +1259,7 @@ namespace build2
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- runner_->run (*environment_, ce, li, ll);
+ runner_->run (*environment_, ce, ii, li, cf, ll);
};
exec_lines (s.body, exec_cmd);
@@ -884,129 +1268,189 @@ namespace build2
runner_->leave (e, s.end_loc);
}
+ // Return true if the specified expression executes the set builtin or
+ // is a for-loop.
+ //
+ static bool
+ valid_preamble_cmd (const command_expr& ce,
+ const function<command_function>& cf)
+ {
+ return find_if (
+ ce.begin (), ce.end (),
+ [&cf] (const expr_term& et)
+ {
+ const process_path& p (et.pipe.back ().program);
+ return p.initial == nullptr &&
+ (p.recall.string () == "set" ||
+ (cf != nullptr && p.recall.string () == "for"));
+ }) != ce.end ();
+ }
+
void parser::
- execute_depdb_preamble (const scope& rs, const scope& bs,
- environment& e, const script& s, runner& r,
- depdb& dd)
+ exec_depdb_preamble (action a, const scope& bs, const target& t,
+ environment& e, const script& s, runner& r,
+ lines_iterator begin, lines_iterator end,
+ depdb& dd,
+ dynamic_targets* dyn_targets,
+ bool* update,
+ optional<timestamp> mt,
+ bool* deferred_failure,
+ dyndep_byproduct* byp)
{
- tracer trace ("execute_depdb_preamble");
+ tracer trace ("exec_depdb_preamble");
// The only valid lines in the depdb preamble are the depdb builtin
// itself as well as the variable assignments, including via the set
// builtin.
- pre_exec (rs, bs, e, &s, &r);
+ pre_exec (*bs.root_scope (), bs, e, &s, &r);
// Let's "wrap up" the objects we operate upon into the single object
// to rely on "small function object" optimization.
//
struct
{
+ tracer& trace;
+
+ action a;
+ const scope& bs;
+ const target& t;
+
environment& env;
const script& scr;
+
depdb& dd;
- tracer& trace;
- } ctx {e, s, dd, trace};
-
- auto exec_cmd = [&ctx, this]
- (token& t,
- build2::script::token_type& tt,
- size_t li,
- bool /* single */,
- const location& ll)
+ dynamic_targets* dyn_targets;
+ bool* update;
+ bool* deferred_failure;
+ optional<timestamp> mt;
+ dyndep_byproduct* byp;
+
+ } data {
+ trace,
+ a, bs, t,
+ e, s,
+ dd, dyn_targets, update, deferred_failure, mt, byp};
+
+ auto exec_cmd = [this, &data] (token& t,
+ build2::script::token_type& tt,
+ const iteration_index* ii, size_t li,
+ bool /* single */,
+ const function<command_function>& cf,
+ const location& ll)
{
+ // Note that we never reset the line index to zero (as we do in
+ // execute_body()) assuming that there are some script body commands
+ // to follow.
+ //
if (tt == type::word && t.value == "depdb")
{
- names ns (exec_special (t, tt));
+ next (t, tt);
// This should have been enforced during pre-parsing.
//
- assert (!ns.empty ()); // <cmd> ... <newline>
+ assert (tt == type::word); // <cmd> ... <newline>
- const string& cmd (ns[0].value);
+ string cmd (move (t.value));
- if (cmd == "hash")
+ if (cmd == "dyndep")
{
- sha256 cs;
- for (auto i (ns.begin () + 1); i != ns.end (); ++i) // Skip <cmd>.
- to_checksum (cs, *i);
-
- if (ctx.dd.expect (cs.string ()) != nullptr)
- l4 ([&] {
- ctx.trace (ll)
- << "'depdb hash' argument change forcing update of "
- << ctx.env.target;});
+ // Note: the cast is safe since the part where the target is
+ // modified is always executed in apply().
+ //
+ exec_depdb_dyndep (t, tt,
+ li, ll,
+ data.a, data.bs, const_cast<target&> (data.t),
+ data.dd,
+ *data.dyn_targets,
+ *data.update,
+ *data.mt,
+ *data.deferred_failure,
+ data.byp);
}
- else if (cmd == "string")
+ else
{
- string s;
- try
- {
- s = convert<string> (
- names (make_move_iterator (ns.begin () + 1),
- make_move_iterator (ns.end ())));
- }
- catch (const invalid_argument& e)
- {
- fail (ll) << "invalid 'depdb string' argument: " << e;
- }
+ names ns (exec_special (t, tt, true /* skip <cmd> */));
- if (ctx.dd.expect (s) != nullptr)
- l4 ([&] {
- ctx.trace (ll)
- << "'depdb string' argument change forcing update of "
- << ctx.env.target;});
- }
- else if (cmd == "env")
- {
- sha256 cs;
- const char* pf ("invalid 'depdb env' argument: ");
+ string v;
+ const char* w (nullptr);
+ if (cmd == "hash")
+ {
+ sha256 cs;
+ for (const name& n: ns)
+ to_checksum (cs, n);
- try
+ v = cs.string ();
+ w = "argument";
+ }
+ else if (cmd == "string")
{
- // Skip <cmd>.
- //
- for (auto i (ns.begin () + 1); i != ns.end (); ++i)
+ try
+ {
+ v = convert<string> (move (ns));
+ }
+ catch (const invalid_argument& e)
{
- string vn (convert<string> (move (*i)));
- build2::script::verify_environment_var_name (vn, pf, ll);
- hash_environment (cs, vn);
+ fail (ll) << "invalid 'depdb string' argument: " << e;
}
+
+ w = "argument";
}
- catch (const invalid_argument& e)
+ else if (cmd == "env")
{
- fail (ll) << pf << e;
+ sha256 cs;
+ const char* pf ("invalid 'depdb env' argument: ");
+
+ try
+ {
+ for (name& n: ns)
+ {
+ string vn (convert<string> (move (n)));
+ build2::script::verify_environment_var_name (vn, pf, ll);
+ hash_environment (cs, vn);
+ }
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (ll) << pf << e;
+ }
+
+ v = cs.string ();
+ w = "environment";
}
+ else
+ assert (false);
+
+ // Prefix the value with the type letter. This serves two
+ // purposes:
+ //
+ // 1. It makes sure the result is never a blank line. We use
+ // blank lines as anchors to skip directly to certain entries
+ // (e.g., dynamic targets).
+ //
+ // 2. It allows us to detect the beginning of prerequisites
+ // since an absolute path will be distinguishable from these
+ // entries (in the future we may want to add an explicit
+ // blank after such custom entries to make this easier).
+ //
+ v.insert (0, 1, ' ');
+ v.insert (0, 1, cmd[0]); // `h`, `s`, or `e`
- if (ctx.dd.expect (cs.string ()) != nullptr)
+ if (data.dd.expect (v) != nullptr)
l4 ([&] {
- ctx.trace (ll)
- << "'depdb env' environment change forcing update of "
- << ctx.env.target;});
+ data.trace (ll)
+ << "'depdb " << cmd << "' " << w << " change forcing "
+ << "update of " << data.t;});
}
- else
- assert (false);
}
else
{
- // Note that we don't reset the line index to zero (as we do in
- // execute_body()) assuming that there are some script body
- // commands to follow.
- //
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- // Verify that this expression executes the set builtin.
- //
- if (find_if (ce.begin (), ce.end (),
- [] (const expr_term& et)
- {
- const process_path& p (et.pipe.back ().program);
- return p.initial == nullptr &&
- p.recall.string () == "set";
- }) == ce.end ())
+ if (!valid_preamble_cmd (ce, cf))
{
- const replay_tokens& rt (ctx.scr.depdb_preamble.back ().tokens);
+ const replay_tokens& rt (data.scr.depdb_preamble.back ().tokens);
assert (!rt.empty ());
fail (ll) << "disallowed command in depdb preamble" <<
@@ -1015,11 +1459,84 @@ namespace build2
info (rt[0].location ()) << "depdb preamble ends here";
}
- runner_->run (*environment_, ce, li, ll);
+ runner_->run (*environment_, ce, ii, li, cf, ll);
}
};
- exec_lines (s.depdb_preamble, exec_cmd);
+ exec_lines (begin, end, exec_cmd);
+ }
+
+ pair<names, location> parser::
+ execute_diag_preamble (const scope& rs, const scope& bs,
+ environment& e, const script& s, runner& r,
+ bool diag, bool enter, bool leave)
+ {
+ tracer trace ("execute_diag_preamble");
+
+ assert (!s.diag_preamble.empty ());
+
+ const line& dl (s.diag_preamble.back ()); // Diag builtin line.
+
+ pre_exec (rs, bs, e, &s, &r);
+
+ if (enter)
+ runner_->enter (e, s.start_loc);
+
+ // Perform the variable assignments.
+ //
+ auto exec_cmd = [&dl, this] (token& t,
+ build2::script::token_type& tt,
+ const iteration_index* ii, size_t li,
+ bool /* single */,
+ const function<command_function>& cf,
+ const location& ll)
+ {
+ // Note that we never reset the line index to zero (as we do in
+ // execute_body()) assuming that there are some script body commands
+ // to follow.
+ //
+ command_expr ce (
+ parse_command_line (t, static_cast<token_type&> (tt)));
+
+ if (!valid_preamble_cmd (ce, cf))
+ {
+ const replay_tokens& rt (dl.tokens);
+ assert (!rt.empty ());
+
+ fail (ll) << "disallowed command in diag preamble" <<
+ info << "only variable assignments are allowed in diag preamble"
+ << info (rt[0].location ()) << "diag preamble ends here";
+ }
+
+ runner_->run (*environment_, ce, ii, li, cf, ll);
+ };
+
+ exec_lines (s.diag_preamble.begin (), s.diag_preamble.end () - 1,
+ exec_cmd);
+
+ // Execute the diag line, if requested.
+ //
+ names ns;
+
+ if (diag)
+ {
+ // Copy the tokens and start playing.
+ //
+ replay_data (replay_tokens (dl.tokens));
+
+ token t;
+ build2::script::token_type tt;
+ next (t, tt);
+
+ ns = exec_special (t, tt, true /* skip_first */);
+
+ replay_stop ();
+ }
+
+ if (leave)
+ runner_->leave (e, s.end_loc);
+
+ return make_pair (ns, dl.tokens.front ().location ());
}
void parser::
@@ -1051,7 +1568,7 @@ namespace build2
}
void parser::
- exec_lines (const lines& lns,
+ exec_lines (lines_iterator begin, lines_iterator end,
const function<exec_cmd_function>& exec_cmd)
{
// Note that we rely on "small function object" optimization for the
@@ -1078,63 +1595,1781 @@ namespace build2
apply_value_attributes (&var, lhs, move (rhs), kind);
};
- auto exec_if = [this] (token& t, build2::script::token_type& tt,
- size_t li,
- const location& ll)
+ auto exec_cond = [this] (token& t, build2::script::token_type& tt,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- // Assume if-else always involves multiple commands.
+ // Assume a flow control construct always involves multiple
+ // commands.
//
- return runner_->run_if (*environment_, ce, li, ll);
+ return runner_->run_cond (*environment_, ce, ii, li, ll);
+ };
+
+ auto exec_for = [this] (const variable& var,
+ value&& val,
+ const attributes& val_attrs,
+ const location&)
+ {
+ value& lhs (environment_->assign (var));
+
+ attributes_.push_back (val_attrs);
+
+ apply_value_attributes (&var, lhs, move (val), type::assign);
};
- build2::script::parser::exec_lines (lns.begin (), lns.end (),
- exec_set, exec_cmd, exec_if,
- environment_->exec_line,
- &environment_->var_pool);
+ build2::script::parser::exec_lines (
+ begin, end,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */,
+ environment_->exec_line,
+ &environment_->var_pool);
}
names parser::
- exec_special (token& t, build2::script::token_type& tt,
- bool omit_builtin)
+ exec_special (token& t, build2::script::token_type& tt, bool skip_first)
{
- if (omit_builtin)
+ if (skip_first)
{
assert (tt != type::newline && tt != type::eos);
-
next (t, tt);
}
return tt != type::newline && tt != type::eos
- ? parse_names (t, tt, pattern_mode::expand)
+ ? parse_names (t, tt, pattern_mode::ignore)
: names ();
}
- names parser::
- execute_special (const scope& rs, const scope& bs,
- environment& e,
- const line& ln,
- bool omit_builtin)
+ void parser::
+ exec_depdb_dyndep (token& lt, build2::script::token_type& ltt,
+ size_t li, const location& ll,
+ action a, const scope& bs, target& t,
+ depdb& dd,
+ dynamic_targets& dyn_targets,
+ bool& update,
+ timestamp mt,
+ bool& deferred_failure,
+ dyndep_byproduct* byprod_result)
{
- pre_exec (rs, bs, e, nullptr /* script */, nullptr /* runner */);
+ tracer trace ("exec_depdb_dyndep");
+
+ context& ctx (t.ctx);
- // Copy the tokens and start playing.
+ depdb_dyndep_options ops;
+ bool prog (false);
+ bool byprod (false);
+ bool dyn_tgt (false);
+
+ // Prerequisite update filter (--update-*).
//
- replay_data (replay_tokens (ln.tokens));
+ struct filter
+ {
+ location loc;
+ build2::name name;
+ bool include;
+ bool used = false;
- token t;
- build2::script::token_type tt;
- next (t, tt);
+ union
+ {
+ const target_type* type; // For patterns.
+ const build2::target* target; // For non-patterns.
+ };
- names r (exec_special (t, tt, omit_builtin));
+ filter (const location& l,
+ build2::name n, bool i, const target_type& tt)
+ : loc (l), name (move (n)), include (i), type (&tt) {}
- replay_stop ();
- return r;
+ filter (const location& l,
+ build2::name n, bool i, const build2::target& t)
+ : loc (l), name (move (n)), include (i), target (&t) {}
+
+ const char*
+ option () const
+ {
+ return include ? "--update-include" : "--update-exclude";
+ }
+ };
+
+ vector<filter> filters;
+ bool filter_default (false); // Note: incorrect if filter is empty.
+
+ // Similar approach to parse_env_builtin().
+ //
+ {
+ auto& t (lt);
+ auto& tt (ltt);
+
+ next (t, tt); // Skip the 'dyndep' command.
+
+ if (tt == type::word && ((byprod = (t.value == "--byproduct")) ||
+ (dyn_tgt = (t.value == "--dyn-target"))))
+ next (t, tt);
+
+ assert (byprod == (byprod_result != nullptr));
+
+ // Note that an option name and value can belong to different name
+ // chunks. That's why we parse the arguments in the chunking mode
+ // into the list up to the `--` separator and parse this list into
+ // options afterwards. Note that the `--` separator should be
+ // omitted if there is no program (i.e., additional dependency info
+ // is being read from one of the prerequisites).
+ //
+ strings args;
+
+ for (names ns; tt != type::newline && tt != type::eos; ns.clear ())
+ {
+ location l (get_location (t));
+
+ if (tt == type::word)
+ {
+ if (t.value == "--")
+ {
+ prog = true;
+ break;
+ }
+
+ // See also the non-literal check in the options parsing below.
+ //
+ if ((t.value.compare (0, 16, "--update-include") == 0 ||
+ t.value.compare (0, 16, "--update-exclude") == 0) &&
+ (t.value[16] == '\0' || t.value[16] == '='))
+ {
+ string o;
+
+ if (t.value[16] == '\0')
+ {
+ o = t.value;
+ next (t, tt);
+ }
+ else
+ {
+ o.assign (t.value, 0, 16);
+ t.value.erase (0, 17);
+
+ if (t.value.empty ()) // Think `--update-include=$yacc`.
+ {
+ next (t, tt);
+
+ if (t.separated) // Think `--update-include= $yacc`.
+ fail (l) << "depdb dyndep: expected name after " << o;
+ }
+ }
+
+ if (!start_names (tt))
+ fail (l) << "depdb dyndep: expected name instead of " << t
+ << " after " << o;
+
+ // The chunk may actually contain multiple (or zero) names
+ // (e.g., as a result of a variable expansion or {}-list). Oh,
+ // well, I guess it can be viewed as a feature (to compensate
+ // for the literal option names).
+ //
+ parse_names (t, tt,
+ ns,
+ pattern_mode::preserve,
+ true /* chunk */,
+ ("depdb dyndep " + o + " option value").c_str (),
+ nullptr);
+
+ if (ns.empty ())
+ continue;
+
+ bool i (o[9] == 'i');
+
+ for (name& n: ns)
+ {
+ // @@ Maybe we will want to support out-qualified targets
+ // one day (but they should not be patterns).
+ //
+ if (n.pair)
+ fail (l) << "depdb dyndep: name pair in " << o << " value";
+
+ if (n.pattern)
+ {
+ if (*n.pattern != name::pattern_type::path)
+ fail (l) << "depdb dyndep: non-path pattern in " << o
+ << " value";
+
+ n.canonicalize ();
+
+ // @@ TODO (here and below).
+ //
+ // The reasonable directory semantics for a pattern seems
+ // to be:
+ //
+ // - empty - any directory (the common case)
+ // - relative - complete with base scope and fall through
+ // - absolute - only match targets in subdirectories
+ //
+ // Plus things are complicated by the src/out split (feels
+ // like we should do this in terms of scopes).
+ //
+ // See also target type/pattern-specific vars (where the
+ // directory is used to open a scope) and ad hoc pattern
+ // rules (where we currently don't allow directories).
+ //
+ if (!n.dir.empty ())
+ {
+ if (path_pattern (n.dir))
+ fail (l) << "depdb dyndep: pattern in directory in "
+ << o << " value";
+
+ fail (l) << "depdb dyndep: directory in pattern " << o
+ << " value";
+ }
+
+ // Resolve target type. If none is specified, then it's
+ // file{}.
+ //
+ const target_type* tt (n.untyped ()
+ ? &file::static_type
+ : bs.find_target_type (n.type));
+
+ if (tt == nullptr)
+ fail (l) << "depdb dyndep: unknown target type "
+ << n.type << " in " << o << " value";
+
+ filters.push_back (filter (l, move (n), i, *tt));
+ }
+ else
+ {
+ const target* t (search_existing (n, bs));
+
+ if (t == nullptr)
+ fail (l) << "depdb dyndep: unknown target " << n
+ << " in " << o << " value";
+
+ filters.push_back (filter (l, move (n), i, *t));
+ }
+ }
+
+ // If we have --update-exclude, then the default is include.
+ //
+ if (!i)
+ filter_default = true;
+
+ continue;
+ }
+ }
+
+ if (!start_names (tt))
+ fail (l) << "depdb dyndep: expected option or '--' separator "
+ << "instead of " << t;
+
+ parse_names (t, tt,
+ ns,
+ pattern_mode::ignore,
+ true /* chunk */,
+ "depdb dyndep builtin argument",
+ nullptr);
+
+ for (name& n: ns)
+ {
+ try
+ {
+ args.push_back (convert<string> (move (n)));
+ }
+ catch (const invalid_argument&)
+ {
+ diag_record dr (fail (l));
+ dr << "depdb dyndep: invalid string value ";
+ to_stream (dr.os, n, quote_mode::normal);
+ }
+ }
+ }
+
+ if (prog)
+ {
+ if (byprod)
+ fail (t) << "depdb dyndep: --byproduct cannot be used with "
+ << "program";
+
+ next (t, tt); // Skip '--'.
+
+ if (tt == type::newline || tt == type::eos)
+ fail (t) << "depdb dyndep: expected program name instead of "
+ << t;
+ }
+
+ // Parse the options.
+ //
+ // We would like to support both -I <dir> as well as -I<dir> forms
+ // for better compatibility. The latter requires manual parsing.
+ //
+ try
+ {
+ for (cli::vector_scanner scan (args); scan.more (); )
+ {
+ if (ops.parse (scan, cli::unknown_mode::stop) && !scan.more ())
+ break;
+
+ const char* a (scan.peek ());
+
+ // Handle -I<dir>
+ //
+ if (a[0] == '-' && a[1] == 'I')
+ {
+ try
+ {
+ ops.include_path ().push_back (dir_path (a + 2));
+ }
+ catch (const invalid_path&)
+ {
+ throw cli::invalid_value ("-I", a + 2);
+ }
+
+ scan.next ();
+ continue;
+ }
+
+ // Handle --byproduct and --dyn-target in the wrong place.
+ //
+ if (strcmp (a, "--byproduct") == 0)
+ {
+ fail (ll) << "depdb dyndep: "
+ << (dyn_tgt
+ ? "--byproduct specified with --dyn-target"
+ : "--byproduct must be first option");
+ }
+
+ if (strcmp (a, "--dyn-target") == 0)
+ {
+ fail (ll) << "depdb dyndep: "
+ << (byprod
+ ? "--dyn-target specified with --byproduct"
+ : "--dyn-target must be first option");
+ }
+
+ // Handle non-literal --update-*.
+ //
+ if ((strncmp (a, "--update-include", 16) == 0 ||
+ strncmp (a, "--update-exclude", 16) == 0) &&
+ (a[16] == '\0' || a[16] == '='))
+ fail (ll) << "depdb dyndep: " << a << " must be literal";
+
+ // Handle unknown option.
+ //
+ if (a[0] == '-')
+ throw cli::unknown_option (a);
+
+ // Handle unexpected argument.
+ //
+ fail (ll) << "depdb dyndep: unexpected argument '" << a << "'";
+ }
+ }
+ catch (const cli::exception& e)
+ {
+ fail (ll) << "depdb dyndep: " << e;
+ }
+ }
+
+ // --format
+ //
+ dyndep_format format (dyndep_format::make);
+ if (ops.format_specified ())
+ {
+ const string& f (ops.format ());
+
+ if (f == "lines") format = dyndep_format::lines;
+ else if (f != "make")
+ fail (ll) << "depdb dyndep: invalid --format option value '"
+ << f << "'";
+ }
+
+ // Prerequisite-specific options.
+ //
+
+ // --what
+ //
+ const char* what (ops.what_specified ()
+ ? ops.what ().c_str ()
+ : "file");
+
+ // --cwd
+ //
+ optional<dir_path> cwd;
+ if (ops.cwd_specified ())
+ {
+ if (!byprod)
+ fail (ll) << "depdb dyndep: --cwd only valid in --byproduct mode";
+
+ cwd = move (ops.cwd ());
+
+ if (cwd->relative ())
+ fail (ll) << "depdb dyndep: relative path specified with --cwd";
+ }
+
+ // --include
+ //
+ if (!ops.include_path ().empty ())
+ {
+ if (byprod)
+ fail (ll) << "depdb dyndep: -I specified with --byproduct";
+ }
+
+ // --default-type
+ //
+ // Get the default prerequisite type falling back to file{} if not
+ // specified.
+ //
+ // The reason one would want to specify it is to make sure different
+ // rules "resolve" the same dynamic prerequisites to the same targets.
+ // For example, a rule that implements custom C compilation for some
+ // translation unit would want to make sure it resolves extracted
+ // system headers to h{} targets analogous to the c module's rule.
+ //
+ const target_type* def_pt (&file::static_type);
+ if (ops.default_type_specified ())
+ {
+ const string& t (ops.default_type ());
+
+ def_pt = bs.find_target_type (t);
+ if (def_pt == nullptr)
+ fail (ll) << "depdb dyndep: unknown target type '" << t
+ << "' specified with --default-type";
+ }
+
+ // --adhoc
+ //
+ if (ops.adhoc ())
+ {
+ if (byprod)
+ fail (ll) << "depdb dyndep: --adhoc specified with --byproduct";
+ }
+
+ // Target-specific options.
+ //
+
+ // --target-what
+ //
+ const char* what_tgt ("file");
+ if (ops.target_what_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-what specified without "
+ << "--dyn-target";
+
+ what_tgt = ops.target_what ().c_str ();
+ }
+
+ // --target-cwd
+ //
+ optional<dir_path> cwd_tgt;
+ if (ops.target_cwd_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-cwd specified without "
+ << "--dyn-target";
+
+ cwd_tgt = move (ops.target_cwd ());
+
+ if (cwd_tgt->relative ())
+ fail (ll) << "depdb dyndep: relative path specified with "
+ << "--target-cwd";
+ }
+
+ // --target-default-type
+ //
+ const target_type* def_tt (&file::static_type);
+ if (ops.target_default_type_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-default-type specified "
+ << "without --dyn-target";
+
+ const string& t (ops.target_default_type ());
+
+ def_tt = bs.find_target_type (t);
+ if (def_tt == nullptr)
+ fail (ll) << "depdb dyndep: unknown target type '" << t
+ << "' specified with --target-default-type";
+ }
+
+ map<string, const target_type*> map_tt;
+ if (ops.target_extension_type_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-extension-type specified "
+ << "without --dyn-target";
+
+ for (pair<const string, string>& p: ops.target_extension_type ())
+ {
+ const target_type* tt (bs.find_target_type (p.second));
+ if (tt == nullptr)
+ fail (ll) << "depdb dyndep: unknown target type '" << p.second
+ << "' specified with --target-extension-type";
+
+ map_tt[p.first] = tt;
+ }
+ }
+
+ // --file (last since need --*cwd)
+ //
+ // Note that if --file is specified without a program, then we assume
+ // it is one of the static prerequisites.
+ //
+ optional<path> file;
+ if (ops.file_specified ())
+ {
+ file = move (ops.file ());
+
+ if (file->relative ())
+ {
+ if (!cwd && !cwd_tgt)
+ fail (ll) << "depdb dyndep: relative path specified with --file";
+
+ *file = (cwd ? *cwd : *cwd_tgt) / *file;
+ }
+ }
+ else if (!prog)
+ fail (ll) << "depdb dyndep: program or --file expected";
+
+ // Update prerequisite targets.
+ //
+ using dyndep = dyndep_rule;
+
+ auto& pts (t.prerequisite_targets[a]);
+
+ for (prerequisite_target& p: pts)
+ {
+ if (const target* pt =
+ (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data)
+ : nullptr))
+ {
+ // Automatically skip update=unmatch that we could not unmatch.
+ //
+ // Note that we don't skip update=match here (unless filtered out)
+ // in order to incorporate the result into our out-of-date'ness.
+ // So there is a nuanced interaction between update=match and
+ // --update-*.
+ //
+ if ((p.include & adhoc_buildscript_rule::include_unmatch) != 0)
+ {
+ l6 ([&]{trace << "skipping unmatched " << *pt;});
+ continue;
+ }
+
+ // Apply the --update-* filter.
+ //
+ if (!p.adhoc () && !filters.empty ())
+ {
+ // Compute and cache "effective" name that we will be pattern-
+ // matching (similar code to variable_type_map::find()).
+ //
+ auto ename = [pt, en = optional<string> ()] () mutable
+ -> const string&
+ {
+ if (!en)
+ {
+ en = string ();
+ pt->key ().effective_name (*en);
+ }
+
+ return en->empty () ? pt->name : *en;
+ };
+
+ bool i (filter_default);
+
+ for (filter& f: filters)
+ {
+ if (f.name.pattern)
+ {
+ const name& n (f.name);
+
+#if 0
+ // Match directory if any.
+ //
+ if (!n.dir.empty ())
+ {
+ // @@ TODO (here and above).
+ }
+#endif
+
+ // Match type.
+ //
+ if (!pt->is_a (*f.type))
+ continue;
+
+ // Match name.
+ //
+ if (n.value == "*" || butl::path_match (ename (), n.value))
+ {
+ i = f.include;
+ break;
+ }
+ }
+ else
+ {
+ if (pt == f.target)
+ {
+ i = f.include;
+ f.used = true;
+ break;
+ }
+ }
+ }
+
+ if (!i)
+ continue;
+ }
+
+ update = dyndep::update (
+ trace, a, *pt, update ? timestamp_unknown : mt) || update;
+
+ // While implicit, it is for a static prerequisite, so marking it
+ // feels correct.
+ //
+ p.include |= prerequisite_target::include_udm;
+
+ // Mark as updated (see execute_update_prerequisites() for
+ // details).
+ //
+ if (!p.adhoc ())
+ p.data = 1;
+ }
+ }
+
+ // Detect target filters that do not match anything.
+ //
+ for (const filter& f: filters)
+ {
+ if (!f.name.pattern && !f.used)
+ fail (f.loc) << "depdb dyndep: target " << f.name << " in "
+ << f.option () << " value does not match any "
+ << "prerequisites";
+ }
+
+ if (byprod)
+ {
+ *byprod_result = dyndep_byproduct {
+ ll,
+ format,
+ move (cwd),
+ move (*file),
+ ops.what_specified () ? move (ops.what ()) : string (what),
+ def_pt,
+ ops.drop_cycles ()};
+
+ return;
+ }
+
+ const scope& rs (*bs.root_scope ());
+
+ group* g (t.is_a<group> ()); // If not group then file.
+
+ // This code is based on the prior work in the cc module (specifically
+ // extract_headers()) where you can often find more detailed rationale
+ // for some of the steps performed.
+
+ // Build the maps lazily, only if/when needed.
+ //
+ using prefix_map = dyndep::prefix_map;
+ using srcout_map = dyndep::srcout_map;
+
+ function<dyndep::map_extension_func> map_ext (
+ [] (const scope& bs, const string& n, const string& e)
+ {
+ // NOTE: another version in adhoc_buildscript_rule::apply().
+
+ // @@ TODO: allow specifying base target types.
+ //
+ // Feels like the only reason one would want to specify base types
+ // is to tighten things up (as opposed to making some setup work)
+ // since it essentially restricts the set of registered target
+ // types that we will consider.
+ //
+ // Note also that these would be this project's target types while
+ // the file could be from another project.
+ //
+ return dyndep::map_extension (bs, n, e, nullptr);
+
+ // @@ TODO: should we return something as fallback (file{},
+ // def_pt)? Note: not the same semantics as enter_file()'s
+ // fallback. Feels like it could conceivably be different
+ // (e.g., h{} for fallback and hxx{} for some "unmappable" gen
+ // header). It looks like the "best" way currently is to define
+ // a custom target types for it (see moc{} in libQt5Core).
+ //
+ // Note also that we should only do this if bs is in our
+ // project.
+ });
+
+ // Don't we want to insert a "local"/prefixless mapping in case the
+ // user did not specify any -I's? But then will also need src-out
+ // remapping. So it will be equivalent to -I$out_base -I$src_base? But
+ // then it's not hard to add explicitly...
+ //
+ function<dyndep::prefix_map_func> pfx_map;
+
+ struct
+ {
+ tracer& trace;
+ const location& ll;
+ const depdb_dyndep_options& ops;
+ optional<prefix_map> map;
+ } pfx_data {trace, ll, ops, nullopt};
+
+ if (!ops.include_path ().empty ())
+ {
+ pfx_map = [this, &pfx_data] (action,
+ const scope& bs,
+ const target& t) -> const prefix_map&
+ {
+ if (!pfx_data.map)
+ {
+ pfx_data.map = prefix_map ();
+
+ const scope& rs (*bs.root_scope ());
+
+ for (dir_path d: pfx_data.ops.include_path ())
+ {
+ if (d.relative ())
+ fail (pfx_data.ll) << "depdb dyndep: relative include "
+ << "search path " << d;
+
+ if (!d.normalized (false /* canonical dir separators */))
+ d.normalize ();
+
+ // If we are not inside our project root, then ignore.
+ //
+ if (d.sub (rs.out_path ()))
+ dyndep::append_prefix (
+ pfx_data.trace, *pfx_data.map, t, move (d));
+ }
+ }
+
+ return *pfx_data.map;
+ };
+ }
+
+ // Parse the remainder of the command line as a program (which can be
+ // a pipe). If file is absent, then we save the command's stdout to a
+ // pipe. Otherwise, assume the command writes to file and add it to
+ // the cleanups.
+ //
+ // Note that MSVC /showInclude sends its output to stderr (and so
+ // could do other broken tools). However, the user can always merge
+ // stderr to stdout (2>&1).
+ //
+ command_expr cmd;
+ srcout_map so_map;
+
+ // Save/restore script cleanups.
+ //
+ struct cleanups
+ {
+ build2::script::cleanups ordinary;
+ paths special;
+ };
+ optional<cleanups> script_cleanups;
+
+ auto cleanups_guard = make_guard (
+ [this, &script_cleanups] ()
+ {
+ if (script_cleanups)
+ {
+ swap (environment_->cleanups, script_cleanups->ordinary);
+ swap (environment_->special_cleanups, script_cleanups->special);
+ }
+ });
+
+ auto init_run = [this, &ctx,
+ &lt, &ltt, &ll,
+ prog, &file, &ops,
+ &cmd, &so_map, &script_cleanups] ()
+ {
+ // Populate the srcout map with the -I$out_base -I$src_base pairs.
+ //
+ {
+ dyndep::srcout_builder builder (ctx, so_map);
+
+ for (dir_path d: ops.include_path ())
+ builder.next (move (d));
+ }
+
+ if (prog)
+ {
+ script_cleanups = cleanups {};
+ swap (environment_->cleanups, script_cleanups->ordinary);
+ swap (environment_->special_cleanups, script_cleanups->special);
+
+ cmd = parse_command_line (lt, static_cast<token_type&> (ltt));
+
+ // If the output goes to stdout, then this should be a single
+ // pipeline without any logical operators (&& or ||).
+ //
+ if (!file && cmd.size () != 1)
+ fail (ll) << "depdb dyndep: command with stdout output cannot "
+ << "contain logical operators";
+
+ // Note that we may need to run this command multiple times. The
+ // two potential issues here are the re-registration of the
+ // cleanups and re-use of the special files (stdin, stdout, etc;
+ // they include the line index in their names to avoid clashes
+ // between lines).
+ //
+ // Cleanups are not an issue, they will simply be replaced. And
+ // overriding the contents of the special files seems harmless and
+ // consistent with what would happen if the command redirects its
+ // output to a non-special file.
+ }
+ };
+
+ // Enter as a target, update, and add to the list of prerequisite
+ // targets a file.
+ //
+ size_t skip_count (0);
+
+ auto add = [this, &trace, what,
+ a, &bs, &t, g, &pts, pts_n = pts.size (),
+ &ops, &map_ext, def_pt, &pfx_map, &so_map,
+ &dd, &skip_count] (path fp,
+ size_t* skip,
+ timestamp mt) -> optional<bool>
+ {
+ context& ctx (t.ctx);
+
+ bool cache (skip == nullptr);
+
+ // Handle fsdir{} prerequisite separately.
+ //
+ // Note: inspired by inject_fsdir().
+ //
+ if (fp.to_directory ())
+ {
+ if (!cache)
+ {
+ // Note: already absolute since cannot be non-existent.
+ //
+ fp.normalize ();
+ }
+
+ const fsdir* dt (&search<fsdir> (t,
+ path_cast<dir_path> (fp),
+ dir_path (),
+ string (), nullptr, nullptr));
+
+ // Subset of code for file below.
+ //
+ if (!cache)
+ {
+ for (size_t i (0); i != pts_n; ++i)
+ {
+ const prerequisite_target& p (pts[i]);
+
+ if (const target* pt =
+ (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr))
+ {
+ if (dt == pt)
+ return false;
+ }
+ }
+
+ if (*skip != 0)
+ {
+ --(*skip);
+ return false;
+ }
+ }
+
+ match_sync (a, *dt);
+ pts.push_back (
+ prerequisite_target (
+ nullptr, true /* adhoc */, reinterpret_cast<uintptr_t> (dt)));
+
+ if (!cache)
+ dd.expect (fp.representation ());
+
+ skip_count++;
+ return false;
+ }
+
+ // We can only defer the failure if we will be running the recipe
+ // body.
+ //
+ auto fail = [this, what, &ctx] (const auto& f) -> optional<bool>
+ {
+ bool df (!ctx.match_only && !ctx.dry_run_option);
+
+ diag_record dr;
+ dr << error << what << ' ' << f << " not found and no rule to "
+ << "generate it";
+
+ if (df)
+ dr << info << "failure deferred to recipe body diagnostics";
+
+ if (verb < 4)
+ dr << info << "re-run with --verbose=4 for more information";
+
+ if (df)
+ return nullopt;
+ else
+ dr << endf;
+ };
+
+ if (const build2::file* ft = dyndep::enter_file (
+ trace, what,
+ a, bs, t,
+ fp, cache, cache /* normalized */,
+ map_ext, *def_pt, pfx_map, so_map).first)
+ {
+ // We don't need to do these tests for the cached case since such
+ // prerequisites would have been skipped (and we won't get here if
+ // the target/prerequisite set changes since we hash them).
+ //
+ if (!cache)
+ {
+ // Skip if this is one of the static prerequisites provided it
+ // was updated.
+ //
+ for (size_t i (0); i != pts_n; ++i)
+ {
+ const prerequisite_target& p (pts[i]);
+
+ if (const target* pt =
+ (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr))
+ {
+ if (ft == pt && (p.adhoc () || p.data == 1))
+ return false;
+ }
+ }
+
+ // Skip if this is one of the targets.
+ //
+ // Note that for dynamic targets this only works if we see the
+ // targets before prerequisites (like in the make dependency
+ // format).
+ //
+ if (ops.drop_cycles ())
+ {
+ if (g != nullptr)
+ {
+ auto& ms (g->members);
+ if (find (ms.begin (), ms.end (), ft) != ms.end ())
+ return false;
+ }
+ else
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ return false;
+ }
+ }
+ }
+
+ // Skip until where we left off.
+ //
+ // Note that we used to do this outside of this lambda and
+ // before calling enter_file() but due to the above skips we can
+ // only do it here if we want to have a consistent view of the
+ // prerequisite lists between the cached and non-cached cases.
+ //
+ if (*skip != 0)
+ {
+ --(*skip);
+ return false;
+ }
+ }
+
+ // Note: mark the injected prerequisite target as updated (see
+ // execute_update_prerequisites() for details).
+ //
+ if (optional<bool> u = dyndep::inject_file (
+ trace, what,
+ a, t,
+ *ft, mt,
+ false /* fail */,
+ ops.adhoc () /* adhoc */))
+ {
+ prerequisite_target& pt (pts.back ());
+
+ if (pt.adhoc ())
+ {
+ pt.data = reinterpret_cast<uintptr_t> (pt.target);
+ pt.target = nullptr;
+ }
+ else
+ pt.data = 1; // Already updated.
+
+ if (!cache)
+ dd.expect (ft->path ()); // @@ Use fp (or verify match)?
+
+ skip_count++;
+ return *u;
+ }
+ else if (cache)
+ {
+ dd.write (); // Invalidate this line.
+ return true;
+ }
+ else
+ return fail (*ft);
+ }
+ else
+ return fail (fp);
+ };
+
+ // If things go wrong (and they often do in this area), give the user
+ // a bit extra context.
+ //
+ auto df = make_diag_frame (
+ [this, &ll, &t] (const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info (ll) << "while extracting dynamic dependencies for "
+ << t;
+ });
+
+ // While in the make format targets come before prerequisites, in
+ // depdb we store them after since any change to prerequisites can
+ // invalidate the set of targets. So we save them first and process
+ // later.
+ //
+ // Note also that we need to return them to the caller in case we are
+ // updating.
+
+ // If nothing so far has invalidated the dependency database, then try
+ // the cached data before running the program.
+ //
+ bool cache (!update);
+ bool skip_blank (false);
+
+ for (bool restart (true), first_run (true); restart; cache = false)
+ {
+ // Clear the state in case we are restarting.
+ //
+ if (dyn_tgt)
+ dyn_targets.clear ();
+
+ restart = false;
+
+ if (cache)
+ {
+ // If any, this is always the first run.
+ //
+ assert (skip_count == 0);
+
+ // We should always end with a blank line after the list of
+ // dynamic prerequisites.
+ //
+ for (;;)
+ {
+ string* l (dd.read ());
+
+ // If the line is invalid, run the compiler.
+ //
+ if (l == nullptr)
+ {
+ restart = true;
+ break;
+ }
+
+ if (l->empty ()) // Done with prerequisites, nothing changed.
+ {
+ skip_blank = true;
+ break;
+ }
+
+ if (optional<bool> r = add (path (move (*l)), nullptr, mt))
+ {
+ restart = *r;
+
+ if (restart)
+ {
+ update = true;
+ l6 ([&]{trace << "restarting (cache)";});
+ break;
+ }
+ }
+ else
+ {
+ // Trigger rebuild and mark as expected to fail.
+ //
+ update = true;
+ deferred_failure = true;
+ return;
+ }
+ }
+
+ if (!restart) // Nothing changed.
+ {
+ if (dyn_tgt)
+ {
+ // We should always end with a blank line after the list of
+ // dynamic targets.
+ //
+ for (;;)
+ {
+ string* l (dd.read ());
+
+ // If the line is invalid, run the compiler.
+ //
+ if (l == nullptr)
+ {
+ restart = true;
+ break;
+ }
+
+ if (l->empty ()) // Done with targets.
+ break;
+
+ // Split into type and path (see below for background).
+ //
+ size_t p (l->find (' '));
+ if (p == string::npos || // Invalid format.
+ p == 0 || // Empty type.
+ p + 1 == l->size ()) // Empty path.
+ {
+ dd.write (); // Invalidate this line.
+ restart = true;
+ break;
+ }
+
+ string t (*l, 0, p);
+ l->erase (0, p + 1);
+
+ dyn_targets.push_back (
+ dynamic_target {move (t), path (move (*l))});
+ }
+ }
+
+ if (!restart) // Done, nothing changed.
+ break; // Break early to keep cache=true.
+ }
+ }
+ else
+ {
+ if (first_run)
+ {
+ init_run ();
+ first_run = false;
+ }
+ else
+ {
+ if (!prog)
+ fail (ll) << "generated " << what << " without program to retry";
+
+ // Drop dyndep cleanups accumulated on the previous run.
+ //
+ assert (script_cleanups); // Sanity check.
+ environment_->cleanups.clear ();
+ environment_->special_cleanups.clear ();
+ }
+
+ // Save the timestamp just before we run the command. If we depend
+ // on any file that has been updated since, then we should assume
+ // we have "seen" the old copy and restart.
+ //
+ timestamp rmt (prog ? system_clock::now () : mt);
+
+ // Run the command if any and reduce outputs to common istream.
+ //
+ // Note that the resulting stream should tolerate partial read.
+ //
+ // While reading the entire stdout into a string is not the most
+ // efficient way to do it, this does simplify things quite a bit,
+ // not least of which is not having to parse the output before
+ // knowing the program exist status.
+ //
+ istringstream iss;
+ if (prog)
+ {
+ // Note: depdb is disallowed inside flow control constructs.
+ //
+ if (!file)
+ {
+ function<command_function> cf (
+ [&iss]
+ (build2::script::environment&,
+ const strings&,
+ auto_fd in,
+ pipe_command* pipe,
+ const optional<deadline>& dl,
+ const location& ll)
+ {
+ read (move (in),
+ false /* whitespace */,
+ false /* newline */,
+ true /* exact */,
+ [&iss] (string&& s) {iss.str (move (s));},
+ pipe,
+ dl,
+ ll,
+ "depdb-dyndep");
+ });
+
+ build2::script::run (*environment_,
+ cmd,
+ nullptr /* iteration_index */, li,
+ ll,
+ cf, false /* last_cmd */);
+
+ iss.exceptions (istream::badbit);
+ }
+ else
+ {
+ build2::script::run (
+ *environment_, cmd, nullptr /* iteration_index */, li, ll);
+
+ // Note: make it a maybe-cleanup in case the command cleans it
+ // up itself.
+ //
+ environment_->clean (
+ {build2::script::cleanup_type::maybe, *file},
+ true /* implicit */);
+ }
+ }
+
+ ifdstream ifs (ifdstream::badbit);
+ if (file)
+ try
+ {
+ ifs.open (*file);
+ }
+ catch (const io_error& e)
+ {
+ fail (ll) << "unable to open file " << *file << ": " << e;
+ }
+
+ istream& is (file
+ ? static_cast<istream&> (ifs)
+ : static_cast<istream&> (iss));
+
+ const path_name& in (file
+ ? path_name (*file)
+ : path_name ("<stdin>"));
+
+ location il (in, 1);
+ size_t skip (skip_count);
+
+ // The way we parse things is format-specific.
+ //
+ // Note: similar code in
+ // adhoc_buildscript_rule::perform_update_file_dyndep_byproduct().
+ //
+ switch (format)
+ {
+ case dyndep_format::make:
+ {
+ using make_state = make_parser;
+ using make_type = make_parser::type;
+
+ make_parser make;
+
+ for (string l; !restart; ++il.line) // Reuse the buffer.
+ {
+ if (eof (getline (is, l)))
+ {
+ if (make.state != make_state::end)
+ fail (il) << "incomplete make dependency declaration";
+
+ break;
+ }
+
+ size_t pos (0);
+ do
+ {
+ pair<make_type, path> r;
+ {
+ auto df = make_diag_frame (
+ [this, &l] (const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while parsing make dependency "
+ << "declaration line '" << l << "'";
+ });
+
+ r = make.next (l, pos, il);
+ }
+
+ if (r.second.empty ())
+ continue;
+
+ // Skip targets unless requested to extract.
+ //
+ // BTW, if you are wondering why we don't extract targets
+ // by default, take GCC as an example, where things are
+ // quite messed up: by default it ignores -o and just
+ // takes the source file name and replaces the extension
+ // with a platform-appropriate object file extension. One
+ // can specify a custom target (or even multiple targets)
+ // with -MT or with -MQ (quoting). So in this case it's
+ // definitely easier for the user to ignore the targets
+ // and just specify everything in the buildfile.
+ //
+ if (r.first == make_type::target)
+ {
+ // NOTE: similar code below.
+ //
+ if (dyn_tgt)
+ {
+ path& f (r.second);
+
+ if (f.relative ())
+ {
+ if (!cwd_tgt)
+ fail (il) << "relative " << what_tgt
+ << " target path '" << f
+ << "' in make dependency declaration" <<
+ info << "consider using --target-cwd to specify "
+ << "relative path base";
+
+ f = *cwd_tgt / f;
+ }
+
+ // Note that unlike prerequisites, here we don't need
+ // normalize_external() since we expect the targets to
+ // be within this project.
+ //
+ try
+ {
+ f.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what_tgt << " target "
+ << "path '" << f.string () << "'";
+ }
+
+ // The target must be within this project.
+ //
+ if (!f.sub (rs.out_path ()))
+ {
+ fail (il) << what_tgt << " target path " << f
+ << " must be inside project output "
+ << "directory " << rs.out_path ();
+ }
+
+ // Note: type is resolved later.
+ //
+ dyn_targets.push_back (
+ dynamic_target {string (), move (f)});
+ }
+
+ continue;
+ }
+
+ // NOTE: similar code below.
+ //
+ if (optional<bool> u = add (move (r.second), &skip, rmt))
+ {
+ restart = *u;
+
+ if (restart)
+ {
+ update = true;
+ l6 ([&]{trace << "restarting";});
+ break;
+ }
+ }
+ else
+ {
+ // Trigger recompilation, mark as expected to fail, and
+ // bail out.
+ //
+ update = true;
+ deferred_failure = true;
+ break;
+ }
+ }
+ while (pos != l.size ());
+
+ if (make.state == make_state::end || deferred_failure)
+ break;
+ }
+
+ break; // case
+ }
+ case dyndep_format::lines:
+ {
+ bool tgt (dyn_tgt); // Reading targets or prerequisites.
+
+ for (string l; !restart; ++il.line) // Reuse the buffer.
+ {
+ if (eof (getline (is, l)))
+ break;
+
+ if (l.empty ())
+ {
+ if (!tgt)
+ fail (il) << "blank line in prerequisites list";
+
+ tgt = false; // Targets/prerequisites separating blank.
+ continue;
+ }
+
+ // See if this line starts with a space to indicate a non-
+ // existent prerequisite. This variable serves both as a
+ // flag and as a position of the beginning of the path.
+ //
+ size_t n (l.front () == ' ' ? 1 : 0);
+
+ if (tgt)
+ {
+ // NOTE: similar code above.
+ //
+ path f;
+ try
+ {
+ // Non-existent target doesn't make sense.
+ //
+ if (n)
+ throw invalid_path ("");
+
+ f = path (l);
+
+ if (f.relative ())
+ {
+ if (!cwd_tgt)
+ fail (il) << "relative " << what_tgt
+ << " target path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --target-cwd to specify "
+ << "relative path base";
+
+ f = *cwd_tgt / f;
+ }
+
+ // Note that unlike prerequisites, here we don't need
+ // normalize_external() since we expect the targets to
+ // be within this project.
+ //
+ f.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what_tgt << " target path '"
+ << l << "'";
+ }
+
+ // The target must be within this project.
+ //
+ if (!f.sub (rs.out_path ()))
+ {
+ fail (il) << what_tgt << " target path " << f
+ << " must be inside project output directory "
+ << rs.out_path ();
+ }
+
+ // Note: type is resolved later.
+ //
+ dyn_targets.push_back (
+ dynamic_target {string (), move (f)});
+ }
+ else
+ {
+ path f;
+ try
+ {
+ f = path (l.c_str () + n, l.size () - n);
+
+ if (f.empty () ||
+ (n && f.to_directory ())) // Non-existent fsdir{}.
+ throw invalid_path ("");
+
+ if (f.relative ())
+ {
+ if (!n)
+ {
+ if (!cwd)
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --cwd to specify "
+ << "relative path base";
+
+ f = *cwd / f;
+ }
+ }
+ else if (n)
+ {
+ // @@ TODO: non-existent absolute paths.
+ //
+ throw invalid_path ("");
+ }
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what << " prerequisite path '"
+ << l << "'";
+ }
+
+ // NOTE: similar code above.
+ //
+ if (optional<bool> u = add (move (f), &skip, rmt))
+ {
+ restart = *u;
+
+ if (restart)
+ {
+ update = true;
+ l6 ([&]{trace << "restarting";});
+ }
+ }
+ else
+ {
+ // Trigger recompilation, mark as expected to fail, and
+ // bail out.
+ //
+ update = true;
+ deferred_failure = true;
+ break;
+ }
+ }
+ }
+
+ break; // case
+ }
+ }
+
+ if (file)
+ ifs.close ();
+
+ // Bail out early if we have deferred a failure.
+ //
+ if (deferred_failure)
+ return;
+
+ // Clean after each depdb-dyndep execution.
+ //
+ if (prog)
+ clean (*environment_, ll);
+ }
+ }
+
+ // Add the dynamic prerequisites terminating blank line if we are
+ // updating depdb and unless it's already there.
+ //
+ if (!cache && !skip_blank)
+ dd.expect ("");
+
+ // Handle dynamic targets.
+ //
+ if (dyn_tgt)
+ {
+ if (g != nullptr && g->members_static == 0 && dyn_targets.empty ())
+ fail (ll) << "group " << *g << " has no static or dynamic members";
+
+ // There is one more level (at least that we know of) to this rabbit
+ // hole: if the set of dynamic targets changes between clean and
+ // update and we do a `clean update` batch, then we will end up with
+ // old targets (as entered by clean from old depdb information)
+ // being present during update. So we need to clean them out.
+ //
+ // Optimize this for a first/single batch (common case) by noticing
+ // that there are only real targets to start with.
+ //
+ // Note that this doesn't affect explicit groups where we reset the
+ // members on each update (see adhoc_rule_buildscript::apply()).
+ //
+ optional<vector<const target*>> dts;
+ if (g == nullptr)
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (m->decl != target_decl::real)
+ dts = vector<const target*> ();
+ }
+ }
+
+ struct map_ext_data
+ {
+ const char* what_tgt;
+ const map<string, const target_type*>& map_tt;
+ const path* f; // Updated on each iteration.
+ } d {what_tgt, map_tt, nullptr};
+
+ function<dyndep::map_extension_func> map_ext (
+ [this, &d] (const scope& bs, const string& n, const string& e)
+ {
+ small_vector<const target_type*, 2> tts;
+
+ // Check the custom mapping first.
+ //
+ auto i (d.map_tt.find (e));
+ if (i != d.map_tt.end ())
+ tts.push_back (i->second);
+ else
+ {
+ tts = dyndep::map_extension (bs, n, e, nullptr);
+
+ // Issue custom diagnostics suggesting --target-extension-type.
+ //
+ if (tts.size () > 1)
+ {
+ diag_record dr (fail);
+
+ dr << "mapping of " << d.what_tgt << " target path " << *d.f
+ << " to target type is ambiguous";
+
+ for (const target_type* tt: tts)
+ dr << info << "can be " << tt->name << "{}";
+
+ dr << info << "use --target-extension-type to provide custom "
+ << "mapping";
+ }
+ }
+
+ return tts;
+ });
+
+ function<dyndep::group_filter_func> filter;
+ if (g != nullptr)
+ {
+ // Skip static/duplicate members in explicit group.
+ //
+ filter = [] (mtime_target& g, const build2::file& m)
+ {
+ auto& ms (g.as<group> ().members);
+ return find (ms.begin (), ms.end (), &m) == ms.end ();
+ };
+ }
+
+ // Unlike for prerequisites, for targets we store in depdb both the
+ // resolved target type and path. The target type is used in clean
+ // (see adhoc_rule_buildscript::apply()) where we cannot easily get
+ // hold of all the dyndep options to map the path to target type.
+ // So the format of the target line is:
+ //
+ // <type> <path>
+ //
+ string l; // Reuse the buffer.
+ for (dynamic_target& dt: dyn_targets)
+ {
+ const path& f (dt.path);
+
+ d.f = &f; // Current file being mapped.
+
+ // Note that this logic should be consistent with what we have in
+ // adhoc_buildscript_rule::apply() for perform_clean.
+ //
+ const build2::file* ft (nullptr);
+ if (g != nullptr)
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_group_member (
+ what_tgt,
+ a, bs, *g,
+ f, // Can't move since need to return dyn_targets.
+ map_ext, *def_tt, filter));
+
+ // Note: no target_decl shenanigans since reset the members on
+ // each update.
+ //
+ if (!r.second)
+ {
+ dt.type.clear (); // Static indicator.
+ continue;
+ }
+
+ ft = &r.first;
+
+ // Note: we only currently support dynamic file members so it
+ // will be file if first.
+ //
+ g->members.push_back (ft);
+ }
+ else
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_adhoc_group_member (
+ what_tgt,
+ a, bs, t,
+ f, // Can't move since need to return dyn_targets.
+ map_ext, *def_tt));
+
+ // Note that we have to track the dynamic target even if it was
+ // already a member (think `b update && b clean update`).
+ //
+ if (!r.second && r.first.decl == target_decl::real)
+ {
+ dt.type.clear (); // Static indicator.
+ continue;
+ }
+
+ ft = &r.first;
+
+ if (dts)
+ dts->push_back (ft);
+ }
+
+ const char* tn (ft->type ().name);
+
+ if (dt.type.empty ())
+ dt.type = tn;
+ else if (dt.type != tn)
+ {
+ // This can, for example, happen if the user changed the
+ // extension to target type mapping. Say swapped extension
+ // variable values of two target types.
+ //
+ fail << "mapping of " << what_tgt << " target path " << f
+ << " to target type has changed" <<
+ info << "previously mapped to " << dt.type << "{}" <<
+ info << "now mapped to " << tn << "{}" <<
+ info << "perform from scratch rebuild of " << t;
+ }
+
+ if (!cache)
+ {
+ l = dt.type;
+ l += ' ';
+ l += f.string ();
+ dd.expect (l);
+ }
+ }
+
+ // Add the dynamic targets terminating blank line.
+ //
+ if (!cache)
+ dd.expect ("");
+
+ // Clean out old dynamic targets (skip the primary member).
+ //
+ if (dts)
+ {
+ assert (g == nullptr);
+
+ for (target* p (&t); p->adhoc_member != nullptr; )
+ {
+ target* m (p->adhoc_member);
+
+ if (m->decl != target_decl::real)
+ {
+ // While there could be quite a few dynamic targets (think
+ // something like Doxygen), this will hopefully be optimized
+ // down to a contiguous memory region scan for an integer and
+ // so should be fast.
+ //
+ if (find (dts->begin (), dts->end (), m) == dts->end ())
+ {
+ p->adhoc_member = m->adhoc_member; // Drop m.
+ continue;
+ }
+ }
+
+ p = m;
+ }
+ }
+ }
+
+ // Reload $< and $> to make sure they contain the newly discovered
+ // prerequisites and targets.
+ //
+ if (update)
+ environment_->set_special_variables (a);
}
- // When add a special variable don't forget to update lexer::word().
+ // When adding a special variable don't forget to update lexer::word() and
+ // for-loop parsing in pre_parse_line().
//
bool parser::
special_variable (const string& n) noexcept
@@ -1143,11 +3378,14 @@ namespace build2
}
lookup parser::
- lookup_variable (name&& qual, string&& name, const location& loc)
+ lookup_variable (names&& qual, string&& name, const location& loc)
{
// In the pre-parse mode collect the referenced variable names for the
// script semantics change tracking.
//
+ // Note that during pre-parse a computed (including qualified) name
+ // is signalled as an empty name.
+ //
if (pre_parse_ || pre_parse_suspended_)
{
lookup r;
@@ -1167,10 +3405,8 @@ namespace build2
{
if (pre_parse_suspended_)
{
- const variable* pvar (scope_->ctx.var_pool.find (name));
-
- if (pvar != nullptr)
- r = (*scope_)[*pvar];
+ if (const variable* var = scope_->var_pool ().find (name))
+ r = (*scope_)[*var];
}
if (!depdb_clear_)
@@ -1181,12 +3417,27 @@ namespace build2
vars.push_back (move (name));
}
}
+ else
+ {
+ // What about pre_parse_suspended_? Don't think it makes sense to
+ // diagnose this since it can be indirect (that is, via an
+ // intermediate variable).
+ //
+ if (perform_update_ && file_based_ && !computed_var_)
+ computed_var_ = loc;
+ }
return r;
}
if (!qual.empty ())
- fail (loc) << "qualified variable name";
+ {
+ // Qualified variable is computed and we expect the user to track
+ // its changes manually.
+ //
+ return build2::script::parser::lookup_variable (
+ move (qual), move (name), loc);
+ }
lookup r (environment_->lookup (name));
@@ -1197,13 +3448,13 @@ namespace build2
// diag builtin argument change (which can be affected by such a
// variable expansion) doesn't affect the script semantics and the
// depdb argument is specifically used for the script semantics change
- // tracking. We also omit this check it the depdb builtin is used in
- // the script, assuming that such variables are tracked manually, if
- // required.
+ // tracking. We also omit this check if the depdb "value" (string,
+ // hash) builtin is used in the script, assuming that such variables
+ // are tracked manually, if required.
//
if (script_ != nullptr &&
!script_->depdb_clear &&
- script_->depdb_preamble.empty ())
+ !script_->depdb_value)
{
if (r.defined () && !r.belongs (*environment_))
{
@@ -1223,7 +3474,7 @@ namespace build2
{
if (perform_update_ && file_based_ && !impure_func_)
{
- const function_overloads* f (ctx.functions.find (name));
+ const function_overloads* f (ctx->functions.find (name));
if (f != nullptr && !f->pure)
impure_func_ = make_pair (move (name), loc);
diff --git a/libbuild2/build/script/parser.hxx b/libbuild2/build/script/parser.hxx
index b737a13..ce550fc 100644
--- a/libbuild2/build/script/parser.hxx
+++ b/libbuild2/build/script/parser.hxx
@@ -8,7 +8,6 @@
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
-#include <libbuild2/depdb.hxx>
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/script/parser.hxx>
@@ -29,7 +28,7 @@ namespace build2
// Pre-parse. Issue diagnostics and throw failed in case of an error.
//
public:
- parser (context& c): build2::script::parser (c, false /* relex */) {}
+ parser (context& c): build2::script::parser (c) {}
// Note that the returned script object references the passed path
// name.
@@ -66,11 +65,18 @@ namespace build2
pre_parse_script ();
void
- pre_parse_line (token&, token_type&, bool if_line = false);
+ pre_parse_line (token&, token_type&,
+ optional<line_type> flow_control_type = nullopt);
+
+ void
+ pre_parse_block_line (token&, token_type&, line_type block_type);
void
pre_parse_if_else (token&, token_type&);
+ void
+ pre_parse_loop (token&, token_type&, line_type);
+
command_expr
parse_command_line (token&, token_type&);
@@ -82,31 +88,122 @@ namespace build2
// initialize/clean up the environment before/after the script
// execution.
//
+ // Note: having both root and base scopes for testing (where we pass
+ // global scope for both).
+ //
void
execute_body (const scope& root, const scope& base,
environment&, const script&, runner&,
bool enter = true, bool leave = true);
-
+ // Execute the first or the second (dyndep) half of the depdb
+ // preamble.
+ //
// Note that it's the caller's responsibility to make sure that the
// runner's enter() function is called before the first preamble/body
// command execution and leave() -- after the last command.
//
+ // Note: target must be file or group.
+ //
void
- execute_depdb_preamble (const scope& root, const scope& base,
- environment&, const script&, runner&,
- depdb&);
-
+ execute_depdb_preamble (action a, const scope& base, const target& t,
+ environment& e, const script& s, runner& r,
+ depdb& dd)
+ {
+ auto b (s.depdb_preamble.begin ());
+ exec_depdb_preamble (
+ a, base, t,
+ e, s, r,
+ b,
+ (s.depdb_dyndep
+ ? b + *s.depdb_dyndep
+ : s.depdb_preamble.end ()),
+ dd);
+ }
+
+ struct dynamic_target
+ {
+ string type; // Target type name (absent if static member).
+ build2::path path;
+ };
+
+ using dynamic_targets = vector<dynamic_target>;
- // Parse a special builtin line into names, performing the variable
- // and pattern expansions. If omit_builtin is true, then omit the
- // builtin name from the result.
+ void
+ execute_depdb_preamble_dyndep (
+ action a, const scope& base, target& t,
+ environment& e, const script& s, runner& r,
+ depdb& dd,
+ dynamic_targets& dyn_targets,
+ bool& update, timestamp mt, bool& deferred_failure)
+ {
+ exec_depdb_preamble (
+ a, base, t,
+ e, s, r,
+ s.depdb_preamble.begin () + *s.depdb_dyndep,
+ s.depdb_preamble.end (),
+ dd, &dyn_targets, &update, mt, &deferred_failure);
+ }
+
+ // This version doesn't actually execute the depdb-dyndep builtin (but
+ // may execute some variable assignments) instead returning all the
+ // information (extracted from options) necessary to implement the
+ // depdb-dyndep --byproduct logic (which fits better into the rule
+ // implementation).
+ //
+ enum class dyndep_format {make, lines};
+
+ struct dyndep_byproduct
+ {
+ location_value location;
+ dyndep_format format;
+ optional<dir_path> cwd;
+ path file;
+ string what;
+ const target_type* default_type;
+ bool drop_cycles;
+ };
+
+ dyndep_byproduct
+ execute_depdb_preamble_dyndep_byproduct (
+ action a, const scope& base, const target& t,
+ environment& e, const script& s, runner& r,
+ depdb& dd, bool& update, timestamp mt)
+ {
+ // Dummies.
+ //
+ // This is getting a bit ugly (we also don't really need to pass
+ // depdb here). One day we will find a better way...
+ //
+ dynamic_targets dyn_targets;
+ bool deferred_failure;
+
+ dyndep_byproduct v;
+ exec_depdb_preamble (
+ a, base, t,
+ e, s, r,
+ s.depdb_preamble.begin () + *s.depdb_dyndep,
+ s.depdb_preamble.end (),
+ dd, &dyn_targets, &update, mt, &deferred_failure, &v);
+ return v;
+ }
+
+ // If the diag argument is true, then execute the preamble including
+ // the (trailing) diagnostics line and return the resulting names and
+ // its location (see exec_special() for the diagnostics line execution
+ // semantics). Otherwise, execute the preamble excluding the
+ // diagnostics line and return an empty names list and location. If
+ // requested, call the runner's enter() and leave() functions that
+ // initialize/clean up the environment before/after the preamble
+ // execution.
//
- names
- execute_special (const scope& root, const scope& base,
- environment&,
- const line&,
- bool omit_builtin = true);
+ // Note: having both root and base scopes for testing (where we pass
+ // global scope for both).
+ //
+ pair<names, location>
+ execute_diag_preamble (const scope& root, const scope& base,
+ environment&, const script&, runner&,
+ bool diag, bool enter, bool leave);
protected:
// Setup the parser for subsequent exec_*() function calls.
@@ -115,12 +212,50 @@ namespace build2
pre_exec (const scope& root, const scope& base,
environment&, const script*, runner*);
+ using lines_iterator = lines::const_iterator;
+
+ void
+ exec_lines (lines_iterator, lines_iterator,
+ const function<exec_cmd_function>&);
+
void
- exec_lines (const lines&, const function<exec_cmd_function>&);
+ exec_lines (const lines& l, const function<exec_cmd_function>& c)
+ {
+ exec_lines (l.begin (), l.end (), c);
+ }
+ // Parse a special builtin line into names, performing the variable
+ // and pattern expansions. Optionally, skip the first token (builtin
+ // name, etc).
+ //
names
- exec_special (token& t, build2::script::token_type& tt,
- bool omit_builtin = true);
+ exec_special (token&, build2::script::token_type&, bool skip_first);
+
+ // Note: target must be file or group.
+ //
+ void
+ exec_depdb_preamble (action, const scope& base, const target&,
+ environment&, const script&, runner&,
+ lines_iterator begin, lines_iterator end,
+ depdb&,
+ dynamic_targets* dyn_targets = nullptr,
+ bool* update = nullptr,
+ optional<timestamp> mt = nullopt,
+ bool* deferred_failure = nullptr,
+ dyndep_byproduct* = nullptr);
+
+ // Note: target must be file or group.
+ //
+ void
+ exec_depdb_dyndep (token&, build2::script::token_type&,
+ size_t line_index, const location&,
+ action, const scope& base, target&,
+ depdb&,
+ dynamic_targets& dyn_targets,
+ bool& update,
+ timestamp,
+ bool& deferred_failure,
+ dyndep_byproduct*);
// Helpers.
//
@@ -132,7 +267,7 @@ namespace build2
//
protected:
virtual lookup
- lookup_variable (name&&, string&&, const location&) override;
+ lookup_variable (names&&, string&&, const location&) override;
virtual void
lookup_function (string&&, const location&) override;
@@ -157,9 +292,9 @@ namespace build2
script* script_;
const small_vector<action, 1>* actions_; // Non-NULL during pre-parse.
- // True if this script is for file-based targets and performing update
- // is one of the actions, respectively. Only set for the pre-parse
- // mode.
+ // True if this script is for file- or file group-based targets and
+ // performing update is one of the actions, respectively. Only set for
+ // the pre-parse mode.
//
bool file_based_;
bool perform_update_;
@@ -189,18 +324,24 @@ namespace build2
//
// If the diag builtin is encountered, then its whole line is saved
// (including the leading 'diag' word) for later execution and the
- // diagnostics weight is set to 4.
+ // diagnostics weight is set to 4. The preceding lines, which can only
+ // contain variable assignments (including via the set builtin,
+ // potentially inside the flow control constructs), are also saved.
//
// Any attempt to manually set the custom diagnostics twice (the diag
// builtin after the script name or after another diag builtin) is
// reported as ambiguity.
//
- // At the end of pre-parsing either diag_name_ or diag_line_ (but not
- // both) are present.
+ // If no script name is deduced by the end of pre-parsing and the
+ // script is used for a single operation, then use this operation's
+ // name as a script name.
+ //
+ // At the end of pre-parsing either diag_name_ is present or
+ // diag_preamble_ is not empty (but not both).
//
optional<pair<string, location>> diag_name_;
optional<pair<string, location>> diag_name2_; // Ambiguous script name.
- optional<pair<line, location>> diag_line_;
+ lines diag_preamble_;
uint8_t diag_weight_ = 0;
// Custom dependency change tracking.
@@ -219,8 +360,24 @@ namespace build2
// depdb env <var-names> - Track the environment variables change as a
// hash.
//
- optional<location> depdb_clear_; // 'depdb clear' location if any.
- lines depdb_preamble_; // Note: excludes 'depdb clear'.
+ // depdb dyndep ... - Extract dynamic dependency information. Can
+ // only be the last depdb builtin call in the
+ // preamble. Note that such dependencies don't
+ // end up in $<. We also don't cause clean of
+ // such dependencies (since there may be no .d
+ // file) -- they should also be listed as
+ // static prerequisites of some other target
+ // (e.g., lib{} for headers) or a custom clean
+ // recipe should be provided.
+ //
+ //
+ optional<location> depdb_clear_; // depdb-clear location.
+ bool depdb_value_ = false; // depdb-{string,hash}
+ optional<pair<location, size_t>>
+ depdb_dyndep_; // depdb-dyndep location/position.
+ bool depdb_dyndep_byproduct_ = false; // --byproduct
+ bool depdb_dyndep_dyn_target_ = false; // --dyn-target
+ lines depdb_preamble_; // Note: excluding depdb-clear.
// If present, the first impure function called in the body of the
// script that performs update of a file-based target.
@@ -233,6 +390,12 @@ namespace build2
//
optional<pair<string, location>> impure_func_;
+ // Similar to the impure function above but for a computed (e.g.,
+ // target-qualified) variable expansion. In this case we don't have a
+ // name (it's computed).
+ //
+ optional<location> computed_var_;
+
// True during pre-parsing when the pre-parse mode is temporarily
// suspended to perform expansion.
//
@@ -243,19 +406,19 @@ namespace build2
// Before the script line gets parsed, it is set to a temporary value
// that will by default be appended to the script. However,
// parse_program() can point it to a different location where the line
- // should be saved instead (e.g., diag_line_, etc) or set it to NULL
- // if the line is handled in an ad-hoc way and should be dropped
- // (e.g., depdb_clear_, etc).
+ // should be saved instead (e.g., diag_preamble_ back, etc) or set it
+ // to NULL if the line is handled in an ad-hoc way and should be
+ // dropped (e.g., depdb_clear_, etc).
//
line* save_line_;
- // The if-else nesting level (and in the future for other flow
- // control constructs).
+ // The flow control constructs nesting level.
//
- // Maintained during pre-parsing and is incremented when the cmd_if or
- // cmd_ifn lines are encountered, which in particular means that it is
- // already incremented by the time the if-condition expression is
- // pre-parsed. Decremented when the cmd_end line is encountered.
+ // Maintained during pre-parsing and is incremented when flow control
+ // construct condition lines are encountered, which in particular
+ // means that it is already incremented by the time the condition
+ // expression is pre-parsed. Decremented when the cmd_end line is
+ // encountered.
//
size_t level_ = 0;
diff --git a/libbuild2/build/script/parser.test.cxx b/libbuild2/build/script/parser.test.cxx
index 4089efa..97eac22 100644
--- a/libbuild2/build/script/parser.test.cxx
+++ b/libbuild2/build/script/parser.test.cxx
@@ -29,35 +29,58 @@ namespace build2
class print_runner: public runner
{
public:
- print_runner (bool line): line_ (line) {}
+ print_runner (bool line, bool iterations):
+ line_ (line),
+ iterations_ (iterations) {}
virtual void
enter (environment&, const location&) override {}
virtual void
- run (environment&,
+ run (environment& env,
const command_expr& e,
- size_t i,
- const location&) override
+ const iteration_index* ii, size_t i,
+ const function<command_function>& cf,
+ const location& ll) override
{
+ // If the functions is specified, then just execute it with an empty
+ // stdin so it can perform the housekeeping (stop replaying tokens,
+ // increment line index, etc).
+ //
+ if (cf != nullptr)
+ {
+ assert (e.size () == 1 && !e[0].pipe.empty ());
+
+ const command& c (e[0].pipe.back ());
+
+ // Must be enforced by the caller.
+ //
+ assert (!c.out && !c.err && !c.exit);
+
+ cf (env, c.arguments,
+ fdopen_null (), nullptr /* pipe */,
+ nullopt /* deadline */,
+ ll);
+ }
+
cout << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
}
virtual bool
- run_if (environment&,
- const command_expr& e,
- size_t i,
- const location&) override
+ run_cond (environment&,
+ const command_expr& e,
+ const iteration_index* ii, size_t i,
+ const location&) override
{
cout << "? " << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
@@ -68,16 +91,36 @@ namespace build2
leave (environment&, const location&) override {}
private:
+ void
+ print_line_info (const iteration_index* ii, size_t i) const
+ {
+ cout << " #";
+
+ if (line_)
+ cout << ' ' << i;
+
+ if (iterations_ && ii != nullptr)
+ {
+ string s;
+ for (const iteration_index* i (ii); i != nullptr; i = i->prev)
+ s.insert (0, " i" + to_string (i->index));
+
+ cout << s;
+ }
+ }
+
+ private:
bool line_;
+ bool iterations_;
};
// Usages:
//
- // argv[0] [-l]
+ // argv[0] [-l] [-r]
// argv[0] -b [-t]
// argv[0] -d [-t]
+ // argv[0] -g [-t] [<diag-name>]
// argv[0] -q
- // argv[0] -g [<diag-name>]
//
// In the first form read the script from stdin and trace the script
// body execution to stdout using the custom print runner.
@@ -88,26 +131,33 @@ namespace build2
// In the third form read the script from stdin, parse it and dump the
// depdb preamble lines to stdout.
//
- // In the forth form read the script from stdin, parse it and print
- // line tokens quoting information to stdout.
- //
- // In the fifth form read the script from stdin, parse it and print the
+    // In the fourth form read the script from stdin, parse it and print the
// low-verbosity script diagnostics name or custom low-verbosity
// diagnostics to stdout. If the script doesn't deduce any of them, then
// print the diagnostics and exit with non-zero code.
//
+ // In the fifth form read the script from stdin, parse it and print
+ // line tokens quoting information to stdout.
+ //
// -l
// Print the script line number for each executed expression.
//
+ // -r
+ // Print the loop iteration numbers for each executed expression.
+ //
// -b
// Dump the parsed script body to stdout.
//
// -d
// Dump the parsed script depdb preamble to stdout.
//
+ // -g
+ // Dump the low-verbosity script diagnostics name or custom
+ // low-verbosity diagnostics to stdout.
+ //
// -t
- // Print true if the body (-b) or depdb preamble (-d) references the
- // temporary directory and false otherwise.
+ // Print true if the body (-b), depdb preamble (-d), or diag preamble
+ // (-g) references the temporary directory and false otherwise.
//
// -q
// Print the parsed script tokens quoting information to sdout. If a
@@ -117,10 +167,6 @@ namespace build2
// <quoting> := 'S' | 'D' | 'M'
// <completeness> := 'C' | 'P'
//
- // -g
- // Dump the low-verbosity script diagnostics name or custom
- // low-verbosity diagnostics to stdout.
- //
int
main (int argc, char* argv[])
{
@@ -131,11 +177,12 @@ namespace build2
run,
body,
depdb_preamble,
- quoting,
- diag
+ diag,
+ quoting
} m (mode::run);
bool print_line (false);
+ bool print_iterations (false);
optional<string> diag_name;
bool temp_dir (false);
@@ -145,19 +192,23 @@ namespace build2
if (a == "-l")
print_line = true;
+ else if (a == "-r")
+ print_iterations = true;
else if (a == "-b")
m = mode::body;
else if (a == "-d")
m = mode::depdb_preamble;
+ else if (a == "-g")
+ m = mode::diag;
else if (a == "-t")
{
- assert (m == mode::body || m == mode::depdb_preamble);
+ assert (m == mode::body ||
+ m == mode::depdb_preamble ||
+ m == mode::diag);
temp_dir = true;
}
else if (a == "-q")
m = mode::quoting;
- else if (a == "-g")
- m = mode::diag;
else
{
if (m == mode::diag)
@@ -170,19 +221,20 @@ namespace build2
}
}
- assert (!print_line || m == mode::run);
- assert (!diag_name || m == mode::diag);
+ assert (!print_line || m == mode::run || m == mode::diag);
+ assert (!print_iterations || m == mode::run || m == mode::diag);
+ assert (!diag_name || m == mode::diag);
// Fake build system driver, default verbosity.
//
init_diag (1);
- init (nullptr, argv[0]);
+ init (nullptr, argv[0], true);
// Serial execution.
//
scheduler sched (1);
global_mutexes mutexes (1);
- file_cache fcache;
+ file_cache fcache (true);
context ctx (sched, mutexes, fcache);
try
@@ -203,6 +255,8 @@ namespace build2
tt.path (path ("driver"));
+ const scope& bs (tt.base_scope ());
+
small_vector<action, 1> acts {perform_update_id};
// Parse and run.
@@ -210,7 +264,7 @@ namespace build2
parser p (ctx);
path_name nm ("buildfile");
- script s (p.pre_parse (tt.base_scope (), tt.type (), acts,
+ script s (p.pre_parse (bs, tt.type (), acts,
cin, nm,
11 /* line */,
(m != mode::diag
@@ -222,9 +276,29 @@ namespace build2
{
case mode::run:
{
- environment e (perform_update_id, tt, s.body_temp_dir);
- print_runner r (print_line);
- p.execute_body (ctx.global_scope, ctx.global_scope, e, s, r);
+ environment e (perform_update_id, tt, bs, false /* temp_dir */);
+ print_runner r (print_line, print_iterations);
+
+ bool exec_diag (!s.diag_preamble.empty ());
+
+ if (exec_diag)
+ {
+ if (s.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ p.execute_diag_preamble (ctx.global_scope, ctx.global_scope,
+ e, s, r,
+ false /* diag */,
+ true /* enter */,
+ false /* leave */);
+ }
+
+ if (s.body_temp_dir && !s.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ p.execute_body (ctx.global_scope, ctx.global_scope,
+ e, s, r,
+ !exec_diag /* enter */);
break;
}
case mode::diag:
@@ -235,14 +309,26 @@ namespace build2
}
else
{
- assert (s.diag_line);
+ if (!temp_dir)
+ {
+ environment e (perform_update_id,
+ tt,
+ bs,
+ s.diag_preamble_temp_dir);
- environment e (perform_update_id, tt, false /* temp_dir */);
+ print_runner r (print_line, print_iterations);
- cout << "diag: " << p.execute_special (ctx.global_scope,
+ names diag (p.execute_diag_preamble (ctx.global_scope,
ctx.global_scope,
- e,
- *s.diag_line) << endl;
+ e, s, r,
+ true /* diag */,
+ true /* enter */,
+ true /* leave */).first);
+
+ cout << "diag: " << diag << endl;
+ }
+ else
+ cout << (s.diag_preamble_temp_dir ? "true" : "false") << endl;
}
break;
diff --git a/libbuild2/build/script/runner.cxx b/libbuild2/build/script/runner.cxx
index 51139d4..e08ebbf 100644
--- a/libbuild2/build/script/runner.cxx
+++ b/libbuild2/build/script/runner.cxx
@@ -28,12 +28,29 @@ namespace build2
//
for (auto i (env.cleanups.begin ()); i != env.cleanups.end (); )
{
- const target* m (&env.target);
- for (; m != nullptr; m = m->adhoc_member)
+ const target* m (nullptr);
+ if (const group* g = env.target.is_a<group> ())
{
- if (const path_target* pm = m->is_a<path_target> ())
- if (i->path == pm->path ())
- break;
+ for (const target* gm: g->members)
+ {
+ if (const path_target* pm = gm->is_a<path_target> ())
+ {
+ if (i->path == pm->path ())
+ {
+ m = gm;
+ break;
+ }
+ }
+ }
+ }
+ else
+ {
+ for (m = &env.target; m != nullptr; m = m->adhoc_member)
+ {
+ if (const path_target* pm = m->is_a<path_target> ())
+ if (i->path == pm->path ())
+ break;
+ }
}
if (m != nullptr)
@@ -96,39 +113,43 @@ namespace build2
void default_runner::
run (environment& env,
const command_expr& expr,
- size_t li,
+ const iteration_index* ii, size_t li,
+ const function<command_function>& cf,
const location& ll)
{
if (verb >= 3)
text << ": " << expr;
// Run the expression if we are not in the dry-run mode or if it
- // executes the set or exit builtin and just print the expression
- // otherwise at verbosity level 2 and up.
+ // executes the set or exit builtin or it is a for-loop. Otherwise,
+      // just print the expression at verbosity level 2 and up.
//
if (!env.context.dry_run ||
find_if (expr.begin (), expr.end (),
- [] (const expr_term& et)
+ [&cf] (const expr_term& et)
{
const process_path& p (et.pipe.back ().program);
return p.initial == nullptr &&
(p.recall.string () == "set" ||
- p.recall.string () == "exit");
+ p.recall.string () == "exit" ||
+ (cf != nullptr &&
+ p.recall.string () == "for"));
}) != expr.end ())
- build2::script::run (env, expr, li, ll);
+ build2::script::run (env, expr, ii, li, ll, cf);
else if (verb >= 2)
text << expr;
}
bool default_runner::
- run_if (environment& env,
- const command_expr& expr,
- size_t li, const location& ll)
+ run_cond (environment& env,
+ const command_expr& expr,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
if (verb >= 3)
text << ": ?" << expr;
- return build2::script::run_if (env, expr, li, ll);
+ return build2::script::run_cond (env, expr, ii, li, ll);
}
}
}
diff --git a/libbuild2/build/script/runner.hxx b/libbuild2/build/script/runner.hxx
index 431c446..ec8a948 100644
--- a/libbuild2/build/script/runner.hxx
+++ b/libbuild2/build/script/runner.hxx
@@ -32,17 +32,21 @@ namespace build2
// Location is the start position of this command line in the script.
// It can be used in diagnostics.
//
+ // Optionally, execute the specified function instead of the last
+ // pipe command.
+ //
virtual void
run (environment&,
const command_expr&,
- size_t index,
+ const iteration_index*, size_t index,
+ const function<command_function>&,
const location&) = 0;
virtual bool
- run_if (environment&,
- const command_expr&,
- size_t,
- const location&) = 0;
+ run_cond (environment&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) = 0;
// Location is the script end location (for diagnostics, etc).
//
@@ -52,9 +56,9 @@ namespace build2
// Run command expressions.
//
- // In dry-run mode don't run the expressions unless they are if-
- // conditions or execute the set or exit builtins, but prints them at
- // verbosity level 2 and up.
+ // In dry-run mode don't run the expressions unless they are flow
+ // control construct conditions or execute the set or exit builtins, but
+ // print them at verbosity level 2 and up.
//
class default_runner: public runner
{
@@ -65,14 +69,15 @@ namespace build2
virtual void
run (environment&,
const command_expr&,
- size_t,
+ const iteration_index*, size_t,
+ const function<command_function>&,
const location&) override;
virtual bool
- run_if (environment&,
- const command_expr&,
- size_t,
- const location&) override;
+ run_cond (environment&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) override;
virtual void
leave (environment&, const location&) override;
diff --git a/libbuild2/build/script/script.cxx b/libbuild2/build/script/script.cxx
index f4f8da8..0f31e7f 100644
--- a/libbuild2/build/script/script.cxx
+++ b/libbuild2/build/script/script.cxx
@@ -28,54 +28,80 @@ namespace build2
environment::
environment (action a,
const target_type& t,
+ const scope_type& s,
bool temp,
const optional<timestamp>& dl)
: build2::script::environment (
t.ctx,
- cast<target_triplet> (t.ctx.global_scope["build.host"]),
+ *t.ctx.build_host,
dir_name_view (&work, &wd_name),
temp_dir.path, false /* temp_dir_keep */,
redirect (redirect_type::none),
redirect (redirect_type::merge, 2),
redirect (redirect_type::pass)),
target (t),
- vars (context, false /* global */),
+ scope (s),
+ vars (context, false /* shared */), // Note: managed.
+ var_ts (var_pool.insert (">")),
+ var_ps (var_pool.insert ("<")),
script_deadline (to_deadline (dl, false /* success */))
{
- // Set special variables.
- //
+ set_special_variables (a);
+
+ if (temp)
+ set_temp_dir_variable ();
+ }
+
+ void environment::
+ set_special_variables (action a)
+ {
{
// $>
//
+ // What should it contain for an explicit group? While it may seem
+ // that just the members should be enough (and analogous to the ad
+ // hoc case), this won't let us get the group name for diagnostics.
+ // So the group name followed by all the members seems like the
+ // logical choice.
+ //
names ns;
- for (const target_type* m (&t); m != nullptr; m = m->adhoc_member)
- m->as_name (ns);
- assign (var_pool.insert (">")) = move (ns);
+ if (const group* g = target.is_a<group> ())
+ {
+ g->as_name (ns);
+ for (const target_type* m: g->members)
+ m->as_name (ns);
+ }
+ else
+ {
+ for (const target_type* m (&target);
+ m != nullptr;
+ m = m->adhoc_member)
+ m->as_name (ns);
+ }
+
+ assign (var_ts) = move (ns);
}
{
// $<
//
- // Note that at this stage (after execute_prerequisites()) ad hoc
- // prerequisites are no longer in prerequisite_targets which means
- // they won't end up in $< either. While at first thought ad hoc
- // prerequisites in ad hoc recipes don't seem to make much sense,
- // they could be handy to exclude certain preresquisites from $<
- // while still treating them as such.
+ // Note that ad hoc prerequisites don't end up in $<. While at first
+ // thought ad hoc prerequisites in ad hoc recipes don't seem to make
+ // much sense, they could be handy to exclude certain prerequisites
+ // from $< while still treating them as such, especially in rule.
//
names ns;
- for (const target_type* pt: t.prerequisite_targets[a])
+ for (const prerequisite_target& pt: target.prerequisite_targets[a])
{
- if (pt != nullptr)
- pt->as_name (ns);
+ // See adhoc_buildscript_rule::execute_update_prerequisites().
+ //
+ if (pt.target != nullptr && !pt.adhoc ())
+ pt.target->as_name (ns);
}
- assign (var_pool.insert ("<")) = move (ns);
+ assign (var_ps) = move (ns);
}
-
- if (temp)
- set_temp_dir_variable ();
}
void environment::
@@ -146,7 +172,7 @@ namespace build2
}
void environment::
- set_variable (string&& nm,
+ set_variable (string nm,
names&& val,
const string& attrs,
const location& ll)
@@ -225,7 +251,7 @@ namespace build2
// in parallel). Plus, if there is no such variable, then we cannot
// possibly find any value.
//
- const variable* pvar (context.var_pool.find (n));
+ const variable* pvar (scope.var_pool ().find (n));
if (pvar == nullptr)
return lookup_type ();
diff --git a/libbuild2/build/script/script.hxx b/libbuild2/build/script/script.hxx
index e11cb45..08f1bf4 100644
--- a/libbuild2/build/script/script.hxx
+++ b/libbuild2/build/script/script.hxx
@@ -20,14 +20,22 @@ namespace build2
namespace script
{
using build2::script::line;
- using build2::script::lines;
using build2::script::line_type;
+ using build2::script::lines;
using build2::script::redirect;
using build2::script::redirect_type;
+ using build2::script::command;
using build2::script::expr_term;
using build2::script::command_expr;
+ using build2::script::iteration_index;
using build2::script::deadline;
using build2::script::timeout;
+ using build2::script::pipe_command;
+ using build2::script::command_function;
+
+ // Forward declarations.
+ //
+ class default_runner;
// Notes:
//
@@ -40,13 +48,11 @@ namespace build2
class script
{
public:
- using lines_type = build::script::lines;
-
// Note that the variables are not pre-entered into a pool during the
// parsing phase, so the line variable pointers are NULL.
//
- lines_type body;
- bool body_temp_dir = false; // True if the body references $~.
+ lines body;
+ bool body_temp_dir = false; // True if the body references $~.
// Referenced ordinary (non-special) variables.
//
@@ -61,18 +67,24 @@ namespace build2
small_vector<string, 2> vars; // 2 for command and options.
// Command name for low-verbosity diagnostics and custom low-verbosity
- // diagnostics line. Note: cannot be both (see the script parser for
+ // diagnostics line, potentially preceded with the variable
+ // assignments. Note: cannot be both (see the script parser for
// details).
//
optional<string> diag_name;
- optional<line> diag_line;
+ lines diag_preamble;
+ bool diag_preamble_temp_dir = false; // True if refs $~.
// The script's custom dependency change tracking lines (see the
// script parser for details).
//
- bool depdb_clear;
- lines_type depdb_preamble;
- bool depdb_preamble_temp_dir = false; // True if references $~.
+ bool depdb_clear;
+ bool depdb_value; // String or hash.
+ optional<size_t> depdb_dyndep; // Pos of first dyndep.
+ bool depdb_dyndep_byproduct = false; // dyndep --byproduct
+ bool depdb_dyndep_dyn_target = false;// dyndep --dyn-target
+ lines depdb_preamble; // Note include vars.
+ bool depdb_preamble_temp_dir = false;// True if refs $~.
location start_loc;
location end_loc;
@@ -81,24 +93,38 @@ namespace build2
class environment: public build2::script::environment
{
public:
+ using scope_type = build2::scope;
using target_type = build2::target;
environment (action,
const target_type&,
+ const scope_type&,
bool temp_dir,
const optional<timestamp>& deadline = nullopt);
+ // (Re)set special $< and $> variables.
+ //
+ void
+ set_special_variables (action);
+
+ // Create the temporary directory (if it doesn't exist yet) and set
+ // the $~ special variable to its path.
+ //
+ void
+ set_temp_dir_variable ();
+
environment (environment&&) = delete;
environment (const environment&) = delete;
environment& operator= (environment&&) = delete;
environment& operator= (const environment&) = delete;
public:
- // Primary target this environment is for.
+        // Primary target this environment is for and its base scope.
//
const target_type& target;
+ const scope_type& scope;
- // Script-local variable pool and map.
+ // Script-private variable pool and map.
//
// Note that it may be tempting to reuse the rule-specific variables
// for this but they should not be modified during execution (i.e.,
@@ -111,6 +137,9 @@ namespace build2
variable_pool var_pool;
variable_map vars;
+ const variable& var_ts; // $>
+ const variable& var_ps; // $<
+
// Temporary directory for the script run.
//
// Currently this directory is removed regardless of the script
@@ -140,14 +169,8 @@ namespace build2
//
size_t exec_line = 1;
- // Create the temporary directory (if it doesn't exist yet) and set
- // the $~ special variable to its path.
- //
- void
- set_temp_dir_variable ();
-
virtual void
- set_variable (string&& name,
+ set_variable (string name,
names&&,
const string& attrs,
const location&) override;
diff --git a/libbuild2/buildfile b/libbuild2/buildfile
index 17003b5..6d7c597 100644
--- a/libbuild2/buildfile
+++ b/libbuild2/buildfile
@@ -4,7 +4,7 @@
# NOTE: remember to update bundled_modules in libbuild2/module.cxx if adding a
# new module.
#
-bundled_modules = bash/ bin/ c/ cc/ cxx/ in/ version/
+bundled_modules = bash/ bin/ c/ cc/ cli/ cxx/ in/ version/
./: lib{build2} $bundled_modules
@@ -25,14 +25,22 @@ include $bundled_modules
#
intf_libs = $libbutl
-lib{build2}: libul{build2}: \
- {hxx ixx txx cxx}{* -utility-*installed -config -version -*.test...} \
+lib{build2}: libul{build2}: \
+ {hxx ixx txx cxx}{* -utility-*installed \
+ -common-options \
+ -b-options \
+ -config \
+ -version \
+ -*.test...} \
+ {hxx ixx cxx}{common-options} \
+ {hxx ixx cxx}{b-options} \
{hxx}{config version}
libul{build2}: script/{hxx ixx txx cxx}{** -*-options -**.test...} \
script/{hxx ixx cxx}{builtin-options}
-libul{build2}: build/{hxx ixx txx cxx}{** -**.test...}
+libul{build2}: build/script/{hxx ixx txx cxx}{** -*-options -**.test...} \
+ build/script/{hxx ixx cxx}{builtin-options}
# Note that this won't work in libul{} since it's not installed.
#
@@ -51,39 +59,64 @@ lib{build2}: cxx{utility-uninstalled}: for_install = false
libul{build2}: config/{hxx ixx txx cxx}{** -host-config -**.test...} \
config/cxx{host-config}
+# Derive ~host and ~build2 configurations from current configuration.
+#
# This will of course blow up spectacularly if we are cross-compiling. But
# let's wait and enjoy the fireworks (and get a sense of why someone would
# want to cross-compile a build system).
#
-config/cxx{host-config}: config/in{host-config}
+# For the ~host configuration we only want c/cxx/cc and bin that they load.
+# For ~build2 we want to keep everything except dist.
+#
+# We also remove comment lines which could be confused with preprocessor
+# directives by some lesser compilers and blank lines between groups of
+# options which could cause spurious rebuilds when we filter out entire
+# groups.
+#
+# For ~build2 also filter out config.install.chroot -- we definitely don't
+# want it carried through. Also filter out variables that control tests
+# execution.
+#
+# Finally, for both ~host and ~build2 we keep config.config.environment
+# but strip config.config.hermetic* (we shouldn't be forcing hermiticity
+# on the users of ~host/~build2; they can decide for themselves if they
+# want it).
+#
+build2_config_lines = [strings]
+host_config_lines = [strings]
+
+for l: $regex.replace_lines( \
+ $config.save(), \
+ '^( *(#|(config\.(test[. ]|dist\.|install\.chroot|config\.hermetic))).*|)$', \
+ [null])
{
- # For the ~host configuration we only want c/cxx/cc and bin that they load.
- # For ~build2 we want to keep everything except dist.
- #
- # We also remove comment lines which could be confused with preprocessor
- # directives by some lesser compilers.
- #
- # For ~build2 also filter out config.install.chroot -- we definitely don't
- # want it carried through.
- #
- # Finally, for both ~host and ~build2 we keep config.config.environment
- # but strip config.config.hermetic* (we shouldn't be forcing hermiticity
- # on the users of ~host/~build2; they can decide for themselves if they
- # want it).
- #
- build2_config = $regex.replace_lines( \
- $config.save(), \
- '^ *(#|(config\.(dist\.|install\.chroot|config\.hermetic))).*$', \
- [null], \
- return_lines)
+ build2_config_lines += $l
- # Also preserve config.version and blank lines between groups of options.
+ # Note: also preserve config.version.
#
- host_config = $regex.replace_lines( \
- $build2_config, \
- '^( *config\.(c[. ]|cxx[. ]|cc[.]|bin[.]|config.environment |version ).*|)$', \
- '$&', \
- format_no_copy return_lines)
+ if $regex.match( \
+ $l, \
+ ' *config\.(c[. ]|cxx[. ]|cc[.]|bin[.]|config.environment |version ).*')
+ {
+ # Filter out sanitizer options in ~host. We run the toolchain with various
+ # sanitizers on CI but sanitizers cause issues in some packages. Note that
+ # we can have both -fsanitize and -fno-sanitize forms. For example:
+ #
+ # -fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all
+ #
+ if $regex.match($l, ' *config\.(c|cxx|cc)\.(coptions|loptions)[ =].*')
+ {
+ l = $regex.replace($l, ' ?-f(no-)?sanitize[=-][^ ]+', '')
+ }
+
+ host_config_lines += $l
+ }
+}
+
+config/cxx{host-config}: config/in{host-config}
+{
+ build2_config = $regex.merge($build2_config_lines, '(.+)', '\1\n')
+ host_config = $regex.merge($host_config_lines, '(.+)', '\1\n')
}
libul{build2}: dist/{hxx ixx txx cxx}{** -**.test...}
@@ -158,14 +191,37 @@ if! $cross
{
{obja objs}{context}: cxx.poptions += \
-DBUILD2_IMPORT_PATH=\"$regex.replace($out_root, '\\', '\\\\')\"
+}
- # While this object file should only be linked when we are installing, it
- # will be compiled even in the uninstalled case.
+# Note that while the -installed object file should only be linked when we
+# are installing, it will be compiled even in the uninstalled case.
+#
+if ($install.root != [null])
+{
+ # Only if installed.
+ #
+ {obja objs}{utility-installed}: cxx.poptions += \
+ -DBUILD2_INSTALL_LIB=\"$regex.replace(\
+ $install.resolve($install.lib), '\\', '\\\\')\"
+
+ # Only if configured.
+ #
+ # Note: strip the last directory component (<project>).
+ #
+ # @@ TMP drop after 0.16.0 release.
#
- if ($install.root != [null])
- {obja objs}{utility-installed}: cxx.poptions += \
- -DBUILD2_INSTALL_LIB=\"$regex.replace(\
- $install.resolve($install.lib), '\\', '\\\\')\"
+ install_buildfile = ($install.buildfile != [null] \
+ ? $directory($install.resolve($install.buildfile)) \
+ :)
+ {obja objs}{utility-installed utility-uninstalled}: cxx.poptions += \
+ -DBUILD2_INSTALL_BUILDFILE=\"$regex.replace($install_buildfile, '\\', '\\\\')\"
+
+ #\
+ {obja objs}{utility-installed utility-uninstalled}: cxx.poptions += \
+ -DBUILD2_INSTALL_BUILDFILE=\"$regex.replace(\
+ $directory($install.resolve($install.buildfile)), '\\', '\\\\')\"
+ #\
+
}
if ($cxx.target.class != 'windows')
@@ -216,38 +272,88 @@ else
# Generated options parser.
#
-script/
+# Note that the cli runtime namespace is build2::build::cli rather than
+# build2::cli. That's because the cli namespace inside build2 is reserved for
+# the cli build system module (libbuild2-cli). In fact, every namespace inside
+# build2 is reserved for a potential module and the only namespace names we
+# can use are build (this name, along with import and export, is reserved by
+# the build system core) and names that start with an underscore.
+#
+if $cli.configured
{
- if $cli.configured
+ cli.options += --std c++11 -I $src_root --include-with-brackets \
+--cli-namespace build2::build::cli --generate-specifier
+
+ cli.cxx{*}:
{
- cli.cxx{builtin-options}: cli{builtin}
+ # Include the generated cli files into the distribution and don't remove
+ # them when cleaning in src (so that clean results in a state identical
+ # to distributed).
+ #
+ dist = true
+ clean = ($src_root != $out_root)
- cli.options += --std c++11 -I $src_root --include-with-brackets \
---include-prefix libbuild2/script --guard-prefix LIBBUILD2_SCRIPT \
---cli-namespace build2::script::cli --generate-vector-scanner \
---generate-modifier --generate-specifier --suppress-usage
+ # We keep the generated code in the repository so copy it back to src in
+ # case of a forwarded configuration.
+ #
+ backlink = overwrite
+ }
- cli.cxx{*}:
- {
- # Include the generated cli files into the distribution and don't remove
- # them when cleaning in src (so that clean results in a state identical
- # to distributed). But don't install their headers since they are only
- # used internally in the testscript implementation.
- #
- dist = true
- clean = ($src_root != $out_root)
- install = false
-
- # We keep the generated code in the repository so copy it back to src in
- # case of a forwarded configuration.
- #
- backlink = overwrite
- }
+ cli.cxx{common-options}: cli{common}
+ {
+ cli.options += --include-prefix libbuild2 --guard-prefix LIBBUILD2 \
+--export-symbol LIBBUILD2_SYMEXPORT \
+--hxx-prologue '#include <libbuild2/export.hxx>' \
+--generate-file-scanner --generate-vector-scanner
+ }
+
+ cli.cxx{b-options}: cli{b}
+ {
+ cli.options += --include-prefix libbuild2 --guard-prefix LIBBUILD2 \
+--export-symbol LIBBUILD2_SYMEXPORT \
+--hxx-prologue '#include <libbuild2/export.hxx>' \
+--cxx-prologue "#include <libbuild2/types-parsers.hxx>" \
+--keep-separator --generate-parse --generate-merge
+
+ # Usage options.
+ #
+ cli.options += --suppress-undocumented --long-usage --ansi-color \
+--ascii-tree --page-usage 'build2::print_$name$_' --option-length 23
}
- else
- # No install for the pre-generated case.
+
+ script/cli.cxx{builtin-options}: script/cli{builtin}
+ {
+ cli.options += --include-prefix libbuild2/script \
+--guard-prefix LIBBUILD2_SCRIPT --generate-modifier --suppress-usage
+
+ # Don't install the generated cli headers since they are only used
+ # internally in the script implementation.
+ #
+ install = false
+ }
+
+ build/script/cli.cxx{builtin-options}: build/script/cli{builtin}
+ {
+ cli.options += --include-prefix libbuild2/build/script \
+--guard-prefix LIBBUILD2_BUILD_SCRIPT \
+--cxx-prologue "#include <libbuild2/types-parsers.hxx>" \
+--generate-parse --generate-modifier --suppress-usage
+
+ # Don't install the generated cli headers since they are only used
+ # internally in the buildscript implementation.
#
- hxx{builtin-options}@./ ixx{builtin-options}@./: install = false
+ install = false
+ }
+}
+else
+{
+ # No install for the pre-generated case.
+ #
+ script/hxx{builtin-options}@script/ \
+ script/ixx{builtin-options}@script/: install = false
+
+ build/script/hxx{builtin-options}@build/script/ \
+ build/script/ixx{builtin-options}@build/script/: install = false
}
# Install into the libbuild2/ subdirectory of, say, /usr/include/
diff --git a/libbuild2/buildspec.cxx b/libbuild2/buildspec.cxx
index bd580ca..2eeaf31 100644
--- a/libbuild2/buildspec.cxx
+++ b/libbuild2/buildspec.cxx
@@ -53,7 +53,7 @@ namespace build2
if (v)
{
names storage;
- os << reverse (v, storage);
+ os << reverse (v, storage, true /* reduce */);
}
else
os << "[null]";
@@ -86,7 +86,7 @@ namespace build2
if (v)
{
names storage;
- os << reverse (v, storage);
+ os << reverse (v, storage, true /* reduce */);
}
else
os << "[null]";
diff --git a/libbuild2/c/init.cxx b/libbuild2/c/init.cxx
index be001a8..42ade68 100644
--- a/libbuild2/c/init.cxx
+++ b/libbuild2/c/init.cxx
@@ -27,6 +27,7 @@ namespace build2
namespace c
{
using cc::compiler_id;
+ using cc::compiler_type;
using cc::compiler_class;
using cc::compiler_info;
@@ -77,7 +78,12 @@ namespace build2
// C17/18 is a bug-fix version of C11 so here we assume it is the
// same as C11.
//
- // And it's still early days for C2X.
+ // And it's still early days for C2X. Specifically, there is not
+ // much about C2X in MSVC in the official places and the following
+          // page shows that it's pretty much unimplemented at the time of the
+ // MSVC 17.6 release:
+ //
+ // https://en.cppreference.com/w/c/compiler_support/23
//
// From version 16.8 VC now supports /std:c11 and /std:c17 options
// which enable C11/17 conformance. However, as of version 16.10,
@@ -154,15 +160,20 @@ namespace build2
// Enter all the variables and initialize the module data.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
cc::config_data d {
cc::lang::c,
"c",
"c",
+ "obj-c",
BUILD2_DEFAULT_C,
".i",
+ ".mi",
hinters,
@@ -225,6 +236,9 @@ namespace build2
vp["cc.export.libs"],
vp["cc.export.impl_libs"],
+ vp["cc.pkconfig.include"],
+ vp["cc.pkconfig.lib"],
+
vp.insert_alias (vp["cc.stdlib"], "c.stdlib"), // Same as cc.stdlib.
vp["cc.runtime"],
@@ -276,6 +290,9 @@ namespace build2
vp.insert_alias (d.c_runtime, "c.runtime");
vp.insert_alias (d.c_importable, "c.importable");
+ vp.insert_alias (d.c_pkgconfig_include, "c.pkgconfig.include");
+ vp.insert_alias (d.c_pkgconfig_lib, "c.pkgconfig.lib");
+
auto& m (extra.set_module (new config_module (move (d))));
m.guess (rs, loc, extra.hints);
@@ -312,10 +329,15 @@ namespace build2
nullptr
};
+ // Note that we include S{} here because .S files can include each other.
+      // (And maybe from inline assembler instructions?)
+ //
static const target_type* const inc[] =
{
&h::static_type,
&c::static_type,
+ &m::static_type,
+ &S::static_type,
nullptr
};
@@ -346,7 +368,6 @@ namespace build2
"c.compile",
"c.link",
"c.install",
- "c.uninstall",
cm.x_info->id.type,
cm.x_info->id.variant,
@@ -386,12 +407,90 @@ namespace build2
inc
};
- auto& m (extra.set_module (new module (move (d))));
+ auto& m (extra.set_module (new module (move (d), rs)));
m.init (rs, loc, extra.hints, *cm.x_info);
return true;
}
+ bool
+ objc_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::objc_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.objc module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("c"));
+
+ if (mod == nullptr)
+ fail (loc) << "c.objc module must be loaded after c module";
+
+ // Register the target type and "enable" it in the module.
+ //
+ // Note that we must register the target type regardless of whether the
+ // C compiler is capable of compiling Objective-C. But we enable only
+ // if it is.
+ //
+ // Note: see similar code in the cxx module.
+ //
+ rs.insert_target_type<m> ();
+
+ // Note that while Objective-C is supported by MinGW GCC, it's unlikely
+ // Clang supports it when targeting MSVC or Emscripten. But let's keep
+ // the check simple for now.
+ //
+ if (mod->ctype == compiler_type::gcc ||
+ mod->ctype == compiler_type::clang)
+ mod->x_obj = &m::static_type;
+
+ return true;
+ }
+
+ bool
+ as_cpp_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::as_cpp_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.as-cpp module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("c"));
+
+ if (mod == nullptr)
+ fail (loc) << "c.as-cpp module must be loaded after c module";
+
+ // Register the target type and "enable" it in the module.
+ //
+ // Note that we must register the target type regardless of whether the
+      // C compiler is capable of compiling Assembler with C preprocessor. But
+ // we enable only if it is.
+ //
+ rs.insert_target_type<S> ();
+
+ if (mod->ctype == compiler_type::gcc ||
+ mod->ctype == compiler_type::clang)
+ mod->x_asp = &S::static_type;
+
+ return true;
+ }
+
static const module_functions mod_functions[] =
{
// NOTE: don't forget to also update the documentation in init.hxx if
@@ -399,6 +498,8 @@ namespace build2
{"c.guess", nullptr, guess_init},
{"c.config", nullptr, config_init},
+ {"c.objc", nullptr, objc_init},
+ {"c.as-cpp", nullptr, as_cpp_init},
{"c", nullptr, init},
{nullptr, nullptr, nullptr}
};
diff --git a/libbuild2/c/init.hxx b/libbuild2/c/init.hxx
index 2662bb1..c3126ea 100644
--- a/libbuild2/c/init.hxx
+++ b/libbuild2/c/init.hxx
@@ -22,6 +22,10 @@ namespace build2
// `c.guess` -- registers and sets some variables.
// `c.config` -- loads c.guess and sets more variables.
// `c` -- loads c.config and registers target types and rules.
+ // `c.objc` -- registers m{} target type and enables Objective-C
+ // compilation. Must be loaded after c.
+ // `c.as-cpp` -- registers S{} target type and enables Assembler with
+ // C preprocessor compilation. Must be loaded after c.
//
extern "C" LIBBUILD2_C_SYMEXPORT const module_functions*
build2_c_load ();
diff --git a/libbuild2/c/target.hxx b/libbuild2/c/target.hxx
index 333d39f..39fcf89 100644
--- a/libbuild2/c/target.hxx
+++ b/libbuild2/c/target.hxx
@@ -15,6 +15,8 @@ namespace build2
{
using cc::h;
using cc::c;
+ using cc::m;
+ using cc::S;
}
}
diff --git a/libbuild2/cc/buildfile b/libbuild2/cc/buildfile
index e98e3de..e090e76 100644
--- a/libbuild2/cc/buildfile
+++ b/libbuild2/cc/buildfile
@@ -6,15 +6,24 @@
include ../
impl_libs = ../lib{build2} # Implied interface dependency.
-import impl_libs += libpkgconf%lib{pkgconf}
+libpkgconf = $config.build2.libpkgconf
+
+if $libpkgconf
+ import impl_libs += libpkgconf%lib{pkgconf}
+else
+ import impl_libs += libpkg-config%lib{pkg-config}
include ../bin/
intf_libs = ../bin/lib{build2-bin}
-./: lib{build2-cc}: libul{build2-cc}: {hxx ixx txx cxx}{** -**.test...} \
- h{msvc-setup} \
+./: lib{build2-cc}: libul{build2-cc}: \
+ {hxx ixx txx cxx}{** -pkgconfig-lib* -**.test...} \
+ h{msvc-setup} \
$intf_libs $impl_libs
+libul{build2-cc}: cxx{pkgconfig-libpkgconf}: include = $libpkgconf
+libul{build2-cc}: cxx{pkgconfig-libpkg-config}: include = (!$libpkgconf)
+
# Unit tests.
#
exe{*.test}:
@@ -38,6 +47,9 @@ for t: cxx{**.test...}
obja{*}: cxx.poptions += -DLIBBUILD2_CC_STATIC_BUILD
objs{*}: cxx.poptions += -DLIBBUILD2_CC_SHARED_BUILD
+if $libpkgconf
+ cxx.poptions += -DBUILD2_LIBPKGCONF
+
if ($cxx.target.class == 'windows')
cxx.libs += $regex.apply(advapi32 ole32 oleaut32, \
'(.+)', \
diff --git a/libbuild2/cc/common.cxx b/libbuild2/cc/common.cxx
index 09a1752..07a4d48 100644
--- a/libbuild2/cc/common.cxx
+++ b/libbuild2/cc/common.cxx
@@ -39,6 +39,11 @@ namespace build2
// 3. dependency libs (prerequisite_targets, left to right, depth-first)
// 4. dependency libs (*.libs variables).
//
+ // If proc_opt_group is true, then pass to proc_opt the group rather than
+ // the member if a member was picked (according to linfo) from a group.
+ // This is useful when we only want to see the common options set on the
+ // group.
+ //
// If either proc_opt or proc_lib return false, then any further
// processing of this library or its dependencies is skipped. This can be
// used to "prune" the graph traversal in case of duplicates. Note that
@@ -49,19 +54,19 @@ namespace build2
// array that contains the current library dependency chain all the way to
// the library passed to process_libraries(). The first element of this
// array is NULL. If this argument is NULL, then this is a library without
- // a target (e.g., -lpthread) and its name is in the second argument
- // (which could be resolved to an absolute path or passed as an -l<name>
- // option). Otherwise, (the first argument is not NULL), the second
- // argument contains the target path (which can be empty in case of the
- // unknown DLL path).
+ // a target (e.g., -lm, -pthread, etc) and its name is in the second
+ // argument (which could be resolved to an absolute path or passed as an
+ // -l<name>/-pthread option). Otherwise, (the first argument is not NULL),
+ // the second argument contains the target path (which can be empty in
+ // case of the unknown DLL path).
//
- // Initially, the second argument (library name) was a string (e.g.,
- // -lpthread) but there are cases where the library is identified with
- // multiple options, such as -framework CoreServices (there are also cases
- // like -Wl,--whole-archive -lfoo -lbar -Wl,--no-whole-archive). So now it
- // is a vector_view that contains a fragment of options (from one of the
- // *.libs variables) that corresponds to the library (or several
- // libraries, as in the --whole-archive example above).
+ // Initially, the second argument (library name) was a string (e.g., -lm)
+ // but there are cases where the library is identified with multiple
+ // options, such as -framework CoreServices (there are also cases like
+ // -Wl,--whole-archive -lfoo -lbar -Wl,--no-whole-archive). So now it is a
+ // vector_view that contains a fragment of options (from one of the *.libs
+ // variables) that corresponds to the library (or several libraries, as in
+ // the --whole-archive example above).
//
// Storing a reference to elements of library name in proc_lib is legal
// (they come either from the target's path or from one of the *.libs
@@ -72,10 +77,18 @@ namespace build2
// not to pick the liba/libs{} member for installed libraries instead
// passing the lib{} group itself. This can be used to match the semantics
// of file_rule which, when matching prerequisites, does not pick the
- // liba/libs{} member (naturally) but just matches the lib{} group.
+ // liba/libs{} member (naturally) but just matches the lib{} group. Note
+ // that currently this truly only works for installed lib{} since non-
+ // installed ones don't have cc.type set. See proc_opt_group for an
+ // alternative way to (potentially) achieve the desired semantics.
//
// Note that if top_li is present, then the target passed to proc_impl,
- // proc_lib, and proc_opt is always a file.
+ // proc_lib, and proc_opt (unless proc_opt_group is true) is always a
+ // file.
+ //
+ // The dedup argument is part of the interface dependency deduplication
+ // functionality, similar to $x.deduplicate_export_libs(). Note, however,
+ // that here we do it "properly" (i.e., using group members, etc).
//
void common::
process_libraries (
@@ -83,7 +96,7 @@ namespace build2
const scope& top_bs,
optional<linfo> top_li,
const dir_paths& top_sysd,
- const mtime_target& l, // liba/libs{} or lib{}
+ const mtime_target& l, // liba/libs{}, libux{}, or lib{}
bool la,
lflags lf,
const function<bool (const target&,
@@ -92,34 +105,68 @@ namespace build2
const small_vector<reference_wrapper<
const string>, 2>&, // Library "name".
lflags, // Link flags.
- const string* type, // cc.type
+ const string* type, // whole cc.type
bool sys)>& proc_lib, // System library?
const function<bool (const target&,
- const string& type, // cc.type
+ const string& lang, // lang from cc.type
bool com, // cc. or x.
bool exp)>& proc_opt, // *.export.
- bool self /*= false*/, // Call proc_lib on l?
- library_cache* cache,
- small_vector<const target*, 24>* chain) const
+ bool self, // Call proc_lib on l?
+ bool proc_opt_group, // Call proc_opt on group instead of member?
+ library_cache* cache) const
{
library_cache cache_storage;
if (cache == nullptr)
cache = &cache_storage;
- small_vector<const target*, 24> chain_storage;
- if (chain == nullptr)
- {
- chain = &chain_storage;
+ small_vector<const target*, 32> chain;
- if (proc_lib)
- chain->push_back (nullptr);
- }
+ if (proc_lib)
+ chain.push_back (nullptr);
+
+ process_libraries_impl (a, top_bs, top_li, top_sysd,
+ nullptr, l, la, lf,
+ proc_impl, proc_lib, proc_opt,
+ self, proc_opt_group,
+ cache, &chain, nullptr);
+ }
+ void common::
+ process_libraries_impl (
+ action a,
+ const scope& top_bs,
+ optional<linfo> top_li,
+ const dir_paths& top_sysd,
+ const target* lg,
+ const mtime_target& l,
+ bool la,
+ lflags lf,
+ const function<bool (const target&,
+ bool la)>& proc_impl,
+ const function<bool (const target* const*,
+ const small_vector<reference_wrapper<
+ const string>, 2>&,
+ lflags,
+ const string* type,
+ bool sys)>& proc_lib,
+ const function<bool (const target&,
+ const string& lang,
+ bool com,
+ bool exp)>& proc_opt,
+ bool self,
+ bool proc_opt_group,
+ library_cache* cache,
+ small_vector<const target*, 32>* chain,
+ small_vector<const target*, 32>* dedup) const
+ {
// Add the library to the chain.
//
if (self && proc_lib)
chain->push_back (&l);
+ // We only lookup public variables so go straight for the public
+ // variable pool.
+ //
auto& vp (top_bs.ctx.var_pool);
do // Breakout loop.
@@ -131,25 +178,45 @@ namespace build2
// performance we use lookup_original() directly and only look in the
// target (so no target type/pattern-specific).
//
- const string* t (
+ const string* pt (
cast_null<string> (
l.state[a].lookup_original (c_type, true /* target_only */).first));
+ // cc.type value format is <lang>[,...].
+ //
+ size_t p;
+ const string& t (pt != nullptr
+ ? ((p = pt->find (',')) == string::npos
+ ? *pt
+ : string (*pt, 0, p))
+ : string ());
+
+ // Why are we bothering with impl for binless libraries since all
+ // their dependencies are by definition interface? Well, for one, it
+ // could be that it is dynamically-binless (e.g., binless on some
+ // platforms or in some configurations and binful on/in others). In
+ // this case it would be helpful to have a uniform semantics so that,
+ // for example, *.libs are used for liba{} regardless of whether it is
+ // binless or not. On the other hand, having to specify both
+ // *.export.libs=-lm and *.libs=-lm (or *.export.impl_libs) for an
+ // always-binless library is sure not very intuitive. Not sure if we
+ // can win here.
+ //
bool impl (proc_impl && proc_impl (l, la));
bool cc (false), same (false);
- if (t != nullptr)
+ if (!t.empty ())
{
- cc = (*t == "cc");
- same = (!cc && *t == x);
+ cc = (t == "cc");
+ same = (!cc && t == x);
}
- const scope& bs (t == nullptr || cc ? top_bs : l.base_scope ());
+ const scope& bs (t.empty () || cc ? top_bs : l.base_scope ());
lookup c_e_libs;
lookup x_e_libs;
- if (t != nullptr)
+ if (!t.empty ())
{
// Note that we used to treat *.export.libs set on the liba/libs{}
// members as *.libs overrides rather than as member-specific
@@ -168,8 +235,6 @@ namespace build2
//
// See also deduplicate_export_libs() if changing anything here.
//
- // @@ PERF: do target_only (helps a bit in non-installed case)?
- //
{
const variable& v (impl ? c_export_impl_libs : c_export_libs);
c_e_libs = l.lookup_original (v, false, &bs).first;
@@ -180,7 +245,7 @@ namespace build2
const variable& v (
same
? (impl ? x_export_impl_libs : x_export_libs)
- : vp[*t + (impl ? ".export.impl_libs" : ".export.libs")]);
+ : vp[t + (impl ? ".export.impl_libs" : ".export.libs")]);
x_e_libs = l.lookup_original (v, false, &bs).first;
}
@@ -188,12 +253,14 @@ namespace build2
//
if (proc_opt)
{
+ const target& ol (proc_opt_group && lg != nullptr ? *lg : l);
+
// If all we know is it's a C-common library, then in both cases
// we only look for cc.export.*.
//
if (cc)
{
- if (!proc_opt (l, *t, true, true)) break;
+ if (!proc_opt (ol, t, true, true)) break;
}
else
{
@@ -210,24 +277,24 @@ namespace build2
//
// Note: options come from *.export.* variables.
//
- if (!proc_opt (l, *t, false, true) ||
- !proc_opt (l, *t, true, true)) break;
+ if (!proc_opt (ol, t, false, true) ||
+ !proc_opt (ol, t, true, true)) break;
}
else
{
// For default export we use the same options as were used
// to build the library.
//
- if (!proc_opt (l, *t, false, false) ||
- !proc_opt (l, *t, true, false)) break;
+ if (!proc_opt (ol, t, false, false) ||
+ !proc_opt (ol, t, true, false)) break;
}
}
else
{
// Interface: only add *.export.* (interface dependencies).
//
- if (!proc_opt (l, *t, false, true) ||
- !proc_opt (l, *t, true, true)) break;
+ if (!proc_opt (ol, t, false, true) ||
+ !proc_opt (ol, t, true, true)) break;
}
}
}
@@ -268,12 +335,12 @@ namespace build2
const file* f;
const path& p ((f = l.is_a<file> ()) ? f->path () : empty_path);
- bool s (t != nullptr // If cc library (matched or imported).
+ bool s (pt != nullptr // If cc library (matched or imported).
? cast_false<bool> (l.vars[c_system])
: !p.empty () && sys (top_sysd, p.string ()));
proc_lib_name = {p.string ()};
- if (!proc_lib (&chain->back (), proc_lib_name, lf, t, s))
+ if (!proc_lib (&chain->back (), proc_lib_name, lf, pt, s))
break;
}
@@ -283,21 +350,21 @@ namespace build2
// Find system search directories corresponding to this library, i.e.,
// from its project and for its type (C, C++, etc).
//
- auto find_sysd = [&top_sysd, t, cc, same, &bs, &sysd, this] ()
+ auto find_sysd = [&top_sysd, &vp, t, cc, same, &bs, &sysd, this] ()
{
// Use the search dirs corresponding to this library scope/type.
//
- sysd = (t == nullptr || cc)
+ sysd = (t.empty () || cc)
? &top_sysd // Imported library, use importer's sysd.
: &cast<dir_paths> (
bs.root_scope ()->vars[same
? x_sys_lib_dirs
- : bs.ctx.var_pool[*t + ".sys_lib_dirs"]]);
+ : vp[t + ".sys_lib_dirs"]]);
};
auto find_linfo = [top_li, t, cc, &bs, &l, &li] ()
{
- li = (t == nullptr || cc)
+ li = (t.empty () || cc)
? top_li
: optional<linfo> (link_info (bs, link_type (l).type)); // @@ PERF
};
@@ -315,11 +382,16 @@ namespace build2
for (const prerequisite_target& pt: l.prerequisite_targets[a])
{
// Note: adhoc prerequisites are not part of the library metadata
- // protocol (and we should check for adhoc first to avoid races).
+ // protocol (and we should check for adhoc first to avoid races
+ // during execute).
//
- if (pt.adhoc || pt == nullptr)
+ if (pt.adhoc () || pt == nullptr)
continue;
+ if (marked (pt))
+ fail << "implicit dependency cycle detected involving library "
+ << l;
+
bool la;
const file* f;
@@ -327,13 +399,20 @@ namespace build2
(la = (f = pt->is_a<libux> ())) ||
( f = pt->is_a<libs> ()))
{
+ // See link_rule for details.
+ //
+ const target* g ((pt.include & include_group) != 0
+ ? f->group
+ : nullptr);
+
if (sysd == nullptr) find_sysd ();
if (!li) find_linfo ();
- process_libraries (a, bs, *li, *sysd,
- *f, la, pt.data,
- proc_impl, proc_lib, proc_opt, true,
- cache, chain);
+ process_libraries_impl (a, bs, *li, *sysd,
+ g, *f, la, pt.data /* lflags */,
+ proc_impl, proc_lib, proc_opt,
+ true /* self */, proc_opt_group,
+ cache, chain, nullptr);
}
}
}
@@ -344,7 +423,7 @@ namespace build2
// If it is not a C-common library, then it probably doesn't have any
// of the *.libs.
//
- if (t != nullptr)
+ if (!t.empty ())
{
optional<dir_paths> usrd; // Extract lazily.
@@ -366,8 +445,8 @@ namespace build2
// Determine the length of the library name fragment as well as
// whether it is a system library. Possible length values are:
//
- // 1 - just the argument itself (-lpthread)
- // 2 - argument and next element (-l pthread, -framework CoreServices)
+ // 1 - just the argument itself (-lm, -pthread)
+ // 2 - argument and next element (-l m, -framework CoreServices)
// 0 - unrecognized/until the end (-Wl,--whole-archive ...)
//
// See similar code in find_system_library().
@@ -398,9 +477,9 @@ namespace build2
{
if (l[0] == '-')
{
- // -l<name>, -l <name>
+ // -l<name>, -l <name>, -pthread
//
- if (l[1] == 'l')
+ if (l[1] == 'l' || l == "-pthread")
{
n = l.size () == 2 ? 2 : 1;
}
@@ -427,11 +506,14 @@ namespace build2
return make_pair (n, s);
};
- auto proc_int = [&l, cache, chain,
- &proc_impl, &proc_lib, &proc_lib_name, &proc_opt,
- &sysd, &usrd,
- &find_sysd, &find_linfo, &sense_fragment,
- &bs, a, &li, impl, this] (const lookup& lu)
+ auto proc_intf = [&l, proc_opt_group, cache, chain,
+ &proc_impl, &proc_lib, &proc_lib_name, &proc_opt,
+ &sysd, &usrd,
+ &find_sysd, &find_linfo, &sense_fragment,
+ &bs, a, &li, impl, this] (
+ const lookup& lu,
+ small_vector<const target*, 32>* dedup,
+ size_t dedup_start) // Start of our deps.
{
const vector<name>* ns (cast_null<vector<name>> (lu));
if (ns == nullptr || ns->empty ())
@@ -441,12 +523,15 @@ namespace build2
{
const name& n (*i);
+ // Note: see also recursively-binless logic in link_rule if
+ // changing anything in simple library handling.
+ //
if (n.simple ())
{
- // This is something like -lpthread or shell32.lib so should
- // be a valid path. But it can also be an absolute library
- // path (e.g., something that may come from our
- // .{static/shared}.pc files).
+ // This is something like -lm or shell32.lib so should be a
+ // valid path. But it can also be an absolute library path
+ // (e.g., something that may come from our .{static/shared}.pc
+ // files).
//
if (proc_lib)
{
@@ -471,68 +556,145 @@ namespace build2
if (sysd == nullptr) find_sysd ();
if (!li) find_linfo ();
- const mtime_target& t (
- resolve_library (a,
- bs,
- n,
- (n.pair ? (++i)->dir : dir_path ()),
- *li,
- *sysd, usrd,
- cache));
+ const mtime_target* t;
+ const target* g;
- if (proc_lib)
+ const char* w (nullptr);
+ try
{
- // This can happen if the target is mentioned in
- // *.export.libs (i.e., it is an interface dependency) but
- // not in the library's prerequisites (i.e., it is not an
- // implementation dependency).
+ pair<const mtime_target&, const target*> p (
+ resolve_library (a,
+ bs,
+ n,
+ (n.pair ? (++i)->dir : dir_path ()),
+ *li,
+ *sysd, usrd,
+ cache));
+
+ t = &p.first;
+ g = p.second;
+
+ // Deduplicate.
//
- // Note that we used to just check for path being assigned
- // but on Windows import-installed DLLs may legally have
- // empty paths.
+ // Note that dedup_start makes sure we only consider our
+ // interface dependencies while maintaining the "through"
+ // list.
//
- const char* w (nullptr);
- if (t.ctx.phase == run_phase::match)
+ if (dedup != nullptr)
{
- size_t o (
- t.state[a].task_count.load (memory_order_consume) -
- t.ctx.count_base ());
+ if (find (dedup->begin () + dedup_start,
+ dedup->end (),
+ t) != dedup->end ())
+ {
+ ++i;
+ continue;
+ }
+
+ dedup->push_back (t);
+ }
+ }
+ catch (const non_existent_library& e)
+ {
+ // This is another manifestation of the "mentioned in
+ // *.export.libs but not in prerequisites" case (see below).
+ //
+ t = &e.target;
+ w = "unknown";
+ }
- if (o != target::offset_applied &&
- o != target::offset_executed)
+ // This can happen if the target is mentioned in *.export.libs
+ // (i.e., it is an interface dependency) but not in the
+ // library's prerequisites (i.e., it is not an implementation
+ // dependency).
+ //
+ // Note that we used to just check for path being assigned but
+ // on Windows import-installed DLLs may legally have empty
+ // paths.
+ //
+ if (w != nullptr)
+ ; // See above.
+ else if (l.ctx.phase == run_phase::match)
+ {
+ // We allow not matching installed libraries if all we need
+ // is their options (see compile_rule::apply()).
+ //
+ if (proc_lib || t->base_scope ().root_scope () != nullptr)
+ {
+ if (!t->matched (a))
w = "not matched";
}
- else if (t.mtime () == timestamp_unknown)
- w = "out of date";
-
- if (w != nullptr)
- fail << (impl ? "implementation" : "interface")
- << " dependency " << t << " is " << w <<
- info << "mentioned in *.export." << (impl ? "impl_" : "")
- << "libs of target " << l <<
- info << "is it a prerequisite of " << l << "?";
+ }
+ else
+ {
+ // Note that this check we only do if there is proc_lib
+ // (since it's valid to process library's options before
+ // updating it).
+ //
+ if (proc_lib)
+ {
+ if (t->mtime () == timestamp_unknown)
+ w = "out of date";
+ }
+ }
+
+ if (w != nullptr)
+ {
+ fail << (impl ? "implementation" : "interface")
+ << " dependency " << *t << " is " << w <<
+ info << "mentioned in *.export." << (impl ? "impl_" : "")
+ << "libs of target " << l <<
+ info << "is it a prerequisite of " << l << "?";
}
// Process it recursively.
//
- // @@ Where can we get the link flags? Should we try to find
- // them in the library's prerequisites? What about
- // installed stuff?
+ bool u;
+ bool la ((u = t->is_a<libux> ()) || t->is_a<liba> ());
+ lflags lf (0);
+
+ // If this is a static library, see if we need to link it
+ // whole.
//
- process_libraries (a, bs, *li, *sysd,
- t, t.is_a<liba> () || t.is_a<libux> (), 0,
- proc_impl, proc_lib, proc_opt, true,
- cache, chain);
+ if (la && proc_lib)
+ {
+ // Note: go straight for the public variable pool.
+ //
+ const variable& var (t->ctx.var_pool["bin.whole"]);
+
+ // See the link rule for the lookup semantics.
+ //
+ lookup l (
+ t->lookup_original (var, true /* target_only */).first);
+
+ if (l ? cast<bool> (*l) : u)
+ lf |= lflag_whole;
+ }
+
+ process_libraries_impl (
+ a, bs, *li, *sysd,
+ g, *t, la, lf,
+ proc_impl, proc_lib, proc_opt,
+ true /* self */, proc_opt_group,
+ cache, chain, dedup);
}
++i;
}
};
+ auto proc_intf_storage = [&proc_intf] (const lookup& lu1,
+ const lookup& lu2 = lookup ())
+ {
+ small_vector<const target*, 32> dedup_storage;
+
+ if (lu1) proc_intf (lu1, &dedup_storage, 0);
+ if (lu2) proc_intf (lu2, &dedup_storage, 0);
+ };
+
// Process libraries from *.libs (of type strings).
//
- auto proc_imp = [&proc_lib, &proc_lib_name,
- &sense_fragment] (const lookup& lu)
+ auto proc_impl = [&proc_lib, &proc_lib_name,
+ &sense_fragment] (const lookup& lu)
{
const strings* ns (cast_null<strings> (lu));
if (ns == nullptr || ns->empty ())
@@ -540,8 +702,8 @@ namespace build2
for (auto i (ns->begin ()), e (ns->end ()); i != e; )
{
- // This is something like -lpthread or shell32.lib so should be
- // a valid path.
+ // This is something like -lm or shell32.lib so should be a
+ // valid path.
//
pair<size_t, bool> r (sense_fragment (*i));
@@ -564,10 +726,26 @@ namespace build2
//
if (cc)
{
- if (c_e_libs) proc_int (c_e_libs);
+ if (impl)
+ {
+ if (c_e_libs) proc_intf (c_e_libs, nullptr, 0);
+ }
+ else
+ {
+ if (c_e_libs)
+ {
+ if (dedup != nullptr)
+ proc_intf (c_e_libs, dedup, dedup->size ());
+ else
+ proc_intf_storage (c_e_libs);
+ }
+ }
}
else
{
+ // Note: see also recursively-binless logic in link_rule if
+ // changing anything here.
+
if (impl)
{
// Interface and implementation: as discussed above, we can have
@@ -575,8 +753,12 @@ namespace build2
//
if (c_e_libs.defined () || x_e_libs.defined ())
{
- if (c_e_libs) proc_int (c_e_libs);
- if (x_e_libs) proc_int (x_e_libs);
+ // Why are we calling proc_intf() on *.impl_libs? Perhaps
+ // because proc_impl() expects strings, not names? Yes, and
+ // proc_intf() checks impl.
+ //
+ if (c_e_libs) proc_intf (c_e_libs, nullptr, 0);
+ if (x_e_libs) proc_intf (x_e_libs, nullptr, 0);
}
else
{
@@ -590,9 +772,9 @@ namespace build2
//
if (proc_lib)
{
- const variable& v (same ? x_libs : vp[*t + ".libs"]);
- proc_imp (l.lookup_original (c_libs, false, &bs).first);
- proc_imp (l.lookup_original (v, false, &bs).first);
+ const variable& v (same ? x_libs : vp[t + ".libs"]);
+ proc_impl (l.lookup_original (c_libs, false, &bs).first);
+ proc_impl (l.lookup_original (v, false, &bs).first);
}
}
}
@@ -600,8 +782,18 @@ namespace build2
{
// Interface: only add *.export.* (interface dependencies).
//
- if (c_e_libs) proc_int (c_e_libs);
- if (x_e_libs) proc_int (x_e_libs);
+ if (c_e_libs.defined () || x_e_libs.defined ())
+ {
+ if (dedup != nullptr)
+ {
+ size_t s (dedup->size ()); // Start of our interface deps.
+
+ if (c_e_libs) proc_intf (c_e_libs, dedup, s);
+ if (x_e_libs) proc_intf (x_e_libs, dedup, s);
+ }
+ else
+ proc_intf_storage (c_e_libs, x_e_libs);
+ }
}
}
}
@@ -628,9 +820,14 @@ namespace build2
//
// If li is absent, then don't pick the liba/libs{} member, returning the
// lib{} target itself. If li is present, then the returned target is
- // always a file.
+ // always a file. The second half of the returned pair is the group, if
+ // the member was picked.
+ //
+ // Note: paths in sysd/usrd are expected to be absolute and normalized.
//
- const mtime_target& common::
+ // Note: may throw non_existent_library.
+ //
+ pair<const mtime_target&, const target*> common::
resolve_library (action a,
const scope& s,
const name& cn,
@@ -651,7 +848,8 @@ namespace build2
// large number of times (see Boost for an extreme example of this).
//
// Note also that for non-utility libraries we know that only the link
- // order from linfo is used.
+ // order from linfo is used. While not caching it and always picking an
+ // alternative could also work, we cache it to avoid the lookup.
//
if (cache != nullptr)
{
@@ -671,7 +869,7 @@ namespace build2
}));
if (i != cache->end ())
- return i->lib;
+ return pair<const mtime_target&, const target*> {i->lib, i->group};
}
else
cache = nullptr; // Do not cache.
@@ -710,29 +908,36 @@ namespace build2
fail << "unable to find library " << pk;
}
- // If this is lib{}/libu*{}, pick appropriate member unless we were
+ // If this is lib{}/libul{}, pick appropriate member unless we were
// instructed not to.
//
+ const target* g (nullptr);
if (li)
{
if (const libx* l = xt->is_a<libx> ())
+ {
+ g = xt;
xt = link_member (*l, a, *li); // Pick lib*{e,a,s}{}.
+ }
}
auto& t (xt->as<mtime_target> ());
if (cache != nullptr)
- cache->push_back (library_cache_entry {lo, cn.type, cn.value, t});
+ cache->push_back (library_cache_entry {lo, cn.type, cn.value, t, g});
- return t;
+ return pair<const mtime_target&, const target*> {t, g};
}
- // Note that pk's scope should not be NULL (even if dir is absolute).
+ // Action should be absent if called during the load phase. Note that pk's
+ // scope should not be NULL (even if dir is absolute).
+ //
+ // Note: paths in sysd/usrd are expected to be absolute and normalized.
//
// Note: see similar logic in find_system_library().
//
target* common::
- search_library (action act,
+ search_library (optional<action> act,
const dir_paths& sysd,
optional<dir_paths>& usrd,
const prerequisite_key& p,
@@ -740,7 +945,7 @@ namespace build2
{
tracer trace (x, "search_library");
- assert (p.scope != nullptr);
+ assert (p.scope != nullptr && (!exist || act));
context& ctx (p.scope->ctx);
const scope& rs (*p.scope->root_scope ());
@@ -857,6 +1062,21 @@ namespace build2
{
context& ctx (p.scope->ctx);
+ // Whether to look for a binless variant using the common .pc file
+ // (see below).
+ //
+ // Normally we look for a binless version if the binful one was not
+ // found. However, sometimes we may find what looks like a binful
+ // library but on a closer examination realize that there is something
+ // wrong with it (for example, it's not a Windows import library). In
+ // such cases we want to omit looking for a binless library using the
+ // common .pc file since it most likely corresponds to the binful
+ // library (and we may end up in an infinite loop trying to resolve
+ // itself).
+ //
+ bool ba (true);
+ bool bs (true);
+
timestamp mt;
// libs
@@ -957,10 +1177,24 @@ namespace build2
if (tsys == "win32-msvc")
{
if (s == nullptr && !sn.empty ())
- s = msvc_search_shared (ld, d, p, exist);
+ {
+ pair<libs*, bool> r (msvc_search_shared (ld, d, p, exist));
+
+ if (r.first != nullptr)
+ s = r.first;
+ else if (!r.second)
+ bs = false;
+ }
if (a == nullptr && !an.empty ())
- a = msvc_search_static (ld, d, p, exist);
+ {
+ pair<liba*, bool> r (msvc_search_static (ld, d, p, exist));
+
+ if (r.first != nullptr)
+ a = r.first;
+ else if (!r.second)
+ ba = false;
+ }
}
// Look for binary-less libraries via pkg-config .pc files. Note that
@@ -977,7 +1211,10 @@ namespace build2
// is no binful variant.
//
pair<path, path> r (
- pkgconfig_search (d, p.proj, name, na && ns /* common */));
+ pkgconfig_search (d,
+ p.proj,
+ name,
+ na && ns && ba && bs /* common */));
if (na && !r.first.empty ())
{
@@ -1030,6 +1267,8 @@ namespace build2
// making it the only one to allow things to be overriden (e.g.,
// if build2 was moved or some such).
//
+ // Note: build_install_lib is already normalized.
+ //
usrd->insert (usrd->begin (), build_install_lib);
}
}
@@ -1082,20 +1321,87 @@ namespace build2
if (exist)
return r;
- // If we cannot acquire the lock then this mean the target has already
- // been matched and we assume all of this has already been done.
+ // Try to extract library information from pkg-config. We only add the
+ // default macro if we could not extract more precise information. The
+ // idea is that in .pc files that we generate, we copy those macros (or
+ // custom ones) from *.export.poptions.
//
- auto lock = [act] (const target* t) -> target_lock
+ // @@ Should we add .pc files as ad hoc members so pkconfig_save() can
+ // use their names when deriving -l-names (this would be especially
+ // helpful for binless libraries to get hold of prefix/suffix, etc).
+ //
+ auto load_pc = [this, &trace,
+ act, &p, &name,
+ &sysd, &usrd,
+ pd, &pc, lt, a, s] (pair<bool, bool> metaonly)
{
- auto l (t != nullptr ? build2::lock (act, *t, true) : target_lock ());
+ l5 ([&]{trace << "loading pkg-config information during "
+ << (act ? "match" : "load") << " for "
+ << (a != nullptr ? "static " : "")
+ << (s != nullptr ? "shared " : "")
+ << "member(s) of " << *lt << "; metadata only: "
+ << metaonly.first << " " << metaonly.second;});
+
+ // Add the "using static/shared library" macro (used, for example, to
+ // handle DLL export). The absence of either of these macros would
+ // mean some other build system that cannot distinguish between the
+ // two (and no pkg-config information).
+ //
+ auto add_macro = [this] (target& t, const char* suffix)
+ {
+ // If there is already a value (either in cc.export or x.export),
+ // don't add anything: we don't want to be accumulating defines nor
+ // messing with custom values. And if we are adding, then use the
+ // generic cc.export.
+ //
+ // The only way we could already have this value is if this same
+ // library was also imported as a project (as opposed to installed).
+ // Unlikely but possible. In this case the values were set by the
+ // export stub and we shouldn't touch them.
+ //
+ if (!t.vars[x_export_poptions])
+ {
+ auto p (t.vars.insert (c_export_poptions));
- if (l && l.offset == target::offset_matched)
+ if (p.second)
+ {
+ // The "standard" macro name will be LIB<NAME>_{STATIC,SHARED},
+ // where <name> is the target name. Here we want to strike a
+ // balance between being unique and not too noisy.
+ //
+ string d ("-DLIB");
+
+ d += sanitize_identifier (
+ ucase (const_cast<const string&> (t.name)));
+
+ d += '_';
+ d += suffix;
+
+ strings o;
+ o.push_back (move (d));
+ p.first = move (o);
+ }
+ }
+ };
+
+ if (pc.first.empty () && pc.second.empty ())
{
- assert ((*t)[act].rule == &file_rule::rule_match);
- l.unlock ();
+ if (!pkgconfig_load (act, *p.scope,
+ *lt, a, s,
+ p.proj, name,
+ *pd, sysd, *usrd,
+ metaonly))
+ {
+ if (a != nullptr && !metaonly.first) add_macro (*a, "STATIC");
+ if (s != nullptr && !metaonly.second) add_macro (*s, "SHARED");
+ }
}
-
- return l;
+ else
+ pkgconfig_load (act, *p.scope,
+ *lt, a, s,
+ pc,
+ *pd, sysd, *usrd,
+ metaonly);
};
// Mark as a "cc" library (unless already marked) and set the system
@@ -1116,6 +1422,82 @@ namespace build2
return p.second;
};
+ // Deal with the load phase case. The rest is already hairy enough so
+ // let's not try to weave this logic into that.
+ //
+ if (!act)
+ {
+ assert (ctx.phase == run_phase::load);
+
+ // The overall idea here is to set everything up so that the default
+ // file_rule matches the returned targets, the same way as it would if
+ // multiple operations were executed for the match phase (see below).
+ //
+ // Note however, that there is no guarantee that we won't end up in
+ // the match phase code below even after loading things here. For
+ // example, the same library could be searched from pkgconfig_load()
+ // if specified as -l. And if we try to re-assign group members, then
+ // that would be a race condition. So we use the cc mark to detect
+ // this.
+ //
+ timestamp mt (timestamp_nonexistent);
+ if (a != nullptr) {lt->a = a; a->group = lt; mt = a->mtime ();}
+ if (s != nullptr) {lt->s = s; s->group = lt; mt = s->mtime ();}
+
+ // @@ TODO: we currently always reload pkgconfig for lt (and below).
+ //
+ mark_cc (*lt);
+ lt->mtime (mt);
+
+ // We can only load metadata from here since we can only do this
+ // during the load phase. But it's also possible that a racing match
+ // phase already found and loaded this library without metadata. So
+ // looks like the only way is to load the metadata incrementally. We
+ // can base this decision on the presense/absense of cc.type and
+ // export.metadata.
+ //
+ pair<bool, bool> metaonly {false, false};
+
+ if (a != nullptr && !mark_cc (*a))
+ {
+ if (a->vars[ctx.var_export_metadata])
+ a = nullptr;
+ else
+ metaonly.first = true;
+ }
+
+ if (s != nullptr && !mark_cc (*s))
+ {
+ if (s->vars[ctx.var_export_metadata])
+ s = nullptr;
+ else
+ metaonly.second = true;
+ }
+
+ // Try to extract library information from pkg-config.
+ //
+ if (a != nullptr || s != nullptr)
+ load_pc (metaonly);
+
+ return r;
+ }
+
+ // If we cannot acquire the lock then this mean the target has already
+ // been matched and we assume all of this has already been done.
+ //
+ auto lock = [a = *act] (const target* t) -> target_lock
+ {
+ auto l (t != nullptr ? build2::lock (a, *t, true) : target_lock ());
+
+ if (l && l.offset == target::offset_matched)
+ {
+ assert ((*t)[a].rule == &file_rule::rule_match);
+ l.unlock ();
+ }
+
+ return l;
+ };
+
target_lock ll (lock (lt));
// Set lib{} group members to indicate what's available. Note that we
@@ -1125,13 +1507,22 @@ namespace build2
timestamp mt (timestamp_nonexistent);
if (ll)
{
- if (s != nullptr) {lt->s = s; mt = s->mtime ();}
- if (a != nullptr) {lt->a = a; mt = a->mtime ();}
-
// Mark the group since sometimes we use it itself instead of one of
- // the liba/libs{} members (see process_libraries() for details).
+ // the liba/libs{} members (see process_libraries_impl() for details).
//
- mark_cc (*lt);
+ // If it's already marked, then it could have been imported during
+ // load (see above).
+ //
+ // @@ TODO: we currently always reload pkgconfig for lt (and above).
+ // Maybe pass NULL lt to pkgconfig_load() in this case?
+ //
+ if (mark_cc (*lt))
+ {
+ if (a != nullptr) {lt->a = a; mt = a->mtime ();}
+ if (s != nullptr) {lt->s = s; mt = s->mtime ();}
+ }
+ else
+ ll.unlock ();
}
target_lock al (lock (a));
@@ -1140,81 +1531,20 @@ namespace build2
if (!al) a = nullptr;
if (!sl) s = nullptr;
- if (a != nullptr) a->group = lt;
- if (s != nullptr) s->group = lt;
-
- // If the library already has cc.type, then assume it was either
- // already imported or was matched by a rule.
+ // If the library already has cc.type, then assume it was either already
+ // imported (e.g., during load) or was matched by a rule.
//
if (a != nullptr && !mark_cc (*a)) a = nullptr;
if (s != nullptr && !mark_cc (*s)) s = nullptr;
- // Add the "using static/shared library" macro (used, for example, to
- // handle DLL export). The absence of either of these macros would
- // mean some other build system that cannot distinguish between the
- // two (and no pkg-config information).
- //
- auto add_macro = [this] (target& t, const char* suffix)
- {
- // If there is already a value (either in cc.export or x.export),
- // don't add anything: we don't want to be accumulating defines nor
- // messing with custom values. And if we are adding, then use the
- // generic cc.export.
- //
- // The only way we could already have this value is if this same
- // library was also imported as a project (as opposed to installed).
- // Unlikely but possible. In this case the values were set by the
- // export stub and we shouldn't touch them.
- //
- if (!t.vars[x_export_poptions])
- {
- auto p (t.vars.insert (c_export_poptions));
-
- if (p.second)
- {
- // The "standard" macro name will be LIB<NAME>_{STATIC,SHARED},
- // where <name> is the target name. Here we want to strike a
- // balance between being unique and not too noisy.
- //
- string d ("-DLIB");
-
- d += sanitize_identifier (
- ucase (const_cast<const string&> (t.name)));
-
- d += '_';
- d += suffix;
-
- strings o;
- o.push_back (move (d));
- p.first = move (o);
- }
- }
- };
+ if (a != nullptr) a->group = lt;
+ if (s != nullptr) s->group = lt;
if (ll && (a != nullptr || s != nullptr))
{
- // Try to extract library information from pkg-config. We only add the
- // default macro if we could not extract more precise information. The
- // idea is that in .pc files that we generate, we copy those macros
- // (or custom ones) from *.export.poptions.
- //
- // @@ Should we add .pc files as ad hoc members so pkconfig_save() can
- // use their names when deriving -l-names (this would be expecially
- // helpful for binless libraries to get hold of prefix/suffix, etc).
+ // Try to extract library information from pkg-config.
//
- if (pc.first.empty () && pc.second.empty ())
- {
- if (!pkgconfig_load (act, *p.scope,
- *lt, a, s,
- p.proj, name,
- *pd, sysd, *usrd))
- {
- if (a != nullptr) add_macro (*a, "STATIC");
- if (s != nullptr) add_macro (*s, "SHARED");
- }
- }
- else
- pkgconfig_load (act, *p.scope, *lt, a, s, pc, *pd, sysd, *usrd);
+ load_pc ({false, false} /* metaonly */);
}
// If we have the lock (meaning this is the first time), set the matched
@@ -1227,8 +1557,8 @@ namespace build2
//
// Note also that these calls clear target data.
//
- if (al) match_rule (al, file_rule::rule_match);
- if (sl) match_rule (sl, file_rule::rule_match);
+ if (a != nullptr) match_rule (al, file_rule::rule_match);
+ if (s != nullptr) match_rule (sl, file_rule::rule_match);
if (ll)
{
match_rule (ll, file_rule::rule_match);
@@ -1280,5 +1610,54 @@ namespace build2
return r;
}
+
+ void common::
+ append_diag_color_options (cstrings& args) const
+ {
+ switch (cclass)
+ {
+ case compiler_class::msvc:
+ {
+ // Note: see init_diag logic if enabling anything here (probably
+ // need an "override disable" mode or some such).
+ //
+ break;
+ }
+ case compiler_class::gcc:
+ {
+ // Enable/disable diagnostics color unless a custom option is
+ // specified.
+ //
+ // Supported from GCC 4.9 and (at least) from Clang 3.5. Clang
+ // supports -f[no]color-diagnostics in addition to the GCC's
+ // spelling.
+ //
+ if (ctype == compiler_type::gcc ? cmaj > 4 || (cmaj == 4 && cmin >= 9) :
+ ctype == compiler_type::clang ? cmaj > 3 || (cmaj == 3 && cmin >= 5) :
+ false)
+ {
+ if (!(find_option_prefix ("-fdiagnostics-color", args) ||
+ find_option ("-fno-diagnostics-color", args) ||
+ find_option ("-fdiagnostics-plain-output", args) ||
+ (ctype == compiler_type::clang &&
+ (find_option ("-fcolor-diagnostics", args) ||
+ find_option ("-fno-color-diagnostics", args)))))
+ {
+ // Omit -fno-diagnostics-color if stderr is not a terminal (we
+ // know there will be no color in this case and the option will
+ // just add noise, for example, in build logs).
+ //
+ if (const char* o = (
+ show_diag_color () ? "-fdiagnostics-color" :
+ stderr_term ? "-fno-diagnostics-color" :
+ nullptr))
+ args.push_back (o);
+ }
+ }
+
+ break;
+ }
+ }
+ }
}
}
diff --git a/libbuild2/cc/common.hxx b/libbuild2/cc/common.hxx
index 78442f8..eefcc0d 100644
--- a/libbuild2/cc/common.hxx
+++ b/libbuild2/cc/common.hxx
@@ -32,10 +32,12 @@ namespace build2
{
lang x_lang;
- const char* x; // Module name ("c", "cxx").
- const char* x_name; // Compiler name ("c", "c++").
- const char* x_default; // Compiler default ("gcc", "g++").
- const char* x_pext; // Preprocessed source extension (".i", ".ii").
+ const char* x; // Module name ("c", "cxx").
+ const char* x_name; // Compiler name ("c", "c++").
+ const char* x_obj_name; // Same for Objective-X ("obj-c", "obj-c++").
+ const char* x_default; // Compiler default ("gcc", "g++").
+ const char* x_pext; // Preprocessed source extension (".i", ".ii").
+ const char* x_obj_pext; // Same for Objective-X (".mi", ".mii").
// Array of modules that can hint us the toolchain, terminate with
// NULL.
@@ -102,6 +104,9 @@ namespace build2
const variable& c_export_libs;
const variable& c_export_impl_libs;
+ const variable& c_pkgconfig_include;
+ const variable& c_pkgconfig_lib;
+
const variable& x_stdlib; // x.stdlib
const variable& c_runtime; // cc.runtime
@@ -153,10 +158,9 @@ namespace build2
struct data: config_data
{
- const char* x_compile; // Rule names.
- const char* x_link;
- const char* x_install;
- const char* x_uninstall;
+ string x_compile; // Rule names.
+ string x_link;
+ string x_install;
// Cached values for some commonly-used variables/values.
//
@@ -194,22 +198,53 @@ namespace build2
build2::cc::importable_headers* importable_headers;
// The order of sys_*_dirs is the mode entries first, followed by the
- // compiler built-in entries, and finished off with any extra entries
- // (e.g., fallback directories such as /usr/local/*).
+ // extra entries (e.g., /usr/local/*), followed by the compiler built-in
+ // entries.
+ //
+ // Note that even if we wanted to, we wouldn't be able to support extra
+ // trailing (after built-in) directories since we would need a portable
+ // equivalent of -idirafter for both headers and libraries.
//
const dir_paths& sys_lib_dirs; // x.sys_lib_dirs
const dir_paths& sys_hdr_dirs; // x.sys_hdr_dirs
const dir_paths* sys_mod_dirs; // compiler_info::sys_mod_dirs
- size_t sys_lib_dirs_mode; // Number of leading mode entries (0 if none).
+ size_t sys_lib_dirs_mode; // Number of mode entries (0 if none).
size_t sys_hdr_dirs_mode;
size_t sys_mod_dirs_mode;
- size_t sys_lib_dirs_extra; // First trailing extra entry (size if none).
+ size_t sys_lib_dirs_extra; // Number of extra entries (0 if none).
size_t sys_hdr_dirs_extra;
+ // Note that x_obj is patched in by the x.objx module. So it stays NULL
+ // if Objective-X compilation is not enabled. Similarly for x_asp except
+ // here we don't have duality and it's purely to signal (by the c.as-cpp
+ // module) that it's enabled.
+ //
const target_type& x_src; // Source target type (c{}, cxx{}).
const target_type* x_mod; // Module target type (mxx{}), if any.
+ const target_type* x_obj; // Objective-X target type (m{}, mm{}).
+ const target_type* x_asp; // Assembler with CPP target type (S{}).
+
+ // Check if an object (target, prerequisite, etc) is an Objective-X
+ // source.
+ //
+ template <typename T>
+ bool
+ x_objective (const T& t) const
+ {
+ return x_obj != nullptr && t.is_a (*x_obj);
+ }
+
+ // Check if an object (target, prerequisite, etc) is an Assembler with
+ // C preprocessor source.
+ //
+ template <typename T>
+ bool
+ x_assembler_cpp (const T& t) const
+ {
+ return x_asp != nullptr && t.is_a (*x_asp);
+ }
// Array of target types that are considered the X-language headers
// (excluding h{} except for C). Keep them in the most likely to appear
@@ -217,6 +252,8 @@ namespace build2
//
const target_type* const* x_hdr;
+ // Check if an object (target, prerequisite, etc) is a header.
+ //
template <typename T>
bool
x_header (const T& t, bool c_hdr = true) const
@@ -240,7 +277,6 @@ namespace build2
const char* compile,
const char* link,
const char* install,
- const char* uninstall,
compiler_type ct,
const string& cv,
compiler_class cl,
@@ -267,7 +303,6 @@ namespace build2
x_compile (compile),
x_link (link),
x_install (install),
- x_uninstall (uninstall),
ctype (ct), cvariant (cv), cclass (cl),
cmaj (mj), cmin (mi),
cvmaj (vmj), cvmin (vmi),
@@ -283,7 +318,8 @@ namespace build2
sys_lib_dirs_mode (slm), sys_hdr_dirs_mode (shm),
sys_mod_dirs_mode (smm),
sys_lib_dirs_extra (sle), sys_hdr_dirs_extra (she),
- x_src (src), x_mod (mod), x_hdr (hdr), x_inc (inc) {}
+ x_src (src), x_mod (mod), x_obj (nullptr), x_asp (nullptr),
+ x_hdr (hdr), x_inc (inc) {}
};
class LIBBUILD2_CC_SYMEXPORT common: public data
@@ -300,10 +336,16 @@ namespace build2
string type; // name::type
string value; // name::value
reference_wrapper<const mtime_target> lib;
+ const target* group;
};
using library_cache = small_vector<library_cache_entry, 32>;
+ // The prerequisite_target::include bit that indicates a library
+ // member has been picked from the group.
+ //
+ static const uintptr_t include_group = 0x100;
+
void
process_libraries (
action,
@@ -319,8 +361,29 @@ namespace build2
lflags, const string*, bool)>&,
const function<bool (const target&, const string&, bool, bool)>&,
bool = false,
- library_cache* = nullptr,
- small_vector<const target*, 24>* = nullptr) const;
+ bool = false,
+ library_cache* = nullptr) const;
+
+ void
+ process_libraries_impl (
+ action,
+ const scope&,
+ optional<linfo>,
+ const dir_paths&,
+ const target*,
+ const mtime_target&,
+ bool,
+ lflags,
+ const function<bool (const target&, bool)>&,
+ const function<bool (const target* const*,
+ const small_vector<reference_wrapper<const string>, 2>&,
+ lflags, const string*, bool)>&,
+ const function<bool (const target&, const string&, bool, bool)>&,
+ bool,
+ bool,
+ library_cache*,
+ small_vector<const target*, 32>*,
+ small_vector<const target*, 32>*) const;
const target*
search_library (action a,
@@ -347,7 +410,7 @@ namespace build2
}
public:
- const mtime_target&
+ pair<const mtime_target&, const target*>
resolve_library (action,
const scope&,
const name&,
@@ -357,6 +420,11 @@ namespace build2
optional<dir_paths>&,
library_cache* = nullptr) const;
+ struct non_existent_library
+ {
+ const mtime_target& target;
+ };
+
template <typename T>
static ulock
insert_library (context&,
@@ -369,7 +437,7 @@ namespace build2
tracer&);
target*
- search_library (action,
+ search_library (optional<action>,
const dir_paths&,
optional<dir_paths>&,
const prerequisite_key&,
@@ -389,13 +457,16 @@ namespace build2
// Alternative search logic for VC (msvc.cxx).
//
- bin::liba*
+ // The second half is false if we should poison the binless search via
+ // the common .pc file.
+ //
+ pair<bin::liba*, bool>
msvc_search_static (const process_path&,
const dir_path&,
const prerequisite_key&,
bool existing) const;
- bin::libs*
+ pair<bin::libs*, bool>
msvc_search_shared (const process_path&,
const dir_path&,
const prerequisite_key&,
@@ -415,21 +486,28 @@ namespace build2
bool) const;
void
- pkgconfig_load (action, const scope&,
+ pkgconfig_load (optional<action>, const scope&,
bin::lib&, bin::liba*, bin::libs*,
const pair<path, path>&,
const dir_path&,
const dir_paths&,
- const dir_paths&) const;
+ const dir_paths&,
+ pair<bool, bool>) const;
bool
- pkgconfig_load (action, const scope&,
+ pkgconfig_load (optional<action>, const scope&,
bin::lib&, bin::liba*, bin::libs*,
const optional<project_name>&,
const string&,
const dir_path&,
const dir_paths&,
- const dir_paths&) const;
+ const dir_paths&,
+ pair<bool, bool>) const;
+
+ // Append compiler-specific diagnostics color options as necessary.
+ //
+ void
+ append_diag_color_options (cstrings&) const;
};
}
}
diff --git a/libbuild2/cc/common.txx b/libbuild2/cc/common.txx
index d14f966..8c80686 100644
--- a/libbuild2/cc/common.txx
+++ b/libbuild2/cc/common.txx
@@ -19,15 +19,18 @@ namespace build2
bool exist,
tracer& trace)
{
- auto p (ctx.targets.insert_locked (T::static_type,
- move (dir),
- path_cast<dir_path> (out.effect),
- name,
- move (ext),
- target_decl::implied,
- trace));
+ auto p (ctx.targets.insert_locked (
+ T::static_type,
+ move (dir),
+ dir_path (out.effect_string ()).normalize (),
+ name,
+ move (ext),
+ target_decl::implied,
+ trace));
+
+ if (exist && p.second)
+ throw non_existent_library {p.first.template as<mtime_target> ()};
- assert (!exist || !p.second);
r = &p.first.template as<T> ();
return move (p.second);
}
diff --git a/libbuild2/cc/compile-rule.cxx b/libbuild2/cc/compile-rule.cxx
index 93f05f1..fa46332 100644
--- a/libbuild2/cc/compile-rule.cxx
+++ b/libbuild2/cc/compile-rule.cxx
@@ -3,6 +3,7 @@
#include <libbuild2/cc/compile-rule.hxx>
+#include <cerrno>
#include <cstdlib> // exit()
#include <cstring> // strlen(), strchr(), strncmp()
@@ -16,6 +17,7 @@
#include <libbuild2/algorithm.hxx>
#include <libbuild2/filesystem.hxx> // mtime()
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/make-parser.hxx>
#include <libbuild2/bin/target.hxx>
@@ -174,7 +176,7 @@ namespace build2
if (s == "includes") return preprocessed::includes;
if (s == "modules") return preprocessed::modules;
if (s == "all") return preprocessed::all;
- throw invalid_argument ("invalid preprocessed value '" + s + "'");
+ throw invalid_argument ("invalid preprocessed value '" + s + '\'');
}
// Return true if the compiler supports -isystem (GCC class) or
@@ -228,11 +230,17 @@ namespace build2
return nullopt;
}
+ // Note that we don't really need this for clean (where we only need
+ // unrefined unit type) so we could make this update-only. But let's keep
+ // it simple for now. Note that now we do need the source prerequisite
+ // type in clean to deal with Objective-X.
+ //
struct compile_rule::match_data
{
- explicit
- match_data (unit_type t, const prerequisite_member& s)
- : type (t), src (s) {}
+ match_data (const compile_rule& r,
+ unit_type t,
+ const prerequisite_member& s)
+ : type (t), src (s), rule (r) {}
unit_type type;
preprocessed pp = preprocessed::none;
@@ -245,39 +253,67 @@ namespace build2
path dd; // Dependency database path.
size_t header_units = 0; // Number of imported header units.
module_positions modules = {0, 0, 0}; // Positions of imported modules.
+
+ const compile_rule& rule;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ return rule.perform_update (a, t, *this);
+ }
};
compile_rule::
- compile_rule (data&& d)
+ compile_rule (data&& d, const scope& rs)
: common (move (d)),
- rule_id (string (x) += ".compile 5")
+ rule_id (string (x) += ".compile 6")
{
- static_assert (sizeof (match_data) <= target::data_size,
- "insufficient space");
+ // Locate the header cache (see enter_header() for details).
+ //
+ {
+ string mn (string (x) + ".config");
+
+ header_cache_ = rs.find_module<config_module> (mn); // Must be there.
+
+ const scope* ws (rs.weak_scope ());
+ if (ws != &rs)
+ {
+ const scope* s (&rs);
+ do
+ {
+ s = s->parent_scope ()->root_scope ();
+
+ if (const auto* m = s->find_module<config_module> (mn))
+ header_cache_ = m;
+
+ } while (s != ws);
+ }
+ }
}
template <typename T>
void compile_rule::
append_sys_hdr_options (T& args) const
{
- assert (sys_hdr_dirs_extra <= sys_hdr_dirs.size ());
+ assert (sys_hdr_dirs_mode + sys_hdr_dirs_extra <= sys_hdr_dirs.size ());
// Note that the mode options are added as part of cmode.
//
auto b (sys_hdr_dirs.begin () + sys_hdr_dirs_mode);
- auto m (sys_hdr_dirs.begin () + sys_hdr_dirs_extra);
- auto e (sys_hdr_dirs.end ());
+ auto x (b + sys_hdr_dirs_extra);
+ // Add extras.
+ //
// Note: starting from 16.10, MSVC gained /external:I option though it
// doesn't seem to affect the order, only "system-ness".
//
append_option_values (
args,
- cclass == compiler_class::gcc ? "-idirafter" :
+ cclass == compiler_class::gcc ? "-isystem" :
cclass == compiler_class::msvc ? (isystem (*this)
? "/external:I"
: "/I") : "-I",
- m, e,
+ b, x,
[] (const dir_path& d) {return d.string ().c_str ();});
// For MSVC if we have no INCLUDE environment variable set, then we
@@ -293,7 +329,7 @@ namespace build2
{
append_option_values (
args, "/I",
- b, m,
+ x, sys_hdr_dirs.end (),
[] (const dir_path& d) {return d.string ().c_str ();});
}
}
@@ -336,10 +372,18 @@ namespace build2
case unit_type::module_impl:
{
o1 = "-x";
- switch (x_lang)
+
+ if (x_assembler_cpp (md.src))
+ o2 = "assembler-with-cpp";
+ else
{
- case lang::c: o2 = "c"; break;
- case lang::cxx: o2 = "c++"; break;
+ bool obj (x_objective (md.src));
+
+ switch (x_lang)
+ {
+ case lang::c: o2 = obj ? "objective-c" : "c"; break;
+ case lang::cxx: o2 = obj ? "objective-c++" : "c++"; break;
+ }
}
break;
}
@@ -406,7 +450,7 @@ namespace build2
}
bool compile_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t) const
{
tracer trace (x, "compile_rule::match");
@@ -441,11 +485,13 @@ namespace build2
//
if (ut == unit_type::module_header ? p.is_a (**x_hdr) || p.is_a<h> () :
ut == unit_type::module_intf ? p.is_a (*x_mod) :
- p.is_a (x_src))
+ p.is_a (x_src) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj)))
{
// Save in the target's auxiliary storage.
//
- t.data (match_data (ut, p));
+ t.data (a, match_data (*this, ut, p));
return true;
}
}
@@ -456,13 +502,16 @@ namespace build2
// Append or hash library options from a pair of *.export.* variables
// (first is x.* then cc.*) recursively, prerequisite libraries first.
+ // If common is true, then only append common options from the lib{}
+ // groups.
//
template <typename T>
void compile_rule::
append_library_options (appended_libraries& ls, T& args,
const scope& bs,
const scope* is, // Internal scope.
- action a, const file& l, bool la, linfo li,
+ action a, const file& l, bool la,
+ linfo li, bool common,
library_cache* lib_cache) const
{
struct data
@@ -476,7 +525,7 @@ namespace build2
//
auto imp = [] (const target& l, bool la) {return la && l.is_a<libux> ();};
- auto opt = [&d, this] (const target& lt,
+ auto opt = [&d, this] (const target& l, // Note: could be lib{}
const string& t, bool com, bool exp)
{
// Note that in our model *.export.poptions are always "interface",
@@ -485,8 +534,6 @@ namespace build2
if (!exp) // Ignore libux.
return true;
- const file& l (lt.as<file> ());
-
// Suppress duplicates.
//
// Compilation is the simple case: we can add the options on the first
@@ -496,6 +543,8 @@ namespace build2
if (find (d.ls.begin (), d.ls.end (), &l) != d.ls.end ())
return false;
+ // Note: go straight for the public variable pool.
+ //
const variable& var (
com
? c_export_poptions
@@ -645,16 +694,24 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0, // lflags unused.
- imp, nullptr, opt, false /* self */, lib_cache);
+ imp, nullptr, opt,
+ false /* self */,
+ common /* proc_opt_group */,
+ lib_cache);
}
void compile_rule::
append_library_options (appended_libraries& ls, strings& args,
const scope& bs,
- action a, const file& l, bool la, linfo li) const
+ action a, const file& l, bool la,
+ linfo li,
+ bool common,
+ bool original) const
{
- const scope* is (isystem (*this) ? effective_iscope (bs) : nullptr);
- append_library_options (ls, args, bs, is, a, l, la, li, nullptr);
+ const scope* is (!original && isystem (*this)
+ ? effective_iscope (bs)
+ : nullptr);
+ append_library_options (ls, args, bs, is, a, l, la, li, common, nullptr);
}
template <typename T>
@@ -695,7 +752,9 @@ namespace build2
append_library_options (ls,
args,
bs, iscope (),
- a, *f, la, li,
+ a, *f, la,
+ li,
+ false /* common */,
&lc);
}
}
@@ -708,7 +767,7 @@ namespace build2
void compile_rule::
append_library_prefixes (appended_libraries& ls, prefix_map& pm,
const scope& bs,
- action a, target& t, linfo li) const
+ action a, const target& t, linfo li) const
{
struct data
{
@@ -731,14 +790,23 @@ namespace build2
if (find (d.ls.begin (), d.ls.end (), &l) != d.ls.end ())
return false;
- const variable& var (
- com
- ? c_export_poptions
- : (t == x
- ? x_export_poptions
- : l.ctx.var_pool[t + ".export.poptions"]));
-
- append_prefixes (d.pm, l, var);
+ // If this target does not belong to any project (e.g, an "imported as
+ // installed" library), then it can't possibly generate any headers
+ // for us.
+ //
+ if (const scope* rs = l.base_scope ().root_scope ())
+ {
+ // Note: go straight for the public variable pool.
+ //
+ const variable& var (
+ com
+ ? c_export_poptions
+ : (t == x
+ ? x_export_poptions
+ : l.ctx.var_pool[t + ".export.poptions"]));
+
+ append_prefixes (d.pm, *rs, l, var);
+ }
if (com)
d.ls.push_back (&l);
@@ -770,75 +838,14 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
pt->as<file> (), la, 0, // lflags unused.
- impf, nullptr, optf, false /* self */,
+ impf, nullptr, optf,
+ false /* self */,
+ false /* proc_opt_group */,
&lib_cache);
}
}
}
- // Update the target during the match phase. Return true if it has changed
- // or if the passed timestamp is not timestamp_unknown and is older than
- // the target.
- //
- // This function is used to make sure header dependencies are up to date.
- //
- // There would normally be a lot of headers for every source file (think
- // all the system headers) and just calling execute_direct() on all of
- // them can get expensive. At the same time, most of these headers are
- // existing files that we will never be updating (again, system headers,
- // for example) and the rule that will match them is the fallback
- // file_rule. That rule has an optimization: it returns noop_recipe (which
- // causes the target state to be automatically set to unchanged) if the
- // file is known to be up to date. So we do the update "smartly".
- //
- static bool
- update (tracer& trace, action a, const target& t, timestamp ts)
- {
- const path_target* pt (t.is_a<path_target> ());
-
- if (pt == nullptr)
- ts = timestamp_unknown;
-
- target_state os (t.matched_state (a));
-
- if (os == target_state::unchanged)
- {
- if (ts == timestamp_unknown)
- return false;
- else
- {
- // We expect the timestamp to be known (i.e., existing file).
- //
- timestamp mt (pt->mtime ());
- assert (mt != timestamp_unknown);
- return mt > ts;
- }
- }
- else
- {
- // We only want to return true if our call to execute() actually
- // caused an update. In particular, the target could already have been
- // in target_state::changed because of a dependency extraction run for
- // some other source file.
- //
- // @@ MT perf: so we are going to switch the phase and execute for
- // any generated header.
- //
- phase_switch ps (t.ctx, run_phase::execute);
- target_state ns (execute_direct (a, t));
-
- if (ns != os && ns != target_state::unchanged)
- {
- l6 ([&]{trace << "updated " << t
- << "; old state " << os
- << "; new state " << ns;});
- return true;
- }
- else
- return ts != timestamp_unknown ? pt->newer (ts, ns) : false;
- }
- }
-
recipe compile_rule::
apply (action a, target& xt) const
{
@@ -846,7 +853,7 @@ namespace build2
file& t (xt.as<file> ()); // Either obj*{} or bmi*{}.
- match_data& md (t.data<match_data> ());
+ match_data& md (t.data<match_data> (a));
context& ctx (t.ctx);
@@ -1000,6 +1007,12 @@ namespace build2
// to match it if we may need its modules or importable headers
// (see search_modules(), make_header_sidebuild() for details).
//
+ // Well, that was the case until we've added support for immediate
+ // importation of libraries, which happens during the load phase
+ // and naturally leaves the library unmatched. While we could have
+ // returned from search_library() an indication of whether the
+ // library has been matched, this doesn't seem worth the trouble.
+ //
if (p.proj ())
{
pt = search_library (a,
@@ -1007,8 +1020,10 @@ namespace build2
usr_lib_dirs,
p.prerequisite);
+#if 0
if (pt != nullptr && !modules)
continue;
+#endif
}
if (pt == nullptr)
@@ -1063,8 +1078,13 @@ namespace build2
// match in link::apply() it will be safe unless someone is building
// an obj?{} target directly.
//
+ // @@ If for some reason unmatch fails, this messes up the for_install
+ // logic because we will update this library during match. Perhaps
+ // we should postpone updating them until execute if we failed to
+ // unmatch. See how we do this in ad hoc rule.
+ //
pair<bool, target_state> mr (
- build2::match (
+ match_complete (
a,
*pt,
pt->is_a<liba> () || pt->is_a<libs> () || pt->is_a<libux> ()
@@ -1098,6 +1118,8 @@ namespace build2
md.symexport = l ? cast<bool> (l) : symexport;
}
+ // NOTE: see similar code in adhoc_buildscript_rule::apply().
+
// Make sure the output directory exists.
//
// Is this the right thing to do? It does smell a bit, but then we do
@@ -1198,15 +1220,6 @@ namespace build2
append_options (cs, t, c_coptions);
append_options (cs, t, x_coptions);
-
- if (ot == otype::s)
- {
- // On Darwin, Win32 -fPIC is the default.
- //
- if (tclass == "linux" || tclass == "bsd")
- cs.append ("-fPIC");
- }
-
append_options (cs, cmode);
if (md.pp != preprocessed::all)
@@ -1300,7 +1313,7 @@ namespace build2
//
l5 ([&]{trace << "extracting headers from " << src;});
auto& is (tu.module_info.imports);
- psrc = extract_headers (a, bs, t, li, src, md, dd, u, mt, is);
+ extract_headers (a, bs, t, li, src, md, dd, u, mt, is, psrc);
is.clear (); // No longer needed.
}
@@ -1475,10 +1488,10 @@ namespace build2
// to keep re-validating the file on every subsequent dry-run as well
// on the real run).
//
- if (u && dd.reading () && !ctx.dry_run)
- dd.touch = true;
+ if (u && dd.reading () && !ctx.dry_run_option)
+ dd.touch = timestamp_unknown;
- dd.close ();
+ dd.close (false /* mtime_check */);
md.dd = move (dd.path);
// If the preprocessed output is suitable for compilation, then pass
@@ -1536,67 +1549,25 @@ namespace build2
switch (a)
{
- case perform_update_id: return [this] (action a, const target& t)
- {
- return perform_update (a, t);
- };
- case perform_clean_id: return [this] (action a, const target& t)
+ case perform_update_id: return move (md);
+ case perform_clean_id:
{
- return perform_clean (a, t);
- };
+ return [this, srct = &md.src.type ()] (action a, const target& t)
+ {
+ return perform_clean (a, t, *srct);
+ };
+ }
default: return noop_recipe; // Configure update.
}
}
- // Reverse-lookup target type(s) from extension.
- //
- small_vector<const target_type*, 2> compile_rule::
- map_extension (const scope& s, const string& n, const string& e) const
- {
- // We will just have to try all of the possible ones, in the "most
- // likely to match" order.
- //
- auto test = [&s, &n, &e] (const target_type& tt) -> bool
- {
- // Call the extension derivation function. Here we know that it will
- // only use the target type and name from the target key so we can
- // pass bogus values for the rest.
- //
- target_key tk {&tt, nullptr, nullptr, &n, nullopt};
-
- // This is like prerequisite search.
- //
- optional<string> de (tt.default_extension (tk, s, nullptr, true));
-
- return de && *de == e;
- };
-
- small_vector<const target_type*, 2> r;
-
- for (const target_type* const* p (x_inc); *p != nullptr; ++p)
- if (test (**p))
- r.push_back (*p);
-
- return r;
- }
-
void compile_rule::
- append_prefixes (prefix_map& m, const target& t, const variable& var) const
+ append_prefixes (prefix_map& m,
+ const scope& rs, const target& t,
+ const variable& var) const
{
tracer trace (x, "compile_rule::append_prefixes");
- // If this target does not belong to any project (e.g, an "imported as
- // installed" library), then it can't possibly generate any headers for
- // us.
- //
- const scope& bs (t.base_scope ());
- const scope* rs (bs.root_scope ());
- if (rs == nullptr)
- return;
-
- const dir_path& out_base (t.dir);
- const dir_path& out_root (rs->out_path ());
-
if (auto l = t[var])
{
const auto& v (cast<strings> (l));
@@ -1654,113 +1625,8 @@ namespace build2
// If we are not inside our project root, then ignore.
//
- if (!d.sub (out_root))
- continue;
-
- // If the target directory is a sub-directory of the include
- // directory, then the prefix is the difference between the
- // two. Otherwise, leave it empty.
- //
- // The idea here is to make this "canonical" setup work auto-
- // magically:
- //
- // 1. We include all files with a prefix, e.g., <foo/bar>.
- // 2. The library target is in the foo/ sub-directory, e.g.,
- // /tmp/foo/.
- // 3. The poptions variable contains -I/tmp.
- //
- dir_path p (out_base.sub (d) ? out_base.leaf (d) : dir_path ());
-
- // We use the target's directory as out_base but that doesn't work
- // well for targets that are stashed in subdirectories. So as a
- // heuristics we are going to also enter the outer directories of
- // the original prefix. It is, however, possible, that another -I
- // option after this one will produce one of these outer prefixes as
- // its original prefix in which case we should override it.
- //
- // So we are going to assign the original prefix priority value 0
- // (highest) and then increment it for each outer prefix.
- //
- auto enter = [&trace, &m] (dir_path p, dir_path d, size_t prio)
- {
- auto j (m.find (p));
-
- if (j != m.end ())
- {
- prefix_value& v (j->second);
-
- // We used to reject duplicates but it seems this can be
- // reasonably expected to work according to the order of the
- // -I options.
- //
- // Seeing that we normally have more "specific" -I paths first,
- // (so that we don't pick up installed headers, etc), we ignore
- // it.
- //
- if (v.directory == d)
- {
- if (v.priority > prio)
- v.priority = prio;
- }
- else if (v.priority <= prio)
- {
- if (verb >= 4)
- trace << "ignoring mapping for prefix '" << p << "'\n"
- << " existing mapping to " << v.directory
- << " priority " << v.priority << '\n'
- << " another mapping to " << d
- << " priority " << prio;
- }
- else
- {
- if (verb >= 4)
- trace << "overriding mapping for prefix '" << p << "'\n"
- << " existing mapping to " << v.directory
- << " priority " << v.priority << '\n'
- << " new mapping to " << d
- << " priority " << prio;
-
- v.directory = move (d);
- v.priority = prio;
- }
- }
- else
- {
- l6 ([&]{trace << "'" << p << "' -> " << d << " priority "
- << prio;});
- m.emplace (move (p), prefix_value {move (d), prio});
- }
- };
-
-#if 1
- // Enter all outer prefixes, including prefixless.
- //
- // The prefixless part is fuzzy but seems to be doing the right
- // thing ignoring/overriding-wise, at least in cases where one of
- // the competing -I paths is a subdirectory of another. But the
- // proper solution will be to keep all the prefixless entries (by
- // changing prefix_map to a multimap) since for them we have an
- // extra check (target must be explicitly spelled out in a
- // buildfile).
- //
- for (size_t prio (0);; ++prio)
- {
- bool e (p.empty ());
- enter ((e ? move (p) : p), (e ? move (d) : d), prio);
- if (e)
- break;
- p = p.directory ();
- }
-#else
- size_t prio (0);
- for (bool e (false); !e; ++prio)
- {
- dir_path n (p.directory ());
- e = n.empty ();
- enter ((e ? move (p) : p), (e ? move (d) : d), prio);
- p = move (n);
- }
-#endif
+ if (d.sub (rs.out_path ()))
+ append_prefix (trace, m, t, move (d));
}
}
}
@@ -1768,15 +1634,16 @@ namespace build2
auto compile_rule::
build_prefix_map (const scope& bs,
action a,
- target& t,
+ const target& t,
linfo li) const -> prefix_map
{
prefix_map pm;
// First process our own.
//
- append_prefixes (pm, t, x_poptions);
- append_prefixes (pm, t, c_poptions);
+ const scope& rs (*bs.root_scope ());
+ append_prefixes (pm, rs, t, x_poptions);
+ append_prefixes (pm, rs, t, c_poptions);
// Then process the include directories from prerequisite libraries.
//
@@ -1786,68 +1653,6 @@ namespace build2
return pm;
}
- // Return the next make prerequisite starting from the specified
- // position and update position to point to the start of the
- // following prerequisite or l.size() if there are none left.
- //
- static string
- next_make (const string& l, size_t& p)
- {
- size_t n (l.size ());
-
- // Skip leading spaces.
- //
- for (; p != n && l[p] == ' '; p++) ;
-
- // Lines containing multiple prerequisites are 80 characters max.
- //
- string r;
- r.reserve (n);
-
- // Scan the next prerequisite while watching out for escape sequences.
- //
- for (; p != n && l[p] != ' '; p++)
- {
- char c (l[p]);
-
- if (p + 1 != n)
- {
- if (c == '$')
- {
- // Got to be another (escaped) '$'.
- //
- if (l[p + 1] == '$')
- ++p;
- }
- else if (c == '\\')
- {
- // This may or may not be an escape sequence depending on whether
- // what follows is "escapable".
- //
- switch (c = l[++p])
- {
- case '\\': break;
- case ' ': break;
- default: c = '\\'; --p; // Restore.
- }
- }
- }
-
- r += c;
- }
-
- // Skip trailing spaces.
- //
- for (; p != n && l[p] == ' '; p++) ;
-
- // Skip final '\'.
- //
- if (p == n - 1 && l[p] == '\\')
- p++;
-
- return r;
- }
-
// VC /showIncludes output. The first line is the file being compiled
// (unless clang-cl; handled by our caller). Then we have the list of
// headers, one per line, in this form (text can presumably be
@@ -2043,7 +1848,7 @@ namespace build2
// Any unhandled io_error is handled by the caller as a generic module
// mapper io error. Returning false terminates the communication.
//
- struct compile_rule::module_mapper_state //@@ gcc_module_mapper_state
+ struct compile_rule::gcc_module_mapper_state
{
size_t skip; // Number of depdb entries to skip.
size_t header_units = 0; // Number of header units imported.
@@ -2054,15 +1859,20 @@ namespace build2
optional<const build2::cc::translatable_headers*> translatable_headers;
small_vector<string, 2> batch; // Reuse buffers.
+ size_t batch_n = 0;
- module_mapper_state (size_t s, module_imports& i)
+ gcc_module_mapper_state (size_t s, module_imports& i)
: skip (s), imports (i) {}
};
- bool compile_rule::
- gcc_module_mapper (module_mapper_state& st,
+ // The module mapper is called on one line of input at a time. It should
+ // return nullopt if another line is expected (batch), false if the mapper
+ // interaction should be terminated, and true if it should be continued.
+ //
+ optional<bool> compile_rule::
+ gcc_module_mapper (gcc_module_mapper_state& st,
action a, const scope& bs, file& t, linfo li,
- ifdstream& is,
+ const string& l,
ofdstream& os,
depdb& dd, bool& update, bool& bad_error,
optional<prefix_map>& pfx_map, srcout_map& so_map) const
@@ -2078,35 +1888,40 @@ namespace build2
// Read in the entire batch trying hard to reuse the buffers.
//
- auto& batch (st.batch);
- size_t batch_n (0);
+ small_vector<string, 2>& batch (st.batch);
+ size_t& batch_n (st.batch_n);
- for (;;)
+ // Add the next line.
+ //
{
if (batch.size () == batch_n)
- batch.push_back (string ());
-
- string& r (batch[batch_n]);
-
- if (eof (getline (is, r)))
- break;
+ batch.push_back (l);
+ else
+ batch[batch_n] = l;
batch_n++;
+ }
- if (r.back () != ';')
- break;
+ // Check if more is expected in this batch.
+ //
+ {
+ string& r (batch[batch_n - 1]);
- // Strip the trailing `;` word.
- //
- r.pop_back ();
- r.pop_back ();
- }
+ if (r.back () == ';')
+ {
+ // Strip the trailing `;` word.
+ //
+ r.pop_back ();
+ r.pop_back ();
- if (batch_n == 0) // EOF
- return false;
+ return nullopt;
+ }
+ }
if (verb >= 3)
{
+ // It doesn't feel like buffering this would be useful.
+ //
// Note that we show `;` in requests/responses so that the result
// could be replayed.
//
@@ -2128,23 +1943,211 @@ namespace build2
for (size_t i (0); i != batch_n; ++i)
{
string& r (batch[i]);
+ size_t rn (r.size ());
+
+ // The protocol uses a peculiar quoting/escaping scheme that can be
+ // summarized as follows (see the libcody documentation for details):
+ //
+ // - Words are separated with spaces and/or tabs.
+ //
+ // - Words need not be quoted if they only contain characters from
+ // the [-+_/%.A-Za-z0-9] set.
+ //
+ // - Otherwise words need to be single-quoted.
+ //
+ // - Inside single-quoted words, the \n \t \' and \\ escape sequences
+ // are recognized.
+ //
+ // Note that we currently don't treat abutted quotes (as in a' 'b) as
+ // a single word (it doesn't seem plausible that we will ever receive
+ // something like this).
+ //
+ size_t b (0), e (0), n; bool q; // Next word.
+
+ auto next = [&r, rn, &b, &e, &n, &q] () -> size_t
+ {
+ if (b != e)
+ b = e;
+
+ // Skip leading whitespaces.
+ //
+ for (; b != rn && (r[b] == ' ' || r[b] == '\t'); ++b) ;
+
+ if (b != rn)
+ {
+ q = (r[b] == '\'');
+
+ // Find first trailing whitespace or closing quote.
+ //
+ for (e = b + 1; e != rn; ++e)
+ {
+ // Note that we deal with invalid quoting/escaping in unquote().
+ //
+ switch (r[e])
+ {
+ case ' ':
+ case '\t':
+ if (q)
+ continue;
+ else
+ break;
+ case '\'':
+ if (q)
+ {
+ ++e; // Include closing quote (hopefully).
+ break;
+ }
+ else
+ {
+ assert (false); // Abutted quote.
+ break;
+ }
+ case '\\':
+ if (++e != rn) // Skip next character (hopefully).
+ continue;
+ else
+ break;
+ default:
+ continue;
+ }
+
+ break;
+ }
+
+ n = e - b;
+ }
+ else
+ {
+ q = false;
+ e = rn;
+ n = 0;
+ }
+
+ return n;
+ };
+
+ // Unquote into tmp the current word returning false if malformed.
+ //
+ auto unquote = [&r, &b, &n, &q, &tmp] (bool clear = true) -> bool
+ {
+ if (q && n > 1)
+ {
+ size_t e (b + n - 1);
+
+ if (r[b] == '\'' && r[e] == '\'')
+ {
+ if (clear)
+ tmp.clear ();
+
+ size_t i (b + 1);
+ for (; i != e; ++i)
+ {
+ char c (r[i]);
+ if (c == '\\')
+ {
+ if (++i == e)
+ {
+ i = 0;
+ break;
+ }
+
+ c = r[i];
+ if (c == 'n') c = '\n';
+ else if (c == 't') c = '\t';
+ }
+ tmp += c;
+ }
+
+ if (i == e)
+ return true;
+ }
+ }
- // @@ TODO: quoting and escaping.
+ return false;
+ };
+
+#if 0
+#define UNQUOTE(x, y) \
+ r = x; rn = r.size (); b = e = 0; \
+ assert (next () && unquote () && tmp == y)
+
+ UNQUOTE ("'foo bar'", "foo bar");
+ UNQUOTE (" 'foo bar' ", "foo bar");
+ UNQUOTE ("'foo\\\\bar'", "foo\\bar");
+ UNQUOTE ("'\\'foo bar'", "'foo bar");
+ UNQUOTE ("'foo bar\\''", "foo bar'");
+ UNQUOTE ("'\\'foo\\\\bar\\''", "'foo\\bar'");
+
+ fail << "all good";
+#endif
+
+ // Escape if necessary the specified string and append to r.
//
- size_t b (0), e (0), n; // Next word.
+ auto escape = [&r] (const string& s)
+ {
+ size_t b (0), e, n (s.size ());
+ while (b != n && (e = s.find_first_of ("\\'\n\t", b)) != string::npos)
+ {
+ r.append (s, b, e - b); // Preceding chunk.
+
+ char c (s[e]);
+ r += '\\';
+ r += (c == '\n' ? 'n' : c == '\t' ? 't' : c);
+ b = e + 1;
+ }
+
+ if (b != n)
+ r.append (s, b, e); // Final chunk.
+ };
- auto next = [&r, &b, &e, &n] () -> size_t
+ // Quote and escape if necessary the specified string and append to r.
+ //
+ auto quote = [&r, &escape] (const string& s)
{
- return (n = next_word (r, b, e, ' ', '\t'));
+ if (find_if (s.begin (), s.end (),
+ [] (char c)
+ {
+ return !((c >= 'a' && c <= 'z') ||
+ (c >= '0' && c <= '9') ||
+ (c >= 'A' && c <= 'Z') ||
+ c == '-' || c == '_' || c == '/' ||
+ c == '.' || c == '+' || c == '%');
+ }) == s.end ())
+ {
+ r += s;
+ }
+ else
+ {
+ r += '\'';
+ escape (s);
+ r += '\'';
+ }
};
+#if 0
+#define QUOTE(x, y) \
+ r.clear (); quote (x); \
+ assert (r == y)
+
+ QUOTE ("foo/Bar-7.h", "foo/Bar-7.h");
+
+ QUOTE ("foo bar", "'foo bar'");
+ QUOTE ("foo\\bar", "'foo\\\\bar'");
+ QUOTE ("'foo bar", "'\\'foo bar'");
+ QUOTE ("foo bar'", "'foo bar\\''");
+ QUOTE ("'foo\\bar'", "'\\'foo\\\\bar\\''");
+
+ fail << "all good";
+#endif
+
next (); // Request name.
- auto name = [&r, b, n] (const char* c) -> bool
+ auto name = [&r, b, n, q] (const char* c) -> bool
{
// We can reasonably assume a command will never be quoted.
//
- return (r.compare (b, n, c) == 0 &&
+ return (!q &&
+ r.compare (b, n, c) == 0 &&
(r[n] == ' ' || r[n] == '\t' || r[n] == '\0'));
};
@@ -2193,7 +2196,17 @@ namespace build2
if (next ())
{
- path f (r, b, n);
+ path f;
+ if (!q)
+ f = path (r, b, n);
+ else if (unquote ())
+ f = path (tmp);
+ else
+ {
+ r = "ERROR 'malformed quoting/escaping in request'";
+ continue;
+ }
+
bool exists (true);
// The TU path we pass to the compiler is always absolute so any
@@ -2204,8 +2217,9 @@ namespace build2
//
if (exists && f.relative ())
{
- tmp.assign (r, b, n);
- r = "ERROR relative header path '"; r += tmp; r += '\'';
+ r = "ERROR 'relative header path ";
+ escape (f.string ());
+ r += '\'';
continue;
}
@@ -2242,16 +2256,17 @@ namespace build2
try
{
pair<const file*, bool> er (
- enter_header (a, bs, t, li,
- move (f), false /* cache */, false /* norm */,
- pfx_map, so_map));
+ enter_header (
+ a, bs, t, li,
+ move (f), false /* cache */, false /* normalized */,
+ pfx_map, so_map));
ht = er.first;
remapped = er.second;
if (remapped)
{
- r = "ERROR remapping of headers not supported";
+ r = "ERROR 'remapping of headers not supported'";
continue;
}
@@ -2261,14 +2276,14 @@ namespace build2
// diagnostics won't really add anything to the compiler's. So
// let's only print it at -V or higher.
//
- if (ht == nullptr)
+ if (ht == nullptr) // f is still valid.
{
assert (!exists); // Sanity check.
if (verb > 2)
{
diag_record dr;
- dr << error << "header '" << f << "' not found and no "
+ dr << error << "header " << f << " not found and no "
<< "rule to generate it";
if (verb < 4)
@@ -2309,8 +2324,10 @@ namespace build2
// messy, let's keep both (it would have been nicer to print
// ours after the compiler's but that isn't easy).
//
- r = "ERROR unable to update header '";
- r += (ht != nullptr ? ht->path () : f).string ();
+ // Note: if ht is NULL, f is still valid.
+ //
+ r = "ERROR 'unable to update header ";
+ escape ((ht != nullptr ? ht->path () : f).string ());
r += '\'';
continue;
}
@@ -2445,17 +2462,27 @@ namespace build2
// original (which we may need to normalize when we read
// this mapping in extract_headers()).
//
- tmp = "@ "; tmp.append (r, b, n); tmp += ' '; tmp += bp;
+ // @@ This still breaks if the header path contains spaces.
+ // GCC bug 110153.
+ //
+ tmp = "@ ";
+ if (!q) tmp.append (r, b, n);
+ else unquote (false /* clear */); // Can't fail.
+ tmp += ' ';
+ tmp += bp;
+
dd.expect (tmp);
st.header_units++;
}
- r = "PATHNAME "; r += bp;
+ r = "PATHNAME ";
+ quote (bp);
}
catch (const failed&)
{
r = "ERROR 'unable to update header unit for ";
- r += hs; r += '\'';
+ escape (hs);
+ r += '\'';
continue;
}
}
@@ -2481,7 +2508,7 @@ namespace build2
// Truncate the response batch and terminate the communication (see
// also libcody issue #22).
//
- tmp.assign (r, b, n);
+ tmp.assign (r, b, n); // Request name (unquoted).
r = "ERROR '"; r += w; r += ' '; r += tmp; r += '\'';
batch_n = i + 1;
term = true;
@@ -2497,6 +2524,9 @@ namespace build2
// Write the response batch.
//
+ // @@ It's theoretically possible that we get blocked writing the
+ // response while the compiler gets blocked writing the diagnostics.
+ //
for (size_t i (0);; )
{
string& r (batch[i]);
@@ -2517,6 +2547,8 @@ namespace build2
os.flush ();
+ batch_n = 0; // Start a new batch.
+
return !term;
}
@@ -2770,9 +2802,10 @@ namespace build2
if (exists)
{
pair<const file*, bool> r (
- enter_header (a, bs, t, li,
- move (f), false /* cache */, false /* norm */,
- pfx_map, so_map));
+ enter_header (
+ a, bs, t, li,
+ move (f), false /* cache */, false /* normalized */,
+ pfx_map, so_map));
if (!r.second) // Shouldn't be remapped.
ht = r.first;
@@ -2780,7 +2813,7 @@ namespace build2
if (ht != pts.back ())
{
- ht = static_cast<const file*> (pts.back ().target);
+ ht = &pts.back ().target->as<file> ();
rs = "ERROR expected header '" + ht->path ().string () +
"' to be found instead";
bad_error = true; // We expect an error from the compiler.
@@ -2797,9 +2830,10 @@ namespace build2
try
{
pair<const file*, bool> er (
- enter_header (a, bs, t, li,
- move (f), false /* cache */, false /* norm */,
- pfx_map, so_map));
+ enter_header (
+ a, bs, t, li,
+ move (f), false /* cache */, false /* normalized */,
+ pfx_map, so_map));
ht = er.first;
remapped = er.second;
@@ -2817,7 +2851,7 @@ namespace build2
// diagnostics won't really add anything to the compiler's. So
// let's only print it at -V or higher.
//
- if (ht == nullptr)
+ if (ht == nullptr) // f is still valid.
{
assert (!exists); // Sanity check.
@@ -2864,10 +2898,12 @@ namespace build2
// messy, let's keep both (it would have been nicer to print
// ours after the compiler's but that isn't easy).
//
+ // Note: if ht is NULL, f is still valid.
+ //
rs = !exists
? string ("INCLUDE")
: ("ERROR unable to update header '" +
- (ht != nullptr ? ht->path () : f).string () + "'");
+ (ht != nullptr ? ht->path () : f).string () + '\'');
bad_error = true;
break;
@@ -2945,7 +2981,7 @@ namespace build2
}
catch (const failed&)
{
- rs = "ERROR unable to update header unit '" + hp + "'";
+ rs = "ERROR unable to update header unit '" + hp + '\'';
bad_error = true;
break;
}
@@ -2987,351 +3023,204 @@ namespace build2
}
#endif
- // Enter as a target a header file. Depending on the cache flag, the file
- // is assumed to either have come from the depdb cache or from the
- // compiler run.
- //
- // Return the header target and an indication of whether it was remapped
- // or NULL if the header does not exist and cannot be generated. In the
- // latter case the passed header path is guaranteed to be still valid but
- // might have been adjusted (e.g., normalized, etc).
+ //atomic_count cache_hit {0};
+ //atomic_count cache_mis {0};
+ //atomic_count cache_cls {0};
+
+ // The fp path is only moved from on success.
//
// Note: this used to be a lambda inside extract_headers() so refer to the
// body of that function for the overall picture.
//
pair<const file*, bool> compile_rule::
enter_header (action a, const scope& bs, file& t, linfo li,
- path&& f, bool cache, bool norm,
- optional<prefix_map>& pfx_map, srcout_map& so_map) const
+ path&& fp, bool cache, bool norm,
+ optional<prefix_map>& pfx_map,
+ const srcout_map& so_map) const
{
tracer trace (x, "compile_rule::enter_header");
- // Find or maybe insert the target. The directory is only moved from if
- // insert is true. Note that it must be normalized.
- //
- auto find = [&trace, &t, this] (dir_path&& d,
- path&& f,
- bool insert) -> const file*
+ // It's reasonable to expect the same header to be included by multiple
+ // translation units, which means we will be re-doing this work over and
+ // over again. And it's not exactly cheap, taking up to 50% of an
+ // up-to-date check time on some projects. So we are going to cache the
+ // header path to target mapping.
+ //
+ // While we pass quite a bit of specific "context" (target, base scope)
+ // to enter_file(), here is the analysis why the result will not depend
+ // on this context for the non-absent header (fp is absolute):
+ //
+ // 1. Let's start with the base scope (bs). Firstly, the base scope
+ // passed to map_extension() is the scope of the header (i.e., it is
+ // the scope of fp.directory()). Other than that, the target base
+ // scope is only passed to build_prefix_map() which is only called
+ // for the absent header (linfo is also only used here).
+ //
+ // 2. Next is the target (t). It is passed to build_prefix_map() but
+ // that doesn't matter for the same reason as in (1). Other than
+ // that, it is only passed to build2::search() which in turn passes
+ // it to target type-specific prerequisite search callback (see
+ // target_type::search) if one is not NULL. The target type in
+ // question here is one of the headers and we know all of them use
+ // the standard file_search() which ignores the passed target.
+ //
+ // 3. Finally, so_map could be used for an absolute fp. While we could
+ // simply not cache the result if it was used (second half of the
+ // result pair is true), there doesn't seem to be any harm in caching
+ // the remapped path->target mapping. In fact, if to think about it,
+ // there is no harm in caching the generated file mapping since it
+ // will be immediately generated and any subsequent inclusions we
+ // will "see" with an absolute path, which we can resolve from the
+ // cache.
+ //
+ // To put it another way, all we need to do is make sure that if we were
+ // to not return an existing cache entry, the call to enter_file() would
+ // have returned exactly the same path/target.
+ //
+ // @@ Could it be that the header is re-mapped in one config but not the
+ // other (e.g., when we do both in src and in out builds and we pick
+ // the generated header in src)? If so, that would lead to a
+ // divergence. I.e., we would cache the no-remap case first and then
+ // return it even though the re-map is necessary? Why can't we just
+ // check for re-mapping ourselves? A: the remapping logic in
+ // enter_file() is not exactly trivial.
+ //
+ // But on the other hand, I think we can assume that different
+ // configurations will end up with different caches. In other words,
+ // we can assume that for the same "cc amalgamation" we use only a
+ // single "version" of a header. Seems reasonable.
+ //
+ // Note also that while it would have been nice to have a unified cc
+ // cache, the map_extension() call is passed x_inc which is module-
+ // specific. In other words, we may end up mapping the same header to
+ // two different targets depending on whether it is included from, say,
+ // C or C++ translation unit. We could have used a unified cache for
+ // headers that were mapped using the fallback target type, which would
+ // cover the installed headers. Maybe, one day (it's also possible that
+ // separate caches reduce contention).
+ //
+ // Another related question is where we want to keep the cache: project,
+ // strong amalgamation, or weak amalgamation (like module sidebuilds).
+ // Some experimentation showed that weak has the best performance (which
+ // suggest that a unified cache will probably be a win).
+ //
+ // Note also that we don't need to clear this cache since we never clear
+ // the targets set. In other words, the only time targets are
+ // invalidated is when we destroy the build context, which also destroys
+ // the cache.
+ //
+ const config_module& hc (*header_cache_);
+
+ // First check the cache.
+ //
+ config_module::header_key hk;
+
+ bool e (fp.absolute ());
+ if (e)
{
- // Split the file into its name part and extension. Here we can assume
- // the name part is a valid filesystem name.
- //
- // Note that if the file has no extension, we record an empty
- // extension rather than NULL (which would signify that the default
- // extension should be added).
- //
- string e (f.extension ());
- string n (move (f).string ());
-
- if (!e.empty ())
- n.resize (n.size () - e.size () - 1); // One for the dot.
-
- // See if this directory is part of any project and if so determine
- // the target type.
- //
- // While at it also determine if this target is from the src or out
- // tree of said project.
- //
- dir_path out;
-
- // It's possible the extension-to-target type mapping is ambiguous
- // (usually because both C and X-language headers use the same .h
- // extension). In this case we will first try to find one that matches
- // an explicit target (similar logic to when insert is false).
- //
- small_vector<const target_type*, 2> tts;
-
- // Note that the path can be in out or src directory and the latter
- // can be associated with multiple scopes. So strictly speaking we
- // need to pick one that is "associated" with us. But that is still a
- // TODO (see scope_map::find() for details) and so for now we just
- // pick the first one (it's highly unlikely the source file extension
- // mapping will differ based on the configuration).
- //
- {
- const scope& bs (**t.ctx.scopes.find (d).first);
- if (const scope* rs = bs.root_scope ())
- {
- tts = map_extension (bs, n, e);
-
- if (!bs.out_eq_src () && d.sub (bs.src_path ()))
- out = out_src (d, *rs);
- }
- }
-
- // If it is outside any project, or the project doesn't have such an
- // extension, assume it is a plain old C header.
- //
- if (tts.empty ())
+ if (!norm)
{
- // If the project doesn't "know" this extension then we can't
- // possibly find an explicit target of this type.
- //
- if (!insert)
- return nullptr;
-
- tts.push_back (&h::static_type);
+ normalize_external (fp, "header");
+ norm = true;
}
- // Find or insert target.
- //
- // Note that in case of the target type ambiguity we first try to find
- // an explicit target that resolves this ambiguity.
- //
- const target* r (nullptr);
+ hk.file = move (fp);
+ hk.hash = hash<path> () (hk.file);
- if (!insert || tts.size () > 1)
+ slock l (hc.header_map_mutex);
+ auto i (hc.header_map.find (hk));
+ if (i != hc.header_map.end ())
{
- // Note that we skip any target type-specific searches (like for an
- // existing file) and go straight for the target object since we
- // need to find the target explicitly spelled out.
- //
- // Also, it doesn't feel like we should be able to resolve an
- // absolute path with a spelled-out extension to multiple targets.
- //
- for (const target_type* tt: tts)
- if ((r = t.ctx.targets.find (*tt, d, out, n, e, trace)) != nullptr)
- break;
-
- // Note: we can't do this because of the in-source builds where
- // there won't be explicit targets for non-generated headers.
- //
- // This should be harmless, however, since in our world generated
- // headers are normally spelled-out as explicit targets. And if not,
- // we will still get an error, just a bit less specific.
- //
-#if 0
- if (r == nullptr && insert)
- {
- f = d / n;
- if (!e.empty ())
- {
- f += '.';
- f += e;
- }
-
- diag_record dr (fail);
- dr << "mapping of header " << f << " to target type is ambiguous";
- for (const target_type* tt: tts)
- dr << info << "could be " << tt->name << "{}";
- dr << info << "spell-out its target to resolve this ambiguity";
- }
-#endif
+ //cache_hit.fetch_add (1, memory_order_relaxed);
+ return make_pair (i->second, false);
}
- // @@ OPT: move d, out, n
- //
- if (r == nullptr && insert)
- r = &search (t, *tts[0], d, out, n, &e, nullptr);
+ fp = move (hk.file);
- return static_cast<const file*> (r);
- };
+ //cache_mis.fetch_add (1, memory_order_relaxed);
+ }
- // If it's not absolute then it either does not (yet) exist or is a
- // relative ""-include (see init_args() for details). Reduce the second
- // case to absolute.
- //
- // Note: we now always use absolute path to the translation unit so this
- // no longer applies. But let's keep it for posterity.
- //
-#if 0
- if (f.relative () && rels.relative ())
+ struct data
{
- // If the relative source path has a directory component, make sure
- // it matches since ""-include will always start with that (none of
- // the compilers we support try to normalize this path). Failed that
- // we may end up searching for a generated header in a random
- // (working) directory.
- //
- const string& fs (f.string ());
- const string& ss (rels.string ());
-
- size_t p (path::traits::rfind_separator (ss));
-
- if (p == string::npos || // No directory.
- (fs.size () > p + 1 &&
- path::traits::compare (fs.c_str (), p, ss.c_str (), p) == 0))
- {
- path t (work / f); // The rels path is relative to work.
-
- if (exists (t))
- f = move (t);
- }
- }
-#endif
+ linfo li;
+ optional<prefix_map>& pfx_map;
+ } d {li, pfx_map};
+
+ // If it is outside any project, or the project doesn't have such an
+ // extension, assume it is a plain old C header.
+ //
+ auto r (enter_file (
+ trace, "header",
+ a, bs, t,
+ fp, cache, norm,
+ [this] (const scope& bs, const string& n, const string& e)
+ {
+ return map_extension (bs, n, e, x_inc);
+ },
+ h::static_type,
+ [this, &d] (action a, const scope& bs, const target& t)
+ -> const prefix_map&
+ {
+ if (!d.pfx_map)
+ d.pfx_map = build_prefix_map (bs, a, t, d.li);
- const file* pt (nullptr);
- bool remapped (false);
+ return *d.pfx_map;
+ },
+ so_map));
- // If still relative then it does not exist.
+ // Cache.
//
- if (f.relative ())
+ if (r.first != nullptr)
{
- // This is probably as often an error as an auto-generated file, so
- // trace at level 4.
- //
- l4 ([&]{trace << "non-existent header '" << f << "'";});
-
- f.normalize ();
-
- // The relative path might still contain '..' (e.g., ../foo.hxx;
- // presumably ""-include'ed). We don't attempt to support auto-
- // generated headers with such inclusion styles.
- //
- if (f.normalized ())
- {
- if (!pfx_map)
- pfx_map = build_prefix_map (bs, a, t, li);
-
- // First try the whole file. Then just the directory.
- //
- // @@ Has to be a separate map since the prefix can be the same as
- // the file name.
- //
- // auto i (pfx_map->find (f));
-
- // Find the most qualified prefix of which we are a sub-path.
- //
- if (!pfx_map->empty ())
- {
- dir_path d (f.directory ());
- auto i (pfx_map->find_sup (d));
+ hk.file = move (fp);
- if (i != pfx_map->end ())
- {
- // Note: value in pfx_map is not necessarily canonical.
- //
- dir_path pd (i->second.directory);
- pd.canonicalize ();
-
- l4 ([&]{trace << "prefix '" << d << "' mapped to " << pd;});
-
- // If this is a prefixless mapping, then only use it if we can
- // resolve it to an existing target (i.e., it is explicitly
- // spelled out in a buildfile).
- //
- // Note that at some point we will probably have a list of
- // directories.
- //
- pt = find (pd / d, f.leaf (), !i->first.empty ());
- if (pt != nullptr)
- {
- f = pd / f;
- l4 ([&]{trace << "mapped as auto-generated " << f;});
- }
- else
- l4 ([&]{trace << "no explicit target in " << pd;});
- }
- else
- l4 ([&]{trace << "no prefix map entry for '" << d << "'";});
- }
- else
- l4 ([&]{trace << "prefix map is empty";});
- }
- }
- else
- {
- // Normalize the path unless it comes from the depdb, in which case
- // we've already done that (normally). This is also where we handle
- // src-out remap (again, not needed if cached).
+ // Calculate the hash if we haven't yet and re-calculate it if the
+ // path has changed (header has been remapped).
//
- if (!cache || norm)
- normalize_header (f);
+ if (!e || r.second)
+ hk.hash = hash<path> () (hk.file);
- if (!cache)
+ const file* f;
{
- if (!so_map.empty ())
- {
- // Find the most qualified prefix of which we are a sub-path.
- //
- auto i (so_map.find_sup (f));
- if (i != so_map.end ())
- {
- // Ok, there is an out tree for this headers. Remap to a path
- // from the out tree and see if there is a target for it. Note
- // that the value in so_map is not necessarily canonical.
- //
- dir_path d (i->second);
- d /= f.leaf (i->first).directory ();
- d.canonicalize ();
-
- pt = find (move (d), f.leaf (), false); // d is not moved from.
-
- if (pt != nullptr)
- {
- path p (d / f.leaf ());
- l4 ([&]{trace << "remapping " << f << " to " << p;});
- f = move (p);
- remapped = true;
- }
- }
- }
+ ulock l (hc.header_map_mutex);
+ auto p (hc.header_map.emplace (move (hk), r.first));
+ f = p.second ? nullptr : p.first->second;
}
- if (pt == nullptr)
+ if (f != nullptr)
{
- l6 ([&]{trace << "entering " << f;});
- pt = find (f.directory (), f.leaf (), true);
+ //cache_cls.fetch_add (1, memory_order_relaxed);
+ assert (r.first == f);
}
}
- return make_pair (pt, remapped);
+ return r;
}
- // Update and add to the list of prerequisite targets a header or header
- // unit target.
- //
- // Return the indication of whether it has changed or, if the passed
- // timestamp is not timestamp_unknown, is older than the target. If the
- // header does not exists nor can be generated (no rule), then issue
- // diagnostics and fail if the fail argument is true and return nullopt
- // otherwise.
- //
// Note: this used to be a lambda inside extract_headers() so refer to the
// body of that function for the overall picture.
//
optional<bool> compile_rule::
inject_header (action a, file& t,
- const file& pt, timestamp mt, bool f /* fail */) const
+ const file& pt, timestamp mt, bool fail) const
{
tracer trace (x, "compile_rule::inject_header");
- // Even if failing we still use try_match() in order to issue consistent
- // (with extract_headers() below) diagnostics (rather than the generic
- // "not rule to update ...").
- //
- if (!try_match (a, pt).first)
- {
- if (!f)
- return nullopt;
-
- diag_record dr;
- dr << fail << "header " << pt << " not found and no rule to "
- << "generate it";
-
- if (verb < 4)
- dr << info << "re-run with --verbose=4 for more information";
- }
-
- bool r (update (trace, a, pt, mt));
-
- // Add to our prerequisite target list.
- //
- t.prerequisite_targets[a].push_back (&pt);
-
- return r;
+ return inject_file (trace, "header", a, t, pt, mt, fail);
}
- // Extract and inject header dependencies. Return the preprocessed source
- // file as well as an indication if it is usable for compilation (see
- // below for details).
+ // Extract and inject header dependencies. Return (in result) the
+ // preprocessed source file as well as an indication if it is usable for
+ // compilation (see below for details). Note that result is expected to
+ // be initialized to {entry (), false}. Not using return type due to
+ // GCC bug #107555.
//
// This is also the place where we handle header units which are a lot
// more like auto-generated headers than modules. In particular, if a
// header unit BMI is out-of-date, then we have to re-preprocess this
// translation unit.
//
- pair<file_cache::entry, bool> compile_rule::
+ void compile_rule::
extract_headers (action a,
const scope& bs,
file& t,
@@ -3341,7 +3230,8 @@ namespace build2
depdb& dd,
bool& update,
timestamp mt,
- module_imports& imports) const
+ module_imports& imports,
+ pair<file_cache::entry, bool>& result) const
{
tracer trace (x, "compile_rule::extract_headers");
@@ -3354,19 +3244,16 @@ namespace build2
file_cache::entry psrc;
bool puse (true);
- // If things go wrong (and they often do in this area), give the user a
- // bit extra context.
+ // Preprocessed file extension.
//
- auto df = make_diag_frame (
- [&src](const diag_record& dr)
- {
- if (verb != 0)
- dr << info << "while extracting header dependencies from " << src;
- });
+ const char* pext (x_assembler_cpp (src) ? ".Si" :
+ x_objective (src) ? x_obj_pext :
+ x_pext);
// Preprocesor mode that preserves as much information as possible while
// still performing inclusions. Also serves as a flag indicating whether
- // this compiler uses the separate preprocess and compile setup.
+ // this (non-MSVC) compiler uses the separate preprocess and compile
+ // setup.
//
const char* pp (nullptr);
@@ -3377,7 +3264,16 @@ namespace build2
// -fdirectives-only is available since GCC 4.3.0.
//
if (cmaj > 4 || (cmaj == 4 && cmin >= 3))
- pp = "-fdirectives-only";
+ {
+ // Note that for assembler-with-cpp GCC currently forces full
+ // preprocessing in (what appears to be) an attempt to paper over
+ // a deeper issue (see GCC bug 109534). If/when that bug gets
+ // fixed, we can enable this on our side. Note also that Clang's
+ // -frewrite-includes appears to work correctly on such files.
+ //
+ if (!x_assembler_cpp (src))
+ pp = "-fdirectives-only";
+ }
break;
}
@@ -3467,7 +3363,7 @@ namespace build2
//
// GCC's -fdirective-only, on the other hand, processes all the
// directives so they are gone from the preprocessed source. Here is
- // what we are going to do to work around this: we will detect if any
+ // what we are going to do to work around this: we will sense if any
// diagnostics has been written to stderr on the -E run. If that's the
// case (but the compiler indicated success) then we assume they are
// warnings and disable the use of the preprocessed output for
@@ -3496,6 +3392,8 @@ namespace build2
//
// So seeing that it is hard to trigger a legitimate VC preprocessor
// warning, for now, we will just treat them as errors by adding /WX.
+ // BTW, another example of a plausible preprocessor warnings are C4819
+ // and C4828 (character unrepresentable in source charset).
//
// Finally, if we are using the module mapper, then all this mess falls
// away: we only run the compiler once, we let the diagnostics through,
@@ -3503,7 +3401,9 @@ namespace build2
// not found, and there is no problem with outdated generated headers
// since we update/remap them before the compiler has a chance to read
// them. Overall, this "dependency mapper" approach is how it should
- // have been done from the beginning.
+ // have been done from the beginning. Note: that's the ideal world,
+ // the reality is that the required mapper extensions are not (yet)
+ // in libcody/GCC.
// Note: diagnostics sensing is currently only supported if dependency
// info is written to a file (see above).
@@ -3513,15 +3413,15 @@ namespace build2
// And here is another problem: if we have an already generated header
// in src and the one in out does not yet exist, then the compiler will
// pick the one in src and we won't even notice. Note that this is not
- // only an issue with mixing in- and out-of-tree builds (which does feel
+ // only an issue with mixing in and out of source builds (which does feel
// wrong but is oh so convenient): this is also a problem with
// pre-generated headers, a technique we use to make installing the
// generator by end-users optional by shipping pre-generated headers.
//
// This is a nasty problem that doesn't seem to have a perfect solution
- // (except, perhaps, C++ modules). So what we are going to do is try to
- // rectify the situation by detecting and automatically remapping such
- // mis-inclusions. It works as follows.
+ // (except, perhaps, C++ modules and/or module mapper). So what we are
+ // going to do is try to rectify the situation by detecting and
+ // automatically remapping such mis-inclusions. It works as follows.
//
// First we will build a map of src/out pairs that were specified with
// -I. Here, for performance and simplicity, we will assume that they
@@ -3534,10 +3434,7 @@ namespace build2
// case, then we calculate a corresponding header in the out tree and,
// (this is the most important part), check if there is a target for
// this header in the out tree. This should be fairly accurate and not
- // require anything explicit from the user except perhaps for a case
- // where the header is generated out of nothing (so there is no need to
- // explicitly mention its target in the buildfile). But this probably
- // won't be very common.
+ // require anything explicit from the user.
//
// One tricky area in this setup are target groups: if the generated
// sources are mentioned in the buildfile as a group, then there might
@@ -3547,10 +3444,7 @@ namespace build2
// generated depending on the options (e.g., inline files might be
// suppressed), headers are usually non-optional.
//
- // Note that we use path_map instead of dir_path_map to allow searching
- // using path (file path).
- //
- srcout_map so_map; // path_map<dir_path>
+ srcout_map so_map;
// Dynamic module mapper.
//
@@ -3558,13 +3452,13 @@ namespace build2
// The gen argument to init_args() is in/out. The caller signals whether
// to force the generated header support and on return it signals
- // whether this support is enabled. The first call to init_args is
- // expected to have gen false.
+ // whether this support is enabled. If gen is false, then stderr is
+ // expected to be either discarded or merged with sdtout.
//
// Return NULL if the dependency information goes to stdout and a
// pointer to the temporary file path otherwise.
//
- auto init_args = [a, &t, ot, li, reprocess,
+ auto init_args = [a, &t, ot, li, reprocess, pext,
&src, &md, &psrc, &sense_diag, &mod_mapper, &bs,
pp, &env, &args, &args_gen, &args_i, &out, &drm,
&so_map, this]
@@ -3630,17 +3524,13 @@ namespace build2
// Populate the src-out with the -I$out_base -I$src_base pairs.
//
{
+ srcout_builder builder (ctx, so_map);
+
// Try to be fast and efficient by reusing buffers as much as
// possible.
//
string ds;
- // Previous -I innermost scope if out_base plus the difference
- // between the scope path and the -I path (normally empty).
- //
- const scope* s (nullptr);
- dir_path p;
-
for (auto i (args.begin ()), e (args.end ()); i != e; ++i)
{
const char* o (*i);
@@ -3665,7 +3555,7 @@ namespace build2
if (p == 0)
{
- s = nullptr;
+ builder.skip ();
continue;
}
@@ -3698,68 +3588,14 @@ namespace build2
//
if (!d.empty ())
{
- // Ignore any paths containing '.', '..' components. Allow
- // any directory separators though (think -I$src_root/foo
- // on Windows).
- //
- if (d.absolute () && d.normalized (false))
- {
- // If we have a candidate out_base, see if this is its
- // src_base.
- //
- if (s != nullptr)
- {
- const dir_path& bp (s->src_path ());
-
- if (d.sub (bp))
- {
- if (p.empty () || d.leaf (bp) == p)
- {
- // We've got a pair.
- //
- so_map.emplace (move (d), s->out_path () / p);
- s = nullptr; // Taken.
- continue;
- }
- }
-
- // Not a pair. Fall through to consider as out_base.
- //
- s = nullptr;
- }
-
- // See if this path is inside a project with an out-of-
- // tree build and is in the out directory tree.
- //
- const scope& bs (ctx.scopes.find_out (d));
- if (bs.root_scope () != nullptr)
- {
- if (!bs.out_eq_src ())
- {
- const dir_path& bp (bs.out_path ());
-
- bool e;
- if ((e = (d == bp)) || d.sub (bp))
- {
- s = &bs;
- if (e)
- p.clear ();
- else
- p = d.leaf (bp);
- }
- }
- }
- }
- else
- s = nullptr;
-
- ds = move (d).string (); // Move the buffer out.
+ if (!builder.next (move (d)))
+ ds = move (d).string (); // Move the buffer back out.
}
else
- s = nullptr;
+ builder.skip ();
}
else
- s = nullptr;
+ builder.skip ();
}
}
@@ -3806,19 +3642,41 @@ namespace build2
append_options (args, cmode);
append_sys_hdr_options (args); // Extra system header dirs (last).
- // See perform_update() for details on /external:W0, /EHsc, /MD.
+ // Note that for MSVC stderr is merged with stdout and is then
+ // parsed, so no append_diag_color_options() call.
+
+ // See perform_update() for details on the choice of options.
//
+ {
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
+
+ if (!sc && !ec)
+ args.push_back ("/utf-8");
+ else
+ {
+ if (!sc)
+ args.push_back ("/source-charset:UTF-8");
+
+ if (!ec)
+ args.push_back ("/execution-charset:UTF-8");
+ }
+ }
+
if (cvariant != "clang" && isystem (*this))
{
- if (find_option_prefix ("/external:I", args) &&
- !find_option_prefix ("/external:W", args))
+ if (find_option_prefixes ({"/external:I", "-external:I"}, args) &&
+ !find_option_prefixes ({"/external:W", "-external:W"}, args))
args.push_back ("/external:W0");
}
- if (x_lang == lang::cxx && !find_option_prefix ("/EH", args))
+ if (x_lang == lang::cxx &&
+ !find_option_prefixes ({"/EH", "-EH"}, args))
args.push_back ("/EHsc");
- if (!find_option_prefixes ({"/MD", "/MT"}, args))
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
args.push_back ("/MD");
args.push_back ("/P"); // Preprocess to file.
@@ -3829,7 +3687,7 @@ namespace build2
msvc_sanitize_cl (args);
- psrc = ctx.fcache.create (t.path () + x_pext, !modules);
+ psrc = ctx.fcache->create (t.path () + pext, !modules);
if (fc)
{
@@ -3848,8 +3706,20 @@ namespace build2
}
case compiler_class::gcc:
{
+ append_options (args, cmode,
+ cmode.size () - (modules && clang ? 1 : 0));
+ append_sys_hdr_options (args); // Extra system header dirs (last).
+
+ // If not gen, then stderr is discarded.
+ //
+ if (gen)
+ append_diag_color_options (args);
+
// See perform_update() for details on the choice of options.
//
+ if (!find_option_prefix ("-finput-charset=", args))
+ args.push_back ("-finput-charset=UTF-8");
+
if (ot == otype::s)
{
if (tclass == "linux" || tclass == "bsd")
@@ -3878,10 +3748,6 @@ namespace build2
}
}
- append_options (args, cmode,
- cmode.size () - (modules && clang ? 1 : 0));
- append_sys_hdr_options (args); // Extra system header dirs (last).
-
// Setup the dynamic module mapper if needed.
//
// Note that it's plausible in the future we will use it even if
@@ -3973,7 +3839,7 @@ namespace build2
// Preprocessor output.
//
- psrc = ctx.fcache.create (t.path () + x_pext, !modules);
+ psrc = ctx.fcache->create (t.path () + pext, !modules);
args.push_back ("-o");
args.push_back (psrc.path ().string ().c_str ());
}
@@ -4099,15 +3965,12 @@ namespace build2
// to be inconvenient: some users like to re-run a failed build with
// -s not to get "swamped" with errors.
//
- bool df (!ctx.match_only && !ctx.dry_run_option);
-
- const file* ht (enter_header (a, bs, t, li,
- move (hp), cache, false /* norm */,
- pfx_map, so_map).first);
- if (ht == nullptr)
+ auto fail = [&ctx] (const auto& h) -> optional<bool>
{
+ bool df (!ctx.match_only && !ctx.dry_run_option);
+
diag_record dr;
- dr << error << "header '" << hp << "' not found and no rule to "
+ dr << error << "header " << h << " not found and no rule to "
<< "generate it";
if (df)
@@ -4116,41 +3979,44 @@ namespace build2
if (verb < 4)
dr << info << "re-run with --verbose=4 for more information";
- if (df) return nullopt; else dr << endf;
- }
+ if (df)
+ return nullopt;
+ else
+ dr << endf;
+ };
- // If we are reading the cache, then it is possible the file has since
- // been removed (think of a header in /usr/local/include that has been
- // uninstalled and now we need to use one from /usr/include). This
- // will lead to the match failure which we translate to a restart.
- //
- if (optional<bool> u = inject_header (a, t, *ht, mt, false /* fail */))
+ if (const file* ht = enter_header (
+ a, bs, t, li,
+ move (hp), cache, cache /* normalized */,
+ pfx_map, so_map).first)
{
- // Verify/add it to the dependency database.
+ // If we are reading the cache, then it is possible the file has
+ // since been removed (think of a header in /usr/local/include that
+ // has been uninstalled and now we need to use one from
+ // /usr/include). This will lead to the match failure which we
+ // translate to a restart. And, yes, this case will trip up
+ // inject_header(), not enter_header().
//
- if (!cache)
- dd.expect (ht->path ());
-
- skip_count++;
- return *u;
- }
- else if (!cache)
- {
- diag_record dr;
- dr << error << "header " << *ht << " not found and no rule to "
- << "generate it";
-
- if (df)
- dr << info << "failure deferred to compiler diagnostics";
-
- if (verb < 4)
- dr << info << "re-run with --verbose=4 for more information";
+ if (optional<bool> u = inject_header (a, t, *ht, mt, false /*fail*/))
+ {
+ // Verify/add it to the dependency database.
+ //
+ if (!cache)
+ dd.expect (ht->path ());
- if (df) return nullopt; else dr << endf;
+ skip_count++;
+ return *u;
+ }
+ else if (cache)
+ {
+ dd.write (); // Invalidate this line.
+ return true;
+ }
+ else
+ return fail (*ht);
}
-
- dd.write (); // Invalidate this line.
- return true;
+ else
+ return fail (hp); // hp is still valid.
};
// As above but for a header unit. Note that currently it is only used
@@ -4167,13 +4033,13 @@ namespace build2
const file* ht (
enter_header (a, bs, t, li,
- move (hp), true /* cache */, true /* norm */,
+ move (hp), true /* cache */, false /* normalized */,
pfx_map, so_map).first);
- if (ht == nullptr)
+ if (ht == nullptr) // hp is still valid.
{
diag_record dr;
- dr << error << "header '" << hp << "' not found and no rule to "
+ dr << error << "header " << hp << " not found and no rule to "
<< "generate it";
if (df)
@@ -4219,6 +4085,16 @@ namespace build2
const path* drmp (nullptr); // Points to drm.path () if active.
+ // If things go wrong (and they often do in this area), give the user a
+ // bit extra context.
+ //
+ auto df = make_diag_frame (
+ [&src](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while extracting header dependencies from " << src;
+ });
+
// If nothing so far has invalidated the dependency database, then try
// the cached data before running the compiler.
//
@@ -4255,11 +4131,14 @@ namespace build2
//
// See apply() for details on the extra MSVC check.
//
- return modules && (ctype != compiler_type::msvc ||
- md.type != unit_type::module_intf)
- ? make_pair (ctx.fcache.create_existing (t.path () + x_pext),
- true)
- : make_pair (file_cache::entry (), false);
+ if (modules && (ctype != compiler_type::msvc ||
+ md.type != unit_type::module_intf))
+ {
+ result.first = ctx.fcache->create_existing (t.path () + pext);
+ result.second = true;
+ }
+
+ return;
}
// This can be a header or a header unit (mapping).
@@ -4312,7 +4191,7 @@ namespace build2
// Bail out early if we have deferred a failure.
//
- return make_pair (file_cache::entry (), false);
+ return;
}
}
}
@@ -4338,6 +4217,12 @@ namespace build2
process pr;
+ // We use the fdstream_mode::skip mode on stdout (cannot be used
+ // on both) and so dbuf must be destroyed (closed) first.
+ //
+ ifdstream is (ifdstream::badbit);
+ diag_buffer dbuf (ctx);
+
try
{
// Assume the preprocessed output (if produced) is usable
@@ -4358,217 +4243,229 @@ namespace build2
//
bool good_error (false), bad_error (false);
- // If we have no generated header support, then suppress all
- // diagnostics (if things go badly we will restart with this
- // support).
- //
- if (drmp == nullptr) // Dependency info goes to stdout.
+ if (mod_mapper) // Dependency info is implied by mapper requests.
{
- assert (!sense_diag); // Note: could support with fdselect().
+ assert (gen && !sense_diag); // Not used in this mode.
- // For VC with /P the dependency info and diagnostics all go
- // to stderr so redirect it to stdout.
+ // Note that here we use the skip mode on the diagnostics
+ // stream which means we have to use own instance of stdout
+ // stream for the correct destruction order (see below).
//
- pr = process (
- cpath,
- args.data (),
- 0,
- -1,
- cclass == compiler_class::msvc ? 1 : gen ? 2 : -2,
- nullptr, // CWD
- env.empty () ? nullptr : env.data ());
- }
- else // Dependency info goes to a temporary file.
- {
pr = process (cpath,
- args.data (),
- mod_mapper ? -1 : 0,
- mod_mapper ? -1 : 2, // Send stdout to stderr.
- gen ? 2 : sense_diag ? -1 : -2,
+ args,
+ -1,
+ -1,
+ diag_buffer::pipe (ctx),
nullptr, // CWD
env.empty () ? nullptr : env.data ());
- // Monitor for module mapper requests and/or diagnostics. If
- // diagnostics is detected, mark the preprocessed output as
- // unusable for compilation.
- //
- if (mod_mapper || sense_diag)
+ dbuf.open (args[0],
+ move (pr.in_efd),
+ fdstream_mode::non_blocking |
+ fdstream_mode::skip);
+ try
{
- module_mapper_state mm_state (skip_count, imports);
+ gcc_module_mapper_state mm_state (skip_count, imports);
+
+ // Note that while we read both streams until eof in normal
+ // circumstances, we cannot use fdstream_mode::skip for the
+ // exception case on both of them: we may end up being
+ // blocked trying to read one stream while the process may
+ // be blocked writing to the other. So in case of an
+ // exception we only skip the diagnostics and close the
+ // mapper stream hard. The latter (together with closing of
+ // the stdin stream) should happen first so the order of
+ // the following variable is important.
+ //
+ // Note also that we open the stdin stream in the blocking
+ // mode.
+ //
+ ifdstream is (move (pr.in_ofd),
+ fdstream_mode::non_blocking,
+ ifdstream::badbit); // stdout
+ ofdstream os (move (pr.out_fd)); // stdin (badbit|failbit)
- const char* w (nullptr);
- try
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically
+ // get an inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
+
+ bool more (false);
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
{
- // For now we don't need to do both so let's use a simpler
- // blocking implementation. Note that the module mapper
- // also needs to be adjusted when switching to the
- // non-blocking version.
+ // @@ Currently we will accept a (potentially truncated)
+ // line that ends with EOF rather than newline.
//
-#if 1
- assert (mod_mapper != sense_diag);
-
- if (mod_mapper)
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
{
- w = "module mapper request";
-
- // Note: the order is important (see the non-blocking
- // verison for details).
- //
- ifdstream is (move (pr.in_ofd),
- fdstream_mode::skip,
- ifdstream::badbit);
- ofdstream os (move (pr.out_fd));
-
- do
+ if (eof (is))
{
- if (!gcc_module_mapper (mm_state,
- a, bs, t, li,
- is, os,
- dd, update, bad_error,
- pfx_map, so_map))
- break;
+ os.close ();
+ is.close ();
- } while (!is.eof ());
+ if (more)
+ throw_generic_ios_failure (EIO, "unexpected EOF");
- os.close ();
- is.close ();
- }
-
- if (sense_diag)
- {
- w = "diagnostics";
- ifdstream is (move (pr.in_efd), fdstream_mode::skip);
- puse = puse && (is.peek () == ifdstream::traits_type::eof ());
- is.close ();
- }
-#else
- fdselect_set fds;
- auto add = [&fds] (const auto_fd& afd) -> fdselect_state*
- {
- int fd (afd.get ());
- fdmode (fd, fdstream_mode::non_blocking);
- fds.push_back (fd);
- return &fds.back ();
- };
-
- // Note that while we read both streams until eof in
- // normal circumstances, we cannot use fdstream_mode::skip
- // for the exception case on both of them: we may end up
- // being blocked trying to read one stream while the
- // process may be blocked writing to the other. So in case
- // of an exception we only skip the diagnostics and close
- // the mapper stream hard. The latter should happen first
- // so the order of the following variable is important.
- //
- ifdstream es;
- ofdstream os;
- ifdstream is;
-
- fdselect_state* ds (nullptr);
- if (sense_diag)
- {
- w = "diagnostics";
- ds = add (pr.in_efd);
- es.open (move (pr.in_efd), fdstream_mode::skip);
- }
-
- fdselect_state* ms (nullptr);
- if (mod_mapper)
- {
- w = "module mapper request";
- ms = add (pr.in_ofd);
- is.open (move (pr.in_ofd));
- os.open (move (pr.out_fd)); // Note: blocking.
- }
-
- // Set each state pointer to NULL when the respective
- // stream reaches eof.
- //
- while (ds != nullptr || ms != nullptr)
- {
- w = "output";
- ifdselect (fds);
-
- // First read out the diagnostics in case the mapper
- // interaction produces more. To make sure we don't get
- // blocked by full stderr, the mapper should only handle
- // one request at a time.
- //
- if (ds != nullptr && ds->ready)
+ ist.fd = nullfd;
+ }
+ else
{
- w = "diagnostics";
-
- for (char buf[4096];;)
- {
- streamsize c (sizeof (buf));
- streamsize n (es.readsome (buf, c));
+ optional<bool> r (
+ gcc_module_mapper (mm_state,
+ a, bs, t, li,
+ l, os,
+ dd, update, bad_error,
+ pfx_map, so_map));
- if (puse && n > 0)
- puse = false;
+ more = !r.has_value ();
- if (n < c)
- break;
- }
-
- if (es.eof ())
- {
- es.close ();
- ds->fd = nullfd;
- ds = nullptr;
- }
- }
-
- if (ms != nullptr && ms->ready)
- {
- w = "module mapper request";
-
- gcc_module_mapper (mm_state,
- a, bs, t, li,
- is, os,
- dd, update, bad_error,
- pfx_map, so_map);
- if (is.eof ())
+ if (more || *r)
+ l.clear ();
+ else
{
os.close ();
is.close ();
- ms->fd = nullfd;
- ms = nullptr;
+ ist.fd = nullfd;
}
}
+
+ continue;
}
-#endif
- }
- catch (const io_error& e)
- {
- if (pr.wait ())
- fail << "io error handling " << x_lang << " compiler "
- << w << ": " << e;
- // Fall through.
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
}
- if (mod_mapper)
- md.header_units += mm_state.header_units;
+ md.header_units += mm_state.header_units;
+ }
+ catch (const io_error& e)
+ {
+ // Note that diag_buffer handles its own io errors so this
+ // is about mapper stdin/stdout.
+ //
+ if (pr.wait ())
+ fail << "io error handling " << x_lang << " compiler "
+ << "module mapper request: " << e;
+
+ // Fall through.
}
// The idea is to reduce this to the stdout case.
//
- pr.wait ();
-
- // With -MG we want to read dependency info even if there is
- // an error (in case an outdated header file caused it). But
- // with the GCC module mapper an error is non-negotiable, so
- // to speak, and so we want to skip all of that. In fact, we
- // now write directly to depdb without generating and then
+ // We now write directly to depdb without generating and then
// parsing an intermadiate dependency makefile.
//
- pr.in_ofd = (ctype == compiler_type::gcc && mod_mapper)
- ? auto_fd (nullfd)
- : fdopen (*drmp, fdopen_mode::in);
+ pr.wait ();
+ pr.in_ofd = nullfd;
}
+ else
+ {
+ // If we have no generated header support, then suppress all
+ // diagnostics (if things go badly we will restart with this
+ // support).
+ //
+ if (drmp == nullptr) // Dependency info goes to stdout.
+ {
+ assert (!sense_diag); // Note: could support if necessary.
+ // For VC with /P the dependency info and diagnostics all go
+ // to stderr so redirect it to stdout.
+ //
+ int err (
+ cclass == compiler_class::msvc ? 1 : // stdout
+ !gen ? -2 : // /dev/null
+ diag_buffer::pipe (ctx, sense_diag /* force */));
+
+ pr = process (
+ cpath,
+ args,
+ 0,
+ -1,
+ err,
+ nullptr, // CWD
+ env.empty () ? nullptr : env.data ());
+
+ if (cclass != compiler_class::msvc && gen)
+ {
+ dbuf.open (args[0],
+ move (pr.in_efd),
+ fdstream_mode::non_blocking); // Skip on stdout.
+ }
+ }
+ else // Dependency info goes to temporary file.
+ {
+ // Since we only need to read from one stream (dbuf) let's
+ // use the simpler blocking setup.
+ //
+ int err (
+ !gen && !sense_diag ? -2 : // /dev/null
+ diag_buffer::pipe (ctx, sense_diag /* force */));
+
+ pr = process (cpath,
+ args,
+ 0,
+ 2, // Send stdout to stderr.
+ err,
+ nullptr, // CWD
+ env.empty () ? nullptr : env.data ());
+
+ if (gen || sense_diag)
+ {
+ dbuf.open (args[0], move (pr.in_efd));
+ dbuf.read (sense_diag /* force */);
+ }
+
+ if (sense_diag)
+ {
+ if (!dbuf.buf.empty ())
+ {
+ puse = false;
+ dbuf.buf.clear (); // Discard.
+ }
+ }
+
+ // The idea is to reduce this to the stdout case.
+ //
+ // Note that with -MG we want to read dependency info even
+ // if there is an error (in case an outdated header file
+ // caused it).
+ //
+ pr.wait ();
+ pr.in_ofd = fdopen (*drmp, fdopen_mode::in);
+ }
+ }
+
+ // Read and process dependency information, if any.
+ //
if (pr.in_ofd != nullfd)
{
+ // We have two cases here: reading from stdout and potentially
+ // stderr (dbuf) or reading from file (see the process startup
+ // code above for details). If we have to read from two
+ // streams, then we have to use the non-blocking setup. But we
+ // cannot use the non-blocking setup uniformly because on
+ // Windows it's only suppored for pipes. So things are going
+ // to get a bit hairy.
+ //
+ // And there is another twist to this: for MSVC we redirect
+ // stderr to stdout since the header dependency information is
+ // part of the diagnostics. If, however, there is some real
+ // diagnostics, we need to pass it through, potentially with
+ // buffering. The way we achieve this is by later opening dbuf
+ // in the EOF state and using it to buffer or stream the
+ // diagnostics.
+ //
+ bool nb (dbuf.is.is_open ());
+
// We may not read all the output (e.g., due to a restart).
// Before we used to just close the file descriptor to signal
// to the other end that we are not interested in the rest.
@@ -4576,20 +4473,69 @@ namespace build2
// impolite and complains, loudly (broken pipe). So now we are
// going to skip until the end.
//
- ifdstream is (move (pr.in_ofd),
- fdstream_mode::text | fdstream_mode::skip,
- ifdstream::badbit);
+ // Note that this means we are not using skip on dbuf (see
+ // above for the destruction order details).
+ //
+ {
+ fdstream_mode m (fdstream_mode::text |
+ fdstream_mode::skip);
+
+ if (nb)
+ m |= fdstream_mode::non_blocking;
+
+ is.open (move (pr.in_ofd), m);
+ }
+
+ fdselect_set fds;
+ if (nb)
+ fds = {is.fd (), dbuf.is.fd ()};
size_t skip (skip_count);
string l, l2; // Reuse.
for (bool first (true), second (false); !restart; )
{
- if (eof (getline (is, l)))
+ if (nb)
{
- if (bad_error && !l2.empty ())
- text << l2;
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
- break;
+ // We read until we reach EOF on both streams.
+ //
+ if (ist.fd == nullfd && dst.fd == nullfd)
+ break;
+
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ {
+ ist.fd = nullfd;
+ continue;
+ }
+
+ // Fall through to parse (and clear) the line.
+ }
+ else
+ {
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
+
+ continue;
+ }
+ }
+ else
+ {
+ if (eof (getline (is, l)))
+ {
+ if (bad_error && !l2.empty ()) // MSVC only (see below).
+ dbuf.write (l2, true /* newline */);
+
+ break;
+ }
}
l6 ([&]{trace << "header dependency line '" << l << "'";});
@@ -4640,9 +4586,15 @@ namespace build2
else
{
l2 = l;
- bad_error = true;
+
+ if (!bad_error)
+ {
+ dbuf.open_eof (args[0]);
+ bad_error = true;
+ }
}
+ l.clear ();
continue;
}
@@ -4652,6 +4604,7 @@ namespace build2
}
first = false;
+ l.clear ();
continue;
}
@@ -4659,8 +4612,13 @@ namespace build2
if (f.empty ()) // Some other diagnostics.
{
- text << l;
- bad_error = true;
+ if (!bad_error)
+ {
+ dbuf.open_eof (args[0]);
+ bad_error = true;
+ }
+
+ dbuf.write (l, true /* newline */);
break;
}
@@ -4754,12 +4712,9 @@ namespace build2
if (l.empty () ||
l[0] != '^' || l[1] != ':' || l[2] != ' ')
{
- // @@ Hm, we don't seem to redirect stderr to stdout
- // for this class of compilers so I wonder why
- // we are doing this?
- //
if (!l.empty ())
- text << l;
+ l5 ([&]{trace << "invalid header dependency line '"
+ << l << "'";});
bad_error = true;
break;
@@ -4774,22 +4729,37 @@ namespace build2
// "^: \".
//
if (l.size () == 4 && l[3] == '\\')
+ {
+ l.clear ();
continue;
+ }
else
pos = 3; // Skip "^: ".
// Fall through to the 'second' block.
}
- if (second)
- {
- second = false;
- next_make (l, pos); // Skip the source file.
- }
-
while (pos != l.size ())
{
- string f (next_make (l, pos));
+ string f (
+ make_parser::next (
+ l, pos, make_parser::type::prereq).first);
+
+ if (pos != l.size () && l[pos] == ':')
+ {
+ l5 ([&]{trace << "invalid header dependency line '"
+ << l << "'";});
+ bad_error = true;
+ break;
+ }
+
+ // Skip the source file.
+ //
+ if (second)
+ {
+ second = false;
+ continue;
+ }
// Skip until where we left off.
//
@@ -4833,19 +4803,56 @@ namespace build2
}
if (bad_error || md.deferred_failure)
+ {
+ // Note that it may be tempting to finish reading out the
+ // diagnostics before bailing out. But that may end up in
+ // a deadlock if the process gets blocked trying to write
+ // to stdout.
+ //
break;
+ }
+
+ l.clear ();
+ }
+
+ // We may bail out early from the above loop in case of a
+ // restart or error. Which means the stderr stream (dbuf) may
+ // still be open and we need to close it before closing the
+ // stdout stream (which may try to skip).
+ //
+ // In this case we may also end up with incomplete diagnostics
+ // so discard it.
+ //
+ // Generally, it may be tempting to start thinking if we
+ // should discard buffered diagnostics in other cases, such as
+ // restart. But remember that during serial execution it will
+ // go straight to stderr so for consistency (and simplicity)
+ // we should just print it unless there are good reasons not
+ // to (also remember that in the restartable modes we normally
+ // redirect stderr to /dev/null; see the process startup code
+ // for details).
+ //
+ if (dbuf.is.is_open ())
+ {
+ dbuf.is.close ();
+ dbuf.buf.clear ();
}
// Bail out early if we have deferred a failure.
//
+ // Let's ignore any buffered diagnostics in this case since
+ // it would appear after the deferred failure note.
+ //
if (md.deferred_failure)
{
is.close ();
- return make_pair (file_cache::entry (), false);
+ return;
}
- // In case of VC, we are parsing stderr and if things go
- // south, we need to copy the diagnostics for the user to see.
+ // In case of VC, we are parsing redirected stderr and if
+ // things go south, we need to copy the diagnostics for the
+ // user to see. Note that we should have already opened dbuf
+ // at EOF above.
//
if (bad_error && cclass == compiler_class::msvc)
{
@@ -4860,7 +4867,7 @@ namespace build2
l.compare (p.first, 4, "1083") != 0 &&
msvc_header_c1083 (l, p))
{
- diag_stream_lock () << l << endl;
+ dbuf.write (l, true /* newline */);
}
}
}
@@ -4883,27 +4890,42 @@ namespace build2
if (pr.wait ())
{
- if (!bad_error) // Ignore expected successes (we are done).
{
- if (!restart && psrc)
- psrcw.close ();
+ diag_record dr;
- continue;
+ if (bad_error)
+ dr << fail << "expected error exit status from "
+ << x_lang << " compiler";
+
+ if (dbuf.is_open ())
+ dbuf.close (move (dr)); // Throws if error.
}
- fail << "expected error exit status from " << x_lang
- << " compiler";
+ // Ignore expected successes (we are done).
+ //
+ if (!restart && psrc)
+ psrcw.close ();
+
+ continue;
}
else if (pr.exit->normal ())
{
if (good_error) // Ignore expected errors (restart).
+ {
+ if (dbuf.is_open ())
+ dbuf.close ();
+
continue;
+ }
}
// Fall through.
}
catch (const io_error& e)
{
+ // Ignore buffered diagnostics (since reading it could be the
+ // cause of this failure).
+ //
if (pr.wait ())
fail << "unable to read " << x_lang << " compiler header "
<< "dependency output: " << e;
@@ -4912,18 +4934,23 @@ namespace build2
}
assert (pr.exit && !*pr.exit);
- const process_exit& e (*pr.exit);
+ const process_exit& pe (*pr.exit);
// For normal exit we assume the child process issued some
// diagnostics.
//
- if (e.normal ())
+ if (pe.normal ())
{
- // If this run was with the generated header support then we
- // have issued diagnostics and it's time to give up.
+ // If this run was with the generated header support then it's
+ // time to give up.
//
if (gen)
+ {
+ if (dbuf.is_open ())
+ dbuf.close (args, pe, 2 /* verbosity */);
+
throw failed ();
+ }
// Just to recap, being here means something is wrong with the
// source: it can be a missing generated header, it can be an
@@ -4941,7 +4968,12 @@ namespace build2
// or will issue diagnostics.
//
if (restart)
+ {
+ if (dbuf.is_open ())
+ dbuf.close ();
+
l6 ([&]{trace << "trying again without generated headers";});
+ }
else
{
// In some pathological situations we may end up switching
@@ -4966,19 +4998,24 @@ namespace build2
// example, because we have removed all the partially
// preprocessed source files).
//
- if (force_gen_skip && *force_gen_skip == skip_count)
{
- diag_record dr (fail);
+ diag_record dr;
+ if (force_gen_skip && *force_gen_skip == skip_count)
+ {
+ dr <<
+ fail << "inconsistent " << x_lang << " compiler behavior" <<
+ info << "run the following two commands to investigate";
- dr << "inconsistent " << x_lang << " compiler behavior" <<
- info << "run the following two commands to investigate";
+ dr << info;
+ print_process (dr, args.data ()); // No pipes.
- dr << info;
- print_process (dr, args.data ()); // No pipes.
+ init_args ((gen = true));
+ dr << info << "";
+ print_process (dr, args.data ()); // No pipes.
+ }
- init_args ((gen = true));
- dr << info << "";
- print_process (dr, args.data ()); // No pipes.
+ if (dbuf.is_open ())
+ dbuf.close (move (dr)); // Throws if error.
}
restart = true;
@@ -4989,7 +5026,15 @@ namespace build2
continue;
}
else
- run_finish (args, pr); // Throws.
+ {
+ if (dbuf.is_open ())
+ {
+ dbuf.close (args, pe, 2 /* verbosity */);
+ throw failed ();
+ }
+ else
+ run_finish (args, pr, 2 /* verbosity */);
+ }
}
catch (const process_error& e)
{
@@ -5015,7 +5060,9 @@ namespace build2
dd.expect ("");
puse = puse && !reprocess && psrc;
- return make_pair (move (psrc), puse);
+
+ result.first = move (psrc);
+ result.second = puse;
}
// Return the translation unit information (last argument) and its
@@ -5128,19 +5175,41 @@ namespace build2
append_options (args, cmode);
append_sys_hdr_options (args);
- // See perform_update() for details on /external:W0, /EHsc, /MD.
+ // Note: no append_diag_color_options() call since the
+ // diagnostics is discarded.
+
+ // See perform_update() for details on the choice of options.
//
+ {
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
+
+ if (!sc && !ec)
+ args.push_back ("/utf-8");
+ else
+ {
+ if (!sc)
+ args.push_back ("/source-charset:UTF-8");
+
+ if (!ec)
+ args.push_back ("/execution-charset:UTF-8");
+ }
+ }
+
if (cvariant != "clang" && isystem (*this))
{
- if (find_option_prefix ("/external:I", args) &&
- !find_option_prefix ("/external:W", args))
+ if (find_option_prefixes ({"/external:I", "-external:I"}, args) &&
+ !find_option_prefixes ({"/external:W", "-external:W"}, args))
args.push_back ("/external:W0");
}
- if (x_lang == lang::cxx && !find_option_prefix ("/EH", args))
+ if (x_lang == lang::cxx &&
+ !find_option_prefixes ({"/EH", "-EH"}, args))
args.push_back ("/EHsc");
- if (!find_option_prefixes ({"/MD", "/MT"}, args))
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
args.push_back ("/MD");
args.push_back ("/E");
@@ -5154,6 +5223,18 @@ namespace build2
}
case compiler_class::gcc:
{
+ append_options (args, cmode,
+ cmode.size () - (modules && clang ? 1 : 0));
+ append_sys_hdr_options (args);
+
+ // Note: no append_diag_color_options() call since the
+ // diagnostics is discarded.
+
+ // See perform_update() for details on the choice of options.
+ //
+ if (!find_option_prefix ("-finput-charset=", args))
+ args.push_back ("-finput-charset=UTF-8");
+
if (ot == otype::s)
{
if (tclass == "linux" || tclass == "bsd")
@@ -5182,10 +5263,6 @@ namespace build2
}
}
- append_options (args, cmode,
- cmode.size () - (modules && clang ? 1 : 0));
- append_sys_hdr_options (args);
-
args.push_back ("-E");
append_lang_options (args, md);
@@ -5194,12 +5271,36 @@ namespace build2
//
if (ps)
{
- if (ctype == compiler_type::gcc)
+ switch (ctype)
{
- // Note that only these two *plus* -x do the trick.
- //
- args.push_back ("-fpreprocessed");
- args.push_back ("-fdirectives-only");
+ case compiler_type::gcc:
+ {
+ // Note that only these two *plus* -x do the trick.
+ //
+ args.push_back ("-fpreprocessed");
+ args.push_back ("-fdirectives-only");
+ break;
+ }
+ case compiler_type::clang:
+ {
+ // See below for details.
+ //
+ if (ctype == compiler_type::clang &&
+ cmaj >= (cvariant != "apple" ? 15 : 16))
+ {
+ if (find_options ({"-pedantic", "-pedantic-errors",
+ "-Wpedantic", "-Werror=pedantic"},
+ args))
+ {
+ args.push_back ("-Wno-gnu-line-marker");
+ }
+ }
+
+ break;
+ }
+ case compiler_type::msvc:
+ case compiler_type::icc:
+ assert (false);
}
}
@@ -5253,10 +5354,10 @@ namespace build2
print_process (args);
// We don't want to see warnings multiple times so ignore all
- // diagnostics.
+ // diagnostics (thus no need for diag_buffer).
//
pr = process (cpath,
- args.data (),
+ args,
0, -1, -2,
nullptr, // CWD
env.empty () ? nullptr : env.data ());
@@ -5326,7 +5427,15 @@ namespace build2
// accurate (parts of the translation unit could have been
// #ifdef'ed out; see __build2_preprocess).
//
- return reprocess ? string () : move (p.checksum);
+ // Also, don't use the checksum for header units since it ignores
+ // preprocessor directives and may therefore cause us to ignore a
+ // change to an exported macro. @@ TODO: maybe we should add a
+ // flag to the parser not to waste time calculating the checksum
+ // in these cases.
+ //
+ return reprocess || ut == unit_type::module_header
+ ? string ()
+ : move (p.checksum);
}
// Fall through.
@@ -5357,7 +5466,7 @@ namespace build2
info << "then run failing command to display compiler diagnostics";
}
else
- run_finish (args, pr); // Throws.
+ run_finish (args, pr, 2 /* verbosity */); // Throws.
}
catch (const process_error& e)
{
@@ -5770,10 +5879,11 @@ namespace build2
// 1. There is no good place in prerequisite_targets to store the
// exported flag (no, using the marking facility across match/execute
// is a bad idea). So what we are going to do is put re-exported
- // bmi{}s at the back and store (in the target's data pad) the start
- // position. One bad aspect about this part is that we assume those
- // bmi{}s have been matched by the same rule. But let's not kid
- // ourselves, there will be no other rule that matches bmi{}s.
+ // bmi{}s at the back and store (in the target's auxiliary data
+ // storage) the start position. One bad aspect about this part is
+ // that we assume those bmi{}s have been matched by the same
+ // rule. But let's not kid ourselves, there will be no other rule
+ // that matches bmi{}s.
//
// @@ I think now we could use prerequisite_targets::data for this?
//
@@ -6160,11 +6270,11 @@ namespace build2
// Hash (we know it's a file).
//
- cs.append (static_cast<const file&> (*bt).path ().string ());
+ cs.append (bt->as<file> ().path ().string ());
// Copy over bmi{}s from our prerequisites weeding out duplicates.
//
- if (size_t j = bt->data<match_data> ().modules.start)
+ if (size_t j = bt->data<match_data> (a).modules.start)
{
// Hard to say whether we should reserve or not. We will probably
// get quite a bit of duplications.
@@ -6186,7 +6296,7 @@ namespace build2
}) == imports.end ())
{
pts.push_back (et);
- cs.append (static_cast<const file&> (*et).path ().string ());
+ cs.append (et->as<file> ().path ().string ());
// Add to the list of imports for further duplicate suppression.
// We could have stored reference to the name (e.g., in score)
@@ -6226,6 +6336,9 @@ namespace build2
// cc.config module and that is within our amalgmantion seems like a
// good place.
//
+ // @@ TODO: maybe we should cache this in compile_rule ctor like we
+ // do for the header cache?
+ //
const scope* as (&rs);
{
const scope* ws (as->weak_scope ());
@@ -6241,7 +6354,7 @@ namespace build2
// This is also the module that registers the scope operation
// callback that cleans up the subproject.
//
- if (cast_false<bool> ((*s)["cc.core.vars.loaded"]))
+ if (cast_false<bool> (s->vars["cc.core.vars.loaded"]))
as = s;
} while (s != ws);
@@ -6377,7 +6490,10 @@ namespace build2
ps.push_back (prerequisite (lt));
for (prerequisite_member p: group_prerequisite_members (a, lt))
{
- if (include (a, lt, p) != include_type::normal) // Excluded/ad hoc.
+ // Ignore update=match.
+ //
+ lookup l;
+ if (include (a, lt, p, &l) != include_type::normal) // Excluded/ad hoc.
continue;
if (p.is_a<libx> () ||
@@ -6394,8 +6510,9 @@ namespace build2
move (mf),
nullopt, // Use default extension.
target_decl::implied,
- trace));
- file& bt (static_cast<file&> (p.first));
+ trace,
+ true /* skip_find */));
+ file& bt (p.first.as<file> ());
// Note that this is racy and someone might have created this target
// while we were preparing the prerequisite list.
@@ -6526,7 +6643,9 @@ namespace build2
//
process_libraries (a, bs, nullopt, sys_lib_dirs,
*f, la, 0, // lflags unused.
- imp, lib, nullptr, true /* self */,
+ imp, lib, nullptr,
+ true /* self */,
+ false /* proc_opt_group */,
&lib_cache);
if (lt != nullptr)
@@ -6611,7 +6730,10 @@ namespace build2
//
for (prerequisite_member p: group_prerequisite_members (a, t))
{
- if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
+ // Ignore update=match.
+ //
+ lookup l;
+ if (include (a, t, p, &l) != include_type::normal) // Excluded/ad hoc.
continue;
if (p.is_a<libx> () ||
@@ -6629,8 +6751,9 @@ namespace build2
move (mf),
nullopt, // Use default extension.
target_decl::implied,
- trace));
- file& bt (static_cast<file&> (p.first));
+ trace,
+ true /* skip_find */));
+ file& bt (p.first.as<file> ());
// Note that this is racy and someone might have created this target
// while we were preparing the prerequisite list.
@@ -6668,7 +6791,7 @@ namespace build2
// Filter cl.exe noise (msvc.cxx).
//
void
- msvc_filter_cl (ifdstream&, const path& src);
+ msvc_filter_cl (diag_buffer&, const path& src);
// Append header unit-related options.
//
@@ -6898,12 +7021,11 @@ namespace build2
}
target_state compile_rule::
- perform_update (action a, const target& xt) const
+ perform_update (action a, const target& xt, match_data& md) const
{
const file& t (xt.as<file> ());
const path& tp (t.path ());
- match_data md (move (t.data<match_data> ()));
unit_type ut (md.type);
context& ctx (t.ctx);
@@ -6926,9 +7048,6 @@ namespace build2
},
md.modules.copied)); // See search_modules() for details.
- const file& s (pr.second);
- const path* sp (&s.path ());
-
// Force recompilation in case of a deferred failure even if nothing
// changed.
//
@@ -6945,11 +7064,14 @@ namespace build2
return *pr.first;
}
+ const file& s (pr.second);
+ const path* sp (&s.path ());
+
// Make sure depdb is no older than any of our prerequisites (see md.mt
// logic description above for details). Also save the sequence start
// time if doing mtime checks (see the depdb::check_mtime() call below).
//
- timestamp start (depdb::mtime_check ()
+ timestamp start (!ctx.dry_run && depdb::mtime_check ()
? system_clock::now ()
: timestamp_unknown);
@@ -7016,7 +7138,6 @@ namespace build2
small_vector<string, 2> module_args; // Module options storage.
size_t out_i (0); // Index of the -o option.
- size_t lang_n (0); // Number of lang options.
switch (cclass)
{
@@ -7037,13 +7158,40 @@ namespace build2
if (md.pp != preprocessed::all)
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // Note: could be overridden in mode.
+ //
+ append_diag_color_options (args);
+
+ // Set source/execution charsets to UTF-8 unless a custom charset
+ // is specified.
+ //
+ // Note that clang-cl supports /utf-8 and /*-charset.
+ //
+ {
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
+
+ if (!sc && !ec)
+ args.push_back ("/utf-8");
+ else
+ {
+ if (!sc)
+ args.push_back ("/source-charset:UTF-8");
+
+ if (!ec)
+ args.push_back ("/execution-charset:UTF-8");
+ }
+ }
+
// If we have any /external:I options but no /external:Wn, then add
// /external:W0 to emulate the -isystem semantics.
//
if (cvariant != "clang" && isystem (*this))
{
- if (find_option_prefix ("/external:I", args) &&
- !find_option_prefix ("/external:W", args))
+ if (find_option_prefixes ({"/external:I", "-external:I"}, args) &&
+ !find_option_prefixes ({"/external:W", "-external:W"}, args))
args.push_back ("/external:W0");
}
@@ -7057,7 +7205,9 @@ namespace build2
// For C looks like no /EH* (exceptions supported but no C++ objects
// destroyed) is a reasonable default.
//
- if (x_lang == lang::cxx && !find_option_prefix ("/EH", args))
+
+ if (x_lang == lang::cxx &&
+ !find_option_prefixes ({"/EH", "-EH"}, args))
args.push_back ("/EHsc");
// The runtime is a bit more interesting. At first it may seem like
@@ -7079,7 +7229,7 @@ namespace build2
// unreasonable thing to do). So by default we will always use the
// release runtime.
//
- if (!find_option_prefixes ({"/MD", "/MT"}, args))
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
args.push_back ("/MD");
msvc_sanitize_cl (args);
@@ -7104,7 +7254,7 @@ namespace build2
//
// @@ MOD: TODO deal with absent relo.
//
- if (find_options ({"/Zi", "/ZI"}, args))
+ if (find_options ({"/Zi", "/ZI", "-Zi", "-ZI"}, args))
{
if (fc)
args.push_back ("/Fd:");
@@ -7150,6 +7300,65 @@ namespace build2
}
case compiler_class::gcc:
{
+ append_options (args, cmode);
+
+ // Clang 15 introduced the unqualified-std-cast-call warning which
+ // warns about unqualified calls to std::move() and std::forward()
+ // (because they can be "hijacked" via ADL). Surprisingly, this
+ // warning is enabled by default, as opposed to with -Wextra or at
+ // least -Wall. It has also proven to be quite disruptive, causing a
+ // large number of warnings in a large number of packages. So we are
+ // going to "remap" it to -Wextra for now and in the future may
+ // "relax" it to -Wall and potentially to being enabled by default.
+ // See GitHub issue #259 for background and details.
+ //
+ if (x_lang == lang::cxx &&
+ ctype == compiler_type::clang &&
+ cmaj >= 15)
+ {
+ bool w (false); // Seen -W[no-]unqualified-std-cast-call
+ optional<bool> extra; // Seen -W[no-]extra
+
+ for (const char* s: reverse_iterate (args))
+ {
+ if (s != nullptr)
+ {
+ if (strcmp (s, "-Wunqualified-std-cast-call") == 0 ||
+ strcmp (s, "-Wno-unqualified-std-cast-call") == 0)
+ {
+ w = true;
+ break;
+ }
+
+ if (!extra) // Last seen option wins.
+ {
+ if (strcmp (s, "-Wextra") == 0) extra = true;
+ else if (strcmp (s, "-Wno-extra") == 0) extra = false;
+ }
+ }
+ }
+
+ if (!w && (!extra || !*extra))
+ args.push_back ("-Wno-unqualified-std-cast-call");
+ }
+
+ if (md.pp != preprocessed::all)
+ append_sys_hdr_options (args); // Extra system header dirs (last).
+
+ // Note: could be overridden in mode.
+ //
+ append_diag_color_options (args);
+
+ // Set the input charset to UTF-8 unless a custom one is specified.
+ //
+ // Note that the execution charset (-fexec-charset) is UTF-8 by
+ // default.
+ //
+ // Note that early versions of Clang only recognize uppercase UTF-8.
+ //
+ if (!find_option_prefix ("-finput-charset=", args))
+ args.push_back ("-finput-charset=UTF-8");
+
if (ot == otype::s)
{
// On Darwin, Win32 -fPIC is the default.
@@ -7253,11 +7462,6 @@ namespace build2
}
}
- append_options (args, cmode);
-
- if (md.pp != preprocessed::all)
- append_sys_hdr_options (args); // Extra system header dirs (last).
-
append_header_options (env, args, header_args, a, t, md, md.dd);
append_module_options (env, args, module_args, a, t, md, md.dd);
@@ -7332,7 +7536,7 @@ namespace build2
args.push_back ("-c");
}
- lang_n = append_lang_options (args, md);
+ append_lang_options (args, md);
if (md.pp == preprocessed::all)
{
@@ -7381,19 +7585,32 @@ namespace build2
// the source file, not its preprocessed version (so that it's easy to
// copy and re-run, etc). Only at level 3 and above print the real deal.
//
+ // @@ TODO: why don't we print env (here and/or below)? Also link rule.
+ //
if (verb == 1)
- text << x_name << ' ' << s;
+ {
+ const char* name (x_assembler_cpp (s) ? "as-cpp" :
+ x_objective (s) ? x_obj_name :
+ x_name);
+
+ print_diag (name, s, t);
+ }
else if (verb == 2)
print_process (args);
// If we have the (partially) preprocessed output, switch to that.
//
- bool psrc (md.psrc);
+ // But we remember the original source/position to restore later.
+ //
+ bool psrc (md.psrc); // Note: false if cc.reprocess.
bool ptmp (psrc && md.psrc.temporary);
+ pair<size_t, const char*> osrc;
if (psrc)
{
args.pop_back (); // nullptr
+ osrc.second = args.back ();
args.pop_back (); // sp
+ osrc.first = args.size ();
sp = &md.psrc.path ();
@@ -7403,25 +7620,40 @@ namespace build2
{
case compiler_type::gcc:
{
- // The -fpreprocessed is implied by .i/.ii. But not when compiling
- // a header unit (there is no .hi/.hii).
+ // -fpreprocessed is implied by .i/.ii unless compiling a header
+ // unit (there is no .hi/.hii). Also, we would need to pop -x
+ // since it takes precedence over the extension, which would mess
+ // up our osrc logic. So in the end it feels like always passing
+ // explicit -fpreprocessed is the way to go.
//
- if (ut == unit_type::module_header)
- args.push_back ("-fpreprocessed");
- else
- // Pop -x since it takes precedence over the extension.
- //
- // @@ I wonder why bother and not just add -fpreprocessed? Are
- // we trying to save an option or does something break?
- //
- for (; lang_n != 0; --lang_n)
- args.pop_back ();
-
+ // Also note that similarly there is no .Si for .S files.
+ //
+ args.push_back ("-fpreprocessed");
args.push_back ("-fdirectives-only");
break;
}
case compiler_type::clang:
{
+ // Clang 15 and later with -pedantic warns about GNU-style line
+ // markers that it wrote itself in the -frewrite-includes output
+ // (llvm-project issue 63284). So we suppress this warning unless
+ // compiling from source.
+ //
+ // In Apple Clang this warning/option are absent in 14.0.3 (which
+ // is said to be based on vanilla Clang 15.0.5) for some reason
+ // (let's hope it's because they patched it out rather than due to
+ // a misleading __LIBCPP_VERSION value).
+ //
+ if (ctype == compiler_type::clang &&
+ cmaj >= (cvariant != "apple" ? 15 : 16))
+ {
+ if (find_options ({"-pedantic", "-pedantic-errors",
+ "-Wpedantic", "-Werror=pedantic"}, args))
+ {
+ args.push_back ("-Wno-gnu-line-marker");
+ }
+ }
+
// Note that without -x Clang will treat .i/.ii as fully
// preprocessed.
//
@@ -7470,45 +7702,38 @@ namespace build2
file_cache::read psrcr (psrc ? md.psrc.open () : file_cache::read ());
// VC cl.exe sends diagnostics to stdout. It also prints the file
- // name being compiled as the first line. So for cl.exe we redirect
- // stdout to a pipe, filter that noise out, and send the rest to
- // stderr.
+ // name being compiled as the first line. So for cl.exe we filter
+ // that noise out.
//
- // For other compilers redirect stdout to stderr, in case any of
- // them tries to pull off something similar. For sane compilers this
- // should be harmless.
+ // For other compilers also redirect stdout to stderr, in case any
+ // of them tries to pull off something similar. For sane compilers
+ // this should be harmless.
//
bool filter (ctype == compiler_type::msvc);
process pr (cpath,
- args.data (),
- 0, (filter ? -1 : 2), 2,
+ args,
+ 0, 2, diag_buffer::pipe (ctx, filter /* force */),
nullptr, // CWD
env.empty () ? nullptr : env.data ());
- if (filter)
- {
- try
- {
- ifdstream is (
- move (pr.in_ofd), fdstream_mode::text, ifdstream::badbit);
+ diag_buffer dbuf (ctx, args[0], pr);
- msvc_filter_cl (is, *sp);
+ if (filter)
+ msvc_filter_cl (dbuf, *sp);
- // If anything remains in the stream, send it all to stderr.
- // Note that the eof check is important: if the stream is at
- // eof, this and all subsequent writes to the diagnostics stream
- // will fail (and you won't see a thing).
- //
- if (is.peek () != ifdstream::traits_type::eof ())
- diag_stream_lock () << is.rdbuf ();
+ dbuf.read ();
- is.close ();
- }
- catch (const io_error&) {} // Assume exits with error.
+ // Restore the original source if we switched to preprocessed.
+ //
+ if (psrc)
+ {
+ args.resize (osrc.first);
+ args.push_back (osrc.second);
+ args.push_back (nullptr);
}
- run_finish (args, pr);
+ run_finish (dbuf, args, pr, 1 /* verbosity */);
}
catch (const process_error& e)
{
@@ -7559,12 +7784,14 @@ namespace build2
try
{
process pr (cpath,
- args.data (),
- 0, 2, 2,
+ args,
+ 0, 2, diag_buffer::pipe (ctx),
nullptr, // CWD
env.empty () ? nullptr : env.data ());
- run_finish (args, pr);
+ diag_buffer dbuf (ctx, args[0], pr);
+ dbuf.read ();
+ run_finish (dbuf, args, pr, 1 /* verbosity */);
}
catch (const process_error& e)
{
@@ -7595,25 +7822,27 @@ namespace build2
}
target_state compile_rule::
- perform_clean (action a, const target& xt) const
+ perform_clean (action a, const target& xt, const target_type& srct) const
{
const file& t (xt.as<file> ());
+ // Preprocessed file extension.
+ //
+ const char* pext (x_assembler_cpp (srct) ? ".Si" :
+ x_objective (srct) ? x_obj_pext :
+ x_pext);
+
// Compressed preprocessed file extension.
//
- auto cpext = [this, &t, s = string ()] () mutable -> const char*
- {
- return (s = t.ctx.fcache.compressed_extension (x_pext)).c_str ();
- };
+ string cpext (t.ctx.fcache->compressed_extension (pext));
clean_extras extras;
-
switch (ctype)
{
- case compiler_type::gcc: extras = {".d", x_pext, cpext (), ".t"}; break;
- case compiler_type::clang: extras = {".d", x_pext, cpext ()}; break;
- case compiler_type::msvc: extras = {".d", x_pext, cpext (), ".idb", ".pdb"};break;
- case compiler_type::icc: extras = {".d"}; break;
+ case compiler_type::gcc: extras = {".d", pext, cpext.c_str (), ".t"}; break;
+ case compiler_type::clang: extras = {".d", pext, cpext.c_str ()}; break;
+ case compiler_type::msvc: extras = {".d", pext, cpext.c_str (), ".idb", ".pdb"}; break;
+ case compiler_type::icc: extras = {".d"}; break;
}
return perform_clean_extra (a, t, extras);
diff --git a/libbuild2/cc/compile-rule.hxx b/libbuild2/cc/compile-rule.hxx
index daea600..a9a22c4 100644
--- a/libbuild2/cc/compile-rule.hxx
+++ b/libbuild2/cc/compile-rule.hxx
@@ -8,6 +8,7 @@
#include <libbuild2/utility.hxx>
#include <libbuild2/rule.hxx>
+#include <libbuild2/dyndep.hxx>
#include <libbuild2/file-cache.hxx>
#include <libbuild2/cc/types.hxx>
@@ -21,6 +22,8 @@ namespace build2
namespace cc
{
+ class config_module;
+
// The order is arranged so that their integral values indicate whether
// one is a "stronger" than another.
//
@@ -37,22 +40,25 @@ namespace build2
};
class LIBBUILD2_CC_SYMEXPORT compile_rule: public simple_rule,
- virtual common
+ virtual common,
+ dyndep_rule
{
public:
- compile_rule (data&&);
+ struct match_data;
+
+ compile_rule (data&&, const scope&);
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
target_state
- perform_update (action, const target&) const;
+ perform_update (action, const target&, match_data&) const;
target_state
- perform_clean (action, const target&) const;
+ perform_clean (action, const target&, const target_type&) const;
public:
using appended_libraries = small_vector<const target*, 256>;
@@ -60,7 +66,8 @@ namespace build2
void
append_library_options (appended_libraries&, strings&,
const scope&,
- action, const file&, bool, linfo) const;
+ action, const file&, bool, linfo,
+ bool, bool) const;
optional<path>
find_system_header (const path&) const;
@@ -70,7 +77,6 @@ namespace build2
functions (function_family&, const char*); // functions.cxx
private:
- struct match_data;
using environment = small_vector<const char*, 2>;
template <typename T>
@@ -82,7 +88,7 @@ namespace build2
append_library_options (appended_libraries&, T&,
const scope&,
const scope*,
- action, const file&, bool, linfo,
+ action, const file&, bool, linfo, bool,
library_cache*) const;
template <typename T>
@@ -91,66 +97,44 @@ namespace build2
const scope&,
action, const target&, linfo) const;
- // Mapping of include prefixes (e.g., foo in <foo/bar>) for auto-
- // generated headers to directories where they will be generated.
- //
- // We are using a prefix map of directories (dir_path_map) instead of
- // just a map in order to also cover sub-paths (e.g., <foo/more/bar> if
- // we continue with the example). Specifically, we need to make sure we
- // don't treat foobar as a sub-directory of foo.
- //
- // The priority is used to decide who should override whom. Lesser
- // values are considered higher priority. See append_prefixes() for
- // details.
- //
- // @@ The keys should be normalized.
- //
- struct prefix_value
- {
- dir_path directory;
- size_t priority;
- };
- using prefix_map = dir_path_map<prefix_value>;
+ using prefix_map = dyndep_rule::prefix_map;
+ using srcout_map = dyndep_rule::srcout_map;
void
- append_prefixes (prefix_map&, const target&, const variable&) const;
+ append_prefixes (prefix_map&,
+ const scope&, const target&,
+ const variable&) const;
void
append_library_prefixes (appended_libraries&, prefix_map&,
const scope&,
- action, target&, linfo) const;
+ action, const target&, linfo) const;
prefix_map
- build_prefix_map (const scope&, action, target&, linfo) const;
+ build_prefix_map (const scope&, action, const target&, linfo) const;
- small_vector<const target_type*, 2>
- map_extension (const scope&, const string&, const string&) const;
-
- // Src-to-out re-mapping. See extract_headers() for details.
- //
- using srcout_map = path_map<dir_path>;
+ struct gcc_module_mapper_state;
- struct module_mapper_state;
-
- bool
- gcc_module_mapper (module_mapper_state&,
+ optional<bool>
+ gcc_module_mapper (gcc_module_mapper_state&,
action, const scope&, file&, linfo,
- ifdstream&, ofdstream&,
+ const string&, ofdstream&,
depdb&, bool&, bool&,
optional<prefix_map>&, srcout_map&) const;
pair<const file*, bool>
enter_header (action, const scope&, file&, linfo,
path&&, bool, bool,
- optional<prefix_map>&, srcout_map&) const;
+ optional<prefix_map>&, const srcout_map&) const;
optional<bool>
inject_header (action, file&, const file&, timestamp, bool) const;
- pair<file_cache::entry, bool>
+ void
extract_headers (action, const scope&, file&, linfo,
const file&, match_data&,
- depdb&, bool&, timestamp, module_imports&) const;
+ depdb&, bool&, timestamp, module_imports&,
+ pair<file_cache::entry, bool>&) const;
string
parse_unit (action, file&, linfo,
@@ -201,6 +185,7 @@ namespace build2
private:
const string rule_id;
+ const config_module* header_cache_;
};
}
}
diff --git a/libbuild2/cc/functions.cxx b/libbuild2/cc/functions.cxx
index cafb7f0..9d408af 100644
--- a/libbuild2/cc/functions.cxx
+++ b/libbuild2/cc/functions.cxx
@@ -13,11 +13,10 @@
#include <libbuild2/cc/module.hxx>
#include <libbuild2/cc/utility.hxx>
+#include <libbuild2/functions-name.hxx> // to_target()
+
namespace build2
{
- const target&
- to_target (const scope&, name&&, name&&); // libbuild2/functions-name.cxx
-
namespace cc
{
using namespace bin;
@@ -47,8 +46,13 @@ namespace build2
if (rs == nullptr)
fail << f.name << " called out of project";
- if (bs->ctx.phase != run_phase::execute)
- fail << f.name << " can only be called during execution";
+ // Note that we also allow calling this during match since an ad hoc
+ // recipe with dynamic dependency extraction (depdb-dyndep) executes its
+ // depdb preamble during match (after matching all the prerequisites).
+ //
+ if (bs->ctx.phase != run_phase::match &&
+ bs->ctx.phase != run_phase::execute)
+ fail << f.name << " can only be called from recipe";
const module* m (rs->find_module<module> (d.x));
@@ -57,6 +61,9 @@ namespace build2
// We can assume these are present due to function's types signature.
//
+ if (vs[0].null)
+ throw invalid_argument ("null value");
+
names& ts_ns (vs[0].as<names> ()); // <targets>
// In a somewhat hackish way strip the outer operation to match how we
@@ -70,20 +77,40 @@ namespace build2
{
name& n (*i), o;
const target& t (to_target (*bs, move (n), move (n.pair ? *++i : o)));
+
+ if (!t.matched (a))
+ fail << t << " is not matched" <<
+ info << "make sure this target is listed as prerequisite";
+
d.f (r, vs, *m, *bs, a, t);
}
return value (move (r));
}
- // Common thunk for $x.lib_*(<targets>, <otype> [, ...]) functions.
+ // Common thunk for $x.lib_*(...) functions.
+ //
+ // The two supported function signatures are:
+ //
+ // $x.lib_*(<targets>, <otype> [, ...]])
+ //
+ // $x.lib_*(<targets>)
+ //
+ // For the first signature, the passed targets cannot be library groups
+ // (so they are always file-based) and linfo is always present.
+ //
+ // For the second signature, targets can only be utility libraries
+ // (including the libul{} group).
+ //
+ // If <otype> in the first signature is NULL, then it is treated as
+ // the second signature.
//
struct lib_thunk_data
{
const char* x;
void (*f) (void*, strings&,
const vector_view<value>&, const module&, const scope&,
- action, const file&, bool, linfo);
+ action, const target&, bool, optional<linfo>);
};
static value
@@ -102,21 +129,27 @@ namespace build2
if (rs == nullptr)
fail << f.name << " called out of project";
- if (bs->ctx.phase != run_phase::execute)
- fail << f.name << " can only be called during execution";
+ if (bs->ctx.phase != run_phase::match && // See above.
+ bs->ctx.phase != run_phase::execute)
+ fail << f.name << " can only be called from recipe";
const module* m (rs->find_module<module> (d.x));
if (m == nullptr)
fail << f.name << " called without " << d.x << " module loaded";
- // We can assume these are present due to function's types signature.
+ // We can assume this is present due to function's types signature.
//
+ if (vs[0].null)
+ throw invalid_argument ("null value");
+
names& ts_ns (vs[0].as<names> ()); // <targets>
- names& ot_ns (vs[1].as<names> ()); // <otype>
- linfo li;
+ optional<linfo> li;
+ if (vs.size () > 1 && !vs[1].null)
{
+ names& ot_ns (vs[1].as<names> ()); // <otype>
+
string t (convert<string> (move (ot_ns)));
const target_type* tt (bs->find_target_type (t));
@@ -162,17 +195,22 @@ namespace build2
name& n (*i), o;
const target& t (to_target (*bs, move (n), move (n.pair ? *++i : o)));
- const file* f;
bool la (false);
-
- if ((la = (f = t.is_a<libux> ())) ||
- (la = (f = t.is_a<liba> ())) ||
- ( (f = t.is_a<libs> ())))
+ if (li
+ ? ((la = t.is_a<libux> ()) ||
+ (la = t.is_a<liba> ()) ||
+ ( t.is_a<libs> ()))
+ : ((la = t.is_a<libux> ()) ||
+ ( t.is_a<libul> ())))
{
- d.f (ls, r, vs, *m, *bs, a, *f, la, li);
+ if (!t.matched (a))
+ fail << t << " is not matched" <<
+ info << "make sure this target is listed as prerequisite";
+
+ d.f (ls, r, vs, *m, *bs, a, t, la, li);
}
else
- fail << t << " is not a library target";
+ fail << t << " is not a library of expected type";
}
return value (move (r));
@@ -199,33 +237,61 @@ namespace build2
void compile_rule::
functions (function_family& f, const char* x)
{
- // $<module>.lib_poptions(<lib-targets>, <otype>)
+ // $<module>.lib_poptions(<lib-targets>[, <otype>[, <original>]])
//
// Return the preprocessor options that should be passed when compiling
// sources that depend on the specified libraries. The second argument
// is the output target type (obje, objs, etc).
//
+ // The output target type may be omitted for utility libraries (libul{}
+ // or libu[eas]{}). In this case, only "common interface" options will
+ // be returned for lib{} dependencies. This is primarily useful for
+ // obtaining poptions to be passed to tools other than C/C++ compilers
+ // (for example, Qt moc).
+ //
+ // If <original> is true, then return the original -I options without
+ // performing any translation (for example, to -isystem or /external:I).
+ // This is the default if <otype> is omitted. To get the translation for
+ // the common interface options, pass [null] for <otype> and true for
+ // <original>.
+ //
// Note that passing multiple targets at once is not a mere convenience:
// this also allows for more effective duplicate suppression.
//
- // Note also that this function can only be called during execution
- // after all the specified library targets have been matched. Normally
- // it is used in ad hoc recipes to implement custom compilation.
+ // Note also that this function can only be called during execution (or,
+ // carefully, during match) after all the specified library targets have
+ // been matched. Normally it is used in ad hoc recipes to implement
+ // custom compilation.
//
// Note that this function is not pure.
//
f.insert (".lib_poptions", false).
- insert<lib_thunk_data, names, names> (
+ insert<lib_thunk_data, names, optional<names*>, optional<names>> (
&lib_thunk<appended_libraries>,
lib_thunk_data {
x,
[] (void* ls, strings& r,
- const vector_view<value>&, const module& m, const scope& bs,
- action a, const file& l, bool la, linfo li)
+ const vector_view<value>& vs, const module& m, const scope& bs,
+ action a, const target& l, bool la, optional<linfo> li)
{
+ // If this is libul{}, get the matched member (see bin::libul_rule
+ // for details).
+ //
+ const file& f (
+ la || li
+ ? l.as<file> ()
+ : (la = true,
+ l.prerequisite_targets[a].back ().target->as<file> ()));
+
+ bool common (!li);
+ bool original (vs.size () > 2 ? convert<bool> (vs[2]) : !li);
+
+ if (!li)
+ li = link_info (bs, link_type (f).type);
+
m.append_library_options (
*static_cast<appended_libraries*> (ls), r,
- bs, a, l, la, li);
+ bs, a, f, la, *li, common, original);
}});
// $<module>.find_system_header(<name>)
@@ -289,9 +355,10 @@ namespace build2
// Note that passing multiple targets at once is not a mere convenience:
// this also allows for more effective duplicate suppression.
//
- // Note also that this function can only be called during execution
- // after all the specified library targets have been matched. Normally
- // it is used in ad hoc recipes to implement custom linking.
+ // Note also that this function can only be called during execution (or,
+ // carefully, during match) after all the specified library targets have
+ // been matched. Normally it is used in ad hoc recipes to implement
+ // custom linking.
//
// Note that this function is not pure.
//
@@ -302,12 +369,15 @@ namespace build2
x,
[] (void* ls, strings& r,
const vector_view<value>& vs, const module& m, const scope& bs,
- action a, const file& l, bool la, linfo li)
+ action a, const target& l, bool la, optional<linfo> li)
{
lflags lf (0);
bool rel (true);
if (vs.size () > 2)
{
+ if (vs[2].null)
+ throw invalid_argument ("null value");
+
for (const name& f: vs[2].as<names> ())
{
string s (convert<string> (name (f)));
@@ -326,7 +396,8 @@ namespace build2
m.append_libraries (
*static_cast<appended_libraries*> (ls), r,
nullptr /* sha256 */, nullptr /* update */, timestamp_unknown,
- bs, a, l, la, lf, li, nullopt /* for_install */, self, rel);
+ bs, a, l.as<file> (), la, lf, *li,
+ nullopt /* for_install */, self, rel);
}});
// $<module>.lib_rpaths(<lib-targets>, <otype> [, <link> [, <self>]])
@@ -358,13 +429,12 @@ namespace build2
x,
[] (void* ls, strings& r,
const vector_view<value>& vs, const module& m, const scope& bs,
- action a, const file& l, bool la, linfo li)
+ action a, const target& l, bool la, optional<linfo> li)
{
bool link (vs.size () > 2 ? convert<bool> (vs[2]) : false);
bool self (vs.size () > 3 ? convert<bool> (vs[3]) : true);
m.rpath_libraries (*static_cast<rpathed_libraries*> (ls), r,
- bs,
- a, l, la, li, link, self);
+ bs, a, l.as<file> (), la, *li, link, self);
}});
// $cxx.obj_modules(<obj-targets>)
@@ -422,7 +492,16 @@ namespace build2
// look for cc.export.libs and <module>.export.libs.
//
// 3. No member/group selection/linkup: we resolve *.export.libs on
- // whatever is listed.
+ // whatever is listed (so no liba{}/libs{} overrides will be
+ // considered).
+ //
+ // Because of (2) and (3), this functionality should only be used on a
+ // controlled list of libraries (usually libraries that belong to the
+ // same family as this library).
+ //
+ // Note that a similar deduplication is also performed when processing
+ // the libraries. However, it may still make sense to do it once at the
+ // source for really severe cases (like Boost).
//
// Note that this function is not pure.
//
@@ -450,6 +529,9 @@ namespace build2
// We can assume the argument is present due to function's types
// signature.
//
+ if (vs[0].null)
+ throw invalid_argument ("null value");
+
names& r (vs[0].as<names> ());
m->deduplicate_export_libs (*bs,
vector<name> (r.begin (), r.end ()),
diff --git a/libbuild2/cc/gcc.cxx b/libbuild2/cc/gcc.cxx
index 30f2092..b553c8c 100644
--- a/libbuild2/cc/gcc.cxx
+++ b/libbuild2/cc/gcc.cxx
@@ -45,6 +45,13 @@ namespace build2
d = dir_path (o, 2, string::npos);
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -52,13 +59,29 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
+ r.push_back (move (d));
}
}
+#ifdef _WIN32
+ // Some misconfigured MinGW GCC builds add absolute POSIX directories to
+ // their built-in search paths (e.g., /mingw/{include,lib}) which GCC then
+ // interprets as absolute paths relative to the current drive (so the set
+ // of built-in search paths starts depending on where we run things from).
+ //
+ // While that's definitely misguided, life is short and we don't want to
+ // waste it explaining this in long mailing list threads and telling
+ // people to complain to whomever built their GCC. So we will just
+ // recreate the behavior in a way that's consistent with GCC and let
+ // people discover this on their own.
+ //
+ static inline void
+ add_current_drive (string& s)
+ {
+ s.insert (0, work.string (), 0, 2); // Add e.g., `c:`.
+ }
+#endif
+
// Extract system header search paths from GCC (gcc/g++) or compatible
// (Clang, Intel) using the `-v -E </dev/null` method.
//
@@ -113,101 +136,94 @@ namespace build2
if (verb >= 3)
print_process (env, args);
+ // Open pipe to stderr, redirect stdin and stdout to /dev/null.
+ //
+ process pr (run_start (
+ env,
+ args,
+ -2, /* stdin */
+ -2, /* stdout */
+ -1 /* stderr */));
try
{
- //@@ TODO: why don't we use run_start() here? Because it's unable to
- // open pipe for stderr and we need to change it first, for example,
- // making the err parameter a file descriptor rather than a flag.
- //
+ ifdstream is (
+ move (pr.in_efd), fdstream_mode::skip, ifdstream::badbit);
- // Open pipe to stderr, redirect stdin and stdout to /dev/null.
+ // Normally the system header paths appear between the following
+ // lines:
//
- process pr (xc,
- args.data (),
- -2, /* stdin */
- -2, /* stdout */
- -1, /* stderr */
- nullptr /* cwd */,
- env.vars);
-
- try
+ // #include <...> search starts here:
+ // End of search list.
+ //
+ // The exact text depends on the current locale. What we can rely on
+ // is the presence of the "#include <...>" substring in the "opening"
+ // line and the fact that the paths are indented with a single space
+ // character, unlike the "closing" line.
+ //
+ // Note that on Mac OS we will also see some framework paths among
+ // system header paths, followed with a comment. For example:
+ //
+ // /Library/Frameworks (framework directory)
+ //
+ // For now we ignore framework paths and to filter them out we will
+ // only consider valid paths to existing directories, skipping those
+ // which we fail to normalize or stat. @@ Maybe this is a bit too
+ // loose, especially compared to gcc_library_search_dirs()?
+ //
+ string s;
+ for (bool found (false); getline (is, s); )
{
- ifdstream is (
- move (pr.in_efd), fdstream_mode::skip, ifdstream::badbit);
-
- // Normally the system header paths appear between the following
- // lines:
- //
- // #include <...> search starts here:
- // End of search list.
- //
- // The exact text depends on the current locale. What we can rely on
- // is the presence of the "#include <...>" substring in the
- // "opening" line and the fact that the paths are indented with a
- // single space character, unlike the "closing" line.
- //
- // Note that on Mac OS we will also see some framework paths among
- // system header paths, followed with a comment. For example:
- //
- // /Library/Frameworks (framework directory)
- //
- // For now we ignore framework paths and to filter them out we will
- // only consider valid paths to existing directories, skipping those
- // which we fail to normalize or stat.
- //
- string s;
- for (bool found (false); getline (is, s); )
+ if (!found)
+ found = s.find ("#include <...>") != string::npos;
+ else
{
- if (!found)
- found = s.find ("#include <...>") != string::npos;
- else
+ if (s[0] != ' ')
+ break;
+
+ dir_path d;
+ try
{
- if (s[0] != ' ')
- break;
-
- try
- {
- dir_path d (s, 1, s.size () - 1);
-
- if (d.absolute () && exists (d, true) &&
- find (r.begin (), r.end (), d.normalize ()) == r.end ())
- r.emplace_back (move (d));
- }
- catch (const invalid_path&)
- {
- // Skip this path.
- }
- }
- }
+ string ds (s, 1, s.size () - 1);
- is.close (); // Don't block.
+#ifdef _WIN32
+ if (path_traits::is_separator (ds[0]))
+ add_current_drive (ds);
+#endif
+ d = dir_path (move (ds));
- if (!pr.wait ())
- {
- // We have read stderr so better print some diagnostics.
- //
- diag_record dr (fail);
+ if (d.relative () || !exists (d, true))
+ continue;
- dr << "failed to extract " << x_lang << " header search paths" <<
- info << "command line: ";
+ d.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ continue;
+ }
- print_process (dr, args);
+ if (find (r.begin (), r.end (), d) == r.end ())
+ r.emplace_back (move (d));
}
}
- catch (const io_error&)
+
+ is.close (); // Don't block.
+
+ if (!run_wait (args, pr))
{
- pr.wait ();
- fail << "error reading " << x_lang << " compiler -v -E output";
+ // We have read stderr so better print some diagnostics.
+ //
+ diag_record dr (fail);
+
+ dr << "failed to extract " << x_lang << " header search paths" <<
+ info << "command line: ";
+
+ print_process (dr, args);
}
}
- catch (const process_error& e)
+ catch (const io_error&)
{
- error << "unable to execute " << args[0] << ": " << e;
-
- if (e.child)
- exit (1);
-
- throw failed ();
+ run_wait (args, pr);
+ fail << "error reading " << x_lang << " compiler -v -E output";
}
// It's highly unlikely not to have any system directories. More likely
@@ -271,6 +287,9 @@ namespace build2
// Open pipe to stdout.
//
+ // Note: this function is called in the serial load phase and so no
+ // diagnostics buffering is needed.
+ //
process pr (run_start (env,
args,
0, /* stdin */
@@ -305,7 +324,7 @@ namespace build2
// by that and let run_finish() deal with it.
}
- run_finish (args, pr);
+ run_finish (args, pr, 2 /* verbosity */);
if (l.empty ())
fail << "unable to extract " << x_lang << " compiler system library "
@@ -334,9 +353,35 @@ namespace build2
//
for (string::size_type b (0);; e = l.find (d, (b = e + 1)))
{
- dir_path d (l, b, (e != string::npos ? e - b : e));
+ dir_path d;
+ try
+ {
+ string ds (l, b, (e != string::npos ? e - b : e));
+
+ // Skip empty entries (sometimes found in random MinGW toolchains).
+ //
+ if (!ds.empty ())
+ {
+#ifdef _WIN32
+ if (path_traits::is_separator (ds[0]))
+ add_current_drive (ds);
+#endif
+
+ d = dir_path (move (ds));
+
+ if (d.relative ())
+ throw invalid_path (move (d).string ());
+
+ d.normalize ();
+ }
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid directory '" << e.path << "'" << " in "
+ << args[0] << " -print-search-dirs output";
+ }
- if (find (r.begin (), r.end (), d.normalize ()) == r.end ())
+ if (!d.empty () && find (r.begin (), r.end (), d) == r.end ())
r.emplace_back (move (d));
if (e == string::npos)
diff --git a/libbuild2/cc/guess.cxx b/libbuild2/cc/guess.cxx
index ff06c5f..a0ed34b 100644
--- a/libbuild2/cc/guess.cxx
+++ b/libbuild2/cc/guess.cxx
@@ -106,7 +106,7 @@ namespace build2
else if (id.compare (0, p, "icc" ) == 0) type = compiler_type::icc;
else
throw invalid_argument (
- "invalid compiler type '" + string (id, 0, p) + "'");
+ "invalid compiler type '" + string (id, 0, p) + '\'');
if (p != string::npos)
{
@@ -181,12 +181,12 @@ namespace build2
// could also be because there is something wrong with the compiler or
// options but that we simply leave to blow up later).
//
- process pr (run_start (3 /* verbosity */,
+ process pr (run_start (3 /* verbosity */,
xp,
args,
- -1 /* stdin */,
- -1 /* stdout */,
- false /* error */));
+ -1 /* stdin */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
string l, r;
try
{
@@ -222,7 +222,7 @@ namespace build2
// that.
}
- if (!run_finish_code (args.data (), pr, l))
+ if (!run_finish_code (args.data (), pr, l, 2 /* verbosity */))
r = "none";
if (r.empty ())
@@ -262,6 +262,8 @@ namespace build2
" stdlib:=\"freebsd\" \n"
"# elif defined(__NetBSD__) \n"
" stdlib:=\"netbsd\" \n"
+"# elif defined(__OpenBSD__) \n"
+" stdlib:=\"openbsd\" \n"
"# elif defined(__APPLE__) \n"
" stdlib:=\"apple\" \n"
"# elif defined(__EMSCRIPTEN__) \n"
@@ -410,11 +412,13 @@ namespace build2
//
// Note that Visual Studio versions prior to 15.0 are not supported.
//
+ // Note also the directories are absolute and normalized.
+ //
struct msvc_info
{
- dir_path msvc_dir; // VC directory (...\Tools\MSVC\<ver>\).
- dir_path psdk_dir; // Platfor SDK version (under Include/, Lib/, etc).
- string psdk_ver; // Platfor SDK directory (...\Windows Kits\<ver>\).
+ dir_path msvc_dir; // VC tools directory (...\Tools\MSVC\<ver>\).
+ dir_path psdk_dir; // Platform SDK directory (...\Windows Kits\<ver>\).
+ string psdk_ver; // Platform SDK version (under Include/, Lib/, etc).
};
#if defined(_WIN32) && !defined(BUILD2_BOOTSTRAP)
@@ -456,13 +460,16 @@ namespace build2
{0x87, 0xBF, 0xD5, 0x77, 0x83, 0x8F, 0x1D, 0x5C}};
// If cl is not empty, then find an installation that contains this cl.exe
- // path.
+ // path. In this case the path must be absolute and normalized.
//
static optional<msvc_info>
- find_msvc (const path& cl = path ())
+ find_msvc (const path& cl = path ())
{
using namespace butl;
+ assert (cl.empty () ||
+ (cl.absolute () && cl.normalized (false /* sep */)));
+
msvc_info r;
// Try to obtain the MSVC directory.
@@ -528,7 +535,7 @@ namespace build2
// Note: we cannot use bstr_t due to the Clang 9.0 bug #42842.
//
BSTR p;
- if (vs->ResolvePath (L"VC", &p) != S_OK)
+ if (vs->ResolvePath (L"VC", &p) != S_OK)
return dir_path ();
unique_ptr<wchar_t, bstr_deleter> deleter (p);
@@ -634,36 +641,73 @@ namespace build2
return nullopt;
}
- // Read the VC version from the file and bail out on error.
+ // If cl.exe path is not specified, then deduce the default VC tools
+ // directory for this Visual Studio instance. Otherwise, extract the
+ // tools directory from this path.
+ //
+ // Note that in the latter case we could potentially avoid the above
+ // iterating over the VS instances, but let's make sure that the
+ // specified cl.exe path actually belongs to one of them as a sanity
+ // check.
//
- string vc_ver; // For example, 14.23.28105.
+ if (cl.empty ())
+ {
+ // Read the VC version from the file and bail out on error.
+ //
+ string vc_ver; // For example, 14.23.28105.
- path vp (
- r.msvc_dir /
- path ("Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"));
+ path vp (
+ r.msvc_dir /
+ path ("Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"));
- try
- {
- ifdstream is (vp);
- vc_ver = trim (is.read_text ());
- }
- catch (const io_error&) {}
+ try
+ {
+ ifdstream is (vp);
+ vc_ver = trim (is.read_text ());
+ }
+ catch (const io_error&) {}
- // Make sure that the VC version directory exists.
- //
- if (!vc_ver.empty ())
- try
- {
- ((r.msvc_dir /= "Tools") /= "MSVC") /= vc_ver;
+ if (vc_ver.empty ())
+ return nullopt;
+
+ // Make sure that the VC version directory exists.
+ //
+ try
+ {
+ ((r.msvc_dir /= "Tools") /= "MSVC") /= vc_ver;
- if (!dir_exists (r.msvc_dir))
- r.msvc_dir.clear ();
+ if (!dir_exists (r.msvc_dir))
+ return nullopt;
+ }
+ catch (const invalid_path&) {return nullopt;}
+ catch (const system_error&) {return nullopt;}
}
- catch (const invalid_path&) {}
- catch (const system_error&) {}
+ else
+ {
+ (r.msvc_dir /= "Tools") /= "MSVC";
- if (r.msvc_dir.empty ())
- return nullopt;
+ // Extract the VC tools version from the cl.exe path and append it
+ // to r.msvc_dir.
+ //
+ if (!cl.sub (r.msvc_dir))
+ return nullopt;
+
+ // For example, 14.23.28105\bin\Hostx64\x64\cl.exe.
+ //
+ path p (cl.leaf (r.msvc_dir)); // Can't throw.
+
+ auto i (p.begin ()); // Tools version.
+ if (i == p.end ())
+ return nullopt;
+
+ r.msvc_dir /= *i; // Can't throw.
+
+ // For good measure, make sure that the tools version is not the
+ // last component in the cl.exe path.
+ //
+ if (++i == p.end ())
+ return nullopt;
+ }
}
// Try to obtain the latest Platform SDK directory and version.
@@ -717,7 +761,7 @@ namespace build2
//
for (const dir_entry& de:
dir_iterator (r.psdk_dir / dir_path ("Include"),
- false /* ignore_dangling */))
+ dir_iterator::no_follow))
{
if (de.type () == entry_type::directory)
{
@@ -735,6 +779,16 @@ namespace build2
return nullopt;
}
+ try
+ {
+ r.msvc_dir.normalize ();
+ r.psdk_dir.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ return nullopt;
+ }
+
return r;
}
#endif
@@ -775,7 +829,8 @@ namespace build2
// Note: allowed to change pre if succeeds.
//
static guess_result
- guess (const char* xm,
+ guess (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const strings& x_mo,
@@ -926,10 +981,12 @@ namespace build2
// We try to find the matching installation only for MSVC (for Clang
// we extract this information from the compiler).
//
- if (xc.absolute () &&
- (pt == type::msvc && !pv))
+ if (xc.absolute () && (pt == type::msvc && !pv))
{
- if (optional<msvc_info> mi = find_msvc (xc))
+ path cl (xc); // Absolute but may not be normalized.
+ cl.normalize (); // Can't throw since this is an existing path.
+
+ if (optional<msvc_info> mi = find_msvc (cl))
{
search_info = info_ptr (
new msvc_info (move (*mi)), msvc_info_deleter);
@@ -965,7 +1022,7 @@ namespace build2
#endif
string cache;
- auto run = [&cs, &env, &args, &cache] (
+ auto run = [&ctx, &cs, &env, &args, &cache] (
const char* o,
auto&& f,
bool checksum = false) -> guess_result
@@ -973,9 +1030,10 @@ namespace build2
args[args.size () - 2] = o;
cache.clear ();
return build2::run<guess_result> (
+ ctx,
3 /* verbosity */,
env,
- args.data (),
+ args,
forward<decltype (f)> (f),
false /* error */,
false /* ignore_exit */,
@@ -1022,7 +1080,7 @@ namespace build2
// The gcc -v output will have a last line in the form:
//
- // "gcc version X.Y[.Z][...] ..."
+ // "gcc version X[.Y[.Z]][...] ..."
//
// The "version" word can probably be translated. For example:
//
@@ -1034,6 +1092,7 @@ namespace build2
// gcc version 5.1.0 (Ubuntu 5.1.0-0ubuntu11~14.04.1)
// gcc version 6.0.0 20160131 (experimental) (GCC)
// gcc version 9.3-win32 20200320 (GCC)
+ // gcc version 10-win32 20220324 (GCC)
//
if (cache.empty ())
{
@@ -1273,7 +1332,11 @@ namespace build2
//
const char* evars[] = {"CL=", "_CL_=", nullptr};
- r = build2::run<guess_result> (3, process_env (xp, evars), f, false);
+ r = build2::run<guess_result> (ctx,
+ 3,
+ process_env (xp, evars),
+ f,
+ false);
if (r.empty ())
{
@@ -1424,10 +1487,12 @@ namespace build2
// And VC 16 seems to have the runtime version 14.1 (and not 14.2, as
// one might expect; DLLs are still *140.dll but there are now _1 and _2
// variants for, say, msvcp140.dll). We will, however, call it 14.2
- // (which is the version of the "toolset") in our target triplet.
+ // (which is the version of the "toolset") in our target triplet. And we
+ // will call VC 17 14.3 (which is also the version of the "toolset").
//
// year ver cl crt/dll toolset
//
+ // 2022 17.X 19.3X 14.?/140 14.3X
// 2019 16.X 19.2X 14.2/140 14.2X
// 2017 15.9 19.16 14.1/140 14.16
// 2017 15.8 19.15 14.1/140
@@ -1446,7 +1511,8 @@ namespace build2
//
// _MSC_VER is the numeric cl version, e.g., 1921 for 19.21.
//
- /**/ if (v.major == 19 && v.minor >= 20) return "14.2";
+ /**/ if (v.major == 19 && v.minor >= 30) return "14.3";
+ else if (v.major == 19 && v.minor >= 20) return "14.2";
else if (v.major == 19 && v.minor >= 10) return "14.1";
else if (v.major == 19 && v.minor == 0) return "14.0";
else if (v.major == 18 && v.minor == 0) return "12.0";
@@ -1470,8 +1536,8 @@ namespace build2
// Studio command prompt puts into INCLUDE) including any paths from the
// compiler mode and their count.
//
- // Note that currently we don't add any ATL/MFC or WinRT paths (but could
- // do that probably first checking if they exist/empty).
+ // Note that currently we don't add any ATL/MFC paths (but could do that
+ // probably first checking if they exist/empty).
//
static pair<dir_paths, size_t>
msvc_hdr (const msvc_info& mi, const strings& mo)
@@ -1483,6 +1549,8 @@ namespace build2
msvc_extract_header_search_dirs (mo, r);
size_t rn (r.size ());
+ // Note: the resulting directories are normalized by construction.
+ //
r.push_back (dir_path (mi.msvc_dir) /= "include");
// This path structure only appeared in Platform SDK 10 (if anyone wants
@@ -1496,6 +1564,7 @@ namespace build2
r.push_back (dir_path (d) /= "ucrt" );
r.push_back (dir_path (d) /= "shared");
r.push_back (dir_path (d) /= "um" );
+ r.push_back (dir_path (d) /= "winrt" );
}
return make_pair (move (r), rn);
@@ -1531,6 +1600,8 @@ namespace build2
msvc_extract_library_search_dirs (mo, r);
size_t rn (r.size ());
+ // Note: the resulting directories are normalized by construction.
+ //
r.push_back ((dir_path (mi.msvc_dir) /= "lib") /= cpu);
// This path structure only appeared in Platform SDK 10 (if anyone wants
@@ -1585,7 +1656,8 @@ namespace build2
"LIB", "LINK", "_LINK_", nullptr};
static compiler_info
- guess_msvc (const char* xm,
+ guess_msvc (context&,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -1608,6 +1680,7 @@ namespace build2
// "x86"
// "x64"
// "ARM"
+ // "ARM64"
//
compiler_version ver;
{
@@ -1671,9 +1744,10 @@ namespace build2
for (size_t b (0), e (0), n;
(n = next_word (s, b, e, ' ', ',')) != 0; )
{
- if (s.compare (b, n, "x64", 3) == 0 ||
- s.compare (b, n, "x86", 3) == 0 ||
- s.compare (b, n, "ARM", 3) == 0 ||
+ if (s.compare (b, n, "x64", 3) == 0 ||
+ s.compare (b, n, "x86", 3) == 0 ||
+ s.compare (b, n, "ARM64", 5) == 0 ||
+ s.compare (b, n, "ARM", 3) == 0 ||
s.compare (b, n, "80x86", 5) == 0)
{
cpu.assign (s, b, n);
@@ -1684,15 +1758,15 @@ namespace build2
if (cpu.empty ())
fail << "unable to extract MSVC target CPU from " << "'" << s << "'";
- // Now we need to map x86, x64, and ARM to the target triplets. The
- // problem is, there aren't any established ones so we got to invent
- // them ourselves. Based on the discussion in
+ // Now we need to map x86, x64, ARM, and ARM64 to the target
+ // triplets. The problem is, there aren't any established ones so we
+ // got to invent them ourselves. Based on the discussion in
// <libbutl/target-triplet.hxx>, we need something in the
// CPU-VENDOR-OS-ABI form.
//
// The CPU part is fairly straightforward with x86 mapped to 'i386'
- // (or maybe 'i686'), x64 to 'x86_64', and ARM to 'arm' (it could also
- // include the version, e.g., 'amrv8').
+ // (or maybe 'i686'), x64 to 'x86_64', ARM to 'arm' (it could also
+ // include the version, e.g., 'amrv8'), and ARM64 to 'aarch64'.
//
// The (toolchain) VENDOR is also straightforward: 'microsoft'. Why
// not omit it? Two reasons: firstly, there are other compilers with
@@ -1702,7 +1776,7 @@ namespace build2
//
// OS-ABI is where things are not as clear cut. The OS part shouldn't
// probably be just 'windows' since we have Win32 and WinCE. And
- // WinRT. And Universal Windows Platform (UWP). So perhaps the
+ // WinRT. And Universal Windows Platform (UWP). So perhaps the
// following values for OS: 'win32', 'wince', 'winrt', 'winup'.
//
// For 'win32' the ABI part could signal the Microsoft C/C++ runtime
@@ -1727,9 +1801,10 @@ namespace build2
// Putting it all together, Visual Studio 2015 will then have the
// following target triplets:
//
- // x86 i386-microsoft-win32-msvc14.0
- // x64 x86_64-microsoft-win32-msvc14.0
- // ARM arm-microsoft-winup-???
+ // x86 i386-microsoft-win32-msvc14.0
+ // x64 x86_64-microsoft-win32-msvc14.0
+ // ARM arm-microsoft-winup-???
+ // ARM64 aarch64-microsoft-win32-msvc14.0
//
if (cpu == "ARM")
fail << "cl.exe ARM/WinRT/UWP target is not yet supported";
@@ -1739,6 +1814,8 @@ namespace build2
t = "x86_64-microsoft-win32-msvc";
else if (cpu == "x86" || cpu == "80x86")
t = "i386-microsoft-win32-msvc";
+ else if (cpu == "ARM64")
+ t = "aarch64-microsoft-win32-msvc";
else
assert (false);
@@ -1750,6 +1827,8 @@ namespace build2
else
ot = t = *xt;
+ target_triplet tt (t); // Shouldn't fail.
+
// If we have the MSVC installation information, then this means we are
// running out of the Visual Studio command prompt and will have to
// supply PATH/INCLUDE/LIB/IFCPATH equivalents ourselves.
@@ -1761,7 +1840,7 @@ namespace build2
if (const msvc_info* mi = static_cast<msvc_info*> (gr.info.get ()))
{
- const char* cpu (msvc_cpu (target_triplet (t).cpu));
+ const char* cpu (msvc_cpu (tt.cpu));
lib_dirs = msvc_lib (*mi, x_mo, cpu);
hdr_dirs = msvc_hdr (*mi, x_mo);
@@ -1849,7 +1928,8 @@ namespace build2
"SDKROOT", "MACOSX_DEPLOYMENT_TARGET", nullptr};
static compiler_info
- guess_gcc (const char* xm,
+ guess_gcc (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -1868,7 +1948,7 @@ namespace build2
// though language words can be translated and even rearranged (see
// examples above).
//
- // "gcc version X.Y[.Z][...]"
+ // "gcc version X[.Y[.Z]][...]"
//
compiler_version ver;
{
@@ -1907,7 +1987,10 @@ namespace build2
//
try
{
- semantic_version v (string (s, b, e - b), ".-+");
+ semantic_version v (string (s, b, e - b),
+ semantic_version::allow_omit_minor |
+ semantic_version::allow_build,
+ ".-+");
ver.major = v.major;
ver.minor = v.minor;
ver.patch = v.patch;
@@ -1959,7 +2042,7 @@ namespace build2
//
auto f = [] (string& l, bool) {return move (l);};
- t = run<string> (3, xp, args.data (), f, false);
+ t = run<string> (ctx, 3, xp, args, f, false);
if (t.empty ())
{
@@ -1967,7 +2050,7 @@ namespace build2
<< "falling back to -dumpmachine";});
args[args.size () - 2] = "-dumpmachine";
- t = run<string> (3, xp, args.data (), f, false);
+ t = run<string> (ctx, 3, xp, args, f, false);
}
if (t.empty ())
@@ -2110,9 +2193,9 @@ namespace build2
process pr (run_start (3 /* verbosity */,
xp,
args,
- -2 /* stdin (/dev/null) */,
- -1 /* stdout */,
- false /* error (2>&1) */));
+ -2 /* stdin (to /dev/null) */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
clang_msvc_info r;
@@ -2264,7 +2347,7 @@ namespace build2
// that.
}
- if (!run_finish_code (args.data (), pr, l))
+ if (!run_finish_code (args.data (), pr, l, 2 /* verbosity */))
fail << "unable to extract MSVC information from " << xp;
if (const char* w = (
@@ -2298,7 +2381,8 @@ namespace build2
nullptr};
static compiler_info
- guess_clang (const char* xm,
+ guess_clang (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -2439,8 +2523,8 @@ namespace build2
// https://gist.github.com/yamaya/2924292
//
// Specifically, we now look in the libc++'s __config file for the
- // _LIBCPP_VERSION and use the previous version as a conservative
- // estimate (note that there could be multiple __config files with
+ // __LIBCPP_VERSION and use the previous version as a conservative
+ // estimate (NOTE that there could be multiple __config files with
// potentially different versions so compile with -v to see which one
// gets picked up).
//
@@ -2463,34 +2547,42 @@ namespace build2
// 12.0.0 -> 9.0
// 12.0.5 -> 10.0 (yes, seriously!)
// 13.0.0 -> 11.0
+ // 13.1.6 -> 12.0
+ // 14.0.0 -> 12.0 (__LIBCPP_VERSION=130000)
+ // 14.0.3 -> 15.0.5 (__LIBCPP_VERSION=150006)
+ // 15.0.0 -> 16.0.1 (__LIBCPP_VERSION=160002)
//
uint64_t mj (var_ver->major);
uint64_t mi (var_ver->minor);
uint64_t pa (var_ver->patch);
- if (mj >= 13) {mj = 11; mi = 0;}
- else if (mj == 12 && (mi > 0 || pa >= 5)) {mj = 10; mi = 0;}
- else if (mj == 12) {mj = 9; mi = 0;}
- else if (mj == 11 && (mi > 0 || pa >= 3)) {mj = 8; mi = 0;}
- else if (mj == 11) {mj = 7; mi = 0;}
- else if (mj == 10) {mj = 6; mi = 0;}
- else if (mj == 9 && mi >= 1) {mj = 5; mi = 0;}
- else if (mj == 9) {mj = 4; mi = 0;}
- else if (mj == 8) {mj = 3; mi = 9;}
- else if (mj == 7 && mi >= 3) {mj = 3; mi = 8;}
- else if (mj == 7) {mj = 3; mi = 7;}
- else if (mj == 6 && mi >= 1) {mj = 3; mi = 5;}
- else if (mj == 6) {mj = 3; mi = 4;}
- else if (mj == 5 && mi >= 1) {mj = 3; mi = 3;}
- else if (mj == 5) {mj = 3; mi = 2;}
- else if (mj == 4 && mi >= 2) {mj = 3; mi = 1;}
- else {mj = 3; mi = 0;}
+
+ if (mj >= 15) {mj = 16; mi = 0; pa = 1;}
+ else if (mj == 14 && (mi > 0 || pa >= 3)) {mj = 15; mi = 0; pa = 5;}
+ else if (mj == 14 || (mj == 13 && mi >= 1)) {mj = 12; mi = 0; pa = 0;}
+ else if (mj == 13) {mj = 11; mi = 0; pa = 0;}
+ else if (mj == 12 && (mi > 0 || pa >= 5)) {mj = 10; mi = 0; pa = 0;}
+ else if (mj == 12) {mj = 9; mi = 0; pa = 0;}
+ else if (mj == 11 && (mi > 0 || pa >= 3)) {mj = 8; mi = 0; pa = 0;}
+ else if (mj == 11) {mj = 7; mi = 0; pa = 0;}
+ else if (mj == 10) {mj = 6; mi = 0; pa = 0;}
+ else if (mj == 9 && mi >= 1) {mj = 5; mi = 0; pa = 0;}
+ else if (mj == 9) {mj = 4; mi = 0; pa = 0;}
+ else if (mj == 8) {mj = 3; mi = 9; pa = 0;}
+ else if (mj == 7 && mi >= 3) {mj = 3; mi = 8; pa = 0;}
+ else if (mj == 7) {mj = 3; mi = 7; pa = 0;}
+ else if (mj == 6 && mi >= 1) {mj = 3; mi = 5; pa = 0;}
+ else if (mj == 6) {mj = 3; mi = 4; pa = 0;}
+ else if (mj == 5 && mi >= 1) {mj = 3; mi = 3; pa = 0;}
+ else if (mj == 5) {mj = 3; mi = 2; pa = 0;}
+ else if (mj == 4 && mi >= 2) {mj = 3; mi = 1; pa = 0;}
+ else {mj = 3; mi = 0; pa = 0;}
ver = compiler_version {
- to_string (mj) + '.' + to_string (mi) + ".0",
+ to_string (mj) + '.' + to_string (mi) + '.' + to_string (pa),
mj,
mi,
- 0,
+ pa,
""};
}
else if (emscr)
@@ -2543,7 +2635,7 @@ namespace build2
// for LC_ALL.
//
auto f = [] (string& l, bool) {return move (l);};
- t = run<string> (3, xp, args.data (), f, false);
+ t = run<string> (ctx, 3, xp, args, f, false);
if (t.empty ())
fail << "unable to extract target architecture from " << xc
@@ -2771,7 +2863,8 @@ namespace build2
}
static compiler_info
- guess_icc (const char* xm,
+ guess_icc (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -2835,7 +2928,7 @@ namespace build2
//
// @@ TODO: running without the mode options.
//
- s = run<string> (3, env, "-V", f, false);
+ s = run<string> (ctx, 3, env, "-V", f, false);
if (s.empty ())
fail << "unable to extract signature from " << xc << " -V output";
@@ -2961,7 +3054,7 @@ namespace build2
// The -V output is sent to STDERR.
//
- t = run<string> (3, env, args.data (), f, false);
+ t = run<string> (ctx, 3, env, args, f, false);
if (t.empty ())
fail << "unable to extract target architecture from " << xc
@@ -3012,7 +3105,7 @@ namespace build2
//
{
auto f = [] (string& l, bool) {return move (l);};
- t = run<string> (3, xp, "-dumpmachine", f);
+ t = run<string> (ctx, 3, xp, "-dumpmachine", f);
}
if (t.empty ())
@@ -3093,7 +3186,8 @@ namespace build2
static global_cache<compiler_info> cache;
const compiler_info&
- guess (const char* xm,
+ guess (context& ctx,
+ const char* xm,
lang xl,
const string& ec,
const path& xc,
@@ -3167,7 +3261,7 @@ namespace build2
if (pre.type != invalid_compiler_type)
{
- gr = guess (xm, xl, xc, x_mo, xi, pre, cs);
+ gr = guess (ctx, xm, xl, xc, x_mo, xi, pre, cs);
if (gr.empty ())
{
@@ -3183,13 +3277,14 @@ namespace build2
}
if (gr.empty ())
- gr = guess (xm, xl, xc, x_mo, xi, pre, cs);
+ gr = guess (ctx, xm, xl, xc, x_mo, xi, pre, cs);
if (gr.empty ())
fail << "unable to guess " << xl << " compiler type of " << xc <<
info << "use config." << xm << ".id to specify explicitly";
compiler_info (*gf) (
+ context&,
const char*, lang, const path&, const string*, const string*,
const strings&,
const strings*, const strings*,
@@ -3209,7 +3304,8 @@ namespace build2
case compiler_type::icc: gf = &guess_icc; break;
}
- compiler_info r (gf (xm, xl, xc, xv, xt,
+ compiler_info r (gf (ctx,
+ xm, xl, xc, xv, xt,
x_mo, c_po, x_po, c_co, x_co, c_lo, x_lo,
move (gr), cs));
@@ -3367,6 +3463,7 @@ namespace build2
// In the future we will probably have to maintain per-standard additions.
//
static const char* std_importable[] = {
+ "<initializer_list>", // Note: keep first (present in freestanding).
"<algorithm>",
"<any>",
"<array>",
@@ -3391,7 +3488,6 @@ namespace build2
"<fstream>",
"<functional>",
"<future>",
- "<initializer_list>",
"<iomanip>",
"<ios>",
"<iosfwd>",
@@ -3490,6 +3586,9 @@ namespace build2
// is currently not provided by GCC. Though entering missing headers
// should be harmless.
//
+ // Plus, a freestanding implementation may only have a subset of such
+ // headers (see [compliance]).
+ //
pair<const path, importable_headers::groups>* p;
auto add_groups = [&p] (bool imp)
{
@@ -3511,29 +3610,39 @@ namespace build2
}
else
{
+ // While according to [compliance] a freestanding implementation
+ // should provide a subset of headers, including <initializer_list>,
+ // there seem to be cases where no headers are provided at all (see GH
+ // issue #219). So if we cannot find <initializer_list>, we just skip
+ // the whole thing.
+ //
p = hs.insert_angle (sys_hdr_dirs, std_importable[0]);
- assert (p != nullptr);
- add_groups (true);
+ if (p != nullptr)
+ {
+ assert (p != nullptr);
- dir_path d (p->first.directory ());
+ add_groups (true);
- auto add_header = [&hs, &d, &p, add_groups] (const char* f, bool imp)
- {
- path fp (d);
- fp.combine (f + 1, strlen (f) - 2, '\0'); // Assuming simple.
+ dir_path d (p->first.directory ());
- p = &hs.insert_angle (move (fp), f);
- add_groups (imp);
- };
+ auto add_header = [&hs, &d, &p, add_groups] (const char* f, bool imp)
+ {
+ path fp (d);
+ fp.combine (f + 1, strlen (f) - 2, '\0'); // Assuming simple.
- for (size_t i (1);
- i != sizeof (std_importable) / sizeof (std_importable[0]);
- ++i)
- add_header (std_importable[i], true);
+ p = &hs.insert_angle (move (fp), f);
+ add_groups (imp);
+ };
- for (const char* f: std_non_importable)
- add_header (f, false);
+ for (size_t i (1);
+ i != sizeof (std_importable) / sizeof (std_importable[0]);
+ ++i)
+ add_header (std_importable[i], true);
+
+ for (const char* f: std_non_importable)
+ add_header (f, false);
+ }
}
}
}
diff --git a/libbuild2/cc/guess.hxx b/libbuild2/cc/guess.hxx
index 53acc15..7cbbd87 100644
--- a/libbuild2/cc/guess.hxx
+++ b/libbuild2/cc/guess.hxx
@@ -253,7 +253,8 @@ namespace build2
// that most of it will be the same, at least for C and C++.
//
const compiler_info&
- guess (const char* xm, // Module (for var names in diagnostics).
+ guess (context&,
+ const char* xm, // Module (for var names in diagnostics).
lang xl, // Language.
const string& ec, // Environment checksum.
const path& xc, // Compiler path.
diff --git a/libbuild2/cc/init.cxx b/libbuild2/cc/init.cxx
index affc4ab..33a1133 100644
--- a/libbuild2/cc/init.cxx
+++ b/libbuild2/cc/init.cxx
@@ -86,7 +86,10 @@ namespace build2
// Enter variables.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
auto v_t (variable_visibility::target);
@@ -113,6 +116,13 @@ namespace build2
vp.insert<vector<name>> ("cc.export.libs");
vp.insert<vector<name>> ("cc.export.impl_libs");
+ // Header (-I) and library (-L) search paths to use in the generated .pc
+ // files instead of the default install.{include,lib}. Relative paths
+ // are resolved as install paths.
+ //
+ vp.insert<dir_paths> ("cc.pkconfig.include");
+ vp.insert<dir_paths> ("cc.pkconfig.lib");
+
// Hint variables (not overridable).
//
vp.insert<string> ("config.cc.id", false);
@@ -126,15 +136,20 @@ namespace build2
vp.insert<string> ("cc.runtime");
vp.insert<string> ("cc.stdlib");
- // Target type, for example, "C library" or "C++ library". Should be set
- // on the target as a rule-specific variable by the matching rule to the
- // name of the module (e.g., "c", "cxx"). Currenly only set for
- // libraries and is used to decide which *.libs to use during static
- // linking.
- //
- // It can also be the special "cc" value which means a C-common library
- // but specific language is not known. Used in the import installed
- // logic.
+ // Library target type in the <lang>[,<type>...] form where <lang> is
+ // "c" (C library), "cxx" (C++ library), or "cc" (C-common library but
+ // the specific language is not known). Currently recognized <type>
+ // values are "binless" (library is binless) and "recursively-binless"
+ // (library and all its prerequisite libraries are binless). Note that
+ // another indication of a binless library is an empty path, which could
+ // be easier/faster to check. Note also that there should be no
+ // whitespaces of any kind and <lang> is always first.
+ //
+ // This value should be set on the library target as a rule-specific
+ // variable by the matching rule. It is also saved in the generated
+ // pkg-config files. Currently <lang> is used to decide which *.libs to
+ // use during static linking. The "cc" language is used in the import
+ // installed logic.
//
// Note that this variable cannot be set via the target type/pattern-
// specific mechanism (see process_libraries()).
@@ -326,10 +341,11 @@ namespace build2
//
if (!cast_false<bool> (rs["bin.config.loaded"]))
{
- // Prepare configuration hints. They are only used on the first load
- // of bin.config so we only populate them on our first load.
+ // Prepare configuration hints (pretend it belongs to root scope).
+ // They are only used on the first load of bin.config so we only
+ // populate them on our first load.
//
- variable_map h (rs.ctx);
+ variable_map h (rs);
if (first)
{
diff --git a/libbuild2/cc/install-rule.cxx b/libbuild2/cc/install-rule.cxx
index 560b8a7..dae65db 100644
--- a/libbuild2/cc/install-rule.cxx
+++ b/libbuild2/cc/install-rule.cxx
@@ -90,14 +90,16 @@ namespace build2
{
return (x_header (p) ||
p.is_a (x_src) ||
- (x_mod != nullptr && p.is_a (*x_mod)));
+ (x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj)));
};
if (t.is_a<exe> ())
{
if (header_source (p))
pt = nullptr;
- else if (p.type.see_through)
+ else if (p.type.see_through ())
{
for (i.enter_group (); i.group (); )
{
@@ -151,17 +153,29 @@ namespace build2
}
bool install_rule::
- match (action a, target& t, const string& hint) const
+ match (action a, target& t, const string&, match_extra& me) const
{
- // @@ How do we split the hint between the two?
- //
-
// We only want to handle installation if we are also the ones building
// this target. So first run link's match().
//
- return link_.match (a, t, hint) && file_rule::match (a, t, "");
+ return link_.sub_match (x_link, update_id, a, t, me) &&
+ file_rule::match (a, t);
}
+ // Wrap the file_rule's recipe into a data-carrying recipe.
+ //
+ struct install_match_data
+ {
+ build2::recipe recipe;
+ link_rule::libs_paths libs_paths;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ return recipe (a, t);
+ }
+ };
+
recipe install_rule::
apply (action a, target& t) const
{
@@ -175,7 +189,7 @@ namespace build2
// Signal to the link rule that this is update for install. And if the
// update has already been executed, verify it was done for install.
//
- auto& md (t.data<link_rule::match_data> ());
+ auto& md (t.data<link_rule::match_data> (a.inner_action ()));
if (md.for_install)
{
@@ -194,19 +208,18 @@ namespace build2
// storage if we are un/installing (used in the *_extra() functions
// below).
//
- static_assert (sizeof (link_rule::libs_paths) <= target::data_size,
- "insufficient space");
-
if (file* f = t.is_a<libs> ())
{
if (!f->path ().empty ()) // Not binless.
{
const string* p (cast_null<string> (t["bin.lib.prefix"]));
const string* s (cast_null<string> (t["bin.lib.suffix"]));
- t.data (
+
+ return install_match_data {
+ move (r),
link_.derive_libs_paths (*f,
p != nullptr ? p->c_str (): nullptr,
- s != nullptr ? s->c_str (): nullptr));
+ s != nullptr ? s->c_str (): nullptr)};
}
}
}
@@ -224,11 +237,11 @@ namespace build2
// Here we may have a bunch of symlinks that we need to install.
//
const scope& rs (t.root_scope ());
- auto& lp (t.data<link_rule::libs_paths> ());
+ auto& lp (t.data<install_match_data> (perform_install_id).libs_paths);
- auto ln = [&rs, &id] (const path& f, const path& l)
+ auto ln = [&t, &rs, &id] (const path& f, const path& l)
{
- install_l (rs, id, f.leaf (), l.leaf (), 2 /* verbosity */);
+ install_l (rs, id, l.leaf (), t, f.leaf (), 2 /* verbosity */);
return true;
};
@@ -258,11 +271,11 @@ namespace build2
// Here we may have a bunch of symlinks that we need to uninstall.
//
const scope& rs (t.root_scope ());
- auto& lp (t.data<link_rule::libs_paths> ());
+ auto& lp (t.data<install_match_data> (perform_uninstall_id).libs_paths);
- auto rm = [&rs, &id] (const path& l)
+ auto rm = [&rs, &id] (const path& f, const path& l)
{
- return uninstall_f (rs, id, nullptr, l.leaf (), 2 /* verbosity */);
+ return uninstall_l (rs, id, l.leaf (), f.leaf (), 2 /* verbosity */);
};
const path& lk (lp.link);
@@ -270,10 +283,12 @@ namespace build2
const path& so (lp.soname);
const path& in (lp.interm);
- if (!lk.empty ()) r = rm (lk) || r;
- if (!ld.empty ()) r = rm (ld) || r;
- if (!so.empty ()) r = rm (so) || r;
- if (!in.empty ()) r = rm (in) || r;
+ const path* f (lp.real);
+
+ if (!in.empty ()) {r = rm (*f, in) || r; f = &in;}
+ if (!so.empty ()) {r = rm (*f, so) || r; f = &so;}
+ if (!ld.empty ()) {r = rm (*f, ld) || r; f = &ld;}
+ if (!lk.empty ()) {r = rm (*f, lk) || r; }
}
return r;
@@ -325,14 +340,16 @@ namespace build2
{
return (x_header (p) ||
p.is_a (x_src) ||
- (x_mod != nullptr && p.is_a (*x_mod)));
+ (x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj)));
};
if (t.is_a<libue> ())
{
if (header_source (p))
pt = nullptr;
- else if (p.type.see_through)
+ else if (p.type.see_through ())
{
for (i.enter_group (); i.group (); )
{
@@ -372,12 +389,13 @@ namespace build2
}
bool libux_install_rule::
- match (action a, target& t, const string& hint) const
+ match (action a, target& t, const string&, match_extra& me) const
{
// We only want to handle installation if we are also the ones building
// this target. So first run link's match().
//
- return link_.match (a, t, hint) && alias_rule::match (a, t, "");
+ return link_.sub_match (x_link, update_id, a, t, me) &&
+ alias_rule::match (a, t);
}
}
}
diff --git a/libbuild2/cc/install-rule.hxx b/libbuild2/cc/install-rule.hxx
index acd1bd8..6998d63 100644
--- a/libbuild2/cc/install-rule.hxx
+++ b/libbuild2/cc/install-rule.hxx
@@ -38,8 +38,12 @@ namespace build2
filter (const scope*,
action, const target&, prerequisite_iterator&) const override;
+ // Note: rule::match() override.
+ //
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&, const string&, match_extra&) const override;
+
+ using file_rule::match; // Make Clang happy.
virtual recipe
apply (action, target&) const override;
@@ -71,8 +75,12 @@ namespace build2
filter (const scope*,
action, const target&, prerequisite_iterator&) const override;
+ // Note: rule::match() override.
+ //
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&, const string&, match_extra&) const override;
+
+ using alias_rule::match; // Make Clang happy.
private:
const link_rule& link_;
diff --git a/libbuild2/cc/lexer+raw-string-literal.test.testscript b/libbuild2/cc/lexer+raw-string-literal.test.testscript
index bca489a..a6455eb 100644
--- a/libbuild2/cc/lexer+raw-string-literal.test.testscript
+++ b/libbuild2/cc/lexer+raw-string-literal.test.testscript
@@ -16,6 +16,7 @@ R"X(a
b)X"
R"X(a\
b)X"
+R""(a)""
EOI
<string literal>
<string literal>
@@ -24,6 +25,7 @@ EOI
<string literal>
<string literal>
<string literal>
+<string literal>
EOO
: prefix
diff --git a/libbuild2/cc/lexer.cxx b/libbuild2/cc/lexer.cxx
index beeb970..467c0b1 100644
--- a/libbuild2/cc/lexer.cxx
+++ b/libbuild2/cc/lexer.cxx
@@ -734,8 +734,8 @@ namespace build2
// R"<delimiter>(<raw_characters>)<delimiter>"
//
// Where <delimiter> is a potentially-empty character sequence made of
- // any source character but parentheses, backslash and spaces. It can be
- // at most 16 characters long.
+ // any source character but parentheses, backslash, and spaces (in
+ // particular, it can be `"`). It can be at most 16 characters long.
//
// Note that the <raw_characters> are not processed in any way, not even
// for line continuations.
@@ -750,7 +750,7 @@ namespace build2
{
c = geth ();
- if (eos (c) || c == '\"' || c == ')' || c == '\\' || c == ' ')
+ if (eos (c) || c == ')' || c == '\\' || c == ' ')
fail (l) << "invalid raw string literal";
if (c == '(')
diff --git a/libbuild2/cc/lexer.test.cxx b/libbuild2/cc/lexer.test.cxx
index 0d7d12f..39e4279 100644
--- a/libbuild2/cc/lexer.test.cxx
+++ b/libbuild2/cc/lexer.test.cxx
@@ -6,6 +6,7 @@
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
+#include <libbuild2/cc/types.hxx>
#include <libbuild2/cc/lexer.hxx>
#undef NDEBUG
diff --git a/libbuild2/cc/link-rule.cxx b/libbuild2/cc/link-rule.cxx
index fa9a1f1..e2bdf5d 100644
--- a/libbuild2/cc/link-rule.cxx
+++ b/libbuild2/cc/link-rule.cxx
@@ -20,6 +20,8 @@
#include <libbuild2/bin/target.hxx>
#include <libbuild2/bin/utility.hxx>
+#include <libbuild2/install/utility.hxx>
+
#include <libbuild2/cc/target.hxx> // c, pc*
#include <libbuild2/cc/utility.hxx>
@@ -156,7 +158,7 @@ namespace build2
{
if (s[0] == '-')
{
- // -l<name>, -l <name>
+ // -l<name>, -l <name> (Note: not -pthread, which is system)
//
if (s[1] == 'l')
{
@@ -256,8 +258,6 @@ namespace build2
: common (move (d)),
rule_id (string (x) += ".link 3")
{
- static_assert (sizeof (match_data) <= target::data_size,
- "insufficient space");
}
link_rule::match_result link_rule::
@@ -282,17 +282,25 @@ namespace build2
{
// If excluded or ad hoc, then don't factor it into our tests.
//
- if (include (a, t, p) != include_type::normal)
+ // Note that here we don't validate the update operation override
+ // value (since we may not match). Instead we do this in apply().
+ //
+ lookup l;
+ if (include (a, t, p, a.operation () == update_id ? &l : nullptr) !=
+ include_type::normal)
continue;
if (p.is_a (x_src) ||
(x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj)) ||
// Header-only X library (or library with C source and X header).
(library && x_header (p, false /* c_hdr */)))
{
r.seen_x = true;
}
- else if (p.is_a<c> () ||
+ else if (p.is_a<c> () ||
+ (x_obj != nullptr && p.is_a<m> ()) ||
// Header-only C library.
(library && p.is_a<h> ()))
{
@@ -347,6 +355,11 @@ namespace build2
const target* pg (nullptr);
const target* pt (p.search_existing ());
+ auto search = [&t, &p] (const target_type& tt)
+ {
+ return search_existing (t.ctx, p.prerequisite.key (tt));
+ };
+
if (p.is_a<libul> ())
{
if (pt != nullptr)
@@ -369,23 +382,33 @@ namespace build2
{
// It's possible we have no group but have a member so try that.
//
- const target_type& tt (ot == otype::a ? libua::static_type :
- ot == otype::s ? libus::static_type :
- libue::static_type);
+ if (ot != otype::e)
+ {
+ // We know this prerequisite member is a prerequisite since
+ // otherwise the above search would have returned the member
+ // target.
+ //
+ pt = search (ot == otype::a
+ ? libua::static_type
+ : libus::static_type);
+ }
+ else
+ {
+ // Similar semantics to bin::link_member(): prefer static over
+ // shared.
+ //
+ pt = search (libua::static_type);
- // We know this prerequisite member is a prerequisite since
- // otherwise the above search would have returned the member
- // target.
- //
- pt = search_existing (t.ctx, p.prerequisite.key (tt));
+ if (pt == nullptr)
+ pt = search (libus::static_type);
+ }
}
}
else if (!p.is_a<libue> ())
{
// See if we also/instead have a group.
//
- pg = search_existing (t.ctx,
- p.prerequisite.key (libul::static_type));
+ pg = search (libul::static_type);
if (pt == nullptr)
swap (pt, pg);
@@ -426,7 +449,7 @@ namespace build2
}
bool link_rule::
- match (action a, target& t, const string& hint) const
+ match (action a, target& t, const string& hint, match_extra&) const
{
// NOTE: may be called multiple times and for both inner and outer
// operations (see the install rules).
@@ -465,17 +488,22 @@ namespace build2
return false;
}
- if (!(r.seen_x || r.seen_c || r.seen_obj || r.seen_lib))
+ // Sometimes we may need to have a binless library whose only purpose is
+ // to export dependencies on other libraries (potentially in a platform-
+ // specific manner; think the whole -pthread mess). So allow a library
+ // without any sources with a hint.
+ //
+ if (!(r.seen_x || r.seen_c || r.seen_obj || r.seen_lib || !hint.empty ()))
{
- l4 ([&]{trace << "no " << x_lang << ", C, or obj/lib prerequisite "
- << "for target " << t;});
+ l4 ([&]{trace << "no " << x_lang << ", C, obj/lib prerequisite or "
+ << "hint for target " << t;});
return false;
}
// We will only chain a C source if there is also an X source or we were
// explicitly told to.
//
- if (r.seen_c && !r.seen_x && hint < x)
+ if (r.seen_c && !r.seen_x && hint.empty ())
{
l4 ([&]{trace << "C prerequisite without " << x_lang << " or hint "
<< "for target " << t;});
@@ -813,6 +841,12 @@ namespace build2
//
if (const libul* ul = pt->is_a<libul> ())
{
+ // @@ Isn't libul{} member already picked or am I missing something?
+ // If not, then we may need the same in recursive-binless logic.
+ //
+#if 0
+ assert (false); // @@ TMP (remove before 0.16.0 release)
+#endif
ux = &link_member (*ul, a, li)->as<libux> ();
}
else if ((ux = pt->is_a<libue> ()) ||
@@ -829,8 +863,20 @@ namespace build2
return nullptr;
};
+ // Given the cc.type value return true if the library is recursively
+ // binless.
+ //
+ static inline bool
+ recursively_binless (const string& type)
+ {
+ size_t p (type.find ("recursively-binless"));
+ return (p != string::npos &&
+ type[p - 1] == ',' && // <lang> is first.
+ (type[p += 19] == '\0' || type[p] == ','));
+ }
+
recipe link_rule::
- apply (action a, target& xt) const
+ apply (action a, target& xt, match_extra&) const
{
tracer trace (x, "link_rule::apply");
@@ -840,7 +886,11 @@ namespace build2
// Note that for_install is signalled by install_rule and therefore
// can only be relied upon during execute.
//
- match_data& md (t.data (match_data ()));
+ // Note that we don't really need to set it as target data: while there
+ // are calls to get it, they should only happen after the target has
+ // been matched.
+ //
+ match_data md (*this);
const scope& bs (t.base_scope ());
const scope& rs (*bs.root_scope ());
@@ -849,11 +899,6 @@ namespace build2
otype ot (lt.type);
linfo li (link_info (bs, ot));
- // Set the library type (C, C++, etc) as rule-specific variable.
- //
- if (lt.library ())
- t.state[a].assign (c_type) = string (x);
-
bool binless (lt.library ()); // Binary-less until proven otherwise.
bool user_binless (lt.library () && cast_false<bool> (t[b_binless]));
@@ -875,7 +920,7 @@ namespace build2
// We do libraries first in order to indicate that we will execute these
// targets before matching any of the obj/bmi{}. This makes it safe for
// compile::apply() to unmatch them and therefore not to hinder
- // parallelism.
+ // parallelism (or mess up for-install'ness).
//
// We also create obj/bmi{} chain targets because we need to add
// (similar to lib{}) all the bmi{} as prerequisites to all the other
@@ -899,33 +944,100 @@ namespace build2
return a.operation () == clean_id && !pt.dir.sub (rs.out_path ());
};
+ bool update_match (false); // Have update during match.
+
auto& pts (t.prerequisite_targets[a]);
size_t start (pts.size ());
for (prerequisite_member p: group_prerequisite_members (a, t))
{
- include_type pi (include (a, t, p));
+ // Note that we have to recognize update=match for *(update), not just
+ // perform(update). But only actually update for perform(update).
+ //
+ lookup l; // The `update` variable value, if any.
+ include_type pi (
+ include (a, t, p, a.operation () == update_id ? &l : nullptr));
// We pre-allocate a NULL slot for each (potential; see clean)
// prerequisite target.
//
pts.push_back (prerequisite_target (nullptr, pi));
- const target*& pt (pts.back ());
+ auto& pto (pts.back ());
+
+ // Use bit 2 of prerequisite_target::include to signal update during
+ // match.
+ //
+ // Note that for now we only allow updating during match ad hoc and
+ // mark 3 (headers, etc; see below) prerequisites.
+ //
+ // By default we update during match headers and ad hoc sources (which
+ // are commonly marked as such because they are #include'ed).
+ //
+ optional<bool> um;
+
+ if (l)
+ {
+ const string& v (cast<string> (l));
+
+ if (v == "match")
+ um = true;
+ else if (v == "execute")
+ um = false;
+ else if (v != "false" && v != "true")
+ {
+ fail << "unrecognized update variable value '" << v
+ << "' specified for prerequisite " << p.prerequisite;
+ }
+ }
+
+ // Skip excluded and ad hoc (unless updated during match) on this
+ // pass.
+ //
+ if (pi != include_type::normal)
+ {
+ if (a == perform_update_id && pi == include_type::adhoc)
+ {
+ // By default update ad hoc headers/sources during match (see
+ // above).
+ //
+#if 1
+ if (!um)
+ um = (p.is_a (x_src) || p.is_a<c> () ||
+ (x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && (p.is_a (*x_obj) || p.is_a<m> ())) ||
+ x_header (p, true));
+#endif
+
+ if (*um)
+ {
+ pto.target = &p.search (t); // mark 0
+ pto.include |= prerequisite_target::include_udm;
+ update_match = true;
+ }
+ }
- if (pi != include_type::normal) // Skip excluded and ad hoc.
continue;
+ }
+
+ const target*& pt (pto);
- // Mark:
- // 0 - lib
+ // Mark (2 bits):
+ //
+ // 0 - lib or update during match
// 1 - src
// 2 - mod
- // 3 - obj/bmi and also lib not to be cleaned
+ // 3 - obj/bmi and also lib not to be cleaned (and other stuff)
//
- uint8_t m (0);
+ uint8_t mk (0);
bool mod (x_mod != nullptr && p.is_a (*x_mod));
+ bool hdr (false);
- if (mod || p.is_a (x_src) || p.is_a<c> ())
+ if (mod ||
+ p.is_a (x_src) || p.is_a<c> () ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && (p.is_a (*x_obj) || p.is_a<m> ())))
{
binless = binless && (mod ? user_binless : false);
@@ -976,8 +1088,8 @@ namespace build2
// be the group -- we will pick a member in part 2 below.
//
pair<target&, ulock> r (
- search_locked (
- t, rtt, d, dir_path (), *cp.tk.name, nullptr, cp.scope));
+ search_new_locked (
+ ctx, rtt, d, dir_path (), *cp.tk.name, nullptr, cp.scope));
// If we shouldn't clean obj{}, then it is fair to assume we
// shouldn't clean the source either (generated source will be in
@@ -1013,7 +1125,7 @@ namespace build2
}
pt = &r.first;
- m = mod ? 2 : 1;
+ mk = mod ? 2 : 1;
}
else if (p.is_a<libx> () ||
p.is_a<liba> () ||
@@ -1022,12 +1134,8 @@ namespace build2
{
// Handle imported libraries.
//
- // Note that since the search is rule-specific, we don't cache the
- // target in the prerequisite.
- //
if (p.proj ())
- pt = search_library (
- a, sys_lib_dirs, usr_lib_dirs, p.prerequisite);
+ pt = search_library (a, sys_lib_dirs, usr_lib_dirs, p.prerequisite);
// The rest is the same basic logic as in search_and_match().
//
@@ -1035,13 +1143,17 @@ namespace build2
pt = &p.search (t);
if (skip (*pt))
- m = 3; // Mark so it is not matched.
+ mk = 3; // Mark so it is not matched.
// If this is the lib{}/libul{} group, then pick the appropriate
- // member.
+ // member. Also note this in prerequisite_target::include (used
+ // by process_libraries()).
//
if (const libx* l = pt->is_a<libx> ())
+ {
pt = link_member (*l, a, li);
+ pto.include |= include_group;
+ }
}
else
{
@@ -1054,8 +1166,11 @@ namespace build2
// Windows module definition (.def). For other platforms (and for
// static libraries) treat it as an ordinary prerequisite.
//
- else if (p.is_a<def> () && tclass == "windows" && ot != otype::a)
+ else if (p.is_a<def> ())
{
+ if (tclass != "windows" || ot == otype::a)
+ continue;
+
pt = &p.search (t);
}
//
@@ -1065,11 +1180,14 @@ namespace build2
//
else
{
- if (!p.is_a<objx> () && !p.is_a<bmix> () && !x_header (p, true))
+ if (!p.is_a<objx> () &&
+ !p.is_a<bmix> () &&
+ !(hdr = x_header (p, true)))
{
// @@ Temporary hack until we get the default outer operation
// for update. This allows operations like test and install to
- // skip such tacked on stuff.
+ // skip such tacked on stuff. @@ This doesn't feel temporary
+ // anymore...
//
// Note that ad hoc inputs have to be explicitly marked with the
// include=adhoc prerequisite-specific variable.
@@ -1097,21 +1215,58 @@ namespace build2
!pt->is_a<hbmix> () &&
cast_false<bool> ((*pt)[b_binless])));
- m = 3;
+ mk = 3;
}
if (user_binless && !binless)
fail << t << " cannot be binless due to " << p << " prerequisite";
- mark (pt, m);
+ // Upgrade update during match prerequisites to mark 0 (see above for
+ // details).
+ //
+ if (a == perform_update_id)
+ {
+ // By default update headers during match (see above).
+ //
+#if 1
+ if (!um)
+ um = hdr;
+#endif
+
+ if (*um)
+ {
+ if (mk != 3)
+ fail << "unable to update during match prerequisite " << p <<
+ info << "updating this type of prerequisites during match is "
+ << "not supported by this rule";
+
+ mk = 0;
+ pto.include |= prerequisite_target::include_udm;
+ update_match = true;
+ }
+ }
+
+ mark (pt, mk);
}
- // Match lib{} (the only unmarked) in parallel and wait for completion.
+ // Match lib{} first and then update during match (the only unmarked) in
+ // parallel and wait for completion. We need to match libraries first
+ // because matching generated headers/sources may lead to matching some
+ // of the libraries (for example, if generation requires some of the
+ // metadata; think poptions needed by Qt moc).
//
- match_members (a, t, pts, start);
+ {
+ auto mask (prerequisite_target::include_udm);
+
+ match_members (a, t, pts, start, {mask, 0});
+
+ if (update_match)
+ match_members (a, t, pts, start, {mask, mask});
+ }
// Check if we have any binful utility libraries.
//
+ bool rec_binless (false); // Recursively-binless.
if (binless)
{
if (const libux* l = find_binful (a, t, li))
@@ -1122,8 +1277,128 @@ namespace build2
fail << t << " cannot be binless due to binful " << *l
<< " prerequisite";
}
+
+ // See if we are recursively-binless.
+ //
+ if (binless)
+ {
+ rec_binless = true;
+
+ for (const target* pt: t.prerequisite_targets[a])
+ {
+ if (pt == nullptr || unmark (pt) != 0) // See above.
+ continue;
+
+ const file* ft;
+ if ((ft = pt->is_a<libs> ()) ||
+ (ft = pt->is_a<liba> ()) ||
+ (ft = pt->is_a<libux> ()))
+ {
+ if (ft->path ().empty ()) // Binless.
+ {
+ // The same lookup as in process_libraries().
+ //
+ if (const string* t = cast_null<string> (
+ ft->state[a].lookup_original (
+ c_type, true /* target_only */).first))
+ {
+ if (recursively_binless (*t))
+ continue;
+ }
+ }
+
+ rec_binless = false;
+ break;
+ }
+ }
+
+ // Another thing we must check is for the presence of any simple
+ // libraries (-lm, shell32.lib, etc) in *.export.libs. See
+ // process_libraries() for details.
+ //
+ if (rec_binless)
+ {
+ auto find = [&t, &bs] (const variable& v) -> lookup
+ {
+ return t.lookup_original (v, false, &bs).first;
+ };
+
+ auto has_simple = [] (lookup l)
+ {
+ if (const auto* ns = cast_null<vector<name>> (l))
+ {
+ for (auto i (ns->begin ()), e (ns->end ()); i != e; ++i)
+ {
+ if (i->pair)
+ ++i;
+ else if (i->simple ()) // -l<name>, etc.
+ return true;
+ }
+ }
+
+ return false;
+ };
+
+ if (lt.shared_library ()) // process_libraries()::impl == false
+ {
+ if (has_simple (find (x_export_libs)) ||
+ has_simple (find (c_export_libs)))
+ rec_binless = false;
+ }
+ else // process_libraries()::impl == true
+ {
+ lookup x (find (x_export_impl_libs));
+ lookup c (find (c_export_impl_libs));
+
+ if (x.defined () || c.defined ())
+ {
+ if (has_simple (x) || has_simple (c))
+ rec_binless = false;
+ }
+ else
+ {
+ // These are strings and we assume if either is defined and
+ // not empty, then we have simple libraries.
+ //
+ if (((x = find (x_libs)) && !x->empty ()) ||
+ ((c = find (c_libs)) && !c->empty ()))
+ rec_binless = false;
+ }
+ }
+ }
+ }
+ }
+
+ // Set the library type (C, C++, binless) as rule-specific variable.
+ //
+ if (lt.library ())
+ {
+ string v (x);
+
+ if (rec_binless)
+ v += ",recursively-binless";
+ else if (binless)
+ v += ",binless";
+
+ t.state[a].assign (c_type) = move (v);
}
+ // If we have any update during match prerequisites, now is the time to
+ // update them. Note that we have to do it before any further matches
+ // since they may rely on these prerequisites already being updated (for
+ // example, object file matches may need the headers to be already
+ // updated). We also must do it after matching all our prerequisite
+ // libraries since they may generate headers that we depend upon.
+ //
+ // Note that we ignore the result and whether it renders us out of date,
+ // leaving it to the common execute logic in perform_update().
+ //
+ // Note also that update_during_match_prerequisites() spoils
+ // prerequisite_target::data.
+ //
+ if (update_match)
+ update_during_match_prerequisites (trace, a, t);
+
// Now that we know for sure whether we are binless, derive file name(s)
// and add ad hoc group members. Note that for binless we still need the
// .pc member (whose name depends on the libray prefix) so we take care
@@ -1267,11 +1542,26 @@ namespace build2
if (wasm.path ().empty ())
wasm.derive_path ();
+ // We don't want to print this member at level 1 diagnostics.
+ //
+ wasm.state[a].assign (ctx.var_backlink) = names {
+ name ("group"), name ("false")};
+
// If we have -pthread then we get additional .worker.js file
// which is used for thread startup. In a somewhat hackish way we
// represent it as an exe{} member to make sure it gets installed
// next to the main .js file.
//
+ // @@ Note that our recommendation is to pass -pthread in *.libs
+ // but checking that is not straightforward (it could come from
+ // one of the libraries that we are linking). We could have called
+ // append_libraries() (similar to $x.lib_libs()) and then looked
+ // there. But this is quite heavy handed and it's not clear this
+ // is worth the trouble since the -pthread support in Emscripten
+ // is quite high-touch (i.e., it's not like we can write a library
+ // that starts some threads and then run its test as on any other
+ // POSIX platform).
+ //
if (find_option ("-pthread", cmode) ||
find_option ("-pthread", t, c_loptions) ||
find_option ("-pthread", t, x_loptions))
@@ -1280,6 +1570,11 @@ namespace build2
if (worker.path ().empty ())
worker.derive_path ();
+
+ // We don't want to print this member at level 1 diagnostics.
+ //
+ worker.state[a].assign (ctx.var_backlink) = names {
+ name ("group"), name ("false")};
}
}
@@ -1288,22 +1583,31 @@ namespace build2
//
if (!binless && ot != otype::a && tsys == "win32-msvc")
{
- if (find_option ("/DEBUG", t, c_loptions, true) ||
- find_option ("/DEBUG", t, x_loptions, true))
+ const string* o;
+ if ((o = find_option_prefix ("/DEBUG", t, c_loptions, true)) != nullptr ||
+ (o = find_option_prefix ("/DEBUG", t, x_loptions, true)) != nullptr)
{
- const target_type& tt (*bs.find_target_type ("pdb"));
+ if (icasecmp (*o, "/DEBUG:NONE") != 0)
+ {
+ const target_type& tt (*bs.find_target_type ("pdb"));
- // We call the target foo.{exe,dll}.pdb rather than just foo.pdb
- // because we can have both foo.exe and foo.dll in the same
- // directory.
- //
- file& pdb (add_adhoc_member<file> (t, tt, e));
+ // We call the target foo.{exe,dll}.pdb rather than just
+ // foo.pdb because we can have both foo.exe and foo.dll in the
+ // same directory.
+ //
+ file& pdb (add_adhoc_member<file> (t, tt, e));
- // Note that the path is derived from the exe/dll path (so it
- // will include the version in case of a dll).
- //
- if (pdb.path ().empty ())
- pdb.derive_path (t.path ());
+ // Note that the path is derived from the exe/dll path (so it
+ // will include the version in case of a dll).
+ //
+ if (pdb.path ().empty ())
+ pdb.derive_path (t.path ());
+
+ // We don't want to print this member at level 1 diagnostics.
+ //
+ pdb.state[a].assign (ctx.var_backlink) = names {
+ name ("group"), name ("false")};
+ }
}
}
@@ -1325,6 +1629,13 @@ namespace build2
// we will use its bin.lib to decide what will be installed and in
// perform_update() we will confirm that it is actually installed.
//
+ // This, of course, works only if we actually have explicit lib{}.
+ // But the user could only have liba{} (common in testing frameworks
+ // that provide main()) or only libs{} (e.g., plugin that can also
+ // be linked). It's also theoretically possible to have both liba{}
+ // and libs{} but no lib{}, in which case it feels correct not to
+ // generate the common file at all.
+ //
if (ot != otype::e)
{
// Note that here we always use the lib name prefix, even on
@@ -1336,7 +1647,13 @@ namespace build2
// Note also that the order in which we are adding these members
// is important (see add_addhoc_member() for details).
//
- if (ot == otype::a || !link_members (rs).a)
+ if (operator>= (t.group->decl, target_decl::implied) // @@ VC14
+ ? ot == (link_members (rs).a ? otype::a : otype::s)
+ : search_existing (ctx,
+ ot == otype::a
+ ? libs::static_type
+ : liba::static_type,
+ t.dir, t.out, t.name) == nullptr)
{
auto& pc (add_adhoc_member<pc> (t));
@@ -1369,14 +1686,12 @@ namespace build2
// exists (windows_rpath_assembly() does take care to clean it up
// if not used).
//
-#ifdef _WIN32
- target& dir =
-#endif
+ target& dir (
add_adhoc_member (t,
fsdir::static_type,
path_cast<dir_path> (t.path () + ".dlls"),
t.out,
- string () /* name */);
+ string () /* name */));
// By default our backlinking logic will try to symlink the
// directory and it can even be done on Windows using junctions.
@@ -1390,9 +1705,15 @@ namespace build2
// Wine. So we only resort to copy-link'ing if we are running on
// Windows.
//
+ // We also don't want to print this member at level 1 diagnostics.
+ //
+ dir.state[a].assign (ctx.var_backlink) = names {
#ifdef _WIN32
- dir.state[a].assign (ctx.var_backlink) = "copy";
+ name ("copy"), name ("false")
+#else
+ name ("group"), name ("false")
#endif
+ };
}
}
}
@@ -1414,23 +1735,24 @@ namespace build2
continue;
// New mark:
+ // 0 - already matched
// 1 - completion
// 2 - verification
//
- uint8_t m (unmark (pt));
+ uint8_t mk (unmark (pt));
- if (m == 3) // obj/bmi or lib not to be cleaned
+ if (mk == 3) // obj/bmi or lib not to be cleaned
{
- m = 1; // Just completion.
+ mk = 1; // Just completion.
// Note that if this is a library not to be cleaned, we keep it
// marked for completion (see the next phase).
}
- else if (m == 1 || m == 2) // Source/module chain.
+ else if (mk == 1 || mk == 2) // Source/module chain.
{
- bool mod (m == 2);
+ bool mod (mk == 2); // p is_a x_mod
- m = 1;
+ mk = 1;
const target& rt (*pt);
bool group (!p.prerequisite.belongs (t)); // Group's prerequisite.
@@ -1462,7 +1784,21 @@ namespace build2
if (!pt->has_prerequisites () &&
(!group || !rt.has_prerequisites ()))
{
- prerequisites ps {p.as_prerequisite ()}; // Source.
+ prerequisites ps;
+
+ // Add source.
+ //
+ // Remove the update variable (we may have stray update=execute
+ // that was specified together with the header).
+ //
+ {
+ prerequisite pc (p.as_prerequisite ());
+
+ if (!pc.vars.empty ())
+ pc.vars.erase (*ctx.var_update);
+
+ ps.push_back (move (pc));
+ }
// Add our lib*{} (see the export.* machinery for details) and
// bmi*{} (both original and chained; see module search logic)
@@ -1481,7 +1817,7 @@ namespace build2
// might depend on the imported one(s) which we will never "see"
// unless we start with this library.
//
- // Note: have similar logic in make_module_sidebuild().
+ // Note: have similar logic in make_{module,header}_sidebuild().
//
size_t j (start);
for (prerequisite_member p: group_prerequisite_members (a, t))
@@ -1567,7 +1903,11 @@ namespace build2
// Most of the time we will have just a single source so fast-
// path that case.
//
- if (p1.is_a (mod ? *x_mod : x_src) || p1.is_a<c> ())
+ if (mod
+ ? p1.is_a (*x_mod)
+ : (p1.is_a (x_src) || p1.is_a<c> () ||
+ (x_asp != nullptr && p1.is_a (*x_asp)) ||
+ (x_obj != nullptr && (p1.is_a (*x_obj) || p1.is_a<m> ()))))
{
src = true;
continue; // Check the rest of the prerequisites.
@@ -1580,8 +1920,12 @@ namespace build2
p1.is_a<libx> () ||
p1.is_a<liba> () || p1.is_a<libs> () || p1.is_a<libux> () ||
p1.is_a<bmi> () || p1.is_a<bmix> () ||
- (p.is_a (mod ? *x_mod : x_src) && x_header (p1)) ||
- (p.is_a<c> () && p1.is_a<h> ()))
+ ((mod ||
+ p.is_a (x_src) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj))) && x_header (p1)) ||
+ ((p.is_a<c> () ||
+ (x_obj != nullptr && p.is_a<m> ())) && p1.is_a<h> ()))
continue;
fail << "synthesized dependency for prerequisite " << p
@@ -1594,14 +1938,14 @@ namespace build2
if (!src)
fail << "synthesized dependency for prerequisite " << p
<< " would be incompatible with existing target " << *pt <<
- info << "no existing c/" << x_name << " source prerequisite" <<
+ info << "no existing c/" << x_lang << " source prerequisite" <<
info << "specify corresponding " << rtt.name << "{} "
<< "dependency explicitly";
- m = 2; // Needs verification.
+ mk = 2; // Needs verification.
}
}
- else // lib*{}
+ else // lib*{} or update during match
{
// If this is a static library, see if we need to link it whole.
// Note that we have to do it after match since we rely on the
@@ -1610,6 +1954,8 @@ namespace build2
bool u;
if ((u = pt->is_a<libux> ()) || pt->is_a<liba> ())
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (ctx.var_pool["bin.whole"]); // @@ Cache.
// See the bin module for the lookup semantics discussion. Note
@@ -1619,7 +1965,7 @@ namespace build2
lookup l (p.prerequisite.vars[var]);
if (!l.defined ())
- l = pt->lookup_original (var, true).first;
+ l = pt->lookup_original (var, true /* target_only */).first;
if (!l.defined ())
{
@@ -1638,7 +1984,7 @@ namespace build2
}
}
- mark (pt, m);
+ mark (pt, mk);
}
// Process prerequisites, pass 3: match everything and verify chains.
@@ -1651,10 +1997,10 @@ namespace build2
i = start;
for (prerequisite_member p: group_prerequisite_members (a, t))
{
- bool adhoc (pts[i].adhoc);
+ bool adhoc (pts[i].adhoc ());
const target*& pt (pts[i++]);
- uint8_t m;
+ uint8_t mk;
if (pt == nullptr)
{
@@ -1664,10 +2010,15 @@ namespace build2
continue;
pt = &p.search (t);
- m = 1; // Mark for completion.
+ mk = 1; // Mark for completion.
}
- else if ((m = unmark (pt)) != 0)
+ else
{
+ mk = unmark (pt);
+
+ if (mk == 0)
+ continue; // Already matched.
+
// If this is a library not to be cleaned, we can finally blank it
// out.
//
@@ -1679,7 +2030,7 @@ namespace build2
}
match_async (a, *pt, ctx.count_busy (), t[a].task_count);
- mark (pt, m);
+ mark (pt, mk);
}
wg.wait ();
@@ -1694,15 +2045,15 @@ namespace build2
// Skipped or not marked for completion.
//
- uint8_t m;
- if (pt == nullptr || (m = unmark (pt)) == 0)
+ uint8_t mk;
+ if (pt == nullptr || (mk = unmark (pt)) == 0)
continue;
- build2::match (a, *pt);
+ match_complete (a, *pt);
// Nothing else to do if not marked for verification.
//
- if (m == 1)
+ if (mk == 1)
continue;
// Finish verifying the existing dependency (which is now matched)
@@ -1714,7 +2065,11 @@ namespace build2
for (prerequisite_member p1: group_prerequisite_members (a, *pt))
{
- if (p1.is_a (mod ? *x_mod : x_src) || p1.is_a<c> ())
+ if (mod
+ ? p1.is_a (*x_mod)
+ : (p1.is_a (x_src) || p1.is_a<c> () ||
+ (x_asp != nullptr && p1.is_a (*x_asp)) ||
+ (x_obj != nullptr && (p1.is_a (*x_obj) || p1.is_a<m> ()))))
{
// Searching our own prerequisite is ok, p1 must already be
// resolved.
@@ -1750,14 +2105,11 @@ namespace build2
switch (a)
{
- case perform_update_id: return [this] (action a, const target& t)
- {
- return perform_update (a, t);
- };
- case perform_clean_id: return [this] (action a, const target& t)
- {
- return perform_clean (a, t);
- };
+ // Keep the recipe (which is match_data) after execution to allow the
+ // install rule to examine it.
+ //
+ case perform_update_id: t.keep_data (a); // Fall through.
+ case perform_clean_id: return md;
default: return noop_recipe; // Configure update.
}
}
@@ -1804,7 +2156,7 @@ namespace build2
const target* const* lc,
const small_vector<reference_wrapper<const string>, 2>& ns,
lflags f,
- const string* type, // cc.type
+ const string* type, // Whole cc.type in the <lang>[,...] form.
bool)
{
// Note: see also make_header_sidebuild().
@@ -1825,6 +2177,13 @@ namespace build2
// that range of elements to the end of args. See GitHub issue #114
// for details.
//
+ // One case where we can prune the graph is if the library is
+ // recursively-binless. It's tempting to wish that we can do the same
+ // just for binless, but alas that's not the case: we have to hoist
+ // its binful interface dependency because, for example, it must
+ // appear after the preceding static library of which this binless
+ // library is a dependency.
+ //
// From the process_libraries() semantics we know that this callback
// is always called and always after the options callbacks.
//
@@ -1836,8 +2195,13 @@ namespace build2
{
// Hoist the elements corresponding to this library to the end.
// Note that we cannot prune the traversal since we need to see the
- // last occurrence of each library.
+ // last occurrence of each library, unless the library is
+ // recursively-binless (in which case there will be no need to
+ // hoist since there can be no libraries among the elements).
//
+ if (type != nullptr && recursively_binless (*type))
+ return false;
+
d.ls.hoist (d.args, *al);
return true;
}
@@ -1883,9 +2247,12 @@ namespace build2
// install or both not. We can only do this if the library is build
// by our link_rule.
//
- else if (d.for_install && type != nullptr && *type != "cc")
+ else if (d.for_install &&
+ type != nullptr &&
+ *type != "cc" &&
+ type->compare (0, 3, "cc,") != 0)
{
- auto& md (l->data<link_rule::match_data> ());
+ auto& md (l->data<link_rule::match_data> (d.a));
assert (md.for_install); // Must have been executed.
// The user will get the target name from the context info.
@@ -2043,6 +2410,8 @@ namespace build2
//
if (const target* g = exp && l.is_a<libs> () ? l.group : &l)
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (
com
? (exp ? c_export_loptions : c_loptions)
@@ -2061,7 +2430,9 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la,
- lf, imp, lib, opt, self,
+ lf, imp, lib, opt,
+ self,
+ false /* proc_opt_group */,
lib_cache);
}
@@ -2075,9 +2446,14 @@ namespace build2
// Use -rpath-link only on targets that support it (Linux, *BSD). Note
// that we don't really need it for top-level libraries.
//
+ // Note that more recent versions of FreeBSD are using LLVM lld without
+ // any mentioning of -rpath-link in the man pages.
+ //
+ auto have_link = [this] () {return tclass == "linux" || tclass == "bsd";};
+
if (link)
{
- if (tclass != "linux" && tclass != "bsd")
+ if (!have_link ())
return;
}
@@ -2107,8 +2483,56 @@ namespace build2
{
rpathed_libraries& ls;
strings& args;
- bool link;
- } d {ls, args, link};
+ bool rpath;
+ bool rpath_link;
+ } d {ls, args, false, false};
+
+ if (link)
+ d.rpath_link = true;
+ else
+ {
+ // While one would naturally expect -rpath to be a superset of
+ // -rpath-link, according to GNU ld:
+ //
+ // "The -rpath option is also used when locating shared objects which
+ // are needed by shared objects explicitly included in the link; see
+ // the description of the -rpath-link option. Searching -rpath in
+ // this way is only supported by native linkers and cross linkers
+ // which have been configured with the --with-sysroot option."
+ //
+ // So we check if this is cross-compilation and request both options
+ // if that's the case (we have no easy way of detecting whether the
+ // linker has been configured with the --with-sysroot option, whatever
+ // that means, so we will just assume the worst case).
+ //
+ d.rpath = true;
+
+ if (have_link ())
+ {
+ // Detecting cross-compilation is not as easy as it seems. Comparing
+ // complete target triplets proved too strict. For example, we may be
+ // running on x86_64-apple-darwin17.7.0 while the compiler is
+ // targeting x86_64-apple-darwin17.3.0. Also, there is the whole i?86
+ // family of CPUs which, at least for linking, should probably be
+ // considered the same.
+ //
+ const target_triplet& h (*bs.ctx.build_host);
+ const target_triplet& t (ctgt);
+
+ auto x86 = [] (const string& c)
+ {
+ return (c.size () == 4 &&
+ c[0] == 'i' &&
+ (c[1] >= '3' && c[1] <= '6') &&
+ c[2] == '8' &&
+ c[3] == '6');
+ };
+
+ if (t.system != h.system ||
+ (t.cpu != h.cpu && !(x86 (t.cpu) && x86 (h.cpu))))
+ d.rpath_link = true;
+ }
+ }
auto lib = [&d, this] (
const target* const* lc,
@@ -2130,13 +2554,22 @@ namespace build2
auto append = [&d] (const string& f)
{
- string o (d.link ? "-Wl,-rpath-link," : "-Wl,-rpath,");
-
size_t p (path::traits_type::rfind_separator (f));
assert (p != string::npos);
- o.append (f, 0, (p != 0 ? p : 1)); // Don't include trailing slash.
- d.args.push_back (move (o));
+ if (d.rpath)
+ {
+ string o ("-Wl,-rpath,");
+ o.append (f, 0, (p != 0 ? p : 1)); // Don't include trailing slash.
+ d.args.push_back (move (o));
+ }
+
+ if (d.rpath_link)
+ {
+ string o ("-Wl,-rpath-link,");
+ o.append (f, 0, (p != 0 ? p : 1));
+ d.args.push_back (move (o));
+ }
};
if (l != nullptr)
@@ -2213,7 +2646,10 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0 /* lflags */,
- imp, lib, nullptr, false /* self */, lib_cache);
+ imp, lib, nullptr,
+ false /* self */,
+ false /* proc_opt_group */,
+ lib_cache);
}
void link_rule::
@@ -2275,7 +2711,7 @@ namespace build2
// Filter link.exe noise (msvc.cxx).
//
void
- msvc_filter_link (ifdstream&, const file&, otype);
+ msvc_filter_link (diag_buffer&, const file&, otype);
// Translate target CPU to the link.exe/lib.exe /MACHINE option.
//
@@ -2283,7 +2719,7 @@ namespace build2
msvc_machine (const string& cpu); // msvc.cxx
target_state link_rule::
- perform_update (action a, const target& xt) const
+ perform_update (action a, const target& xt, match_data& md) const
{
tracer trace (x, "link_rule::perform_update");
@@ -2295,8 +2731,6 @@ namespace build2
const scope& bs (t.base_scope ());
const scope& rs (*bs.root_scope ());
- match_data& md (t.data<match_data> ());
-
// Unless the outer install rule signalled that this is update for
// install, signal back that we've performed plain update.
//
@@ -2325,14 +2759,33 @@ namespace build2
// Note that execute_prerequisites() blanks out all the ad hoc
// prerequisites so we don't need to worry about them from now on.
//
+ // There is an interesting trade-off between the straight and reverse
+ // execution. With straight we may end up with inaccurate progress if
+ // most of our library prerequisites (typically specified last) are
+ // already up to date. In this case, the progress will first increase
+ // slowly as we compile this target's source files and then jump
+ // straight to 100% as we "realize" that all the libraries (and all
+ // their prerequisites) are already up to date.
+ //
+ // Switching to reverse fixes this but messes up incremental building:
+ // now instead of starting to compile source files right away, we will
+ // first spend some time making sure all the libraries are up to date
+ // (which, in case of an error in the source code, will be a complete
+ // waste).
+ //
+ // There doesn't seem to be an easy way to distinguish between
+ // incremental and from-scratch builds and on balance fast incremental
+ // builds feel more important.
+ //
target_state ts;
- if (optional<target_state> s =
- execute_prerequisites (a,
- t,
- mt,
- [] (const target&, size_t) {return false;}))
+ if (optional<target_state> s = execute_prerequisites (
+ a, t,
+ mt,
+ [] (const target&, size_t) {return false;}))
+ {
ts = *s;
+ }
else
{
// An ad hoc prerequisite renders us out-of-date. Let's update from
@@ -2346,7 +2799,7 @@ namespace build2
// those that don't match. Note that we have to do it after updating
// prerequisites to keep the dependency counts straight.
//
- if (const variable* var_fi = ctx.var_pool.find ("for_install"))
+ if (const variable* var_fi = rs.var_pool ().find ("for_install"))
{
// Parallel prerequisites/prerequisite_targets loop.
//
@@ -2379,6 +2832,13 @@ namespace build2
// install or no install, the reason is unless and until we are updating
// for install, we have no idea where-to things will be installed.
//
+ // There is a further complication: we may have no intention of
+ // installing the library but still need to update it for install (see
+ // install_scope() for background). In which case we may still not have
+ // the installation directories. We handle this in pkconfig_save() by
+ // skipping the generation of .pc files (and letting the install rule
+ // complain if we do end up trying to install them).
+ //
if (for_install && lt.library () && !lt.utility)
{
bool la (lt.static_library ());
@@ -2392,8 +2852,12 @@ namespace build2
if (!m->is_a (la ? pca::static_type : pcs::static_type))
{
- if (t.group->matched (a))
+ if (operator>= (t.group->decl, target_decl::implied) // @@ VC14
+ ? t.group->matched (a)
+ : true)
+ {
pkgconfig_save (a, t, la, true /* common */, binless);
+ }
else
// Mark as non-existent not to confuse the install rule.
//
@@ -2505,14 +2969,19 @@ namespace build2
try
{
+ // We assume that what we write to stdin is small enough to
+ // fit into the pipe's buffer without blocking.
+ //
process pr (rc,
args,
- -1 /* stdin */,
- 1 /* stdout */,
- 2 /* stderr */,
- nullptr /* cwd */,
+ -1 /* stdin */,
+ 1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */,
+ nullptr /* cwd */,
env_ptrs.empty () ? nullptr : env_ptrs.data ());
+ diag_buffer dbuf (ctx, args[0], pr);
+
try
{
ofdstream os (move (pr.out_fd));
@@ -2536,7 +3005,8 @@ namespace build2
// was caused by that and let run_finish() deal with it.
}
- run_finish (args, pr);
+ dbuf.read ();
+ run_finish (dbuf, args, pr, 2 /* verbosity */);
}
catch (const process_error& e)
{
@@ -2591,6 +3061,8 @@ namespace build2
{
// For VC we use link.exe directly.
//
+ // Note: go straight for the public variable pool.
+ //
const string& cs (
cast<string> (
rs[tsys == "win32-msvc"
@@ -2684,6 +3156,9 @@ namespace build2
// probably safe to assume that the two came from the same version
// of binutils/LLVM.
//
+ // @@ Note also that GNU ar deprecated -T in favor of --thin in
+ // version 2.38.
+ //
if (lt.utility)
{
const string& id (cast<string> (rs["bin.ar.id"]));
@@ -2797,10 +3272,72 @@ namespace build2
rpath_libraries (sargs, bs, a, t, li, for_install /* link */);
lookup l;
-
if ((l = t["bin.rpath"]) && !l->empty ())
+ {
+ // See if we need to make the specified paths relative using the
+ // $ORIGIN (Linux, BSD) or @loader_path (Mac OS) mechanisms.
+ //
+ optional<dir_path> origin;
+ if (for_install && cast_false<bool> (rs["install.relocatable"]))
+ {
+ // Note that both $ORIGIN and @loader_path will be expanded to
+ // the path of the binary that we are building (executable or
+ // shared library) as opposed to top-level executable.
+ //
+ path p (install::resolve_file (t));
+
+ // If the file is not installable then the install.relocatable
+ // semantics does not apply, naturally.
+ //
+ if (!p.empty ())
+ origin = p.directory ();
+ }
+
+ bool origin_used (false);
for (const dir_path& p: cast<dir_paths> (l))
- sargs.push_back ("-Wl,-rpath," + p.string ());
+ {
+ string o ("-Wl,-rpath,");
+
+ // Note that we only rewrite absolute paths so if the user
+ // specified $ORIGIN or @loader_path manually, we will pass it
+ // through as is.
+ //
+ if (origin && p.absolute ())
+ {
+ dir_path l;
+ try
+ {
+ l = p.relative (*origin);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make rpath " << p << " relative to "
+ << *origin <<
+ info << "required for relocatable installation";
+ }
+
+ o += (tclass == "macos" ? "@loader_path" : "$ORIGIN");
+
+ if (!l.empty ())
+ {
+ o += path_traits::directory_separator;
+ o += l.string ();
+ }
+
+ origin_used = true;
+ }
+ else
+ o += p.string ();
+
+ sargs.push_back (move (o));
+ }
+
+ // According to the Internet, `-Wl,-z,origin` is not needed except
+ // potentially for older BSDs.
+ //
+ if (origin_used && tclass == "bsd")
+ sargs.push_back ("-Wl,-z,origin");
+ }
if ((l = t["bin.rpath_link"]) && !l->empty ())
{
@@ -2834,25 +3371,24 @@ namespace build2
// Extra system library dirs (last).
//
- assert (sys_lib_dirs_extra <= sys_lib_dirs.size ());
+ assert (sys_lib_dirs_mode + sys_lib_dirs_extra <= sys_lib_dirs.size ());
+
+ // Note that the mode options are added as part of cmode.
+ //
+ auto b (sys_lib_dirs.begin () + sys_lib_dirs_mode);
+ auto x (b + sys_lib_dirs_extra);
if (tsys == "win32-msvc")
{
// If we have no LIB environment variable set, then we add all of
// them. But we want extras to come first.
//
- // Note that the mode options are added as part of cmode.
- //
- auto b (sys_lib_dirs.begin () + sys_lib_dirs_mode);
- auto m (sys_lib_dirs.begin () + sys_lib_dirs_extra);
- auto e (sys_lib_dirs.end ());
-
- for (auto i (m); i != e; ++i)
+ for (auto i (b); i != x; ++i)
sargs1.push_back ("/LIBPATH:" + i->string ());
if (!getenv ("LIB"))
{
- for (auto i (b); i != m; ++i)
+ for (auto i (x), e (sys_lib_dirs.end ()); i != e; ++i)
sargs1.push_back ("/LIBPATH:" + i->string ());
}
@@ -2863,7 +3399,7 @@ namespace build2
append_option_values (
args,
"-L",
- sys_lib_dirs.begin () + sys_lib_dirs_extra, sys_lib_dirs.end (),
+ b, x,
[] (const dir_path& d) {return d.string ().c_str ();});
}
}
@@ -3048,6 +3584,10 @@ namespace build2
//
path relt (relative (tp));
+ path reli; // Import library.
+ if (lt.shared_library () && (tsys == "win32-msvc" || tsys == "mingw32"))
+ reli = relative (find_adhoc_member<libi> (t)->path ());
+
const process_path* ld (nullptr);
if (lt.static_library ())
{
@@ -3179,7 +3719,7 @@ namespace build2
// derived from the import library by changing the extension.
// Lucky for us -- there is no option to name it.
//
- out2 += relative (find_adhoc_member<libi> (t)->path ()).string ();
+ out2 += reli.string ();
}
else
{
@@ -3192,14 +3732,17 @@ namespace build2
// If we have /DEBUG then name the .pdb file. It is an ad hoc group
// member.
//
- if (find_option ("/DEBUG", args, true))
+ if (const char* o = find_option_prefix ("/DEBUG", args, true))
{
- const file& pdb (
- *find_adhoc_member<file> (t, *bs.find_target_type ("pdb")));
+ if (icasecmp (o, "/DEBUG:NONE") != 0)
+ {
+ const file& pdb (
+ *find_adhoc_member<file> (t, *bs.find_target_type ("pdb")));
- out1 = "/PDB:";
- out1 += relative (pdb.path ()).string ();
- args.push_back (out1.c_str ());
+ out1 = "/PDB:";
+ out1 += relative (pdb.path ()).string ();
+ args.push_back (out1.c_str ());
+ }
}
out = "/OUT:" + relt.string ();
@@ -3213,6 +3756,8 @@ namespace build2
{
ld = &cpath;
+ append_diag_color_options (args);
+
// Add the option that triggers building a shared library and
// take care of any extras (e.g., import library).
//
@@ -3228,8 +3773,7 @@ namespace build2
// On Windows libs{} is the DLL and an ad hoc group member
// is the import library.
//
- const file& imp (*find_adhoc_member<libi> (t));
- out = "-Wl,--out-implib=" + relative (imp.path ()).string ();
+ out = "-Wl,--out-implib=" + reli.string ();
args.push_back (out.c_str ());
}
}
@@ -3387,12 +3931,25 @@ namespace build2
}
if (verb == 1)
- text << (lt.static_library () ? "ar " : "ld ") << t;
+ print_diag (lt.static_library () ? "ar" : "ld", t);
else if (verb == 2)
print_process (args);
+ // Do any necessary fixups to the command line to make it runnable.
+ //
+ // Notice the split in the diagnostics: at verbosity level 1 we print
+ // the "logical" command line while at level 2 and above -- what we are
+ // actually executing.
+ //
+ // We also need to save the original for the diag_buffer::close() call
+ // below if at verbosity level 1.
+ //
+ cstrings oargs;
+
// Adjust linker parallelism.
//
+ // Note that we are not going to bother with oargs for this.
+ //
string jobs_arg;
scheduler::alloc_guard jobs_extra;
@@ -3412,7 +3969,7 @@ namespace build2
auto i (find_option_prefix ("-flto", args.rbegin (), args.rend ()));
if (i != args.rend () && strcmp (*i, "-flto=auto") == 0)
{
- jobs_extra = scheduler::alloc_guard (ctx.sched, 0);
+ jobs_extra = scheduler::alloc_guard (*ctx.sched, 0);
jobs_arg = "-flto=" + to_string (1 + jobs_extra.n);
*i = jobs_arg.c_str ();
}
@@ -3431,7 +3988,7 @@ namespace build2
strcmp (*i, "-flto=thin") == 0 &&
!find_option_prefix ("-flto-jobs=", args))
{
- jobs_extra = scheduler::alloc_guard (ctx.sched, 0);
+ jobs_extra = scheduler::alloc_guard (*ctx.sched, 0);
jobs_arg = "-flto-jobs=" + to_string (1 + jobs_extra.n);
args.insert (i.base (), jobs_arg.c_str ()); // After -flto=thin.
}
@@ -3443,12 +4000,6 @@ namespace build2
}
}
- // Do any necessary fixups to the command line to make it runnable.
- //
- // Notice the split in the diagnostics: at verbosity level 1 we print
- // the "logical" command line while at level 2 and above -- what we are
- // actually executing.
- //
// On Windows we need to deal with the command line length limit. The
// best workaround seems to be passing (part of) the command line in an
// "options file" ("response file" in Microsoft's terminology). Both
@@ -3534,19 +4085,20 @@ namespace build2
fail << "unable to write to " << f << ": " << e;
}
+ if (verb == 1)
+ oargs = args;
+
// Replace input arguments with @file.
//
targ = '@' + f.string ();
args.resize (args_input);
args.push_back (targ.c_str());
args.push_back (nullptr);
-
- //@@ TODO: leave .t file if linker failed and verb > 2?
}
}
#endif
- if (verb > 2)
+ if (verb >= 3)
print_process (args);
// Remove the target file if any of the subsequent (after the linker)
@@ -3564,51 +4116,52 @@ namespace build2
{
// VC tools (both lib.exe and link.exe) send diagnostics to stdout.
// Also, link.exe likes to print various gratuitous messages. So for
- // link.exe we redirect stdout to a pipe, filter that noise out, and
- // send the rest to stderr.
+ // link.exe we filter that noise out.
//
// For lib.exe (and any other insane linker that may try to pull off
// something like this) we are going to redirect stdout to stderr.
// For sane compilers this should be harmless.
//
// Note that we don't need this for LLD's link.exe replacement which
- // is quiet.
+ // is thankfully quiet.
//
bool filter (tsys == "win32-msvc" &&
!lt.static_library () &&
cast<string> (rs["bin.ld.id"]) != "msvc-lld");
process pr (*ld,
- args.data (),
- 0 /* stdin */,
- (filter ? -1 : 2) /* stdout */,
- 2 /* stderr */,
- nullptr /* cwd */,
+ args,
+ 0 /* stdin */,
+ 2 /* stdout */,
+ diag_buffer::pipe (ctx, filter /* force */) /* stderr */,
+ nullptr /* cwd */,
env_ptrs.empty () ? nullptr : env_ptrs.data ());
+ diag_buffer dbuf (ctx, args[0], pr);
+
if (filter)
+ msvc_filter_link (dbuf, t, ot);
+
+ dbuf.read ();
+
{
- try
- {
- ifdstream is (
- move (pr.in_ofd), fdstream_mode::text, ifdstream::badbit);
+ bool e (pr.wait ());
- msvc_filter_link (is, t, ot);
+#ifdef _WIN32
+ // Keep the options file if we have shown it.
+ //
+ if (!e && verb >= 3)
+ trm.cancel ();
+#endif
- // If anything remains in the stream, send it all to stderr.
- // Note that the eof check is important: if the stream is at
- // eof, this and all subsequent writes to the diagnostics stream
- // will fail (and you won't see a thing).
- //
- if (is.peek () != ifdstream::traits_type::eof ())
- diag_stream_lock () << is.rdbuf ();
+ dbuf.close (oargs.empty () ? args : oargs,
+ *pr.exit,
+ 1 /* verbosity */);
- is.close ();
- }
- catch (const io_error&) {} // Assume exits with error.
+ if (!e)
+ throw failed ();
}
- run_finish (args, pr);
jobs_extra.deallocate ();
}
catch (const process_error& e)
@@ -3631,12 +4184,24 @@ namespace build2
throw failed ();
}
- // Clean up executable's import library (see above for details).
+ // Clean up executable's import library (see above for details). And
+ // make sure we have an import library for a shared library.
//
- if (lt.executable () && tsys == "win32-msvc")
+ if (tsys == "win32-msvc")
{
- try_rmfile (relt + ".lib", true /* ignore_errors */);
- try_rmfile (relt + ".exp", true /* ignore_errors */);
+ if (lt.executable ())
+ {
+ try_rmfile (relt + ".lib", true /* ignore_errors */);
+ try_rmfile (relt + ".exp", true /* ignore_errors */);
+ }
+ else if (lt.shared_library ())
+ {
+ if (!file_exists (reli,
+ false /* follow_symlinks */,
+ true /* ignore_error */))
+ fail << "linker did not produce import library " << reli <<
+ info << "perhaps this library does not export any symbols?";
+ }
}
// Set executable bit on the .js file so that it can be run with a
@@ -3668,10 +4233,13 @@ namespace build2
print_process (args);
if (!ctx.dry_run)
- run (rl,
+ {
+ run (ctx,
+ rl,
args,
- dir_path () /* cwd */,
+ 1 /* finish_verbosity */,
env_ptrs.empty () ? nullptr : env_ptrs.data ());
+ }
}
// For Windows generate (or clean up) rpath-emulating assembly.
@@ -3776,12 +4344,11 @@ namespace build2
}
target_state link_rule::
- perform_clean (action a, const target& xt) const
+ perform_clean (action a, const target& xt, match_data& md) const
{
const file& t (xt.as<file> ());
ltype lt (link_type (t));
- const match_data& md (t.data<match_data> ());
clean_extras extras;
clean_adhoc_extras adhoc_extras;
@@ -3854,5 +4421,25 @@ namespace build2
return perform_clean_extra (a, t, extras, adhoc_extras);
}
+
+ const target* link_rule::
+ import (const prerequisite_key& pk,
+ const optional<string>&,
+ const location&) const
+ {
+ tracer trace (x, "link_rule::import");
+
+ // @@ TODO: do we want to make metadata loading optional?
+ //
+ optional<dir_paths> usr_lib_dirs;
+ const target* r (search_library (nullopt /* action */,
+ sys_lib_dirs, usr_lib_dirs,
+ pk));
+
+ if (r == nullptr)
+ l4 ([&]{trace << "unable to find installed library " << pk;});
+
+ return r;
+ }
}
}
diff --git a/libbuild2/cc/link-rule.hxx b/libbuild2/cc/link-rule.hxx
index c6d06d2..9b491c2 100644
--- a/libbuild2/cc/link-rule.hxx
+++ b/libbuild2/cc/link-rule.hxx
@@ -18,11 +18,13 @@ namespace build2
{
namespace cc
{
- class LIBBUILD2_CC_SYMEXPORT link_rule: public simple_rule, virtual common
+ class LIBBUILD2_CC_SYMEXPORT link_rule: public rule, virtual common
{
public:
link_rule (data&&);
+ struct match_data;
+
struct match_result
{
bool seen_x = false;
@@ -46,18 +48,21 @@ namespace build2
match (action, const target&, const target*, otype, bool) const;
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&, const string&, match_extra&) const override;
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
target_state
- perform_update (action, const target&) const;
+ perform_update (action, const target&, match_data&) const;
target_state
- perform_clean (action, const target&) const;
+ perform_clean (action, const target&, match_data&) const;
- using simple_rule::match; // To make Clang happy.
+ virtual const target*
+ import (const prerequisite_key&,
+ const optional<string>&,
+ const location&) const override;
public:
// Library handling.
@@ -228,9 +233,9 @@ namespace build2
static void
functions (function_family&, const char*); // functions.cxx
- private:
- friend class install_rule;
- friend class libux_install_rule;
+ // Implementation details.
+ //
+ public:
// Shared library paths.
//
@@ -273,6 +278,9 @@ namespace build2
struct match_data
{
+ explicit
+ match_data (const link_rule& r): rule (r) {}
+
// The "for install" condition is signalled to us by install_rule when
// it is matched for the update operation. It also verifies that if we
// have already been executed, then it was for install.
@@ -307,10 +315,21 @@ namespace build2
size_t start; // Parallel prerequisites/prerequisite_targets start.
link_rule::libs_paths libs_paths;
+
+ const link_rule& rule;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ return a == perform_update_id
+ ? rule.perform_update (a, t, *this)
+ : rule.perform_clean (a, t, *this);
+ }
};
// Windows rpath emulation (windows-rpath.cxx).
//
+ private:
struct windows_dll
{
reference_wrapper<const string> dll;
diff --git a/libbuild2/cc/module.cxx b/libbuild2/cc/module.cxx
index 871cfb6..f33ddf4 100644
--- a/libbuild2/cc/module.cxx
+++ b/libbuild2/cc/module.cxx
@@ -30,6 +30,8 @@ namespace build2
{
tracer trace (x, "guess_init");
+ context& ctx (rs.ctx);
+
bool cc_loaded (cast_false<bool> (rs["cc.core.guess.loaded"]));
// Adjust module priority (compiler). Also order cc module before us
@@ -41,7 +43,10 @@ namespace build2
config::save_module (rs, x, 250);
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// Must already exist.
//
@@ -154,6 +159,7 @@ namespace build2
// we are now folding *.std options into mode options.
//
x_info = &build2::cc::guess (
+ ctx,
x, x_lang,
rs.root_extra->environment_checksum,
move (xc),
@@ -180,7 +186,8 @@ namespace build2
if (config_sub)
{
- ct = run<string> (3,
+ ct = run<string> (ctx,
+ 3,
*config_sub,
xi.target.c_str (),
[] (string& l, bool) {return move (l);});
@@ -265,9 +272,9 @@ namespace build2
//
if (!cc_loaded)
{
- // Prepare configuration hints.
+ // Prepare configuration hints (pretend it belongs to root scope).
//
- variable_map h (rs.ctx);
+ variable_map h (rs);
// Note that all these variables have already been registered.
//
@@ -376,7 +383,9 @@ namespace build2
//
if (!cast_false<bool> (rs["cc.core.config.loaded"]))
{
- variable_map h (rs.ctx);
+ // Prepare configuration hints (pretend it belongs to root scope).
+ //
+ variable_map h (rs);
if (!xi.bin_pattern.empty ())
h.assign ("config.bin.pattern") = xi.bin_pattern;
@@ -640,8 +649,8 @@ namespace build2
sys_hdr_dirs_mode = hdr_dirs.second;
sys_mod_dirs_mode = mod_dirs ? mod_dirs->second : 0;
- sys_lib_dirs_extra = lib_dirs.first.size ();
- sys_hdr_dirs_extra = hdr_dirs.first.size ();
+ sys_lib_dirs_extra = 0;
+ sys_hdr_dirs_extra = 0;
#ifndef _WIN32
// Add /usr/local/{include,lib}. We definitely shouldn't do this if we
@@ -657,11 +666,11 @@ namespace build2
// on the next invocation.
//
{
- auto& is (hdr_dirs.first);
+ auto& hs (hdr_dirs.first);
auto& ls (lib_dirs.first);
- bool ui (find (is.begin (), is.end (), usr_inc) != is.end ());
- bool uli (find (is.begin (), is.end (), usr_loc_inc) != is.end ());
+ bool ui (find (hs.begin (), hs.end (), usr_inc) != hs.end ());
+ bool uli (find (hs.begin (), hs.end (), usr_loc_inc) != hs.end ());
#ifdef __APPLE__
// On Mac OS starting from 10.14 there is no longer /usr/include.
@@ -686,7 +695,7 @@ namespace build2
//
if (!ui && !uli)
{
- for (const dir_path& d: is)
+ for (const dir_path& d: hs)
{
if (path_match (d, a_usr_inc))
{
@@ -700,18 +709,29 @@ namespace build2
{
bool ull (find (ls.begin (), ls.end (), usr_loc_lib) != ls.end ());
- // Many platforms don't search in /usr/local/lib by default (but do
- // for headers in /usr/local/include). So add it as the last option.
+ // Many platforms don't search in /usr/local/lib by default but do
+ // for headers in /usr/local/include.
+ //
+ // Note that customarily /usr/local/include is searched before
+ // /usr/include so we add /usr/local/lib before built-in entries
+ // (there isn't really a way to add it after since all we can do is
+ // specify it with -L).
//
if (!ull && exists (usr_loc_lib, true /* ignore_error */))
- ls.push_back (usr_loc_lib);
+ {
+ ls.insert (ls.begin () + sys_lib_dirs_mode, usr_loc_lib);
+ ++sys_lib_dirs_extra;
+ }
// FreeBSD is at least consistent: it searches in neither. Quoting
// its wiki: "FreeBSD can't even find libraries that it installed."
// So let's help it a bit.
//
if (!uli && exists (usr_loc_inc, true /* ignore_error */))
- is.push_back (usr_loc_inc);
+ {
+ hs.insert (hs.begin () + sys_hdr_dirs_mode, usr_loc_inc);
+ ++sys_hdr_dirs_extra;
+ }
}
}
#endif
@@ -815,8 +835,11 @@ namespace build2
dr << "\n hdr dirs";
for (size_t i (0); i != incs.size (); ++i)
{
- if (i == sys_hdr_dirs_extra)
+ if ((sys_hdr_dirs_mode != 0 && i == sys_hdr_dirs_mode) ||
+ (sys_hdr_dirs_extra != 0 &&
+ i == sys_hdr_dirs_extra + sys_hdr_dirs_mode))
dr << "\n --";
+
dr << "\n " << incs[i];
}
}
@@ -826,8 +849,11 @@ namespace build2
dr << "\n lib dirs";
for (size_t i (0); i != libs.size (); ++i)
{
- if (i == sys_lib_dirs_extra)
+ if ((sys_lib_dirs_mode != 0 && i == sys_lib_dirs_mode) ||
+ (sys_lib_dirs_extra != 0 &&
+ i == sys_lib_dirs_extra + sys_lib_dirs_mode))
dr << "\n --";
+
dr << "\n " << libs[i];
}
}
@@ -953,6 +979,9 @@ namespace build2
{
using namespace install;
+ // Note: not registering x_obj or x_asp (they are registered
+ // seperately by the respective optional submodules).
+ //
rs.insert_target_type (x_src);
auto insert_hdr = [&rs, install_loaded] (const target_type& tt)
@@ -1083,30 +1112,30 @@ namespace build2
{
const install_rule& ir (*this);
- r.insert<exe> (perform_install_id, x_install, ir);
- r.insert<exe> (perform_uninstall_id, x_uninstall, ir);
+ r.insert<exe> (perform_install_id, x_install, ir);
+ r.insert<exe> (perform_uninstall_id, x_install, ir);
- r.insert<liba> (perform_install_id, x_install, ir);
- r.insert<liba> (perform_uninstall_id, x_uninstall, ir);
+ r.insert<liba> (perform_install_id, x_install, ir);
+ r.insert<liba> (perform_uninstall_id, x_install, ir);
if (s)
{
- r.insert<libs> (perform_install_id, x_install, ir);
- r.insert<libs> (perform_uninstall_id, x_uninstall, ir);
+ r.insert<libs> (perform_install_id, x_install, ir);
+ r.insert<libs> (perform_uninstall_id, x_install, ir);
}
const libux_install_rule& lr (*this);
- r.insert<libue> (perform_install_id, x_install, lr);
- r.insert<libue> (perform_uninstall_id, x_uninstall, lr);
+ r.insert<libue> (perform_install_id, x_install, lr);
+ r.insert<libue> (perform_uninstall_id, x_install, lr);
- r.insert<libua> (perform_install_id, x_install, lr);
- r.insert<libua> (perform_uninstall_id, x_uninstall, lr);
+ r.insert<libua> (perform_install_id, x_install, lr);
+ r.insert<libua> (perform_uninstall_id, x_install, lr);
if (s)
{
- r.insert<libus> (perform_install_id, x_install, lr);
- r.insert<libus> (perform_uninstall_id, x_uninstall, lr);
+ r.insert<libus> (perform_install_id, x_install, lr);
+ r.insert<libus> (perform_uninstall_id, x_install, lr);
}
}
}
diff --git a/libbuild2/cc/module.hxx b/libbuild2/cc/module.hxx
index a91d723..2a8611b 100644
--- a/libbuild2/cc/module.hxx
+++ b/libbuild2/cc/module.hxx
@@ -4,6 +4,8 @@
#ifndef LIBBUILD2_CC_MODULE_HXX
#define LIBBUILD2_CC_MODULE_HXX
+#include <unordered_map>
+
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -78,6 +80,37 @@ namespace build2
bool new_config = false; // See guess() and init() for details.
+ // Header cache (see compile_rule::enter_header()).
+ //
+ // We place it into the config module so that we have an option of
+ // sharing it for the entire weak amalgamation.
+ //
+ public:
+ // Keep the hash in the key. This way we can compute it outside of the
+ // lock.
+ //
+ struct header_key
+ {
+ path file;
+ size_t hash;
+
+ friend bool
+ operator== (const header_key& x, const header_key& y)
+ {
+ return x.file == y.file; // Note: hash was already compared.
+ }
+ };
+
+ struct header_key_hasher
+ {
+ size_t operator() (const header_key& k) const {return k.hash;}
+ };
+
+ mutable shared_mutex header_map_mutex;
+ mutable std::unordered_map<header_key,
+ const file*,
+ header_key_hasher> header_map;
+
private:
// Defined in gcc.cxx.
//
@@ -105,10 +138,10 @@ namespace build2
{
public:
explicit
- module (data&& d)
+ module (data&& d, const scope& rs)
: common (move (d)),
link_rule (move (d)),
- compile_rule (move (d)),
+ compile_rule (move (d), rs),
install_rule (move (d), *this),
libux_install_rule (move (d), *this) {}
diff --git a/libbuild2/cc/msvc.cxx b/libbuild2/cc/msvc.cxx
index f95cab0..3a7fd6f 100644
--- a/libbuild2/cc/msvc.cxx
+++ b/libbuild2/cc/msvc.cxx
@@ -164,18 +164,21 @@ namespace build2
// Filter cl.exe and link.exe noise.
//
+ // Note: must be followed with the dbuf.read() call.
+ //
void
- msvc_filter_cl (ifdstream& is, const path& src)
+ msvc_filter_cl (diag_buffer& dbuf, const path& src)
+ try
{
// While it appears VC always prints the source name (event if the
// file does not exist), let's do a sanity check. Also handle the
// command line errors/warnings which come before the file name.
//
- for (string l; !eof (getline (is, l)); )
+ for (string l; !eof (getline (dbuf.is, l)); )
{
if (l != src.leaf ().string ())
{
- diag_stream_lock () << l << endl;
+ dbuf.write (l, true /* newline */);
if (msvc_sense_diag (l, 'D').first != string::npos)
continue;
@@ -184,14 +187,19 @@ namespace build2
break;
}
}
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << dbuf.args0 << " stderr: " << e;
+ }
void
- msvc_filter_link (ifdstream& is, const file& t, otype lt)
+ msvc_filter_link (diag_buffer& dbuf, const file& t, otype lt)
+ try
{
// Filter lines until we encounter something we don't recognize. We also
// have to assume the messages can be translated.
//
- for (string l; getline (is, l); )
+ for (string l; getline (dbuf.is, l); )
{
// " Creating library foo\foo.dll.lib and object foo\foo.dll.exp"
//
@@ -216,12 +224,15 @@ namespace build2
// /INCREMENTAL causes linker to sometimes issue messages but now I
// can't quite reproduce it.
- //
- diag_stream_lock () << l << endl;
+ dbuf.write (l, true /* newline */);
break;
}
}
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << dbuf.args0 << " stderr: " << e;
+ }
void
msvc_extract_header_search_dirs (const strings& v, dir_paths& r)
@@ -253,6 +264,13 @@ namespace build2
}
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -260,10 +278,7 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
+ r.push_back (move (d));
}
}
@@ -284,6 +299,13 @@ namespace build2
d = dir_path (o, 9, string::npos);
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -291,10 +313,7 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
+ r.push_back (move (d));
}
}
@@ -313,7 +332,7 @@ namespace build2
{
try
{
- r.push_back (dir_path (move (d)));
+ r.push_back (dir_path (move (d)).normalize ());
}
catch (const invalid_path&)
{
@@ -379,9 +398,22 @@ namespace build2
// Inspect the file and determine if it is static or import library.
// Return otype::e if it is neither (which we quietly ignore).
//
+ static global_cache<otype> library_type_cache;
+
static otype
library_type (const process_path& ld, const path& l)
{
+ string key;
+ {
+ sha256 cs;
+ cs.append (ld.effect_string ());
+ cs.append (l.string ());
+ key = cs.string ();
+
+ if (const otype* r = library_type_cache.find (key))
+ return *r;
+ }
+
// The are several reasonably reliable methods to tell whether it is a
// static or import library. One is lib.exe /LIST -- if there aren't any
// .obj members, then it is most likely an import library (it can also
@@ -422,9 +454,9 @@ namespace build2
//
process pr (run_start (ld,
args,
- 0 /* stdin */,
- -1 /* stdout */,
- false /* error */));
+ 0 /* stdin */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
bool obj (false), dll (false);
string s;
@@ -447,14 +479,11 @@ namespace build2
// libhello\hello.lib.obj
// hello-0.1.0-a.0.19700101000000.dll
//
- // Archive member name at 746: [...]hello.dll[/][ ]*
- // Archive member name at 8C70: [...]hello.lib.obj[/][ ]*
- //
size_t n (s.size ());
for (; n != 0 && s[n - 1] == ' '; --n) ; // Skip trailing spaces.
- if (n >= 7) // At least ": X.obj" or ": X.dll".
+ if (n >= 5) // At least "X.obj" or "X.dll".
{
n -= 4; // Beginning of extension.
@@ -480,7 +509,7 @@ namespace build2
io = true;
}
- if (!run_finish_code (args, pr, s) || io)
+ if (!run_finish_code (args, pr, s, 2 /* verbosity */) || io)
{
diag_record dr;
dr << warn << "unable to detect " << l << " library type, ignoring" <<
@@ -489,23 +518,25 @@ namespace build2
return otype::e;
}
- if (obj && dll)
+ otype r;
+ if (obj != dll)
+ r = obj ? otype::a : otype::s;
+ else
{
- warn << l << " looks like hybrid static/import library, ignoring";
- return otype::e;
- }
+ if (obj && dll)
+ warn << l << " looks like hybrid static/import library, ignoring";
- if (!obj && !dll)
- {
- warn << l << " looks like empty static or import library, ignoring";
- return otype::e;
+ if (!obj && !dll)
+ warn << l << " looks like empty static or import library, ignoring";
+
+ r = otype::e;
}
- return obj ? otype::a : otype::s;
+ return library_type_cache.insert (move (key), r);
}
template <typename T>
- static T*
+ static pair<T*, bool>
msvc_search_library (const process_path& ld,
const dir_path& d,
const prerequisite_key& p,
@@ -551,20 +582,26 @@ namespace build2
//
timestamp mt (mtime (f));
- if (mt != timestamp_nonexistent && library_type (ld, f) == lt)
+ pair<T*, bool> r (nullptr, true);
+
+ if (mt != timestamp_nonexistent)
{
- // Enter the target.
- //
- T* t;
- common::insert_library (p.scope->ctx, t, name, d, ld, e, exist, trace);
- t->path_mtime (move (f), mt);
- return t;
+ if (library_type (ld, f) == lt)
+ {
+ // Enter the target.
+ //
+ common::insert_library (
+ p.scope->ctx, r.first, name, d, ld, e, exist, trace);
+ r.first->path_mtime (move (f), mt);
+ }
+ else
+ r.second = false; // Don't search for binless.
}
- return nullptr;
+ return r;
}
- liba* common::
+ pair<bin::liba*, bool> common::
msvc_search_static (const process_path& ld,
const dir_path& d,
const prerequisite_key& p,
@@ -572,14 +609,21 @@ namespace build2
{
tracer trace (x, "msvc_search_static");
- liba* r (nullptr);
+ liba* a (nullptr);
+ bool b (true);
- auto search = [&r, &ld, &d, &p, exist, &trace] (
+ auto search = [&a, &b, &ld, &d, &p, exist, &trace] (
const char* pf, const char* sf) -> bool
{
- r = msvc_search_library<liba> (
- ld, d, p, otype::a, pf, sf, exist, trace);
- return r != nullptr;
+ pair<liba*, bool> r (msvc_search_library<liba> (
+ ld, d, p, otype::a, pf, sf, exist, trace));
+
+ if (r.first != nullptr)
+ a = r.first;
+ else if (!r.second)
+ b = false;
+
+ return a != nullptr;
};
// Try:
@@ -592,10 +636,10 @@ namespace build2
search ("", "") ||
search ("lib", "") ||
search ("", "lib") ||
- search ("", "_static") ? r : nullptr;
+ search ("", "_static") ? make_pair (a, true) : make_pair (nullptr, b);
}
- libs* common::
+ pair<bin::libs*, bool> common::
msvc_search_shared (const process_path& ld,
const dir_path& d,
const prerequisite_key& pk,
@@ -606,12 +650,14 @@ namespace build2
assert (pk.scope != nullptr);
libs* s (nullptr);
+ bool b (true);
- auto search = [&s, &ld, &d, &pk, exist, &trace] (
+ auto search = [&s, &b, &ld, &d, &pk, exist, &trace] (
const char* pf, const char* sf) -> bool
{
- if (libi* i = msvc_search_library<libi> (
- ld, d, pk, otype::s, pf, sf, exist, trace))
+ pair<libi*, bool> r (msvc_search_library<libi> (
+ ld, d, pk, otype::s, pf, sf, exist, trace));
+ if (r.first != nullptr)
{
ulock l (
insert_library (
@@ -619,6 +665,8 @@ namespace build2
if (!exist)
{
+ libi* i (r.first);
+
if (l.owns_lock ())
{
s->adhoc_member = i; // We are first.
@@ -632,6 +680,8 @@ namespace build2
s->path_mtime (path (), i->mtime ());
}
}
+ else if (!r.second)
+ b = false;
return s != nullptr;
};
@@ -644,7 +694,7 @@ namespace build2
return
search ("", "") ||
search ("lib", "") ||
- search ("", "dll") ? s : nullptr;
+ search ("", "dll") ? make_pair (s, true) : make_pair (nullptr, b);
}
}
}
diff --git a/libbuild2/cc/pkgconfig-libpkg-config.cxx b/libbuild2/cc/pkgconfig-libpkg-config.cxx
new file mode 100644
index 0000000..ecbc019
--- /dev/null
+++ b/libbuild2/cc/pkgconfig-libpkg-config.cxx
@@ -0,0 +1,271 @@
+// file : libbuild2/cc/pkgconfig-libpkg-config.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_BOOTSTRAP
+
+#include <libbuild2/cc/pkgconfig.hxx>
+
+#include <new> // std::bad_alloc
+
+#include <libbuild2/diagnostics.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ // The package dependency traversal depth limit.
+ //
+ static const int max_depth = 100;
+
+ static void
+ error_handler (unsigned int,
+ const char* file,
+ size_t line,
+ const char* msg,
+ const pkg_config_client_t*,
+ const void*)
+ {
+ if (file != nullptr)
+ {
+ path_name n (file);
+ const location l (n, static_cast<uint64_t> (line));
+ error (l) << msg;
+ }
+ else
+ error << msg;
+ }
+
+ // Deleters.
+ //
+ struct fragments_deleter
+ {
+ void operator() (pkg_config_list_t* f) const
+ {
+ pkg_config_fragment_free (f);
+ }
+ };
+
+ // Convert fragments to strings. Skip the -I/-L options that refer to
+ // system directories.
+ //
+ static strings
+ to_strings (const pkg_config_list_t& frags,
+ char type,
+ const pkg_config_list_t& sysdirs)
+ {
+ assert (type == 'I' || type == 'L');
+
+ strings r;
+ auto add = [&r] (const pkg_config_fragment_t* frag)
+ {
+ string s;
+ if (frag->type != '\0')
+ {
+ s += '-';
+ s += frag->type;
+ }
+
+ s += frag->data;
+ r.push_back (move (s));
+ };
+
+ // Option that is separated from its value, for example:
+ //
+ // -I /usr/lib
+ //
+ const pkg_config_fragment_t* opt (nullptr);
+
+ pkg_config_node_t *node;
+ LIBPKG_CONFIG_FOREACH_LIST_ENTRY(frags.head, node)
+ {
+ auto frag (static_cast<const pkg_config_fragment_t*> (node->data));
+
+ // Add the separated option and directory, unless the latest is a
+ // system one.
+ //
+ if (opt != nullptr)
+ {
+ assert (frag->type == '\0'); // See pkg_config_fragment_add().
+
+ if (!pkg_config_path_match_list (frag->data, &sysdirs))
+ {
+ add (opt);
+ add (frag);
+ }
+
+ opt = nullptr;
+ continue;
+ }
+
+ // Skip the -I/-L option if it refers to a system directory.
+ //
+ if (frag->type == type)
+ {
+ // The option is separated from a value, that will (presumably)
+ // follow.
+ //
+ if (*frag->data == '\0')
+ {
+ opt = frag;
+ continue;
+ }
+
+ if (pkg_config_path_match_list (frag->data, &sysdirs))
+ continue;
+ }
+
+ add (frag);
+ }
+
+ if (opt != nullptr) // Add the dangling option.
+ add (opt);
+
+ return r;
+ }
+
+ // Note that some libpkg-config functions can potentially return NULL,
+ // failing to allocate the required memory block. However, we will not
+ // check the returned value for NULL as the library doesn't do so, prior
+ // to filling the allocated structures. So such a code complication on our
+ // side would be useless. Also, for some functions the NULL result has a
+ // special semantics, for example "not found". @@ TODO: can we fix this?
+ // This is now somewhat addressed, see the eflags argument in
+ // pkg_config_pkg_find().
+ //
+ pkgconfig::
+ pkgconfig (path_type p,
+ const dir_paths& pc_dirs,
+ const dir_paths& sys_lib_dirs,
+ const dir_paths& sys_hdr_dirs)
+ : path (move (p))
+ {
+ auto add_dirs = [] (pkg_config_list_t& dir_list,
+ const dir_paths& dirs,
+ bool suppress_dups)
+ {
+ for (const auto& d: dirs)
+ pkg_config_path_add (d.string ().c_str (), &dir_list, suppress_dups);
+ };
+
+ // Initialize the client handle.
+ //
+ // Note: omit initializing the filters from environment/defaults.
+ //
+ unique_ptr<pkg_config_client_t, void (*) (pkg_config_client_t*)> c (
+ pkg_config_client_new (&error_handler,
+ nullptr /* handler_data */,
+ false /* init_filters */),
+ [] (pkg_config_client_t* c) {pkg_config_client_free (c);});
+
+ if (c == nullptr)
+ throw std::bad_alloc ();
+
+ add_dirs (c->filter_libdirs, sys_lib_dirs, false /* suppress_dups */);
+ add_dirs (c->filter_includedirs, sys_hdr_dirs, false /* suppress_dups */);
+
+ // Note that the loaded file directory is added to the (for now empty)
+ // .pc file search list. Also note that loading of the dependency
+ // packages is delayed until the flags retrieval, and their file
+ // directories are not added to the search list.
+ //
+ // @@ Hm, is there a way to force this resolution? But we may not
+ // need this (e.g., only loading from variables).
+ //
+ unsigned int e;
+ pkg_ = pkg_config_pkg_find (c.get (), path.string ().c_str (), &e);
+
+ if (pkg_ == nullptr)
+ {
+ if (e == LIBPKG_CONFIG_ERRF_OK)
+ fail << "package '" << path << "' not found";
+ else
+ // Diagnostics should have already been issued except for allocation
+ // errors.
+ //
+ fail << "unable to load package '" << path << "'";
+ }
+
+ // Add the .pc file search directories.
+ //
+ assert (c->dir_list.length == 1); // Package file directory (see above).
+ add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */);
+
+ client_ = c.release ();
+ }
+
+ void pkgconfig::
+ free ()
+ {
+ assert (client_ != nullptr && pkg_ != nullptr);
+
+ pkg_config_pkg_unref (client_, pkg_);
+ pkg_config_client_free (client_);
+ }
+
+ strings pkgconfig::
+ cflags (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ pkg_config_client_set_flags (
+ client_,
+ // Walk through the private package dependencies (Requires.private)
+ // besides the public ones while collecting the flags. Note that we do
+ // this for both static and shared linking. @@ Hm, I wonder why...?
+ //
+ LIBPKG_CONFIG_PKG_PKGF_SEARCH_PRIVATE |
+
+ // Collect flags from Cflags.private besides those from Cflags for the
+ // static linking.
+ //
+ (stat
+ ? LIBPKG_CONFIG_PKG_PKGF_ADD_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkg_config_list_t f = LIBPKG_CONFIG_LIST_INITIALIZER; // Empty list.
+ int e (pkg_config_pkg_cflags (client_, pkg_, &f, max_depth));
+
+ if (e != LIBPKG_CONFIG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkg_config_list_t, fragments_deleter> fd (&f);
+ return to_strings (f, 'I', client_->filter_includedirs);
+ }
+
+ strings pkgconfig::
+ libs (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ pkg_config_client_set_flags (
+ client_,
+ // Additionally collect flags from the private dependency packages
+ // (see above) and from the Libs.private value for the static linking.
+ //
+ (stat
+ ? LIBPKG_CONFIG_PKG_PKGF_SEARCH_PRIVATE |
+ LIBPKG_CONFIG_PKG_PKGF_ADD_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkg_config_list_t f = LIBPKG_CONFIG_LIST_INITIALIZER; // Empty list.
+ int e (pkg_config_pkg_libs (client_, pkg_, &f, max_depth));
+
+ if (e != LIBPKG_CONFIG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkg_config_list_t, fragments_deleter> fd (&f);
+ return to_strings (f, 'L', client_->filter_libdirs);
+ }
+
+ optional<string> pkgconfig::
+ variable (const char* name) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ const char* r (pkg_config_tuple_find (client_, &pkg_->vars, name));
+ return r != nullptr ? optional<string> (r) : nullopt;
+ }
+ }
+}
+
+#endif // BUILD2_BOOTSTRAP
diff --git a/libbuild2/cc/pkgconfig-libpkgconf.cxx b/libbuild2/cc/pkgconfig-libpkgconf.cxx
new file mode 100644
index 0000000..f3754d3
--- /dev/null
+++ b/libbuild2/cc/pkgconfig-libpkgconf.cxx
@@ -0,0 +1,355 @@
+// file : libbuild2/cc/pkgconfig-libpkgconf.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_BOOTSTRAP
+
+#include <libbuild2/cc/pkgconfig.hxx>
+
+#include <libbuild2/diagnostics.hxx>
+
+// Note that the libpkgconf library did not used to provide the version macro
+// that we could use to compile the code conditionally against different API
+// versions. Thus, we need to sense the pkgconf_client_new() function
+// signature ourselves to call it properly.
+//
+namespace details
+{
+ void*
+ pkgconf_cross_personality_default (); // Never called.
+}
+
+using namespace details;
+
+template <typename H>
+static inline pkgconf_client_t*
+call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*),
+ H error_handler,
+ void* error_handler_data)
+{
+ return f (error_handler, error_handler_data);
+}
+
+template <typename H, typename P>
+static inline pkgconf_client_t*
+call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*, P),
+ H error_handler,
+ void* error_handler_data)
+{
+ return f (error_handler,
+ error_handler_data,
+ ::pkgconf_cross_personality_default ());
+}
+
+namespace build2
+{
+ namespace cc
+ {
+ // The libpkgconf library is not thread-safe, even on the pkgconf_client_t
+ // level (see issue #128 for details). While it seems that the obvious
+ // thread-safety issues are fixed, the default personality initialization,
+ // which is still not thread-safe. So let's keep the mutex for now not to
+ // introduce potential issues.
+ //
+ static mutex pkgconf_mutex;
+
+ // The package dependency traversal depth limit.
+ //
+ static const int pkgconf_max_depth = 100;
+
+ // Normally the error_handler() callback can be called multiple times to
+ // report a single error (once per message line), to produce a multi-line
+ // message like this:
+ //
+ // Package foo was not found in the pkg-config search path.\n
+ // Perhaps you should add the directory containing `foo.pc'\n
+ // to the PKG_CONFIG_PATH environment variable\n
+ // Package 'foo', required by 'bar', not found\n
+ //
+ // For the above example callback will be called 4 times. To suppress all
+ // the junk we will use PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS to get just:
+ //
+ // Package 'foo', required by 'bar', not found\n
+ //
+ // Also disable merging options like -framework into a single fragment, if
+ // possible.
+ //
+ static const int pkgconf_flags =
+ PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS
+ | PKGCONF_PKG_PKGF_SKIP_PROVIDES
+#ifdef PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
+ | PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
+#endif
+ ;
+
+#if defined(LIBPKGCONF_VERSION) && LIBPKGCONF_VERSION >= 10900
+ static bool
+ pkgconf_error_handler (const char* msg,
+ const pkgconf_client_t*,
+ void*)
+#else
+ static bool
+ pkgconf_error_handler (const char* msg,
+ const pkgconf_client_t*,
+ const void*)
+#endif
+ {
+ error << runtime_error (msg); // Sanitize the message (trailing dot).
+ return true;
+ }
+
+ // Deleters. Note that they are thread-safe.
+ //
+ struct fragments_deleter
+ {
+ void operator() (pkgconf_list_t* f) const {pkgconf_fragment_free (f);}
+ };
+
+ // Convert fragments to strings. Skip the -I/-L options that refer to system
+ // directories.
+ //
+ static strings
+ to_strings (const pkgconf_list_t& frags,
+ char type,
+ const pkgconf_list_t& sysdirs)
+ {
+ assert (type == 'I' || type == 'L');
+
+ strings r;
+
+ auto add = [&r] (const pkgconf_fragment_t* frag)
+ {
+ string s;
+ if (frag->type != '\0')
+ {
+ s += '-';
+ s += frag->type;
+ }
+
+ s += frag->data;
+ r.push_back (move (s));
+ };
+
+ // Option that is separated from its value, for example:
+ //
+ // -I /usr/lib
+ //
+ const pkgconf_fragment_t* opt (nullptr);
+
+ pkgconf_node_t *node;
+ PKGCONF_FOREACH_LIST_ENTRY(frags.head, node)
+ {
+ auto frag (static_cast<const pkgconf_fragment_t*> (node->data));
+
+ // Add the separated option and directory, unless the latest is a
+ // system one.
+ //
+ if (opt != nullptr)
+ {
+ // Note that we should restore the directory path that was
+ // (mis)interpreted as an option, for example:
+ //
+ // -I -Ifoo
+ //
+ // In the above example option '-I' is followed by directory
+ // '-Ifoo', which is represented by libpkgconf library as fragment
+ // 'foo' with type 'I'.
+ //
+ if (!pkgconf_path_match_list (
+ frag->type == '\0'
+ ? frag->data
+ : (string ({'-', frag->type}) + frag->data).c_str (),
+ &sysdirs))
+ {
+ add (opt);
+ add (frag);
+ }
+
+ opt = nullptr;
+ continue;
+ }
+
+ // Skip the -I/-L option if it refers to a system directory.
+ //
+ if (frag->type == type)
+ {
+ // The option is separated from a value, that will (presumably)
+ // follow.
+ //
+ if (*frag->data == '\0')
+ {
+ opt = frag;
+ continue;
+ }
+
+ if (pkgconf_path_match_list (frag->data, &sysdirs))
+ continue;
+ }
+
+ add (frag);
+ }
+
+ if (opt != nullptr) // Add the dangling option.
+ add (opt);
+
+ return r;
+ }
+
+ // Note that some libpkgconf functions can potentially return NULL,
+ // failing to allocate the required memory block. However, we will not
+ // check the returned value for NULL as the library doesn't do so, prior
+ // to filling the allocated structures. So such a code complication on our
+ // side would be useless. Also, for some functions the NULL result has a
+ // special semantics, for example "not found".
+ //
+ pkgconfig::
+ pkgconfig (path_type p,
+ const dir_paths& pc_dirs,
+ const dir_paths& sys_lib_dirs,
+ const dir_paths& sys_hdr_dirs)
+ : path (move (p))
+ {
+ auto add_dirs = [] (pkgconf_list_t& dir_list,
+ const dir_paths& dirs,
+ bool suppress_dups,
+ bool cleanup = false)
+ {
+ if (cleanup)
+ {
+ pkgconf_path_free (&dir_list);
+ dir_list = PKGCONF_LIST_INITIALIZER;
+ }
+
+ for (const auto& d: dirs)
+ pkgconf_path_add (d.string ().c_str (), &dir_list, suppress_dups);
+ };
+
+ mlock l (pkgconf_mutex);
+
+ // Initialize the client handle.
+ //
+ unique_ptr<pkgconf_client_t, void (*) (pkgconf_client_t*)> c (
+ call_pkgconf_client_new (&pkgconf_client_new,
+ pkgconf_error_handler,
+ nullptr /* handler_data */),
+ [] (pkgconf_client_t* c) {pkgconf_client_free (c);});
+
+ pkgconf_client_set_flags (c.get (), pkgconf_flags);
+
+ // Note that the system header and library directory lists are
+ // automatically pre-filled by the pkgconf_client_new() call (see
+ // above). We will re-create these lists from scratch.
+ //
+ add_dirs (c->filter_libdirs,
+ sys_lib_dirs,
+ false /* suppress_dups */,
+ true /* cleanup */);
+
+ add_dirs (c->filter_includedirs,
+ sys_hdr_dirs,
+ false /* suppress_dups */,
+ true /* cleanup */);
+
+ // Note that the loaded file directory is added to the (yet empty)
+ // search list. Also note that loading of the prerequisite packages is
+ // delayed until flags retrieval, and their file directories are not
+ // added to the search list.
+ //
+ pkg_ = pkgconf_pkg_find (c.get (), path.string ().c_str ());
+
+ if (pkg_ == nullptr)
+ fail << "package '" << path << "' not found or invalid";
+
+ // Add the .pc file search directories.
+ //
+ assert (c->dir_list.length == 1); // Package file directory (see above).
+ add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */);
+
+ client_ = c.release ();
+ }
+
+ void pkgconfig::
+ free ()
+ {
+ assert (pkg_ != nullptr);
+
+ mlock l (pkgconf_mutex);
+ pkgconf_pkg_unref (client_, pkg_);
+ pkgconf_client_free (client_);
+ }
+
+ strings pkgconfig::
+ cflags (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ mlock l (pkgconf_mutex);
+
+ pkgconf_client_set_flags (
+ client_,
+ pkgconf_flags |
+
+ // Walk through the private package dependencies (Requires.private)
+ // besides the public ones while collecting the flags. Note that we do
+ // this for both static and shared linking.
+ //
+ PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
+
+ // Collect flags from Cflags.private besides those from Cflags for the
+ // static linking.
+ //
+ (stat
+ ? PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
+ int e (pkgconf_pkg_cflags (client_, pkg_, &f, pkgconf_max_depth));
+
+ if (e != PKGCONF_PKG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
+ return to_strings (f, 'I', client_->filter_includedirs);
+ }
+
+ strings pkgconfig::
+ libs (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ mlock l (pkgconf_mutex);
+
+ pkgconf_client_set_flags (
+ client_,
+ pkgconf_flags |
+
+ // Additionally collect flags from the private dependency packages
+ // (see above) and from the Libs.private value for the static linking.
+ //
+ (stat
+ ? PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
+ PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
+ int e (pkgconf_pkg_libs (client_, pkg_, &f, pkgconf_max_depth));
+
+ if (e != PKGCONF_PKG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
+ return to_strings (f, 'L', client_->filter_libdirs);
+ }
+
+ optional<string> pkgconfig::
+ variable (const char* name) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ mlock l (pkgconf_mutex);
+ const char* r (pkgconf_tuple_find (client_, &pkg_->vars, name));
+ return r != nullptr ? optional<string> (r) : nullopt;
+ }
+ }
+}
+
+#endif // BUILD2_BOOTSTRAP
diff --git a/libbuild2/cc/pkgconfig.cxx b/libbuild2/cc/pkgconfig.cxx
index 151473d..4fd7486 100644
--- a/libbuild2/cc/pkgconfig.cxx
+++ b/libbuild2/cc/pkgconfig.cxx
@@ -1,13 +1,6 @@
// file : libbuild2/cc/pkgconfig.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-// In order not to complicate the bootstrap procedure with libpkgconf building
-// exclude functionality that involves reading of .pc files.
-//
-#ifndef BUILD2_BOOTSTRAP
-# include <libpkgconf/libpkgconf.h>
-#endif
-
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
#include <libbuild2/context.hxx>
@@ -25,436 +18,25 @@
#include <libbuild2/cc/utility.hxx>
#include <libbuild2/cc/common.hxx>
+#include <libbuild2/cc/pkgconfig.hxx>
#include <libbuild2/cc/compile-rule.hxx>
#include <libbuild2/cc/link-rule.hxx>
-#ifndef BUILD2_BOOTSTRAP
-
-// Note that the libpkgconf library doesn't provide the version macro that we
-// could use to compile the code conditionally against different API versions.
-// Thus, we need to sense the pkgconf_client_new() function signature
-// ourselves to call it properly.
-//
-namespace details
-{
- void*
- pkgconf_cross_personality_default (); // Never called.
-}
-
-using namespace details;
-
-template <typename H>
-static inline pkgconf_client_t*
-call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*),
- H error_handler,
- void* error_handler_data)
-{
- return f (error_handler, error_handler_data);
-}
-
-template <typename H, typename P>
-static inline pkgconf_client_t*
-call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*, P),
- H error_handler,
- void* error_handler_data)
-{
- return f (error_handler,
- error_handler_data,
- ::pkgconf_cross_personality_default ());
-}
-
-#endif
-
-using namespace std;
-using namespace butl;
+using namespace std; // VC16
namespace build2
{
-#ifndef BUILD2_BOOTSTRAP
-
- // Load package information from a .pc file. Filter out the -I/-L options
- // that refer to system directories. This makes sure all the system search
- // directories are "pushed" to the back which minimizes the chances of
- // picking up wrong (e.g., old installed version) header/library.
- //
- // Note that the prerequisite package .pc files search order is as follows:
- //
- // - in directory of the specified file
- // - in pc_dirs directories (in the natural order)
- //
- class pkgconf
- {
- public:
- using path_type = build2::path;
-
- path_type path;
-
- public:
- explicit
- pkgconf (path_type,
- const dir_paths& pc_dirs,
- const dir_paths& sys_hdr_dirs,
- const dir_paths& sys_lib_dirs);
-
- // Create a special empty object. Querying package information on such
- // an object is illegal.
- //
- pkgconf () = default;
-
- ~pkgconf ();
-
- // Movable-only type.
- //
- pkgconf (pkgconf&& p)
- : path (move (p.path)),
- client_ (p.client_),
- pkg_ (p.pkg_)
- {
- p.client_ = nullptr;
- p.pkg_ = nullptr;
- }
-
- pkgconf&
- operator= (pkgconf&& p)
- {
- if (this != &p)
- {
- this->~pkgconf ();
- new (this) pkgconf (move (p)); // Assume noexcept move-construction.
- }
- return *this;
- }
-
- pkgconf (const pkgconf&) = delete;
- pkgconf& operator= (const pkgconf&) = delete;
-
- strings
- cflags (bool stat) const;
-
- strings
- libs (bool stat) const;
-
- string
- variable (const char*) const;
-
- string
- variable (const string& s) const {return variable (s.c_str ());}
-
- private:
- // Keep them as raw pointers not to deal with API thread-unsafety in
- // deleters and introducing additional mutex locks.
- //
- pkgconf_client_t* client_ = nullptr;
- pkgconf_pkg_t* pkg_ = nullptr;
- };
-
- // Currently the library is not thread-safe, even on the pkgconf_client_t
- // level (see issue #128 for details).
- //
- // @@ An update: seems that the obvious thread-safety issues are fixed.
- // However, let's keep mutex locking for now not to introduce potential
- // issues before we make sure that there are no other ones.
- //
- static mutex pkgconf_mutex;
-
- // The package dependency traversal depth limit.
- //
- static const int pkgconf_max_depth = 100;
-
- // Normally the error_handler() callback can be called multiple times to
- // report a single error (once per message line), to produce a multi-line
- // message like this:
- //
- // Package foo was not found in the pkg-config search path.\n
- // Perhaps you should add the directory containing `foo.pc'\n
- // to the PKG_CONFIG_PATH environment variable\n
- // Package 'foo', required by 'bar', not found\n
- //
- // For the above example callback will be called 4 times. To suppress all the
- // junk we will use PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS to get just:
- //
- // Package 'foo', required by 'bar', not found\n
- //
- // Also disable merging options like -framework into a single fragment, if
- // possible.
- //
- static const int pkgconf_flags =
- PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS
-#ifdef PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
- | PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
-#endif
- ;
-
- static bool
- pkgconf_error_handler (const char* msg, const pkgconf_client_t*, const void*)
- {
- error << runtime_error (msg); // Sanitize the message.
- return true;
- }
-
- // Deleters. Note that they are thread-safe.
- //
- struct fragments_deleter
- {
- void operator() (pkgconf_list_t* f) const {pkgconf_fragment_free (f);}
- };
-
- // Convert fragments to strings. Skip the -I/-L options that refer to system
- // directories.
- //
- static strings
- to_strings (const pkgconf_list_t& frags,
- char type,
- const pkgconf_list_t& sysdirs)
- {
- assert (type == 'I' || type == 'L');
-
- strings r;
-
- auto add = [&r] (const pkgconf_fragment_t* frag)
- {
- string s;
- if (frag->type != '\0')
- {
- s += '-';
- s += frag->type;
- }
-
- s += frag->data;
- r.push_back (move (s));
- };
-
- // Option that is separated from its value, for example:
- //
- // -I /usr/lib
- //
- const pkgconf_fragment_t* opt (nullptr);
-
- pkgconf_node_t *node;
- PKGCONF_FOREACH_LIST_ENTRY(frags.head, node)
- {
- auto frag (static_cast<const pkgconf_fragment_t*> (node->data));
-
- // Add the separated option and directory, unless the latest is a system
- // one.
- //
- if (opt != nullptr)
- {
- // Note that we should restore the directory path that was
- // (mis)interpreted as an option, for example:
- //
- // -I -Ifoo
- //
- // In the above example option '-I' is followed by directory '-Ifoo',
- // which is represented by libpkgconf library as fragment 'foo' with
- // type 'I'.
- //
- if (!pkgconf_path_match_list (
- frag->type == '\0'
- ? frag->data
- : (string ({'-', frag->type}) + frag->data).c_str (),
- &sysdirs))
- {
- add (opt);
- add (frag);
- }
-
- opt = nullptr;
- continue;
- }
-
- // Skip the -I/-L option if it refers to a system directory.
- //
- if (frag->type == type)
- {
- // The option is separated from a value, that will (presumably) follow.
- //
- if (*frag->data == '\0')
- {
- opt = frag;
- continue;
- }
-
- if (pkgconf_path_match_list (frag->data, &sysdirs))
- continue;
- }
-
- add (frag);
- }
-
- if (opt != nullptr) // Add the dangling option.
- add (opt);
-
- return r;
- }
-
- // Note that some libpkgconf functions can potentially return NULL, failing
- // to allocate the required memory block. However, we will not check the
- // returned value for NULL as the library doesn't do so, prior to filling the
- // allocated structures. So such a code complication on our side would be
- // useless. Also, for some functions the NULL result has a special semantics,
- // for example "not found".
- //
- pkgconf::
- pkgconf (path_type p,
- const dir_paths& pc_dirs,
- const dir_paths& sys_lib_dirs,
- const dir_paths& sys_hdr_dirs)
- : path (move (p))
- {
- auto add_dirs = [] (pkgconf_list_t& dir_list,
- const dir_paths& dirs,
- bool suppress_dups,
- bool cleanup = false)
- {
- if (cleanup)
- {
- pkgconf_path_free (&dir_list);
- dir_list = PKGCONF_LIST_INITIALIZER;
- }
-
- for (const auto& d: dirs)
- pkgconf_path_add (d.string ().c_str (), &dir_list, suppress_dups);
- };
-
- mlock l (pkgconf_mutex);
-
- // Initialize the client handle.
- //
- unique_ptr<pkgconf_client_t, void (*) (pkgconf_client_t*)> c (
- call_pkgconf_client_new (&pkgconf_client_new,
- pkgconf_error_handler,
- nullptr /* handler_data */),
- [] (pkgconf_client_t* c) {pkgconf_client_free (c);});
-
- pkgconf_client_set_flags (c.get (), pkgconf_flags);
-
- // Note that the system header and library directory lists are
- // automatically pre-filled by the pkgconf_client_new() call (see above).
- // We will re-create these lists from scratch.
- //
- add_dirs (c->filter_libdirs,
- sys_lib_dirs,
- false /* suppress_dups */,
- true /* cleanup */);
-
- add_dirs (c->filter_includedirs,
- sys_hdr_dirs,
- false /* suppress_dups */,
- true /* cleanup */);
-
- // Note that the loaded file directory is added to the (yet empty) search
- // list. Also note that loading of the prerequisite packages is delayed
- // until flags retrieval, and their file directories are not added to the
- // search list.
- //
- pkg_ = pkgconf_pkg_find (c.get (), path.string ().c_str ());
-
- if (pkg_ == nullptr)
- fail << "package '" << path << "' not found or invalid";
-
- // Add the .pc file search directories.
- //
- assert (c->dir_list.length == 1); // Package file directory (see above).
- add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */);
-
- client_ = c.release ();
- }
-
- pkgconf::
- ~pkgconf ()
- {
- if (client_ != nullptr) // Not empty.
- {
- assert (pkg_ != nullptr);
-
- mlock l (pkgconf_mutex);
- pkgconf_pkg_unref (client_, pkg_);
- pkgconf_client_free (client_);
- }
- }
-
- strings pkgconf::
- cflags (bool stat) const
- {
- assert (client_ != nullptr); // Must not be empty.
-
- mlock l (pkgconf_mutex);
-
- pkgconf_client_set_flags (
- client_,
- pkgconf_flags |
-
- // Walk through the private package dependencies (Requires.private)
- // besides the public ones while collecting the flags. Note that we do
- // this for both static and shared linking.
- //
- PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
-
- // Collect flags from Cflags.private besides those from Cflags for the
- // static linking.
- //
- (stat
- ? PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
- : 0));
-
- pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
- int e (pkgconf_pkg_cflags (client_, pkg_, &f, pkgconf_max_depth));
-
- if (e != PKGCONF_PKG_ERRF_OK)
- throw failed (); // Assume the diagnostics is issued.
-
- unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
- return to_strings (f, 'I', client_->filter_includedirs);
- }
-
- strings pkgconf::
- libs (bool stat) const
- {
- assert (client_ != nullptr); // Must not be empty.
-
- mlock l (pkgconf_mutex);
-
- pkgconf_client_set_flags (
- client_,
- pkgconf_flags |
-
- // Additionally collect flags from the private dependency packages
- // (see above) and from the Libs.private value for the static linking.
- //
- (stat
- ? PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
- PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
- : 0));
-
- pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
- int e (pkgconf_pkg_libs (client_, pkg_, &f, pkgconf_max_depth));
-
- if (e != PKGCONF_PKG_ERRF_OK)
- throw failed (); // Assume the diagnostics is issued.
-
- unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
- return to_strings (f, 'L', client_->filter_libdirs);
- }
-
- string pkgconf::
- variable (const char* name) const
- {
- assert (client_ != nullptr); // Must not be empty.
-
- mlock l (pkgconf_mutex);
- const char* r (pkgconf_tuple_find (client_, &pkg_->vars, name));
- return r != nullptr ? string (r) : string ();
- }
-
-#endif
-
namespace cc
{
using namespace bin;
// In pkg-config backslashes, spaces, etc are escaped with a backslash.
//
+ // @@ TODO: handle empty values (save as ''?)
+ //
+  // Note: may contain variable expansions (e.g., ${pcfiledir}) so unclear
+ // if can use quoting.
+ //
static string
escape (const string& s)
{
@@ -481,6 +63,35 @@ namespace build2
return r;
}
+ // Resolve metadata value type from type name. Return in the second half
+ // of the pair whether this is a dir_path-based type.
+ //
+ static pair<const value_type*, bool>
+ metadata_type (const string& tn)
+ {
+ bool d (false);
+ const value_type* r (nullptr);
+
+ if (tn == "bool") r = &value_traits<bool>::value_type;
+ else if (tn == "int64") r = &value_traits<int64_t>::value_type;
+ else if (tn == "uint64") r = &value_traits<uint64_t>::value_type;
+ else if (tn == "string") r = &value_traits<string>::value_type;
+ else if (tn == "path") r = &value_traits<path>::value_type;
+ else if (tn == "dir_path") {r = &value_traits<dir_path>::value_type; d = true;}
+ else if (tn == "int64s") r = &value_traits<int64s>::value_type;
+ else if (tn == "uint64s") r = &value_traits<uint64s>::value_type;
+ else if (tn == "strings") r = &value_traits<strings>::value_type;
+ else if (tn == "paths") r = &value_traits<paths>::value_type;
+ else if (tn == "dir_paths") {r = &value_traits<dir_paths>::value_type; d = true;}
+
+ return make_pair (r, d);
+ }
+
+ // In order not to complicate the bootstrap procedure with libpkg-config
+ // building, exclude functionality that involves reading of .pc files.
+ //
+#ifndef BUILD2_BOOTSTRAP
+
// Try to find a .pc file in the pkgconfig/ subdirectory of libd, trying
// several names derived from stem. If not found, return false. If found,
// load poptions, loptions, libs, and modules, set the corresponding
@@ -497,9 +108,8 @@ namespace build2
// Also note that the bootstrapped version of build2 will not search for
// .pc files, always returning false (see above for the reasoning).
//
-#ifndef BUILD2_BOOTSTRAP
- // Derive pkgconf search directories from the specified library search
+ // Derive pkg-config search directories from the specified library search
// directory passing them to the callback function for as long as it
// returns false (e.g., not found). Return true if the callback returned
// true.
@@ -543,8 +153,8 @@ namespace build2
return false;
}
- // Search for the .pc files in the pkgconf directories that correspond to
- // the specified library directory. If found, return static (first) and
+ // Search for the .pc files in the pkg-config directories that correspond
+ // to the specified library directory. If found, return static (first) and
// shared (second) library .pc files. If common is false, then only
// consider our .static/.shared files.
//
@@ -554,6 +164,8 @@ namespace build2
const string& stem,
bool common) const
{
+ tracer trace (x, "pkgconfig_search");
+
// When it comes to looking for .pc files we have to decide where to
// search (which directory(ies)) as well as what to search for (which
// names). Suffix is our ".shared" or ".static" extension.
@@ -575,28 +187,36 @@ namespace build2
// then you get something like zlib which calls it zlib.pc. So let's
// just do it.
//
- f = dir;
- f /= "lib";
- f += stem;
- f += sfx;
- f += ".pc";
- if (exists (f))
- return f;
+ // And as you think you've covered all the bases, someone decides to
+ // play with the case (libXau.* vs xau.pc). So let's also try the
+ // lower-case versions of the stem unless we are on a case-insensitive
+ // filesystem.
+ //
+        auto check = [&dir, &sfx, &f] (const string& n)
+ {
+ f = dir;
+ f /= n;
+ f += sfx;
+ f += ".pc";
+ return exists (f);
+ };
- f = dir;
- f /= stem;
- f += sfx;
- f += ".pc";
- if (exists (f))
+ if (check ("lib" + stem) || check (stem))
return f;
+#ifndef _WIN32
+ string lstem (lcase (stem));
+
+ if (lstem != stem)
+ {
+ if (check ("lib" + lstem) || check (lstem))
+ return f;
+ }
+#endif
+
if (proj)
{
- f = dir;
- f /= proj->string ();
- f += sfx;
- f += ".pc";
- if (exists (f))
+ if (check (proj->string ()))
return f;
}
@@ -636,15 +256,18 @@ namespace build2
if (pkgconfig_derive (libd, check))
{
+ l6 ([&]{trace << "found " << libd << stem << " in "
+ << (d.a.empty () ? d.a : d.s).directory ();});
+
r.first = move (d.a);
r.second = move (d.s);
}
return r;
- };
+ }
bool common::
- pkgconfig_load (action a,
+ pkgconfig_load (optional<action> act,
const scope& s,
lib& lt,
liba* at,
@@ -653,7 +276,8 @@ namespace build2
const string& stem,
const dir_path& libd,
const dir_paths& top_sysd,
- const dir_paths& top_usrd) const
+ const dir_paths& top_usrd,
+ pair<bool, bool> metaonly) const
{
assert (at != nullptr || st != nullptr);
@@ -663,12 +287,16 @@ namespace build2
if (p.first.empty () && p.second.empty ())
return false;
- pkgconfig_load (a, s, lt, at, st, p, libd, top_sysd, top_usrd);
+ pkgconfig_load (
+ act, s, lt, at, st, p, libd, top_sysd, top_usrd, metaonly);
return true;
}
+ // Action should be absent if called during the load phase. If metaonly is
+ // true then only load the metadata.
+ //
void common::
- pkgconfig_load (action a,
+ pkgconfig_load (optional<action> act,
const scope& s,
lib& lt,
liba* at,
@@ -676,7 +304,8 @@ namespace build2
const pair<path, path>& paths,
const dir_path& libd,
const dir_paths& top_sysd,
- const dir_paths& top_usrd) const
+ const dir_paths& top_usrd,
+ pair<bool /* a */, bool /* s */> metaonly) const
{
tracer trace (x, "pkgconfig_load");
@@ -687,24 +316,66 @@ namespace build2
assert (!ap.empty () || !sp.empty ());
- // Extract --cflags and set them as lib?{}:export.poptions. Note that we
- // still pass --static in case this is pkgconf which has Cflags.private.
+ // Append -I<dir> or -L<dir> option suppressing duplicates.
//
- auto parse_cflags = [&trace, this] (target& t,
- const pkgconf& pc,
- bool la)
+ auto append_dir = [] (strings& ops, string&& o)
{
+ char c (o[1]);
+
+ // @@ Should we normalize the path for good measure? But on the other
+ // hand, most of the time when it's not normalized, it will likely
+ // be "consistently-relative", e.g., something like
+ // ${prefix}/lib/../include. I guess let's wait and see for some
+ // real-world examples.
+ //
+ // Well, we now support generating relocatable .pc files that have
+ // a bunch of -I${pcfiledir}/../../include and -L${pcfiledir}/.. .
+ //
+ // On the other hand, there could be symlinks involved and just
+ // normalize() may not be correct.
+ //
+ // Note that we do normalize -L paths in the usrd logic later
+      //       (but not when setting as *.export.loptions).
+
+ for (const string& x: ops)
+ {
+ if (x.size () > 2 && x[0] == '-' && x[1] == c)
+ {
+ if (path_traits::compare (x.c_str () + 2, x.size () - 2,
+ o.c_str () + 2, o.size () - 2) == 0)
+ return; // Duplicate.
+ }
+ }
+
+ ops.push_back (move (o));
+ };
+
+      // Extract --cflags and set them as lib?{}:export.poptions.
+ //
+ auto parse_cflags = [&trace, this, &append_dir] (target& t,
+ const pkgconfig& pc,
+ bool la)
+ {
+ // Note that we normalize `-[IDU] <arg>` to `-[IDU]<arg>`.
+ //
strings pops;
- bool arg (false);
- for (auto& o: pc.cflags (la))
+ char arg ('\0'); // Option with pending argument.
+ for (string& o: pc.cflags (la))
{
if (arg)
{
// Can only be an argument for -I, -D, -U options.
//
- pops.push_back (move (o));
- arg = false;
+ o.insert (0, 1, arg);
+ o.insert (0, 1, '-');
+
+ if (arg == 'I')
+ append_dir (pops, move (o));
+ else
+ pops.push_back (move (o));
+
+ arg = '\0';
continue;
}
@@ -713,11 +384,17 @@ namespace build2
// We only keep -I, -D and -U.
//
if (n >= 2 &&
- o[0] == '-' &&
- (o[1] == 'I' || o[1] == 'D' || o[1] == 'U'))
+ o[0] == '-' && (o[1] == 'I' || o[1] == 'D' || o[1] == 'U'))
{
- pops.push_back (move (o));
- arg = (n == 2);
+ if (n > 2)
+ {
+ if (o[1] == 'I')
+ append_dir (pops, move (o));
+ else
+ pops.push_back (move (o));
+ }
+ else
+ arg = o[1];
continue;
}
@@ -726,7 +403,7 @@ namespace build2
}
if (arg)
- fail << "argument expected after " << pops.back () <<
+ fail << "argument expected after -" << arg <<
info << "while parsing pkg-config --cflags " << pc.path;
if (!pops.empty ())
@@ -746,12 +423,16 @@ namespace build2
// Parse --libs into loptions/libs (interface and implementation). If
// ps is not NULL, add each resolved library target as a prerequisite.
//
- auto parse_libs = [a, &s, top_sysd, this] (target& t,
- bool binless,
- const pkgconf& pc,
- bool la,
- prerequisites* ps)
+ auto parse_libs = [this,
+ &append_dir,
+ act, &s, top_sysd] (target& t,
+ bool binless,
+ const pkgconfig& pc,
+ bool la,
+ prerequisites* ps)
{
+ // Note that we normalize `-L <arg>` to `-L<arg>`.
+ //
strings lops;
vector<name> libs;
@@ -760,22 +441,29 @@ namespace build2
// library is binless. But sometimes we may have other linker options,
// for example, -Wl,... or -pthread. It's probably a bad idea to
// ignore them. Also, theoretically, we could have just the library
- // name/path.
+ // name/path. Note that (after some meditation) we consider -pthread
+ // a special form of -l.
//
// The tricky part, of course, is to know whether what follows after
// an option we don't recognize is its argument or another option or
// library. What we do at the moment is stop recognizing just library
// names (without -l) after seeing an unknown option.
//
- bool arg (false), first (true), known (true), have_L;
- for (auto& o: pc.libs (la))
+ bool first (true), known (true), have_L (false);
+
+        string self; // The library itself (-l or just name/path).
+
+ char arg ('\0'); // Option with pending argument.
+ for (string& o: pc.libs (la))
{
if (arg)
{
- // Can only be an argument for an loption.
+ // Can only be an argument for an -L option.
//
- lops.push_back (move (o));
- arg = false;
+ o.insert (0, 1, arg);
+ o.insert (0, 1, '-');
+ append_dir (lops, move (o));
+ arg = '\0';
continue;
}
@@ -785,44 +473,54 @@ namespace build2
//
if (n >= 2 && o[0] == '-' && o[1] == 'L')
{
+ if (n > 2)
+ append_dir (lops, move (o));
+ else
+ arg = o[1];
have_L = true;
- lops.push_back (move (o));
- arg = (n == 2);
continue;
}
- // See if that's -l or just the library name/path.
+ // See if that's -l, -pthread, or just the library name/path.
//
- if ((known && o[0] != '-') ||
- (n > 2 && o[0] == '-' && o[1] == 'l'))
+ if ((known && n != 0 && o[0] != '-') ||
+ (n > 2 && o[0] == '-' && (o[1] == 'l' || o == "-pthread")))
{
// Unless binless, the first one is the library itself, which we
// skip. Note that we don't verify this and theoretically it could
// be some other library, but we haven't encountered such a beast
// yet.
//
+            // What we have encountered (e.g., in the Magick++ library) is the
+ // library itself repeated in Libs.private. So now we save it and
+            // filter all its subsequent occurrences.
+ //
+ // @@ To be safe we probably shouldn't rely on the position and
+ // filter out all occurrences of the library itself (by name?)
+ // and complain if none were encountered.
+ //
+ // Note also that the same situation can occur if we have a
+ // binful library for which we could not find the library
+ // binary and are treating it as binless. We now have a diag
+ // frame around the call to search_library() to help diagnose
+ // such situations.
+ //
if (first)
{
first = false;
if (!binless)
+ {
+ self = move (o);
+ continue;
+ }
+ }
+ else
+ {
+ if (!binless && o == self)
continue;
}
- // @@ If by some reason this is the library itself (doesn't go
- // first or libpkgconf parsed libs in some bizarre way) we will
- // have a dependency cycle by trying to lock its target inside
- // search_library() as by now it is already locked. To be safe
- // we probably shouldn't rely on the position and filter out
- // all occurrences of the library itself (by name?) and
- // complain if none were encountered.
- //
- // Note also that the same situation can occur if we have a
- // binful library for which we could not find the library
- // binary and are treating it as binless. We now have a diag
- // frame around the call to search_library() to help diagnose
- // such situations.
- //
libs.push_back (name (move (o)));
continue;
}
@@ -834,7 +532,7 @@ namespace build2
}
if (arg)
- fail << "argument expected after " << lops.back () <<
+ fail << "argument expected after -" << arg <<
info << "while parsing pkg-config --libs " << pc.path;
// Space-separated list of escaped library flags.
@@ -842,7 +540,7 @@ namespace build2
auto lflags = [&pc, la] () -> string
{
string r;
- for (const auto& o: pc.libs (la))
+ for (const string& o: pc.libs (la))
{
if (!r.empty ())
r += ' ';
@@ -851,7 +549,7 @@ namespace build2
return r;
};
- if (first && !binless)
+ if (!binless && self.empty ())
fail << "library expected in '" << lflags () << "'" <<
info << "while parsing pkg-config --libs " << pc.path;
@@ -864,8 +562,8 @@ namespace build2
// import installed, or via a .pc file (which we could have generated
// from the export stub). The exception is "runtime libraries" (which
// are really the extension of libc or the operating system in case of
- // Windows) such as -lm, -ldl, -lpthread, etc. Those we will detect
- // and leave as -l*.
+ // Windows) such as -lm, -ldl, -lpthread (or its -pthread variant),
+ // etc. Those we will detect and leave as -l*.
//
// If we managed to resolve all the -l's (sans runtime), then we can
// omit -L's for a nice and tidy command line.
@@ -892,20 +590,28 @@ namespace build2
if (l[0] != '-') // e.g., just shell32.lib
continue;
else if (cmp ("advapi32") ||
+ cmp ("authz") ||
cmp ("bcrypt") ||
+ cmp ("comdlg32") ||
cmp ("crypt32") ||
- cmp ("dbgeng") ||
cmp ("d2d1") ||
cmp ("d3d", 3) || // d3d*
+ cmp ("dbgeng") ||
+ cmp ("dbghelp") ||
+ cmp ("dnsapi") ||
cmp ("dwmapi") ||
cmp ("dwrite") ||
cmp ("dxgi") ||
cmp ("dxguid") ||
cmp ("gdi32") ||
+ cmp ("glu32") ||
cmp ("imagehlp") ||
cmp ("imm32") ||
+ cmp ("iphlpapi") ||
cmp ("kernel32") ||
+ cmp ("mincore") ||
cmp ("mpr") ||
+ cmp ("msimg32") ||
cmp ("mswsock") ||
cmp ("msxml", 5) || // msxml*
cmp ("netapi32") ||
@@ -913,6 +619,9 @@ namespace build2
cmp ("odbc32") ||
cmp ("ole32") ||
cmp ("oleaut32") ||
+ cmp ("opengl32") ||
+ cmp ("powrprof") ||
+ cmp ("psapi") ||
cmp ("rpcrt4") ||
cmp ("secur32") ||
cmp ("shell32") ||
@@ -922,6 +631,8 @@ namespace build2
cmp ("userenv") ||
cmp ("uuid") ||
cmp ("version") ||
+ cmp ("windowscodecs") ||
+ cmp ("winhttp") ||
cmp ("winmm") ||
cmp ("winspool") ||
cmp ("ws2") ||
@@ -938,6 +649,11 @@ namespace build2
}
continue;
}
+ else if (tsys == "mingw32")
+ {
+ if (l == "-pthread")
+ continue;
+ }
}
else
{
@@ -947,6 +663,7 @@ namespace build2
l == "-lm" ||
l == "-ldl" ||
l == "-lrt" ||
+ l == "-pthread" ||
l == "-lpthread")
continue;
@@ -982,18 +699,13 @@ namespace build2
{
usrd = dir_paths ();
- for (auto i (lops.begin ()); i != lops.end (); ++i)
+ for (const string& o: lops)
{
- const string& o (*i);
-
- if (o.size () >= 2 && o[0] == '-' && o[1] == 'L')
+ // Note: always in the -L<dir> form (see above).
+ //
+ if (o.size () > 2 && o[0] == '-' && o[1] == 'L')
{
- string p;
-
- if (o.size () == 2)
- p = *++i; // We've verified it's there.
- else
- p = string (o, 2);
+ string p (o, 2);
try
{
@@ -1004,6 +716,7 @@ namespace build2
<< lflags () << "'" <<
info << "while parsing pkg-config --libs " << pc.path;
+ d.normalize ();
usrd->push_back (move (d));
}
catch (const invalid_path& e)
@@ -1034,7 +747,7 @@ namespace build2
dr << info (f) << "while resolving pkg-config dependency " << l;
});
- lt = search_library (a, top_sysd, usrd, pk);
+ lt = search_library (act, top_sysd, usrd, pk);
}
if (lt != nullptr)
@@ -1083,24 +796,16 @@ namespace build2
{
// Translate -L to /LIBPATH.
//
- for (auto i (lops.begin ()); i != lops.end (); )
+ for (string& o: lops)
{
- string& o (*i);
size_t n (o.size ());
- if (n >= 2 && o[0] == '-' && o[1] == 'L')
+ // Note: always in the -L<dir> form (see above).
+ //
+ if (n > 2 && o[0] == '-' && o[1] == 'L')
{
o.replace (0, 2, "/LIBPATH:");
-
- if (n == 2)
- {
- o += *++i; // We've verified it's there.
- i = lops.erase (i);
- continue;
- }
}
-
- ++i;
}
}
@@ -1124,6 +829,10 @@ namespace build2
// may escape things even on non-Windows platforms, for example,
// spaces. So we use a slightly modified version of next_word().
//
+ // @@ TODO: handle quotes (e.g., empty values; see parse_metadata()).
+ // I wonder what we get here if something is quoted in the
+ // .pc file.
+ //
auto next = [] (const string& s, size_t& b, size_t& e) -> string
{
string r;
@@ -1159,17 +868,123 @@ namespace build2
return r;
};
+ // Parse the build2.metadata variable value and, if user is true,
+ // extract the user metadata, if any, and set extracted variables on the
+ // specified target.
+ //
+ auto parse_metadata = [&next] (target& t,
+ pkgconfig& pc,
+ const string& md,
+ bool user)
+ {
+ const location loc (pc.path);
+
+ context& ctx (t.ctx);
+
+ optional<uint64_t> ver;
+ optional<string> pfx;
+
+ variable_pool* vp (nullptr); // Resolve lazily.
+
+ string s;
+ for (size_t b (0), e (0); !(s = next (md, b, e)).empty (); )
+ {
+ if (!ver)
+ {
+ try
+ {
+ ver = value_traits<uint64_t>::convert (name (s), nullptr);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (loc) << "invalid version in build2.metadata variable: "
+ << e;
+ }
+
+ if (*ver != 1)
+ fail (loc) << "unexpected metadata version " << *ver;
+
+ if (!user)
+ return;
+
+ continue;
+ }
+
+ if (!pfx)
+ {
+ if (s.empty ())
+ fail (loc) << "empty variable prefix in build2.metadata varible";
+
+ pfx = s;
+ continue;
+ }
+
+ // The rest is variable name/type pairs.
+ //
+ size_t p (s.find ('/'));
+
+ if (p == string::npos)
+ fail (loc) << "expected name/type pair instead of '" << s << "'";
+
+ string vn (s, 0, p);
+ string tn (s, p + 1);
+
+ optional<string> val (pc.variable (vn));
+
+ if (!val)
+ fail (loc) << "metadata variable " << vn << " not set";
+
+ pair<const value_type*, bool> vt (metadata_type (tn));
+ if (vt.first == nullptr)
+ fail (loc) << "unknown metadata type " << tn;
+
+ names ns;
+ for (size_t b (0), e (0); !(s = next (*val, b, e)).empty (); )
+ {
+ ns.push_back (vt.second
+ ? name (dir_path (move (s)))
+ : name (move (s)));
+ }
+
+ // These should be public (qualified) variables so go straight for
+ // the public variable pool.
+ //
+ if (vp == nullptr)
+ vp = &ctx.var_pool.rw (); // Load phase if user==true.
+
+ const variable& var (vp->insert (move (vn)));
+
+ value& v (t.assign (var));
+ v.assign (move (ns), &var);
+ typify (v, *vt.first, &var);
+ }
+
+ if (!ver)
+ fail (loc) << "version expected in build2.metadata variable";
+
+ if (!pfx)
+ return; // No user metadata.
+
+ // Set export.metadata to indicate the presence of user metadata.
+ //
+ t.assign (ctx.var_export_metadata) = names {
+ name (std::to_string (*ver)), name (move (*pfx))};
+ };
+
// Parse modules, enter them as targets, and add them to the
// prerequisites.
//
auto parse_modules = [&trace, this,
- &next, &s, &lt] (const pkgconf& pc,
+ &next, &s, &lt] (const pkgconfig& pc,
prerequisites& ps)
{
- string val (pc.variable ("cxx_modules"));
+ optional<string> val (pc.variable ("cxx.modules"));
+
+ if (!val)
+ return;
string m;
- for (size_t b (0), e (0); !(m = next (val, b, e)).empty (); )
+ for (size_t b (0), e (0); !(m = next (*val, b, e)).empty (); )
{
// The format is <name>=<path> with `..` used as a partition
// separator (see pkgconfig_save() for details).
@@ -1178,18 +993,26 @@ namespace build2
if (p == string::npos ||
p == 0 || // Empty name.
p == m.size () - 1) // Empty path.
- fail << "invalid module information in '" << val << "'" <<
- info << "while parsing pkg-config --variable=cxx_modules "
+ fail << "invalid module information in '" << *val << "'" <<
+ info << "while parsing pkg-config --variable=cxx.modules "
<< pc.path;
string mn (m, 0, p);
path mp (m, p + 1, string::npos);
+
+ // Must be absolute but may not be normalized due to a relocatable
+ // .pc file. We assume there are no symlink shenanigans that would
+ // require realize().
+ //
+ if (!mp.normalized ())
+ mp.normalize ();
+
path mf (mp.leaf ());
// Extract module properties, if any.
//
- string pp (pc.variable ("cxx_module_preprocessed." + mn));
- string se (pc.variable ("cxx_module_symexport." + mn));
+ optional<string> pp (pc.variable ("cxx.module_preprocessed." + mn));
+ optional<string> se (pc.variable ("cxx.module_symexport." + mn));
// Replace the partition separator.
//
@@ -1234,11 +1057,12 @@ namespace build2
//
{
value& v (mt.vars.assign (x_preprocessed)); // NULL
- if (!pp.empty ()) v = move (pp);
+ if (pp)
+ v = move (*pp);
}
{
- mt.vars.assign (x_symexport) = (se == "true");
+ mt.vars.assign (x_symexport) = (se && *se == "true");
}
tl.second.unlock ();
@@ -1260,18 +1084,29 @@ namespace build2
// the prerequisites.
//
auto parse_headers = [&trace, this,
- &next, &s, &lt] (const pkgconf& pc,
+ &next, &s, &lt] (const pkgconfig& pc,
const target_type& tt,
const char* lang,
prerequisites& ps)
{
- string var (string (lang) + "_importable_headers");
- string val (pc.variable (var));
+ string var (string (lang) + ".importable_headers");
+ optional<string> val (pc.variable (var));
+
+ if (!val)
+ return;
string h;
- for (size_t b (0), e (0); !(h = next (val, b, e)).empty (); )
+ for (size_t b (0), e (0); !(h = next (*val, b, e)).empty (); )
{
path hp (move (h));
+
+ // Must be absolute but may not be normalized due to a relocatable
+ // .pc file. We assume there are no symlink shenanigans that would
+ // require realize().
+ //
+ if (!hp.normalized ())
+ hp.normalize ();
+
path hf (hp.leaf ());
auto tl (
@@ -1309,19 +1144,10 @@ namespace build2
}
};
- // For now we only populate prerequisites for lib{}. To do it for
- // liba{} would require weeding out duplicates that are already in
- // lib{}.
+ // Load the information from the pkg-config files.
//
- // Currently, this information is only used by the modules machinery to
- // resolve module names to module files (but we cannot only do this if
- // modules are enabled since the same installed library can be used by
- // multiple builds).
- //
- prerequisites prs;
-
- pkgconf apc;
- pkgconf spc;
+ pkgconfig apc;
+ pkgconfig spc;
// Create the .pc files search directory list.
//
@@ -1329,9 +1155,16 @@ namespace build2
// Note that we rely on the "small function object" optimization here.
//
- auto add_pc_dir = [&pc_dirs] (dir_path&& d) -> bool
+ auto add_pc_dir = [&trace, &pc_dirs] (dir_path&& d) -> bool
{
- pc_dirs.emplace_back (move (d));
+ // Suppress duplicates.
+ //
+ if (find (pc_dirs.begin (), pc_dirs.end (), d) == pc_dirs.end ())
+ {
+ l6 ([&]{trace << "search path " << d;});
+ pc_dirs.emplace_back (move (d));
+ }
+
return false;
};
@@ -1341,18 +1174,115 @@ namespace build2
bool pa (at != nullptr && !ap.empty ());
if (pa || sp.empty ())
- apc = pkgconf (ap, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
+ apc = pkgconfig (ap, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
bool ps (st != nullptr && !sp.empty ());
if (ps || ap.empty ())
- spc = pkgconf (sp, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
+ spc = pkgconfig (sp, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
+
+ // Load the user metadata if we are in the load phase. Otherwise just
+ // determine if we have metadata.
+ //
+ // Note also that we are not failing here if the metadata was requested
+ // but not present (potentially only partially) letting the caller
+ // (i.e., the import machinery) verify that the export.metadata was set
+ // on the target being imported. This would also allow supporting
+ // optional metadata.
+ //
+ bool apc_meta (false);
+ bool spc_meta (false);
+ if (!act)
+ {
+ // We can only do it during the load phase.
+ //
+ assert (lt.ctx.phase == run_phase::load);
+
+ pkgconfig& ipc (ps ? spc : apc); // As below.
+
+ // Since it's not easy to say if things are the same, we load a copy
+ // into the group and each member, if any.
+ //
+ // @@ TODO: check if already loaded? Don't we have the same problem
+ // below with reloading the rest for lt? What if we passed NULL
+ // in this case (and I suppose another bool in metaonly)?
+ //
+ if (optional<string> md = ipc.variable ("build2.metadata"))
+ parse_metadata (lt, ipc, *md, true);
+
+ if (pa)
+ {
+ if (optional<string> md = apc.variable ("build2.metadata"))
+ {
+ parse_metadata (*at, apc, *md, true);
+ apc_meta = true;
+ }
+ }
+
+ if (ps)
+ {
+ if (optional<string> md = spc.variable ("build2.metadata"))
+ {
+ parse_metadata (*st, spc, *md, true);
+ spc_meta = true;
+ }
+ }
+
+ // If we only need metadata, then we are done.
+ //
+ if (at != nullptr && metaonly.first)
+ {
+ pa = false;
+ at = nullptr;
+ }
+
+ if (st != nullptr && metaonly.second)
+ {
+ ps = false;
+ st = nullptr;
+ }
+
+ if (at == nullptr && st == nullptr)
+ return;
+ }
+ else
+ {
+ if (pa)
+ {
+ if (optional<string> md = apc.variable ("build2.metadata"))
+ {
+ parse_metadata (*at, apc, *md, false);
+ apc_meta = true;
+ }
+ }
+
+ if (ps)
+ {
+ if (optional<string> md = spc.variable ("build2.metadata"))
+ {
+ parse_metadata (*st, spc, *md, false);
+ spc_meta = true;
+ }
+ }
+ }
// Sort out the interface dependencies (which we are setting on lib{}).
// If we have the shared .pc variant, then we use that. Otherwise --
// static but extract without the --static option (see also the saving
// logic).
//
- pkgconf& ipc (ps ? spc : apc); // Interface package info.
+ pkgconfig& ipc (ps ? spc : apc); // Interface package info.
+ bool ipc_meta (ps ? spc_meta : apc_meta);
+
+ // For now we only populate prerequisites for lib{}. To do it for
+ // liba{} would require weeding out duplicates that are already in
+ // lib{}.
+ //
+ // Currently, this information is only used by the modules machinery to
+ // resolve module names to module files (but we cannot only do this if
+ // modules are enabled since the same installed library can be used by
+ // multiple builds).
+ //
+ prerequisites prs;
parse_libs (
lt,
@@ -1370,12 +1300,30 @@ namespace build2
if (ps)
parse_cflags (*st, spc, false);
+ // @@ TODO: we can now load cc.type if there is metadata (but need to
+ // return this rather than set, see search_library() for
+ // details).
+
+ // Load the bin.whole flag (whole archive).
+ //
+ if (at != nullptr && (pa ? apc_meta : spc_meta))
+ {
+ // Note that if unspecified we leave it unset letting the consumer
+ // override it, if necessary (see the bin.lib lookup semantics for
+ // details).
+ //
+ if (optional<string> v = (pa ? apc : spc).variable ("bin.whole"))
+ {
+ at->vars.assign ("bin.whole") = (*v == "true");
+ }
+ }
+
// For now we assume static and shared variants export the same set of
// modules/importable headers. While technically possible, having
// different sets will most likely lead to all sorts of complications
// (at least for installed libraries) and life is short.
//
- if (modules)
+ if (modules && ipc_meta)
{
parse_modules (ipc, prs);
@@ -1403,7 +1351,7 @@ namespace build2
}
bool common::
- pkgconfig_load (action,
+ pkgconfig_load (optional<action>,
const scope&,
lib&,
liba*,
@@ -1412,13 +1360,14 @@ namespace build2
const string&,
const dir_path&,
const dir_paths&,
- const dir_paths&) const
+ const dir_paths&,
+ pair<bool, bool>) const
{
return false;
}
void common::
- pkgconfig_load (action,
+ pkgconfig_load (optional<action>,
const scope&,
lib&,
liba*,
@@ -1426,7 +1375,8 @@ namespace build2
const pair<path, path>&,
const dir_path&,
const dir_paths&,
- const dir_paths&) const
+ const dir_paths&,
+ pair<bool, bool>) const
{
assert (false); // Should never be called.
}
@@ -1440,6 +1390,11 @@ namespace build2
// file must be generated based on the static library to get accurate
// Libs.private.
//
+ // The other things that we omit from the common variant are -l options
+ // for binless libraries (so that it's usable from other build systems) as
+ // well as metadata (which could become incomplete due to the previous
+ // omissions; for example, importable headers metadata).
+ //
void link_rule::
pkgconfig_save (action a,
const file& l,
@@ -1459,41 +1414,147 @@ namespace build2
/* */ pcs::static_type)));
assert (t != nullptr);
+ const path& p (t->path ());
+
+ // If we are uninstalling, skip regenerating the file if it already
+ // exists (I think we could have skipped this even if it doesn't exist,
+ // but let's keep things close to the install case).
+ //
+ if (ctx.current_action ().outer_operation () == uninstall_id)
+ {
+ if (exists (p))
+ return;
+ }
+
// This is the lib{} group if we are generating the common file and the
// target itself otherwise.
//
- const file& g (common ? l.group->as<file> () : l);
+ const target& g (common ? *l.group : l);
// By default we assume things go into install.{include, lib}.
//
+ // If include.lib does not resolve, then assume this is update-for-
+ // install without actual install and remove the file if it exists.
+ //
+ // @@ Shouldn't we use target's install value rather than install.lib
+ // in case it gets installed into a custom location? I suppose one
+ // can now use cc.pkgconfig.lib to customize this.
+ //
using install::resolve_dir;
- dir_path idir (resolve_dir (g, cast<dir_path> (g["install.include"])));
- dir_path ldir (resolve_dir (g, cast<dir_path> (g["install.lib"])));
+ small_vector<dir_path, 1> ldirs;
- const path& p (t->path ());
+ if (const dir_paths* ds = cast_null<dir_paths> (g[c_pkgconfig_lib]))
+ {
+ for (const dir_path& d: *ds)
+ {
+ bool f (ldirs.empty ());
- // If we are uninstalling, skip regenerating the file if it already
- // exists (I think we could have skipped this even if it doesn't exist,
- // but let's keep things close to the install case).
- //
- if (ctx.current_action ().outer_operation () == uninstall_id)
+ ldirs.push_back (resolve_dir (g, d, {}, !f /* fail_unknown */));
+
+ if (f && ldirs.back ().empty ())
+ break;
+ }
+ }
+ else
+ ldirs.push_back (resolve_dir (g,
+ cast<dir_path> (g["install.lib"]),
+ {},
+ false /* fail_unknown */));
+
+ if (!ldirs.empty () && ldirs.front ().empty ())
{
- if (exists (p))
- return;
+ rmfile (ctx, p, 3 /* verbosity */);
+ return;
}
+ small_vector<dir_path, 1> idirs;
+
+ if (const dir_paths* ds = cast_null<dir_paths> (g[c_pkgconfig_include]))
+ {
+ for (const dir_path& d: *ds)
+ idirs.push_back (resolve_dir (g, d));
+ }
+ else
+ idirs.push_back (resolve_dir (g,
+ cast<dir_path> (g["install.include"])));
+
// Note that generation can take some time if we have a large number of
// prerequisite libraries.
//
- if (verb)
- text << "pc " << *t;
- else if (verb >= 2)
+ if (verb >= 2)
text << "cat >" << p;
+ else if (verb)
+ print_diag ("pc", g, *t);
if (ctx.dry_run)
return;
+ // See if we should be generating a relocatable .pc file and if so get
+ // its installation location. The plan is to make all absolute paths
+ // that we write relative to this location and prefix them with the
+ // built-in ${pcfiledir} variable (which is supported by everybody: the
+ // original pkg-config, pkgconf, and our libpkg-config library).
+ //
+ dir_path rel_base;
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ path f (install::resolve_file (*t));
+ if (!f.empty ()) // Shouldn't happen but who knows.
+ rel_base = f.directory ();
+ }
+
+ // Note: reloc_*path() expect absolute and normalized paths.
+ //
+ // Note also that reloc_path() can be used on dir_path to get the path
+ // without the trailing slash.
+ //
+ auto reloc_path = [&rel_base,
+ s = string ()] (const path& p,
+ const char* what) mutable
+ -> const string&
+ {
+ if (rel_base.empty ())
+ return p.string ();
+
+ try
+ {
+ s = p.relative (rel_base).string ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make " << what << " path " << p << " relative to "
+ << rel_base;
+ }
+
+ if (!s.empty ()) s.insert (0, 1, path_traits::directory_separator);
+ s.insert (0, "${pcfiledir}");
+ return s;
+ };
+
+ auto reloc_dir_path = [&rel_base,
+ s = string ()] (const dir_path& p,
+ const char* what) mutable
+ -> const string&
+ {
+ if (rel_base.empty ())
+ return (s = p.representation ());
+
+ try
+ {
+ s = p.relative (rel_base).representation ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make " << what << " path " << p << " relative to "
+ << rel_base;
+ }
+
+ if (!s.empty ()) s.insert (0, 1, path_traits::directory_separator);
+ s.insert (0, "${pcfiledir}");
+ return s;
+ };
+
auto_rmfile arm (p);
try
@@ -1511,6 +1572,20 @@ namespace build2
fail << "no version variable in project " << n <<
info << "while generating " << p;
+ // When comparing versions, pkg-config uses RPM semantics, which is
+ // basically comparing each all-digit/alpha fragments in order.
+ // This means, for example, a semver with a pre-release will be
+ // compared incorrectly (pre-release will be greater than the final
+ // version). We could detect if this project uses stdver and chop
+ // off any pre-release information (so, essentially only saving the
+ // major.minor.patch part). But that means such .pc files will
+ // contain inaccurate version information. And seeing that we don't
+ // recommend using pkg-config (rather primitive) package dependency
+ // support, having complete version information for documentation
+ // seems more important.
+ //
+ // @@ Maybe still makes sense to only save version.project_id?
+ //
const string& v (cast<string> (vl));
os << "Name: " << n << endl;
@@ -1627,13 +1702,11 @@ namespace build2
return n;
};
- // @@ TODO: support whole archive?
- //
-
// Cflags.
//
os << "Cflags:";
- os << " -I" << escape (idir.string ());
+ for (const dir_path& d: idirs)
+ os << " -I" << escape (reloc_path (d, "header search"));
save_poptions (x_export_poptions);
save_poptions (c_export_poptions);
os << endl;
@@ -1652,7 +1725,8 @@ namespace build2
// While we don't need it for a binless library itselt, it may be
// necessary to resolve its binful dependencies.
//
- os << " -L" << escape (ldir.string ());
+ for (const dir_path& d: ldirs)
+ os << " -L" << escape (reloc_path (d, "library search"));
// Now process ourselves as if we were being linked to something (so
// pretty similar to link_rule::append_libraries()). We also reuse
@@ -1668,7 +1742,8 @@ namespace build2
appended_libraries* pls; // Previous.
appended_libraries* ls; // Current.
strings& args;
- } d {os, nullptr, &ls, args};
+ bool common;
+ } d {os, nullptr, &ls, args, common};
auto imp = [&priv] (const target&, bool la) {return priv && la;};
@@ -1712,7 +1787,17 @@ namespace build2
if (l != nullptr)
{
if (l->is_a<libs> () || l->is_a<liba> ()) // See through libux.
- d.args.push_back (save_library_target (*l));
+ {
+ // Omit binless libraries from the common .pc file (see
+ // above).
+ //
+ // Note that in this case we still want to recursively
+ // traverse such libraries since they may still link to some
+ // non-binless system libraries (-lm, etc).
+ //
+ if (!d.common || !l->path ().empty ())
+ d.args.push_back (save_library_target (*l));
+ }
}
else
{
@@ -1734,7 +1819,7 @@ namespace build2
//@@ TODO: should we filter -L similar to -I?
//@@ TODO: how will the Libs/Libs.private work?
- //@@ TODO: remember to use escape()
+ //@@ TODO: remember to use reloc_*() and escape().
if (d.pls != nullptr && d.pls->find (l) != nullptr)
return true;
@@ -1755,7 +1840,10 @@ namespace build2
library_cache lib_cache;
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0, // Link flags.
- imp, lib, opt, !binless /* self */, &lib_cache);
+ imp, lib, opt,
+ !binless /* self */,
+ false /* proc_opt_group */, // @@ !priv?
+ &lib_cache);
for (const string& a: args)
os << ' ' << a;
@@ -1777,11 +1865,325 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0, // Link flags.
- imp, lib, opt, false /* self */, &lib_cache);
+ imp, lib, opt,
+ false /* self */,
+ false /* proc_opt_group */, // @@ !priv?
+ &lib_cache);
for (const string& a: args)
os << ' ' << a;
os << endl;
+
+ // See also bin.whole below.
+ }
+ }
+
+ // Save metadata unless this is the common .pc file (see above).
+ //
+ if (common)
+ {
+ os.close ();
+ arm.cancel ();
+ return;
+ }
+
+ // The build2.metadata variable is a general indication of the
+ // metadata being present. Its value is the metadata version
+ // optionally followed by the user metadata variable prefix and
+ // variable list (see below for details). Having only the version
+ // indicates the absence of user metadata.
+ //
+ // See if we have the user metadata.
+ //
+ lookup um (g[ctx.var_export_metadata]); // Target visibility.
+
+ if (um && !um->empty ())
+ {
+ const names& ns (cast<names> (um));
+
+ // First verify the version.
+ //
+ uint64_t ver;
+ try
+ {
+ // Note: does not change the passed name.
+ //
+ ver = value_traits<uint64_t>::convert (
+ ns[0], ns[0].pair ? &ns[1] : nullptr);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "invalid metadata version in library " << g << ": " << e;
+ }
+
+ if (ver != 1)
+ fail << "unexpected metadata version " << ver << " in library "
+ << g;
+
+ // Next verify the metadata variable prefix.
+ //
+ if (ns.size () != 2 || !ns[1].simple ())
+ fail << "invalid metadata variable prefix in library " << g;
+
+ const string& pfx (ns[1].value);
+
+ // Now find all the target-specific variables with this prefix.
+ //
+ // If this is the common .pc file, then we only look in the group.
+ // Otherwise, in the member and the group.
+ //
+ // To allow setting different values for the for-install and
+ // development build cases (required when a library comes with
+ // additional "assets"), we recognize the special .for_install
+ // variable name suffix: if there are both <prefix>.<name> and
+ // <prefix>.<name>.for_install variables, then here we take the
+ // value from the latter. Note that we don't consider just
+ // <prefix>.for_install as special (so it's available to the user).
+ //
+ // We only expect a handful of variables so let's use a vector and
+ // linear search instead of a map.
+ //
+ struct binding
+ {
+ const string* name; // Name to be saved (without .for_install).
+ const variable* var; // Actual variable (potentially .for_install).
+ const value* val; // Actual value.
+ };
+ vector<binding> vars;
+
+ auto append = [&l, &pfx, &vars,
+ tmp = string ()] (const target& t, bool dup) mutable
+ {
+ for (auto p (t.vars.lookup_namespace (pfx));
+ p.first != p.second;
+ ++p.first)
+ {
+ const variable* var (&p.first->first.get ());
+
+ // Handle .for_install.
+ //
+ // The plan is as follows: if this is .for_install, then just
+ // verify we also have the value without the suffix and skip
+ // it. Otherwise, check if there is also the .for_install variant
+ // and if so, use that instead. While we could probably do this
+ // more efficiently by remembering what we saw in vars, this is
+ // not performance-sensitive and so we keep it simple for now.
+ //
+ const string* name;
+ {
+ const string& v (var->name);
+ size_t n (v.size ());
+
+ if (n > pfx.size () + 1 + 12 && // <prefix>..for_install
+ v.compare (n - 12, 12, ".for_install") == 0)
+ {
+ tmp.assign (v, 0, n - 12);
+
+ if (t.vars.find (tmp) == t.vars.end ())
+ fail << v << " variant without " << tmp << " in library "
+ << l;
+
+ continue;
+ }
+ else
+ {
+ name = &v;
+
+ tmp = v; tmp += ".for_install";
+
+ auto i (t.vars.find (tmp));
+ if (i != t.vars.end ())
+ var = &i->first.get ();
+ }
+ }
+
+ if (dup)
+ {
+ if (find_if (vars.begin (), vars.end (),
+ [name] (const binding& p)
+ {
+ return *p.name == *name;
+ }) != vars.end ())
+ continue;
+ }
+
+ // Re-lookup the value in order to apply target type/pattern
+ // specific prepends/appends.
+ //
+ lookup l (t[*var]);
+ assert (l.defined ());
+
+ vars.push_back (binding {name, var, l.value});
+ }
+ };
+
+ append (g, false);
+
+ if (!common)
+ {
+ if (l.group != nullptr)
+ append (*l.group, true);
+ }
+
+ // First write the build2.metadata variable with the version,
+ // prefix, and all the variable names/types (which should not
+ // require any escaping).
+ //
+ os << endl
+ << "build2.metadata = " << ver << ' ' << pfx;
+
+ for (const binding& b: vars)
+ {
+ const variable& var (*b.var);
+ const value& val (*b.val);
+
+ // There is no notion of NULL in pkg-config variables and it's
+ // probably best not to conflate them with empty.
+ //
+ if (val.null)
+ fail << "null value in exported variable " << var
+ << " of library " << l;
+
+ if (val.type == nullptr)
+ fail << "untyped value in exported variable " << var
+ << " of library " << l;
+
+ // Tighten this to only a sensible subset of types (see
+ // parsing/serialization code for some of the potential problems).
+ //
+ if (!metadata_type (val.type->name).first)
+ fail << "unsupported value type " << val.type->name
+ << " in exported variable " << var << " of library " << l;
+
+ os << " \\" << endl
+ << *b.name << '/' << val.type->name;
+ }
+
+ os << endl
+ << endl;
+
+ // Now the variables themselves.
+ //
+ string s; // Reuse the buffer.
+ for (const binding& b: vars)
+ {
+ const variable& var (*b.var);
+ const value& val (*b.val);
+
+ names ns;
+ names_view nv (reverse (val, ns, true /* reduce */));
+
+ os << *b.name << " =";
+
+ auto append = [&rel_base,
+ &reloc_path,
+ &reloc_dir_path,
+ &l, &var, &val, &s] (const name& v)
+ {
+ // If this is absolute path or dir_path, then attempt to
+ // relocate. Without that the result will not be relocatable.
+ //
+ if (v.simple ())
+ {
+ path p;
+ if (!rel_base.empty () &&
+ val.type != nullptr &&
+ (val.type->is_a<path> () || val.type->is_a<paths> ()) &&
+ (p = path (v.value)).absolute ())
+ {
+ p.normalize ();
+ s += reloc_path (p, var.name.c_str ());
+ }
+ else
+ s += v.value;
+ }
+ else if (v.directory ())
+ {
+ if (!rel_base.empty () && v.dir.absolute ())
+ {
+ dir_path p (v.dir);
+ p.normalize ();
+ s += reloc_dir_path (p, var.name.c_str ());
+ }
+ else
+ s += v.dir.representation ();
+ }
+ else
+ // It seems like we shouldn't end up here due to the type
+ // check but let's keep it for good measure.
+ //
+ fail << "simple or directory value expected instead of '"
+ << v << "' in exported variable " << var << " of library "
+ << l;
+ };
+
+ for (auto i (nv.begin ()); i != nv.end (); ++i)
+ {
+ s.clear ();
+ append (*i);
+
+ if (i->pair)
+ {
+ // @@ What if the value contains the pair character? Maybe
+ // quote the halves in this case? Note: need to handle in
+ // parse_metadata() above if enable here. Note: none of the
+ // types currently allowed use pairs.
+#if 0
+ s += i->pair;
+ append (*++i);
+#else
+ fail << "pair in exported variable " << var << " of library "
+ << l;
+#endif
+ }
+
+ os << ' ' << escape (s);
+ }
+
+ os << endl;
+ }
+ }
+ else
+ {
+ // No user metadata.
+ //
+ os << endl
+ << "build2.metadata = 1" << endl;
+ }
+
+ // Save cc.type (see init() for the format documentation).
+ //
+ // Note that this value is set by link_rule and therefore should
+ // be there.
+ //
+ {
+ const string& t (
+ cast<string> (
+ l.state[a].lookup_original (
+ c_type, true /* target_only */).first));
+
+ // If common, then only save the language (the rest could be
+ // static/shared-specific; strictly speaking even the language could
+ // be, but that seems far fetched).
+ //
+ os << endl
+ << "cc.type = " << (common ? string (t, 0, t.find (',')) : t)
+ << endl;
+ }
+
+ // Save the bin.whole (whole archive) flag (see the link rule for
+ // details on the lookup semantics).
+ //
+ if (la)
+ {
+ // Note: go straight for the public variable pool.
+ //
+ if (cast_false<bool> (l.lookup_original (
+ ctx.var_pool["bin.whole"],
+ true /* target_only */).first))
+ {
+ os << endl
+ << "bin.whole = true" << endl;
}
}
@@ -1838,7 +2240,7 @@ namespace build2
const target* mt (nullptr);
for (const target* t: pt->prerequisite_targets[a])
{
- if ((mt = t->is_a (*x_mod)))
+ if (t != nullptr && (mt = t->is_a (*x_mod)))
break;
}
@@ -1888,7 +2290,7 @@ namespace build2
if (size_t n = mods.size ())
{
os << endl
- << "cxx_modules =";
+ << "cxx.modules =";
// The partition separator (`:`) is not a valid character in the
// variable name. In fact, from the pkg-config source we can see
@@ -1906,33 +2308,35 @@ namespace build2
// Module names shouldn't require escaping.
//
os << (n != 1 ? " \\\n" : " ")
- << m.name << '=' << escape (m.file.string ());
+ << m.name << '='
+ << escape (reloc_path (m.file, "module interface"));
}
os << endl;
// Module-specific properties. The format is:
//
- // <lang>_module_<property>.<module> = <value>
+ // <lang>.module_<property>.<module> = <value>
//
for (const module& m: mods)
{
if (!m.preprocessed.empty ())
- os << "cxx_module_preprocessed." << m.name << " = "
+ os << "cxx.module_preprocessed." << m.name << " = "
<< m.preprocessed << endl;
if (m.symexport)
- os << "cxx_module_symexport." << m.name << " = true" << endl;
+ os << "cxx.module_symexport." << m.name << " = true" << endl;
}
}
if (size_t n = c_hdrs.size ())
{
os << endl
- << "c_importable_headers =";
+ << "c.importable_headers =";
for (const path& h: c_hdrs)
- os << (n != 1 ? " \\\n" : " ") << escape (h.string ());
+ os << (n != 1 ? " \\\n" : " ")
+ << escape (reloc_path (h, "header unit"));
os << endl;
}
@@ -1940,10 +2344,11 @@ namespace build2
if (size_t n = x_hdrs.size ())
{
os << endl
- << x << "_importable_headers =";
+ << x << ".importable_headers =";
for (const path& h: x_hdrs)
- os << (n != 1 ? " \\\n" : " ") << escape (h.string ());
+ os << (n != 1 ? " \\\n" : " ")
+ << escape (reloc_path (h, "header unit"));
os << endl;
}
diff --git a/libbuild2/cc/pkgconfig.hxx b/libbuild2/cc/pkgconfig.hxx
new file mode 100644
index 0000000..a1bcdee
--- /dev/null
+++ b/libbuild2/cc/pkgconfig.hxx
@@ -0,0 +1,129 @@
+// file : libbuild2/cc/pkgconfig.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CC_PKGCONFIG_HXX
+#define LIBBUILD2_CC_PKGCONFIG_HXX
+
+// In order not to complicate the bootstrap procedure with libpkg-config
+// building, exclude functionality that involves reading of .pc files.
+//
+#ifndef BUILD2_BOOTSTRAP
+
+#ifndef BUILD2_LIBPKGCONF
+# include <libpkg-config/pkg-config.h>
+#else
+# include <libpkgconf/libpkgconf.h>
+#endif
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ // Load package information from a .pc file. Filter out the -I/-L options
+ // that refer to system directories. This makes sure all the system search
+ // directories are "pushed" to the back which minimizes the chances of
+ // picking up wrong (e.g., old installed version) header/library.
+ //
+ // Note that the prerequisite package .pc files search order is as
+ // follows:
+ //
+ // - in the directory of the specified file
+ // - in pc_dirs directories (in the specified order)
+ //
+ // Issue diagnostics and throw failed on any errors.
+ //
+ class pkgconfig
+ {
+ public:
+ using path_type = build2::path;
+
+ path_type path;
+
+ public:
+ pkgconfig (path_type,
+ const dir_paths& pc_dirs,
+ const dir_paths& sys_hdr_dirs,
+ const dir_paths& sys_lib_dirs);
+
+ // Create an unloaded/empty object. Querying package information on such
+ // an object is illegal.
+ //
+ pkgconfig () = default;
+ ~pkgconfig ();
+
+ // Movable-only type.
+ //
+ pkgconfig (pkgconfig&&) noexcept;
+ pkgconfig& operator= (pkgconfig&&) noexcept;
+
+ pkgconfig (const pkgconfig&) = delete;
+ pkgconfig& operator= (const pkgconfig&) = delete;
+
+ strings
+ cflags (bool static_) const;
+
+ strings
+ libs (bool static_) const;
+
+ optional<string>
+ variable (const char*) const;
+
+ optional<string>
+ variable (const string& s) const {return variable (s.c_str ());}
+
+ private:
+ void
+ free ();
+
+#ifndef BUILD2_LIBPKGCONF
+ pkg_config_client_t* client_ = nullptr;
+ pkg_config_pkg_t* pkg_ = nullptr;
+#else
+ pkgconf_client_t* client_ = nullptr;
+ pkgconf_pkg_t* pkg_ = nullptr;
+#endif
+ };
+
+ inline pkgconfig::
+ ~pkgconfig ()
+ {
+ if (client_ != nullptr) // Not empty.
+ free ();
+ }
+
+ inline pkgconfig::
+ pkgconfig (pkgconfig&& p) noexcept
+ : path (move (p.path)),
+ client_ (p.client_),
+ pkg_ (p.pkg_)
+ {
+ p.client_ = nullptr;
+ p.pkg_ = nullptr;
+ }
+
+ inline pkgconfig& pkgconfig::
+ operator= (pkgconfig&& p) noexcept
+ {
+ if (this != &p)
+ {
+ if (client_ != nullptr) // Not empty.
+ free ();
+
+ path = move (p.path);
+ client_ = p.client_;
+ pkg_ = p.pkg_;
+
+ p.client_ = nullptr;
+ p.pkg_ = nullptr;
+ }
+ return *this;
+ }
+ }
+}
+
+#endif // BUILD2_BOOTSTRAP
+
+#endif // LIBBUILD2_CC_PKGCONFIG_HXX
diff --git a/libbuild2/cc/target.cxx b/libbuild2/cc/target.cxx
index b17e1ef..6c5d7c8 100644
--- a/libbuild2/cc/target.cxx
+++ b/libbuild2/cc/target.cxx
@@ -21,11 +21,10 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
extern const char h_ext_def[] = "h";
-
const target_type h::static_type
{
"h",
@@ -36,11 +35,10 @@ namespace build2
&target_pattern_var<h_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
extern const char c_ext_def[] = "c";
-
const target_type c::static_type
{
"c",
@@ -51,11 +49,38 @@ namespace build2
&target_pattern_var<c_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
- extern const char pc_ext[] = "pc"; // VC14 rejects constexpr.
+ extern const char m_ext_def[] = "m";
+ const target_type m::static_type
+ {
+ "m",
+ &cc::static_type,
+ &target_factory<m>,
+ nullptr, /* fixed_extension */
+ &target_extension_var<m_ext_def>,
+ &target_pattern_var<m_ext_def>,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+ extern const char S_ext_def[] = "S";
+ const target_type S::static_type
+ {
+ "S",
+ &cc::static_type,
+ &target_factory<S>,
+ nullptr, /* fixed_extension */
+ &target_extension_var<S_ext_def>,
+ &target_pattern_var<S_ext_def>,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+
+ extern const char pc_ext[] = "pc"; // VC14 rejects constexpr.
const target_type pc::static_type
{
"pc",
@@ -66,11 +91,10 @@ namespace build2
&target_pattern_fix<pc_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
&file_search,
- false
+ target_type::flag::none
};
extern const char pca_ext[] = "static.pc"; // VC14 rejects constexpr.
-
const target_type pca::static_type
{
"pca",
@@ -81,11 +105,10 @@ namespace build2
&target_pattern_fix<pca_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
&file_search,
- false
+ target_type::flag::none
};
extern const char pcs_ext[] = "shared.pc"; // VC14 rejects constexpr.
-
const target_type pcs::static_type
{
"pcs",
@@ -96,7 +119,7 @@ namespace build2
&target_pattern_fix<pcs_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
&file_search,
- false
+ target_type::flag::none
};
}
}
diff --git a/libbuild2/cc/target.hxx b/libbuild2/cc/target.hxx
index 7067421..a078422 100644
--- a/libbuild2/cc/target.hxx
+++ b/libbuild2/cc/target.hxx
@@ -23,11 +23,14 @@ namespace build2
class LIBBUILD2_CC_SYMEXPORT cc: public file
{
public:
- using file::file;
+ cc (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const = 0;
};
// There is hardly a c-family compilation without a C header inclusion.
@@ -36,11 +39,14 @@ namespace build2
class LIBBUILD2_CC_SYMEXPORT h: public cc
{
public:
- using cc::cc;
+ h (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// This one we define in cc but the target type is only registered by the
@@ -52,11 +58,46 @@ namespace build2
class LIBBUILD2_CC_SYMEXPORT c: public cc
{
public:
- using cc::cc;
+ c (context& ctx, dir_path d, dir_path o, string n)
+ : cc (ctx, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // Objective-C source file (the same rationale for having it here as for
+ // c{} above).
+ //
+ class LIBBUILD2_CC_SYMEXPORT m: public cc
+ {
+ public:
+ m (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // Assembler with C preprocessor source file (the same rationale for
+ // having it here as for c{} above).
+ //
+ class LIBBUILD2_CC_SYMEXPORT S: public cc
+ {
+ public:
+ S (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// pkg-config file targets.
@@ -64,31 +105,40 @@ namespace build2
class LIBBUILD2_CC_SYMEXPORT pc: public file // .pc (common)
{
public:
- using file::file;
+ pc (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_CC_SYMEXPORT pca: public pc // .static.pc
{
public:
- using pc::pc;
+ pca (context& c, dir_path d, dir_path o, string n)
+ : pc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_CC_SYMEXPORT pcs: public pc // .shared.pc
{
public:
- using pc::pc;
+ pcs (context& c, dir_path d, dir_path o, string n)
+ : pc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
diff --git a/libbuild2/cc/types.cxx b/libbuild2/cc/types.cxx
index 8ee4fa9..c6cfae9 100644
--- a/libbuild2/cc/types.cxx
+++ b/libbuild2/cc/types.cxx
@@ -6,6 +6,7 @@
#include <libbuild2/cc/utility.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -123,6 +124,8 @@ namespace build2
size_t importable_headers::
insert_angle_pattern (const dir_paths& sys_hdr_dirs, const string& pat)
{
+ tracer trace ("importable_headers::insert_angle_pattern");
+
assert (pat.front () == '<' && pat.back () == '>' && path_pattern (pat));
// First see if it has already been inserted.
@@ -172,7 +175,17 @@ namespace build2
try
{
- path_search (f, process, dir);
+ path_search (
+ f,
+ process,
+ dir,
+ path_match_flags::follow_symlinks,
+ [&trace] (const dir_entry& de)
+ {
+ l5 ([&]{trace << "skipping inaccessible/dangling entry "
+ << de.base () / de.path ();});
+ return true;
+ });
}
catch (const system_error& e)
{
diff --git a/libbuild2/cc/types.hxx b/libbuild2/cc/types.hxx
index c5b35f5..93f31bc 100644
--- a/libbuild2/cc/types.hxx
+++ b/libbuild2/cc/types.hxx
@@ -175,6 +175,10 @@ namespace build2
const target_type& bmi;
const target_type& hbmi;
};
+
+ // "Unhide" operator<< from the build2 namespace.
+ //
+ using build2::operator<<;
}
}
diff --git a/libbuild2/cc/utility.cxx b/libbuild2/cc/utility.cxx
index ffe3e03..e02f85a 100644
--- a/libbuild2/cc/utility.cxx
+++ b/libbuild2/cc/utility.cxx
@@ -3,10 +3,6 @@
#include <libbuild2/cc/utility.hxx>
-#include <libbuild2/file.hxx>
-
-using namespace std;
-
namespace build2
{
namespace cc
@@ -17,58 +13,5 @@ namespace build2
const dir_path module_build_dir (dir_path (module_dir) /= "build");
const dir_path module_build_modules_dir (
dir_path (module_build_dir) /= "modules");
-
- void
- normalize_header (path& f)
- {
- // Interestingly, on most paltforms and with most compilers (Clang on
- // Linux being a notable exception) most system/compiler headers are
- // already normalized.
- //
- path_abnormality a (f.abnormalities ());
- if (a != path_abnormality::none)
- {
- // While we can reasonably expect this path to exit, things do go
- // south from time to time (like compiling under wine with file
- // wlantypes.h included as WlanTypes.h).
- //
- try
- {
- // If we have any parent components, then we have to verify the
- // normalized path matches realized.
- //
- path r;
- if ((a & path_abnormality::parent) == path_abnormality::parent)
- {
- r = f;
- r.realize ();
- }
-
- try
- {
- f.normalize ();
-
- // Note that we might still need to resolve symlinks in the
- // normalized path.
- //
- if (!r.empty () && f != r && path (f).realize () != r)
- f = move (r);
- }
- catch (const invalid_path&)
- {
- assert (!r.empty ()); // Shouldn't have failed if no `..`.
- f = move (r); // Fallback to realize.
- }
- }
- catch (const invalid_path&)
- {
- fail << "invalid header path '" << f.string () << "'";
- }
- catch (const system_error& e)
- {
- fail << "invalid header path '" << f.string () << "': " << e;
- }
- }
- }
}
}
diff --git a/libbuild2/cc/utility.hxx b/libbuild2/cc/utility.hxx
index 42e53e3..6ba4a20 100644
--- a/libbuild2/cc/utility.hxx
+++ b/libbuild2/cc/utility.hxx
@@ -9,6 +9,7 @@
#include <libbuild2/utility.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/filesystem.hxx>
#include <libbuild2/bin/target.hxx>
#include <libbuild2/bin/utility.hxx>
@@ -51,29 +52,11 @@ namespace build2
// Normalize an absolute path to an existing header.
//
- // We used to just normalize the path but that could result in an invalid
- // path (e.g., for some system/compiler headers on CentOS 7 with Clang
- // 3.4) because of the symlinks (if a directory component is a symlink,
- // then any following `..` are resolved relative to the target; see
- // path::normalize() for background).
- //
- // Initially, to fix this, we realized (i.e., realpath(3)) it instead.
- // But that turned out also not to be quite right since now we have all
- // the symlinks resolved: conceptually it feels correct to keep the
- // original header names since that's how the user chose to arrange things
- // and practically this is how the compilers see/report them (e.g., the
- // GCC module mapper).
- //
- // So now we have a pretty elaborate scheme where we try to use the
- // normalized path if possible and fallback to realized. Normalized paths
- // will work for situations where `..` does not cross symlink boundaries,
- // which is the sane case. And for the insane case we only really care
- // about out-of-project files (i.e., system/compiler headers). In other
- // words, if you have the insane case inside your project, then you are on
- // your own.
- //
- void
- normalize_header (path&);
+ inline void
+ normalize_header (path& f)
+ {
+ normalize_external (f, "header");
+ }
}
}
diff --git a/libbuild2/cc/windows-rpath.cxx b/libbuild2/cc/windows-rpath.cxx
index 2d90ace..bd5a928 100644
--- a/libbuild2/cc/windows-rpath.cxx
+++ b/libbuild2/cc/windows-rpath.cxx
@@ -128,7 +128,9 @@ namespace build2
library_cache lib_cache;
for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
- if (pt.adhoc || pt == nullptr)
+ // Note: during execute so check for ad hoc first to avoid data races.
+ //
+ if (pt.adhoc () || pt == nullptr)
continue;
bool la;
@@ -139,7 +141,9 @@ namespace build2
( f = pt->is_a<libs> ()))
process_libraries (a, bs, li, sys_lib_dirs,
*f, la, pt.data,
- imp, lib, nullptr, true /* self */,
+ imp, lib, nullptr,
+ true /* self */,
+ false /* proc_opt_group */,
&lib_cache);
}
@@ -253,7 +257,9 @@ namespace build2
library_cache lib_cache;
for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
- if (pt.adhoc || pt == nullptr)
+ // Note: during execute so check for ad hoc first to avoid data races.
+ //
+ if (pt.adhoc () || pt == nullptr)
continue;
bool la;
@@ -264,7 +270,9 @@ namespace build2
( f = pt->is_a<libs> ()))
process_libraries (a, bs, li, sys_lib_dirs,
*f, la, pt.data,
- imp, lib, nullptr, true /* self */,
+ imp, lib, nullptr,
+ true /* self */,
+ false /* proc_opt_group */,
&lib_cache);
}
@@ -361,11 +369,16 @@ namespace build2
// of the same amalgamation. This way if the amalgamation is moved
// as a whole, the links will remain valid.
//
+ // Note: mkanylink() is from libbutl and thus doesn't handle the
+ // dry-run mode.
+ //
try
{
- switch (mkanylink (f, l,
- true /* copy */,
- f.sub (as.out_path ()) /* relative */))
+ switch (as.ctx.dry_run
+ ? entry_type::symlink
+ : mkanylink (f, l,
+ true /* copy */,
+ f.sub (as.out_path ()) /* relative */))
{
case entry_type::regular: print ("cp"); break;
case entry_type::symlink: print ("ln -s"); break;
diff --git a/libbuild2/cli/buildfile b/libbuild2/cli/buildfile
new file mode 100644
index 0000000..9b6e4eb
--- /dev/null
+++ b/libbuild2/cli/buildfile
@@ -0,0 +1,71 @@
+# file : libbuild2/cli/buildfile
+# license : MIT; see accompanying LICENSE file
+
+# NOTE: shared imports should go into root.build.
+#
+include ../
+impl_libs = ../lib{build2} # Implied interface dependency.
+
+include ../cxx/
+intf_libs = ../cxx/lib{build2-cxx}
+
+./: lib{build2-cli}: libul{build2-cli}: {hxx ixx txx cxx}{** -**.test...} \
+ $intf_libs $impl_libs
+
+# Unit tests.
+#
+exe{*.test}:
+{
+ test = true
+ install = false
+}
+
+for t: cxx{**.test...}
+{
+ d = $directory($t)
+ n = $name($t)...
+
+ ./: $d/exe{$n}: $t $d/{hxx ixx txx}{+$n} $d/testscript{+$n}
+ $d/exe{$n}: libul{build2-cli}: bin.whole = false
+}
+
+# Build options.
+#
+obja{*}: cxx.poptions += -DLIBBUILD2_CLI_STATIC_BUILD
+objs{*}: cxx.poptions += -DLIBBUILD2_CLI_SHARED_BUILD
+
+# Export options.
+#
+lib{build2-cli}:
+{
+ cxx.export.poptions = "-I$out_root" "-I$src_root"
+ cxx.export.libs = $intf_libs
+}
+
+liba{build2-cli}: cxx.export.poptions += -DLIBBUILD2_CLI_STATIC
+libs{build2-cli}: cxx.export.poptions += -DLIBBUILD2_CLI_SHARED
+
+# For pre-releases use the complete version to make sure they cannot be used
+# in place of another pre-release or the final version. See the version module
+# for details on the version.* variable values.
+#
+# And because this is a build system module, we also embed the same value as
+# the interface version (note that we cannot use build.version.interface for
+# bundled modules because we could be built with a different version of the
+# build system).
+#
+ver = ($version.pre_release \
+ ? "$version.project_id" \
+ : "$version.major.$version.minor")
+
+lib{build2-cli}: bin.lib.version = @"-$ver"
+libs{build2-cli}: bin.lib.load_suffix = "-$ver"
+
+# Install into the libbuild2/cli/ subdirectory of, say, /usr/include/
+# recreating subdirectories.
+#
+{hxx ixx txx}{*}:
+{
+ install = include/libbuild2/cli/
+ install.subdirs = true
+}
diff --git a/libbuild2/cli/export.hxx b/libbuild2/cli/export.hxx
new file mode 100644
index 0000000..67c1eb9
--- /dev/null
+++ b/libbuild2/cli/export.hxx
@@ -0,0 +1,37 @@
+// file : libbuild2/cli/export.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#pragma once
+
+// Normally we don't export class templates (but do complete specializations),
+// inline functions, and classes with only inline member functions. Exporting
+// classes that inherit from non-exported/imported bases (e.g., std::string)
+// will end up badly. The only known workarounds are to not inherit or to not
+// export. Also, MinGW GCC doesn't like seeing non-exported functions being
+// used before their inline definition. The workaround is to reorder code. In
+// the end it's all trial and error.
+
+#if defined(LIBBUILD2_CLI_STATIC) // Using static.
+# define LIBBUILD2_CLI_SYMEXPORT
+#elif defined(LIBBUILD2_CLI_STATIC_BUILD) // Building static.
+# define LIBBUILD2_CLI_SYMEXPORT
+#elif defined(LIBBUILD2_CLI_SHARED) // Using shared.
+# ifdef _WIN32
+# define LIBBUILD2_CLI_SYMEXPORT __declspec(dllimport)
+# else
+# define LIBBUILD2_CLI_SYMEXPORT
+# endif
+#elif defined(LIBBUILD2_CLI_SHARED_BUILD) // Building shared.
+# ifdef _WIN32
+# define LIBBUILD2_CLI_SYMEXPORT __declspec(dllexport)
+# else
+# define LIBBUILD2_CLI_SYMEXPORT
+# endif
+#else
+// If none of the above macros are defined, then we assume we are being used
+// by some third-party build system that cannot/doesn't signal the library
+// type. Note that this fallback works for both static and shared but in case
+// of shared will be sub-optimal compared to having dllimport.
+//
+# define LIBBUILD2_CLI_SYMEXPORT // Using static or shared.
+#endif
diff --git a/build2/cli/init.cxx b/libbuild2/cli/init.cxx
index eadf32c..581fdaf 100644
--- a/build2/cli/init.cxx
+++ b/libbuild2/cli/init.cxx
@@ -1,7 +1,7 @@
-// file : build2/cli/init.cxx -*- C++ -*-
+// file : libbuild2/cli/init.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <build2/cli/init.hxx>
+#include <libbuild2/cli/init.hxx>
#include <libbuild2/file.hxx>
#include <libbuild2/scope.hxx>
@@ -13,9 +13,9 @@
#include <libbuild2/cxx/target.hxx>
-#include <build2/cli/rule.hxx>
-#include <build2/cli/module.hxx>
-#include <build2/cli/target.hxx>
+#include <libbuild2/cli/rule.hxx>
+#include <libbuild2/cli/module.hxx>
+#include <libbuild2/cli/target.hxx>
namespace build2
{
@@ -72,7 +72,9 @@ namespace build2
// Enter metadata variables.
//
- auto& vp (rs.var_pool ());
+ // They are all qualified so go straight for the public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
auto& v_ver (vp.insert<string> ("cli.version"));
auto& v_sum (vp.insert<string> ("cli.checksum"));
@@ -84,7 +86,7 @@ namespace build2
// module unconfigured.
//
bool new_cfg (false);
- pair<const exe*, import_kind> ir (
+ import_result<exe> ir (
import_direct<exe> (
new_cfg,
rs,
@@ -95,7 +97,7 @@ namespace build2
loc,
"module load"));
- const exe* tgt (ir.first);
+ const exe* tgt (ir.target);
// Extract metadata.
//
@@ -125,7 +127,7 @@ namespace build2
// The cli variable (untyped) is an imported compiler target name.
//
- rs.assign ("cli") = tgt->as_name ();
+ rs.assign ("cli") = move (ir.name);
rs.assign (v_sum) = *sum;
rs.assign (v_ver) = *ver;
@@ -246,6 +248,9 @@ namespace build2
// Register our rules.
//
+ // Other rules (e.g., cc::compile) may need to have the group members
+ // resolved/linked up. Looks like a general pattern: groups should
+ // resolve on *(update).
{
auto reg = [&rs, &m] (meta_operation_id mid, operation_id oid)
{
@@ -255,17 +260,8 @@ namespace build2
rs.insert_rule<cxx::ixx> (mid, oid, "cli.compile", m);
};
- reg (perform_id, update_id);
- reg (perform_id, clean_id);
-
- // Other rules (e.g., cc::compile) may need to have the group members
- // resolved/linked up. Looks like a general pattern: groups should
- // resolve on *(update).
- //
- // @@ meta-op wildcard?
- //
- reg (configure_id, update_id);
- reg (dist_id, update_id);
+ reg (0 /* wildcard */, update_id);
+ reg (perform_id, clean_id);
}
return true;
diff --git a/build2/cli/init.hxx b/libbuild2/cli/init.hxx
index 1c54316..6d23795 100644
--- a/build2/cli/init.hxx
+++ b/libbuild2/cli/init.hxx
@@ -1,14 +1,16 @@
-// file : build2/cli/init.hxx -*- C++ -*-
+// file : libbuild2/cli/init.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CLI_INIT_HXX
-#define BUILD2_CLI_INIT_HXX
+#ifndef LIBBUILD2_CLI_INIT_HXX
+#define LIBBUILD2_CLI_INIT_HXX
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/module.hxx>
+#include <libbuild2/cli/export.hxx>
+
namespace build2
{
namespace cli
@@ -21,9 +23,9 @@ namespace build2
// `cli.config` -- load `cli.guess` and set the rest of the variables.
// `cli` -- load `cli.config` and register targets and rules.
//
- extern "C" const module_functions*
+ extern "C" LIBBUILD2_CLI_SYMEXPORT const module_functions*
build2_cli_load ();
}
}
-#endif // BUILD2_CLI_INIT_HXX
+#endif // LIBBUILD2_CLI_INIT_HXX
diff --git a/build2/cli/module.hxx b/libbuild2/cli/module.hxx
index 70f6ba8..ba10540 100644
--- a/build2/cli/module.hxx
+++ b/libbuild2/cli/module.hxx
@@ -1,15 +1,15 @@
-// file : build2/cli/module.hxx -*- C++ -*-
+// file : libbuild2/cli/module.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CLI_MODULE_HXX
-#define BUILD2_CLI_MODULE_HXX
+#ifndef LIBBUILD2_CLI_MODULE_HXX
+#define LIBBUILD2_CLI_MODULE_HXX
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/module.hxx>
-#include <build2/cli/rule.hxx>
+#include <libbuild2/cli/rule.hxx>
namespace build2
{
@@ -27,4 +27,4 @@ namespace build2
}
}
-#endif // BUILD2_CLI_MODULE_HXX
+#endif // LIBBUILD2_CLI_MODULE_HXX
diff --git a/build2/cli/rule.cxx b/libbuild2/cli/rule.cxx
index 99b6bee..996ca51 100644
--- a/build2/cli/rule.cxx
+++ b/libbuild2/cli/rule.cxx
@@ -1,7 +1,7 @@
-// file : build2/cli/rule.cxx -*- C++ -*-
+// file : libbuild2/cli/rule.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <build2/cli/rule.hxx>
+#include <libbuild2/cli/rule.hxx>
#include <libbuild2/depdb.hxx>
#include <libbuild2/scope.hxx>
@@ -11,7 +11,7 @@
#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
-#include <build2/cli/target.hxx>
+#include <libbuild2/cli/target.hxx>
namespace build2
{
@@ -41,7 +41,7 @@ namespace build2
}
bool compile_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t) const
{
tracer trace ("cli::compile_rule::match");
@@ -122,7 +122,9 @@ namespace build2
if (g == nullptr)
g = &t.ctx.targets.insert<cli_cxx> (t.dir, t.out, t.name, trace);
- g->prerequisites (prerequisites {p->as_prerequisite ()});
+ prerequisites ps;
+ ps.push_back (p->as_prerequisite ());
+ g->prerequisites (move (ps));
}
}
@@ -181,7 +183,7 @@ namespace build2
else
{
const cli_cxx& g (xt.group->as<cli_cxx> ());
- build2::match (a, g);
+ match_sync (a, g);
return group_recipe; // Execute the group's recipe.
}
}
@@ -220,6 +222,8 @@ namespace build2
const cli_cxx& t (xt.as<cli_cxx> ());
const path& tp (t.h->path ());
+ context& ctx (t.ctx);
+
// Update prerequisites and determine if any relevant ones render us
// out-of-date. Note that currently we treat all the prerequisites as
// potentially affecting the result (think prologues/epilogues, CLI
@@ -321,11 +325,11 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "cli " << s;
+ print_diag ("cli", s, t);
- if (!t.ctx.dry_run)
+ if (!ctx.dry_run)
{
- run (pp, args);
+ run (ctx, pp, args, 1 /* finish_verbosity */);
dd.check_mtime (tp);
}
diff --git a/build2/cli/rule.hxx b/libbuild2/cli/rule.hxx
index b3ecc2c..0132b44 100644
--- a/build2/cli/rule.hxx
+++ b/libbuild2/cli/rule.hxx
@@ -1,14 +1,16 @@
-// file : build2/cli/rule.hxx -*- C++ -*-
+// file : libbuild2/cli/rule.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CLI_RULE_HXX
-#define BUILD2_CLI_RULE_HXX
+#ifndef LIBBUILD2_CLI_RULE_HXX
+#define LIBBUILD2_CLI_RULE_HXX
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/rule.hxx>
+#include <libbuild2/cli/export.hxx>
+
namespace build2
{
namespace cli
@@ -23,13 +25,14 @@ namespace build2
// @@ Redo as two separate rules?
//
- class compile_rule: public simple_rule, virtual data
+ class LIBBUILD2_CLI_SYMEXPORT compile_rule: public simple_rule,
+ private virtual data
{
public:
compile_rule (data&& d): data (move (d)) {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -40,4 +43,4 @@ namespace build2
}
}
-#endif // BUILD2_CLI_RULE_HXX
+#endif // LIBBUILD2_CLI_RULE_HXX
diff --git a/build2/cli/target.cxx b/libbuild2/cli/target.cxx
index ca16044..22ae75c 100644
--- a/build2/cli/target.cxx
+++ b/libbuild2/cli/target.cxx
@@ -1,7 +1,7 @@
-// file : build2/cli/target.cxx -*- C++ -*-
+// file : libbuild2/cli/target.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <build2/cli/target.hxx>
+#include <libbuild2/cli/target.hxx>
#include <libbuild2/context.hxx>
@@ -23,7 +23,7 @@ namespace build2
&target_pattern_var<cli_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
// cli.cxx
@@ -69,7 +69,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- true // "See through" default iteration mode.
+ target_type::flag::see_through // Group with "see through" iteration.
};
}
}
diff --git a/libbuild2/cli/target.hxx b/libbuild2/cli/target.hxx
new file mode 100644
index 0000000..8efb837
--- /dev/null
+++ b/libbuild2/cli/target.hxx
@@ -0,0 +1,61 @@
+// file : libbuild2/cli/target.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CLI_TARGET_HXX
+#define LIBBUILD2_CLI_TARGET_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/target.hxx>
+
+#include <libbuild2/cxx/target.hxx>
+
+#include <libbuild2/cli/export.hxx>
+
+namespace build2
+{
+ namespace cli
+ {
+ class LIBBUILD2_CLI_SYMEXPORT cli: public file
+ {
+ public:
+ cli (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // Standard layout type compatible with group_view's const target*[3].
+ //
+ struct cli_cxx_members
+ {
+ const cxx::hxx* h = nullptr;
+ const cxx::cxx* c = nullptr;
+ const cxx::ixx* i = nullptr;
+ };
+
+ class LIBBUILD2_CLI_SYMEXPORT cli_cxx: public mtime_target,
+ public cli_cxx_members
+ {
+ public:
+ cli_cxx (context& c, dir_path d, dir_path o, string n)
+ : mtime_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ virtual group_view
+ group_members (action) const override;
+
+ public:
+ static const target_type static_type;
+ };
+ }
+}
+
+#endif // LIBBUILD2_CLI_TARGET_HXX
diff --git a/libbuild2/common-options.cxx b/libbuild2/common-options.cxx
new file mode 100644
index 0000000..03e7e60
--- /dev/null
+++ b/libbuild2/common-options.cxx
@@ -0,0 +1,809 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+#include <libbuild2/common-options.hxx>
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+#include <utility>
+#include <ostream>
+#include <sstream>
+#include <cstring>
+#include <fstream>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ // unknown_option
+ //
+ unknown_option::
+ ~unknown_option () noexcept
+ {
+ }
+
+ void unknown_option::
+ print (::std::ostream& os) const
+ {
+ os << "unknown option '" << option ().c_str () << "'";
+ }
+
+ const char* unknown_option::
+ what () const noexcept
+ {
+ return "unknown option";
+ }
+
+ // unknown_argument
+ //
+ unknown_argument::
+ ~unknown_argument () noexcept
+ {
+ }
+
+ void unknown_argument::
+ print (::std::ostream& os) const
+ {
+ os << "unknown argument '" << argument ().c_str () << "'";
+ }
+
+ const char* unknown_argument::
+ what () const noexcept
+ {
+ return "unknown argument";
+ }
+
+ // missing_value
+ //
+ missing_value::
+ ~missing_value () noexcept
+ {
+ }
+
+ void missing_value::
+ print (::std::ostream& os) const
+ {
+ os << "missing value for option '" << option ().c_str () << "'";
+ }
+
+ const char* missing_value::
+ what () const noexcept
+ {
+ return "missing option value";
+ }
+
+ // invalid_value
+ //
+ invalid_value::
+ ~invalid_value () noexcept
+ {
+ }
+
+ void invalid_value::
+ print (::std::ostream& os) const
+ {
+ os << "invalid value '" << value ().c_str () << "' for option '"
+ << option ().c_str () << "'";
+
+ if (!message ().empty ())
+ os << ": " << message ().c_str ();
+ }
+
+ const char* invalid_value::
+ what () const noexcept
+ {
+ return "invalid option value";
+ }
+
+ // eos_reached
+ //
+ void eos_reached::
+ print (::std::ostream& os) const
+ {
+ os << what ();
+ }
+
+ const char* eos_reached::
+ what () const noexcept
+ {
+ return "end of argument stream reached";
+ }
+
+ // file_io_failure
+ //
+ file_io_failure::
+ ~file_io_failure () noexcept
+ {
+ }
+
+ void file_io_failure::
+ print (::std::ostream& os) const
+ {
+ os << "unable to open file '" << file ().c_str () << "' or read failure";
+ }
+
+ const char* file_io_failure::
+ what () const noexcept
+ {
+ return "unable to open file or read failure";
+ }
+
+ // unmatched_quote
+ //
+ unmatched_quote::
+ ~unmatched_quote () noexcept
+ {
+ }
+
+ void unmatched_quote::
+ print (::std::ostream& os) const
+ {
+ os << "unmatched quote in argument '" << argument ().c_str () << "'";
+ }
+
+ const char* unmatched_quote::
+ what () const noexcept
+ {
+ return "unmatched quote";
+ }
+
+ // scanner
+ //
+ scanner::
+ ~scanner ()
+ {
+ }
+
+ // argv_scanner
+ //
+ bool argv_scanner::
+ more ()
+ {
+ return i_ < argc_;
+ }
+
+ const char* argv_scanner::
+ peek ()
+ {
+ if (i_ < argc_)
+ return argv_[i_];
+ else
+ throw eos_reached ();
+ }
+
+ const char* argv_scanner::
+ next ()
+ {
+ if (i_ < argc_)
+ {
+ const char* r (argv_[i_]);
+
+ if (erase_)
+ {
+ for (int i (i_ + 1); i < argc_; ++i)
+ argv_[i - 1] = argv_[i];
+
+ --argc_;
+ argv_[argc_] = 0;
+ }
+ else
+ ++i_;
+
+ ++start_position_;
+ return r;
+ }
+ else
+ throw eos_reached ();
+ }
+
+ void argv_scanner::
+ skip ()
+ {
+ if (i_ < argc_)
+ {
+ ++i_;
+ ++start_position_;
+ }
+ else
+ throw eos_reached ();
+ }
+
+ std::size_t argv_scanner::
+ position ()
+ {
+ return start_position_;
+ }
+
+ // vector_scanner
+ //
+ bool vector_scanner::
+ more ()
+ {
+ return i_ < v_.size ();
+ }
+
+ const char* vector_scanner::
+ peek ()
+ {
+ if (i_ < v_.size ())
+ return v_[i_].c_str ();
+ else
+ throw eos_reached ();
+ }
+
+ const char* vector_scanner::
+ next ()
+ {
+ if (i_ < v_.size ())
+ return v_[i_++].c_str ();
+ else
+ throw eos_reached ();
+ }
+
+ void vector_scanner::
+ skip ()
+ {
+ if (i_ < v_.size ())
+ ++i_;
+ else
+ throw eos_reached ();
+ }
+
+ std::size_t vector_scanner::
+ position ()
+ {
+ return start_position_ + i_;
+ }
+
+ // argv_file_scanner
+ //
+ int argv_file_scanner::zero_argc_ = 0;
+ std::string argv_file_scanner::empty_string_;
+
+ bool argv_file_scanner::
+ more ()
+ {
+ if (!args_.empty ())
+ return true;
+
+ while (base::more ())
+ {
+ // See if the next argument is the file option.
+ //
+ const char* a (base::peek ());
+ const option_info* oi = 0;
+ const char* ov = 0;
+
+ if (!skip_)
+ {
+ if ((oi = find (a)) != 0)
+ {
+ base::next ();
+
+ if (!base::more ())
+ throw missing_value (a);
+
+ ov = base::next ();
+ }
+ else if (std::strncmp (a, "-", 1) == 0)
+ {
+ if ((ov = std::strchr (a, '=')) != 0)
+ {
+ std::string o (a, 0, ov - a);
+ if ((oi = find (o.c_str ())) != 0)
+ {
+ base::next ();
+ ++ov;
+ }
+ }
+ }
+ }
+
+ if (oi != 0)
+ {
+ if (oi->search_func != 0)
+ {
+ std::string f (oi->search_func (ov, oi->arg));
+
+ if (!f.empty ())
+ load (f);
+ }
+ else
+ load (ov);
+
+ if (!args_.empty ())
+ return true;
+ }
+ else
+ {
+ if (!skip_)
+ skip_ = (std::strcmp (a, "--") == 0);
+
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ const char* argv_file_scanner::
+ peek ()
+ {
+ if (!more ())
+ throw eos_reached ();
+
+ return args_.empty () ? base::peek () : args_.front ().value.c_str ();
+ }
+
+ const std::string& argv_file_scanner::
+ peek_file ()
+ {
+ if (!more ())
+ throw eos_reached ();
+
+ return args_.empty () ? empty_string_ : *args_.front ().file;
+ }
+
+ std::size_t argv_file_scanner::
+ peek_line ()
+ {
+ if (!more ())
+ throw eos_reached ();
+
+ return args_.empty () ? 0 : args_.front ().line;
+ }
+
+ const char* argv_file_scanner::
+ next ()
+ {
+ if (!more ())
+ throw eos_reached ();
+
+ if (args_.empty ())
+ return base::next ();
+ else
+ {
+ hold_[i_ == 0 ? ++i_ : --i_].swap (args_.front ().value);
+ args_.pop_front ();
+ ++start_position_;
+ return hold_[i_].c_str ();
+ }
+ }
+
+ void argv_file_scanner::
+ skip ()
+ {
+ if (!more ())
+ throw eos_reached ();
+
+ if (args_.empty ())
+ return base::skip ();
+ else
+ {
+ args_.pop_front ();
+ ++start_position_;
+ }
+ }
+
+ const argv_file_scanner::option_info* argv_file_scanner::
+ find (const char* a) const
+ {
+ for (std::size_t i (0); i < options_count_; ++i)
+ if (std::strcmp (a, options_[i].option) == 0)
+ return &options_[i];
+
+ return 0;
+ }
+
+ std::size_t argv_file_scanner::
+ position ()
+ {
+ return start_position_;
+ }
+
+ void argv_file_scanner::
+ load (const std::string& file)
+ {
+ using namespace std;
+
+ ifstream is (file.c_str ());
+
+ if (!is.is_open ())
+ throw file_io_failure (file);
+
+ files_.push_back (file);
+
+ arg a;
+ a.file = &*files_.rbegin ();
+
+ for (a.line = 1; !is.eof (); ++a.line)
+ {
+ string line;
+ getline (is, line);
+
+ if (is.fail () && !is.eof ())
+ throw file_io_failure (file);
+
+ string::size_type n (line.size ());
+
+ // Trim the line from leading and trailing whitespaces.
+ //
+ if (n != 0)
+ {
+ const char* f (line.c_str ());
+ const char* l (f + n);
+
+ const char* of (f);
+ while (f < l && (*f == ' ' || *f == '\t' || *f == '\r'))
+ ++f;
+
+ --l;
+
+ const char* ol (l);
+ while (l > f && (*l == ' ' || *l == '\t' || *l == '\r'))
+ --l;
+
+ if (f != of || l != ol)
+ line = f <= l ? string (f, l - f + 1) : string ();
+ }
+
+ // Ignore empty lines, those that start with #.
+ //
+ if (line.empty () || line[0] == '#')
+ continue;
+
+ string::size_type p (string::npos);
+ if (line.compare (0, 1, "-") == 0)
+ {
+ p = line.find (' ');
+
+ string::size_type q (line.find ('='));
+ if (q != string::npos && q < p)
+ p = q;
+ }
+
+ string s1;
+ if (p != string::npos)
+ {
+ s1.assign (line, 0, p);
+
+ // Skip leading whitespaces in the argument.
+ //
+ if (line[p] == '=')
+ ++p;
+ else
+ {
+ n = line.size ();
+ for (++p; p < n; ++p)
+ {
+ char c (line[p]);
+ if (c != ' ' && c != '\t' && c != '\r')
+ break;
+ }
+ }
+ }
+ else if (!skip_)
+ skip_ = (line == "--");
+
+ string s2 (line, p != string::npos ? p : 0);
+
+ // If the string (which is an option value or argument) is
+ // wrapped in quotes, remove them.
+ //
+ n = s2.size ();
+ char cf (s2[0]), cl (s2[n - 1]);
+
+ if (cf == '"' || cf == '\'' || cl == '"' || cl == '\'')
+ {
+ if (n == 1 || cf != cl)
+ throw unmatched_quote (s2);
+
+ s2 = string (s2, 1, n - 2);
+ }
+
+ if (!s1.empty ())
+ {
+ // See if this is another file option.
+ //
+ const option_info* oi;
+ if (!skip_ && (oi = find (s1.c_str ())))
+ {
+ if (s2.empty ())
+ throw missing_value (oi->option);
+
+ if (oi->search_func != 0)
+ {
+ string f (oi->search_func (s2.c_str (), oi->arg));
+ if (!f.empty ())
+ load (f);
+ }
+ else
+ {
+ // If the path of the file being parsed is not simple and the
+ // path of the file that needs to be loaded is relative, then
+ // complete the latter using the former as a base.
+ //
+#ifndef _WIN32
+ string::size_type p (file.find_last_of ('/'));
+ bool c (p != string::npos && s2[0] != '/');
+#else
+ string::size_type p (file.find_last_of ("/\\"));
+ bool c (p != string::npos && s2[1] != ':');
+#endif
+ if (c)
+ s2.insert (0, file, 0, p + 1);
+
+ load (s2);
+ }
+
+ continue;
+ }
+
+ a.value = s1;
+ args_.push_back (a);
+ }
+
+ a.value = s2;
+ args_.push_back (a);
+ }
+ }
+
+ template <typename X>
+ struct parser
+ {
+ static void
+ parse (X& x, bool& xs, scanner& s)
+ {
+ using namespace std;
+
+ const char* o (s.next ());
+ if (s.more ())
+ {
+ string v (s.next ());
+ istringstream is (v);
+ if (!(is >> x && is.peek () == istringstream::traits_type::eof ()))
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<bool>
+ {
+ static void
+ parse (bool& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <>
+ struct parser<std::string>
+ {
+ static void
+ parse (std::string& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ x = s.next ();
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X>
+ struct parser<std::pair<X, std::size_t> >
+ {
+ static void
+ parse (std::pair<X, std::size_t>& x, bool& xs, scanner& s)
+ {
+ x.second = s.position ();
+ parser<X>::parse (x.first, xs, s);
+ }
+ };
+
+ template <typename X>
+ struct parser<std::vector<X> >
+ {
+ static void
+ parse (std::vector<X>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.push_back (x);
+ xs = true;
+ }
+ };
+
+ template <typename X, typename C>
+ struct parser<std::set<X, C> >
+ {
+ static void
+ parse (std::set<X, C>& c, bool& xs, scanner& s)
+ {
+ X x;
+ bool dummy;
+ parser<X>::parse (x, dummy, s);
+ c.insert (x);
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::map<K, V, C> >
+ {
+ static void
+ parse (std::map<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m[k] = v;
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
+ template <typename X, typename T, T X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, s);
+ }
+
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
+ template <typename X, typename T, T X::*M, bool X::*S>
+ void
+ thunk (X& x, scanner& s)
+ {
+ parser<T>::parse (x.*M, x.*S, s);
+ }
+ }
+ }
+}
+
+#include <map>
+
+namespace build2
+{
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
diff --git a/libbuild2/common-options.hxx b/libbuild2/common-options.hxx
new file mode 100644
index 0000000..f90f563
--- /dev/null
+++ b/libbuild2/common-options.hxx
@@ -0,0 +1,484 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+#ifndef LIBBUILD2_COMMON_OPTIONS_HXX
+#define LIBBUILD2_COMMON_OPTIONS_HXX
+
+// Begin prologue.
+//
+#include <libbuild2/export.hxx>
+//
+// End prologue.
+
+#include <list>
+#include <deque>
+#include <vector>
+#include <iosfwd>
+#include <string>
+#include <cstddef>
+#include <exception>
+
+#ifndef CLI_POTENTIALLY_UNUSED
+# if defined(_MSC_VER) || defined(__xlC__)
+# define CLI_POTENTIALLY_UNUSED(x) (void*)&x
+# else
+# define CLI_POTENTIALLY_UNUSED(x) (void)x
+# endif
+#endif
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ class usage_para
+ {
+ public:
+ enum value
+ {
+ none,
+ text,
+ option
+ };
+
+ usage_para (value);
+
+ operator value () const
+ {
+ return v_;
+ }
+
+ private:
+ value v_;
+ };
+
+ class unknown_mode
+ {
+ public:
+ enum value
+ {
+ skip,
+ stop,
+ fail
+ };
+
+ unknown_mode (value);
+
+ operator value () const
+ {
+ return v_;
+ }
+
+ private:
+ value v_;
+ };
+
+ // Exceptions.
+ //
+
+ class LIBBUILD2_SYMEXPORT exception: public std::exception
+ {
+ public:
+ virtual void
+ print (::std::ostream&) const = 0;
+ };
+
+ ::std::ostream&
+ operator<< (::std::ostream&, const exception&);
+
+ class LIBBUILD2_SYMEXPORT unknown_option: public exception
+ {
+ public:
+ virtual
+ ~unknown_option () noexcept;
+
+ unknown_option (const std::string& option);
+
+ const std::string&
+ option () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string option_;
+ };
+
+ class LIBBUILD2_SYMEXPORT unknown_argument: public exception
+ {
+ public:
+ virtual
+ ~unknown_argument () noexcept;
+
+ unknown_argument (const std::string& argument);
+
+ const std::string&
+ argument () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string argument_;
+ };
+
+ class LIBBUILD2_SYMEXPORT missing_value: public exception
+ {
+ public:
+ virtual
+ ~missing_value () noexcept;
+
+ missing_value (const std::string& option);
+
+ const std::string&
+ option () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string option_;
+ };
+
+ class LIBBUILD2_SYMEXPORT invalid_value: public exception
+ {
+ public:
+ virtual
+ ~invalid_value () noexcept;
+
+ invalid_value (const std::string& option,
+ const std::string& value,
+ const std::string& message = std::string ());
+
+ const std::string&
+ option () const;
+
+ const std::string&
+ value () const;
+
+ const std::string&
+ message () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string option_;
+ std::string value_;
+ std::string message_;
+ };
+
+ class LIBBUILD2_SYMEXPORT eos_reached: public exception
+ {
+ public:
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+ };
+
+ class LIBBUILD2_SYMEXPORT file_io_failure: public exception
+ {
+ public:
+ virtual
+ ~file_io_failure () noexcept;
+
+ file_io_failure (const std::string& file);
+
+ const std::string&
+ file () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string file_;
+ };
+
+ class LIBBUILD2_SYMEXPORT unmatched_quote: public exception
+ {
+ public:
+ virtual
+ ~unmatched_quote () noexcept;
+
+ unmatched_quote (const std::string& argument);
+
+ const std::string&
+ argument () const;
+
+ virtual void
+ print (::std::ostream&) const;
+
+ virtual const char*
+ what () const noexcept;
+
+ private:
+ std::string argument_;
+ };
+
+ // Command line argument scanner interface.
+ //
+ // The values returned by next() are guaranteed to be valid
+ // for the two previous arguments up until a call to a third
+ // peek() or next().
+ //
+ // The position() function returns a monotonically-increasing
+ // number which, if stored, can later be used to determine the
+ // relative position of the argument returned by the following
+ // call to next(). Note that if multiple scanners are used to
+ // extract arguments from multiple sources, then the end
+ // position of the previous scanner should be used as the
+ // start position of the next.
+ //
+ class LIBBUILD2_SYMEXPORT scanner
+ {
+ public:
+ virtual
+ ~scanner ();
+
+ virtual bool
+ more () = 0;
+
+ virtual const char*
+ peek () = 0;
+
+ virtual const char*
+ next () = 0;
+
+ virtual void
+ skip () = 0;
+
+ virtual std::size_t
+ position () = 0;
+ };
+
+ class LIBBUILD2_SYMEXPORT argv_scanner: public scanner
+ {
+ public:
+ argv_scanner (int& argc,
+ char** argv,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ argv_scanner (int start,
+ int& argc,
+ char** argv,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ int
+ end () const;
+
+ virtual bool
+ more ();
+
+ virtual const char*
+ peek ();
+
+ virtual const char*
+ next ();
+
+ virtual void
+ skip ();
+
+ virtual std::size_t
+ position ();
+
+ protected:
+ std::size_t start_position_;
+ int i_;
+ int& argc_;
+ char** argv_;
+ bool erase_;
+ };
+
+ class LIBBUILD2_SYMEXPORT vector_scanner: public scanner
+ {
+ public:
+ vector_scanner (const std::vector<std::string>&,
+ std::size_t start = 0,
+ std::size_t start_position = 0);
+
+ std::size_t
+ end () const;
+
+ void
+ reset (std::size_t start = 0, std::size_t start_position = 0);
+
+ virtual bool
+ more ();
+
+ virtual const char*
+ peek ();
+
+ virtual const char*
+ next ();
+
+ virtual void
+ skip ();
+
+ virtual std::size_t
+ position ();
+
+ private:
+ std::size_t start_position_;
+ const std::vector<std::string>& v_;
+ std::size_t i_;
+ };
+
+ class LIBBUILD2_SYMEXPORT argv_file_scanner: public argv_scanner
+ {
+ public:
+ argv_file_scanner (int& argc,
+ char** argv,
+ const std::string& option,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ argv_file_scanner (int start,
+ int& argc,
+ char** argv,
+ const std::string& option,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ argv_file_scanner (const std::string& file,
+ const std::string& option,
+ std::size_t start_position = 0);
+
+ struct option_info
+ {
+ // If search_func is not NULL, it is called, with the arg
+ // value as the second argument, to locate the options file.
+ // If it returns an empty string, then the file is ignored.
+ //
+ const char* option;
+ std::string (*search_func) (const char*, void* arg);
+ void* arg;
+ };
+
+ argv_file_scanner (int& argc,
+ char** argv,
+ const option_info* options,
+ std::size_t options_count,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ argv_file_scanner (int start,
+ int& argc,
+ char** argv,
+ const option_info* options,
+ std::size_t options_count,
+ bool erase = false,
+ std::size_t start_position = 0);
+
+ argv_file_scanner (const std::string& file,
+ const option_info* options = 0,
+ std::size_t options_count = 0,
+ std::size_t start_position = 0);
+
+ virtual bool
+ more ();
+
+ virtual const char*
+ peek ();
+
+ virtual const char*
+ next ();
+
+ virtual void
+ skip ();
+
+ virtual std::size_t
+ position ();
+
+ // Return the file path if the peeked at argument came from a file and
+ // the empty string otherwise. The reference is guaranteed to be valid
+ // till the end of the scanner lifetime.
+ //
+ const std::string&
+ peek_file ();
+
+ // Return the 1-based line number if the peeked at argument came from
+ // a file and zero otherwise.
+ //
+ std::size_t
+ peek_line ();
+
+ private:
+ const option_info*
+ find (const char*) const;
+
+ void
+ load (const std::string& file);
+
+ typedef argv_scanner base;
+
+ const std::string option_;
+ option_info option_info_;
+ const option_info* options_;
+ std::size_t options_count_;
+
+ struct arg
+ {
+ std::string value;
+ const std::string* file;
+ std::size_t line;
+ };
+
+ std::deque<arg> args_;
+ std::list<std::string> files_;
+
+ // Circular buffer of two arguments.
+ //
+ std::string hold_[2];
+ std::size_t i_;
+
+ bool skip_;
+
+ static int zero_argc_;
+ static std::string empty_string_;
+ };
+
+ template <typename X>
+ struct parser;
+ }
+ }
+}
+
+#include <libbuild2/types.hxx>
+
+#include <libbuild2/options-types.hxx>
+
+namespace build2
+{
+}
+
+#include <libbuild2/common-options.ixx>
+
+// Begin epilogue.
+//
+//
+// End epilogue.
+
+#endif // LIBBUILD2_COMMON_OPTIONS_HXX
diff --git a/libbuild2/common-options.ixx b/libbuild2/common-options.ixx
new file mode 100644
index 0000000..1b7b74e
--- /dev/null
+++ b/libbuild2/common-options.ixx
@@ -0,0 +1,312 @@
+// -*- C++ -*-
+//
+// This file was generated by CLI, a command line interface
+// compiler for C++.
+//
+
+// Begin prologue.
+//
+//
+// End prologue.
+
+#include <cassert>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ // usage_para
+ //
+ inline usage_para::
+ usage_para (value v)
+ : v_ (v)
+ {
+ }
+
+ // unknown_mode
+ //
+ inline unknown_mode::
+ unknown_mode (value v)
+ : v_ (v)
+ {
+ }
+
+ // exception
+ //
+ inline ::std::ostream&
+ operator<< (::std::ostream& os, const exception& e)
+ {
+ e.print (os);
+ return os;
+ }
+
+ // unknown_option
+ //
+ inline unknown_option::
+ unknown_option (const std::string& option)
+ : option_ (option)
+ {
+ }
+
+ inline const std::string& unknown_option::
+ option () const
+ {
+ return option_;
+ }
+
+ // unknown_argument
+ //
+ inline unknown_argument::
+ unknown_argument (const std::string& argument)
+ : argument_ (argument)
+ {
+ }
+
+ inline const std::string& unknown_argument::
+ argument () const
+ {
+ return argument_;
+ }
+
+ // missing_value
+ //
+ inline missing_value::
+ missing_value (const std::string& option)
+ : option_ (option)
+ {
+ }
+
+ inline const std::string& missing_value::
+ option () const
+ {
+ return option_;
+ }
+
+ // invalid_value
+ //
+ inline invalid_value::
+ invalid_value (const std::string& option,
+ const std::string& value,
+ const std::string& message)
+ : option_ (option),
+ value_ (value),
+ message_ (message)
+ {
+ }
+
+ inline const std::string& invalid_value::
+ option () const
+ {
+ return option_;
+ }
+
+ inline const std::string& invalid_value::
+ value () const
+ {
+ return value_;
+ }
+
+ inline const std::string& invalid_value::
+ message () const
+ {
+ return message_;
+ }
+
+ // file_io_failure
+ //
+ inline file_io_failure::
+ file_io_failure (const std::string& file)
+ : file_ (file)
+ {
+ }
+
+ inline const std::string& file_io_failure::
+ file () const
+ {
+ return file_;
+ }
+
+ // unmatched_quote
+ //
+ inline unmatched_quote::
+ unmatched_quote (const std::string& argument)
+ : argument_ (argument)
+ {
+ }
+
+ inline const std::string& unmatched_quote::
+ argument () const
+ {
+ return argument_;
+ }
+
+ // argv_scanner
+ //
+ inline argv_scanner::
+ argv_scanner (int& argc,
+ char** argv,
+ bool erase,
+ std::size_t sp)
+ : start_position_ (sp + 1),
+ i_ (1),
+ argc_ (argc),
+ argv_ (argv),
+ erase_ (erase)
+ {
+ }
+
+ inline argv_scanner::
+ argv_scanner (int start,
+ int& argc,
+ char** argv,
+ bool erase,
+ std::size_t sp)
+ : start_position_ (sp + static_cast<std::size_t> (start)),
+ i_ (start),
+ argc_ (argc),
+ argv_ (argv),
+ erase_ (erase)
+ {
+ }
+
+ inline int argv_scanner::
+ end () const
+ {
+ return i_;
+ }
+
+ // vector_scanner
+ //
+ inline vector_scanner::
+ vector_scanner (const std::vector<std::string>& v,
+ std::size_t i,
+ std::size_t sp)
+ : start_position_ (sp), v_ (v), i_ (i)
+ {
+ }
+
+ inline std::size_t vector_scanner::
+ end () const
+ {
+ return i_;
+ }
+
+ inline void vector_scanner::
+ reset (std::size_t i, std::size_t sp)
+ {
+ i_ = i;
+ start_position_ = sp;
+ }
+
+ // argv_file_scanner
+ //
+ inline argv_file_scanner::
+ argv_file_scanner (int& argc,
+ char** argv,
+ const std::string& option,
+ bool erase,
+ std::size_t sp)
+ : argv_scanner (argc, argv, erase, sp),
+ option_ (option),
+ options_ (&option_info_),
+ options_count_ (1),
+ i_ (1),
+ skip_ (false)
+ {
+ option_info_.option = option_.c_str ();
+ option_info_.search_func = 0;
+ }
+
+ inline argv_file_scanner::
+ argv_file_scanner (int start,
+ int& argc,
+ char** argv,
+ const std::string& option,
+ bool erase,
+ std::size_t sp)
+ : argv_scanner (start, argc, argv, erase, sp),
+ option_ (option),
+ options_ (&option_info_),
+ options_count_ (1),
+ i_ (1),
+ skip_ (false)
+ {
+ option_info_.option = option_.c_str ();
+ option_info_.search_func = 0;
+ }
+
+ inline argv_file_scanner::
+ argv_file_scanner (const std::string& file,
+ const std::string& option,
+ std::size_t sp)
+ : argv_scanner (0, zero_argc_, 0, sp),
+ option_ (option),
+ options_ (&option_info_),
+ options_count_ (1),
+ i_ (1),
+ skip_ (false)
+ {
+ option_info_.option = option_.c_str ();
+ option_info_.search_func = 0;
+
+ load (file);
+ }
+
+ inline argv_file_scanner::
+ argv_file_scanner (int& argc,
+ char** argv,
+ const option_info* options,
+ std::size_t options_count,
+ bool erase,
+ std::size_t sp)
+ : argv_scanner (argc, argv, erase, sp),
+ options_ (options),
+ options_count_ (options_count),
+ i_ (1),
+ skip_ (false)
+ {
+ }
+
+ inline argv_file_scanner::
+ argv_file_scanner (int start,
+ int& argc,
+ char** argv,
+ const option_info* options,
+ std::size_t options_count,
+ bool erase,
+ std::size_t sp)
+ : argv_scanner (start, argc, argv, erase, sp),
+ options_ (options),
+ options_count_ (options_count),
+ i_ (1),
+ skip_ (false)
+ {
+ }
+
+ inline argv_file_scanner::
+ argv_file_scanner (const std::string& file,
+ const option_info* options,
+ std::size_t options_count,
+ std::size_t sp)
+ : argv_scanner (0, zero_argc_, 0, sp),
+ options_ (options),
+ options_count_ (options_count),
+ i_ (1),
+ skip_ (false)
+ {
+ load (file);
+ }
+ }
+ }
+}
+
+namespace build2
+{
+}
+
+// Begin epilogue.
+//
+//
+// End epilogue.
diff --git a/libbuild2/common.cli b/libbuild2/common.cli
new file mode 100644
index 0000000..86c2ad1
--- /dev/null
+++ b/libbuild2/common.cli
@@ -0,0 +1,9 @@
+// file : libbuild2/common.cli
+// license : MIT; see accompanying LICENSE file
+
+include <libbuild2/types.hxx>;
+include <libbuild2/options-types.hxx>;
+
+namespace build2
+{
+}
diff --git a/libbuild2/config/functions.cxx b/libbuild2/config/functions.cxx
index 398512c..b1a61a2 100644
--- a/libbuild2/config/functions.cxx
+++ b/libbuild2/config/functions.cxx
@@ -21,6 +21,58 @@ namespace build2
{
function_family f (m, "config");
+ // $config.origin()
+ //
+ // Return the origin of the value of the specified configuration
+ // variable. Possible result values and their semantics are as follows:
+ //
+ // undefined
+ // The variable is undefined.
+ //
+ // default
+ // The variable has the default value from the config directive (or
+ // as specified by a module).
+ //
+ // buildfile
+ // The variable has the value from a buildfile, normally config.build
+ // but could also be from file(s) specified with config.config.load.
+ //
+ // override
+ // The variable has the command line override value. Note that if
+ // the override happens to be append/prepend, then the value could
+ // incorporate the original value.
+ //
+ // Note that the variable must be specified as a name and not as an
+ // expansion (i.e., without $).
+ //
+ // Note that this function is not pure.
+ //
+ f.insert (".origin", false) += [] (const scope* s, names name)
+ {
+ if (s == nullptr)
+ fail << "config.origin() called out of scope" << endf;
+
+ // Only look in the root scope since that's the only config.*
+ // variables we generally consider.
+ //
+ s = s->root_scope ();
+
+ if (s == nullptr)
+ fail << "config.origin() called out of project" << endf;
+
+ switch (origin (*s, convert<string> (move (name))).first)
+ {
+ case variable_origin::undefined: return "undefined";
+ case variable_origin::default_: return "default";
+ case variable_origin::buildfile: return "buildfile";
+ case variable_origin::override_: return "override";
+ }
+
+ return ""; // Should not reach.
+ };
+
+ // $config.save()
+ //
// Return the configuration file contents as a string, similar to the
// config.config.save variable functionality.
//
@@ -40,7 +92,10 @@ namespace build2
if (s == nullptr)
fail << "config.save() called out of project" << endf;
- module* mod (s->find_module<module> (module::name));
+ // See save_config() for details.
+ //
+ assert (s->ctx.phase == run_phase::load);
+ const module* mod (s->find_module<module> (module::name));
if (mod == nullptr)
fail << "config.save() called without config module";
diff --git a/libbuild2/config/init.cxx b/libbuild2/config/init.cxx
index 87b492c..d42bace 100644
--- a/libbuild2/config/init.cxx
+++ b/libbuild2/config/init.cxx
@@ -39,7 +39,7 @@ namespace build2
save_environment (const value& d, const value* b, names& storage)
{
if (b == nullptr)
- return make_pair (reverse (d, storage), "=");
+ return make_pair (reverse (d, storage, true /* reduce */), "=");
// The plan is to iterator over environment variables adding those that
// are not in base to storage. There is, however, a complication: we may
@@ -100,7 +100,10 @@ namespace build2
// reserved to not be valid module names (`build`). We also currently
// treat `import` as special.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// NOTE: all config.** variables are by default made (via a pattern) to
// be overridable with global visibility. So we must override this if a
@@ -175,10 +178,6 @@ namespace build2
if (!d)
{
- // Used as a variable prefix by configure_execute().
- //
- vp.insert ("config");
-
// Adjust priority for the config module and import pseudo-module so
// that their variables come first in config.build.
//
@@ -238,7 +237,7 @@ namespace build2
? &extra.module_as<module> ()
: nullptr);
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */));
// Note: config.* is pattern-typed to global visibility.
//
@@ -247,6 +246,60 @@ namespace build2
auto& c_v (vp.insert<uint64_t> ("config.version", false /*ovr*/, v_p));
auto& c_l (vp.insert<paths> ("config.config.load", true /* ovr */));
+ // Omit loading the configuration from the config.build file (it is
+ // still loaded from config.config.load if specified). Similar to
+ // config.config.load, only values specified on this project's root
+ // scope and global scope are considered.
+ //
+ // Note that this variable is not saved in config.build and is expected
+ // to always be specified as a command line override.
+ //
+ auto& c_u (vp.insert<bool> ("config.config.unload", true /*ovr*/));
+
+ // Configuration variables to disfigure.
+ //
+ // The exact semantics is to ignore these variables when loading
+ // config.build (and any files specified in config.config.load), letting
+ // them to take on the default values (more precisely, the current
+ // implementation undefined them after loading config.build). See also
+ // config.config.unload.
+ //
+ // Besides names, variables can also be specified as patterns in the
+ // config.<prefix>.(*|**)[<suffix>] form where `*` matches single
+ // component names (i.e., `foo` but not `foo.bar`), and `**` matches
+ // single and multi-component names. Currently only single wildcard (`*`
+ // or `**`) is supported. Additionally, a pattern in the
+ // config.<prefix>(*|**) form (i.e., without `.` after <prefix>) matches
+ // config.<prefix>.(*|**) plus config.<prefix> itself (but not
+ // config.<prefix>foo).
+ //
+ // For example, to disfigure all the project configuration variables
+ // (while preserving all the module configuration variables; note
+ // quoting to prevent pattern expansion):
+ //
+ // b config.config.disfigure="'config.hello**'"
+ //
+ // Note that this variable is not saved in config.build and is expected
+ // to always be specified as a command line override.
+ //
+ // We also had the idea of using NULL values as a more natural way to
+ // undefine a configuration variable, which would only work for non-
+ // nullable variables (such as project configuration variables) or for
+ // those where NULL is the default value (most of the others). However,
+ // this cannot work in our model since we cannot reset a NULL override
+ // to a default value. So setting the variable itself to some special
+ // value does not seem to be an option and we have to convey this in
+ // some other way, such as in config.config.disfigure. Another idea is
+ // to invent a parallel set of variables, such as disfig.*, that can be
+ // used for that (though they would still have to be specified with some
+ // dummy value, for example disfig.hello.fancy=). On the other hand,
+ // this desire to disfigure individual variables does not seem to be
+ // very common (we lived without it for years without noticing). So
+ // it's not clear we need to do something like disfig.* which has a
+ // wiff of hack to it.
+ //
+ auto& c_d (vp.insert<strings> ("config.config.disfigure", true /*ovr*/));
+
// Hermetic configurations.
//
// A hermetic configuration stores environment variables that affect the
@@ -328,9 +381,10 @@ namespace build2
save_null_omitted | save_empty_omitted | save_base,
&save_environment);
- // Load config.build if one exists followed by extra files specified in
- // config.config.load (we don't need to worry about disfigure since we
- // will never be init'ed).
+ // Load config.build if one exists (and unless config.config.unload is
+ // specified) followed by extra files specified in config.config.load
+ // (we don't need to worry about disfigure since we will never be
+ // init'ed).
//
auto load_config = [&rs, &c_v] (istream& is,
const path_name& in,
@@ -375,15 +429,37 @@ namespace build2
auto load_config_file = [&load_config] (const path& f, const location& l)
{
path_name fn (f);
- ifdstream ifs;
- load_config (open_file_or_stdin (fn, ifs), fn, l);
+ try
+ {
+ ifdstream ifs;
+ load_config (open_file_or_stdin (fn, ifs), fn, l);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read buildfile " << fn << ": " << e;
+ }
};
+ // Load config.build unless requested not to.
+ //
{
- path f (config_file (rs));
+ // The same semantics as in config.config.load below.
+ //
+ bool u;
+ {
+ lookup l (rs[c_u]);
+ u = (l &&
+ (l.belongs (rs) || l.belongs (ctx.global_scope)) &&
+ cast_false<bool> (l));
+ }
+
+ if (!u)
+ {
+ path f (config_file (rs));
- if (exists (f))
- load_config_file (f, l);
+ if (exists (f))
+ load_config_file (f, l);
+ }
}
if (lookup l = rs[c_l])
@@ -424,6 +500,117 @@ namespace build2
}
}
+ // Undefine variables specified with config.config.disfigure.
+ //
+ if (const strings* ns = cast_null<strings> (rs[c_d]))
+ {
+ auto p (rs.vars.lookup_namespace ("config"));
+
+ for (auto i (p.first); i != p.second; )
+ {
+ const variable& var (i->first);
+
+ // This can be one of the overrides (__override, __prefix, etc),
+ // which we skip.
+ //
+ if (!var.override ())
+ {
+ bool m (false);
+
+ for (const string& n: *ns)
+ {
+ if (n.compare (0, 7, "config.") != 0)
+ fail << "config.* variable expected in "
+ << "config.config.disfigure instead of '" << n << "'";
+
+ size_t p (n.find ('*'));
+
+ if (p == string::npos)
+ {
+ if ((m = var.name == n))
+ break;
+ }
+ else
+ {
+ // Pattern in one of these forms:
+ //
+ // config.<prefix>.(*|**)[<suffix>]
+ // config.<prefix>(*|**)
+ //
+ // BTW, an alternative way to handle this would be to
+ // translate it to a path and use our path_match() machinery,
+ // similar to how we do it for build config include/exclude.
+ // Perhaps one day when/if we decide to support multiple
+ // wildcards.
+ //
+ if (p == 7)
+ fail << "config.<prefix>* pattern expected in "
+ << "config.config.disfigure instead of '" << n << "'";
+
+ bool r (n[p + 1] == '*'); // Recursive.
+
+ size_t pe; // Prefix end/size.
+ if (n[p - 1] != '.')
+ {
+ // Second form should have no suffix.
+ //
+ if (p + (r ? 2 : 1) != n.size ())
+ fail << "config.<prefix>(*|**) pattern expected in "
+ << "config.config.disfigure instead of '" << n << "'";
+
+ // Match just <prefix>.
+ //
+ if ((m = n.compare (0, p, var.name) == 0))
+ break;
+
+ pe = p;
+ }
+ else
+ pe = p - 1;
+
+ // Match <prefix> followed by `.`.
+ //
+ if (n.compare (0, pe, var.name, 0, pe) != 0 ||
+ var.name[pe] != '.')
+ continue;
+
+ // Match suffix.
+ //
+ size_t sb (p + (r ? 2 : 1)); // Suffix begin.
+ size_t sn (n.size () - sb); // Suffix size.
+
+ size_t te; // Stem end.
+ if (sn == 0) // No suffix.
+ te = var.name.size ();
+ else
+ {
+ if (var.name.size () < pe + 1 + sn) // Too short.
+ continue;
+
+ te = var.name.size () - sn;
+
+ if (n.compare (sb, sn, var.name, te, sn) != 0)
+ continue;
+ }
+
+ // Match stem.
+ //
+ if ((m = r || var.name.find ('.', pe + 1) >= te))
+ break;
+ }
+ }
+
+ if (m)
+ {
+ i = rs.vars.erase (i); // Undefine.
+ continue;
+ }
+ }
+
+ ++i;
+ }
+ }
+
// Save and cache the config.config.persist value, if any.
//
if (m != nullptr)
@@ -525,20 +712,21 @@ namespace build2
// Register alias and fallback rule for the configure meta-operation.
//
- // We need this rule for out-of-any-project dependencies (e.g.,
- // libraries imported from /usr/lib). We are registring it on the
+ // We need this rule for out-of-any-project dependencies (for example,
+ // libraries imported from /usr/lib). We are registering it on the
// global scope similar to builtin rules.
//
+ // See a similar rule in the dist module.
+ //
rs.global_scope ().insert_rule<mtime_target> (
configure_id, 0, "config.file", file_rule::instance);
- //@@ outer
rs.insert_rule<alias> (configure_id, 0, "config.alias", alias_rule::instance);
// This allows a custom configure rule while doing nothing by default.
//
- rs.insert_rule<target> (configure_id, 0, "config", noop_rule::instance);
- rs.insert_rule<file> (configure_id, 0, "config.file", noop_rule::instance);
+ rs.insert_rule<target> (configure_id, 0, "config.noop", noop_rule::instance);
+ rs.insert_rule<file> (configure_id, 0, "config.noop", noop_rule::instance);
return true;
}
diff --git a/libbuild2/config/module.hxx b/libbuild2/config/module.hxx
index 82b79be..8d3ff67 100644
--- a/libbuild2/config/module.hxx
+++ b/libbuild2/config/module.hxx
@@ -160,7 +160,7 @@ namespace build2
save_module (scope&, const char*, int);
const saved_variable*
- find_variable (const variable& var)
+ find_variable (const variable& var) const
{
auto i (saved_modules.find_sup (var.name));
if (i != saved_modules.end ())
diff --git a/libbuild2/config/operation.cxx b/libbuild2/config/operation.cxx
index 5883d8c..b06c29d 100644
--- a/libbuild2/config/operation.cxx
+++ b/libbuild2/config/operation.cxx
@@ -42,7 +42,7 @@ namespace build2
ofs << "# Created automatically by the config module." << endl
<< "#" << endl
<< "src_root = ";
- to_stream (ofs, name (src_root), true /* quote */, '@');
+ to_stream (ofs, name (src_root), quote_mode::normal, '@');
ofs << endl;
ofs.close ();
@@ -61,8 +61,10 @@ namespace build2
path f (src_root / rs.root_extra->out_root_file);
- if (verb)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ if (verb >= 2)
+ text << "cat >" << f;
+ else if (verb)
+ print_diag ("save", f);
try
{
@@ -71,7 +73,7 @@ namespace build2
ofs << "# Created automatically by the config module." << endl
<< "#" << endl
<< "out_root = ";
- to_stream (ofs, name (out_root), true /* quote */, '@');
+ to_stream (ofs, name (out_root), quote_mode::normal, '@');
ofs << endl;
ofs.close ();
@@ -161,11 +163,18 @@ namespace build2
// and this function can be called from a buildfile (probably only
// during serial execution but still).
//
+ // We could also be configuring multiple projects (including from
+ // pkg_configure() in bpkg) but feels like we should be ok since we
+ // only modify this project's root scope data which should not affect
+ // any other project.
+ //
+ // See also save_environment() for a similar issue.
+ //
void
save_config (const scope& rs,
ostream& os, const path_name& on,
bool inherit,
- module& mod,
+ const module& mod,
const project_set& projects)
{
context& ctx (rs.ctx);
@@ -179,7 +188,7 @@ namespace build2
if (v)
{
storage.clear ();
- dr << "'" << reverse (v, storage) << "'";
+ dr << "'" << reverse (v, storage, true /* reduce */) << "'";
}
else
dr << "[null]";
@@ -207,9 +216,11 @@ namespace build2
// saved according to config.config.persist potentially warning if the
// variable would otherwise be dropped.
//
+ // Note: go straight for the public variable pool.
+ //
auto& vp (ctx.var_pool);
- for (auto p (rs.vars.lookup_namespace (*vp.find ("config")));
+ for (auto p (rs.vars.lookup_namespace ("config"));
p.first != p.second;
++p.first)
{
@@ -247,6 +258,24 @@ namespace build2
continue;
}
+ // A common reason behind an unused config.import.* value is an
+ // unused dependency. That is, there is depends in manifest but no
+ // import in buildfile (or import could be conditional in which case
+ // depends should also be conditional). So let's suggest this
+ // possibility. Note that the project name may have been sanitized
+ // to a variable name. Oh, well, better than nothing.
+ //
+ auto info_import = [] (diag_record& dr, const string& var)
+ {
+ if (var.compare (0, 14, "config.import.") == 0)
+ {
+ size_t p (var.find ('.', 14));
+
+ dr << info << "potentially unused dependency on "
+ << string (var, 14, p == string::npos ? p : p - 14);
+ }
+ };
+
const value& v (p.first->second);
pair<bool, bool> r (save_config_variable (*var,
@@ -255,7 +284,7 @@ namespace build2
true /* unused */));
if (r.first) // save
{
- mod.save_variable (*var, 0);
+ const_cast<module&> (mod).save_variable (*var, 0);
if (r.second) // warn
{
@@ -274,6 +303,7 @@ namespace build2
diag_record dr;
dr << warn (on) << "saving no longer used variable " << *var;
+ info_import (dr, var->name);
if (verb >= 2)
info_value (dr, v);
}
@@ -284,6 +314,7 @@ namespace build2
{
diag_record dr;
dr << warn (on) << "dropping no longer used variable " << *var;
+ info_import (dr, var->name);
info_value (dr, v);
}
}
@@ -509,8 +540,8 @@ namespace build2
// Handle the save_default_commented flag.
//
- if ((org.first.defined () && org.first->extra) && // Default value.
- org.first == ovr.first && // Not overriden.
+ if (org.first.defined () && org.first->extra == 1 && // Default.
+ org.first == ovr.first && // No override.
(flags & save_default_commented) != 0)
{
os << first () << '#' << n << " =" << endl;
@@ -527,7 +558,7 @@ namespace build2
pair<names_view, const char*> p (
sv.save != nullptr
? sv.save (v, base, storage)
- : make_pair (reverse (v, storage), "="));
+ : make_pair (reverse (v, storage, true /* reduce */), "="));
// Might becomes empty after a custom save function had at it.
//
@@ -539,7 +570,7 @@ namespace build2
if (!p.first.empty ())
{
os << ' ';
- to_stream (os, p.first, true /* quote */, '@');
+ to_stream (os, p.first, quote_mode::normal, '@');
}
os << endl;
@@ -556,7 +587,7 @@ namespace build2
save_config (const scope& rs,
const path& f,
bool inherit,
- module& mod,
+ const module& mod,
const project_set& projects)
{
path_name fn (f);
@@ -564,8 +595,10 @@ namespace build2
if (f.string () == "-")
fn.name = "<stdout>";
- if (verb)
- text << (verb >= 2 ? "cat >" : "save ") << fn;
+ if (verb >= 2)
+ text << "cat >" << fn;
+ else if (verb)
+ print_diag ("save", fn);
try
{
@@ -582,6 +615,9 @@ namespace build2
// Update config.config.environment value for a hermetic configuration.
//
+ // @@ We are modifying the module. See also save_config() for a similar
+ // issue.
+ //
static void
save_environment (scope& rs, module& mod)
{
@@ -636,6 +672,8 @@ namespace build2
}
}
+ // Note: go straight for the public variable pool.
+ //
value& v (rs.assign (*rs.ctx.var_pool.find ("config.config.environment")));
// Note that setting new config.config.environment value invalidates the
@@ -652,9 +690,9 @@ namespace build2
static void
configure_project (action a,
- scope& rs,
+ const scope& rs,
const variable* c_s, // config.config.save
- module& mod,
+ const module& mod,
project_set& projects)
{
tracer trace ("configure_project");
@@ -674,7 +712,7 @@ namespace build2
//
if (out_root != src_root)
{
- mkdir_p (out_root / rs.root_extra->build_dir);
+ mkdir_p (out_root / rs.root_extra->build_dir, 1);
mkdir (out_root / rs.root_extra->bootstrap_dir, 2);
}
@@ -688,7 +726,7 @@ namespace build2
// for the other half of this logic).
//
if (cast_false<bool> (rs["config.config.hermetic"]))
- save_environment (rs, mod);
+ save_environment (const_cast<scope&> (rs), const_cast<module&> (mod));
// Save src-root.build unless out_root is the same as src.
//
@@ -751,14 +789,14 @@ namespace build2
{
const dir_path& pd (p.second);
dir_path out_nroot (out_root / pd);
- scope& nrs (ctx.scopes.find_out (out_nroot).rw ());
+ const scope& nrs (ctx.scopes.find_out (out_nroot));
// Skip this subproject if it is not loaded or doesn't use the
// config module.
//
if (nrs.out_path () == out_nroot)
{
- if (module* m = nrs.find_module<module> (module::name))
+ if (const module* m = nrs.find_module<module> (module::name))
{
configure_project (a, nrs, c_s, *m, projects);
}
@@ -806,11 +844,13 @@ namespace build2
operation_id (*pre) (const values&, meta_operation_id, const location&);
static operation_id
- configure_operation_pre (const values&, operation_id o)
+ configure_operation_pre (context&, const values&, operation_id o)
{
// Don't translate default to update. In our case unspecified
// means configure everything.
//
+ // Note: see pkg_configure() in bpkg if changing anything here.
+ //
return o;
}
@@ -845,8 +885,10 @@ namespace build2
}
static void
- configure_pre (const values& params, const location& l)
+ configure_pre (context&, const values& params, const location& l)
{
+ // Note: see pkg_configure() in bpkg if changing anything here.
+ //
forward (params, "configure", l); // Validate.
}
@@ -870,7 +912,9 @@ namespace build2
fail (l) << "forwarding to source directory " << rs.src_path ();
}
else
- load (params, rs, buildfile, out_base, src_base, l); // Normal load.
+ // Normal load.
+ //
+ perform_load (params, rs, buildfile, out_base, src_base, l);
}
static void
@@ -890,7 +934,7 @@ namespace build2
ts.push_back (&rs);
}
else
- search (params, rs, bs, bf, tk, l, ts); // Normal search.
+ perform_search (params, rs, bs, bf, tk, l, ts); // Normal search.
}
static void
@@ -910,6 +954,8 @@ namespace build2
context& ctx (fwd ? ts[0].as<scope> ().ctx : ts[0].as<target> ().ctx);
+ // Note: go straight for the public variable pool.
+ //
const variable* c_s (ctx.var_pool.find ("config.config.save"));
if (c_s->overrides == nullptr)
@@ -964,13 +1010,19 @@ namespace build2
ctx.current_operation (*oif);
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, true /* inner */, location ());
+
phase_lock pl (ctx, run_phase::match);
- match (action (configure_id, id), t);
+ match_sync (action (configure_id, id), t);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, true /* inner */);
}
}
configure_project (a,
- rs->rw (),
+ *rs,
c_s,
*rs->find_module<module> (module::name),
projects);
@@ -978,6 +1030,8 @@ namespace build2
}
}
+ // NOTE: see pkg_configure() in bpkg if changing anything here.
+ //
const meta_operation_info mo_configure {
configure_id,
"configure",
@@ -1053,7 +1107,7 @@ namespace build2
}
}
- if (module* m = rs.find_module<module> (module::name))
+ if (const module* m = rs.find_module<module> (module::name))
{
for (auto hook: m->disfigure_pre_)
r = hook (a, rs) || r;
@@ -1152,13 +1206,13 @@ namespace build2
}
static void
- disfigure_pre (const values& params, const location& l)
+ disfigure_pre (context&, const values& params, const location& l)
{
forward (params, "disfigure", l); // Validate.
}
static operation_id
- disfigure_operation_pre (const values&, operation_id o)
+ disfigure_operation_pre (context&, const values&, operation_id o)
{
// Don't translate default to update. In our case unspecified
// means disfigure everything.
@@ -1276,6 +1330,8 @@ namespace build2
// Add the default config.config.persist value unless there is a custom
// one (specified as a command line override).
//
+ // Note: go straight for the public variable pool.
+ //
const variable& var (*ctx.var_pool.find ("config.config.persist"));
if (!rs[var].defined ())
@@ -1392,7 +1448,8 @@ namespace build2
string ("config"), /* config_module */
nullopt, /* config_file */
true, /* buildfile */
- "the create meta-operation");
+ "the create meta-operation",
+ 1 /* verbosity */);
save_config (ctx, d);
}
diff --git a/libbuild2/config/operation.hxx b/libbuild2/config/operation.hxx
index 9e2a91e..1662941 100644
--- a/libbuild2/config/operation.hxx
+++ b/libbuild2/config/operation.hxx
@@ -15,8 +15,8 @@ namespace build2
{
class module;
- extern const meta_operation_info mo_configure;
- extern const meta_operation_info mo_disfigure;
+ LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_configure;
+ LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_disfigure;
const string&
preprocess_create (context&,
@@ -37,7 +37,7 @@ namespace build2
save_config (const scope& rs,
ostream&, const path_name&,
bool inherit,
- module&,
+ const module&,
const project_set&);
// See config.config.hermetic.environment.
diff --git a/libbuild2/config/types.hxx b/libbuild2/config/types.hxx
new file mode 100644
index 0000000..3cdc5e3
--- /dev/null
+++ b/libbuild2/config/types.hxx
@@ -0,0 +1,25 @@
+// file : libbuild2/config/types.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CONFIG_TYPES_HXX
+#define LIBBUILD2_CONFIG_TYPES_HXX
+
+#include <libbuild2/types.hxx>
+
+namespace build2
+{
+ namespace config
+ {
+ // The origin of the value of a configuration variable.
+ //
+ enum class variable_origin
+ {
+ undefined, // Undefined.
+ default_, // Default value from the config directive.
+ buildfile, // Value from a buildfile, normally config.build.
+ override_ // Value from a command line override.
+ };
+ }
+}
+
+#endif // LIBBUILD2_CONFIG_TYPES_HXX
diff --git a/libbuild2/config/utility.cxx b/libbuild2/config/utility.cxx
index 928709a..6574367 100644
--- a/libbuild2/config/utility.cxx
+++ b/libbuild2/config/utility.cxx
@@ -32,7 +32,7 @@ namespace build2
// Treat an inherited value that was set to default as new.
//
- if (l.defined () && l->extra)
+ if (l.defined () && l->extra == 1)
n = true;
if (var.overrides != nullptr)
@@ -81,7 +81,9 @@ namespace build2
const string& n,
initializer_list<const char*> ig)
{
- auto& vp (rs.var_pool ());
+ // Note: go straight for the public variable pool.
+ //
+ auto& vp (rs.ctx.var_pool);
// Search all outer scopes for any value in this namespace.
//
@@ -91,7 +93,7 @@ namespace build2
// any original values, they will be "visible"; see find_override() for
// details.
//
- const variable& ns (vp.insert ("config." + n));
+ const string ns ("config." + n);
for (scope* s (&rs); s != nullptr; s = s->parent_scope ())
{
for (auto p (s->vars.lookup_namespace (ns));
@@ -107,12 +109,12 @@ namespace build2
auto match_tail = [&ns, v] (const char* t)
{
- return v->name.compare (ns.name.size () + 1, string::npos, t) == 0;
+ return v->name.compare (ns.size () + 1, string::npos, t) == 0;
};
// Ignore config.*.configured and user-supplied names.
//
- if (v->name.size () <= ns.name.size () ||
+ if (v->name.size () <= ns.size () ||
(!match_tail ("configured") &&
find_if (ig.begin (), ig.end (), match_tail) == ig.end ()))
return true;
@@ -128,7 +130,7 @@ namespace build2
// Pattern-typed as bool.
//
const variable& var (
- rs.var_pool ().insert ("config." + n + ".configured"));
+ rs.var_pool (true).insert ("config." + n + ".configured"));
save_variable (rs, var);
@@ -142,7 +144,7 @@ namespace build2
// Pattern-typed as bool.
//
const variable& var (
- rs.var_pool ().insert ("config." + n + ".configured"));
+ rs.var_pool (true).insert ("config." + n + ".configured"));
save_variable (rs, var);
@@ -156,5 +158,56 @@ namespace build2
else
return false;
}
+
+ pair<variable_origin, lookup>
+ origin (const scope& rs, const string& n)
+ {
+ // Note: go straight for the public variable pool.
+ //
+ const variable* var (rs.ctx.var_pool.find (n));
+
+ if (var == nullptr)
+ {
+ if (n.compare (0, 7, "config.") != 0)
+ throw invalid_argument ("config.* variable expected");
+
+ return make_pair (variable_origin::undefined, lookup ());
+ }
+
+ return origin (rs, *var);
+ }
+
+ pair<variable_origin, lookup>
+ origin (const scope& rs, const variable& var)
+ {
+ // Make sure this is a config.* variable. This could matter since we
+ // rely on the semantics of value::extra. We could also detect
+ // special variables like config.booted, some config.config.*, etc.,
+ // (see config_save() for details) but that seems harmless.
+ //
+ if (var.name.compare (0, 7, "config.") != 0)
+ throw invalid_argument ("config.* variable expected");
+
+ return origin (rs, var, rs.lookup_original (var));
+ }
+
+ pair<variable_origin, lookup>
+ origin (const scope& rs, const variable& var, pair<lookup, size_t> org)
+ {
+ pair<lookup, size_t> ovr (var.overrides == nullptr
+ ? org
+ : rs.lookup_override (var, org));
+
+ if (!ovr.first.defined ())
+ return make_pair (variable_origin::undefined, lookup ());
+
+ if (org.first != ovr.first)
+ return make_pair (variable_origin::override_, ovr.first);
+
+ return make_pair (org.first->extra == 1
+ ? variable_origin::default_
+ : variable_origin::buildfile,
+ org.first);
+ }
}
}
diff --git a/libbuild2/config/utility.hxx b/libbuild2/config/utility.hxx
index bafcafa..1e2ff53 100644
--- a/libbuild2/config/utility.hxx
+++ b/libbuild2/config/utility.hxx
@@ -11,6 +11,8 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/variable.hxx>
+#include <libbuild2/config/types.hxx>
+
#include <libbuild2/export.hxx>
namespace build2
@@ -58,6 +60,15 @@ namespace build2
{
// Mark a variable to be saved during configuration.
//
+ // Note: the save_*_omitted flags work best when undefined or (one of) the
+ // omitted value(s) is the default (see a note in lookup_config()
+ // documentation for details).
+ //
+ // The below lookup_*() functions mark the default value by setting
+ // value::extra to 1. Note that it's exactly 1 and not "not 0" since other
+ // values could have other meaning (see, for example, package skeleton
+ // in bpkg).
+ //
const uint64_t save_default_commented = 0x01; // Based on value::extra.
const uint64_t save_null_omitted = 0x02; // Treat NULL as undefined.
const uint64_t save_empty_omitted = 0x04; // Treat empty as undefined.
@@ -242,9 +253,6 @@ namespace build2
// expensive. It is also ok to call both versions multiple times provided
// the flags are the same.
//
- // @@ Should save_null_omitted be interpreted to treat null as undefined?
- // Sounds logical.
- //
lookup
lookup_config (scope& rs,
const variable&,
@@ -263,6 +271,8 @@ namespace build2
const string& var,
uint64_t save_flags = 0)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (rs, rs.ctx.var_pool[var], save_flags);
}
@@ -272,6 +282,8 @@ namespace build2
const string& var,
uint64_t save_flags = 0)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (new_value, rs, rs.ctx.var_pool[var], save_flags);
}
@@ -300,8 +312,14 @@ namespace build2
// or from the command line (i.e., it is inherited from the amalgamation),
// then its value is "overridden" to the default value on this root scope.
//
- // @@ Should save_null_omitted be interpreted to treat null as undefined?
- // Sounds logical.
+ // Note that while it may seem logical, these functions do not
+ // "reinterpret" defined values according to the save_*_omitted flags (for
+ // example, by returning the default value if the defined value is NULL
+ // and the save_null_omitted flag is specified). This is because such a
+ // reinterpretation may cause a diversion between the returned value and
+ // the re-queried config.* variable value if the defined value came from
+ // an override. To put it another way, the save_*_omitted flags are purely to
+ // reduce the noise in config.build.
//
template <typename T>
lookup
@@ -353,6 +371,8 @@ namespace build2
uint64_t save_flags = 0,
bool override = false)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (rs,
rs.ctx.var_pool[var],
std::forward<T> (default_value), // VC14
@@ -369,6 +389,8 @@ namespace build2
uint64_t save_flags = 0,
bool override = false)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (new_value,
rs,
rs.ctx.var_pool[var],
@@ -413,7 +435,7 @@ namespace build2
const V* cv (
cast_null<V> (
lookup_config (rs,
- rs.var_pool ().insert<V> ("config." + var),
+ rs.var_pool (true).insert<V> ("config." + var),
std::forward<T> (default_value)))); // VC14
value& v (bs.assign<V> (move (var)));
@@ -431,7 +453,7 @@ namespace build2
const V* cv (
cast_null<V> (
lookup_config (rs,
- rs.var_pool ().insert<V> ("config." + var),
+ rs.var_pool (true).insert<V> ("config." + var),
std::forward<T> (default_value)))); // VC14
value& v (bs.append<V> (move (var)));
@@ -494,6 +516,25 @@ namespace build2
//
LIBBUILD2_SYMEXPORT bool
unconfigured (scope& rs, const string& var, bool value);
+
+ // Return the origin of the value of the specified configuration variable
+ // plus the value itself. See $config.origin() for details.
+ //
+ // Throws invalid_argument if the passed variable is not config.*.
+ //
+ LIBBUILD2_SYMEXPORT pair<variable_origin, lookup>
+ origin (const scope& rs, const string& name);
+
+ LIBBUILD2_SYMEXPORT pair<variable_origin, lookup>
+ origin (const scope& rs, const variable&);
+
+ // As above but using the result of scope::lookup_original() or
+ // semantically equivalent (e.g., lookup_namespace()).
+ //
+ // Note that this version does not check that the variable is config.*.
+ //
+ LIBBUILD2_SYMEXPORT pair<variable_origin, lookup>
+ origin (const scope& rs, const variable&, pair<lookup, size_t> original);
}
}
diff --git a/libbuild2/config/utility.txx b/libbuild2/config/utility.txx
index b88f76c..71e41fd 100644
--- a/libbuild2/config/utility.txx
+++ b/libbuild2/config/utility.txx
@@ -58,7 +58,7 @@ namespace build2
if (!l.defined () || (def_ovr && !l.belongs (rs)))
{
value& v (rs.assign (var) = std::forward<T> (def_val)); // VC14
- v.extra = true; // Default value flag.
+ v.extra = 1; // Default value flag.
n = (sflags & save_default_commented) == 0; // Absence means default.
l = lookup (v, var, rs);
@@ -66,7 +66,7 @@ namespace build2
}
// Treat an inherited value that was set to default as new.
//
- else if (l->extra)
+ else if (l->extra == 1)
n = (sflags & save_default_commented) == 0; // Absence means default.
if (var.overrides != nullptr)
diff --git a/libbuild2/context.cxx b/libbuild2/context.cxx
index 36b70b8..c0442f0 100644
--- a/libbuild2/context.cxx
+++ b/libbuild2/context.cxx
@@ -45,6 +45,7 @@ namespace build2
scope_map scopes;
target_set targets;
variable_pool var_pool;
+ variable_patterns var_patterns;
variable_overrides var_overrides;
function_map functions;
@@ -52,32 +53,267 @@ namespace build2
variable_override_cache global_override_cache;
strings global_var_overrides;
- data (context& c): scopes (c), targets (c), var_pool (&c /* global */) {}
+ data (context& c)
+ : scopes (c),
+ targets (c),
+ var_pool (&c /* shared */, nullptr /* outer */, &var_patterns),
+ var_patterns (&c /* shared */, &var_pool) {}
};
+ void context::
+ reserve (reserves res)
+ {
+ assert (phase == run_phase::load);
+
+ if (res.targets != 0)
+ data_->targets.map_.reserve (res.targets);
+
+ if (res.variables != 0)
+ data_->var_pool.map_.reserve (res.variables);
+ }
+
+ pair<char, variable_override> context::
+ parse_variable_override (const string& s, size_t i, bool buildspec)
+ {
+ istringstream is (s);
+ is.exceptions (istringstream::failbit | istringstream::badbit);
+
+ // Similar to buildspec we do "effective escaping" of the special `'"\$(`
+ // characters (basically what's escapable inside a double-quoted literal
+ // plus the single quote; note, however, that we exclude line
+ // continuations and `)` since they would make directory paths on Windows
+ // unusable).
+ //
+ path_name in ("<cmdline>");
+ lexer l (is, in, 1 /* line */, "\'\"\\$(");
+
+ // At the buildfile level the scope-specific variable should be separated
+ // from the directory with a whitespace, for example:
+ //
+ // ./ foo=$bar
+ //
+ // However, requiring this for command line variables would be too
+ // inconvenient so we support both.
+ //
+ // We also have the optional visibility modifier as a first character of
+ // the variable name:
+ //
+ // ! - global
+ // % - project
+ // / - scope
+ //
+ // The last one clashes a bit with the directory prefix:
+ //
+ // ./ /foo=bar
+ // .//foo=bar
+ //
+ // But that's probably ok (the need for a scope-qualified override with
+ // scope visibility should be pretty rare). Note also that to set the
+ // value on the global scope we use !.
+ //
+ // And so the first token should be a word which can be either a variable
+ // name (potentially with the directory qualification) or just the
+ // directory, in which case it should be followed by another word
+ // (unqualified variable name). To avoid treating any of the visibility
+ // modifiers as special we use the cmdvar mode.
+ //
+ l.mode (lexer_mode::cmdvar);
+ token t (l.next ());
+
+ optional<dir_path> dir;
+ if (t.type == token_type::word)
+ {
+ string& v (t.value);
+ size_t p (path::traits_type::rfind_separator (v));
+
+ if (p != string::npos && p != 0) // If first then visibility.
+ {
+ if (p == v.size () - 1)
+ {
+ // Separate directory.
+ //
+ dir = dir_path (move (v));
+ t = l.next ();
+
+ // Target-specific overrides are not yet supported (and probably
+ // never will be; the beast is already complex enough).
+ //
+ if (t.type == token_type::colon)
+ {
+ diag_record dr (fail);
+
+ dr << "'" << s << "' is a target-specific override";
+
+ if (buildspec)
+ dr << info << "use double '--' to treat this argument as "
+ << "buildspec";
+ }
+ }
+ else
+ {
+ // Combined directory.
+ //
+ // If double separator (visibility marker), then keep the first in
+ // name.
+ //
+ if (p != 0 && path::traits_type::is_separator (v[p - 1]))
+ --p;
+
+ dir = dir_path (t.value, 0, p + 1); // Include the separator.
+ t.value.erase (0, p + 1); // Erase the separator.
+ }
+
+ if (dir->relative ())
+ {
+ // Handle the special relative to base scope case (.../).
+ //
+ auto i (dir->begin ());
+
+ if (*i == "...")
+ dir = dir_path (++i, dir->end ()); // Note: can become empty.
+ else
+ dir->complete (); // Relative to CWD.
+ }
+
+ if (dir->absolute ())
+ dir->normalize ();
+ }
+ }
+
+ token_type tt (l.next ().type);
+
+ // The token should be the variable name followed by =, +=, or =+.
+ //
+ if (t.type != token_type::word || t.value.empty () ||
+ (tt != token_type::assign &&
+ tt != token_type::prepend &&
+ tt != token_type::append))
+ {
+ diag_record dr (fail);
+
+ dr << "expected variable assignment instead of '" << s << "'";
+
+ if (buildspec)
+ dr << info << "use double '--' to treat this argument as buildspec";
+ }
+
+ // Take care of the visibility. Note that here we rely on the fact that
+ // none of these characters are lexer's name separators.
+ //
+ char c (t.value[0]);
+
+ if (path::traits_type::is_separator (c))
+ c = '/'; // Normalize.
+
+ string n (t.value, c == '!' || c == '%' || c == '/' ? 1 : 0);
+
+ // Make sure it is qualified.
+ //
+ // We can support overridable public unqualified variables (which must
+ // all be pre-entered by the end of this constructor) but we will need
+ // to detect their names here in an ad hoc manner (we cannot enter them
+ // before this logic because of the "untyped override" requirement).
+ //
+ // Note: issue the same diagnostics as in variable_pool::update().
+ //
+ if (n.find ('.') == string::npos)
+ fail << "variable " << n << " cannot be overridden";
+
+ if (c == '!' && dir)
+ fail << "scope-qualified global override of variable " << n;
+
+ // Pre-enter the main variable. Note that we rely on all the overridable
+ // variables with global visibility to be known (either entered or
+ // handled via a pattern) at this stage.
+ //
+ variable_pool& vp (data_->var_pool);
+ variable& var (
+ const_cast<variable&> (vp.insert (n, true /* overridable */)));
+
+ const variable* o;
+ {
+ variable_visibility v (c == '/' ? variable_visibility::scope :
+ c == '%' ? variable_visibility::project :
+ variable_visibility::global);
+
+ const char* k (tt == token_type::assign ? "__override" :
+ tt == token_type::append ? "__suffix" : "__prefix");
+
+ unique_ptr<variable> p (
+ new variable {
+ n + '.' + to_string (i + 1) + '.' + k,
+ &vp /* owner */,
+ nullptr /* aliases */,
+ nullptr /* type */,
+ nullptr /* overrides */,
+ v});
+
+ // Back link.
+ //
+ p->aliases = p.get ();
+ if (var.overrides != nullptr)
+ swap (p->aliases,
+ const_cast<variable*> (var.overrides.get ())->aliases);
+
+ // Forward link.
+ //
+ p->overrides = move (var.overrides);
+ var.overrides = move (p);
+
+ o = var.overrides.get ();
+ }
+
+ // Currently we expand project overrides in the global scope to keep
+ // things simple. Pass original variable for diagnostics. Use current
+ // working directory as pattern base.
+ //
+ scope& gs (global_scope.rw ());
+
+ parser p (*this);
+ pair<value, token> r (p.parse_variable_value (l, gs, &work, var));
+
+ if (r.second.type != token_type::eos)
+ fail << "unexpected " << r.second << " in variable assignment "
+ << "'" << s << "'";
+
+ // Make sure the value is not typed.
+ //
+ if (r.first.type != nullptr)
+ fail << "typed override of variable " << n;
+
+ return make_pair (
+ c,
+ variable_override {var, *o, move (dir), move (r.first)});
+ }
+
context::
context (scheduler& s,
global_mutexes& ms,
file_cache& fc,
- bool mo,
+ optional<match_only_level> mo,
bool nem,
bool dr,
+ bool ndb,
bool kg,
const strings& cmd_vars,
+ reserves res,
optional<context*> mc,
- const loaded_modules_lock* ml)
+ const module_libraries_lock* ml,
+ const function<var_override_function>& var_ovr_func)
: data_ (new data (*this)),
- sched (s),
- mutexes (ms),
- fcache (fc),
+ sched (&s),
+ mutexes (&ms),
+ fcache (&fc),
match_only (mo),
no_external_modules (nem),
dry_run_option (dr),
+ no_diag_buffer (ndb),
keep_going (kg),
phase_mutex (*this),
scopes (data_->scopes),
targets (data_->targets),
var_pool (data_->var_pool),
+ var_patterns (data_->var_patterns),
var_overrides (data_->var_overrides),
functions (data_->functions),
global_scope (create_global_scope (data_->scopes)),
@@ -90,12 +326,17 @@ namespace build2
? optional<unique_ptr<context>> (nullptr)
: nullopt)
{
+ // NOTE: see also the bare minimum version below if adding anything here.
+
tracer trace ("context");
l6 ([&]{trace << "initializing build state";});
+ reserve (res);
+
scope_map& sm (data_->scopes);
variable_pool& vp (data_->var_pool);
+ variable_patterns& vpats (data_->var_patterns);
insert_builtin_functions (functions);
@@ -104,7 +345,7 @@ namespace build2
//
meta_operation_table.insert ("noop");
meta_operation_table.insert ("perform");
- meta_operation_table.insert ("configure");
+ meta_operation_table.insert ("configure"); // bpkg assumes no process.
meta_operation_table.insert ("disfigure");
if (config_preprocess_create != nullptr)
@@ -134,13 +375,26 @@ namespace build2
// Any variable assigned on the global scope should natually have the
// global visibility.
//
- auto set = [&gs, &vp] (const char* var, auto val)
+ auto set = [&gs, &vp] (const char* var, auto val) -> const value&
{
using T = decltype (val);
value& v (gs.assign (vp.insert<T> (var, variable_visibility::global)));
v = move (val);
+ return v;
};
+ // Build system mode.
+ //
+ // This value signals any special mode the build system may be running
+ // in. The two core modes are `no-external-modules` (bootstrapping of
+ // external modules is disabled) and `normal` (normal build system
+ // execution). Build system drivers may invent additional modes (for
+ // example, the bpkg `skeleton` mode that is used to evaluate depends
+ // clauses).
+ //
+ set ("build.mode",
+ no_external_modules ? "no-external-modules" : "normal");
+
set ("build.work", work);
set ("build.home", home);
@@ -167,6 +421,29 @@ namespace build2
//
set ("build.verbosity", uint64_t (verb));
+ // Build system diagnostics progress and color.
+ //
+ // Note that these can be true, false, or NULL if neither requested nor
+ // suppressed explicitly.
+ //
+ {
+ value& v (gs.assign (vp.insert<bool> ("build.progress", v_g)));
+ if (diag_progress_option)
+ v = *diag_progress_option;
+ }
+
+ {
+ value& v (gs.assign (vp.insert<bool> ("build.diag_color", v_g)));
+ if (diag_color_option)
+ v = *diag_color_option;
+ }
+
+ // These are the "effective" values that incorporate a suitable default
+ // if neither requested nor suppressed explicitly.
+ //
+ set ("build.show_progress", show_progress (verb_never));
+ set ("build.show_diag_color", show_diag_color ());
+
// Build system version (similar to what we do in the version module
// except here we don't include package epoch/revision).
//
@@ -222,7 +499,8 @@ namespace build2
// Did the user ask us to use config.guess?
//
string orig (config_guess
- ? run<string> (3,
+ ? run<string> (*this,
+ 3,
*config_guess,
[](string& l, bool) {return move (l);})
: BUILD2_HOST_TRIPLET);
@@ -245,7 +523,7 @@ namespace build2
set ("build.host.version", t.version);
set ("build.host.class", t.class_);
- set ("build.host", move (t));
+ build_host = &set ("build.host", move (t)).as<target_triplet> ();
}
catch (const invalid_argument& e)
{
@@ -269,6 +547,7 @@ namespace build2
t.insert<path_target> ();
t.insert<file> ();
+ t.insert<group> ();
t.insert<alias> ();
t.insert<dir> ();
t.insert<fsdir> ();
@@ -303,215 +582,51 @@ namespace build2
// Note that some config.config.* variables have project visibility thus
// the match argument is false.
//
- vp.insert_pattern ("config.**", nullopt, true, v_g, true, false);
+ vpats.insert ("config.**", nullopt, true, v_g, true, false);
// Parse and enter the command line variables. We do it before entering
// any other variables so that all the variables that are overriden are
// marked as such first. Then, as we enter variables, we can verify that
// the override is alowed.
//
- for (size_t i (0); i != cmd_vars.size (); ++i)
{
- const string& s (cmd_vars[i]);
-
- istringstream is (s);
- is.exceptions (istringstream::failbit | istringstream::badbit);
-
- // Similar to buildspec we do "effective escaping" and only for ['"\$(]
- // (basically what's necessary inside a double-quoted literal plus the
- // single quote).
- //
- path_name in ("<cmdline>");
- lexer l (is, in, 1 /* line */, "\'\"\\$(");
+ size_t i (0);
+ for (; i != cmd_vars.size (); ++i)
+ {
+ const string& s (cmd_vars[i]);
- // At the buildfile level the scope-specific variable should be
- // separated from the directory with a whitespace, for example:
- //
- // ./ foo=$bar
- //
- // However, requiring this for command line variables would be too
- // inconvinient so we support both.
- //
- // We also have the optional visibility modifier as a first character of
- // the variable name:
- //
- // ! - global
- // % - project
- // / - scope
- //
- // The last one clashes a bit with the directory prefix:
- //
- // ./ /foo=bar
- // .//foo=bar
- //
- // But that's probably ok (the need for a scope-qualified override with
- // scope visibility should be pretty rare). Note also that to set the
- // value on the global scope we use !.
- //
- // And so the first token should be a word which can be either a
- // variable name (potentially with the directory qualification) or just
- // the directory, in which case it should be followed by another word
- // (unqualified variable name). To avoid treating any of the visibility
- // modifiers as special we use the cmdvar mode.
- //
- l.mode (lexer_mode::cmdvar);
- token t (l.next ());
+ pair<char, variable_override> p (
+ parse_variable_override (s, i, true /* buildspec */));
- optional<dir_path> dir;
- if (t.type == token_type::word)
- {
- string& v (t.value);
- size_t p (path::traits_type::rfind_separator (v));
+ char c (p.first);
+ variable_override& vo (p.second);
- if (p != string::npos && p != 0) // If first then visibility.
+ // Global and absolute scope overrides we can enter directly. Project
+ // and relative scope ones will be entered later for each project.
+ //
+ if (c == '!' || (vo.dir && vo.dir->absolute ()))
{
- if (p == v.size () - 1)
- {
- // Separate directory.
- //
- dir = dir_path (move (v));
- t = l.next ();
-
- // Target-specific overrides are not yet supported (and probably
- // never will be; the beast is already complex enough).
- //
- if (t.type == token_type::colon)
- fail << "'" << s << "' is a target-specific override" <<
- info << "use double '--' to treat this argument as buildspec";
- }
- else
- {
- // Combined directory.
- //
- // If double separator (visibility marker), then keep the first in
- // name.
- //
- if (p != 0 && path::traits_type::is_separator (v[p - 1]))
- --p;
-
- dir = dir_path (t.value, 0, p + 1); // Include the separator.
- t.value.erase (0, p + 1); // Erase the separator.
- }
+ scope& s (c == '!' ? gs : *sm.insert_out (*vo.dir)->second.front ());
- if (dir->relative ())
- {
- // Handle the special relative to base scope case (.../).
- //
- auto i (dir->begin ());
-
- if (*i == "...")
- dir = dir_path (++i, dir->end ()); // Note: can become empty.
- else
- dir->complete (); // Relative to CWD.
- }
+ auto p (s.vars.insert (vo.ovr));
+ assert (p.second); // Variable name is unique.
- if (dir->absolute ())
- dir->normalize ();
+ value& v (p.first);
+ v = move (vo.val);
}
- }
-
- token_type tt (l.next ().type);
-
- // The token should be the variable name followed by =, +=, or =+.
- //
- if (t.type != token_type::word || t.value.empty () ||
- (tt != token_type::assign &&
- tt != token_type::prepend &&
- tt != token_type::append))
- {
- fail << "expected variable assignment instead of '" << s << "'" <<
- info << "use double '--' to treat this argument as buildspec";
- }
-
- // Take care of the visibility. Note that here we rely on the fact that
- // none of these characters are lexer's name separators.
- //
- char c (t.value[0]);
-
- if (path::traits_type::is_separator (c))
- c = '/'; // Normalize.
-
- string n (t.value, c == '!' || c == '%' || c == '/' ? 1 : 0);
-
- if (c == '!' && dir)
- fail << "scope-qualified global override of variable " << n;
+ else
+ data_->var_overrides.push_back (move (vo));
- // Pre-enter the main variable. Note that we rely on all the overridable
- // variables with global visibility to be known (either entered or
- // handled via a pettern) at this stage.
- //
- variable& var (
- const_cast<variable&> (vp.insert (n, true /* overridable */)));
-
- const variable* o;
- {
- variable_visibility v (c == '/' ? variable_visibility::scope :
- c == '%' ? variable_visibility::project :
- variable_visibility::global);
-
- const char* k (tt == token_type::assign ? "__override" :
- tt == token_type::append ? "__suffix" : "__prefix");
-
- unique_ptr<variable> p (
- new variable {
- n + '.' + to_string (i + 1) + '.' + k,
- nullptr /* aliases */,
- nullptr /* type */,
- nullptr /* overrides */,
- v});
-
- // Back link.
+ // Save global overrides for nested contexts.
//
- p->aliases = p.get ();
- if (var.overrides != nullptr)
- swap (p->aliases,
- const_cast<variable*> (var.overrides.get ())->aliases);
-
- // Forward link.
- //
- p->overrides = move (var.overrides);
- var.overrides = move (p);
-
- o = var.overrides.get ();
- }
-
- // Currently we expand project overrides in the global scope to keep
- // things simple. Pass original variable for diagnostics. Use current
- // working directory as pattern base.
- //
- parser p (*this);
- pair<value, token> r (p.parse_variable_value (l, gs, &work, var));
-
- if (r.second.type != token_type::eos)
- fail << "unexpected " << r.second << " in variable assignment "
- << "'" << s << "'";
-
- // Make sure the value is not typed.
- //
- if (r.first.type != nullptr)
- fail << "typed override of variable " << n;
-
- // Global and absolute scope overrides we can enter directly. Project
- // and relative scope ones will be entered later for each project.
- //
- if (c == '!' || (dir && dir->absolute ()))
- {
- scope& s (c == '!' ? gs : *sm.insert_out (*dir)->second.front ());
-
- auto p (s.vars.insert (*o));
- assert (p.second); // Variable name is unique.
-
- value& v (p.first);
- v = move (r.first);
+ if (c == '!')
+ data_->global_var_overrides.push_back (s);
}
- else
- data_->var_overrides.push_back (
- variable_override {var, *o, move (dir), move (r.first)});
- // Save global overrides for nested contexts.
+ // Parse any ad hoc project-wide overrides.
//
- if (c == '!')
- data_->global_var_overrides.push_back (s);
+ if (var_ovr_func != nullptr)
+ var_ovr_func (*this, i);
}
// Enter remaining variable patterns and builtin variables.
@@ -520,24 +635,26 @@ namespace build2
const auto v_t (variable_visibility::target);
const auto v_q (variable_visibility::prereq);
- vp.insert_pattern<bool> ("config.**.configured", false, v_p);
+ vpats.insert<bool> ("config.**.configured", false, v_p);
- // file.cxx:import() (note: order is important; see insert_pattern()).
+ // file.cxx:import()
+ //
+ // Note: the order is important (see variable_patterns::insert()).
//
// Note that if any are overriden, they are "pre-typed" by the config.**
// pattern above and we just "add" the types.
//
- vp.insert_pattern<abs_dir_path> ("config.import.*", true, v_g, true);
- vp.insert_pattern<path> ("config.import.**", true, v_g, true);
+ vpats.insert<abs_dir_path> ("config.import.*", true, v_g, true);
+ vpats.insert<path> ("config.import.**", true, v_g, true);
// module.cxx:boot/init_module().
//
// Note that we also have the config.<module>.configured variable (see
// above).
//
- vp.insert_pattern<bool> ("**.booted", false /* overridable */, v_p);
- vp.insert_pattern<bool> ("**.loaded", false, v_p);
- vp.insert_pattern<bool> ("**.configured", false, v_p);
+ vpats.insert<bool> ("**.booted", false /* overridable */, v_p);
+ vpats.insert<bool> ("**.loaded", false, v_p);
+ vpats.insert<bool> ("**.configured", false, v_p);
var_src_root = &vp.insert<dir_path> ("src_root");
var_out_root = &vp.insert<dir_path> ("out_root");
@@ -563,29 +680,71 @@ namespace build2
var_export_metadata = &vp.insert ("export.metadata", v_t); // Untyped.
var_extension = &vp.insert<string> ("extension", v_t);
- var_clean = &vp.insert<bool> ("clean", v_t);
- var_backlink = &vp.insert<string> ("backlink", v_t);
- var_include = &vp.insert<string> ("include", v_q);
+ var_update = &vp.insert<string> ("update", v_q);
+ var_clean = &vp.insert<bool> ("clean", v_t);
+ var_backlink = &vp.insert ("backlink", v_t); // Untyped.
+ var_include = &vp.insert<string> ("include", v_q);
// Backlink executables and (generated) documentation by default.
//
- gs.target_vars[exe::static_type]["*"].assign (var_backlink) = "true";
- gs.target_vars[doc::static_type]["*"].assign (var_backlink) = "true";
+ gs.target_vars[exe::static_type]["*"].assign (var_backlink) =
+ names {name ("true")};
+ gs.target_vars[doc::static_type]["*"].assign (var_backlink) =
+ names {name ("true")};
// Register builtin rules.
//
{
rule_map& r (gs.rules); // Note: global scope!
- //@@ outer
- r.insert<alias> (perform_id, 0, "alias", alias_rule::instance);
+ r.insert<alias> (perform_id, 0, "build.alias", alias_rule::instance);
- r.insert<fsdir> (perform_update_id, "fsdir", fsdir_rule::instance);
- r.insert<fsdir> (perform_clean_id, "fsdir", fsdir_rule::instance);
+ r.insert<fsdir> (perform_update_id, "build.fsdir", fsdir_rule::instance);
+ r.insert<fsdir> (perform_clean_id, "build.fsdir", fsdir_rule::instance);
- r.insert<mtime_target> (perform_update_id, "file", file_rule::instance);
- r.insert<mtime_target> (perform_clean_id, "file", file_rule::instance);
+ r.insert<mtime_target> (perform_update_id, "build.file", file_rule::instance);
+ r.insert<mtime_target> (perform_clean_id, "build.file", file_rule::instance);
}
+
+ // End of initialization.
+ //
+ load_generation = 1;
+ }
+
+ context::
+ context ()
+ : data_ (new data (*this)),
+ sched (nullptr),
+ mutexes (nullptr),
+ fcache (nullptr),
+ match_only (nullopt),
+ no_external_modules (true),
+ dry_run_option (false),
+ no_diag_buffer (false),
+ keep_going (false),
+ phase_mutex (*this),
+ scopes (data_->scopes),
+ targets (data_->targets),
+ var_pool (data_->var_pool),
+ var_patterns (data_->var_patterns),
+ var_overrides (data_->var_overrides),
+ functions (data_->functions),
+ global_scope (create_global_scope (data_->scopes)),
+ global_target_types (data_->global_target_types),
+ global_override_cache (data_->global_override_cache),
+ global_var_overrides (data_->global_var_overrides),
+ modules_lock (nullptr),
+ module_context (nullptr)
+ {
+ variable_pool& vp (data_->var_pool);
+
+ var_src_root = &vp.insert<dir_path> ("src_root");
+ var_out_root = &vp.insert<dir_path> ("out_root");
+
+ var_project = &vp.insert<project_name> ("project");
+ var_amalgamation = &vp.insert<dir_path> ("amalgamation");
+
+ load_generation = 1;
}
context::
@@ -595,6 +754,68 @@ namespace build2
}
void context::
+ enter_project_overrides (scope& rs,
+ const dir_path& out_base,
+ const variable_overrides& ovrs,
+ scope* as)
+ {
+ // The mildly tricky part here is to distinguish the situation where we
+ // are bootstrapping the same project multiple times. The first override
+ // that we set cannot already exist (because the override variable names
+ // are unique) so if it is already set, then it can only mean this project
+ // is already bootstrapped.
+ //
+ // This is further complicated by the project vs amalgamation logic (we
+ // may have already done the amalgamation but not the project). So we
+ // split it into two passes.
+ //
+ auto& sm (scopes.rw ());
+
+ for (const variable_override& o: ovrs)
+ {
+ if (o.ovr.visibility != variable_visibility::global)
+ continue;
+
+ // If we have a directory, enter the scope, similar to how we do
+ // it in the context ctor.
+ //
+ scope& s (
+ o.dir
+ ? *sm.insert_out ((out_base / *o.dir).normalize ())->second.front ()
+ : *(as != nullptr ? as : (as = rs.weak_scope ())));
+
+ auto p (s.vars.insert (o.ovr));
+
+ if (!p.second)
+ break;
+
+ value& v (p.first);
+ v = o.val;
+ }
+
+ for (const variable_override& o: ovrs)
+ {
+ // Ours is either project (%foo) or scope (/foo).
+ //
+ if (o.ovr.visibility == variable_visibility::global)
+ continue;
+
+ scope& s (
+ o.dir
+ ? *sm.insert_out ((out_base / *o.dir).normalize ())->second.front ()
+ : rs);
+
+ auto p (s.vars.insert (o.ovr));
+
+ if (!p.second)
+ break;
+
+ value& v (p.first);
+ v = o.val;
+ }
+ }
+
+ void context::
current_meta_operation (const meta_operation_info& mif)
{
if (current_mname != mif.name)
@@ -604,6 +825,7 @@ namespace build2
}
current_mif = &mif;
+ current_mdata = current_data_ptr (nullptr, null_current_data_deleter);
current_on = 0; // Reset.
}
@@ -612,9 +834,13 @@ namespace build2
const operation_info* outer_oif,
bool diag_noise)
{
- current_oname = (outer_oif == nullptr ? inner_oif : *outer_oif).name;
+ const auto& oif (outer_oif == nullptr ? inner_oif : *outer_oif);
+
+ current_oname = oif.name;
current_inner_oif = &inner_oif;
current_outer_oif = outer_oif;
+ current_inner_odata = current_data_ptr (nullptr, null_current_data_deleter);
+ current_outer_odata = current_data_ptr (nullptr, null_current_data_deleter);
current_on++;
current_mode = inner_oif.mode;
current_diag_noise = diag_noise;
@@ -624,6 +850,11 @@ namespace build2
dependency_count.store (0, memory_order_relaxed);
target_count.store (0, memory_order_relaxed);
skip_count.store (0, memory_order_relaxed);
+ resolve_count.store (0, memory_order_relaxed);
+
+ // Clear accumulated targets with post hoc prerequisites.
+ //
+ current_posthoc_targets.clear ();
}
bool run_phase_mutex::
@@ -656,11 +887,13 @@ namespace build2
}
else if (ctx_.phase != n)
{
- ctx_.sched.deactivate (false /* external */);
+ ++contention; // Protected by m_.
+
+ ctx_.sched->deactivate (false /* external */);
for (; ctx_.phase != n; v->wait (l)) ;
r = !fail_;
l.unlock (); // Important: activate() can block.
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
}
else
r = !fail_;
@@ -672,9 +905,11 @@ namespace build2
{
if (!lm_.try_lock ())
{
- ctx_.sched.deactivate (false /* external */);
+ ctx_.sched->deactivate (false /* external */);
lm_.lock ();
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
+
+ ++contention_load; // Protected by lm_.
}
r = !fail_; // Re-query.
}
@@ -722,9 +957,9 @@ namespace build2
// relock().
//
if (o == run_phase::match && n == run_phase::execute)
- ctx_.sched.push_phase ();
+ ctx_.sched->push_phase ();
else if (o == run_phase::execute && n == run_phase::match)
- ctx_.sched.pop_phase ();
+ ctx_.sched->pop_phase ();
if (v != nullptr)
{
@@ -735,7 +970,7 @@ namespace build2
}
}
- bool run_phase_mutex::
+ optional<bool> run_phase_mutex::
relock (run_phase o, run_phase n)
{
// Pretty much a fused unlock/lock implementation except that we always
@@ -744,6 +979,7 @@ namespace build2
assert (o != n);
bool r;
+ bool s (true); // True switch.
if (o == run_phase::load)
lm_.unlock ();
@@ -778,9 +1014,9 @@ namespace build2
// unlock().
//
if (o == run_phase::match && n == run_phase::execute)
- ctx_.sched.push_phase ();
+ ctx_.sched->push_phase ();
else if (o == run_phase::execute && n == run_phase::match)
- ctx_.sched.pop_phase ();
+ ctx_.sched->pop_phase ();
// Notify others that could be waiting for this phase.
//
@@ -792,11 +1028,13 @@ namespace build2
}
else // phase != n
{
- ctx_.sched.deactivate (false /* external */);
+ ++contention; // Protected by m_.
+
+ ctx_.sched->deactivate (false /* external */);
for (; ctx_.phase != n; v->wait (l)) ;
r = !fail_;
l.unlock (); // Important: activate() can block.
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
}
}
@@ -804,14 +1042,23 @@ namespace build2
{
if (!lm_.try_lock ())
{
- ctx_.sched.deactivate (false /* external */);
+ // If we failed to acquire the load mutex, then we know there is (or
+ // was) someone before us in the load phase. And it's impossible to
+ // switch to a different phase between our calls to try_lock() above
+ // and lock() below because of our +1 in lc_.
+ //
+ s = false;
+
+ ctx_.sched->deactivate (false /* external */);
lm_.lock ();
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
+
+ ++contention_load; // Protected by lm_.
}
r = !fail_; // Re-query.
}
- return r;
+ return r ? optional<bool> (s) : nullopt;
}
// C++17 deprecated uncaught_exception() so use uncaught_exceptions() if
@@ -926,7 +1173,8 @@ namespace build2
phase_lock* pl (phase_lock_instance);
assert (&pl->ctx == &ctx);
- if (!ctx.phase_mutex.relock (old_phase, new_phase))
+ optional<bool> r (ctx.phase_mutex.relock (old_phase, new_phase));
+ if (!r)
{
ctx.phase_mutex.relock (new_phase, old_phase);
throw failed ();
@@ -935,14 +1183,37 @@ namespace build2
pl->phase = new_phase;
if (new_phase == run_phase::load) // Note: load lock is exclusive.
+ {
ctx.load_generation++;
+ // Invalidate cached target base_scope values if we are switching from a
+ // non-load phase (we don't cache during load which means load->load
+ // switch doesn't have anything to invalidate).
+ //
+ // @@ This is still quite expensive on project like Boost with a large
+ // number of files (targets) and a large number of load phase
+ // switches (due to directory buildfiles).
+ //
+ // Thinking some more on this, we shouldn't need to do this since such
+ // loads can (or at least should) only perform "island appends" see
+ // comment on context::phase for details.
+ //
+#if 0
+ if (*r)
+ {
+ for (const unique_ptr<target>& t: ctx.targets)
+ t->base_scope_.store (nullptr, memory_order_relaxed);
+ }
+#endif
+ }
+
//text << this_thread::get_id () << " phase switch "
// << old_phase << " " << new_phase;
}
#if 0
- // NOTE: see push/pop_phase() logic if trying to enable this.
+ // NOTE: see push/pop_phase() logic if trying to enable this. Also
+ // the load stuff above.
//
phase_switch::
phase_switch (phase_unlock&& u, phase_lock&& l)
diff --git a/libbuild2/context.hxx b/libbuild2/context.hxx
index c4d85c9..8898c92 100644
--- a/libbuild2/context.hxx
+++ b/libbuild2/context.hxx
@@ -21,13 +21,14 @@
namespace build2
{
class file_cache;
- class loaded_modules_lock;
+ class module_libraries_lock;
class LIBBUILD2_SYMEXPORT run_phase_mutex
{
public:
// Acquire a phase lock potentially blocking (unless already in the
// desired phase) until switching to the desired phase is possible.
+ // Return false on failure.
//
bool
lock (run_phase);
@@ -38,11 +39,22 @@ namespace build2
void
unlock (run_phase);
- // Switch from one phase to another.
+ // Switch from one phase to another. Return nullopt on failure (so can be
+ // used as bool), true if switched from a different phase, and false if
+ // joined/switched to the same phase (this, for example, can be used to
+ // decide if a phase switching housekeeping is really necessary). Note:
+ // currently only implemented for the load phase (always returns true
+ // for the others).
//
- bool
+ optional<bool>
relock (run_phase unlock, run_phase lock);
+ // Statistics.
+ //
+ public:
+ size_t contention = 0; // # of contentious phase (re)locks.
+ size_t contention_load = 0; // # of contentious load phase locks.
+
private:
friend class context;
@@ -61,7 +73,7 @@ namespace build2
// is exclusive so we have a separate mutex to serialize it (think of it
// as a second level locking).
//
- // When the mutex is unlocked (all three counters become zero, the phase
+ // When the mutex is unlocked (all three counters become zero), the phase
// is always changed to load (this is also the initial state).
//
context& ctx_;
@@ -94,8 +106,28 @@ namespace build2
explicit
global_mutexes (size_t vc)
- : variable_cache_size (vc),
- variable_cache (new shared_mutex[variable_cache_size]) {}
+ {
+ init (vc);
+ }
+
+ global_mutexes () = default; // Create uninitialized instance.
+
+ void
+ init (size_t vc)
+ {
+ variable_cache_size = vc;
+ variable_cache.reset (new shared_mutex[vc]);
+ }
+ };
+
+ // Match-only level.
+ //
+ // See the --match-only and --load-only options for background.
+ //
+ enum class match_only_level
+ {
+ alias, // Match only alias{} targets.
+ all // Match all targets.
};
// A build context encapsulates the state of a build. It is possible to have
@@ -120,9 +152,9 @@ namespace build2
// instead go the multiple communicating schedulers route, a la the job
// server).
//
- // The loaded_modules state (module.hxx) is shared among all the contexts
+ // The module_libraries state (module.hxx) is shared among all the contexts
// (there is no way to have multiple shared library loading "contexts") and
- // is protected by loaded_modules_lock. A nested context should normally
+ // is protected by module_libraries_lock. A nested context should normally
// inherit this lock value from its outer context.
//
// Note also that any given thread should not participate in multiple
@@ -138,17 +170,66 @@ namespace build2
//
class LIBBUILD2_SYMEXPORT context
{
+ public:
+ // In order to perform each operation the build system goes through the
+ // following phases:
+ //
+ // load - load the buildfiles
+ // match - search prerequisites and match rules
+ // execute - execute the matched rule
+ //
+ // The build system starts with a "serial load" phase and then continues
+ // with parallel match and execute. Match, however, can be interrupted
+ // both with load and execute.
+ //
+ // Match can be interrupted with "exclusive load" in order to load
+ // additional buildfiles. Similarly, it can be interrupted with (parallel)
+ // execute in order to build targetd required to complete the match (for
+ // example, generated source code or source code generators themselves).
+ //
+ // Such interruptions are performed by phase change that is protected by
+ // phase_mutex (which is also used to synchronize the state changes
+ // between phases).
+ //
+ // Serial load can perform arbitrary changes to the build state. Exclusive
+ // load, however, can only perform "island appends". That is, it can
+ // create new "nodes" (variables, scopes, etc) but not (semantically)
+ // change already existing nodes or invalidate any references to such (the
+ // idea here is that one should be able to load additional buildfiles as
+ // long as they don't interfere with the existing build state). The
+ // "islands" are identified by the load_generation number (1 for the
+ // initial/serial load). It is incremented in case of a phase switch and
+ // can be stored in various "nodes" to verify modifications are only done
+ // "within the islands". Another example of invalidation would be
+ // insertion of a new scope "under" an existing target thus changing its
+ // scope hierarchy (and potentially even its base scope). This would be
+ // bad because we may have made decisions based on the original hierarchy,
+ // for example, we may have queried a variable which in the new hierarchy
+ // would "see" a new value from the newly inserted scope.
+ //
+ // The special load_generation value 0 indicates initialization before
+ // anything has been loaded. Currently, it is changed to 1 at the end
+ // of the context constructor.
+ //
+ // Note must come (and thus initialized) before the data_ member.
+ //
+ run_phase phase = run_phase::load;
+ size_t load_generation = 0;
+
+ private:
struct data;
unique_ptr<data> data_;
public:
- scheduler& sched;
- global_mutexes& mutexes;
- file_cache& fcache;
+ // These are only NULL for the "bare minimum" context (see below).
+ //
+ scheduler* sched;
+ global_mutexes* mutexes;
+ file_cache* fcache;
- // Match only flag (see --match-only but also dist).
+ // Match only flag/level (see --{load,match}-only but also dist).
//
- bool match_only;
+ optional<match_only_level> match_only;
// Skip booting external modules flag (see --no-external-modules).
//
@@ -189,6 +270,10 @@ namespace build2
bool dry_run = false;
bool dry_run_option;
+ // Diagnostics buffering flag (--no-diag-buffer).
+ //
+ bool no_diag_buffer;
+
// Keep going flag.
//
// Note that setting it to false is not of much help unless we are running
@@ -197,39 +282,13 @@ namespace build2
//
bool keep_going;
- // In order to perform each operation the build system goes through the
- // following phases:
+ // Targets to trace (see the --trace-* options).
//
- // load - load the buildfiles
- // match - search prerequisites and match rules
- // execute - execute the matched rule
+ // Note that these must be set after construction and must remain valid
+ // for the lifetime of the context instance.
//
- // The build system starts with a "serial load" phase and then continues
- // with parallel match and execute. Match, however, can be interrupted
- // both with load and execute.
- //
- // Match can be interrupted with "exclusive load" in order to load
- // additional buildfiles. Similarly, it can be interrupted with (parallel)
- // execute in order to build targetd required to complete the match (for
- // example, generated source code or source code generators themselves).
- //
- // Such interruptions are performed by phase change that is protected by
- // phase_mutex (which is also used to synchronize the state changes
- // between phases).
- //
- // Serial load can perform arbitrary changes to the build state. Exclusive
- // load, however, can only perform "island appends". That is, it can
- // create new "nodes" (variables, scopes, etc) but not (semantically)
- // change already existing nodes or invalidate any references to such (the
- // idea here is that one should be able to load additional buildfiles as
- // long as they don't interfere with the existing build state). The
- // "islands" are identified by the load_generation number (0 for the
- // initial/serial load). It is incremented in case of a phase switch and
- // can be stored in various "nodes" to verify modifications are only done
- // "within the islands".
- //
- run_phase phase = run_phase::load;
- size_t load_generation = 0;
+ const vector<name>* trace_match = nullptr;
+ const vector<name>* trace_execute = nullptr;
// A "tri-mutex" that keeps all the threads in one of the three phases.
// When a thread wants to switch a phase, it has to wait for all the other
@@ -270,6 +329,7 @@ namespace build2
string current_oname;
const meta_operation_info* current_mif;
+
const operation_info* current_inner_oif;
const operation_info* current_outer_oif;
@@ -291,6 +351,22 @@ namespace build2
(current_mname.empty () && current_oname == mo));
};
+ // Meta/operation-specific context-global auxiliary data storage.
+ //
+ // Note: cleared by current_[meta_]operation() below. Normally set by
+ // meta/operation-specific callbacks from [mate_]operation_info.
+ //
+ // Note also: watch out for MT-safety in the data itself.
+ //
+ static void
+ null_current_data_deleter (void* p) { assert (p == nullptr); }
+
+ using current_data_ptr = unique_ptr<void, void (*) (void*)>;
+
+ current_data_ptr current_mdata = {nullptr, null_current_data_deleter};
+ current_data_ptr current_inner_odata = {nullptr, null_current_data_deleter};
+ current_data_ptr current_outer_odata = {nullptr, null_current_data_deleter};
+
// Current operation number (1-based) in the meta-operation batch.
//
size_t current_on;
@@ -329,20 +405,41 @@ namespace build2
// decremented after such recipe has been executed. If such a recipe has
// skipped executing the operation, then it should increment the skip
// count. These two counters are used for progress monitoring and
- // diagnostics.
+ // diagnostics. The resolve count keeps track of the number of targets
+ // matched but not executed as a result of the resolve_members() calls
+ // (see also target::resolve_counted).
//
atomic_count dependency_count;
atomic_count target_count;
atomic_count skip_count;
+ atomic_count resolve_count;
// Build state (scopes, targets, variables, etc).
//
const scope_map& scopes;
target_set& targets;
- const variable_pool& var_pool;
+ const variable_pool& var_pool; // Public variables pool.
+ const variable_patterns& var_patterns; // Public variables patterns.
const variable_overrides& var_overrides; // Project and relative scope.
function_map& functions;
+ // Current targets with post hoc prerequisites.
+ //
+ // Note that we don't expect many of these so a simple mutex should be
+ // sufficient. Note also that we may end up adding more entries as we
+ // match existing so use list for node and iterator stability. See
+ // match_poshoc() for details.
+ //
+ struct posthoc_target
+ {
+ build2::action action;
+ reference_wrapper<const build2::target> target;
+ vector<const build2::target*> prerequisite_targets;
+ };
+
+ list<posthoc_target> current_posthoc_targets;
+ mutex current_posthoc_targets_mutex;
+
// Global scope.
//
const scope& global_scope;
@@ -350,6 +447,10 @@ namespace build2
variable_override_cache& global_override_cache;
const strings& global_var_overrides;
+ // Cached values (from global scope).
+ //
+ const target_triplet* build_host; // build.host
+
// Cached variables.
//
@@ -379,8 +480,8 @@ namespace build2
const variable* var_import_build2;
const variable* var_import_target;
- // The import.metadata variable and the --build2-metadata option are used
- // to pass the metadata compatibility version.
+ // The import.metadata export stub variable and the --build2-metadata
+ // executable option are used to pass the metadata compatibility version.
//
// This serves both as an indication that the metadata is required (can be
// useful, for example, in cases where it is expensive to calculate) as
@@ -392,7 +493,8 @@ namespace build2
// The export.metadata value should start with the version followed by the
// metadata variable prefix (for example, cli in cli.version).
//
- // The following metadata variable names have pre-defined meaning:
+ // The following metadata variable names have pre-defined meaning for
+ // executable targets (exe{}; see also process_path_ex):
//
// <var-prefix>.name = [string] # Stable name for diagnostics.
// <var-prefix>.version = [string] # Version for diagnostics.
@@ -402,7 +504,8 @@ namespace build2
// If the <var-prefix>.name variable is missing, it is set to the target
// name as imported.
//
- // See also process_path_ex.
+ // Note that the same mechanism is used for library user metadata (see
+ // cc::pkgconfig_{load,save}() for details).
//
const variable* var_import_metadata;
const variable* var_export_metadata;
@@ -411,6 +514,21 @@ namespace build2
//
const variable* var_extension;
+ // This variable can only be specified as prerequisite-specific (see the
+ // `include` variable for details).
+ //
+ // [string] prerequisite visibility
+ //
+ // Valid values are `true` and `false`. Additionally, some rules (and
+ // potentially only for certain types of prerequisites) may support the
+ // `unmatch` (match but do not update, if possible), `match` (update
+ // during match), and `execute` (update during execute, as is normally)
+ // values (the `execute` value may be useful if the rule has the `match`
+ // semantics by default). Note that if unmatch is impossible, then the
+ // prerequisite is treated as ad hoc.
+ //
+ const variable* var_update;
+
// Note that this variable can also be specified as prerequisite-specific
// (see the `include` variable for details).
//
@@ -418,18 +536,51 @@ namespace build2
//
const variable* var_clean;
- // Forwarded configuration backlink mode. Valid values are:
+ // Forwarded configuration backlink mode. The value has two components
+ // in the form:
+ //
+ // <mode> [<print>]
+ //
+ // Valid <mode> values are:
//
// false - no link.
// true - make a link using appropriate mechanism.
// symbolic - make a symbolic link.
// hard - make a hard link.
// copy - make a copy.
- // overwrite - copy over but don't remove on clean (committed gen code).
+ // overwrite - copy over but don't remove on clean.
+ // group - inherit the group mode (only valid for group members).
//
- // Note that it can be set by a matching rule as a rule-specific variable.
+ // While the <print> component should be either true or false and can be
+ // used to suppress printing of specific ad hoc group members at verbosity
+ // level 1. Note that it cannot be false for the primary member.
//
- // [string] target visibility
+ // Note that this value can be set by a matching rule as a rule-specific
+ // variable.
+ //
+ // Note also that the overwrite mode was originally meant for handling
+ // pregenerated source code. But in the end this did not pan out for
+ // the following reasons:
+ //
+ // 1. This would mean that the pregenerated and regenerated files end up
+ // in the same place (e.g., depending on the develop mode) and it's
+ // hard to make this work without resorting to a conditional graph.
+ //
+ // This could potentially be addressed by allowing backlink to specify
+ // a different location (similar to dist).
+ //
+ // 2. This support for pregenerated source code would be tied to forwarded
+ // configurations.
+ //
+ // Nevertheless, there may be a kernel of an idea here in that we may be
+ // able to provide a built-in "post-copy" mechanism which would allow one
+ // to have a pregenerated setup even when using non-ad hoc recipes
+ // (currently we just manually diff/copy stuff at the end of a recipe).
+ // (Or maybe we should stick to ad hoc recipes with post-diff/copy and
+ // just expose a mechanism to delegate to a different rule, which we
+ // already have).
+ //
+ // [names] target visibility
//
const variable* var_backlink;
@@ -456,14 +607,19 @@ namespace build2
// Sometimes it may be desirable to apply exclusions only to specific
// operations. The initial idea was to extend this value to allow
// specifying the operation (e.g., clean@false). However, later we
- // realized that we could reuse the "operation variables" (clean, install,
- // test) with a more natural-looking result. Note that currently we only
- // recognize the built-in clean variable (for other variables we will need
- // some kind of registration in an operation-to-variable map, probably in
- // root scope). See also install::file_rule::filter().
+ // realized that we could reuse the "operation-specific variables"
+ // (update, clean, install, test; see project_operation_info) with a more
+ // natural-looking and composable result. Plus, this allows for
+ // operation-specific "modifiers", for example, "unmatch" and "update
+ // during match" logic for update (see var_update for details) or
+ // requiring explicit install=true to install exe{} prerequisites (see
+ // install::file_rule::filter()).
//
- // To query this value in rule implementations use the include() helpers
- // from <libbuild2/prerequisites.hxx>.
+ // To query this value and its operation-specific override if any, the
+ // rule implementations use the include() helper.
+ //
+ // Note that there are also related (but quite different) for_<operation>
+ // variables for operations that act as outer (e.g., test, install).
//
// [string] prereq visibility
//
@@ -480,14 +636,34 @@ namespace build2
build2::meta_operation_table meta_operation_table;
build2::operation_table operation_table;
+ // Import cache (see import_load()).
+ //
+ struct import_key
+ {
+ dir_path out_root; // Imported project's out root.
+ name target; // Imported target (unqualified).
+ uint64_t metadata; // Metadata version (0 if none).
+
+ friend bool
+ operator< (const import_key& x, const import_key& y)
+ {
+ int r;
+ return ((r = x.out_root.compare (y.out_root)) != 0 ? r < 0 :
+ (r = x.target.compare (y.target)) != 0 ? r < 0 :
+ x.metadata < y.metadata);
+ }
+ };
+
+ map<import_key, pair<names, const scope&>> import_cache;
+
// The old/new src_root remapping for subprojects.
//
dir_path old_src_root;
dir_path new_src_root;
- // NULL if this context hasn't already locked the loaded_modules state.
+ // NULL if this context hasn't already locked the module_libraries state.
//
- const loaded_modules_lock* modules_lock;
+ const module_libraries_lock* modules_lock;
// Nested context for updating build system modules and ad hoc recipes.
//
@@ -504,17 +680,76 @@ namespace build2
// properly setup context (including, normally, a self-reference in
// modules_context).
//
- explicit
+ // The var_override_function callback can be used to parse ad hoc project-
+ // wide variable overrides (see parse_variable_override()). This has to
+ // happen at a specific point during context construction (see the
+ // implementation for details).
+ //
+ // Note: see also the trace_* data members that, if needed, must be set
+ // separately, after construction.
+ //
+ struct reserves
+ {
+ size_t targets;
+ size_t variables;
+
+ reserves (): targets (0), variables (0) {}
+ reserves (size_t t, size_t v): targets (t), variables (v) {}
+ };
+
+ using var_override_function = void (context&, size_t&);
+
context (scheduler&,
global_mutexes&,
file_cache&,
- bool match_only = false,
+ optional<match_only_level> match_only = nullopt,
bool no_external_modules = false,
bool dry_run = false,
+ bool no_diag_buffer = false,
bool keep_going = true,
const strings& cmd_vars = {},
+ reserves = {0, 160},
optional<context*> module_context = nullptr,
- const loaded_modules_lock* inherited_mudules_lock = nullptr);
+ const module_libraries_lock* inherited_modules_lock = nullptr,
+ const function<var_override_function>& = nullptr);
+
+ // Special context with bare minimum of initializations. It is only
+ // guaranteed to be sufficiently initialized to call extract_variable().
+ //
+ // Note that for this purpose you may omit calls to init_diag() and
+ // init().
+ //
+ context ();
+
+ // Reserve elements in containers to avoid re-allocation/re-hashing. Zero
+ // values are ignored (that is, the corresponding container reserve()
+ // function is not called). Can only be called in the load phase.
+ //
+ void
+ reserve (reserves);
+
+ // Parse a variable override returning its type in the first half of the
+ // pair. Index is the variable index (used to derive unique name) and if
+ // buildspec is true then assume `--` is used as a separator between
+ // variables and buildscpec and issue appropriate diagnostics.
+ //
+ // Note: should only be called from the var_override_function constructor
+ // callback.
+ //
+ pair<char, variable_override>
+ parse_variable_override (const string& var, size_t index, bool buildspec);
+
+ // Enter project-wide (as opposed to global) variable overrides.
+ //
+ // If the amalgamation scope is specified, then use it instead of
+ // rs.weak_scope() to set overrides with global visibility (make sure you
+ // understand the implications before doing this).
+ //
+ void
+ enter_project_overrides (scope& rs,
+ const dir_path& out_base,
+ const variable_overrides&,
+ scope* amalgamation = nullptr);
// Set current meta-operation and operation.
//
@@ -661,8 +896,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- wait_guard (wait_guard&&);
- wait_guard& operator= (wait_guard&&);
+ wait_guard (wait_guard&&) noexcept;
+ wait_guard& operator= (wait_guard&&) noexcept;
wait_guard (const wait_guard&) = delete;
wait_guard& operator= (const wait_guard&) = delete;
diff --git a/libbuild2/context.ixx b/libbuild2/context.ixx
index 4f86c28..7b2a405 100644
--- a/libbuild2/context.ixx
+++ b/libbuild2/context.ixx
@@ -31,7 +31,7 @@ namespace build2
}
inline wait_guard::
- wait_guard (wait_guard&& x)
+ wait_guard (wait_guard&& x) noexcept
: ctx (x.ctx),
start_count (x.start_count),
task_count (x.task_count),
@@ -41,7 +41,7 @@ namespace build2
}
inline wait_guard& wait_guard::
- operator= (wait_guard&& x)
+ operator= (wait_guard&& x) noexcept
{
if (&x != this)
{
@@ -57,7 +57,7 @@ namespace build2
wait ()
{
phase_unlock u (*ctx, phase, true /* delay */);
- ctx->sched.wait (start_count, *task_count, u);
+ ctx->sched->wait (start_count, *task_count, u);
task_count = nullptr;
}
}
diff --git a/libbuild2/cxx/init.cxx b/libbuild2/cxx/init.cxx
index cd5169d..3ca920e 100644
--- a/libbuild2/cxx/init.cxx
+++ b/libbuild2/cxx/init.cxx
@@ -93,7 +93,7 @@ namespace build2
// Feature flags.
//
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */)); // All qualified.
// Similar to config.cxx.std, config.cxx.features.* overrides
// cxx.features.*.
@@ -194,6 +194,8 @@ namespace build2
// for this mode. So starting from 16 we only enable it in
// `experimental`.
//
+ // Note: no /std:c++23 yet as of MSVC 17.6.
+ //
if (v16_11)
o = "/std:c++20";
else if (v16_0)
@@ -316,12 +318,14 @@ namespace build2
;
else
{
- // Translate 11 to 0x, 14 to 1y, 17 to 1z, 20 to 2a, and 23 to 2b
- // for compatibility with older versions of the compilers.
+ // Translate 11 to 0x, 14 to 1y, 17 to 1z, 20 to 2a, 23 to 2b, and
+ // 26 to 2c for compatibility with older versions of the
+ // compilers.
//
o = "-std=";
- if (*v == "23") o += "c++2b";
+ if (*v == "26") o += "c++2c";
+ else if (*v == "23") o += "c++2b";
else if (*v == "20") o += "c++2a";
else if (*v == "17") o += "c++1z";
else if (*v == "14") o += "c++1y";
@@ -469,15 +473,20 @@ namespace build2
// Enter all the variables and initialize the module data.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
cc::config_data d {
cc::lang::cxx,
"cxx",
"c++",
+ "obj-c++",
BUILD2_DEFAULT_CXX,
".ii",
+ ".mii",
hinters,
@@ -668,6 +677,9 @@ namespace build2
vp["cc.export.libs"],
vp["cc.export.impl_libs"],
+ vp["cc.pkconfig.include"],
+ vp["cc.pkconfig.lib"],
+
vp.insert<string> ("cxx.stdlib"),
vp["cc.runtime"],
@@ -733,6 +745,9 @@ namespace build2
vp.insert_alias (d.c_module_name, "cxx.module_name");
vp.insert_alias (d.c_importable, "cxx.importable");
+ vp.insert_alias (d.c_pkgconfig_include, "cxx.pkgconfig.include");
+ vp.insert_alias (d.c_pkgconfig_lib, "cxx.pkgconfig.lib");
+
auto& m (extra.set_module (new config_module (move (d))));
m.guess (rs, loc, extra.hints);
@@ -772,6 +787,10 @@ namespace build2
nullptr
};
+ // Note that we don't include S{} here because none of the files we
+ // compile can plausibly want to include .S. (Maybe in inline assembler
+ // instructions?)
+ //
static const target_type* const inc[] =
{
&hxx::static_type,
@@ -781,6 +800,8 @@ namespace build2
&mxx::static_type,
&cxx::static_type,
&c::static_type,
+ &mm::static_type,
+ &m::static_type,
nullptr
};
@@ -805,7 +826,7 @@ namespace build2
auto& cm (
load_module<config_module> (rs, rs, "cxx.config", loc, extra.hints));
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */)); // All qualified.
bool modules (cast<bool> (rs["cxx.features.modules"]));
@@ -823,7 +844,6 @@ namespace build2
"cxx.compile",
"cxx.link",
"cxx.install",
- "cxx.uninstall",
cm.x_info->id.type,
cm.x_info->id.variant,
@@ -863,12 +883,54 @@ namespace build2
inc
};
- auto& m (extra.set_module (new module (move (d))));
+ auto& m (extra.set_module (new module (move (d), rs)));
m.init (rs, loc, extra.hints, *cm.x_info);
return true;
}
+ bool
+ objcxx_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::objcxx_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.objcxx module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("cxx"));
+
+ if (mod == nullptr)
+ fail (loc) << "cxx.objcxx module must be loaded after cxx module";
+
+ // Register the target type and "enable" it in the module.
+ //
+ // Note that we must register the target type regardless of whether the
+ // C++ compiler is capable of compiling Objective-C++. But we enable
+ // only if it is.
+ //
+ // Note: see similar code in the c module.
+ //
+ rs.insert_target_type<mm> ();
+
+ // Note that while Objective-C++ is supported by MinGW GCC, it's
+ // unlikely Clang supports it when targeting MSVC or Emscripten. But
+ // let's keep the check simple for now.
+ //
+ if (mod->ctype == compiler_type::gcc ||
+ mod->ctype == compiler_type::clang)
+ mod->x_obj = &mm::static_type;
+
+ return true;
+ }
+
static const module_functions mod_functions[] =
{
// NOTE: don't forget to also update the documentation in init.hxx if
@@ -876,6 +938,7 @@ namespace build2
{"cxx.guess", nullptr, guess_init},
{"cxx.config", nullptr, config_init},
+ {"cxx.objcxx", nullptr, objcxx_init},
{"cxx", nullptr, init},
{nullptr, nullptr, nullptr}
};
diff --git a/libbuild2/cxx/init.hxx b/libbuild2/cxx/init.hxx
index 094fea4..0e42cbe 100644
--- a/libbuild2/cxx/init.hxx
+++ b/libbuild2/cxx/init.hxx
@@ -22,6 +22,8 @@ namespace build2
// `cxx.guess` -- registers and sets some variables.
// `cxx.config` -- loads cxx.guess and sets more variables.
// `cxx` -- loads cxx.config and registers target types and rules.
+ // `cxx.objcxx` -- registers mm{} target type and enables Objective-C++
+ // compilation.
//
extern "C" LIBBUILD2_CXX_SYMEXPORT const module_functions*
build2_cxx_load ();
diff --git a/libbuild2/cxx/target.cxx b/libbuild2/cxx/target.cxx
index 982dcb4..5ead620 100644
--- a/libbuild2/cxx/target.cxx
+++ b/libbuild2/cxx/target.cxx
@@ -3,10 +3,6 @@
#include <libbuild2/cxx/target.hxx>
-#include <libbuild2/context.hxx>
-
-using namespace std;
-
namespace build2
{
namespace cxx
@@ -22,7 +18,7 @@ namespace build2
&target_pattern_var<hxx_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
extern const char ixx_ext_def[] = "ixx";
@@ -36,7 +32,7 @@ namespace build2
&target_pattern_var<ixx_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
extern const char txx_ext_def[] = "txx";
@@ -50,7 +46,7 @@ namespace build2
&target_pattern_var<txx_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
extern const char cxx_ext_def[] = "cxx";
@@ -64,7 +60,7 @@ namespace build2
&target_pattern_var<cxx_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
extern const char mxx_ext_def[] = "mxx";
@@ -78,7 +74,21 @@ namespace build2
&target_pattern_var<mxx_ext_def>,
nullptr,
&file_search,
- false
+ target_type::flag::none
+ };
+
+ extern const char mm_ext_def[] = "mm";
+ const target_type mm::static_type
+ {
+ "mm",
+ &cc::static_type,
+ &target_factory<mm>,
+ nullptr, /* fixed_extension */
+ &target_extension_var<mm_ext_def>,
+ &target_pattern_var<mm_ext_def>,
+ nullptr,
+ &file_search,
+ target_type::flag::none
};
}
}
diff --git a/libbuild2/cxx/target.hxx b/libbuild2/cxx/target.hxx
index cddab68..fc85f75 100644
--- a/libbuild2/cxx/target.hxx
+++ b/libbuild2/cxx/target.hxx
@@ -18,45 +18,58 @@ namespace build2
{
using cc::h;
using cc::c;
+ using cc::m;
class LIBBUILD2_CXX_SYMEXPORT hxx: public cc::cc
{
public:
- using cc::cc;
+ hxx (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_CXX_SYMEXPORT ixx: public cc::cc
{
public:
- using cc::cc;
+ ixx (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_CXX_SYMEXPORT txx: public cc::cc
{
public:
- using cc::cc;
+ txx (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_CXX_SYMEXPORT cxx: public cc::cc
{
public:
- using cc::cc;
+ cxx (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// The module interface unit is both like a header (e.g., we need to
@@ -67,11 +80,29 @@ namespace build2
class LIBBUILD2_CXX_SYMEXPORT mxx: public cc::cc
{
public:
- using cc::cc;
+ mxx (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // Objective-C++ source file.
+ //
+ class LIBBUILD2_CXX_SYMEXPORT mm: public cc::cc
+ {
+ public:
+ mm (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
diff --git a/libbuild2/depdb.cxx b/libbuild2/depdb.cxx
index 657c772..0dabeca 100644
--- a/libbuild2/depdb.cxx
+++ b/libbuild2/depdb.cxx
@@ -15,22 +15,39 @@ using namespace butl;
namespace build2
{
+ // Note that state::write with absent pos is interpreted as non-existent.
+ //
depdb_base::
- depdb_base (const path& p, timestamp mt)
+ depdb_base (const path& p, bool ro, state s, optional<uint64_t> pos)
+ : state_ (s), ro_ (ro)
{
- fdopen_mode om (fdopen_mode::out | fdopen_mode::binary);
+ if (s == state::write && ro)
+ {
+ new (&is_) ifdstream ();
+ buf_ = nullptr; // Shouldn't be needed.
+ return;
+ }
+
+ fdopen_mode om (fdopen_mode::binary);
ifdstream::iostate em (ifdstream::badbit);
- if (mt == timestamp_nonexistent)
+ if (s == state::write)
{
- state_ = state::write;
- om |= fdopen_mode::create | fdopen_mode::exclusive;
+ om |= fdopen_mode::out;
+
+ if (!pos)
+ om |= fdopen_mode::create | fdopen_mode::exclusive;
+
em |= ifdstream::failbit;
}
else
{
- state_ = state::read;
om |= fdopen_mode::in;
+
+ // Both in & out so can switch from read to write.
+ //
+ if (!ro)
+ om |= fdopen_mode::out;
}
auto_fd fd;
@@ -40,10 +57,10 @@ namespace build2
}
catch (const io_error&)
{
- bool c (state_ == state::write);
+ bool c (s == state::write && !pos);
diag_record dr (fail);
- dr << "unable to " << (c ? "create" : "open") << ' ' << p;
+ dr << "unable to " << (c ? "create " : "open ") << p;
if (c)
dr << info << "did you forget to add fsdir{} prerequisite for "
@@ -52,6 +69,16 @@ namespace build2
dr << endf;
}
+ if (pos)
+ try
+ {
+ fdseek (fd.get (), *pos, fdseek_mode::set);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to rewind " << p << ": " << e;
+ }
+
// Open the corresponding stream. Note that if we throw after that, the
// corresponding member will not be destroyed. This is the reason for the
// depdb/base split.
@@ -63,33 +90,46 @@ namespace build2
}
else
{
- new (&os_) ofdstream (move (fd), em);
+ new (&os_) ofdstream (move (fd), em, pos ? *pos : 0);
buf_ = static_cast<fdstreambuf*> (os_.rdbuf ());
}
}
depdb::
- depdb (path_type&& p, timestamp mt)
- : depdb_base (p, mt),
+ depdb (path_type&& p, bool ro, timestamp mt)
+ : depdb_base (p,
+ ro,
+ mt != timestamp_nonexistent ? state::read : state::write),
path (move (p)),
- mtime (mt != timestamp_nonexistent ? mt : timestamp_unknown),
- touch (false)
+ mtime (mt != timestamp_nonexistent ? mt : timestamp_unknown)
{
// Read/write the database format version.
//
if (state_ == state::read)
{
string* l (read ());
- if (l == nullptr || *l != "1")
- write ('1');
+ if (l != nullptr && *l == "1")
+ return;
}
- else
+
+ if (!ro)
write ('1');
+ else if (reading ())
+ change ();
+ }
+
+ depdb::
+ depdb (path_type p, bool ro)
+ : depdb (move (p), ro, build2::mtime (p))
+ {
}
depdb::
- depdb (path_type p)
- : depdb (move (p), build2::mtime (p))
+ depdb (reopen_state rs)
+ : depdb_base (rs.path, false, state::write, rs.pos),
+ path (move (rs.path)),
+ mtime (timestamp_unknown),
+ touch (rs.mtime)
{
}
@@ -98,51 +138,58 @@ namespace build2
{
assert (state_ != state::write);
- // Transfer the file descriptor from ifdstream to ofdstream. Note that the
- // steps in this dance must be carefully ordered to make sure we don't
- // call any destructors twice in the face of exceptions.
- //
- auto_fd fd (is_.release ());
-
- // Consider this scenario: we are overwriting an old line (so it ends with
- // a newline and the "end marker") but the operation failed half way
- // through. Now we have the prefix from the new line, the suffix from the
- // old, and everything looks valid. So what we need is to somehow
- // invalidate the old content so that it can never combine with (partial)
- // new content to form a valid line. One way to do that would be to
- // truncate the file.
- //
- if (trunc)
- try
+ if (ro_)
{
- fdtruncate (fd.get (), pos_);
+ buf_ = nullptr;
}
- catch (const io_error& e)
+ else
{
- fail << "unable to truncate " << path << ": " << e;
- }
+ // Transfer the file descriptor from ifdstream to ofdstream. Note that
+ // the steps in this dance must be carefully ordered to make sure we
+ // don't call any destructors twice in the face of exceptions.
+ //
+ auto_fd fd (is_.release ());
+
+ // Consider this scenario: we are overwriting an old line (so it ends
+ // with a newline and the "end marker") but the operation failed half
+ // way through. Now we have the prefix from the new line, the suffix
+ // from the old, and everything looks valid. So what we need is to
+ // somehow invalidate the old content so that it can never combine with
+ // (partial) new content to form a valid line. One way to do that would
+ // be to truncate the file.
+ //
+ if (trunc)
+ try
+ {
+ fdtruncate (fd.get (), pos_);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to truncate " << path << ": " << e;
+ }
- // Note: the file descriptor position can be beyond the pos_ value due to
- // the ifdstream buffering. That's why we need to seek to switch from
- // reading to writing.
- //
- try
- {
- fdseek (fd.get (), pos_, fdseek_mode::set);
- }
- catch (const io_error& e)
- {
- fail << "unable to rewind " << path << ": " << e;
- }
+ // Note: the file descriptor position can be beyond the pos_ value due
+ // to the ifdstream buffering. That's why we need to seek to switch from
+ // reading to writing.
+ //
+ try
+ {
+ fdseek (fd.get (), pos_, fdseek_mode::set);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to rewind " << path << ": " << e;
+ }
- // @@ Strictly speaking, ofdstream can throw which will leave us in a
- // non-destructible state. Unlikely but possible.
- //
- is_.~ifdstream ();
- new (&os_) ofdstream (move (fd),
- ofdstream::badbit | ofdstream::failbit,
- pos_);
- buf_ = static_cast<fdstreambuf*> (os_.rdbuf ());
+ // @@ Strictly speaking, ofdstream can throw which will leave us in a
+ // non-destructible state. Unlikely but possible.
+ //
+ is_.~ifdstream ();
+ new (&os_) ofdstream (move (fd),
+ ofdstream::badbit | ofdstream::failbit,
+ pos_);
+ buf_ = static_cast<fdstreambuf*> (os_.rdbuf ());
+ }
state_ = state::write;
mtime = timestamp_unknown;
@@ -282,14 +329,24 @@ namespace build2
}
void depdb::
- close ()
+ close (bool mc)
{
+ if (ro_)
+ {
+ is_.close ();
+ return;
+ }
+
// If we are at eof, then it means all lines are good, there is the "end
// marker" at the end, and we don't need to do anything, except, maybe
// touch the file. Otherwise, if we are still in the read mode, truncate
// the rest, and then add the "end marker" (we cannot have anything in the
// write mode since we truncate in change()).
//
+ // Note that we handle touch with timestamp_unknown specially by making a
+ // modification to the file (which happens naturally in the write mode)
+ // and letting the filesystem update its mtime.
+ //
if (state_ == state::read_eof)
{
if (!touch)
@@ -314,8 +371,11 @@ namespace build2
// Note also that utime() on Windows is a bad idea (see touch_file() for
// details).
//
- pos_ = buf_->tellg (); // The last line is accepted.
- change (false /* truncate */); // Write end marker below.
+ if (*touch == timestamp_unknown)
+ {
+ pos_ = buf_->tellg (); // The last line is accepted.
+ change (false /* truncate */); // Write end marker below.
+ }
}
else if (state_ != state::write)
{
@@ -323,9 +383,10 @@ namespace build2
change (true /* truncate */);
}
- if (mtime_check ())
+ if (mc && mtime_check ())
start_ = system_clock::now ();
+ if (state_ == state::write)
try
{
os_.put ('\0'); // The "end marker".
@@ -333,7 +394,17 @@ namespace build2
}
catch (const io_error& e)
{
- fail << "unable to flush " << path << ": " << e;
+ fail << "unable to flush file " << path << ": " << e;
+ }
+
+ if (touch && *touch != timestamp_unknown)
+ try
+ {
+ file_mtime (path, *touch);
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to touch file " << path << ": " << e;
}
// On some platforms (currently confirmed on FreeBSD running as VMs) one
@@ -353,6 +424,37 @@ namespace build2
#endif
}
+ depdb::reopen_state depdb::
+ close_to_reopen ()
+ {
+ assert (!touch);
+
+ if (state_ != state::write)
+ {
+ pos_ = buf_->tellg (); // The last line is accepted.
+ change (state_ != state::read_eof /* truncate */);
+ }
+
+ pos_ = buf_->tellp ();
+
+ try
+ {
+ os_.put ('\0'); // The "end marker".
+ os_.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to flush file " << path << ": " << e;
+ }
+
+ // Note: must still be done for FreeBSD if changing anything here (see
+ // close() for details).
+ //
+ mtime = build2::mtime (path);
+
+ return reopen_state {move (path), pos_, mtime};
+ }
+
void depdb::
check_mtime_ (const path_type& t, timestamp e)
{
diff --git a/libbuild2/depdb.hxx b/libbuild2/depdb.hxx
index c3e60a2..5855c3f 100644
--- a/libbuild2/depdb.hxx
+++ b/libbuild2/depdb.hxx
@@ -62,16 +62,19 @@ namespace build2
//
struct LIBBUILD2_SYMEXPORT depdb_base
{
- explicit
- depdb_base (const path&, timestamp);
+ // Implementation details.
+ //
+ enum class state {read, read_eof, write};
+ depdb_base (const path&, bool ro, state, optional<uint64_t> pos = nullopt);
~depdb_base ();
- enum class state {read, read_eof, write} state_;
+ state state_;
+ bool ro_;
union
{
- ifdstream is_; // read, read_eof
+ ifdstream is_; // read, read_eof, (ro && write)
ofdstream os_; // write
};
@@ -90,31 +93,77 @@ namespace build2
// close() even if otherwise no modifications are necessary (i.e., the
// database is in the read mode and is at eof).
//
- path_type path;
- timestamp mtime;
- bool touch;
+ // If touch is present then update the database modification time in
+ // close() even if otherwise no modifications are necessary (i.e., the
+ // database is in the read mode and is at eof). Specifically, if touch is
+ // timestamp_unknown, then set mtime to the current (filesystem) time.
+ // Otherwise, set it to the specified time (which should be sourced from
+ // the filesystem, see touch_file() for details).
+ //
+ path_type path;
+ timestamp mtime;
+ optional<timestamp> touch;
// Open the database for reading. Note that if the file does not exist,
// has wrong format version, or is corrupt, then the database will be
// immediately switched to writing.
//
+ // If read_only is true, then don't actually make any modifications to the
+ // database file. In other words, the database is still nominally switched
+ // to writing but without any filesystem changes. Note that calling any
+ // write-only functions (write(), touch, etc) on such a database is
+ // illegal.
+ //
// The failure commonly happens when the user tries to stash the target in
// a non-existent subdirectory but forgets to add the corresponding fsdir{}
// prerequisite. That's why the issued diagnostics may provide the
// corresponding hint.
//
explicit
- depdb (path_type);
+ depdb (path_type, bool read_only = false);
+
+ struct reopen_state
+ {
+ path_type path;
+ uint64_t pos;
+ timestamp mtime;
+ };
+
+ // Reopen the database for writing. The reopen state must have been
+ // obtained by calling close_to_reopen() below. Besides opening the file
+ // and adjusting its write position, this constructor also sets touch to
+ // the timestamp returned by close_to_reopen() to help maintain the
+ // "database mtime is before target mtime" invariant.
+ //
+ // This functionality is primarily useful to handle dynamic dependency
+ // information that is produced as a byproduct of compilation. In this
+ // case the "static" part of the database is written in match and the
+ // "dynamic" part -- in execute.
+ //
+ explicit
+ depdb (reopen_state);
// Close the database. If this function is not called, then the database
// may be left in the old/currupt state. Note that in the read mode this
// function will "chop off" lines that haven't been read.
//
// Make sure to also call check_mtime() after updating the target to
- // perform the target/database modification times sanity checks.
+ // perform the target/database modification times sanity checks. Pass
+ // false to close() to avoid unnecessary work if using the static version
+ // of check_mtime() (or not using it at all for some reason).
//
void
- close ();
+ close (bool mtime_check = true);
+
+ // Temporarily close the database to be reopened for writing later.
+ // Besides the file path and write position also return the database file
+ // modification time after closing.
+ //
+ // Note that after this call the resulting database file is valid and if
+ // it's not reopened later, the result is equivalent to calling close().
+ //
+ reopen_state
+ close_to_reopen ();
// Flush any unwritten data to disk. This is primarily useful when reusing
// a (partially written) database as an input to external programs (e.g.,
@@ -149,7 +198,7 @@ namespace build2
// the next line in the database (which you are free to move from). If you
// then call write(), this line will be overwritten.
//
- // If the result is NULL, then it means no next line is unavailable. This
+ // If the result is NULL, then it means no next line is available. This
// can be due to several reasons:
//
// - eof reached (you can detect this by calling more() before read())
@@ -262,7 +311,7 @@ namespace build2
depdb& operator= (const depdb&) = delete;
private:
- depdb (path_type&&, timestamp);
+ depdb (path_type&&, bool, timestamp);
void
change (bool truncate = true);
diff --git a/libbuild2/depdb.ixx b/libbuild2/depdb.ixx
index 819fadd..18b4351 100644
--- a/libbuild2/depdb.ixx
+++ b/libbuild2/depdb.ixx
@@ -8,7 +8,7 @@ namespace build2
inline depdb_base::
~depdb_base ()
{
- if (state_ != state::write)
+ if (state_ != state::write || ro_)
is_.~ifdstream ();
else
os_.~ofdstream ();
@@ -17,7 +17,7 @@ namespace build2
inline void depdb::
flush ()
{
- if (state_ == state::write)
+ if (state_ == state::write && !ro_)
try
{
os_.flush ();
@@ -37,7 +37,7 @@ namespace build2
inline void depdb::
check_mtime (const path_type& t, timestamp e)
{
- if (state_ == state::write && mtime_check ())
+ if (state_ == state::write && !ro_ && mtime_check ())
check_mtime_ (t, e);
}
diff --git a/libbuild2/diagnostics.cxx b/libbuild2/diagnostics.cxx
index 4d2d7ce..e164f10 100644
--- a/libbuild2/diagnostics.cxx
+++ b/libbuild2/diagnostics.cxx
@@ -3,7 +3,8 @@
#include <libbuild2/diagnostics.hxx>
-#include <cstring> // strchr()
+#include <cstring> // strcmp(), strchr(), memcpy()
+#include <cstdlib> // getenv()
#include <libbutl/process-io.hxx>
@@ -13,39 +14,515 @@
#include <libbuild2/context.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
- // Diagnostics state (verbosity level, progress, etc). Keep disabled until
- // set from options.
+ // Diagnostics state (verbosity level, progress, etc). Keep default/disabled
+ // until set from options.
//
- uint16_t verb = 0;
- bool silent = true;
+ uint16_t verb = 1;
+ bool silent = false;
optional<bool> diag_progress_option;
+ optional<bool> diag_color_option;
bool diag_no_line = false;
bool diag_no_column = false;
- bool stderr_term = false;
+ optional<const char*> stderr_term = nullopt;
+ bool stderr_term_color = false;
void
- init_diag (uint16_t v, bool s, optional<bool> p, bool nl, bool nc, bool st)
+ init_diag (uint16_t v,
+ bool s,
+ optional<bool> p,
+ optional<bool> c,
+ bool nl,
+ bool nc,
+ bool st)
{
assert (!s || v == 0);
verb = v;
silent = s;
diag_progress_option = p;
+ diag_color_option = c;
diag_no_line = nl;
diag_no_column = nc;
- stderr_term = st;
+
+ if (st)
+ {
+ stderr_term = std::getenv ("TERM");
+
+ stderr_term_color =
+#ifdef _WIN32
+ // For now we disable color on Windows since it's unclear if/where/how
+ // it is supported. Maybe one day someone will figure this out.
+ //
+ false
+#else
+ // This test was lifted from GCC (Emacs shell sets TERM=dumb).
+ //
+ *stderr_term != nullptr && strcmp (*stderr_term, "dumb") != 0
+#endif
+ ;
+ }
+ else
+ {
+ stderr_term = nullopt;
+ stderr_term_color = false;
+ }
}
// Stream verbosity.
//
const int stream_verb_index = ostream::xalloc ();
+ // print_diag()
+ //
+ void
+ print_diag_impl (const char* p, target_key* l, target_key&& r, const char* c)
+ {
+ // @@ Print directly to diag_stream (and below)? Won't we be holding
+ // the lock longer?
+
+ diag_record dr (text);
+
+ dr << p << ' ';
+
+ if (l != nullptr)
+ {
+ // Omit the @.../ qualification in either lhs or rhs if it's implied by
+ // the other.
+ //
+ // @@ Shouldn't we, strictly speaking, also check that they belong to
+ // the same project? Though it would be far-fetched to use another
+ // project's target from src. Or maybe not.
+ //
+ if (!l->out->empty ())
+ {
+ if (r.out->empty ())
+ l->out = &empty_dir_path;
+ }
+ else if (!r.out->empty ())
+ r.out = &empty_dir_path;
+
+ dr << *l << ' ' << (c == nullptr ? "->" : c) << ' ';
+ }
+
+ dr << r;
+ }
+
+
+ static inline bool
+ print_diag_cmp (const pair<optional<string>, const target_key*>& x,
+ const pair<optional<string>, const target_key*>& y)
+ {
+ return (x.second->dir->compare (*y.second->dir) == 0 &&
+ x.first->compare (*y.first) == 0);
+ }
+
+ // Return true if we have multiple partitions (see below for details).
+ //
+ static bool
+ print_diag_collect (const vector<target_key>& tks,
+ ostringstream& os,
+ stream_verbosity sv,
+ vector<pair<optional<string>, const target_key*>>& ns)
+ {
+ ns.reserve (tks.size ());
+
+ for (const target_key& k: tks)
+ {
+ bool r;
+ if (auto p = k.type->print)
+ r = p (os, k, true /* name_only */);
+ else
+ r = to_stream (os, k, sv, true /* name_only */);
+
+ ns.push_back (make_pair (r ? optional<string> (os.str ()) : nullopt, &k));
+
+ os.clear ();
+ os.str (string ()); // Note: just seekp(0) is not enough.
+ }
+
+ // Partition.
+ //
+ // While at it also determine whether we have multiple partitions.
+ //
+ bool ml (false);
+ for (auto b (ns.begin ()), e (ns.end ()); b != e; )
+ {
+ const pair<optional<string>, const target_key*>& x (*b++);
+
+ // Move all the elements that are equal to x to the front, preserving
+ // order.
+ //
+ b = stable_partition (
+ b, e,
+ [&x] (const pair<optional<string>, const target_key*>& y)
+ {
+ return (x.first && y.first && print_diag_cmp (x, y));
+ });
+
+ if (!ml && b != e)
+ ml = true;
+ }
+
+ return ml;
+ }
+
+ static void
+ print_diag_print (const vector<pair<optional<string>, const target_key*>>& ns,
+ ostringstream& os,
+ stream_verbosity sv,
+ const optional<string>& ml)
+ {
+ for (auto b (ns.begin ()), i (b), e (ns.end ()); i != e; )
+ {
+ if (i != b)
+ os << '\n' << *ml;
+
+ const pair<optional<string>, const target_key*>& p (*i);
+
+ if (!p.first) // Irregular.
+ {
+ os << *p.second;
+ ++i;
+ continue;
+ }
+
+ // Calculate the number of members in this partition.
+ //
+ size_t n (1);
+ for (auto j (i + 1); j != e && j->first && print_diag_cmp (*i, *j); ++j)
+ ++n;
+
+ // Similar code to to_stream(target_key).
+ //
+
+ // Print the directory.
+ //
+ {
+ const target_key& k (*p.second);
+
+ uint16_t dv (sv.path);
+
+ // Note: relative() returns empty for './'.
+ //
+ const dir_path& rd (dv < 1 ? relative (*k.dir) : *k.dir);
+
+ if (!rd.empty ())
+ {
+ if (dv < 1)
+ os << diag_relative (rd);
+ else
+ to_stream (os, rd, true /* representation */);
+ }
+ }
+
+ // Print target types.
+ //
+ {
+ if (n != 1)
+ os << '{';
+
+ for (auto j (i), e (i + n); j != e; ++j)
+ os << (j != i ? " " : "") << j->second->type->name;
+
+ if (n != 1)
+ os << '}';
+ }
+
+ // Print the target name (the same for all members of this partition).
+ //
+ os << '{' << *i->first << '}';
+
+ i += n;
+ }
+ }
+
+ template <typename L> // L can be target_key, path, or string.
+ static void
+ print_diag_impl (const char* p,
+ const L* l, bool lempty,
+ vector<target_key>&& rs,
+ const char* c)
+ {
+ assert (rs.size () > 1);
+
+ // The overall plan is as follows:
+ //
+ // 1. Collect the printed names for all the group members.
+ //
+ // Note if the printed representation is irregular (see
+ // to_stream(target_key) for details). We will print such members each
+ // on a separate line.
+ //
+ // 2. Move the names around so that we end up with contiguous partitions
+ // of targets with the same name.
+ //
+ // 3. Print the partitions, one per line.
+ //
+ // The steps 1-2 are performed by print_diag_collect() above.
+ //
+ vector<pair<optional<string>, const target_key*>> ns;
+
+ // Use the diag_record's ostringstream so that we get the appropriate
+ // stream verbosity, etc.
+ //
+ diag_record dr (text);
+ ostringstream& os (dr.os);
+ stream_verbosity sv (stream_verb (os));
+
+ optional<string> ml;
+ if (print_diag_collect (rs, os, sv, ns))
+ ml = string ();
+
+ // Print.
+ //
+ os << p << ' ';
+
+ if (l != nullptr)
+ os << *l << (lempty ? "" : " ") << (c == nullptr ? "->" : c) << ' ';
+
+ if (ml)
+ ml = string (os.str ().size (), ' '); // Indentation.
+
+ print_diag_print (ns, os, sv, ml);
+ }
+
+ template <typename R> // R can be target_key, path, or string.
+ static void
+ print_diag_impl (const char* p,
+ vector<target_key>&& ls, const R& r,
+ const char* c)
+ {
+ assert (ls.size () > 1);
+
+ // As above but for the group on the LHS.
+ //
+ vector<pair<optional<string>, const target_key*>> ns;
+
+ diag_record dr (text);
+ ostringstream& os (dr.os);
+ stream_verbosity sv (stream_verb (os));
+
+ optional<string> ml;
+ if (print_diag_collect (ls, os, sv, ns))
+ ml = string ();
+
+ // Print.
+ //
+ os << p << ' ';
+
+ if (ml)
+ ml = string (os.str ().size (), ' '); // Indentation.
+
+ print_diag_print (ns, os, sv, ml);
+
+ // @@ TODO: make sure `->` is aligned with longest line printed by
+ // print_diag_print(). Currently it can look like this:
+ //
+ // ln /tmp/hello-gcc/hello/hello/{hxx cxx}{hello-types}
+ // /tmp/hello-gcc/hello/hello/{hxx cxx}{hello-stubs}
+ // /tmp/hello-gcc/hello/hello/cxx{hello-ext} -> ./
+ //
+ os << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag_impl (const char* p,
+ target_key* l, vector<target_key>&& rs,
+ const char* c)
+ {
+ // Note: keep this implementation separate from the above for performance.
+ //
+ assert (!rs.empty ());
+
+ if (rs.size () == 1)
+ {
+ print_diag_impl (p, l, move (rs.front ()), c);
+ return;
+ }
+
+ // At the outset handle out-qualification as above. Here we assume that
+ // all the targets in the group have the same out.
+ //
+ if (l != nullptr)
+ {
+ if (!l->out->empty ())
+ {
+ if (rs.front ().out->empty ())
+ l->out = &empty_dir_path;
+ }
+ else if (!rs.front ().out->empty ())
+ {
+ for (target_key& r: rs)
+ r.out = &empty_dir_path;
+ }
+ }
+
+ print_diag_impl<target_key> (p, l, false /* empty */, move (rs), c);
+ }
+
+ // Note: these can't be inline since need the target class definition.
+ //
+ void
+ print_diag (const char* p, const target& l, const target& r, const char* c)
+ {
+ target_key lk (l.key ());
+ print_diag_impl (p, &lk, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, target_key&& l, const target& r, const char* c)
+ {
+ print_diag_impl (p, &l, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, const target& l, target_key&& r, const char* c)
+ {
+ target_key lk (l.key ());
+ print_diag_impl (p, &lk, move (r), c);
+ }
+
+ void
+ print_diag (const char* p, const path& l, const target& r, const char* c)
+ {
+ return print_diag (p, l, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, const path& l, target_key&& r, const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const path& l, vector<target_key>&& rs,
+ const char* c)
+ {
+ assert (!rs.empty ());
+
+ if (rs.size () == 1)
+ print_diag (p, l, move (rs.front ()), c);
+ else
+ print_diag_impl<path> (p, &l, false /* empty */, move (rs), c);
+ }
+
+ void
+ print_diag (const char* p, const string& l, const target& r, const char* c)
+ {
+ return print_diag (p, l, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, const string& l, target_key&& r, const char* c)
+ {
+ text << p << ' '
+ << l << (l.empty () ? "" : " ")
+ << (c == nullptr ? "->" : c) << ' '
+ << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const string& l, vector<target_key>&& rs,
+ const char* c)
+ {
+ assert (!rs.empty ());
+
+ if (rs.size () == 1)
+ print_diag (p, l, move (rs.front ()), c);
+ else
+ print_diag_impl<string> (p, &l, l.empty (), move (rs), c);
+ }
+
+ void
+ print_diag (const char* p, const target& r)
+ {
+ print_diag_impl (p, nullptr, r.key (), nullptr);
+ }
+
+ void
+ print_diag (const char* p, const dir_path& r)
+ {
+ text << p << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p, const path_name_view& r)
+ {
+ text << p << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const target& l, const path_name_view& r,
+ const char* c)
+ {
+ // @@ TODO: out qualification stripping: only do if p.out is subdir of t
+ // (also below)?
+
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p, const target& l, const dir_path& r, const char* c)
+ {
+ print_diag (p, l.key (), r, c);
+ }
+
+ void
+ print_diag (const char* p, target_key&& l, const dir_path& r, const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ vector<target_key>&& ls, const dir_path& r,
+ const char* c)
+ {
+ assert (!ls.empty ());
+
+ if (ls.size () == 1)
+ print_diag (p, move (ls.front ()), r, c);
+ else
+ print_diag_impl<dir_path> (p, move (ls), r, c);
+ }
+
+ void
+ print_diag (const char* p, const path& l, const dir_path& r, const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const path& l, const path_name_view& r,
+ const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const string& l, const path_name_view& r,
+ const char* c)
+ {
+ text << p << ' '
+ << l << (l.empty () ? "" : " ")
+ << (c == nullptr ? "->" : c) << ' '
+ << r;
+ }
+
+ // print_process()
+ //
void
print_process (const char* const* args, size_t n)
{
@@ -77,30 +554,6 @@ namespace build2
dr << butl::process_args {args, n};
}
- // Diagnostics stack.
- //
- static
-#ifdef __cpp_thread_local
- thread_local
-#else
- __thread
-#endif
- const diag_frame* diag_frame_stack = nullptr;
-
- const diag_frame* diag_frame::
- stack () noexcept
- {
- return diag_frame_stack;
- }
-
- const diag_frame* diag_frame::
- stack (const diag_frame* f) noexcept
- {
- const diag_frame* r (diag_frame_stack);
- diag_frame_stack = f;
- return r;
- }
-
// Diagnostic facility, project specifics.
//
@@ -162,6 +615,305 @@ namespace build2
const fail_mark fail ("error");
const fail_end endf;
+ // diag_buffer
+ //
+
+ int diag_buffer::
+ pipe (context& ctx, bool force)
+ {
+ return (ctx.sched->serial () || ctx.no_diag_buffer) && !force ? 2 : -1;
+ }
+
+ void diag_buffer::
+ open (const char* args0, auto_fd&& fd, fdstream_mode m)
+ {
+ assert (state_ == state::closed && args0 != nullptr);
+
+ serial = ctx_.sched->serial ();
+ nobuf = !serial && ctx_.no_diag_buffer;
+
+ if (fd != nullfd)
+ {
+ try
+ {
+ is.open (move (fd), m | fdstream_mode::text);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << args0 << " stderr: " << e;
+ }
+ }
+
+ this->args0 = args0;
+ state_ = state::opened;
+ }
+
+ void diag_buffer::
+ open_eof (const char* args0)
+ {
+ assert (state_ == state::closed && args0 != nullptr);
+
+ serial = ctx_.sched->serial ();
+ nobuf = !serial && ctx_.no_diag_buffer;
+ this->args0 = args0;
+ state_ = state::eof;
+ }
+
+ bool diag_buffer::
+ read (bool force)
+ {
+ assert (state_ == state::opened);
+
+ bool r;
+ if (is.is_open ())
+ {
+ try
+ {
+ // Copy buffers directly.
+ //
+ auto copy = [this] (fdstreambuf& sb)
+ {
+ const char* p (sb.gptr ());
+ size_t n (sb.egptr () - p);
+
+ // Allocate at least fdstreambuf::buffer_size to reduce
+ // reallocations and memory fragmentation.
+ //
+ size_t i (buf.size ());
+ if (i == 0 && n < fdstreambuf::buffer_size)
+ buf.reserve (fdstreambuf::buffer_size);
+
+ buf.resize (i + n);
+ memcpy (buf.data () + i, p, n);
+
+ sb.gbump (static_cast<int> (n));
+ };
+
+ if (is.blocking ())
+ {
+ if ((serial || nobuf) && !force)
+ {
+ // This is the case where we are called after custom processing.
+ //
+ assert (buf.empty ());
+
+ // Note that the eof check is important: if the stream is at eof,
+ // this and all subsequent writes to the diagnostics stream will
+ // fail (and you won't see a thing).
+ //
+ if (is.peek () != ifdstream::traits_type::eof ())
+ {
+ if (serial)
+ {
+ // Holding the diag lock while waiting for diagnostics from
+ // the child process would be a bad idea in the parallel
+ // build. But it should be harmless in serial.
+ //
+ // @@ TODO: do direct buffer copy.
+ //
+ diag_stream_lock dl;
+ *diag_stream << is.rdbuf ();
+ }
+ else
+ {
+ // Read/write one line at a time not to hold the lock for too
+ // long.
+ //
+ for (string l; !eof (std::getline (is, l)); )
+ {
+ diag_stream_lock dl;
+ *diag_stream << l << '\n';
+ }
+ }
+ }
+ }
+ else
+ {
+ fdstreambuf& sb (*static_cast<fdstreambuf*> (is.rdbuf ()));
+
+ while (is.peek () != istream::traits_type::eof ())
+ copy (sb);
+ }
+
+ r = false;
+ }
+ else
+ {
+ // We do not support finishing off after the custom processing in
+ // the non-blocking mode unless forced to buffer (but could probably
+ // do if necessary).
+ //
+ assert (!(serial || nobuf) || force);
+
+ fdstreambuf& sb (*static_cast<fdstreambuf*> (is.rdbuf ()));
+
+ // Try not to allocate the buffer if there is no diagnostics (the
+ // common case).
+ //
+ // Note that we must read until blocked (0) or EOF (-1).
+ //
+ streamsize n;
+ while ((n = sb.in_avail ()) > 0)
+ copy (sb);
+
+ r = (n != -1);
+ }
+
+ if (!r)
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ // For now we assume (here and pretty much everywhere else) that the
+ // output can't fail.
+ //
+ fail << "unable to read from " << args0 << " stderr: " << e;
+ }
+ }
+ else
+ r = false;
+
+ if (!r)
+ state_ = state::eof;
+
+ return r;
+ }
+
+ void diag_buffer::
+ write (const string& s, bool nl, bool force)
+ {
+ assert (state_ != state::closed);
+
+ // Similar logic to read() above.
+ //
+ if ((serial || nobuf) && !force)
+ {
+ assert (buf.empty ());
+
+ diag_stream_lock dl;
+ *diag_stream << s;
+ if (nl)
+ *diag_stream << '\n';
+ }
+ else
+ {
+ size_t n (s.size () + (nl ? 1 : 0));
+
+ size_t i (buf.size ());
+ if (i == 0 && n < fdstreambuf::buffer_size)
+ buf.reserve (fdstreambuf::buffer_size);
+
+ buf.resize (i + n);
+ memcpy (buf.data () + i, s.c_str (), s.size ());
+
+ if (nl)
+ buf.back () = '\n';
+ }
+ }
+
+ void diag_buffer::
+ close (const char* const* args,
+ const process_exit& pe,
+ uint16_t v,
+ bool omit_normal,
+ const location& loc)
+ {
+ tracer trace ("diag_buffer::close");
+
+ assert (state_ != state::closed);
+
+ // We need to make sure the command line we print on the unsuccessful exit
+ // is inseparable from any buffered diagnostics. So we prepare the record
+ // first and then write both while holding the diagnostics stream lock.
+ //
+ diag_record dr;
+ if (!pe)
+ {
+ // Note: see similar code in run_finish_impl().
+ //
+ if (omit_normal && pe.normal ())
+ {
+ l4 ([&]{trace << "process " << args[0] << " " << pe;});
+ }
+ else
+ {
+ dr << error (loc) << "process " << args[0] << " " << pe;
+
+ if (verb >= 1 && verb <= v)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+ }
+ }
+
+ close (move (dr));
+ }
+
+ void diag_buffer::
+ close (diag_record&& dr)
+ {
+ assert (state_ != state::closed);
+
+ // We may still be in the open state in case of custom processing.
+ //
+ if (state_ == state::opened)
+ {
+ if (is.is_open ())
+ {
+ try
+ {
+ if (is.good ())
+ {
+ if (is.blocking ())
+ {
+ assert (is.peek () == ifdstream::traits_type::eof ());
+ }
+ else
+ {
+ assert (is.rdbuf ()->in_avail () == -1);
+ }
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << args0 << " stderr: " << e;
+ }
+ }
+
+ state_ = state::eof;
+ }
+
+ // Note: flushing of the diag record may throw.
+ //
+ args0 = nullptr;
+ state_ = state::closed;
+
+ if (!buf.empty () || !dr.empty ())
+ {
+ diag_stream_lock l;
+
+ if (!buf.empty ())
+ {
+ diag_stream->write (buf.data (), static_cast<streamsize> (buf.size ()));
+ buf.clear ();
+ }
+
+ if (!dr.empty ())
+ dr.flush ([] (const butl::diag_record& r)
+ {
+ // Similar to default_writer().
+ //
+ *diag_stream << r.os.str () << '\n';
+ diag_stream->flush ();
+ });
+ else
+ diag_stream->flush ();
+ }
+ }
+
// diag_do(), etc.
//
string
diff --git a/libbuild2/diagnostics.hxx b/libbuild2/diagnostics.hxx
index f3d9db4..ef41f22 100644
--- a/libbuild2/diagnostics.hxx
+++ b/libbuild2/diagnostics.hxx
@@ -14,17 +14,239 @@
namespace build2
{
- using butl::diag_record;
+ struct diag_record;
// Throw this exception to terminate the build. The handler should
// assume that the diagnostics has already been issued.
//
class failed: public std::exception {};
- // Print process commmand line. If the number of elements is specified
- // (or the second version is used), then it will print the piped multi-
- // process command line, if present. In this case, the expected format
- // is as follows:
+ // Print low-verbosity recipe diagnostics in the forms:
+ //
+ // <prog> <l-target> <comb> <r-target>
+ // <prog> <r-target>
+ //
+ // Where <prog> is an abbreviated/generalized program name, such as c++
+ // (rather than g++ or clang++) or yacc (rather than bison or byacc),
+ // <l-target> is typically the "main" prerequisite target, such as the C++
+ // source file to compile, <r-target> is typically the target being
+ // produced, and <comb> is the combiner, typically "->".
+ //
+ // The second form (without <l-target> and <comb>) should be used when there
+ // is no natural "main" prerequisite, for example, for linking as well as
+ // for programs that act upon the target, such as mkdir, rm, test, etc.
+ //
+ // Note also that these functions omit the @.../ qualification in either
+ // <l-target> or <r-target> if it's implied by the other.
+ //
+ // For example:
+ //
+ // mkdir fsdir{details/}
+ // c++ cxx{hello} -> obje{hello}
+ // ld exe{hello}
+ //
+ // test exe{hello} + testscript
+ //
+ // install exe{hello} -> /usr/bin/
+ // uninstall exe{hello} <- /usr/bin/
+ //
+ // rm exe{hello}
+ // rm obje{hello}
+ // rmdir fsdir{details/}
+ //
+ // Examples of target groups:
+ //
+ // cli cli{foo} -> {hxx cxx}{foo}
+ //
+ // thrift thrift{foo} -> {hxx cxx}{foo-types}
+ // {hxx cxx}{foo-stubs}
+ //
+ // Potentially we could also support target groups for <l-target>:
+ //
+ // tool {hxx cxx}{foo} -> {hxx cxx}{foo-types}
+ //
+ // tool {hxx cxx}{foo-types}
+ // {hxx cxx}{foo-stubs} -> {hxx cxx}{foo-insts}
+ // {hxx cxx}{foo-impls}
+ //
+ // Currently we only support this for the `group -> dir_path` form (used
+ // by the backlink machinery).
+ //
+ // See also the `diag` Buildscript pseudo-builtin which is reduced to one of
+ // the print_diag() calls (adhoc_buildscript_rule::print_custom_diag()). In
+ // particular, if you are adding a new overload, also consider if/how it
+  // should be handled there.
+ //
+ // Note: see GH issue #40 for additional background and rationale.
+ //
+ // If <comb> is not specified, then "->" is used by default.
+
+ // prog target -> target
+ // prog target -> group
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ target_key&& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, target_key&& r,
+ const char* comb = nullptr);
+
+ void
+ print_diag (const char* prog,
+ target_key&& l, target_key&& r,
+ const char* comb = nullptr);
+
+ // Note: using small_vector would require target_key definition.
+ //
+ void
+ print_diag (const char* prog,
+ target_key&& l, vector<target_key>&& r,
+ const char* comb = nullptr);
+
+ // prog path -> target
+ // prog path -> group
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, target_key&& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, vector<target_key>&& r,
+ const char* comb = nullptr);
+
+ // prog string -> target
+ // prog string -> group
+ //
+ // Use these versions if, for example, input information is passed as an
+ // argument.
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, target_key&& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, vector<target_key>&& r,
+ const char* comb = nullptr);
+
+ // prog target
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog, const target&);
+
+ void
+ print_diag (const char* prog, target_key&&);
+
+ // prog group
+ //
+ void
+ print_diag (const char* prog, vector<target_key>&&);
+
+ // prog path
+ //
+ // Special versions for cases like mkdir/rmdir, save, etc.
+ //
+ // Note: use path_name("-") if the result is written to stdout.
+ //
+ void
+ print_diag (const char* prog, const path&);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog, const dir_path&);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog, const path_name_view&);
+
+  // Special versions for ln, cp, rm, install/uninstall, dist, etc.
+ //
+ // Note: use path_name ("-") if the result is written to stdout.
+
+ // prog target -> path
+ //
+ void
+ print_diag (const char* prog,
+ const target& l, const path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, const path_name_view& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ target_key&& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ // prog group -> dir_path
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ vector<target_key>&& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ // prog path -> path
+ //
+ void
+ print_diag (const char* prog,
+ const path& l, const path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, const path_name_view& r,
+ const char* comb = nullptr);
+
+ // prog string -> path
+ //
+ // Use this version if, for example, input information is passed as an
+ // argument.
+ //
+ void
+ print_diag (const char* prog,
+ const string& l, const path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, const path_name_view& r,
+ const char* comb = nullptr);
+
+  // Print process command line. If the number of elements is specified (or
+ // the const cstrings& version is used), then it will print the piped multi-
+ // process command line, if present. In this case, the expected format is as
+ // follows:
//
// name1 arg arg ... nullptr
// name2 arg arg ... nullptr
@@ -179,92 +401,124 @@ namespace build2
using butl::diag_progress_lock;
// Return true if progress is to be shown. The max_verb argument is the
- // maximum verbosity level that this type of progress should be shown by
- // default.
+ // maximum verbosity level that this type of progress should be shown at by
+ // default. If it is verb_never, then both min and max verbosity checks are
+ // omitted, assuming the caller takes care of that themselves.
//
inline bool
show_progress (uint16_t max_verb)
{
return diag_progress_option
? *diag_progress_option
- : stderr_term && verb >= 1 && verb <= max_verb;
+ : stderr_term && (max_verb == verb_never ||
+ (verb >= 1 && verb <= max_verb));
}
- // Diagnostic facility, base infrastructure.
+ // Diagnostics color.
+ //
+ inline bool
+ show_diag_color ()
+ {
+ return diag_color_option ? *diag_color_option : stderr_term_color;
+ }
+
+ // Diagnostic facility.
+ //
+  // Note that this is the "complex" case where we derive from (rather than
+ // alias) a number of butl::diag_* types and provide custom operator<<
+ // "overrides" in order to make ADL look in the build2 rather than butl
+ // namespace.
//
using butl::diag_stream_lock;
using butl::diag_stream;
using butl::diag_epilogue;
+ using butl::diag_frame;
- // Diagnostics stack. Each frame is "applied" to the fail/error/warn/info
- // diag record.
- //
- // Unfortunately most of our use-cases don't fit into the 2-pointer small
- // object optimization of std::function. So we have to complicate things
- // a bit here.
- //
- struct LIBBUILD2_SYMEXPORT diag_frame
+ template <typename> struct diag_prologue;
+ template <typename> struct diag_mark;
+
+ struct diag_record: butl::diag_record
{
- explicit
- diag_frame (void (*f) (const diag_frame&, const diag_record&))
- : func_ (f)
+ template <typename T>
+ const diag_record&
+ operator<< (const T& x) const
{
- if (func_ != nullptr)
- prev_ = stack (this);
+ os << x;
+ return *this;
}
- diag_frame (diag_frame&& x)
- : func_ (x.func_)
- {
- if (func_ != nullptr)
- {
- prev_ = x.prev_;
- stack (this);
+ diag_record () = default;
- x.func_ = nullptr;
- }
- }
+ template <typename B>
+ explicit
+ diag_record (const diag_prologue<B>& p): diag_record () { *this << p;}
- diag_frame& operator= (diag_frame&&) = delete;
+ template <typename B>
+ explicit
+ diag_record (const diag_mark<B>& m): diag_record () { *this << m;}
+ };
- diag_frame (const diag_frame&) = delete;
- diag_frame& operator= (const diag_frame&) = delete;
+ template <typename B>
+ struct diag_prologue: butl::diag_prologue<B>
+ {
+ using butl::diag_prologue<B>::diag_prologue;
- ~diag_frame ()
+ template <typename T>
+ diag_record
+ operator<< (const T& x) const
{
- if (func_ != nullptr )
- stack (prev_);
+ diag_record r;
+ r.append (this->indent, this->epilogue);
+ B::operator() (r);
+ r << x;
+ return r;
}
- static void
- apply (const diag_record& r)
+ friend const diag_record&
+ operator<< (const diag_record& r, const diag_prologue& p)
{
- for (const diag_frame* f (stack ()); f != nullptr; f = f->prev_)
- f->func_ (*f, r);
+ r.append (p.indent, p.epilogue);
+ p (r);
+ return r;
}
+ };
- // Tip of the stack.
- //
- static const diag_frame*
- stack () noexcept;
+ template <typename B>
+ struct diag_mark: butl::diag_mark<B>
+ {
+ using butl::diag_mark<B>::diag_mark;
- // Set the new and return the previous tip of the stack.
- //
- static const diag_frame*
- stack (const diag_frame*) noexcept;
+ template <typename T>
+ diag_record
+ operator<< (const T& x) const
+ {
+ return B::operator() () << x;
+ }
- struct stack_guard
+ friend const diag_record&
+ operator<< (const diag_record& r, const diag_mark& m)
{
- explicit stack_guard (const diag_frame* s): s_ (stack (s)) {}
- ~stack_guard () {stack (s_);}
- const diag_frame* s_;
- };
+ return r << m ();
+ }
+ };
- private:
- void (*func_) (const diag_frame&, const diag_record&);
- const diag_frame* prev_;
+ template <typename B>
+ struct diag_noreturn_end: butl::diag_noreturn_end<B>
+ {
+ diag_noreturn_end () {} // For Clang 3.7 (const needs user default ctor).
+
+ using butl::diag_noreturn_end<B>::diag_noreturn_end;
+
+ [[noreturn]] friend void
+ operator<< (const diag_record& r, const diag_noreturn_end& e)
+ {
+ assert (r.full ());
+ e.B::operator() (r);
+ }
};
+ // Note: diag frames are not applied to text/trace diagnostics.
+ //
template <typename F>
struct diag_frame_impl: diag_frame
{
@@ -273,9 +527,10 @@ namespace build2
private:
static void
- thunk (const diag_frame& f, const diag_record& r)
+ thunk (const diag_frame& f, const butl::diag_record& r)
{
- static_cast<const diag_frame_impl&> (f).func_ (r);
+ static_cast<const diag_frame_impl&> (f).func_ (
+ static_cast<const diag_record&> (r));
}
const F func_;
@@ -288,8 +543,6 @@ namespace build2
return diag_frame_impl<F> (move (f));
}
- // Diagnostic facility, project specifics.
- //
struct LIBBUILD2_SYMEXPORT simple_prologue_base
{
explicit
@@ -352,8 +605,8 @@ namespace build2
struct basic_mark_base
{
- using simple_prologue = butl::diag_prologue<simple_prologue_base>;
- using location_prologue = butl::diag_prologue<location_prologue_base>;
+ using simple_prologue = diag_prologue<simple_prologue_base>;
+ using location_prologue = diag_prologue<location_prologue_base>;
explicit
basic_mark_base (const char* type,
@@ -427,7 +680,7 @@ namespace build2
const void* data_;
diag_epilogue* const epilogue_;
};
- using basic_mark = butl::diag_mark<basic_mark_base>;
+ using basic_mark = diag_mark<basic_mark_base>;
LIBBUILD2_SYMEXPORT extern const basic_mark error;
LIBBUILD2_SYMEXPORT extern const basic_mark warn;
@@ -452,7 +705,7 @@ namespace build2
mod,
name) {}
};
- using trace_mark = butl::diag_mark<trace_mark_base>;
+ using trace_mark = diag_mark<trace_mark_base>;
using tracer = trace_mark;
// fail
@@ -464,17 +717,17 @@ namespace build2
const void* data = nullptr)
: basic_mark_base (type,
data,
- [](const diag_record& r)
+ [](const butl::diag_record& r, butl::diag_writer* w)
{
diag_frame::apply (r);
- r.flush ();
+ r.flush (w);
throw failed ();
},
&stream_verb_map,
nullptr,
nullptr) {}
};
- using fail_mark = butl::diag_mark<fail_mark_base>;
+ using fail_mark = diag_mark<fail_mark_base>;
struct fail_end_base
{
@@ -488,11 +741,289 @@ namespace build2
throw failed ();
}
};
- using fail_end = butl::diag_noreturn_end<fail_end_base>;
+ using fail_end = diag_noreturn_end<fail_end_base>;
LIBBUILD2_SYMEXPORT extern const fail_mark fail;
LIBBUILD2_SYMEXPORT extern const fail_end endf;
+ // Diagnostics buffer.
+ //
+ // The purpose of this class is to handle diagnostics from child processes,
+ // where handle can mean:
+ //
+ // - Buffer it (to avoid interleaving in parallel builds).
+ //
+ // - Stream it (if the input can be split into diagnostic records).
+ //
+ // - Do nothing (in serial builds or if requested not to buffer).
+ //
+ // In the future this class may also be responsible for converting the
+ // diagnostics into the structured form (which means it may need to buffer
+ // even in serial builds).
+ //
+ // The typical usage is as follows:
+ //
+ // process pr (..., diag_buffer::pipe (ctx));
+ // diag_buffer dbuf (ctx, args[0], pr); // Skip.
+ // ifdstream is (move (pr.in_ofd)); // No skip.
+ // ofdstream os (move (pr.out_fd));
+ //
+ // The reason for this somewhat roundabout setup is to make sure the
+ // diag_buffer instance is destroyed before the process instance. This is
+ // important in case an exception is thrown where we want to make sure all
+ // our pipe ends are closed before we wait for the process exit (which
+ // happens in the process destructor).
+ //
+ // And speaking of the destruction order, another thing to keep in mind is
+ // that only one stream can use the skip mode (fdstream_mode::skip; because
+ // skipping is performed in the blocking mode) and the stream that skips
+ // should come first so that all other streams are destroyed/closed before
+  // it (failing that, we may end up in a deadlock). For example:
+ //
+ // process pr (..., diag_buffer::pipe (ctx));
+ // ifdstream is (move (pr.in_ofd), fdstream_mode::skip); // Skip.
+ // diag_buffer dbuf (ctx, args[0], pr, fdstream_mode::none); // No skip.
+ // ofdstream os (move (pr.out_fd));
+ //
+ class LIBBUILD2_SYMEXPORT diag_buffer
+ {
+ public:
+ // If buffering is necessary or force is true, return an "instruction"
+ // (-1) to the process class constructor to open a pipe and redirect
+ // stderr to it. Otherwise, return an "instruction" to inherit stderr (2).
+ //
+ // The force flag is normally used if custom diagnostics processing is
+ // required (filter, split, etc; see read() below).
+ //
+ // Note that the diagnostics buffer must be opened (see below) regardless
+ // of the pipe() result.
+ //
+ static int
+ pipe (context&, bool force = false);
+
+ // Open the diagnostics buffer given the parent end of the pipe (normally
+ // process:in_efd). If it is nullfd, then assume no buffering is
+ // necessary. If mode is non_blocking, then make reading from the parent
+ // end of the pipe non-blocking.
+ //
+ // The args0 argument is the child process program name for diagnostics.
+ // It is expected to remain valid until the call to close() and should
+ // normally be the same as args[0] passed to close().
+ //
+ // Note that the same buffer can go through multiple open-read-close
+ // sequences, for example, to execute multiple commands.
+ //
+ // All the below functions handle io errors, issue suitable diagnostics,
+ // and throw failed. If an exception is thrown from any of them, then the
+ // instance should not be used any further.
+ //
+ // Note that when reading from multiple streams in the non-blocking mode,
+ // only the last stream to be destroyed can normally have the skip mode
+ // since in case of an exception, skipping will be blocking.
+ //
+ diag_buffer (context&,
+ const char* args0,
+ auto_fd&&,
+ fdstream_mode = fdstream_mode::skip);
+
+  // As above, but the parent end of the pipe (process:in_efd) is passed
+ // via a process instance.
+ //
+ diag_buffer (context&,
+ const char* args0,
+ process&,
+ fdstream_mode = fdstream_mode::skip);
+
+ // As above but with support for the underlying buffer reuse.
+ //
+ // Note that in most cases reusing the buffer is probably not worth the
+ // trouble because we normally don't expect any diagnostics in the common
+ // case. However, if needed, it can be arranged, for example:
+ //
+ // vector<char> buf;
+ //
+ // {
+ // process pr (...);
+ // diag_buffer dbuf (ctx, move (buf), args[0], pr);
+ // dbuf.read ();
+ // dbuf.close ();
+ // buf = move (dbuf.buf);
+ // }
+ //
+ // {
+ // ...
+ // }
+ //
+ // Note also that while there is no guarantee the underlying buffer is
+ // moved when, say, the vector is empty, all the main implementations
+ // always steal the buffer.
+ //
+ diag_buffer (context&,
+ vector<char>&& buf,
+ const char* args0,
+ auto_fd&&,
+ fdstream_mode = fdstream_mode::skip);
+
+ diag_buffer (context&,
+ vector<char>&& buf,
+ const char* args0,
+ process&,
+ fdstream_mode = fdstream_mode::skip);
+
+ // Separate construction and opening.
+ //
+ // Note: be careful with the destruction order (see above for details).
+ //
+ explicit
+ diag_buffer (context&);
+
+ diag_buffer (context&, vector<char>&& buf);
+
+ void
+ open (const char* args0,
+ auto_fd&&,
+ fdstream_mode = fdstream_mode::skip);
+
+ // Open the buffer in the state as if after read() returned false, that
+ // is, the stream corresponding to the parent's end of the pipe reached
+ // EOF and has been closed. This is primarily useful when the diagnostics
+ // is being read in a custom way (for example, it has been merged to
+ // stdout) and all we want is to be able to call write() and close().
+ //
+ void
+ open_eof (const char* args0);
+
+ // Check whether the buffer has been opened with the open() call and
+ // hasn't yet been closed.
+ //
+ // Note that this function returning true does not mean that the pipe was
+ // opened (to check that, call is_open() on the stream member; see below).
+ //
+ bool
+ is_open () const
+ {
+ return state_ != state::closed;
+ }
+
+ // Read the diagnostics from the parent's end of the pipe if one was
+ // opened and buffer/stream it as necessary or forced. Return true if
+ // there could be more diagnostics to read (only possible in the non-
+ // blocking mode) and false otherwise, in which case also close the
+ // stream.
+ //
+ // Note that the force argument here (as well as in write() below) and
+ // in open() above are independent. Specifically, force in open() forces
+ // the opening of the pipe while force in read() and write() forces
+ // the buffering of the diagnostics.
+ //
+ // Instead of calling this function you can perform custom reading and, if
+ // necessary, buffering of the diagnostics by accessing the input stream
+ // (is) and underlying buffer (buf) directly. This can be used to filter,
+ // split the diagnostics into records according to a certain format, etc.
+ // Note that such custom processing implementation should maintain the
+ // overall semantics of diagnostics buffering in that it may only omit
+ // buffering in the serial case or if the diagnostics can be streamed in
+ // atomic records. See also write() below.
+ //
+ // The input stream is opened in the text mode and has the badbit but not
+ // failbit exception mask. The custom processing should also be compatible
+ // with the stream mode (blocking or non). If buffering is performed, then
+ // depending on the expected diagnostics the custom processing may want to
+ // reserve an appropriate initial buffer size to avoid unnecessary
+ // reallocation. As a convenience, in the blocking mode only, if the
+ // stream still contains some diagnostics, then it can be handled by
+  // calling read(). This is useful when needing to process only the initial
+ // part of the diagnostics. The custom processing may also close the
+ // stream manually before calling close().
+ //
+ bool
+ read (bool force = false);
+
+ // Close the parent end of the pipe if one was opened and write out any
+ // buffered diagnostics.
+ //
+ // If the child process exited abnormally or normally with non-0 code,
+ // then print the error diagnostics to this effect. Additionally, if the
+ // verbosity level is between 1 and the specified value, then print the
+ // command line as info after the error. If omit_normal is true, then
+ // don't print either for the normal exit (usually used for custom
+ // diagnostics or when process failure can be tolerated).
+ //
+ // Normally the specified verbosity will be 1 and the command line args
+ // represent the verbosity level 2 (logical) command line. Note that args
+ // should only represent a single command in a pipe (see print_process()
+ // below for details).
+ //
+ // If the diag_buffer instance is destroyed before calling close(), then
+ // any buffered diagnostics is discarded.
+ //
+ // Note: see also run_finish(diag_buffer&).
+ //
+ // @@ TODO: need overload with process_env (see print_process). Also in
+ // run_finish_impl().
+ //
+ void
+ close (const cstrings& args,
+ const process_exit&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = {});
+
+ void
+ close (const char* const* args,
+ const process_exit&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = {});
+
+ // As above but with a custom diag record for the child exit diagnostics,
+ // if any. Note that if the diag record has the fail epilogue, then this
+ // function will throw.
+ //
+ void
+ close (diag_record&& = {});
+
+ // Direct access to the underlying stream and buffer for custom processing
+ // (see read() above for details).
+ //
+ // If serial is true, then we are running serially. If nobuf is true,
+ // then we are running in parallel but diagnostics buffering has been
+ // disabled (--no-diag-buffer). Note that there is a difference: during
+ // the serial execution we are free to hold the diag_stream_lock for as
+ // long as convenient, for example, for the whole duration of child
+  // process execution. Doing the same during parallel execution is a very
+ // bad idea and we should read/write the diagnostics in chunks, normally
+ // one line at a time.
+ //
+ public:
+ ifdstream is;
+ vector<char> buf;
+ const char* args0;
+ bool serial;
+ bool nobuf;
+
+ // Buffer or stream a fragment of diagnostics as necessary or forced. If
+ // newline is true, also add a trailing newline.
+ //
+ // This function is normally called from a custom diagnostics processing
+ // implementation (see read() above for details). If nobuf is true, then
+ // the fragment should end on the line boundary to avoid interleaving.
+ //
+ void
+ write (const string&, bool newline, bool force = false);
+
+ private:
+ // Note that we don't seem to need a custom destructor to achieve the
+ // desired semantics: we can assume the process has exited before we are
+ // destroyed (because we supply stderr to its constructor) which means
+  // closing fdstream without reading any further should be ok.
+ //
+ enum class state {closed, opened, eof};
+
+ context& ctx_;
+ state state_ = state::closed;
+ };
+
// Action phrases, e.g., "configure update exe{foo}", "updating exe{foo}",
// and "updating exe{foo} is configured". Use like this:
//
@@ -558,4 +1089,6 @@ namespace build2
}
}
+#include <libbuild2/diagnostics.ixx>
+
#endif // LIBBUILD2_DIAGNOSTICS_HXX
diff --git a/libbuild2/diagnostics.ixx b/libbuild2/diagnostics.ixx
new file mode 100644
index 0000000..273dfad
--- /dev/null
+++ b/libbuild2/diagnostics.ixx
@@ -0,0 +1,126 @@
+// file : libbuild2/diagnostics.ixx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+namespace build2
+{
+ // print_diag()
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag_impl (const char*, target_key*, target_key&&, const char*);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag_impl (const char*,
+ target_key*, vector<target_key>&& r,
+ const char*);
+
+ inline void
+ print_diag (const char* p, target_key&& l, target_key&& r, const char* c)
+ {
+ print_diag_impl (p, &l, move (r), c);
+ }
+
+ inline void
+ print_diag (const char* p,
+ target_key&& l, vector<target_key>&& r,
+ const char* c)
+ {
+ print_diag_impl (p, &l, move (r), c);
+ }
+
+ inline void
+ print_diag (const char* p, target_key& r)
+ {
+ print_diag_impl (p, nullptr, move (r), nullptr);
+ }
+
+ inline void
+ print_diag (const char* p, vector<target_key>&& r)
+ {
+ print_diag_impl (p, nullptr, move (r), nullptr);
+ }
+
+ inline void
+ print_diag (const char* p, const path& r)
+ {
+ print_diag (p, path_name (&r));
+ }
+
+ inline void
+ print_diag (const char* p, const target& l, const path& r, const char* c)
+ {
+ print_diag (p, l, path_name (&r), c);
+ }
+
+ inline void
+ print_diag (const char* p, const path& l, const path& r, const char* c)
+ {
+ print_diag (p, l, path_name (&r), c);
+ }
+
+ inline void
+ print_diag (const char* p, const string& l, const path& r, const char* c)
+ {
+ print_diag (p, l, path_name (&r), c);
+ }
+
+ // diag_buffer
+ //
+ inline diag_buffer::
+ diag_buffer (context& ctx)
+ : is (ifdstream::badbit), ctx_ (ctx)
+ {
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx, vector<char>&& b)
+ : is (ifdstream::badbit), buf (move (b)), ctx_ (ctx)
+ {
+ buf.clear ();
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx, const char* args0, auto_fd&& fd, fdstream_mode m)
+ : diag_buffer (ctx)
+ {
+ open (args0, move (fd), m);
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx, const char* args0, process& pr, fdstream_mode m)
+ : diag_buffer (ctx)
+ {
+ open (args0, move (pr.in_efd), m);
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx,
+ vector<char>&& b,
+ const char* args0,
+ auto_fd&& fd,
+ fdstream_mode m)
+ : diag_buffer (ctx, move (b))
+ {
+ open (args0, move (fd), m);
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx,
+ vector<char>&& b,
+ const char* args0,
+ process& pr,
+ fdstream_mode m)
+ : diag_buffer (ctx, move (b))
+ {
+ open (args0, move (pr.in_efd), m);
+ }
+
+ inline void diag_buffer::
+ close (const cstrings& args,
+ const process_exit& pe,
+ uint16_t verbosity,
+ bool omit_normal,
+ const location& loc)
+ {
+ close (args.data (), pe, verbosity, omit_normal, loc);
+ }
+}
diff --git a/libbuild2/dist/init.cxx b/libbuild2/dist/init.cxx
index 2be4c3f..2a25992 100644
--- a/libbuild2/dist/init.cxx
+++ b/libbuild2/dist/init.cxx
@@ -3,8 +3,9 @@
#include <libbuild2/dist/init.hxx>
-#include <libbuild2/scope.hxx>
#include <libbuild2/file.hxx>
+#include <libbuild2/rule.hxx>
+#include <libbuild2/scope.hxx>
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/config/utility.hxx>
@@ -32,7 +33,34 @@ namespace build2
// Enter module variables. Do it during boot in case they get assigned
// in bootstrap.build (which is customary for, e.g., dist.package).
//
- auto& vp (rs.var_pool ());
+
+ // The dist flag or path. Normally it is a flag (true or false) but can
+ // also be used to remap the distribution location.
+ //
+ // In the latter case it specifies the "imaginary" source location which
+ // is used to derive the corresponding distribution location. This location
+ // can be specified as either a directory path (to remap with the same
+ // file name) or a file path (to remap with a different name). And the
+ // way we distinguish between the two is via the presence/absence of the
+ // trailing directory separator. If the path is relative, then it's
+ // treated relative to the target directory. Note that to make things
+ // less error prone, simple paths without any directory separators are
+ // not allowed (use ./<name> instead).
+ //
+ // Note that if multiple targets end up with the same source location,
+ // the behavior is undefined and no diagnostics is issued.
+ //
+ // Note also that such remapping has no effect in the bootstrap
+ // distribution mode.
+ //
+ // Note: project-private.
+ //
+ rs.var_pool ().insert<path> ("dist", variable_visibility::target);
+
+ // The rest of the variables we enter are qualified so go straight for
+ // the public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// config.dist.archives is a list of archive extensions (e.g., zip,
// tar.gz) that can be optionally prefixed with a directory. If it is
@@ -57,7 +85,7 @@ namespace build2
// The bootstrap distribution mode. Note that it can only be specified
// as a global override and is thus marked as unsaved in init(). Unlike
- // the normal load distribution mode, we can do in-source and multiple
+ // the normal load distribution mode, we can do in source and multiple
// projects at once.
//
// Note also that other config.dist.* variables can only be specified as
@@ -71,8 +99,6 @@ namespace build2
vp.insert<paths> ("dist.archives");
vp.insert<paths> ("dist.checksums");
- vp.insert<bool> ("dist", variable_visibility::target); // Flag.
-
// Project's package name. Note: if set, must be in bootstrap.build.
//
auto& v_d_p (vp.insert<string> ("dist.package"));
@@ -107,7 +133,7 @@ namespace build2
//
bool s (specified_config (rs, "dist", {"bootstrap"}));
- // dist.root
+ // config.dist.root
//
{
value& v (rs.assign ("dist.root"));
@@ -119,22 +145,24 @@ namespace build2
}
}
- // dist.cmd
+ // config.dist.cmd
+ //
+ // By default we use in-process code for creating directories and
+ // copying files (for performance, especially on Windows). But an
+ // external program (normally install) can be used if configured.
//
{
- value& v (rs.assign<process_path> ("dist.cmd"));
+ value& v (rs.assign<process_path> ("dist.cmd")); // NULL
if (s)
{
- if (lookup l = lookup_config (rs,
- "config.dist.cmd",
- path ("install")))
+ if (lookup l = lookup_config (rs, "config.dist.cmd", nullptr))
v = run_search (cast<path> (l), true);
}
}
- // dist.archives
- // dist.checksums
+ // config.dist.archives
+ // config.dist.checksums
//
{
value& a (rs.assign ("dist.archives"));
@@ -157,7 +185,7 @@ namespace build2
}
}
- // dist.uncommitted
+ // config.dist.uncommitted
//
// Omit it from the configuration unless specified.
//
@@ -182,13 +210,22 @@ namespace build2
l5 ([&]{trace << "for " << rs;});
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */)); // All qualified.
// Register our wildcard rule. Do it explicitly for the alias to prevent
// something like insert<target>(dist_id, test_id) taking precedence.
//
rs.insert_rule<target> (dist_id, 0, "dist", rule_);
- rs.insert_rule<alias> (dist_id, 0, "dist.alias", rule_); //@@ outer?
+ rs.insert_rule<alias> (dist_id, 0, "dist.alias", rule_);
+
+ // We need this rule for out-of-any-project dependencies (for example,
+ // executables imported from /usr/bin, etc). We are registering it on
+ // the global scope similar to builtin rules.
+ //
+ // See a similar rule in the config module.
+ //
+ rs.global_scope ().insert_rule<mtime_target> (
+ dist_id, 0, "dist.file", file_rule::instance);
// Configuration.
//
diff --git a/libbuild2/dist/module.hxx b/libbuild2/dist/module.hxx
index 314dc96..da97939 100644
--- a/libbuild2/dist/module.hxx
+++ b/libbuild2/dist/module.hxx
@@ -10,14 +10,17 @@
#include <libbuild2/module.hxx>
#include <libbuild2/variable.hxx>
+#include <libbuild2/dist/types.hxx>
+
#include <libbuild2/export.hxx>
namespace build2
{
namespace dist
{
- struct LIBBUILD2_SYMEXPORT module: build2::module
+ class LIBBUILD2_SYMEXPORT module: public build2::module
{
+ public:
static const string name;
const variable& var_dist_package;
@@ -38,6 +41,10 @@ namespace build2
adhoc.push_back (move (f));
}
+ // List of postponed prerequisites (see rule for details).
+ //
+ mutable postponed_prerequisites postponed;
+
// Distribution post-processing callbacks.
//
// Only the last component in the pattern may contain wildcards. If the
@@ -69,10 +76,11 @@ namespace build2
// Implementation details.
//
- module (const variable& v_d_p)
- : var_dist_package (v_d_p) {}
+ public:
+ module (const variable& v_d_p): var_dist_package (v_d_p) {}
public:
+ bool distributed = false; // True if this project is being distributed.
vector<path> adhoc;
struct callback
diff --git a/libbuild2/dist/operation.cxx b/libbuild2/dist/operation.cxx
index 7a85119..cd88eac 100644
--- a/libbuild2/dist/operation.cxx
+++ b/libbuild2/dist/operation.cxx
@@ -6,6 +6,8 @@
#include <libbutl/sha1.hxx>
#include <libbutl/sha256.hxx>
+#include <libbutl/filesystem.hxx> // try_mkdir_p(), cpfile()
+
#include <libbuild2/file.hxx>
#include <libbuild2/dump.hxx>
#include <libbuild2/scope.hxx>
@@ -15,6 +17,8 @@
#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/dist/types.hxx>
+#include <libbuild2/dist/rule.hxx>
#include <libbuild2/dist/module.hxx>
using namespace std;
@@ -27,14 +31,14 @@ namespace build2
// install -d <dir>
//
static void
- install (const process_path& cmd, const dir_path&);
+ install (const process_path*, context&, const dir_path&);
- // install <file> <dir>
+ // install <file> <dir>[/<name>]
//
// Return the destination file path.
//
static path
- install (const process_path& cmd, const file&, const dir_path&);
+ install (const process_path*, const file&, const dir_path&, const path&);
// tar|zip ... <dir>/<pkg>.<ext> <pkg>
//
@@ -56,7 +60,7 @@ namespace build2
const path& arc, const dir_path& dir, const string& ext);
static operation_id
- dist_operation_pre (const values&, operation_id o)
+ dist_operation_pre (context&, const values&, operation_id o)
{
if (o != default_id)
fail << "explicit operation specified for dist meta-operation";
@@ -64,6 +68,30 @@ namespace build2
return o;
}
+ static void
+ dist_load_load (const values& vs,
+ scope& rs,
+ const path& bf,
+ const dir_path& out_base,
+ const dir_path& src_base,
+ const location& l)
+ {
+ // @@ TMP: redo after release (do it here and not in execute, also add
+ // custom search and do the other half there).
+ //
+#if 0
+ if (rs.out_path () != out_base || rs.src_path () != src_base)
+ fail (l) << "dist meta-operation target must be project root directory";
+#endif
+
+ // Mark this project as being distributed.
+ //
+ if (auto* m = rs.find_module<module> (module::name))
+ m->distributed = true;
+
+ perform_load (vs, rs, bf, out_base, src_base, l);
+ }
+
// Enter the specified source file as a target of type T. The path is
// expected to be normalized and relative to src_root. If the third
// argument is false, then first check if the file exists. If the fourth
@@ -82,9 +110,7 @@ namespace build2
// Figure out if we need out.
//
- dir_path out (rs.src_path () != rs.out_path ()
- ? out_src (d, rs)
- : dir_path ());
+ dir_path out (!rs.out_eq_src () ? out_src (d, rs) : dir_path ());
const T& t (rs.ctx.targets.insert<T> (
move (d),
@@ -105,16 +131,28 @@ namespace build2
// Recursively traverse an src_root subdirectory entering/collecting the
// contained files and file symlinks as the file targets and skipping
// entries that start with a dot. Follow directory symlinks (preserving
- // their names) and fail on dangling symlinks.
+ // their names) and fail on dangling symlinks. Also detect directory
+ // symlink cycles.
//
+ struct subdir
+ {
+ const subdir* prev;
+ const dir_path& dir;
+ };
+
static void
- add_subdir (const scope& rs, const dir_path& sd, action_targets& files)
+ add_subdir (const scope& rs,
+ const dir_path& sd,
+ action_targets& files,
+ const subdir* prev = nullptr)
{
dir_path d (rs.src_path () / sd);
+ const subdir next {prev, d};
+
try
{
- for (const dir_entry& e: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::no_follow))
{
const path& n (e.path ());
@@ -122,7 +160,36 @@ namespace build2
try
{
if (e.type () == entry_type::directory) // Can throw.
- add_subdir (rs, sd / path_cast<dir_path> (n), files);
+ {
+ // If this is a symlink, check that it doesn't cause a cycle.
+ //
+ if (e.ltype () == entry_type::symlink)
+ {
+ // Note that the resulting path will be absolute and
+ // normalized.
+ //
+ dir_path ld (d / path_cast<dir_path> (n));
+ dir_path td (path_cast<dir_path> (followsymlink (ld)));
+
+ const subdir* s (&next);
+ for (; s != nullptr; s = s->prev)
+ {
+ if (s->dir == td)
+ {
+ if (verb)
+ warn << "directory cycle caused by symlink " << ld <<
+ info << "symlink target " << td;
+
+ break;
+ }
+ }
+
+ if (s != nullptr)
+ break;
+ }
+
+ add_subdir (rs, sd / path_cast<dir_path> (n), files, &next);
+ }
else
files.push_back (add_target<file> (rs, sd / n, true, true));
}
@@ -171,8 +238,13 @@ namespace build2
fail << "unknown distribution package name" <<
info << "did you forget to set dist.package?";
+ const module& mod (*rs.find_module<module> (module::name));
+
const string& dist_package (cast<string> (l));
- const process_path& dist_cmd (cast<process_path> (rs.vars["dist.cmd"]));
+ const process_path* dist_cmd (
+ cast_null<process_path> (rs.vars["dist.cmd"]));
+
+ dir_path td (dist_root / dir_path (dist_package));
// We used to print 'dist <target>' at verbosity level 1 but that has
// proven to be just noise. Though we still want to print something
@@ -183,79 +255,143 @@ namespace build2
// (e.g., output directory creation) in all the operations below.
//
if (verb == 1)
- text << "dist " << dist_package;
+ print_diag ("dist", src_root, td);
// Get the list of files to distribute.
//
action_targets files;
+ const variable* dist_var (nullptr);
if (tgt != nullptr)
{
l5 ([&]{trace << "load dist " << rs;});
+ dist_var = rs.var_pool ().find ("dist");
+
// Match a rule for every operation supported by this project. Skip
// default_id.
//
// Note that we are not calling operation_pre/post() callbacks here
// since the meta operation is dist and we know what we are doing.
//
- values params;
path_name pn ("<dist>");
const location loc (pn); // Dummy location.
+ action_targets ts {tgt};
+
+ auto process_postponed = [&ctx, &mod] ()
{
- action_targets ts {tgt};
+ if (!mod.postponed.list.empty ())
+ {
+ // Re-grab the phase lock similar to perform_match().
+ //
+ phase_lock l (ctx, run_phase::match);
+
+ // Note that we don't need to bother with the mutex since we do
+ // all of this serially. But we can end up with new elements at
+ // the end.
+ //
+ // Strictly speaking, to handle this correctly we would need to do
+ // multiple passes over this list and only give up when we cannot
+ // make any progress since earlier entries that we cannot resolve
+ // could be "fixed" by later entries. But this feels far-fetched
+ // and so let's wait for a real example before complicating this.
+ //
+ for (auto i (mod.postponed.list.begin ());
+ i != mod.postponed.list.end ();
+ ++i)
+ rule::match_postponed (*i);
+ }
+ };
- auto mog = make_guard ([&ctx] () {ctx.match_only = false;});
- ctx.match_only = true;
+ auto mog = make_guard ([&ctx] () {ctx.match_only = nullopt;});
+ ctx.match_only = match_only_level::all;
- const operations& ops (rs.root_extra->operations);
- for (operations::size_type id (default_id + 1); // Skip default_id.
- id < ops.size ();
- ++id)
+ const operations& ops (rs.root_extra->operations);
+ for (operations::size_type id (default_id + 1); // Skip default_id.
+ id < ops.size ();
+ ++id)
+ {
+ if (const operation_info* oif = ops[id])
{
- if (const operation_info* oif = ops[id])
- {
- // Skip aliases (e.g., update-for-install). In fact, one can
- // argue the default update should be sufficient since it is
- // assumed to update all prerequisites and we no longer support
- // ad hoc stuff like test.input. Though here we are using the
- // dist meta-operation, not perform.
- //
- if (oif->id != id)
- continue;
+ // Skip aliases (e.g., update-for-install). In fact, one can argue
+ // the default update should be sufficient since it is assumed to
+ // update all prerequisites and we no longer support ad hoc stuff
+ // like test.input. Though here we are using the dist
+ // meta-operation, not perform.
+ //
+ if (oif->id != id)
+ continue;
- // Use standard (perform) match.
- //
- if (oif->pre != nullptr)
+ // Use standard (perform) match.
+ //
+ if (auto pp = oif->pre_operation)
+ {
+ if (operation_id pid = pp (ctx, {}, dist_id, loc))
{
- if (operation_id pid = oif->pre (params, dist_id, loc))
- {
- const operation_info* poif (ops[pid]);
- ctx.current_operation (*poif, oif, false /* diag_noise */);
- action a (dist_id, poif->id, oif->id);
- match (params, a, ts,
- 1 /* diag (failures only) */,
- false /* progress */);
- }
+ const operation_info* poif (ops[pid]);
+ ctx.current_operation (*poif, oif, false /* diag_noise */);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, false /* inner */, loc);
+
+ if (poif->operation_pre != nullptr)
+ poif->operation_pre (ctx, {}, true /* inner */, loc);
+
+ action a (dist_id, poif->id, oif->id);
+ mod.postponed.list.clear ();
+ perform_match ({}, a, ts,
+ 1 /* diag (failures only) */,
+ false /* progress */);
+ process_postponed ();
+
+ if (poif->operation_post != nullptr)
+ poif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, false /* inner */);
}
+ }
+
+ ctx.current_operation (*oif, nullptr, false /* diag_noise */);
- ctx.current_operation (*oif, nullptr, false /* diag_noise */);
- action a (dist_id, oif->id);
- match (params, a, ts,
- 1 /* diag (failures only) */,
- false /* progress */);
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, true /* inner */, loc);
- if (oif->post != nullptr)
+ action a (dist_id, oif->id);
+ mod.postponed.list.clear ();
+ perform_match ({}, a, ts,
+ 1 /* diag (failures only) */,
+ false /* progress */);
+ process_postponed ();
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, true /* inner */);
+
+ if (auto po = oif->post_operation)
+ {
+ if (operation_id pid = po (ctx, {}, dist_id))
{
- if (operation_id pid = oif->post (params, dist_id))
- {
- const operation_info* poif (ops[pid]);
- ctx.current_operation (*poif, oif, false /* diag_noise */);
- action a (dist_id, poif->id, oif->id);
- match (params, a, ts,
- 1 /* diag (failures only) */,
- false /* progress */);
- }
+ const operation_info* poif (ops[pid]);
+ ctx.current_operation (*poif, oif, false /* diag_noise */);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, false /* inner */, loc);
+
+ if (poif->operation_pre != nullptr)
+ poif->operation_pre (ctx, {}, true /* inner */, loc);
+
+ action a (dist_id, poif->id, oif->id);
+ mod.postponed.list.clear ();
+ perform_match ({}, a, ts,
+ 1 /* diag (failures only) */,
+ false /* progress */);
+ process_postponed ();
+
+ if (poif->operation_post != nullptr)
+ poif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, false /* inner */);
}
}
}
@@ -264,7 +400,7 @@ namespace build2
// Add ad hoc files and buildfiles that are not normally loaded as
// part of the project, for example, the export stub. They will still
// be ignored on the next step if the user explicitly marked them
- // dist=false.
+ // with dist=false.
//
auto add_adhoc = [] (const scope& rs)
{
@@ -311,7 +447,7 @@ namespace build2
dir_path out_nroot (out_root / pd);
const scope& nrs (ctx.scopes.find_out (out_nroot));
- if (nrs.out_path () != out_nroot) // This subproject not loaded.
+ if (nrs.out_path () != out_nroot) // This subproject is not loaded.
continue;
if (!nrs.src_path ().sub (src_root)) // Not a strong amalgamation.
@@ -327,50 +463,96 @@ namespace build2
// Note that we are not showing progress here (e.g., "N targets to
// distribute") since it will be useless (too fast).
//
- const variable& dist_var (ctx.var_pool["dist"]);
-
- for (const auto& pt: ctx.targets)
+ auto see_through = [] (const target& t)
{
- file* ft (pt->is_a<file> ());
-
- if (ft == nullptr) // Not a file.
- continue;
+ return ((t.type ().flags & target_type::flag::see_through) ==
+ target_type::flag::see_through);
+ };
- if (ft->dir.sub (src_root))
+ auto collect = [&trace, &dist_var,
+ &src_root, &out_root] (const file& ft)
+ {
+ if (ft.dir.sub (src_root))
{
// Include unless explicitly excluded.
//
- auto l ((*ft)[dist_var]);
-
- if (l && !cast<bool> (l))
- l5 ([&]{trace << "excluding " << *ft;});
- else
- files.push_back (ft);
+ if (const path* v = cast_null<path> (ft[dist_var]))
+ {
+ if (v->string () == "false")
+ {
+ l5 ([&]{trace << "excluding " << ft;});
+ return false;
+ }
+ }
- continue;
+ return true;
}
-
- if (ft->dir.sub (out_root))
+ else if (ft.dir.sub (out_root))
{
// Exclude unless explicitly included.
//
- auto l ((*ft)[dist_var]);
+ if (const path* v = cast_null<path> (ft[dist_var]))
+ {
+ if (v->string () != "false")
+ {
+ l5 ([&]{trace << "including " << ft;});
+ return true;
+ }
+ }
- if (l && cast<bool> (l))
+ return false;
+ }
+ else
+ return false; // Out of project.
+ };
+
+ for (const auto& pt: ctx.targets)
+ {
+ // Collect see-through groups if they are marked with dist=true.
+ //
+ // Note that while it's possible that only their certain members are
+ // marked as such (e.g., via a pattern), we will still require
+ // dist=true on the group itself (and potentially dist=false on some
+ // of its members) for such cases because we don't want to update
+ // every see-through group only to discover that most of them don't
+ // have anything to distribute.
+ //
+ if (see_through (*pt))
+ {
+ if (const path* v = cast_null<path> ((*pt)[dist_var]))
{
- l5 ([&]{trace << "including " << *ft;});
- files.push_back (ft);
+ if (v->string () != "false")
+ {
+ l5 ([&]{trace << "including group " << *pt;});
+ files.push_back (pt.get ());
+ }
}
continue;
}
+
+ file* ft (pt->is_a<file> ());
+
+ if (ft == nullptr) // Not a file.
+ continue;
+
+ // Skip member of see-through groups since after dist_* their list
+ // can be incomplete (or even bogus, e.g., the "representative
+ // sample"). Instead, we will collect them during perfrom_update
+ // below.
+ //
+ if (ft->group != nullptr && see_through (*ft->group))
+ continue;
+
+ if (collect (*ft))
+ files.push_back (ft);
}
// Make sure what we need to distribute is up to date.
//
{
if (mo_perform.meta_operation_pre != nullptr)
- mo_perform.meta_operation_pre (params, loc);
+ mo_perform.meta_operation_pre (ctx, {}, loc);
// This is a hack since according to the rules we need to completely
// reset the state. We could have done that (i.e., saved target
@@ -386,25 +568,75 @@ namespace build2
ctx.current_on = on + 1;
if (mo_perform.operation_pre != nullptr)
- mo_perform.operation_pre (params, update_id);
+ mo_perform.operation_pre (ctx, {}, update_id);
ctx.current_operation (op_update, nullptr, false /* diag_noise */);
+ if (op_update.operation_pre != nullptr)
+ op_update.operation_pre (ctx, {}, true /* inner */, loc);
+
action a (perform_update_id);
- mo_perform.match (params, a, files,
+ mo_perform.match ({}, a, files,
1 /* diag (failures only) */,
prog /* progress */);
- mo_perform.execute (params, a, files,
+ mo_perform.execute ({}, a, files,
1 /* diag (failures only) */,
prog /* progress */);
+ // Replace see-through groups (which now should have their members
+ // resolved) with members.
+ //
+ for (auto i (files.begin ()); i != files.end (); )
+ {
+ const target& t (i->as<target> ());
+ if (see_through (t))
+ {
+ group_view gv (t.group_members (a)); // Go directly.
+
+ if (gv.members == nullptr)
+ fail << "unable to resolve see-through group " << t
+ << " members";
+
+ i = files.erase (i); // Drop the group itself.
+
+ for (size_t j (0); j != gv.count; ++j)
+ {
+ if (const target* m = gv.members[j])
+ {
+ if (const file* ft = m->is_a<file> ())
+ {
+ // Note that a rule may only link-up its members to groups
+ // if/when matched (for example, the cli.cxx{} group). It
+ // feels harmless for us to do the linking here.
+ //
+ if (ft->group == nullptr)
+ const_cast<file*> (ft)->group = &t;
+ else
+ assert (ft->group == &t); // Sanity check.
+
+ if (collect (*ft))
+ {
+ i = files.insert (i, ft); // Insert instead of the group.
+ i++; // Stay after the group.
+ }
+ }
+ }
+ }
+ }
+ else
+ ++i;
+ }
+
+ if (op_update.operation_post != nullptr)
+ op_update.operation_post (ctx, {}, true /* inner */);
+
if (mo_perform.operation_post != nullptr)
- mo_perform.operation_post (params, update_id);
+ mo_perform.operation_post (ctx, {}, update_id);
if (mo_perform.meta_operation_post != nullptr)
- mo_perform.meta_operation_post (params);
+ mo_perform.meta_operation_post (ctx, {});
}
}
else
@@ -430,37 +662,80 @@ namespace build2
//
auto_project_env penv (rs);
- dir_path td (dist_root / dir_path (dist_package));
-
// Clean up the target directory.
//
if (rmdir_r (ctx, td, true, 2) == rmdir_status::not_empty)
fail << "unable to clean target directory " << td;
auto_rmdir rm_td (td); // Clean it up if things go bad.
- install (dist_cmd, td);
+ install (dist_cmd, ctx, td);
// Copy over all the files. Apply post-processing callbacks.
//
- module& mod (*rs.find_module<module> (module::name));
-
prog = prog && show_progress (1 /* max_verb */);
size_t prog_percent (0);
for (size_t i (0), n (files.size ()); i != n; ++i)
{
- const file& t (*files[i].as<target> ().is_a<file> ());
+ const file& t (files[i].as<target> ().as<file> ()); // Only files.
// Figure out where this file is inside the target directory.
//
- bool src (t.dir.sub (src_root));
- dir_path dl (src ? t.dir.leaf (src_root) : t.dir.leaf (out_root));
+ // First see if the path has been remapped (unless bootstrap).
+ //
+ const path* rp (nullptr);
+ if (tgt != nullptr)
+ {
+ if ((rp = cast_null<path> (t[dist_var])) != nullptr)
+ {
+ if (rp->string () == "true") // Wouldn't be here if false.
+ rp = nullptr;
+ }
+ }
+
+ bool src;
+ path rn;
+ dir_path dl;
+ if (rp == nullptr)
+ {
+ src = t.dir.sub (src_root);
+ dl = src ? t.dir.leaf (src_root) : t.dir.leaf (out_root);
+ }
+ else
+ {
+ // Sort the remapped path into name (if any) and directory,
+ // completing the latter if relative.
+ //
+ bool n (!rp->to_directory ());
+
+ if (n)
+ {
+ if (rp->simple ())
+ {
+ fail << "expected true, false, or path in the dist variable "
+ << "value of target " << t <<
+ info << "specify ./" << *rp << " to remap the name";
+ }
+
+ rn = rp->leaf ();
+ }
+
+ dir_path rd (n ? rp->directory () : path_cast<dir_path> (*rp));
+
+ if (rd.relative ())
+ rd = t.dir / rd;
+
+ rd.normalize ();
+
+ src = rd.sub (src_root);
+ dl = src ? rd.leaf (src_root) : rd.leaf (out_root);
+ }
dir_path d (td / dl);
if (!exists (d))
- install (dist_cmd, d);
+ install (dist_cmd, ctx, d);
- path r (install (dist_cmd, t, d));
+ path r (install (dist_cmd, t, d, rn));
// See if this file is in a subproject.
//
@@ -605,8 +880,8 @@ namespace build2
fail << "dist meta-operation target must be project root directory";
if (rs->out_eq_src ())
- fail << "in-tree distribution of target " << t <<
- info << "distribution requires out-of-tree build";
+ fail << "in source distribution of target " << t <<
+ info << "distribution requires out of source build";
dist_project (*rs, &t, prog);
}
@@ -614,60 +889,131 @@ namespace build2
// install -d <dir>
//
static void
- install (const process_path& cmd, const dir_path& d)
+ install (const process_path* cmd, context& ctx, const dir_path& d)
{
- path reld (relative (d));
+ path reld;
+ cstrings args;
- cstrings args {cmd.recall_string (), "-d"};
+ if (cmd != nullptr || verb >= 2)
+ {
+ reld = relative (d);
- args.push_back ("-m");
- args.push_back ("755");
- args.push_back (reld.string ().c_str ());
- args.push_back (nullptr);
+ args.push_back (cmd != nullptr ? cmd->recall_string () : "install");
+ args.push_back ("-d");
+ args.push_back ("-m");
+ args.push_back ("755");
+ args.push_back (reld.string ().c_str ());
+ args.push_back (nullptr);
- if (verb >= 2)
- print_process (args);
+ if (verb >= 2)
+ print_process (args);
+ }
- run (cmd, args);
+ if (cmd != nullptr)
+ run (ctx, *cmd, args, 1 /* finish_verbosity */);
+ else
+ {
+ try
+ {
+ // Note that mode has no effect on Windows, which is probably for
+ // the best.
+ //
+ try_mkdir_p (d, 0755);
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to create directory " << d << ": " << e;
+ }
+ }
}
- // install <file> <dir>
+ // install <file> <dir>[/<name>]
//
static path
- install (const process_path& cmd, const file& t, const dir_path& d)
+ install (const process_path* cmd,
+ const file& t,
+ const dir_path& d,
+ const path& n)
{
- dir_path reld (relative (d));
- path relf (relative (t.path ()));
-
- cstrings args {cmd.recall_string ()};
+ const path& f (t.path ());
+ path r (d / (n.empty () ? f.leaf () : n));
- // Preserve timestamps. This could becomes important if, for
- // example, we have pre-generated sources. Note that the
- // install-sh script doesn't support this option, while both
- // Linux and BSD install's do.
+ // Assume the file is executable if the owner has execute permission,
+ // in which case we make it executable for everyone.
//
- args.push_back ("-p");
+ bool exe ((path_perms (f) & permissions::xu) == permissions::xu);
- // Assume the file is executable if the owner has execute
- // permission, in which case we make it executable for
- // everyone.
- //
- args.push_back ("-m");
- args.push_back (
- (path_perms (t.path ()) & permissions::xu) == permissions::xu
- ? "755"
- : "644");
+ path relf, reld;
+ cstrings args;
- args.push_back (relf.string ().c_str ());
- args.push_back (reld.string ().c_str ());
- args.push_back (nullptr);
+ if (cmd != nullptr || verb >= 2)
+ {
+ relf = relative (f);
+ reld = relative (d);
- if (verb >= 2)
- print_process (args);
+ if (!n.empty ()) // Leave as just directory if no custom name.
+ reld /= n;
+
+ args.push_back (cmd != nullptr ? cmd->recall_string () : "install");
+
+ // Preserve timestamps. This could become important if, for example,
+ // we have pre-generated sources. Note that the install-sh script
+ // doesn't support this option, while both Linux and BSD install's do.
+ //
+ args.push_back ("-p");
+
+ // Assume the file is executable if the owner has execute permission,
+ // in which case we make it executable for everyone.
+ //
+ args.push_back ("-m");
+ args.push_back (exe ? "755" : "644");
+ args.push_back (relf.string ().c_str ());
+ args.push_back (reld.string ().c_str ());
+ args.push_back (nullptr);
+
+ if (verb >= 2)
+ print_process (args);
+ }
- run (cmd, args);
+ if (cmd != nullptr)
+ run (t.ctx, *cmd, args, 1 /* finish_verbosity */);
+ else
+ {
+ permissions perm (permissions::ru | permissions::wu |
+ permissions::rg |
+ permissions::ro); // 644
+ if (exe)
+ perm |= permissions::xu | permissions::xg | permissions::xo; // 755
+
+ try
+ {
+ // Note that we don't pass cpflags::overwrite_content which means
+ // this will fail if the file already exists. Since we clean up the
+ // destination directory, this will detect cases where we have
+ // multiple source files with the same distribution destination.
+ //
+ cpfile (f,
+ r,
+ cpflags::overwrite_permissions | cpflags::copy_timestamps,
+ perm);
+ }
+ catch (const system_error& e)
+ {
+ if (e.code ().category () == generic_category () &&
+ e.code ().value () == EEXIST)
+ {
+ // @@ TMP (added in 0.16.0).
+ //
+ warn << "multiple files are distributed as " << r <<
+ info << "second file is " << f <<
+ info << "this warning will become error in the future";
+ }
+ else
+ fail << "unable to copy " << f << " to " << r << ": " << e;
+ }
+ }
- return d / relf.leaf ();
+ return r;
}
static path
@@ -677,13 +1023,15 @@ namespace build2
const dir_path& dir,
const string& e)
{
+ // NOTE: similar code in bpkg (system-package-manager-archive.cxx).
+
path an (pkg + '.' + e);
// Delete old archive for good measure.
//
path ap (dir / an);
if (exists (ap, false))
- rmfile (ctx, ap);
+ rmfile (ctx, ap, 3 /* verbosity */);
// Use zip for .zip archives. Also recognize and handle a few well-known
// tar.xx cases (in case tar doesn't support -a or has other issues like
@@ -699,7 +1047,7 @@ namespace build2
if (e == "zip")
{
- // On Windows we use libarchive's bsdtar (zip is an MSYS executabales).
+ // On Windows we use libarchive's bsdtar (zip is an MSYS executable).
//
// While not explicitly stated, the compression-level option works
// for zip archives.
@@ -724,15 +1072,28 @@ namespace build2
// On Windows we use libarchive's bsdtar with auto-compression (tar
// itself and quite a few compressors are MSYS executables).
//
+ // OpenBSD tar does not support --format but it appears ustar is the
+ // default (while this is not said explicitly in tar(1), it is said in
+ // pax(1) and confirmed on the mailing list). Nor does it support -a,
+ // at least as of 7.1 but we will let this play out naturally, in case
+ // this support gets added.
+ //
+ // Note also that our long-term plan is to switch to libarchive in
+ // order to generate reproducible archives.
+ //
const char* l (nullptr); // Compression level (option).
#ifdef _WIN32
- const char* tar = "bsdtar";
+ args = {"bsdtar", "--format", "ustar"};
if (e == "tar.gz")
l = "--options=compression-level=9";
#else
- const char* tar = "tar";
+ args = {"tar"
+#ifndef __OpenBSD__
+ , "--format", "ustar"
+#endif
+ };
// For gzip it's a good idea to use -9 by default. For bzip2, -9 is
// the default. And for xz, -9 is not recommended as the default due
@@ -750,13 +1111,10 @@ namespace build2
if (c != nullptr)
{
- args = {tar,
- "--format", "ustar",
- "-cf", "-",
- pkg.c_str (),
- nullptr};
-
- i = args.size ();
+ args.push_back ("-cf");
+ args.push_back ("-");
+ args.push_back (pkg.c_str ());
+ args.push_back (nullptr); i = args.size ();
args.push_back (c);
if (l != nullptr)
args.push_back (l);
@@ -777,20 +1135,13 @@ namespace build2
}
else
#endif
- if (e == "tar")
- args = {tar,
- "--format", "ustar",
- "-cf", ap.string ().c_str (),
- pkg.c_str (),
- nullptr};
- else
{
- args = {tar,
- "--format", "ustar",
- "-a"};
-
- if (l != nullptr)
- args.push_back (l);
+ if (e != "tar")
+ {
+ args.push_back ("-a");
+ if (l != nullptr)
+ args.push_back (l);
+ }
args.push_back ("-cf");
args.push_back (ap.string ().c_str ());
@@ -810,19 +1161,20 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << args[0] << ' ' << ap;
+ print_diag (args[0], dir / dir_path (pkg), ap);
process apr;
process cpr;
- // Change the archiver's working directory to dist_root.
+ // Change the archiver's working directory to root.
+ //
+ // Note: this function is called during serial execution and so no
+ // diagnostics buffering is needed (here and below).
//
- apr = run_start (app,
+ apr = run_start (process_env (app, root),
args,
0 /* stdin */,
- (i != 0 ? -1 : 1) /* stdout */,
- true /* error */,
- root);
+ (i != 0 ? -1 : 1) /* stdout */);
// Start the compressor if required.
//
@@ -834,10 +1186,17 @@ namespace build2
out_fd.get () /* stdout */);
cpr.in_ofd.reset (); // Close the archiver's stdout on our side.
- run_finish (args.data () + i, cpr);
}
- run_finish (args.data (), apr);
+ // Delay throwing until we diagnose both ends of the pipe.
+ //
+ if (!run_finish_code (args.data (),
+ apr,
+ 1 /* verbosity */,
+ false /* omit_normal */) ||
+ !(i == 0 || run_finish_code (args.data () + i, cpr, 1, false)))
+ throw failed ();
+
out_rm.cancel ();
return ap;
@@ -856,7 +1215,7 @@ namespace build2
//
path cp (dir / cn);
if (exists (cp, false))
- rmfile (ctx, cp);
+ rmfile (ctx, cp, 3 /* verbosity */);
auto_rmfile c_rm; // Note: must come first.
auto_fd c_fd;
@@ -895,18 +1254,20 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << args[0] << ' ' << cp;
+ print_diag (args[0], ap, cp);
// Note that to only get the archive name (without the directory) in
// the output we have to run from the archive's directory.
//
- process pr (run_start (pp,
+ // Note: this function is called during serial execution and so no
+ // diagnostics buffering is needed.
+ //
+ process pr (run_start (process_env (pp, ad /* cwd */),
args,
- 0 /* stdin */,
- c_fd.get () /* stdout */,
- true /* error */,
- ad /* cwd */));
- run_finish (args, pr);
+ 0 /* stdin */,
+ c_fd.get () /* stdout */));
+
+ run_finish (args, pr, 1 /* verbosity */);
}
else
{
@@ -926,7 +1287,7 @@ namespace build2
if (verb >= 2)
text << "cat >" << cp;
else if (verb)
- text << e << "sum " << cp;
+ print_diag ((e + "sum").c_str (), ap, cp);
string c;
try
@@ -960,7 +1321,8 @@ namespace build2
dist_include (action,
const target&,
const prerequisite_member& p,
- include_type i)
+ include_type i,
+ lookup& l)
{
tracer trace ("dist::dist_include");
@@ -969,12 +1331,18 @@ namespace build2
// given the prescribed semantics of adhoc (match/execute but otherwise
// ignore) is followed.
//
+ // Note that we don't need to do anything for posthoc.
+ //
if (i == include_type::excluded)
{
l5 ([&]{trace << "overriding exclusion of " << p;});
i = include_type::adhoc;
}
+ // Also clear any operation-specific overrides.
+ //
+ l = lookup ();
+
return i;
}
@@ -988,12 +1356,12 @@ namespace build2
true, // bootstrap_outer
nullptr, // meta-operation pre
&dist_operation_pre,
- &load, // normal load
- &search, // normal search
- nullptr, // no match (see dist_execute()).
+ &dist_load_load,
+ &perform_search, // normal search
+ nullptr, // no match (see dist_execute()).
&dist_load_execute,
- nullptr, // operation post
- nullptr, // meta-operation post
+ nullptr, // operation post
+ nullptr, // meta-operation post
&dist_include
};
@@ -1024,7 +1392,7 @@ namespace build2
init_config (rs);
}
- void
+ static void
dist_bootstrap_search (const values&,
const scope& rs,
const scope&,
diff --git a/libbuild2/dist/rule.cxx b/libbuild2/dist/rule.cxx
index ef144d0..320d17a 100644
--- a/libbuild2/dist/rule.cxx
+++ b/libbuild2/dist/rule.cxx
@@ -8,6 +8,9 @@
#include <libbuild2/algorithm.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/dist/types.hxx>
+#include <libbuild2/dist/module.hxx>
+
using namespace std;
namespace build2
@@ -15,7 +18,7 @@ namespace build2
namespace dist
{
bool rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
return true; // We always match.
}
@@ -27,17 +30,34 @@ namespace build2
const dir_path& src_root (rs.src_path ());
const dir_path& out_root (rs.out_path ());
- // If we can, go inside see-through groups.
+ // Note that we don't go inside see-through groups since the members for
+ // dist_* may be incomplete (or even bogus, e.g., the "representative
+ // sample"). Instead, for see-through groups our plan is as follows:
+ //
+ // 1. Here we match them as groups (so that we still match all their
+ // prerequisites).
+ //
+ // 2. In dist_project() we collect them along with files after dist_*
+ // but before perform_update. Here we also skip files that are
+ // members of see-through groups (which we may still get).
//
- for (prerequisite_member pm:
- group_prerequisite_members (a, t, members_mode::maybe))
+ // 3. During perform_update we collect all the see-through group
+ // members, similar to files on step (2).
+ //
+ for (const prerequisite& p: group_prerequisites (t))
{
// Note: no exclusion tests, we want all of them (and see also the
- // dist_include() override).
+ // dist_include() override). But if we don't ignore post hoc ones
+ // here, we will end up with a cycle (they will still be handled
+ // by the post-pass).
+ //
+ lookup l; // Ignore any operation-specific values.
+ if (include (a, t, p, &l) == include_type::posthoc)
+ continue;
// Skip prerequisites imported from other projects.
//
- if (pm.proj ())
+ if (p.proj)
continue;
// We used to always search and match but that resulted in the
@@ -56,16 +76,16 @@ namespace build2
// @@ Note that this is still an issue in a custom dist rule.
//
const target* pt (nullptr);
- if (pm.is_a<file> ())
+ if (p.is_a<file> ())
{
- pt = pm.load ();
+ pt = p.target.load ();
if (pt == nullptr)
{
- const prerequisite& p (pm.prerequisite);
-
// Search for an existing target or existing file in src.
//
+ // Note: see also similar code in match_postponed() below.
+ //
const prerequisite_key& k (p.key ());
pt = k.tk.type->search (t, k);
@@ -79,23 +99,65 @@ namespace build2
!p.dir.sub (out_root))
continue;
- fail << "prerequisite " << k << " is not existing source file "
- << "nor known output target" << endf;
+ // This can be order-dependent: for example libs{} prerequisite
+ // may be unknown because we haven't matched the lib{} group
+ // yet. So we postpone this for later (see match_postponed()).
+ //
+ const module& mod (*rs.find_module<module> (module::name));
+
+ mlock l (mod.postponed.mutex);
+ mod.postponed.list.push_back (
+ postponed_prerequisite {a, t, p, t.state[a].rule->first});
+ continue;
}
search_custom (p, *pt); // Cache.
}
}
else
- pt = &pm.search (t);
+ pt = &search (t, p);
// Don't match targets that are outside of our project.
//
if (pt->dir.sub (out_root))
- build2::match (a, *pt);
+ match_sync (a, *pt);
}
return noop_recipe; // We will never be executed.
}
+
+ void rule::
+ match_postponed (const postponed_prerequisite& pp)
+ {
+ action a (pp.action);
+ const target& t (pp.target);
+ const prerequisite& p (pp.prereq);
+
+ const prerequisite_key& k (p.key ());
+ const target* pt (k.tk.type->search (t, k));
+
+ if (pt == nullptr)
+ {
+ // Note that we do lose the diag frame that we normally get when
+ // failing during match. So let's mention the target/rule manually.
+ //
+ fail << "prerequisite " << k << " is not existing source file nor "
+ << "known output target" <<
+ info << "while applying rule " << pp.rule << " to " << diag_do (a, t);
+ }
+
+ search_custom (p, *pt); // Cache.
+
+ // It's theoretically possible that the target gets entered but nobody
+ // else depends on it but us. So we need to make sure it's matched
+ // (since it, in turn, can pull in other targets). Note that this could
+ // potentially add new postponed prerequisites to the list.
+ //
+ if (!pt->matched (a))
+ {
+ if (pt->dir.sub (t.root_scope ().out_path ()))
+ match_direct_sync (a, *pt);
+ }
+ }
}
}
diff --git a/libbuild2/dist/rule.hxx b/libbuild2/dist/rule.hxx
index e63016d..69ab3d9 100644
--- a/libbuild2/dist/rule.hxx
+++ b/libbuild2/dist/rule.hxx
@@ -11,6 +11,10 @@
#include <libbuild2/action.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/dist/types.hxx>
+
+#include <libbuild2/export.hxx>
+
namespace build2
{
namespace dist
@@ -19,20 +23,28 @@ namespace build2
//
// A custom rule (usually the same as perform_update) may be necessary to
// establish group links (so that we see the dist variable set on a group)
- // or to see through non-see-through groups (like lib{}; see the
- // bin::lib_rule for an example). Note that in the latter case the rule
- // should "see" all its members for the dist case.
+ // or to see through non-see-through groups (like lib{}, obj{}; see rule
+ // in the bin module for an example). Note that in the latter case the
+ // rule should "see" all its members for the dist case.
//
- class rule: public simple_rule
+ class LIBBUILD2_SYMEXPORT rule: public simple_rule
{
public:
rule () {}
+ // Always matches (returns true).
+ //
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
+ // Matches all the prerequisites (including from group) and returns
+ // noop_recipe (which will never be executed).
+ //
virtual recipe
apply (action, target&) const override;
+
+ static void
+ match_postponed (const postponed_prerequisite&);
};
}
}
diff --git a/libbuild2/dist/types.hxx b/libbuild2/dist/types.hxx
new file mode 100644
index 0000000..b833951
--- /dev/null
+++ b/libbuild2/dist/types.hxx
@@ -0,0 +1,41 @@
+// file : libbuild2/dist/types.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_DIST_TYPES_HXX
+#define LIBBUILD2_DIST_TYPES_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/forward.hxx>
+
+#include <libbuild2/prerequisite-key.hxx>
+
+namespace build2
+{
+ namespace dist
+ {
+ // List of prerequisites that could not be searched to a target and were
+ // postponed for later re-search. This can happen, for example, because a
+ // prerequisite would resolve to a member of a group that hasn't been
+ // matched yet (for example, libs{} of lib{}). See rule::apply() for
+ // details.
+ //
+ // Note that we are using list instead of vector because new elements can
+ // be added at the end while we are iterating over the list.
+ //
+ struct postponed_prerequisite
+ {
+ build2::action action;
+ reference_wrapper<const build2::target> target;
+ reference_wrapper<const prerequisite> prereq;
+ string rule;
+ };
+
+ struct postponed_prerequisites
+ {
+ build2::mutex mutex;
+ build2::list<postponed_prerequisite> list;
+ };
+ }
+}
+
+#endif // LIBBUILD2_DIST_TYPES_HXX
diff --git a/libbuild2/dump.cxx b/libbuild2/dump.cxx
index b1a16ba..9b7f5b1 100644
--- a/libbuild2/dump.cxx
+++ b/libbuild2/dump.cxx
@@ -3,6 +3,11 @@
#include <libbuild2/dump.hxx>
+#ifndef BUILD2_BOOTSTRAP
+# include <iostream> // cout
+# include <unordered_map>
+#endif
+
#include <libbuild2/rule.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
@@ -11,6 +16,7 @@
#include <libbuild2/diagnostics.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -49,10 +55,321 @@ namespace build2
if (v)
{
names storage;
- os << (a ? " " : "") << reverse (v, storage);
+ os << (a ? " " : "") << reverse (v, storage, true /* reduce */);
+ }
+ }
+
+#ifndef BUILD2_BOOTSTRAP
+
+ static string
+ quoted_target_name (const names_view& ns, bool rel)
+ {
+ ostringstream os;
+ stream_verb (os, stream_verbosity (rel ? 0 : 1, 0));
+ to_stream (os, ns, quote_mode::effective, '@');
+ return os.str ();
+ }
+
+ static void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const names_view& ns,
+ bool rel)
+ {
+ j.value (quoted_target_name (ns, rel));
+ }
+
+ static string
+ quoted_target_name (const target& t, bool rel)
+ {
+ names ns (t.as_name ()); // Note: potentially adds an extension.
+
+ // Don't print target names relative if the target is in src and out!=src.
+ // Failing that, we will end up with pointless ../../../... paths.
+ //
+ // It may also seem that we can omit @-qualification in this case, since
+ // it is implied by the containing scope. However, keep in mind that the
+ // target may not be directly in this scope. We could make it relative,
+ // though.
+ //
+ if (rel && !t.out.empty ())
+ {
+ // Make the out relative ourselves and then disable relative for src.
+ //
+ dir_path& o (ns.back ().dir);
+ o = relative (o); // Note: may return empty path.
+ if (o.empty ())
+ o = dir_path (".");
+
+ rel = false;
+ }
+
+ return quoted_target_name (ns, rel);
+ }
+
+ void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const target& t,
+ bool rel)
+ {
+ j.value (quoted_target_name (t, rel));
+ }
+
+ using target_name_cache = unordered_map<const target*, string>;
+
+ static void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const target& t,
+ target_name_cache& tc)
+ {
+ auto i (tc.find (&t));
+ if (i == tc.end ())
+ i = tc.emplace (&t, quoted_target_name (t, false /* relative */)).first;
+
+ j.value (i->second);
+ }
+
+ void
+ dump_display_target_name (json::stream_serializer& j,
+ const target& t,
+ bool rel)
+ {
+ // Note: see the quoted version above for details.
+
+ target_key tk (t.key ());
+
+ dir_path o;
+ if (rel && !tk.out->empty ())
+ {
+ o = relative (*tk.out);
+ if (o.empty ())
+ o = dir_path (".");
+ tk.out = &o;
+
+ rel = false;
}
+
+ // Change the stream verbosity to print relative if requested and omit
+ // extension.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (rel ? 0 : 1, 0));
+ os << tk;
+ j.value (os.str ());
}
+ static void
+ dump_value (json::stream_serializer& j, const value& v)
+ {
+ // Hints.
+ //
+ // Note that the pair hint should only be used for simple names.
+ //
+ optional<bool> h_array;
+ optional<bool> h_pair; // true/false - second/first is optional.
+
+ if (v.null)
+ {
+ j.value (nullptr);
+ return;
+ }
+ else if (v.type != nullptr)
+ {
+ const value_type& t (*v.type);
+
+ auto s_array = [&j] (const auto& vs)
+ {
+ j.begin_array ();
+ for (const auto& v: vs) j.value (v);
+ j.end_array ();
+ };
+
+ auto s_array_string = [&j] (const auto& vs)
+ {
+ j.begin_array ();
+ for (const auto& v: vs) j.value (v.string ());
+ j.end_array ();
+ };
+
+ // Note: check in the derived-first order.
+ //
+ if (t.is_a<bool> ()) j.value (v.as<bool> ());
+ else if (t.is_a<int64_t> ()) j.value (v.as<int64_t> ());
+ else if (t.is_a<uint64_t> ()) j.value (v.as<uint64_t> ());
+ else if (t.is_a<string> ()) j.value (v.as<string> ());
+ else if (t.is_a<path> ()) j.value (v.as<path> ().string ());
+ else if (t.is_a<dir_path> ()) j.value (v.as<dir_path> ().string ());
+ else if (t.is_a<target_triplet> ()) j.value (v.as<target_triplet> ().string ());
+ else if (t.is_a<project_name> ()) j.value (v.as<project_name> ().string ());
+ else if (t.is_a<int64s> ()) s_array (v.as<int64s> ());
+ else if (t.is_a<uint64s> ()) s_array (v.as<uint64s> ());
+ else if (t.is_a<strings> ()) s_array (v.as<strings> ());
+ else if (t.is_a<paths> ()) s_array_string (v.as<paths> ());
+ else if (t.is_a<dir_paths> ()) s_array_string (v.as<dir_paths> ());
+ else
+ {
+ // Note: check in the derived-first order.
+ //
+ if (t.is_a<name> ()) h_array = false;
+ else if (t.is_a<name_pair> ())
+ {
+ h_array = false;
+ h_pair = true;
+ }
+ else if (t.is_a<process_path_ex> ())
+ {
+ // Decide on array dynamically.
+ h_pair = true;
+ }
+ else if (t.is_a<process_path> ())
+ {
+ h_array = false;
+ h_pair = true;
+ }
+ else if (t.is_a<cmdline> () ||
+ t.is_a<vector<name>> ())
+ {
+ h_array = true;
+ }
+ else if (t.is_a<vector<pair<string, string>>> () ||
+ t.is_a<vector<pair<string, optional<string>>>> () ||
+ t.is_a<vector<pair<string, optional<bool>>>> () ||
+ t.is_a<map<string, string>> () ||
+ t.is_a<map<string, optional<string>>> () ||
+ t.is_a<map<string, optional<bool>>> () ||
+ t.is_a<map<project_name, dir_path>> ())
+ {
+ h_array = true;
+ h_pair = true;
+ }
+ else if (t.is_a<map<optional<string>, string>> () ||
+ t.is_a<vector<pair<optional<string>, string>>> ())
+ {
+ h_array = true;
+ h_pair = false;
+ }
+
+ goto fall_through;
+ }
+
+ return;
+
+ fall_through:
+ ;
+ }
+
+ names storage;
+ names_view ns (reverse (v, storage, true /* reduce */));
+
+ if (ns.empty ())
+ {
+ // When it comes to representing an empty value, our options are: empty
+ // array ([]), empty object ({}), or an absent member. The latter feels
+ // closer to null than empty, so that's out. After some experimentation,
+ // it feels the best choice is to use array unless we know for sure it
+ // is not, in which case we use an object if it's a pair and empty
+ // string otherwise (the empty string makes sense because we serialize
+ // complex names as target names; see below).
+ //
+ if (!h_array || *h_array)
+ {
+ j.begin_array ();
+ j.end_array ();
+ }
+ else
+ {
+ if (h_pair)
+ {
+ j.begin_object ();
+ j.end_object ();
+ }
+ else
+ j.value ("");
+ }
+ }
+ else
+ {
+ if (!h_array)
+ h_array = ns.size () > 2 || (ns.size () == 2 && !ns.front ().pair);
+
+ if (*h_array)
+ j.begin_array ();
+
+ // While it may be tempting to try to provide a heterogeneous array
+ // (i.e., all strings, all objects, all pairs), in case of pairs we
+ // actually don't know whether a non-pair element is first or second
+ // (it's up to interpretation; though we do hint which one is optional
+ // for typed values above). So we serialize each name in its most
+ // appropriate form.
+ //
+ auto simple = [] (const name& n)
+ {
+ return n.simple () || n.directory () || n.file ();
+ };
+
+ auto s_simple = [&j] (const name& n)
+ {
+ if (n.simple ())
+ j.value (n.value);
+ else if (n.directory ())
+ j.value (n.dir.string ());
+ else if (n.file ())
+ {
+ // Note: both must be present due to earlier checks.
+ //
+ j.value ((n.dir / n.value).string ());
+ }
+ else
+ return false;
+
+ return true;
+ };
+
+ for (auto i (ns.begin ()), e (ns.end ()); i != e; )
+ {
+ const name& l (*i++);
+ const name* r (l.pair ? &*i++ : nullptr);
+
+ optional<bool> hp (h_pair);
+
+ if (!hp && r != nullptr && simple (l) && simple (*r))
+ hp = true;
+
+ if (hp)
+ {
+ // Pair of simple names.
+ //
+ j.begin_object ();
+
+ if (r != nullptr)
+ {
+ j.member_name ("first"); s_simple (l);
+ j.member_name ("second"); s_simple (*r);
+ }
+ else
+ {
+ j.member_name (*hp ? "first" : "second"); s_simple (l);
+ }
+
+ j.end_object ();
+ }
+ else if (r == nullptr && s_simple (l))
+ ;
+ else
+ {
+ // If complex name (or pair thereof), then assume a target name.
+ //
+ dump_quoted_target_name (j,
+ names_view (&l, r != nullptr ? 2 : 1),
+ false /* relative */);
+ }
+ }
+
+ if (*h_array)
+ j.end_array ();
+ }
+ }
+#endif
+
enum class variable_kind {scope, tt_pat, target, rule, prerequisite};
static void
@@ -83,6 +400,10 @@ namespace build2
const variable& var (p.first);
const value& v (p.second);
+ // On one hand it might be helpful to print the visibility. On the
+ // other, it is always specified which means there will be a lot of
+ // noise. So probably not.
+ //
if (var.type != nullptr)
os << '[' << var.type->name << "] ";
@@ -123,6 +444,68 @@ namespace build2
}
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_variable (json::stream_serializer& j,
+ const variable_map& vm,
+ const variable_map::const_iterator& vi,
+ const scope& s,
+ variable_kind k)
+ {
+ // Note: see the buildfile version above for comments.
+
+ assert (k != variable_kind::tt_pat); // TODO
+
+ const auto& p (*vi);
+ const variable& var (p.first);
+ const value& v (p.second);
+
+ lookup l (v, var, vm);
+ if (k != variable_kind::prerequisite)
+ {
+ if (var.override ())
+ return; // Ignore.
+
+ if (var.overrides != nullptr)
+ {
+ l = s.lookup_override (
+ var,
+ make_pair (l, 1),
+ k == variable_kind::target || k == variable_kind::rule,
+ k == variable_kind::rule).first;
+
+ assert (l.defined ()); // We at least have the original.
+ }
+ }
+
+ // Note that we do not distinguish between variable/value type.
+ //
+ // An empty value of a non-array type is represented as an empty object
+ // ({}).
+ //
+#if 0
+ struct variable
+ {
+ string name;
+ optional<string> type;
+ json_value value; // string|number|boolean|null|object|array
+ };
+#endif
+
+ j.begin_object ();
+
+ j.member ("name", var.name);
+
+ if (l->type != nullptr)
+ j.member ("type", l->type->name);
+
+ j.member_name ("value");
+ dump_value (j, *l);
+
+ j.end_object ();
+ }
+#endif
+
static void
dump_variables (ostream& os,
string& ind,
@@ -139,6 +522,20 @@ namespace build2
}
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_variables (json::stream_serializer& j,
+ const variable_map& vars,
+ const scope& s,
+ variable_kind k)
+ {
+ for (auto i (vars.begin ()), e (vars.end ()); i != e; ++i)
+ {
+ dump_variable (j, vars, i, s, k);
+ }
+ }
+#endif
+
// Dump target type/pattern-specific variables.
//
static void
@@ -208,7 +605,7 @@ namespace build2
for (action a: r.actions)
os << ' ' << re.meta_operations[a.meta_operation ()]->name <<
- '(' << re.operations[a.operation ()]->name << ')';
+ '(' << re.operations[a.operation ()].info->name << ')';
os << endl;
r.dump_text (os, ind);
@@ -225,6 +622,14 @@ namespace build2
// Pattern.
//
os << ind;
+
+ // Avoid printing the derived name.
+ //
+ if (rp.rule_name.front () != '<' || rp.rule_name.back () != '>')
+ {
+ os << "[rule_name=" << rp.rule_name << "] ";
+ }
+
rp.dump (os);
// Recipes.
@@ -236,10 +641,27 @@ namespace build2
}
}
+ // Similar to target::matched() but for the load phase.
+ //
+ static inline bool
+ matched (const target& t, action a)
+ {
+ // Note: running serial and task_count is 0 before any operation has
+ // started.
+ //
+ if (size_t c = t[a].task_count.load (memory_order_relaxed))
+ {
+ if (c == t.ctx.count_applied () || c == t.ctx.count_executed ())
+ return true;
+ }
+
+ return false;
+ }
+
static void
- dump_target (optional<action> a,
- ostream& os,
+ dump_target (ostream& os,
string& ind,
+ optional<action> a,
const target& t,
const scope& s,
bool rel)
@@ -248,6 +670,9 @@ namespace build2
// scope. To achieve this we are going to temporarily lower the stream
// path verbosity to level 0.
//
+ // @@ Not if in src and out != src? Otherwise end up with ../../../...
+ // See JSON version for the state of the art.
+ //
stream_verbosity osv, nsv;
if (rel)
{
@@ -259,7 +684,38 @@ namespace build2
if (t.group != nullptr)
os << ind << t << " -> " << *t.group << endl;
- os << ind << t << ':';
+ os << ind;
+
+ // Target attributes.
+ //
+ if (!t.rule_hints.map.empty ())
+ {
+ os << '[';
+
+ bool f (true);
+ for (const rule_hints::value_type& v: t.rule_hints.map)
+ {
+ if (f)
+ f = false;
+ else
+ os << ", ";
+
+ if (v.type != nullptr)
+ os << v.type->name << '@';
+
+ os << "rule_hint=";
+
+ if (v.operation != default_id)
+ os << s.root_scope ()->root_extra->operations[v.operation].info->name
+ << '@';
+
+ os << v.hint;
+ }
+
+ os << "] ";
+ }
+
+ os << t << ':';
// First check if this is the simple case where we can print everything
// as a single declaration.
@@ -278,32 +734,26 @@ namespace build2
// If the target has been matched to a rule, we also print resolved
// prerequisite targets.
//
- // Note: running serial and task_count is 0 before any operation has
- // started.
- //
const prerequisite_targets* pts (nullptr);
{
action inner; // @@ Only for the inner part of the action currently.
- if (size_t c = t[inner].task_count.load (memory_order_relaxed))
+ if (matched (t, inner))
{
- if (c == t.ctx.count_applied () || c == t.ctx.count_executed ())
- {
- pts = &t.prerequisite_targets[inner];
+ pts = &t.prerequisite_targets[inner];
- bool f (false);
- for (const target* pt: *pts)
+ bool f (false);
+ for (const target* pt: *pts)
+ {
+ if (pt != nullptr)
{
- if (pt != nullptr)
- {
- f = true;
- break;
- }
+ f = true;
+ break;
}
-
- if (!f)
- pts = nullptr;
}
+
+ if (!f)
+ pts = nullptr;
}
}
@@ -467,10 +917,318 @@ namespace build2
stream_verb (os, osv);
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_target (json::stream_serializer& j,
+ optional<action> a,
+ const target& t,
+ const scope& s,
+ bool rel,
+ target_name_cache& tcache)
+ {
+ // Note: see the buildfile version above for comments.
+
+ // Note that the target name (and display_name) are relative to the
+ // containing scope (if any).
+ //
+#if 0
+ struct prerequisite
+ {
+ string name; // Quoted/qualified name.
+ string type;
+ vector<variable> variables; // Prerequisite variables.
+ };
+
+ struct loaded_target
+ {
+ string name; // Quoted/qualified name.
+ string display_name;
+ string type; // Target type.
+ //string declaration;
+ optional<string> group; // Quoted/qualified group target name.
+
+ vector<variable> variables; // Target variables.
+
+ vector<prerequisite> prerequisites;
+ };
+
+ // @@ TODO: target attributes (rule_hint)
+
+ struct prerequisite_target
+ {
+ string name; // Target name (always absolute).
+ string type;
+ bool adhoc;
+ };
+
+ struct operation_state
+ {
+ string rule; // null if direct recipe match
+
+ optional<string> state; // unchanged|changed|group
+
+ vector<variable> variables; // Rule variables.
+
+ vector<prerequisite_target> prerequisite_targets;
+ };
+
+ struct matched_target
+ {
+ string name;
+ string display_name;
+ string type;
+ //string declaration;
+ optional<string> group;
+
+ optional<path> path; // Absent if not path-based target, not assigned.
+
+ vector<variable> variables;
+
+ optional<operation_state> outer_operation; // null if not matched.
+ operation_state inner_operation; // null if not matched.
+ };
+#endif
+
+ j.begin_object ();
+
+ j.member_name ("name");
+ dump_quoted_target_name (j, t, rel /* relative */);
+
+ j.member_name ("display_name");
+ dump_display_target_name (j, t, rel /* relative */);
+
+ j.member ("type", t.type ().name);
+
+ // @@ This value currently doesn't make much sense:
+ //
+ // - why are all the system headers prereq-new?
+ //
+ // - why is synthesized obje{} prereq-new?
+ //
+#if 0
+ {
+ const char* v (nullptr);
+ switch (t.decl)
+ {
+ case target_decl::prereq_new: v = "prerequisite-new"; break;
+ case target_decl::prereq_file: v = "prerequisite-file"; break;
+ case target_decl::implied: v = "implied"; break;
+ case target_decl::real: v = "real"; break;
+ }
+ j.member ("declaration", v);
+ }
+#endif
+
+ if (t.group != nullptr)
+ {
+ j.member_name ("group");
+ dump_quoted_target_name (j, *t.group, tcache);
+ }
+
+ if (a)
+ {
+ const string* v (nullptr);
+
+ if (t.is_a<dir> () || t.is_a<fsdir> ())
+ {
+ v = &t.dir.string ();
+ }
+ else if (const auto* pt = t.is_a<path_target> ())
+ {
+ const path& p (pt->path ());
+
+ if (!p.empty ())
+ v = &p.string ();
+ }
+
+ if (v != nullptr)
+ j.member ("path", *v);
+ }
+
+ // Target variables.
+ //
+ if (!t.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, t.vars, s, variable_kind::target);
+ j.end_array ();
+ }
+
+ // Prerequisites.
+ //
+ if (!a)
+ {
+ const prerequisites& ps (t.prerequisites ());
+
+ if (!ps.empty ())
+ {
+ j.member_begin_array ("prerequisites");
+
+ for (const prerequisite& p: ps)
+ {
+ j.begin_object ();
+
+ {
+ // Cobble together an equivalent of dump_quoted_target_name().
+ //
+ prerequisite_key pk (p.key ());
+ target_key& tk (pk.tk);
+
+ // It's possible that the containing scope differs from
+ // prerequisite's. This, for example, happens when we copy the
+ // prerequisite for a synthesized obj{} dependency that happens to
+ // be in a subdirectory, as in exe{foo}:src/cxx{foo}. In this
+ // case, we need to rebase relative paths to the containing scope.
+ //
+ dir_path d, o;
+ if (p.scope != s)
+ {
+ if (tk.out->empty ())
+ {
+ if (tk.dir->relative ())
+ {
+ d = (p.scope.out_path () / *tk.dir).relative (s.out_path ());
+ tk.dir = &d;
+ }
+ }
+ else
+ {
+ if (tk.dir->relative ())
+ {
+ d = (p.scope.src_path () / *tk.dir).relative (s.src_path ());
+ tk.dir = &d;
+ }
+
+ if (tk.out->relative ())
+ {
+ o = (p.scope.out_path () / *tk.out).relative (s.out_path ());
+ if (o.empty ())
+ o = dir_path (".");
+ tk.out = &o;
+ }
+ }
+ }
+
+ // If prerequisite paths are absolute, keep them absolute.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (1, 0));
+
+ if (pk.proj)
+ os << *pk.proj << '%';
+
+ to_stream (os, pk.tk.as_name (), quote_mode::effective, '@');
+
+ j.member ("name", os.str ());
+ }
+
+ j.member ("type", p.type.name);
+
+ if (!p.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, p.vars, s, variable_kind::prerequisite);
+ j.end_array ();
+ }
+
+ j.end_object ();
+ }
+
+ j.end_array ();
+ }
+ }
+ else
+ {
+ // Matched rules and their state (prerequisite_targets, vars, etc).
+ //
+ auto dump_opstate = [&tcache, &j, &s, &t] (action a)
+ {
+ const target::opstate& o (t[a]);
+
+ j.begin_object ();
+
+ j.member ("rule", o.rule != nullptr ? o.rule->first.c_str () : nullptr);
+
+ // It feels natural to omit the unknown state, as if it corresponded
+ // to absent in optional<target_state>.
+ //
+ if (o.state != target_state::unknown)
+ {
+ assert (o.state == target_state::unchanged ||
+ o.state == target_state::changed ||
+ o.state == target_state::group);
+
+ j.member ("state", to_string (o.state));
+ }
+
+ if (!o.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, o.vars, s, variable_kind::rule);
+ j.end_array ();
+ }
+
+ {
+ bool first (true);
+ for (const prerequisite_target& pt: t.prerequisite_targets[a])
+ {
+ if (pt.target == nullptr)
+ continue;
+
+ if (first)
+ {
+ j.member_begin_array ("prerequisite_targets");
+ first = false;
+ }
+
+ j.begin_object ();
+
+ j.member_name ("name");
+ dump_quoted_target_name (j, *pt.target, tcache);
+
+ j.member ("type", pt.target->type ().name);
+
+ if (pt.adhoc ())
+ j.member ("adhoc", true);
+
+ j.end_object ();
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ j.end_object ();
+ };
+
+ if (a->outer ())
+ {
+ j.member_name ("outer_operation");
+ if (matched (t, *a))
+ dump_opstate (*a);
+ else
+ j.value (nullptr);
+ }
+
+ {
+ action ia (a->inner_action ());
+
+ j.member_name ("inner_operation");
+ if (matched (t, ia))
+ dump_opstate (ia);
+ else
+ j.value (nullptr);
+ }
+ }
+
+ j.end_object ();
+ }
+#endif
+
static void
- dump_scope (optional<action> a,
- ostream& os,
+ dump_scope (ostream& os,
string& ind,
+ optional<action> a,
scope_map::const_iterator& i,
bool rel)
{
@@ -545,21 +1303,25 @@ namespace build2
// disabled amalgamation will be printed directly inside the global
// scope).
//
- for (auto e (p.ctx.scopes.end ());
- (i != e &&
- i->second.front () != nullptr &&
- i->second.front ()->parent_scope () == &p); )
+ for (auto e (p.ctx.scopes.end ()); i != e; )
{
- if (vb || rb || sb)
+ if (i->second.front () == nullptr)
+ ++i; // Skip over src paths.
+ else if (i->second.front ()->parent_scope () != &p)
+ break; // Moved past our parent.
+ else
{
- os << endl;
- vb = rb = false;
- }
+ if (vb || rb || sb)
+ {
+ os << endl;
+ vb = rb = false;
+ }
- os << endl; // Extra newline between scope blocks.
+ os << endl; // Extra newline between scope blocks.
- dump_scope (a, os, ind, i, true /* relative */);
- sb = true;
+ dump_scope (os, ind, a, i, true /* relative */);
+ sb = true;
+ }
}
// Targets.
@@ -581,7 +1343,7 @@ namespace build2
}
os << endl; // Extra newline between targets.
- dump_target (a, os, ind, t, p, true /* relative */);
+ dump_target (os, ind, a, t, p, true /* relative */);
tb = true;
}
@@ -592,45 +1354,245 @@ namespace build2
<< ind << '}';
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_scope (json::stream_serializer& j,
+ optional<action> a,
+ scope_map::const_iterator& i,
+ bool rel,
+ target_name_cache& tcache)
+ {
+ // Note: see the buildfile version above for additional comments.
+
+ const scope& p (*i->second.front ());
+ const dir_path& d (i->first);
+ ++i;
+
+#if 0
+ struct scope
+ {
+ // The out_path member is relative to the parent scope. It is empty for
+ // the special global scope. The src_path member is absent if the same
+ // as out_path (in-source build or scope outside of project).
+ //
+ string out_path;
+ optional<string> src_path;
+
+ vector<variable> variables; // Non-type/pattern scope variables.
+
+ vector<scope> scopes; // Immediate children.
+
+ vector<loaded_target|matched_target> targets;
+ };
+#endif
+
+ j.begin_object ();
+
+ if (d.empty ())
+ j.member ("out_path", ""); // Global scope.
+ else
+ {
+ const dir_path& rd (rel ? relative (d) : d);
+ j.member ("out_path", rd.empty () ? string (".") : rd.string ());
+
+ if (!p.out_eq_src ())
+ j.member ("src_path", p.src_path ().string ());
+ }
+
+ const dir_path* orb (relative_base);
+ relative_base = &d;
+
+ // Scope variables.
+ //
+ if (!p.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, p.vars, p, variable_kind::scope);
+ j.end_array ();
+ }
+
+ // Nested scopes of which we are an immediate parent.
+ //
+ {
+ bool first (true);
+ for (auto e (p.ctx.scopes.end ()); i != e; )
+ {
+ if (i->second.front () == nullptr)
+ ++i;
+ else if (i->second.front ()->parent_scope () != &p)
+ break;
+ else
+ {
+ if (first)
+ {
+ j.member_begin_array ("scopes");
+ first = false;
+ }
+
+ dump_scope (j, a, i, true /* relative */, tcache);
+ }
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ // Targets.
+ //
+ {
+ bool first (true);
+ for (const auto& pt: p.ctx.targets)
+ {
+ const target& t (*pt);
+
+ if (&p != &t.base_scope ()) // @@ PERF
+ continue;
+
+ // Skip targets that haven't been matched for this action.
+ //
+ if (a)
+ {
+ if (!(matched (t, a->inner_action ()) ||
+ (a->outer () && matched (t, *a))))
+ continue;
+ }
+
+ if (first)
+ {
+ j.member_begin_array ("targets");
+ first = false;
+ }
+
+ dump_target (j, a, t, p, true /* relative */, tcache);
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ relative_base = orb;
+ j.end_object ();
+ }
+#endif
+
void
- dump (const context& c, optional<action> a)
+ dump (const context& c, optional<action> a, dump_format fmt)
{
auto i (c.scopes.begin ());
assert (i->second.front () == &c.global_scope);
- // We don't lock diag_stream here as dump() is supposed to be called from
- // the main thread prior/after to any other threads being spawned.
- //
- string ind;
- ostream& os (*diag_stream);
- dump_scope (a, os, ind, i, false /* relative */);
- os << endl;
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ // We don't lock diag_stream here as dump() is supposed to be called
+ // from the main thread prior/after to any other threads being
+ // spawned.
+ //
+ string ind;
+ ostream& os (*diag_stream);
+ dump_scope (os, ind, a, i, false /* relative */);
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+ dump_scope (j, a, i, false /* relative */, tc);
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
void
- dump (const scope& s, const char* cind)
+ dump (const scope* s, optional<action> a, dump_format fmt, const char* cind)
{
- const scope_map& m (s.ctx.scopes);
- auto i (m.find_exact (s.out_path ()));
- assert (i != m.end () && i->second.front () == &s);
+ scope_map::const_iterator i;
+ if (s != nullptr)
+ {
+ const scope_map& m (s->ctx.scopes);
+ i = m.find_exact (s->out_path ());
+ assert (i != m.end () && i->second.front () == s);
+ }
- string ind (cind);
- ostream& os (*diag_stream);
- dump_scope (nullopt /* action */, os, ind, i, false /* relative */);
- os << endl;
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ string ind (cind);
+ ostream& os (*diag_stream);
+
+ if (s != nullptr)
+ dump_scope (os, ind, a, i, false /* relative */);
+ else
+ os << ind << "<no known scope to dump>";
+
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+
+ if (s != nullptr)
+ dump_scope (j, a, i, false /* relative */, tc);
+ else
+ j.value (nullptr);
+
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
void
- dump (const target& t, const char* cind)
+ dump (const target* t, optional<action> a, dump_format fmt, const char* cind)
{
- string ind (cind);
- ostream& os (*diag_stream);
- dump_target (nullopt /* action */,
- os,
- ind,
- t,
- t.base_scope (),
- false /* relative */);
- os << endl;
+ const scope* bs (t != nullptr ? &t->base_scope () : nullptr);
+
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ string ind (cind);
+ ostream& os (*diag_stream);
+
+ if (t != nullptr)
+ dump_target (os, ind, a, *t, *bs, false /* relative */);
+ else
+ os << ind << "<no known target to dump>";
+
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+
+ if (t != nullptr)
+ dump_target (j, a, *t, *bs, false /* relative */, tc);
+ else
+ j.value (nullptr);
+
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
}
diff --git a/libbuild2/dump.hxx b/libbuild2/dump.hxx
index 6ec6944..1a1a080 100644
--- a/libbuild2/dump.hxx
+++ b/libbuild2/dump.hxx
@@ -4,6 +4,10 @@
#ifndef LIBBUILD2_DUMP_HXX
#define LIBBUILD2_DUMP_HXX
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
@@ -14,18 +18,40 @@
namespace build2
{
+ enum class dump_format {buildfile, json};
+
// Dump the build state to diag_stream. If action is specified, then assume
// rules have been matched for this action and dump action-specific
// information (like rule-specific variables).
//
+ // If scope or target is NULL, then assume not found and write a format-
+ // appropriate indication.
+ //
+ LIBBUILD2_SYMEXPORT void
+ dump (const context&, optional<action>, dump_format);
+
LIBBUILD2_SYMEXPORT void
- dump (const context&, optional<action> = nullopt);
+ dump (const scope*, optional<action>, dump_format, const char* ind = "");
LIBBUILD2_SYMEXPORT void
- dump (const scope&, const char* ind = "");
+ dump (const target*, optional<action>, dump_format, const char* ind = "");
+#ifndef BUILD2_BOOTSTRAP
+ // Dump (effectively) quoted target name, optionally relative (to the out
+ // tree).
+ //
+ LIBBUILD2_SYMEXPORT void
+ dump_quoted_target_name (butl::json::stream_serializer&,
+ const target&,
+ bool relative = false);
+
+ // Dump display target name, optionally relative (to the out tree).
+ //
LIBBUILD2_SYMEXPORT void
- dump (const target&, const char* ind = "");
+ dump_display_target_name (butl::json::stream_serializer&,
+ const target&,
+ bool relative = false);
+#endif
}
#endif // LIBBUILD2_DUMP_HXX
diff --git a/libbuild2/dyndep.cxx b/libbuild2/dyndep.cxx
new file mode 100644
index 0000000..c0360f0
--- /dev/null
+++ b/libbuild2/dyndep.cxx
@@ -0,0 +1,1104 @@
+// file : libbuild2/dyndep.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/dyndep.hxx>
+
+#include <libbuild2/scope.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/search.hxx>
+#include <libbuild2/context.hxx>
+#include <libbuild2/algorithm.hxx>
+#include <libbuild2/filesystem.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+using namespace std;
+using namespace butl;
+
+namespace build2
+{
+ bool dyndep_rule::
+ update (tracer& trace, action a, const target& t, timestamp ts)
+ {
+ return update_during_match (trace, a, t, ts);
+ }
+
+ optional<bool> dyndep_rule::
+ inject_file (tracer& trace, const char* what,
+ action a, target& t,
+ const file& pt,
+ timestamp mt,
+ bool f,
+ bool adhoc,
+ uintptr_t data)
+ {
+ // Even if failing we still use try_match_sync() in order to issue
+ // consistent (with other places) diagnostics (rather than the generic
+ // "no rule to update ...").
+ //
+ if (!try_match_sync (a, pt).first)
+ {
+ if (!f)
+ return nullopt;
+
+ diag_record dr;
+ dr << fail << what << ' ' << pt << " not found and no rule to "
+ << "generate it";
+
+ if (verb < 4)
+ dr << info << "re-run with --verbose=4 for more information";
+ }
+
+ bool r (update (trace, a, pt, mt));
+
+ // Add to our prerequisite target list.
+ //
+ t.prerequisite_targets[a].emplace_back (&pt, adhoc, data);
+
+ return r;
+ }
+
+ // Check if the specified prerequisite is updated during match by any other
+ // prerequisites of the specified target, recursively.
+ //
+ static bool
+ updated_during_match (action a, const target& t, size_t pts_n,
+ const target& pt)
+ {
+ const auto& pts (t.prerequisite_targets[a]);
+
+ for (size_t i (0); i != pts_n; ++i)
+ {
+ const prerequisite_target& p (pts[i]);
+
+ // @@ This currently doesn't cover adhoc targets if matched with
+ // buildscript (it stores them in p.data). Probably need to redo
+ // things there (see adhoc_buildscript_rule::apply()).
+ //
+ if (p.target != nullptr)
+ {
+ if (p.target == &pt &&
+ (p.include & prerequisite_target::include_udm) != 0)
+ return true;
+
+ if (size_t n = p.target->prerequisite_targets[a].size ())
+ {
+ if (updated_during_match (a, *p.target, n, pt))
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ optional<bool> dyndep_rule::
+ inject_existing_file (tracer& trace, const char* what,
+ action a, target& t, size_t pts_n,
+ const file& pt,
+ timestamp mt,
+ bool f,
+ bool adhoc,
+ uintptr_t data)
+ {
+ if (!try_match_sync (a, pt).first)
+ {
+ if (!f)
+ return nullopt;
+
+ diag_record dr;
+ dr << fail << what << ' ' << pt << " not found and no rule to "
+ << "generate it";
+
+ if (verb < 4)
+ dr << info << "re-run with --verbose=4 for more information";
+ }
+
+ recipe_function* const* rf (pt[a].recipe.target<recipe_function*> ());
+ if (rf == nullptr || *rf != &noop_action)
+ {
+ if (pts_n == 0 || !updated_during_match (a, t, pts_n, pt))
+ {
+ fail << what << ' ' << pt << " has non-noop recipe" <<
+ info << "consider listing it as static prerequisite of " << t;
+ }
+ }
+
+ bool r (update (trace, a, pt, mt));
+
+ // Add to our prerequisite target list.
+ //
+ t.prerequisite_targets[a].emplace_back (&pt, adhoc, data);
+
+ return r;
+ }
+
+ void dyndep_rule::
+ verify_existing_file (tracer&, const char* what,
+ action a, const target& t, size_t pts_n,
+ const file& pt)
+ {
+ diag_record dr;
+
+ if (pt.matched (a, memory_order_acquire))
+ {
+ recipe_function* const* rf (pt[a].recipe.target<recipe_function*> ());
+ if (rf == nullptr || *rf != &noop_action)
+ {
+ if (pts_n == 0 || !updated_during_match (a, t, pts_n, pt))
+ {
+ dr << fail << what << ' ' << pt << " has non-noop recipe";
+ }
+ }
+ }
+ else if (pt.decl == target_decl::real)
+ {
+ // Note that this target could not possibly be updated during match
+ // since it's not matched.
+ //
+ dr << fail << what << ' ' << pt << " is explicitly declared as "
+ << "target and may have non-noop recipe";
+ }
+
+ if (!dr.empty ())
+ dr << info << "consider listing it as static prerequisite of " << t;
+ }
+
+ small_vector<const target_type*, 2> dyndep_rule::
+ map_extension (const scope& bs,
+ const string& n, const string& e,
+ const target_type* const* tts)
+ {
+ // We will just have to try all of the possible ones, in the "most
+ // likely to match" order.
+ //
+ auto test = [&bs, &n, &e] (const target_type& tt) -> bool
+ {
+ if (tt.default_extension != nullptr)
+ {
+ // Call the extension derivation function. Here we know that it will
+ // only use the target type and name from the target key so we can
+ // pass bogus values for the rest.
+ //
+ target_key tk {&tt, nullptr, nullptr, &n, nullopt};
+
+ // This is like prerequisite search.
+ //
+ optional<string> de (tt.default_extension (tk, bs, nullptr, true));
+
+ return de && *de == e;
+ }
+
+ return false;
+ };
+
+ small_vector<const target_type*, 2> r;
+
+ if (tts != nullptr)
+ {
+ // @@ What if these types are not known by this project? Maybe this
+ // should just be unified with the below loop? Need to make sure
+ // we don't rely on the order in which they are returned.
+ //
+ for (const target_type* const* p (tts); *p != nullptr; ++p)
+ if (test (**p))
+ r.push_back (*p);
+ }
+
+ // Next try target types derived from any of the base types (or file if
+ // there are no base types).
+ //
+ const target_type_map& ttm (bs.root_scope ()->root_extra->target_types);
+
+ for (auto i (ttm.type_begin ()), e (ttm.type_end ()); i != e; ++i)
+ {
+ const target_type& dt (i->second);
+
+ if (tts != nullptr)
+ {
+ for (const target_type* const* p (tts); *p != nullptr; ++p)
+ {
+ const target_type& bt (**p);
+
+ if (dt.is_a (bt))
+ {
+ if (dt != bt && test (dt))
+ r.push_back (&dt);
+
+ break;
+ }
+ }
+ }
+ else
+ {
+ // Anything file-derived but not the file itself.
+ //
+ if (dt.is_a<file> () && dt != file::static_type && test (dt))
+ r.push_back (&dt);
+ }
+ }
+
+ return r;
+ }
+
+ void dyndep_rule::
+ append_prefix (tracer& trace, prefix_map& m, const target& t, dir_path d)
+ {
+ // If the target directory is a sub-directory of the include directory,
+ // then the prefix is the difference between the two. Otherwise, leave it
+ // empty.
+ //
+ // The idea here is to make this "canonical" setup work auto-magically
+ // (using C/C++ #include's as an example):
+ //
+ // 1. We include all headers with a prefix, e.g., <foo/bar>.
+ //
+ // 2. The library target is in the foo/ sub-directory, e.g., /tmp/foo/.
+ //
+ // 3. The poptions variable contains -I/tmp.
+ //
+ dir_path p (t.dir.sub (d) ? t.dir.leaf (d) : dir_path ());
+
+ // We use the target's directory as out_base but that doesn't work well
+ // for targets that are stashed in subdirectories. So as a heuristic we
+ // are going to also enter the outer directories of the original prefix.
+ // It is, however, possible that another directory after this one will
+ // produce one of these outer prefixes as its original prefix in which
+ // case we should override it.
+ //
+ // So we are going to assign the original prefix priority value 0
+ // (highest) and then increment it for each outer prefix.
+ //
+ auto enter = [&trace, &m] (dir_path p, dir_path d, size_t prio)
+ {
+ auto j (m.lower_bound (p)), e (m.end ());
+
+ if (j != e && j->first != p)
+ j = e;
+
+ if (j == m.end ())
+ {
+ if (verb >= 4)
+ trace << "new mapping for prefix '" << p << "'\n"
+ << " new mapping to " << d << " priority " << prio;
+
+ m.emplace (move (p), prefix_value {move (d), prio});
+ }
+ else if (p.empty ())
+ {
+ // For prefixless we keep all the entries since for them we have an
+ // extra check (target must be explicitly spelled out in a buildfile).
+ //
+ if (verb >= 4)
+ trace << "additional mapping for prefix '" << p << "'\n"
+ << " new mapping to " << d << " priority " << prio;
+
+ // Find the position where to insert according to the priority.
+ // For equal priorities we use the insertion order.
+ //
+ do
+ {
+ if (j->second.priority > prio)
+ break;
+ }
+ while (++j != e && j->first == p);
+
+ m.emplace_hint (j, move (p), prefix_value {move (d), prio});
+ }
+ else
+ {
+ prefix_value& v (j->second);
+
+ // We used to reject duplicates but it seems this can be reasonably
+ // expected to work according to the order of, say, -I options.
+ //
+ // Seeing that we normally have more "specific" -I paths first, (so
+ // that we don't pick up installed headers, etc), we ignore it.
+ //
+ if (v.directory == d)
+ {
+ if (v.priority > prio)
+ v.priority = prio;
+ }
+ else if (v.priority <= prio)
+ {
+ if (verb >= 4)
+ trace << "ignoring mapping for prefix '" << p << "'\n"
+ << " existing mapping to " << v.directory
+ << " priority " << v.priority << '\n'
+ << " another mapping to " << d << " priority " << prio;
+ }
+ else
+ {
+ if (verb >= 4)
+ trace << "overriding mapping for prefix '" << p << "'\n"
+ << " existing mapping to " << v.directory
+ << " priority " << v.priority << '\n'
+ << " new mapping to " << d << " priority " << prio;
+
+ v.directory = move (d);
+ v.priority = prio;
+ }
+ }
+ };
+
+ // Enter all outer prefixes, including prefixless.
+ //
+ // The prefixless part is fuzzy but seems to be doing the right thing
+ // ignoring/overriding-wise, at least in cases where one of the competing
+ // include search paths is a subdirectory of another.
+ //
+ for (size_t prio (0);; ++prio)
+ {
+ bool e (p.empty ());
+ enter ((e ? move (p) : p), (e ? move (d) : d), prio);
+ if (e)
+ break;
+ p = p.directory ();
+ }
+ }
+
+ bool dyndep_rule::srcout_builder::
+ next (dir_path&& d)
+ {
+ // Ignore any paths containing '.', '..' components. Allow any directory
+ // separators though (think -I$src_root/foo on Windows).
+ //
+ if (d.absolute () && d.normalized (false))
+ {
+ // If we have a candidate out_base, see if this is its src_base.
+ //
+ if (prev_ != nullptr)
+ {
+ const dir_path& bp (prev_->src_path ());
+
+ if (d.sub (bp))
+ {
+ if (diff_.empty () || d.leaf (bp) == diff_)
+ {
+ // We've got a pair.
+ //
+ map_.emplace (move (d), prev_->out_path () / diff_);
+ prev_ = nullptr; // Taken.
+ return true;
+ }
+ }
+
+ // Not a pair. Fall through to consider as out_base.
+ //
+ prev_ = nullptr;
+ }
+
+ // See if this path is inside a project with an out of source build and is
+ // in the out directory tree.
+ //
+ const scope& bs (ctx_.scopes.find_out (d));
+ if (bs.root_scope () != nullptr)
+ {
+ if (!bs.out_eq_src ())
+ {
+ const dir_path& bp (bs.out_path ());
+
+ bool e;
+ if ((e = (d == bp)) || d.sub (bp))
+ {
+ prev_ = &bs;
+ if (e)
+ diff_.clear ();
+ else
+ diff_ = d.leaf (bp);
+ }
+ }
+ }
+ }
+ else
+ prev_ = nullptr;
+
+ return false;
+ }
+
+ static pair<const file*, bool>
+ enter_file_impl (
+ tracer& trace, const char* what,
+ action a, const scope& bs, const target& t,
+ path& fp, bool cache, bool norm,
+ bool insert,
+ bool dynamic,
+ const function<dyndep_rule::map_extension_func>& map_extension,
+ const target_type& fallback,
+ const function<dyndep_rule::prefix_map_func>& get_pfx_map,
+ const dyndep_rule::srcout_map& so_map)
+ {
+ // NOTE: see enter_header() caching logic if changing anything here with
+ // regards to the target and base scope usage.
+
+ assert (!insert || t.ctx.phase == run_phase::match);
+
+ // Find or maybe insert the target.
+ //
+ // If insert is false, then don't consider dynamically-created targets
+ // (i.e., those that are not real or implied) unless dynamic is true, in
+ // which case return the target that would have been inserted.
+ //
+ // The directory is only moved from if insert is true. Note that it must
+ // be normalized.
+ //
+ auto find = [&trace, what, &bs, &t,
+ &map_extension,
+ &fallback] (dir_path&& d,
+ path&& f,
+ bool insert,
+ bool dynamic = false) -> const file*
+ {
+ context& ctx (t.ctx);
+
+ // Split the file into its name part and extension. Here we can assume
+ // the name part is a valid filesystem name.
+ //
+ // Note that if the file has no extension, we record an empty extension
+ // rather than NULL (which would signify that the default extension
+ // should be added).
+ //
+ string e (f.extension ());
+ string n (move (f).string ());
+
+ if (!e.empty ())
+ n.resize (n.size () - e.size () - 1); // One for the dot.
+
+ // See if this directory is part of any project and if so determine
+ // the target type.
+ //
+ // While at it also determine if this target is from the src or out
+ // tree of said project.
+ //
+ dir_path out;
+
+ // It's possible the extension-to-target type mapping is ambiguous (for
+ // example, because both C and C++-language headers use the same .h
+ // extension). In this case we will first try to find one that matches
+ // an explicit target (similar logic to when insert is false).
+ //
+ small_vector<const target_type*, 2> tts;
+
+ // Note that the path can be in out or src directory and the latter
+ // can be associated with multiple scopes. So strictly speaking we
+ // need to pick one that is "associated" with us. But that is still a
+ // TODO (see scope_map::find() for details) and so for now we just
+ // pick the first one (it's highly unlikely the source file extension
+ // mapping will differ based on the configuration).
+ //
+ // Note that we also need to remember the base scope for search() below
+ // (failed that, search_existing_file() will refuse to look).
+ //
+ const scope* s (nullptr);
+ {
+ // While we cannot accurately associate in the general case, we can do
+ // so if the path belongs to this project.
+ //
+ const scope& rs (*bs.root_scope ());
+ bool src (false);
+ if (d.sub (rs.out_path ()) ||
+ (src = (!rs.out_eq_src () && d.sub (rs.src_path ()))))
+ {
+ if (map_extension != nullptr)
+ tts = map_extension (bs, n, e);
+
+ if (src)
+ out = out_src (d, rs);
+
+ s = &bs;
+ }
+ else
+ {
+ const scope& bs (**ctx.scopes.find (d).first);
+ if (const scope* rs = bs.root_scope ())
+ {
+ if (map_extension != nullptr)
+ tts = map_extension (bs, n, e);
+
+ if (!rs->out_eq_src () && d.sub (rs->src_path ()))
+ out = out_src (d, *rs);
+
+ s = &bs;
+ }
+ }
+ }
+
+ // If it is outside any project, or the project doesn't have such an
+ // extension, use the fallback target type.
+ //
+ if (tts.empty ())
+ {
+ // If the project doesn't "know" this extension then we can't possibly
+ // find a real or implied target of this type.
+ //
+ if (!insert && !dynamic)
+ {
+ l6 ([&]{trace << "unknown " << what << ' ' << n << " extension '"
+ << e << "'";});
+ return nullptr;
+ }
+
+ tts.push_back (&fallback);
+ }
+
+ // Find or insert target.
+ //
+ // Note that in case of the target type ambiguity we first try to find
+ // an explicit target that resolves this ambiguity.
+ //
+ const target* r (nullptr);
+
+ if (!insert || tts.size () > 1)
+ {
+ // Note that we skip any target type-specific searches (like for an
+ // existing file) and go straight for the target object since we need
+ // to find the target explicitly spelled out.
+ //
+ // Also, it doesn't feel like we should be able to resolve an absolute
+ // path with a spelled-out extension to multiple targets.
+ //
+ const target* f (nullptr);
+
+ for (size_t i (0), m (tts.size ()); i != m; ++i)
+ {
+ const target_type& tt (*tts[i]);
+
+ if (const target* x = ctx.targets.find (tt, d, out, n, e, trace))
+ {
+ // What would be the harm in reusing a dynamically-inserted target
+ // if there is no buildfile-mentioned one? Probably none (since it
+ // can't be updated) except that it will be racy: sometimes we
+ // will reuse the dynamic, sometimes we will insert a new one. And
+ // we don't like racy.
+ //
+ // Note that we can't only check for real targets and must include
+ // implied ones because pre-entered members of a target group
+ // (e.g., cli.cxx) are implied.
+ //
+ if (operator>= (x->decl, target_decl::implied)) // @@ VC14
+ {
+ r = x;
+ break;
+ }
+ else
+ {
+ // Cache the dynamic target corresponding to tts[0] since that's
+ // what we will be inserting (see below).
+ //
+ if ((insert || dynamic) && i == 0)
+ f = x;
+
+ l6 ([&]{trace << "dynamic target with target type " << tt.name;});
+ }
+ }
+ else
+ l6 ([&]{trace << "no target with target type " << tt.name;});
+ }
+
+ // Note: we can't do this because of the in source builds where there
+ // won't be explicit targets for non-generated files.
+ //
+ // This should be harmless, however, since in our world generated file
+ // are spelled-out as explicit targets. And if not, we will still get
+ // an error, just a bit less specific.
+ //
+#if 0
+ if (r == nullptr && insert)
+ {
+ f = d / n;
+ if (!e.empty ())
+ {
+ f += '.';
+ f += e;
+ }
+
+ diag_record dr (fail);
+ dr << "ambiguous mapping of " << what ' ' << f << " to target type";
+ for (const target_type* tt: tts)
+ dr << info << "could be " << tt->name << "{}";
+ dr << info << "spell-out its target to resolve this ambiguity";
+ }
+#endif
+
+ if (r == nullptr && f != nullptr)
+ r = f;
+ }
+
+ if (r == nullptr && insert)
+ {
+ // Like search(t, pk) but don't fail if the target is in src.
+ //
+ // While it may seem like there is not much difference, the caller may
+ // actually do more than just issue more specific diagnostics. For
+ // example, it may defer the failure to the tool diagnostics.
+ //
+#if 0
+ r = &search (t, *tts[0], d, out, n, &e, s);
+#else
+ prerequisite_key pk {nullopt, {tts[0], &d, &out, &n, move (e)}, s};
+
+ r = pk.tk.type->search (t, pk);
+
+ if (r == nullptr && pk.tk.out->empty ())
+ r = &create_new_target (ctx, pk);
+#endif
+ }
+
+ return static_cast<const file*> (r);
+ };
+
+ // If it's not absolute then it either does not (yet) exist or is a
+ // relative ""-include (see init_args() for details). Reduce the second
+ // case to absolute.
+ //
+ // Note: we now always use absolute path to the translation unit so this
+ // no longer applies. But let's keep it for posterity.
+ //
+ // Also note that we now assume (see cc::compile_rule::enter_header()) a
+ // relative path signifies a generated header.
+ //
+#if 0
+ if (f.relative () && rels.relative ())
+ {
+ // If the relative source path has a directory component, make sure it
+ // matches since ""-include will always start with that (none of the
+ // compilers we support try to normalize this path). Failed that we may
+ // end up searching for a generated header in a random (working)
+ // directory.
+ //
+ const string& fs (f.string ());
+ const string& ss (rels.string ());
+
+ size_t p (path::traits::rfind_separator (ss));
+
+ if (p == string::npos || // No directory.
+ (fs.size () > p + 1 &&
+ path::traits::compare (fs.c_str (), p, ss.c_str (), p) == 0))
+ {
+ path t (work / f); // The rels path is relative to work.
+
+ if (exists (t))
+ f = move (t);
+ }
+ }
+#endif
+
+ const file* pt (nullptr);
+ bool remapped (false);
+
+ // If relative then it does not exist.
+ //
+ if (fp.relative ())
+ {
+ // This is probably as often an error as an auto-generated file, so
+ // trace at level 4.
+ //
+ l4 ([&]{trace << "non-existent " << what << " '" << fp << "'";});
+
+ if (get_pfx_map != nullptr)
+ {
+ fp.normalize ();
+
+ // The relative path might still contain '..' (e.g., ../foo.hxx;
+ // presumably ""-include'ed). We don't attempt to support auto-
+ // generated files with such inclusion styles.
+ //
+ if (fp.normalized ())
+ {
+ const dyndep_rule::prefix_map& pfx_map (get_pfx_map (a, bs, t));
+
+ // First try the whole file. Then just the directory.
+ //
+ // @@ Has to be a separate map since the prefix can be the same as
+ // the file name.
+ //
+ // auto i (pfx_map->find (f));
+
+ // Find the most qualified prefix of which we are a sub-path.
+ //
+ if (!pfx_map.empty ())
+ {
+ dir_path d (fp.directory ());
+ auto p (pfx_map.sup_range (d));
+
+ if (p.first != p.second)
+ {
+ // Note that we can only have multiple entries for the
+ // prefixless mapping.
+ //
+ dir_path pd; // Reuse.
+ for (auto i (p.first); i != p.second; ++i)
+ {
+ // Note: value in pfx_map is not necessarily canonical.
+ //
+ pd = i->second.directory;
+ pd.canonicalize ();
+
+ l4 ([&]{trace << "try prefix '" << d << "' mapped to " << pd;});
+
+ // If this is a prefixless mapping, then only use it if we can
+ // resolve it to an existing target (i.e., it is explicitly
+ // spelled out in a buildfile). @@ Hm, I wonder why, it's not
+ // like we can generate any file without an explicit target.
+ // Maybe for diagnostics (i.e., we will actually try to build
+ // something there instead of just saying no mapping).
+ //
+ if (i->first.empty ())
+ pt = find (pd / d, fp.leaf (), false);
+ else
+ pt = find (pd / d, fp.leaf (), insert, dynamic);
+
+ if (pt != nullptr)
+ {
+ fp = pd / fp;
+ l4 ([&]{trace << "mapped as auto-generated " << fp;});
+ break;
+ }
+ else
+ l4 ([&]{trace << "no explicit target in " << pd;});
+ }
+ }
+ else
+ l4 ([&]{trace << "no prefix map entry for '" << d << "'";});
+ }
+ else
+ l4 ([&]{trace << "prefix map is empty";});
+ }
+ }
+ }
+ else
+ {
+ // Normalize the path unless it is already normalized. This is also
+ // where we handle src-out remap which is not needed if cached.
+ //
+ if (!norm)
+ normalize_external (fp, what);
+
+ if (!cache)
+ {
+ if (!so_map.empty ())
+ {
+ // Find the most qualified prefix of which we are a sub-path.
+ //
+ auto i (so_map.find_sup (fp));
+ if (i != so_map.end ())
+ {
+ // Ok, there is an out tree for this file. Remap to a path from
+ // the out tree and see if there is a target for it. Note that the
+ // value in so_map is not necessarily canonical.
+ //
+ dir_path d (i->second);
+ d /= fp.leaf (i->first).directory ();
+ d.canonicalize ();
+
+ pt = find (move (d), fp.leaf (), false); // d is not moved from.
+
+ if (pt != nullptr)
+ {
+ path p (d / fp.leaf ());
+ l4 ([&]{trace << "remapping " << fp << " to " << p;});
+ fp = move (p);
+ remapped = true;
+ }
+ }
+ }
+ }
+
+ if (pt == nullptr)
+ {
+ l6 ([&]{trace << (insert ? "entering " : "finding ") << fp;});
+ pt = find (fp.directory (), fp.leaf (), insert, dynamic);
+ }
+ }
+
+ return make_pair (pt, remapped);
+ }
+
+ pair<const file*, bool> dyndep_rule::
+ enter_file (tracer& trace, const char* what,
+ action a, const scope& bs, target& t,
+ path& fp, bool cache, bool norm,
+ const function<map_extension_func>& map_ext,
+ const target_type& fallback,
+ const function<prefix_map_func>& pfx_map,
+ const srcout_map& so_map)
+ {
+ return enter_file_impl (trace, what,
+ a, bs, t,
+ fp, cache, norm,
+ true /* insert */, false,
+ map_ext, fallback, pfx_map, so_map);
+ }
+
+ pair<const file*, bool> dyndep_rule::
+ find_file (tracer& trace, const char* what,
+ action a, const scope& bs, const target& t,
+ path& fp, bool cache, bool norm,
+ bool dynamic,
+ const function<map_extension_func>& map_ext,
+ const target_type& fallback,
+ const function<prefix_map_func>& pfx_map,
+ const srcout_map& so_map)
+ {
+ return enter_file_impl (trace, what,
+ a, bs, t,
+ fp, cache, norm,
+ false /* insert */, dynamic,
+ map_ext, fallback, pfx_map, so_map);
+ }
+
+ static pair<const file&, bool>
+ inject_group_member_impl (action a, const scope& bs, mtime_target& g,
+ path f, string n, string e,
+ const target_type& tt,
+ const function<dyndep_rule::group_filter_func>& fl)
+ {
+ // NOTE: see adhoc_rule_regex_pattern::apply_group_members() for a variant
+ // of the same code.
+
+ // Note that we used to directly match such a member with group_recipe.
+ // But that messes up our dependency counts since we don't really know
+ // whether someone will execute such a member.
+ //
+ // So instead we now just link the member up to the group and rely on the
+ // special semantics in match_rule() for groups with the dyn_members flag.
+ //
+ assert ((g.type ().flags & target_type::flag::dyn_members) ==
+ target_type::flag::dyn_members);
+
+ // We expect that nobody else can insert these members (seems reasonable
+ // seeing that their names are dynamically discovered).
+ //
+ auto l (search_new_locked (
+ bs.ctx,
+ tt,
+ f.directory (),
+ dir_path (), // Always in out.
+ move (n),
+ &e,
+ &bs));
+
+ const file& t (l.first.as<file> ()); // Note: non-const only if have lock.
+
+ // We don't need to match the group recipe directly from ad hoc
+ // recipes/rules due to the special semantics for explicit group members
+ // in match_rule(). This is what skip_match is for.
+ if (l.second)
+ {
+ l.first.group = &g;
+ l.second.unlock ();
+ t.path (move (f));
+ return pair<const file&, bool> (t, true);
+ }
+ else
+ {
+ if (fl != nullptr && !fl (g, t))
+ return pair<const file&, bool> (t, false);
+ }
+
+ // Check if we already belong to this group. Note that this is not a mere
+ // optimization since we may be in the member->group->member chain and
+ // trying to lock the member the second time would deadlock (this can be
+ // triggered, for example, by dist, which sort of depends on such members
+ // directly... which was not quite correct and is now fixed).
+ //
+ if (t.group == &g) // Note: atomic.
+ t.path (move (f));
+ else
+ {
+ // This shouldn't normally fail since we are the only ones that should
+ // know about this target (otherwise why is it dynamically discovered).
+ // However, nothing prevents the user from depending on such a target,
+ // however misguided.
+ //
+ target_lock tl (lock (a, t));
+
+ if (!tl)
+ fail << "group " << g << " member " << t << " is already matched" <<
+ info << "dynamically extracted group members cannot be used as "
+ << "prerequisites directly, only via group";
+
+ if (t.group == nullptr)
+ tl.target->group = &g;
+ else if (t.group != &g)
+ fail << "group " << g << " member " << t
+ << " is already member of group " << *t.group;
+
+ t.path (move (f));
+ }
+
+ return pair<const file&, bool> (t, true);
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_group_member (action a, const scope& bs, mtime_target& g,
+ path f,
+ const target_type& tt,
+ const function<group_filter_func>& filter)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ return inject_group_member_impl (a, bs, g,
+ move (f), move (n).string (), move (e),
+ tt,
+ filter);
+ }
+
+ static const target_type&
+ map_target_type (const char* what,
+ const scope& bs,
+ const path& f, const string& n, const string& e,
+ const function<dyndep_rule::map_extension_func>& map_ext,
+ const target_type& fallback)
+ {
+ // Map extension to the target type, falling back to the fallback type.
+ //
+ small_vector<const target_type*, 2> tts;
+ if (map_ext != nullptr)
+ tts = map_ext (bs, n, e);
+
+ // Not sure what else we can do in this case.
+ //
+ if (tts.size () > 1)
+ {
+ diag_record dr (fail);
+
+ dr << "mapping of " << what << " target path " << f
+ << " to target type is ambiguous";
+
+ for (const target_type* tt: tts)
+ dr << info << "can be " << tt->name << "{}";
+ }
+
+ const target_type& tt (tts.empty () ? fallback : *tts.front ());
+
+ if (!tt.is_a<file> ())
+ {
+ fail << what << " target path " << f << " mapped to non-file-based "
+ << "target type " << tt.name << "{}";
+ }
+
+ return tt;
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_group_member (const char* what,
+ action a, const scope& bs, mtime_target& g,
+ path f,
+ const function<map_extension_func>& map_ext,
+ const target_type& fallback,
+ const function<group_filter_func>& filter)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ // Map extension to the target type, falling back to the fallback type.
+ //
+ const target_type& tt (
+ map_target_type (what, bs, f, n.string (), e, map_ext, fallback));
+
+ return inject_group_member_impl (a, bs, g,
+ move (f), move (n).string (), move (e),
+ tt,
+ filter);
+ }
+
+ pair<const file&, bool>
+ inject_adhoc_group_member_impl (action, const scope& bs, target& t,
+ path f, string n, string e,
+ const target_type& tt)
+ {
+ // Assume nobody else can insert these members (seems reasonable seeing
+ // that their names are dynamically discovered).
+ //
+ auto l (search_new_locked (
+ bs.ctx,
+ tt,
+ f.directory (),
+ dir_path (), // Always in out.
+ move (n),
+ &e,
+ &bs));
+
+ file* ft (&l.first.as<file> ()); // Note: non-const only if locked.
+
+ // Skip if this is one of the static targets (or a duplicate of the
+ // dynamic target).
+ //
+ // In particular, we expect to skip all the targets that we could not lock
+ // (e.g., in case all of this has already been done for the previous
+ // operation in a batch; make sure to test `update update update` and
+ // `update clean update ...` batches if changing anything here).
+ //
+ // While at it also find the ad hoc members list tail.
+ //
+ const_ptr<target>* tail (&t.adhoc_member);
+ for (target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ {
+ tail = nullptr;
+ break;
+ }
+
+ tail = &m->adhoc_member;
+ }
+
+ if (tail == nullptr)
+ return pair<const file&, bool> (*ft, false);
+
+ if (!l.second)
+ fail << "dynamic target " << *ft << " already exists and cannot be "
+ << "made ad hoc member of group " << t;
+
+ ft->group = &t;
+ l.second.unlock ();
+
+ // We need to be able to distinguish static targets from dynamic (see the
+ // static set hashing in adhoc_buildscript_rule::apply() for details).
+ //
+ assert (ft->decl != target_decl::real);
+
+ *tail = ft;
+ ft->path (move (f));
+
+ return pair<const file&, bool> (*ft, true);
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_adhoc_group_member (action a, const scope& bs, target& t,
+ path f,
+ const target_type& tt)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ return inject_adhoc_group_member_impl (
+ a, bs, t, move (f), move (n).string (), move (e), tt);
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_adhoc_group_member (const char* what,
+ action a, const scope& bs, target& t,
+ path f,
+ const function<map_extension_func>& map_ext,
+ const target_type& fallback)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ // Map extension to the target type, falling back to the fallback type.
+ //
+ const target_type& tt (
+ map_target_type (what, bs, f, n.string (), e, map_ext, fallback));
+
+
+ return inject_adhoc_group_member_impl (
+ a, bs, t, move (f), move (n).string (), move (e), tt);
+ }
+}
diff --git a/libbuild2/dyndep.hxx b/libbuild2/dyndep.hxx
new file mode 100644
index 0000000..bbda030
--- /dev/null
+++ b/libbuild2/dyndep.hxx
@@ -0,0 +1,304 @@
+// file : libbuild2/dyndep.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_DYNDEP_HXX
+#define LIBBUILD2_DYNDEP_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/forward.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/action.hxx>
+#include <libbuild2/target.hxx>
+
+#include <libbuild2/export.hxx>
+
+// Additional functionality that is normally only useful for implementing
+// rules with dynamic dependencies (usually prerequisites, but also target
+// group members).
+//
+namespace build2
+{
+ class LIBBUILD2_SYMEXPORT dyndep_rule
+ {
+ public:
+ // Update the target during the match phase. Return true if the target has
+ // changed or, if the passed timestamp is not timestamp_unknown, it is
+ // older than the target.
+ //
+ // Note that such a target must still be updated during the execute phase
+ // in order to keep the dependency counts straight.
+ //
+ static bool
+ update (tracer&, action, const target&, timestamp);
+
+ // Update and add to the list of prerequisite targets a prerequisite file
+ // target.
+ //
+ // Return the indication of whether it has changed or, if the passed
+ // timestamp is not timestamp_unknown, is older than this timestamp. If
+ // the prerequisite target does not exist nor can be generated (no rule),
+ // then issue diagnostics and fail if the fail argument is true and return
+ // nullopt otherwise.
+ //
+ // If adhoc is true, then add it as ad hoc to prerequisite targets. At
+ // first it may seem like such dynamic prerequisites should always be ad
+ // hoc. But on the other hand, taking headers as an example, if the same
+ // header is listed as a static prerequisite, it is most definitely not
+ // going to be ad hoc. So we leave it to the caller to make this decision.
+ // Similarly, the data argument is passed to the prerequisite_target ctor.
+ //
+ static optional<bool>
+ inject_file (tracer&, const char* what,
+ action, target&,
+ const file& prerequiste,
+ timestamp,
+ bool fail,
+ bool adhoc = false,
+ uintptr_t data = 0);
+
+ // As above but verify the file is matched with noop_recipe or was updated
+ // during match and issue diagnostics and fail otherwise (regardless of
+ // the fail flag). Pass 0 for pts_n if you don't want the "was updated during
+ // match" part.
+ //
+ // This version (together with verify_existing_file() below) is primarily
+ // useful for handling dynamic dependencies that are produced as a
+ // byproduct of recipe execution (and thus must have all the generated
+ // prerequisites specified statically).
+ //
+ // Note that this function expects all the static prerequisites of the
+ // target to already be matched and their number passed in pts_n.
+ //
+ static optional<bool>
+ inject_existing_file (tracer&, const char* what,
+ action, target&, size_t pts_n,
+ const file& prerequiste,
+ timestamp,
+ bool fail,
+ bool adhoc = false,
+ uintptr_t data = 0);
+
+ // Verify the file is matched with noop_recipe or was updated during match
+ // and issue diagnostics and fail otherwise. If the file is not matched,
+ // then fail if the target is not implied (that is, declared in a
+ // buildfile). Pass 0 for pts_n if you don't want the "was updated during
+ // match" part.
+ //
+ // Note: can only be called in the execute phase.
+ //
+ static void
+ verify_existing_file (tracer&, const char* what,
+ action, const target&, size_t pts_n,
+ const file& prerequiste);
+
+ // Reverse-lookup target type(s) from file name/extension.
+ //
+ // If the list of base target types is specified, then only these types
+ // and those derived from them are considered. Otherwise, any file-based
+ // type is considered but not the file type itself.
+ //
+ // It's possible the extension-to-target type mapping is ambiguous (for
+ // example, because both C and C++-language headers use the same .h
+ // extension). So this function can return multiple target types.
+ //
+ static small_vector<const target_type*, 2>
+ map_extension (const scope& base,
+ const string& name, const string& ext,
+ const target_type* const* bases);
+
+ // Mapping of inclusion prefixes (e.g., foo in #include <foo/bar>) for
+ // auto-generated files to inclusion search paths (e.g. -I) where they
+ // will be generated.
+ //
+ // We are using a prefix map of directories (dir_path_map) instead of just
+ // a map in order to also cover sub-paths (e.g., #include <foo/more/bar>
+ // if we continue with the example). Specifically, we need to make sure we
+ // don't treat foobar as a sub-directory of foo.
+ //
+ // The priority is used to decide who should override whom. Lesser values
+ // are considered higher priority. Note that we allow multiple prefixless
+ // mapping (where priority is used to determine the order). For details,
+ // see append_prefix().
+ //
+ // Note that the keys should be normalized.
+ //
+ struct prefix_value
+ {
+ dir_path directory;
+ size_t priority;
+ };
+
+ using prefix_map = dir_path_multimap<prefix_value>;
+
+ // Add the specified absolute and normalized inclusion search path into
+ // the prefix map of the specified target.
+ //
+ static void
+ append_prefix (tracer&, prefix_map&, const target&, dir_path);
+
+ // Mapping of src inclusion search paths to the corresponding out paths
+ // for auto-generated files re-mapping. See cc::extract_headers() for
+ // background.
+ //
+ // Note that we use path_map instead of dir_path_map to allow searching
+ // using path (file path).
+ //
+ using srcout_map = path_map<dir_path>;
+
+ class LIBBUILD2_SYMEXPORT srcout_builder
+ {
+ public:
+ srcout_builder (context& ctx, srcout_map& map): ctx_ (ctx), map_ (map) {}
+
+ // Process next -I path. Return true if an entry was added to the map,
+ // in which case the passed path is moved from.
+ //
+ bool
+ next (dir_path&&);
+
+ // Skip the previously cached first half.
+ //
+ void
+ skip ()
+ {
+ prev_ = nullptr;
+ }
+
+ private:
+ context& ctx_;
+ srcout_map& map_;
+
+ // Previous -I's innermost scope if out_base plus the difference between
+ // the scope path and the -I path (normally empty).
+ //
+ const scope* prev_ = nullptr;
+ dir_path diff_;
+ };
+
+ // Find or insert a prerequisite file path as a target. If the path is
+ // relative, then assume this is a non-existent generated file.
+ //
+ // Depending on the cache flag, the path is assumed to either have come
+ // from the depdb cache or from the compiler run. If normalized is true,
+ // then assume the absolute path is already normalized.
+ //
+ // Return the file target and an indication of whether it was remapped or
+ // NULL if the file does not exist and cannot be generated. The passed by
+ // reference file path is guaranteed to still be valid but might have been
+ // adjusted (e.g., completed, normalized, remapped, etc). If the result is
+ // not NULL, then it is the absolute and normalized path to the actual
+ // file. If the result is NULL, then it can be used in diagnostics to
+ // identify the original file path.
+ //
+ // The map_extension function is used to reverse-map a file extension to
+ // the target type. The fallback target type is used if it's NULL or
+ // didn't return anything but only in situations where we are sure the
+ // file is or should be there (see the implementation for details).
+ //
+ // The prefix map function is only called if this is a non-existent
+ // generated file (so it can be initialized lazily). If it's NULL, then
+ // generated files will not be supported. The srcout map is only consulted
+ // if cache is false to re-map generated files (so its initialization can
+ // be delayed until the call with cache=false).
+ //
+ using map_extension_func = small_vector<const target_type*, 2> (
+ const scope& base, const string& name, const string& ext);
+
+ using prefix_map_func = const prefix_map& (
+ action, const scope& base, const target&);
+
+ static pair<const file*, bool>
+ enter_file (tracer&, const char* what,
+ action, const scope& base, target&,
+ path& prerequisite, bool cache, bool normalized,
+ const function<map_extension_func>&,
+ const target_type& fallback,
+ const function<prefix_map_func>& = nullptr,
+ const srcout_map& = {});
+
+ // As above but do not insert the target if it doesn't already exist. This
+ // function also returns NULL if the target exists but is dynamic (that
+ // is, not real or implied), unless the dynamic argument is true.
+ //
+ static pair<const file*, bool>
+ find_file (tracer&, const char* what,
+ action, const scope& base, const target&,
+ path& prerequisite, bool cache, bool normalized,
+ bool dynamic,
+ const function<map_extension_func>&,
+ const target_type& fallback,
+ const function<prefix_map_func>& = nullptr,
+ const srcout_map& = {});
+
+ // Find or insert a target file path as a target of the specified type,
+ // make it a member of the specified (non-ad hoc) mtime target group and
+ // set its path. Return the target and an indication of whether it was
+ // made a member (can only be false if a filter is provided; see below).
+ //
+ // The file path must be absolute and normalized. Note that this function
+ // assumes that this member can only be matched via this group. The group
+ // type must have the target_type::flag::dyn_members flag.
+ //
+ // If specified, the group_filter function is called on the target before
+ // making it a group member, skipping it if this function returns false.
+ // Note that the filter is skipped if the target is newly inserted (the
+ // filter is meant to be used to skip duplicates).
+ //
+ using group_filter_func = bool (mtime_target& g, const file&);
+
+ static pair<const file&, bool>
+ inject_group_member (action, const scope& base, mtime_target&,
+ path,
+ const target_type&,
+ const function<group_filter_func>& = nullptr);
+
+ template <typename T>
+ static pair<const T&, bool>
+ inject_group_member (action a, const scope& bs, mtime_target& g,
+ path f,
+ const function<group_filter_func>& filter = nullptr)
+ {
+ auto p (inject_group_member (a, bs, g, move (f), T::static_type, filter));
+ return pair<const T&, bool> (p.first.template as<T> (), p.second);
+ }
+
+ // As above but the target type is determined using the map_extension
+ // function if specified, falling back to the fallback type if unable to
+ // (the what argument is used for diagnostics during this process).
+ //
+ static pair<const file&, bool>
+ inject_group_member (const char* what,
+ action, const scope& base, mtime_target& g,
+ path,
+ const function<map_extension_func>&,
+ const target_type& fallback,
+ const function<group_filter_func>& = nullptr);
+
+
+ // Find or insert a target file path as a target, make it a member of the
+ // specified ad hoc group unless it already is, and set its path. Return
+ // the target and an indication of whether it was added as a member.
+ //
+ // The file path must be absolute and normalized. Note that this function
+ // assumes that this target can only be known as a member of this group.
+ //
+ static pair<const file&, bool>
+ inject_adhoc_group_member (action, const scope& base, target& g,
+ path,
+ const target_type&);
+
+ // As above but the target type is determined using the map_extension
+ // function if specified, falling back to the fallback type if unable to
+ // (the what argument is used for diagnostics during this process).
+ //
+ static pair<const file&, bool>
+ inject_adhoc_group_member (const char* what,
+ action, const scope& base, target& g,
+ path,
+ const function<map_extension_func>&,
+ const target_type& fallback);
+ };
+}
+
+#endif // LIBBUILD2_DYNDEP_HXX
diff --git a/libbuild2/file-cache.cxx b/libbuild2/file-cache.cxx
index 1c1424f..caaf40c 100644
--- a/libbuild2/file-cache.cxx
+++ b/libbuild2/file-cache.cxx
@@ -28,6 +28,8 @@ namespace build2
if (!comp_path_.empty ())
try_rmfile_ignore_error (comp_path_);
+ // Note: state remains uninit until write::close().
+
pin ();
return write (*this);
}
diff --git a/libbuild2/file-cache.hxx b/libbuild2/file-cache.hxx
index d6904ed..98c2b67 100644
--- a/libbuild2/file-cache.hxx
+++ b/libbuild2/file-cache.hxx
@@ -92,7 +92,12 @@ namespace build2
// to the noop implementation.
//
explicit
- file_cache (bool compress = true);
+ file_cache (bool compress);
+
+ file_cache () = default; // Create uninitialized instance.
+
+ void
+ init (bool compress);
class entry;
@@ -114,9 +119,9 @@ namespace build2
// Move-to-NULL-only type.
//
- write (write&&);
+ write (write&&) noexcept;
write (const write&) = delete;
- write& operator= (write&&);
+ write& operator= (write&&) noexcept;
write& operator= (const write&) = delete;
~write ();
@@ -140,9 +145,9 @@ namespace build2
// Move-to-NULL-only type.
//
- read (read&&);
+ read (read&&) noexcept;
read (const read&) = delete;
- read& operator= (read&&);
+ read& operator= (read&&) noexcept;
read& operator= (const read&) = delete;
~read ();
@@ -203,9 +208,9 @@ namespace build2
// Move-to-NULL-only type.
//
- entry (entry&&);
+ entry (entry&&) noexcept;
entry (const entry&) = delete;
- entry& operator= (entry&&);
+ entry& operator= (entry&&) noexcept;
entry& operator= (const entry&) = delete;
~entry ();
diff --git a/libbuild2/file-cache.ixx b/libbuild2/file-cache.ixx
index 8385c90..99be5ad 100644
--- a/libbuild2/file-cache.ixx
+++ b/libbuild2/file-cache.ixx
@@ -65,26 +65,30 @@ namespace build2
}
inline file_cache::entry::
- entry (entry&& e)
+ entry (entry&& e) noexcept
: temporary (e.temporary),
state_ (e.state_),
path_ (move (e.path_)),
comp_path_ (move (e.comp_path_)),
pin_ (e.pin_)
{
+ e.state_ = null;
}
inline file_cache::entry& file_cache::entry::
- operator= (entry&& e)
+ operator= (entry&& e) noexcept
{
if (this != &e)
{
assert (state_ == null);
+
temporary = e.temporary;
state_ = e.state_;
path_ = move (e.path_);
comp_path_ = move (e.comp_path_);
pin_ = e.pin_;
+
+ e.state_ = null;
}
return *this;
}
@@ -105,14 +109,14 @@ namespace build2
}
inline file_cache::write::
- write (write&& e)
+ write (write&& e) noexcept
: entry_ (e.entry_)
{
e.entry_ = nullptr;
}
inline file_cache::write& file_cache::write::
- operator= (write&& e)
+ operator= (write&& e) noexcept
{
if (this != &e)
{
@@ -132,14 +136,14 @@ namespace build2
}
inline file_cache::read::
- read (read&& e)
+ read (read&& e) noexcept
: entry_ (e.entry_)
{
e.entry_ = nullptr;
}
inline file_cache::read& file_cache::read::
- operator= (read&& e)
+ operator= (read&& e) noexcept
{
if (this != &e)
{
@@ -173,9 +177,15 @@ namespace build2
: string ();
}
+ inline void file_cache::
+ init (bool compress)
+ {
+ compress_ = compress;
+ }
+
inline file_cache::
file_cache (bool compress)
- : compress_ (compress)
{
+ init (compress);
}
}
diff --git a/libbuild2/file.cxx b/libbuild2/file.cxx
index c93a86f..1b00662 100644
--- a/libbuild2/file.cxx
+++ b/libbuild2/file.cxx
@@ -7,6 +7,7 @@
#include <iomanip> // left, setw()
#include <sstream>
+#include <libbuild2/rule.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
#include <libbuild2/context.hxx>
@@ -28,6 +29,8 @@ namespace build2
{
// Standard and alternative build file/directory naming schemes.
//
+ extern const dir_path std_export_dir;
+ extern const dir_path alt_export_dir;
// build:
@@ -35,6 +38,7 @@ namespace build2
const dir_path std_root_dir (dir_path (std_build_dir) /= "root");
const dir_path std_bootstrap_dir (dir_path (std_build_dir) /= "bootstrap");
const dir_path std_build_build_dir (dir_path (std_build_dir) /= "build");
+ const dir_path std_export_dir (dir_path (std_build_dir) /= "export");
const path std_root_file (std_build_dir / "root.build");
const path std_bootstrap_file (std_build_dir / "bootstrap.build");
@@ -52,6 +56,7 @@ namespace build2
const dir_path alt_root_dir (dir_path (alt_build_dir) /= "root");
const dir_path alt_bootstrap_dir (dir_path (alt_build_dir) /= "bootstrap");
const dir_path alt_build_build_dir (dir_path (alt_build_dir) /= "build");
+ const dir_path alt_export_dir (dir_path (alt_build_dir) /= "export");
const path alt_root_file (alt_build_dir / "root.build2");
const path alt_bootstrap_file (alt_build_dir / "bootstrap.build2");
@@ -218,7 +223,7 @@ namespace build2
// Checking for plausability feels expensive since we have to recursively
// traverse the directory tree. Note, however, that if the answer is
// positive, then shortly after we will be traversing this tree anyway and
- // presumably this time getting the data from the cash (we don't really
+ // presumably this time getting the data from the cache (we don't really
// care about the negative answer since this is a degenerate case).
//
optional<path> bf;
@@ -306,7 +311,7 @@ namespace build2
{
tracer trace ("source_once");
- if (!once.buildfiles.insert (bf).second)
+ if (!once.root_extra->insert_buildfile (bf))
{
l5 ([&]{trace << "skipping already sourced " << bf;});
return false;
@@ -357,7 +362,7 @@ namespace build2
//
try
{
- for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow))
{
// If this is a link, then type() will try to stat() it. And if the
// link is dangling or points to something inaccessible, it will fail.
@@ -522,10 +527,14 @@ namespace build2
pair<scope&, scope*>
switch_scope (scope& root, const dir_path& out_base, bool proj)
{
+ context& ctx (root.ctx);
+
+ assert (ctx.phase == run_phase::load);
+
// First, enter the scope into the map and see if it is in any project. If
// it is not, then there is nothing else to do.
//
- auto i (root.ctx.scopes.rw (root).insert_out (out_base));
+ auto i (ctx.scopes.rw (root).insert_out (out_base));
scope& base (*i->second.front ());
scope* rs (nullptr);
@@ -546,7 +555,7 @@ namespace build2
// Switch to the new root scope.
//
- if (rs != &root)
+ if (rs != &root && !rs->root_extra->loaded)
load_root (*rs); // Load new root(s) recursively.
// Now we can figure out src_base and finish setting the scope.
@@ -581,37 +590,37 @@ namespace build2
fail << "variable out_root expected as first line in " << f << endf;
}
+ scope::root_extra_type::
+ root_extra_type (scope& root, bool a)
+ : altn (a),
+ loaded (false),
+
+ build_ext (a ? alt_build_ext : std_build_ext),
+ build_dir (a ? alt_build_dir : std_build_dir),
+ buildfile_file (a ? alt_buildfile_file : std_buildfile_file),
+ buildignore_file (a ? alt_buildignore_file : std_buildignore_file),
+ root_dir (a ? alt_root_dir : std_root_dir),
+ bootstrap_dir (a ? alt_bootstrap_dir : std_bootstrap_dir),
+ build_build_dir (a ? alt_build_build_dir : std_build_build_dir),
+ bootstrap_file (a ? alt_bootstrap_file : std_bootstrap_file),
+ root_file (a ? alt_root_file : std_root_file),
+ export_file (a ? alt_export_file : std_export_file),
+ src_root_file (a ? alt_src_root_file : std_src_root_file),
+ out_root_file (a ? alt_out_root_file : std_out_root_file),
+
+ var_pool (&root.ctx, &root.ctx.var_pool.rw (root), nullptr)
+ {
+ root.var_pool_ = &var_pool;
+ }
+
static void
setup_root_extra (scope& root, optional<bool>& altn)
{
assert (altn && root.root_extra == nullptr);
- bool a (*altn);
-
- root.root_extra.reset (
- new scope::root_extra_type {
- nullopt /* project */,
- nullopt /* amalgamation */,
- nullopt /* subprojects */,
- a,
- a ? alt_build_ext : std_build_ext,
- a ? alt_build_dir : std_build_dir,
- a ? alt_buildfile_file : std_buildfile_file,
- a ? alt_buildignore_file : std_buildignore_file,
- a ? alt_root_dir : std_root_dir,
- a ? alt_bootstrap_dir : std_bootstrap_dir,
- a ? alt_build_build_dir : std_build_build_dir,
- a ? alt_bootstrap_file : std_bootstrap_file,
- a ? alt_root_file : std_root_file,
- a ? alt_export_file : std_export_file,
- a ? alt_src_root_file : std_src_root_file,
- a ? alt_out_root_file : std_out_root_file,
- {}, /* meta_operations */
- {}, /* operations */
- {}, /* modules */
- {}, /* override_cache */
- {}, /* target_types */
- {}, /* environment */
- ""} /* environment_checksum */);
+
+ context& ctx (root.ctx);
+
+ root.root_extra.reset (new scope::root_extra_type (root, *altn));
// Enter built-in meta-operation and operation names. Loading of
// modules (via the src bootstrap; see below) can result in
@@ -621,9 +630,9 @@ namespace build2
root.insert_meta_operation (perform_id, mo_perform);
root.insert_meta_operation (info_id, mo_info);
- root.insert_operation (default_id, op_default);
- root.insert_operation (update_id, op_update);
- root.insert_operation (clean_id, op_clean);
+ root.insert_operation (default_id, op_default, nullptr);
+ root.insert_operation (update_id, op_update, ctx.var_update);
+ root.insert_operation (clean_id, op_clean, ctx.var_clean);
}
value&
@@ -842,10 +851,26 @@ namespace build2
try
{
- for (const dir_entry& de: dir_iterator (d, true /* ignore_dangling */))
+ // It's probably possible that a subproject can be a symlink with the
+ // link target, for example, being in a git submodule. Considering that,
+ // it makes sense to warn about dangling symlinks.
+ //
+ for (const dir_entry& de:
+ dir_iterator (d, dir_iterator::detect_dangling))
{
if (de.type () != entry_type::directory)
+ {
+ if (de.type () == entry_type::unknown)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / de.path ();
+ }
+
continue;
+ }
dir_path sd (d / path_cast<dir_path> (de.path ()));
@@ -913,7 +938,9 @@ namespace build2
}
void
- bootstrap_src (scope& rs, optional<bool>& altn)
+ bootstrap_src (scope& rs, optional<bool>& altn,
+ optional<dir_path> aovr,
+ bool sovr)
{
tracer trace ("bootstrap_src");
@@ -943,13 +970,15 @@ namespace build2
rs.root_extra->project = nullptr;
rs.root_extra->amalgamation = nullptr;
rs.root_extra->subprojects = nullptr;
+
+ assert (!aovr || aovr->empty ());
}
// We assume that bootstrap out cannot load this file explicitly. It
// feels wrong to allow this since that makes the whole bootstrap
// process hard to reason about. But we may try to bootstrap the same
// root scope multiple time.
//
- else if (rs.buildfiles.insert (bf).second)
+ else if (rs.root_extra->insert_buildfile (bf))
{
// Extract the project name and amalgamation variable value so that
// we can make them available while loading bootstrap.build.
@@ -985,7 +1014,13 @@ namespace build2
const project_name pn (cast<project_name> (move (*pv)));
rs.root_extra->project = &pn;
- if (av && (av->null || av->empty ()))
+ // @@ We will still have original values in the variables during
+ // bootstrap. Not sure what we can do about that. But it seems
+ // harmless.
+ //
+ if (aovr)
+ rs.root_extra->amalgamation = aovr->empty () ? nullptr : &*aovr;
+ else if (av && (av->null || av->empty ()))
rs.root_extra->amalgamation = nullptr;
{
@@ -1005,6 +1040,13 @@ namespace build2
fail << "variable " << *ctx.var_amalgamation << " expected as a "
<< "second line in " << bf;
}
+
+ // Replace the value if overridden.
+ //
+ // Note that root_extra::amalgamation will be re-pointed below.
+ //
+ if (aovr)
+ rs.vars.assign (ctx.var_amalgamation) = move (*aovr);
}
else
{
@@ -1071,6 +1113,12 @@ namespace build2
// no been configured. In this case falling through is what we want.
}
}
+ else if (v)
+ {
+ if (cast<dir_path> (v).absolute ())
+ fail << "absolute directory in variable " << *ctx.var_amalgamation
+ << " value";
+ }
// Do additional checks if the outer root could be our amalgamation.
//
@@ -1129,6 +1177,14 @@ namespace build2
auto rp (rs.vars.insert (*ctx.var_subprojects)); // Set NULL by default.
value& v (rp.first);
+ if (!sovr)
+ {
+ if (rp.second)
+ rp.second = false; // Keep NULL.
+ else
+ v = nullptr; // Make NULL.
+ }
+
if (rp.second)
{
// No subprojects set so we need to figure out if there are any.
@@ -1285,9 +1341,9 @@ namespace build2
// Call module's post-boot functions.
//
- for (size_t i (0); i != root.root_extra->modules.size (); ++i)
+ for (size_t i (0); i != root.root_extra->loaded_modules.size (); ++i)
{
- module_state& s (root.root_extra->modules[i]);
+ module_state& s (root.root_extra->loaded_modules[i]);
if (s.boot_post != nullptr)
boot_post_module (root, s);
@@ -1328,7 +1384,7 @@ namespace build2
}
void
- create_bootstrap_outer (scope& root)
+ create_bootstrap_outer (scope& root, bool subp)
{
context& ctx (root.ctx);
@@ -1376,7 +1432,7 @@ namespace build2
setup_root (rs, forwarded (root, out_root, v.as<dir_path> (), altn));
bootstrap_pre (rs, altn);
- bootstrap_src (rs, altn);
+ bootstrap_src (rs, altn, nullopt, subp);
// bootstrap_post() delayed until after create_bootstrap_outer().
}
else
@@ -1387,7 +1443,7 @@ namespace build2
rs.assign (ctx.var_forwarded) = true; // Only upgrade (see main()).
}
- create_bootstrap_outer (rs);
+ create_bootstrap_outer (rs, subp);
if (!bstrapped)
bootstrap_post (rs);
@@ -1475,22 +1531,19 @@ namespace build2
}
void
- load_root (scope& root)
+ load_root (scope& root,
+ const function<void (parser&)>& pre,
+ const function<void (parser&)>& post)
{
tracer trace ("load_root");
- context& ctx (root.ctx);
-
- const dir_path& out_root (root.out_path ());
- const dir_path& src_root (root.src_path ());
-
- // As an optimization, check if we have already loaded root.build. If
- // that's the case, then we have already been called for this project.
- //
- path f (src_root / root.root_extra->root_file);
-
- if (root.buildfiles.find (f) != root.buildfiles.end ())
+ if (root.root_extra->loaded)
+ {
+ assert (pre == nullptr && post == nullptr);
return;
+ }
+
+ context& ctx (root.ctx);
if (ctx.no_external_modules)
fail << "attempt to load project " << root << " after skipped loading "
@@ -1499,18 +1552,19 @@ namespace build2
// First load outer roots, if any.
//
if (scope* rs = root.parent_scope ()->root_scope ())
- load_root (*rs);
+ if (!rs->root_extra->loaded)
+ load_root (*rs);
// Finish off initializing bootstrapped modules (before mode).
//
// Note that init() can load additional modules invalidating iterators.
//
auto init_modules =
- [&root, n = root.root_extra->modules.size ()] (module_boot_init v)
+ [&root, n = root.root_extra->loaded_modules.size ()] (module_boot_init v)
{
for (size_t i (0); i != n; ++i)
{
- module_state& s (root.root_extra->modules[i]);
+ module_state& s (root.root_extra->loaded_modules[i]);
if (s.boot_init && *s.boot_init == v)
init_module (root, root, s.name, s.loc);
@@ -1530,6 +1584,11 @@ namespace build2
// Load hooks and root.build.
//
+ const dir_path& out_root (root.out_path ());
+ const dir_path& src_root (root.src_path ());
+
+ path f (src_root / root.root_extra->root_file);
+
// We can load the pre hooks before finishing off loading the bootstrapped
// modules (which, in case of config would load config.build) or after and
// one can come up with a plausible use-case for either approach. Note,
@@ -1545,10 +1604,22 @@ namespace build2
//
parser p (ctx, load_stage::root);
+ if (pre != nullptr)
+ {
+ pre (p);
+ p.reset ();
+ }
+
if (he) {source_hooks (p, root, hd, true /* pre */); p.reset ();}
if (fe) {source_once (p, root, root, f, root);}
if (he) {p.reset (); source_hooks (p, root, hd, false /* pre */);}
+ if (post != nullptr)
+ {
+ p.reset ();
+ post (p);
+ }
+
// Finish off initializing bootstrapped modules (after mode).
//
{
@@ -1639,7 +1710,7 @@ namespace build2
if (const value& v = *l)
{
storage.clear ();
- auto ns (reverse (v, storage));
+ auto ns (reverse (v, storage, true /* reduce */));
if (f == "multiline")
{
@@ -1654,6 +1725,8 @@ namespace build2
dr << left << setw (static_cast<int> (pad)) << n << " [null]";
}
}
+
+ root.root_extra->loaded = true;
}
scope&
@@ -1690,7 +1763,8 @@ namespace build2
if (load)
{
- load_root (rs);
+ if (!rs.root_extra->loaded)
+ load_root (rs);
setup_base (i, out_root, src_root); // Setup as base.
}
@@ -1745,8 +1819,10 @@ namespace build2
}
// Extract metadata for an executable target by executing it with the
- // --build2-metadata option. In case of an error, issue diagnostics and fail
- // if opt is false and return nullopt if it's true.
+ // --build2-metadata option. Key is the target name (and not necessarily the
+ // same as the metadata variable prefix in export.metadata; e.g., openbsd-m4 and
+ // openbsd_m4). In case of an error, issue diagnostics and fail if opt is
+ // false and return nullopt if it's true.
//
// Note that loading of the metadata is split into two steps, extraction and
// parsing, because extraction also serves as validation that the executable
@@ -1810,7 +1886,9 @@ namespace build2
try
{
// Note: not using run_*() functions since need to be able to suppress
- // all errors, including inability to exec.
+ // all errors, including abnormal termination, inability to exec, etc.,
+ // in case of optional import. Also, no need to buffer diagnostics since
+ // we are in the serial load.
//
if (verb >= 3)
print_process (args);
@@ -1870,10 +1948,19 @@ namespace build2
return r;
if (!opt)
- error (loc) << "invalid metadata signature in " << args[0]
- << " output" <<
+ {
+ diag_record dr;
+ dr << error (loc) << "invalid metadata signature in " << args[0]
+ << " output" <<
info << "expected '" << s << "'";
+ if (verb >= 1 && verb <= 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+ }
+
goto fail;
}
@@ -1889,16 +1976,27 @@ namespace build2
if (pr.wait ())
{
if (!opt)
- error (loc) << "unable to read metadata from " << args[0];
+ error (loc) << "io error reading metadata from " << args[0];
}
else
{
// The child process presumably issued diagnostics but if it didn't,
- // the result will be very confusing. So let's issue something
- // generic for good measure.
+ // the result will be very confusing. So let's issue something generic
+ // for good measure. But also make it consistent with diagnostics
+ // issued by run_finish().
//
if (!opt)
- error (loc) << "unable to extract metadata from " << args[0];
+ {
+ diag_record dr;
+ dr << error (loc) << "unable to extract metadata from " << args[0] <<
+ info << "process " << args[0] << " " << *pr.exit;
+
+ if (verb >= 1 && verb <= 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+ }
}
goto fail;
@@ -1914,8 +2012,7 @@ namespace build2
goto fail;
}
- fail:
-
+ fail:
if (opt)
{
metadata_cache.insert (pp.effect_string (), true);
@@ -1950,8 +2047,9 @@ namespace build2
static void
import_suggest (const diag_record& dr,
const project_name& pn,
- const target_type& tt,
+ const target_type* tt,
const string& tn,
+ bool rule_hint,
const char* qual = nullptr)
{
string pv (pn.variable ());
@@ -1964,15 +2062,19 @@ namespace build2
// Suggest ad hoc import but only if it's a path-based target (doing it
// for lib{} is very confusing).
//
- if (tt.is_a<path_target> ())
+ if (tt != nullptr && tt->is_a<path_target> ())
{
- string v (tt.is_a<exe> () && (pv == tn || pn == tn)
+ string v (tt->is_a<exe> () && (pv == tn || pn == tn)
? "config." + pv
- : "config.import." + pv + '.' + tn + '.' + tt.name);
+ : "config.import." + pv + '.' + tn + '.' + tt->name);
dr << info << "or use " << v << " configuration variable to specify "
<< "its " << (qual != nullptr ? qual : "") << "path";
}
+
+ if (rule_hint)
+ dr << info << "or use rule_hint attribute to specify a rule that can "
+ << "find this target";
}
// Return the processed target name as well as the project directory, if
@@ -1987,6 +2089,9 @@ namespace build2
// Return empty name if an ad hoc import resulted in a NULL target (only
// allowed if optional is true).
//
+ // Note that this function has a side effect of potentially marking some
+ // config.import.* variables as used.
+ //
pair<name, optional<dir_path>>
import_search (bool& new_value,
scope& ibase,
@@ -2018,6 +2123,9 @@ namespace build2
//
// 4. Normal import.
//
+ // @@ PERF: in quite a few places (local, subproject) we could have
+ // returned the scope and saved on bootstrap in import_load().
+ //
if (tgt.unqualified ())
{
if (tgt.directory () && tgt.relative ())
@@ -2025,6 +2133,8 @@ namespace build2
if (tgt.absolute ())
{
+ // Ad hoc import.
+ //
// Actualize the directory to be analogous to the config.import.<proj>
// case (which is of abs_dir_path type).
//
@@ -2041,7 +2151,7 @@ namespace build2
fail (loc) << "project-local importation of target " << tgt
<< " from an unnamed project";
- tgt.proj = pn;
+ tgt.proj = pn; // Reduce to normal import.
return make_pair (move (tgt), optional<dir_path> (iroot.out_path ()));
}
@@ -2073,7 +2183,9 @@ namespace build2
// over anything that we may discover. In particular, we will prefer it
// over any bundled subprojects.
//
- auto& vp (iroot.var_pool ());
+ // Note: go straight for the public variable pool.
+ //
+ auto& vp (iroot.var_pool (true /* public */));
using config::lookup_config;
@@ -2243,7 +2355,8 @@ namespace build2
auto df = make_diag_frame (
[&proj, tt, &on] (const diag_record& dr)
{
- import_suggest (dr, proj, *tt, on, "alternative ");
+ import_suggest (
+ dr, proj, tt, on, false, "alternative ");
});
md = extract_metadata (e->process_path (),
@@ -2366,6 +2479,8 @@ namespace build2
{
tracer trace ("import_load");
+ uint64_t metav (meta ? 1 : 0); // Metadata version.
+
// We end up here in two cases: Ad hoc import, in which case name is
// unqualified and absolute and path is a base, not necessarily root. And
// normal import, in which case name must be project-qualified and path is
@@ -2428,14 +2543,51 @@ namespace build2
}
}
+ // First check the cache.
+ //
+ using import_key = context::import_key;
+
+ auto cache_find = [&ctx, &tgt, metav] (dir_path& out_root) ->
+ const pair<names, const scope&>*
+ {
+ import_key k {move (out_root), move (tgt), metav};
+
+ auto i (ctx.import_cache.find (k));
+ if (i != ctx.import_cache.end ())
+ return &i->second;
+
+ out_root = move (k.out_root);
+ tgt = move (k.target);
+
+ return nullptr;
+ };
+
+ if (proj)
+ {
+ if (const auto* r = cache_find (out_root))
+ return *r;
+ }
+
+ dir_path cache_out_root;
+
// Clear current project's environment.
//
auto_project_env penv (nullptr);
+ // Note: this loop does at most two iterations.
+ //
for (const scope* proot (nullptr); ; proot = root)
{
bool top (proot == nullptr);
+ // Check the cache for the subproject.
+ //
+ if (!top && proj)
+ {
+ if (const auto* r = cache_find (out_root))
+ return *r;
+ }
+
root = create_root (ctx, out_root, src_root)->second.front ();
bool bstrapped (bootstrapped (*root));
@@ -2514,6 +2666,8 @@ namespace build2
if (i != ps->end ())
{
+ cache_out_root = move (out_root);
+
const dir_path& d ((*i).second);
altn = nullopt;
out_root = root->out_path () / d;
@@ -2525,9 +2679,69 @@ namespace build2
fail (loc) << out_root << " is not out_root for " << *proj;
}
+ // Buildfile importation is quite different so handle it separately.
+ //
+ // Note that we don't need to load the project in this case.
+ //
+ // @@ For now we don't out-qualify the resulting target to be able to
+ // re-import it ad hoc (there is currently no support for out-qualified
+ // ad hoc import). Feels like this should be harmless since it's just a
+ // glorified path to a static file that nobody is actually going to use
+ // as a target (e.g., to depend upon).
+ //
+ if (tgt.type == "buildfile")
+ {
+ auto add_ext = [&altn] (string& n)
+ {
+ if (path_traits::find_extension (n) == string::npos)
+ {
+ if (n != (*altn ? alt_buildfile_file : std_buildfile_file).string ())
+ {
+ n += ".";
+ n += *altn ? alt_build_ext : std_build_ext;
+ }
+ }
+ };
+
+ if (proj)
+ {
+ name n;
+
+ n.dir = move (src_root);
+ n.dir /= *altn ? alt_export_dir : std_export_dir;
+ if (!tgt.dir.empty ())
+ {
+ n.dir /= tgt.dir;
+ n.dir.normalize ();
+ }
+
+ n.type = tgt.type;
+ n.value = tgt.value;
+ add_ext (n.value);
+
+ pair<names, const scope&> r (names {move (n)}, *root);
+
+ // Cache.
+ //
+ if (cache_out_root.empty ())
+ cache_out_root = move (out_root);
+
+ ctx.import_cache.emplace (
+ import_key {move (cache_out_root), move (tgt), metav}, r);
+
+ return r;
+ }
+ else
+ {
+ add_ext (tgt.value);
+ return pair<names, const scope&> (names {move (tgt)}, *root);
+ }
+ }
+
// Load the imported root scope.
//
- load_root (*root);
+ if (!root->root_extra->loaded)
+ load_root (*root);
// If this is a normal import, then we go through the export stub.
//
@@ -2542,6 +2756,9 @@ namespace build2
// "Pass" the imported project's roots to the stub.
//
+ if (cache_out_root.empty ())
+ cache_out_root = out_root;
+
ts.assign (ctx.var_out_root) = move (out_root);
ts.assign (ctx.var_src_root) = move (src_root);
@@ -2557,7 +2774,7 @@ namespace build2
// Pass the metadata compatibility version in import.metadata.
//
if (meta)
- ts.assign (ctx.var_import_metadata) = uint64_t (1);
+ ts.assign (ctx.var_import_metadata) = metav;
// Load the export stub. Note that it is loaded in the context of the
// importing project, not the imported one. The export stub will
@@ -2583,7 +2800,7 @@ namespace build2
});
parser p (ctx);
- v = p.parse_export_stub (ifs, path_name (es), gs, ts);
+ v = p.parse_export_stub (ifs, path_name (es), *root, gs, ts);
}
// If there were no export directive executed in an export stub,
@@ -2593,7 +2810,14 @@ namespace build2
fail (loc) << "target " << tgt << " is not exported by project "
<< *proj;
- return pair<names, const scope&> (move (v), *root);
+ pair<names, const scope&> r (move (v), *root);
+
+ // Cache.
+ //
+ ctx.import_cache.emplace (
+ import_key {move (cache_out_root), move (tgt), metav}, r);
+
+ return r;
}
catch (const io_error& e)
{
@@ -2650,10 +2874,13 @@ namespace build2
}
}
+ static names
+ import2_buildfile (context&, names&&, bool, const location&);
+
pair<names, import_kind>
import (scope& base,
name tgt,
- bool ph2,
+ const optional<string>& ph2,
bool opt,
bool metadata,
const location& loc)
@@ -2677,11 +2904,10 @@ namespace build2
//
if (metadata)
{
- pair<const target*, import_kind> r (
+ import_result<target> r (
import_direct (base, move (tgt), ph2, opt, metadata, loc));
- return make_pair (r.first != nullptr ? r.first->as_name () : names {},
- r.second);
+ return make_pair (move (r.name), r.kind);
}
pair<name, optional<dir_path>> r (
@@ -2712,18 +2938,30 @@ namespace build2
//
if (ns.back ().qualified ())
{
- if (ph2)
+ if (ns.back ().type == "buildfile")
+ {
+ assert (ph2);
+ ns = import2_buildfile (ctx, move (ns), opt && !r.second, loc);
+ }
+ else if (ph2)
{
// This is tricky: we only want the optional semantics for the
// fallback case.
//
- if (const target* t = import (ctx,
- base.find_prerequisite_key (ns, loc),
- opt && !r.second /* optional */,
- nullopt /* metadata */,
- false /* existing */,
- loc))
+ if (const target* t = import2 (ctx,
+ base.find_prerequisite_key (ns, loc),
+ *ph2,
+ opt && !r.second /* optional */,
+ nullopt /* metadata */,
+ false /* existing */,
+ loc))
+ {
+ // Note that here r.first was still project-qualified and we
+ // have no choice but to call as_name(). This shouldn't cause
+ // any problems since the import2() call assigns the extension.
+ //
ns = t->as_name ();
+ }
else
ns.clear (); // NULL
}
@@ -2747,21 +2985,82 @@ namespace build2
}
const target*
- import (context& ctx,
- const prerequisite_key& pk,
- bool opt,
- const optional<string>& meta,
- bool exist,
- const location& loc)
+ import2 (context& ctx,
+ const prerequisite_key& pk,
+ const string& hint,
+ bool opt,
+ const optional<string>& meta,
+ bool exist,
+ const location& loc)
{
- tracer trace ("import");
+ tracer trace ("import2");
- assert (!meta || !exist);
+ // Neither hint nor metadata can be requested for existing.
+ //
+ assert (!exist || (!meta && hint.empty ()));
assert (pk.proj);
const project_name& proj (*pk.proj);
- // Target type-specific search.
+ // Note that if this function returns a target, it should have the
+ // extension assigned (like the find/insert_target() functions) so that
+ // as_name() returns a stable name.
+
+ // Rule-specific resolution.
+ //
+ if (!hint.empty ())
+ {
+ assert (pk.scope != nullptr);
+
+ // Note: similar to/inspired by match_rule().
+ //
+ // Search scopes outwards, stopping at the project root.
+ //
+ for (const scope* s (pk.scope);
+ s != nullptr;
+ s = s->root () ? nullptr : s->parent_scope ())
+ {
+ // We only look for rules that are registered for perform(update).
+ //
+ if (const operation_rule_map* om = s->rules[perform_id])
+ {
+ if (const target_type_rule_map* ttm = (*om)[update_id])
+ {
+ // Ignore the target type the rules are registered for (this is
+ // about prerequisite types, not target).
+ //
+ // @@ Note that the same rule could be registered for several
+ // types which means we will keep calling it repeatedly.
+ //
+ for (const auto& p: *ttm)
+ {
+ const name_rule_map& nm (p.second);
+
+ // Filter against the hint.
+ //
+ for (auto p (nm.find_sub (hint)); p.first != p.second; ++p.first)
+ {
+ const string& n (p.first->first);
+ const rule& r (p.first->second);
+
+ auto df = make_diag_frame (
+ [&pk, &n](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while importing " << pk << " using rule "
+ << n;
+ });
+
+ if (const target* t = r.import (pk, meta, loc))
+ return t;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Builtin resolution for certain target types.
//
const target_key& tk (pk.tk);
const target_type& tt (*tk.type);
@@ -2814,8 +3113,7 @@ namespace build2
auto df = make_diag_frame (
[&proj, &tt, &tk] (const diag_record& dr)
{
- import_suggest (
- dr, proj, tt, *tk.name, "alternative ");
+ import_suggest (dr, proj, &tt, *tk.name, false, "alternative ");
});
if (!(md = extract_metadata (pp, *meta, opt, loc)))
@@ -2824,6 +3122,9 @@ namespace build2
if (!t || *t == nullptr)
{
+ // Note: we need the lock because process_path() call below is not
+ // MT-safe.
+ //
pair<target&, ulock> r (insert_target (trace, ctx, tt, p));
t = &r.first;
@@ -2855,29 +3156,108 @@ namespace build2
dr << info << "consider adding its installation location" <<
info << "or explicitly specify its project name";
else
- import_suggest (dr, proj, tt, *tk.name);
+ // Use metadata as proxy for immediate import.
+ //
+ import_suggest (dr, proj, &tt, *tk.name, meta && hint.empty ());
dr << endf;
}
- pair<const target*, import_kind>
+ static names
+ import2_buildfile (context&, names&& ns, bool opt, const location& loc)
+ {
+ tracer trace ("import2_buildfile");
+
+ assert (ns.size () == 1);
+ name n (move (ns.front ()));
+
+ // Our approach doesn't work for targets without a project so let's fail
+ // hard, even if optional.
+ //
+ if (!n.proj || n.proj->empty ())
+ fail (loc) << "unable to import target " << n << " without project name";
+
+ while (!build_install_buildfile.empty ()) // Breakout loop.
+ {
+ path f (build_install_buildfile /
+ dir_path (n.proj->string ()) /
+ n.dir /
+ n.value);
+
+ // See if we need to try with extensions.
+ //
+ bool ext (path_traits::find_extension (n.value) == string::npos &&
+ n.value != std_buildfile_file.string () &&
+ n.value != alt_buildfile_file.string ());
+
+ if (ext)
+ {
+ f += '.';
+ f += std_build_ext;
+ }
+
+ if (!exists (f))
+ {
+ l6 ([&]{trace << "tried " << f;});
+
+ if (ext)
+ {
+ f.make_base ();
+ f += '.';
+ f += alt_build_ext;
+
+ if (!exists (f))
+ {
+ l6 ([&]{trace << "tried " << f;});
+ break;
+ }
+ }
+ else
+ break;
+ }
+
+ // Split the path into the target.
+ //
+ ns = {name (f.directory (), move (n.type), f.leaf ().string ())};
+ return move (ns);
+ }
+
+ if (opt)
+ return names {};
+
+ diag_record dr;
+ dr << fail (loc) << "unable to import target " << n;
+
+ import_suggest (dr, *n.proj, nullptr /* tt */, n.value, false);
+
+ if (build_install_buildfile.empty ())
+ dr << info << "no exported buildfile installation location is "
+ << "configured in build2";
+ else
+ dr << info << "exported buildfile installation location is "
+ << build_install_buildfile;
+
+ dr << endf;
+ }
+
+ import_result<target>
import_direct (bool& new_value,
scope& base,
name tgt,
- bool ph2,
+ const optional<string>& ph2,
bool opt,
bool metadata,
const location& loc,
const char* what)
{
- // This is like normal import() except we return the target rather than
+ // This is like normal import() except we return the target in addition to
// its name.
//
tracer trace ("import_direct");
l5 ([&]{trace << tgt << " from " << base << " for " << what;});
- assert ((!opt || ph2) && (!metadata || ph2));
+ assert ((!opt || ph2) && (!metadata || ph2) && tgt.type != "buildfile");
context& ctx (base.ctx);
assert (ctx.phase == run_phase::load);
@@ -2886,7 +3266,7 @@ namespace build2
//
auto meta (metadata ? optional<string> (tgt.value) : nullopt);
- names ns;
+ names ns, rns;
import_kind k;
const target* pt (nullptr);
@@ -2909,7 +3289,7 @@ namespace build2
if (r.first.empty ())
{
assert (opt);
- return make_pair (pt, k); // NULL
+ return import_result<target> {nullptr, {}, k}; // NULL
}
else if (r.first.qualified ())
{
@@ -2920,31 +3300,44 @@ namespace build2
// This is tricky: we only want the optional semantics for the
// fallback case.
//
- pt = import (ctx,
- base.find_prerequisite_key (ns, loc),
- opt && !r.second,
- meta,
- false /* existing */,
- loc);
+ pt = import2 (ctx,
+ base.find_prerequisite_key (ns, loc),
+ *ph2,
+ opt && !r.second,
+ meta,
+ false /* existing */,
+ loc);
}
if (pt == nullptr)
- return make_pair (pt, k); // NULL
+ return import_result<target> {nullptr, {}, k}; // NULL
+
+ // Note that here r.first was still project-qualified and we have no
+ // choice but to call as_name() (below). This shouldn't cause any
+ // problems since the import2() call assigns the extension.
- // Otherwise fall through.
+ // Fall through.
}
else
+ {
+ // It's a bit fuzzy in which cases we end up here. So for now we keep
+ // the original if it's absolute and call as_name() otherwise.
+ //
+ if (r.first.absolute ())
+ rns.push_back (r.first);
+
ns.push_back (move (r.first)); // And fall through.
+ }
}
else
{
k = r.first.absolute () ? import_kind::adhoc : import_kind::normal;
- ns = import_load (base.ctx, move (r), metadata, loc).first;
+ rns = ns = import_load (base.ctx, move (r), metadata, loc).first;
}
if (pt == nullptr)
{
- // Similar logic to perform's search().
+ // Similar logic to perform's search(). Note: modifies ns.
//
target_key tk (base.find_target_key (ns, loc));
pt = ctx.targets.find (tk, trace);
@@ -2952,6 +3345,9 @@ namespace build2
fail (loc) << "unknown imported target " << tk;
}
+ if (rns.empty ())
+ rns = pt->as_name ();
+
target& t (pt->rw ()); // Load phase.
// Note that if metadata is requested via any of the import*() functions,
@@ -2963,7 +3359,10 @@ namespace build2
// The export.metadata value should start with the version followed by
// the metadata variable prefix.
//
- lookup l (t.vars[ctx.var_export_metadata]);
+ // Note: lookup on target, not target::vars since it could come from
+ // the group (think lib{} metadata).
+ //
+ lookup l (t[ctx.var_export_metadata]);
if (l && !l->empty ())
{
const names& ns (cast<names> (l));
@@ -2996,13 +3395,15 @@ namespace build2
const string& pfx (ns[1].value);
- auto& vp (ctx.var_pool.rw ()); // Load phase.
-
// See if we have the stable program name in the <var-prefix>.name
// variable. If its missing, set it to the metadata key (i.e., target
// name as imported) by default.
//
{
+ // Note: go straight for the public variable pool.
+ //
+ auto& vp (ctx.var_pool.rw ()); // Load phase.
+
value& nv (t.assign (vp.insert (pfx + ".name")));
if (!nv)
nv = *meta;
@@ -3023,18 +3424,43 @@ namespace build2
fail (loc) << "no metadata for imported target " << t;
}
- return make_pair (pt, k);
+ return import_result<target> {pt, move (rns), k};
+ }
+
+ path
+ import_buildfile (scope& bs, name n, bool opt, const location& loc)
+ {
+ names r (import (bs,
+ move (n),
+ string () /* phase2 */,
+ opt,
+ false /* metadata */,
+ loc).first);
+
+ path p;
+ if (!r.empty ()) // Optional not found.
+ {
+ // Note: see also parse_import().
+ //
+ assert (r.size () == 1); // See import_load() for details.
+ name& n (r.front ());
+ p = n.dir / n.value; // Should already include extension.
+ }
+ else
+ assert (opt);
+
+ return p;
}
ostream&
- operator<< (ostream& o, const pair<const exe*, import_kind>& p)
+ operator<< (ostream& o, const import_result<exe>& r)
{
- assert (p.first != nullptr);
+ assert (r.target != nullptr);
- if (p.second == import_kind::normal)
- o << *p.first;
+ if (r.kind == import_kind::normal)
+ o << *r.target;
else
- o << p.first->process_path ();
+ o << r.target->process_path ();
return o;
}
@@ -3074,13 +3500,23 @@ namespace build2
//
mkdir (d / std_build_dir, verbosity);
+ auto diag = [verbosity] (const path& f)
+ {
+ if (verb >= verbosity)
+ {
+ if (verb >= 2)
+ text << "cat >" << f;
+ else if (verb)
+ print_diag ("save", f);
+ }
+ };
+
// Write build/bootstrap.build.
//
{
path f (d / std_bootstrap_file);
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
@@ -3126,8 +3562,7 @@ namespace build2
{
path f (d / std_root_file);
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
@@ -3175,8 +3610,7 @@ namespace build2
{
path f (d / std_build_dir / "config.build"); // std_config_file
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
@@ -3201,8 +3635,7 @@ namespace build2
{
path f (d / std_buildfile_file);
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
diff --git a/libbuild2/file.hxx b/libbuild2/file.hxx
index 1c8e57e..6c5097d 100644
--- a/libbuild2/file.hxx
+++ b/libbuild2/file.hxx
@@ -17,7 +17,30 @@
namespace build2
{
class lexer;
-
+ class parser;
+
+ // The following filesystem entries in the build/ subdirectory are reserved
+ // by the build2 core:
+ //
+ // build/ -- build2 core-internal build state (e.g., recipes)
+ // bootstrap/ -- bootstrap state and hooks
+ // bootstrap.build -- bootstrap buildfile
+ // root/ -- root load hooks
+ // root.build -- root buildfile
+ // export.build -- export stub
+ // export/ -- exported buildfiles
+ //
+ // The build/, bootstrap/, root/, and config.build entries are in .gitignore
+ // as generated by bdep-new.
+ //
+ // The rest of the filesystem entries are shared between the project and the
+ // modules that it loads. In particular, if a project loads module named
+ // <mod>, then the <mod>.build, <mod>/, *.<mod> entries (spelled in any
+ // case) are reserved to this module and should not be used by the project
+ // unless explicitly allowed by the module. By convention, <mod>/build/ is
+ // for module-internal build state (e.g., C++ modules side-build) and is
+ // .gitignore'ed.
+ //
LIBBUILD2_SYMEXPORT extern const dir_path std_build_dir; // build/
// build/root.build
@@ -54,7 +77,7 @@ namespace build2
find_src_root (const dir_path&, optional<bool>& altn);
// The same as above but for project's out. Note that we also check whether
- // a directory happens to be src_root, in case this is an in-tree build with
+ // a directory happens to be src_root, in case this is an in-source build with
// the result returned as the second half of the pair. Note also that if the
// input is normalized/actualized, then the output will be as well.
//
@@ -115,10 +138,11 @@ namespace build2
bool
source_once (scope& root, scope& base, const path&);
- // As above but checks against the specified scope rather than base.
+ // As above but checks against the specified root scope rather than this
+ // root scope.
//
LIBBUILD2_SYMEXPORT bool
- source_once (scope& root, scope& base, const path&, scope& once);
+ source_once (scope& root, scope& base, const path&, scope& once_root);
// Create project's root scope. Only set the src_root variable if the passed
// src_root value is not empty.
@@ -182,8 +206,15 @@ namespace build2
// Bootstrap the project's root scope, the src part.
//
+ // If amalgamation is present, then use the specified directory as the
+ // amalgamation instead of discovering or extracting it from bootstrap.build
+ // (use empty directory to disable amalgamation). If subprojects is false,
+ // then do not discover or extract subprojects.
+ //
LIBBUILD2_SYMEXPORT void
- bootstrap_src (scope& root, optional<bool>& altn);
+ bootstrap_src (scope& root, optional<bool>& altn,
+ optional<dir_path> amalgamation = nullopt,
+ bool subprojects = true);
// Return true if this scope has already been bootstrapped, that is, the
// following calls have already been made:
@@ -205,10 +236,11 @@ namespace build2
bootstrap_post (scope& root);
// Create and bootstrap outer root scopes, if any. Loading is done by
- // load_root().
+ // load_root(). If subprojects is false, then do not discover or extract
+ // subprojects.
//
LIBBUILD2_SYMEXPORT void
- create_bootstrap_outer (scope& root);
+ create_bootstrap_outer (scope& root, bool subprojects = true);
// Create and bootstrap inner root scopes, if any, recursively.
//
@@ -224,8 +256,13 @@ namespace build2
// loaded. Also make sure all outer root scopes are loaded prior to loading
// this root scope.
//
+ // If pre/post functions are specified, they are called before/after
+ // pre/post hooks, respectively.
+ //
LIBBUILD2_SYMEXPORT void
- load_root (scope& root);
+ load_root (scope& root,
+ const function<void (parser&)>& pre = nullptr,
+ const function<void (parser&)>& post = nullptr);
// Extract the specified variable value from a buildfile. It is expected to
// be the first non-blank/comment line and not to rely on any variable
@@ -309,10 +346,14 @@ namespace build2
// original; see the config.import.<proj>.<name>[.<type>] logic for details)
// in which case it should still be passed to import phase 2.
//
- // If phase2 is true then the phase 2 is performed right away (we call it
- // immediate import). Note that if optional is true, phase2 must be true as
- // well (and thus there is no rule-specific logic for optional imports). In
- // case of optional, empty names value is retuned if nothing was found.
+ // If phase2 is present then the phase 2 is performed right away (we call it
+ // immediate import). Note that if optional is true, phase2 must be present
+ // as well (and thus there is no rule-specific logic for optional imports).
+ // In case of optional, empty names value is returned if nothing was found.
+ // The value in phase2 is the optional rule hint that, if not empty, will be
+ // used to lookup a rule that will be asked to resolve the qualified target
+ // (see rule::import()). If it is empty, then built-in resolution logic will
+ // be used for some target types (currently only exe{}).
//
// If metadata is true, then load the target metadata. In this case phase2
// must be true as well.
@@ -320,7 +361,9 @@ namespace build2
// Note also that we return names rather than a single name: while normally
// it will be a single target name, it can be an out-qualified pair (if
// someone wants to return a source target) but it can also be a non-target
- // since we don't restrict what users can import/export.
+ // since we don't restrict what users can import/export. If name has
+ // buildfile type, then the result is an absolute buildfile target to be
+ // included (once) at the point of importation.
//
// Finally, note that import is (and should be kept) idempotent or, more
// precisely, "accumulatively idempotent" in that additional steps may be
@@ -331,7 +374,7 @@ namespace build2
LIBBUILD2_SYMEXPORT pair<names, import_kind>
import (scope& base,
name,
- bool phase2,
+ const optional<string>& phase2,
bool optional,
bool metadata,
const location&);
@@ -339,33 +382,53 @@ namespace build2
// Import phase 2.
//
const target&
- import (context&, const prerequisite_key&);
+ import2 (context&, const prerequisite_key&);
// As above but import the target "here and now" without waiting for phase 2
// (and thus omitting any rule-specific logic). This version of import is,
// for example, used by build system modules to perform an implicit import
// of the corresponding tool.
//
- // If phase2 is false, then the second phase's fallback/default logic is
+ // If phase2 is absent, then the second phase's fallback/default logic is
// only invoked if the import was ad hoc (i.e., a relative path was
// specified via config.import.<proj>.<name>[.<type>]) with NULL returned
// otherwise.
//
- // If phase2 is true and optional is true, then NULL is returned instead of
- // failing if phase 2 could not find anything.
+ // If phase2 is present and optional is true, then NULL is returned instead
+ // of failing if phase 2 could not find anything.
//
// If metadata is true, then load the target metadata. In this case phase2
- // must be true as well.
+ // must be present as well.
//
// The what argument specifies what triggered the import (for example,
// "module load") and is used in diagnostics.
//
- // This function also returns the kind of import that was performed.
+ // This function also returns the stable exported target name (see
+ // target::as_name() for details) as well as the kind of import that was
+ // performed.
+ //
+ // Note: cannot be used to import buildfile targets (use import_buildfile()
+ // instead).
+ //
+ template <typename T>
+ struct import_result
+ {
+ const T* target;
+ names name;
+ import_kind kind;
+ };
+
+ // Print import_direct<exe>() result either as a target for a normal import
+ // or as a process path for ad hoc and fallback imports. Normally used in
+ // build system modules to print the configuration report.
//
- pair<const target*, import_kind>
+ LIBBUILD2_SYMEXPORT ostream&
+ operator<< (ostream&, const import_result<exe>&);
+
+ import_result<target>
import_direct (scope& base,
name,
- bool phase2,
+ const optional<string>& phase2,
bool optional,
bool metadata,
const location&,
@@ -376,37 +439,44 @@ namespace build2
// details. Note that a phase 2 fallback/default logic is not considered new
// (though this can be easily adjusted based on import kind).
//
- LIBBUILD2_SYMEXPORT pair<const target*, import_kind>
+ LIBBUILD2_SYMEXPORT import_result<target>
import_direct (bool& new_value,
scope& base,
name,
- bool phase2,
+ const optional<string>& phase2,
bool optional,
bool metadata,
const location&,
const char* what = "import");
+ // As above but also cast the target and pass phase2 as bool (primarily
+ // for use in build system modules).
+ //
template <typename T>
- pair<const T*, import_kind>
+ import_result<T>
import_direct (scope&,
name, bool, bool, bool,
const location&, const char* = "import");
template <typename T>
- pair<const T*, import_kind>
+ import_result<T>
import_direct (bool&,
scope&,
name,
bool, bool, bool,
const location&, const char* = "import");
- // Print import_direct<exe>() result either as a target for a normal import
- // or as a process path for ad hoc and fallback imports. Normally used in
- // build system modules to print the configuration report.
+ // The import_direct() equivalent for importing buildfile targets. Return
+ // empty name if optional and not found. Note that the returned file path is
+ // not necessarily checked for existence so sourcing it may still fail.
//
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const pair<const exe*, import_kind>&);
+ // Note also that this function can be used for an ad hoc import by passing
+ // an absolute target name as would be returned by the normal import (can be
+ // useful for importing own buildfiles).
+ //
+ LIBBUILD2_SYMEXPORT path
+ import_buildfile (scope& base, name, bool optional, const location&);
// As import phase 2 but only imports as an already existing target. But
// unlike it, this function can be called during the load and execute
@@ -462,7 +532,7 @@ namespace build2
const optional<string>& config_file, // Ad hoc config.build contents.
bool buildfile, // Create root buildfile.
const char* who, // Who is creating it.
- uint16_t verbosity = 1); // Diagnostic verbosity.
+ uint16_t verbosity); // Diagnostic verbosity.
}
#include <libbuild2/file.ixx>
diff --git a/libbuild2/file.ixx b/libbuild2/file.ixx
index bd138a0..dc39bcb 100644
--- a/libbuild2/file.ixx
+++ b/libbuild2/file.ixx
@@ -22,15 +22,16 @@ namespace build2
}
LIBBUILD2_SYMEXPORT const target*
- import (context&,
- const prerequisite_key&,
- bool optional_,
- const optional<string>& metadata, // False or metadata key.
- bool existing,
- const location&);
+ import2 (context&,
+ const prerequisite_key&,
+ const string& hint,
+ bool optional_,
+ const optional<string>& metadata, // False or metadata key.
+ bool existing,
+ const location&);
inline const target&
- import (context& ctx, const prerequisite_key& pk)
+ import2 (context& ctx, const prerequisite_key& pk)
{
assert (ctx.phase == run_phase::match);
@@ -39,13 +40,13 @@ namespace build2
// Looks like the only way to do this is to keep location in name and
// then in prerequisite. Perhaps one day...
//
- return *import (ctx, pk, false, nullopt, false, location ());
+ return *import2 (ctx, pk, string (), false, nullopt, false, location ());
}
- inline pair<const target*, import_kind>
+ inline import_result<target>
import_direct (scope& base,
name tgt,
- bool ph2, bool opt, bool md,
+ const optional<string>& ph2, bool opt, bool md,
const location& loc, const char* w)
{
bool dummy (false);
@@ -53,33 +54,50 @@ namespace build2
}
template <typename T>
- inline pair<const T*, import_kind>
+ inline import_result<T>
import_direct (scope& base,
name tgt,
bool ph2, bool opt, bool md,
const location& loc, const char* w)
{
- auto r (import_direct (base, move (tgt), ph2, opt, md, loc, w));
- return make_pair (r.first != nullptr ? &r.first->as<const T> () : nullptr,
- r.second);
+ auto r (import_direct (base,
+ move (tgt),
+ ph2 ? optional<string> (string ()) : nullopt,
+ opt,
+ md,
+ loc,
+ w));
+ return import_result<T> {
+ r.target != nullptr ? &r.target->as<const T> () : nullptr,
+ move (r.name),
+ r.kind};
}
template <typename T>
- inline pair<const T*, import_kind>
+ inline import_result<T>
import_direct (bool& nv,
scope& base,
name tgt,
bool ph2, bool opt, bool md,
const location& loc, const char* w)
{
- auto r (import_direct (nv, base, move (tgt), ph2, opt, md, loc, w));
- return make_pair (r.first != nullptr ? &r.first->as<const T> () : nullptr,
- r.second);
+ auto r (import_direct (nv,
+ base,
+ move (tgt),
+ ph2 ? optional<string> (string ()) : nullopt,
+ opt,
+ md,
+ loc,
+ w));
+ return import_result<T> {
+ r.target != nullptr ? &r.target->as<const T> () : nullptr,
+ move (r.name),
+ r.kind};
}
inline const target*
import_existing (context& ctx, const prerequisite_key& pk)
{
- return import (ctx, pk, false, nullopt, true, location ());
+ return import2 (ctx, pk, string (), false, nullopt, true, location ());
}
}
diff --git a/libbuild2/filesystem.cxx b/libbuild2/filesystem.cxx
index fbe145c..f340dd7 100644
--- a/libbuild2/filesystem.cxx
+++ b/libbuild2/filesystem.cxx
@@ -15,7 +15,12 @@ namespace build2
touch (context& ctx, const path& p, bool create, uint16_t v)
{
if (verb >= v)
- text << "touch " << p;
+ {
+ if (verb >= 2)
+ text << "touch " << p;
+ else if (verb)
+ print_diag ("touch", p);
+ }
if (ctx.dry_run)
return;
@@ -50,25 +55,30 @@ namespace build2
// We don't want to print the command if the directory already exists.
// This makes the below code a bit ugly.
//
- mkdir_status ms;
+ auto print = [v, &d] (bool ovr)
+ {
+ if (verb >= v || ovr)
+ {
+ if (verb >= 2)
+ text << "mkdir " << d;
+ else if (verb)
+ print_diag ("mkdir", d);
+ }
+ };
+ mkdir_status ms;
try
{
ms = try_mkdir (d);
}
catch (const system_error& e)
{
- if (verb >= v)
- text << "mkdir " << d;
-
+ print (true);
fail << "unable to create directory " << d << ": " << e << endf;
}
if (ms == mkdir_status::success)
- {
- if (verb >= v)
- text << "mkdir " << d;
- }
+ print (false);
return ms;
}
@@ -79,25 +89,30 @@ namespace build2
// We don't want to print the command if the directory already exists.
// This makes the below code a bit ugly.
//
- mkdir_status ms;
+ auto print = [v, &d] (bool ovr)
+ {
+ if (verb >= v || ovr)
+ {
+ if (verb >= 2)
+ text << "mkdir -p " << d;
+ else if (verb)
+ print_diag ("mkdir -p", d);
+ }
+ };
+ mkdir_status ms;
try
{
ms = try_mkdir_p (d);
}
catch (const system_error& e)
{
- if (verb >= v)
- text << "mkdir -p " << d;
-
+ print (true);
fail << "unable to create directory " << d << ": " << e << endf;
}
if (ms == mkdir_status::success)
- {
- if (verb >= v)
- text << "mkdir -p " << d;
- }
+ print (false);
return ms;
}
@@ -106,7 +121,12 @@ namespace build2
mvfile (const path& f, const path& t, uint16_t v)
{
if (verb >= v)
- text << "mv " << f << ' ' << t;
+ {
+ if (verb >= 2)
+ text << "mv " << f << ' ' << t;
+ else if (verb)
+ print_diag ("mv", f, t);
+ }
try
{
@@ -126,10 +146,18 @@ namespace build2
fs_status<rmfile_status>
rmsymlink (context& ctx, const path& p, bool d, uint16_t v)
{
- auto print = [&p, v] ()
+ auto print = [&p, v] (bool ovr)
{
- if (verb >= v)
- text << "rm " << p.string ();
+ if (verb >= v || ovr)
+ {
+ // Note: strip trailing directory separator (but keep as path for
+ // relative).
+ //
+ if (verb >= 2)
+ text << "rm " << p.string ();
+ else if (verb)
+ print_diag ("rm", p.to_directory () ? path (p.string ()) : p);
+ }
};
rmfile_status rs;
@@ -144,12 +172,12 @@ namespace build2
}
catch (const system_error& e)
{
- print ();
+ print (true);
fail << "unable to remove symlink " << p.string () << ": " << e << endf;
}
if (rs == rmfile_status::success)
- print ();
+ print (false);
return rs;
}
@@ -166,7 +194,12 @@ namespace build2
return rmdir_status::not_exist;
if (verb >= v)
- text << "rmdir -r " << d;
+ {
+ if (verb >= 2)
+ text << "rmdir -r " << d;
+ else if (verb)
+ print_diag ("rmdir -r", d);
+ }
if (!ctx.dry_run)
{
@@ -258,7 +291,7 @@ namespace build2
{
try
{
- for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow))
{
// The .buildignore filesystem entry should be of the regular file
// type.
@@ -323,4 +356,59 @@ namespace build2
fail << "unable to set path " << p << " permissions: " << e;
}
}
+
+ void
+ normalize_external (path& f, const char* what)
+ {
+ // The main motivating case for this logic are C/C++ headers.
+ //
+ // Interestingly, on most platforms and with most compilers (Clang on
+ // Linux being a notable exception) most system/compiler headers are
+ // already normalized.
+ //
+ path_abnormality a (f.abnormalities ());
+ if (a != path_abnormality::none)
+ {
+ // While we can reasonably expect this path to exist, things do go south
+ // from time to time (like compiling under wine with file wlantypes.h
+ // included as WlanTypes.h).
+ //
+ try
+ {
+ // If we have any parent components, then we have to verify the
+ // normalized path matches realized.
+ //
+ path r;
+ if ((a & path_abnormality::parent) == path_abnormality::parent)
+ {
+ r = f;
+ r.realize ();
+ }
+
+ try
+ {
+ f.normalize ();
+
+ // Note that we might still need to resolve symlinks in the
+ // normalized path.
+ //
+ if (!r.empty () && f != r && path (f).realize () != r)
+ f = move (r);
+ }
+ catch (const invalid_path&)
+ {
+ assert (!r.empty ()); // Shouldn't have failed if no `..`.
+ f = move (r); // Fallback to realize.
+ }
+ }
+ catch (const invalid_path&)
+ {
+ fail << "invalid " << what << " path '" << f.string () << "'";
+ }
+ catch (const system_error& e)
+ {
+ fail << "invalid " << what << " path '" << f.string () << "': " << e;
+ }
+ }
+ }
}
diff --git a/libbuild2/filesystem.hxx b/libbuild2/filesystem.hxx
index ee7ba9a..7b45a08 100644
--- a/libbuild2/filesystem.hxx
+++ b/libbuild2/filesystem.hxx
@@ -22,6 +22,8 @@
//
namespace build2
{
+ using butl::entry_type;
+
using butl::auto_rmfile;
using butl::auto_rmdir;
@@ -73,10 +75,10 @@ namespace build2
using mkdir_status = butl::mkdir_status;
LIBBUILD2_SYMEXPORT fs_status<mkdir_status>
- mkdir (const dir_path&, uint16_t verbosity = 1);
+ mkdir (const dir_path&, uint16_t verbosity);
LIBBUILD2_SYMEXPORT fs_status<mkdir_status>
- mkdir_p (const dir_path&, uint16_t verbosity = 1);
+ mkdir_p (const dir_path&, uint16_t verbosity);
// Rename a file (or file symlink) overwriting the destination if exists.
//
@@ -166,7 +168,7 @@ namespace build2
//
LIBBUILD2_SYMEXPORT fs_status<mkdir_status>
mkdir_buildignore (context&,
- const dir_path&, const path&, uint16_t verbosity = 1);
+ const dir_path&, const path&, uint16_t verbosity);
// Return true if the directory is empty or only contains the .buildignore
// file. Fail if the directory doesn't exist.
@@ -189,6 +191,35 @@ namespace build2
LIBBUILD2_SYMEXPORT void
path_perms (const path&, permissions);
+
+ // Normalize an absolute path to an existing file that may reside outside of
+ // any project and could involve funny filesystem business (e.g., relative
+ // directory symlinks). For example, a C/C++ header path returned by a
+ // compiler which could be a system header.
+ //
+ // We used to just normalize such a path but that could result in an invalid
+ // path (e.g., for some system/compiler headers on CentOS 7 with Clang 3.4)
+ // because of the symlinks (if a directory component is a symlink, then any
+ // following `..` are resolved relative to the target; see path::normalize()
+ // for background).
+ //
+ // Initially, to fix this, we realized (i.e., realpath(3)) it instead. But
+ // that turned out also not to be quite right since now we have all the
+ // symlinks resolved: conceptually it feels correct to keep the original
+ // header names since that's how the user chose to arrange things and
+ // practically this is how compilers see/report them (e.g., the GCC module
+ // mapper).
+ //
+ // So now we have a pretty elaborate scheme where we try to use the
+ // normalized path if possible and fallback to realized. Normalized paths
+ // will work for situations where `..` does not cross symlink boundaries,
+ // which is the sane case. And for the insane case we only really care
+ // about out-of-project files (i.e., system/compiler headers). In other
+ // words, if you have the insane case inside your project, then you are on
+ // your own.
+ //
+ LIBBUILD2_SYMEXPORT void
+ normalize_external (path&, const char* what);
}
#include <libbuild2/filesystem.ixx>
diff --git a/libbuild2/filesystem.txx b/libbuild2/filesystem.txx
index 7404532..afdb48d 100644
--- a/libbuild2/filesystem.txx
+++ b/libbuild2/filesystem.txx
@@ -1,8 +1,6 @@
// file : libbuild2/filesystem.txx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <type_traits> // is_base_of
-
#include <libbuild2/diagnostics.hxx>
namespace build2
@@ -15,16 +13,17 @@ namespace build2
// We don't want to print the command if we couldn't remove the file
// because it does not exist (just like we don't print the update command
- // if the file is up to date). This makes the below code a bit ugly.
+ // if the file is up to date). But we always want to print some command
+ // before we issue diagnostics. This makes the below code a bit ugly.
//
- auto print = [&f, &t, v] ()
+ auto print = [&f, &t, v] (bool ovr)
{
- if (verb >= v)
+ if (verb >= v || ovr)
{
if (verb >= 2)
text << "rm " << f;
else if (verb)
- text << "rm " << t;
+ print_diag ("rm", t); // T can be target or path.
}
};
@@ -38,12 +37,12 @@ namespace build2
}
catch (const system_error& e)
{
- print ();
+ print (true);
fail << "unable to remove file " << f << ": " << e << endf;
}
if (rs == rmfile_status::success)
- print ();
+ print (false);
return rs;
}
@@ -56,16 +55,17 @@ namespace build2
// We don't want to print the command if we couldn't remove the directory
// because it does not exist (just like we don't print mkdir if it already
- // exists) or if it is not empty. This makes the below code a bit ugly.
+ // exists) or if it is not empty. But we always want to print some command
+ // before we issue diagnostics. This makes the below code a bit ugly.
//
- auto print = [&d, &t, v] ()
+ auto print = [&d, &t, v] (bool ovr)
{
- if (verb >= v)
+ if (verb >= v || ovr)
{
if (verb >= 2)
text << "rmdir " << d;
else if (verb)
- text << (std::is_base_of<dir_path, T>::value ? "rmdir " : "rm ") << t;
+ print_diag ("rmdir", t); // T can be target or dir_path.
}
};
@@ -79,7 +79,7 @@ namespace build2
}
catch (const system_error& e)
{
- print ();
+ print (true);
fail << "unable to remove directory " << d << ": " << e << endf;
}
@@ -87,14 +87,14 @@ namespace build2
{
case rmdir_status::success:
{
- print ();
+ print (false);
break;
}
case rmdir_status::not_empty:
{
if (verb >= v && verb >= 2)
{
- text << d << " is "
+ info << d << " is "
<< (w ? "current working directory" : "not empty")
<< ", not removing";
}
diff --git a/libbuild2/forward.hxx b/libbuild2/forward.hxx
index 4c9a50f..057ab24 100644
--- a/libbuild2/forward.hxx
+++ b/libbuild2/forward.hxx
@@ -26,6 +26,7 @@ namespace build2
struct variable;
class variable_pool;
+ class variable_patterns;
class variable_map;
struct variable_override;
using variable_overrides = vector<variable_override>;
@@ -79,6 +80,10 @@ namespace build2
//
struct attribute;
struct attributes;
+
+ // <libbuild2/depdb.hxx>
+ //
+ class depdb;
}
#endif // LIBBUILD2_FORWARD_HXX
diff --git a/libbuild2/function.cxx b/libbuild2/function.cxx
index eaf3f9e..528b396 100644
--- a/libbuild2/function.cxx
+++ b/libbuild2/function.cxx
@@ -213,7 +213,7 @@ namespace build2
if (f->arg_types[i] &&
*f->arg_types[i] == nullptr &&
args[i].type != nullptr)
- untypify (args[i]);
+ untypify (args[i], true /* reduce */);
}
}
@@ -349,8 +349,10 @@ namespace build2
// Static-initialize the function map and populate with builtin functions.
//
+ void bool_functions (function_map&); // functions-bool.cxx
void builtin_functions (function_map&); // functions-builtin.cxx
void filesystem_functions (function_map&); // functions-filesystem.cxx
+ void integer_functions (function_map&); // functions-integer.cxx
void name_functions (function_map&); // functions-name.cxx
void path_functions (function_map&); // functions-path.cxx
void process_functions (function_map&); // functions-process.cxx
@@ -363,8 +365,10 @@ namespace build2
void
insert_builtin_functions (function_map& m)
{
+ bool_functions (m);
builtin_functions (m);
filesystem_functions (m);
+ integer_functions (m);
name_functions (m);
path_functions (m);
process_functions (m);
diff --git a/libbuild2/function.hxx b/libbuild2/function.hxx
index 81ece89..cda856a 100644
--- a/libbuild2/function.hxx
+++ b/libbuild2/function.hxx
@@ -4,8 +4,9 @@
#ifndef LIBBUILD2_FUNCTION_HXX
#define LIBBUILD2_FUNCTION_HXX
-#include <utility> // index_sequence
-#include <type_traits> // aligned_storage
+#include <cstddef> // max_align_t
+#include <utility> // index_sequence
+#include <type_traits> // is_*
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
@@ -133,8 +134,8 @@ namespace build2
// Auxiliary data storage. Note that it is expected to be trivially
// copyable and destructible.
//
- std::aligned_storage<sizeof (void*) * 3>::type data;
- static const size_t data_size = sizeof (decltype (data));
+ static const size_t data_size = sizeof (void*) * 3;
+ alignas (std::max_align_t) unsigned char data[data_size];
function_overload (const char* an,
size_t mi, size_t ma, types ts,
@@ -952,7 +953,8 @@ namespace build2
// Low-level interface that can be used to pass additional data.
//
- // Note that the call to this function sidesteps the thunk.
+ // Note that the call to this function sidesteps the thunk. One notable
+ // consequence of this is that the values are not checked for NULL.
//
template <typename D, typename... A>
void
diff --git a/libbuild2/function.test.cxx b/libbuild2/function.test.cxx
index b09e4f7..37ed5ff 100644
--- a/libbuild2/function.test.cxx
+++ b/libbuild2/function.test.cxx
@@ -44,13 +44,13 @@ namespace build2
// Fake build system driver, default verbosity.
//
init_diag (1);
- init (nullptr, argv[0]);
+ init (nullptr, argv[0], true);
// Serial execution.
//
scheduler sched (1);
global_mutexes mutexes (1);
- file_cache fcache;
+ file_cache fcache (true);
context ctx (sched, mutexes, fcache);
auto& functions (ctx.functions);
@@ -115,7 +115,7 @@ namespace build2
else if (!a.empty ())
{
names storage;
- cout << reverse (a, storage);
+ cout << reverse (a, storage, true /* reduce */);
}
cout << endl;
}
@@ -124,7 +124,9 @@ namespace build2
try
{
- scope& s (ctx.global_scope.rw ());
+ // Use temp scope for the private variable pool.
+ //
+ temp_scope s (ctx.global_scope.rw ());
parser p (ctx);
p.parse_buildfile (cin, path_name ("buildfile"), &s, s);
diff --git a/libbuild2/functions-bool.cxx b/libbuild2/functions-bool.cxx
new file mode 100644
index 0000000..1d9c72f
--- /dev/null
+++ b/libbuild2/functions-bool.cxx
@@ -0,0 +1,23 @@
+// file : libbuild2/functions-bool.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+using namespace std;
+
+namespace build2
+{
+ void
+ bool_functions (function_map& m)
+ {
+ function_family f (m, "bool");
+
+ // $string(<bool>)
+ //
+ // Note that we don't handle NULL values for this type since it has no
+ // empty representation.
+ //
+ f["string"] += [](bool b) {return b ? "true" : "false";};
+ }
+}
diff --git a/libbuild2/functions-builtin.cxx b/libbuild2/functions-builtin.cxx
index 2adff38..378ffbc 100644
--- a/libbuild2/functions-builtin.cxx
+++ b/libbuild2/functions-builtin.cxx
@@ -11,6 +11,27 @@ using namespace std;
namespace build2
{
+ // Note: not static since used by type-specific sort() implementations.
+ //
+ bool
+ functions_sort_flags (optional<names> fs)
+ {
+ bool r (false);
+ if (fs)
+ {
+ for (name& f: *fs)
+ {
+ string s (convert<string> (move (f)));
+
+ if (s == "dedup")
+ r = true;
+ else
+ throw invalid_argument ("invalid flag '" + s + '\'');
+ }
+ }
+ return r;
+ };
+
void
builtin_functions (function_map& m)
{
@@ -39,7 +60,7 @@ namespace build2
fail << "visibility() called out of scope" << endf;
const variable* var (
- s->ctx.var_pool.find (convert<string> (move (name))));
+ s->var_pool ().find (convert<string> (move (name))));
return (var != nullptr
? optional<string> (to_string (var->visibility))
@@ -52,13 +73,6 @@ namespace build2
f["identity"] += [](value* v) {return move (*v);};
- // string
- //
- f["string"] += [](bool b) {return b ? "true" : "false";};
- f["string"] += [](int64_t i) {return to_string (i);};
- f["string"] += [](uint64_t i) {return to_string (i);};
- f["string"] += [](name n) {return to_string (n);};
-
// Quote a value returning its string representation. If escape is true,
// then also escape (with a backslash) the quote characters being added
// (this is useful if the result will be re-parsed, for example as a
@@ -69,12 +83,12 @@ namespace build2
if (v->null)
return string ();
- untypify (*v); // Reverse to names.
+ untypify (*v, true /* reduce */); // Reverse to names.
ostringstream os;
to_stream (os,
v->as<names> (),
- true /* quote */,
+ quote_mode::normal,
'@' /* pair */,
escape && convert<bool> (move (*escape)));
return os.str ();
diff --git a/libbuild2/functions-filesystem.cxx b/libbuild2/functions-filesystem.cxx
index ef7bfc5..1acb3d1 100644
--- a/libbuild2/functions-filesystem.cxx
+++ b/libbuild2/functions-filesystem.cxx
@@ -7,6 +7,7 @@
#include <libbuild2/variable.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -29,12 +30,27 @@ namespace build2
return true;
};
+ auto dangling = [] (const dir_entry& de)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << de.base () / de.path ();
+
+ return true;
+ };
+
// Print paths "as is" in the diagnostics.
//
try
{
if (pattern.absolute ())
- path_search (pattern, add);
+ path_search (pattern,
+ add,
+ dir_path () /* start */,
+ path_match_flags::follow_symlinks,
+ dangling);
else
{
// An absolute start directory must be specified for the relative
@@ -54,7 +70,11 @@ namespace build2
<< "' is relative";
}
- path_search (pattern, add, *start);
+ path_search (pattern,
+ add,
+ *start,
+ path_match_flags::follow_symlinks,
+ dangling);
}
}
catch (const system_error& e)
@@ -83,7 +103,7 @@ namespace build2
function_family f (m, "filesystem");
- // path_search
+ // $path_search(<pattern> [, <start-dir>])
//
// Return filesystem paths that match the pattern. If the pattern is an
// absolute path, then the start directory is ignored (if present).
@@ -91,6 +111,9 @@ namespace build2
//
// Note that this function is not pure.
//
+ // @@ In the future we may want to add a flag that controls the
+ // dangling/inaccessible treatment.
+ //
{
auto e (f.insert ("path_search", false));
@@ -115,6 +138,5 @@ namespace build2
convert<dir_path> (move (start)));
};
}
-
}
}
diff --git a/libbuild2/functions-integer.cxx b/libbuild2/functions-integer.cxx
new file mode 100644
index 0000000..a634ae9
--- /dev/null
+++ b/libbuild2/functions-integer.cxx
@@ -0,0 +1,181 @@
+// file : libbuild2/functions-integer.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+using namespace std;
+
+namespace build2
+{
+ extern bool
+ functions_sort_flags (optional<names>); // functions-builtin.cxx
+
+ static const char hex_digits[] = "0123456789abcdef";
+
+ static string
+ to_string (uint64_t i, optional<value> base, optional<value> width)
+ {
+ uint64_t b (base ? convert<uint64_t> (move (*base)) : 10);
+ size_t w (width
+ ? static_cast<size_t> (convert<uint64_t> (move (*width)))
+ : 0);
+
+ // One day we can switch to C++17 std::to_chars().
+ //
+ string r;
+ switch (b)
+ {
+ case 10:
+ {
+ r = to_string (i);
+ if (w > r.size ())
+ r.insert (0, w - r.size (), '0');
+ break;
+ }
+ case 16:
+ {
+ r.reserve (18);
+ r += "0x";
+
+ for (size_t j (64); j != 0; )
+ {
+ j -= 4;
+ size_t d ((i >> j) & 0x0f);
+
+ // Omit leading zeros but watch out for the i==0 corner case.
+ //
+ if (d != 0 || r.size () != 2 || j == 0)
+ r += hex_digits[d];
+ }
+
+ if (w > r.size () - 2)
+ r.insert (2, w - (r.size () - 2), '0');
+
+ break;
+ }
+ default:
+ throw invalid_argument ("unsupported base");
+ }
+
+ return r;
+ }
+
+ void
+ integer_functions (function_map& m)
+ {
+ function_family f (m, "integer");
+
+ // $string(<int64>)
+ // $string(<uint64>[, <base>[, <width>]])
+ //
+ // Note that we don't handle NULL values for these types since they have no
+ // empty representation.
+ //
+ f["string"] += [](int64_t i) {return to_string (i);};
+
+ f["string"] += [](uint64_t i, optional<value> base, optional<value> width)
+ {
+ return to_string (i, move (base), move (width));
+ };
+
+ // $integer_sequence(<begin>, <end>[, <step>])
+ //
+ // Return the list of uint64 integers starting from <begin> (including) to
+ // <end> (excluding) with the specified <step> or 1 if unspecified. If
+ // <begin> is greater than <end>, empty list is returned.
+ //
+ // Note that currently negative numbers are not supported but this could
+ // be handled if required (e.g., by returning int64s in this case).
+ //
+ // Note also that we could improve this by adding a shortcut to get the
+ // indexes of a list (for example, $indexes(<list>) plus potentially a
+ // similar $keys() function for maps).
+ //
+ f["integer_sequence"] += [](value begin, value end, optional<value> step)
+ {
+ uint64_t b (convert<uint64_t> (move (begin)));
+ uint64_t e (convert<uint64_t> (move (end)));
+ uint64_t s (step ? convert<uint64_t> (move (*step)) : 1);
+
+ uint64s r;
+ if (b < e)
+ {
+ r.reserve (static_cast<size_t> ((e - b) / s + 1));
+
+ for (; b < e; b += s)
+ r.push_back (static_cast<size_t> (b));
+ }
+
+ return r;
+ };
+
+ // $size(<ints>)
+ //
+ // Return the number of elements in the sequence.
+ //
+ f["size"] += [] (int64s v) {return v.size ();};
+ f["size"] += [] (uint64s v) {return v.size ();};
+
+ // $sort(<ints> [, <flags>])
+ //
+ // Sort integers in ascending order.
+ //
+ // The following flags are supported:
+ //
+ // dedup - in addition to sorting also remove duplicates
+ //
+ f["sort"] += [](int64s v, optional<names> fs)
+ {
+ sort (v.begin (), v.end ());
+
+ if (functions_sort_flags (move (fs)))
+ v.erase (unique (v.begin(), v.end()), v.end ());
+
+ return v;
+ };
+
+ f["sort"] += [](uint64s v, optional<names> fs)
+ {
+ sort (v.begin (), v.end ());
+
+ if (functions_sort_flags (move (fs)))
+ v.erase (unique (v.begin(), v.end()), v.end ());
+
+ return v;
+ };
+
+ // $find(<ints>, <int>)
+ //
+ // Return true if the integer sequence contains the specified integer.
+ //
+ f["find"] += [](int64s vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<int64_t> (move (v))) != vs.end ();
+ };
+
+ f["find"] += [](uint64s vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<uint64_t> (move (v))) != vs.end ();
+ };
+
+ // $find_index(<ints>, <int>)
+ //
+ // Return the index of the first element in the integer sequence that is
+ // equal to the specified integer or $size(<ints>) if none is found.
+ //
+ f["find_index"] += [](int64s vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<int64_t> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
+ f["find_index"] += [](uint64s vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<uint64_t> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+ }
+}
diff --git a/libbuild2/functions-name.cxx b/libbuild2/functions-name.cxx
index 43bd8cb..84608d4 100644
--- a/libbuild2/functions-name.cxx
+++ b/libbuild2/functions-name.cxx
@@ -1,6 +1,8 @@
// file : libbuild2/functions-name.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
+#include <libbuild2/functions-name.hxx>
+
#include <libbuild2/scope.hxx>
#include <libbuild2/function.hxx>
#include <libbuild2/variable.hxx>
@@ -10,35 +12,37 @@ using namespace std;
namespace build2
{
+ extern bool
+ functions_sort_flags (optional<names>); // functions-builtin.cxx
+
// Convert name to target'ish name (see below for the 'ish part). Return
// raw/unprocessed data in case this is an unknown target type (or called
// out of scope). See scope::find_target_type() for details. Allow out-
// qualified names (out is discarded).
//
- static pair<name, optional<string>>
- to_target_name (const scope* s, name&& n, const name& o = name ())
+ static pair<const target_type*, optional<string>>
+ to_target_type (const scope* s, name& n, const name& o = name ())
{
if (n.pair && !o.directory ())
fail << "name pair in names";
- optional<string> e;
-
- if (s != nullptr)
- {
- auto rp (s->find_target_type (n, location ()));
+ return s != nullptr
+ ? s->find_target_type (n, location ())
+ : pair<const target_type*, optional<string>> {nullptr, nullopt};
+ }
- if (rp.first != nullptr)
- n.type = rp.first->name;
+ static pair<name, optional<string>>
+ to_target_name (const scope* s, name&& n, const name& o = name ())
+ {
+ auto rp (to_target_type (s, n, o));
- e = move (rp.second);
- }
+ if (rp.first != nullptr)
+ n.type = rp.first->name;
- return make_pair (move (n), move (e));
+ return make_pair (move (n), move (rp.second));
}
- // Note: this helper mey be used by other functions that operate on targets.
- //
- LIBBUILD2_SYMEXPORT const target&
+ const target&
to_target (const scope& s, name&& n, name&& o)
{
if (const target* r = search_existing (n, s, o.dir))
@@ -49,6 +53,81 @@ namespace build2
<< " not found" << endf;
}
+ const target&
+ to_target (const scope& s, names&& ns)
+ {
+ assert (ns.size () == (ns[0].pair ? 2 : 1));
+
+ name o;
+ return to_target (s, move (ns[0]), move (ns[0].pair ? ns[1] : o));
+ }
+
+ static bool
+ is_a (const scope* s, name&& n, const name& o, names&& t)
+ {
+ if (s == nullptr)
+ fail << "name.is_a() called out of scope";
+
+ string tts (convert<string> (move (t)));
+ const target_type* tt (s->find_target_type (tts));
+ if (tt == nullptr)
+ fail << "unknown target type " << tts;
+
+ const target_type* ntt (to_target_type (s, n, o).first);
+ if (ntt == nullptr)
+ fail << "unknown target type " << n.type << " in " << n;
+
+ return ntt->is_a (*tt);
+ }
+
+ static names
+ filter (const scope* s, names ns, names ts, bool out)
+ {
+ if (s == nullptr)
+ fail << "name." << (out ? "filter_out" : "filter")
+ << "() called out of scope";
+
+ small_vector<const target_type*, 1> tts;
+ for (const name& n: ts)
+ {
+ if (!n.simple ())
+ fail << "invalid target type name " << n;
+
+ if (n.pair)
+ fail << "pair in target type name " << n;
+
+ const target_type* tt (s->find_target_type (n.value));
+ if (tt == nullptr)
+ fail << "unknown target type " << n.value;
+
+ tts.push_back (tt);
+ }
+
+ names r;
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i);
+ bool p (n.pair);
+
+ const target_type* ntt (to_target_type (s, n, p ? *++i : name ()).first);
+ if (ntt == nullptr)
+ fail << "unknown target type " << n.type << " in " << n;
+
+ if ((find_if (tts.begin (), tts.end (),
+ [ntt] (const target_type* tt)
+ {
+ return ntt->is_a (*tt);
+ }) != tts.end ()) != out)
+ {
+ r.push_back (move (n));
+ if (p)
+ r.push_back (move (*i));
+ }
+ }
+
+ return r;
+ }
+
void
name_functions (function_map& m)
{
@@ -63,6 +142,14 @@ namespace build2
//
function_family fn (m, "name");
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ fn["string"] += [](name* n)
+ {
+ return n != nullptr ? to_string (move (*n)) : string ();
+ };
+
fn["name"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.value;
@@ -172,6 +259,108 @@ namespace build2
return to_target_name (s, move (n), o).first.proj;
};
+ // $is_a(<name>, <target-type>)
+ //
+ // Return true if the <name>'s target type is-a <target-type>. Note that
+ // this is a dynamic type check that takes into account target type
+ // inheritance.
+ //
+ fn["is_a"] += [](const scope* s, name n, names t)
+ {
+ return is_a (s, move (n), name (), move (t));
+ };
+ fn["is_a"] += [](const scope* s, names ns, names t)
+ {
+ auto i (ns.begin ());
+
+ name& n (*i);
+ const name& o (n.pair ? *++i : name ());
+
+ if (++i != ns.end ())
+ fail << "invalid name value: multiple names"; // Like in convert().
+
+ return is_a (s, move (n), o, move (t));
+ };
+
+ // $filter(<names>, <target-types>)
+ // $filter_out(<names>, <target-types>)
+ //
+ // Return names with target types which are-a (filter) or not are-a
+ // (filter_out) one of <target-types>. See $is_a() for background.
+ //
+ fn["filter"] += [](const scope* s, names ns, names ts)
+ {
+ return filter (s, move (ns), move (ts), false /* out */);
+ };
+
+ fn["filter_out"] += [](const scope* s, names ns, names ts)
+ {
+ return filter (s, move (ns), move (ts), true /* out */);
+ };
+
+ // $size(<names>)
+ //
+ // Return the number of elements in the sequence.
+ //
+ fn["size"] += [] (names ns)
+ {
+ size_t n (0);
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ ++n;
+ if (i->pair && !(++i)->directory ())
+ fail << "name pair in names";
+ }
+
+ return n;
+ };
+
+ // $sort(<names> [, <flags>])
+ //
+ // Sort names in ascending order.
+ //
+ // The following flags are supported:
+ //
+ // dedup - in addition to sorting also remove duplicates
+ //
+ fn["sort"] += [] (names ns, optional<names> fs)
+ {
+ //@@ TODO: shouldn't we do this in a pair-aware manner?
+
+ sort (ns.begin (), ns.end ());
+
+ if (functions_sort_flags (move (fs)))
+ ns.erase (unique (ns.begin(), ns.end()), ns.end ());
+
+ return ns;
+ };
+
+ // $find(<names>, <name>)
+ //
+ // Return true if the name sequence contains the specified name.
+ //
+ fn["find"] += [](names vs, names v)
+ {
+ //@@ TODO: shouldn't we do this in a pair-aware manner?
+
+ return find (vs.begin (), vs.end (),
+ convert<name> (move (v))) != vs.end ();
+ };
+
+ // $find_index(<names>, <name>)
+ //
+ // Return the index of the first element in the name sequence that is
+ // equal to the specified name or $size(<names>) if none is found.
+ //
+ fn["find_index"] += [](names vs, names v)
+ {
+ //@@ TODO: shouldn't we do this in a pair-aware manner?
+
+ auto i (find (vs.begin (), vs.end (), convert<name> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
// Functions that can be called only on real targets.
//
function_family ft (m, "target");
@@ -226,7 +415,7 @@ namespace build2
// Note that while this function is not technically pure, we don't mark it
// as such for the same reasons as $path() above.
//
- fn["process_path"] += [](const scope* s, names ns)
+ ft["process_path"] += [](const scope* s, names ns)
{
if (s == nullptr)
fail << "target.process_path() called out of scope";
@@ -255,6 +444,11 @@ namespace build2
//
function_family fb (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result will unlikely be what the user expected. So for now
+ // we keep it a bit tighter.
+ //
fb[".concat"] += [](dir_path d, name n)
{
d /= n.dir;
diff --git a/libbuild2/functions-name.hxx b/libbuild2/functions-name.hxx
new file mode 100644
index 0000000..34fa4b8
--- /dev/null
+++ b/libbuild2/functions-name.hxx
@@ -0,0 +1,30 @@
+// file : libbuild2/functions-name.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_FUNCTIONS_NAME_HXX
+#define LIBBUILD2_FUNCTIONS_NAME_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/forward.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/export.hxx>
+
+namespace build2
+{
+ // Helpers that may be useful to other functions that operate on target
+ // name.
+
+ // Resolve the name to target issuing diagnostics and failing if not found.
+ //
+ LIBBUILD2_SYMEXPORT const target&
+ to_target (const scope&, name&&, name&& out);
+
+ // As above but from the names vector which should contain a single name
+ // or an out-qualified name pair (asserted).
+ //
+ LIBBUILD2_SYMEXPORT const target&
+ to_target (const scope&, names&&);
+}
+
+#endif // LIBBUILD2_FUNCTIONS_NAME_HXX
diff --git a/libbuild2/functions-path.cxx b/libbuild2/functions-path.cxx
index b7c9a8d..020c8f4 100644
--- a/libbuild2/functions-path.cxx
+++ b/libbuild2/functions-path.cxx
@@ -10,6 +10,9 @@ using namespace std;
namespace build2
{
+ extern bool
+ functions_sort_flags (optional<names>); // functions-builtin.cxx
+
static value
path_thunk (const scope* base,
vector_view<value> args,
@@ -96,6 +99,20 @@ namespace build2
}
}
+ template <typename P>
+ static inline P
+ relative (const P& p, const dir_path& d)
+ {
+ try
+ {
+ return p.relative (d); // Note: cannot move due to diagnostics.
+ }
+ catch (const invalid_path&)
+ {
+ fail << "'" << p << "' cannot be made relative to '" << d << "'" << endf;
+ }
+ }
+
using butl::path_match;
// Return true if a path matches the pattern. See path_match() overloads
@@ -137,6 +154,45 @@ namespace build2
return path_match (entry, pattern, *start);
}
+ // Don't fail for absolute paths on Windows and, for example, just return
+ // c:/foo for c:\foo.
+ //
+ template <typename P>
+ static inline string
+ posix_string (P&& p)
+ {
+#ifndef _WIN32
+ return move (p).posix_string ();
+#else
+ if (p.relative ())
+ return move (p).posix_string ();
+
+ // Note: also handles root directories.
+ //
+ dir_path d (p.root_directory ());
+ return d.string () + '/' + p.leaf (d).posix_string ();
+#endif
+ }
+
+ // Similar to the above don't fail for absolute paths on Windows.
+ //
+ template <typename P>
+ static inline string
+ posix_representation (P&& p)
+ {
+#ifndef _WIN32
+ return move (p).posix_representation ();
+#else
+ if (p.relative ())
+ return move (p).posix_representation ();
+
+ // Note: also handles root directories.
+ //
+ dir_path d (p.root_directory ());
+ return d.string () + '/' + p.leaf (d).posix_representation ();
+#endif
+ }
+
void
path_functions (function_map& m)
{
@@ -144,7 +200,13 @@ namespace build2
// string
//
- f["string"] += [](path p) {return move (p).string ();};
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](path* p)
+ {
+ return p != nullptr ? move (*p).string () : string ();
+ };
f["string"] += [](paths v)
{
@@ -162,6 +224,41 @@ namespace build2
return r;
};
+ // posix_string
+ //
+ f["posix_string"] += [](path p) {return posix_string (move (p));};
+ f["posix_string"] += [](dir_path p) {return posix_string (move (p));};
+
+ f["posix_string"] += [](paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_string (move (p)));
+ return r;
+ };
+
+ f["posix_string"] += [](dir_paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_string (move (p)));
+ return r;
+ };
+
+ f[".posix_string"] += [](names ns)
+ {
+ // For each path decide based on the presence of a trailing slash
+ // whether it is a directory. Return as untyped list of strings.
+ //
+ for (name& n: ns)
+ {
+ n = n.directory ()
+ ? posix_string (move (n.dir))
+ : posix_string (convert<path> (move (n)));
+ }
+ return ns;
+ };
+
// representation
//
f["representation"] += [](path p) {return move (p).representation ();};
@@ -182,6 +279,48 @@ namespace build2
return r;
};
+ // posix_representation
+ //
+ f["posix_representation"] += [](path p)
+ {
+ return posix_representation (move (p));
+ };
+
+ f["posix_representation"] += [](dir_path p)
+ {
+ return posix_representation (move (p));
+ };
+
+ f["posix_representation"] += [](paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_representation (move (p)));
+ return r;
+ };
+
+ f["posix_representation"] += [](dir_paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_representation (move (p)));
+ return r;
+ };
+
+ f[".posix_representation"] += [](names ns)
+ {
+ // For each path decide based on the presence of a trailing slash
+ // whether it is a directory. Return as untyped list of strings.
+ //
+ for (name& n: ns)
+ {
+ n = n.directory ()
+ ? posix_representation (move (n.dir))
+ : posix_representation (convert<path> (move (n)));
+ }
+ return ns;
+ };
+
// canonicalize
//
// @@ TODO: add ability to specify alternative separator.
@@ -295,7 +434,11 @@ namespace build2
return ns;
};
- // directory
+ // $directory(<path>)
+ // $directory(<paths>)
+ //
+ // Return the directory part of the path or empty path if there is no
+ // directory. Directory of a root directory is an empty path.
//
f["directory"] += &path::directory;
@@ -329,44 +472,55 @@ namespace build2
return ns;
};
- // base
+ // $root_directory(<path>)
+ // $root_directory(<paths>)
//
- f["base"] += &path::base;
+ // Return the root directory of the path or empty path if the directory is
+ // not absolute.
+ //
+ f["root_directory"] += &path::root_directory;
- f["base"] += [](paths v)
+ f["root_directory"] += [](paths v)
{
- for (path& p: v)
- p = p.base ();
- return v;
+ dir_paths r;
+ for (const path& p: v)
+ r.push_back (p.root_directory ());
+ return r;
};
- f["base"] += [](dir_paths v)
+ f["root_directory"] += [](dir_paths v)
{
for (dir_path& p: v)
- p = p.base ();
+ p = p.root_directory ();
return v;
};
- f[".base"] += [](names ns)
+ f[".root_directory"] += [](names ns)
{
// For each path decide based on the presence of a trailing slash
- // whether it is a directory. Return as untyped list of (potentially
- // mixed) paths.
+ // whether it is a directory. Return as list of directory names.
//
for (name& n: ns)
{
if (n.directory ())
- n.dir = n.dir.base ();
+ n.dir = n.dir.root_directory ();
else
- n.value = convert<path> (move (n)).base ().string ();
+ n = convert<path> (move (n)).root_directory ();
}
return ns;
};
- // leaf
+ // $leaf(<path>)
//
f["leaf"] += &path::leaf;
+ // $leaf(<path>, <dir-path>)
+ // $leaf(<paths>, <dir-path>)
+ //
+ // Return the path without the specified directory part. Return empty path
+ // if the paths are the same. Issue diagnostics and fail if the directory
+ // is not a prefix of the path. Note: expects both paths to be normalized.
+ //
f["leaf"] += [](path p, dir_path d)
{
return leaf (p, move (d));
@@ -402,6 +556,83 @@ namespace build2
return ns;
};
+ // $relative(<path>, <dir-path>)
+ // $relative(<paths>, <dir-path>)
+ //
+ // Return a path relative to the specified directory that is equivalent to
+ // the specified path. Issue diagnostics and fail if a relative path
+ // cannot be derived (for example, paths are on different drives on
+ // Windows).
+ //
+ f["relative"] += [](path p, dir_path d)
+ {
+ return relative (p, d);
+ };
+
+ f["relative"] += [](paths v, dir_path d)
+ {
+ for (path& p: v)
+ p = relative (p, d);
+ return v;
+ };
+
+ f["relative"] += [](dir_paths v, dir_path d)
+ {
+ for (dir_path& p: v)
+ p = relative (p, d);
+ return v;
+ };
+
+ f[".relative"] += [](names ns, dir_path d)
+ {
+ // For each path decide based on the presence of a trailing slash
+ // whether it is a directory. Return as untyped list of (potentially
+ // mixed) paths.
+ //
+ for (name& n: ns)
+ {
+ if (n.directory ())
+ n.dir = relative (n.dir, d);
+ else
+ n.value = relative (convert<path> (move (n)), d).string ();
+ }
+ return ns;
+ };
+
+ // base
+ //
+ f["base"] += &path::base;
+
+ f["base"] += [](paths v)
+ {
+ for (path& p: v)
+ p = p.base ();
+ return v;
+ };
+
+ f["base"] += [](dir_paths v)
+ {
+ for (dir_path& p: v)
+ p = p.base ();
+ return v;
+ };
+
+ f[".base"] += [](names ns)
+ {
+ // For each path decide based on the presence of a trailing slash
+ // whether it is a directory. Return as untyped list of (potentially
+ // mixed) paths.
+ //
+ for (name& n: ns)
+ {
+ if (n.directory ())
+ n.dir = n.dir.base ();
+ else
+ n.value = convert<path> (move (n)).base ().string ();
+ }
+ return ns;
+ };
+
// extension
//
f["extension"] += &extension;
@@ -411,6 +642,93 @@ namespace build2
return extension (convert<path> (move (ns)));
};
+ // $size(<paths>)
+ // $size(<dir_paths>)
+ //
+ // Return the number of elements in the sequence.
+ //
+ f["size"] += [] (paths v) {return v.size ();};
+ f["size"] += [] (dir_paths v) {return v.size ();};
+
+ // $size(<path>)
+ // $size(<dir_path>)
+ //
+ // Return the number of characters (bytes) in the path. Note that for
+ // dir_path the result does not include the trailing directory separator
+ // (except for the POSIX root directory).
+ //
+ f["size"] += [] (path v) {return v.size ();};
+ f["size"] += [] (dir_path v) {return v.size ();};
+
+ // $sort(<paths> [, <flags>])
+ // $sort(<dir_paths> [, <flags>])
+ //
+ // Sort paths in ascending order. Note that on hosts with a case-
+ // insensitive filesystem the order is case-insensitive.
+ //
+ // The following flags are supported:
+ //
+ // dedup - in addition to sorting also remove duplicates
+ //
+ f["sort"] += [](paths v, optional<names> fs)
+ {
+ sort (v.begin (), v.end ());
+
+ if (functions_sort_flags (move (fs)))
+ v.erase (unique (v.begin(), v.end()), v.end ());
+
+ return v;
+ };
+
+ f["sort"] += [](dir_paths v, optional<names> fs)
+ {
+ sort (v.begin (), v.end ());
+
+ if (functions_sort_flags (move (fs)))
+ v.erase (unique (v.begin(), v.end()), v.end ());
+
+ return v;
+ };
+
+ // $find(<paths>, <path>)
+ // $find(<dir_paths>, <dir_path>)
+ //
+ // Return true if the path sequence contains the specified path. Note that
+ // on hosts with a case-insensitive filesystem the comparison is
+ // case-insensitive.
+ //
+ f["find"] += [](paths vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<path> (move (v))) != vs.end ();
+ };
+
+ f["find"] += [](dir_paths vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<dir_path> (move (v))) != vs.end ();
+ };
+
+ // $find_index(<paths>, <path>)
+ // $find_index(<dir_paths>, <dir_path>)
+ //
+ // Return the index of the first element in the path sequence that is
+ // equal to the specified path or $size(<paths>) if none is found. Note
+ // that on hosts with a case-insensitive filesystem the comparison is
+ // case-insensitive.
+ //
+ f["find_index"] += [](paths vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<path> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
+ f["find_index"] += [](dir_paths vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<dir_path> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
// $path.match(<val>, <pat> [, <start>])
//
// Match a filesystem entry name against a name pattern (both are strings),
@@ -498,6 +816,11 @@ namespace build2
//
function_family b (m, "builtin", &path_thunk);
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result will unlikely be what the user expected, especially
+ // if the NULL value is on the LHS. So for now we keep it a bit tighter.
+ //
b[".concat"] += &concat_path_string;
b[".concat"] += &concat_dir_path_string;
@@ -510,5 +833,15 @@ namespace build2
{
return concat_dir_path_string (move (l), convert<string> (move (ur)));
};
+
+ b[".concat"] += [](dir_path l, dir_path r)
+ {
+ return value (move (l /= r));
+ };
+
+ b[".concat"] += [](dir_path l, path r)
+ {
+ return value (path_cast<path> (move (l)) /= r);
+ };
}
}
diff --git a/libbuild2/functions-process.cxx b/libbuild2/functions-process.cxx
index c4e5c24..bbcbbab 100644
--- a/libbuild2/functions-process.cxx
+++ b/libbuild2/functions-process.cxx
@@ -4,6 +4,8 @@
#include <libbutl/regex.hxx>
#include <libbutl/builtin.hxx>
+#include <libbuild2/scope.hxx>
+#include <libbuild2/context.hxx>
#include <libbuild2/function.hxx>
#include <libbuild2/variable.hxx>
@@ -141,6 +143,9 @@ namespace build2
builtin_callbacks cb;
fdpipe ofd (open_pipe ());
+ if (verb >= 3)
+ print_process (process_args (bn, args));
+
uint8_t rs; // Storage.
butl::builtin b (bf (rs,
args,
@@ -172,7 +177,16 @@ namespace build2
// While assuming that the builtin has issued the diagnostics on failure
// we still print the error message (see process_finish() for details).
//
- fail << bn << " builtin " << process_exit (rs) << endf;
+ diag_record dr;
+ dr << fail << "builtin " << bn << " " << process_exit (rs);
+
+ if (verb >= 1 && verb <= 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, process_args (bn, args));
+ }
+
+ dr << endf;
}
catch (const system_error& e)
{
@@ -181,18 +195,32 @@ namespace build2
}
static inline value
- run_builtin (builtin_function* bf, const strings& args, const string& bn)
+ run_builtin (const scope* s,
+ builtin_function* bf,
+ const strings& args,
+ const string& bn)
{
+ // See below.
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run() called during " << s->ctx.phase << " phase";
+
return run_builtin_impl (bf, args, bn, read);
}
static inline value
- run_builtin_regex (builtin_function* bf,
+ run_builtin_regex (const scope* s,
+ builtin_function* bf,
const strings& args,
const string& bn,
const string& pat,
const optional<string>& fmt)
{
+ // See below.
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run_regex() called during " << s->ctx.phase << " phase";
+
// Note that we rely on the "small function object" optimization here.
//
return run_builtin_impl (bf, args, bn,
@@ -293,6 +321,9 @@ namespace build2
[] (const string& s) {return s.c_str ();});
cargs.push_back (nullptr);
+ // Note that for now these functions can only be called during the load
+ // phase (see below) and so no diagnostics buffering is needed.
+ //
return run_start (3 /* verbosity */,
pp,
cargs,
@@ -309,15 +340,7 @@ namespace build2
void
process_finish (const scope*, const cstrings& args, process& pr)
{
- try
- {
- if (!pr.wait ())
- fail << "process " << args[0] << " " << *pr.exit;
- }
- catch (const process_error& e)
- {
- fail << "unable to execute " << args[0] << ": " << e;
- }
+ run_finish (args, pr, 2 /* verbosity */);
}
// Run a process.
@@ -352,6 +375,15 @@ namespace build2
static inline value
run_process (const scope* s, const process_path& pp, const strings& args)
{
+ // The only plausible place where these functions can be called outside
+ // the load phase are scripts and there it doesn't make much sense to use
+ // them (the same can be achieved with commands in a uniform manner). Note
+ // that if there is no scope, then this is most likely (certainly?) the
+ // load phase (for example, command line).
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run() called during " << s->ctx.phase << " phase";
+
return run_process_impl (s, pp, args, read);
}
@@ -362,6 +394,11 @@ namespace build2
const string& pat,
const optional<string>& fmt)
{
+ // See above.
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run_regex() called during " << s->ctx.phase << " phase";
+
// Note that we rely on the "small function object" optimization here.
//
return run_process_impl (s, pp, args,
@@ -377,7 +414,7 @@ namespace build2
if (builtin_function* bf = builtin (args))
{
pair<string, strings> ba (builtin_args (bf, move (args), "run"));
- return run_builtin (bf, ba.second, ba.first);
+ return run_builtin (s, bf, ba.second, ba.first);
}
else
{
@@ -395,7 +432,7 @@ namespace build2
if (builtin_function* bf = builtin (args))
{
pair<string, strings> ba (builtin_args (bf, move (args), "run_regex"));
- return run_builtin_regex (bf, ba.second, ba.first, pat, fmt);
+ return run_builtin_regex (s, bf, ba.second, ba.first, pat, fmt);
}
else
{
@@ -420,7 +457,8 @@ namespace build2
// result, then such variables should be reported with the
// config.environment directive.
//
- // Note that this function is not pure.
+ // Note that this function is not pure and can only be called during the
+ // load phase.
//
f.insert (".run", false) += [](const scope* s, names args)
{
@@ -446,7 +484,8 @@ namespace build2
// result, then such variables should be reported with the
// config.environment directive.
//
- // Note that this function is not pure.
+ // Note that this function is not pure and can only be called during the
+ // load phase.
//
{
auto e (f.insert (".run_regex", false));
diff --git a/libbuild2/functions-project-name.cxx b/libbuild2/functions-project-name.cxx
index 145e62c..4a8394d 100644
--- a/libbuild2/functions-project-name.cxx
+++ b/libbuild2/functions-project-name.cxx
@@ -13,7 +13,13 @@ namespace build2
{
function_family f (m, "project_name");
- f["string"] += [](project_name p) {return move (p).string ();};
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](project_name* p)
+ {
+ return p != nullptr ? move (*p).string () : string ();
+ };
f["base"] += [](project_name p, optional<string> ext)
{
@@ -32,6 +38,11 @@ namespace build2
//
function_family b (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result will unlikely be what the user expected. So for now
+ // we keep it a bit tighter.
+ //
b[".concat"] += [](project_name n, string s)
{
string r (move (n).string ());
diff --git a/libbuild2/functions-regex.cxx b/libbuild2/functions-regex.cxx
index 2f0d122..1465108 100644
--- a/libbuild2/functions-regex.cxx
+++ b/libbuild2/functions-regex.cxx
@@ -21,7 +21,7 @@ namespace build2
// Optimize for the string value type.
//
if (v.type != &value_traits<string>::value_type)
- untypify (v);
+ untypify (v, true /* reduce */);
return convert<string> (move (v));
}
@@ -69,7 +69,7 @@ namespace build2
else if (s == "return_subs")
subs = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -92,10 +92,7 @@ namespace build2
names r;
for (size_t i (1); i != m.size (); ++i)
- {
- if (m[i].matched)
- r.emplace_back (m.str (i));
- }
+ r.emplace_back (m[i].matched ? m.str (i) : string ());
return value (move (r));
}
@@ -129,7 +126,7 @@ namespace build2
else if (s == "return_subs")
subs = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -161,10 +158,7 @@ namespace build2
if (subs)
{
for (size_t i (1); i != m.size (); ++i)
- {
- if (m[i].matched)
- r.emplace_back (m.str (i));
- }
+ r.emplace_back (m[i].matched ? m.str (i) : string ());
}
return value (move (r));
@@ -174,7 +168,9 @@ namespace build2
}
static pair<regex::flag_type, regex_constants::match_flag_type>
- parse_replacement_flags (optional<names>&& flags, bool first_only = true)
+ parse_replacement_flags (optional<names>&& flags,
+ bool first_only = true,
+ bool* copy_empty = nullptr)
{
regex::flag_type rf (regex::ECMAScript);
regex_constants::match_flag_type mf (regex_constants::match_default);
@@ -191,8 +187,10 @@ namespace build2
mf |= regex_constants::format_first_only;
else if (s == "format_no_copy")
mf |= regex_constants::format_no_copy;
+ else if (copy_empty != nullptr && s == "format_copy_empty")
+ *copy_empty = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -334,7 +332,10 @@ namespace build2
const string& fmt,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags), false));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ false /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
names r;
@@ -342,10 +343,10 @@ namespace build2
try
{
regex_replace_search (to_string (move (v)), rge, fmt,
- [&r] (string::const_iterator b,
- string::const_iterator e)
+ [copy_empty, &r] (string::const_iterator b,
+ string::const_iterator e)
{
- if (b != e)
+ if (copy_empty || b != e)
r.emplace_back (string (b, e));
},
fl.second);
@@ -364,26 +365,29 @@ namespace build2
// apply() overloads (below) for details.
//
static names
- apply (names&& s,
+ apply (names&& ns,
const string& re,
const string& fmt,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags)));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ true /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
names r;
try
{
- for (auto& v: s)
+ for (auto& n: ns)
{
- string s (regex_replace_search (convert<string> (move (v)),
+ string s (regex_replace_search (convert<string> (move (n)),
rge,
fmt,
fl.second).first);
- if (!s.empty ())
+ if (copy_empty || !s.empty ())
r.emplace_back (move (s));
}
}
@@ -411,7 +415,7 @@ namespace build2
if (s == "icase")
r |= regex::icase;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -422,67 +426,141 @@ namespace build2
// See find_match() overloads (below) for details.
//
static bool
- find_match (names&& s, const string& re, optional<names>&& flags)
+ find_match (names&& ns, const string& re, optional<names>&& flags)
{
regex::flag_type fl (parse_find_flags (move (flags)));
regex rge (parse_regex (re, fl));
- for (auto& v: s)
+ for (auto& n: ns)
{
- if (regex_match (convert<string> (move (v)), rge))
+ if (regex_match (convert<string> (move (n)), rge))
return true;
}
return false;
}
+ // Return a list of elements that match (matching is true) or don't match
+ // (matching is false) the regular expression. See filter_match() and
+ // filter_out_match() overloads (below) for details.
+ //
+ static names
+ filter_match (names&& ns,
+ const string& re,
+ optional<names>&& flags,
+ bool matching)
+ {
+ regex::flag_type fl (parse_find_flags (move (flags)));
+ regex rge (parse_regex (re, fl));
+
+ names r;
+
+ for (name& n: ns)
+ {
+ // Note that we need to preserve the element while converting it to
+ // string since we may add it to the resulting list. But let's optimize
+ // this for the simple value case by round-tripping it through the
+ // string.
+ //
+ bool s (n.simple ());
+ string v (convert<string> (s ? move (n) : name (n)));
+
+ if (regex_match (v, rge) == matching)
+ r.emplace_back (s ? name (move (v)) : move (n));
+ }
+
+ return r;
+ }
+
// Return true if a part of any of the list elements matches the regular
// expression. See find_search() overloads (below) for details.
//
static bool
- find_search (names&& s, const string& re, optional<names>&& flags)
+ find_search (names&& ns, const string& re, optional<names>&& flags)
{
regex::flag_type fl (parse_find_flags (move (flags)));
regex rge (parse_regex (re, fl));
- for (auto& v: s)
+ for (auto& n: ns)
{
- if (regex_search (convert<string> (move (v)), rge))
+ if (regex_search (convert<string> (move (n)), rge))
return true;
}
return false;
}
+ // Return those elements of a list which have a match (matching is true) or
+ // have no match (matching is false) between the regular expression and
+ // some/any part of the element. See filter_search() and filter_out_search()
+ // overloads (below) for details.
+ //
+ static names
+ filter_search (names&& ns,
+ const string& re,
+ optional<names>&& flags,
+ bool matching)
+ {
+ regex::flag_type fl (parse_find_flags (move (flags)));
+ regex rge (parse_regex (re, fl));
+
+ names r;
+
+ for (auto& n: ns)
+ {
+ // Note that we need to preserve the element while converting it to
+ // string since we may add it to the resulting list. But let's optimize
+ // this for the simple value case by round-tripping it through the
+ // string.
+ //
+ bool s (n.simple ());
+ string v (convert<string> (s ? move (n) : name (n)));
+
+ if (regex_search (v, rge) == matching)
+ r.emplace_back (s ? name (move (v)) : move (n));
+ }
+
+ return r;
+ }
+
// Replace matched parts of list elements using the format string and
// concatenate the transformed elements. See merge() overloads (below) for
// details.
//
static names
- merge (names&& s,
+ merge (names&& ns,
const string& re,
const string& fmt,
optional<string>&& delim,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags)));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ true /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
string rs;
try
{
- for (auto& v: s)
+ bool first (true);
+ for (auto& n: ns)
{
- string s (regex_replace_search (convert<string> (move (v)),
+ string s (regex_replace_search (convert<string> (move (n)),
rge,
fmt,
fl.second).first);
- if (!s.empty ())
+ if (copy_empty || !s.empty ())
{
- if (!rs.empty () && delim)
- rs.append (*delim);
+ if (delim)
+ {
+ if (first)
+ first = false;
+ else
+ rs.append (*delim);
+ }
rs.append (s);
}
@@ -521,33 +599,70 @@ namespace build2
// sub-strings that match the marked sub-expressions and
// NULL if no match
//
- f[".match"] += [](value s, string re, optional<names> flags)
+ f[".match"] += [](value v, string re, optional<names> flags)
{
- return match (move (s), re, move (flags));
+ return match (move (v), re, move (flags));
};
- f[".match"] += [](value s, names re, optional<names> flags)
+ f[".match"] += [](value v, names re, optional<names> flags)
{
- return match (move (s), convert<string> (move (re)), move (flags));
+ return match (move (v), convert<string> (move (re)), move (flags));
};
// $regex.find_match(<vals>, <pat> [, <flags>])
//
// Match list elements against the regular expression and return true if
- // the match is found. Convert the elements to string prior to matching.
+ // the match is found. Convert the elements to strings prior to matching.
+ //
+ // The following flags are supported:
+ //
+ // icase - match ignoring case
+ //
+ f[".find_match"] += [](names ns, string re, optional<names> flags)
+ {
+ return find_match (move (ns), re, move (flags));
+ };
+
+ f[".find_match"] += [](names ns, names re, optional<names> flags)
+ {
+ return find_match (move (ns), convert<string> (move (re)), move (flags));
+ };
+
+ // $regex.filter_match(<vals>, <pat> [, <flags>])
+ // $regex.filter_out_match(<vals>, <pat> [, <flags>])
+ //
+ // Return elements of a list that match (filter) or do not match
+ // (filter_out) the regular expression. Convert the elements to strings
+ // prior to matching.
//
// The following flags are supported:
//
// icase - match ignoring case
//
- f[".find_match"] += [](names s, string re, optional<names> flags)
+ f[".filter_match"] += [](names ns, string re, optional<names> flags)
{
- return find_match (move (s), re, move (flags));
+ return filter_match (move (ns), re, move (flags), true /* matching */);
};
- f[".find_match"] += [](names s, names re, optional<names> flags)
+ f[".filter_match"] += [](names ns, names re, optional<names> flags)
{
- return find_match (move (s), convert<string> (move (re)), move (flags));
+ return filter_match (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ true /* matching */);
+ };
+
+ f[".filter_out_match"] += [](names s, string re, optional<names> flags)
+ {
+ return filter_match (move (s), re, move (flags), false /* matching */);
+ };
+
+ f[".filter_out_match"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_match (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ false /* matching */);
};
// $regex.search(<val>, <pat> [, <flags>])
@@ -573,38 +688,75 @@ namespace build2
// If both return_match and return_subs flags are specified then the
// sub-string that matches the whole regular expression comes first.
//
- f[".search"] += [](value s, string re, optional<names> flags)
+ f[".search"] += [](value v, string re, optional<names> flags)
{
- return search (move (s), re, move (flags));
+ return search (move (v), re, move (flags));
};
- f[".search"] += [](value s, names re, optional<names> flags)
+ f[".search"] += [](value v, names re, optional<names> flags)
{
- return search (move (s), convert<string> (move (re)), move (flags));
+ return search (move (v), convert<string> (move (re)), move (flags));
};
// $regex.find_search(<vals>, <pat> [, <flags>])
//
// Determine if there is a match between the regular expression and some
- // part of any of the list elements. Convert the elements to string prior
+ // part of any of the list elements. Convert the elements to strings prior
// to matching.
//
// The following flags are supported:
//
// icase - match ignoring case
//
- f[".find_search"] += [](names s, string re, optional<names> flags)
+ f[".find_search"] += [](names ns, string re, optional<names> flags)
{
- return find_search (move (s), re, move (flags));
+ return find_search (move (ns), re, move (flags));
};
- f[".find_search"] += [](names s, names re, optional<names> flags)
+ f[".find_search"] += [](names ns, names re, optional<names> flags)
{
- return find_search (move (s),
+ return find_search (move (ns),
convert<string> (move (re)),
move (flags));
};
+ // $regex.filter_search(<vals>, <pat> [, <flags>])
+ // $regex.filter_out_search(<vals>, <pat> [, <flags>])
+ //
+ // Return elements of a list for which there is a match (filter) or no
+ // match (filter_out) between the regular expression and some part of the
+ // element. Convert the elements to strings prior to matching.
+ //
+ // The following flags are supported:
+ //
+ // icase - match ignoring case
+ //
+ f[".filter_search"] += [](names ns, string re, optional<names> flags)
+ {
+ return filter_search (move (ns), re, move (flags), true /* matching */);
+ };
+
+ f[".filter_search"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_search (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ true /* matching */);
+ };
+
+ f[".filter_out_search"] += [](names ns, string re, optional<names> flags)
+ {
+ return filter_search (move (ns), re, move (flags), false /* matching */);
+ };
+
+ f[".filter_out_search"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_search (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ false /* matching */);
+ };
+
// $regex.replace(<val>, <pat>, <fmt> [, <flags>])
//
// Replace matched parts in a value of an arbitrary type, using the format
@@ -625,14 +777,14 @@ namespace build2
// If both format_first_only and format_no_copy flags are specified then
// the result will only contain the replacement of the first match.
//
- f[".replace"] += [](value s, string re, string fmt, optional<names> flags)
+ f[".replace"] += [](value v, string re, string fmt, optional<names> flags)
{
- return replace (move (s), re, fmt, move (flags));
+ return replace (move (v), re, fmt, move (flags));
};
- f[".replace"] += [](value s, names re, names fmt, optional<names> flags)
+ f[".replace"] += [](value v, names re, names fmt, optional<names> flags)
{
- return replace (move (s),
+ return replace (move (v),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
@@ -658,21 +810,21 @@ namespace build2
// Note that if format_no_copy is specified, unmatched lines are not
// copied either.
//
- f[".replace_lines"] += [](value s,
- string re,
- string fmt,
- optional<names> flags)
+ f[".replace_lines"] += [](value v,
+ string re,
+ string fmt,
+ optional<names> flags)
{
- return replace_lines (move (s), re, move (fmt), move (flags));
+ return replace_lines (move (v), re, move (fmt), move (flags));
};
- f[".replace_lines"] += [](value s,
- names re,
- names* fmt,
- optional<names> flags)
+ f[".replace_lines"] += [](value v,
+ names re,
+ names* fmt,
+ optional<names> flags)
{
return replace_lines (
- move (s),
+ move (v),
convert<string> (move (re)),
(fmt != nullptr
? optional<string> (convert<string> (move (*fmt)))
@@ -683,8 +835,9 @@ namespace build2
// $regex.split(<val>, <pat>, <fmt> [, <flags>])
//
// Split a value of an arbitrary type into a list of unmatched value parts
- // and replacements of the matched parts, omitting empty ones. Convert the
- // value to string prior to matching.
+ // and replacements of the matched parts, omitting empty ones (unless the
+ // format_copy_empty flag is specified). Convert the value to string prior
+ // to matching.
//
// Substitution escape sequences are extended with a subset of Perl
// sequences (see libbutl/regex.hxx for details).
@@ -695,14 +848,16 @@ namespace build2
//
// format_no_copy - do not copy unmatched value parts into the result
//
- f[".split"] += [](value s, string re, string fmt, optional<names> flags)
+ // format_copy_empty - copy empty elements into the result
+ //
+ f[".split"] += [](value v, string re, string fmt, optional<names> flags)
{
- return split (move (s), re, fmt, move (flags));
+ return split (move (v), re, fmt, move (flags));
};
- f[".split"] += [](value s, names re, names fmt, optional<names> flags)
+ f[".split"] += [](value v, names re, names fmt, optional<names> flags)
{
- return split (move (s),
+ return split (move (v),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
@@ -711,9 +866,10 @@ namespace build2
// $regex.merge(<vals>, <pat>, <fmt> [, <delim> [, <flags>]])
//
// Replace matched parts in a list of elements using the regex format
- // string. Convert the elements to string prior to matching. The result
+ // string. Convert the elements to strings prior to matching. The result
// value is untyped and contains concatenation of transformed non-empty
- // elements optionally separated with a delimiter.
+ // elements (unless the format_copy_empty flag is specified) optionally
+ // separated with a delimiter.
//
// Substitution escape sequences are extended with a subset of Perl
// sequences (see libbutl/regex.hxx for details).
@@ -726,30 +882,38 @@ namespace build2
//
// format_no_copy - do not copy unmatched value parts into the result
//
+ // format_copy_empty - copy empty elements into the result
+ //
// If both format_first_only and format_no_copy flags are specified then
// the result will be a concatenation of only the first match
// replacements.
//
- f[".merge"] += [](names s,
- string re,
- string fmt,
- optional<string> delim,
- optional<names> flags)
- {
- return merge (move (s), re, fmt, move (delim), move (flags));
+ f[".merge"] += [](names ns,
+ string re,
+ string fmt,
+ optional<string*> delim,
+ optional<names> flags)
+ {
+ return merge (move (ns),
+ re,
+ fmt,
+ delim && *delim != nullptr
+ ? move (**delim)
+ : optional<string> (),
+ move (flags));
};
- f[".merge"] += [](names s,
- names re,
- names fmt,
- optional<names> delim,
- optional<names> flags)
+ f[".merge"] += [](names ns,
+ names re,
+ names fmt,
+ optional<names*> delim,
+ optional<names> flags)
{
- return merge (move (s),
+ return merge (move (ns),
convert<string> (move (re)),
convert<string> (move (fmt)),
- delim
- ? convert<string> (move (*delim))
+ delim && *delim != nullptr
+ ? convert<string> (move (**delim))
: optional<string> (),
move (flags));
};
@@ -757,8 +921,9 @@ namespace build2
// $regex.apply(<vals>, <pat>, <fmt> [, <flags>])
//
// Replace matched parts of each element in a list using the regex format
- // string. Convert the elements to string prior to matching. Return a list
- // of transformed elements, omitting the empty ones.
+ // string. Convert the elements to strings prior to matching. Return a
+ // list of transformed elements, omitting the empty ones (unless the
+ // format_copy_empty flag is specified).
//
// Substitution escape sequences are extended with a subset of Perl
// sequences (see libbutl/regex.hxx for details).
@@ -771,18 +936,20 @@ namespace build2
//
// format_no_copy - do not copy unmatched value parts into the result
//
+ // format_copy_empty - copy empty elements into the result
+ //
// If both format_first_only and format_no_copy flags are specified then
// the result elements will only contain the replacement of the first
// match.
//
- f[".apply"] += [](names s, string re, string fmt, optional<names> flags)
+ f[".apply"] += [](names ns, string re, string fmt, optional<names> flags)
{
- return apply (move (s), re, fmt, move (flags));
+ return apply (move (ns), re, fmt, move (flags));
};
- f[".apply"] += [](names s, names re, names fmt, optional<names> flags)
+ f[".apply"] += [](names ns, names re, names fmt, optional<names> flags)
{
- return apply (move (s),
+ return apply (move (ns),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
diff --git a/libbuild2/functions-string.cxx b/libbuild2/functions-string.cxx
index b430ebf..06fe89d 100644
--- a/libbuild2/functions-string.cxx
+++ b/libbuild2/functions-string.cxx
@@ -8,17 +8,44 @@ using namespace std;
namespace build2
{
+ static size_t
+ find_index (const strings& vs, value&& v, optional<names>&& fs)
+ {
+ bool ic (false);
+ if (fs)
+ {
+ for (name& f: *fs)
+ {
+ string s (convert<string> (move (f)));
+
+ if (s == "icase")
+ ic = true;
+ else
+ throw invalid_argument ("invalid flag '" + s + '\'');
+ }
+ }
+
+ auto i (find_if (vs.begin (), vs.end (),
+ [ic, y = convert<string> (move (v))] (const string& x)
+ {
+ return (ic ? icasecmp (x, y) : x.compare (y)) == 0;
+ }));
+
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
void
string_functions (function_map& m)
{
function_family f (m, "string");
- f["string"] += [](string s) {return s;};
-
- // @@ Shouldn't it concatenate elements into the single string?
- // @@ Doesn't seem to be used so far. Can consider removing.
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
//
- // f["string"] += [](strings v) {return v;};
+ f["string"] += [](string* s)
+ {
+ return s != nullptr ? move (*s) : string ();
+ };
// Compare ASCII strings ignoring case and returning the boolean value.
//
@@ -77,23 +104,118 @@ namespace build2
return names {name (ucase (convert<string> (move (s))))};
};
+ // $size(<strings>)
+ //
+ // Return the number of elements in the sequence.
+ //
+ f["size"] += [] (strings v) {return v.size ();};
+
+ // $size(<string>)
+ //
+ // Return the number of characters (bytes) in the string.
+ //
+ f["size"] += [] (string v) {return v.size ();};
+
+ // $sort(<strings> [, <flags>])
+ //
+ // Sort strings in ascending order.
+ //
+ // The following flags are supported:
+ //
+ // icase - sort ignoring case
+ //
+ // dedup - in addition to sorting also remove duplicates
+ //
+ f["sort"] += [](strings v, optional<names> fs)
+ {
+ bool ic (false);
+ bool dd (false);
+ if (fs)
+ {
+ for (name& f: *fs)
+ {
+ string s (convert<string> (move (f)));
+
+ if (s == "icase")
+ ic = true;
+ else if (s == "dedup")
+ dd = true;
+ else
+ throw invalid_argument ("invalid flag '" + s + '\'');
+ }
+ }
+
+ sort (v.begin (), v.end (),
+ [ic] (const string& x, const string& y)
+ {
+ return (ic ? icasecmp (x, y) : x.compare (y)) < 0;
+ });
+
+ if (dd)
+ v.erase (unique (v.begin(), v.end(),
+ [ic] (const string& x, const string& y)
+ {
+ return (ic ? icasecmp (x, y) : x.compare (y)) == 0;
+ }),
+ v.end ());
+
+ return v;
+ };
+
+ // $find(<strings>, <string>[, <flags>])
+ //
+ // Return true if the string sequence contains the specified string.
+ //
+ // The following flags are supported:
+ //
+ // icase - compare ignoring case
+ //
+ // See also $regex.find_{match,search}().
+ //
+ f["find"] += [](strings vs, value v, optional<names> fs)
+ {
+ return find_index (vs, move (v), move (fs)) != vs.size ();
+ };
+
+ // $find_index(<strings>, <string>[, <flags>])
+ //
+ // Return the index of the first element in the string sequence that
+ // is equal to the specified string or $size(<strings>) if none is
+ // found.
+ //
+ // The following flags are supported:
+ //
+ // icase - compare ignoring case
+ //
+ f["find_index"] += [](strings vs, value v, optional<names> fs)
+ {
+ return find_index (vs, move (v), move (fs));
+ };
+
// String-specific overloads from builtins.
//
function_family b (m, "builtin");
- b[".concat"] += [](string l, string r) {l += r; return l;};
+ // Note that we must handle NULL values (relied upon by the parser to
+ // provide concatenation semantics consistent with untyped values).
+ //
+ b[".concat"] += [](string* l, string* r)
+ {
+ return l != nullptr
+ ? r != nullptr ? move (*l += *r) : move (*l)
+ : r != nullptr ? move (*r) : string ();
+ };
- b[".concat"] += [](string l, names ur)
+ b[".concat"] += [](string* l, names* ur)
{
- l += convert<string> (move (ur));
- return l;
+ string r (ur != nullptr ? convert<string> (move (*ur)) : string ());
+ return l != nullptr ? move (*l += r) : move (r);
};
- b[".concat"] += [](names ul, string r)
+ b[".concat"] += [](names* ul, string* r)
{
- string l (convert<string> (move (ul)));
- l += r;
- return l;
+ string l (ul != nullptr ? convert<string> (move (*ul)) : string ());
+ return r != nullptr ? move (l += *r) : move (l);
};
}
}
diff --git a/libbuild2/functions-target-triplet.cxx b/libbuild2/functions-target-triplet.cxx
index 4b0ec02..b89cadf 100644
--- a/libbuild2/functions-target-triplet.cxx
+++ b/libbuild2/functions-target-triplet.cxx
@@ -13,13 +13,28 @@ namespace build2
{
function_family f (m, "target_triplet");
- f["string"] += [](target_triplet t) {return t.string ();};
- f["representation"] += [](target_triplet t) {return t.representation ();};
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](target_triplet* t)
+ {
+ return t != nullptr ? t->string () : string ();
+ };
+
+ f["representation"] += [](target_triplet t)
+ {
+ return t.representation ();
+ };
// Target triplet-specific overloads from builtins.
//
function_family b (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result will unlikely be what the user expected. So for now
+ // we keep it a bit tighter.
+ //
b[".concat"] += [](target_triplet l, string sr) {return l.string () + sr;};
b[".concat"] += [](string sl, target_triplet r) {return sl + r.string ();};
diff --git a/libbuild2/in/init.cxx b/libbuild2/in/init.cxx
index 18071f8..2fb73e1 100644
--- a/libbuild2/in/init.cxx
+++ b/libbuild2/in/init.cxx
@@ -34,7 +34,10 @@ namespace build2
// Enter variables.
//
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// Alternative variable substitution symbol with '$' being the
// default.
@@ -58,7 +61,27 @@ namespace build2
// is still stricter than the autoconf's semantics which also leaves
// unknown substitutions as is.
//
- vp.insert<string> ("in.substitution");
+ const variable& im (vp.insert<string> ("in.mode"));
+
+ // Original name of this variable for backwards compatibility.
+ //
+ vp.insert_alias (im, "in.substitution");
+
+ // Substitution map. Substitutions can be specified as key-value pairs
+ // rather than buildfile variables. This map is checked before the
+ // variables. An absent value in key-value has the NULL semantics.
+ //
+ // This mechanism has two primary uses: Firstly, it allows us to have
+ // substitution names that cannot be specified as buildfile variables.
+ // For example, a name may start with an underscore and thus be
+ // reserved or it may refer to one of the predefined variables such as
+ // `include` or `extension` that may have a wrong visibility and/or
+ // type.
+ //
+ // Secondly, this mechanism allows us to encapsulate a group of
+ // substitutions and pass this group around as a single value.
+ //
+ vp.insert<map<string, optional<string>>> ("in.substitutions");
// Fallback value to use for NULL value substitutions. If unspecified,
// NULL substitutions are an error.
diff --git a/libbuild2/in/rule.cxx b/libbuild2/in/rule.cxx
index 2569948..74bc2a7 100644
--- a/libbuild2/in/rule.cxx
+++ b/libbuild2/in/rule.cxx
@@ -23,14 +23,14 @@ namespace build2
namespace in
{
bool rule::
- match (action a, target& xt, const string&) const
+ match (action a, target& xt) const
{
tracer trace ("in::rule::match");
if (!xt.is_a<file> ()) // See module init() for details.
return false;
- file& t (static_cast<file&> (xt));
+ file& t (xt.as<file> ());
bool fi (false); // Found in.
for (prerequisite_member p: group_prerequisite_members (a, t))
@@ -53,7 +53,7 @@ namespace build2
recipe rule::
apply (action a, target& xt) const
{
- file& t (static_cast<file&> (xt));
+ file& t (xt.as<file> ());
// Derive the file name.
//
@@ -108,7 +108,7 @@ namespace build2
// Substitution mode.
//
bool strict (strict_);
- if (const string* s = cast_null<string> (t["in.substitution"]))
+ if (const string* s = cast_null<string> (t["in.mode"]))
{
if (*s == "lax")
strict = false;
@@ -116,6 +116,11 @@ namespace build2
fail << "invalid substitution mode '" << *s << "'";
}
+ // Substitution map.
+ //
+ const substitution_map* smap (
+ cast_null<map<string, optional<string>>> (t["in.substitutions"]));
+
// NULL substitutions.
//
optional<string> null;
@@ -157,6 +162,10 @@ namespace build2
l4 ([&]{trace << "substitution mode mismatch forcing update of"
<< t;});
+ // Then additional depdb entries, if any.
+ //
+ perform_update_depdb (a, t, dd);
+
// Then the .in file.
//
if (dd.expect (i.path ()) != nullptr)
@@ -209,39 +218,61 @@ namespace build2
{
// The line format is:
//
- // <ln> <name> <hash>
+ // <ln> <name> <hash>[/<flags>]
//
// Note that <name> can contain spaces (see the constraint check
- // expressions in the version module).
+ // expressions in the version module). That's the reason why we
+ // use the `/` separator for <flags> instead of the more natural
+ // space.
//
char* e (nullptr);
uint64_t ln (strtoull (s->c_str (), &e, 10));
- size_t p1 (*e == ' ' ? e - s->c_str () : string::npos);
- size_t p2 (s->rfind (' '));
+ size_t p1 (*e == ' ' ? e - s->c_str () : string::npos); // <name>
+ size_t p2 (s->rfind (' ')); // <hash>
if (p1 != string::npos && p2 != string::npos && p2 - p1 > 1)
{
- string n (*s, p1 + 1, p2 - p1 - 1);
+ ++p1;
+ string name (*s, p1, p2 - p1);
- // Note that we have to call substitute(), not lookup() since it
- // can be overriden with custom substitution semantics.
- //
- optional<string> v (
- substitute (location (ip, ln), a, t, n, strict, null));
+ ++p2;
+ size_t p3 (s->find ('/', p2)); // <flags>
- assert (v); // Rule semantics change without version increment?
+ optional<uint64_t> flags;
+ if (p3 != string::npos)
+ {
+ uint64_t v (strtoull (s->c_str () + p3 + 1, &e, 10));
+ if (*e == '\0')
+ flags = v;
+ }
- if (s->compare (p2 + 1,
- string::npos,
- sha256 (*v).string ()) == 0)
+ if (p3 == string::npos || flags)
{
- dd_skip++;
- continue;
+ // Note that we have to call substitute(), not lookup() since
+ // it can be overridden with custom substitution semantics.
+ //
+ optional<string> v (
+ substitute (location (ip, ln),
+ a, t,
+ name, flags,
+ strict, smap, null));
+
+ assert (v); // Rule semantics change without version increment?
+
+ if (p3 != string::npos)
+ p3 -= p2; // Hash length.
+
+ if (s->compare (p2, p3, sha256 (*v).string ()) == 0)
+ {
+ dd_skip++;
+ continue;
+ }
+ else
+ l4 ([&]{trace << name << " variable value mismatch forcing "
+ << "update of " << t;});
}
- else
- l4 ([&]{trace << n << " variable value mismatch forcing "
- << "update of " << t;});
+
// Fall through.
}
@@ -268,7 +299,35 @@ namespace build2
if (verb >= 2)
text << program_ << ' ' << ip << " >" << tp;
else if (verb)
- text << program_ << ' ' << ip;
+ {
+ // If we straight print the target, in most cases we will end up with
+ // something ugly like in{version...h.in} (due to the in{} target
+ // type search semantics). There is the `...h` part but also the
+ // `.in` part that is redundant given in{}. So let's tidy this up
+ // a bit if the extension could have been derived by in_search().
+ //
+ target_key ik (i.key ());
+
+ if (ik.ext)
+ {
+ string& ie (*ik.ext);
+ const string* te (t.ext ());
+
+ size_t in (ie.size ());
+ size_t tn (te != nullptr ? te->size () : 0);
+
+ if (in == tn + (tn != 0 ? 1 : 0) + 2) // [<te>.]in
+ {
+ if (ie.compare (in - 2, 2, "in") == 0)
+ {
+ if (tn == 0 || (ie.compare (0, tn, *te) == 0 && ie[tn] == '.'))
+ ie.clear ();
+ }
+ }
+ }
+
+ print_diag (program_.c_str (), move (ik), t);
+ }
// Read and process the file, one line at a time, while updating depdb.
//
@@ -313,7 +372,7 @@ namespace build2
#endif
auto_rmfile arm (tp);
- // Note: this default will only be used if the file if empty (i.e.,
+ // Note: this default will only be used if the file is empty (i.e.,
// does not contain even a newline).
//
const char* nl (
@@ -324,8 +383,8 @@ namespace build2
#endif
);
- string s; // Reuse the buffer.
- for (size_t ln (1);; ++ln)
+ uint64_t ln (1);
+ for (string s;; ++ln)
{
what = "read"; whom = &ip;
if (!getline (ifs, s))
@@ -338,97 +397,31 @@ namespace build2
if (crlf)
s.pop_back();
- // Not tracking column for now (see also depdb above).
- //
- const location l (ip, ln);
-
- // Scan the line looking for substiutions in the $<name>$ form. In
- // the strict mode treat $$ as an escape sequence.
- //
- for (size_t b (0), n, d; b != (n = s.size ()); b += d)
- {
- d = 1;
-
- if (s[b] != sym)
- continue;
-
- // Note that in the lax mode these should still be substitutions:
- //
- // @project@@
- // @@project@
-
- // Find the other end.
- //
- size_t e (b + 1);
- for (; e != (n = s.size ()); ++e)
- {
- if (s[e] == sym)
- {
- if (strict && e + 1 != n && s[e + 1] == sym) // Escape.
- s.erase (e, 1); // Keep one, erase the other.
- else
- break;
- }
- }
-
- if (e == n)
- {
- if (strict)
- fail (l) << "unterminated '" << sym << "'" << endf;
-
- break;
- }
-
- if (e - b == 1) // Escape (or just double symbol in the lax mode).
- {
- if (strict)
- s.erase (b, 1); // Keep one, erase the other.
+ what = "write"; whom = &tp;
+ if (ln != 1)
+ ofs << nl;
- continue;
- }
+ nl = crlf ? "\r\n" : "\n"; // Preserve the original line ending.
- // We have a (potential, in the lax mode) substition with b
- // pointing to the opening symbol and e -- to the closing.
- //
- string name (s, b + 1, e - b -1);
- if (optional<string> val =
- substitute (l, a, t, name, strict, null))
- {
- // Save in depdb.
- //
- if (dd_skip == 0)
- {
- // The line format is:
- //
- // <ln> <name> <hash>
- //
- string s (to_string (ln));
- s += ' ';
- s += name;
- s += ' ';
- s += sha256 (*val).string ();
- dd.write (s);
- }
- else
- --dd_skip;
+ if (ln == 1)
+ perform_update_pre (a, t, ofs, nl);
- // Patch the result in and adjust the delta.
- //
- s.replace (b, e - b + 1, *val);
- d = val->size ();
- }
- else
- d = e - b + 1; // Ignore this substitution.
- }
+ // Not tracking column for now (see also depdb above).
+ //
+ process (location (ip, ln),
+ a, t,
+ dd, dd_skip,
+ s, 0,
+ nl, sym, strict, smap, null);
- what = "write"; whom = &tp;
- if (ln != 1)
- ofs << nl; // See below.
ofs << s;
-
- nl = crlf ? "\r\n" : "\n"; // Preserve the original line ending.
}
+ what = "write"; whom = &tp;
+ if (ln == 1)
+ perform_update_pre (a, t, ofs, nl);
+ perform_update_post (a, t, ofs, nl);
+
// Close depdb before closing the output file so its mtime is not
// newer than of the output.
//
@@ -462,56 +455,147 @@ namespace build2
return prerequisite_target (&build2::search (t, p), i);
}
- string rule::
- lookup (const location& loc,
- action,
- const target& t,
- const string& n,
- const optional<string>& null) const
+ void rule::
+ perform_update_depdb (action, const target&, depdb&) const
{
- auto l (t[n]);
+ }
- if (l.defined ())
+ void rule::
+ perform_update_pre (action, const target&, ofdstream&, const char*) const
+ {
+ }
+
+ void rule::
+ perform_update_post (action, const target&, ofdstream&, const char*) const
+ {
+ }
+
+ void rule::
+ process (const location& l,
+ action a, const target& t,
+ depdb& dd, size_t& dd_skip,
+ string& s, size_t b,
+ const char* nl,
+ char sym,
+ bool strict,
+ const substitution_map* smap,
+ const optional<string>& null) const
+ {
+ // Scan the line looking for substitutions in the $<name>$ form. In the
+ // strict mode treat $$ as an escape sequence.
+ //
+ for (size_t n, d; b != (n = s.size ()); b += d)
{
- value v (*l);
+ d = 1;
- if (v.null)
+ if (s[b] != sym)
+ continue;
+
+ // Note that in the lax mode these should still be substitutions:
+ //
+ // @project@@
+ // @@project@
+
+ // Find the other end.
+ //
+ size_t e (b + 1);
+ for (; e != (n = s.size ()); ++e)
{
- if (null)
- return *null;
- else
- fail (loc) << "null value in variable '" << n << "'" <<
- info << "use in.null to specify null value substiution string";
+ if (s[e] == sym)
+ {
+ if (strict && e + 1 != n && s[e + 1] == sym) // Escape.
+ s.erase (e, 1); // Keep one, erase the other.
+ else
+ break;
+ }
}
- // For typed values call string() for conversion.
+ if (e == n)
+ {
+ if (strict)
+ fail (l) << "unterminated '" << sym << "'";
+
+ break;
+ }
+
+ if (e - b == 1) // Escape (or just double symbol in the lax mode).
+ {
+ if (strict)
+ s.erase (b, 1); // Keep one, erase the other.
+
+ continue;
+ }
+
+ // We have a (potential, in the lax mode) substitution with b pointing
+ // to the opening symbol and e -- to the closing.
//
- try
+ if (optional<string> val = substitute (l,
+ a, t,
+ dd, dd_skip,
+ string (s, b + 1, e - b -1),
+ nullopt /* flags */,
+ strict, smap, null))
{
- return convert<string> (
- v.type == nullptr
- ? move (v)
- : t.ctx.functions.call (&t.base_scope (),
- "string",
- vector_view<value> (&v, 1),
- loc));
+ replace_newlines (*val, nl);
+
+ // Patch the result in and adjust the delta.
+ //
+ s.replace (b, e - b + 1, *val);
+ d = val->size ();
}
- catch (const invalid_argument& e)
+ else
+ d = e - b + 1; // Ignore this substitution.
+ }
+ }
+
+ optional<string> rule::
+ substitute (const location& l,
+ action a, const target& t,
+ depdb& dd, size_t& dd_skip,
+ const string& n,
+ optional<uint64_t> flags,
+ bool strict,
+ const substitution_map* smap,
+ const optional<string>& null) const
+ {
+ optional<string> val (substitute (l, a, t, n, flags, strict, smap, null));
+
+ if (val)
+ {
+ // Save in depdb.
+ //
+ if (dd_skip == 0)
{
- fail (loc) << e <<
- info << "while substituting '" << n << "'" << endf;
+ // The line format is:
+ //
+ // <ln> <name> <hash>[/<flags>]
+ //
+ string s (to_string (l.line));
+ s += ' ';
+ s += n;
+ s += ' ';
+ s += sha256 (*val).string ();
+ if (flags)
+ {
+ s += '/';
+ s += to_string (*flags);
+ }
+ dd.write (s);
}
+ else
+ --dd_skip;
}
- else
- fail (loc) << "undefined variable '" << n << "'" << endf;
+
+ return val;
}
optional<string> rule::
substitute (const location& l,
- action a,
- const target& t,
+ action a, const target& t,
const string& n,
+ optional<uint64_t> flags,
bool strict,
+ const substitution_map* smap,
const optional<string>& null) const
{
// In the lax mode scan the fragment to make sure it is a variable name
@@ -536,7 +620,75 @@ namespace build2
}
}
- return lookup (l, a, t, n, null);
+ return lookup (l, a, t, n, flags, smap, null);
+ }
+
+ string rule::
+ lookup (const location& loc,
+ action, const target& t,
+ const string& n,
+ optional<uint64_t> flags,
+ const substitution_map* smap,
+ const optional<string>& null) const
+ {
+ assert (!flags);
+
+ // First look in the substitution map.
+ //
+ if (smap != nullptr)
+ {
+ auto i (smap->find (n));
+
+ if (i != smap->end ())
+ {
+ if (i->second)
+ return *i->second;
+
+ if (null)
+ return *null;
+
+ fail (loc) << "null value in substitution map entry '" << n << "'" <<
+ info << "use in.null to specify null value substiution string";
+ }
+ }
+
+ // Next look for the buildfile variable.
+ //
+ auto l (t[n]);
+
+ if (l.defined ())
+ {
+ value v (*l);
+
+ if (v.null)
+ {
+ if (null)
+ return *null;
+
+ fail (loc) << "null value in variable '" << n << "'" <<
+ info << "use in.null to specify null value substiution string";
+ }
+
+ // For typed values call string() for conversion.
+ //
+ try
+ {
+ return convert<string> (
+ v.type == nullptr
+ ? move (v)
+ : t.ctx.functions.call (&t.base_scope (),
+ "string",
+ vector_view<value> (&v, 1),
+ loc));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (loc) << e <<
+ info << "while substituting '" << n << "'" << endf;
+ }
+ }
+ else
+ fail (loc) << "undefined variable '" << n << "'" << endf;
}
}
}
diff --git a/libbuild2/in/rule.hxx b/libbuild2/in/rule.hxx
index 2fa1305..369fd93 100644
--- a/libbuild2/in/rule.hxx
+++ b/libbuild2/in/rule.hxx
@@ -5,6 +5,7 @@
#define LIBBUILD2_IN_RULE_HXX
#include <libbuild2/types.hxx>
+#include <libbuild2/forward.hxx> // depdb
#include <libbuild2/utility.hxx>
#include <libbuild2/rule.hxx>
@@ -17,8 +18,9 @@ namespace build2
{
// Preprocess an .in file.
//
- // Note that a derived rule can use the target data pad to cache data
- // (e.g., in match()) to be used in substitute/lookup() calls.
+ // Note that a derived rule can use the target auxiliary data storage to
+ // cache data (e.g., in match() or apply()) to be used in substitute() and
+ // lookup() calls.
//
// Note also that currently this rule ignores the dry-run mode (see
// perform_update() for the rationale).
@@ -42,7 +44,7 @@ namespace build2
null_ (move (null)) {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -50,24 +52,45 @@ namespace build2
virtual target_state
perform_update (action, const target&) const;
- // Customization hooks.
+ // Customization hooks and helpers.
//
+ using substitution_map = map<string, optional<string>>;
// Perform prerequisite search.
//
virtual prerequisite_target
- search (action,
- const target&,
+ search (action, const target&,
const prerequisite_member&,
include_type) const;
+ // Additional depdb entries.
+ //
+ virtual void
+ perform_update_depdb (action, const target&, depdb&) const;
+
+ // Pre/post update.
+ //
+ virtual void
+ perform_update_pre (action, const target&,
+ ofdstream&, const char* newline) const;
+
+ virtual void
+ perform_update_post (action, const target&,
+ ofdstream&, const char* newline) const;
+
// Perform variable lookup.
//
+ // Flags can be used by a custom implementation to alter the lookup
+ // semantics, for example, for special substitutions. Note, however,
+ // that one must make sure this semantics cannot change without changes
+ // to the .in file (see the depdb logic for details).
+ //
virtual string
lookup (const location&,
- action,
- const target&,
+ action, const target&,
const string& name,
+ optional<uint64_t> flags,
+ const substitution_map*,
const optional<string>& null) const;
// Perform variable substitution. Return nullopt if it should be
@@ -75,12 +98,61 @@ namespace build2
//
virtual optional<string>
substitute (const location&,
- action,
- const target&,
+ action, const target&,
+ const string& name,
+ optional<uint64_t> flags,
+ bool strict,
+ const substitution_map*,
+ const optional<string>& null) const;
+
+ // Call the above version and do any necessary depdb saving.
+ //
+ optional<string>
+ substitute (const location&,
+ action, const target&,
+ depdb& dd, size_t& dd_skip,
const string& name,
+ optional<uint64_t> flags,
bool strict,
+ const substitution_map*,
const optional<string>& null) const;
+ // Process a line of input from the specified position performing any
+ // necessary substitutions.
+ //
+ virtual void
+ process (const location&,
+ action, const target&,
+ depdb& dd, size_t& dd_skip,
+ string& line, size_t pos,
+ const char* newline,
+ char sym,
+ bool strict,
+ const substitution_map*,
+ const optional<string>& null) const;
+
+ // Replace newlines in a multi-line value with the given newline
+ // sequence.
+ //
+ static void
+ replace_newlines (string& v, const char* newline)
+ {
+ for (size_t p (0), n; (p = v.find ('\n', p)) != string::npos; p += n)
+ {
+ n = 1;
+
+ // Deal with CRLF in the value.
+ //
+ if (p != 0 && v[p - 1] == '\r')
+ {
+ --p;
+ ++n;
+ }
+
+ v.replace (p, n, newline);
+ }
+ }
+
protected:
const string rule_id_;
const string program_;
diff --git a/libbuild2/in/target.cxx b/libbuild2/in/target.cxx
index d9bc8a7..54130ff 100644
--- a/libbuild2/in/target.cxx
+++ b/libbuild2/in/target.cxx
@@ -20,6 +20,14 @@ namespace build2
if (!e)
{
+ // Why is the extension, say, .h.in and not .in (with .h being in the
+ // name)? While this is mostly academic (in this case things will work
+ // the same either way), conceptually, it is a header template rather
+ // than some file template. In other words, we are adding the second
+ // level classification.
+ //
+ // See also the low verbosity tidying up code in the rule.
+ //
if (const file* t = xt.is_a<file> ())
{
const string& te (t->derive_extension ());
@@ -51,9 +59,9 @@ namespace build2
&target_extension_none,
nullptr, /* default_extension */ // Taken care of by search.
&in_pattern,
- &target_print_1_ext_verb, // Same as file.
+ &target_print_1_ext_verb, // Same as file (but see rule).
&in_search,
- false
+ target_type::flag::none
};
}
}
diff --git a/libbuild2/in/target.hxx b/libbuild2/in/target.hxx
index 20a0c44..619c06e 100644
--- a/libbuild2/in/target.hxx
+++ b/libbuild2/in/target.hxx
@@ -35,11 +35,14 @@ namespace build2
class LIBBUILD2_IN_SYMEXPORT in: public file
{
public:
- using file::file;
+ in (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
diff --git a/libbuild2/install/functions.cxx b/libbuild2/install/functions.cxx
index 5668efe..1de4d3e 100644
--- a/libbuild2/install/functions.cxx
+++ b/libbuild2/install/functions.cxx
@@ -15,17 +15,125 @@ namespace build2
{
function_family f (m, "install");
- // Resolve potentially relative install.* value to an absolute directory
- // based on (other) install.* values visible from the calling scope.
+ // $install.resolve(<dir>[, <rel_base>])
+ //
+ // @@ TODO: add overload to call resolve_file().
+ //
+ // Resolve potentially relative install.* value to an absolute and
+ // normalized directory based on (other) install.* values visible from
+ // the calling scope.
+ //
+ // If rel_base is specified and is not empty, then make the resulting
+ // directory relative to it. If rel_base itself is relative, first
+ // resolve it to an absolute and normalized directory based on install.*
+ // values. Note that this argument is mandatory if this function is
+ // called during relocatable installation (install.relocatable is true).
+ // While you can pass empty directory to suppress this functionality,
+ // make sure this does not render the result non-relocatable.
+ //
+ // As an example, consider an executable that supports loading plugins
+ // and requires the plugin installation directory to be embedded into
+ // the executable during the build. The common way to support
+ // relocatable installations for such cases is to embed a path relative
+ // to the executable and complete it at runtime. If you would like to
+ // always use the relative path, regardless of whether the installation
+ // is relocatable or not, then you can simply always pass rel_base, for
+ // example:
+ //
+ // plugin_dir = $install.resolve($install.lib, $install.bin)
+ //
+ // Alternatively, if you would like to continue using absolute paths for
+ // non-relocatable installations, then you can use something like this:
+ //
+ // plugin_dir = $install.resolve($install.lib, ($install.relocatable ? $install.bin : [dir_path] ))
+ //
+ // Finally, if you are unable to support relocatable installations, the
+ // correct way to handle this is NOT to always pass an empty path for
+ // rel_base but rather assert in root.build that your project does not
+ // support relocatable installations, for example:
+ //
+ // assert (!$install.relocatable) 'relocatable installation not supported'
//
// Note that this function is not pure.
//
- f.insert (".resolve", false) += [] (const scope* s, dir_path d)
+ f.insert (".resolve", false) += [] (const scope* s,
+ dir_path dir,
+ optional<dir_path> rel_base)
{
if (s == nullptr)
fail << "install.resolve() called out of scope" << endf;
- return resolve_dir (*s, move (d));
+ if (!rel_base)
+ {
+ const scope& rs (*s->root_scope ());
+
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ fail << "relocatable installation requires relative base "
+ << "directory" <<
+ info << "pass empty relative base directory if this call does "
+ << "not affect installation relocatability" <<
+ info << "or add `assert (!$install.relocatable) 'relocatable "
+ << "installation not supported'` before the call";
+ }
+ }
+
+ return resolve_dir (*s,
+ move (dir),
+ rel_base ? move (*rel_base) : dir_path ());
+ };
+
+ // @@ TODO: add $install.chroot().
+
+ // $install.filter(<path>[, <type>])
+ //
+ // Apply filters from config.install.filter and return true if the
+ // specified filesystem entry should be installed/uninstalled. Note that
+ // the entry is specified as an absolute and normalized installation
+ // path (so not $path($>) but $install.resolve($>)).
+ //
+ // The type argument can be one of `regular`, `directory`, or `symlink`.
+ // If unspecified, either `directory` or `regular` is assumed, based on
+ // whether path is syntactically a directory (ends with a directory
+ // separator).
+ //
+ // Note that this function is not pure.
+ //
+ f.insert (".filter", false) += [] (const scope* s,
+ path p,
+ optional<names> ot)
+ {
+ if (s == nullptr)
+ fail << "install.filter() called out of scope" << endf;
+
+ entry_type t;
+ if (ot)
+ {
+ string v (convert<string> (move (*ot)));
+
+ if (v == "regular") t = entry_type::regular;
+ else if (v == "directory") t = entry_type::directory;
+ else if (v == "symlink") t = entry_type::symlink;
+ else throw invalid_argument ("unknown type '" + v + '\'');
+ }
+ else
+ t = p.to_directory () ? entry_type::directory : entry_type::regular;
+
+ // Split into directory and leaf.
+ //
+ dir_path d;
+ if (t == entry_type::directory)
+ {
+ d = path_cast<dir_path> (move (p));
+ p = path (); // No leaf.
+ }
+ else
+ {
+ d = p.directory ();
+ p.make_leaf ();
+ }
+
+ return filter_entry (*s->root_scope (), d, p, t);
};
}
}
diff --git a/libbuild2/install/init.cxx b/libbuild2/install/init.cxx
index 25dc845..0b33475 100644
--- a/libbuild2/install/init.cxx
+++ b/libbuild2/install/init.cxx
@@ -166,6 +166,8 @@ namespace build2
bool global (*name == '\0');
+ auto& vp (rs.var_pool (true /* default */)); // All qualified.
+
if (spec)
{
vn = "config.install";
@@ -175,7 +177,7 @@ namespace build2
vn += name;
}
vn += var;
- const variable& vr (rs.var_pool ().insert<CT> (move (vn)));
+ const variable& vr (vp.insert<CT> (move (vn)));
using config::lookup_config;
@@ -192,7 +194,7 @@ namespace build2
vn = "install.";
vn += name;
vn += var;
- const variable& vr (rs.var_pool ().insert<T> (move (vn)));
+ const variable& vr (vp.insert<T> (move (vn)));
value& v (rs.assign (vr));
@@ -236,7 +238,7 @@ namespace build2
// This one doesn't have config.* value (only set in a buildfile).
//
if (!global)
- rs.var_pool ().insert<bool> (string ("install.") + n + ".subdirs");
+ rs.var_pool (true).insert<bool> (string ("install.") + n + ".subdirs");
}
void
@@ -250,6 +252,20 @@ namespace build2
context& ctx (rs.ctx);
+ // Enter module variables (note that init() below enters some more).
+ //
+ // The install variable is a path, not dir_path, since it can be used
+ // to both specify the target directory (to install with the same file
+ // name) or target file (to install with a different name). And the
+ // way we distinguish between the two is via the presence/absence of
+ // the trailing directory separator.
+ //
+ // Plus it can have the special true/false values when acting as an
+ // operation variable.
+ //
+ auto& ovar (rs.var_pool ().insert<path> ("install",
+ variable_visibility::target));
+
// Register the install function family if this is the first instance of
// the install modules.
//
@@ -258,9 +274,9 @@ namespace build2
// Register our operations.
//
- rs.insert_operation (install_id, op_install);
- rs.insert_operation (uninstall_id, op_uninstall);
- rs.insert_operation (update_for_install_id, op_update_for_install);
+ rs.insert_operation (install_id, op_install, &ovar);
+ rs.insert_operation (uninstall_id, op_uninstall, &ovar);
+ rs.insert_operation (update_for_install_id, op_update_for_install, &ovar);
}
static const path cmd ("install");
@@ -269,24 +285,26 @@ namespace build2
//
#define DIR(N, V) static const dir_path dir_##N (V)
- DIR (data_root, dir_path ("root"));
- DIR (exec_root, dir_path ("root"));
+ DIR (data_root, dir_path ("root"));
+ DIR (exec_root, dir_path ("root"));
- DIR (sbin, dir_path ("exec_root") /= "sbin");
- DIR (bin, dir_path ("exec_root") /= "bin");
- DIR (lib, (dir_path ("exec_root") /= "lib") /= "<private>");
- DIR (libexec, ((dir_path ("exec_root") /= "libexec") /= "<private>") /= "<project>");
- DIR (pkgconfig, dir_path ("lib") /= "pkgconfig");
+ DIR (sbin, dir_path ("exec_root") /= "sbin");
+ DIR (bin, dir_path ("exec_root") /= "bin");
+ DIR (lib, (dir_path ("exec_root") /= "lib") /= "<private>");
+ DIR (libexec, ((dir_path ("exec_root") /= "libexec") /= "<private>") /= "<project>");
+ DIR (pkgconfig, dir_path ("lib") /= "pkgconfig");
- DIR (etc, dir_path ("data_root") /= "etc");
- DIR (include, (dir_path ("data_root") /= "include") /= "<private>");
- DIR (share, dir_path ("data_root") /= "share");
- DIR (data, (dir_path ("share") /= "<private>") /= "<project>");
+ DIR (etc, dir_path ("data_root") /= "etc");
+ DIR (include, (dir_path ("data_root") /= "include") /= "<private>");
+ DIR (include_arch, dir_path ("include"));
+ DIR (share, dir_path ("data_root") /= "share");
+ DIR (data, (dir_path ("share") /= "<private>") /= "<project>");
+ DIR (buildfile, ((dir_path ("share") /= "build2") /= "export") /= "<project>");
- DIR (doc, ((dir_path ("share") /= "doc") /= "<private>") /= "<project>");
- DIR (legal, dir_path ("doc"));
- DIR (man, dir_path ("share") /= "man");
- DIR (man1, dir_path ("man") /= "man1");
+ DIR (doc, ((dir_path ("share") /= "doc") /= "<private>") /= "<project>");
+ DIR (legal, dir_path ("doc"));
+ DIR (man, dir_path ("share") /= "man");
+ DIR (man1, dir_path ("man") /= "man1");
#undef DIR
@@ -312,22 +330,17 @@ namespace build2
// Enter module variables.
//
- auto& vp (rs.var_pool ());
+ rs.var_pool ().insert<bool> ("for_install", variable_visibility::prereq);
+
+ // The rest of the variables we enter are qualified so go straight
+ // for the public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// Note that the set_dir() calls below enter some more.
//
- {
- // The install variable is a path, not dir_path, since it can be used
- // to both specify the target directory (to install with the same file
- // name) or target file (to install with a different name). And the
- // way we distinguish between the two is via the presence/absence of
- // the trailing directory separator.
- //
- vp.insert<path> ("install", variable_visibility::target);
- vp.insert<bool> ("for_install", variable_visibility::prereq);
- vp.insert<string> ("install.mode");
- vp.insert<bool> ("install.subdirs");
- }
+ vp.insert<string> ("install.mode");
+ vp.insert<bool> ("install.subdirs");
// Environment.
//
@@ -372,25 +385,30 @@ namespace build2
const auto& gr (group_rule_);
bs.insert_rule<alias> (perform_install_id, "install.alias", ar);
- bs.insert_rule<alias> (perform_uninstall_id, "uninstall.alias", ar);
+ bs.insert_rule<alias> (perform_uninstall_id, "install.alias", ar);
bs.insert_rule<fsdir> (perform_install_id, "install.fsdir", dr);
bs.insert_rule<fsdir> (perform_uninstall_id, "install.fsdir", dr);
bs.insert_rule<file> (perform_install_id, "install.file", fr);
- bs.insert_rule<file> (perform_uninstall_id, "uninstall.file", fr);
+ bs.insert_rule<file> (perform_uninstall_id, "install.file", fr);
- bs.insert_rule<target> (perform_install_id, "install.file", gr);
- bs.insert_rule<target> (perform_uninstall_id, "uninstall.file", gr);
+ // Note: use mtime_target (instead of target) to take precedence over
+ // the fallback file rules below.
+ //
+ bs.insert_rule<mtime_target> (perform_install_id, "install.group", gr);
+ bs.insert_rule<mtime_target> (perform_uninstall_id, "install.group", gr);
// Register the fallback file rule for the update-for-[un]install
// operation, similar to update.
//
- rs.global_scope ().insert_rule<mtime_target> (
- perform_install_id, "install.file", fr);
+ // @@ Hm, it's a bit fuzzy why we would be updating-for-install
+ // something outside of any project..?
+ //
+ scope& gs (rs.global_scope ());
- rs.global_scope ().insert_rule<mtime_target> (
- perform_uninstall_id, "uninstall.file", fr);
+ gs.insert_rule<mtime_target> (perform_install_id, "install.file", fr);
+ gs.insert_rule<mtime_target> (perform_uninstall_id, "install.file", fr);
}
// Configuration.
@@ -404,9 +422,9 @@ namespace build2
using config::lookup_config;
using config::specified_config;
- // Note: ignore config.install.scope (see below).
+ // Note: ignore config.install.{scope,manifest} (see below).
//
- bool s (specified_config (rs, "install", {"scope"}));
+ bool s (specified_config (rs, "install", {"scope", "manifest"}));
// Adjust module priority so that the (numerous) config.install.*
// values are saved at the end of config.build.
@@ -443,6 +461,123 @@ namespace build2
config::unsave_variable (rs, v);
}
+ // config.install.manifest
+ //
+ // Installation manifest. Valid values are a file path or `-` to dump
+ // the manifest to stdout.
+ //
+ // If specified during the install operation, then write the
+ // information about all the filesystem entries being installed into
+ // the manifest. If specified during uninstall, then remove the
+ // filesystem entries according to the manifest as opposed to the
+ // current build state. In particular, this functionality can be used
+ // to avoid surprising (and potentially lengthy) updates during
+ // uninstall that may happen because of changes to system-installed
+ // dependencies (for example, the compiler or standard library).
+ //
+ // @@ TODO: manifest uninstall is still TODO.
+ //
+ // Note: there is a single manifest per operation and thus this
+ // variable can only be specified as a global override. (While it
+ // could be handy to save this varible in config.build in some
+ // situations, supporting this will complicate the global override
+ // case). Note that as a result the manifest file path may not be
+ // specified in terms of the config.install.* values.
+ //
+ // Note also that the manifest is produced even in the dry-run mode.
+ // However, in this case no directory creation is tracked.
+ //
+ // The format of the installation manifest is "JSON lines", that is,
+ // each line is a JSON text (this makes it possible to reverse the
+ // order of lines without loading the entire file into memory). For
+ // example (indented lines indicate line continuations):
+ //
+ // {"type":"directory","path":"/tmp/install","mode":"755"}
+ // {"type":"target","name":"/tmp/libhello/libs{hello}",
+ // "entries":[
+ // {"type":"file","path":"/tmp/install/lib/libhello-1.0.so","mode":"755"},
+ // {"type":"symlink","path":"/tmp/install/lib/libhello.so","target":"libhello-1.0.so"}]}
+ //
+ // Each line is a serialization of one of the following non-abstract
+ // C++ structs:
+ //
+ // struct entry // abstract
+ // {
+ // enum {directory, file, symlink, target} type;
+ // };
+ //
+ // struct filesystem_entry: entry // abstract
+ // {
+ // path path;
+ // };
+ //
+ // struct directory_entry: filesystem_entry
+ // {
+ // string mode;
+ // };
+ //
+ // struct file_entry: filesystem_entry
+ // {
+ // string mode;
+ // };
+ //
+ // struct symlink_entry: filesystem_entry
+ // {
+ // path target;
+ // };
+ //
+ // struct target_entry: entry
+ // {
+ // string name;
+ // vector<filesystem_entry*> entries;
+ // };
+ //
+ // New entry types may be added later. Additional entry members may be
+ // added later to existing entries after the existing members.
+ //
+ // If installation is relocatable (see config.install.relocatable) and
+ // the installation manifest file path is inside config.install.root
+ // (including chroot), then absolute filesystem_entry::path's are
+ // saved as relative to the manifest file's directory (note that
+ // symlink_entry::target cannot be absolute in relocatable
+ // installation).
+ //
+ {
+ auto& v (vp.insert<path> ("config.install.manifest"));
+
+ // If specified, verify it is a global override.
+ //
+ if (lookup l = rs[v])
+ {
+ if (!l.belongs (rs.global_scope ()))
+ fail << "config.install.manifest must be a global override" <<
+ info << "specify !config.install.manifest=...";
+ }
+
+ config::unsave_variable (rs, v);
+ }
+
+ // Support for relocatable install.
+ //
+ // Note that it is false by default since supporting relocatable
+ // installation may require extra effort and not all projects may
+ // support it. A project that is known not to support it should assert
+ // this fact in its root.build, for example:
+ //
+ // assert (!$install.relocatable) 'relocatable installation not supported'
+ //
+ {
+ auto& var (vp.insert<bool> ( "install.relocatable"));
+ auto& cvar (vp.insert<bool> ("config.install.relocatable"));
+
+ value& v (rs.assign (var));
+
+ // Note: unlike other variables, for ease of assertion set it to
+ // false if no config.install.* is specified.
+ //
+ v = s && cast_false<bool> (lookup_config (rs, cvar, false));
+ }
+
// Support for private install (aka poor man's Flatpack).
//
const dir_path* p;
@@ -480,35 +615,109 @@ namespace build2
}
}
- // Global config.install.* values.
+ // config.install.filter
//
- set_dir (s, p, rs, "", abs_dir_path (), false, "644", "755", cmd);
-
- set_dir (s, p, rs, "root", abs_dir_path ());
-
- set_dir (s, p, rs, "data_root", dir_data_root);
- set_dir (s, p, rs, "exec_root", dir_exec_root, false, "755");
+ // Installation filtering. The value of this variable is a list of
+ // key-value pairs that specify the filesystem entries to include or
+ // exclude from the installation. For example, the following filters
+ // will omit installing headers and static libraries (notice the
+ // quoting of the wildcard).
+ //
+ // config.install.filter='include/@false "*.a"@false'
+ //
+ // The key in each pair is a file or directory path or a path wildcard
+ // pattern. If a key is relative and contains a directory component or
+ // is a directory, then it is treated relative to the corresponding
+ // config.install.* location. Otherwise (simple path, normally a
+ // pattern), it is matched against the leaf of any path. Note that if
+ // an absolute path is specified, it should be without the
+ // config.install.chroot prefix.
+ //
+ // The value in each pair is either true (include) or false (exclude).
+ // The filters are evaluated in the order specified and the first
+ // match that is found determines the outcome. If no match is found,
+ // the default is to include. For a directory, while false means
+ // exclude all the sub-paths inside this directory, true does not mean
+ // that all the sub-paths will be included wholesale. Rather, the
+ // matched component of the sub-path is treated as included with the
+ // rest of the components matched against the following
+ // sub-filters. For example:
+ //
+ // config.install.filter='
+ // include/x86_64-linux-gnu/@true
+ // include/x86_64-linux-gnu/details/@false
+ // include/@false'
+ //
+ // The true or false value may be followed by comma and the `symlink`
+ // modifier to only apply to symlink filesystem entries. For example:
+ //
+ // config.install.filter='"*.so"@false,symlink'
+ //
+ // Note that this mechanism only affects what gets physically copied
+ // to the installation directory without affecting what gets built for
+ // install or the view of what gets installed at the buildfile level.
+ // For example, given the `include/@false *.a@false` filters, static
+ // libraries will still be built (unless arranged not to with
+ // config.bin.lib) and the pkg-config files will still end up with -I
+ // options pointing to the header installation directory. Note also
+ // that this mechanism applies to both install and uninstall
+ // operations.
+ //
+ // If you are familiar with the Debian or Fedora packaging, this
+ // mechanism is somewhat similar to (and can be used for a similar
+ // purpose as) the Debian's .install files and Fedora's %files spec
+ // file sections that are used to split the installation into multiple
+ // binary packages.
+ //
+ {
+ auto& var (vp.insert<filters> ( "install.filter"));
+ auto& cvar (vp.insert<filters> ("config.install.filter"));
- set_dir (s, p, rs, "sbin", dir_sbin);
- set_dir (s, p, rs, "bin", dir_bin);
- set_dir (s, p, rs, "lib", dir_lib);
- set_dir (s, p, rs, "libexec", dir_libexec);
- set_dir (s, p, rs, "pkgconfig", dir_pkgconfig, false, "644");
+ value& v (rs.assign (var));
- set_dir (s, p, rs, "etc", dir_etc);
- set_dir (s, p, rs, "include", dir_include);
- set_dir (s, p, rs, "share", dir_share);
- set_dir (s, p, rs, "data", dir_data);
+ if (s)
+ {
+ if (lookup l = lookup_config (rs, cvar, nullptr))
+ v = cast<filters> (l);
+ }
+ }
- set_dir (s, p, rs, "doc", dir_doc);
- set_dir (s, p, rs, "legal", dir_legal);
- set_dir (s, p, rs, "man", dir_man);
- set_dir (s, p, rs, "man1", dir_man1);
+ // Global config.install.* values.
+ //
+ set_dir (s, p, rs, "", abs_dir_path (), false, "644", "755", cmd);
+
+ set_dir (s, p, rs, "root", abs_dir_path ());
+
+ set_dir (s, p, rs, "data_root", dir_data_root);
+ set_dir (s, p, rs, "exec_root", dir_exec_root, false, "755");
+
+ set_dir (s, p, rs, "sbin", dir_sbin);
+ set_dir (s, p, rs, "bin", dir_bin);
+ set_dir (s, p, rs, "lib", dir_lib);
+ set_dir (s, p, rs, "libexec", dir_libexec);
+ set_dir (s, p, rs, "pkgconfig", dir_pkgconfig, false, "644");
+
+ set_dir (s, p, rs, "etc", dir_etc);
+ set_dir (s, p, rs, "include", dir_include);
+ set_dir (s, p, rs, "include_arch", dir_include_arch);
+ set_dir (s, p, rs, "share", dir_share);
+ set_dir (s, p, rs, "data", dir_data);
+ set_dir (s, p, rs, "buildfile", dir_buildfile);
+
+ set_dir (s, p, rs, "doc", dir_doc);
+ set_dir (s, p, rs, "legal", dir_legal);
+ set_dir (s, p, rs, "man", dir_man);
+ set_dir (s, p, rs, "man1", dir_man1);
}
// Configure "installability" for built-in target types.
//
+ // Note that for exe{} we also set explicit 755 mode in case it gets
+ // installed somewhere else where the default is not 755 (for example to
+ // libexec/, which on Debian has the 644 mode).
+ //
install_path<exe> (bs, dir_path ("bin"));
+ install_mode<exe> (bs, "755");
install_path<doc> (bs, dir_path ("doc"));
install_path<legal> (bs, dir_path ("legal"));
install_path<man> (bs, dir_path ("man"));
diff --git a/libbuild2/install/operation.cxx b/libbuild2/install/operation.cxx
index 54d5b9a..ce5d24a 100644
--- a/libbuild2/install/operation.cxx
+++ b/libbuild2/install/operation.cxx
@@ -3,8 +3,15 @@
#include <libbuild2/install/operation.hxx>
+#include <sstream>
+
+#include <libbuild2/scope.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/context.hxx>
#include <libbuild2/variable.hxx>
+#include <libbuild2/install/utility.hxx>
+
using namespace std;
using namespace butl;
@@ -12,22 +19,356 @@ namespace build2
{
namespace install
{
+#ifndef BUILD2_BOOTSTRAP
+ context_data::
+ context_data (const path* mf)
+ : manifest_name (mf),
+ manifest_os (mf != nullptr
+ ? open_file_or_stdout (manifest_name, manifest_ofs)
+ : manifest_ofs),
+ manifest_autorm (manifest_ofs.is_open () ? *mf : path ()),
+ manifest_json (manifest_os, 0 /* indentation */)
+ {
+ if (manifest_ofs.is_open ())
+ {
+ manifest_file = *mf;
+ manifest_file.complete ();
+ manifest_file.normalize ();
+ }
+ }
+
+ static path
+ relocatable_path (context_data& d, const target& t, path p)
+ {
+ // This is both inefficient (re-detecting relocatable manifest for every
+ // path) and a bit dirty (if multiple projects are being installed with
+ // different install.{relocatable,root} values, we may end up producing
+ // some paths relative and some absolute). But doing either of these
+ // properly is probably not worth the extra complexity.
+ //
+ if (!d.manifest_file.empty ()) // Not stdout.
+ {
+ const scope& rs (t.root_scope ());
+
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ // Note: install.root is abs_dir_path so absolute and normalized.
+ //
+ const dir_path* root (cast_null<dir_path> (rs["install.root"]));
+ if (root == nullptr)
+ fail << "unknown installation root directory in " << rs <<
+ info << "did you forget to specify config.install.root?";
+
+ // The manifest path would include chroot so if used, we need to add
+ // it to root and the file path (we could also strip it, but then
+ // making it absolute gets tricky on Windows).
+ //
+ dir_path md (d.manifest_file.directory ());
+
+ if (md.sub (chroot_path (rs, *root))) // Inside installation root.
+ {
+ p = chroot_path (rs, p);
+ try
+ {
+ p = p.relative (md);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make filesystem entry path " << p
+ << " relative to " << md <<
+ info << "required for relocatable installation manifest";
+ }
+ }
+ }
+ }
+
+ return p;
+ }
+
+ // Serialize current target and, if tgt is not NULL, start the new target.
+ //
+ // Note that we always serialize directories as top-level entries. And
+ // theoretically we can end up "splitting" a target with a directory
+ // creation. For example, if some files that belong to the target are
+ // installed into subdirectories that have not yet been created. So we
+ // have to cache the information for the current target in memory and only
+ // flush it once we see the next target (or the end).
+ //
+ // You may be wondering why not just serialize directories as target
+ // entries. While we could do that, it's not quite correct conceptually,
+ // since this would be the first of potentially many targets that caused
+ // the directory's creation. To put it another way, while files and
+ // symlinks belong to targets, directories do not.
+ //
+ static void
+ manifest_flush_target (context_data& d, const target* tgt)
+ {
+ if (d.manifest_target != nullptr)
+ {
+ assert (!d.manifest_target_entries.empty ());
+
+ // Target name format is the same as in the structured result output.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (1, 0));
+ os << *d.manifest_target;
+
+ try
+ {
+ auto& s (d.manifest_json);
+
+ s.begin_object ();
+ s.member ("type", "target");
+ s.member ("name", os.str ());
+ s.member_name ("entries");
+ s.begin_array ();
+
+ for (const auto& e: d.manifest_target_entries)
+ {
+ path p (relocatable_path (d, *d.manifest_target, move (e.path)));
+
+ s.begin_object ();
+
+ if (e.target.empty ())
+ {
+ s.member ("type", "file");
+ s.member ("path", p.string ());
+ s.member ("mode", e.mode);
+ }
+ else
+ {
+ s.member ("type", "symlink");
+ s.member ("path", p.string ());
+ s.member ("target", e.target.string ());
+ }
+
+ s.end_object ();
+ }
+
+ s.end_array (); // entries member
+ s.end_object (); // target object
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+
+ d.manifest_target_entries.clear ();
+ }
+
+ d.manifest_target = tgt;
+ }
+
+ void context_data::
+ manifest_install_d (context& ctx,
+ const target& tgt,
+ const dir_path& dir,
+ const string& mode)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ try
+ {
+ auto& s (d.manifest_json);
+
+ // If we moved to the next target, flush the current one.
+ //
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, nullptr);
+
+ s.begin_object ();
+ s.member ("type", "directory");
+ s.member ("path", relocatable_path (d, tgt, dir).string ());
+ s.member ("mode", mode);
+ s.end_object ();
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+ }
+ }
+
+ void context_data::
+ manifest_install_f (context& ctx,
+ const target& tgt,
+ const dir_path& dir,
+ const path& name,
+ const string& mode)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, &tgt);
+
+ d.manifest_target_entries.push_back (
+ manifest_target_entry {dir / name, mode, path ()});
+ }
+ }
+
+ void context_data::
+ manifest_install_l (context& ctx,
+ const target& tgt,
+ const path& link_target,
+ const dir_path& dir,
+ const path& link)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, &tgt);
+
+ d.manifest_target_entries.push_back (
+ manifest_target_entry {dir / link, "", link_target});
+ }
+ }
+
+ static void
+ manifest_close (context& ctx)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ try
+ {
+ manifest_flush_target (d, nullptr);
+
+ d.manifest_os << '\n'; // Final newline.
+
+ if (d.manifest_ofs.is_open ())
+ {
+ d.manifest_ofs.close ();
+ d.manifest_autorm.cancel ();
+ }
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+ }
+ }
+#else
+ context_data::
+ context_data (const path*)
+ {
+ }
+
+ void context_data::
+ manifest_install_d (context&,
+ const target&,
+ const dir_path&,
+ const string&)
+ {
+ }
+
+ void context_data::
+ manifest_install_f (context&,
+ const target&,
+ const dir_path&,
+ const path&,
+ const string&)
+ {
+ }
+
+ void context_data::
+ manifest_install_l (context&,
+ const target&,
+ const path&,
+ const dir_path&,
+ const path&)
+ {
+ }
+
+ static void
+ manifest_close (context&)
+ {
+ }
+#endif
+
static operation_id
- install_pre (const values& params, meta_operation_id mo, const location& l)
+ pre_install (context&,
+ const values&,
+ meta_operation_id mo,
+ const location&)
{
- if (!params.empty ())
- fail (l) << "unexpected parameters for operation install";
+ // Run update as a pre-operation, unless we are disfiguring.
+ //
+ return mo != disfigure_id ? update_id : 0;
+ }
+ static operation_id
+ pre_uninstall (context&,
+ const values&,
+ meta_operation_id mo,
+ const location&)
+ {
// Run update as a pre-operation, unless we are disfiguring.
//
return mo != disfigure_id ? update_id : 0;
}
+ static void
+ install_pre (context& ctx,
+ const values& params,
+ bool inner,
+ const location& l)
+ {
+ if (!params.empty ())
+ fail (l) << "unexpected parameters for operation install";
+
+ if (inner)
+ {
+ // See if we need to write the installation manifest.
+ //
+ // Note: go straight for the public variable pool.
+ //
+ const path* mf (
+ cast_null<path> (
+ ctx.global_scope[*ctx.var_pool.find ("config.install.manifest")]));
+
+ // Note that we cannot calculate whether the manifest should use
+ // relocatable (relative) paths once here since we don't know the
+ // value of config.install.root.
+
+ ctx.current_inner_odata = context::current_data_ptr (
+ new context_data (mf),
+ [] (void* p) {delete static_cast<context_data*> (p);});
+ }
+ }
+
+ static void
+ install_post (context& ctx, const values&, bool inner)
+ {
+ if (inner)
+ manifest_close (ctx);
+ }
+
// Note that we run both install and uninstall serially. The reason for
// this is all the fuzzy things we are trying to do like removing empty
// outer directories if they are empty. If we do this in parallel, then
// those things get racy. Also, since all we do here is creating/removing
// files, there is not going to be much speedup from doing it in parallel.
+ // There is also now the installation manifest, which relies on us
+ // installing all the filesystem entries of a target serially.
const operation_info op_install {
install_id,
@@ -39,8 +380,10 @@ namespace build2
"has nothing to install", // We cannot "be installed".
execution_mode::first,
0 /* concurrency */, // Run serially.
- &install_pre,
+ &pre_install,
nullptr,
+ &install_pre,
+ &install_post,
nullptr,
nullptr
};
@@ -64,7 +407,9 @@ namespace build2
"is not installed",
execution_mode::last,
0 /* concurrency */, // Run serially
- &install_pre,
+ &pre_uninstall,
+ nullptr,
+ nullptr,
nullptr,
nullptr,
nullptr
@@ -82,8 +427,10 @@ namespace build2
op_update.name_done,
op_update.mode,
op_update.concurrency,
- op_update.pre,
- op_update.post,
+ op_update.pre_operation,
+ op_update.post_operation,
+ op_update.operation_pre,
+ op_update.operation_post,
op_update.adhoc_match,
op_update.adhoc_apply
};
diff --git a/libbuild2/install/operation.hxx b/libbuild2/install/operation.hxx
index c1f5416..bd818b4 100644
--- a/libbuild2/install/operation.hxx
+++ b/libbuild2/install/operation.hxx
@@ -4,10 +4,15 @@
#ifndef LIBBUILD2_INSTALL_OPERATION_HXX
#define LIBBUILD2_INSTALL_OPERATION_HXX
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/operation.hxx>
+#include <libbuild2/filesystem.hxx> // auto_rmfile
namespace build2
{
@@ -16,6 +21,65 @@ namespace build2
extern const operation_info op_install;
extern const operation_info op_uninstall;
extern const operation_info op_update_for_install;
+
+ // Set as context::current_inner_odata during the install/uninstall inner
+ // operations.
+ //
+ struct context_data
+ {
+ // Manifest.
+ //
+#ifndef BUILD2_BOOTSTRAP
+ path manifest_file; // Absolute and normalized, empty if `-`.
+ path_name manifest_name; // Original path/name.
+ ofdstream manifest_ofs;
+ ostream& manifest_os;
+ auto_rmfile manifest_autorm;
+ butl::json::stream_serializer manifest_json;
+ const target* manifest_target = nullptr; // Target being installed.
+ struct manifest_target_entry
+ {
+ build2::path path;
+ string mode;
+ build2::path target;
+ };
+ vector<manifest_target_entry> manifest_target_entries;
+#endif
+
+ // The following manifest_install_[dfl]() functions correspond to (and
+ // are called from) file_rule::install_[dfl]().
+
+ // install -d -m <mode> <dir>
+ //
+ static void
+ manifest_install_d (context&,
+ const target&,
+ const dir_path& dir,
+ const string& mode);
+
+ // install -m <mode> <file> <dir>/<name>
+ //
+ static void
+ manifest_install_f (context&,
+ const target& file,
+ const dir_path& dir,
+ const path& name,
+ const string& mode);
+
+ // install -l <link_target> <dir>/<link>
+ //
+ static void
+ manifest_install_l (context&,
+ const target&,
+ const path& link_target,
+ const dir_path& dir,
+ const path& link);
+
+ // Constructor.
+ //
+ explicit
+ context_data (const path* manifest);
+ };
}
}
diff --git a/libbuild2/install/rule.cxx b/libbuild2/install/rule.cxx
index 2d81067..20a4bc3 100644
--- a/libbuild2/install/rule.cxx
+++ b/libbuild2/install/rule.cxx
@@ -13,6 +13,8 @@
#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/install/operation.hxx>
+
using namespace std;
using namespace butl;
@@ -37,12 +39,28 @@ namespace build2
return r.simple () && r.string () == "false" ? nullptr : &r;
}
+ // Note that the below rules are called for both install and
+ // update-for-install.
+ //
+ // @@ TODO: we clearly need a module class.
+ //
+ static inline const variable&
+ var_install (const scope& rs)
+ {
+ context& ctx (rs.ctx);
+
+ return *rs.root_extra->operations[
+ (ctx.current_outer_oif != nullptr
+ ? ctx.current_outer_oif
+ : ctx.current_inner_oif)->id].ovar;
+ }
+
// alias_rule
//
const alias_rule alias_rule::instance;
bool alias_rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
// We always match.
//
@@ -105,7 +123,7 @@ namespace build2
// iterates over all its members.
//
if (!is)
- is = install_scope (t);
+ is = a.operation () != update_id ? install_scope (t) : nullptr;
const target* pt (filter (*is, a, t, i));
if (pt == nullptr)
@@ -125,7 +143,7 @@ namespace build2
//
// Note: not the same as lookup_install() above.
//
- auto l ((*pt)["install"]);
+ auto l ((*pt)[var_install (*p.scope.root_scope ())]);
if (l && cast<path> (l).string () == "false")
{
l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
@@ -136,8 +154,8 @@ namespace build2
// libu{}) then ignore it if there is no rule to install.
//
if (pt->is_a<file> ())
- build2::match (a, *pt);
- else if (!try_match (a, *pt).first)
+ match_sync (a, *pt);
+ else if (!try_match_sync (a, *pt).first)
{
l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
pt = nullptr;
@@ -155,7 +173,7 @@ namespace build2
const fsdir_rule fsdir_rule::instance;
bool fsdir_rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
// We always match.
//
@@ -179,7 +197,7 @@ namespace build2
if (a.operation () == update_id)
{
match_inner (a, t);
- return &execute_inner;
+ return inner_recipe;
}
else
return noop_recipe;
@@ -190,10 +208,10 @@ namespace build2
const group_rule group_rule::instance (false /* see_through_only */);
bool group_rule::
- match (action a, target& t, const string& h) const
+ match (action a, target& t) const
{
- return (!see_through || t.type ().see_through) &&
- alias_rule::match (a, t, h);
+ return (!see_through_only || t.type ().see_through ()) &&
+ alias_rule::match (a, t);
}
const target* group_rule::
@@ -202,6 +220,25 @@ namespace build2
return &m;
}
+ const target* group_rule::
+ filter (const scope* is,
+ action, const target& t, const prerequisite& p) const
+ {
+ // The same logic as in file_rule::filter() below.
+ //
+ if (p.is_a<exe> ())
+ {
+ const scope& rs (*p.scope.root_scope ());
+
+ if (p.vars.empty () ||
+ cast_empty<path> (p.vars[var_install (rs)]).string () != "true")
+ return nullptr;
+ }
+
+ const target& pt (search (t, p));
+ return is == nullptr || pt.in (*is) ? &pt : nullptr;
+ }
+
recipe group_rule::
apply (action a, target& t) const
{
@@ -211,7 +248,7 @@ namespace build2
//
// Remember that we are called twice: first during update for install
// (pre-operation) and then during install. During the former, we rely
- // on the normall update rule to resolve the group members. During the
+ // on the normal update rule to resolve the group members. During the
// latter, there will be no rule to do this but the group will already
// have been resolved by the pre-operation.
//
@@ -221,8 +258,10 @@ namespace build2
? resolve_members (a, t)
: t.group_members (a));
- if (gv.members != nullptr)
+ if (gv.members != nullptr && gv.count != 0)
{
+ const scope& rs (t.root_scope ());
+
auto& pts (t.prerequisite_targets[a]);
for (size_t i (0); i != gv.count; ++i)
{
@@ -245,14 +284,14 @@ namespace build2
//
// Note: not the same as lookup_install() above.
//
- auto l ((*mt)["install"]);
+ auto l ((*mt)[var_install (rs)]);
if (l && cast<path> (l).string () == "false")
{
l5 ([&]{trace << "ignoring " << *mt << " (not installable)";});
continue;
}
- build2::match (a, *mt);
+ match_sync (a, *mt);
pts.push_back (mt); // Never ad hoc.
}
}
@@ -268,7 +307,7 @@ namespace build2
const file_rule file_rule::instance;
bool file_rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
// We always match, even if this target is not installable (so that we
// can ignore it; see apply()).
@@ -288,13 +327,19 @@ namespace build2
filter (const scope* is,
action, const target& t, const prerequisite& p) const
{
+ // See also group_rule::filter() with identical semantics.
+ //
if (p.is_a<exe> ())
{
- // Feels like one day this should be unified with include (see
- // context::var_include).
+ const scope& rs (*p.scope.root_scope ());
+
+ // Note that while include() checks for install=false, here we need to
+ // check for explicit install=true. We could have re-used the lookup
+ // performed by include(), but then we would have had to drag it
+ // through and also diagnose any invalid values.
//
if (p.vars.empty () ||
- cast_empty<path> (p.vars["install"]).string () != "true")
+ cast_empty<path> (p.vars[var_install (rs)]).string () != "true")
return nullptr;
}
@@ -306,7 +351,7 @@ namespace build2
apply (action a, target& t) const
{
recipe r (apply_impl (a, t));
- return r != nullptr ? r : noop_recipe;
+ return r != nullptr ? move (r) : noop_recipe;
}
recipe file_rule::
@@ -366,7 +411,7 @@ namespace build2
// iterates over all its members.
//
if (!is)
- is = install_scope (t);
+ is = a.operation () != update_id ? install_scope (t) : nullptr;
const target* pt (filter (*is, a, t, i));
@@ -381,7 +426,7 @@ namespace build2
//
// Note: not the same as lookup_install() above.
//
- auto l ((*pt)["install"]);
+ auto l ((*pt)[var_install (*p.scope.root_scope ())]);
if (l && cast<path> (l).string () == "false")
{
l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
@@ -396,10 +441,10 @@ namespace build2
// when updating static installable content (headers, documentation,
// etc).
//
- if (build2::match (a, *pt, unmatch::unchanged).first)
+ if (match_sync (a, *pt, unmatch::unchanged).first)
pt = nullptr;
}
- else if (!try_match (a, *pt).first)
+ else if (!try_match_sync (a, *pt).first)
{
l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
pt = nullptr;
@@ -510,7 +555,8 @@ namespace build2
const dir_path& d (t.out_dir ().leaf (p->out_path ()));
// Add it as another leading directory rather than modifying
- // the last one directly; somehow, it feels right.
+ // the last one directly; somehow, it feels right. Note: the
+ // result is normalized.
//
if (!d.empty ())
rs.emplace_back (rs.back ().dir / d, rs.back ());
@@ -521,8 +567,9 @@ namespace build2
return rs.back ();
}
- // Resolve installation directory name to absolute directory path. Return
- // all the super-directories leading up to the destination (last).
+ // Resolve installation directory name to absolute and normalized
+ // directory path. Return all the super-directories leading up to the
+ // destination (last).
//
// If target is not NULL, then also handle the subdirs logic.
//
@@ -621,24 +668,52 @@ namespace build2
return rs;
}
- static inline install_dirs
- resolve (const target& t, dir_path d, bool fail_unknown = true)
+ static dir_path
+ resolve_dir (const scope& s, const target* t,
+ dir_path d, dir_path rb,
+ bool fail_unknown)
{
- return resolve (t.base_scope (), &t, move (d), fail_unknown);
+ install_dirs rs (resolve (s, t, move (d), fail_unknown));
+
+ if (rs.empty ())
+ return dir_path ();
+
+ dir_path r (move (rs.back ().dir));
+
+ if (!rb.empty ())
+ {
+ dir_path b (resolve (s, t, move (rb), false).back ().dir);
+
+ try
+ {
+ r = r.relative (b);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make installation directory " << r
+ << " relative to " << b;
+ }
+ }
+
+ return r;
}
dir_path
- resolve_dir (const target& t, dir_path d, bool fail_unknown)
+ resolve_dir (const target& t, dir_path d, dir_path rb, bool fail_unknown)
{
- install_dirs r (resolve (t, move (d), fail_unknown));
- return r.empty () ? dir_path () : move (r.back ().dir);
+ return resolve_dir (t.base_scope (), &t, move (d), move (rb), fail_unknown);
}
dir_path
- resolve_dir (const scope& s, dir_path d, bool fail_unknown)
+ resolve_dir (const scope& s, dir_path d, dir_path rb, bool fail_unknown)
{
- install_dirs r (resolve (s, nullptr, move (d), fail_unknown));
- return r.empty () ? dir_path () : move (r.back ().dir);
+ return resolve_dir (s, nullptr, move (d), move (rb), fail_unknown);
+ }
+
+ static inline install_dirs
+ resolve (const target& t, dir_path d, bool fail_unknown = true)
+ {
+ return resolve (t.base_scope (), &t, move (d), fail_unknown);
}
path
@@ -654,6 +729,10 @@ namespace build2
bool n (!p->to_directory ());
dir_path d (n ? p->directory () : path_cast<dir_path> (*p));
+ if (n && d.empty ())
+ fail << "relative installation file path '" << p
+ << "' has no directory component";
+
install_dirs ids (resolve (f, d));
if (!n)
@@ -704,30 +783,15 @@ namespace build2
return s;
}
- // Given an abolute path return its chroot'ed version, if any, accoring to
- // install.chroot.
- //
- template <typename P>
- static inline P
- chroot_path (const scope& rs, const P& p)
- {
- if (const dir_path* d = cast_null<dir_path> (rs["install.chroot"]))
- {
- dir_path r (p.root_directory ());
- assert (!r.empty ()); // Must be absolute.
-
- return *d / p.leaf (r);
- }
-
- return p;
- }
-
void file_rule::
install_d (const scope& rs,
const install_dir& base,
const dir_path& d,
+ const file& t,
uint16_t verbosity)
{
+ assert (d.absolute ());
+
context& ctx (rs.ctx);
// Here is the problem: if this is a dry-run, then we will keep showing
@@ -740,7 +804,10 @@ namespace build2
// with uninstall since the directories won't be empty (because we don't
// actually uninstall any files).
//
- if (ctx.dry_run)
+ // Note that this also means we won't have the directory entries in the
+ // manifest created with dry-run. Probably not a big deal.
+ //
+ if (ctx.dry_run || !filter_entry (rs, d, path (), entry_type::directory))
return;
dir_path chd (chroot_path (rs, d));
@@ -767,13 +834,13 @@ namespace build2
dir_path pd (d.directory ());
if (pd != base.dir)
- install_d (rs, base, pd, verbosity);
+ install_d (rs, base, pd, t, verbosity);
}
cstrings args;
string reld (
- cast<string> (ctx.global_scope["build.host.class"]) == "windows"
+ ctx.build_host->class_ == "windows"
? msys_path (chd)
: relative (chd).string ());
@@ -798,10 +865,14 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "install " << chd;
+ print_diag ("install -d", chd); // See also `install -l` below.
}
- run (pp, args);
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
+
+ context_data::manifest_install_d (ctx, t, d, *base.dir_mode);
}
void file_rule::
@@ -812,14 +883,21 @@ namespace build2
const path& f,
uint16_t verbosity)
{
+ assert (name.empty () || name.simple ());
+
context& ctx (rs.ctx);
+ const path& leaf (name.empty () ? f.leaf () : name);
+
+ if (!filter_entry (rs, base.dir, leaf, entry_type::regular))
+ return;
+
path relf (relative (f));
dir_path chd (chroot_path (rs, base.dir));
string reld (
- cast<string> (ctx.global_scope["build.host.class"]) == "windows"
+ ctx.build_host->class_ == "windows"
? msys_path (chd)
: relative (chd).string ());
@@ -852,23 +930,47 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "install " << t;
+ {
+ if (name.empty ())
+ print_diag ("install", t, chd);
+ else
+ print_diag ("install", t, chd / name);
+ }
}
if (!ctx.dry_run)
- run (pp, args);
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
+
+ context_data::manifest_install_f (ctx, t, base.dir, leaf, *base.mode);
}
void file_rule::
install_l (const scope& rs,
const install_dir& base,
- const path& target,
const path& link,
+ const file& target,
+ const path& link_target,
uint16_t verbosity)
{
+ assert (link.simple () && !link.empty ());
+
context& ctx (rs.ctx);
- path rell (relative (chroot_path (rs, base.dir)));
+ if (!filter_entry (rs, base.dir, link, entry_type::symlink))
+ return;
+
+ if (link_target.absolute () &&
+ cast_false<bool> (rs["install.relocatable"]))
+ {
+ fail << "absolute symlink target " << link_target.string ()
+ << " in relocatable installation";
+ }
+
+ dir_path chd (chroot_path (rs, base.dir));
+
+ path rell (relative (chd));
rell /= link;
// We can create a symlink directly without calling ln. This, however,
@@ -882,7 +984,7 @@ namespace build2
base.sudo != nullptr ? base.sudo->c_str () : nullptr,
"ln",
"-sf",
- target.string ().c_str (),
+ link_target.string ().c_str (),
rell.string ().c_str (),
nullptr};
@@ -895,11 +997,19 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "install " << rell << " -> " << target;
+ {
+ // Without a flag it's unclear (unlike with ln) that we are creating
+ // a link. FreeBSD install(1) has the -l flag with the appropriate
+ // semantics. For consistency, we also pass -d above.
+ //
+ print_diag ("install -l", link_target, chd / link);
+ }
}
if (!ctx.dry_run)
- run (pp, args);
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
#else
// The -f part.
//
@@ -911,15 +1021,15 @@ namespace build2
if (verb >= verbosity)
{
if (verb >= 2)
- text << "ln -sf " << target.string () << ' ' << rell.string ();
+ text << "ln -sf " << link_target.string () << ' ' << rell.string ();
else if (verb)
- text << "install " << rell << " -> " << target;
+ print_diag ("install -l", link_target, chd / link);
}
if (!ctx.dry_run)
try
{
- mkanylink (target, rell, true /* copy */);
+ mkanylink (link_target, rell, true /* copy */);
}
catch (const pair<entry_type, system_error>& e)
{
@@ -931,6 +1041,12 @@ namespace build2
fail << "unable to make " << w << ' ' << rell << ": " << e.second;
}
#endif
+
+ context_data::manifest_install_l (ctx,
+ target,
+ link_target,
+ base.dir,
+ link);
}
target_state file_rule::
@@ -954,6 +1070,10 @@ namespace build2
bool n (!p.to_directory ());
dir_path d (n ? p.directory () : path_cast<dir_path> (p));
+ if (n && d.empty ())
+ fail << "relative installation file path '" << p
+ << "' has no directory component";
+
// Resolve target directory.
//
install_dirs ids (resolve (t, d));
@@ -975,7 +1095,7 @@ namespace build2
// sudo, etc).
//
for (auto i (ids.begin ()), j (i); i != ids.end (); j = i++)
- install_d (rs, *j, i->dir, verbosity); // install -d
+ install_d (rs, *j, i->dir, t, verbosity); // install -d
install_dir& id (ids.back ());
@@ -1033,7 +1153,7 @@ namespace build2
//
if (!tp.empty ())
{
- install_target (t, cast<path> (t["install"]), 1);
+ install_target (t, cast<path> (t[var_install (rs)]), 1);
r |= target_state::changed;
}
@@ -1046,9 +1166,13 @@ namespace build2
const dir_path& d,
uint16_t verbosity)
{
+ assert (d.absolute ());
+
+ context& ctx (rs.ctx);
+
// See install_d() for the rationale.
//
- if (rs.ctx.dry_run)
+ if (ctx.dry_run || !filter_entry (rs, d, path (), entry_type::directory))
return false;
dir_path chd (chroot_path (rs, d));
@@ -1095,7 +1219,7 @@ namespace build2
if (verb >= 2)
text << "rmdir " << reld;
else if (verb)
- text << "uninstall " << reld;
+ print_diag ("uninstall -d", chd);
}
try
@@ -1125,11 +1249,19 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "uninstall " << reld;
+ print_diag ("uninstall -d", chd);
}
- process pr (run_start (pp, args));
- r = run_finish_code (args, pr);
+ process pr (run_start (pp, args,
+ 0 /* stdin */,
+ 1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+ diag_buffer dbuf (ctx, args[0], pr);
+ dbuf.read ();
+ r = run_finish_code (
+ dbuf,
+ args, pr,
+ verb >= verbosity ? 1 : verb_never /* verbosity */);
}
if (!r)
@@ -1153,40 +1285,16 @@ namespace build2
return r;
}
- bool file_rule::
- uninstall_f (const scope& rs,
- const install_dir& base,
- const file* t,
- const path& name,
- uint16_t verbosity)
+ static void
+ uninstall_f_impl (const scope& rs,
+ const install_dir& base,
+ const path& f,
+ uint16_t verbosity)
{
- assert (t != nullptr || !name.empty ());
- path f (chroot_path (rs, base.dir) /
- (name.empty () ? t->path ().leaf () : name));
-
- try
- {
- // Note: don't follow symlinks so if the target is a dangling symlinks
- // we will proceed to removing it.
- //
- if (!file_exists (f, false)) // May throw (e.g., EACCES).
- return false;
- }
- catch (const system_error& e)
- {
- fail << "invalid installation path " << f << ": " << e;
- }
+ context& ctx (rs.ctx);
path relf (relative (f));
- if (verb >= verbosity && verb == 1)
- {
- if (t != nullptr)
- text << "uninstall " << *t;
- else
- text << "uninstall " << relf;
- }
-
// The same story as with uninstall -d (on Windows rm is also from
// MSYS2/Cygwin).
//
@@ -1196,7 +1304,7 @@ namespace build2
if (verb >= verbosity && verb >= 2)
text << "rm " << relf;
- if (!rs.ctx.dry_run)
+ if (!ctx.dry_run)
{
try
{
@@ -1222,13 +1330,107 @@ namespace build2
process_path pp (run_search (args[0]));
- if (verb >= verbosity && verb >= 2)
- print_process (args);
+ if (verb >= verbosity)
+ {
+ if (verb >= 2)
+ print_process (args);
+ }
+
+ if (!ctx.dry_run)
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
+ }
+ }
+
+ bool file_rule::
+ uninstall_f (const scope& rs,
+ const install_dir& base,
+ const file* t,
+ const path& name,
+ uint16_t verbosity)
+ {
+ assert (name.empty () ? t != nullptr : name.simple ());
+
+ const path& leaf (name.empty () ? t->path ().leaf () : name);
+
+ if (!filter_entry (rs, base.dir, leaf, entry_type::regular))
+ return false;
+
+ dir_path chd (chroot_path (rs, base.dir));
+ path f (chd / leaf);
+
+ try
+ {
+      // Note: don't follow symlinks so if the target is a dangling symlink
+ // we will proceed to removing it.
+ //
+ if (!file_exists (f, false)) // May throw (e.g., EACCES).
+ return false;
+ }
+ catch (const system_error& e)
+ {
+ fail << "invalid installation path " << f << ": " << e;
+ }
+
+ if (verb >= verbosity && verb == 1)
+ {
+ if (t != nullptr)
+ {
+ if (name.empty ())
+ print_diag ("uninstall", *t, chd, "<-");
+ else
+ print_diag ("uninstall", *t, f, "<-");
+ }
+ else
+ print_diag ("uninstall", f);
+ }
+
+ uninstall_f_impl (rs, base, f, verbosity);
+ return true;
+ }
+
+ bool file_rule::
+ uninstall_l (const scope& rs,
+ const install_dir& base,
+ const path& link,
+ const path& /*link_target*/,
+ uint16_t verbosity)
+ {
+ assert (link.simple () && !link.empty ());
+
+ if (!filter_entry (rs, base.dir, link, entry_type::symlink))
+ return false;
+
+ dir_path chd (chroot_path (rs, base.dir));
+ path f (chd / link);
- if (!rs.ctx.dry_run)
- run (pp, args);
+ try
+ {
+      // Note: don't follow symlinks so if the target is a dangling symlink
+ // we will proceed to removing it.
+ //
+ if (!file_exists (f, false)) // May throw (e.g., EACCES).
+ return false;
+ }
+ catch (const system_error& e)
+ {
+ fail << "invalid installation path " << f << ": " << e;
}
+ if (verb >= verbosity && verb == 1)
+ {
+      // It's dubious that showing the link target path adds anything useful
+ // here.
+ //
+#if 0
+ print_diag ("uninstall -l", target, f, "<-");
+#else
+ print_diag ("uninstall -l", f);
+#endif
+ }
+
+ uninstall_f_impl (rs, base, f, verbosity);
return true;
}
@@ -1251,6 +1453,10 @@ namespace build2
bool n (!p.to_directory ());
dir_path d (n ? p.directory () : path_cast<dir_path> (p));
+ if (n && d.empty ())
+ fail << "relative installation file path '" << p
+ << "' has no directory component";
+
// Resolve target directory.
//
install_dirs ids (resolve (t, d));
@@ -1298,7 +1504,7 @@ namespace build2
target_state r (target_state::unchanged);
if (!tp.empty ())
- r |= uninstall_target (t, cast<path> (t["install"]), 1);
+ r |= uninstall_target (t, cast<path> (t[var_install (rs)]), 1);
// Then installable ad hoc group members, if any. To be anally precise,
// we would have to do it in reverse, but that's not easy (it's a
@@ -1323,7 +1529,6 @@ namespace build2
}
}
-
// Finally handle installable prerequisites.
//
r |= reverse_execute_prerequisites (a, t);
diff --git a/libbuild2/install/rule.hxx b/libbuild2/install/rule.hxx
index 53d97d2..b319071 100644
--- a/libbuild2/install/rule.hxx
+++ b/libbuild2/install/rule.hxx
@@ -22,7 +22,7 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
// Return NULL if this prerequisite should be ignored and pointer to its
// target otherwise.
@@ -54,7 +54,7 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -78,7 +78,7 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
// Return NULL if this group member should be ignored and pointer to its
// target otherwise.
@@ -88,15 +88,25 @@ namespace build2
virtual const target*
filter (action, const target&, const target& group_member) const;
+ // Return NULL if this prerequisite should be ignored and pointer to its
+ // target otherwise.
+ //
+ // The same semantics as in file_rule below.
+ //
using alias_rule::filter; // "Unhide" to make Clang happy.
+ virtual const target*
+ filter (const scope*,
+ action, const target&,
+ const prerequisite&) const override;
+
virtual recipe
apply (action, target&) const override;
- group_rule (bool see_through_only): see_through (see_through_only) {}
+ group_rule (bool sto): see_through_only (sto) {}
static const group_rule instance;
- bool see_through;
+ bool see_through_only;
};
struct install_dir;
@@ -105,7 +115,7 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
// Return NULL if this prerequisite should be ignored and pointer to its
// target otherwise.
@@ -178,10 +188,16 @@ namespace build2
//
// install -d <dir>
//
+ // Note: <dir> is expected to be absolute.
+ //
+ // Note that the target argument only specifies which target caused
+ // this directory to be created.
+ //
static void
install_d (const scope& rs,
const install_dir& base,
const dir_path& dir,
+ const file& target,
uint16_t verbosity = 1);
// Install a file:
@@ -189,6 +205,8 @@ namespace build2
// install <file> <base>/ # if <name> is empty
// install <file> <base>/<name> # if <name> is not empty
//
+ // Note that <name> should be a simple path.
+ //
static void
install_f (const scope& rs,
const install_dir& base,
@@ -199,13 +217,25 @@ namespace build2
// Install (make) a symlink:
//
- // ln -s <target> <base>/<link>
+ // install -l <link_target> <base>/<link>
+ //
+ // Which is essentially:
+ //
+ // ln -s <link_target> <base>/<link>
+ //
+ // Note that <link> should be a simple path. Note that <link_target>
+ // must not be absolute if relocatable installation is requested
+ // (config.install.relocatable).
+ //
+ // Note that the target argument only specifies which target this
+ // symlink "belongs" to.
//
static void
install_l (const scope& rs,
const install_dir& base,
- const path& target,
const path& link,
+ const file& target,
+ const path& link_target,
uint16_t verbosity = 1);
// Uninstall (remove) a file or symlink:
@@ -223,13 +253,26 @@ namespace build2
const path& name,
uint16_t verbosity = 1);
+ // Uninstall (remove) a symlink.
+ //
+      // This is essentially uninstall_f() but with better low-verbosity
+ // diagnostics.
+ //
+ static bool
+ uninstall_l (const scope& rs,
+ const install_dir& base,
+ const path& link,
+ const path& link_target,
+ uint16_t verbosity = 1);
+
+
// Uninstall (remove) an empty directory.
//
// uninstall -d <dir>
//
- // We try to remove all the directories between base and dir but not base
- // itself unless base == dir. Return false if nothing has been removed
- // (i.e., the directories do not exist or are not empty).
+ // We try to remove all the directories between base and dir but not
+ // base itself unless base == dir. Return false if nothing has been
+ // removed (i.e., the directories do not exist or are not empty).
//
static bool
uninstall_d (const scope& rs,
diff --git a/libbuild2/install/utility.cxx b/libbuild2/install/utility.cxx
index 17b1365..c8b6a92 100644
--- a/libbuild2/install/utility.cxx
+++ b/libbuild2/install/utility.cxx
@@ -3,6 +3,9 @@
#include <libbuild2/install/utility.hxx>
+#include <libbuild2/variable.hxx>
+#include <libbuild2/diagnostics.hxx>
+
namespace build2
{
namespace install
@@ -12,6 +15,8 @@ namespace build2
{
context& ctx (t.ctx);
+ // Note: go straight for the public variable pool.
+ //
const variable& var (*ctx.var_pool.find ("config.install.scope"));
if (const string* s = cast_null<string> (ctx.global_scope[var]))
@@ -30,5 +35,261 @@ namespace build2
return nullptr;
}
+
+ bool
+ filter_entry (const scope& rs,
+ const dir_path& base,
+ const path& leaf,
+ entry_type type)
+ {
+ assert (type != entry_type::unknown &&
+ (type == entry_type::directory) == leaf.empty ());
+
+ const filters* fs (cast_null<filters> (rs["install.filter"]));
+
+ if (fs == nullptr || fs->empty ())
+ return true;
+
+ tracer trace ("install::filter");
+
+ // Parse, resolve, and apply each filter in order.
+ //
+ // If redoing all this work for every entry proves too slow, we can
+ // consider some form of caching (e.g., on the per-project basis).
+ //
+ auto i (fs->begin ());
+
+ bool negate (false);
+ if (i->first == "!")
+ {
+ negate = true;
+ ++i;
+ }
+
+ size_t limit (0); // See below.
+
+ for (auto e (fs->end ()); i != e; ++i)
+ {
+ const pair<string, optional<string>>& kv (*i);
+
+ path k;
+ try
+ {
+ k = path (kv.first);
+
+ if (k.absolute ())
+ k.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "invalid path '" << kv.first << "' in config.install.filter "
+ << "value";
+ }
+
+ bool v;
+ {
+ const string& s (kv.second ? *kv.second : string ());
+
+ size_t p (s.find (','));
+
+ if (s.compare (0, p, "true") == 0)
+ v = true;
+ else if (s.compare (0, p, "false") == 0)
+ v = false;
+ else
+ fail << "expected true or false instead of '" << string (s, 0, p)
+ << "' in config.install.filter value";
+
+ if (p != string::npos)
+ {
+ if (s.compare (p + 1, string::npos, "symlink") == 0)
+ {
+ if (type != entry_type::symlink)
+ continue;
+ }
+ else
+ fail << "unknown modifier '" << string (s, p + 1) << "' in "
+ << "config.install.filter value";
+ }
+ }
+
+ // @@ TODO (see below for all the corner cases). Note that in a sense
+ // we already have the match file in any subdirectory support via
+ // simple patterns so perhaps this is not worth the trouble. Or we
+ // could support some limited form (e.g., `**` should be in the
+ // last component). But it may still be tricky to determine if
+ // it is a sub-filter.
+ //
+ if (path_pattern_recursive (k))
+ fail << "recursive wildcard pattern '" << kv.first << "' in "
+ << "config.install.filter value";
+
+ if (k.simple () && !k.to_directory ())
+ {
+ // Simple name/pattern matched against the leaf.
+ //
+ // @@ What if it is `**`?
+ //
+ if (path_pattern (k))
+ {
+ if (!path_match (leaf, k))
+ continue;
+ }
+ else
+ {
+ if (k != leaf)
+ continue;
+ }
+ }
+ else
+ {
+ // Split into directory and leaf.
+ //
+ // @@ What if leaf is `**`?
+ //
+ dir_path d;
+ if (k.to_directory ())
+ {
+ d = path_cast<dir_path> (move (k));
+ k = path (); // No leaf.
+ }
+ else
+ {
+ d = k.directory ();
+ k.make_leaf ();
+ }
+
+ // Resolve relative directory.
+ //
+ // Note that this resolution is potentially project-specific (that
+          // is, different projects may have different install.* locations).
+ //
+ // Note that if the first component is/contains a wildcard (e.g.,
+          // `*/`), then the resolution will fail, which feels correct (what
+ // does */ mean?).
+ //
+ if (d.relative ())
+ {
+ // @@ Strictly speaking, this should be base, not root scope.
+ //
+ d = resolve_dir (rs, move (d));
+ }
+
+ // Return the number of path components in the path.
+ //
+ auto path_comp = [] (const path& p)
+ {
+ size_t n (0);
+ for (auto i (p.begin ()); i != p.end (); ++i)
+ ++n;
+ return n;
+ };
+
+ // We need the sub() semantics but which uses pattern match instead
+ // of equality for the prefix. Looks like chopping off the path and
+ // calling path_match() on that is the best we can do.
+ //
+ // @@ Assumes no `**` components.
+ //
+ auto path_sub = [&path_comp] (const dir_path& ent,
+ const dir_path& pat,
+ size_t n = 0)
+ {
+ if (n == 0)
+ n = path_comp (pat);
+
+ dir_path p;
+ for (auto i (ent.begin ()); n != 0 && i != ent.end (); --n, ++i)
+ p.combine (*i, i.separator ());
+
+ return path_match (p, pat);
+ };
+
+ // The following checks should continue on no match and fall through
+ // to return.
+ //
+ if (k.empty ()) // Directory.
+ {
+ // Directories have special semantics.
+ //
+ // Consider this sequence of filters:
+ //
+ // include/x86_64-linux-gnu/@true
+ // include/x86_64-linux-gnu/details/@false
+ // include/@false
+ //
+ // It seems the semantics we want is that only subcomponent
+ // filters should apply. Maybe remember the latest matched
+ // directory as a current limit? But perhaps we don't need to
+ // remember the directory itself but the number of path
+ // components?
+ //
+ // I guess for patterns we will use the actual matched directory,
+ // not the pattern, to calculate the limit? @@ Because we
+ // currently don't support `**`, we for now can count components
+ // in the pattern.
+
+ // Check if this is a sub-filter.
+ //
+ size_t n (path_comp (d));
+ if (n <= limit)
+ continue;
+
+ if (path_pattern (d))
+ {
+ if (!path_sub (base, d, n))
+ continue;
+ }
+ else
+ {
+ if (!base.sub (d))
+ continue;
+ }
+
+ if (v)
+ {
+ limit = n;
+ continue; // Continue looking for sub-filters.
+ }
+ }
+ else
+ {
+ if (path_pattern (d))
+ {
+ if (!path_sub (base, d))
+ continue;
+ }
+ else
+ {
+ if (!base.sub (d))
+ continue;
+ }
+
+ if (path_pattern (k))
+ {
+ // @@ Does not handle `**`.
+ //
+ if (!path_match (leaf, k))
+ continue;
+ }
+ else
+ {
+ if (k != leaf)
+ continue;
+ }
+ }
+ }
+
+ if (negate)
+ v = !v;
+
+ l4 ([&]{trace << (base / leaf)
+ << (v ? " included by " : " excluded by ")
+ << kv.first << '@' << *kv.second;});
+ return v;
+ }
+
+ return !negate;
+ }
}
}
diff --git a/libbuild2/install/utility.hxx b/libbuild2/install/utility.hxx
index cc5cd53..fc40ebe 100644
--- a/libbuild2/install/utility.hxx
+++ b/libbuild2/install/utility.hxx
@@ -9,6 +9,7 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/filesystem.hxx> // entry_type
#include <libbuild2/export.hxx>
@@ -43,7 +44,7 @@ namespace build2
{
auto r (
s.target_vars[tt]["*"].insert (
- *s.var_pool ().find ("install.mode")));
+ *s.ctx.var_pool.find ("install.mode")));
if (r.second) // Already set by the user?
r.first = move (m);
@@ -61,26 +62,64 @@ namespace build2
// belong to projects outside of this scope. If it's NULL, install
// prerequisites from all projects. See also config.install.scope.
//
+ // Note that this should not apply to update-for-install. Failing that, we
+ // may end up using incompatibly-built prerequisites (e.g., a library) in
+ // a target built for install (e.g., an executable).
+ //
LIBBUILD2_SYMEXPORT const scope*
install_scope (const target&);
// Resolve relative installation directory path (e.g., include/libfoo) to
- // its absolute directory path (e.g., /usr/include/libfoo). If the
- // resolution encountered an unknown directory, issue diagnostics and fail
- // unless fail_unknown is false, in which case return empty directory.
+ // its absolute and normalized directory path (e.g., /usr/include/libfoo).
+ // If the resolution encountered an unknown directory, issue diagnostics
+ // and fail unless fail_unknown is false, in which case return empty
+ // directory.
+ //
+ // For rel_base semantics, see the $install.resolve() documentation. Note
+ // that fail_unknown does not apply to the rel_base resolution.
//
// Note: implemented in rule.cxx.
//
LIBBUILD2_SYMEXPORT dir_path
- resolve_dir (const target&, dir_path, bool fail_unknown = true);
+ resolve_dir (const target&,
+ dir_path,
+ dir_path rel_base = {},
+ bool fail_unknown = true);
LIBBUILD2_SYMEXPORT dir_path
- resolve_dir (const scope&, dir_path, bool fail_unknown = true);
+ resolve_dir (const scope&,
+ dir_path,
+ dir_path rel_base = {},
+ bool fail_unknown = true);
// Resolve file installation path returning empty path if not installable.
//
LIBBUILD2_SYMEXPORT path
resolve_file (const file&); // rule.cxx
+
+ // Given an absolute path return its chroot'ed version, if any, according to
+ // install.chroot.
+ //
+ template <typename P>
+ inline P
+ chroot_path (const scope& rs, const P& p)
+ {
+ assert (p.absolute ());
+ const dir_path* d (cast_null<dir_path> (rs["install.chroot"]));
+ return d != nullptr ? *d / p.leaf (p.root_directory ()) : p;
+ }
+
+ // Installation filtering (config.install.filter).
+ //
+ // If entry type is a directory, then leaf must be empty.
+ //
+ using filters = vector<pair<string, optional<string>>>;
+
+ LIBBUILD2_SYMEXPORT bool
+ filter_entry (const scope& rs,
+ const dir_path& base,
+ const path& leaf,
+ entry_type);
}
}
diff --git a/libbuild2/lexer.cxx b/libbuild2/lexer.cxx
index f445d4b..04c15be 100644
--- a/libbuild2/lexer.cxx
+++ b/libbuild2/lexer.cxx
@@ -42,6 +42,22 @@ namespace build2
return make_pair (make_pair (r[0], r[1]), sep_);
}
+ pair<char, bool> lexer::
+ peek_char ()
+ {
+ auto p (skip_spaces ());
+ assert (!p.second);
+ sep_ = p.first;
+
+ char r ('\0');
+
+ xchar c (peek ());
+ if (!eos (c))
+ r = c;
+
+ return make_pair (r, sep_);
+ }
+
void lexer::
mode (lexer_mode m, char ps, optional<const char*> esc, uintptr_t data)
{
@@ -144,13 +160,15 @@ namespace build2
break;
}
case lexer_mode::foreign:
- assert (data > 1);
- // Fall through.
+ {
+ assert (ps == '\0' && data > 1);
+ s = false;
+ break;
+ }
case lexer_mode::single_quoted:
case lexer_mode::double_quoted:
{
- assert (ps == '\0');
- s = false;
+ assert (false); // Can only be set manually in word().
break;
}
case lexer_mode::variable:
@@ -162,8 +180,49 @@ namespace build2
default: assert (false); // Unhandled custom mode.
}
- state_.push (
- state {m, data, nullopt, lsb, false, ps, s, n, q, *esc, s1, s2});
+ mode_impl (state {m, data, nullopt, lsb, false, ps, s, n, q, *esc, s1, s2});
+ }
+
+ void lexer::
+ mode_impl (state&& s)
+ {
+ // If we are in the double-quoted mode then, unless the new mode is eval
+ // or variable, delay the state switch until the current mode is expired.
+ // Note that we delay by injecting the new state beneath the current
+ // state.
+ //
+ if (!state_.empty () &&
+ state_.top ().mode == lexer_mode::double_quoted &&
+ s.mode != lexer_mode::eval &&
+ s.mode != lexer_mode::variable)
+ {
+ state qs (move (state_.top ())); // Save quoted state.
+ state_.top () = move (s); // Overwrite quoted state with new state.
+ state_.push (move (qs)); // Restore quoted state.
+ }
+ else
+ state_.push (move (s));
+ }
+
+ void lexer::
+ expire_mode ()
+ {
+ // If we are in the double-quoted mode, then delay the state expiration
+ // until the current mode is expired. Note that we delay by overwriting
+ // the being expired state with the current state.
+ //
+ assert (!state_.empty () &&
+ (state_.top ().mode != lexer_mode::double_quoted ||
+ state_.size () > 1));
+
+ if (state_.top ().mode == lexer_mode::double_quoted)
+ {
+ state qs (move (state_.top ())); // Save quoted state.
+ state_.pop (); // Pop quoted state.
+ state_.top () = move (qs); // Expire state, restoring quoted state.
+ }
+ else
+ state_.pop ();
}
token lexer::
@@ -654,9 +713,9 @@ namespace build2
}
token lexer::
- word (state st, bool sep)
+ word (const state& rst, bool sep)
{
- lexer_mode m (st.mode);
+ lexer_mode m (rst.mode);
xchar c (peek ());
assert (!eos (c));
@@ -687,22 +746,66 @@ namespace build2
lexeme += c;
};
- for (; !eos (c); c = peek ())
+ const state* st (&rst);
+ for (bool first (true); !eos (c); first = false, c = peek ())
{
// First handle escape sequences.
//
if (c == '\\')
{
- // In the variable mode we treat the beginning of the escape sequence
- // as a separator (think \"$foo\").
+ // In the variable mode we treat immediate `\` as the escape sequence
+ // literal and any following as a separator (think \"$foo\").
//
if (m == lexer_mode::variable)
- break;
+ {
+ if (!first)
+ break;
+
+ get ();
+ c = get ();
+
+ if (eos (c))
+ fail (c) << "unterminated escape sequence";
+
+ // For now we only support all the simple C/C++ escape sequences
+ // plus \0 (which in C/C++ is an octal escape sequence).
+ //
+ // In the future we may decide to support more elaborate sequences
+ // such as \xNN, \uNNNN, etc.
+ //
+ // Note: we return it in the literal form instead of translating for
+ // easier printing.
+ //
+ switch (c)
+ {
+ case '\'':
+ case '"':
+ case '?':
+ case '\\':
+ case '0':
+ case 'a':
+ case 'b':
+ case 'f':
+ case 'n':
+ case 'r':
+ case 't':
+ case 'v': lexeme = c; break;
+ default:
+ fail (c) << "unknown escape sequence \\" << c;
+ }
+
+ state_.pop ();
+ return token (type::escape,
+ move (lexeme),
+ sep,
+ qtype, qcomp, qfirst,
+ ln, cn);
+ }
get ();
xchar p (peek ());
- const char* esc (st.escapes);
+ const char* esc (st->escapes);
if (esc == nullptr ||
(*esc != '\0' && !eos (p) && strchr (esc, p) != nullptr))
@@ -718,7 +821,7 @@ namespace build2
continue;
}
else
- unget (c); // Treat as a normal character.
+ unget (c); // Fall through to treat as a normal character.
}
bool done (false);
@@ -747,8 +850,8 @@ namespace build2
get ();
state_.pop ();
- st = state_.top ();
- m = st.mode;
+ st = &state_.top ();
+ m = st->mode;
continue;
}
}
@@ -757,19 +860,17 @@ namespace build2
//
else if (m == lexer_mode::variable)
{
- bool first (lexeme.empty ());
-
// Handle special variable names, if any.
//
- if (first &&
- st.data != 0 &&
- strchr (reinterpret_cast<const char*> (st.data), c) != nullptr)
+ if (first &&
+ st->data != 0 &&
+ strchr (reinterpret_cast<const char*> (st->data), c) != nullptr)
{
get ();
lexeme += c;
done = true;
}
- else if (c != '_' && !(first ? alpha (c) : alnum (c)))
+ else if (c != '_' && !(lexeme.empty () ? alpha (c) : alnum (c)))
{
if (c != '.')
done = true;
@@ -789,17 +890,17 @@ namespace build2
{
// First check if it's a pair separator.
//
- if (c == st.sep_pair)
+ if (c == st->sep_pair)
done = true;
else
{
// Then see if this character or character sequence is a separator.
//
- for (const char* p (strchr (st.sep_first, c));
+ for (const char* p (strchr (st->sep_first, c));
p != nullptr;
p = done ? nullptr : strchr (p + 1, c))
{
- char s (st.sep_second[p - st.sep_first]);
+ char s (st->sep_second[p - st->sep_first]);
// See if it has a second.
//
@@ -817,8 +918,21 @@ namespace build2
// Handle single and double quotes if enabled for this mode and unless
// they were considered separators.
//
- if (st.quotes && !done)
+ if (st->quotes && !done)
{
+ auto quoted_mode = [this] (lexer_mode m)
+ {
+ // In the double-quoted mode we only do effective escaping of the
+ // special `$("\` characters, line continuations, plus `)` for
+ // symmetry. Nothing can be escaped in single-quoted.
+ //
+ const char* esc (m == lexer_mode::double_quoted ? "$()\"\\\n" : "");
+
+ state_.push (state {
+ m, 0, nullopt, false, false, '\0', false, true, true,
+ esc, nullptr, nullptr});
+ };
+
switch (c)
{
case '\'':
@@ -826,7 +940,7 @@ namespace build2
// Enter the single-quoted mode in case the derived lexer needs
// to notice this.
//
- mode (lexer_mode::single_quoted);
+ quoted_mode (lexer_mode::single_quoted);
switch (qtype)
{
@@ -865,9 +979,10 @@ namespace build2
{
get ();
- mode (lexer_mode::double_quoted);
- st = state_.top ();
- m = st.mode;
+ quoted_mode (lexer_mode::double_quoted);
+
+ st = &state_.top ();
+ m = st->mode;
switch (qtype)
{
@@ -989,7 +1104,7 @@ namespace build2
if ((c = peek ()) == '\\')
{
get ();
- if ((c = peek ()) == '\n')
+ if ((c = peek ()) == '\n' || eos (c))
return true;
}
@@ -1000,15 +1115,16 @@ namespace build2
{
// Scan until we see the closing one.
//
- for (; !eos (c); c = peek ())
+ for (;;)
{
- get ();
if (c == '#' && ml ())
break;
- }
- if (eos (c))
- fail (c) << "unterminated multi-line comment";
+ if (eos (c = peek ()))
+ fail (c) << "unterminated multi-line comment";
+
+ get ();
+ }
}
else
{
@@ -1022,6 +1138,8 @@ namespace build2
}
case '\\':
{
+ // See if this is line continuation.
+ //
get ();
if (peek () == '\n')
diff --git a/libbuild2/lexer.hxx b/libbuild2/lexer.hxx
index 148666e..e913829 100644
--- a/libbuild2/lexer.hxx
+++ b/libbuild2/lexer.hxx
@@ -26,14 +26,15 @@ namespace build2
// mode we don't treat certain characters (e.g., `+`, `=`) as special so
// that we can use them in the variable values, e.g., `foo = g++`. In
// contrast, in the variable mode, we restrict certain character (e.g., `/`)
- // from appearing in the name. The values mode is like value but recogizes
- // `,` as special (used in contexts where we need to list multiple
- // values). The attributes/attribute_value modes are like values where each
- // value is potentially a variable assignment; they don't treat `{` and `}`
- // as special (so we cannot have name groups in attributes) as well as
- // recognizes `=` and `]`. The subscript mode is like value but doesn't
- // treat `{` and `}` as special and recognizes `]`. The eval mode is used in
- // the evaluation context.
// from appearing in the name. Additionally, in the variable mode we
// recognize leading `\` as the beginning of the escape sequence ($\n). The
// values mode is like value but recognizes `,` as special (used in contexts
+ // where we need to list multiple values). The attributes/attribute_value
+ // modes are like values where each value is potentially a variable
+ // assignment; they don't treat `{` and `}` as special (so we cannot have
+ // name groups in attributes) as well as recognizes `=` and `]`. The
+ // subscript mode is like value but doesn't treat `{` and `}` as special and
+ // recognizes `]`. The eval mode is used in the evaluation context.
//
// A number of modes are "derived" from the value/values mode by recognizing
// a few extra characters:
@@ -133,10 +134,23 @@ namespace build2
const path_name&
name () const {return name_;}
- // Note: sets mode for the next token. The second argument can be used to
- // specify the pair separator character (if the mode supports pairs). If
- // escapes is not specified, then inherit the current mode's (though a
- // mode can also override it).
+ // Set the lexer mode for the next token or delay this until the end of a
+ // double-quoted token sequence is encountered. The second argument can be
+ // used to specify the pair separator character (if the mode supports
+ // pairs). If escapes is not specified, then inherit the current mode's
+ // (though a mode can also override it).
+ //
+ // Note that there is a common parsing pattern of sensing the language
+ // construct kind we are about to parse by reading its first token,
+ // switching to an appropriate lexing mode, and then parsing the rest. The
+ // problem here is that the first token may start the double-quoted token
+ // sequence, turning the lexer into the double-quoted mode. In this case
+ // switching the lexer mode right away would not be a good idea. Thus,
+ // this function delays the mode switch until the end of the double-quoted
+ // sequence is encountered. Note, however, that such a delay only works
+ // properly if the function is called right after the first quoted token
+ // is read (because any subsequent tokens may end up being parsed in a
+ // nested mode such as variable or eval; see mode_impl() for details).
//
virtual void
mode (lexer_mode,
@@ -153,10 +167,12 @@ namespace build2
state_.top ().lsbrace_unsep = unsep;
}
- // Expire the current mode early.
+ // Expire the current mode early or delay this until the end of a
+ // double-quoted token sequence is encountered (see mode() for details on
+ // the delay condition and reasoning).
//
void
- expire_mode () {state_.pop ();}
+ expire_mode ();
lexer_mode
mode () const {return state_.top ().mode;}
@@ -175,7 +191,7 @@ namespace build2
virtual token
next ();
- // Peek at the first two characters of the next token(s). Return the
+ // Peek at the first one/two characters of the next token(s). Return the
// characters or '\0' if either would be eos. Also return an indicator of
// whether the next token would be separated. Note: cannot be used to peek
// at the first character of a line.
@@ -184,6 +200,9 @@ namespace build2
// mode in which these characters will actually be parsed use the same
// whitespace separation (the sep_space and sep_newline values).
//
+ pair<char, bool>
+ peek_char ();
+
pair<pair<char, char>, bool>
peek_chars ();
@@ -244,7 +263,7 @@ namespace build2
// been "expired" from the top).
//
virtual token
- word (state current, bool separated);
+ word (const state& current, bool separated);
// Return true in first if we have seen any spaces. Skipped empty lines
// don't count. In other words, we are only interested in spaces that are
@@ -255,6 +274,20 @@ namespace build2
pair<bool, bool>
skip_spaces ();
+ // Set state for the next token or delay until the end of a double-quoted
+ // token sequence is encountered (see mode() for details on the delay
+ // condition and reasoning).
+ //
+ void
+ mode_impl (state&&);
+
+ state&
+ current_state ()
+ {
+ assert (!state_.empty ());
+ return state_.top ();
+ }
+
// Diagnostics.
//
protected:
@@ -283,11 +316,14 @@ namespace build2
}
const path_name& name_;
- std::stack<state> state_;
bool sep_; // True if we skipped spaces in peek().
private:
+ // Use current_state(), mode_impl(), and expire_mode().
+ //
+ std::stack<state> state_;
+
using base = char_scanner<butl::utf8_validator, 2>;
// Buffer for a get()/peek() potential error.
diff --git a/libbuild2/make-parser.cxx b/libbuild2/make-parser.cxx
new file mode 100644
index 0000000..c6c077f
--- /dev/null
+++ b/libbuild2/make-parser.cxx
@@ -0,0 +1,171 @@
+// file : libbuild2/make-parser.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/make-parser.hxx>
+
+#include <cstring> // strchr()
+
+#include <libbuild2/diagnostics.hxx>
+
+namespace build2
+{
+ auto make_parser::
+ next (const string& l, size_t& p, const location& ll) -> pair<type, path>
+ {
+ assert (state != end);
+
+ type t (state == prereqs ? type::prereq : type::target);
+
+ pair<string, bool> r (next (l, p, t));
+
+ // Deal with the end.
+ //
+ if (r.second)
+ {
+ if (state == begin && r.first.empty ())
+ ; // Skip leading blank line.
+ else
+ {
+ if (state != prereqs)
+ fail (ll) << "end of make dependency declaration before ':'";
+
+ state = end;
+ }
+ }
+ // Deal with the first target.
+ //
+ else if (state == begin && !r.first.empty ())
+ state = targets;
+
+ // Deal with `:`.
+ //
+ if (p != l.size () && l[p] == ':')
+ {
+ switch (state)
+ {
+ case begin: fail (ll) << "':' before make target"; break;
+ case targets: state = prereqs; break;
+ case prereqs: fail (ll) << "':' after make prerequisite"; break;
+ case end: break;
+ }
+
+ if (++p == l.size ())
+ state = end; // Not a mere optimization: the caller will get next line.
+ }
+
+ try
+ {
+ return pair<type, path> (t, path (move (r.first)));
+ }
+ catch (const invalid_path& e)
+ {
+ fail (ll) << "invalid make "
+ << (t == type::prereq ? "prerequisite" : "target")
+ << " path '" << e.path << "'" << endf;
+ }
+ }
+
+ // Note: backslash must be first.
+ //
+ // Note also that, at least in GNU make 4.1, `%` seems to be unescapable
+ // if it appears in a target and literal if in a prerequisite.
+ //
+ static const char escapable[] = "\\ :#";
+
+ pair<string, bool> make_parser::
+ next (const string& l, size_t& p, type)
+ {
+ size_t n (l.size ());
+
+ // Skip leading spaces.
+ //
+ for (; p != n && l[p] == ' '; p++) ;
+
+ // Lines containing multiple targets/prerequisites are customarily 80
+ // characters max.
+ //
+ string r;
+ r.reserve (n - p);
+
+ // Scan the next target/prerequisite while watching out for escape
+ // sequences.
+ //
+ // @@ Can't we do better for the (common) case where nothing is escaped?
+ //
+#ifdef _WIN32
+ size_t b (p);
+#endif
+
+ for (char c; p != n && (c = l[p]) != ' '; r += c)
+ {
+ if (c == ':')
+ {
+#ifdef _WIN32
+ // See if this colon is part of the driver letter component in an
+ // absolute Windows path.
+ //
+ // Note that here we assume we are not dealing with directories (in
+ // which case c: would be a valid path) and thus an absolute path is
+ // at least 4 characters long (e.g., c:\x).
+ //
+ if (p == b + 1 && // Colon is second character.
+ alpha (l[b]) && // First is drive letter.
+ p + 2 < n && // At least two more characters after colon.
+ ((l[p + 1] == '/') || // Next is directory separator.
+ (l[p + 1] == '\\' && // But not part of a non-\\ escape sequence.
+ strchr (escapable + 1, l[p + 2]) == nullptr)))
+ {
+ ++p;
+ continue;
+ }
+#endif
+ break;
+ }
+
+ // If we have another character, then handle the escapes.
+ //
+ if (++p != n)
+ {
+ if (c == '\\')
+ {
+ // This may or may not be an escape sequence depending on whether
+ // what follows is "escapable".
+ //
+ if (strchr (escapable, l[p]) != nullptr)
+ c = l[p++];
+ }
+ else if (c == '$')
+ {
+ // Got to be another (escaped) '$'.
+ //
+ if (l[p] == '$')
+ ++p;
+ }
+ }
+ // Note that the newline escape is not necessarily separated with space.
+ //
+ else if (c == '\\')
+ {
+ --p;
+ break;
+ }
+ }
+
+ // Skip trailing spaces.
+ //
+ for (; p != n && l[p] == ' '; p++) ;
+
+ // Skip final '\' and determine if this is the end.
+ //
+ bool e (false);
+ if (p == n - 1)
+ {
+ if (l[p] == '\\')
+ p++;
+ }
+ else if (p == n)
+ e = true;
+
+ return pair<string, bool> (move (r), e);
+ }
+}
diff --git a/libbuild2/make-parser.hxx b/libbuild2/make-parser.hxx
new file mode 100644
index 0000000..f6da7a1
--- /dev/null
+++ b/libbuild2/make-parser.hxx
@@ -0,0 +1,83 @@
+// file : libbuild2/make-parser.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_MAKE_PARSER_HXX
+#define LIBBUILD2_MAKE_PARSER_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/export.hxx>
+
+namespace build2
+{
+ // Make dependency declaration parser.
+ //
+ // The format is line-based (but with potential line continuations) so we
+ // parse one line at a time. This allows the caller to bail out early (for
+ // example, on encountering a non-existent generated file).
+ //
+ // Note that most tools (MinGW GCC, Qt moc, etc) do not escape `:` in
+ // absolute Windows paths. To handle such cases the parser recognizes `:`
+ // that is a part of the drive letter component and does not treat it as
+ // the target/prerequisite separator.
+ //
+ class LIBBUILD2_SYMEXPORT make_parser
+ {
+ public:
+ enum {begin, targets, prereqs, end} state = begin;
+
+ // Parse next target/prerequisite on a line starting from the specified
+ // position. Update the position to point to the start of the following
+ // target/prerequisite or line.size() if there is nothing left on this
+ // line. May return an empty path for a valid if unlikely dependency
+ // declaration (see below) or if passing leading blank lines (both of
+ // which should normally be just skipped). Issue diagnostics and throw
+ // failed if the declaration or path is invalid.
+ //
+ // Note that the (pos != line.size) should be in the do-while rather than
+ // in a while loop. In other words, except for the leading blank lines,
+ // the parser needs to see the blank line to correctly identify the end of
+ // the declaration. See make-parser.test.cxx for a recommended usage.
+ //
+ // To parse more than one declaration, reset the state to begin after
+ // reaching end.
+ //
+ enum class type {target, prereq};
+
+ pair<type, path>
+ next (const string& line, size_t& pos, const location&);
+
+ // Lower-level stateless API.
+ //
+ public:
+ // Parse next target/prerequisite on a line starting from the specified
+ // position. Return the target/prerequisite as well as an indication of
+ // whether the end of the dependency declaration was reached. Update the
+ // position to point to the start of the following target/prerequisite,
+ // `:`, or line.size() if there is nothing left on this line.
+ //
+ // Note also that this function may return an empty string (with
+ // end=false) for a valid if unlikely dependency declaration, for example
+ // (using | to represent backslash):
+ //
+ // foo:|
+ // |
+ // bar
+ //
+ // It would also return an empty string (with end=true) if passed an empty
+ // or whitespace-only line.
+ //
+ // Note also that in the make language line continuations introduce a
+ // whitespace rather than just being removed. For example, the following
+ // declaration has two prerequisites:
+ //
+ // foo: bar|
+ // baz
+ //
+ static pair<string, bool>
+ next (const string& line, size_t& pos, type);
+ };
+}
+
+#endif // LIBBUILD2_MAKE_PARSER_HXX
diff --git a/libbuild2/make-parser.test.cxx b/libbuild2/make-parser.test.cxx
new file mode 100644
index 0000000..00a265a
--- /dev/null
+++ b/libbuild2/make-parser.test.cxx
@@ -0,0 +1,88 @@
+// file : libbuild2/make-parser.test.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <iostream>
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/make-parser.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+#undef NDEBUG
+#include <cassert>
+
+using namespace std;
+
+namespace build2
+{
+ int
+ main (int, char* argv[])
+ {
+ // Fake build system driver, default verbosity.
+ //
+ init_diag (1);
+ init (nullptr, argv[0], true);
+
+ path_name in ("<stdin>");
+
+ try
+ {
+ cin.exceptions (istream::badbit);
+
+ using make_state = make_parser;
+ using make_type = make_parser::type;
+
+ make_parser make;
+
+ location ll (in, 1);
+ for (string l; !eof (getline (cin, l)); ++ll.line)
+ {
+ if (make.state == make_state::end)
+ {
+ cout << endl;
+ make.state = make_state::begin;
+ }
+
+ // Skip leading blank lines to reduce output noise.
+ //
+ if (make.state == make_state::begin && l.empty ())
+ continue;
+
+ size_t pos (0);
+ do
+ {
+ pair<make_type, path> r (make.next (l, pos, ll));
+
+ cout << (r.first == make_type::target ? 'T' : 'P');
+
+ if (!r.second.empty ())
+ cout << ' ' << r.second;
+
+ cout << endl;
+ }
+ while (pos != l.size ());
+ }
+
+ if (make.state != make_state::end && make.state != make_state::begin)
+ fail (ll) << "incomplete make dependency declaration";
+ }
+ catch (const io_error& e)
+ {
+ cerr << "unable to read stdin: " << e << endl;
+ return 1;
+ }
+ catch (const failed&)
+ {
+ return 1;
+ }
+
+ return 0;
+ }
+}
+
+int
+main (int argc, char* argv[])
+{
+ return build2::main (argc, argv);
+}
diff --git a/libbuild2/make-parser.test.testscript b/libbuild2/make-parser.test.testscript
new file mode 100644
index 0000000..9108ba3
--- /dev/null
+++ b/libbuild2/make-parser.test.testscript
@@ -0,0 +1,129 @@
+# file : libbuild2/make-parser.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+windows = ($cxx.target.class == 'windows')
+
+: valid
+:
+$* <<EOI >>EOO
+ foo:
+
+ foo: bar
+
+ foo: bar baz
+
+ foo: bar \
+ baz
+
+ foo: bar\
+ baz
+
+ foo:\
+ bar baz\
+ fox
+
+ foo: bar \
+ \
+ baz
+
+ foo: bar\
+
+ foo bar: baz
+
+ foo \
+ bar: baz
+
+ foo \
+ bar \
+ : baz
+
+ \
+ foo: bar
+ EOI
+ T foo
+
+ T foo
+ P bar
+
+ T foo
+ P bar
+ P baz
+
+ T foo
+ P bar
+ P baz
+
+ T foo
+ P bar
+ P baz
+
+ T foo
+ P
+ P bar
+ P baz
+ P fox
+
+ T foo
+ P bar
+ P
+ P baz
+
+ T foo
+ P bar
+ P
+
+ T foo
+ T bar
+ P baz
+
+ T foo
+ T bar
+ P baz
+
+ T foo
+ T bar
+ T
+ P baz
+
+ T
+ T foo
+ P bar
+ EOO
+
+: escape
+:
+$* <<EOI >>EOO
+ \#foo\:\ bar: fox$$\\baz
+ EOI
+ T #foo: bar
+ P fox$\baz
+ EOO
+
+: windows
+:
+if $windows
+{
+ $* <<EOI >>EOO
+ foo: c:\tmp\bar
+
+ c:\tmp\foo: c:\tmp\bar \
+ c:\tmp\baz
+
+ c:\\tmp\\foo: c:\\tmp\\bar
+
+ c:\x:c:\y
+ EOI
+ T foo
+ P c:\tmp\bar
+
+ T c:\tmp\foo
+ P c:\tmp\bar
+ P c:\tmp\baz
+
+ T c:\tmp\foo
+ P c:\tmp\bar
+
+ T c:\x
+ P c:\y
+ EOO
+}
diff --git a/libbuild2/module.cxx b/libbuild2/module.cxx
index 9756860..1aaa38d 100644
--- a/libbuild2/module.cxx
+++ b/libbuild2/module.cxx
@@ -30,26 +30,26 @@ using namespace butl;
namespace build2
{
- mutex loaded_modules_lock::mutex_;
+ mutex module_libraries_lock::mutex_;
- loaded_module_map loaded_modules;
+ module_libraries_map module_libraries;
void
load_builtin_module (module_load_function* lf)
{
for (const module_functions* i (lf ()); i->name != nullptr; ++i)
- loaded_modules[i->name] = i;
+ module_libraries.emplace (i->name, module_library {*i, dir_path ()});
}
// Sorted array of bundled modules (excluding core modules bundled with
// libbuild2; see below).
//
-#if !defined(BUILD2_BOOTSTRAP) && !defined(LIBBUILD2_STATIC_BUILD)
static const char* bundled_modules[] = {
"bash",
"bin",
"c",
"cc",
+ "cli",
"cxx",
"in",
"version"
@@ -63,7 +63,6 @@ namespace build2
bundled_modules + sizeof (bundled_modules) / sizeof (*bundled_modules),
mod);
}
-#endif
// Note: also used by ad hoc recipes thus not static.
//
@@ -77,22 +76,30 @@ namespace build2
// same global mutexes. Also disable nested module context for good
// measure.
//
+ // The reserve values were picked experimentally by building libbuild2 and
+ // adding a reasonable margin for future growth.
+ //
ctx.module_context_storage->reset (
- new context (ctx.sched,
- ctx.mutexes,
- ctx.fcache,
- false, /* match_only */
+ new context (*ctx.sched,
+ *ctx.mutexes,
+ *ctx.fcache,
+ nullopt, /* match_only */
false, /* no_external_modules */
false, /* dry_run */
+ ctx.no_diag_buffer,
ctx.keep_going,
ctx.global_var_overrides, /* cmd_vars */
+ context::reserves {
+ 2500, /* targets */
+ 900 /* variables */
+ },
nullopt)); /* module_context */
// We use the same context for building any nested modules that might be
// required while building modules.
//
- ctx.module_context = ctx.module_context_storage->get ();
- ctx.module_context->module_context = ctx.module_context;
+ context& mctx (*(ctx.module_context = ctx.module_context_storage->get ()));
+ mctx.module_context = &mctx;
// Setup the context to perform update. In a sense we have a long-running
// perform meta-operation batch (indefinite, in fact, since we never call
@@ -104,12 +111,12 @@ namespace build2
// recipes) we will see the old state.
//
if (mo_perform.meta_operation_pre != nullptr)
- mo_perform.meta_operation_pre ({} /* parameters */, loc);
+ mo_perform.meta_operation_pre (mctx, {} /* parameters */, loc);
- ctx.module_context->current_meta_operation (mo_perform);
+ mctx.current_meta_operation (mo_perform);
if (mo_perform.operation_pre != nullptr)
- mo_perform.operation_pre ({} /* parameters */, update_id);
+ mo_perform.operation_pre (mctx, {} /* parameters */, update_id);
}
// Note: also used by ad hoc recipes thus not static.
@@ -120,6 +127,9 @@ namespace build2
{
// New update operation.
//
+ assert (op_update.operation_pre == nullptr &&
+ op_update.operation_post == nullptr);
+
ctx.module_context->current_operation (op_update);
// Un-tune the scheduler.
@@ -127,13 +137,14 @@ namespace build2
// Note that we can only do this if we are running serially because
// otherwise we cannot guarantee the scheduler is idle (we could have
// waiting threads from the outer context). This is fine for now since the
- // only two tuning level we use are serial and full concurrency (turns out
- // currently we don't really need this: we will always be called during
- // load or match phases and we always do parallel match; but let's keep it
- // in case things change).
- //
- auto sched_tune (ctx.sched.serial ()
- ? scheduler::tune_guard (ctx.sched, 0)
+ // only two tuning levels we use are serial and full concurrency. (Turns
+ // out currently we don't really need this: we will always be called
+ // during load or match phases and we always do parallel match; but let's
+ // keep it in case things change. Actually, we may need it, if the
+ // scheduler was started up in a tuned state, like in bpkg).
+ //
+ auto sched_tune (ctx.sched->serial ()
+ ? scheduler::tune_guard (*ctx.sched, 0)
: scheduler::tune_guard ());
// Remap verbosity level 0 to 1 unless we were requested to be silent.
@@ -231,11 +242,20 @@ namespace build2
}
#endif
- static module_load_function*
+ // Return the module functions as well as the module project directory or
+ // empty if not imported from project. Return {nullptr, nullopt} if not
+ // found.
+ //
+ // The dry-run mode only calls import_search() and always returns NULL for
+ // module functions (see below for background).
+ //
+ static pair<module_load_function*, optional<dir_path>>
import_module (
#if defined(BUILD2_BOOTSTRAP) || defined(LIBBUILD2_STATIC_BUILD)
+ bool,
scope&,
#else
+ bool dry_run,
scope& bs,
#endif
const string& mod,
@@ -249,15 +269,21 @@ namespace build2
{
tracer trace ("import_module");
+ pair<module_load_function*, optional<dir_path>> r (nullptr, nullopt);
+
// Take care of core modules that are bundled with libbuild2 in case they
// are not pre-loaded by the driver.
//
- if (mod == "config") return &config::build2_config_load;
- else if (mod == "dist") return &dist::build2_dist_load;
- else if (mod == "install") return &install::build2_install_load;
- else if (mod == "test") return &test::build2_test_load;
+ if (mod == "config") r.first = &config::build2_config_load;
+ else if (mod == "dist") r.first = &dist::build2_dist_load;
+ else if (mod == "install") r.first = &install::build2_install_load;
+ else if (mod == "test") r.first = &test::build2_test_load;
- module_load_function* r (nullptr);
+ if (r.first != nullptr)
+ {
+ r.second = dir_path ();
+ return r;
+ }
// No dynamic loading of build system modules during bootstrap or if
// statically-linked..
@@ -326,7 +352,7 @@ namespace build2
// and undefined if the module was not mentioned.
//
if (boot && !bundled && ctx.no_external_modules)
- return nullptr;
+ return r; // NULL
// See if we can import a target for this module.
//
@@ -381,7 +407,7 @@ namespace build2
if (ir.first.empty ())
{
assert (opt);
- return nullptr;
+ return r; // NULL
}
if (ir.second)
@@ -389,6 +415,8 @@ namespace build2
// What if a module is specified with config.import.<mod>.<lib>.libs?
// Note that this could still be a project-qualified target.
//
+ // Note: we now return an empty directory to mean something else.
+ //
if (ir.second->empty ())
fail (loc) << "direct module target importation not yet supported";
@@ -396,6 +424,17 @@ namespace build2
// the target (which will also give us the shared library path).
//
l5 ([&]{trace << "found " << ir.first << " in " << *ir.second;});
+ }
+
+ if (dry_run)
+ {
+ r.second = ir.second ? move (*ir.second) : dir_path ();
+ return r;
+ }
+
+ if (ir.second)
+ {
+ r.second = *ir.second;
// Create the build context if necessary.
//
@@ -408,7 +447,7 @@ namespace build2
create_module_context (ctx, loc);
}
- // Inherit loaded_modules lock from the outer context.
+ // Inherit module_libraries lock from the outer context.
//
ctx.module_context->modules_lock = ctx.modules_lock;
@@ -417,7 +456,7 @@ namespace build2
//
auto_thread_env penv (nullptr);
context& ctx (*bs.ctx.module_context);
- scheduler::phase_guard pg (ctx.sched);
+ scheduler::phase_guard pg (*ctx.sched);
// Load the imported project in the module context.
//
@@ -468,6 +507,8 @@ namespace build2
}
else
{
+ r.second = dir_path ();
+
// No module project found. Form the shared library name (incorporating
// build system core version) and try using system-default search
// (installed, rpath, etc).
@@ -510,7 +551,7 @@ namespace build2
fail (loc) << "unable to lookup " << sym << " in build system module "
<< mod << " (" << lib << "): " << err;
- r = function_cast<module_load_function*> (hs.second);
+ r.first = function_cast<module_load_function*> (hs.second);
}
else if (!opt)
{
@@ -522,7 +563,10 @@ namespace build2
<< "line variable to specify its project out_root";
}
else
+ {
+ r.second = nullopt;
l5 ([&]{trace << "unable to load " << lib << ": " << err;});
+ }
#endif // BUILD2_BOOTSTRAP || LIBBUILD2_STATIC_BUILD
@@ -538,89 +582,200 @@ namespace build2
{
tracer trace ("find_module");
- // Note that we hold the lock for the entire time it takes to build a
- // module.
+ // If this is a submodule, get the main module name.
+ //
+ string mmod (smod, 0, smod.find ('.'));
+
+ // We have a somewhat strange two-level caching in imported_modules
+ // and module_libraries in order to achieve the following:
+ //
+ // 1. Correctly handle cases where a module can be imported from one
+ // project but not the other.
+ //
+ // 2. Make sure that for each project that imports the module we actually
+ // call import_search() in order to mark any config.import.* as used.
//
- loaded_modules_lock lock (bs.ctx);
+ // 3. Make sure that all the projects import the same module.
+ //
+ scope& rs (*bs.root_scope ());
+
+ const string* mod;
+ const module_functions* fun;
- // Optional modules and submodules sure make this logic convoluted. So we
- // divide it into two parts: (1) find or insert an entry (for submodule
- // or, failed that, for the main module, the latter potentially NULL) and
- // (2) analyze the entry and issue diagnostics.
+ // First check the project's imported_modules in case this (main) module
+ // is known to be not found.
//
- auto i (loaded_modules.find (smod)), e (loaded_modules.end ());
+ auto j (rs.root_extra->imported_modules.find (mmod));
+ auto je (rs.root_extra->imported_modules.end ());
- if (i == e)
+ if (j != je && !j->found)
{
- // If this is a submodule, get the main module name.
+ mod = &mmod;
+ fun = nullptr;
+ }
+ else
+ {
+ // Note that we hold the lock for the entire time it takes to build a
+ // module.
//
- string mmod (smod, 0, smod.find ('.'));
+ module_libraries_lock lock (bs.ctx);
- if (mmod != smod)
- i = loaded_modules.find (mmod);
+ // Optional modules and submodules sure make this logic convoluted. So
+ // we divide it into two parts: (1) find or insert an entry (for
+ // submodule or, failed that, for the main module) and (2) analyze the
+ // entry and issue diagnostics.
+ //
+ auto i (module_libraries.find (smod));
+ auto ie (module_libraries.end ());
- if (i == e)
+ bool imported (false);
+ if (i == ie)
{
- module_load_function* f (import_module (bs, mmod, loc, boot, opt));
+ if (mmod != smod)
+ i = module_libraries.find (mmod);
- if (f != nullptr)
+ if (i == ie)
{
- // Enter all the entries noticing which one is our submodule. If
- // none are, then we notice the main module.
- //
- for (const module_functions* j (f ()); j->name != nullptr; ++j)
+ pair<module_load_function*, optional<dir_path>> ir (
+ import_module (false /* dry_run */, bs, mmod, loc, boot, opt));
+
+ if (module_load_function* f = ir.first)
{
- const string& n (j->name);
+ // Enter all the entries noticing which one is our submodule. If
+ // none are, then we notice the main module.
+ //
+ for (const module_functions* j (f ()); j->name != nullptr; ++j)
+ {
+ const string& n (j->name);
+
+ l5 ([&]{trace << "registering " << n;});
+
+ bool main (n == mmod);
- l5 ([&]{trace << "registering " << n;});
+ auto p (module_libraries.emplace (
+ n,
+ module_library {
+ *j,
+ main ? move (*ir.second) : dir_path ()}));
- auto p (loaded_modules.emplace (n, j));
+ if (!p.second)
+ fail (loc) << "build system submodule name " << n << " of main "
+ << "module " << mmod << " is already in use";
- if (!p.second)
- fail (loc) << "build system submodule name " << n << " of main "
- << "module " << mmod << " is already in use";
+ // Note: this assumes the main module is last.
+ //
+ if (n == smod || (main && i == ie))
+ i = p.first;
+ }
- if (n == smod || (i == e && n == mmod))
- i = p.first;
+ // We should at least have the main module.
+ //
+ if (i == ie)
+ fail (loc) << "invalid function list in build system module "
+ << mmod;
}
- // We should at least have the main module.
- //
- if (i == e)
- fail (loc) << "invalid function list in build system module "
- << mmod;
+ imported = true;
+ }
+ }
+
+ // Now the iterator points to a submodule or to the main module, or to
+ // end if neither is found.
+ //
+      assert (j == je || i != ie); // Cache state consistency sanity check.
+
+ if (i != ie)
+ {
+ // Note: these should remain stable after we release the lock.
+ //
+ mod = &i->first;
+ fun = &i->second.functions.get ();
+
+ // If this project hasn't imported this main module and we found the
+ // entry in the cache, then we have to perform the import_search()
+ // part of import_module() in order to cover items (2) and (3) above.
+ //
+ // There is one nuance: omit this for bundled modules since it's
+ // possible to first import them ad hoc and then, if we call
+ // import_search() again, to find them differently (e.g., as a
+ // subproject).
+ //
+ if (j == je && !imported && !bundled_module (mmod))
+ {
+ pair<module_load_function*, optional<dir_path>> ir (
+ import_module (true /* dry_run */, bs, mmod, loc, boot, opt));
+
+ if (ir.second)
+ {
+ if (i->first != mmod)
+ {
+ i = module_libraries.find (mmod);
+ assert (i != ie); // Has to be there.
+ }
+
+ const dir_path& cd (*ir.second);
+ const dir_path& pd (i->second.import_path);
+
+ if (cd != pd)
+ {
+ fail (loc) << "inconsistent build system module " << mmod
+ << " importation" <<
+ info << rs << " imports it as "
+ << (cd.empty () ? "ad hoc" : cd.representation ().c_str ()) <<
+ info << "previously imported as "
+ << (pd.empty () ? "ad hoc" : pd.representation ().c_str ());
+ }
+ }
+ else
+ {
+ // This module is not found from this project.
+ //
+ mod = &mmod;
+ fun = nullptr;
+ }
}
- else
- i = loaded_modules.emplace (move (mmod), nullptr).first;
+ }
+ else
+ {
+ mod = &mmod;
+ fun = nullptr;
}
}
+ // Cache the result in imported_modules if necessary.
+ //
+ if (j == je)
+ rs.root_extra->imported_modules.push_back (
+ module_import {mmod, fun != nullptr});
+
// Reduce skipped external module to optional.
//
- if (boot && i->second == nullptr)
+ if (boot && fun == nullptr)
opt = true;
- // Now the iterator points to a submodule or to the main module, the
- // latter potentially NULL.
+ // Handle optional.
//
- if (!opt)
+ if (fun == nullptr)
{
- if (i->second == nullptr)
- {
- fail (loc) << "unable to load build system module " << i->first;
- }
- else if (i->first != smod)
- {
- fail (loc) << "build system module " << i->first << " has no "
+ if (!opt)
+ fail (loc) << "unable to load build system module " << *mod;
+ }
+ else if (*mod != smod)
+ {
+ if (!opt)
+ fail (loc) << "build system module " << *mod << " has no "
<< "submodule " << smod;
+ else
+ {
+ // Note that if the main module exists but has no such submodule, we
+ // return NULL rather than fail (think of an older version of a module
+ // that doesn't implement some extra functionality).
+ //
+ fun = nullptr;
}
}
- // Note that if the main module exists but has no such submodule, we
- // return NULL rather than fail (think of an older version of a module
- // that doesn't implement some extra functionality).
- //
- return i->second;
+ return fun;
}
void
@@ -628,7 +783,7 @@ namespace build2
{
// First see if this modules has already been booted for this project.
//
- module_map& lm (rs.root_extra->modules);
+ module_state_map& lm (rs.root_extra->loaded_modules);
auto i (lm.find (mod));
if (i != lm.end ())
@@ -673,7 +828,7 @@ namespace build2
i->boot_init = e.init;
}
- rs.assign (rs.var_pool ().insert (mod + ".booted")) = (mf != nullptr);
+ rs.assign (rs.var_pool (true).insert (mod + ".booted")) = (mf != nullptr);
}
void
@@ -704,7 +859,7 @@ namespace build2
{
// First see if this modules has already been inited for this project.
//
- module_map& lm (rs.root_extra->modules);
+ module_state_map& lm (rs.root_extra->loaded_modules);
auto i (lm.find (mod));
bool f (i == lm.end ());
@@ -742,7 +897,7 @@ namespace build2
// buildfile-visible (where we use the term "load a module"; see the note
// on terminology above)
//
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true));
value& lv (bs.assign (vp.insert (mod + ".loaded")));
value& cv (bs.assign (vp.insert (mod + ".configured")));
@@ -824,7 +979,7 @@ namespace build2
if (cast_false<bool> (bs[name + ".loaded"]))
{
if (cast_false<bool> (bs[name + ".configured"]))
- return rs.root_extra->modules.find (name)->module;
+ return rs.root_extra->loaded_modules.find (name)->module;
}
else
{
@@ -846,7 +1001,7 @@ namespace build2
// attempt to load it was optional?
return cast_false<bool> (bs[name + ".loaded"])
- ? rs.root_extra->modules.find (name)->module
+ ? rs.root_extra->loaded_modules.find (name)->module
: init_module (rs, bs, name, loc, false /* optional */, hints)->module;
}
}
diff --git a/libbuild2/module.hxx b/libbuild2/module.hxx
index 8223bae..6cdd040 100644
--- a/libbuild2/module.hxx
+++ b/libbuild2/module.hxx
@@ -21,6 +21,12 @@ namespace build2
// implementation's perspectives, the module library is "loaded" and the
// module is optionally "bootstrapped" (or "booted" for short) and then
// "initialized" (or "inited").
+ //
+ // Note also that a module name (or component thereof, for submodules) is
+ // not a project name (in particular, it can be less than 3 characters long)
+ // and we usually use `-` instead of `_` as a word separator within
+ // components, for example `c.as-cpp` (since the top-level component ends up
+ // in the library name; but this is not a hard rule).
// Base class for module instance.
//
@@ -145,9 +151,9 @@ namespace build2
//
// The <name> part in the function name is the main module name without
// submodule components (for example, `c` in `c.config`) and the load
- // function is expected to return boot/init functions for all its submodules
- // (if any) as well as for the module itself as an array of module_functions
- // terminated with an all-NULL entry.
+ // function is expected to return boot/init functions as an array of
+ // module_functions: entries for all its submodules (if any) first, followed
+ // by the module itself, and terminated with an all-NULL entry.
//
// Note that the load function is guaranteed to be called during serial
// execution (either from main() or during the load phase).
@@ -155,7 +161,31 @@ namespace build2
extern "C"
using module_load_function = const module_functions* ();
- // Module state.
+ // Imported module state.
+ //
+ // The module name is the main module (corresponding to the library). If
+ // found is false then this module could not be imported from this project.
+ //
+ struct module_import
+ {
+ const string name;
+ bool found;
+ };
+
+ struct module_import_map: vector<module_import>
+ {
+ iterator
+ find (const string& name)
+ {
+ return find_if (
+ begin (), end (),
+ [&name] (const module_import& i) {return i.name == name;});
+ }
+ };
+
+ // Loaded module state.
+ //
+  // Note that unlike module_import, the module name here could be a submodule.
//
struct module_state
{
@@ -167,7 +197,7 @@ namespace build2
optional<module_boot_init> boot_init;
};
- struct module_map: vector<module_state>
+ struct module_state_map: vector<module_state>
{
iterator
find (const string& name)
@@ -268,23 +298,28 @@ namespace build2
return static_cast<T&> (*load_module (root, base, name, l, config_hints));
}
- // Loaded modules (as in libraries).
+ // Loaded module libraries.
//
- // A NULL entry for the main module indicates that a module library was not
- // found.
+ // Note that this map contains entries for all the submodules.
//
- using loaded_module_map = map<string, const module_functions*>;
+ struct module_library
+ {
+ reference_wrapper<const module_functions> functions;
+ dir_path import_path; // Only for main module.
+ };
+
+ using module_libraries_map = map<string, module_library>;
- // The loaded_modules map is locked per top-level (as opposed to nested)
+ // The module_libraries map is locked per top-level (as opposed to nested)
// context (see context.hxx for details).
//
// Note: should only be constructed during contexts-wide serial execution.
//
- class LIBBUILD2_SYMEXPORT loaded_modules_lock
+ class LIBBUILD2_SYMEXPORT module_libraries_lock
{
public:
explicit
- loaded_modules_lock (context& c)
+ module_libraries_lock (context& c)
: ctx_ (c), lock_ (mutex_, defer_lock)
{
if (ctx_.modules_lock == nullptr)
@@ -294,7 +329,7 @@ namespace build2
}
}
- ~loaded_modules_lock ()
+ ~module_libraries_lock ()
{
if (ctx_.modules_lock == this)
ctx_.modules_lock = nullptr;
@@ -306,7 +341,7 @@ namespace build2
mlock lock_;
};
- LIBBUILD2_SYMEXPORT extern loaded_module_map loaded_modules;
+ LIBBUILD2_SYMEXPORT extern module_libraries_map modules_libraries;
// Load a builtin module (i.e., a module linked as a static/shared library
// or that is part of the build system driver).
diff --git a/libbuild2/name.cxx b/libbuild2/name.cxx
index 1081b5c..6c48bb3 100644
--- a/libbuild2/name.cxx
+++ b/libbuild2/name.cxx
@@ -80,15 +80,20 @@ namespace build2
}
ostream&
- to_stream (ostream& os, const name& n, bool quote, char pair, bool escape)
+ to_stream (ostream& os, const name& n, quote_mode q, char pair, bool escape)
{
using pattern_type = name::pattern_type;
- auto write_string = [&os, quote, pair, escape] (
+ auto write_string = [&os, q, pair, escape] (
const string& v,
optional<pattern_type> pat = nullopt,
bool curly = false)
{
+ // We don't expect the effective quoting mode to be specified for the
+ // name patterns.
+ //
+ assert (q != quote_mode::effective || !pat);
+
// Special characters, path pattern characters, and regex pattern
// characters. The latter only need to be quoted in the first position
// and if followed by a non-alphanumeric delimiter. If that's the only
@@ -97,7 +102,7 @@ namespace build2
// escape leading `+` in the curly braces which is also recognized as a
// path pattern.
//
- char sc[] = {
+ char nsc[] = {
'{', '}', '[', ']', '$', '(', ')', // Token endings.
' ', '\t', '\n', '#', // Spaces.
'\\', '"', // Escaping and quoting.
@@ -114,6 +119,26 @@ namespace build2
return (v[0] == '~' || v[0] == '^') && v[1] != '\0' && !alnum (v[1]);
};
+ char esc[] = {
+ '{', '}', '$', '(', // Token endings.
+ ' ', '\t', '\n', '#', // Spaces.
+ '"', // Quoting.
+ pair, // Pair separator, if any.
+ '\0'};
+
+ auto ec = [&esc] (const string& v)
+ {
+ for (size_t i (0); i < v.size (); ++i)
+ {
+ char c (v[i]);
+
+ if (strchr (esc, c) != nullptr || (c == '\\' && v[i + 1] == '\\'))
+ return true;
+ }
+
+ return false;
+ };
+
if (pat)
{
switch (*pat)
@@ -124,7 +149,7 @@ namespace build2
}
}
- if (quote && v.find ('\'') != string::npos)
+ if (q != quote_mode::none && v.find ('\'') != string::npos)
{
// Quote the string with the double quotes rather than with the single
// one. Escape some of the special characters.
@@ -148,8 +173,10 @@ namespace build2
// pattern character but not vice-verse. See the parsing logic for
// details.
//
- else if (quote && (v.find_first_of (sc) != string::npos ||
- (!pat && v.find_first_of (pc) != string::npos)))
+ else if ((q == quote_mode::normal &&
+ (v.find_first_of (nsc) != string::npos ||
+ (!pat && v.find_first_of (pc) != string::npos))) ||
+ (q == quote_mode::effective && ec (v)))
{
if (escape) os << '\\';
os << '\'';
@@ -164,8 +191,9 @@ namespace build2
// details). So we escape it both if it's not a pattern or is a path
// pattern.
//
- else if (quote && ((!pat || *pat == pattern_type::path) &&
- ((v[0] == '+' && curly) || rc (v))))
+ else if (q == quote_mode::normal &&
+ (!pat || *pat == pattern_type::path) &&
+ ((v[0] == '+' && curly) || rc (v)))
{
if (escape) os << '\\';
os << '\\' << v;
@@ -176,12 +204,12 @@ namespace build2
uint16_t dv (stream_verb (os).path); // Directory verbosity.
- auto write_dir = [&os, quote, &write_string, dv] (
+ auto write_dir = [&os, q, &write_string, dv] (
const dir_path& d,
optional<pattern_type> pat = nullopt,
bool curly = false)
{
- if (quote)
+ if (q != quote_mode::none)
write_string (dv < 1 ? diag_relative (d) : d.representation (),
pat,
curly);
@@ -194,7 +222,7 @@ namespace build2
// If quoted then print empty name as '' rather than {}.
//
- if (quote && n.empty ())
+ if (q != quote_mode::none && n.empty ())
return os << (escape ? "\\'\\'" : "''");
if (n.proj)
@@ -255,7 +283,7 @@ namespace build2
ostream&
to_stream (ostream& os,
const names_view& ns,
- bool quote,
+ quote_mode q,
char pair,
bool escape)
{
@@ -263,7 +291,7 @@ namespace build2
{
const name& n (*i);
++i;
- to_stream (os, n, quote, pair, escape);
+ to_stream (os, n, q, pair, escape);
if (n.pair)
os << n.pair;
diff --git a/libbuild2/name.hxx b/libbuild2/name.hxx
index 216f207..f5cb2c5 100644
--- a/libbuild2/name.hxx
+++ b/libbuild2/name.hxx
@@ -178,12 +178,15 @@ namespace build2
// trailing directory separator then it is stored as a directory, otherwise
// as a simple name. Note that the returned name is never a pattern.
//
+ // NOTE: this function does not parse the full name syntax. See context-less
+ // parser::parse_names() for a heavy-weight way to achieve this.
+ //
name
to_name (string);
// Serialize the name to the stream. If requested, the name components
- // containing special characters are quoted and/or escaped. The special
- // characters are:
+ // containing special characters are quoted and/or escaped. In the normal
+ // quoting mode the special characters are:
//
// {}[]$() \t\n#\"'%
//
@@ -197,8 +200,14 @@ namespace build2
//
// As well as leading `+` if in the curly braces.
//
+ // In the effective quoting mode the special characters are:
+ //
+ // {}$( \t\n#"'
+ //
+ // As well as `\` if followed by any of the above characters or itself.
+ //
// If the pair argument is not '\0', then it is added to the above special
- // characters set. If the quote character is present in the component then
+ // characters sets. If the quote character is present in the component then
// it is double quoted rather than single quoted. In this case the following
// characters are escaped:
//
@@ -211,15 +220,23 @@ namespace build2
// Note that in the quoted mode empty unqualified name is printed as '',
// not {}.
//
+ enum class quote_mode
+ {
+ none,
+ normal,
+ effective
+ };
+
LIBBUILD2_SYMEXPORT ostream&
to_stream (ostream&,
const name&,
- bool quote,
+ quote_mode,
char pair = '\0',
bool escape = false);
inline ostream&
- operator<< (ostream& os, const name& n) {return to_stream (os, n, false);}
+ operator<< (ostream& os, const name& n) {
+ return to_stream (os, n, quote_mode::none);}
// Vector of names.
//
@@ -238,13 +255,13 @@ namespace build2
LIBBUILD2_SYMEXPORT ostream&
to_stream (ostream&,
const names_view&,
- bool quote,
+ quote_mode,
char pair = '\0',
bool escape = false);
inline ostream&
operator<< (ostream& os, const names_view& ns) {
- return to_stream (os, ns, false);}
+ return to_stream (os, ns, quote_mode::none);}
inline ostream&
operator<< (ostream& os, const names& ns) {return os << names_view (ns);}
diff --git a/libbuild2/name.test.cxx b/libbuild2/name.test.cxx
index 80b830e..c404503 100644
--- a/libbuild2/name.test.cxx
+++ b/libbuild2/name.test.cxx
@@ -46,7 +46,7 @@ namespace build2
// Test stream representation.
//
{
- auto ts = [] (const name& n, bool quote = true)
+ auto ts = [] (const name& n, quote_mode quote = quote_mode::normal)
{
ostringstream os;
stream_verb (os, stream_verbosity (0, 1));
@@ -54,8 +54,8 @@ namespace build2
return os.str ();
};
- assert (ts (name ()) == "''");
- assert (ts (name (), false) == "{}");
+ assert (ts (name ()) == "''");
+ assert (ts (name (), quote_mode::none) == "{}");
assert (ts (name ("foo")) == "foo");
@@ -70,10 +70,18 @@ namespace build2
assert (ts (name (dir ("bar/"), "dir", "foo")) == "bar/dir{foo}");
assert (ts (name (dir ("bar/baz/"), "dir", "foo")) == "bar/baz/dir{foo}");
- // Quoting.
+ // Normal quoting.
//
assert (ts (name (dir ("bar baz/"), "dir", "foo fox")) == "'bar baz/'dir{'foo fox'}");
+ // Effective quoting.
+ //
+ assert (ts (name ("bar\\baz"), quote_mode::effective) == "bar\\baz");
+ assert (ts (name ("bar[baz]"), quote_mode::effective) == "bar[baz]");
+ assert (ts (name ("bar$baz"), quote_mode::effective) == "'bar$baz'");
+ assert (ts (name ("bar\\\\baz"), quote_mode::effective) == "'bar\\\\baz'");
+ assert (ts (name ("bar\\$baz"), quote_mode::effective) == "'bar\\$baz'");
+
// Relative logic.
//
#ifndef _WIN32
diff --git a/libbuild2/operation.cxx b/libbuild2/operation.cxx
index f1fc83c..4af03fe 100644
--- a/libbuild2/operation.cxx
+++ b/libbuild2/operation.cxx
@@ -3,7 +3,12 @@
#include <libbuild2/operation.hxx>
-#include <iostream> // cout
+#include <iostream> // cout
+#include <unordered_map>
+
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
#include <libbuild2/file.hxx>
#include <libbuild2/scope.hxx>
@@ -13,6 +18,10 @@
#include <libbuild2/algorithm.hxx>
#include <libbuild2/diagnostics.hxx>
+#if 0
+#include <libbuild2/adhoc-rule-buildscript.hxx> // @@ For a hack below.
+#endif
+
using namespace std;
using namespace butl;
@@ -55,7 +64,7 @@ namespace build2
true, // bootstrap_outer
nullptr, // meta-operation pre
nullptr, // operation pre
- &load,
+ &perform_load,
nullptr, // search
nullptr, // match
nullptr, // execute
@@ -67,16 +76,17 @@ namespace build2
// perform
//
void
- load (const values&,
- scope& root,
- const path& bf,
- const dir_path& out_base,
- const dir_path& src_base,
- const location&)
+ perform_load (const values&,
+ scope& root,
+ const path& bf,
+ const dir_path& out_base,
+ const dir_path& src_base,
+ const location&)
{
// Load project's root.build.
//
- load_root (root);
+ if (!root.root_extra->loaded)
+ load_root (root);
// Create the base scope. Note that its existence doesn't mean it was
// already setup as a base scope; it can be the same as root.
@@ -91,15 +101,15 @@ namespace build2
}
void
- search (const values&,
- const scope&,
- const scope& bs,
- const path& bf,
- const target_key& tk,
- const location& l,
- action_targets& ts)
+ perform_search (const values&,
+ const scope&,
+ const scope& bs,
+ const path& bf,
+ const target_key& tk,
+ const location& l,
+ action_targets& ts)
{
- tracer trace ("search");
+ tracer trace ("perform_search");
context& ctx (bs.ctx);
phase_lock pl (ctx, run_phase::match);
@@ -127,10 +137,126 @@ namespace build2
ts.push_back (t);
}
+ // Verify that no two targets share a path unless they both are "read-only"
+ // (have noop recipes).
+ //
+ // Note: somewhat similar logic in dyndep::verify_existing_file().
+ //
+ static void
+ verify_targets (context& ctx, action a)
+ {
+ // On the first pass we collect all the targets that have non-noop
+ // recipes. On the second pass we check if there are any other targets
+ // that have the same path. Note that we must also deal with two non-noop
+ // targets that have the same path.
+ //
+ // Strictly speaking we may need to produce some sort of progress if this
+ // takes long. However, currently we are looking at verification speed of
+    // ~1ms per 2K targets, which means it will only become noticeable with
+ // over 1M targets.
+ //
+ unordered_map<reference_wrapper<const path>,
+ const target*,
+ hash<path>,
+ equal_to<path>> map;
+
+    // Half of the total appears to be a reasonable heuristic.
+ //
+ map.reserve (ctx.targets.size () / 2);
+
+ size_t count_matched (ctx.count_matched ());
+
+ bool e (false);
+ for (size_t pass (1); pass != 3; ++pass)
+ {
+ for (const auto& pt: ctx.targets)
+ {
+ // We are only interested in path-based targets.
+ //
+ const path_target* t (pt->is_a<path_target> ());
+ if (t == nullptr)
+ continue;
+
+ // We are only interested in the matched targets.
+ //
+ const target::opstate& s (t->state[a]);
+
+ if (s.task_count.load (memory_order_relaxed) < count_matched)
+ continue;
+
+ // Skip if for some reason the path is not assigned.
+ //
+ const path& p (t->path (memory_order_relaxed));
+ if (p.empty ())
+ continue;
+
+ recipe_function* const* rf (s.recipe.target<recipe_function*> ());
+ bool noop (rf != nullptr && *rf == &noop_action);
+
+ if ((noop ? 2 : 1) != pass)
+ continue;
+
+ const target* t1;
+ if (pass == 1)
+ {
+ auto r (map.emplace (p, t));
+
+ if (r.second)
+ continue;
+
+ t1 = r.first->second;
+ }
+ else
+ {
+ auto i (map.find (p));
+
+ if (i == map.end ())
+ continue;
+
+ t1 = i->second;
+ }
+
+ e = true;
+
+ diag_record dr (error);
+
+ dr << "multiple targets share path " << p <<
+ info << "first target: " << *t1 <<
+ info << "second target: " << *t <<
+ info << "target " << *t1 << " has non-noop recipe";
+
+ if (pass == 1)
+ {
+ dr << info << "target " << *t << " has non-noop recipe";
+ }
+ else if (t->decl != target_decl::real)
+ {
+ if (t->decl == target_decl::implied)
+ {
+ dr << info << "target " << *t << " is implied by a buildfile";
+ }
+ else
+ {
+ dr << info << "target " << *t << " is not declared in a buildfile";
+
+ if (t->decl == target_decl::prereq_file)
+ dr << " but has corresponding existing file";
+
+ dr << info << "perhaps it is a dynamic dependency?";
+ }
+ }
+ }
+ }
+
+ if (e)
+ throw failed ();
+ }
+
void
- match (const values&, action a, action_targets& ts, uint16_t diag, bool prog)
+ perform_match (const values&, action a, action_targets& ts,
+ uint16_t diag, bool prog)
{
- tracer trace ("match");
+ tracer trace ("perform_match");
if (ts.empty ())
return;
@@ -142,25 +268,50 @@ namespace build2
// Setup progress reporting if requested.
//
- string what; // Note: must outlive monitor_guard.
+ struct monitor_data
+ {
+ size_t incr;
+ string what;
+ atomic<timestamp::rep> time {timestamp_nonexistent_rep};
+ } md; // Note: must outlive monitor_guard.
scheduler::monitor_guard mg;
if (prog && show_progress (2 /* max_verb */))
{
- size_t incr (stderr_term ? 1 : 10); // Scale depending on output type.
-
- what = " targets to " + diag_do (ctx, a);
+ // Note that showing progress is not free and it can take up to 10% of
+ // the up-to-date check on some projects (e.g., Boost). So we jump
+ // through a few hoops to make sure we don't overindulge.
+ //
+ md.incr = stderr_term // Scale depending on output type.
+ ? (ctx.sched->serial () ? 1 : 5)
+ : 100;
+ md.what = " targets to " + diag_do (ctx, a);
- mg = ctx.sched.monitor (
+ mg = ctx.sched->monitor (
ctx.target_count,
- incr,
- [incr, &what] (size_t c) -> size_t
+ md.incr,
+ [&md] (size_t c) -> size_t
{
+ size_t r (c + md.incr);
+
+ if (stderr_term)
+ {
+ timestamp o (duration (md.time.load (memory_order_consume)));
+ timestamp n (system_clock::now ());
+
+ if (n - o < chrono::milliseconds (80))
+ return r;
+
+ md.time.store (n.time_since_epoch ().count (),
+ memory_order_release);
+ }
+
diag_progress_lock pl;
diag_progress = ' ';
diag_progress += to_string (c);
- diag_progress += what;
- return c + incr;
+ diag_progress += md.what;
+
+ return r;
});
}
@@ -168,6 +319,7 @@ namespace build2
// many we have started. Wait with unlocked phase to allow phase
// switching.
//
+ bool fail (false);
size_t i (0), n (ts.size ());
{
atomic_count task_count (0);
@@ -183,16 +335,69 @@ namespace build2
// Bail out if the target has failed and we weren't instructed to
// keep going.
//
- if (s == target_state::failed && !ctx.keep_going)
+ if (s == target_state::failed)
{
- ++i;
- break;
+ fail = true;
+
+ if (!ctx.keep_going)
+ {
+ ++i;
+ break;
+ }
}
}
wg.wait ();
}
+ // If we have any targets with post hoc prerequisites, match those.
+ //
+ // See match_posthoc() for the overall approach description.
+ //
+ bool posthoc_fail (false);
+ if (!ctx.current_posthoc_targets.empty () && (!fail || ctx.keep_going))
+ {
+ // Note that on each iteration we may end up with new entries at the
+ // back. Since we start and end each iteration in serial execution, we
+ // don't need to mess with the mutex.
+ //
+ for (const context::posthoc_target& p: ctx.current_posthoc_targets)
+ {
+ action a (p.action); // May not be the same as argument action.
+ const target& t (p.target);
+
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching to " << diag_do (t.ctx, a)
+ << " post hoc prerequisites of " << t;
+ });
+
+ // Cannot use normal match because incrementing dependency counts in
+ // the face of cycles does not work well (we will deadlock for the
+ // reverse execution mode).
+ //
+ // @@ PERF: match in parallel (need match_direct_async(), etc).
+ //
+ for (const target* pt: p.prerequisite_targets)
+ {
+ target_state s (match_direct_sync (a, *pt, false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+
+ if (posthoc_fail && !ctx.keep_going)
+ break;
+ }
+ }
+
// Clear the progress if present.
//
if (mg)
@@ -203,15 +408,25 @@ namespace build2
// We are now running serially. Re-examine targets that we have matched.
//
- bool fail (false);
for (size_t j (0); j != n; ++j)
{
action_target& at (ts[j]);
const target& t (at.as<target> ());
- target_state s (j < i
- ? match (a, t, false)
- : target_state::postponed);
+ // We cannot attribute post hoc failures to specific targets so it
+ // seems the best we can do is just fail them all.
+ //
+ target_state s;
+ if (j < i)
+ {
+ s = match_complete (a, t, false);
+
+ if (posthoc_fail)
+ s = /*t.state[a].state =*/ target_state::failed;
+ }
+ else
+ s = target_state::postponed;
+
switch (s)
{
case target_state::postponed:
@@ -248,6 +463,12 @@ namespace build2
if (fail)
throw failed ();
+
+ // @@ This feels a bit ad hoc. Maybe we should invent operation hooks
+ // for this (e.g., post-search, post-match, post-execute)?
+ //
+ if (a == perform_update_id)
+ verify_targets (ctx, a);
}
// Phase restored to load.
@@ -256,16 +477,89 @@ namespace build2
}
void
- execute (const values&, action a, action_targets& ts,
- uint16_t diag, bool prog)
+ perform_execute (const values&, action a, action_targets& ts,
+ uint16_t diag, bool prog)
{
- tracer trace ("execute");
+ tracer trace ("perform_execute");
if (ts.empty ())
return;
context& ctx (ts[0].as<target> ().ctx);
+ bool posthoc_fail (false);
+ auto execute_posthoc = [&ctx, &posthoc_fail] ()
+ {
+ for (const context::posthoc_target& p: ctx.current_posthoc_targets)
+ {
+ action a (p.action); // May not be the same as argument action.
+ const target& t (p.target);
+
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while " << diag_doing (t.ctx, a)
+ << " post hoc prerequisites of " << t;
+ });
+
+#if 0
+ for (const target* pt: p.prerequisite_targets)
+ {
+ target_state s (execute_direct_sync (a, *pt, false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+#else
+ // Note: similar logic/reasoning to below except we use direct
+ // execution.
+ //
+ atomic_count tc (0);
+ wait_guard wg (ctx, tc);
+
+ for (const target* pt: p.prerequisite_targets)
+ {
+ target_state s (execute_direct_async (a, *pt, 0, tc, false /*fail*/));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+
+ wg.wait ();
+
+ // Process the result.
+ //
+ for (const target* pt: p.prerequisite_targets)
+ {
+ // Similar to below, no need to wait.
+ //
+ target_state s (pt->executed_state (a, false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ // Note: no need to keep going.
+ //
+ posthoc_fail = true;
+ break;
+ }
+ }
+#endif
+ if (posthoc_fail && !ctx.keep_going)
+ break;
+ }
+ };
+
// Reverse the order of targets if the execution mode is 'last'.
//
if (ctx.current_mode == execution_mode::last)
@@ -273,6 +567,7 @@ namespace build2
phase_lock pl (ctx, run_phase::execute); // Never switched.
+ bool fail (false);
{
// Tune the scheduler.
//
@@ -281,7 +576,7 @@ namespace build2
switch (ctx.current_inner_oif->concurrency)
{
- case 0: sched_tune = tune_guard (ctx.sched, 1); break; // Run serially.
+ case 0: sched_tune = tune_guard (*ctx.sched, 1); break; // Run serially.
case 1: break; // Run as is.
default: assert (false); // Not supported.
}
@@ -304,7 +599,7 @@ namespace build2
{
what = "% of targets " + diag_did (ctx, a);
- mg = ctx.sched.monitor (
+ mg = ctx.sched->monitor (
ctx.target_count,
init - incr,
[init, incr, &what, &ctx] (size_t c) -> size_t
@@ -329,9 +624,18 @@ namespace build2
}
}
+ // In the 'last' execution mode run post hoc first.
+ //
+ if (ctx.current_mode == execution_mode::last)
+ {
+ if (!ctx.current_posthoc_targets.empty ())
+ execute_posthoc ();
+ }
+
// Similar logic to execute_members(): first start asynchronous
// execution of all the top-level targets.
//
+ if (!posthoc_fail || ctx.keep_going)
{
atomic_count task_count (0);
wait_guard wg (ctx, task_count);
@@ -347,13 +651,24 @@ namespace build2
// Bail out if the target has failed and we weren't instructed to
// keep going.
//
- if (s == target_state::failed && !ctx.keep_going)
- break;
+ if (s == target_state::failed)
+ {
+ fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
}
wg.wait ();
}
+ if (ctx.current_mode == execution_mode::first)
+ {
+ if (!ctx.current_posthoc_targets.empty () && (!fail || ctx.keep_going))
+ execute_posthoc ();
+ }
+
// We are now running serially.
//
@@ -389,12 +704,24 @@ namespace build2
// Re-examine all the targets and print diagnostics.
//
- bool fail (false);
for (action_target& at: ts)
{
const target& t (at.as<target> ());
- switch ((at.state = t.executed_state (a, false)))
+ // Similar to match we cannot attribute post hoc failures to specific
+ // targets so it seems the best we can do is just fail them all.
+ //
+ if (!posthoc_fail)
+ {
+ // Note that here we call executed_state() directly instead of
+ // execute_complete() since we know there is no need to wait.
+ //
+ at.state = t.executed_state (a, false /* fail */);
+ }
+ else
+ at.state = /*t.state[a].state =*/ target_state::failed;
+
+ switch (at.state)
{
case target_state::unknown:
{
@@ -439,26 +766,191 @@ namespace build2
if (fail)
throw failed ();
- // We should have executed every target that we matched, provided we
+#ifndef NDEBUG
+ size_t base (ctx.count_base ());
+
+ // For now we disable these checks if we've performed any group member
+ // resolutions that required a match (with apply()) but not execute.
+ //
+ if (ctx.target_count.load (memory_order_relaxed) != 0 &&
+ ctx.resolve_count.load (memory_order_relaxed) != 0)
+ {
+ // These counts are only tracked for the inner operation.
+ //
+ action ia (a.outer () ? a.inner_action () : a);
+
+ // While it may seem that just decrementing the counters for every
+ // target with the resolve_counted flag set should be enough, this will
+ // miss any prerequisites that this target has matched but did not
+ // execute, which may affect both task_count and dependency_count. Note
+ // that this applies recursively and we effectively need to pretend to
+ // execute this target and all its prerequisites, recursively without
+ // actually executing any of their recipes.
+ //
+ // That last bit means we must be able to interpret the populated
+ // prerequisite_targets generically, which is a requirement we place on
+ // rules that resolve groups in apply (see target::group_members() for
+ // details). It so happens that our own adhoc_buildscript_rule doesn't
+ // follow this rule (see execute_update_prerequisites()) so we detect
+ // and handle this with a hack.
+ //
+ // @@ Hm, but there is no guarantee that this holds recursively since
+ // prerequisites may not be see-through groups. For this to work we
+ // would have to impose this restriction globally. Which we could
+ // probably do, just need to audit things carefully (especially
+ // cc::link_rule). But we already sort of rely on that for dump! Maybe
+ // should just require it everywhere and fix adhoc_buildscript_rule.
+ //
+ // @@ There are special recipes that don't populate prerequisite_targets
+ // like group_recipe! Are we banning any user-defined such recipes?
+ // Need to actually look if we have anything else like this. There
+ // is also inner_recipe, though doesn't apply here (only for outer).
+ //
+ // @@ TMP: do and enable after the 0.16.0 release.
+ //
+ // Note: recursive lambda.
+ //
+#if 0
+ auto pretend_execute = [base, ia] (target& t,
+ const auto& pretend_execute) -> void
+ {
+ context& ctx (t.ctx);
+
+ // Note: tries to emulate the execute_impl() functions semantics.
+ //
+ auto execute_impl = [base, ia, &ctx, &pretend_execute] (target& t)
+ {
+ target::opstate& s (t.state[ia]);
+
+ size_t gd (ctx.dependency_count.fetch_sub (1, memory_order_relaxed));
+ size_t td (s.dependents.fetch_sub (1, memory_order_release));
+ assert (td != 0 && gd != 0);
+
+ // Execute unless already executed.
+ //
+ if (s.task_count.load (memory_order_relaxed) !=
+ base + target::offset_executed)
+ pretend_execute (t, pretend_execute);
+ };
+
+ target::opstate& s (t.state[ia]);
+
+ if (s.state != target_state::unchanged) // Noop recipe.
+ {
+ if (s.recipe_group_action)
+ {
+ execute_impl (const_cast<target&> (*t.group));
+ }
+ else
+ {
+ // @@ Special hack for adhoc_buildscript_rule (remember to drop
+ // include above if getting rid of).
+ //
+ bool adhoc (
+ ia == perform_update_id &&
+ s.rule != nullptr &&
+ dynamic_cast<const adhoc_buildscript_rule*> (
+ &s.rule->second.get ()) != nullptr);
+
+ for (const prerequisite_target& p: t.prerequisite_targets[ia])
+ {
+ const target* pt;
+
+ if (adhoc)
+ pt = (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr);
+ else
+ pt = p.target;
+
+ if (pt != nullptr)
+ execute_impl (const_cast<target&> (*pt));
+ }
+
+ ctx.target_count.fetch_sub (1, memory_order_relaxed);
+ if (s.resolve_counted)
+ {
+ s.resolve_counted = false;
+ ctx.resolve_count.fetch_sub (1, memory_order_relaxed);
+ }
+ }
+
+ s.state = target_state::changed;
+ }
+
+ s.task_count.store (base + target::offset_executed,
+ memory_order_relaxed);
+ };
+#endif
+
+ for (const auto& pt: ctx.targets)
+ {
+ target& t (*pt);
+ target::opstate& s (t.state[ia]);
+
+ // We are only interested in the targets that have been matched for
+ // this operation and are in the applied state.
+ //
+ if (s.task_count.load (memory_order_relaxed) !=
+ base + target::offset_applied)
+ continue;
+
+ if (s.resolve_counted)
+ {
+#if 0
+ pretend_execute (t, pretend_execute);
+
+ if (ctx.resolve_count.load (memory_order_relaxed) == 0)
+ break;
+#else
+ return; // Skip all the below checks.
+#endif
+ }
+ }
+ }
+
+ // We should have executed every target that we have matched, provided we
// haven't failed (in which case we could have bailed out early).
//
assert (ctx.target_count.load (memory_order_relaxed) == 0);
+ assert (ctx.resolve_count.load (memory_order_relaxed) == 0); // Sanity check.
-#ifndef NDEBUG
if (ctx.dependency_count.load (memory_order_relaxed) != 0)
{
+ auto dependents = [base] (action a, const target& t)
+ {
+ const target::opstate& s (t.state[a]);
+
+ // Only consider targets that have been matched for this operation
+ // (since matching is what causes the dependents count reset).
+ //
+ size_t c (s.task_count.load (memory_order_relaxed));
+
+ return (c >= base + target::offset_applied
+ ? s.dependents.load (memory_order_relaxed)
+ : 0);
+ };
+
diag_record dr;
dr << info << "detected unexecuted matched targets:";
for (const auto& pt: ctx.targets)
{
const target& t (*pt);
- if (size_t n = t[a].dependents.load (memory_order_relaxed))
+
+ if (size_t n = dependents (a, t))
dr << text << t << ' ' << n;
+
+ if (a.outer ())
+ {
+ if (size_t n = dependents (a.inner_action (), t))
+ dr << text << t << ' ' << n;
+ }
}
}
-#endif
+
assert (ctx.dependency_count.load (memory_order_relaxed) == 0);
+#endif
}
const meta_operation_info mo_perform {
@@ -471,10 +963,10 @@ namespace build2
true, // bootstrap_outer
nullptr, // meta-operation pre
nullptr, // operation pre
- &load,
- &search,
- &match,
- &execute,
+ &perform_load,
+ &perform_search,
+ &perform_match,
+ &perform_execute,
nullptr, // operation post
nullptr, // meta-operation post
nullptr // include
@@ -482,8 +974,73 @@ namespace build2
// info
//
+
+ // Note: similar approach to forward() in configure.
+ //
+ struct info_params
+ {
+ bool json = false;
+ bool subprojects = true;
+ };
+
+ // Note: should not fail if mo is NULL (see info_subprojects() below).
+ //
+ static info_params
+ info_parse_params (const values& params,
+ const char* mo = nullptr,
+ const location& l = location ())
+ {
+ info_params r;
+
+ if (params.size () == 1)
+ {
+ for (const name& n: cast<names> (params[0]))
+ {
+ if (n.simple ())
+ {
+ if (n.value == "json")
+ {
+ r.json = true;
+ continue;
+ }
+
+ if (n.value == "no_subprojects")
+ {
+ r.subprojects = false;
+ continue;
+ }
+
+ // Fall through.
+ }
+
+ if (mo != nullptr)
+ fail (l) << "unexpected parameter '" << n << "' for "
+ << "meta-operation " << mo;
+ }
+ }
+ else if (!params.empty ())
+ {
+ if (mo != nullptr)
+ fail (l) << "unexpected parameters for meta-operation " << mo;
+ }
+
+ return r;
+ }
+
+ bool
+ info_subprojects (const values& params)
+ {
+ return info_parse_params (params).subprojects;
+ }
+
+ static void
+ info_pre (context&, const values& params, const location& l)
+ {
+ info_parse_params (params, "info", l); // Validate.
+ }
+
static operation_id
- info_operation_pre (const values&, operation_id o)
+ info_operation_pre (context&, const values&, operation_id o)
{
if (o != default_id)
fail << "explicit operation specified for meta-operation info";
@@ -532,7 +1089,7 @@ namespace build2
}
static void
- info_execute (const values&, action, action_targets& ts, uint16_t, bool)
+ info_execute_lines (action_targets& ts, bool subp)
{
for (size_t i (0); i != ts.size (); ++i)
{
@@ -565,7 +1122,7 @@ namespace build2
//
auto print_mods = [&rs] ()
{
- for (const module_state& ms: rs.root_extra->modules)
+ for (const module_state& ms: rs.root_extra->loaded_modules)
cout << ' ' << ms.name;
};
@@ -583,6 +1140,20 @@ namespace build2
cout << ' ' << *p;
};
+ // Print a potentially null/empty directory path without trailing slash.
+ //
+ auto print_dir = [] (const dir_path& d)
+ {
+ if (!d.empty ())
+ cout << ' ' << d.string ();
+ };
+
+ auto print_pdir = [&print_dir] (const dir_path* d)
+ {
+ if (d != nullptr)
+ print_dir (*d);
+ };
+
// This could be a simple project that doesn't set project name.
//
cout
@@ -590,16 +1161,181 @@ namespace build2
<< "version:" ; print_empty (cast_empty<string> (rs[ctx.var_version])); cout << endl
<< "summary:" ; print_empty (cast_empty<string> (rs[ctx.var_project_summary])); cout << endl
<< "url:" ; print_empty (cast_empty<string> (rs[ctx.var_project_url])); cout << endl
- << "src_root: " << cast<dir_path> (rs[ctx.var_src_root]) << endl
- << "out_root: " << cast<dir_path> (rs[ctx.var_out_root]) << endl
- << "amalgamation:" ; print_null (*rs.root_extra->amalgamation); cout << endl
- << "subprojects:" ; print_null (*rs.root_extra->subprojects); cout << endl
+ << "src_root:" ; print_dir (cast<dir_path> (rs[ctx.var_src_root])); cout << endl
+ << "out_root:" ; print_dir (cast<dir_path> (rs[ctx.var_out_root])); cout << endl
+ << "amalgamation:" ; print_pdir (*rs.root_extra->amalgamation); cout << endl;
+ if (subp)
+ {
+ cout
+ << "subprojects:" ; print_null (*rs.root_extra->subprojects); cout << endl;
+ }
+ cout
<< "operations:" ; print_ops (rs.root_extra->operations, ctx.operation_table); cout << endl
<< "meta-operations:"; print_ops (rs.root_extra->meta_operations, ctx.meta_operation_table); cout << endl
<< "modules:" ; print_mods (); cout << endl;
}
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ info_execute_json (action_targets& ts, bool subp)
+ {
+ json::stream_serializer s (cout);
+ s.begin_array ();
+
+ for (size_t i (0); i != ts.size (); ++i)
+ {
+ const scope& rs (ts[i].as<scope> ());
+
+ context& ctx (rs.ctx);
+
+ s.begin_object ();
+
+ // Print a potentially empty string.
+ //
+ auto print_string = [&s] (const char* n,
+ const string& v,
+ bool check = false)
+ {
+ if (!v.empty ())
+ s.member (n, v, check);
+ };
+
+ // Print a potentially null/empty directory path without trailing slash.
+ //
+ auto print_dir = [&s] (const char* n, const dir_path& v)
+ {
+ if (!v.empty ())
+ s.member (n, v.string ());
+ };
+
+ auto print_pdir = [&print_dir] (const char* n, const dir_path* v)
+ {
+ if (v != nullptr)
+ print_dir (n, *v);
+ };
+
+ // Print [meta_]operation names (see info_lines() for details).
+ //
+ auto print_ops = [&s] (const char* name,
+ const auto& ov,
+ const auto& ot,
+ const auto& printer)
+ {
+ s.member_name (name, false /* check */);
+
+ s.begin_array ();
+
+ for (uint8_t id (2); id < ov.size (); ++id)
+ {
+ if (ov[id] != nullptr)
+ printer (ot[id]);
+ }
+
+ s.end_array ();
+ };
+
+ // Note that we won't check some values for being valid UTF-8, since
+ // their characters belong to even stricter character sets and/or are
+ // read from buildfile which is already verified to be valid UTF-8.
+ //
+ print_string ("project", project (rs).string ());
+ print_string ("version", cast_empty<string> (rs[ctx.var_version]));
+ print_string ("summary", cast_empty<string> (rs[ctx.var_project_summary]));
+ print_string ("url", cast_empty<string> (rs[ctx.var_project_url]));
+ print_dir ("src_root", cast<dir_path> (rs[ctx.var_src_root]));
+ print_dir ("out_root", cast<dir_path> (rs[ctx.var_out_root]));
+ print_pdir ("amalgamation", *rs.root_extra->amalgamation);
+
+ // Print subprojects.
+ //
+ if (subp)
+ {
+ const subprojects* sps (*rs.root_extra->subprojects);
+
+ if (sps != nullptr && !sps->empty ())
+ {
+ s.member_name ("subprojects", false /* check */);
+ s.begin_array ();
+
+ for (const auto& sp: *sps)
+ {
+ s.begin_object ();
+
+ print_dir ("path", sp.second);
+
+ // See find_subprojects() for details.
+ //
+ const string& n (sp.first.string ());
+
+ if (!path::traits_type::is_separator (n.back ()))
+ print_string ("name", n);
+
+ s.end_object ();
+ }
+
+ s.end_array ();
+ }
+ }
+
+ print_ops ("operations",
+ rs.root_extra->operations,
+ ctx.operation_table,
+ [&s] (const string& v) {s.value (v, false /* check */);});
+
+ print_ops ("meta-operations",
+ rs.root_extra->meta_operations,
+ ctx.meta_operation_table,
+ [&s] (const meta_operation_data& v)
+ {
+ s.value (v.name, false /* check */);
+ });
+
+ // Print modules.
+ //
+ if (!rs.root_extra->loaded_modules.empty ())
+ {
+ s.member_name ("modules", false /* check */);
+ s.begin_array ();
+
+ for (const module_state& ms: rs.root_extra->loaded_modules)
+ s.value (ms.name, false /* check */);
+
+ s.end_array ();
+ }
+
+ s.end_object ();
+ }
+
+ s.end_array ();
+ cout << endl;
+ }
+#else
+ static void
+ info_execute_json (action_targets&, bool)
+ {
+ }
+#endif //BUILD2_BOOTSTRAP
+
+ static void
+ info_execute (const values& params,
+ action,
+ action_targets& ts,
+ uint16_t,
+ bool)
+ {
+ info_params ip (info_parse_params (params));
+
+ // Note that both outputs will not be "ideal" if the user does something
+ // like `b info(foo/) info(bar/)` instead of `b info(foo/ bar/)`. Oh,
+ // well.
+ //
+ if (ip.json)
+ info_execute_json (ts, ip.subprojects);
+ else
+ info_execute_lines (ts, ip.subprojects);
+ }
+
const meta_operation_info mo_info {
info_id,
"info",
@@ -607,8 +1343,8 @@ namespace build2
"",
"",
"",
- false, // bootstrap_outer
- nullptr, // meta-operation pre
+ false, // bootstrap_outer
+ &info_pre, // meta-operation pre
&info_operation_pre,
&info_load,
&info_search,
@@ -634,6 +1370,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
@@ -660,6 +1398,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
@@ -676,6 +1416,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
}
diff --git a/libbuild2/operation.hxx b/libbuild2/operation.hxx
index d80a01c..e8ff38a 100644
--- a/libbuild2/operation.hxx
+++ b/libbuild2/operation.hxx
@@ -82,8 +82,8 @@ namespace build2
// then default_id is used. If, however, operation_pre() is NULL,
// then default_id is translated to update_id.
//
- void (*meta_operation_pre) (const values&, const location&);
- operation_id (*operation_pre) (const values&, operation_id);
+ void (*meta_operation_pre) (context&, const values&, const location&);
+ operation_id (*operation_pre) (context&, const values&, operation_id);
// Meta-operation-specific logic to load the buildfile, search and match
// the targets, and execute the action on the targets.
@@ -121,16 +121,20 @@ namespace build2
// End of operation and meta-operation batches.
//
- void (*operation_post) (const values&, operation_id);
- void (*meta_operation_post) (const values&);
+ // Note: not called in case any of the earlier callbacks failed.
+ //
+ void (*operation_post) (context&, const values&, operation_id);
+ void (*meta_operation_post) (context&, const values&);
// Optional prerequisite exclusion override callback. See include() for
- // details. Note that it's not called for include_type::normal;
+ // details. Note that it's not called for include_type::normal without
+ // operation-specific override.
//
include_type (*include) (action,
const target&,
const prerequisite_member&,
- include_type);
+ include_type,
+ lookup&);
};
// Built-in meta-operations.
@@ -145,41 +149,46 @@ namespace build2
// scope.
//
LIBBUILD2_SYMEXPORT void
- load (const values&,
- scope&,
- const path&,
- const dir_path&,
- const dir_path&,
- const location&);
+ perform_load (const values&,
+ scope&,
+ const path&,
+ const dir_path&,
+ const dir_path&,
+ const location&);
// Search and match the target. This is the default implementation
// that does just that and adds a pointer to the target to the list.
//
LIBBUILD2_SYMEXPORT void
- search (const values&,
- const scope&,
- const scope&,
- const path&,
- const target_key&,
- const location&,
- action_targets&);
+ perform_search (const values&,
+ const scope&,
+ const scope&,
+ const path&,
+ const target_key&,
+ const location&,
+ action_targets&);
LIBBUILD2_SYMEXPORT void
- match (const values&, action, action_targets&,
- uint16_t diag, bool prog);
+ perform_match (const values&, action, action_targets&,
+ uint16_t diag, bool prog);
// Execute the action on the list of targets. This is the default
// implementation that does just that while issuing appropriate
// diagnostics (unless quiet).
//
LIBBUILD2_SYMEXPORT void
- execute (const values&, action, const action_targets&,
- uint16_t diag, bool prog);
+ perform_execute (const values&, action, const action_targets&,
+ uint16_t diag, bool prog);
LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_noop;
LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_perform;
LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_info;
+ // Return true if params does not contain no_subprojects.
+ //
+ LIBBUILD2_SYMEXPORT bool
+ info_subprojects (const values& params);
+
// Operation info.
//
// NOTE: keep POD-like to ensure can be constant-initialized in order to
@@ -216,17 +225,38 @@ namespace build2
//
const size_t concurrency;
- // The first argument in all the callbacks is the operation parameters.
+ // The values argument in the callbacks is the operation parameters. If
+ // the operation expects parameters, then it should have a non-NULL
+ // operation_pre() callback. Failed that, any parameters will be diagnosed
+ // as unexpected.
+ //
+ // Note also that if the specified operation has outer (for example,
+ // update-for-install), then parameters belong to outer (for example,
+ // install; this is done in order to be consistent with the case when
+ // update is performed as a pre-operation of install).
+
+ // Pre/post operations for this operation. Note that these callbacks are
+ // called before this operation becomes current.
+ //
+ // If the operation_id's returned by pre/post_*() are not 0, then they
+ // are injected as pre/post operations for this operation. Can be NULL
+ // if unused. The returned operation_id shall not be default_id.
//
- // If the operation expects parameters, then it should have a non-NULL
- // pre(). Failed that, any parameters will be diagnosed as unexpected.
+ operation_id (*pre_operation) (
+ context&, const values&, meta_operation_id, const location&);
- // If the returned operation_id's are not 0, then they are injected
- // as pre/post operations for this operation. Can be NULL if unused.
- // The returned operation_id shall not be default_id.
+ operation_id (*post_operation) (
+ context&, const values&, meta_operation_id);
+
+ // Called immediately after/before this operation becomes/ceases to be
+ // current operation for the specified context. Can be used to
+ // initialize/finalize operation-specific data (context::current_*_odata).
+ // Can be NULL if unused.
//
- operation_id (*pre) (const values&, meta_operation_id, const location&);
- operation_id (*post) (const values&, meta_operation_id);
+ void (*operation_pre) (
+ context&, const values&, bool inner, const location&);
+ void (*operation_post) (
+ context&, const values&, bool inner);
// Operation-specific ad hoc rule callbacks. Essentially, if not NULL,
// then every ad hoc rule match and apply call for this operation is
@@ -302,35 +332,36 @@ namespace build2
using operation_table = butl::string_table<operation_id>;
- // These are "sparse" in the sense that we may have "holes" that
- // are represented as NULL pointers. Also, lookup out of bounds
- // is treated as a hole.
+ // This is a "sparse" vector in the sense that we may have "holes" that are
+ // represented as default-initialized empty instances (for example, NULL if
+ // T is a pointer). Also, lookup out of bounds is treated as a hole.
//
- template <typename T>
+ template <typename T, size_t N>
struct sparse_vector
{
- using base_type = vector<T*>;
+ using base_type = small_vector<T, N>;
using size_type = typename base_type::size_type;
void
- insert (size_type i, T& x)
+ insert (size_type i, T x)
{
size_type n (v_.size ());
if (i < n)
- v_[i] = &x;
+ v_[i] = x;
else
{
if (n != i)
- v_.resize (i, nullptr); // Add holes.
- v_.push_back (&x);
+ v_.resize (i, T ()); // Add holes.
+
+ v_.push_back (move (x));
}
}
- T*
+ T
operator[] (size_type i) const
{
- return i < v_.size () ? v_[i] : nullptr;
+ return i < v_.size () ? v_[i] : T ();
}
bool
@@ -345,8 +376,28 @@ namespace build2
base_type v_;
};
- using meta_operations = sparse_vector<const meta_operation_info>;
- using operations = sparse_vector<const operation_info>;
+ // For operations we keep both the pointer to its description as well
+ // as to its operation variable (see var_include) which may belong to
+ // the project-private variable pool.
+ //
+ struct project_operation_info
+ {
+ const operation_info* info = nullptr;
+ const variable* ovar = nullptr; // Operation variable.
+
+ // Allow treating it as pointer to operation_info in most contexts.
+ //
+ operator const operation_info*() const {return info;}
+ bool operator== (nullptr_t) {return info == nullptr;}
+ bool operator!= (nullptr_t) {return info != nullptr;}
+
+ project_operation_info (const operation_info* i = nullptr, // VC14
+ const variable* v = nullptr)
+ : info (i), ovar (v) {}
+ };
+
+ using meta_operations = sparse_vector<const meta_operation_info*, 8>;
+ using operations = sparse_vector<project_operation_info, 10>;
}
namespace butl
diff --git a/libbuild2/options-types.hxx b/libbuild2/options-types.hxx
new file mode 100644
index 0000000..5c224a7
--- /dev/null
+++ b/libbuild2/options-types.hxx
@@ -0,0 +1,16 @@
+// file : libbuild2/options-types.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_OPTIONS_TYPES_HXX
+#define LIBBUILD2_OPTIONS_TYPES_HXX
+
+namespace build2
+{
+ enum class structured_result_format
+ {
+ lines,
+ json
+ };
+}
+
+#endif // LIBBUILD2_OPTIONS_TYPES_HXX
diff --git a/libbuild2/parser.cxx b/libbuild2/parser.cxx
index d50f86f..5d77e2b 100644
--- a/libbuild2/parser.cxx
+++ b/libbuild2/parser.cxx
@@ -24,6 +24,8 @@
#include <libbuild2/adhoc-rule-regex-pattern.hxx>
+#include <libbuild2/dist/module.hxx> // module
+
#include <libbuild2/config/utility.hxx> // lookup_config
using namespace std;
@@ -42,7 +44,10 @@ namespace build2
{
o << '=';
names storage;
- to_stream (o, reverse (a.value, storage), true /* quote */, '@');
+ to_stream (o,
+ reverse (a.value, storage, true /* reduce */),
+ quote_mode::normal,
+ '@');
}
return o;
@@ -57,27 +62,7 @@ namespace build2
enter_scope (parser& p, dir_path&& d)
: p_ (&p), r_ (p.root_), s_ (p.scope_), b_ (p.pbase_)
{
- // Try hard not to call normalize(). Most of the time we will go just
- // one level deeper.
- //
- bool n (true);
-
- if (d.relative ())
- {
- // Relative scopes are opened relative to out, not src.
- //
- if (d.simple () && !d.current () && !d.parent ())
- {
- d = dir_path (p.scope_->out_path ()) /= d.string ();
- n = false;
- }
- else
- d = p.scope_->out_path () / d;
- }
-
- if (n)
- d.normalize ();
-
+ complete_normalize (*p.scope_, d);
e_ = p.switch_scope (d);
}
@@ -103,8 +88,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_scope (enter_scope&& x) {*this = move (x);}
- enter_scope& operator= (enter_scope&& x)
+ enter_scope (enter_scope&& x) noexcept {*this = move (x);}
+ enter_scope& operator= (enter_scope&& x) noexcept
{
if (this != &x)
{
@@ -121,6 +106,31 @@ namespace build2
enter_scope (const enter_scope&) = delete;
enter_scope& operator= (const enter_scope&) = delete;
+ static void
+ complete_normalize (scope& s, dir_path& d)
+ {
+ // Try hard not to call normalize(). Most of the time we will go just
+ // one level deeper.
+ //
+ bool n (true);
+
+ if (d.relative ())
+ {
+ // Relative scopes are opened relative to out, not src.
+ //
+ if (d.simple () && !d.current () && !d.parent ())
+ {
+ d = dir_path (s.out_path ()) /= d.string ();
+ n = false;
+ }
+ else
+ d = s.out_path () / d;
+ }
+
+ if (n)
+ d.normalize ();
+ }
+
private:
parser* p_;
scope* r_;
@@ -162,7 +172,7 @@ namespace build2
tracer& tr)
{
auto r (p.scope_->find_target_type (n, o, loc));
- return p.ctx.targets.insert (
+ return p.ctx->targets.insert (
r.first, // target type
move (n.dir),
move (o.dir),
@@ -182,12 +192,12 @@ namespace build2
tracer& tr)
{
auto r (p.scope_->find_target_type (n, o, loc));
- return p.ctx.targets.find (r.first, // target type
- n.dir,
- o.dir,
- n.value,
- r.second, // extension
- tr);
+ return p.ctx->targets.find (r.first, // target type
+ n.dir,
+ o.dir,
+ n.value,
+ r.second, // extension
+ tr);
}
~enter_target ()
@@ -198,8 +208,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_target (enter_target&& x) {*this = move (x);}
- enter_target& operator= (enter_target&& x) {
+ enter_target (enter_target&& x) noexcept {*this = move (x);}
+ enter_target& operator= (enter_target&& x) noexcept {
p_ = x.p_; t_ = x.t_; x.p_ = nullptr; return *this;}
enter_target (const enter_target&) = delete;
@@ -230,8 +240,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_prerequisite (enter_prerequisite&& x) {*this = move (x);}
- enter_prerequisite& operator= (enter_prerequisite&& x) {
+ enter_prerequisite (enter_prerequisite&& x) noexcept {*this = move (x);}
+ enter_prerequisite& operator= (enter_prerequisite&& x) noexcept {
p_ = x.p_; r_ = x.r_; x.p_ = nullptr; return *this;}
enter_prerequisite (const enter_prerequisite&) = delete;
@@ -247,6 +257,7 @@ namespace build2
{
pre_parse_ = false;
attributes_.clear ();
+ condition_ = nullopt;
default_target_ = nullptr;
peeked_ = false;
replay_ = replay::stop;
@@ -259,10 +270,11 @@ namespace build2
scope* root,
scope& base,
target* tgt,
- prerequisite* prq)
+ prerequisite* prq,
+ bool enter)
{
lexer l (is, in);
- parse_buildfile (l, root, base, tgt, prq);
+ parse_buildfile (l, root, base, tgt, prq, enter);
}
void parser::
@@ -270,7 +282,8 @@ namespace build2
scope* root,
scope& base,
target* tgt,
- prerequisite* prq)
+ prerequisite* prq,
+ bool enter)
{
path_ = &l.name ();
lexer_ = &l;
@@ -289,9 +302,9 @@ namespace build2
? auto_project_env (*root_)
: auto_project_env ());
- if (path_->path != nullptr)
- enter_buildfile (*path_->path); // Note: needs scope_.
-
+ const buildfile* bf (enter && path_->path != nullptr
+ ? &enter_buildfile (*path_->path)
+ : nullptr);
token t;
type tt;
next (t, tt);
@@ -303,13 +316,32 @@ namespace build2
else
{
parse_clause (t, tt);
- process_default_target (t);
+ process_default_target (t, bf);
}
if (tt != type::eos)
fail (t) << "unexpected " << t;
}
+ names parser::
+ parse_export_stub (istream& is, const path_name& name,
+ const scope& rs, scope& gs, scope& ts)
+ {
+ // Enter the export stub manually with correct out.
+ //
+ if (name.path != nullptr)
+ {
+ dir_path out (!rs.out_eq_src ()
+ ? out_src (name.path->directory (), rs)
+ : dir_path ());
+
+ enter_buildfile (*name.path, move (out));
+ }
+
+ parse_buildfile (is, name, &gs, ts, nullptr, nullptr, false /* enter */);
+ return move (export_value);
+ }
+
token parser::
parse_variable (lexer& l, scope& s, const variable& var, type kind)
{
@@ -355,6 +387,81 @@ namespace build2
return make_pair (move (lhs), move (t));
}
+ names parser::
+ parse_names (lexer& l,
+ const dir_path* b,
+ pattern_mode pmode,
+ const char* what,
+ const string* separators)
+ {
+ path_ = &l.name ();
+ lexer_ = &l;
+
+ root_ = nullptr;
+ scope_ = nullptr;
+ target_ = nullptr;
+ prerequisite_ = nullptr;
+
+ pbase_ = b;
+
+ token t;
+ type tt;
+
+ mode (lexer_mode::value, '@');
+ next (t, tt);
+
+ names r (parse_names (t, tt, pmode, what, separators));
+
+ if (tt != type::eos)
+ fail (t) << "unexpected " << t;
+
+ return r;
+ }
+
+ value parser::
+ parse_eval (lexer& l, scope& rs, scope& bs, pattern_mode pmode)
+ {
+ path_ = &l.name ();
+ lexer_ = &l;
+
+ root_ = &rs;
+ scope_ = &bs;
+ target_ = nullptr;
+ prerequisite_ = nullptr;
+
+ pbase_ = scope_->src_path_;
+
+ // Note that root_ may not be a project root.
+ //
+ auto_project_env penv (
+ stage_ != stage::boot && root_ != nullptr && root_->root_extra != nullptr
+ ? auto_project_env (*root_)
+ : auto_project_env ());
+
+ token t;
+ type tt;
+ next (t, tt);
+
+ if (tt != type::lparen)
+ fail (t) << "expected '(' instead of " << t;
+
+ location loc (get_location (t));
+ mode (lexer_mode::eval, '@');
+ next_with_attributes (t, tt);
+
+ values vs (parse_eval (t, tt, pmode));
+
+ if (next (t, tt) != type::eos)
+ fail (t) << "unexpected " << t;
+
+ switch (vs.size ())
+ {
+ case 0: return value (names ());
+ case 1: return move (vs[0]);
+ default: fail (loc) << "expected single value" << endf;
+ }
+ }
+
bool parser::
parse_clause (token& t, type& tt, bool one)
{
@@ -516,9 +623,39 @@ namespace build2
location nloc (get_location (t));
names ns;
- if (tt != type::labrace)
+ // We have to parse names in chunks to detect invalid cases of the
+ // group{foo}<...> syntax.
+ //
+ // Consider (1):
+ //
+ // x =
+ // group{foo} $x<...>:
+ //
+ // And (2):
+ //
+ // x = group{foo} group{bar}
+ // $x<...>:
+ //
+ // As well as (3):
+ //
+ // <...><...>:
+ //
+ struct chunk
{
- ns = parse_names (t, tt, pattern_mode::preserve);
+ size_t pos; // Index in ns of the beginning of the last chunk.
+ location loc; // Position of the beginning of the last chunk.
+ };
+ optional<chunk> ns_last;
+
+ bool labrace_first (tt == type::labrace);
+ if (!labrace_first)
+ {
+ do
+ {
+ ns_last = chunk {ns.size (), get_location (t)};
+ parse_names (t, tt, ns, pattern_mode::preserve, true /* chunk */);
+ }
+ while (start_names (tt));
// Allow things like function calls that don't result in anything.
//
@@ -534,44 +671,87 @@ namespace build2
}
}
- // Handle ad hoc target group specification (<...>).
+ // Handle target group specification (<...>).
//
// We keep an "optional" (empty) vector of names parallel to ns that
- // contains the ad hoc group members.
+ // contains the group members. Note that when we "catch" gns up to ns,
+ // we populate it with ad hoc (as opposed to explicit) groups with no
+ // members.
//
- adhoc_names ans;
+ group_names gns;
if (tt == type::labrace)
{
- while (tt == type::labrace)
+ for (; tt == type::labrace; labrace_first = false)
{
- // Parse target names inside < >.
+ // Detect explicit group (group{foo}<...>).
+ //
+ // Note that `<` first thing on the line is not separated thus the
+ // labrace_first complication.
+ //
+ bool expl (!t.separated && !labrace_first);
+ if (expl)
+ {
+ // Note: (N) refers to the example in the above comment.
+ //
+ if (!ns_last /* (3) */ || ns_last->pos == ns.size () /* (1) */)
+ {
+ fail (t) << "group name or whitespace expected before '<'";
+ }
+ else
+ {
+ size_t n (ns.size () - ns_last->pos);
+
+ // Note: could be a pair.
+ //
+ if ((n > 2 || (n == 2 && !ns[ns_last->pos].pair)) /* (2) */)
+ {
+ fail (t) << "single group name or whitespace expected before "
+ << "'<' instead of '"
+ << names_view (ns.data () + ns_last->pos, n) << "'";
+ }
+ }
+ }
+
+ // Parse target names inside <>.
//
// We "reserve" the right to have attributes inside <> though what
// exactly that would mean is unclear. One potentially useful
- // semantics would be the ability to specify attributes for ad hoc
- // members though the fact that the primary target is listed first
- // would make it rather unintuitive. Maybe attributes that change
- // the group semantics itself?
+ // semantics would be the ability to specify attributes for group
+ // members though the fact that the primary target for ad hoc groups
+ // is listed first would make it rather unintuitive. Maybe
+ // attributes that change the group semantics itself?
//
next_with_attributes (t, tt);
auto at (attributes_push (t, tt));
if (at.first)
- fail (at.second) << "attributes before ad hoc target";
+ fail (at.second) << "attributes before group member";
else
attributes_pop ();
- // Allow empty case (<>).
+ // For explicit groups, the group target is already in ns and all
+ // the members should go straight to gns.
//
- if (tt != type::rabrace)
+ // For ad hoc groups, the first name (or a pair) is the primary
+ // target which we need to keep in ns. The rest, if any, are ad
+ // hoc members that we should move to gns.
+ //
+ if (expl)
+ {
+ gns.resize (ns.size ()); // Catch up with the names vector.
+ group_names_loc& g (gns.back ());
+ g.expl = true;
+ g.group_loc = move (ns_last->loc);
+ g.member_loc = get_location (t); // Start of members.
+
+ if (tt != type::rabrace) // Handle empty case (<>)
+ parse_names (t, tt, g.ns, pattern_mode::preserve);
+ }
+ else if (tt != type::rabrace) // Allow and ignore empty case (<>).
{
- location aloc (get_location (t));
+ location mloc (get_location (t)); // Start of members.
- // The first name (or a pair) is the primary target which we need
- // to keep in ns. The rest, if any, are ad hoc members that we
- // should move to ans.
- //
size_t m (ns.size ());
parse_names (t, tt, ns, pattern_mode::preserve);
size_t n (ns.size ());
@@ -588,11 +768,10 @@ namespace build2
{
n -= m; // Number of names in ns we should end up with.
- ans.resize (n); // Catch up with the names vector.
- adhoc_names_loc& a (ans.back ());
-
- a.loc = move (aloc);
- a.ns.insert (a.ns.end (),
+ gns.resize (n); // Catch up with the names vector.
+ group_names_loc& g (gns.back ());
+ g.group_loc = g.member_loc = move (mloc);
+ g.ns.insert (g.ns.end (),
make_move_iterator (ns.begin () + n),
make_move_iterator (ns.end ()));
ns.resize (n);
@@ -606,12 +785,16 @@ namespace build2
// Parse the next chunk of target names after >, if any.
//
next (t, tt);
- if (start_names (tt))
- parse_names (t, tt, ns, pattern_mode::preserve);
+ ns_last = nullopt; // To detect <...><...>.
+ while (start_names (tt))
+ {
+ ns_last = chunk {ns.size (), get_location (t)};
+ parse_names (t, tt, ns, pattern_mode::preserve, true /* chunk */);
+ }
}
- if (!ans.empty ())
- ans.resize (ns.size ()); // Catch up with the final chunk.
+ if (!gns.empty ())
+ gns.resize (ns.size ()); // Catch up with the final chunk.
if (tt != type::colon)
fail (t) << "expected ':' instead of " << t;
@@ -630,10 +813,7 @@ namespace build2
if (ns.empty ())
fail (t) << "expected target before ':'";
- if (at.first)
- fail (at.second) << "attributes before target";
- else
- attributes_pop ();
+ attributes as (attributes_pop ());
// Call the specified parsing function (variable value/block) for
// one/each pattern/target. We handle multiple targets by replaying
@@ -642,10 +822,11 @@ namespace build2
// evaluated. The function signature is:
//
// void (token& t, type& tt,
+ // optional<bool> member, // true -- explicit, false -- ad hoc
// optional<pattern_type>, const target_type* pat_tt, string pat,
// const location& pat_loc)
//
- // Note that the target and its ad hoc members are inserted implied
+ // Note that the target and its group members are inserted implied
// but this flag can be cleared and default_target logic applied if
// appropriate.
//
@@ -742,16 +923,20 @@ namespace build2
if (ttype == nullptr)
fail (nloc) << "unknown target type " << n.type;
- f (t, tt, n.pattern, ttype, move (n.value), nloc);
+ f (t, tt, nullopt, n.pattern, ttype, move (n.value), nloc);
};
auto for_each = [this, &trace, &for_one_pat,
- &t, &tt, &ns, &nloc, &ans] (auto&& f)
+ &t, &tt, &as, &ns, &nloc, &gns] (auto&& f)
{
+ // We need replay if we have multiple targets or group members.
+ //
// Note: watch out for an out-qualified single target (two names).
//
replay_guard rg (*this,
- ns.size () > 2 || (ns.size () == 2 && !ns[0].pair));
+ ns.size () > 2 ||
+ (ns.size () == 2 && !ns[0].pair) ||
+ !gns.empty ());
for (size_t i (0), e (ns.size ()); i != e; )
{
@@ -765,11 +950,15 @@ namespace build2
//
if (n.pattern)
{
+ if (!as.empty ())
+ fail (as.loc) << "attributes before target type/pattern";
+
if (n.pair)
fail (nloc) << "out-qualified target type/pattern";
- if (!ans.empty () && !ans[i].ns.empty ())
- fail (ans[i].loc) << "ad hoc member in target type/pattern";
+ if (!gns.empty () && !gns[i].ns.empty ())
+ fail (gns[i].member_loc)
+ << "group member in target type/pattern";
if (*n.pattern == pattern_type::regex_substitution)
fail (nloc) << "regex substitution " << n << " without "
@@ -779,24 +968,47 @@ namespace build2
}
else
{
- name o (n.pair ? move (ns[++i]) : name ());
- enter_target tg (*this,
- move (n),
- move (o),
- true /* implied */,
- nloc,
- trace);
-
- // Enter ad hoc members.
- //
- if (!ans.empty ())
+ bool expl;
+ vector<reference_wrapper<target>> gms;
{
- // Note: index after the pair increment.
+ name o (n.pair ? move (ns[++i]) : name ());
+ enter_target tg (*this,
+ move (n),
+ move (o),
+ true /* implied */,
+ nloc,
+ trace);
+
+ if (!as.empty ())
+ apply_target_attributes (*target_, as);
+
+ // Enter group members.
//
- enter_adhoc_members (move (ans[i]), true /* implied */);
+ if (!gns.empty ())
+ {
+ // Note: index after the pair increment.
+ //
+ group_names_loc& g (gns[i]);
+ expl = g.expl;
+
+ if (expl && !target_->is_a<group> ())
+ fail (g.group_loc) << *target_ << " is not group target";
+
+ gms = expl
+ ? enter_explicit_members (move (g), true /* implied */)
+ : enter_adhoc_members (move (g), true /* implied */);
+ }
+
+ f (t, tt, nullopt, nullopt, nullptr, string (), location ());
}
- f (t, tt, nullopt, nullptr, string (), location ());
+ for (target& gm: gms)
+ {
+ rg.play (); // Replay.
+
+ enter_target tg (*this, gm);
+ f (t, tt, expl, nullopt, nullptr, string (), location ());
+ }
}
if (++i != e)
@@ -850,12 +1062,15 @@ namespace build2
ploc = get_location (t);
pns = parse_names (t, tt, pattern_mode::preserve);
- // Target-specific variable assignment.
+ // Target type/pattern-specific variable assignment.
//
if (tt == type::assign || tt == type::prepend || tt == type::append)
{
- if (!ans.empty ())
- fail (ans[0].loc) << "ad hoc member in target type/pattern";
+ // Note: ns contains single target name.
+ //
+ if (!gns.empty ())
+ fail (gns[0].member_loc)
+ << "group member in target type/pattern";
// Note: see the same code below if changing anything here.
//
@@ -874,6 +1089,7 @@ namespace build2
for_one_pat (
[this, &var, akind, &aloc] (
token& t, type& tt,
+ optional<bool>,
optional<pattern_type> pt, const target_type* ptt,
string pat, const location& ploc)
{
@@ -886,6 +1102,10 @@ namespace build2
nloc);
next_after_newline (t, tt);
+
+ if (!as.empty ())
+ fail (as.loc) << "attributes before target type/pattern";
+
continue; // Just a target type/pattern-specific var assignment.
}
@@ -915,6 +1135,7 @@ namespace build2
for_one_pat (
[this] (
token& t, type& tt,
+ optional<bool>,
optional<pattern_type> pt, const target_type* ptt,
string pat, const location& ploc)
{
@@ -934,8 +1155,14 @@ namespace build2
if (pns.empty () &&
tt != type::percent && tt != type::multi_lcbrace)
{
- if (!ans.empty ())
- fail (ans[0].loc) << "ad hoc member in target type/pattern";
+ // Note: ns contains single target name.
+ //
+ if (!gns.empty ())
+ fail (gns[0].member_loc)
+ << "group member in target type/pattern";
+
+ if (!as.empty ())
+ fail (as.loc) << "attributes before target type/pattern";
continue;
}
@@ -943,6 +1170,38 @@ namespace build2
// Ok, this is an ad hoc pattern rule.
//
+ // First process the attributes.
+ //
+ string rn;
+ {
+ const location& l (as.loc);
+
+ for (auto& a: as)
+ {
+ const string& n (a.name);
+ value& v (a.value);
+
+ // rule_name=
+ //
+ if (n == "rule_name")
+ {
+ try
+ {
+ rn = convert<string> (move (v));
+
+ if (rn.empty ())
+ throw invalid_argument ("empty name");
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid " << n << " attribute value: " << e;
+ }
+ }
+ else
+ fail (l) << "unknown ad hoc pattern rule attribute " << a;
+ }
+ }
+
// What should we do if we have neither prerequisites nor recipes?
// While such a declaration doesn't make much sense, it can happen,
// for example, with an empty variable expansion:
@@ -979,7 +1238,8 @@ namespace build2
// semantics is not immediately obvious. Whatever we decide, it
// should be consistent with the target type/pattern-specific
// variables where it is interpreted as a scope (and which doesn't
- // feel like the best option for pattern rules).
+ // feel like the best option for pattern rules). See also depdb
+ // dyndep --update-* patterns.
//
auto check_pattern = [this] (name& n, const location& loc)
{
@@ -1003,22 +1263,33 @@ namespace build2
check_pattern (n, nloc);
- // Verify all the ad hoc members are patterns or substitutions and
- // of the correct type.
+ // If we have group members, verify all the members are patterns or
+ // substitutions (ad hoc) or substitutions (explicit) and of the
+ // correct pattern type. A rule for an explicit group that wishes to
+ // match based on some of its members feels far-fetched.
+ //
+ // For explicit groups the use-case is to inject static members
+ // which could otherwise be tedious to specify for each group.
//
- names ns (ans.empty () ? names () : move (ans[0].ns));
- const location& aloc (ans.empty () ? location () : ans[0].loc);
+ const location& mloc (gns.empty () ? location () : gns[0].member_loc);
+ names ns (gns.empty () ? names () : move (gns[0].ns));
+ bool expl (gns.empty () ? false : gns[0].expl);
for (name& n: ns)
{
if (!n.pattern || !(*n.pattern == pt || (st && *n.pattern == *st)))
{
- fail (aloc) << "expected " << pn << " pattern or substitution "
+ fail (mloc) << "expected " << pn << " pattern or substitution "
<< "instead of " << n;
}
if (*n.pattern != pattern_type::regex_substitution)
- check_pattern (n, aloc);
+ {
+ if (expl)
+ fail (mloc) << "explicit group member pattern " << n;
+
+ check_pattern (n, mloc);
+ }
}
// The same for prerequisites except here we can have non-patterns.
@@ -1038,14 +1309,18 @@ namespace build2
}
}
- // Derive the rule name. It must be unique in this scope.
+ // Derive the rule name unless specified explicitly. It must be
+ // unique in this scope.
//
// It would have been nice to include the location but unless we
// include the absolute path to the buildfile (which would be
// unwieldy), it could be ambigous.
//
- string rn ("<ad hoc pattern rule #" +
- to_string (scope_->adhoc_rules.size () + 1) + '>');
+ // NOTE: we rely on the <...> format in dump.
+ //
+ if (rn.empty ())
+ rn = "<ad hoc pattern rule #" +
+ to_string (scope_->adhoc_rules.size () + 1) + '>';
auto& ars (scope_->adhoc_rules);
@@ -1058,7 +1333,9 @@ namespace build2
const target_type* ttype (nullptr);
if (i != ars.end ())
{
- // @@ TODO: append ad hoc members, prereqs.
+ // @@ TODO: append ad hoc members, prereqs (we now have
+ // [rule_name=] which we can use to reference the same
+ // rule).
//
ttype = &(*i)->type;
assert (false);
@@ -1074,6 +1351,12 @@ namespace build2
if (ttype == nullptr)
fail (nloc) << "unknown target type " << n.type;
+ if (!gns.empty ())
+ {
+ if (ttype->is_a<group> () != expl)
+ fail (nloc) << "group type and target type mismatch";
+ }
+
unique_ptr<adhoc_rule_pattern> rp;
switch (pt)
{
@@ -1085,7 +1368,7 @@ namespace build2
rp.reset (new adhoc_rule_regex_pattern (
*scope_, rn, *ttype,
move (n), nloc,
- move (ns), aloc,
+ move (ns), mloc,
move (pns), ploc));
break;
case pattern_type::regex_substitution:
@@ -1108,15 +1391,24 @@ namespace build2
for (shared_ptr<adhoc_rule>& pr: recipes)
{
+ // Can be NULL if the recipe is disabled with a condition.
+ //
+ if (pr != nullptr)
+ {
+ pr->pattern = &rp; // Connect recipe to pattern.
+ rp.rules.push_back (move (pr));
+ }
+ }
+
+ // Register this adhoc rule for all its actions.
+ //
+ for (shared_ptr<adhoc_rule>& pr: rp.rules)
+ {
adhoc_rule& r (*pr);
- r.pattern = &rp; // Connect recipe to pattern.
- rp.rules.push_back (move (pr));
- // Register this adhoc rule for all its actions.
- //
for (action a: r.actions)
{
- // This covers both duplicate recipe actions withing the rule
+ // This covers both duplicate recipe actions within the rule
// pattern (similar to parse_recipe()) as well as conflicts
// with other rules (ad hoc or not).
//
@@ -1146,6 +1438,44 @@ namespace build2
scope_->rules.insert (
a.meta_operation (), 0,
*ttype, rp.rule_name, rp.fallback_rule_);
+
+ // We also register for the dist meta-operation in order to
+ // inject additional prerequisites which may "pull" additional
+ // sources into the distribution. Unless there is an explicit
+ // recipe for dist.
+ //
+ // And the same for the configure meta-operation to, for
+ // example, make sure a hinted ad hoc rule matches. @@ Hm,
+ // maybe we fixed this with action-specific hints? But the
+ // injection part above may still apply. BTW, this is also
+ // required for see-through groups in order to resolve their
+ // member.
+ //
+ // Note also that the equivalent semantics for ad hoc recipes
+ // is provided by match_adhoc_recipe().
+ //
+ if (a.meta_operation () == perform_id)
+ {
+ auto reg = [this, ttype, &rp, &r] (action ea)
+ {
+ for (shared_ptr<adhoc_rule>& pr: rp.rules)
+ for (action a: pr->actions)
+ if (ea == a)
+ return;
+
+ scope_->rules.insert (ea, *ttype, rp.rule_name, r);
+ };
+
+ reg (action (dist_id, a.operation ()));
+ reg (action (configure_id, a.operation ()));
+ }
+
+ // @@ TODO: if this rule does dynamic member discovery of a
+ // see-through target group, then we may also need to
+ // register update for other meta-operations (see, for
+ // example, wildcard update registration in the cli
+ // module). BTW, we can now detect such a target via
+ // its target type flags.
}
}
}
@@ -1189,6 +1519,7 @@ namespace build2
st = token (t), // Save start token (will be gone on replay).
recipes = small_vector<shared_ptr<adhoc_rule>, 1> ()]
(token& t, type& tt,
+ optional<bool> gm, // true -- explicit, false -- ad hoc
optional<pattern_type> pt, const target_type* ptt, string pat,
const location& ploc) mutable
{
@@ -1202,7 +1533,14 @@ namespace build2
//
next (t, tt); // Newline.
next (t, tt); // First token inside the variable block.
- parse_variable_block (t, tt, pt, ptt, move (pat), ploc);
+
+ // For explicit groups we only assign variables on the group
+ // omitting the members.
+ //
+ if (!gm || !*gm)
+ parse_variable_block (t, tt, pt, ptt, move (pat), ploc);
+ else
+ skip_block (t, tt);
if (tt != type::rcbrace)
fail (t) << "expected '}' instead of " << t;
@@ -1218,6 +1556,16 @@ namespace build2
else
rt = st;
+ // If this is a group member then we know we are replaying and
+ // can skip the recipe.
+ //
+ if (gm)
+ {
+ replay_skip ();
+ next (t, tt);
+ return;
+ }
+
if (pt)
fail (rt) << "unexpected recipe after target type/pattern" <<
info << "ad hoc pattern rule may not be combined with other "
@@ -1238,7 +1586,7 @@ namespace build2
// Note also that we treat this as an explicit dependency
// declaration (i.e., not implied).
//
- enter_targets (move (ns), nloc, move (ans), 0);
+ enter_targets (move (ns), nloc, move (gns), 0, as);
}
continue;
@@ -1253,7 +1601,8 @@ namespace build2
if (!start_names (tt))
fail (t) << "unexpected " << t;
- // @@ PAT: currently we pattern-expand target-specific var names.
+ // @@ PAT: currently we pattern-expand target-specific var names (see
+ // also parse_import()).
//
const location ploc (get_location (t));
names pns (parse_names (t, tt, pattern_mode::expand));
@@ -1288,6 +1637,7 @@ namespace build2
for_each (
[this, &var, akind, &aloc] (
token& t, type& tt,
+ optional<bool> gm,
optional<pattern_type> pt, const target_type* ptt, string pat,
const location& ploc)
{
@@ -1296,7 +1646,18 @@ namespace build2
*pt, *ptt, move (pat), ploc,
var, akind, aloc);
else
- parse_variable (t, tt, var, akind);
+ {
+ // Skip explicit group members (see the block case above for
+ // background).
+ //
+ if (!gm || !*gm)
+ parse_variable (t, tt, var, akind);
+ else
+ {
+ next (t, tt);
+ skip_line (t, tt);
+ }
+ }
});
next_after_newline (t, tt);
@@ -1314,8 +1675,9 @@ namespace build2
parse_dependency (t, tt,
move (ns), nloc,
- move (ans),
- move (pns), ploc);
+ move (gns),
+ move (pns), ploc,
+ as);
}
continue;
@@ -1539,7 +1901,7 @@ namespace build2
// Parse a recipe chain.
//
// % [<attrs>] [<buildspec>]
- // [if|switch ...]
+ // [if|if!|switch ...]
// {{ [<lang> ...]
// ...
// }}
@@ -1558,10 +1920,27 @@ namespace build2
//
if (target_ != nullptr)
{
+ // @@ What if some members are added later?
+ //
+ // @@ Also, what happends if redeclared as real dependency, do we
+ // upgrade the members?
+ //
if (target_->decl != target_decl::real)
{
- for (target* m (target_); m != nullptr; m = m->adhoc_member)
- m->decl = target_decl::real;
+ target_->decl = target_decl::real;
+
+ if (group* g = target_->is_a<group> ())
+ {
+ for (const target& m: g->static_members)
+ const_cast<target&> (m).decl = target_decl::real; // During load.
+ }
+ else
+ {
+ for (target* m (target_->adhoc_member);
+ m != nullptr;
+ m = m->adhoc_member)
+ m->decl = target_decl::real;
+ }
if (default_target_ == nullptr)
default_target_ = target_;
@@ -1574,7 +1953,15 @@ namespace build2
t = start; tt = t.type;
for (size_t i (0); tt == type::percent || tt == type::multi_lcbrace; ++i)
{
- recipes.push_back (nullptr); // For missing else/default (see below).
+ // For missing else/default (see below).
+ //
+ // Note that it may remain NULL if we have, say, an if-condition that
+ // evaluates to false and no else. While it may be tempting to get rid
+ // of such "holes", it's not easy due to the replay semantics (see the
+ // target_ != nullptr block below). So we expect the caller to be
+ // prepared to handle this.
+ //
+ recipes.push_back (nullptr);
attributes as;
buildspec bs;
@@ -1629,6 +2016,10 @@ namespace build2
//
location loc (get_location (st));
+ // @@ We could add an attribute (name= or recipe_name=) to allow
+ // the user specify a friendly name for diagnostics, similar
+ // to rule_name.
+
shared_ptr<adhoc_rule> ar;
if (!lang)
{
@@ -1738,7 +2129,7 @@ namespace build2
for (metaopspec& m: d.bs)
{
- meta_operation_id mi (ctx.meta_operation_table.find (m.name));
+ meta_operation_id mi (ctx->meta_operation_table.find (m.name));
if (mi == 0)
fail (l) << "unknown meta-operation " << m.name;
@@ -1748,7 +2139,7 @@ namespace build2
if (mf == nullptr)
fail (l) << "project " << *root_ << " does not support meta-"
- << "operation " << ctx.meta_operation_table[mi].name;
+ << "operation " << ctx->meta_operation_table[mi].name;
for (opspec& o: m)
{
@@ -1764,7 +2155,7 @@ namespace build2
fail (l) << "default operation in recipe action" << endf;
}
else
- oi = ctx.operation_table.find (o.name);
+ oi = ctx->operation_table.find (o.name);
if (oi == 0)
fail (l) << "unknown operation " << o.name;
@@ -1773,7 +2164,7 @@ namespace build2
if (of == nullptr)
fail (l) << "project " << *root_ << " does not support "
- << "operation " << ctx.operation_table[oi];
+ << "operation " << ctx->operation_table[oi];
// Note: for now always inner (see match_rule() for details).
//
@@ -1841,6 +2232,9 @@ namespace build2
}
target_->adhoc_recipes.push_back (r);
+
+ // Note that "registration" of configure_* and dist_* actions
+ // (similar to ad hoc rules) is provided by match_adhoc_recipe().
}
}
@@ -1868,8 +2262,7 @@ namespace build2
//
// TODO: handle and erase common attributes if/when we have any.
//
- as = move (attributes_top ());
- attributes_pop ();
+ as = attributes_pop ();
// Handle the buildspec.
//
@@ -1925,7 +2318,7 @@ namespace build2
// handy if we want to provide a custom recipe but only on certain
// platforms or some such).
- if (n == "if")
+ if (n == "if" || n == "if!")
{
parse_if_else (t, tt, true /* multi */, parse_block);
continue;
@@ -1985,13 +2378,97 @@ namespace build2
}
}
- void parser::
- enter_adhoc_members (adhoc_names_loc&& ans, bool implied)
+ vector<reference_wrapper<target>> parser::
+ enter_explicit_members (group_names_loc&& gns, bool implied)
+ {
+ tracer trace ("parser::enter_explicit_members", &path_);
+
+ names& ns (gns.ns);
+ const location& loc (gns.member_loc);
+
+ vector<reference_wrapper<target>> r;
+ r.reserve (ns.size ());
+
+ group& g (target_->as<group> ());
+ auto& ms (g.static_members);
+
+ for (size_t i (0); i != ns.size (); ++i)
+ {
+ name&& n (move (ns[i]));
+ name&& o (n.pair ? move (ns[++i]) : name ());
+
+ if (n.qualified ())
+ fail (loc) << "project name in target " << n;
+
+ // We derive the path unless the target name ends with the '...' escape
+ // which here we treat as the "let the rule derive the path" indicator
+ // (see target::split_name() for details). This will only be useful for
+ // referring to group members that are managed by the group's matching
+ // rule. Note also that omitting '...' for such a member could be used
+ // to override the file name, provided the rule checks if the path has
+ // already been derived before doing it itself.
+ //
+ // @@ What can the ad hoc recipe/rule do differently here? Maybe get
+ // path from dynamic targets? Maybe we will have custom path
+ // derivation support in buildscript in the future?
+ //
+ bool escaped;
+ {
+ const string& v (n.value);
+ size_t p (v.size ());
+
+ escaped = (p > 3 &&
+ v[--p] == '.' && v[--p] == '.' && v[--p] == '.' &&
+ v[--p] != '.');
+ }
+
+ target& m (enter_target::insert_target (*this,
+ move (n), move (o),
+ implied,
+ loc, trace));
+
+ if (g == m)
+ fail (loc) << "explicit group member " << m << " is group itself";
+
+ // Add as static member skipping duplicates.
+ //
+ if (find (ms.begin (), ms.end (), m) == ms.end ())
+ {
+ if (m.group == nullptr)
+ m.group = &g;
+ else if (m.group != &g)
+ fail (loc) << g << " group member " << m << " already belongs to "
+ << "group " << *m.group;
+
+ ms.push_back (m);
+ }
+
+ if (!escaped)
+ {
+ if (file* ft = m.is_a<file> ())
+ ft->derive_path ();
+ }
+
+ r.push_back (m);
+ }
+
+ return r;
+ }
+
+ vector<reference_wrapper<target>> parser::
+ enter_adhoc_members (group_names_loc&& gns, bool implied)
{
tracer trace ("parser::enter_adhoc_members", &path_);
- names& ns (ans.ns);
- const location& loc (ans.loc);
+ names& ns (gns.ns);
+ const location& loc (gns.member_loc);
+
+ if (target_->is_a<group> ())
+ fail (loc) << "ad hoc group primary member " << *target_
+ << " is explicit group";
+
+ vector<reference_wrapper<target>> r;
+ r.reserve (ns.size ());
for (size_t i (0); i != ns.size (); ++i)
{
@@ -2019,14 +2496,16 @@ namespace build2
v[--p] != '.');
}
- target& at (
- enter_target::insert_target (*this,
- move (n), move (o),
- implied,
- loc, trace));
+ target& m (enter_target::insert_target (*this,
+ move (n), move (o),
+ implied,
+ loc, trace));
- if (target_ == &at)
- fail (loc) << "ad hoc group member " << at << " is primary target";
+ if (target_ == &m)
+ fail (loc) << "ad hoc group member " << m << " is primary target";
+
+ if (m.is_a<group> ())
+ fail (loc) << "ad hoc group member " << m << " is explicit group";
// Add as an ad hoc member at the end of the chain skipping duplicates.
//
@@ -2034,7 +2513,7 @@ namespace build2
const_ptr<target>* mp (&target_->adhoc_member);
for (; *mp != nullptr; mp = &(*mp)->adhoc_member)
{
- if (*mp == &at)
+ if (*mp == &m)
{
mp = nullptr;
break;
@@ -2043,30 +2522,41 @@ namespace build2
if (mp != nullptr)
{
- *mp = &at;
- at.group = target_;
+ if (m.group == nullptr)
+ m.group = target_;
+ else if (m.group != target_)
+ fail (loc) << *target_ << " ad hoc group member " << m
+ << " already belongs to group " << *m.group;
+ *mp = &m;
}
}
if (!escaped)
{
- if (file* ft = at.is_a<file> ())
+ if (file* ft = m.is_a<file> ())
ft->derive_path ();
}
+
+ r.push_back (m);
}
+
+ return r;
}
- small_vector<reference_wrapper<target>, 1> parser::
+ small_vector<pair<reference_wrapper<target>,
+ vector<reference_wrapper<target>>>, 1> parser::
enter_targets (names&& tns, const location& tloc, // Target names.
- adhoc_names&& ans, // Ad hoc target names.
- size_t prereq_size)
+ group_names&& gns, // Group member names.
+ size_t prereq_size,
+ const attributes& tas) // Target attributes.
{
- // Enter all the targets (normally we will have just one) and their ad hoc
- // groups.
+ // Enter all the targets (normally we will have just one) and their group
+ // members.
//
tracer trace ("parser::enter_targets", &path_);
- small_vector<reference_wrapper<target>, 1> tgs;
+ small_vector<pair<reference_wrapper<target>,
+ vector<reference_wrapper<target>>>, 1> tgs;
for (size_t i (0); i != tns.size (); ++i)
{
@@ -2088,13 +2578,24 @@ namespace build2
false /* implied */,
tloc, trace);
- // Enter ad hoc members.
+ if (!tas.empty ())
+ apply_target_attributes (*target_, tas);
+
+ // Enter group members.
//
- if (!ans.empty ())
+ vector<reference_wrapper<target>> gms;
+ if (!gns.empty ())
{
// Note: index after the pair increment.
//
- enter_adhoc_members (move (ans[i]), false /* implied */);
+ group_names_loc& g (gns[i]);
+
+ if (g.expl && !target_->is_a<group> ())
+ fail (g.group_loc) << *target_ << " is not group target";
+
+ gms = g.expl
+ ? enter_explicit_members (move (g), false /* implied */)
+ : enter_adhoc_members (move (g), false /* implied */);
}
if (default_target_ == nullptr)
@@ -2102,17 +2603,97 @@ namespace build2
target_->prerequisites_state_.store (2, memory_order_relaxed);
target_->prerequisites_.reserve (prereq_size);
- tgs.push_back (*target_);
+ tgs.emplace_back (*target_, move (gms));
}
return tgs;
}
void parser::
+ apply_target_attributes (target& t, const attributes& as)
+ {
+ const location& l (as.loc);
+
+ for (auto& a: as)
+ {
+ const string& n (a.name);
+ const value& v (a.value);
+
+ // rule_hint=
+ // liba@rule_hint=
+ //
+ size_t p (string::npos);
+ if (n == "rule_hint" ||
+ ((p = n.find ('@')) != string::npos &&
+ n.compare (p + 1, string::npos, "rule_hint") == 0))
+ {
+ // Resolve target type, if specified.
+ //
+ const target_type* tt (nullptr);
+ if (p != string::npos)
+ {
+ string t (n, 0, p);
+ tt = scope_->find_target_type (t);
+
+ if (tt == nullptr)
+ fail (l) << "unknown target type " << t << " in rule_hint "
+ << "attribute";
+ }
+
+ // The rule hint value is vector<pair<optional<string>, string>> where
+ // the first half is the operation and the second half is the hint.
+ // Absent operation is used as a fallback for update/clean.
+ //
+ const names& ns (v.as<names> ());
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ operation_id oi (default_id);
+ if (i->pair)
+ {
+ const name& n (*i++);
+
+ if (!n.simple ())
+ fail (l) << "expected operation name instead of " << n
+ << " in rule_hint attribute";
+
+ const string& v (n.value);
+
+ if (!v.empty ())
+ {
+ oi = ctx->operation_table.find (v);
+
+ if (oi == 0)
+ fail (l) << "unknown operation " << v << " in rule_hint "
+ << "attribute";
+
+ if (root_->root_extra->operations[oi] == nullptr)
+ fail (l) << "project " << *root_ << " does not support "
+ << "operation " << ctx->operation_table[oi]
+ << " specified in rule_hint attribute";
+ }
+ }
+
+ const name& n (*i);
+
+ if (!n.simple () || n.empty ())
+ fail (l) << "expected hint instead of " << n << " in rule_hint "
+ << "attribute";
+
+ t.rule_hints.insert (tt, oi, n.value);
+ }
+ }
+ else
+ fail (l) << "unknown target attribute " << a;
+ }
+ }
+
+ void parser::
parse_dependency (token& t, token_type& tt,
names&& tns, const location& tloc, // Target names.
- adhoc_names&& ans, // Ad hoc target names.
- names&& pns, const location& ploc) // Prereq names.
+ group_names&& gns, // Group member names.
+ names&& pns, const location& ploc, // Prereq names.
+ const attributes& tas) // Target attributes.
{
// Parse a dependency chain and/or a target/prerequisite-specific variable
// assignment/block and/or recipe block(s).
@@ -2122,33 +2703,72 @@ namespace build2
//
tracer trace ("parser::parse_dependency", &path_);
+ // Diagnose conditional prerequisites. Note that we want to diagnose this
+ // even if pns is empty (think empty variable expansion; the literal "no
+ // prerequisites" case is handled elsewhere).
+ //
+ // @@ TMP For now we only do it during the dist meta-operation. In the
+ // future we should tighten this to any meta-operation provided
+ // the dist module is loaded.
+ //
+ // @@ TMP For now it's a warning because we have dependencies like
+ // cli.cxx{foo}: cli{foo} which are not currently possible to
+ // rewrite (cli.cxx{} is not always registered).
+ //
+ if (condition_ &&
+ ctx->current_mif != nullptr &&
+ ctx->current_mif->id == dist_id)
+ {
+ // Only issue the warning for the projects being distributed. In
+ // particular, this makes sure we don't complain about imported
+ // projects. Note: use amalgamation to cover bundled subprojects.
+ //
+ auto* dm (root_->bundle_scope ()->find_module<dist::module> (
+ dist::module::name));
+
+ if (dm != nullptr && dm->distributed)
+ {
+ warn (tloc) << "conditional dependency declaration may result in "
+ << "incomplete distribution" <<
+ info (ploc) << "prerequisite declared here" <<
+ info (*condition_) << "conditional buildfile fragment starts here" <<
+ info << "instead use 'include' prerequisite-specific variable to "
+ << "conditionally include prerequisites" <<
+ info << "for example: "
+ << "<target>: <prerequisite>: include = (<condition>)" <<
+ info << "for details, see https://github.com/build2/HOWTO/blob/"
+ << "master/entries/keep-build-graph-config-independent.md";
+ }
+ }
+
// First enter all the targets.
//
- small_vector<reference_wrapper<target>, 1> tgs (
- enter_targets (move (tns), tloc, move (ans), pns.size ()));
+ small_vector<pair<reference_wrapper<target>,
+ vector<reference_wrapper<target>>>, 1>
+ tgs (enter_targets (move (tns), tloc, move (gns), pns.size (), tas));
// Now enter each prerequisite into each target.
//
- for (name& pn: pns)
+ for (auto i (pns.begin ()); i != pns.end (); ++i)
{
// We cannot reuse the names if we (potentially) may need to pass them
// as targets in case of a chain (see below).
//
- name n (tt != type::colon ? move (pn) : pn);
+ name n (tt != type::colon ? move (*i) : *i);
// See also scope::find_prerequisite_key().
//
auto rp (scope_->find_target_type (n, ploc));
- const target_type* tt (rp.first);
+ const target_type* t (rp.first);
optional<string>& e (rp.second);
- if (tt == nullptr)
+ if (t == nullptr)
fail (ploc) << "unknown target type " << n.type;
// Current dir collapses to an empty one.
//
if (!n.dir.empty ())
- n.dir.normalize (false, true);
+ n.dir.normalize (false /* actual */, true);
// @@ OUT: for now we assume the prerequisite's out is undetermined. The
// only way to specify an src prerequisite will be with the explicit
@@ -2159,10 +2779,47 @@ namespace build2
     //    a special indicator. Also, one can easily and naturally suppress any
// searches by specifying the absolute path.
//
+ name o;
+ if (n.pair)
+ {
+ assert (n.pair == '@');
+
+ ++i;
+ o = tt != type::colon ? move (*i) : *i;
+
+ if (!o.directory ())
+ fail (ploc) << "expected directory after '@'";
+
+ o.dir.normalize (); // Note: don't collapse current to empty.
+
+ // Make sure out and src are parallel unless both were specified as
+ // absolute. We make an exception for this case because out may be
+ // used to "tag" imported targets (see cc::search_library()). So it's
+ // sort of the "I know what I am doing" escape hatch (it would have
+ // been even better to verify such a target is outside any project
+ // but that won't be cheap).
+ //
+ // For now we require that both are either relative or absolute.
+ //
+ // See similar code for targets in scope::find_target_type().
+ //
+ if (n.dir.absolute () && o.dir.absolute ())
+ ;
+ else if (n.dir.empty () && o.dir.current ())
+ ;
+ else if (o.dir.relative () &&
+ n.dir.relative () &&
+ o.dir == n.dir)
+ ;
+ else
+ fail (ploc) << "prerequisite output directory " << o.dir
+ << " must be parallel to source directory " << n.dir;
+ }
+
prerequisite p (move (n.proj),
- *tt,
+ *t,
move (n.dir),
- dir_path (),
+ move (o.dir),
move (n.value),
move (e),
*scope_);
@@ -2171,7 +2828,7 @@ namespace build2
{
// Move last prerequisite (which will normally be the only one).
//
- target& t (*i);
+ target& t (i->first);
t.prerequisites_.push_back (++i == e
? move (p)
: prerequisite (p, memory_order_relaxed));
@@ -2184,20 +2841,42 @@ namespace build2
//
// We handle multiple targets and/or prerequisites by replaying the tokens
// (see the target-specific case comments for details). The function
- // signature is:
+ // signature for for_each_t (see for_each on the gm argument semantics):
+ //
+ // void (token& t, type& tt, optional<bool> gm)
+ //
+ // And for for_each_p:
//
// void (token& t, type& tt)
//
auto for_each_t = [this, &t, &tt, &tgs] (auto&& f)
{
- replay_guard rg (*this, tgs.size () > 1);
+ // We need replay if we have multiple targets or group members.
+ //
+ replay_guard rg (*this, tgs.size () > 1 || !tgs[0].second.empty ());
for (auto ti (tgs.begin ()), te (tgs.end ()); ti != te; )
{
- target& tg (*ti);
- enter_target tgg (*this, tg);
+ target& tg (ti->first);
+ const vector<reference_wrapper<target>>& gms (ti->second);
- f (t, tt);
+ {
+ enter_target g (*this, tg);
+ f (t, tt, nullopt);
+ }
+
+ if (!gms.empty ())
+ {
+ bool expl (tg.is_a<group> ());
+
+ for (target& gm: gms)
+ {
+ rg.play (); // Replay.
+
+ enter_target g (*this, gm);
+ f (t, tt, expl);
+ }
+ }
if (++ti != te)
rg.play (); // Replay.
@@ -2210,8 +2889,8 @@ namespace build2
for (auto ti (tgs.begin ()), te (tgs.end ()); ti != te; )
{
- target& tg (*ti);
- enter_target tgg (*this, tg);
+ target& tg (ti->first);
+ enter_target g (*this, tg);
for (size_t pn (tg.prerequisites_.size ()), pi (pn - pns.size ());
pi != pn; )
@@ -2254,7 +2933,7 @@ namespace build2
this,
st = token (t), // Save start token (will be gone on replay).
recipes = small_vector<shared_ptr<adhoc_rule>, 1> ()]
- (token& t, type& tt) mutable
+ (token& t, type& tt, optional<bool> gm) mutable
{
token rt; // Recipe start token.
@@ -2264,7 +2943,14 @@ namespace build2
{
next (t, tt); // Newline.
next (t, tt); // First token inside the variable block.
- parse_variable_block (t, tt);
+
+ // Skip explicit group members (see the block case above for
+ // background).
+ //
+ if (!gm || !*gm)
+ parse_variable_block (t, tt);
+ else
+ skip_block (t, tt);
if (tt != type::rcbrace)
fail (t) << "expected '}' instead of " << t;
@@ -2280,6 +2966,16 @@ namespace build2
else
rt = st;
+ // If this is a group member then we know we are replaying and can
+ // skip the recipe.
+ //
+ if (gm)
+ {
+ replay_skip ();
+ next (t, tt);
+ return;
+ }
+
parse_recipe (t, tt, rt, recipes);
};
@@ -2289,21 +2985,6 @@ namespace build2
return;
}
- // What should we do if there are no prerequisites (for example, because
- // of an empty wildcard result)? We can fail or we can ignore. In most
- // cases, however, this is probably an error (for example, forgetting to
- // checkout a git submodule) so let's not confuse the user and fail (one
- // can always handle the optional prerequisites case with a variable and
- // an if).
- //
- if (pns.empty ())
- fail (ploc) << "no prerequisites in dependency chain or prerequisite-"
- << "specific variable assignment";
-
- next_with_attributes (t, tt); // Recognize attributes after `:`.
-
- auto at (attributes_push (t, tt));
-
// If we are here, then this can be one of three things:
//
     // 1. A prerequisite-specific variable block:
@@ -2317,10 +2998,37 @@ namespace build2
//
// foo: bar: x = y
//
- // 3. A further dependency chain :
+ // 3. A further dependency chain:
//
// foo: bar: baz ...
//
+ // What should we do if there are no prerequisites, for example, because
+ // of an empty wildcard result or empty variable expansion? We can fail or
+ // we can ignore. In most cases, however, this is probably an error (for
+ // example, forgetting to checkout a git submodule) so let's not confuse
+ // the user and fail (one can always handle the optional prerequisites
+ // case with a variable and an if).
+ //
+ // On the other hand, we allow just empty prerequisites (which is also the
+ // more common case by far) and so it's strange that we don't allow the
+ // same with, say, `include = false`:
+ //
+ // exe{foo}: cxx{$empty} # Ok.
+ // exe{foo}: cxx{$empty}: include = false # Not Ok?
+ //
+ // So let's ignore in the first two cases (variable block and assignment)
+ // for consistency. The dependency chain is iffy both conceptually and
+ // implementation-wise (it could be followed by a variable block). So
+ // let's keep it an error for now.
+ //
+ // Note that the syntactically-empty prerequisite list is still an error:
+ //
+ // exe{foo}: : include = false # Error.
+ //
+ next_with_attributes (t, tt); // Recognize attributes after `:`.
+
+ auto at (attributes_push (t, tt));
+
if (tt == type::newline || tt == type::eos)
{
attributes_pop (); // Must be none since can't be standalone.
@@ -2335,15 +3043,22 @@ namespace build2
// Parse the block for each prerequisites of each target.
//
- for_each_p ([this] (token& t, token_type& tt)
- {
- next (t, tt); // First token inside the block.
+ if (!pns.empty ())
+ for_each_p ([this] (token& t, token_type& tt)
+ {
+ next (t, tt); // First token inside the block.
- parse_variable_block (t, tt);
+ parse_variable_block (t, tt);
- if (tt != type::rcbrace)
- fail (t) << "expected '}' instead of " << t;
- });
+ if (tt != type::rcbrace)
+ fail (t) << "expected '}' instead of " << t;
+ });
+ else
+ {
+ skip_block (t, tt);
+ if (tt != type::rcbrace)
+ fail (t) << "expected '}' instead of " << t;
+ }
next (t, tt); // Presumably newline after '}'.
next_after_newline (t, tt, '}'); // Should be on its own line.
@@ -2366,10 +3081,13 @@ namespace build2
// Parse the assignment for each prerequisites of each target.
//
- for_each_p ([this, &var, at] (token& t, token_type& tt)
- {
- parse_variable (t, tt, var, at);
- });
+ if (!pns.empty ())
+ for_each_p ([this, &var, at] (token& t, token_type& tt)
+ {
+ parse_variable (t, tt, var, at);
+ });
+ else
+ skip_line (t, tt);
next_after_newline (t, tt);
@@ -2388,6 +3106,13 @@ namespace build2
//
else
{
+ if (pns.empty ())
+ fail (ploc) << "no prerequisites in dependency chain";
+
+ // @@ This is actually ambiguous: prerequisite or target attributes
+ // (or both or neither)? Perhaps this should be prerequisites for
+      //    the same reason as below (these are prerequisites first).
+ //
if (at.first)
fail (at.second) << "attributes before prerequisites";
else
@@ -2399,30 +3124,35 @@ namespace build2
// we just say that the dependency chain is equivalent to specifying
// each dependency separately.
//
- // Also note that supporting ad hoc target group specification in
- // chains will be complicated. For example, what if prerequisites that
- // have ad hoc targets don't end up being chained? Do we just silently
- // drop them? Also, these are prerequsites first that happened to be
- // reused as target names so perhaps it is the right thing not to
- // support, conceptually.
+ // Also note that supporting target group specification in chains will
+ // be complicated. For example, what if prerequisites that have group
+ // members don't end up being chained? Do we just silently drop them?
+      // Also, these are prerequisites first that happened to be reused as
+ // target names so perhaps it is the right thing not to support,
+ // conceptually.
//
parse_dependency (t, tt,
move (pns), ploc,
- {} /* ad hoc target name */,
- move (ns), loc);
+ {} /* group names */,
+ move (ns), loc,
+ attributes () /* target attributes */);
}
}
}
void parser::
- source (istream& is, const path_name& in, const location& loc, bool deft)
+ source_buildfile (istream& is,
+ const path_name& in,
+ const location& loc,
+ bool deft)
{
- tracer trace ("parser::source", &path_);
+ tracer trace ("parser::source_buildfile", &path_);
l5 ([&]{trace (loc) << "entering " << in;});
- if (in.path != nullptr)
- enter_buildfile (*in.path);
+ const buildfile* bf (in.path != nullptr
+ ? &enter_buildfile (*in.path)
+ : nullptr);
const path_name* op (path_);
path_ = &in;
@@ -2448,7 +3178,7 @@ namespace build2
if (deft)
{
- process_default_target (t);
+ process_default_target (t, bf);
default_target_ = odt;
}
@@ -2492,10 +3222,10 @@ namespace build2
try
{
ifdstream ifs (p);
- source (ifs,
- path_name (p),
- get_location (t),
- false /* default_target */);
+ source_buildfile (ifs,
+ path_name (p),
+ get_location (t),
+ false /* default_target */);
}
catch (const io_error& e)
{
@@ -2617,19 +3347,35 @@ namespace build2
l6 ([&]{trace (l) << "absolute path " << p;});
- if (!root_->buildfiles.insert (p).second) // Note: may be "new" root.
+ // Note: may be "new" root.
+ //
+ if (!root_->root_extra->insert_buildfile (p))
{
l5 ([&]{trace (l) << "skipping already included " << p;});
continue;
}
+ // Note: see a variant of this in parse_import().
+ //
+ // Clear/restore if/switch location.
+ //
+ // We do it here but not in parse_source since the included buildfile is
+ // in a sense expected to be a standalone entity (think a file included
+ // from an export stub).
+ //
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = nullopt;
+
try
{
ifdstream ifs (p);
- source (ifs,
- path_name (p),
- get_location (t),
- true /* default_target */);
+ source_buildfile (ifs,
+ path_name (p),
+ get_location (t),
+ true /* default_target */);
}
catch (const io_error& e)
{
@@ -2681,13 +3427,16 @@ namespace build2
[] (const string& s) {return s.c_str ();});
cargs.push_back (nullptr);
+ // Note: we are in the serial load phase and so no diagnostics buffering
+ // is needed.
+ //
process pr (run_start (3 /* verbosity */,
cargs,
0 /* stdin */,
-1 /* stdout */,
- true /* error */,
- dir_path () /* cwd */,
+ 2 /* stderr */,
nullptr /* env */,
+ dir_path () /* cwd */,
l));
try
{
@@ -2709,10 +3458,10 @@ namespace build2
dr << info (l) << "while parsing " << args[0] << " output";
});
- source (is,
- path_name ("<stdout>"),
- l,
- false /* default_target */);
+ source_buildfile (is,
+ path_name ("<stdout>"),
+ l,
+ false /* default_target */);
}
is.close (); // Detect errors.
@@ -2726,7 +3475,7 @@ namespace build2
// caused by that and let run_finish() deal with it.
}
- run_finish (cargs, pr, l);
+ run_finish (cargs, pr, 2 /* verbosity */, false /* omit_normal */, l);
next_after_newline (t, tt);
}
@@ -2787,18 +3536,23 @@ namespace build2
next_with_attributes (t, tt);
// Get variable attributes, if any, and deal with the special config.*
- // attributes. Since currently they can only appear in the config
- // directive, we handle them in an ad hoc manner.
+ // attributes as well as null. Since currently they can only appear in the
+ // config directive, we handle them in an ad hoc manner.
//
attributes_push (t, tt);
attributes& as (attributes_top ());
+ bool nullable (false);
optional<string> report;
string report_var;
for (auto i (as.begin ()); i != as.end (); )
{
- if (i->name == "config.report")
+ if (i->name == "null")
+ {
+ nullable = true;
+ }
+ else if (i->name == "config.report")
{
try
{
@@ -2810,7 +3564,7 @@ namespace build2
report = move (v);
else
throw invalid_argument (
- "expected 'false' or format name instead of '" + v + "'");
+ "expected 'false' or format name instead of '" + v + '\'');
}
catch (const invalid_argument& e)
{
@@ -2852,7 +3606,7 @@ namespace build2
if (report && *report != "false" && !config)
{
- if (!as.empty ())
+ if (!as.empty () || nullable)
fail (as.loc) << "unexpected attributes for report-only variable";
attributes_pop ();
@@ -2954,6 +3708,9 @@ namespace build2
peeked ().value != "false")
fail (loc) << var << " variable default value must be literal false";
+ if (nullable)
+ fail (loc) << var << " variable must not be nullable";
+
sflags |= config::save_false_omitted;
}
@@ -2972,14 +3729,48 @@ namespace build2
// all.
//
if (l.defined ())
+ {
+ // Peek at the attributes to detect whether the value is NULL.
+ //
+ if (!dev && !nullable)
+ {
+ // Essentially a prefix of parse_variable_value().
+ //
+ mode (lexer_mode::value, '@');
+ next_with_attributes (t, tt);
+ attributes_push (t, tt, true);
+ for (const attribute& a: attributes_pop ())
+ {
+ if (a.name == "null")
+ {
+ nullable = true;
+ break;
+ }
+ }
+ }
+
skip_line (t, tt);
+ }
else
{
value lhs, rhs (parse_variable_value (t, tt, !dev /* mode */));
apply_value_attributes (&var, lhs, move (rhs), type::assign);
+
+ if (!nullable)
+ nullable = lhs.null;
+
l = config::lookup_config (new_val, *root_, var, move (lhs), sflags);
}
}
+
+ // If the variable is not nullable, verify the value is not NULL.
+ //
+ // Note that undefined is not the same as NULL (if it is undefined, we
+ // should either see the default value or if there is no default value,
+ // then the user is expected to handle the undefined case).
+ //
+ if (!nullable && l.defined () && l->null)
+ fail (loc) << "null value in non-nullable variable " << var;
}
// We will be printing the report at either level 2 (-v) or 3 (-V)
@@ -2998,9 +3789,12 @@ namespace build2
// In a somewhat hackish way we pass the variable in an undefined
// lookup.
//
+ // Note: consistent with parse_variable_name() wrt overridability.
+ //
l = lookup ();
l.var = &root_->var_pool ().insert (
- move (report_var), true /* overridable */);
+ move (report_var),
+ report_var.find ('.') != string::npos /* overridable */);
}
if (l.var != nullptr)
@@ -3080,118 +3874,274 @@ namespace build2
if (stage_ == stage::boot)
fail (t) << "import during bootstrap";
- // General import format:
+ // General import form:
//
// import[?!] [<attrs>] <var> = [<attrs>] (<target>|<project>%<target>])+
//
+ // Special form for importing buildfiles:
+ //
+ // import[?!] [<attrs>] (<target>|<project>%<target>])+
+ //
bool opt (t.value.back () == '?');
- bool ph2 (opt || t.value.back () == '!');
+ optional<string> ph2 (opt || t.value.back () == '!'
+ ? optional<string> (string ())
+ : nullopt);
// We are now in the normal lexing mode and we let the lexer handle `=`.
//
next_with_attributes (t, tt);
- // Get variable attributes, if any, and deal with the special metadata
- // attribute. Since currently it can only appear in the import directive,
- // we handle it in an ad hoc manner.
+ // Get variable (or value, in the second form) attributes, if any, and
+ // deal with the special metadata and rule_hint attributes. Since
+ // currently they can only appear in the import directive, we handle them
+ // in an ad hoc manner.
//
attributes_push (t, tt);
- attributes& as (attributes_top ());
- bool meta (false);
- for (auto i (as.begin ()); i != as.end (); )
+ bool meta (false); // Import with metadata.
+ bool once (false); // Import buildfile once.
{
- if (i->name == "metadata")
- {
- if (!ph2)
- fail (as.loc) << "loading metadata requires immediate import" <<
- info << "consider using the import! directive instead";
+ attributes& as (attributes_top ());
+ const location& l (as.loc);
- meta = true;
- }
- else
+ for (auto i (as.begin ()); i != as.end (); )
{
- ++i;
- continue;
- }
+ const string& n (i->name);
+ value& v (i->value);
- i = as.erase (i);
- }
+ if (n == "metadata")
+ {
+ if (!ph2)
+ fail (l) << "loading metadata requires immediate import" <<
+ info << "consider using the import! directive instead";
- if (tt != type::word)
- fail (t) << "expected variable name instead of " << t;
+ meta = true;
+ }
+ else if (n == "once")
+ {
+ once = true;
+ }
+ else if (n == "rule_hint")
+ {
+ if (!ph2)
+ fail (l) << "rule hint can only be used with immediate import" <<
+ info << "consider using the import! directive instead";
- const variable& var (
- parse_variable_name (move (t.value), get_location (t)));
- apply_variable_attributes (var);
+ // Here we only allow a single name.
+ //
+ try
+ {
+ ph2 = convert<string> (move (v));
- if (var.visibility > variable_visibility::scope)
- {
- fail (t) << "variable " << var << " has " << var.visibility
- << " visibility but is assigned in import";
+ if (ph2->empty ())
+ throw invalid_argument ("empty name");
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid " << n << " attribute value: " << e;
+ }
+ }
+ else
+ {
+ ++i;
+ continue;
+ }
+
+ i = as.erase (i);
+ }
}
- // Next should come the assignment operator. Note that we don't support
+ // Note that before supporting the second form (without <var>) we used to
+ // parse the value after assignment in the value mode. However, we don't
+ // really need to since what we should have is a bunch of target names.
+ // In other words, whatever the value mode does not treat as special
+ // compared to the normal mode (like `:`) would be illegal here.
+ //
+    // Note that we expand patterns for the ad hoc import case:
+ //
+ // import sub = */
+ //
+ // @@ PAT: the only issue here is that we currently pattern-expand var
+    //         name (same issue as with target-specific var names).
+ //
+ if (!start_names (tt))
+ fail (t) << "expected variable name or buildfile target instead of " << t;
+
+ location loc (get_location (t));
+ names ns (parse_names (t, tt, pattern_mode::expand));
+
+ // Next could come the assignment operator. Note that we don't support
// default assignment (?=) yet (could make sense when attempting to import
// alternatives or some such).
//
- next (t, tt);
+ type atype;
+ const variable* var (nullptr);
+ if (tt == type::assign || tt == type::append || tt == type::prepend)
+ {
+ var = &parse_variable_name (move (ns), loc);
+ apply_variable_attributes (*var);
+
+ if (var->visibility > variable_visibility::scope)
+ {
+ fail (loc) << "variable " << *var << " has " << var->visibility
+ << " visibility but is assigned in import";
+ }
- if (tt != type::assign && tt != type::append && tt != type::prepend)
- fail (t) << "expected variable assignment instead of " << t;
+ atype = tt;
+ next_with_attributes (t, tt);
+ attributes_push (t, tt, true /* standalone */);
- type atype (tt);
- value& val (atype == type::assign
- ? scope_->assign (var)
- : scope_->append (var));
+ if (!start_names (tt))
+ fail (t) << "expected target to import instead of " << t;
- // The rest should be a list of targets. Parse them similar to a value on
- // the RHS of an assignment (attributes, etc).
- //
- // Note that we expant patterns for the ad hoc import case:
- //
- // import sub = */
+ loc = get_location (t);
+ ns = parse_names (t, tt, pattern_mode::expand);
+ }
+ else if (tt == type::default_assign)
+ fail (t) << "default assignment not yet supported";
+
+
+ // If there are any value attributes, roundtrip the names through the
+ // value applying the attributes.
//
- mode (lexer_mode::value, '@');
- next_with_attributes (t, tt);
+ if (!attributes_top ().empty ())
+ {
+ value lhs, rhs (move (ns));
+ apply_value_attributes (nullptr, lhs, move (rhs), type::assign);
- if (tt == type::newline || tt == type::eos)
- fail (t) << "expected target to import instead of " << t;
+ if (!lhs)
+ fail (loc) << "expected target to import instead of null value";
- const location loc (get_location (t));
+ untypify (lhs, true /* reduce */);
+ ns = move (lhs.as<names> ());
+ }
+ else
+ attributes_pop ();
- if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
+ value* val (var != nullptr ?
+ &(atype == type::assign
+ ? scope_->assign (*var)
+ : scope_->append (*var))
+ : nullptr);
+
+ for (name& n: ns)
{
- names storage;
- for (name& n: reverse (v, storage))
+ // @@ Could this be an out-qualified ad hoc import? Yes, see comment
+ // about buildfile import in import_load().
+ //
+ if (n.pair)
+ fail (loc) << "unexpected pair in import";
+
+ // See if we are importing a buildfile target. Such an import is always
+ // immediate.
+ //
+ bool bf (n.type == "buildfile");
+ if (bf)
{
- // @@ Could this be an out-qualified ad hoc import?
- //
- if (n.pair)
- fail (loc) << "unexpected pair in import";
+ if (meta)
+ fail (loc) << "metadata requested for buildfile target " << n;
- // import() will check the name, if required.
- //
- names r (import (*scope_, move (n), ph2, opt, meta, loc).first);
+ if (once && var != nullptr)
+ fail (loc) << "once importation requested with variable assignment";
+
+ if (ph2 && !ph2->empty ())
+ fail (loc) << "rule hint specified for buildfile target " << n;
+ }
+ else
+ {
+ if (once)
+ fail (loc) << "once importation requested for target " << n;
+
+ if (var == nullptr)
+ fail (loc) << "variable assignment required to import target " << n;
+ }
+
+ // import() will check the name, if required.
+ //
+ names r (import (*scope_,
+ move (n),
+ ph2 ? ph2 : bf ? optional<string> (string ()) : nullopt,
+ opt,
+ meta,
+ loc).first);
+ if (val != nullptr)
+ {
if (r.empty ()) // Optional not found.
{
if (atype == type::assign)
- val = nullptr;
+ *val = nullptr;
}
else
{
- if (atype == type::assign)
- val.assign (move (r), &var);
- else if (atype == type::prepend)
- val.prepend (move (r), &var);
- else
- val.append (move (r), &var);
+ if (atype == type::assign) val->assign (move (r), var);
+ else if (atype == type::prepend) val->prepend (move (r), var);
+ else val->append (move (r), var);
}
if (atype == type::assign)
atype = type::append; // Append subsequent values.
}
+ else
+ {
+ assert (bf);
+
+ if (r.empty ()) // Optional not found.
+ {
+ assert (opt);
+ continue;
+ }
+
+ // Note: see also import_buildfile().
+ //
+ assert (r.size () == 1); // See import_load() for details.
+ name& n (r.front ());
+ path p (n.dir / n.value); // Should already include extension.
+
+ // Note: similar to parse_include().
+ //
+ // Nuance: we insert this buildfile even with once=false in case it
+ // gets imported with once=true from another place.
+ //
+ if (!root_->root_extra->insert_buildfile (p) && once)
+ {
+ l5 ([&]{trace (loc) << "skipping already imported " << p;});
+ continue;
+ }
+
+ // Clear/restore if/switch location.
+ //
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = nullopt;
+
+ try
+ {
+ ifdstream ifs (p);
+
+ auto df = make_diag_frame (
+ [this, &loc] (const diag_record& dr)
+ {
+ dr << info (loc) << "imported from here";
+ });
+
+ // @@ Do we want to enter this buildfile? What's the harm (one
+ // benefit is that it will be in dump). But, we currently don't
+ // out-qualify them, though feels like there is nothing fatal
+ // in that, just inaccurate.
+ //
+ source_buildfile (ifs,
+ path_name (p),
+ loc,
+ false /* default_target */);
+ }
+ catch (const io_error& e)
+ {
+ fail (loc) << "unable to read imported buildfile " << p << ": " << e;
+ }
+ }
}
next_after_newline (t, tt);
@@ -3233,7 +4183,12 @@ namespace build2
fail (l) << "null value in export";
if (val.type != nullptr)
- untypify (val);
+ {
+ // While feels far-fetched, let's preserve empty typed values in the
+ // result.
+ //
+ untypify (val, false /* reduce */);
+ }
export_value = move (val).as<names> ();
@@ -3273,6 +4228,9 @@ namespace build2
n = move (i->value);
+ if (n[0] == '_')
+ fail (l) << "module name '" << n << "' starts with underscore";
+
if (i->pair)
try
{
@@ -3317,11 +4275,37 @@ namespace build2
void parser::
parse_define (token& t, type& tt)
{
- // define <derived>: <base>
+ // define [<attrs>] <derived>: <base>
//
// See tests/define.
//
- if (next (t, tt) != type::word)
+ next_with_attributes (t, tt);
+
+ // Handle attributes.
+ //
+ attributes_push (t, tt);
+
+ target_type::flag flags (target_type::flag::none);
+ {
+ attributes as (attributes_pop ());
+ const location& l (as.loc);
+
+ for (attribute& a: as)
+ {
+ const string& n (a.name);
+ value& v (a.value);
+
+ if (n == "see_through") flags |= target_type::flag::see_through;
+ else if (n == "member_hint") flags |= target_type::flag::member_hint;
+ else
+ fail (l) << "unknown target type definition attribute " << n;
+
+ if (!v.null)
+ fail (l) << "unexpected value in attribute " << n;
+ }
+ }
+
+ if (tt != type::word)
fail (t) << "expected name instead of " << t << " in target type "
<< "definition";
@@ -3344,7 +4328,18 @@ namespace build2
if (bt == nullptr)
fail (t) << "unknown target type " << bn;
- if (!root_->derive_target_type (move (dn), *bt).second)
+ // Note that the group{foo}<...> syntax is only recognized for group-
+ // based targets and ad hoc buildscript recipes/rules only match group.
+ // (We may want to relax this for member_hint in the future since its
+ // currently also used on non-mtime-based targets, though what exactly
+ // we will do in ad hoc recipes/rules in this case is fuzzy).
+ //
+ if ((flags & target_type::flag::group) == target_type::flag::group &&
+ !bt->is_a<group> ())
+ fail (t) << "base target type " << bn << " must be group for "
+ << "group-related attribute";
+
+ if (!root_->derive_target_type (move (dn), *bt, flags).second)
fail (dnl) << "target type " << dn << " already defined in this "
<< "project";
@@ -3360,6 +4355,12 @@ namespace build2
void parser::
parse_if_else (token& t, type& tt)
{
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = get_location (t);
+
parse_if_else (t, tt,
false /* multi */,
[this] (token& t, type& tt, bool s, const string& k)
@@ -3501,6 +4502,12 @@ namespace build2
void parser::
parse_switch (token& t, type& tt)
{
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = get_location (t);
+
parse_switch (t, tt,
false /* multi */,
[this] (token& t, type& tt, bool s, const string& k)
@@ -3711,7 +4718,7 @@ namespace build2
if (!e.arg.empty ())
args.push_back (value (e.arg));
- value r (ctx.functions.call (scope_, *e.func, args, l));
+ value r (ctx->functions.call (scope_, *e.func, args, l));
// We support two types of functions: matchers and extractors:
// a matcher returns a statically-typed bool value while an
@@ -3845,10 +4852,10 @@ namespace build2
void parser::
parse_for (token& t, type& tt)
{
- // for <varname>: <value>
+ // for [<var-attrs>] <varname> [<elem-attrs>]: [<val-attrs>] <value>
// <line>
//
- // for <varname>: <value>
+ // for [<var-attrs>] <varname> [<elem-attrs>]: [<val-attrs>] <value>
// {
// <block>
// }
@@ -3859,13 +4866,12 @@ namespace build2
next_with_attributes (t, tt);
attributes_push (t, tt);
- // @@ PAT: currently we pattern-expand for var.
+ // Enable list element attributes.
//
- const location vloc (get_location (t));
- names vns (parse_names (t, tt, pattern_mode::expand));
+ enable_attributes ();
- if (tt != type::colon)
- fail (t) << "expected ':' instead of " << t << " after variable name";
+ const location vloc (get_location (t));
+ names vns (parse_names (t, tt, pattern_mode::preserve));
const variable& var (parse_variable_name (move (vns), vloc));
apply_variable_attributes (var);
@@ -3876,6 +4882,17 @@ namespace build2
<< " visibility but is assigned in for-loop";
}
+ // Parse the list element attributes, if present.
+ //
+ attributes_push (t, tt);
+
+ if (tt != type::colon)
+ fail (t) << "expected ':' instead of " << t << " after variable name";
+
+ // Save element attributes so that we can inject them on each iteration.
+ //
+ attributes val_attrs (attributes_pop ());
+
// Now the value (list of names) to iterate over. Parse it similar to a
// value on the RHS of an assignment (expansion, attributes).
//
@@ -3892,7 +4909,11 @@ namespace build2
if (val && val.type != nullptr)
{
etype = val.type->element_type;
- untypify (val);
+
+ // Note that here we don't want to be reducing empty simple values to
+ // empty lists.
+ //
+ untypify (val, false /* reduce */);
}
if (tt != type::newline)
@@ -3940,7 +4961,7 @@ namespace build2
// Iterate.
//
- value& v (scope_->assign (var)); // Assign even if no iterations.
+ value& lhs (scope_->assign (var)); // Assign even if no iterations.
if (!val)
return;
@@ -3960,11 +4981,17 @@ namespace build2
names n;
n.push_back (move (*i));
if (pair) n.push_back (move (*++i));
- v = value (move (n));
+ value v (move (n));
if (etype != nullptr)
typify (v, *etype, &var);
+ // Inject element attributes.
+ //
+ attributes_.push_back (val_attrs);
+
+ apply_value_attributes (&var, lhs, move (v), type::assign);
+
lexer l (is, *path_, line);
lexer* ol (lexer_);
lexer_ = &l;
@@ -4067,7 +5094,7 @@ namespace build2
if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
{
names storage;
- cout << reverse (v, storage) << endl;
+ cout << reverse (v, storage, true /* reduce */) << endl;
}
else
cout << "[null]" << endl;
@@ -4100,7 +5127,7 @@ namespace build2
if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
{
names storage;
- dr << reverse (v, storage);
+ dr << reverse (v, storage, true /* reduce */);
}
if (tt != type::eos)
@@ -4130,8 +5157,10 @@ namespace build2
if (ns.empty ())
{
+ // Indent two spaces.
+ //
if (scope_ != nullptr)
- dump (*scope_, " "); // Indent two spaces.
+ dump (scope_, nullopt /* action */, dump_format::buildfile, " ");
else
os << " <no current scope>" << endl;
}
@@ -4149,8 +5178,10 @@ namespace build2
const target* t (enter_target::find_target (*this, n, o, l, trace));
+ // Indent two spaces.
+ //
if (t != nullptr)
- dump (*t, " "); // Indent two spaces.
+ dump (t, nullopt /* action */, dump_format::buildfile, " ");
else
{
os << " <no target " << n;
@@ -4172,10 +5203,12 @@ namespace build2
{
// Enter a variable name for assignment (as opposed to lookup).
+ // If the variable is qualified (and thus public), make it overridable.
+ //
// Note that the overridability can still be restricted (e.g., by a module
// that enters this variable or by a pattern).
//
- bool ovr (true);
+ bool ovr (on.find ('.') != string::npos);
auto r (scope_->var_pool ().insert (move (on), nullptr, nullptr, &ovr));
if (!r.second)
@@ -4209,9 +5242,13 @@ namespace build2
{
// Parse and enter a variable name for assignment (as opposed to lookup).
- // The list should contain a single, simple name.
+ // The list should contain a single, simple name. Go an extra mile to
+ // issue less confusing diagnostics.
//
- if (ns.size () != 1 || ns[0].pattern || !ns[0].simple () || ns[0].empty ())
+ size_t n (ns.size ());
+ if (n == 0 || (n == 1 && ns[0].empty ()))
+ fail (l) << "empty variable name";
+ else if (n != 1 || ns[0].pattern || !ns[0].simple ())
fail (l) << "expected variable name instead of " << ns;
return parse_variable_name (move (ns[0].value), l);
@@ -4267,7 +5304,7 @@ namespace build2
// Note that the pattern is preserved if insert fails with regex_error.
//
p = scope_->target_vars[ptt].insert (pt, move (pat)).insert (
- var, kind == type::assign);
+ var, kind == type::assign, false /* reset_extra */);
}
catch (const regex_error& e)
{
@@ -4281,7 +5318,12 @@ namespace build2
// We store prepend/append values untyped (similar to overrides).
//
if (rhs.type != nullptr && kind != type::assign)
- untypify (rhs);
+ {
+ // Our heuristics for prepend/append of a typed value is to preserve
+ // empty (see apply_value_attributes() for details) so do not reduce.
+ //
+ untypify (rhs, false /* reduce */);
+ }
if (p.second)
{
@@ -4368,8 +5410,8 @@ namespace build2
: value (names ());
}
- static const value_type*
- map_type (const string& n)
+ const value_type* parser::
+ find_value_type (const scope*, const string& n)
{
auto ptr = [] (const value_type& vt) {return &vt;};
@@ -4392,6 +5434,7 @@ namespace build2
n == "paths" ? ptr (value_traits<paths>::value_type) :
n == "dir_paths" ? ptr (value_traits<dir_paths>::value_type) :
n == "names" ? ptr (value_traits<vector<name>>::value_type) :
+ n == "cmdline" ? ptr (value_traits<cmdline>::value_type) :
nullptr;
}
@@ -4415,19 +5458,62 @@ namespace build2
string& n (a.name);
value& v (a.value);
- if (const value_type* t = map_type (n))
+ if (n == "visibility")
{
+ try
+ {
+ string s (convert<string> (move (v)));
+
+ variable_visibility r;
+ if (s == "global") r = variable_visibility::global;
+ else if (s == "project") r = variable_visibility::project;
+ else if (s == "scope") r = variable_visibility::scope;
+ else if (s == "target") r = variable_visibility::target;
+ else if (s == "prerequisite") r = variable_visibility::prereq;
+ else throw invalid_argument ("unknown visibility name");
+
+ if (vis && r != *vis)
+ fail (l) << "conflicting variable visibilities: " << s << ", "
+ << *vis;
+
+ vis = r;
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid " << n << " attribute value: " << e;
+ }
+ }
+ else if (n == "overridable")
+ {
+ try
+ {
+ // Treat absent value (represented as NULL) as true.
+ //
+ bool r (v.null || convert<bool> (move (v)));
+
+ if (ovr && r != *ovr)
+ fail (l) << "conflicting variable overridabilities";
+
+ ovr = r;
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid " << n << " attribute value: " << e;
+ }
+ }
+ else if (const value_type* t = find_value_type (root_, n))
+ {
+ if (!v.null)
+ fail (l) << "unexpected value in attribute " << a;
+
if (type != nullptr && t != type)
- fail (l) << "multiple variable types: " << n << ", " << type->name;
+ fail (l) << "conflicting variable types: " << n << ", "
+ << type->name;
type = t;
- // Fall through.
}
else
fail (l) << "unknown variable attribute " << a;
-
- if (!v.null)
- fail (l) << "unexpected value in attribute " << a;
}
if (type != nullptr && var.type != nullptr)
@@ -4439,15 +5525,33 @@ namespace build2
<< var.type->name << " to " << type->name;
}
- //@@ TODO: the same checks for vis and ovr (when we have the corresponding
- // attributes).
+ if (vis)
+ {
+ // Note that this logic naturally makes sure that a project-private
+ // variable doesn't have global visibility (since it would have been
+ // entered with the project visibility).
+ //
+ if (var.visibility == *vis)
+ vis = nullopt;
+ else if (var.visibility > *vis) // See variable_pool::update().
+ fail (l) << "changing variable " << var << " visibility from "
+ << var.visibility << " to " << *vis;
+ }
- if (type || vis || ovr)
- ctx.var_pool.update (const_cast<variable&> (var),
- type,
- vis ? &*vis : nullptr,
- ovr ? &*ovr : nullptr);
+ if (ovr)
+ {
+ // Note that the overridability incompatibilities are diagnosed by
+ // update(). So we just need to diagnose the project-private case.
+ //
+ if (*ovr && var.owner != &ctx->var_pool)
+ fail (l) << "private variable " << var << " cannot be overridable";
+ }
+ if (type || vis || ovr)
+ var.owner->update (const_cast<variable&> (var),
+ type,
+ vis ? &*vis : nullptr,
+ ovr ? &*ovr : nullptr);
}
void parser::
@@ -4477,10 +5581,10 @@ namespace build2
null = true;
// Fall through.
}
- else if (const value_type* t = map_type (n))
+ else if (const value_type* t = find_value_type (root_, n))
{
if (type != nullptr && t != type)
- fail (l) << "multiple value types: " << n << ", " << type->name;
+ fail (l) << "conflicting value types: " << n << ", " << type->name;
type = t;
// Fall through.
@@ -4528,6 +5632,13 @@ namespace build2
bool rhs_type (false);
if (rhs.type != nullptr)
{
+ // Our heuristics is to not reduce typed RHS empty simple values for
+ // prepend/append and additionally for assign provided LHS is a
+ // container.
+ //
+ bool reduce (kind == type::assign &&
+ (type == nullptr || !type->container));
+
// Only consider RHS type if there is no explicit or variable type.
//
if (type == nullptr)
@@ -4538,7 +5649,7 @@ namespace build2
// Reduce this to the untyped value case for simplicity.
//
- untypify (rhs);
+ untypify (rhs, reduce);
}
if (kind == type::assign)
@@ -4921,17 +6032,38 @@ namespace build2
if (pre_parse_)
return v; // Empty.
- if (v.type != nullptr || !v || v.as<names> ().size () != 1)
- fail (l) << "expected target before ':'";
-
+ // We used to return this as a <target>:<name> pair but that meant we
+ // could not handle an out-qualified target (which is represented as
+ // <target>@<out> pair). As a somewhat of a hack, we deal with this by
+ // changing the order of the name and target to be <name>:<target> with
+ // the qualified case becoming a "tripple pair" <name>:<target>@<out>.
+ //
+ // @@ This is actually not great since it's possible to observe such a
+ // tripple pair, for example with `print (file{x}@./:y)`.
+ //
if (n.type != nullptr || !n || n.as<names> ().size () != 1 ||
n.as<names> ()[0].pattern)
fail (nl) << "expected variable name after ':'";
- names& ns (v.as<names> ());
+ names& ns (n.as<names> ());
ns.back ().pair = ':';
- ns.push_back (move (n.as<names> ().back ()));
- return v;
+
+ if (v.type == nullptr && v)
+ {
+ names& ts (v.as<names> ());
+
+ size_t s (ts.size ());
+ if (s == 1 || (s == 2 && ts.front ().pair == '@'))
+ {
+ ns.push_back (move (ts.front ()));
+ if (s == 2)
+ ns.push_back (move (ts.back ()));
+
+ return n;
+ }
+ }
+
+ fail (l) << "expected target before ':'" << endf;
}
else
{
@@ -5000,8 +6132,13 @@ namespace build2
}
pair<bool, location> parser::
- attributes_push (token& t, type& tt, bool standalone)
+ attributes_push (token& t, type& tt, bool standalone, bool next_token)
{
+ // To make sure that the attributes are not standalone we need to read the
+ // token which follows ']'.
+ //
+ assert (standalone || next_token);
+
location l (get_location (t));
bool has (tt == type::lsbrace);
@@ -5024,6 +6161,10 @@ namespace build2
// Parse the attribute name with expansion (we rely on this in some
// old and hairy tests).
//
+ // Note that the attributes lexer mode does not recognize `{}@` as
+ // special and we rely on that in the rule hint attributes
+ // (libs@rule_hint=cxx).
+ //
const location l (get_location (t));
names ns (
@@ -5065,32 +6206,33 @@ namespace build2
}
while (tt != type::rsbrace);
}
+ else
+ has = false; // `[]` doesn't count.
if (tt != type::rsbrace)
fail (t) << "expected ']' instead of " << t;
- next (t, tt);
-
- if (tt == type::newline || tt == type::eos)
+ if (next_token)
{
- if (!standalone)
- fail (t) << "standalone attributes";
+ next (t, tt);
+
+ if (tt == type::newline || tt == type::eos)
+ {
+ if (!standalone)
+ fail (t) << "standalone attributes";
+ }
+ //
+ // Verify that the attributes are separated from the following word or
+ // "word-producing" token.
+ //
+ else if (!t.separated && (tt == type::word ||
+ tt == type::dollar ||
+ tt == type::lparen ||
+ tt == type::lcbrace))
+ fail (t) << "whitespace required after attributes" <<
+ info (l) << "use the '\\[' escape sequence if this is a wildcard "
+ << "pattern";
}
- //
- // We require attributes to be separated from the following word or
- // "word-producing" tokens (`$` for variable expansions/function calls,
- // `(` for eval contexts, and `{` for name generation) to reduce the
- // possibility of confusing them with wildcard patterns. Consider:
- //
- // ./: [abc]-foo.txt
- //
- else if (!t.separated && (tt == type::word ||
- tt == type::dollar ||
- tt == type::lparen ||
- tt == type::lcbrace))
- fail (t) << "whitespace required after attributes" <<
- info (l) << "use the '\\[' escape sequence if this is a wildcard "
- << "pattern";
return make_pair (has, l);
}
@@ -5325,9 +6467,11 @@ namespace build2
// May throw invalid_path.
//
auto include_pattern =
- [&r, &append, &include_match, sp, &l, this] (string&& p,
- optional<string>&& e,
- bool a)
+ [this,
+ &append, &include_match,
+ &r, sp, &l, &dir] (string&& p,
+ optional<string>&& e,
+ bool a)
{
// If we don't already have any matches and our pattern doesn't contain
// multiple recursive wildcards, then the result will be unique and we
@@ -5374,14 +6518,62 @@ namespace build2
// multiple entries for each pattern.
//
if (!interm)
- d.appf (move (m).representation (), optional<string> (d.e));
+ {
+ // If the extension is empty (meaning there should be no extension,
+ // for example hxx{Q*.}), skip entries with extensions.
+ //
+ if (!d.e || !d.e->empty () || m.extension_cstring () == nullptr)
+ d.appf (move (m).representation (), optional<string> (d.e));
+ }
return true;
};
+ const function<bool (const dir_entry&)> dangling (
+ [&dir] (const dir_entry& de)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ const path& n (de.path ());
+
+ // One case where this turned out to be not worth it practically
+ // (too much noise) is the backlinks to executables (and the
+ // associated DLL assemblies for Windows). So we now have this
+ // heuristics that if this looks like an executable (or DLL for
+ // Windows), then we omit the warning. On POSIX, where executables
+ // don't have extensions, we will consider it an executable only if
+ // we are not looking for directories (which also normally don't
+ // have extension).
+ //
+ // @@ PEDANTIC: re-enable if --pedantic.
+ //
+ if (sl)
+ {
+ string e (n.extension ());
+
+ if ((e.empty () && !dir) ||
+ path_traits::compare (e, "exe") == 0 ||
+ path_traits::compare (e, "dll") == 0 ||
+ path_traits::compare (e, "pdb") == 0 || // .{exe,dll}.pdb
+ (path_traits::compare (e, "dlls") == 0 && // .exe.dlls assembly
+ path_traits::compare (n.base ().extension (), "exe") == 0))
+ return true;
+ }
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry")
+ << ' ' << de.base () / n;
+
+ return true;
+ });
+
try
{
- path_search (path (move (p)), process, *sp);
+ path_search (path (move (p)),
+ process,
+ *sp,
+ path_match_flags::follow_symlinks,
+ dangling);
}
catch (const system_error& e)
{
@@ -5549,6 +6741,7 @@ namespace build2
if ((n.pair & 0x02) != 0)
{
e = move (n.type);
+ n.type.clear ();
// Remove non-empty extension from the name (it got to be there, see
// above).
@@ -5826,9 +7019,35 @@ namespace build2
bool concat_quoted_first (false);
name concat_data;
- auto concat_typed = [this, &vnull, &vtype,
- &concat, &concat_data] (value&& rhs,
- const location& loc)
+ auto concat_diag_multiple = [this] (const location& loc,
+ const char* what_expansion)
+ {
+ diag_record dr (fail (loc));
+
+ dr << "concatenating " << what_expansion << " contains multiple values";
+
+ // See if this looks like a subscript without an evaluation context and
+ // help the user out.
+ //
+ if (mode () != lexer_mode::eval)
+ {
+ const token& t (peeked ()); // Should be peeked at.
+
+ if (t.type == type::word &&
+ t.qtype == quote_type::unquoted &&
+ t.value[0] == '[')
+ {
+ dr << info << "wrap it in (...) evaluation context if this "
+ << "is value subscript";
+ }
+ }
+ };
+
+ auto concat_typed = [this, what, &vnull, &vtype,
+ &concat, &concat_data,
+ &concat_diag_multiple] (value&& rhs,
+ const location& loc,
+ const char* what_expansion)
{
// If we have no LHS yet, then simply copy value/type.
//
@@ -5845,6 +7064,10 @@ namespace build2
// RHS.
//
+ // Note that if RHS contains multiple values then we expect the result
+ // to be a single value somehow or, more likely, there to be no
+ // suitable $builtin.concat() overload.
+ //
a.push_back (move (rhs));
const char* l ((a[0].type != nullptr ? a[0].type->name : "<untyped>"));
@@ -5861,7 +7084,10 @@ namespace build2
dr << info << "use quoting to force untyped concatenation";
});
- p = ctx.functions.try_call (
+ if (ctx == nullptr)
+ fail << "literal " << what << " expected";
+
+ p = ctx->functions.try_call (
scope_, "builtin.concat", vector_view<value> (a), loc);
}
@@ -5883,18 +7109,22 @@ namespace build2
if (!vnull)
{
if (vtype != nullptr)
- untypify (rhs);
+ untypify (rhs, true /* reduce */);
names& d (rhs.as<names> ());
- // If the value is empty, then untypify() will (typically; no pun
- // intended) represent it as an empty sequence of names rather than
- // a sequence of one empty name. This is usually what we need (see
- // simple_reverse() for details) but not in this case.
+ // If the value is empty, then we asked untypify() to reduce it to
+ // an empty sequence of names rather than a sequence of one empty
+ // name.
//
if (!d.empty ())
{
- assert (d.size () == 1); // Must be a single value.
+ if (d.size () != 1)
+ {
+ assert (what_expansion != nullptr);
+ concat_diag_multiple (loc, what_expansion);
+ }
+
concat_data = move (d[0]);
}
}
@@ -6001,6 +7231,8 @@ namespace build2
// continue accumulating or inject. We inject if the next token is not a
// word, var expansion, or eval context or if it is separated.
//
+ optional<pair<const value_type*, name>> path_concat; // Backup.
+
if (concat && last_concat ())
{
// Concatenation does not affect the tokens we get, only what we do
@@ -6040,6 +7272,13 @@ namespace build2
// dir/{$str}
// file{$str}
//
+ // And yet another exception: if the type is path or dir_path and the
+ // pattern mode is not ignore, then we will inject to try our luck in
+ // interpreting the concatenation result as a path pattern. This makes
+ // sure patterns like `$src_base/*.txt` work, naturally. Failed that,
+ // we will handle this concatenation as we do for other types (via the
+ // path_concat backup).
+ //
// A concatenation cannot produce value/NULL.
//
@@ -6051,12 +7290,14 @@ namespace build2
bool e1 (tt == type::lcbrace && !peeked ().separated);
bool e2 (pp || dp != nullptr || tp != nullptr);
+ const value_type* pt (&value_traits<path>::value_type);
+ const value_type* dt (&value_traits<dir_path>::value_type);
+
if (e1 || e2)
{
- if (vtype == &value_traits<path>::value_type ||
- vtype == &value_traits<string>::value_type)
+ if (vtype == pt || vtype == &value_traits<string>::value_type)
; // Representation is already in concat_data.value.
- else if (vtype == &value_traits<dir_path>::value_type)
+ else if (vtype == dt)
concat_data.value = move (concat_data.dir).representation ();
else
{
@@ -6071,6 +7312,20 @@ namespace build2
vtype = nullptr;
// Fall through to injection.
}
+ else if (pmode != pattern_mode::ignore &&
+ (vtype == pt || vtype == dt))
+ {
+ path_concat = make_pair (vtype, concat_data);
+
+ // Note: for path the representation is already in
+ // concat_data.value.
+ //
+ if (vtype == dt)
+ concat_data.value = move (concat_data.dir).representation ();
+
+ vtype = nullptr;
+ // Fall through to injection.
+ }
else
{
// This is either a simple name (untyped concatenation; in which
@@ -6164,7 +7419,7 @@ namespace build2
//
names ns;
ns.push_back (name (move (val)));
- concat_typed (value (move (ns)), get_location (t));
+ concat_typed (value (move (ns)), get_location (t), nullptr);
}
else
{
@@ -6331,7 +7586,7 @@ namespace build2
// See if this is a pattern, path or regex.
//
// A path pattern either contains an unquoted wildcard character or,
- // in the curly context, start with unquoted/unescaped `+`.
+ // in the curly context, starts with unquoted/unescaped `+`.
//
// A regex pattern starts with unquoted/unescaped `~` followed by a
// non-alphanumeric delimiter and has the following form:
@@ -6409,7 +7664,7 @@ namespace build2
// Note that we have to check for regex patterns first since
// they may also be detected as path patterns.
//
- if (!quoted_first && regex_pattern ())
+ if (!quoted_first && !path_concat && regex_pattern ())
{
// Note: we may decide to support regex-based name generation
// some day (though a substitution won't make sense here).
@@ -6477,7 +7732,7 @@ namespace build2
// there isn't any good reason to; see also to_stream(name) for
// the corresponding serialization logic).
//
- if (!quoted_first && regex_pattern ())
+ if (!quoted_first && !path_concat && regex_pattern ())
{
const char* w;
if (val[0] == '~')
@@ -6535,6 +7790,24 @@ namespace build2
}
}
+ // If this is a concatenation of the path or dir_path type and it is
+ // not a pattern, then handle it in the same way as concatenations of
+ // other types (see above).
+ //
+ if (path_concat && !pat)
+ {
+ ns.push_back (move (path_concat->second));
+
+ // Restore the type information if that's the only name.
+ //
+ if (start == ns.size () && last_token ())
+ vtype = path_concat->first;
+
+ // Restart the loop.
+ //
+ continue;
+ }
+
// If we are a second half of a pair, add another first half
// unless this is the first instance.
//
@@ -6589,6 +7862,9 @@ namespace build2
//
if (tt == type::dollar || tt == type::lparen)
{
+ if (ctx == nullptr)
+ fail << "literal " << what << " expected";
+
// These cases are pretty similar in that in both we quickly end up
// with a list of names that we need to splice into the result.
//
@@ -6610,11 +7886,15 @@ namespace build2
// token is a paren or a word, we turn it on and switch to the eval
// mode if what we get next is a paren.
//
- // Also sniff out the special variables string from mode data for
- // the ad hoc $() handling below.
- //
mode (lexer_mode::variable);
+ // Sniff out the special variables string from mode data and use
+ // that to recognize special variables in the ad hoc $() handling
+ // below.
+ //
+ // Note: must be done before calling next() which may expire the
+ // mode.
+ //
auto special = [s = reinterpret_cast<const char*> (mode_data ())]
(const token& t) -> char
{
@@ -6653,156 +7933,202 @@ namespace build2
next (t, tt);
loc = get_location (t);
- name qual;
- string name;
-
- if (t.separated)
- ; // Leave the name empty to fail below.
- else if (tt == type::word)
+ if (tt == type::escape)
{
- name = move (t.value);
+ // For now we only support all the simple C/C++ escape sequences
+ // plus \0 (which in C/C++ is an octal escape sequence). See the
+ // lexer part for details.
+ //
+ // Note: cannot be subscripted.
+ //
+ if (!pre_parse_)
+ {
+ string s;
+ switch (char c = t.value[0])
+ {
+ case '\'':
+ case '"':
+ case '?':
+ case '\\': s = c; break;
+ case '0': s = '\0'; break;
+ case 'a': s = '\a'; break;
+ case 'b': s = '\b'; break;
+ case 'f': s = '\f'; break;
+ case 'n': s = '\n'; break;
+ case 'r': s = '\r'; break;
+ case 't': s = '\t'; break;
+ case 'v': s = '\v'; break;
+ default:
+ assert (false);
+ }
+
+ result_data = name (move (s));
+ what = "escape sequence expansion";
+ }
+
+ tt = peek ();
}
- else if (tt == type::lparen)
+ else
{
- expire_mode ();
- mode (lexer_mode::eval, '@');
- next_with_attributes (t, tt);
+ names qual;
+ string name;
- // Handle the $(x) case ad hoc. We do it this way in order to get
- // the variable name even during pre-parse. It should also be
- // faster.
- //
- char c;
- if ((tt == type::word
- ? path_traits::rfind_separator (t.value) == string::npos
- : (c = special (t))) &&
- peek () == type::rparen)
+ if (t.separated)
+ ; // Leave the name empty to fail below.
+ else if (tt == type::word)
{
- name = (tt == type::word ? move (t.value) : string (1, c));
- next (t, tt); // Get `)`.
+ name = move (t.value);
}
- else
+ else if (tt == type::lparen)
{
- using name_type = build2::name;
+ expire_mode ();
+ mode (lexer_mode::eval, '@');
+ next_with_attributes (t, tt);
- //@@ OUT will parse @-pair and do well?
+ // Handle the $(x) case ad hoc. We do it this way in order to
+ // get the variable name even during pre-parse. It should also
+ // be faster.
//
- values vs (parse_eval (t, tt, pmode));
-
- if (!pre_parse_)
+ char c;
+ if ((tt == type::word
+ ? path_traits::rfind_separator (t.value) == string::npos
+ : (c = special (t))) &&
+ peek () == type::rparen)
+ {
+ name = (tt == type::word ? move (t.value) : string (1, c));
+ next (t, tt); // Get `)`.
+ }
+ else
{
- if (vs.size () != 1)
- fail (loc) << "expected single variable/function name";
+ using name_type = build2::name;
- value& v (vs[0]);
+ values vs (parse_eval (t, tt, pmode));
- if (!v)
- fail (loc) << "null variable/function name";
+ if (!pre_parse_)
+ {
+ if (vs.size () != 1)
+ fail (loc) << "expected single variable/function name";
- names storage;
- vector_view<name_type> ns (reverse (v, storage)); // Movable.
- size_t n (ns.size ());
+ value& v (vs[0]);
- // We cannot handle scope-qualification in the eval context as
- // we do for target-qualification (see eval-qual) since then
- // we would be treating all paths as qualified variables. So
- // we have to do it here.
- //
- if (n == 2 && ns[0].pair == ':') // $(foo: x)
- {
- qual = move (ns[0]);
+ if (!v)
+ fail (loc) << "null variable/function name";
- if (qual.empty ())
- fail (loc) << "empty variable/function qualification";
- }
- else if (n == 2 && ns[0].directory ()) // $(foo/ x)
- {
- qual = move (ns[0]);
- qual.pair = '/';
- }
- else if (n > 1)
- fail (loc) << "expected variable/function name instead of '"
- << ns << "'";
+ names storage;
+ vector_view<name_type> ns (
+ reverse (v, storage, true /* reduce */)); // Movable.
+ size_t n (ns.size ());
- // Note: checked for empty below.
- //
- if (!ns[n - 1].simple ())
- fail (loc) << "expected variable/function name instead of '"
- << ns[n - 1] << "'";
+ // We cannot handle scope-qualification in the eval context
+ // as we do for target-qualification (see eval-qual) since
+ // then we would be treating all paths as qualified
+ // variables. So we have to do it here.
+ //
+ if (n >= 2 && ns[0].pair == ':') // $(foo: x)
+ {
+ // Note: name is first (see eval for details).
+ //
+ qual.push_back (move (ns[1]));
- size_t p;
- if (n == 1 && // $(foo/x)
- (p = path_traits::rfind_separator (ns[0].value)) !=
- string::npos)
- {
- // Note that p cannot point to the last character since then
- // it would have been a directory, not a simple name.
+ if (qual.back ().empty ())
+ fail (loc) << "empty variable/function qualification";
+
+ if (n > 2)
+ qual.push_back (move (ns[2]));
+
+ // Move name to the last position (see below).
+ //
+ swap (ns[0], ns[n - 1]);
+ }
+ else if (n == 2 && ns[0].directory ()) // $(foo/ x)
+ {
+ qual.push_back (move (ns[0]));
+ qual.back ().pair = '/';
+ }
+ else if (n > 1)
+ fail (loc) << "expected variable/function name instead of '"
+ << ns << "'";
+
+ // Note: checked for empty below.
//
- string& s (ns[0].value);
+ if (!ns[n - 1].simple ())
+ fail (loc) << "expected variable/function name instead of '"
+ << ns[n - 1] << "'";
- name = string (s, p + 1);
- s.resize (p + 1);
- qual = name_type (dir_path (move (s)));
- qual.pair = '/';
+ size_t p;
+ if (n == 1 && // $(foo/x)
+ (p = path_traits::rfind_separator (ns[0].value)) !=
+ string::npos)
+ {
+ // Note that p cannot point to the last character since
+ // then it would have been a directory, not a simple name.
+ //
+ string& s (ns[0].value);
+
+ name = string (s, p + 1);
+ s.resize (p + 1);
+ qual.push_back (name_type (dir_path (move (s))));
+ qual.back ().pair = '/';
+ }
+ else
+ name = move (ns[n - 1].value);
}
- else
- name = move (ns[n - 1].value);
}
}
- }
- else
- fail (t) << "expected variable/function name instead of " << t;
-
- if (!pre_parse_ && name.empty ())
- fail (loc) << "empty variable/function name";
+ else
+ fail (t) << "expected variable/function name instead of " << t;
- // Figure out whether this is a variable expansion with potential
- // subscript or a function call.
- //
- if (sub) enable_subscript ();
- tt = peek ();
+ if (!pre_parse_ && name.empty ())
+ fail (loc) << "empty variable/function name";
- // Note that we require function call opening paren to be
- // unseparated; consider: $x ($x == 'foo' ? 'FOO' : 'BAR').
- //
- if (tt == type::lparen && !peeked ().separated)
- {
- // Function call.
+ // Figure out whether this is a variable expansion with potential
+ // subscript or a function call.
//
- next (t, tt); // Get '('.
- mode (lexer_mode::eval, '@');
- next_with_attributes (t, tt);
-
- // @@ Should we use (target/scope) qualification (of name) as the
- // context in which to call the function? Hm, interesting...
- //
- values args (parse_eval (t, tt, pmode));
-
if (sub) enable_subscript ();
tt = peek ();
- // Note that we "move" args to call().
+ // Note that we require function call opening paren to be
+ // unseparated; consider: $x ($x == 'foo' ? 'FOO' : 'BAR').
//
- if (!pre_parse_)
+ if (tt == type::lparen && !peeked ().separated)
{
- result_data = ctx.functions.call (scope_, name, args, loc);
- what = "function call";
+ // Function call.
+ //
+ next (t, tt); // Get '('.
+ mode (lexer_mode::eval, '@');
+ next_with_attributes (t, tt);
+
+ // @@ Should we use (target/scope) qualification (of name) as
+ // the context in which to call the function? Hm, interesting...
+ //
+ values args (parse_eval (t, tt, pmode));
+
+ if (sub) enable_subscript ();
+ tt = peek ();
+
+ // Note that we "move" args to call().
+ //
+ if (!pre_parse_)
+ {
+ result_data = ctx->functions.call (scope_, name, args, loc);
+ what = "function call";
+ }
+ else
+ lookup_function (move (name), loc);
}
else
- lookup_function (move (name), loc);
- }
- else
- {
- // Variable expansion.
- //
- lookup l (lookup_variable (move (qual), move (name), loc));
-
- if (!pre_parse_)
{
- if (l.defined ())
- result = l.value; // Otherwise leave as NULL result_data.
+ // Variable expansion.
+ //
+ lookup l (lookup_variable (move (qual), move (name), loc));
+
+ if (!pre_parse_)
+ {
+ if (l.defined ())
+ result = l.value; // Otherwise leave as NULL result_data.
- what = "variable expansion";
+ what = "variable expansion";
+ }
}
}
}
@@ -6834,7 +8160,7 @@ namespace build2
// Handle value subscript.
//
- if (tt == type::lsbrace)
+ if (tt == type::lsbrace && mode () == lexer_mode::eval)
{
location bl (get_location (t));
next (t, tt); // `[`
@@ -6901,12 +8227,44 @@ namespace build2
}
else
{
- // @@ TODO: we would want to return a value with element type.
+ // Similar logic to parse_for().
//
- //result_data = ...
- fail (l) << "typed value subscript not yet supported" <<
- info (bl) << "use the '\\[' escape sequence if this is a "
- << "wildcard pattern";
+ // @@ Maybe we should invent type-aware subscript? Could also
+ // be used for non-index subscripts (map keys etc).
+ //
+ const value_type* etype (result->type->element_type);
+
+ value val (result == &result_data
+ ? value (move (result_data))
+ : value (*result));
+
+ untypify (val, false /* reduce */);
+
+ names& ns (val.as<names> ());
+
+ // Pair-aware subscript.
+ //
+ names r;
+ for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
+ {
+ bool p (i->pair);
+
+ if (j == 0)
+ {
+ r.push_back (move (*i));
+ if (p)
+ r.push_back (move (*++i));
+ break;
+ }
+
+ if (p)
+ ++i;
+ }
+
+ result_data = r.empty () ? value () : value (move (r));
+
+ if (etype != nullptr)
+ typify (result_data, *etype, nullptr /* var */);
}
result = &result_data;
@@ -6956,7 +8314,8 @@ namespace build2
// then it should not be overloaded for a type). In a quoted
// context we use $string() which returns a "canonical
// representation" (e.g., a directory path without a trailing
- // slash).
+ // slash). Note: looks like we use typed $concat() now in the
+ // unquoted context.
//
if (result->type != nullptr && quoted)
{
@@ -6979,7 +8338,10 @@ namespace build2
dr << info (loc) << "while converting " << t << " to string";
});
- p = ctx.functions.try_call (
+ if (ctx == nullptr)
+ fail << "literal " << what << " expected";
+
+ p = ctx->functions.try_call (
scope_, "string", vector_view<value> (&result_data, 1), loc);
}
@@ -6987,7 +8349,11 @@ namespace build2
fail (loc) << "no string conversion for " << t;
result_data = move (p.first);
- untypify (result_data); // Convert to untyped simple name.
+
+ // Convert to untyped simple name reducing empty string to empty
+ // names as an optimization.
+ //
+ untypify (result_data, true /* reduce */);
}
if ((concat && vtype != nullptr) || // LHS typed.
@@ -6996,7 +8362,7 @@ namespace build2
if (result != &result_data) // Same reason as above.
result = &(result_data = *result);
- concat_typed (move (result_data), loc);
+ concat_typed (move (result_data), loc, what);
}
//
// Untyped concatenation. Note that if RHS is NULL/empty, we still
@@ -7013,8 +8379,7 @@ namespace build2
// This should be a simple value or a simple directory.
//
if (lv.size () > 1)
- fail (loc) << "concatenating " << what << " contains multiple "
- << "values";
+ concat_diag_multiple (loc, what);
const name& n (lv[0]);
@@ -7069,7 +8434,7 @@ namespace build2
// @@ Could move if nv is result_data; see untypify().
//
names nv_storage;
- names_view nv (reverse (*result, nv_storage));
+ names_view nv (reverse (*result, nv_storage, true /* reduce */));
count = splice_names (
loc, nv, move (nv_storage), ns, what, pairn, pp, dp, tp);
@@ -7307,14 +8672,16 @@ namespace build2
buildspec parser::
parse_buildspec (istream& is, const path_name& in)
{
- // We do "effective escaping" and only for ['"\$(] (basically what's
- // necessary inside a double-quoted literal plus the single quote).
+ // We do "effective escaping" of the special `'"\$(` characters (basically
+ // what's escapable inside a double-quoted literal plus the single quote;
+ // note, however, that we exclude line continuations and `)` since they
+ // would make directory paths on Windows unusable).
//
path_ = &in;
lexer l (is, *path_, 1 /* line */, "\'\"\\$(");
lexer_ = &l;
- root_ = &ctx.global_scope.rw ();
+ root_ = &ctx->global_scope.rw ();
scope_ = root_;
target_ = nullptr;
prerequisite_ = nullptr;
@@ -7549,8 +8916,11 @@ namespace build2
}
lookup parser::
- lookup_variable (name&& qual, string&& name, const location& loc)
+ lookup_variable (names&& qual, string&& name, const location& loc)
{
+ // Note that this function can be called during execute (for example, from
+ // scripts). In particular, this means we cannot use enter_{scope,target}.
+
if (pre_parse_)
return lookup ();
@@ -7562,9 +8932,6 @@ namespace build2
// If we are qualified, it can be a scope or a target.
//
- enter_scope sg;
- enter_target tg;
-
if (qual.empty ())
{
s = scope_;
@@ -7573,36 +8940,70 @@ namespace build2
}
else
{
- switch (qual.pair)
+ // What should we do if we cannot find the qualification (scope or
+ // target)? We can "fall through" to an outer scope (there is always the
+ // global scope backstop), we can return NULL straight away, or we can
+ // fail. It feels like in most cases unknown scope or target is a
+ // mistake and doing anything other than failing is just making things
+ // harder to debug.
+ //
+ switch (qual.front ().pair)
{
case '/':
{
- assert (qual.directory ());
- sg = enter_scope (*this, move (qual.dir));
- s = scope_;
+ assert (qual.front ().directory ());
+
+ dir_path& d (qual.front ().dir);
+ enter_scope::complete_normalize (*scope_, d);
+
+ s = &ctx->scopes.find_out (d);
+
+ if (s->out_path () != d)
+ fail (loc) << "unknown scope " << d << " in scope-qualified "
+ << "variable " << name << " expansion" <<
+ info << "did you forget to include the corresponding buildfile?";
+
break;
}
- case ':':
+ default:
{
- qual.pair = '\0';
+ build2::name n (move (qual.front ())), o;
+
+ if (n.pair)
+ o = move (qual.back ());
+
+ t = enter_target::find_target (*this, n, o, loc, trace);
+
+ if (t == nullptr || !operator>= (t->decl, target_decl::implied)) // VC14
+ {
+ diag_record dr (fail (loc));
+
+ dr << "unknown target " << n;
- // @@ OUT TODO
+ if (n.pair && !o.dir.empty ())
+ dr << '@' << o.dir;
+
+ dr << " in target-qualified variable " << name << " expansion";
+ }
+
+ // Use the target's var_pool for good measure.
//
- tg = enter_target (
- *this, move (qual), build2::name (), true, loc, trace);
- t = target_;
+ s = &t->base_scope ();
+
break;
}
- default: assert (false);
}
}
// Lookup.
//
- if (const variable* pvar = scope_->var_pool ().find (name))
+ if (const variable* pvar =
+ (s != nullptr ? s : scope_)->var_pool ().find (name))
{
auto& var (*pvar);
+ // Note: the order of the following blocks is important.
+
if (p != nullptr)
{
// The lookup depth is a bit of a hack but should be harmless since
@@ -7689,62 +9090,200 @@ namespace build2
return r;
}
+ // file.cxx
+ //
+ extern const dir_path std_export_dir;
+ extern const dir_path alt_export_dir;
+
void parser::
- process_default_target (token& t)
+ process_default_target (token& t, const buildfile* bf)
{
tracer trace ("parser::process_default_target", &path_);
// The logic is as follows: if we have an explicit current directory
- // target, then that's the default target. Otherwise, we take the
- // first target and use it as a prerequisite to create an implicit
- // current directory target, effectively making it the default
- // target via an alias. If there are no targets in this buildfile,
- // then we don't do anything.
+ // target, then that's the default target. Otherwise, we take the first
+ // target and use it as a prerequisite to create an implicit current
+ // directory target, effectively making it the default target via an
+ // alias. If this is a project root buildfile, then also add exported
+ // buildfiles. And if there are no targets in this buildfile, then we
+ // don't do anything (reasonably assuming it's not root).
//
if (default_target_ == nullptr) // No targets in this buildfile.
return;
- target& dt (*default_target_);
-
target* ct (
- const_cast<target*> ( // Ok (serial execution).
- ctx.targets.find (dir::static_type, // Explicit current dir target.
- scope_->out_path (),
- dir_path (), // Out tree target.
- string (),
- nullopt,
- trace)));
-
- if (ct == nullptr)
- {
- l5 ([&]{trace (t) << "creating current directory alias for " << dt;});
-
- // While this target is not explicitly mentioned in the buildfile, we
- // say that we behave as if it were. Thus not implied.
- //
- ct = &ctx.targets.insert (dir::static_type,
- scope_->out_path (),
- dir_path (),
- string (),
- nullopt,
- target_decl::real,
- trace).first;
- // Fall through.
- }
- else if (ct->decl != target_decl::real)
+ const_cast<target*> ( // Ok (serial execution).
+ ctx->targets.find (dir::static_type, // Explicit current dir target.
+ scope_->out_path (),
+ dir_path (), // Out tree target.
+ string (),
+ nullopt,
+ trace)));
+
+ if (ct != nullptr && ct->decl == target_decl::real)
+ ; // Existing and not implied.
+ else
{
- ct->decl = target_decl::real;
- // Fall through.
+ target& dt (*default_target_);
+
+ if (ct == nullptr)
+ {
+ l5 ([&]{trace (t) << "creating current directory alias for " << dt;});
+
+ // While this target is not explicitly mentioned in the buildfile, we
+ // say that we behave as if it were. Thus not implied.
+ //
+ ct = &ctx->targets.insert (dir::static_type,
+ scope_->out_path (),
+ dir_path (),
+ string (),
+ nullopt,
+ target_decl::real,
+ trace).first;
+ }
+ else
+ ct->decl = target_decl::real;
+
+ ct->prerequisites_state_.store (2, memory_order_relaxed);
+ ct->prerequisites_.push_back (prerequisite (dt));
}
- else
- return; // Existing and not implied.
- ct->prerequisites_state_.store (2, memory_order_relaxed);
- ct->prerequisites_.emplace_back (prerequisite (dt));
+ // See if this is a root buildfile and not in a simple project.
+ //
+ if (bf != nullptr &&
+ root_ != nullptr &&
+ root_->root_extra != nullptr &&
+ root_->root_extra->loaded &&
+ *root_->root_extra->project != nullptr &&
+ bf->dir == root_->src_path () &&
+ bf->name == root_->root_extra->buildfile_file.string ())
+ {
+ // See if we have any exported buildfiles.
+ //
+ const dir_path& export_dir (
+ root_->root_extra->altn ? alt_export_dir : std_export_dir);
+
+ dir_path d (root_->src_path () / export_dir);
+ if (exists (d))
+ {
+ // Make sure prerequisites are set.
+ //
+ ct->prerequisites_state_.store (2, memory_order_relaxed);
+
+ const string& build_ext (root_->root_extra->build_ext);
+
+ // Return true if entered any exported buildfiles.
+ //
+ // Note: recursive lambda.
+ //
+ auto iterate = [this, &trace,
+ ct, &build_ext] (const dir_path& d,
+ const auto& iterate) -> bool
+ {
+ bool r (false);
+
+ try
+ {
+ for (const dir_entry& e:
+ dir_iterator (d, dir_iterator::detect_dangling))
+ {
+ switch (e.type ())
+ {
+ case entry_type::directory:
+ {
+ r = iterate (d / path_cast<dir_path> (e.path ()), iterate) || r;
+ break;
+ }
+ case entry_type::regular:
+ {
+ const path& n (e.path ());
+
+ if (n.extension () == build_ext)
+ {
+ // Similar to above, enter as real.
+ //
+ // Note that these targets may already be entered (for
+ // example, if already imported).
+ //
+ const target& bf (
+ ctx->targets.insert (buildfile::static_type,
+ d,
+ (root_->out_eq_src ()
+ ? dir_path ()
+ : out_src (d, *root_)),
+ n.base ().string (),
+ build_ext,
+ target_decl::real,
+ trace).first);
+
+ ct->prerequisites_.push_back (prerequisite (bf));
+ r = true;
+ }
+
+ break;
+ }
+ case entry_type::unknown:
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ fail << (sl ? "dangling symlink" : "inaccessible entry")
+ << ' ' << d / e.path ();
+
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to iterate over " << d << ": " << e;
+ }
+
+ return r;
+ };
+
+ if (iterate (d, iterate))
+ {
+ // Arrange for the exported buildfiles to be installed, recreating
+ // subdirectories inside export/. Essentially, we are arranging for
+ // this:
+ //
+ // build/export/buildfile{*}:
+ // {
+ // install = buildfile/
+ // install.subdirs = true
+ // }
+ //
+ if (cast_false<bool> (root_->vars["install.loaded"]))
+ {
+ enter_scope es (*this, dir_path (export_dir));
+ auto& vars (scope_->target_vars[buildfile::static_type]["*"]);
+
+ // @@ TODO: get cached variables from the module once we have one.
+ //
+ {
+ auto r (vars.insert (*root_->var_pool ().find ("install")));
+
+ if (r.second) // Already set by the user?
+ r.first = path_cast<path> (dir_path ("buildfile"));
+ }
+
+ {
+ auto r (vars.insert (
+ *root_->var_pool (true).find ("install.subdirs")));
+ if (r.second)
+ r.first = true;
+ }
+ }
+ }
+ }
+ }
}
- void parser::
- enter_buildfile (const path& p)
+ const buildfile& parser::
+ enter_buildfile (const path& p, optional<dir_path> out)
{
tracer trace ("parser::enter_buildfile", &path_);
@@ -7752,17 +9291,20 @@ namespace build2
// Figure out if we need out.
//
- dir_path out;
- if (scope_->src_path_ != nullptr &&
- scope_->src_path () != scope_->out_path () &&
- d.sub (scope_->src_path ()))
+ dir_path o;
+ if (out)
+ o = move (*out);
+ else if (root_ != nullptr &&
+ root_->src_path_ != nullptr &&
+ !root_->out_eq_src () &&
+ d.sub (*root_->src_path_))
{
- out = out_src (d, *root_);
+ o = out_src (d, *root_);
}
- ctx.targets.insert<buildfile> (
+ return ctx->targets.insert<buildfile> (
move (d),
- move (out),
+ move (o),
p.leaf ().base ().string (),
p.extension (), // Always specified.
trace);
diff --git a/libbuild2/parser.hxx b/libbuild2/parser.hxx
index 0390e26..54735d5 100644
--- a/libbuild2/parser.hxx
+++ b/libbuild2/parser.hxx
@@ -4,6 +4,10 @@
#ifndef LIBBUILD2_PARSER_HXX
#define LIBBUILD2_PARSER_HXX
+#include <exception> // uncaught_exception[s]()
+
+#include <libbutl/ft/exception.hxx> // uncaught_exceptions
+
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
@@ -44,9 +48,19 @@ namespace build2
explicit
parser (context& c, stage s = stage::rest)
: fail ("error", &path_), info ("info", &path_),
- ctx (c),
+ ctx (&c),
stage_ (s) {}
+ // Pattern expansion mode.
+ //
+ enum class pattern_mode
+ {
+ ignore, // Treat as literals.
+ preserve, // Preserve as name pattern.
+ expand, // Expand to non-pattern names.
+ detect // Implementation detail mode (see code for more information).
+ };
+
// Issue diagnostics and throw failed in case of an error.
//
void
@@ -55,14 +69,20 @@ namespace build2
scope* root,
scope& base,
target* = nullptr,
- prerequisite* = nullptr);
+ prerequisite* = nullptr,
+ bool enter_buildfile = true);
void
parse_buildfile (lexer&,
scope* root,
scope& base,
target* = nullptr,
- prerequisite* = nullptr);
+ prerequisite* = nullptr,
+ bool enter_buildfile = true);
+
+ names
+ parse_export_stub (istream& is, const path_name& name,
+ const scope& rs, scope& gs, scope& ts);
buildspec
parse_buildspec (istream&, const path_name&);
@@ -73,12 +93,10 @@ namespace build2
pair<value, token>
parse_variable_value (lexer&, scope&, const dir_path*, const variable&);
- names
- parse_export_stub (istream& is, const path_name& name, scope& r, scope& b)
- {
- parse_buildfile (is, name, &r, b);
- return move (export_value);
- }
+ // Parse an evaluation context (`(...)`).
+ //
+ value
+ parse_eval (lexer&, scope& rs, scope& bs, pattern_mode);
// The above functions may be called again on the same parser instance
// after a reset.
@@ -86,6 +104,25 @@ namespace build2
void
reset ();
+ // Special, context-less mode that can only be used to parse literal
+ // names.
+ //
+ public:
+ static const string name_separators;
+
+ explicit
+ parser (context* c)
+ : fail ("error", &path_), info ("info", &path_),
+ ctx (c),
+ stage_ (stage::rest) {}
+
+ names
+ parse_names (lexer&,
+ const dir_path* base,
+ pattern_mode pmode,
+ const char* what = "name",
+ const string* separators = &name_separators);
+
// Ad hoc parsing results for some cases.
//
// Note that these are not touched by reset().
@@ -100,21 +137,20 @@ namespace build2
vector<pair<lookup, string>> config_report; // Config value and format.
bool config_report_new = false; // One of values is new.
+ // Misc utilities.
+ //
+ public:
+ // Return the value type corresponding to the type name or NULL if the
+ // type name is unknown. Pass project's root scope if known.
+ //
+ static const value_type*
+ find_value_type (const scope* rs, const string& name);
+
// Recursive descent parser.
//
protected:
using pattern_type = name::pattern_type;
- // Pattern expansion mode.
- //
- enum class pattern_mode
- {
- ignore, // Treat as literals.
- preserve, // Preserve as name pattern.
- expand, // Expand to non-pattern names.
- detect // Implementation detail mode (see code for more information).
- };
-
// If one is true then parse a single (logical) line (logical means it
// can actually be several lines, e.g., an if-block). Return false if
// nothing has been parsed (i.e., we are still on the same token).
@@ -142,27 +178,41 @@ namespace build2
const target_type* = nullptr,
const string& = {});
- // Ad hoc target names inside < ... >.
+ // Group target names inside < ... >.
//
- struct adhoc_names_loc
+ struct group_names_loc
{
+ bool expl = false; // True -- explicit group, false -- ad hoc.
+ location group_loc; // Group/primary target location.
+ location member_loc; // Members location.
names ns;
- location loc;
};
- using adhoc_names = small_vector<adhoc_names_loc, 1>;
+ using group_names = small_vector<group_names_loc, 1>;
- void
- enter_adhoc_members (adhoc_names_loc&&, bool);
+ vector<reference_wrapper<target>>
+ enter_explicit_members (group_names_loc&&, bool);
+
+ vector<reference_wrapper<target>>
+ enter_adhoc_members (group_names_loc&&, bool);
- small_vector<reference_wrapper<target>, 1>
- enter_targets (names&&, const location&, adhoc_names&&, size_t);
+ small_vector<pair<reference_wrapper<target>, // Target.
+ vector<reference_wrapper<target>>>, // Ad hoc members.
+ 1>
+ enter_targets (names&&, const location&,
+ group_names&&,
+ size_t,
+ const attributes&);
+
+ void
+ apply_target_attributes (target&, const attributes&);
void
parse_dependency (token&, token_type&,
names&&, const location&,
- adhoc_names&&,
- names&&, const location&);
+ group_names&&,
+ names&&, const location&,
+ const attributes&);
void
parse_assert (token&, token_type&);
@@ -297,15 +347,25 @@ namespace build2
// Push a new entry into the attributes_ stack. If the next token is `[`
// then parse the attribute sequence until ']' storing the result in the
- // new stack entry. Then get the next token and, if standalone is false,
- // verify it is not newline/eos (i.e., there is something after it).
- // Return the indication of whether we have seen `[` (even if it's the
- // `[]` empty list) and its location.
+ // new stack entry. Then, if next_token is true, get the next token and,
+ // if standalone is false, verify it is not newline/eos (i.e., there is
+ // something after it). If the next token is read and it is a word or a
+ // "word-producing" token (`$` for variable expansions/function calls, `(`
+ // for eval contexts, and `{` for name generation), then verify that it is
+ // separated to reduce the possibility of confusing it with a wildcard
+ // pattern. Consider:
+ //
+ // ./: [abc]-foo.txt
+ //
+ // Return the indication of whether we have seen any attributes (note that
+ // the `[]` empty list does not count) and the location of `[`.
//
// Note that during pre-parsing nothing is pushed into the stack.
//
pair<bool, location>
- attributes_push (token&, token_type&, bool standalone = false);
+ attributes_push (token&, token_type&,
+ bool standalone = false,
+ bool next_token = true);
attributes
attributes_pop ()
@@ -319,15 +379,15 @@ namespace build2
attributes&
attributes_top () {return attributes_.back ();}
- // Source a stream optionnaly performing the default target processing.
- // If the specified path name has a real path, then also enter it as a
- // buildfile.
+ // Source a buildfile as a stream optionally performing the default target
+ // processing. If the specified path name has a real path, then also enter
+ // it as a buildfile.
//
void
- source (istream&,
- const path_name&,
- const location&,
- bool default_target);
+ source_buildfile (istream&,
+ const path_name&,
+ const location&,
+ bool default_target);
// The what argument is used in diagnostics (e.g., "expected <what>
// instead of ...".
@@ -337,9 +397,6 @@ namespace build2
// project separator. Note that even if it is NULL, the result may still
// contain non-simple names due to variable expansions.
//
-
- static const string name_separators;
-
names
parse_names (token& t, token_type& tt,
pattern_mode pmode,
@@ -362,14 +419,7 @@ namespace build2
const string* separators = &name_separators)
{
names ns;
- parse_names (t, tt,
- ns,
- pmode,
- chunk,
- what,
- separators,
- 0,
- nullopt, nullptr, nullptr);
+ parse_names (t, tt, ns, pmode, chunk, what, separators);
return ns;
}
@@ -391,14 +441,7 @@ namespace build2
bool chunk = false)
{
names ns;
- auto r (parse_names (t, tt,
- ns,
- pmode,
- chunk,
- what,
- separators,
- 0,
- nullopt, nullptr, nullptr));
+ auto r (parse_names (t, tt, ns, pmode, chunk, what, separators));
value v (r.type); // Potentially typed NULL value.
@@ -518,8 +561,12 @@ namespace build2
// Customization hooks.
//
protected:
- // If qual is not empty, then its pair member should indicate the kind
- // of qualification: ':' -- target, '/' -- scope.
+ // If qual is not empty, then first element's pair member indicates the
+ // kind of qualification:
+ //
+ // '\0' -- target
+ // '@' -- out-qualified target
+ // '/' -- scope
//
// Note that this function is called even during pre-parse with the result
// unused. In this case a valid name will only be provided for variables
@@ -527,8 +574,12 @@ namespace build2
// example, $($x ? X : Y)) it will be empty (along with qual, which can
// only be non-empty for a computed variable).
//
+ // Note also that this function is (currently) not called by some lookup-
+ // like functions ($defined(), $config.origin()). But we should be careful
+ // if/when extending this and audit all the existing use-cases.
+ //
virtual lookup
- lookup_variable (name&& qual, string&& name, const location&);
+ lookup_variable (names&& qual, string&& name, const location&);
// This function is only called during pre-parse and is the continuation
// of the similar logic in lookup_variable() above (including the fact
@@ -553,12 +604,12 @@ namespace build2
switch_scope (const dir_path& out_base);
void
- process_default_target (token&);
+ process_default_target (token&, const buildfile*);
// Enter buildfile as a target.
//
- void
- enter_buildfile (const path&);
+ const buildfile&
+ enter_buildfile (const path&, optional<dir_path> out = nullopt);
// Lexer.
//
@@ -644,15 +695,24 @@ namespace build2
replay_data_[replay_i_].mode == m);
}
+ // In the replay mode return the lexing mode of the token returned by the
+ // subsequent next() or peek() call.
+ //
lexer_mode
mode () const
{
if (replay_ != replay::play)
+ {
return lexer_->mode ();
+ }
else
{
- assert (replay_i_ != replay_data_.size ());
- return replay_data_[replay_i_].mode;
+ assert (!peeked_ || replay_i_ != 0);
+
+ size_t i (!peeked_ ? replay_i_ : replay_i_ - 1);
+ assert (i != replay_data_.size ());
+
+ return replay_data_[i].mode;
}
}
@@ -707,6 +767,16 @@ namespace build2
}
void
+ replay_pop ()
+ {
+ assert (replay_ == replay::save);
+
+ assert (!peeked_ && !replay_data_.empty ());
+
+ replay_data_.pop_back ();
+ }
+
+ void
replay_play ()
{
assert ((replay_ == replay::save && !replay_data_.empty ()) ||
@@ -722,10 +792,21 @@ namespace build2
}
void
- replay_stop ()
+ replay_skip ()
{
+ assert (replay_ == replay::play);
+
assert (!peeked_);
+ replay_i_ = replay_data_.size () - 1;
+ }
+
+ void
+ replay_stop (bool verify = true)
+ {
+ if (verify)
+ assert (!peeked_);
+
if (replay_ == replay::play)
path_ = replay_path_; // Restore old path.
@@ -752,10 +833,23 @@ namespace build2
~replay_guard ()
{
if (p_ != nullptr)
- p_->replay_stop ();
+ p_->replay_stop (!uncaught_exception ());
}
private:
+ // C++17 deprecated uncaught_exception() so use uncaught_exceptions() if
+ // available.
+ //
+ static bool
+ uncaught_exception ()
+ {
+#ifdef __cpp_lib_uncaught_exceptions
+ return std::uncaught_exceptions () != 0;
+#else
+ return std::uncaught_exception ();
+#endif
+ }
+
parser* p_;
};
@@ -825,7 +919,7 @@ namespace build2
// NOTE: remember to update reset() if adding anything here.
//
protected:
- context& ctx;
+ context* ctx;
stage stage_;
bool pre_parse_ = false;
@@ -842,6 +936,13 @@ namespace build2
small_vector<attributes, 2> attributes_;
+ // Innermost if/switch (but excluding recipes).
+ //
+ // Note also that this is cleared/restored when crossing the include
+ // (but not source) boundary.
+ //
+ optional<location> condition_;
+
target* default_target_ = nullptr;
replay_token peek_;
diff --git a/libbuild2/prerequisite.cxx b/libbuild2/prerequisite.cxx
index cc41708..7e14c76 100644
--- a/libbuild2/prerequisite.cxx
+++ b/libbuild2/prerequisite.cxx
@@ -63,7 +63,7 @@ namespace build2
ext (to_ext (t.ext ())),
scope (t.base_scope ()),
target (&t),
- vars (t.ctx, false /* global */)
+ vars (*this, false /* shared */)
{
}
diff --git a/libbuild2/prerequisite.hxx b/libbuild2/prerequisite.hxx
index 476ed9d..2f63056 100644
--- a/libbuild2/prerequisite.hxx
+++ b/libbuild2/prerequisite.hxx
@@ -29,7 +29,9 @@ namespace build2
using target_type_type = build2::target_type;
// Note that unlike targets, for prerequisites an empty out directory
- // means undetermined rather than being definitely in the out tree.
+ // means undetermined rather than being definitely in the out tree (but
+ // maybe we should make this explicit via optional<>; see the from-target
+ // constructor).
//
// It might seem natural to keep the reference to the owner target instead
// of to the scope. But that's not the semantics that we have, consider:
@@ -61,6 +63,8 @@ namespace build2
// Note that the lookup is often ad hoc (see bin.whole as an example).
// But see also parser::lookup_variable() if adding something here.
//
+ // @@ PERF: redo as vector so can make move constructor noexcept.
+ //
public:
variable_map vars;
@@ -91,7 +95,7 @@ namespace build2
name (move (n)),
ext (move (e)),
scope (s),
- vars (s.ctx, false /* global */) {}
+ vars (*this, false /* shared */) {}
// Make a prerequisite from a target.
//
@@ -136,7 +140,10 @@ namespace build2
is_a (const target_type_type& tt) const {return type.is_a (tt);}
public:
- prerequisite (prerequisite&& x)
+ // Note that we have the noexcept specification even though vars
+ // (std::map) could potentially throw.
+ //
+ prerequisite (prerequisite&& x) noexcept
: proj (move (x.proj)),
type (x.type),
dir (move (x.dir)),
@@ -145,7 +152,8 @@ namespace build2
ext (move (x.ext)),
scope (x.scope),
target (x.target.load (memory_order_relaxed)),
- vars (move (x.vars)) {}
+ vars (move (x.vars), *this, false /* shared */)
+ {}
prerequisite (const prerequisite& x, memory_order o = memory_order_consume)
: proj (x.proj),
@@ -156,7 +164,7 @@ namespace build2
ext (x.ext),
scope (x.scope),
target (x.target.load (o)),
- vars (x.vars) {}
+ vars (x.vars, *this, false /* shared */) {}
};
inline ostream&
diff --git a/libbuild2/recipe.cxx b/libbuild2/recipe.cxx
index 3720059..87d37e7 100644
--- a/libbuild2/recipe.cxx
+++ b/libbuild2/recipe.cxx
@@ -7,8 +7,9 @@
namespace build2
{
- const recipe empty_recipe;
- const recipe noop_recipe (&noop_action);
- const recipe default_recipe (&default_action);
- const recipe group_recipe (&group_action);
+ recipe_function* const empty_recipe = nullptr;
+ recipe_function* const noop_recipe = &noop_action;
+ recipe_function* const default_recipe = &default_action;
+ recipe_function* const group_recipe = &group_action;
+ recipe_function* const inner_recipe = &execute_inner;
}
diff --git a/libbuild2/recipe.hxx b/libbuild2/recipe.hxx
index 508c059..97261f5 100644
--- a/libbuild2/recipe.hxx
+++ b/libbuild2/recipe.hxx
@@ -27,13 +27,14 @@ namespace build2
// and while the prerequisite will be re-examined via another dependency,
// this target is done).
//
- // Note that max size for the "small capture optimization" in std::function
- // ranges (in pointer sizes) from 0 (GCC prior to 5) to 2 (GCC 5) to 6 (VC
- // 14.2). With the size ranging (in bytes for 64-bit target) from 32 (GCC)
- // to 64 (VC).
+ // Note that max size for the "small size optimization" in std::function
+ // (which is what move_only_function_ex is based on) ranges (in pointer
+ // sizes) from 0 (GCC libstdc++ prior to 5) to 2 (GCC 5 and later) to 3
+ // (Clang libc++) to 6 (VC 14.3). With the size ranging (in bytes for 64-bit
+ // target) from 32 (GCC) to 64 (VC).
//
using recipe_function = target_state (action, const target&);
- using recipe = function<recipe_function>;
+ using recipe = move_only_function_ex<recipe_function>;
// Commonly-used recipes.
//
@@ -44,10 +45,11 @@ namespace build2
// <libbuild2/algorithm.hxx> for details). The group recipe calls the
// group's recipe.
//
- LIBBUILD2_SYMEXPORT extern const recipe empty_recipe;
- LIBBUILD2_SYMEXPORT extern const recipe noop_recipe;
- LIBBUILD2_SYMEXPORT extern const recipe default_recipe;
- LIBBUILD2_SYMEXPORT extern const recipe group_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const empty_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const noop_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const default_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const group_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const inner_recipe;
}
#endif // LIBBUILD2_RECIPE_HXX
diff --git a/libbuild2/rule-map.hxx b/libbuild2/rule-map.hxx
index 20895f3..8f6f59f 100644
--- a/libbuild2/rule-map.hxx
+++ b/libbuild2/rule-map.hxx
@@ -14,15 +14,43 @@
namespace build2
{
- using hint_rule_map =
- butl::prefix_map<string, reference_wrapper<const rule>, '.'>;
+ // A rule name is used both for diagnostics as well as to match rule hints
+ // (see rule_hints). A rule hint is a potentially partial rule name.
+ //
+ // The recommended rule naming scheme is to start with the module name, for
+ // example: cxx.compile, cxx.link. This way a rule hint can be just the
+ // module name, for example [rule_hint=cxx]. If a module can only possibly
+ // have a single rule, then the rule name can be just the module name (e.g.,
+ // `in`; though make doubly sure there is unlikely to be a need for another
+ // rule, for example, for documentation generation, in the future).
+ //
+ // The two common choices of names for the second component in a rule name
+ // is an action (e.g., cxx.compile, cxx.link) or a target type (e.g.,
+ // bin.def, bin.lib). The latter is a good choice when the action is
+ // inherent to the target type (e.g., "generate def file", "see through lib
+ // group"). Also note that a rule for compensating operations (e.g.,
+ // update/clean, install/uninstall) is customarily registered with the same
+ // name.
+ //
+ struct name_rule_map: butl::prefix_map<string,
+ reference_wrapper<const rule>,
+ '.'>
+ {
+ // Return true if the rule name matches a rule hint.
+ //
+ static bool
+ sub (const string& hint, const string& name)
+ {
+ return compare_type ('.').prefix (hint, name);
+ }
+ };
- using target_type_rule_map = map<const target_type*, hint_rule_map>;
+ using target_type_rule_map = map<const target_type*, name_rule_map>;
// This is an "indexed map" with operation_id being the index. Entry
// with id 0 is a wildcard.
//
- // Note that while we may resize some vectors during non-serial load, this
+ // Note that while we may resize some vectors during non-initial load, this
// is MT-safe since we never cache any references to their elements.
//
class operation_rule_map
@@ -33,7 +61,7 @@ namespace build2
bool
insert (operation_id oid,
const target_type& tt,
- string hint,
+ string name,
const rule& r)
{
// 3 is the number of builtin operations.
@@ -41,7 +69,7 @@ namespace build2
if (oid >= map_.size ())
map_.resize ((oid < 3 ? 3 : oid) + 1);
- return map_[oid][&tt].emplace (move (hint), r).second;
+ return map_[oid][&tt].emplace (move (name), r).second;
}
// Return NULL if not found.
@@ -78,17 +106,17 @@ namespace build2
bool
insert (action_id a,
const target_type& tt,
- string hint,
+ string name,
const rule& r)
{
- return insert (a >> 4, a & 0x0F, tt, move (hint), r);
+ return insert (a >> 4, a & 0x0F, tt, move (name), r);
}
template <typename T>
bool
- insert (action_id a, string hint, const rule& r)
+ insert (action_id a, string name, const rule& r)
{
- return insert (a, T::static_type, move (hint), r);
+ return insert (a, T::static_type, move (name), r);
}
// 0 oid is a wildcard.
@@ -97,17 +125,17 @@ namespace build2
insert (meta_operation_id mid,
operation_id oid,
const target_type& tt,
- string hint,
+ string name,
const rule& r)
{
if (mid_ == mid)
- return map_.insert (oid, tt, move (hint), r);
+ return map_.insert (oid, tt, move (name), r);
else
{
if (next_ == nullptr)
next_.reset (new rule_map (mid));
- return next_->insert (mid, oid, tt, move (hint), r);
+ return next_->insert (mid, oid, tt, move (name), r);
}
}
@@ -115,10 +143,10 @@ namespace build2
bool
insert (meta_operation_id mid,
operation_id oid,
- string hint,
+ string name,
const rule& r)
{
- return insert (mid, oid, T::static_type, move (hint), r);
+ return insert (mid, oid, T::static_type, move (name), r);
}
// Return NULL if not found.
diff --git a/libbuild2/rule.cxx b/libbuild2/rule.cxx
index 6dad685..097e15a 100644
--- a/libbuild2/rule.cxx
+++ b/libbuild2/rule.cxx
@@ -22,12 +22,43 @@ namespace build2
{
}
+ const target* rule::
+ import (const prerequisite_key&,
+ const optional<string>&,
+ const location&) const
+ {
+ return nullptr;
+ }
+
+ const rule_match*
+ match_adhoc_recipe (action, target&, match_extra&); // algorithm.cxx
+
+ bool rule::
+ sub_match (const string& n, operation_id o,
+ action a, target& t, match_extra& me) const
+ {
+ // First check for an ad hoc recipe (see match_rule() for details).
+ //
+ if (!t.adhoc_recipes.empty ())
+ {
+ // Use scratch match_extra since if there is no recipe, then we don't
+ // want to keep any changes and if there is, then we want it discarded.
+ //
+ match_extra s (true /* locked */); // Not called from adhoc_rule::match().
+ if (match_adhoc_recipe (action (a.meta_operation (), o), t, s) != nullptr)
+ return false;
+ }
+
+ const string& h (t.find_hint (o));
+ return name_rule_map::sub (h, n) && match (a, t, h, me);
+ }
+
// simple_rule
//
bool simple_rule::
- match (action a, target& t, const string& h, match_extra&) const
+ match (action a, target& t, const string&, match_extra&) const
{
- return match (a, t, h);
+ return match (a, t);
}
recipe simple_rule::
@@ -36,6 +67,20 @@ namespace build2
return apply (a, t);
}
+ bool simple_rule::
+ sub_match (const string& n, operation_id o,
+ action a, target& t) const
+ {
+ if (!t.adhoc_recipes.empty ())
+ {
+ match_extra s (true /* locked */); // Not called from adhoc_rule::match().
+ if (match_adhoc_recipe (action (a.meta_operation (), o), t, s) != nullptr)
+ return false;
+ }
+
+ return name_rule_map::sub (t.find_hint (o), n) && match (a, t);
+ }
+
// file_rule
//
// Note that this rule is special. It is the last, fallback rule. If
@@ -46,7 +91,7 @@ namespace build2
// use it as a guide to implement your own, normal, rules.
//
bool file_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t, const string&, match_extra&) const
{
tracer trace ("file_rule::match");
@@ -56,10 +101,13 @@ namespace build2
// are not doing anything for this action so not checking if the file
// exists seems harmless.
//
+ // But we also don't want to match real targets and not cleaning their
+ // output files.
+ //
switch (a)
{
case perform_clean_id:
- return true;
+ return t.decl != target_decl::real;
default:
{
// While normally we shouldn't do any of this in match(), no other
@@ -121,7 +169,7 @@ namespace build2
}
recipe file_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra&) const
{
// Update triggers the update of this target's prerequisites so it would
// seem natural that we should also trigger their cleanup. However, this
@@ -153,12 +201,12 @@ namespace build2
}
const file_rule file_rule::instance;
- const rule_match file_rule::rule_match ("file", file_rule::instance);
+ const rule_match file_rule::rule_match ("build.file", file_rule::instance);
// alias_rule
//
bool alias_rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
return true;
}
@@ -169,9 +217,25 @@ namespace build2
// Inject dependency on our directory (note: not parent) so that it is
// automatically created on update and removed on clean.
//
- inject_fsdir (a, t, false);
+ inject_fsdir (a, t, true, false);
- match_prerequisites (a, t);
+ // Handle the alias match-only level.
+ //
+ match_search ms;
+ if (t.ctx.match_only && *t.ctx.match_only == match_only_level::alias)
+ {
+ ms = [] (action,
+ const target& t,
+ const prerequisite& p,
+ include_type i)
+ {
+ return prerequisite_target (
+ p.is_a<alias> () ? &search (t, p) : nullptr,
+ i);
+ };
+ }
+
+ match_prerequisites (a, t, ms);
return default_recipe;
}
@@ -180,7 +244,7 @@ namespace build2
// fsdir_rule
//
bool fsdir_rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
return true;
}
@@ -214,7 +278,7 @@ namespace build2
if (verb >= 2)
text << "mkdir " << d;
else if (verb && t.ctx.current_diag_noise)
- text << "mkdir " << t;
+ print_diag ("mkdir", t);
};
// Note: ignoring the dry_run flag.
@@ -315,7 +379,7 @@ namespace build2
// noop_rule
//
bool noop_rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
return true;
}
@@ -339,8 +403,9 @@ namespace build2
}
bool adhoc_rule::
- match (action a, target& t, const string& h, match_extra& me) const
+ match (action a, target& xt, const string& h, match_extra& me) const
{
+ const target& t (xt);
return pattern == nullptr || pattern->match (a, t, h, me);
}
diff --git a/libbuild2/rule.hxx b/libbuild2/rule.hxx
index 3eb7775..4f77432 100644
--- a/libbuild2/rule.hxx
+++ b/libbuild2/rule.hxx
@@ -22,7 +22,12 @@ namespace build2
// you need to modify some state (e.g., counters or some such), then make
// sure things are MT-safe.
//
- // Note: match() is only called once but may not be followed by apply().
+ // Note: match() could be called multiple times (so should be idempotent)
+ // and it may not be followed by apply().
+ //
+ // The hint argument is the rule hint, if any, that was used to select this
+ // rule. While normally not factored into the match decision, a rule may
+ // "try harder" if a hint was specified (see cc::link_rule for an example).
//
// The match_extra argument (the type is defined in target.hxx) is used to
// pass additional information that is only needed by some rule
@@ -47,15 +52,45 @@ namespace build2
rule (const rule&) = delete;
rule& operator= (const rule&) = delete;
+
+ // Resolve a project-qualified target in a rule-specific manner.
+ //
+ // This is optional functionality that may be provided by some rules to
+ // facilitate immediate importation of certain target types. See the
+ // import machinery for details. The default implementation always returns
+ // NULL.
+ //
+ // Note that if this function returns a target, it should have the
+ // extension assigned so that as_name() returns a stable name.
+ //
+ virtual const target*
+ import (const prerequisite_key&,
+ const optional<string>& metadata,
+ const location&) const;
+
+ // Sometimes we want to match only if another rule of ours would match
+ // another operation. For example, we would want our install rule to match
+ // only if our update rule also matches.
+ //
+ // Arranging this, however, is not a simple matter of calling the other
+ // rule's match(): we also have to take into account ad hoc recipes and
+ // rule hints for that operation. This helper performs all the necessary
+ // checks. Note: should only be called from match() (see
+ // target::find_hint() for details). Note also that ad hoc recipes are
+ // checked for hint_op, not action's operation.
+ //
+ bool
+ sub_match (const string& rule_name, operation_id hint_op,
+ action, target&, match_extra&) const;
};
- // Simplified interface for rules that don't care about the extras.
+ // Simplified interface for rules that don't care about the hint or extras.
//
class LIBBUILD2_SYMEXPORT simple_rule: public rule
{
public:
virtual bool
- match (action, target&, const string& hint) const = 0;
+ match (action, target&) const = 0;
virtual recipe
apply (action, target&) const = 0;
@@ -65,19 +100,31 @@ namespace build2
virtual recipe
apply (action, target&, match_extra&) const override;
+
+ // The simplified version of sub_match() above.
+ //
+ // Note that it calls the simplified match() directly rather than going
+ // through the original.
+ //
+ bool
+ sub_match (const string& rule_name, operation_id hint_op,
+ action, target&) const;
};
// Fallback rule that only matches if the file exists. It will also match
// an mtime_target provided it has a set timestamp.
//
- class LIBBUILD2_SYMEXPORT file_rule: public simple_rule
+ // Note: this rule is "hot" because it matches every static source file and
+ // so we don't use simple_rule to avoid two extra virtual calls.
+ //
+ class LIBBUILD2_SYMEXPORT file_rule: public rule
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&, const string&, match_extra&) const override;
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
file_rule () {}
@@ -89,7 +136,7 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -105,7 +152,7 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -132,7 +179,7 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -145,9 +192,14 @@ namespace build2
//
// Used for both ad hoc pattern rules and ad hoc recipes. For recipes, it's
// essentially a rule of one case. Note that when used as part of a pattern,
- // the implementation cannot use the match_extra::buffer nor the target
- // auxiliary data storage until the pattern's apply_*() calls have been
- // made.
+ // the implementation cannot use the match_extra::data() facility nor the
+ // target auxiliary data storage until the pattern's apply_*() calls have
+ // been made.
+ //
+ // Note also that when used as part of a pattern, the rule is also register
+ // for the dist meta-operation (unless there is an explicit recipe for dist)
+ // in order to inject additional pattern prerequisites which may "pull"
+ // additional sources into the distribution.
//
// Note: not exported.
//
@@ -200,6 +252,16 @@ namespace build2
// The default implementation forwards to the pattern's match() if there
// is a pattern and returns true otherwise.
//
+ // Note also that in case of a member of a group-based target, match() is
+ // called on the group while apply() on the member (see match_rule() in
+ // algorithms.cxx for details). This means that match() may be called
+ // without having the target locked and as a result match() should (unless
+ // known to only match a non-group) treat the target as const and only
+ // rely on immutable information (type, name, etc) since the group could
+ // be matched concurrenly. This case can be detected by examining
+ // match_extra::locked (see adhoc_rule_regex_pattern::match() for a
+ // use-case).
+ //
virtual bool
match (action, target&, const string&, match_extra&) const override;
@@ -214,8 +276,8 @@ namespace build2
// Implementation details.
//
public:
- // The name in rule_match is used as a hint and as a name in diagnostics.
- // The former does not apply to ad hoc recipes (but does apply to ad hoc
+ // The name in rule_match is used to match hints and in diagnostics. The
+ // former does not apply to ad hoc recipes (but does apply to ad hoc
// rules).
//
const build2::rule_match rule_match;
@@ -266,14 +328,28 @@ namespace build2
~adhoc_rule_pattern ();
public:
+ // Note: the adhoc_rule::match() restrictions apply here as well.
+ //
virtual bool
- match (action, target&, const string&, match_extra&) const = 0;
+ match (action, const target&, const string&, match_extra&) const = 0;
+ // Append additional group members. Note that this function should handle
+ // both ad hoc and explicit groups.
+ //
virtual void
- apply_adhoc_members (action, target&, match_extra&) const = 0;
-
+ apply_group_members (action, target&,
+ const scope& base,
+ match_extra&) const = 0;
+
+ // The implementation should append pattern prerequisites to
+ // t.prerequisite_targets[a] but not match. It should set bit 2 in
+ // prerequisite_target::include to indicate update=match and bit 3
+ // to indicate update=unmatch.
+ //
virtual void
- apply_prerequisites (action, target&, match_extra&) const = 0;
+ apply_prerequisites (action, target&,
+ const scope& base,
+ match_extra&) const = 0;
// Dump support.
//
diff --git a/libbuild2/scheduler.cxx b/libbuild2/scheduler.cxx
index bdd703d..5027f90 100644
--- a/libbuild2/scheduler.cxx
+++ b/libbuild2/scheduler.cxx
@@ -5,8 +5,11 @@
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__APPLE__)
# include <pthread.h>
-# ifdef __FreeBSD__
+# if defined(__FreeBSD__)
# include <pthread_np.h> // pthread_attr_get_np() (in <pthread.h> on NetBSD)
+# elif defined(__OpenBSD__)
+# include <sys/signal.h>
+# include <pthread_np.h> // pthread_stackseg_np()
# endif
#endif
@@ -362,8 +365,14 @@ namespace build2
size_t init_active,
size_t max_threads,
size_t queue_depth,
- optional<size_t> max_stack)
+ optional<size_t> max_stack,
+ size_t orig_max_active)
{
+ if (orig_max_active == 0)
+ orig_max_active = max_active;
+ else
+ assert (max_active <= orig_max_active);
+
// Lock the mutex to make sure our changes are visible in (other) active
// threads.
//
@@ -375,16 +384,18 @@ namespace build2
// were asked to run serially.
//
if (max_threads == 0)
- max_threads = (max_active == 1 ? 1 :
- sizeof (void*) < 8 ? 8 : 32) * max_active;
+ max_threads = (orig_max_active == 1
+ ? 1
+ : (sizeof (void*) < 8 ? 8 : 32) * orig_max_active);
assert (shutdown_ &&
init_active != 0 &&
init_active <= max_active &&
- max_active <= max_threads);
+ orig_max_active <= max_threads);
active_ = init_active_ = init_active;
- max_active_ = orig_max_active_ = max_active;
+ max_active_ = max_active;
+ orig_max_active_ = orig_max_active;
max_threads_ = max_threads;
// This value should be proportional to the amount of hardware concurrency
@@ -398,7 +409,7 @@ namespace build2
//
task_queue_depth_ = queue_depth != 0
? queue_depth
- : max_active * 8;
+ : orig_max_active_ * 8;
queued_task_count_.store (0, memory_order_relaxed);
@@ -421,6 +432,8 @@ namespace build2
shutdown_ = false;
+ // Delay thread startup if serial.
+ //
if (max_active_ != 1)
dead_thread_ = thread (deadlock_monitor, this);
}
@@ -429,7 +442,7 @@ namespace build2
tune (size_t max_active)
{
// Note that if we tune a parallel scheduler to run serially, we will
- // still have the deadlock monitoring thread running.
+ // still have the deadlock monitoring thread loitering around.
// With multiple initial active threads we will need to make changes to
// max_active_ visible to other threads and which we currently say can be
@@ -451,6 +464,11 @@ namespace build2
lock l (wait_idle ());
swap (max_active_, max_active);
+
+ // Start the deadlock thread if its startup was delayed.
+ //
+ if (max_active_ != 1 && !dead_thread_.joinable ())
+ dead_thread_ = thread (deadlock_monitor, this);
}
return max_active == orig_max_active_ ? 0 : max_active;
@@ -519,7 +537,7 @@ namespace build2
// Wait for the deadlock monitor (the only remaining thread).
//
- if (orig_max_active_ != 1) // See tune() for why not max_active_.
+ if (dead_thread_.joinable ())
{
l.unlock ();
dead_condv_.notify_one ();
@@ -835,6 +853,15 @@ namespace build2
if (r != 0)
throw_system_error (r);
+#elif defined(__OpenBSD__)
+ stack_t s;
+ int r (pthread_stackseg_np (pthread_self (), &s));
+
+ if (r != 0)
+ throw_system_error (r);
+
+ stack_size = s.ss_size;
+
#else // defined(__APPLE__)
stack_size = pthread_get_stacksize_np (pthread_self ());
#endif
diff --git a/libbuild2/scheduler.hxx b/libbuild2/scheduler.hxx
index dcde79b..dcddfcc 100644
--- a/libbuild2/scheduler.hxx
+++ b/libbuild2/scheduler.hxx
@@ -5,11 +5,10 @@
#define LIBBUILD2_SCHEDULER_HXX
#include <list>
-#include <mutex>
#include <tuple>
#include <atomic>
-#include <type_traits> // aligned_storage, etc
-#include <condition_variable>
+#include <cstddef> // max_align_t
+#include <type_traits> // decay, etc
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -249,8 +248,12 @@ namespace build2
alloc_guard (): n (0), s_ (nullptr) {}
alloc_guard (scheduler& s, size_t m): n (s.allocate (m)), s_ (&s) {}
- alloc_guard (alloc_guard&& x): n (x.n), s_ (x.s_) {x.s_ = nullptr;}
- alloc_guard& operator= (alloc_guard&& x)
+
+ alloc_guard (alloc_guard&& x) noexcept
+ : n (x.n), s_ (x.s_) {x.s_ = nullptr;}
+
+ alloc_guard&
+ operator= (alloc_guard&& x) noexcept
{
if (&x != this)
{
@@ -301,14 +304,25 @@ namespace build2
// If the maximum threads or task queue depth arguments are unspecified,
// then appropriate defaults are used.
//
+ // Passing non-zero orig_max_active (normally the real max active) allows
+ // starting up a pre-tuned scheduler. In particular, starting a pre-tuned
+ // to serial scheduler is relatively cheap since starting the deadlock
+ // detection thread is delayed until the scheduler is re-tuned.
+ //
explicit
scheduler (size_t max_active,
size_t init_active = 1,
size_t max_threads = 0,
size_t queue_depth = 0,
- optional<size_t> max_stack = nullopt)
+ optional<size_t> max_stack = nullopt,
+ size_t orig_max_active = 0)
{
- startup (max_active, init_active, max_threads, queue_depth, max_stack);
+ startup (max_active,
+ init_active,
+ max_threads,
+ queue_depth,
+ max_stack,
+ orig_max_active);
}
// Start the scheduler.
@@ -318,7 +332,8 @@ namespace build2
size_t init_active = 1,
size_t max_threads = 0,
size_t queue_depth = 0,
- optional<size_t> max_stack = nullopt);
+ optional<size_t> max_stack = nullopt,
+ size_t orig_max_active = 0);
// Return true if the scheduler was started up.
//
@@ -343,12 +358,19 @@ namespace build2
size_t
tune (size_t max_active);
+ bool
+ tuned () const {return max_active_ != orig_max_active_;}
+
struct tune_guard
{
tune_guard (): s_ (nullptr), o_ (0) {}
tune_guard (scheduler& s, size_t ma): s_ (&s), o_ (s_->tune (ma)) {}
- tune_guard (tune_guard&& x): s_ (x.s_), o_ (x.o_) {x.s_ = nullptr;}
- tune_guard& operator= (tune_guard&& x)
+
+ tune_guard (tune_guard&& x) noexcept
+ : s_ (x.s_), o_ (x.o_) {x.s_ = nullptr;}
+
+ tune_guard&
+ operator= (tune_guard&& x) noexcept
{
if (&x != this)
{
@@ -416,8 +438,8 @@ namespace build2
{
explicit
monitor_guard (scheduler* s = nullptr): s_ (s) {}
- monitor_guard (monitor_guard&& x): s_ (x.s_) {x.s_ = nullptr;}
- monitor_guard& operator= (monitor_guard&& x)
+ monitor_guard (monitor_guard&& x) noexcept: s_ (x.s_) {x.s_ = nullptr;}
+ monitor_guard& operator= (monitor_guard&& x) noexcept
{
if (&x != this)
{
@@ -480,7 +502,7 @@ namespace build2
static size_t
hardware_concurrency ()
{
- return std::thread::hardware_concurrency ();
+ return build2::thread::hardware_concurrency ();
}
// Return a prime number that can be used as a lock shard size that's
@@ -497,7 +519,7 @@ namespace build2
// to become idle. Return the lock over the scheduler mutex. Normally you
// don't need to call this function directly.
//
- using lock = std::unique_lock<std::mutex>;
+ using lock = build2::mlock;
lock
wait_idle ();
@@ -559,7 +581,7 @@ namespace build2
size_t monitor_init_; // Initial count.
function<size_t (size_t)> monitor_func_;
- std::mutex mutex_;
+ build2::mutex mutex_;
bool shutdown_ = true; // Shutdown flag.
optional<size_t> max_stack_;
@@ -599,8 +621,8 @@ namespace build2
//
size_t orig_max_active_ = 0;
- std::condition_variable idle_condv_; // Idle helpers queue.
- std::condition_variable ready_condv_; // Ready masters queue.
+ build2::condition_variable idle_condv_; // Idle helpers queue.
+ build2::condition_variable ready_condv_; // Ready masters queue.
// Statistics counters.
//
@@ -619,8 +641,8 @@ namespace build2
// Deadlock detection.
//
- std::thread dead_thread_;
- std::condition_variable dead_condv_;
+ build2::thread dead_thread_;
+ build2::condition_variable dead_condv_;
static void*
deadlock_monitor (void*);
@@ -641,8 +663,8 @@ namespace build2
//
struct wait_slot
{
- std::mutex mutex;
- std::condition_variable condv;
+ build2::mutex mutex;
+ build2::condition_variable condv;
size_t waiters = 0;
const atomic_count* task_count;
bool shutdown = true;
@@ -663,7 +685,7 @@ namespace build2
//
struct task_data
{
- std::aligned_storage<sizeof (void*) * 8>::type data;
+ alignas (std::max_align_t) unsigned char data[sizeof (void*) * 8];
void (*thunk) (scheduler&, lock&, void*);
};
@@ -714,7 +736,7 @@ namespace build2
struct task_queue: task_queue_data
{
- std::mutex mutex;
+ build2::mutex mutex;
bool shutdown = false;
size_t stat_full = 0; // Number of times push() returned NULL.
diff --git a/libbuild2/scheduler.test.cxx b/libbuild2/scheduler.test.cxx
index b29c932..2ef8d5c 100644
--- a/libbuild2/scheduler.test.cxx
+++ b/libbuild2/scheduler.test.cxx
@@ -2,7 +2,6 @@
// license : MIT; see accompanying LICENSE file
#include <chrono>
-#include <thread>
#include <iostream>
diff --git a/libbuild2/scope.cxx b/libbuild2/scope.cxx
index 93f21db..be2669d 100644
--- a/libbuild2/scope.cxx
+++ b/libbuild2/scope.cxx
@@ -23,7 +23,7 @@ namespace build2
? empty_project_name
: i->first);
- os << (i != b ? " " : "") << n << '@' << i->second;
+ os << (i != b ? " " : "") << n << '@' << i->second.string ();
}
return os;
@@ -32,8 +32,8 @@ namespace build2
// scope
//
scope::
- scope (context& c, bool global)
- : ctx (c), vars (c, global), target_vars (c, global)
+ scope (context& c, bool shared)
+ : ctx (c), vars (*this, shared), target_vars (c, shared)
{
}
@@ -806,26 +806,68 @@ namespace build2
fail (loc) << "expected directory after '@'";
}
- dir_path& d (n.dir);
+ dir_path& dir (n.dir);
const dir_path& sd (src_path ());
const dir_path& od (out_path ());
- if (d.empty ())
- d = src ? sd : od; // Already dormalized.
+ bool nabs (false);
+
+ if (dir.empty ())
+ dir = src ? sd : od; // Already normalized.
else
{
- if (d.relative ())
- d = (src ? sd : od) / d;
+ if (dir.relative ())
+ dir = (src ? sd : od) / dir;
+ else if (src)
+ nabs = true;
- d.normalize ();
+ dir.normalize ();
}
dir_path out;
- if (src && sd != od) // If in-source build, then out must be empty.
+ if (src)
{
- out = o.dir.relative () ? od / o.dir : move (o.dir);
+ bool oabs (o.dir.absolute ());
+
+ out = oabs ? move (o.dir) : od / o.dir;
out.normalize ();
+
+ // Make sure out and src are parallel unless both were specified as
+ // absolute. We make an exception for this case because out may be used
+ // to "tag" imported targets (see cc::search_library()). So it's sort of
+ // the "I know what I am doing" escape hatch (it would have been even
+ // better to verify such a target is outside any project but that won't
+ // be cheap).
+ //
+ // See similar code for prerequisites in parser::parse_dependency().
+ //
+ if (nabs && oabs)
+ ;
+ else if (root_->out_eq_src ()
+ ? out == dir
+ //
+ // @@ PERF: could just compare leafs in place.
+ //
+ : (out.sub (root_->out_path ()) &&
+ dir.sub (root_->src_path ()) &&
+ out.leaf (root_->out_path ()) == dir.leaf (root_->src_path ())))
+ ;
+ else
+ // @@ TMP change warn to fail after 0.16.0 release.
+ //
+ warn (loc) << "target output directory " << out
+ << " must be parallel to source directory " << dir;
+
+ // If this target is in this project, then out must be empty if this is
+ // in source build. We assume that if either src or out are relative,
+ // then it belongs to this project.
+ //
+ if (root_->out_eq_src ())
+ {
+ if (!nabs || !oabs || out.sub (root_->out_path ()))
+ out.clear ();
+ }
}
o.dir = move (out); // Result.
@@ -910,7 +952,9 @@ namespace build2
}
pair<reference_wrapper<const target_type>, bool> scope::
- derive_target_type (const string& name, const target_type& base)
+ derive_target_type (const string& name,
+ const target_type& base,
+ target_type::flag flags)
{
assert (root_scope () == this);
@@ -928,10 +972,13 @@ namespace build2
//
// Currently, if we define myfile{}: file{}, then myfile{foo} and
// myfile{foo.x} are the same target.
+
+ // Note: copies flags.
//
unique_ptr<target_type> dt (new target_type (base));
dt->base = &base;
dt->factory = &derived_tt_factory;
+ dt->flags |= flags;
#if 0
// @@ We should probably inherit the fixed extension unless overriden with
@@ -1028,7 +1075,7 @@ namespace build2
if (er.first->second.front () == nullptr)
{
- er.first->second.front () = new scope (ctx, true /* global */);
+ er.first->second.front () = new scope (ctx, true /* shared */);
er.second = true;
}
diff --git a/libbuild2/scope.hxx b/libbuild2/scope.hxx
index f82db72..8e25d78 100644
--- a/libbuild2/scope.hxx
+++ b/libbuild2/scope.hxx
@@ -4,8 +4,6 @@
#ifndef LIBBUILD2_SCOPE_HXX
#define LIBBUILD2_SCOPE_HXX
-#include <unordered_set>
-
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
@@ -28,8 +26,12 @@ namespace build2
using subprojects = map<project_name, dir_path>;
+ // Print as name@dir sequence.
+ //
+ // Note: trailing slash is not printed for the directory path.
+ //
LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const subprojects&); // Print as name@dir sequence.
+ operator<< (ostream&, const subprojects&);
class LIBBUILD2_SYMEXPORT scope
{
@@ -132,7 +134,7 @@ namespace build2
lookup_type
operator[] (const string& name) const
{
- const variable* var (ctx.var_pool.find (name));
+ const variable* var (var_pool ().find (name));
return var != nullptr ? operator[] (*var) : lookup_type ();
}
@@ -298,13 +300,6 @@ namespace build2
//
variable_type_map target_vars;
- // Set of buildfiles already loaded for this scope. The included
- // buildfiles are checked against the project's root scope while
- // imported -- against the global scope (global_scope).
- //
- public:
- std::unordered_set<path> buildfiles;
-
// Target types.
//
// Note that target types are project-wide (even if the module that
@@ -395,8 +390,12 @@ namespace build2
// reference to the target type and an indicator of whether it was
// actually created.
//
+ // Note: the flags are OR'ed to the base's flags.
+ //
pair<reference_wrapper<const target_type>, bool>
- derive_target_type (const string& name, const target_type& base);
+ derive_target_type (const string& name,
+ const target_type& base,
+ target_type::flag flags = target_type::flag::none);
template <typename T>
pair<reference_wrapper<const target_type>, bool>
@@ -418,19 +417,20 @@ namespace build2
template <typename T>
void
- insert_rule (action_id a, string hint, const rule& r)
+ insert_rule (action_id a, string name, const rule& r)
{
- rules.insert<T> (a, move (hint), r);
+ rules.insert<T> (a, move (name), r);
}
+ // 0 meta-operation id is treated as an (emulated) wildcard.
+ //
+ // Emulated means that we just iterate over all the meta-operations known
+ // to this project (and they should all be known at this point) and
+ // register the rule for each of them.
+ //
template <typename T>
void
- insert_rule (meta_operation_id mid, operation_id oid,
- string hint,
- const rule& r)
- {
- rules.insert<T> (mid, oid, move (hint), r);
- }
+ insert_rule (meta_operation_id, operation_id, string name, const rule&);
// Operation callbacks.
//
@@ -481,9 +481,10 @@ namespace build2
// is not yet determined (happens at the end of bootstrap_src()). NULL
// means there are no subprojects.
//
- optional<const build2::subprojects*> subprojects;
+ optional<build2::subprojects*> subprojects;
- bool altn; // True if using alternative build file/directory naming.
+ bool altn; // True if using alternative build file/directory naming.
+ bool loaded; // True if already loaded (load_root()).
// Build file/directory naming scheme used by this project.
//
@@ -502,14 +503,40 @@ namespace build2
const path& src_root_file; // build[2]/bootstrap/src-root.build[2]
const path& out_root_file; // build[2]/bootstrap/src-root.build[2]
+ // Project-private variable pool.
+ //
+ // Note: see scope::var_pool_ and use scope::var_pool().
+ //
+ variable_pool var_pool;
+
// Meta/operations supported by this project.
//
build2::meta_operations meta_operations;
build2::operations operations;
- // Modules loaded by this project.
+ // Modules imported/loaded by this project.
//
- module_map modules;
+ module_import_map imported_modules;
+ module_state_map loaded_modules;
+
+ // Buildfiles already loaded for this project.
+ //
+ // We don't expect too many of them per project so let's use vector
+ // with linear search.
+ //
+ paths buildfiles;
+
+ bool
+ insert_buildfile (const path& f)
+ {
+ bool r (find (buildfiles.begin (),
+ buildfiles.end (),
+ f) == buildfiles.end ());
+ if (r)
+ buildfiles.push_back (f);
+
+ return r;
+ }
// Variable override cache.
//
@@ -537,33 +564,53 @@ namespace build2
// when, for example, caching environment-sensitive information.
//
string environment_checksum;
+
+ root_extra_type (scope&, bool altn); // file.cxx
};
unique_ptr<root_extra_type> root_extra;
+ // The last argument is the operation variable (see var_include) or NULL
+ // if not used.
+ //
void
- insert_operation (operation_id id, const operation_info& in)
+ insert_operation (operation_id id,
+ const operation_info& in,
+ const variable* ovar)
{
- root_extra->operations.insert (id, in);
+ // The operation variable should have prerequisite or target visibility.
+ //
+ assert (ovar == nullptr ||
+ (ovar->visibility == variable_visibility::prereq ||
+ ovar->visibility == variable_visibility::target));
+
+ root_extra->operations.insert (id, project_operation_info {&in, ovar});
}
void
insert_meta_operation (meta_operation_id id, const meta_operation_info& in)
{
- root_extra->meta_operations.insert (id, in);
+ root_extra->meta_operations.insert (id, &in);
}
bool
find_module (const string& name) const
{
- return root_extra->modules.find_module<module> (name) != nullptr;
+ return root_extra->loaded_modules.find_module<module> (name) != nullptr;
}
template <typename T>
T*
+ find_module (const string& name)
+ {
+ return root_extra->loaded_modules.find_module<T> (name);
+ }
+
+ template <typename T>
+ const T*
find_module (const string& name) const
{
- return root_extra->modules.find_module<T> (name);
+ return root_extra->loaded_modules.find_module<T> (name);
}
public:
@@ -576,10 +623,29 @@ namespace build2
return const_cast<scope&> (*this);
}
+ // Return the project-private variable pool (which is chained to the
+ // public pool) unless pub is true, in which case return the public pool.
+ //
+ // You would normally go for the public pool directly as an optimization
+ // (for example, in the module's init()) if you know all your variables
+ // are qualified and thus public.
+ //
variable_pool&
- var_pool ()
+ var_pool (bool pub = false)
+ {
+ return (pub ? ctx.var_pool :
+ var_pool_ != nullptr ? *var_pool_ :
+ root_ != nullptr ? *root_->var_pool_ :
+ ctx.var_pool).rw (*this);
+ }
+
+ const variable_pool&
+ var_pool (bool pub = false) const
{
- return ctx.var_pool.rw (*this);
+ return (pub ? ctx.var_pool :
+ var_pool_ != nullptr ? *var_pool_ :
+ root_ != nullptr ? *root_->var_pool_ :
+ ctx.var_pool);
}
private:
@@ -587,13 +653,13 @@ namespace build2
friend class scope_map;
friend class temp_scope;
- // These two from <libbuild2/file.hxx> set strong_.
+ // These from <libbuild2/file.hxx> set strong_.
//
- friend LIBBUILD2_SYMEXPORT void create_bootstrap_outer (scope&);
+ friend LIBBUILD2_SYMEXPORT void create_bootstrap_outer (scope&, bool);
friend LIBBUILD2_SYMEXPORT scope& create_bootstrap_inner (scope&,
const dir_path&);
- scope (context&, bool global);
+ scope (context&, bool shared);
~scope ();
// Return true if this root scope can be amalgamated.
@@ -608,6 +674,8 @@ namespace build2
scope* root_;
scope* strong_ = nullptr; // Only set on root scopes.
// NULL means no strong amalgamtion.
+
+ variable_pool* var_pool_ = nullptr; // For temp_scope override.
};
inline bool
@@ -675,24 +743,28 @@ namespace build2
// Temporary scope. The idea is to be able to create a temporary scope in
// order not to change the variables in the current scope. Such a scope is
- // not entered in to the scope map. As a result it can only be used as a
- // temporary set of variables. In particular, defining targets directly in
- // such a scope will surely end up badly. Defining any nested scopes will be
- // as if defining such a scope in the parent (since path() returns parent's
- // path).
+ // not entered in to the scope map and its parent is the global scope. As a
+ // result it can only be used as a temporary set of variables. In
+ // particular, defining targets directly in such a scope will surely end up
+ // badly.
//
class temp_scope: public scope
{
public:
- temp_scope (scope& p)
- : scope (p.ctx, false /* global */)
+ temp_scope (scope& gs)
+ : scope (gs.ctx, false /* shared */),
+ var_pool_ (nullptr /* shared */, &gs.ctx.var_pool.rw (gs), nullptr)
{
- out_path_ = p.out_path_;
- src_path_ = p.src_path_;
- parent_ = &p;
- root_ = p.root_;
- // No need to copy strong_ since we are never root scope.
+ // Note that making this scope its own root is a bad idea.
+ //
+ root_ = nullptr;
+ parent_ = &gs;
+ out_path_ = gs.out_path_;
+ scope::var_pool_ = &var_pool_;
}
+
+ private:
+ variable_pool var_pool_;
};
// Scope map. Protected by the phase mutex.
@@ -756,7 +828,7 @@ namespace build2
// single invocation. How can we pick the scope that is "ours", for some
// definition of "ours"?
//
- // The current think is that a project can be "associated" with other
+ // The current thinking is that a project can be "associated" with other
// projects: its sub-projects and imported projects (it doesn't feel like
// its super-projects should be in this set, but maybe). And "ours" could
// mean belonging to one of the associated projects. This feels correct
diff --git a/libbuild2/scope.ixx b/libbuild2/scope.ixx
index e123e4a..5d76a7f 100644
--- a/libbuild2/scope.ixx
+++ b/libbuild2/scope.ixx
@@ -173,6 +173,37 @@ namespace build2
this};
}
+ template <typename T>
+ inline void scope::
+ insert_rule (meta_operation_id mid, operation_id oid,
+ string name,
+ const rule& r)
+ {
+ if (mid != 0)
+ rules.insert<T> (mid, oid, move (name), r);
+ else
+ {
+ auto& ms (root_scope ()->root_extra->meta_operations);
+
+ for (size_t i (1), n (ms.size ()); i != n; ++i)
+ {
+ if (ms[i] != nullptr)
+ {
+ // Skip a few well-known meta-operations that cannot possibly
+ // trigger a rule match.
+ //
+ mid = static_cast<meta_operation_id> (i);
+
+ if (mid != noop_id &&
+ mid != info_id &&
+ mid != create_id &&
+ mid != disfigure_id)
+ rules.insert<T> (mid, oid, name, r);
+ }
+ }
+ }
+ }
+
inline dir_path
src_out (const dir_path& out, const scope& r)
{
diff --git a/libbuild2/script/builtin-options.cxx b/libbuild2/script/builtin-options.cxx
index 56e7f24..b71b9d3 100644
--- a/libbuild2/script/builtin-options.cxx
+++ b/libbuild2/script/builtin-options.cxx
@@ -18,215 +18,14 @@
#include <utility>
#include <ostream>
#include <sstream>
+#include <cstring>
namespace build2
{
- namespace script
+ namespace build
{
namespace cli
{
- // unknown_option
- //
- unknown_option::
- ~unknown_option () throw ()
- {
- }
-
- void unknown_option::
- print (::std::ostream& os) const
- {
- os << "unknown option '" << option ().c_str () << "'";
- }
-
- const char* unknown_option::
- what () const throw ()
- {
- return "unknown option";
- }
-
- // unknown_argument
- //
- unknown_argument::
- ~unknown_argument () throw ()
- {
- }
-
- void unknown_argument::
- print (::std::ostream& os) const
- {
- os << "unknown argument '" << argument ().c_str () << "'";
- }
-
- const char* unknown_argument::
- what () const throw ()
- {
- return "unknown argument";
- }
-
- // missing_value
- //
- missing_value::
- ~missing_value () throw ()
- {
- }
-
- void missing_value::
- print (::std::ostream& os) const
- {
- os << "missing value for option '" << option ().c_str () << "'";
- }
-
- const char* missing_value::
- what () const throw ()
- {
- return "missing option value";
- }
-
- // invalid_value
- //
- invalid_value::
- ~invalid_value () throw ()
- {
- }
-
- void invalid_value::
- print (::std::ostream& os) const
- {
- os << "invalid value '" << value ().c_str () << "' for option '"
- << option ().c_str () << "'";
-
- if (!message ().empty ())
- os << ": " << message ().c_str ();
- }
-
- const char* invalid_value::
- what () const throw ()
- {
- return "invalid option value";
- }
-
- // eos_reached
- //
- void eos_reached::
- print (::std::ostream& os) const
- {
- os << what ();
- }
-
- const char* eos_reached::
- what () const throw ()
- {
- return "end of argument stream reached";
- }
-
- // scanner
- //
- scanner::
- ~scanner ()
- {
- }
-
- // argv_scanner
- //
- bool argv_scanner::
- more ()
- {
- return i_ < argc_;
- }
-
- const char* argv_scanner::
- peek ()
- {
- if (i_ < argc_)
- return argv_[i_];
- else
- throw eos_reached ();
- }
-
- const char* argv_scanner::
- next ()
- {
- if (i_ < argc_)
- {
- const char* r (argv_[i_]);
-
- if (erase_)
- {
- for (int i (i_ + 1); i < argc_; ++i)
- argv_[i - 1] = argv_[i];
-
- --argc_;
- argv_[argc_] = 0;
- }
- else
- ++i_;
-
- ++start_position_;
- return r;
- }
- else
- throw eos_reached ();
- }
-
- void argv_scanner::
- skip ()
- {
- if (i_ < argc_)
- {
- ++i_;
- ++start_position_;
- }
- else
- throw eos_reached ();
- }
-
- std::size_t argv_scanner::
- position ()
- {
- return start_position_;
- }
-
- // vector_scanner
- //
- bool vector_scanner::
- more ()
- {
- return i_ < v_.size ();
- }
-
- const char* vector_scanner::
- peek ()
- {
- if (i_ < v_.size ())
- return v_[i_].c_str ();
- else
- throw eos_reached ();
- }
-
- const char* vector_scanner::
- next ()
- {
- if (i_ < v_.size ())
- return v_[i_++].c_str ();
- else
- throw eos_reached ();
- }
-
- void vector_scanner::
- skip ()
- {
- if (i_ < v_.size ())
- ++i_;
- else
- throw eos_reached ();
- }
-
- std::size_t vector_scanner::
- position ()
- {
- return start_position_ + i_;
- }
-
template <typename X>
struct parser
{
@@ -254,10 +53,31 @@ namespace build2
struct parser<bool>
{
static void
- parse (bool& x, scanner& s)
+ parse (bool& x, bool& xs, scanner& s)
{
- s.next ();
- x = true;
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
}
};
@@ -367,6 +187,56 @@ namespace build2
}
};
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
template <typename X, typename T, T X::*M>
void
thunk (X& x, scanner& s)
@@ -374,6 +244,14 @@ namespace build2
parser<T>::parse (x.*M, s);
}
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
template <typename X, typename T, T X::*M, bool X::*S>
void
thunk (X& x, scanner& s)
@@ -385,7 +263,6 @@ namespace build2
}
#include <map>
-#include <cstring>
namespace build2
{
@@ -406,13 +283,13 @@ namespace build2
set_options (int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: exact_ (),
newline_ (),
whitespace_ ()
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
}
@@ -421,13 +298,13 @@ namespace build2
int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: exact_ (),
newline_ (),
whitespace_ ()
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
}
@@ -436,13 +313,13 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: exact_ (),
newline_ (),
whitespace_ ()
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
@@ -453,21 +330,21 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: exact_ (),
newline_ (),
whitespace_ ()
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
set_options::
- set_options (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ set_options (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: exact_ (),
newline_ (),
whitespace_ ()
@@ -476,7 +353,7 @@ namespace build2
}
typedef
- std::map<std::string, void (*) (set_options&, ::build2::script::cli::scanner&)>
+ std::map<std::string, void (*) (set_options&, ::build2::build::cli::scanner&)>
_cli_set_options_map;
static _cli_set_options_map _cli_set_options_map_;
@@ -486,24 +363,24 @@ namespace build2
_cli_set_options_map_init ()
{
_cli_set_options_map_["--exact"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::exact_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::exact_ >;
_cli_set_options_map_["-e"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::exact_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::exact_ >;
_cli_set_options_map_["--newline"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::newline_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::newline_ >;
_cli_set_options_map_["-n"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::newline_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::newline_ >;
_cli_set_options_map_["--whitespace"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::whitespace_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::whitespace_ >;
_cli_set_options_map_["-w"] =
- &::build2::script::cli::thunk< set_options, bool, &set_options::whitespace_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::whitespace_ >;
}
};
static _cli_set_options_map_init _cli_set_options_map_init_;
bool set_options::
- _parse (const char* o, ::build2::script::cli::scanner& s)
+ _parse (const char* o, ::build2::build::cli::scanner& s)
{
_cli_set_options_map::const_iterator i (_cli_set_options_map_.find (o));
@@ -517,13 +394,13 @@ namespace build2
}
bool set_options::
- _parse (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt_mode,
- ::build2::script::cli::unknown_mode arg_mode)
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
{
// Can't skip combined flags (--no-combined-flags).
//
- assert (opt_mode != ::build2::script::cli::unknown_mode::skip);
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
bool r = false;
bool opt = true;
@@ -565,14 +442,14 @@ namespace build2
const_cast<char*> (v)
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (_parse (co.c_str (), ns))
{
// Parsed the option but not its value?
//
if (ns.end () != 2)
- throw ::build2::script::cli::invalid_value (co, v);
+ throw ::build2::build::cli::invalid_value (co, v);
s.next ();
r = true;
@@ -613,7 +490,7 @@ namespace build2
cf
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (!_parse (cf, ns))
break;
@@ -638,19 +515,19 @@ namespace build2
switch (opt_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_option (o);
+ throw ::build2::build::cli::unknown_option (o);
}
}
@@ -660,19 +537,19 @@ namespace build2
switch (arg_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_argument (o);
+ throw ::build2::build::cli::unknown_argument (o);
}
}
@@ -695,11 +572,11 @@ namespace build2
timeout_options (int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: success_ ()
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
}
@@ -708,11 +585,11 @@ namespace build2
int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: success_ ()
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
}
@@ -721,11 +598,11 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: success_ ()
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
@@ -736,26 +613,26 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: success_ ()
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
timeout_options::
- timeout_options (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ timeout_options (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: success_ ()
{
_parse (s, opt, arg);
}
typedef
- std::map<std::string, void (*) (timeout_options&, ::build2::script::cli::scanner&)>
+ std::map<std::string, void (*) (timeout_options&, ::build2::build::cli::scanner&)>
_cli_timeout_options_map;
static _cli_timeout_options_map _cli_timeout_options_map_;
@@ -765,16 +642,16 @@ namespace build2
_cli_timeout_options_map_init ()
{
_cli_timeout_options_map_["--success"] =
- &::build2::script::cli::thunk< timeout_options, bool, &timeout_options::success_ >;
+ &::build2::build::cli::thunk< timeout_options, &timeout_options::success_ >;
_cli_timeout_options_map_["-s"] =
- &::build2::script::cli::thunk< timeout_options, bool, &timeout_options::success_ >;
+ &::build2::build::cli::thunk< timeout_options, &timeout_options::success_ >;
}
};
static _cli_timeout_options_map_init _cli_timeout_options_map_init_;
bool timeout_options::
- _parse (const char* o, ::build2::script::cli::scanner& s)
+ _parse (const char* o, ::build2::build::cli::scanner& s)
{
_cli_timeout_options_map::const_iterator i (_cli_timeout_options_map_.find (o));
@@ -788,13 +665,13 @@ namespace build2
}
bool timeout_options::
- _parse (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt_mode,
- ::build2::script::cli::unknown_mode arg_mode)
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
{
// Can't skip combined flags (--no-combined-flags).
//
- assert (opt_mode != ::build2::script::cli::unknown_mode::skip);
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
bool r = false;
bool opt = true;
@@ -836,14 +713,14 @@ namespace build2
const_cast<char*> (v)
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (_parse (co.c_str (), ns))
{
// Parsed the option but not its value?
//
if (ns.end () != 2)
- throw ::build2::script::cli::invalid_value (co, v);
+ throw ::build2::build::cli::invalid_value (co, v);
s.next ();
r = true;
@@ -884,7 +761,7 @@ namespace build2
cf
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (!_parse (cf, ns))
break;
@@ -909,19 +786,19 @@ namespace build2
switch (opt_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_option (o);
+ throw ::build2::build::cli::unknown_option (o);
}
}
@@ -931,19 +808,19 @@ namespace build2
switch (arg_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_argument (o);
+ throw ::build2::build::cli::unknown_argument (o);
}
}
@@ -969,14 +846,14 @@ namespace build2
export_options (int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: unset_ (),
unset_specified_ (false),
clear_ (),
clear_specified_ (false)
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
}
@@ -985,14 +862,14 @@ namespace build2
int& argc,
char** argv,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: unset_ (),
unset_specified_ (false),
clear_ (),
clear_specified_ (false)
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
}
@@ -1001,14 +878,14 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: unset_ (),
unset_specified_ (false),
clear_ (),
clear_specified_ (false)
{
- ::build2::script::cli::argv_scanner s (argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
@@ -1019,22 +896,22 @@ namespace build2
char** argv,
int& end,
bool erase,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: unset_ (),
unset_specified_ (false),
clear_ (),
clear_specified_ (false)
{
- ::build2::script::cli::argv_scanner s (start, argc, argv, erase);
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
_parse (s, opt, arg);
end = s.end ();
}
export_options::
- export_options (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt,
- ::build2::script::cli::unknown_mode arg)
+ export_options (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
: unset_ (),
unset_specified_ (false),
clear_ (),
@@ -1044,7 +921,7 @@ namespace build2
}
typedef
- std::map<std::string, void (*) (export_options&, ::build2::script::cli::scanner&)>
+ std::map<std::string, void (*) (export_options&, ::build2::build::cli::scanner&)>
_cli_export_options_map;
static _cli_export_options_map _cli_export_options_map_;
@@ -1054,16 +931,16 @@ namespace build2
_cli_export_options_map_init ()
{
_cli_export_options_map_["--unset"] =
- &::build2::script::cli::thunk< export_options, vector<string>, &export_options::unset_,
+ &::build2::build::cli::thunk< export_options, vector<string>, &export_options::unset_,
&export_options::unset_specified_ >;
_cli_export_options_map_["-u"] =
- &::build2::script::cli::thunk< export_options, vector<string>, &export_options::unset_,
+ &::build2::build::cli::thunk< export_options, vector<string>, &export_options::unset_,
&export_options::unset_specified_ >;
_cli_export_options_map_["--clear"] =
- &::build2::script::cli::thunk< export_options, vector<string>, &export_options::clear_,
+ &::build2::build::cli::thunk< export_options, vector<string>, &export_options::clear_,
&export_options::clear_specified_ >;
_cli_export_options_map_["-c"] =
- &::build2::script::cli::thunk< export_options, vector<string>, &export_options::clear_,
+ &::build2::build::cli::thunk< export_options, vector<string>, &export_options::clear_,
&export_options::clear_specified_ >;
}
};
@@ -1071,7 +948,7 @@ namespace build2
static _cli_export_options_map_init _cli_export_options_map_init_;
bool export_options::
- _parse (const char* o, ::build2::script::cli::scanner& s)
+ _parse (const char* o, ::build2::build::cli::scanner& s)
{
_cli_export_options_map::const_iterator i (_cli_export_options_map_.find (o));
@@ -1085,13 +962,304 @@ namespace build2
}
bool export_options::
- _parse (::build2::script::cli::scanner& s,
- ::build2::script::cli::unknown_mode opt_mode,
- ::build2::script::cli::unknown_mode arg_mode)
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
+ {
+ // Can't skip combined flags (--no-combined-flags).
+ //
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
+
+ bool r = false;
+ bool opt = true;
+
+ while (s.more ())
+ {
+ const char* o = s.peek ();
+
+ if (std::strcmp (o, "--") == 0)
+ {
+ opt = false;
+ s.skip ();
+ r = true;
+ continue;
+ }
+
+ if (opt)
+ {
+ if (_parse (o, s))
+ {
+ r = true;
+ continue;
+ }
+
+ if (std::strncmp (o, "-", 1) == 0 && o[1] != '\0')
+ {
+ // Handle combined option values.
+ //
+ std::string co;
+ if (const char* v = std::strchr (o, '='))
+ {
+ co.assign (o, 0, v - o);
+ ++v;
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (co.c_str ()),
+ const_cast<char*> (v)
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (_parse (co.c_str (), ns))
+ {
+ // Parsed the option but not its value?
+ //
+ if (ns.end () != 2)
+ throw ::build2::build::cli::invalid_value (co, v);
+
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = co.c_str ();
+ }
+ }
+
+ // Handle combined flags.
+ //
+ char cf[3];
+ {
+ const char* p = o + 1;
+ for (; *p != '\0'; ++p)
+ {
+ if (!((*p >= 'a' && *p <= 'z') ||
+ (*p >= 'A' && *p <= 'Z') ||
+ (*p >= '0' && *p <= '9')))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ for (p = o + 1; *p != '\0'; ++p)
+ {
+ std::strcpy (cf, "-");
+ cf[1] = *p;
+ cf[2] = '\0';
+
+ int ac (1);
+ char* av[] =
+ {
+ cf
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (!_parse (cf, ns))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ // All handled.
+ //
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = cf;
+ }
+ }
+ }
+
+ switch (opt_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_option (o);
+ }
+ }
+
+ break;
+ }
+ }
+
+ switch (arg_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_argument (o);
+ }
+ }
+
+ break;
+ }
+
+ return r;
+ }
+
+ // for_options
+ //
+
+ for_options::
+ for_options ()
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ }
+
+ for_options::
+ for_options (int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ _parse (s, opt, arg);
+ }
+
+ for_options::
+ for_options (int start,
+ int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ _parse (s, opt, arg);
+ }
+
+ for_options::
+ for_options (int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ _parse (s, opt, arg);
+ end = s.end ();
+ }
+
+ for_options::
+ for_options (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ _parse (s, opt, arg);
+ end = s.end ();
+ }
+
+ for_options::
+ for_options (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ _parse (s, opt, arg);
+ }
+
+ typedef
+ std::map<std::string, void (*) (for_options&, ::build2::build::cli::scanner&)>
+ _cli_for_options_map;
+
+ static _cli_for_options_map _cli_for_options_map_;
+
+ struct _cli_for_options_map_init
+ {
+ _cli_for_options_map_init ()
+ {
+ _cli_for_options_map_["--exact"] =
+ &::build2::build::cli::thunk< for_options, &for_options::exact_ >;
+ _cli_for_options_map_["-e"] =
+ &::build2::build::cli::thunk< for_options, &for_options::exact_ >;
+ _cli_for_options_map_["--newline"] =
+ &::build2::build::cli::thunk< for_options, &for_options::newline_ >;
+ _cli_for_options_map_["-n"] =
+ &::build2::build::cli::thunk< for_options, &for_options::newline_ >;
+ _cli_for_options_map_["--whitespace"] =
+ &::build2::build::cli::thunk< for_options, &for_options::whitespace_ >;
+ _cli_for_options_map_["-w"] =
+ &::build2::build::cli::thunk< for_options, &for_options::whitespace_ >;
+ }
+ };
+
+ static _cli_for_options_map_init _cli_for_options_map_init_;
+
+ bool for_options::
+ _parse (const char* o, ::build2::build::cli::scanner& s)
+ {
+ _cli_for_options_map::const_iterator i (_cli_for_options_map_.find (o));
+
+ if (i != _cli_for_options_map_.end ())
+ {
+ (*(i->second)) (*this, s);
+ return true;
+ }
+
+ return false;
+ }
+
+ bool for_options::
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
{
// Can't skip combined flags (--no-combined-flags).
//
- assert (opt_mode != ::build2::script::cli::unknown_mode::skip);
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
bool r = false;
bool opt = true;
@@ -1133,14 +1301,14 @@ namespace build2
const_cast<char*> (v)
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (_parse (co.c_str (), ns))
{
// Parsed the option but not its value?
//
if (ns.end () != 2)
- throw ::build2::script::cli::invalid_value (co, v);
+ throw ::build2::build::cli::invalid_value (co, v);
s.next ();
r = true;
@@ -1181,7 +1349,7 @@ namespace build2
cf
};
- ::build2::script::cli::argv_scanner ns (0, ac, av);
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
if (!_parse (cf, ns))
break;
@@ -1206,19 +1374,19 @@ namespace build2
switch (opt_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_option (o);
+ throw ::build2::build::cli::unknown_option (o);
}
}
@@ -1228,19 +1396,19 @@ namespace build2
switch (arg_mode)
{
- case ::build2::script::cli::unknown_mode::skip:
+ case ::build2::build::cli::unknown_mode::skip:
{
s.skip ();
r = true;
continue;
}
- case ::build2::script::cli::unknown_mode::stop:
+ case ::build2::build::cli::unknown_mode::stop:
{
break;
}
- case ::build2::script::cli::unknown_mode::fail:
+ case ::build2::build::cli::unknown_mode::fail:
{
- throw ::build2::script::cli::unknown_argument (o);
+ throw ::build2::build::cli::unknown_argument (o);
}
}
diff --git a/libbuild2/script/builtin-options.hxx b/libbuild2/script/builtin-options.hxx
index d665279..9361d18 100644
--- a/libbuild2/script/builtin-options.hxx
+++ b/libbuild2/script/builtin-options.hxx
@@ -12,281 +12,7 @@
//
// End prologue.
-#include <vector>
-#include <iosfwd>
-#include <string>
-#include <cstddef>
-#include <exception>
-
-#ifndef CLI_POTENTIALLY_UNUSED
-# if defined(_MSC_VER) || defined(__xlC__)
-# define CLI_POTENTIALLY_UNUSED(x) (void*)&x
-# else
-# define CLI_POTENTIALLY_UNUSED(x) (void)x
-# endif
-#endif
-
-namespace build2
-{
- namespace script
- {
- namespace cli
- {
- class unknown_mode
- {
- public:
- enum value
- {
- skip,
- stop,
- fail
- };
-
- unknown_mode (value);
-
- operator value () const
- {
- return v_;
- }
-
- private:
- value v_;
- };
-
- // Exceptions.
- //
-
- class exception: public std::exception
- {
- public:
- virtual void
- print (::std::ostream&) const = 0;
- };
-
- ::std::ostream&
- operator<< (::std::ostream&, const exception&);
-
- class unknown_option: public exception
- {
- public:
- virtual
- ~unknown_option () throw ();
-
- unknown_option (const std::string& option);
-
- const std::string&
- option () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- };
-
- class unknown_argument: public exception
- {
- public:
- virtual
- ~unknown_argument () throw ();
-
- unknown_argument (const std::string& argument);
-
- const std::string&
- argument () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string argument_;
- };
-
- class missing_value: public exception
- {
- public:
- virtual
- ~missing_value () throw ();
-
- missing_value (const std::string& option);
-
- const std::string&
- option () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- };
-
- class invalid_value: public exception
- {
- public:
- virtual
- ~invalid_value () throw ();
-
- invalid_value (const std::string& option,
- const std::string& value,
- const std::string& message = std::string ());
-
- const std::string&
- option () const;
-
- const std::string&
- value () const;
-
- const std::string&
- message () const;
-
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
-
- private:
- std::string option_;
- std::string value_;
- std::string message_;
- };
-
- class eos_reached: public exception
- {
- public:
- virtual void
- print (::std::ostream&) const;
-
- virtual const char*
- what () const throw ();
- };
-
- // Command line argument scanner interface.
- //
- // The values returned by next() are guaranteed to be valid
- // for the two previous arguments up until a call to a third
- // peek() or next().
- //
- // The position() function returns a monotonically-increasing
- // number which, if stored, can later be used to determine the
- // relative position of the argument returned by the following
- // call to next(). Note that if multiple scanners are used to
- // extract arguments from multiple sources, then the end
- // position of the previous scanner should be used as the
- // start position of the next.
- //
- class scanner
- {
- public:
- virtual
- ~scanner ();
-
- virtual bool
- more () = 0;
-
- virtual const char*
- peek () = 0;
-
- virtual const char*
- next () = 0;
-
- virtual void
- skip () = 0;
-
- virtual std::size_t
- position () = 0;
- };
-
- class argv_scanner: public scanner
- {
- public:
- argv_scanner (int& argc,
- char** argv,
- bool erase = false,
- std::size_t start_position = 0);
-
- argv_scanner (int start,
- int& argc,
- char** argv,
- bool erase = false,
- std::size_t start_position = 0);
-
- int
- end () const;
-
- virtual bool
- more ();
-
- virtual const char*
- peek ();
-
- virtual const char*
- next ();
-
- virtual void
- skip ();
-
- virtual std::size_t
- position ();
-
- protected:
- std::size_t start_position_;
- int i_;
- int& argc_;
- char** argv_;
- bool erase_;
- };
-
- class vector_scanner: public scanner
- {
- public:
- vector_scanner (const std::vector<std::string>&,
- std::size_t start = 0,
- std::size_t start_position = 0);
-
- std::size_t
- end () const;
-
- void
- reset (std::size_t start = 0, std::size_t start_position = 0);
-
- virtual bool
- more ();
-
- virtual const char*
- peek ();
-
- virtual const char*
- next ();
-
- virtual void
- skip ();
-
- virtual std::size_t
- position ();
-
- private:
- std::size_t start_position_;
- const std::vector<std::string>& v_;
- std::size_t i_;
- };
-
- template <typename X>
- struct parser;
- }
- }
-}
-
-#include <libbuild2/types.hxx>
+#include <libbuild2/common-options.hxx>
namespace build2
{
@@ -300,34 +26,34 @@ namespace build2
set_options (int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
set_options (int start,
int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
set_options (int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
set_options (int start,
int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
- set_options (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ set_options (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
// Option accessors and modifiers.
//
@@ -362,13 +88,13 @@ namespace build2
//
protected:
bool
- _parse (const char*, ::build2::script::cli::scanner&);
+ _parse (const char*, ::build2::build::cli::scanner&);
private:
bool
- _parse (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option,
- ::build2::script::cli::unknown_mode argument);
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
public:
bool exact_;
@@ -384,34 +110,34 @@ namespace build2
timeout_options (int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
timeout_options (int start,
int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
timeout_options (int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
timeout_options (int start,
int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
- timeout_options (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ timeout_options (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
// Option accessors and modifiers.
//
@@ -428,13 +154,13 @@ namespace build2
//
protected:
bool
- _parse (const char*, ::build2::script::cli::scanner&);
+ _parse (const char*, ::build2::build::cli::scanner&);
private:
bool
- _parse (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option,
- ::build2::script::cli::unknown_mode argument);
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
public:
bool success_;
@@ -448,34 +174,34 @@ namespace build2
export_options (int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
export_options (int start,
int& argc,
char** argv,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
export_options (int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
export_options (int start,
int& argc,
char** argv,
int& end,
bool erase = false,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
- export_options (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option = ::build2::script::cli::unknown_mode::fail,
- ::build2::script::cli::unknown_mode argument = ::build2::script::cli::unknown_mode::stop);
+ export_options (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
// Option accessors and modifiers.
//
@@ -513,13 +239,13 @@ namespace build2
//
protected:
bool
- _parse (const char*, ::build2::script::cli::scanner&);
+ _parse (const char*, ::build2::build::cli::scanner&);
private:
bool
- _parse (::build2::script::cli::scanner&,
- ::build2::script::cli::unknown_mode option,
- ::build2::script::cli::unknown_mode argument);
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
public:
vector<string> unset_;
@@ -527,6 +253,90 @@ namespace build2
vector<string> clear_;
bool clear_specified_;
};
+
+ class for_options
+ {
+ public:
+ for_options ();
+
+ for_options (int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (int start,
+ int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ // Option accessors and modifiers.
+ //
+ const bool&
+ exact () const;
+
+ bool&
+ exact ();
+
+ void
+ exact (const bool&);
+
+ const bool&
+ newline () const;
+
+ bool&
+ newline ();
+
+ void
+ newline (const bool&);
+
+ const bool&
+ whitespace () const;
+
+ bool&
+ whitespace ();
+
+ void
+ whitespace (const bool&);
+
+ // Implementation details.
+ //
+ protected:
+ bool
+ _parse (const char*, ::build2::build::cli::scanner&);
+
+ private:
+ bool
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
+
+ public:
+ bool exact_;
+ bool newline_;
+ bool whitespace_;
+ };
}
}
diff --git a/libbuild2/script/builtin-options.ixx b/libbuild2/script/builtin-options.ixx
index 8fef25a..575eb95 100644
--- a/libbuild2/script/builtin-options.ixx
+++ b/libbuild2/script/builtin-options.ixx
@@ -9,164 +9,6 @@
//
// End prologue.
-#include <cassert>
-
-namespace build2
-{
- namespace script
- {
- namespace cli
- {
- // unknown_mode
- //
- inline unknown_mode::
- unknown_mode (value v)
- : v_ (v)
- {
- }
-
- // exception
- //
- inline ::std::ostream&
- operator<< (::std::ostream& os, const exception& e)
- {
- e.print (os);
- return os;
- }
-
- // unknown_option
- //
- inline unknown_option::
- unknown_option (const std::string& option)
- : option_ (option)
- {
- }
-
- inline const std::string& unknown_option::
- option () const
- {
- return option_;
- }
-
- // unknown_argument
- //
- inline unknown_argument::
- unknown_argument (const std::string& argument)
- : argument_ (argument)
- {
- }
-
- inline const std::string& unknown_argument::
- argument () const
- {
- return argument_;
- }
-
- // missing_value
- //
- inline missing_value::
- missing_value (const std::string& option)
- : option_ (option)
- {
- }
-
- inline const std::string& missing_value::
- option () const
- {
- return option_;
- }
-
- // invalid_value
- //
- inline invalid_value::
- invalid_value (const std::string& option,
- const std::string& value,
- const std::string& message)
- : option_ (option),
- value_ (value),
- message_ (message)
- {
- }
-
- inline const std::string& invalid_value::
- option () const
- {
- return option_;
- }
-
- inline const std::string& invalid_value::
- value () const
- {
- return value_;
- }
-
- inline const std::string& invalid_value::
- message () const
- {
- return message_;
- }
-
- // argv_scanner
- //
- inline argv_scanner::
- argv_scanner (int& argc,
- char** argv,
- bool erase,
- std::size_t sp)
- : start_position_ (sp + 1),
- i_ (1),
- argc_ (argc),
- argv_ (argv),
- erase_ (erase)
- {
- }
-
- inline argv_scanner::
- argv_scanner (int start,
- int& argc,
- char** argv,
- bool erase,
- std::size_t sp)
- : start_position_ (sp + static_cast<std::size_t> (start)),
- i_ (start),
- argc_ (argc),
- argv_ (argv),
- erase_ (erase)
- {
- }
-
- inline int argv_scanner::
- end () const
- {
- return i_;
- }
-
- // vector_scanner
- //
- inline vector_scanner::
- vector_scanner (const std::vector<std::string>& v,
- std::size_t i,
- std::size_t sp)
- : start_position_ (sp), v_ (v), i_ (i)
- {
- }
-
- inline std::size_t vector_scanner::
- end () const
- {
- return i_;
- }
-
- inline void vector_scanner::
- reset (std::size_t i, std::size_t sp)
- {
- i_ = i;
- start_position_ = sp;
- }
- }
- }
-}
-
namespace build2
{
namespace script
@@ -311,6 +153,63 @@ namespace build2
{
this->clear_specified_ = x;
}
+
+ // for_options
+ //
+
+ inline const bool& for_options::
+ exact () const
+ {
+ return this->exact_;
+ }
+
+ inline bool& for_options::
+ exact ()
+ {
+ return this->exact_;
+ }
+
+ inline void for_options::
+ exact (const bool& x)
+ {
+ this->exact_ = x;
+ }
+
+ inline const bool& for_options::
+ newline () const
+ {
+ return this->newline_;
+ }
+
+ inline bool& for_options::
+ newline ()
+ {
+ return this->newline_;
+ }
+
+ inline void for_options::
+ newline (const bool& x)
+ {
+ this->newline_ = x;
+ }
+
+ inline const bool& for_options::
+ whitespace () const
+ {
+ return this->whitespace_;
+ }
+
+ inline bool& for_options::
+ whitespace ()
+ {
+ return this->whitespace_;
+ }
+
+ inline void for_options::
+ whitespace (const bool& x)
+ {
+ this->whitespace_ = x;
+ }
}
}
diff --git a/libbuild2/script/builtin.cli b/libbuild2/script/builtin.cli
index 1e3fb45..c993983 100644
--- a/libbuild2/script/builtin.cli
+++ b/libbuild2/script/builtin.cli
@@ -1,7 +1,7 @@
// file : libbuild2/script/builtin.cli
// license : MIT; see accompanying LICENSE file
-include <libbuild2/types.hxx>;
+include <libbuild2/common.cli>;
// Note that options in this file are undocumented because we generate neither
// the usage printing code nor man pages. Instead, they are documented in the
@@ -30,5 +30,12 @@ namespace build2
vector<string> --unset|-u;
vector<string> --clear|-c;
};
+
+ class for_options
+ {
+ bool --exact|-e;
+ bool --newline|-n;
+ bool --whitespace|-w;
+ };
}
}
diff --git a/libbuild2/script/lexer.cxx b/libbuild2/script/lexer.cxx
index 7577149..e13bbdb 100644
--- a/libbuild2/script/lexer.cxx
+++ b/libbuild2/script/lexer.cxx
@@ -24,10 +24,7 @@ namespace build2
bool q (true); // quotes
if (!esc)
- {
- assert (!state_.empty ());
- esc = state_.top ().escapes;
- }
+ esc = current_state ().escapes;
switch (m)
{
@@ -84,7 +81,7 @@ namespace build2
}
assert (ps == '\0');
- state_.push (
+ mode_impl (
state {m, data, nullopt, false, false, ps, s, n, q, *esc, s1, s2});
}
@@ -93,7 +90,7 @@ namespace build2
{
token r;
- switch (state_.top ().mode)
+ switch (mode ())
{
case lexer_mode::command_expansion:
case lexer_mode::here_line_single:
@@ -119,7 +116,7 @@ namespace build2
xchar c (get ());
uint64_t ln (c.line), cn (c.column);
- const state& st (state_.top ());
+ const state& st (current_state ());
lexer_mode m (st.mode);
auto make_token = [&sep, &m, ln, cn] (type t)
diff --git a/libbuild2/script/lexer.hxx b/libbuild2/script/lexer.hxx
index dbfdfcc..3cbcc03 100644
--- a/libbuild2/script/lexer.hxx
+++ b/libbuild2/script/lexer.hxx
@@ -112,6 +112,8 @@ namespace build2
const redirect_aliases_type& redirect_aliases;
protected:
+ using build2::lexer::mode; // Getter.
+
lexer (istream& is, const path_name& name, uint64_t line,
const char* escapes,
bool set_mode,
diff --git a/libbuild2/script/parser.cxx b/libbuild2/script/parser.cxx
index fffe7bb..ae6da76 100644
--- a/libbuild2/script/parser.cxx
+++ b/libbuild2/script/parser.cxx
@@ -3,9 +3,14 @@
#include <libbuild2/script/parser.hxx>
+#include <cstring> // strchr()
+#include <sstream>
+
#include <libbuild2/variable.hxx>
-#include <libbuild2/script/run.hxx> // exit
+
+#include <libbuild2/script/run.hxx> // exit, stream_reader
#include <libbuild2/script/lexer.hxx>
+#include <libbuild2/script/builtin-options.hxx>
using namespace std;
@@ -15,6 +20,33 @@ namespace build2
{
using type = token_type;
+ bool parser::
+ need_cmdline_relex (const string& s)
+ {
+ for (auto i (s.begin ()), e (s.end ()); i != e; ++i)
+ {
+ char c (*i);
+
+ if (c == '\\')
+ {
+ if (++i == e)
+ return false;
+
+ c = *i;
+
+ if (c == '\\' || c == '\'' || c == '\"')
+ return true;
+
+ // Fall through.
+ }
+
+ if (strchr ("|<>&\"'", c) != nullptr)
+ return true;
+ }
+
+ return false;
+ }
+
value parser::
parse_variable_line (token& t, type& tt)
{
@@ -111,18 +143,20 @@ namespace build2
return nullopt;
}
- pair<command_expr, parser::here_docs> parser::
+ parser::parse_command_expr_result parser::
parse_command_expr (token& t, type& tt,
- const redirect_aliases& ra)
+ const redirect_aliases& ra,
+ optional<token>&& program)
{
- // enter: first token of the command line
+ // enter: first (or second, if program) token of the command line
// leave: <newline> or unknown token
command_expr expr;
// OR-ed to an implied false for the first term.
//
- expr.push_back ({expr_operator::log_or, command_pipe ()});
+ if (!pre_parse_)
+ expr.push_back ({expr_operator::log_or, command_pipe ()});
command c; // Command being assembled.
@@ -189,8 +223,8 @@ namespace build2
// Add the next word to either one of the pending positions or to
// program arguments by default.
//
- auto add_word = [&c, &p, &mod, &check_regex_mod, this] (
- string&& w, const location& l)
+ auto add_word = [&c, &p, &mod, &check_regex_mod, this]
+ (string&& w, const location& l)
{
auto add_merge = [&l, this] (optional<redirect>& r,
const string& w,
@@ -668,11 +702,30 @@ namespace build2
const location ll (get_location (t)); // Line location.
// Keep parsing chunks of the command line until we see one of the
- // "terminators" (newline, exit status comparison, etc).
+ // "terminators" (newline or unknown/unexpected token).
//
location l (ll);
names ns; // Reuse to reduce allocations.
+ bool for_loop (false);
+
+ if (program)
+ {
+ assert (program->type == type::word);
+
+ // Note that here we skip all the parse_program() business since the
+ // program can only be one of the specially-recognized names.
+ //
+ if (program->value == "for")
+ for_loop = true;
+ else
+ assert (false); // Must be specially-recognized program.
+
+ // Save the program name and continue parsing as a command.
+ //
+ add_word (move (program->value), get_location (*program));
+ }
+
for (bool done (false); !done; l = get_location (t))
{
tt = ra.resolve (tt);
@@ -688,6 +741,9 @@ namespace build2
case type::equal:
case type::not_equal:
{
+ if (for_loop)
+ fail (l) << "for-loop exit code cannot be checked";
+
if (!pre_parse_)
check_pending (l);
@@ -718,30 +774,39 @@ namespace build2
}
case type::pipe:
+ if (for_loop)
+ fail (l) << "for-loop must be last command in a pipe";
+ // Fall through.
+
case type::log_or:
case type::log_and:
+ if (for_loop)
+ fail (l) << "command expression involving for-loop";
+ // Fall through.
- case type::in_pass:
- case type::out_pass:
+ case type::clean:
+ if (for_loop)
+ fail (l) << "cleanup in for-loop";
+ // Fall through.
- case type::in_null:
+ case type::out_pass:
case type::out_null:
-
case type::out_trace:
-
case type::out_merge:
-
- case type::in_str:
- case type::in_doc:
case type::out_str:
case type::out_doc:
-
- case type::in_file:
case type::out_file_cmp:
case type::out_file_ovr:
case type::out_file_app:
+ if (for_loop)
+ fail (l) << "output redirect in for-loop";
+ // Fall through.
- case type::clean:
+ case type::in_pass:
+ case type::in_null:
+ case type::in_str:
+ case type::in_doc:
+ case type::in_file:
{
if (pre_parse_)
{
@@ -939,6 +1004,42 @@ namespace build2
next (t, tt);
break;
}
+ case type::lsbrace:
+ {
+ // Recompose the attributes into a single command argument.
+ //
+ assert (!pre_parse_);
+
+ attributes_push (t, tt, true /* standalone */);
+
+ attributes as (attributes_pop ());
+ assert (!as.empty ());
+
+ ostringstream os;
+ names storage;
+ char c ('[');
+ for (const attribute& a: as)
+ {
+ os << c << a.name;
+
+ if (!a.value.null)
+ {
+ os << '=';
+
+ storage.clear ();
+ to_stream (os,
+ reverse (a.value, storage, true /* reduce */),
+ quote_mode::normal,
+ '@');
+ }
+
+ c = ',';
+ }
+ os << ']';
+
+ add_word (os.str (), l);
+ break;
+ }
default:
{
// Bail out if this is one of the unknown tokens.
@@ -1007,11 +1108,12 @@ namespace build2
hd.push_back (
here_doc {
{rd},
- move (end),
- (t.qtype == quote_type::unquoted ||
- t.qtype == quote_type::single),
- move (mod),
- r.intro, move (r.flags)});
+ move (end),
+ (t.qtype == quote_type::unquoted ||
+ t.qtype == quote_type::single),
+ move (mod),
+ r.intro,
+ move (r.flags)});
p = pending::none;
mod.clear ();
@@ -1024,16 +1126,33 @@ namespace build2
bool prog (p == pending::program_first ||
p == pending::program_next);
- // Check if this is the env pseudo-builtin.
+ // Check if this is the env pseudo-builtin or the for-loop.
//
bool env (false);
- if (prog && tt == type::word && t.value == "env")
+ if (prog && tt == type::word)
{
- parsed_env r (parse_env_builtin (t, tt));
- c.cwd = move (r.cwd);
- c.variables = move (r.variables);
- c.timeout = r.timeout;
- env = true;
+ if (t.value == "env")
+ {
+ parsed_env r (parse_env_builtin (t, tt));
+ c.cwd = move (r.cwd);
+ c.variables = move (r.variables);
+ c.timeout = r.timeout;
+ env = true;
+ }
+ else if (t.value == "for")
+ {
+ if (expr.size () > 1)
+ fail (l) << "command expression involving for-loop";
+
+ for_loop = true;
+
+ // Save 'for' as a program name and continue parsing as a
+ // command.
+ //
+ add_word (move (t.value), l);
+ next (t, tt);
+ continue;
+ }
}
// Parse the next chunk as names to get expansion, etc. Note that
@@ -1092,16 +1211,17 @@ namespace build2
// Process what we got.
//
- // First see if this is a value that should not be re-lexed. The
- // long term plan is to only re-lex values of a special type
- // representing a canned command line.
+ // First see if this is a value that should not be re-lexed. We
+ // only re-lex values of the special `cmdline` type that
+ // represents a canned command line.
//
// Otherwise, determine whether anything inside was quoted (note
// that the current token is "next" and is not part of this).
//
- bool q (
- (pr.value && !relex_) ||
- (quoted () - (t.qtype != quote_type::unquoted ? 1 : 0)) != 0);
+ bool lex (
+ pr.value
+ ? pr.type != nullptr && pr.type->is_a<cmdline> ()
+ : (quoted () - (t.qtype != quote_type::unquoted ? 1 : 0)) == 0);
for (name& n: ns)
{
@@ -1115,7 +1235,7 @@ namespace build2
{
diag_record dr (fail (l));
dr << "invalid string value ";
- to_stream (dr.os, n, true /* quote */);
+ to_stream (dr.os, n, quote_mode::normal);
}
// If it is a quoted chunk, then we add the word as is.
@@ -1123,10 +1243,7 @@ namespace build2
// interesting characters (operators plus quotes/escapes),
// then no need to re-lex.
//
- // NOTE: update quoting (script.cxx:to_stream_q()) if adding
- // any new characters.
- //
- if (q || s.find_first_of ("|&<>\'\"\\") == string::npos)
+ if (!lex || !need_cmdline_relex (s))
add_word (move (s), l);
else
{
@@ -1216,9 +1333,16 @@ namespace build2
switch (tt)
{
case type::pipe:
+ if (for_loop)
+ fail (l) << "for-loop must be last command in a pipe";
+ // Fall through.
+
case type::log_or:
case type::log_and:
{
+ if (for_loop)
+ fail (l) << "command expression involving for-loop";
+
// Check that the previous command makes sense.
//
check_command (l, tt != type::pipe);
@@ -1238,30 +1362,11 @@ namespace build2
break;
}
- case type::in_pass:
- case type::out_pass:
-
- case type::in_null:
- case type::out_null:
-
- case type::out_trace:
-
- case type::out_merge:
-
- case type::in_str:
- case type::out_str:
-
- case type::in_file:
- case type::out_file_cmp:
- case type::out_file_ovr:
- case type::out_file_app:
- {
- parse_redirect (move (t), tt, l);
- break;
- }
-
case type::clean:
{
+ if (for_loop)
+ fail (l) << "cleanup in for-loop";
+
parse_clean (t);
break;
}
@@ -1272,6 +1377,27 @@ namespace build2
fail (l) << "here-document redirect in expansion";
break;
}
+
+ case type::out_pass:
+ case type::out_null:
+ case type::out_trace:
+ case type::out_merge:
+ case type::out_str:
+ case type::out_file_cmp:
+ case type::out_file_ovr:
+ case type::out_file_app:
+ if (for_loop)
+ fail (l) << "output redirect in for-loop";
+ // Fall through.
+
+ case type::in_pass:
+ case type::in_null:
+ case type::in_str:
+ case type::in_file:
+ {
+ parse_redirect (move (t), tt, l);
+ break;
+ }
}
}
@@ -1299,7 +1425,7 @@ namespace build2
expr.back ().pipe.push_back (move (c));
}
- return make_pair (move (expr), move (hd));
+ return parse_command_expr_result {move (expr), move (hd), for_loop};
}
parser::parsed_env parser::
@@ -1313,7 +1439,7 @@ namespace build2
// Note that an option name and value can belong to different name
// chunks. That's why we parse the env builtin arguments in the chunking
// mode into the argument/location pair list up to the '--' separator
- // and parse this list into the variable sets/unsets afterwords.
+ // and parse this list into the variable sets/unsets afterwards.
//
// Align the size with environment_vars (double because of -u <var>
// which is two arguments).
@@ -1351,7 +1477,7 @@ namespace build2
{
diag_record dr (fail (l));
dr << "invalid string value ";
- to_stream (dr.os, n, true /* quote */);
+ to_stream (dr.os, n, quote_mode::normal);
}
}
@@ -1537,7 +1663,7 @@ namespace build2
diag_record dr;
dr << fail (l) << "expected exit status instead of ";
- to_stream (dr.os, ns, true /* quote */);
+ to_stream (dr.os, ns, quote_mode::normal);
dr << info << "exit status is an unsigned integer less than 256";
}
@@ -1548,7 +1674,7 @@ namespace build2
void parser::
parse_here_documents (token& t, type& tt,
- pair<command_expr, here_docs>& p)
+ parse_command_expr_result& pr)
{
// enter: newline
// leave: newline
@@ -1556,7 +1682,7 @@ namespace build2
// Parse here-document fragments in the order they were mentioned on
// the command line.
//
- for (here_doc& h: p.second)
+ for (here_doc& h: pr.docs)
{
// Switch to the here-line mode which is like single/double-quoted
// string but recognized the newline as a separator.
@@ -1576,7 +1702,7 @@ namespace build2
{
auto i (h.redirects.cbegin ());
- command& c (p.first[i->expr].pipe[i->pipe]);
+ command& c (pr.expr[i->expr].pipe[i->pipe]);
optional<redirect>& r (i->fd == 0 ? c.in :
i->fd == 1 ? c.out :
@@ -1608,7 +1734,7 @@ namespace build2
//
for (++i; i != h.redirects.cend (); ++i)
{
- command& c (p.first[i->expr].pipe[i->pipe]);
+ command& c (pr.expr[i->expr].pipe[i->pipe]);
optional<redirect>& ir (i->fd == 0 ? c.in :
i->fd == 1 ? c.out :
@@ -2034,6 +2160,8 @@ namespace build2
else if (n == "elif") r = line_type::cmd_elif;
else if (n == "elif!") r = line_type::cmd_elifn;
else if (n == "else") r = line_type::cmd_else;
+ else if (n == "while") r = line_type::cmd_while;
+ else if (n == "for") r = line_type::cmd_for_stream;
else if (n == "end") r = line_type::cmd_end;
else
{
@@ -2064,8 +2192,9 @@ namespace build2
exec_lines (lines::const_iterator i, lines::const_iterator e,
const function<exec_set_function>& exec_set,
const function<exec_cmd_function>& exec_cmd,
- const function<exec_if_function>& exec_if,
- size_t& li,
+ const function<exec_cond_function>& exec_cond,
+ const function<exec_for_function>& exec_for,
+ const iteration_index* ii, size_t& li,
variable_pool* var_pool)
{
try
@@ -2089,6 +2218,73 @@ namespace build2
next (t, tt);
const location ll (get_location (t));
+ // If end is true, then find the flow control construct's end ('end'
+ // line). Otherwise, find the flow control construct's block end
+ // ('end', 'else', etc). If skip is true then increment the command
+ // line index.
+ //
+ auto fcend = [e, &li] (lines::const_iterator j,
+ bool end,
+ bool skip) -> lines::const_iterator
+ {
+ // We need to be aware of nested flow control constructs.
+ //
+ size_t n (0);
+
+ for (++j; j != e; ++j)
+ {
+ line_type lt (j->type);
+
+ if (lt == line_type::cmd_if ||
+ lt == line_type::cmd_ifn ||
+ lt == line_type::cmd_while ||
+ lt == line_type::cmd_for_stream ||
+ lt == line_type::cmd_for_args)
+ ++n;
+
+ // If we are nested then we just wait until we get back
+ // to the surface.
+ //
+ if (n == 0)
+ {
+ switch (lt)
+ {
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_else:
+ if (end) break;
+ // Fall through.
+ case line_type::cmd_end: return j;
+ default: break;
+ }
+ }
+
+ if (lt == line_type::cmd_end)
+ --n;
+
+ if (skip)
+ {
+ // Note that we don't count else, end, and 'for x: ...' as
+ // commands.
+ //
+ switch (lt)
+ {
+ case line_type::cmd:
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_while: ++li; break;
+ default: break;
+ }
+ }
+ }
+
+ assert (false); // Missing end.
+ return e;
+ };
+
switch (lt)
{
case line_type::var:
@@ -2124,7 +2320,10 @@ namespace build2
single = true;
}
- exec_cmd (t, tt, li++, single, ll);
+ exec_cmd (t, tt,
+ ii, li++, single,
+ nullptr /* command_function */,
+ ll);
replay_stop ();
break;
@@ -2140,7 +2339,7 @@ namespace build2
bool take;
if (lt != line_type::cmd_else)
{
- take = exec_if (t, tt, li++, ll);
+ take = exec_cond (t, tt, ii, li++, ll);
if (lt == line_type::cmd_ifn || lt == line_type::cmd_elifn)
take = !take;
@@ -2153,97 +2352,383 @@ namespace build2
replay_stop ();
- // If end is true, then find the 'end' line. Otherwise, find
- // the next if-else line. If skip is true then increment the
- // command line index.
+ // If we are taking this branch then we need to parse all the
+ // lines until the next if-else line and then skip all the lines
+ // until the end (unless we are already at the end).
+ //
+ // Otherwise, we need to skip all the lines until the next
+ // if-else line and then continue parsing.
//
- auto next = [e, &li] (lines::const_iterator j,
- bool end,
- bool skip) -> lines::const_iterator
+ if (take)
+ {
+ // Find block end.
+ //
+ lines::const_iterator j (fcend (i, false, false));
+
+ if (!exec_lines (i + 1, j,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ ii, li,
+ var_pool))
+ return false;
+
+ // Find construct end.
+ //
+ i = j->type == line_type::cmd_end ? j : fcend (j, true, true);
+ }
+ else
+ {
+ // Find block end.
+ //
+ i = fcend (i, false, true);
+
+ if (i->type != line_type::cmd_end)
+ --i; // Continue with this line (e.g., elif or else).
+ }
+
+ break;
+ }
+ case line_type::cmd_while:
+ {
+ // The while-loop construct end. Set on the first iteration.
+ //
+ lines::const_iterator we (e);
+
+ size_t wli (li);
+
+ for (iteration_index wi {1, ii};; wi.index++)
+ {
+ next (t, tt); // Skip to start of command.
+
+ bool exec (exec_cond (t, tt, &wi, li++, ll));
+
+ replay_stop ();
+
+ // If the condition evaluates to true, then we need to parse
+ // all the lines until the end line, prepare for the condition
+ // reevaluation, and re-iterate.
+ //
+ // Otherwise, we need to skip all the lines until the end
+ // line, bail out from the loop, and continue parsing.
+ //
+ if (exec)
{
- // We need to be aware of nested if-else chains.
+ // Find the construct end, if it is not found yet.
//
- size_t n (0);
+ if (we == e)
+ we = fcend (i, true, false);
+
+ if (!exec_lines (i + 1, we,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ &wi, li,
+ var_pool))
+ return false;
+
+ // Prepare for the condition reevaluation.
+ //
+ replay_data (replay_tokens (ln.tokens));
+ next (t, tt);
+ li = wli;
+ }
+ else
+ {
+ // Position to the construct end, always incrementing the
+ // line index (skip is true).
+ //
+ i = fcend (i, true, true);
+ break; // Bail out from the while-loop.
+ }
+ }
+
+ break;
+ }
+ case line_type::cmd_for_stream:
+ {
+ // The for-loop construct end. Set on the first iteration.
+ //
+ lines::const_iterator fe (e);
- for (++j; j != e; ++j)
+ // Let's "wrap up" all the required data into a single object
+ // to rely on the "small function object" optimization.
+ //
+ struct loop_data
+ {
+ lines::const_iterator i;
+ lines::const_iterator e;
+ const function<exec_set_function>& exec_set;
+ const function<exec_cmd_function>& exec_cmd;
+ const function<exec_cond_function>& exec_cond;
+ const function<exec_for_function>& exec_for;
+ const iteration_index* ii;
+ size_t& li;
+ variable_pool* var_pool;
+ decltype (fcend)& fce;
+ lines::const_iterator& fe;
+ } ld {i, e,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ ii, li,
+ var_pool,
+ fcend,
+ fe};
+
+ function<command_function> cf (
+ [&ld, this]
+ (environment& env,
+ const strings& args,
+ auto_fd in,
+ pipe_command* pipe,
+ const optional<deadline>& dl,
+ const location& ll)
+ {
+ namespace cli = build2::build::cli;
+
+ try
{
- line_type lt (j->type);
+ // Parse arguments.
+ //
+ cli::vector_scanner scan (args);
+ for_options ops (scan);
+
+ // Note: diagnostics consistent with the set builtin.
+ //
+ if (ops.whitespace () && ops.newline ())
+ fail (ll) << "for: both -n|--newline and "
+ << "-w|--whitespace specified";
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
- ++n;
+ if (!scan.more ())
+ fail (ll) << "for: missing variable name";
- // If we are nested then we just wait until we get back
- // to the surface.
+ string vname (scan.next ());
+ if (vname.empty ())
+ fail (ll) << "for: empty variable name";
+
+ // Detect patterns analogous to parse_variable_name() (so
+ // we diagnose `for x[string]`).
+ //
+ if (vname.find_first_of ("[*?") != string::npos)
+ fail (ll) << "for: expected variable name instead of "
+ << vname;
+
+ // Let's also diagnose the `... | for x:...` misuse which
+ // can probably be quite common.
//
- if (n == 0)
+ if (vname.find (':') != string::npos)
+ fail (ll) << "for: ':' after variable name";
+
+ string attrs;
+ if (scan.more ())
{
- switch (lt)
- {
- case line_type::cmd_elif:
- case line_type::cmd_elifn:
- case line_type::cmd_else:
- if (end) break;
- // Fall through.
- case line_type::cmd_end: return j;
- default: break;
- }
+ attrs = scan.next ();
+
+ if (attrs.empty ())
+ fail (ll) << "for: empty variable attributes";
+
+ if (scan.more ())
+ fail (ll) << "for: unexpected argument '"
+ << scan.next () << "'";
}
- if (lt == line_type::cmd_end)
- --n;
+ // Since the command pipe is parsed, we can stop
+ // replaying. Note that we should do this before calling
+ // exec_lines() for the loop body. Also note that we
+ // should increment the line index before that.
+ //
+ replay_stop ();
+
+ size_t fli (++ld.li);
+ iteration_index fi {1, ld.ii};
- if (skip)
+ // Let's "wrap up" all the required data into the single
+ // object to rely on the "small function object"
+ // optimization.
+ //
+ struct
{
- // Note that we don't count else and end as commands.
- //
- switch (lt)
+ loop_data& ld;
+ environment& env;
+ const string& vname;
+ const string& attrs;
+ const location& ll;
+ size_t fli;
+ iteration_index& fi;
+
+ } d {ld, env, vname, attrs, ll, fli, fi};
+
+ function<void (string&&)> f (
+ [&d, this] (string&& s)
{
- case line_type::cmd:
- case line_type::cmd_if:
- case line_type::cmd_ifn:
- case line_type::cmd_elif:
- case line_type::cmd_elifn: ++li; break;
- default: break;
- }
- }
+ loop_data& ld (d.ld);
+
+ ld.li = d.fli;
+
+ // Don't move from the variable name since it is used
+ // on each iteration.
+ //
+ d.env.set_variable (d.vname,
+ names {name (move (s))},
+ d.attrs,
+ d.ll);
+
+ // Find the construct end, if it is not found yet.
+ //
+ if (ld.fe == ld.e)
+ ld.fe = ld.fce (ld.i, true, false);
+
+ if (!exec_lines (ld.i + 1, ld.fe,
+ ld.exec_set,
+ ld.exec_cmd,
+ ld.exec_cond,
+ ld.exec_for,
+ &d.fi, ld.li,
+ ld.var_pool))
+ {
+ throw exit (true);
+ }
+
+ d.fi.index++;
+ });
+
+ read (move (in),
+ !ops.newline (), ops.newline (), ops.exact (),
+ f,
+ pipe,
+ dl,
+ ll,
+ "for");
+ }
+ catch (const cli::exception& e)
+ {
+ fail (ll) << "for: " << e;
}
+ });
- assert (false); // Missing end.
- return e;
- };
+ exec_cmd (t, tt, ii, li, false /* single */, cf, ll);
- // If we are taking this branch then we need to parse all the
- // lines until the next if-else line and then skip all the
- // lines until the end (unless next is already end).
+ // Position to construct end.
//
- // Otherwise, we need to skip all the lines until the next
- // if-else line and then continue parsing.
+ i = (fe != e ? fe : fcend (i, true, true));
+
+ break;
+ }
+ case line_type::cmd_for_args:
+ {
+ // Parse the variable name.
//
- if (take)
+ next (t, tt);
+
+ assert (tt == type::word && t.qtype == quote_type::unquoted);
+
+ string vn (move (t.value));
+
+ // Enter the variable into the pool if this is not done during
+ // the script parsing (see the var line type handling for
+ // details).
+ //
+ const variable* var (ln.var);
+
+ if (var == nullptr)
{
- // Next if-else.
- //
- lines::const_iterator j (next (i, false, false));
- if (!exec_lines (i + 1, j,
- exec_set, exec_cmd, exec_if,
- li,
- var_pool))
- return false;
+ assert (var_pool != nullptr);
- i = j->type == line_type::cmd_end ? j : next (j, true, true);
+ var = &var_pool->insert (move (vn));
}
- else
+
+ // Parse the potential element attributes and skip the colon.
+ //
+ next_with_attributes (t, tt);
+ attributes_push (t, tt);
+
+ assert (tt == type::colon);
+
+ // Save element attributes so that we can inject them on each
+ // iteration.
+ //
+ attributes val_attrs (attributes_pop ());
+
+ // Parse the value with the potential attributes.
+ //
+ // Note that we don't really need to change the mode since we
+ // are replaying the tokens.
+ //
+ value val;
+ apply_value_attributes (nullptr /* variable */,
+ val,
+ parse_variable_line (t, tt),
+ type::assign);
+
+ replay_stop ();
+
+ // If the value is not NULL then iterate over its elements,
+ // assigning them to the for-loop variable, and parsing all the
+ // construct lines afterwards. Then position to the end line of
+ // the construct and continue parsing.
+
+ // The for-loop construct end. Set on the first iteration.
+ //
+ lines::const_iterator fe (e);
+
+ if (val)
{
- i = next (i, false, true);
- if (i->type != line_type::cmd_end)
- --i; // Continue with this line (e.g., elif or else).
+ // If this value is a vector, then save its element type so
+ // that we can typify each element below.
+ //
+ const value_type* etype (nullptr);
+
+ if (val.type != nullptr)
+ {
+ etype = val.type->element_type;
+
+ // Note that here we don't want to be reducing empty simple
+ // values to empty lists.
+ //
+ untypify (val, false /* reduce */);
+ }
+
+ size_t fli (li);
+ iteration_index fi {1, ii};
+ names& ns (val.as<names> ());
+
+ for (auto ni (ns.begin ()), ne (ns.end ()); ni != ne; ++ni)
+ {
+ li = fli;
+
+ // Set the variable value.
+ //
+ bool pair (ni->pair);
+ names n;
+ n.push_back (move (*ni));
+ if (pair) n.push_back (move (*++ni));
+ value v (move (n)); // Untyped.
+
+ if (etype != nullptr)
+ typify (v, *etype, var);
+
+ exec_for (*var, move (v), val_attrs, ll);
+
+ // Find the construct end, if it is not found yet.
+ //
+ if (fe == e)
+ fe = fcend (i, true, false);
+
+ if (!exec_lines (i + 1, fe,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ &fi, li,
+ var_pool))
+ return false;
+
+ fi.index++;
+ }
}
+ // Position to construct end.
+ //
+ i = (fe != e ? fe : fcend (i, true, true));
+
break;
}
case line_type::cmd_end:
{
assert (false);
+ break;
}
}
}
@@ -2278,7 +2763,7 @@ namespace build2
}
parser::parsed_doc::
- parsed_doc (parsed_doc&& d)
+ parsed_doc (parsed_doc&& d) noexcept
: re (d.re), end_line (d.end_line), end_column (d.end_column)
{
if (re)
diff --git a/libbuild2/script/parser.hxx b/libbuild2/script/parser.hxx
index 6e24d37..cadf909 100644
--- a/libbuild2/script/parser.hxx
+++ b/libbuild2/script/parser.hxx
@@ -25,7 +25,7 @@ namespace build2
class parser: protected build2::parser
{
public:
- parser (context& c, bool relex): build2::parser (c), relex_ (relex) {}
+ parser (context& c): build2::parser (c) {}
// Helpers.
//
@@ -42,6 +42,15 @@ namespace build2
using build2::parser::apply_value_attributes;
+ // Return true if a command line element needs to be re-lexed.
+ //
+ // Specifically, it needs to be re-lexed if it contains any of the
+ // special characters (|<>&), quotes ("') or effective escape sequences
+ // (\", \', \\).
+ //
+ static bool
+ need_cmdline_relex (const string&);
+
// Commonly used parsing functions. Issue diagnostics and throw failed
// in case of an error.
//
@@ -88,15 +97,34 @@ namespace build2
};
using here_docs = vector<here_doc>;
- pair<command_expr, here_docs>
- parse_command_expr (token&, token_type&, const redirect_aliases&);
+ struct parse_command_expr_result
+ {
+ command_expr expr; // Single pipe for the for-loop.
+ here_docs docs;
+ bool for_loop = false;
+
+ parse_command_expr_result () = default;
+
+ parse_command_expr_result (command_expr&& e,
+ here_docs&& h,
+ bool f)
+ : expr (move (e)), docs (move (h)), for_loop (f) {}
+ };
+
+ // Pass the first special command program name (token_type::word) if it
+ // is already pre-parsed.
+ //
+ parse_command_expr_result
+ parse_command_expr (token&, token_type&,
+ const redirect_aliases&,
+ optional<token>&& program = nullopt);
command_exit
parse_command_exit (token&, token_type&);
void
parse_here_documents (token&, token_type&,
- pair<command_expr, here_docs>&);
+ parse_command_expr_result&);
struct parsed_doc
{
@@ -112,7 +140,7 @@ namespace build2
parsed_doc (string, uint64_t line, uint64_t column);
parsed_doc (regex_lines&&, uint64_t line, uint64_t column);
- parsed_doc (parsed_doc&&); // Note: move constuctible-only type.
+ parsed_doc (parsed_doc&&) noexcept; // Note: move constructible-only type.
~parsed_doc ();
};
@@ -126,6 +154,11 @@ namespace build2
// the first two tokens. Use the specified lexer mode to peek the second
// token.
//
+ // Always return the cmd_for_stream line type for the for-loop. Note
+ // that the for-loop form cannot be detected easily, based on the first
+ // two tokens. Also note that the detection can be specific for the
+ // script implementation (custom lexing mode, special variables, etc).
+ //
line_type
pre_parse_line_start (token&, token_type&, lexer_mode);
@@ -150,19 +183,26 @@ namespace build2
protected:
// Return false if the execution of the script should be terminated with
// the success status (e.g., as a result of encountering the exit
- // builtin). For unsuccessful termination the failed exception is thrown.
+ // builtin). For unsuccessful termination the failed exception is
+ // thrown.
//
using exec_set_function = void (const variable&,
token&, token_type&,
const location&);
using exec_cmd_function = void (token&, token_type&,
- size_t li,
+ const iteration_index*, size_t li,
bool single,
+ const function<command_function>&,
const location&);
- using exec_if_function = bool (token&, token_type&,
- size_t li,
+ using exec_cond_function = bool (token&, token_type&,
+ const iteration_index*, size_t li,
+ const location&);
+
+ using exec_for_function = void (const variable&,
+ value&&,
+ const attributes& value_attrs,
const location&);
// If a parser implementation doesn't pre-enter variables into a pool
@@ -174,8 +214,9 @@ namespace build2
exec_lines (lines::const_iterator b, lines::const_iterator e,
const function<exec_set_function>&,
const function<exec_cmd_function>&,
- const function<exec_if_function>&,
- size_t& li,
+ const function<exec_cond_function>&,
+ const function<exec_for_function>&,
+ const iteration_index*, size_t& li,
variable_pool* = nullptr);
// Customization hooks.
@@ -200,6 +241,13 @@ namespace build2
// something that requires re-lexing, for example `foo|bar`, which won't
// be easy to translate but which are handled by the parser.
//
+ // Note that the chunk could be of the special cmdline type in which
+ // case the names may need to be "preprocessed" (at least unquoted or
+ // potentially fully re-lexed) before being analyzed/consumed. Note also
+ // that in this case any names left unconsumed must remain of the
+ // cmdline type.
+ //
+ //
// During the pre-parsing phase the returned process path and names
// (that must still be parsed) are discarded. The main purpose of the
// call is to allow implementations to perform static script analysis,
@@ -229,7 +277,6 @@ namespace build2
size_t replay_quoted_;
protected:
- bool relex_;
lexer* lexer_ = nullptr;
};
}
diff --git a/libbuild2/script/regex.cxx b/libbuild2/script/regex.cxx
index 3f796b6..11ff8a1 100644
--- a/libbuild2/script/regex.cxx
+++ b/libbuild2/script/regex.cxx
@@ -75,15 +75,29 @@ namespace build2
string::traits_type::find (ex, 4, c) != nullptr)));
}
+ template <typename S>
+ static inline const char_string*
+ find_or_insert (line_pool& p, S&& s)
+ {
+ auto i (find (p.strings.begin (), p.strings.end (), s));
+ if (i == p.strings.end ())
+ {
+ p.strings.push_front (forward<S> (s));
+ i = p.strings.begin ();
+ }
+
+ return &*i;
+ }
+
line_char::
line_char (const char_string& s, line_pool& p)
- : line_char (&(*p.strings.emplace (s).first))
+ : line_char (find_or_insert (p, s))
{
}
line_char::
line_char (char_string&& s, line_pool& p)
- : line_char (&(*p.strings.emplace (move (s)).first))
+ : line_char (find_or_insert (p, move (s)))
{
}
diff --git a/libbuild2/script/regex.hxx b/libbuild2/script/regex.hxx
index e043c99..3c49b31 100644
--- a/libbuild2/script/regex.hxx
+++ b/libbuild2/script/regex.hxx
@@ -9,7 +9,6 @@
#include <locale>
#include <string> // basic_string
#include <type_traits> // make_unsigned, enable_if, is_*
-#include <unordered_set>
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -59,7 +58,12 @@ namespace build2
// Note that we assume the pool can be moved without invalidating
// pointers to any already pooled entities.
//
- std::unordered_set<char_string> strings;
+ // Note that we used to use unordered_set for strings but (1) there is
+ // no general expectation that we will have many identical strings and
+ // (2) the number of strings is not expected to be large. So that felt
+ // like an overkill and we now use a list with linear search.
+ //
+ std::list<char_string> strings;
std::list<char_regex> regexes;
};
@@ -267,8 +271,8 @@ namespace build2
template <typename T>
struct line_char_cmp
: public std::enable_if<std::is_integral<T>::value ||
- (std::is_enum<T>::value &&
- !std::is_same<T, char_flags>::value)> {};
+ (std::is_enum<T>::value &&
+ !std::is_same<T, char_flags>::value)> {};
template <typename T, typename = typename line_char_cmp<T>::type>
bool
@@ -466,10 +470,10 @@ namespace std
is (mask m, char_type c) const
{
return m ==
- (c.type () == line_type::special && c.special () >= 0 &&
- build2::digit (static_cast<char> (c.special ()))
- ? digit
- : 0);
+ (c.type () == line_type::special && c.special () >= 0 &&
+ build2::digit (static_cast<char> (c.special ()))
+ ? digit
+ : 0);
}
const char_type*
diff --git a/libbuild2/script/regex.test.cxx b/libbuild2/script/regex.test.cxx
index 9ec2432..6659d39 100644
--- a/libbuild2/script/regex.test.cxx
+++ b/libbuild2/script/regex.test.cxx
@@ -4,6 +4,9 @@
#include <regex>
#include <type_traits> // is_*
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
#include <libbuild2/script/regex.hxx>
#undef NDEBUG
@@ -15,6 +18,8 @@ using namespace build2::script::regex;
int
main ()
{
+ build2::init_process ();
+
using lc = line_char;
using ls = line_string;
using lr = line_regex;
diff --git a/libbuild2/script/run.cxx b/libbuild2/script/run.cxx
index f3b5cad..6d73a7e 100644
--- a/libbuild2/script/run.cxx
+++ b/libbuild2/script/run.cxx
@@ -9,7 +9,8 @@
# include <libbutl/win32-utility.hxx> // DBG_TERMINATE_PROCESS
#endif
-#include <ios> // streamsize
+#include <ios> // streamsize
+#include <cstring> // strchr()
#include <libbutl/regex.hxx>
#include <libbutl/builtin.hxx>
@@ -26,6 +27,8 @@
using namespace std;
using namespace butl;
+namespace cli = build2::build::cli;
+
namespace build2
{
namespace script
@@ -757,6 +760,31 @@ namespace build2
output_info (d, op);
}
+ // Note that a here-document regex without ':' modifier can never
+ // match an empty output since it always contains the trailing empty
+ // line-char. This can be confusing, as for example while testing a
+ // program which can print some line or nothing with the following
+ // test:
+ //
+ // $* >>~%EOO%
+ // %(
+ // Hello, World!
+ // %)?
+ // EOO
+ //
+ // Note that the above line-regex contains 4 line-chars and will never
+ // match empty output.
+ //
+ // Thus, let's complete an empty output with an empty line-char for
+ // such a regex, so it may potentially match.
+ //
+ if (ls.empty () &&
+ rd.type == redirect_type::here_doc_regex &&
+ rd.modifiers ().find (':') == string::npos)
+ {
+ ls += line_char (string (), regex.pool);
+ }
+
// Match the output with the regex.
//
// Note that we don't distinguish between the line_regex and
@@ -782,7 +810,7 @@ namespace build2
// regex to file for troubleshooting regardless of whether we print
// the diagnostics or not. We, however, register it for cleanup in the
// later case (the expression may still succeed, we can be evaluating
- // the if condition, etc).
+ // the flow control construct condition, etc).
//
optional<path> rp;
if (env.temp_dir_keep)
@@ -944,200 +972,712 @@ namespace build2
: path (c.program.recall_string ());
}
- // The set pseudo-builtin: set variable from the stdin input.
+ // Read the stream content into a string, optionally splitting the input
+ // data at whitespaces or newlines in which case return one, potentially
+ // incomplete, substring at a time (see the set builtin options for the
+ // splitting semantics). Throw io_error on the underlying OS error.
//
- // set [-e|--exact] [(-n|--newline)|(-w|--whitespace)] [<attr>] <var>
+ // On POSIX expects the stream to be non-blocking and its exception mask
+ // to have at least badbit. On Windows can also handle a blocking stream.
//
- static void
- set_builtin (environment& env,
- const strings& args,
- auto_fd in,
+ // Note that on Windows we can only turn pipe file descriptors into the
+ // non-blocking mode. Thus, we have no choice but to read from descriptors
+ // of other types synchronously there. That implies that we can
+ // potentially block indefinitely reading a file and missing a deadline on
+ // Windows. Note though, that the user can normally rewrite the command,
+ // for example, `set foo <<<file` with `cat file | set foo` to avoid this
+ // problem.
+ //
+ class stream_reader
+ {
+ public:
+ stream_reader (ifdstream&, bool whitespace, bool newline, bool exact);
+
+ // Read next substring. Return true if the substring has been read or
+ // false if it should be called again once the stream has more data to
+ // read. Also return true on eof (in which case no substring is read).
+ // The string must be empty on the first call. Throw ios::failure on the
+ // underlying OS error.
+ //
+ // Note that there could still be data to read in the stream's buffer
+ // (as opposed to file descriptor) after this function returns true and
+ // you should be careful not to block on fdselect() in this case. The
+ // recommended usage pattern is similar to that of
+ // butl::getline_non_blocking(). The only difference is that
+ // ifdstream::eof() needs to be used instead of butl::eof() since this
+ // function doesn't set failbit and only sets eofbit after the last
+ // substring is returned.
+ //
+ bool
+ next (string&);
+
+ private:
+ ifdstream& is_;
+ bool whitespace_;
+ bool newline_;
+ bool exact_;
+
+ bool empty_ = true; // Set to false after the first character is read.
+ };
+
+ stream_reader::
+ stream_reader (ifdstream& is, bool ws, bool nl, bool ex)
+ : is_ (is),
+ whitespace_ (ws),
+ newline_ (nl),
+ exact_ (ex)
+ {
+ }
+
+ bool stream_reader::
+ next (string& ss)
+ {
#ifndef _WIN32
- bool,
+ assert ((is_.exceptions () & ifdstream::badbit) != 0 && !is_.blocking ());
#else
- bool pipe,
+ assert ((is_.exceptions () & ifdstream::badbit) != 0);
#endif
- const optional<deadline>& dl,
- const command& deadline_cmd,
- const location& ll)
- {
- try
+
+ fdstreambuf& sb (*static_cast<fdstreambuf*> (is_.rdbuf ()));
+
+ // Return the number of characters available in the stream buffer's get
+ // area, which can be:
+ //
+ // -1 -- EOF.
+ // 0 -- no data since blocked before encountering more data/EOF.
+ // >0 -- there is some data.
+ //
+ // Note that on Windows if the stream is blocking, then the lambda calls
+ // underflow() instead of returning 0.
+ //
+ // @@ Probably we can call underflow() only once per the next() call,
+ // emulating the 'no data' case. This will allow the caller to
+ // perform some housekeeping (reading other streams, checking for the
+ // deadline, etc). But let's keep it simple for now.
+ //
+ auto avail = [&sb] () -> streamsize
{
- // Parse arguments.
+ // Note that here we reasonably assume that any failure in in_avail()
+ // will lead to badbit and thus an exception (see showmanyc()).
//
- cli::vector_scanner scan (args);
- set_options ops (scan);
+ streamsize r (sb.in_avail ());
- if (ops.whitespace () && ops.newline ())
- fail (ll) << "set: both -n|--newline and -w|--whitespace specified";
+#ifdef _WIN32
+ if (r == 0 && sb.blocking ())
+ {
+ if (sb.underflow () == ifdstream::traits_type::eof ())
+ return -1;
- if (!scan.more ())
- fail (ll) << "set: missing variable name";
+ r = sb.in_avail ();
- string a (scan.next ()); // Either attributes or variable name.
- const string* ats (!scan.more () ? nullptr : &a);
- string vname (!scan.more () ? move (a) : scan.next ());
+ assert (r != 0); // We wouldn't be here otherwise.
+ }
+#endif
- if (scan.more ())
- fail (ll) << "set: unexpected argument '" << scan.next () << "'";
+ return r;
+ };
- if (ats != nullptr && ats->empty ())
- fail (ll) << "set: empty variable attributes";
+ // Read until blocked (0), EOF (-1) or encounter the delimiter.
+ //
+ streamsize s;
+ while ((s = avail ()) > 0)
+ {
+ if (empty_)
+ empty_ = false;
- if (vname.empty ())
- fail (ll) << "set: empty variable name";
+ const char* p (sb.gptr ());
+ size_t n (sb.egptr () - p);
- // Read out the stream content into a string while keeping an eye on
- // the deadline. Then parse it according to the split mode.
+ // We move p and bump by the number of consumed characters.
//
- string s;
+ auto bump = [&sb, &p] () {sb.gbump (static_cast<int> (p - sb.gptr ()));};
+
+ if (whitespace_) // The whitespace mode.
{
- ifdstream cin;
+ const char* sep (" \n\r\t");
- // If the execution deadline is specified, then turn the stream into
- // the non-blocking mode reading its content in chunks and with a
- // single operation otherwise. If the specified deadline is reached
- // while reading the stream, then bail out for the successful
- // deadline and fail otherwise. Note that in the former case the
- // variable value will be incomplete, but we leave it to the caller
- // to handle that.
+ // Skip the whitespaces.
//
- // Note that on Windows we can only turn pipe file descriptors into
- // the non-blocking mode. Thus, we have no choice but to read from
- // descriptors of other types synchronously there. That implies that
- // we can potentially block indefinitely reading a file and missing
- // the deadline on Windows. Note though, that the user can always
- // rewrite `set foo <<<file` with `cat file | set foo` to avoid this
- // problem.
+ for (; n != 0 && strchr (sep, *p) != nullptr; ++p, --n) ;
+
+ // If there are any non-whitespace characters in the get area, then
+ // append them to the resulting substring until a whitespace
+ // character is encountered.
//
-#ifndef _WIN32
- if (dl)
-#else
- if (dl && pipe)
-#endif
+ if (n != 0)
{
- fdselect_set fds {in.get ()};
- cin.open (move (in), fdstream_mode::non_blocking);
+ // Append the non-whitespace characters.
+ //
+ for (char c; n != 0 && strchr (sep, c = *p) == nullptr; ++p, --n)
+ ss += c;
- const timestamp& dlt (dl->value);
+ // If a separator is encountered, then consume it, bump, and
+ // return the substring.
+ //
+ if (n != 0)
+ {
+ ++p; --n; // Consume the separator character.
+
+ bump ();
+ return true;
+ }
+
+ // Fall through.
+ }
- for (char buf[4096];; )
+ bump (); // Bump and continue reading.
+ }
+ else // The newline or no-split mode.
+ {
+ // Note that we don't collapse multiple consecutive newlines.
+ //
+ // Note also that we always sanitize CRs, so in the no-split mode we
+ // need to loop rather than consume the whole get area at once.
+ //
+ while (n != 0)
+ {
+ // Append the characters until the newline character or the end of
+ // the get area is encountered.
+ //
+ char c;
+ for (; n != 0 && (c = *p) != '\n'; ++p, --n)
+ ss += c;
+
+ // If the newline character is encountered, then sanitize CRs and
+ // return the substring in the newline mode and continue
+ // parsing/reading otherwise.
+ //
+ if (n != 0)
{
- timestamp now (system_clock::now ());
+ // Strip the trailing CRs that can appear while, for example,
+ // cross-testing Windows target or as a part of msvcrt junk
+ // production (see above).
+ //
+ while (!ss.empty () && ss.back () == '\r')
+ ss.pop_back ();
+
+ assert (c == '\n');
- if (dlt <= now || ifdselect (fds, dlt - now) == 0)
+ ++p; --n; // Consume the newline character.
+
+ if (newline_)
{
- if (!dl->success)
- fail (ll) << cmd_path (deadline_cmd)
- << " terminated: execution timeout expired";
- else
- break;
+ bump ();
+ return true;
}
- streamsize n (cin.readsome (buf, sizeof (buf)));
-
- // Bail out if eos is reached.
- //
- if (n == 0)
- break;
+ ss += c; // Append newline to the resulting string.
- s.append (buf, n);
+ // Fall through.
}
+
+ bump (); // Bump and continue parsing/reading.
}
- else
+ }
+ }
+
+ // Here s can be:
+ //
+ // -1 -- EOF.
+ // 0 -- blocked before encountering delimiter/EOF.
+ //
+ // Note: >0 (encountered the delimiter) case is handled in-place.
+ //
+ assert (s == -1 || s == 0);
+
+ if (s == -1)
+ {
+ // Return the last substring if it is not empty or it is the trailing
+ // "blank" in the exact mode. Otherwise, set eofbit for the stream
+ // indicating that we are done.
+ //
+ if (!ss.empty () || (exact_ && !empty_))
+ {
+ // Also, strip the trailing newline character, if present, in the
+ // no-split no-exact mode.
+ //
+ if (!ss.empty () && ss.back () == '\n' && // Trailing newline.
+ !newline_ && !whitespace_ && !exact_) // No-split no-exact mode.
{
- cin.open (move (in));
- s = cin.read_text ();
+ ss.pop_back ();
}
- cin.close ();
+ exact_ = false; // Make sure we will set eofbit on the next call.
}
+ else
+ is_.setstate (ifdstream::eofbit);
+ }
- // Parse the stream content into the variable value.
+ return s == -1;
+ }
+
+ // Stack-allocated linked list of information about the running pipeline
+ // processes and builtins.
+ //
+ // Note: constructed incrementally.
+ //
+ struct pipe_command
+ {
+ // Initially NULL. Set to the address of the process or builtin object
+ // when it is created. Reset back to NULL when the respective
+ // process/builtin is executed and its exit status is collected (see
+ // complete_pipe() for details).
+ //
+ // We could probably use a union here, but let's keep it simple for now
+ // (at least one is NULL).
+ //
+ process* proc = nullptr;
+ builtin* bltn = nullptr;
+
+ const command& cmd;
+ const cstrings* args = nullptr;
+ const optional<deadline>& dl;
+
+ diag_buffer dbuf;
+
+ bool terminated = false; // True if this command has been terminated.
+
+ // True if this command has been terminated but we failed to read out
+ // its stdout and/or stderr streams in the reasonable timeframe (2
+ // seconds) after the termination.
+ //
+ // Note that this may happen if there is a still running child process
+ // of the terminated command which has inherited the parent's stdout and
+ // stderr file descriptors.
+ //
+ bool unread_stdout = false;
+ bool unread_stderr = false;
+
+ // Only for diagnostics.
+ //
+ const location& loc;
+ const path* isp = nullptr; // stdin cache.
+ const path* osp = nullptr; // stdout cache.
+ const path* esp = nullptr; // stderr cache.
+
+ pipe_command* prev; // NULL for the left-most command.
+ pipe_command* next; // Left-most command for the right-most command.
+
+ pipe_command (context& x,
+ const command& c,
+ const optional<deadline>& d,
+ const location& l,
+ pipe_command* p,
+ pipe_command* f)
+ : cmd (c), dl (d), dbuf (x), loc (l), prev (p), next (f) {}
+ };
+
+ // Wait for a process/builtin to complete until the deadline is reached
+ // and return the underlying wait function result (optional<something>).
+ //
+ template<typename P>
+ static auto
+ timed_wait (P& p, const timestamp& deadline) -> decltype(p.try_wait ())
+ {
+ timestamp now (system_clock::now ());
+ return deadline > now ? p.timed_wait (deadline - now) : p.try_wait ();
+ }
+
+ // Terminate the pipeline processes starting from the specified one and up
+ // to the leftmost one and then kill those which didn't terminate after 2
+ // seconds.
+ //
+ // After that wait for the pipeline builtins completion. Since their
+ // standard streams should no longer be written to or read from by any
+ // process, that shouldn't take long. If, however, they won't be able to
+ // complete in 2 seconds, then some of them have probably stuck while
+ // communicating with a slow filesystem device or similar, and since we
+ // currently have no way to terminate asynchronous builtins, we have no
+ // choice but to abort.
+ //
+ // Issue diagnostics and fail if something goes wrong, but still try to
+ // terminate/kill all the pipe processes.
+ //
+ static void
+ term_pipe (pipe_command* pc, tracer& trace)
+ {
+ auto prog = [] (pipe_command* c) {return cmd_path (c->cmd);};
+
+ // Terminate processes gracefully and set the terminate flag for the
+ // pipe commands.
+ //
+ diag_record dr;
+ for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ {
+ if (process* p = c->proc)
+ try
+ {
+ l5 ([&]{trace (c->loc) << "terminating: " << c->cmd;});
+
+ p->term ();
+ }
+ catch (const process_error& e)
+ {
+ // If unable to terminate the process for any reason (the process is
+ // exiting on Windows, etc) then just ignore this, postponing the
+ // potential failure till the kill() call.
+ //
+ l5 ([&]{trace (c->loc) << "unable to terminate " << prog (c)
+ << ": " << e;});
+ }
+
+ c->terminated = true;
+ }
+
+ // Wait a bit for the processes to terminate and kill the remaining
+ // ones.
+ //
+ timestamp dl (system_clock::now () + chrono::seconds (2));
+
+ for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ {
+ if (process* p = c->proc)
+ try
+ {
+ l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
+
+ if (!timed_wait (*p, dl))
+ {
+ l5 ([&]{trace (c->loc) << "killing: " << c->cmd;});
+
+ p->kill ();
+ p->wait ();
+ }
+ }
+ catch (const process_error& e)
+ {
+ dr << fail (c->loc) << "unable to wait/kill " << prog (c) << ": "
+ << e;
+ }
+ }
+
+ // Wait a bit for the builtins to complete and abort if any remain
+ // running.
+ //
+ dl = system_clock::now () + chrono::seconds (2);
+
+ for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ {
+ if (builtin* b = c->bltn)
+ try
+ {
+ l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
+
+ if (!timed_wait (*b, dl))
+ {
+ error (c->loc) << prog (c) << " builtin hanged, aborting";
+ terminate (false /* trace */);
+ }
+ }
+ catch (const system_error& e)
+ {
+ dr << fail (c->loc) << "unable to wait for " << prog (c) << ": "
+ << e;
+ }
+ }
+ }
+
+ void
+ read (auto_fd&& in,
+ bool whitespace, bool newline, bool exact,
+ const function<void (string&&)>& cf,
+ pipe_command* pipeline,
+ const optional<deadline>& dl,
+ const location& ll,
+ const char* what)
+ {
+ tracer trace ("script::stream_read");
+
+ // Note: stays blocking on Windows if the descriptor is not of the pipe
+ // type.
+ //
+#ifndef _WIN32
+ fdstream_mode m (fdstream_mode::non_blocking);
+#else
+ fdstream_mode m (pipeline != nullptr
+ ? fdstream_mode::non_blocking
+ : fdstream_mode::blocking);
+#endif
+
+ ifdstream is (move (in), m, ifdstream::badbit);
+ stream_reader sr (is, whitespace, newline, exact);
+
+ fdselect_set fds;
+ for (pipe_command* c (pipeline); c != nullptr; c = c->prev)
+ {
+ diag_buffer& b (c->dbuf);
+
+ if (b.is.is_open ())
+ fds.emplace_back (b.is.fd (), c);
+ }
+
+ fds.emplace_back (is.fd ());
+ fdselect_state& ist (fds.back ());
+ size_t unread (fds.size ());
+
+ optional<timestamp> dlt (dl ? dl->value : optional<timestamp> ());
+
+ // If there are some left-hand side processes/builtins running, then
+ // terminate them and, if there are unread stdout/stderr file
+ // descriptors, then increase the deadline by another 2 seconds and
+ // return true. In this case the term() should be called again upon
+ // reaching the timeout. Otherwise return false. If there are no
+ // left-hand side processes/builtins running, then fail straight away.
+ //
+ // Note that in the former case the further reading will be performed
+ // with the adjusted timeout. We assume that this timeout is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_command for the possible reasons), then term() needs to be
+ // called for the second time and the reading should be interrupted
+ // afterwards.
+ //
+ auto term = [&dlt, pipeline, &fds, &ist, &is, &unread,
+ &trace, &ll, what, terminated = false] () mutable -> bool
+ {
+ // Can only be called if the deadline is specified.
//
- names ns;
+ assert (dlt);
+
+ if (pipeline == nullptr)
+ fail (ll) << what << " terminated: execution timeout expired";
- if (!s.empty ())
+ if (!terminated)
{
- if (ops.whitespace ()) // The whitespace mode.
+ // Terminate the pipeline and adjust the deadline.
+ //
+
+ // Note that if we are still reading the stream and it's a builtin
+ // stdout, then we need to close it before terminating the pipeline.
+ // Not doing so can result in blocking this builtin on the write
+ // operation and thus aborting the build2 process (see term_pipe()
+ // for details).
+ //
+ // Should we do the same for all the pipeline builtins' stderr
+ // streams? No we don't, since the builtin diagnostics is assumed to
+ // always fit the pipe buffer (see libbutl/builtin.cxx for details).
+ // Thus, we will leave them open to fully read out the diagnostics.
+ //
+ if (ist.fd != nullfd && pipeline->bltn != nullptr)
{
- // Note that we collapse multiple consecutive whitespaces.
- //
- for (size_t p (0); p != string::npos; )
+ try
{
- // Skip the whitespaces.
- //
- const char* sep (" \n\r\t");
- size_t b (s.find_first_not_of (sep, p));
+ is.close ();
+ }
+ catch (const io_error&)
+ {
+ // Not much we can do here.
+ }
- if (b != string::npos) // Word beginning.
- {
- size_t e (s.find_first_of (sep, b)); // Find the word end.
- ns.emplace_back (string (s, b, e != string::npos ? e - b : e));
+ ist.fd = nullfd;
+ --unread;
+ }
- p = e;
- }
- else // Trailings whitespaces.
+ term_pipe (pipeline, trace);
+ terminated = true;
+
+ if (unread != 0)
+ dlt = system_clock::now () + chrono::seconds (2);
+
+ return unread != 0;
+ }
+ else
+ {
+ // Set the unread_{stderr,stdout} flags to true for the commands
+ // whose streams are not fully read yet.
+ //
+
+ // Can only be called after the first call of term() which would
+ // throw failed if pipeline is NULL.
+ //
+ assert (pipeline != nullptr);
+
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ if (s.data != nullptr) // stderr.
{
- // Append the trailing "blank" after the trailing whitespaces
- // in the exact mode.
- //
- if (ops.exact ())
- ns.emplace_back (empty_string);
+ pipe_command* c (static_cast<pipe_command*> (s.data));
+
+ c->unread_stderr = true;
- // Bail out since the end of the string is reached.
+ // Let's also close the stderr stream not to confuse
+ // diag_buffer::close() with a not fully read stream (eof is
+ // not reached, etc).
//
- break;
+ try
+ {
+ c->dbuf.is.close ();
+ }
+ catch (const io_error&)
+ {
+ // Not much we can do here. Anyway the diagnostics will be
+ // issued by complete_pipe().
+ }
}
+ else // stdout.
+ pipeline->unread_stdout = true;
}
}
- else // The newline or no-split mode.
+
+ return false;
+ }
+ };
+
+ // Note that on Windows if the file descriptor is not a pipe, then
+ // ifdstream assumes the blocking mode for which ifdselect() would throw
+ // invalid_argument. Such a descriptor can, however, only appear for the
+ // first command in the pipeline and so fds will only contain the input
+ // stream's descriptor. That all means that this descriptor will be read
+ // out by a series of the stream_reader::next() calls which can only
+ // return true and thus no ifdselect() calls will ever be made.
+ //
+ string s;
+ while (unread != 0)
+ {
+ // Read any pending data from the input stream.
+ //
+ if (ist.fd != nullfd)
+ {
+ // Prior to reading let's check that the deadline, if specified, is
+ // not reached. This way we handle the (hypothetical) case when we
+ // are continuously fed with the data without delays and thus can
+ // never get to ifdselect() which watches for the deadline. Also
+ // this check is the only way to bail out early on Windows for a
+ // blocking file descriptor.
+ //
+ if (dlt && *dlt <= system_clock::now ())
{
- // Note that we don't collapse multiple consecutive newlines.
- //
- // Note also that we always sanitize CRs so this loop is always
- // needed.
- //
- for (size_t p (0); p != string::npos; )
- {
- size_t e (s.find ('\n', p));
- string l (s, p, e != string::npos ? e - p : e);
+ if (!term ())
+ break;
+ }
- // Strip the trailing CRs that can appear while, for example,
- // cross-testing Windows target or as a part of msvcrt junk
- // production (see above).
+ if (sr.next (s))
+ {
+ if (!is.eof ())
+ {
+ // Consume the substring.
//
- while (!l.empty () && l.back () == '\r')
- l.pop_back ();
+ cf (move (s));
+ s.clear ();
+ }
+ else
+ {
+ ist.fd = nullfd;
+ --unread;
+ }
- // Append the line.
- //
- if (!l.empty () || // Non-empty.
- e != string::npos || // Empty, non-trailing.
- ops.exact ()) // Empty, trailing, in the exact mode.
- {
- if (ops.newline () || ns.empty ())
- ns.emplace_back (move (l));
- else
- {
- ns[0].value += '\n';
- ns[0].value += l;
- }
- }
+ continue;
+ }
+ }
- p = e != string::npos ? e + 1 : e;
+ try
+ {
+ // Wait until the data appear in any of the streams. If a deadline
+ // is specified, then pass the timeout to fdselect().
+ //
+ if (dlt)
+ {
+ timestamp now (system_clock::now ());
+
+ if (*dlt <= now || ifdselect (fds, *dlt - now) == 0)
+ {
+ if (term ())
+ continue;
+ else
+ break;
}
}
+ else
+ ifdselect (fds);
+
+ // Read out the pending data from the stderr streams.
+ //
+ for (fdselect_state& s: fds)
+ {
+ if (s.ready &&
+ s.data != nullptr &&
+ !static_cast<pipe_command*> (s.data)->dbuf.read ())
+ {
+ s.fd = nullfd;
+ --unread;
+ }
+ }
+ }
+ catch (const io_error& e)
+ {
+ fail (ll) << "io error reading pipeline streams: " << e;
+ }
+ }
+ }
+
+ // The set pseudo-builtin: set variable from the stdin input.
+ //
+ // set [-e|--exact] [(-n|--newline)|(-w|--whitespace)] <var> [<attr>]
+ //
+ static void
+ set_builtin (environment& env,
+ const strings& args,
+ auto_fd in,
+ pipe_command* pipeline,
+ const optional<deadline>& dl,
+ const location& ll)
+ {
+ tracer trace ("script::set_builtin");
+
+ try
+ {
+ // Parse arguments.
+ //
+ cli::vector_scanner scan (args);
+ set_options ops (scan);
+
+ if (ops.whitespace () && ops.newline ())
+ fail (ll) << "set: both -n|--newline and -w|--whitespace specified";
+
+ if (!scan.more ())
+ fail (ll) << "set: missing variable name";
+
+ string vname (scan.next ());
+ if (vname.empty ())
+ fail (ll) << "set: empty variable name";
+
+ // Detect patterns analogous to parser::parse_variable_name() (so we
+ // diagnose `set x[string]`).
+ //
+ if (vname.find_first_of ("[*?") != string::npos)
+ fail (ll) << "set: expected variable name instead of " << vname;
+
+ string attrs;
+ if (scan.more ())
+ {
+ attrs = scan.next ();
+
+ if (attrs.empty ())
+ fail (ll) << "set: empty variable attributes";
+
+ if (scan.more ())
+ fail (ll) << "set: unexpected argument '" << scan.next () << "'";
}
- env.set_variable (move (vname),
- move (ns),
- ats != nullptr ? *ats : empty_string,
- ll);
+ // Parse the stream content into the variable value.
+ //
+ names ns;
+
+ read (move (in),
+ ops.whitespace (), ops.newline (), ops.exact (),
+ [&ns] (string&& s) {ns.emplace_back (move (s));},
+ pipeline,
+ dl,
+ ll,
+ "set");
+
+ env.set_variable (move (vname), move (ns), attrs, ll);
}
catch (const io_error& e)
{
- fail (ll) << "set: " << e;
+ fail (ll) << "set: unable to read from stdin: " << e;
}
catch (const cli::exception& e)
{
@@ -1160,56 +1700,50 @@ namespace build2
name);
}
- // Stack-allocated linked list of information about the running pipeline
- // processes and builtins.
- //
- struct pipe_command
- {
- // We could probably use a union here, but let's keep it simple for now
- // (one is NULL).
- //
- process* proc;
- builtin* bltn;
-
- // True if this command has been terminated.
- //
- bool terminated = false;
-
- // Only for diagnostics.
- //
- const command& cmd;
- const location& loc;
-
- pipe_command* prev; // NULL for the left-most command.
-
- pipe_command (process& p,
- const command& c,
- const location& l,
- pipe_command* v)
- : proc (&p), bltn (nullptr), cmd (c), loc (l), prev (v) {}
-
- pipe_command (builtin& b,
- const command& c,
- const location& l,
- pipe_command* v)
- : proc (nullptr), bltn (&b), cmd (c), loc (l), prev (v) {}
- };
-
static bool
run_pipe (environment& env,
command_pipe::const_iterator bc,
command_pipe::const_iterator ec,
auto_fd ifd,
- size_t ci, size_t li, const location& ll,
+ const iteration_index* ii, size_t li, size_t ci,
+ const location& ll,
bool diag,
+ const function<command_function>& cf, bool last_cmd,
optional<deadline> dl = nullopt,
- const command* dl_cmd = nullptr, // env -t <cmd>
pipe_command* prev_cmd = nullptr)
{
tracer trace ("script::run_pipe");
- if (bc == ec) // End of the pipeline.
+ // At the end of the pipeline read out its stdout, if requested.
+ //
+ if (bc == ec)
+ {
+ if (cf != nullptr)
+ {
+ assert (!last_cmd); // Otherwise we wouldn't be here.
+
+ // The pipeline can't be empty.
+ //
+ assert (ifd != nullfd && prev_cmd != nullptr);
+
+ const command& c (prev_cmd->cmd);
+
+ try
+ {
+ cf (env, strings () /* arguments */,
+ move (ifd), prev_cmd,
+ dl,
+ ll);
+ }
+ catch (const io_error& e)
+ {
+ fail (ll) << "unable to read from " << cmd_path (c) << " stdout: "
+ << e;
+ }
+ }
+
return true;
+ }
// The overall plan is to run the first command in the pipe, reading its
// input from the file descriptor passed (or, for the first command,
@@ -1261,6 +1795,12 @@ namespace build2
command_pipe::const_iterator nc (bc + 1);
bool last (nc == ec);
+ // Make sure that stdout is not redirected if meant to be read (last_cmd
+ // is false) or cannot not be produced (last_cmd is true).
+ //
+ if (last && c.out && cf != nullptr)
+ fail (ll) << "stdout cannot be redirected";
+
// True if the process path is not pre-searched and the program path
// still needs to be resolved.
//
@@ -1272,7 +1812,7 @@ namespace build2
const redirect& in ((c.in ? *c.in : env.in).effective ());
- const redirect* out (!last
+ const redirect* out (!last || (cf != nullptr && !last_cmd)
? nullptr // stdout is piped.
: &(c.out ? *c.out : env.out).effective ());
@@ -1280,13 +1820,7 @@ namespace build2
auto process_args = [&c] () -> cstrings
{
- cstrings args {c.program.recall_string ()};
-
- for (const auto& a: c.arguments)
- args.push_back (a.c_str ());
-
- args.push_back (nullptr);
- return args;
+ return build2::process_args (c.program.recall_string (), c.arguments);
};
// Prior to opening file descriptors for command input/output redirects
@@ -1309,14 +1843,29 @@ namespace build2
// content), to make sure that the command doesn't print any unwanted
// diagnostics about IO operation failure.
//
- // Note though, that doing so would be a bad idea if the deadline is
- // specified, since we can block on read and miss the deadline.
- //
- if (!dl)
+ if (ifd != nullfd)
{
- // Note that dtor will ignore any errors (which is what we want).
+ // Note that we can't use ifdstream dtor in the skip mode here since
+ // it turns the stream into the blocking mode and we won't be able
+ // to read out the potentially buffered stderr for the
+ // pipeline. Using read() is also not ideal since it performs
+ // parsing and allocations needlessly. This, however, is probably ok
+ // for such an uncommon case.
+ //
+ //ifdstream (move (ifd), fdstream_mode::skip);
+
+ // Let's try to minimize the allocation size splitting the input
+ // data at whitespaces.
//
- ifdstream (move (ifd), fdstream_mode::skip);
+ read (move (ifd),
+ true /* whitespace */,
+ false /* newline */,
+ false /* exact */,
+ [] (string&&) {}, // Just drop the string.
+ prev_cmd,
+ dl,
+ ll,
+ program.c_str ());
}
if (!first || !last)
@@ -1340,6 +1889,9 @@ namespace build2
if (c.out)
fail (ll) << program << " builtin stdout cannot be redirected";
+ if (cf != nullptr && !last_cmd)
+ fail (ll) << program << " builtin stdout cannot be read";
+
if (c.err)
fail (ll) << program << " builtin stderr cannot be redirected";
@@ -1369,17 +1921,29 @@ namespace build2
// Create a unique path for a command standard stream cache file.
//
- auto std_path = [&env, &ci, &li, &ll] (const char* n) -> path
+ auto std_path = [&env, ii, &li, &ci, &ll] (const char* nm) -> path
{
using std::to_string;
- path p (n);
+ string s (nm);
+ size_t n (s.size ());
+
+ if (ii != nullptr)
+ {
+ // Note: reverse order (outermost to innermost).
+ //
+ for (const iteration_index* i (ii); i != nullptr; i = i->prev)
+ s.insert (n, "-i" + to_string (i->index));
+ }
// 0 if belongs to a single-line script, otherwise is the command line
// number (start from one) in the script.
//
- if (li > 0)
- p += "-" + to_string (li);
+ if (li != 0)
+ {
+ s += "-n";
+ s += to_string (li);
+ }
// 0 if belongs to a single-command expression, otherwise is the
// command number (start from one) in the expression.
@@ -1388,10 +1952,13 @@ namespace build2
// single-line script or to N-th single-command line of multi-line
// script. These cases are mutually exclusive and so are unambiguous.
//
- if (ci > 0)
- p += "-" + to_string (ci);
+ if (ci != 0)
+ {
+ s += "-c";
+ s += to_string (ci);
+ }
- return normalize (move (p), temp_dir (env), ll);
+ return normalize (path (move (s)), temp_dir (env), ll);
};
// If this is the first pipeline command, then open stdin descriptor
@@ -1496,8 +2063,7 @@ namespace build2
// Calculate the process/builtin execution deadline. Note that we should
// also consider the left-hand side processes deadlines, not to keep
// them waiting for us and allow them to terminate not later than their
- // deadlines. Thus, let's also track which command has introduced the
- // deadline, so we can report it if the deadline is missed.
+ // deadlines.
//
dl = earlier (dl, env.effective_deadline ());
@@ -1505,10 +2071,7 @@ namespace build2
{
deadline d (system_clock::now () + *c.timeout, false /* success */);
if (!dl || d < *dl)
- {
dl = d;
- dl_cmd = &c;
- }
}
// Prior to opening file descriptors for command outputs redirects
@@ -1529,6 +2092,9 @@ namespace build2
if (c.out)
fail (ll) << "set builtin stdout cannot be redirected";
+ if (cf != nullptr && !last_cmd)
+ fail (ll) << "set builtin stdout cannot be read";
+
if (c.err)
fail (ll) << "set builtin stderr cannot be redirected";
@@ -1538,14 +2104,54 @@ namespace build2
if (verb >= 2)
print_process (process_args ());
- set_builtin (env, c.arguments,
- move (ifd), !first,
- dl, dl_cmd != nullptr ? *dl_cmd : c,
- ll);
+ set_builtin (env, c.arguments, move (ifd), prev_cmd, dl, ll);
+ return true;
+ }
+
+ // If this is the last command in the pipe and the command function is
+ // specified for it, then call it.
+ //
+ if (last && cf != nullptr && last_cmd)
+ {
+ // Must be enforced by the caller.
+ //
+ assert (!c.out && !c.err && !c.exit);
+
+ try
+ {
+ cf (env, c.arguments, move (ifd), prev_cmd, dl, ll);
+ }
+ catch (const io_error& e)
+ {
+ diag_record dr (fail (ll));
+
+ dr << cmd_path (c) << ": unable to read from ";
+
+ if (prev_cmd != nullptr)
+ dr << cmd_path (prev_cmd->cmd) << " output";
+ else
+ dr << "stdin";
+
+ dr << ": " << e;
+ }
return true;
}
+ // Propagate the pointer to the left-most command.
+ //
+ pipe_command pc (env.context,
+ c,
+ dl,
+ ll,
+ prev_cmd,
+ prev_cmd != nullptr ? prev_cmd->next : nullptr);
+
+ if (prev_cmd != nullptr)
+ prev_cmd->next = &pc;
+ else
+ pc.next = &pc; // Points to itself.
+
// Open a file for command output redirect if requested explicitly
// (file overwrite/append redirects) or for the purpose of the output
// validation (none, here_*, file comparison redirects), register the
@@ -1555,9 +2161,9 @@ namespace build2
// or null-device descriptor for merge, pass or null redirects
// respectively (not opening any file).
//
- auto open = [&env, &wdir, &ll, &std_path] (const redirect& r,
- int dfd,
- path& p) -> auto_fd
+ auto open = [&env, &wdir, &ll, &std_path, &c, &pc] (const redirect& r,
+ int dfd,
+ path& p) -> auto_fd
{
assert (dfd == 1 || dfd == 2);
const char* what (dfd == 1 ? "stdout" : "stderr");
@@ -1575,11 +2181,34 @@ namespace build2
{
try
{
+ if (dfd == 2) // stderr?
+ {
+ fdpipe p;
+ if (diag_buffer::pipe (env.context) == -1) // Are we buffering?
+ p = fdopen_pipe ();
+
+ // Deduce the args0 argument similar to cmd_path().
+ //
+ // Note that we must open the diag buffer regardless of the
+ // diag_buffer::pipe() result.
+ //
+ pc.dbuf.open ((c.program.initial == nullptr
+ ? c.program.recall.string ().c_str ()
+ : c.program.recall_string ()),
+ move (p.in),
+ fdstream_mode::non_blocking);
+
+ if (p.out != nullfd)
+ return move (p.out);
+
+ // Fall through.
+ }
+
return fddup (dfd);
}
catch (const io_error& e)
{
- fail (ll) << "unable to duplicate " << what << ": " << e;
+ fail (ll) << "unable to redirect " << what << ": " << e;
}
}
@@ -1661,7 +2290,7 @@ namespace build2
// script failures investigation and, for example, for validation
// "tightening".
//
- if (last)
+ if (last && out != nullptr)
ofd.out = open (*out, 1, osp);
else
{
@@ -1690,7 +2319,7 @@ namespace build2
fail (ll) << "stdout and stderr redirected to each other";
auto_fd& self (mo ? ofd.out : efd);
- auto_fd& other (mo ? efd : ofd.out);
+ auto_fd& other (mo ? efd : ofd.out);
try
{
@@ -1704,115 +2333,390 @@ namespace build2
}
}
- // All descriptors should be open to the date.
+ // By now all descriptors should be open.
//
- assert (ofd.out.get () != -1 && efd.get () != -1);
+ assert (ofd.out != nullfd && efd != nullfd);
- // Wait for a process/builtin to complete until the deadline is reached
- // and return the underlying wait function result (optional<something>).
- //
- auto timed_wait = [] (auto& p, const timestamp& deadline)
- {
- timestamp now (system_clock::now ());
- return deadline > now ? p.timed_wait (deadline - now) : p.try_wait ();
- };
+ pc.isp = &isp;
+ pc.osp = &osp;
+ pc.esp = &esp;
- // Terminate the pipeline processes starting from the specified one and
- // up to the leftmost one and then kill those which didn't terminate
- // after 2 seconds.
+ // Read out all the pipeline's buffered strerr streams watching for the
+ // deadline, if specified. If the deadline is reached, then terminate
+ // the whole pipeline, move the deadline by another 2 seconds, and
+ // continue reading.
//
- // After that wait for the pipeline builtins completion. Since their
- // standard streams should no longer be written to or read from by any
- // process, that shouldn't take long. If, however, they won't be able to
- // complete in 2 seconds, then some of them have probably stuck while
- // communicating with a slow filesystem device or similar, and since we
- // currently have no way to terminate asynchronous builtins, we have no
- // choice but to abort.
+ // Note that we assume that this timeout increment is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_command for the possible reasons), then we just set
+ // unread_stderr flag to true for such commands and bail out.
//
- // Issue diagnostics and fail if something goes wrong, but still try to
- // terminate/kill all the pipe processes.
+ // Also note that this is a reduced version of the above read() function.
//
- auto term_pipe = [&timed_wait, &trace] (pipe_command* pc)
+ auto read_pipe = [&pc, &ll, &trace] ()
{
- diag_record dr;
+ fdselect_set fds;
+ for (pipe_command* c (&pc); c != nullptr; c = c->prev)
+ {
+ diag_buffer& b (c->dbuf);
- auto prog = [] (pipe_command* c) {return cmd_path (c->cmd);};
+ if (b.is.is_open ())
+ fds.emplace_back (b.is.fd (), c);
+ }
- // Terminate processes gracefully and set the terminate flag for the
- // pipe commands.
+ // Note that the current command deadline is the earliest (see above).
//
- for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ optional<timestamp> dlt (pc.dl ? pc.dl->value : optional<timestamp> ());
+
+ bool terminated (false);
+
+ for (size_t unread (fds.size ()); unread != 0;)
{
- if (process* p = c->proc)
try
{
- l5 ([&]{trace (c->loc) << "terminating: " << c->cmd;});
+ // If a deadline is specified, then pass the timeout to fdselect().
+ //
+ if (dlt)
+ {
+ timestamp now (system_clock::now ());
- p->term ();
+ if (*dlt <= now || ifdselect (fds, *dlt - now) == 0)
+ {
+ if (!terminated)
+ {
+ term_pipe (&pc, trace);
+ terminated = true;
+
+ dlt = system_clock::now () + chrono::seconds (2);
+ continue;
+ }
+ else
+ {
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ pipe_command* c (static_cast<pipe_command*> (s.data));
+
+ c->unread_stderr = true;
+
+ // Let's also close the stderr stream not to confuse
+ // diag_buffer::close() (see read() for details).
+ //
+ try
+ {
+ c->dbuf.is.close ();
+ }
+ catch (const io_error&) {}
+ }
+ }
+
+ break;
+ }
+ }
+ }
+ else
+ ifdselect (fds);
+
+ for (fdselect_state& s: fds)
+ {
+ if (s.ready &&
+ !static_cast<pipe_command*> (s.data)->dbuf.read ())
+ {
+ s.fd = nullfd;
+ --unread;
+ }
+ }
}
- catch (const process_error& e)
+ catch (const io_error& e)
{
- // If unable to terminate the process for any reason (the process
- // is exiting on Windows, etc) then just ignore this, postponing
- // the potential failure till the kill() call.
- //
- l5 ([&]{trace (c->loc) <<"unable to terminate " << prog (c)
- << ": " << e;});
+ fail (ll) << "io error reading pipeline streams: " << e;
}
-
- c->terminated = true;
}
+ };
- // Wait a bit for the processes to terminate and kill the remaining
- // ones.
- //
- timestamp dl (system_clock::now () + chrono::seconds (2));
-
- for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ // Wait for the pipeline processes and builtins to complete, watching
+ // for their deadlines if present. If a deadline is reached for any of
+ // them, then terminate the whole pipeline.
+ //
+ // Note: must be called after read_pipe().
+ //
+ auto wait_pipe = [&pc, &dl, &trace] ()
+ {
+ for (pipe_command* c (&pc); c != nullptr; c = c->prev)
{
- if (process* p = c->proc)
try
{
- l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
-
- if (!timed_wait (*p, dl))
+ if (process* p = c->proc)
+ {
+ if (!dl)
+ p->wait ();
+ else if (!timed_wait (*p, dl->value))
+ term_pipe (c, trace);
+ }
+ else
{
- l5 ([&]{trace (c->loc) << "killing: " << c->cmd;});
+ builtin* b (c->bltn);
- p->kill ();
- p->wait ();
+ if (!dl)
+ b->wait ();
+ else if (!timed_wait (*b, dl->value))
+ term_pipe (c, trace);
}
}
catch (const process_error& e)
{
- dr << fail (c->loc) << "unable to wait/kill " << prog (c) << ": "
- << e;
+ fail (c->loc) << "unable to wait " << cmd_path (c->cmd) << ": "
+ << e;
}
}
+ };
- // Wait a bit for the builtins to complete and abort if any remain
- // running.
- //
- dl = system_clock::now () + chrono::seconds (2);
+ // Iterate over the pipeline processes and builtins left to right,
+ // printing their stderr if buffered and issuing the diagnostics if the
+ // exit code is not available (terminated abnormally or due to a
+ // deadline), is unexpected, or stdout and/or stderr was not fully
+ // read. Throw failed at the end if the exit code for any of them is not
+ // available or stdout and/or stderr was not fully read. Return false if
+ // exit code for any of them is unexpected (the return is used, for
+ // example, in the if-conditions).
+ //
+ // Note: must be called after wait_pipe() and only once.
+ //
+ auto complete_pipe = [&pc, &env, diag] ()
+ {
+ bool r (true);
+ bool fail (false);
- for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ pipe_command* c (pc.next); // Left-most command.
+ assert (c != nullptr); // Since the lambda must be called once.
+
+ for (pc.next = nullptr; c != nullptr; c = c->next)
{
- if (builtin* b = c->bltn)
- try
+ // Collect the exit status, if present.
+ //
+ // Absent if the process/builtin misses the "unsuccessful" deadline.
+ //
+ optional<process_exit> exit;
+
+ const char* w (c->bltn != nullptr ? "builtin" : "process");
+
+ if (c->bltn != nullptr)
+ {
+ // Note that this also handles ad hoc termination (without the
+ // call to term_pipe()) by the sleep builtin.
+ //
+ if (c->terminated)
+ {
+ if (c->dl && c->dl->success)
+ exit = process_exit (0);
+ }
+ else
+ exit = process_exit (c->bltn->wait ());
+
+ c->bltn = nullptr;
+ }
+ else if (c->proc != nullptr)
{
- l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
+ const process& pr (*c->proc);
- if (!timed_wait (*b, dl))
+#ifndef _WIN32
+ if (c->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->signal () == SIGTERM)
+#else
+ if (c->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->status == DBG_TERMINATE_PROCESS)
+#endif
{
- error (c->loc) << prog (c) << " builtin hanged, aborting";
- terminate (false /* trace */);
+ if (c->dl && c->dl->success)
+ exit = process_exit (0);
}
+ else
+ exit = pr.exit;
+
+ c->proc = nullptr;
}
- catch (const system_error& e)
+ else
+ assert (false); // The lambda can only be called once.
+
+ const command& cmd (c->cmd);
+ const location& ll (c->loc);
+
+ // Verify the exit status and issue the diagnostics on failure.
+ //
+ diag_record dr;
+
+ path pr (cmd_path (cmd));
+
+ // Print the diagnostics if the command stdout and/or stderr are not
+ // fully read.
+ //
+ auto unread_output_diag = [&dr, c, w, &pr] (bool main_error)
+ {
+ if (main_error)
+ dr << error (c->loc) << w << ' ' << pr << ' ';
+ else
+ dr << error;
+
+ if (c->unread_stdout)
+ {
+ dr << "stdout ";
+
+ if (c->unread_stderr)
+ dr << "and ";
+ }
+
+ if (c->unread_stderr)
+ dr << "stderr ";
+
+ dr << "not closed after exit";
+ };
+
+ // Fail if the process is terminated due to reaching the deadline.
+ //
+ if (!exit)
+ {
+ dr << error (ll) << w << ' ' << pr
+ << " terminated: execution timeout expired";
+
+ if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (false /* main_error */);
+
+ if (verb == 1)
+ {
+ dr << info << "command line: ";
+ print_process (dr, *c->args);
+ }
+
+ fail = true;
+ }
+ else
{
- dr << fail (c->loc) << "unable to wait for " << prog (c) << ": "
- << e;
+ // If there is no valid exit code available by whatever reason
+ // then we print the proper diagnostics, dump stderr (if cached
+ // and not too large) and fail the whole script. Otherwise if the
+ // exit code is not correct then we print diagnostics if requested
+ // and fail the pipeline.
+ //
+ bool valid (exit->normal ());
+
+ // On Windows the exit code can be out of the valid codes range
+ // being defined as uint16_t.
+ //
+#ifdef _WIN32
+ if (valid)
+ valid = exit->code () < 256;
+#endif
+
+ // In the presense of a valid exit code and given stdout and
+ // stderr are fully read out we print the diagnostics and return
+ // false rather than throw.
+ //
+ // Note that there can be a race, so that the process we have
+ // terminated due to reaching the deadline has in fact exited
+ // normally. Thus, the 'unread stderr' situation can also happen
+ // to a successfully terminated process. If that's the case, we
+ // report this problem as the main error and the secondary error
+ // otherwise.
+ //
+ if (!valid || c->unread_stdout || c->unread_stderr)
+ fail = true;
+
+ exit_comparison cmp (cmd.exit
+ ? cmd.exit->comparison
+ : exit_comparison::eq);
+
+ uint16_t exc (cmd.exit ? cmd.exit->code : 0);
+
+ bool success (valid &&
+ (cmp == exit_comparison::eq) ==
+ (exc == exit->code ()));
+
+ if (!success)
+ r = false;
+
+ if (!valid || (!success && diag))
+ {
+ dr << error (ll) << w << ' ' << pr << ' ';
+
+ if (!exit->normal ())
+ dr << *exit;
+ else
+ {
+ uint16_t ec (exit->code ()); // Make sure printed as integer.
+
+ if (!valid)
+ {
+ dr << "exit code " << ec << " out of 0-255 range";
+ }
+ else
+ {
+ if (cmd.exit)
+ dr << "exit code " << ec
+ << (cmp == exit_comparison::eq ? " != " : " == ")
+ << exc;
+ else
+ dr << "exited with code " << ec;
+ }
+ }
+
+ if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (false /* main_error */);
+
+ if (verb == 1)
+ {
+ dr << info << "command line: ";
+ print_process (dr, *c->args);
+ }
+
+ if (non_empty (*c->esp, ll) && avail_on_failure (*c->esp, env))
+ dr << info << "stderr: " << *c->esp;
+
+ if (non_empty (*c->osp, ll) && avail_on_failure (*c->osp, env))
+ dr << info << "stdout: " << *c->osp;
+
+ if (non_empty (*c->isp, ll) && avail_on_failure (*c->isp, env))
+ dr << info << "stdin: " << *c->isp;
+
+ // Print cached stderr.
+ //
+ print_file (dr, *c->esp, ll);
+ }
+ else if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (true /* main_error */);
}
+
+ // Now print the buffered stderr, if present, and/or flush the
+ // diagnostics, if issued.
+ //
+ if (c->dbuf.is_open ())
+ c->dbuf.close (move (dr));
+ }
+
+ // Fail if required.
+ //
+ if (fail)
+ throw failed ();
+
+ return r;
+ };
+
+ // Close all buffered pipeline stderr streams ignoring io_error
+ // exceptions.
+ //
+ auto close_pipe = [&pc] ()
+ {
+ for (pipe_command* c (&pc); c != nullptr; c = c->prev)
+ {
+ if (c->dbuf.is.is_open ())
+ try
+ {
+ c->dbuf.is.close();
+ }
+ catch (const io_error&) {}
}
};
@@ -1838,9 +2742,8 @@ namespace build2
fail (ll) << "specified working directory " << cwd
<< " does not exist";
- // Absent if the process/builtin misses the "unsuccessful" deadline.
- //
- optional<process_exit> exit;
+ cstrings args (process_args ());
+ pc.args = &args;
const builtin_info* bi (resolve ? builtins.find (program) : nullptr);
@@ -1850,8 +2753,11 @@ namespace build2
{
// Execute the builtin.
//
- if (verb >= 2)
- print_process (process_args ());
+ // Don't print the true and false builtins, since they are normally
+ // used for the commands execution flow control.
+ //
+ if (verb >= 2 && program != "true" && program != "false")
+ print_process (args);
// Some of the script builtins (cp, mkdir, etc) extend libbutl
// builtins (via callbacks) registering/moving cleanups for the
@@ -1892,18 +2798,6 @@ namespace build2
// We also extend the sleep builtin, deactivating the thread before
// going to sleep and waking up before the deadline is reached.
//
- // Let's "wrap up" the sleep-related values into the single object to
- // rely on "small function object" optimization.
- //
- struct sleep
- {
- optional<timestamp> deadline;
- bool terminated = false;
-
- sleep (const optional<timestamp>& d): deadline (d) {}
- };
- sleep slp (dl ? dl->value : optional<timestamp> ());
-
builtin_callbacks bcs {
// create
@@ -2065,16 +2959,19 @@ namespace build2
// sleep
//
- [&env, &slp] (const duration& d)
+ [&env, &pc] (const duration& d)
{
duration t (d);
- const optional<timestamp>& dl (slp.deadline);
+ const optional<timestamp>& dl (pc.dl
+ ? pc.dl->value
+ : optional<timestamp> ());
if (dl)
{
timestamp now (system_clock::now ());
- slp.terminated = now + t > *dl;
+ if (now + t > *dl)
+ pc.terminated = true;
if (*dl <= now)
return;
@@ -2087,7 +2984,7 @@ namespace build2
// If/when required we could probably support the precise sleep
// mode (e.g., via an option).
//
- env.context.sched.sleep (t);
+ env.context.sched->sleep (t);
}
};
@@ -2099,19 +2996,19 @@ namespace build2
move (ifd), move (ofd.out), move (efd),
cwd,
bcs));
+ pc.bltn = &b;
- pipe_command pc (b, c, ll, prev_cmd);
-
- // If the deadline is specified, then make sure we don't miss it
- // waiting indefinitely in the builtin destructor on the right-hand
- // side of the pipe failure.
+ // If the right-hand part of the pipe fails, then make sure we don't
+ // wait indefinitely in the process destructor if the deadlines are
+ // specified or just because a process is blocked on stderr.
//
- auto g (make_exception_guard ([&dl, &pc, &term_pipe] ()
+ auto g (make_exception_guard ([&pc, &close_pipe, &trace] ()
{
- if (dl)
+ if (pc.bltn != nullptr)
try
{
- term_pipe (&pc);
+ close_pipe ();
+ term_pipe (&pc, trace);
}
catch (const failed&)
{
@@ -2122,27 +3019,21 @@ namespace build2
success = run_pipe (env,
nc, ec,
move (ofd.in),
- ci + 1, li, ll, diag,
- dl, dl_cmd,
+ ii, li, ci + 1, ll, diag,
+ cf, last_cmd,
+ dl,
&pc);
- if (!dl)
- b.wait ();
- else if (!timed_wait (b, dl->value))
- term_pipe (&pc);
-
- // Note that this also handles ad hoc termination (without the call
- // to term_pipe()) by the sleep builtin (see above).
+ // Complete the pipeline execution, if not done yet.
//
- if (pc.terminated || slp.terminated)
+ if (pc.bltn != nullptr)
{
- assert (dl);
+ read_pipe ();
+ wait_pipe ();
- if (dl->success)
- exit = process_exit (0);
+ if (!complete_pipe ())
+ success = false;
}
- else
- exit = process_exit (r);
}
catch (const system_error& e)
{
@@ -2154,8 +3045,6 @@ namespace build2
{
// Execute the process.
//
- cstrings args (process_args ());
-
// If the process path is not pre-searched then resolve the relative
// non-simple program path against the script's working directory. The
// simple one will be left for the process path search machinery. Also
@@ -2213,10 +3102,16 @@ namespace build2
if (verb >= 2)
print_process (pe, args);
+ // Note that stderr can only be a pipe if we are buffering the
+ // diagnostics. In this case also pass the reading end so it can be
+ // "probed" on Windows (see butl::process::pipe for details).
+ //
process pr (
*pe.path,
args.data (),
- {ifd.get (), -1}, process::pipe (ofd), {-1, efd.get ()},
+ {ifd.get (), -1},
+ process::pipe (ofd),
+ {pc.dbuf.is.fd (), efd.get ()},
cwd.string ().c_str (),
pe.vars);
@@ -2226,18 +3121,19 @@ namespace build2
ofd.out.reset ();
efd.reset ();
- pipe_command pc (pr, c, ll, prev_cmd);
+ pc.proc = &pr;
- // If the deadline is specified, then make sure we don't miss it
- // waiting indefinitely in the process destructor on the right-hand
- // part of the pipe failure.
+ // If the right-hand part of the pipe fails, then make sure we don't
+ // wait indefinitely in the process destructor (see above for
+ // details).
//
- auto g (make_exception_guard ([&dl, &pc, &term_pipe] ()
+ auto g (make_exception_guard ([&pc, &close_pipe, &trace] ()
{
- if (dl)
+ if (pc.proc != nullptr)
try
{
- term_pipe (&pc);
+ close_pipe ();
+ term_pipe (&pc, trace);
}
catch (const failed&)
{
@@ -2248,32 +3144,21 @@ namespace build2
success = run_pipe (env,
nc, ec,
move (ofd.in),
- ci + 1, li, ll, diag,
- dl, dl_cmd,
+ ii, li, ci + 1, ll, diag,
+ cf, last_cmd,
+ dl,
&pc);
- if (!dl)
- pr.wait ();
- else if (!timed_wait (pr, dl->value))
- term_pipe (&pc);
-
-#ifndef _WIN32
- if (pc.terminated &&
- !pr.exit->normal () &&
- pr.exit->signal () == SIGTERM)
-#else
- if (pc.terminated &&
- !pr.exit->normal () &&
- pr.exit->status == DBG_TERMINATE_PROCESS)
-#endif
+ // Complete the pipeline execution, if not done yet.
+ //
+ if (pc.proc != nullptr)
{
- assert (dl);
+ read_pipe ();
+ wait_pipe ();
- if (dl->success)
- exit = process_exit (0);
+ if (!complete_pipe ())
+ success = false;
}
- else
- exit = pr.exit;
}
catch (const process_error& e)
{
@@ -2286,98 +3171,23 @@ namespace build2
}
}
- // If the righ-hand side pipeline failed than the whole pipeline fails,
- // and no further checks are required.
- //
- if (!success)
- return false;
-
- // Fail if the process is terminated due to reaching the deadline.
- //
- if (!exit)
- fail (ll) << cmd_path (dl_cmd != nullptr ? *dl_cmd : c)
- << " terminated: execution timeout expired";
-
- path pr (cmd_path (c));
-
- // If there is no valid exit code available by whatever reason then we
- // print the proper diagnostics, dump stderr (if cached and not too
- // large) and fail the whole script. Otherwise if the exit code is not
- // correct then we print diagnostics if requested and fail the pipeline.
- //
- bool valid (exit->normal ());
-
- // On Windows the exit code can be out of the valid codes range being
- // defined as uint16_t.
- //
-#ifdef _WIN32
- if (valid)
- valid = exit->code () < 256;
-#endif
-
- exit_comparison cmp (c.exit ? c.exit->comparison : exit_comparison::eq);
- uint16_t exc (c.exit ? c.exit->code : 0);
-
- success = valid &&
- (cmp == exit_comparison::eq) == (exc == exit->code ());
-
- if (!valid || (!success && diag))
- {
- // In the presense of a valid exit code we print the diagnostics and
- // return false rather than throw.
- //
- diag_record d (valid ? error (ll) : fail (ll));
-
- if (!exit->normal ())
- d << pr << " " << *exit;
- else
- {
- uint16_t ec (exit->code ()); // Make sure is printed as integer.
-
- if (!valid)
- d << pr << " exit code " << ec << " out of 0-255 range";
- else if (!success)
- {
- if (diag)
- {
- if (c.exit)
- d << pr << " exit code " << ec
- << (cmp == exit_comparison::eq ? " != " : " == ") << exc;
- else
- d << pr << " exited with code " << ec;
- }
- }
- else
- assert (false);
- }
-
- if (non_empty (esp, ll) && avail_on_failure (esp, env))
- d << info << "stderr: " << esp;
-
- if (non_empty (osp, ll) && avail_on_failure (osp, env))
- d << info << "stdout: " << osp;
-
- if (non_empty (isp, ll) && avail_on_failure (isp, env))
- d << info << "stdin: " << isp;
-
- // Print cached stderr.
- //
- print_file (d, esp, ll);
- }
-
- // If exit code is correct then check if the standard outputs match the
- // expectations. Note that stdout is only redirected to file for the
- // last command in the pipeline.
+ // If the pipeline or the right-hand side outputs check failed, then no
+ // further checks are required. Otherwise, check if the standard outputs
+ // match the expectations. Note that stdout can only be redirected to
+ // file for the last command in the pipeline.
//
// The thinking behind matching stderr first is that if it mismatches,
// then the program probably misbehaves (executes wrong functionality,
// etc) in which case its stdout doesn't really matter.
//
if (success)
- success =
- check_output (pr, esp, isp, err, ll, env, diag, "stderr") &&
- (!last ||
- check_output (pr, osp, isp, *out, ll, env, diag, "stdout"));
+ {
+ path pr (cmd_path (c));
+
+ success = check_output (pr, esp, isp, err, ll, env, diag, "stderr") &&
+ (out == nullptr ||
+ check_output (pr, osp, isp, *out, ll, env, diag, "stdout"));
+ }
return success;
}
@@ -2385,8 +3195,10 @@ namespace build2
static bool
run_expr (environment& env,
const command_expr& expr,
- size_t li, const location& ll,
- bool diag)
+ const iteration_index* ii, size_t li,
+ const location& ll,
+ bool diag,
+ const function<command_function>& cf, bool last_cmd)
{
// Commands are numbered sequentially throughout the expression
// starting with 1. Number 0 means the command is a single one.
@@ -2424,10 +3236,15 @@ namespace build2
// with false.
//
if (!((or_op && r) || (!or_op && !r)))
+ {
+ assert (!p.empty ());
+
r = run_pipe (env,
p.begin (), p.end (),
auto_fd (),
- ci, li, ll, print);
+ ii, li, ci, ll, print,
+ cf, last_cmd);
+ }
ci += p.size ();
}
@@ -2438,24 +3255,37 @@ namespace build2
void
run (environment& env,
const command_expr& expr,
- size_t li, const location& ll)
+ const iteration_index* ii, size_t li,
+ const location& ll,
+ const function<command_function>& cf,
+ bool last_cmd)
{
// Note that we don't print the expression at any verbosity level
// assuming that the caller does this, potentially providing some
// additional information (command type, etc).
//
- if (!run_expr (env, expr, li, ll, true /* diag */))
+ if (!run_expr (env,
+ expr,
+ ii, li, ll,
+ true /* diag */,
+ cf, last_cmd))
throw failed (); // Assume diagnostics is already printed.
}
bool
- run_if (environment& env,
- const command_expr& expr,
- size_t li, const location& ll)
+ run_cond (environment& env,
+ const command_expr& expr,
+ const iteration_index* ii, size_t li,
+ const location& ll,
+ const function<command_function>& cf, bool last_cmd)
{
// Note that we don't print the expression here (see above).
//
- return run_expr (env, expr, li, ll, false /* diag */);
+ return run_expr (env,
+ expr,
+ ii, li, ll,
+ false /* diag */,
+ cf, last_cmd);
}
void
@@ -2704,8 +3534,7 @@ namespace build2
try
{
size_t n (0);
- for (const dir_entry& de: dir_iterator (p,
- false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (p, dir_iterator::no_follow))
{
if (n++ < 10)
dr << '\n' << (de.ltype () == entry_type::directory
diff --git a/libbuild2/script/run.hxx b/libbuild2/script/run.hxx
index 477dd88..c4c2aa2 100644
--- a/libbuild2/script/run.hxx
+++ b/libbuild2/script/run.hxx
@@ -38,11 +38,24 @@ namespace build2
// Location is the start position of this command line in the script. It
// can be used in diagnostics.
//
+ // Optionally, execute the specified function at the end of the pipe,
+ // either after the last command or instead of it.
+ //
void
- run (environment&, const command_expr&, size_t index, const location&);
+ run (environment&,
+ const command_expr&,
+ const iteration_index*, size_t index,
+ const location&,
+ const function<command_function>& = nullptr,
+ bool last_cmd = true);
bool
- run_if (environment&, const command_expr&, size_t, const location&);
+ run_cond (environment&,
+ const command_expr&,
+ const iteration_index*, size_t index,
+ const location&,
+ const function<command_function>& = nullptr,
+ bool last_cmd = true);
// Perform the registered special file cleanups in the direct order and
// then the regular cleanups in the reverse order.
@@ -69,6 +82,40 @@ namespace build2
//
string
diag_path (const dir_name_view&);
+
+ // Read the stream content, optionally splitting the input data at
+ // whitespaces or newlines and calling the specified callback function for
+ // each substring (see the set builtin options for the splitting
+ // semantics). Throw failed on io_error.
+ //
+ // If the stream is a pipeline's output, then the pipeline argument must
+ // also be specified. Normally called from a custom command function (see
+ // command_function for details) which is provided with the pipeline
+ // information.
+ //
+ // Turn the stream into the non-blocking mode and, if the pipeline is
+ // specified, read out its buffered stderr streams while waiting for the
+ // input stream data. If a deadline is specified and is reached, then
+ // terminate the whole pipeline, if specified, and bail out. Otherwise
+ // issue diagnostics and fail. The thinking here is that in the former
+ // case the caller first needs to dump the buffered stderr streams, issue
+ // the appropriate diagnostics for the pipeline processes/builtins, and
+ // only throw failed afterwards.
+ //
+ // Note that on Windows we can only turn file descriptors of the pipe type
+ // into the non-blocking mode. Thus, a non-pipe descriptor is read in the
+ // blocking manner (and the deadline is checked less accurately). This is
+ // fine since there are no pipeline stderr streams to read out in this
+ // case.
+ //
+ void
+ read (auto_fd&&,
+ bool whitespace, bool newline, bool exact,
+ const function<void (string&&)>&,
+ pipe_command* pipeline,
+ const optional<deadline>&,
+ const location&,
+ const char* what);
}
}
diff --git a/libbuild2/script/script.cxx b/libbuild2/script/script.cxx
index 9e6eeed..4a6ca33 100644
--- a/libbuild2/script/script.cxx
+++ b/libbuild2/script/script.cxx
@@ -20,14 +20,17 @@ namespace build2
switch (lt)
{
- case line_type::var: s = "variable"; break;
- case line_type::cmd: s = "command"; break;
- case line_type::cmd_if: s = "'if'"; break;
- case line_type::cmd_ifn: s = "'if!'"; break;
- case line_type::cmd_elif: s = "'elif'"; break;
- case line_type::cmd_elifn: s = "'elif!'"; break;
- case line_type::cmd_else: s = "'else'"; break;
- case line_type::cmd_end: s = "'end'"; break;
+ case line_type::var: s = "variable"; break;
+ case line_type::cmd: s = "command"; break;
+ case line_type::cmd_if: s = "'if'"; break;
+ case line_type::cmd_ifn: s = "'if!'"; break;
+ case line_type::cmd_elif: s = "'elif'"; break;
+ case line_type::cmd_elifn: s = "'elif!'"; break;
+ case line_type::cmd_else: s = "'else'"; break;
+ case line_type::cmd_while: s = "'while'"; break;
+ case line_type::cmd_for_args: s = "'for'"; break;
+ case line_type::cmd_for_stream: s = "'for'"; break;
+ case line_type::cmd_end: s = "'end'"; break;
}
return o << s;
@@ -186,14 +189,14 @@ namespace build2
void
dump (ostream& os, const string& ind, const lines& ls)
{
- // Additionally indent the if-branch lines.
+ // Additionally indent the flow control construct block lines.
//
- string if_ind;
+ string fc_ind;
for (const line& l: ls)
{
- // Before printing indentation, decrease it if the else or end line is
- // reached.
+ // Before printing indentation, decrease it if the else, end, etc line
+ // is reached.
//
switch (l.type)
{
@@ -202,9 +205,9 @@ namespace build2
case line_type::cmd_else:
case line_type::cmd_end:
{
- size_t n (if_ind.size ());
+ size_t n (fc_ind.size ());
assert (n >= 2);
- if_ind.resize (n - 2);
+ fc_ind.resize (n - 2);
break;
}
default: break;
@@ -212,9 +215,10 @@ namespace build2
// Print indentations.
//
- os << ind << if_ind;
+ os << ind << fc_ind;
- // After printing indentation, increase it for if/else branch.
+ // After printing indentation, increase it for the flow control
+ // construct block lines.
//
switch (l.type)
{
@@ -222,7 +226,10 @@ namespace build2
case line_type::cmd_ifn:
case line_type::cmd_elif:
case line_type::cmd_elifn:
- case line_type::cmd_else: if_ind += " "; break;
+ case line_type::cmd_else:
+ case line_type::cmd_while:
+ case line_type::cmd_for_args:
+ case line_type::cmd_for_stream: fc_ind += " "; break;
default: break;
}
@@ -761,7 +768,9 @@ namespace build2
{
using script::cleanup;
- assert (!implicit || c.type == cleanup_type::always);
+ // Implicit never-cleanup doesn't make sense.
+ //
+ assert (!implicit || c.type != cleanup_type::never);
const path& p (c.path);
diff --git a/libbuild2/script/script.hxx b/libbuild2/script/script.hxx
index d162900..cccad98 100644
--- a/libbuild2/script/script.hxx
+++ b/libbuild2/script/script.hxx
@@ -27,6 +27,9 @@ namespace build2
cmd_elif,
cmd_elifn,
cmd_else,
+ cmd_while,
+ cmd_for_args, // `for x: ...`
+ cmd_for_stream, // `... | for x` and `for x <...`
cmd_end
};
@@ -40,7 +43,7 @@ namespace build2
union
{
- const variable* var; // Pre-entered for line_type::var.
+ const variable* var; // Pre-entered for line_type::{var,cmd_for_*}.
};
};
@@ -262,7 +265,7 @@ namespace build2
cleanup_type type;
build2::path path;
};
- using cleanups = vector<cleanup>;
+ using cleanups = small_vector<cleanup, 1>;
// command_exit
//
@@ -315,6 +318,10 @@ namespace build2
add (string);
};
+ // @@ For better diagnostics we may want to store an individual location
+ // of each command in the pipeline (maybe we can share the file part
+ //    somehow since a pipeline cannot span multiple files).
+ //
struct command
{
// We use NULL initial as an indication that the path stored in recall
@@ -354,6 +361,10 @@ namespace build2
// command_pipe
//
+ // Note that we cannot use small_vector here, since moving from objects of
+ // the command_pipe type would invalidate the command redirects of the
+ // reference type in this case.
+ //
using command_pipe = vector<command>;
void
@@ -372,7 +383,7 @@ namespace build2
command_pipe pipe;
};
- using command_expr = vector<expr_term>;
+ using command_expr = small_vector<expr_term, 1>;
void
to_stream (ostream&, const command_expr&, command_to_stream);
@@ -380,6 +391,15 @@ namespace build2
ostream&
operator<< (ostream&, const command_expr&);
+ // Stack-allocated linked list of iteration indexes of the nested loops.
+ //
+ struct iteration_index
+ {
+ size_t index; // 1-based.
+
+ const iteration_index* prev; // NULL for the top-most loop.
+ };
+
struct timeout
{
duration value;
@@ -495,7 +515,8 @@ namespace build2
// Register a cleanup. If the cleanup is explicit, then override the
// cleanup type if this path is already registered. Ignore implicit
- // registration of a path outside root directory (see below).
+ // registration of a path outside sandbox directory, if specified (see
+ // above).
//
void
clean (cleanup, bool implicit);
@@ -535,7 +556,7 @@ namespace build2
// Set variable value with optional (non-empty) attributes.
//
virtual void
- set_variable (string&& name,
+ set_variable (string name,
names&&,
const string& attrs,
const location&) = 0;
@@ -568,6 +589,20 @@ namespace build2
~environment () = default;
};
+ // Custom command function that can be executed at the end of the
+ // pipeline. Should throw io_error on the underlying OS error.
+ //
+ // Note: the pipeline can be NULL (think of `for x <<<='foo'`).
+ //
+ struct pipe_command;
+
+ using command_function = void (environment&,
+ const strings& args,
+ auto_fd in,
+ pipe_command* pipeline,
+ const optional<deadline>&,
+ const location&);
+
// Helpers.
//
// Issue diagnostics with the specified prefix and fail if the string
@@ -584,6 +619,10 @@ namespace build2
verify_environment_var_assignment (const string&,
const char* prefix,
const location&);
+
+ // "Unhide" operator<< from the build2 namespace.
+ //
+ using build2::operator<<;
}
}
diff --git a/libbuild2/search.cxx b/libbuild2/search.cxx
index fca19ea..3bdb503 100644
--- a/libbuild2/search.cxx
+++ b/libbuild2/search.cxx
@@ -58,8 +58,11 @@ namespace build2
else
{
o = pk.scope->out_path ();
- o /= *tk.out;
- o.normalize ();
+ if (!tk.out->current ())
+ {
+ o /= *tk.out;
+ o.normalize ();
+ }
}
// Drop out if it is the same as src (in-src build).
@@ -86,6 +89,10 @@ namespace build2
const target_key& ctk (cpk.tk);
const scope* s (cpk.scope);
+ // Has to be a file target.
+ //
+ assert (ctk.type->is_a<file> ());
+
path f;
if (ctk.dir->absolute ())
@@ -165,11 +172,7 @@ namespace build2
// will be from the src tree.
//
// In the other two cases we use the prerequisite's out (in case it is
- // relative, we need to complete it, which is @@ OUT TODO). Note that we
- // blindly trust the user's value which can be used for some interesting
- // tricks, for example:
- //
- // ../cxx{foo}@./
+ // relative, we need to complete it).
//
dir_path out;
@@ -179,21 +182,39 @@ namespace build2
out = out_src (d, *s->root_scope ());
}
else
- out = *tk.out;
+ {
+ if (tk.out->absolute ())
+ out = *tk.out; // Already normalized.
+ else
+ {
+ out = pk.scope->out_path ();
+ if (!tk.out->current ())
+ {
+ out /= *tk.out;
+ out.normalize ();
+ }
+ }
+
+ // Drop out if it is the same as src (in-src build).
+ //
+ if (out == d)
+ out.clear ();
+ }
// Find or insert. Note that we are using our updated extension.
//
+ // More often insert than find, so skip find in insert().
+ //
auto r (ctx.targets.insert (*tk.type,
move (d),
move (out),
*tk.name,
ext,
target_decl::prereq_file,
- trace));
+ trace,
+ true /* skip_find */));
- // Has to be a file_target.
- //
- const file& t (dynamic_cast<const file&> (r.first));
+ const file& t (r.first.as<file> ());
l5 ([&]{trace << (r.second ? "new" : "existing") << " target " << t
<< " for prerequisite " << cpk;});
@@ -210,6 +231,12 @@ namespace build2
const target_key& tk (pk.tk);
+ // If out is present, then it means the target is in src and we shouldn't
+ // be creating new targets in src, should we? Feels like this should not
+ // even be called if out is not empty.
+ //
+ assert (tk.out->empty ());
+
// We default to the target in this directory scope.
//
dir_path d;
@@ -228,7 +255,7 @@ namespace build2
// Find or insert.
//
- // @@ OUT: same story as in search_existing_target() re out.
+ // More often insert than find, so skip find in insert().
//
auto r (ctx.targets.insert (*tk.type,
move (d),
@@ -236,7 +263,8 @@ namespace build2
*tk.name,
tk.ext,
target_decl::prereq_new,
- trace));
+ trace,
+ true /* skip_find */));
const target& t (r.first);
l5 ([&]{trace << (r.second ? "new" : "existing") << " target " << t
@@ -251,6 +279,12 @@ namespace build2
const target_key& tk (pk.tk);
+ // If out is present, then it means the target is in src and we shouldn't
+ // be creating new targets in src, should we? Feels like this should not
+ // even be called if out is not empty.
+ //
+ assert (tk.out->empty ());
+
// We default to the target in this directory scope.
//
dir_path d;
@@ -269,7 +303,7 @@ namespace build2
// Find or insert.
//
- // @@ OUT: same story as in search_existing_target() re out.
+ // More often insert than find, so skip find in insert_locked().
//
auto r (ctx.targets.insert_locked (*tk.type,
move (d),
@@ -277,7 +311,8 @@ namespace build2
*tk.name,
tk.ext,
target_decl::prereq_new,
- trace));
+ trace,
+ true /* skip_find */));
l5 ([&]
{
diag_record dr (trace);
diff --git a/libbuild2/target-key.hxx b/libbuild2/target-key.hxx
index f5351b8..9ac87dc 100644
--- a/libbuild2/target-key.hxx
+++ b/libbuild2/target-key.hxx
@@ -31,8 +31,16 @@ namespace build2
bool is_a () const {return type->is_a<T> ();}
bool is_a (const target_type& tt) const {return type->is_a (tt);}
+ // Return an "effective" name, for example, for pattern matching, that
+ // includes the extension where appropriate.
+ //
+ const string&
+ effective_name (string& storage, bool force_ext = false) const;
+
// Append/return the target name or a pair of names if out-qualified.
//
+ // See also target::as_name() for the returned name stability guarantees.
+ //
void
as_name (names&) const;
@@ -86,8 +94,21 @@ namespace build2
LIBBUILD2_SYMEXPORT ostream&
operator<< (ostream&, const target_key&);
- LIBBUILD2_SYMEXPORT ostream&
- to_stream (ostream&, const target_key&, optional<stream_verbosity> = nullopt);
+ // If name_only is true, then only print the target name (and extension, if
+ // necessary), without the directory or type.
+ //
+ // Return true if the result is regular, that is, in the
+ // <dir>/<type>{<name>}@<out>/ form with the individual components
+ // corresponding directly to the target_key members (that is, without moving
+ // parts around as would be the case for directories). This information is
+ // used when trying to print several targets in a combined form (for
+ // example, {hxx cxx}{foo}) in print_diag().
+ //
+ LIBBUILD2_SYMEXPORT bool
+ to_stream (ostream&,
+ const target_key&,
+ optional<stream_verbosity> = nullopt,
+ bool name_only = false);
}
namespace std
diff --git a/libbuild2/target-state.hxx b/libbuild2/target-state.hxx
index 3457b13..a6106f7 100644
--- a/libbuild2/target-state.hxx
+++ b/libbuild2/target-state.hxx
@@ -18,9 +18,14 @@ namespace build2
// Note that postponed is "greater" than unchanged since it may result in
// the changed state.
//
+ // Note also that value 0 is available to indicate absent/invalid state.
+ //
+ // NOTE: don't forget to also update operator<<(ostream,target_state) if
+ // changing anything here.
+ //
enum class target_state: uint8_t
{
- unknown,
+ unknown = 1,
unchanged,
postponed,
busy,
@@ -38,8 +43,14 @@ namespace build2
return l;
}
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, target_state); // target.cxx
+ LIBBUILD2_SYMEXPORT string
+ to_string (target_state); // target.cxx
+
+ inline ostream&
+ operator<< (ostream& o, target_state ts)
+ {
+ return o << to_string (ts);
+ }
}
#endif // LIBBUILD2_TARGET_STATE_HXX
diff --git a/libbuild2/target-type.hxx b/libbuild2/target-type.hxx
index 5798766..a0fc5a2 100644
--- a/libbuild2/target-type.hxx
+++ b/libbuild2/target-type.hxx
@@ -89,11 +89,34 @@ namespace build2
const location&,
bool reverse);
- void (*print) (ostream&, const target_key&);
+ // See to_stream(ostream,target_key) for details.
+ //
+ bool (*print) (ostream&, const target_key&, bool name_only);
const target* (*search) (const target&, const prerequisite_key&);
- bool see_through; // A group with the default "see through" semantics.
+ // Target type flags.
+ //
+ // Note that the member_hint flag should only be used on groups with
+ // link-up during load (see lib{}, for example). In particular, if the
+ // group link-up only happens during match, then the hint would be looked
+ // up before the group is known.
+ //
+ // Note: consider exposing as an attribute in define if adding a new flag.
+ //
+ enum class flag: uint64_t
+ {
+ none = 0,
+ group = 0x01, // A (non-adhoc) group.
+ see_through = group | 0x02, // A group with "see through" semantics.
+ member_hint = group | 0x04, // Untyped rule hint applies to members.
+ dyn_members = group | 0x08 // A group with dynamic members.
+ };
+
+ flag flags;
+
+ bool
+ see_through () const;
template <typename T>
bool
@@ -102,14 +125,15 @@ namespace build2
bool
is_a (const target_type& tt) const
{
- return this == &tt || (base != nullptr && is_a_base (tt));
+ for (const target_type* b (this); b != nullptr; b = b->base)
+ if (b == &tt)
+ return true;
+
+ return false;
}
bool
is_a (const char*) const; // Defined in target.cxx
-
- bool
- is_a_base (const target_type&) const; // Defined in target.cxx
};
inline bool
@@ -124,6 +148,32 @@ namespace build2
inline ostream&
operator<< (ostream& os, const target_type& tt) {return os << tt.name;}
+ inline target_type::flag
+ operator&= (target_type::flag& x, target_type::flag y)
+ {
+ return x = static_cast<target_type::flag> (
+ static_cast<uint64_t> (x) & static_cast<uint64_t> (y));
+ }
+
+ inline target_type::flag
+ operator|= (target_type::flag& x, target_type::flag y)
+ {
+ return x = static_cast<target_type::flag> (
+ static_cast<uint64_t> (x) | static_cast<uint64_t> (y));
+ }
+
+ inline target_type::flag
+ operator& (target_type::flag x, target_type::flag y) {return x &= y;}
+
+ inline target_type::flag
+ operator| (target_type::flag x, target_type::flag y) {return x |= y;}
+
+ inline bool target_type::
+ see_through () const
+ {
+ return (flags & flag::see_through) == flag::see_through;
+ }
+
// Target type map.
//
class target_type_map
@@ -189,7 +239,7 @@ namespace build2
file_map_.emplace (n, tt);
}
- private:
+ public:
struct target_type_ref
{
// Like reference_wrapper except it sometimes deletes the target type.
@@ -201,7 +251,7 @@ namespace build2
target_type_ref (unique_ptr<target_type>&& p)
: p_ (p.release ()), d_ (true) {}
- target_type_ref (target_type_ref&& r)
+ target_type_ref (target_type_ref&& r) noexcept
: p_ (r.p_), d_ (r.d_) {r.p_ = nullptr;}
~target_type_ref () {if (p_ != nullptr && d_) delete p_;}
@@ -214,8 +264,17 @@ namespace build2
bool d_;
};
- map<string, target_type_ref> type_map_;
- map<string, reference_wrapper<const target_type>> file_map_;
+ using type_map = map<string, target_type_ref>;
+ using file_map = map<string, reference_wrapper<const target_type>>;
+
+ using type_iterator = type_map::const_iterator;
+
+ type_iterator type_begin () const {return type_map_.begin ();}
+ type_iterator type_end () const {return type_map_.end ();}
+
+ private:
+ type_map type_map_;
+ file_map file_map_;
};
}
diff --git a/libbuild2/target.cxx b/libbuild2/target.cxx
index bc5dbba..fb47b6d 100644
--- a/libbuild2/target.cxx
+++ b/libbuild2/target.cxx
@@ -22,26 +22,13 @@ namespace build2
bool target_type::
is_a (const char* n) const
{
- if (strcmp (name, n) == 0)
- return true;
-
- for (const target_type* b (base); b != nullptr; b = b->base)
+ for (const target_type* b (this); b != nullptr; b = b->base)
if (strcmp (b->name, n) == 0)
return true;
return false;
}
- bool target_type::
- is_a_base (const target_type& tt) const
- {
- for (const target_type* b (base); b != nullptr; b = b->base)
- if (*b == tt)
- return true;
-
- return false;
- }
-
// target_key
//
void target_key::
@@ -51,7 +38,9 @@ namespace build2
if (!name->empty ())
{
v = *name;
- target::combine_name (v, ext, false /* @@ TODO: what to do? */);
+ // @@ TMP: see also other calls to combine_name() -- need to fix.
+ //
+ target::combine_name (v, ext, false /* @@ TMP: what to do? */);
}
else
assert (!ext || ext->empty ()); // Unspecified or none.
@@ -69,6 +58,7 @@ namespace build2
//
static const char* const target_state_[] =
{
+ "<invalid>", // Absent/invalid (see target_state for details).
"unknown",
"unchanged",
"postponed",
@@ -78,10 +68,10 @@ namespace build2
"group"
};
- ostream&
- operator<< (ostream& os, target_state ts)
+ string
+ to_string (target_state ts)
{
- return os << target_state_[static_cast<uint8_t> (ts)];
+ return target_state_[static_cast<uint8_t> (ts)];
}
// target
@@ -91,7 +81,6 @@ namespace build2
target::
~target ()
{
- clear_data ();
}
const string& target::
@@ -122,27 +111,33 @@ namespace build2
group_view target::
group_members (action) const
{
- assert (false); // Not a group or doesn't expose its members.
+ // Not a group or doesn't expose its members.
+ //
return group_view {nullptr, 0};
}
const scope& target::
- base_scope () const
+ base_scope_impl () const
{
// If this target is from the src tree, use its out directory to find
// the scope.
//
- return ctx.scopes.find_out (out_dir ());
- }
+ const scope& s (ctx.scopes.find_out (out_dir ()));
- const scope& target::
- root_scope () const
- {
- // This is tricky to cache so we do the lookup for now.
+ // Cache unless we are in the load phase.
//
- const scope* r (base_scope ().root_scope ());
- assert (r != nullptr);
- return *r;
+ if (ctx.phase != run_phase::load)
+ {
+ const scope* e (nullptr);
+ if (!base_scope_.compare_exchange_strong (
+ e,
+ &s,
+ memory_order_release,
+ memory_order_consume))
+ assert (e == &s);
+ }
+
+ return s;
}
pair<lookup, size_t> target::
@@ -166,6 +161,11 @@ namespace build2
{
++r.second;
+ // While we went back to not treating the first member as a group for
+ // variable lookup, let's keep this logic in case one day we end up with
+ // a separate ad hoc group target.
+ //
+#if 0
// In case of an ad hoc group, we may have to look in two groups.
//
if ((g1 = group) != nullptr)
@@ -183,6 +183,19 @@ namespace build2
}
}
}
+#else
+ // Skip looking up in the ad hoc group, which is semantically the
+ // first/primary member.
+ //
+ if ((g1 = group == nullptr
+ ? nullptr
+ : group->adhoc_group () ? group->group : group))
+ {
+ auto p (g1->vars.lookup (var));
+ if (p.first != nullptr)
+ r.first = lookup_type (*p.first, p.second, g1->vars);
+ }
+#endif
}
// Delegate to scope's lookup_original().
@@ -541,40 +554,127 @@ namespace build2
// include()
//
+ // See var_include documentation for details on what's going on here.
+ //
include_type
include_impl (action a,
const target& t,
const prerequisite& p,
- const target* m)
+ const target* m,
+ lookup* rl)
{
context& ctx (t.ctx);
include_type r (include_type::normal);
+ {
+ lookup l (p.vars[ctx.var_include]);
+
+ if (l.defined ())
+ {
+ if (l->null)
+ {
+ // @@ TMP (added in 0.16.0).
+ //
+ warn << "null " << *ctx.var_include << " variable value specified "
+ << "for prerequisite " << p <<
+ info << "treated as undefined for backwards compatibility" <<
+ info << "this warning will become error in the future";
+ }
+ else
+ {
+ const string& v (cast<string> (*l));
- // If var_clean is defined, then it takes precedence over include for
- // the clean operation.
+ if (v == "false") r = include_type::excluded;
+ else if (v == "true") r = include_type::normal;
+ else if (v == "adhoc") r = include_type::adhoc;
+ else if (v == "posthoc") r = include_type::posthoc;
+ else
+ fail << "invalid " << *ctx.var_include << " variable value '"
+ << v << "' specified for prerequisite " << p;
+ }
+ }
+ }
+
+ // Handle operation-specific override (see var_include documentation
+ // for details).
//
lookup l;
- if (a.operation () == clean_id && (l = p.vars[ctx.var_clean]))
- {
- r = cast<bool> (l) ? include_type::normal : include_type::excluded;
- }
- else if (const string* v = cast_null<string> (p.vars[ctx.var_include]))
+ optional<bool> r1; // Absent means something other than true|false.
+
+ names storage;
+ names_view ns;
+ const variable* ovar (nullptr);
+
+ if (r != include_type::excluded)
{
- if (*v == "false") r = include_type::excluded;
- else if (*v == "adhoc") r = include_type::adhoc;
- else if (*v == "true") r = include_type::normal;
- else
- fail << "invalid " << ctx.var_include->name << " variable value "
- << "'" << *v << "' specified for prerequisite " << p;
+ // Instead of going via potentially expensive target::base_scope(), use
+ // the prerequisite's scope; while it may not be the same as the
+ // target's base scope, they must have the same root scope.
+ //
+ const scope& rs (*p.scope.root_scope ());
+
+ ovar = rs.root_extra->operations[
+ (a.outer ()
+ ? ctx.current_outer_oif
+ : ctx.current_inner_oif)->id].ovar;
+
+ if (ovar != nullptr)
+ {
+ l = p.vars[*ovar];
+
+ if (l.defined ())
+ {
+ if (l->null)
+ fail << "null " << *ovar << " variable value specified for "
+ << "prerequisite " << p;
+
+ // Maybe we should optimize this for the common cases (bool, path,
+ // name)? But then again we don't expect many such overrides. Plus
+ // will complicate the diagnostics below.
+ //
+ ns = reverse (*l, storage, true /* reduce */);
+
+ if (ns.size () == 1)
+ {
+ const name& n (ns[0]);
+
+ if (n.simple ())
+ {
+ const string& v (n.value);
+
+ if (v == "false")
+ r1 = false;
+ else if (v == "true")
+ r1 = true;
+ }
+ }
+
+ if (r1 && !*r1)
+ r = include_type::excluded;
+ }
+ }
}
// Call the meta-operation override, if any (currently used by dist).
//
- if (r != include_type::normal)
+ if (r != include_type::normal || l)
{
if (auto f = ctx.current_mif->include)
- r = f (a, t, prerequisite_member {p, m}, r);
+ r = f (a, t, prerequisite_member {p, m}, r, l);
+ }
+
+ if (l)
+ {
+ if (rl != nullptr)
+ *rl = l;
+ else if (!r1)
+ {
+ // Note: we have to delay this until the meta-operation callback above
+ // had a chance to override it.
+ //
+ fail << "unrecognized " << *ovar << " variable value '" << ns
+ << "' specified for prerequisite " << p;
+ }
}
return r;
@@ -585,7 +685,9 @@ namespace build2
const target* target_set::
find (const target_key& k, tracer& trace) const
{
- slock sl (mutex_);
+ bool load (ctx.phase == run_phase::load);
+
+ slock sl (mutex_, defer_lock); if (!load) sl.lock ();
map_type::const_iterator i (map_.find (k));
if (i == map_.end ())
@@ -604,14 +706,18 @@ namespace build2
// Between us releasing the shared lock and acquiring unique the
// extension could change and possibly a new target that matches the
// key could be inserted. In this case we simply re-run find ().
+ // Naturally, can't happen during load.
//
- sl.unlock ();
- ul = ulock (mutex_);
-
- if (ext) // Someone set the extension.
+ if (!load)
{
- ul.unlock ();
- return find (k, trace);
+ sl.unlock ();
+ ul = ulock (mutex_);
+
+ if (ext) // Someone set the extension.
+ {
+ ul.unlock ();
+ return find (k, trace);
+ }
}
}
@@ -645,10 +751,12 @@ namespace build2
string name,
optional<string> ext,
target_decl decl,
- tracer& trace)
+ tracer& trace,
+ bool skip_find,
+ bool need_lock)
{
target_key tk {&tt, &dir, &out, &name, move (ext)};
- target* t (const_cast<target*> (find (tk, trace)));
+ target* t (skip_find ? nullptr : const_cast<target*> (find (tk, trace)));
if (t == nullptr)
{
@@ -669,7 +777,9 @@ namespace build2
// case we proceed pretty much like find() except already under the
// exclusive lock.
//
- ulock ul (mutex_);
+ ulock ul (mutex_, defer_lock);
+ if (ctx.phase != run_phase::load || need_lock)
+ ul.lock ();
auto p (map_.emplace (target_key {&tt, &t->dir, &t->out, &t->name, e},
unique_ptr<target> (t)));
@@ -678,10 +788,28 @@ namespace build2
if (p.second)
{
+#if 0
+ {
+ size_t n (map_.bucket_count ());
+ if (n > buckets_)
+ {
+ text << "target_set buckets: " << buckets_ << " -> " << n
+ << " (" << map_.size () << ")";
+ buckets_ = n;
+ }
+ }
+#endif
+
t->ext_ = &i->first.ext;
t->decl = decl;
t->state.inner.target_ = t;
t->state.outer.target_ = t;
+ t->state.inner.vars.target_ = t;
+ t->state.outer.vars.target_ = t;
+
+ if (ctx.phase != run_phase::load && !need_lock)
+ ul.unlock ();
+
return pair<target&, ulock> (*t, move (ul));
}
@@ -733,9 +861,14 @@ namespace build2
static const optional<string> unknown_ext ("?");
- ostream&
- to_stream (ostream& os, const target_key& k, optional<stream_verbosity> osv)
+ bool
+ to_stream (ostream& os,
+ const target_key& k,
+ optional<stream_verbosity> osv,
+ bool name_only)
{
+ // Note: similar code in print_diag_impl(vector<target_key>).
+
stream_verbosity sv (osv ? *osv : stream_verb (os));
uint16_t dv (sv.path);
uint16_t ev (sv.extension);
@@ -745,22 +878,29 @@ namespace build2
//
bool n (!k.name->empty ());
- // Note: relative() returns empty for './'.
- //
- const dir_path& rd (dv < 1 ? relative (*k.dir) : *k.dir); // Relative.
- const dir_path& pd (n ? rd : rd.directory ()); // Parent.
+ const target_type& tt (*k.type);
- if (!pd.empty ())
+ dir_path rds; // Storage.
+ if (!name_only)
{
+ // Note: relative() returns empty for './'.
+ //
if (dv < 1)
- os << diag_relative (pd);
- else
- to_stream (os, pd, true /* representation */);
- }
+ rds = relative (*k.dir);
- const target_type& tt (*k.type);
+ const dir_path& rd (dv < 1 ? rds : *k.dir); // Relative.
+ const dir_path& pd (n ? rd : rd.directory ()); // Parent.
- os << tt.name << '{';
+ if (!pd.empty ())
+ {
+ if (dv < 1)
+ os << diag_relative (pd);
+ else
+ to_stream (os, pd, true /* representation */);
+ }
+
+ os << tt.name << '{';
+ }
if (n)
{
@@ -803,37 +943,47 @@ namespace build2
}
}
else
+ {
+ if (name_only && dv < 1) // Already done if !name_only.
+ rds = relative (*k.dir);
+
+ const dir_path& rd (dv < 1 ? rds : *k.dir);
+
to_stream (os,
rd.empty () ? dir_path (".") : rd.leaf (),
true /* representation */);
+ }
- os << '}';
-
- // If this target is from src, print its out.
- //
- if (!k.out->empty ())
+ if (!name_only)
{
- if (dv < 1)
+ os << '}';
+
+ // If this target is from src, print its out.
+ //
+ if (!k.out->empty ())
{
- // Don't print '@./'.
- //
- const string& o (diag_relative (*k.out, false));
+ if (dv < 1)
+ {
+ // Don't print '@./'.
+ //
+ const string& o (diag_relative (*k.out, false));
- if (!o.empty ())
- os << '@' << o;
+ if (!o.empty ())
+ os << '@' << o;
+ }
+ else
+ os << '@' << *k.out;
}
- else
- os << '@' << *k.out;
}
- return os;
+ return n; // Regular if we had the name.
}
ostream&
operator<< (ostream& os, const target_key& k)
{
if (auto p = k.type->print)
- p (os, k);
+ p (os, k, false /* name_only */);
else
to_stream (os, k, stream_verb (os));
@@ -859,9 +1009,10 @@ namespace build2
const opstate& s (state[action () /* inner */]);
// Note: already synchronized.
- size_t o (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
+ size_t c (s.task_count.load (memory_order_relaxed));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
- if (o != offset_applied && o != offset_executed)
+ if (c != (b + offset_applied) && c != (b + offset_executed))
break;
}
// Fall through.
@@ -1022,20 +1173,20 @@ namespace build2
return tk.ext->c_str ();
}
- void
- target_print_0_ext_verb (ostream& os, const target_key& k)
+ bool
+ target_print_0_ext_verb (ostream& os, const target_key& k, bool no)
{
stream_verbosity sv (stream_verb (os));
if (sv.extension == 1) sv.extension = 0; // Remap 1 to 0.
- to_stream (os, k, sv);
+ return to_stream (os, k, sv, no);
}
- void
- target_print_1_ext_verb (ostream& os, const target_key& k)
+ bool
+ target_print_1_ext_verb (ostream& os, const target_key& k, bool no)
{
stream_verbosity sv (stream_verb (os));
if (sv.extension == 0) sv.extension = 1; // Remap 0 to 1.
- to_stream (os, k, sv);
+ return to_stream (os, k, sv, no);
}
// type info
@@ -1051,7 +1202,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none,
};
const target_type mtime_target::static_type
@@ -1064,7 +1215,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
const target_type path_target::static_type
@@ -1077,7 +1228,7 @@ namespace build2
nullptr,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
const target_type file::static_type
@@ -1090,18 +1241,77 @@ namespace build2
nullptr, /* pattern */
&target_print_1_ext_verb, // Print extension even at verbosity level 0.
&file_search,
- false
+ target_type::flag::none
};
+ // group
+ //
+ group_view group::
+ group_members (action a) const
+ {
+ if (members_on == 0) // Not yet discovered.
+ return group_view {nullptr, 0};
+
+ // Members discovered during anything other than perform_update are only
+ // good for that operation. For example, we only return the static members
+ // ("representative sample") for perform_configure.
+ //
+ // We also re-discover the members on each update and clean not to
+ // overcomplicate the already twisted adhoc_buildscript_rule::apply()
+ // logic.
+ //
+ if (members_on != ctx.current_on)
+ {
+ if (members_action != perform_update_id ||
+ a == perform_update_id ||
+ a == perform_clean_id)
+ return group_view {nullptr, 0};
+ }
+
+ // Note that we may have no members (e.g., perform_configure and there are
+ // no static members). However, whether std::vector returns a non-NULL
+ // pointer in this case is undefined.
+ //
+ size_t n (members.size ());
+ return group_view {
+ n != 0
+ ? members.data ()
+ : reinterpret_cast<const target* const*> (this),
+ n};
+ }
+
+ const target_type group::static_type
+ {
+ "group",
+ &mtime_target::static_type,
+ &target_factory<group>,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ &target_search,
+ //
+ // Note that the dyn_members semantics is used not only to handle
+ // depdb-dyndep --dyn-target, but also pattern rule-static members.
+ //
+ target_type::flag::group | target_type::flag::dyn_members
+ };
+
+ // alias
+ //
static const target*
alias_search (const target& t, const prerequisite_key& pk)
{
// For an alias we don't want to silently create a target since it will do
// nothing and it most likely not what the user intended.
//
+ // But, allowing implied aliases seems harmless since all the alias does
+ // is pull its prerequisites. And they are handy to use as metadata
+ // carriers.
+ //
const target* e (search_existing_target (t.ctx, pk));
- if (e == nullptr || e->decl != target_decl::real)
+ if (e == nullptr || !(operator>= (e->decl, target_decl::implied)))
fail << "no explicit target for " << pk;
return e;
@@ -1117,7 +1327,7 @@ namespace build2
nullptr,
nullptr,
&alias_search,
- false
+ target_type::flag::none
};
// dir
@@ -1127,7 +1337,7 @@ namespace build2
{
try
{
- for (const dir_entry& e: dir_iterator (d, true /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::detect_dangling))
{
switch (e.type ())
{
@@ -1145,6 +1355,16 @@ namespace build2
break;
}
+ case entry_type::unknown:
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / e.path ();
+
+ break;
+ }
default:
break;
}
@@ -1166,9 +1386,10 @@ namespace build2
try
{
- for (const dir_entry& e: dir_iterator (d, true /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::detect_dangling))
{
if (e.type () == entry_type::directory)
+ {
r.push_back (
prerequisite (nullopt,
dir::static_type,
@@ -1177,6 +1398,15 @@ namespace build2
string (),
nullopt,
bs));
+ }
+ else if (e.type () == entry_type::unknown)
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / e.path ();
+ }
}
}
catch (const system_error& e)
@@ -1192,7 +1422,8 @@ namespace build2
{
tracer trace ("dir_search");
- // The first step is like in search_alias(): looks for an existing target.
+ // The first step is like in alias_search(): looks for an existing target
+ // (but unlike alias, no implied, think `test/: install=false`).
//
const target* e (search_existing_target (t.ctx, pk));
@@ -1333,7 +1564,7 @@ namespace build2
&dir_pattern,
nullptr,
&dir_search,
- false
+ target_type::flag::none
};
const target_type fsdir::static_type
@@ -1346,7 +1577,7 @@ namespace build2
&dir_pattern,
nullptr,
&target_search,
- false
+ target_type::flag::none
};
static optional<string>
@@ -1414,7 +1645,7 @@ namespace build2
#endif
nullptr,
&file_search,
- false
+ target_type::flag::none
};
static const char*
@@ -1500,7 +1731,7 @@ namespace build2
&buildfile_target_pattern,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
const target_type doc::static_type
@@ -1513,7 +1744,7 @@ namespace build2
nullptr, /* pattern */ // Same as file.
&target_print_1_ext_verb, // Same as file.
&file_search,
- false
+ target_type::flag::none
};
const target_type legal::static_type
@@ -1526,7 +1757,7 @@ namespace build2
nullptr, /* pattern */ // Same as file.
&target_print_1_ext_verb, // Same as file.
&file_search,
- false
+ target_type::flag::none
};
const target_type man::static_type
@@ -1539,7 +1770,7 @@ namespace build2
nullptr,
&target_print_1_ext_verb, // Print extension even at verbosity level 0.
&file_search,
- false
+ target_type::flag::none
};
extern const char man1_ext[] = "1"; // VC14 rejects constexpr.
@@ -1554,7 +1785,7 @@ namespace build2
&target_pattern_fix<man1_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
&file_search,
- false
+ target_type::flag::none
};
static const char*
@@ -1603,6 +1834,6 @@ namespace build2
&manifest_target_pattern,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
}
diff --git a/libbuild2/target.hxx b/libbuild2/target.hxx
index 1c10621..aa3df7f 100644
--- a/libbuild2/target.hxx
+++ b/libbuild2/target.hxx
@@ -4,8 +4,9 @@
#ifndef LIBBUILD2_TARGET_HXX
#define LIBBUILD2_TARGET_HXX
+#include <cstddef> // max_align_t
#include <iterator> // tags, etc.
-#include <type_traits> // aligned_storage
+#include <type_traits> // is_*
#include <unordered_map>
#include <libbutl/multi-index.hxx> // map_iterator_adapter
@@ -38,16 +39,19 @@ namespace build2
// Prerequisite inclusion/exclusion (see include() function below).
//
+ // Note that posthoc is handled internally and should normally be treated by
+ // the rules the same as excluded.
+ //
class include_type
{
public:
- enum value {excluded, adhoc, normal};
+ enum value {excluded, posthoc, adhoc, normal};
include_type (value v): v_ (v) {}
include_type (bool v): v_ (v ? normal : excluded) {}
operator value () const {return v_;}
- explicit operator bool () const {return v_ != excluded;}
+ explicit operator bool () const {return v_ == normal || v_ == adhoc;}
private:
value v_;
@@ -70,43 +74,176 @@ namespace build2
};
// List of prerequisites resolved to targets. Unless additional storage is
- // needed, it can be used as just vector<const target*> (which is what we
+ // needed, it can be treated as just vector<const target*> (which is what we
// used to have initially).
//
+ // The include member normally just indicates (in the first bit) whether
+ // this prerequisite is ad hoc. But it can also carry additional information
+ // (for example, from operation-specific override) in other bits (see below
+ // for details).
+ //
struct prerequisite_target
{
using target_type = build2::target;
prerequisite_target (const target_type* t, bool a = false, uintptr_t d = 0)
- : target (t), adhoc (a), data (d) {}
+ : target (t), include (a ? include_adhoc : 0), data (d) {}
prerequisite_target (const target_type* t, include_type a, uintptr_t d = 0)
: prerequisite_target (t, a == include_type::adhoc, d) {}
+ const target_type* target;
+
operator const target_type*& () {return target;}
operator const target_type* () const {return target;}
const target_type* operator-> () const {return target;}
- const target_type* target;
- bool adhoc; // True if include=adhoc.
- uintptr_t data;
+ // The first 8 bits are reserved with the first two having the following
+ // semantics:
+ //
+ // adhoc
+ //
+ // This prerequisite is ad hoc.
+ //
+ // udm
+ //
+ // This prerequisite is updated during match. Note that only static
+ // prerequisites that are updated during match should have this bit set
+ // (see dyndep_rule::*_existing_file() for details).
+ //
+ static const uintptr_t include_adhoc = 0x01;
+ static const uintptr_t include_udm = 0x02;
+
+ uintptr_t include;
+
+ bool adhoc () const {return (include & include_adhoc) != 0;}
+
+ // Auxiliary data.
+ //
+ uintptr_t data;
};
using prerequisite_targets = vector<prerequisite_target>;
- // A rule match is an element of hint_rule_map.
+ // A rule match is an element of name_rule_map.
//
using rule_match = pair<const string, reference_wrapper<const rule>>;
+ // A map of target type plus operation ids to rule hints (see name_rule_map
+ // for details on rule names and hints). The default_id serves as a fallback
+ // for update and clean operations.
+ //
+ // Note that for now hints are tried in the order specified and the first
+ // that matches, used.
+ //
+ struct rule_hints
+ {
+ // Return empty string if not found.
+ //
+ const string&
+ find (const target_type&, operation_id, bool untyped) const;
+
+ bool
+ empty () const {return map.empty ();}
+
+ // Note that insertion of an existing entry overrides the old value.
+ //
+ void
+ insert (const target_type*, operation_id, string);
+
+ struct value_type
+ {
+ const target_type* type;
+ operation_id operation;
+ string hint;
+ };
+
+ vector<value_type> map;
+ };
+
// Additional information about a rule match (see rule.hxx for details).
//
+ // Note that passing this information to a base rule's match() as-is may or
+ // may not be correct. If some changes must be made (for example, the
+ // fallback flag must be cleared), then that should be done by modifying
+ // (and restoring, if necessary) the passed instance rather than making a
+ // copy (which would not survive to apply()).
+ //
struct match_extra
{
- bool fallback; // True if matching a fallback rule.
- string buffer; // Auxiliary buffer that's reused during match/apply.
+ bool locked; // Normally true (see adhoc_rule::match() for background).
+ bool fallback; // True if matching a fallback rule (see match_rule()).
+
+ // Auxiliary data storage.
+ //
+ // A rule (whether matches or not) may use this pad to pass data between
+ // its match and apply functions (but not the recipe). The rule should
+ // static assert that the size of the pad is sufficient for its needs.
+ //
+ // This facility is complementary to the auxiliary data storage in target:
+ // it can store slightly more/extra data without dynamic memory allocation
+ // but can only be used during match/apply.
+ //
+ // Note also that a rule that delegates to another rule may not be able to
+ // use this mechanism fully since the delegated-to rule may also need the
+ // data storage.
+ //
+ static constexpr size_t data_size = (sizeof (string) > sizeof (void*) * 4
+ ? sizeof (string)
+ : sizeof (void*) * 4);
+
+ alignas (std::max_align_t) unsigned char data_[data_size];
+ void (*data_dtor_) (void*) = nullptr;
+
+ template <typename R,
+ typename T = typename std::remove_cv<
+ typename std::remove_reference<R>::type>::type>
+ typename std::enable_if<std::is_trivially_destructible<T>::value,T&>::type
+ data (R&& d)
+ {
+ assert (sizeof (T) <= data_size);
+ clear_data ();
+ return *new (&data_) T (forward<R> (d));
+ }
+
+ template <typename R,
+ typename T = typename std::remove_cv<
+ typename std::remove_reference<R>::type>::type>
+ typename std::enable_if<!std::is_trivially_destructible<T>::value,T&>::type
+ data (R&& d)
+ {
+ assert (sizeof (T) <= data_size);
+ clear_data ();
+ T& r (*new (&data_) T (forward<R> (d)));
+ data_dtor_ = [] (void* p) {static_cast<T*> (p)->~T ();};
+ return r;
+ }
+
+ template <typename T>
+ T&
+ data () {return *reinterpret_cast<T*> (&data_);}
+
+ template <typename T>
+ const T&
+ data () const {return *reinterpret_cast<const T*> (&data_);}
+
+ void
+ clear_data ()
+ {
+ if (data_dtor_ != nullptr)
+ {
+ data_dtor_ (&data_);
+ data_dtor_ = nullptr;
+ }
+ }
// Implementation details.
//
+ // NOTE: see match_rule() in algorithms.cxx if changing anything here.
+ //
public:
+ explicit
+ match_extra (bool l = true, bool f = false): locked (l), fallback (f) {}
+
void
init (bool fallback);
@@ -114,6 +251,11 @@ namespace build2
//
void
free ();
+
+ ~match_extra ()
+ {
+ clear_data ();
+ }
};
// Target.
@@ -126,17 +268,34 @@ namespace build2
// Note that the order of the enumerators is arranged so that their
// integral values indicate whether one "overrides" the other.
//
+ // We refer to the targets other than real and implied as
+ // dynamically-created or just dynamic.
+ //
// @@ We have cases (like pkg-config extraction) where it should probably be
// prereq_file rather than implied (also audit targets.insert<> calls).
//
+ // @@ Also, synthesized dependency declarations (e.g., in cc::link_rule) are
+ // fuzzy: they feel more `real` than `implied`. Maybe introduce
+ // `synthesized` in-between?
+ //
+ // @@ There are also now dynamically-discovered targets (ad hoc group
+ // members; see depdb-dyndep --dyn-target) which currently end up
+ // with prereq_new.
+ //
enum class target_decl: uint8_t
{
- prereq_new, // Created from prerequisite (create_new_target()).
- prereq_file, // Created from prerequisite/file (search_existing_file ()).
- implied, // Target-spec variable assignment, implicitly-entered, etc.
- real // Real dependency declaration.
+ prereq_new = 1, // Created from prerequisite (create_new_target()).
+ prereq_file, // Created from prerequisite/file (search_existing_file()).
+ implied, // Target-spec variable assignment, implicitly-entered, etc.
+ real // Real dependency declaration.
};
+ inline bool
+ operator>= (target_decl l, target_decl r)
+ {
+ return static_cast<uint8_t> (l) >= static_cast<uint8_t> (r);
+ }
+
class LIBBUILD2_SYMEXPORT target
{
public:
@@ -201,15 +360,15 @@ namespace build2
// obj{}).
//
// In an all-group, when a group is updated, normally all its members are
- // updates (and usually with a single command), though there could be some
+ // updated (and usually with a single command), though there could be some
// members that are omitted, depending on the configuration (e.g., an
// inline file not/being generated). When an all-group is mentioned as a
// prerequisite, the rule is usually interested in the individual members
- // rather than the whole group. For example, a C++ compile rule would like
- // to "see" the ?xx{} members when it gets a cli.cxx{} group.
+ // rather than the group target. For example, a C++ compile rule would
+ // like to "see" the ?xx{} members when it gets a cli.cxx{} group.
//
// Which brings us to the group iteration mode. The target type contains a
- // member called see_through that indicates whether the default iteration
+ // flag called see_through that indicates whether the default iteration
// mode for the group should be "see through"; that is, whether we see the
// members or the group itself. For the iteration support itself, see the
// *_prerequisite_members() machinery below.
@@ -221,9 +380,12 @@ namespace build2
//
// Note that the group-member link-up can happen anywhere between the
// member creation and rule matching so reading the group before the
- // member has been matched can be racy.
+ // member has been matched can be racy. However, once the member is linked
+ // up to the group, this relationship is immutable. As a result, one can
+ // atomically query the current value to see if already linked up (can be
+ // used as an optimization, to avoid deadlocks, etc).
//
- const target* group = nullptr;
+ relaxed_atomic<const target*> group = nullptr;
// What has been described above is an "explicit" group. That is, there is
// a dedicated target type that explicitly serves as a group and there is
@@ -256,7 +418,7 @@ namespace build2
// usually needed is to derive its path.
//
// - Unless declared, members are discovered lazily, they are only known
- // after the group's rule's apply() call.
+ // after the matching rule's apply() call.
//
// - Only declared members can be used as prerequisites but all can be
// used as targets (e.g., to set variables, etc).
@@ -266,6 +428,10 @@ namespace build2
// - Ad hoc group cannot have sub-groups (of any kind) though an ad hoc
// group can be a sub-group of an explicit group.
//
+ // - Member variable lookup skips the ad hoc group (since the group is the
+ // first member, this is normally what we want). But special semantics
+ // could be arranged; see var_backlink, for example.
+ //
// Note that ad hoc groups can be part of explicit groups. In a sense, we
// have a two-level grouping: an explicit group with its members each of
// which can be an ad hoc group. For example, lib{} contains libs{} which
@@ -274,6 +440,20 @@ namespace build2
// Use add_adhoc_member(), find_adhoc_member() from algorithms to manage
// ad hoc members.
//
+ // One conceptual issue we have with our ad hoc group implementation is
+ // that the behavior could be sensitive to the order in which the members
+ // are specified (due to the primary member logic). For example, unless we
+ // specify the header in the header/source group first, it will not be
+ // installed. Perhaps the solution is to synthesize a separate group
+ // target for the ad hoc members (with a special target type that rules
+ // like install could recognize). See also the variable lookup semantics.
+ // We could also probably support see_through via an attribute or some
+ // such. Or perhaps such cases should be handled through explicit groups
+ // and the ad hoc semantics is left to the non-see_through "primary
+ // targets with a bunch of subordinates" cases. In other words, if the
+ // members are "equal/symmetrical", then perhaps an explicit group is the
+ // correct approach.
+ //
const_ptr<target> adhoc_member = nullptr;
// Return true if this target is an ad hoc group (that is, its primary
@@ -299,7 +479,8 @@ namespace build2
public:
// Normally you should not call this function directly and rather use
- // resolve_members() from <libbuild2/algorithm.hxx>.
+ // resolve_members() from <libbuild2/algorithm.hxx>. Note that action
+ // is always inner.
//
virtual group_view
group_members (action) const;
@@ -315,6 +496,11 @@ namespace build2
target_key
key_locked () const;
+ // Note that the returned name is guaranteed to be "stable" (e.g., for
+ // hashing) only if the target has the extension assigned. This happens,
+ // for example, when a path is derived for a path-based target (which
+ // normally happens when such a target is matched for update).
+ //
names
as_name () const;
@@ -327,7 +513,16 @@ namespace build2
// Most qualified scope that contains this target.
//
const scope&
- base_scope () const;
+ base_scope () const
+ {
+ if (ctx.phase != run_phase::load)
+ {
+ if (const scope* s = base_scope_.load (memory_order_consume))
+ return *s;
+ }
+
+ return base_scope_impl ();
+ }
// Root scope of a project that contains this target. Note that
// a target can be out of any (known) project root in which case
@@ -335,7 +530,10 @@ namespace build2
// then use base_scope().root_scope() expression instead.
//
const scope&
- root_scope () const;
+ root_scope () const
+ {
+ return *base_scope ().root_scope ();
+ }
// Root scope of a bundle amalgamation that contains this target. The
// same notes as to root_scope() apply.
@@ -361,6 +559,16 @@ namespace build2
return out_dir ().sub (s.out_path ());
}
+ // Implementation details (use above functions instead).
+ //
+ // Base scope cached value. Invalidated every time we switch to the load
+ // phase (which is the only phase where we may insert new scopes).
+ //
+ mutable atomic<const scope*> base_scope_ {nullptr};
+
+ const scope&
+ base_scope_impl () const;
+
// Prerequisites.
//
// We use an atomic-empty semantics that allows one to "swap in" a set of
@@ -433,8 +641,9 @@ namespace build2
lookup_type
operator[] (const string& name) const
{
- const variable* var (ctx.var_pool.find (name));
- return var != nullptr ? operator[] (*var) : lookup_type ();
+ const scope& bs (base_scope ());
+ const variable* var (bs.var_pool ().find (name));
+ return var != nullptr ? lookup (*var, &bs).first : lookup_type ();
}
// As above but also return the depth at which the value is found. The
@@ -446,12 +655,14 @@ namespace build2
// earlier. If no value is found, then the depth is set to ~0.
//
pair<lookup_type, size_t>
- lookup (const variable& var) const
+ lookup (const variable& var, const scope* bs = nullptr) const
{
- auto p (lookup_original (var));
+ auto p (lookup_original (var, false, bs));
return var.overrides == nullptr
? p
- : base_scope ().lookup_override (var, move (p), true);
+ : (bs != nullptr
+ ? *bs
+ : base_scope ()).lookup_override (var, move (p), true);
}
// If target_only is true, then only look in target and its target group
@@ -476,6 +687,19 @@ namespace build2
value&
append (const variable&);
+
+ // Rule hints.
+ //
+ public:
+ build2::rule_hints rule_hints;
+
+ // Find the rule hint for the specified operation taking into account the
+ // target type/group. Note: racy with regards to the group link-up and
+ // should only be called when safe.
+ //
+ const string&
+ find_hint (operation_id) const;
+
// Ad hoc recipes.
//
public:
@@ -511,6 +735,12 @@ namespace build2
static const size_t offset_executed = 5; // Recipe has been executed.
static const size_t offset_busy = 6; // Match/execute in progress.
+ // @@ PERF There is a lot of data below that is only needed for "output"
+ // as opposed to "source" targets (data pads, prerequisite_targets,
+ // etc). Maybe we should move this stuff to an optional extra (like we
+ // have for the root scope). Maybe we could even allocate it as part of
+ // the target's memory block or some such?
+
// Inner/outer operation state. See <libbuild2/action.hxx> for details.
//
class LIBBUILD2_SYMEXPORT opstate
@@ -530,20 +760,32 @@ namespace build2
//
build2::match_extra match_extra;
- // Matched rule (pointer to hint_rule_map element). Note that in case of
+ // Matched rule (pointer to name_rule_map element). Note that in case of
// a direct recipe assignment we may not have a rule (NULL).
//
const rule_match* rule;
// Applied recipe.
//
- build2::recipe recipe;
+ // Note: also used as the auxiliary data storage during match, which is
+ // why mutable (see the target::data() API below for details). The
+ // default recipe_keep value is set by clear_target().
+ //
+ mutable build2::recipe recipe;
+ mutable bool recipe_keep; // Keep after execution.
+ bool recipe_group_action; // Recipe is group_action.
// Target state for this operation. Note that it is undetermined until
// a rule is matched and recipe applied (see set_recipe()).
//
target_state state;
+ // Set to true (only for the inner action) if this target has been
+ // matched but not executed as a result of the resolve_members() call.
+ // See also context::resolve_count.
+ //
+ bool resolve_counted;
+
// Rule-specific variables.
//
// The rule (for this action) has to be matched before these variables
@@ -551,8 +793,8 @@ namespace build2
// no iffy modifications of the group's variables by member's rules).
//
// They are also automatically cleared before another rule is matched,
- // similar to the data pad. In other words, rule-specific variables are
- // only valid for this match-execute phase.
+ // similar to the auxiliary data storage. In other words, rule-specific
+ // variables are only valid for this match-execute phase.
//
variable_map vars;
@@ -576,13 +818,6 @@ namespace build2
return operator[] (*var);
}
- lookup_type
- operator[] (const string& name) const
- {
- const variable* var (target_->ctx.var_pool.find (name));
- return var != nullptr ? operator[] (*var) : lookup_type ();
- }
-
// As above but also return the depth at which the value is found. The
// depth is calculated by adding 1 for each test performed. So a value
// that is from the rule will have depth 1. That from the target - 2,
@@ -611,14 +846,18 @@ namespace build2
value&
assign (const variable* var) {return vars.assign (var);} // For cached.
+ // Implementation details.
+ //
public:
explicit
- opstate (context& c): vars (c, false /* global */) {}
+ opstate (context& c): vars (variable_map::owner::target, &c) {}
private:
friend class target_set;
- const target* target_ = nullptr; // Back-pointer, set by target_set.
+ // Back-pointer, set by target_set along with vars.target_.
+ //
+ const target* target_ = nullptr;
};
action_state<opstate> state;
@@ -627,10 +866,13 @@ namespace build2
const opstate& operator[] (action a) const {return state[a];}
// Return true if the target has been matched for the specified action.
- // This function can only be called during execution.
+ // This function can only be called during the match or execute phases.
+ //
+ // If you need to observe something in the matched target (e.g., the
+ // matched rule or recipe), use memory_order_acquire.
//
bool
- matched (action) const;
+ matched (action, memory_order mo = memory_order_relaxed) const;
// This function can only be called during match if we have observed
// (synchronization-wise) that this target has been matched (i.e., the
@@ -639,7 +881,7 @@ namespace build2
target_state
matched_state (action, bool fail = true) const;
- // See try_match().
+ // See try_match_sync().
//
pair<bool, target_state>
try_matched_state (action, bool fail = true) const;
@@ -664,7 +906,7 @@ namespace build2
// matched for this action.
//
// Indicate whether there is a rule match with the first half of the
- // result (see try_match()).
+ // result (see try_match_sync()).
//
pair<bool, target_state>
matched_state_impl (action) const;
@@ -687,98 +929,217 @@ namespace build2
// NULL means the target should be skipped (or the rule may simply not add
// such a target to the list).
//
- // Note also that it is possible the target can vary from action to
- // action, just like recipes. We don't need to keep track of the action
- // here since the targets will be updated if the recipe is updated,
- // normally as part of rule::apply().
- //
- // Note that the recipe may modify this list.
+ // A rule should make sure that the target's prerequisite_targets are in
+ // the "canonical" form (that is, all the prerequisites that need to be
+ // executed are present with prerequisite_target::target pointing to the
+ // corresponding target). This is relied upon in a number of places,
+ // including in dump and to be able to pretend-execute the operation on
+ // this target without actually calling the recipe (see perform_execute(),
+ // resolve_members_impl() for background). Note that a rule should not
+ // store targets that are semantically prerequisites in an ad hoc manner
+ // (e.g., in match data) with a few well-known execeptions (see
+ // group_recipe and inner_recipe).
+ //
+ // Note that the recipe may modify this list during execute. Normally this
+ // would be just blanking out of ad hoc prerequisites, in which case check
+ // for ad hoc first and for not NULL second if accessing prerequisites of
+ // targets that you did not execute (see the library metadata protocol in
+ // cc for an example).
//
mutable action_state<build2::prerequisite_targets> prerequisite_targets;
- // Auxilary data storage.
+ // Auxiliary data storage.
//
// A rule that matches (i.e., returns true from its match() function) may
- // use this pad to pass data between its match and apply functions as well
- // as the recipe. After the recipe is executed, the data is destroyed by
- // calling data_dtor (if not NULL). The rule should static assert that the
- // size of the pad is sufficient for its needs.
- //
- // Note also that normally at least 2 extra pointers may be stored without
- // a dynamic allocation in the returned recipe (small object optimization
- // in std::function). So if you need to pass data only between apply() and
- // the recipe, then this might be a more convenient way.
- //
- // Note also that a rule that delegates to another rule may not be able to
- // use this mechanism fully since the delegated-to rule may also need the
- // data pad.
- //
- // Currenly the data is not destroyed until the next match.
+ // use this facility to pass data between its match and apply functions as
+ // well as the recipe. Specifically, between match() and apply() the data
+ // is stored in the recipe member (which is std::move_only_function-like).
+ // If the data needs to be passed on to the recipe, then it must become
+ // the recipe itself. Here is a typical arrangement:
+ //
+ // class compile_rule
+ // {
+ // struct match_data
+ // {
+ // ... // Data.
+ //
+ // const compile_rule& rule;
+ //
+ // target_state
+ // operator() (action a, const target& t)
+ // {
+ // return rule.perform_update (a, t, this);
+ // }
+ // };
+ //
+ // virtual bool
+ // match (action a, const target& t)
+ // {
+ // ... // Determine if matching.
+ //
+ // t.data (a, match_data {..., *this});
+ // return true;
+ // }
+ //
+ // virtual bool
+ // apply (action a, target& t)
+ // {
+ // match_data& md (t.data (a));
+ //
+ // ... // Match prerequisites, etc.
+ //
+ // return move (md); // Data becomes the recipe.
+ // }
+ //
+ // target_state
+ // perform_update (action a, const target& t, match_data& md) const
+ // {
+ // ... // Access data (also available as t.data<match_data> (a)).
+ // }
+ // };
+ //
+ // Note: see also similar facility in match_extra.
+ //
+ // After the recipe is executed, the recipe/data is destroyed, unless
+ // explicitly requested not to (see below). The rule may static assert
+ // that the small size of the storage (which doesn't require dynamic
+ // memory allocation) is sufficient for its needs.
+ //
+ // Note also that a rule that delegates to another rule may need to store
+ // the base rule's data/recipe in its own data/recipe.
+
+ // Provide the small object optimization size for the common compilers
+ // (see recipe.hxx for details) in case a rule wants to make sure its data
+ // won't require a dynamic memory allocation. Note that using a minimum
+ // generally available (2 pointers) is not always possible because the
+ // data size may depend on sizes of other compiler-specific types (e.g.,
+ // std::string).
+ //
+ static constexpr size_t small_data_size =
+#if defined(__GLIBCXX__)
+ sizeof (void*) * 2
+#elif defined(_LIBCPP_VERSION)
+ sizeof (void*) * 3
+#elif defined(_MSC_VER)
+ sizeof (void*) * 6
+#else
+ sizeof (void*) * 2 // Assume at least 2 pointers.
+#endif
+ ;
+
+ template <typename T>
+ struct data_wrapper
+ {
+ T d;
+
+ target_state
+ operator() (action, const target&) const // Never called.
+ {
+ return target_state::unknown;
+ }
+ };
+
+ // Avoid wrapping the data if it is already a recipe.
//
- // Note that the recipe may modify the data. Currently reserved for the
- // inner part of the action.
+ // Note that this techniques requires a fix for LWG issue 2132 (which all
+ // our minimum supported compiler versions appear to have).
//
- static constexpr size_t data_size = sizeof (string) * 16;
- mutable std::aligned_storage<data_size>::type data_pad;
-
- mutable void (*data_dtor) (void*) = nullptr;
+ template <typename T>
+ struct data_invocable: std::is_constructible<
+ std::function<recipe_function>,
+ std::reference_wrapper<typename std::remove_reference<T>::type>> {};
- template <typename R,
- typename T = typename std::remove_cv<
- typename std::remove_reference<R>::type>::type>
- typename std::enable_if<std::is_trivially_destructible<T>::value,T&>::type
- data (R&& d) const
+ template <typename T>
+ typename std::enable_if<!data_invocable<T>::value, void>::type
+ data (action a, T&& d) const
{
- assert (sizeof (T) <= data_size);
- clear_data ();
- return *new (&data_pad) T (forward<R> (d));
+ using V = typename std::remove_cv<
+ typename std::remove_reference<T>::type>::type;
+
+ const opstate& s (state[a]);
+ s.recipe = data_wrapper<V> {forward<T> (d)};
+ s.recipe_keep = false; // Can't keep non-recipe data.
}
- template <typename R,
- typename T = typename std::remove_cv<
- typename std::remove_reference<R>::type>::type>
- typename std::enable_if<!std::is_trivially_destructible<T>::value,T&>::type
- data (R&& d) const
+ template <typename T>
+ typename std::enable_if<!data_invocable<T>::value, T&>::type&
+ data (action a) const
{
- assert (sizeof (T) <= data_size);
- clear_data ();
- T& r (*new (&data_pad) T (forward<R> (d)));
- data_dtor = [] (void* p) {static_cast<T*> (p)->~T ();};
- return r;
+ using V = typename std::remove_cv<T>::type;
+ return state[a].recipe.target<data_wrapper<V>> ()->d;
}
+ // Note that in this case we don't strip const (the expectation is that we
+ // move the recipe in/out of data).
+ //
+ // If keep is true, then keep the recipe as data after execution. In
+ // particular, this can be used to communicate between inner/outer rules
+ // (see cc::install_rule for an example).
+ //
+ //
template <typename T>
- T&
- data () const {return *reinterpret_cast<T*> (&data_pad);}
+ typename std::enable_if<data_invocable<T>::value, void>::type
+ data (action a, T&& d, bool keep = false) const
+ {
+ const opstate& s (state[a]);
+ s.recipe = forward<T> (d);
+ s.recipe_keep = keep;
+ }
void
- clear_data () const
+ keep_data (action a, bool keep = true) const
{
- if (data_dtor != nullptr)
- {
- data_dtor (&data_pad);
- data_dtor = nullptr;
- }
+ state[a].recipe_keep = keep;
+ }
+
+ template <typename T>
+ typename std::enable_if<data_invocable<T>::value, T&>::type&
+ data (action a) const
+ {
+ return *state[a].recipe.target<T> ();
}
// Target type info and casting.
//
public:
const target*
- is_a (const target_type& tt) const {
- return type ().is_a (tt) ? this : nullptr;}
+ is_a (const target_type& tt) const
+ {
+ return type ().is_a (tt) ? this : nullptr;
+ }
template <typename T>
T*
- is_a () {return dynamic_cast<T*> (this);}
+ is_a ()
+ {
+ // At least with GCC we see slightly better and more consistent
+ // performance with our own type information.
+ //
+#if 0
+ return dynamic_cast<T*> (this);
+#else
+ // We can skip dynamically-derived type here (derived_type).
+ //
+ return dynamic_type->is_a<T> () ? static_cast<T*> (this) : nullptr;
+#endif
+ }
template <typename T>
const T*
- is_a () const {return dynamic_cast<const T*> (this);}
+ is_a () const
+ {
+#if 0
+ return dynamic_cast<const T*> (this);
+#else
+ return dynamic_type->is_a<T> () ? static_cast<const T*> (this) : nullptr;
+#endif
+ }
const target*
- is_a (const char* n) const {
- return type ().is_a (n) ? this : nullptr;}
+ is_a (const char* n) const
+ {
+ return type ().is_a (n) ? this : nullptr;
+ }
// Unchecked cast.
//
@@ -790,18 +1151,23 @@ namespace build2
const T&
as () const {return static_cast<const T&> (*this);}
- // Dynamic derivation to support define.
+ // Target type information.
+ //
+ // A derived target is expected to set dynamic_type to its static_type in
+ // its constructor body.
+ //
+ // We also have dynamic "derivation" support (e.g., via define in
+ // buildfile).
//
- const target_type* derived_type = nullptr;
-
const target_type&
type () const
{
- return derived_type != nullptr ? *derived_type : dynamic_type ();
+ return derived_type != nullptr ? *derived_type : *dynamic_type;
}
static const target_type static_type;
- virtual const target_type& dynamic_type () const = 0;
+ const target_type* dynamic_type;
+ const target_type* derived_type = nullptr;
// RW access.
//
@@ -830,13 +1196,19 @@ namespace build2
// Targets should be created via the targets set below.
//
- public:
+ protected:
+ friend class target_set;
+
target (context& c, dir_path d, dir_path o, string n)
: ctx (c),
dir (move (d)), out (move (o)), name (move (n)),
- vars (c, false /* global */),
- state (c) {}
+ vars (*this, false /* shared */),
+ state (c)
+ {
+ dynamic_type = &static_type;
+ }
+ public:
target (target&&) = delete;
target& operator= (target&&) = delete;
@@ -845,8 +1217,6 @@ namespace build2
virtual
~target ();
-
- friend class target_set;
};
// All targets are from the targets set below.
@@ -883,13 +1253,15 @@ namespace build2
// Helper for dealing with the prerequisite inclusion/exclusion (see
// var_include in context.hxx).
//
+ // If the lookup argument is not NULL, then it will be set to the operation-
+ // specific override, if present. Note that in this case the caller is
+ // expected to validate that the override value is valid (note: use the same
+ // diagnostics as in include() for consistency).
+ //
// Note that the include(prerequisite_member) overload is also provided.
//
include_type
- include (action,
- const target&,
- const prerequisite&,
- const target* = nullptr);
+ include (action, const target&, const prerequisite&, lookup* = nullptr);
// A "range" that presents the prerequisites of a group and one of
// its members as one continuous sequence, or, in other words, as
@@ -1072,7 +1444,8 @@ namespace build2
return member != nullptr ? member : prerequisite.target.load (mo);
}
- // Return as a new prerequisite instance.
+ // Return as a new prerequisite instance. Note that it includes a copy
+ // of prerequisite-specific variables.
//
prerequisite_type
as_prerequisite () const;
@@ -1102,11 +1475,8 @@ namespace build2
return os << pm.key ();
}
- inline include_type
- include (action a, const target& t, const prerequisite_member& pm)
- {
- return include (a, t, pm.prerequisite, pm.member);
- }
+ include_type
+ include (action, const target&, const prerequisite_member&, lookup* = nullptr);
// A "range" that presents a sequence of prerequisites (e.g., from
// group_prerequisites()) as a sequence of prerequisite_member's. For each
@@ -1139,10 +1509,19 @@ namespace build2
// See-through group members iteration mode. Ad hoc members must always
// be entered explicitly.
//
+ // Note that if the group is empty, then we see the group itself (rather
+ // than nothing). Failed that, an empty group would never be executed (e.g.,
+ // during clean) since there is no member to trigger the group execution.
+ // Other than that, it feels like seeing the group in this cases should be
+ // harmless (i.e., rules are generally prepared to see prerequisites they
+ // don't recognize).
+ //
enum class members_mode
{
- always, // Iterate over members, assert if not resolvable.
- maybe, // Iterate over members if resolvable, group otherwise.
+ always, // Iterate over members if not empty, group if empty, assert if
+ // not resolvable.
+ maybe, // Iterate over members if resolvable and not empty, group
+ // otherwise.
never // Iterate over group (can still use enter_group()).
};
@@ -1180,7 +1559,7 @@ namespace build2
{
if (r_->mode_ != members_mode::never &&
i_ != r_->e_ &&
- i_->type.see_through)
+ i_->type.see_through ())
switch_mode ();
}
@@ -1195,9 +1574,10 @@ namespace build2
leave_group ();
// Iterate over this group's members. Return false if the member
- // information is not available. Similar to leave_group(), you should
- // increment the iterator after calling this function (provided it
- // returned true).
+ // information is not available (note: return true if the group is
+ // empty). Similar to leave_group(), you should increment the iterator
+ // after calling this function provided group() returns true (see
+ // below).
//
bool
enter_group ();
@@ -1207,7 +1587,7 @@ namespace build2
//
// for (...; ++i)
// {
- // if (i->prerequisite.type.see_through)
+ // if (i->prerequisite.type.see_through ())
// {
// for (i.enter_group (); i.group (); )
// {
@@ -1272,8 +1652,7 @@ namespace build2
group_view g_;
size_t j_; // 1-based index, to support enter_group().
const target* k_; // Current member of ad hoc group or NULL.
- mutable typename std::aligned_storage<sizeof (value_type),
- alignof (value_type)>::type m_;
+ alignas (value_type) mutable unsigned char m_[sizeof (value_type)];
};
iterator
@@ -1390,7 +1769,7 @@ namespace build2
const dir_path& out,
const string& name) const
{
- slock l (mutex_);
+ slock l (mutex_, defer_lock); if (ctx.phase != run_phase::load) l.lock ();
auto i (map_.find (target_key {&type, &dir, &out, &name, nullopt}));
return i != map_.end () ? i->second.get () : nullptr;
}
@@ -1404,7 +1783,17 @@ namespace build2
// If the target was inserted, keep the map exclusive-locked and return
// the lock. In this case, the target is effectively still being created
- // since nobody can see it until the lock is released.
+ // since nobody can see it until the lock is released. Note that there
+ // is normally quite a bit of contention around this map so make sure to
+ // not hold the lock longer than absolutely necessary.
+ //
+ // If skip_find is true, then don't first try to find an existing target
+ // with a shared lock, instead going directly for the unique lock and
+ // insert. It's a good idea to pass true as this argument if you know the
+ // target is unlikely to be there.
+ //
+ // If need_lock is false, then release the lock (the target insertion is
+ // indicated by the presence of the associated mutex).
//
pair<target&, ulock>
insert_locked (const target_type&,
@@ -1413,8 +1802,13 @@ namespace build2
string name,
optional<string> ext,
target_decl,
- tracer&);
+ tracer&,
+ bool skip_find = false,
+ bool need_lock = true);
+ // As above but instead of the lock return an indication of whether the
+ // target was inserted.
+ //
pair<target&, bool>
insert (const target_type& tt,
dir_path dir,
@@ -1422,7 +1816,8 @@ namespace build2
string name,
optional<string> ext,
target_decl decl,
- tracer& t)
+ tracer& t,
+ bool skip_find = false)
{
auto p (insert_locked (tt,
move (dir),
@@ -1430,9 +1825,11 @@ namespace build2
move (name),
move (ext),
decl,
- t));
+ t,
+ skip_find,
+ false));
- return pair<target&, bool> (p.first, p.second.owns_lock ()); // Clang 3.7
+ return pair<target&, bool> (p.first, p.second.mutex () != nullptr);
}
// Note that the following versions always enter implied targets.
@@ -1444,7 +1841,8 @@ namespace build2
dir_path out,
string name,
optional<string> ext,
- tracer& t)
+ tracer& t,
+ bool skip_find = false)
{
return insert (tt,
move (dir),
@@ -1452,7 +1850,8 @@ namespace build2
move (name),
move (ext),
target_decl::implied,
- t).first.template as<T> ();
+ t,
+ skip_find).first.template as<T> ();
}
template <typename T>
@@ -1461,9 +1860,10 @@ namespace build2
const dir_path& out,
const string& name,
const optional<string>& ext,
- tracer& t)
+ tracer& t,
+ bool skip_find = false)
{
- return insert<T> (T::static_type, dir, out, name, ext, t);
+ return insert<T> (T::static_type, dir, out, name, ext, t, skip_find);
}
template <typename T>
@@ -1471,18 +1871,26 @@ namespace build2
insert (const dir_path& dir,
const dir_path& out,
const string& name,
- tracer& t)
+ tracer& t,
+ bool skip_find = false)
{
- return insert<T> (dir, out, name, nullopt, t);
+ return insert<T> (dir, out, name, nullopt, t, skip_find);
}
// Note: not MT-safe so can only be used during serial execution.
//
public:
- using iterator = butl::map_iterator_adapter<map_type::const_iterator>;
+ using iterator = butl::map_iterator_adapter<map_type::iterator>;
+ using const_iterator = butl::map_iterator_adapter<map_type::const_iterator>;
+
+ iterator begin () {return map_.begin ();}
+ iterator end () {return map_.end ();}
- iterator begin () const {return map_.begin ();}
- iterator end () const {return map_.end ();}
+ const_iterator begin () const {return map_.begin ();}
+ const_iterator end () const {return map_.end ();}
+
+ size_t
+ size () const {return map_.size ();}
void
clear () {map_.clear ();}
@@ -1498,6 +1906,10 @@ namespace build2
mutable shared_mutex mutex_;
map_type map_;
+
+#if 0
+ size_t buckets_ = 0;
+#endif
};
// Modification time-based target.
@@ -1505,9 +1917,13 @@ namespace build2
class LIBBUILD2_SYMEXPORT mtime_target: public target
{
public:
- using target::target;
+ mtime_target (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
- // Modification time is an "atomic cash". That is, it can be set at any
+ // Modification time is an "atomic cache". That is, it can be set at any
// time (including on a const instance) and we assume everything will be
// ok regardless of the order in which racing updates happen because we do
// not modify the external state (which is the source of timestemps) while
@@ -1540,8 +1956,7 @@ namespace build2
// If the mtime is unknown, then load it from the filesystem also caching
// the result.
//
- // Note: can only be called during executing and must not be used if the
- // target state is group.
+ // Note: must not be used if the target state is group.
//
timestamp
load_mtime (const path&) const;
@@ -1592,11 +2007,17 @@ namespace build2
class LIBBUILD2_SYMEXPORT path_target: public mtime_target
{
public:
- using mtime_target::mtime_target;
+ path_target (context& c, dir_path d, dir_path o, string n)
+ : mtime_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
using path_type = build2::path;
- // Target path is an "atomic consistent cash". That is, it can be set at
+ // Target path. Must be absolute and normalized.
+ //
+ // Target path is an "atomic consistent cache". That is, it can be set at
// any time (including on a const instance) but any subsequent updates
// must set the same path. Or, in other words, once the path is set, it
// never changes.
@@ -1625,7 +2046,7 @@ namespace build2
// the path_mtime() function to do it in the correct order.
//
const path_type&
- path () const;
+ path (memory_order = memory_order_acquire) const;
const path_type&
path (path_type) const;
@@ -1739,11 +2160,62 @@ namespace build2
class LIBBUILD2_SYMEXPORT file: public path_target
{
public:
- using path_target::path_target;
+ file (context& c, dir_path d, dir_path o, string n)
+ : path_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // Mtime-based group target.
+ //
+ // Used to support explicit groups in buildfiles: can be derived from,
+ // populated with static members using the group{foo}<...> syntax, and
+ // matched with an ad hoc recipe/rule, including dynamic member extraction.
+ // Note that it is not see-through but a derived group can be made see-
+ // through via the [see_through] attribute.
+ //
+ // Note also that you shouldn't use it as a base for a custom group defined
+ // in C++, instead deriving from mtime_target directly and using a custom
+ // members layout more appropriate for the group's semantics. To put it
+ // another way, a group-based target should only be matched by an ad hoc
+ // recipe/rule (see match_rule() in algorithms.cxx for details).
+ //
+ class LIBBUILD2_SYMEXPORT group: public mtime_target
+ {
+ public:
+ vector<reference_wrapper<const target>> static_members;
+
+ // Note: we expect no NULL entries in members.
+ //
+ vector<const target*> members; // Layout compatible with group_view.
+ action members_action; // Action on which members were resolved.
+ size_t members_on = 0; // Operation number on which members were resolved.
+ size_t members_static; // Number of static ones in members (always first).
+
+ void
+ reset_members (action a)
+ {
+ members.clear ();
+ members_action = a;
+ members_on = ctx.current_on;
+ members_static = 0;
+ }
+
+ virtual group_view
+ group_members (action) const override;
+
+ group (context& c, dir_path d, dir_path o, string n)
+ : mtime_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Alias target. It represents a list of targets (its prerequisites)
@@ -1752,11 +2224,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT alias: public target
{
public:
- using target::target;
+ alias (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Directory target. Note that this is not a filesystem directory
@@ -1766,11 +2241,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT dir: public alias
{
public:
- using alias::alias;
+ dir (context& c, dir_path d, dir_path o, string n)
+ : alias (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
public:
template <typename K>
@@ -1799,11 +2277,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT fsdir: public target
{
public:
- using target::target;
+ fsdir (context& c, dir_path d, dir_path o, string n)
+ : target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Executable file (not necessarily binary, though we do fallback to the
@@ -1813,7 +2294,11 @@ namespace build2
class LIBBUILD2_SYMEXPORT exe: public file
{
public:
- using file::file;
+ exe (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
using process_path_type = build2::process_path;
@@ -1841,7 +2326,6 @@ namespace build2
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
private:
process_path_type process_path_;
@@ -1850,11 +2334,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT buildfile: public file
{
public:
- using file::file;
+ buildfile (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Common documentation file target.
@@ -1862,11 +2349,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT doc: public file
{
public:
- using file::file;
+ doc (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Legal files (LICENSE, AUTHORS, COPYRIGHT, etc).
@@ -1874,11 +2364,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT legal: public doc
{
public:
- using doc::doc;
+ legal (context& c, dir_path d, dir_path o, string n)
+ : doc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// The problem with man pages is this: different platforms have
@@ -1918,21 +2411,27 @@ namespace build2
class LIBBUILD2_SYMEXPORT man: public doc
{
public:
- using doc::doc;
+ man (context& c, dir_path d, dir_path o, string n)
+ : doc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
class LIBBUILD2_SYMEXPORT man1: public man
{
public:
- using man::man;
+ man1 (context& c, dir_path d, dir_path o, string n)
+ : man (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// We derive manifest from doc rather than file so that it get automatically
@@ -1943,11 +2442,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT manifest: public doc
{
public:
- using doc::doc;
+ manifest (context& c, dir_path d, dir_path o, string n)
+ : doc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
// Common implementation of the target factory, extension, and search
@@ -1998,14 +2500,14 @@ namespace build2
// Target type uses the extension but it is fixed and there is no use
// printing it (e.g., man1{}).
//
- LIBBUILD2_SYMEXPORT void
- target_print_0_ext_verb (ostream&, const target_key&);
+ LIBBUILD2_SYMEXPORT bool
+ target_print_0_ext_verb (ostream&, const target_key&, bool);
// Target type uses the extension and there is normally no default so it
// should be printed (e.g., file{}).
//
- LIBBUILD2_SYMEXPORT void
- target_print_1_ext_verb (ostream&, const target_key&);
+ LIBBUILD2_SYMEXPORT bool
+ target_print_1_ext_verb (ostream&, const target_key&, bool);
// The default behavior, that is, look for an existing target in the
// prerequisite's directory scope.
diff --git a/libbuild2/target.ixx b/libbuild2/target.ixx
index 50750ca..f0d5cea 100644
--- a/libbuild2/target.ixx
+++ b/libbuild2/target.ixx
@@ -3,26 +3,149 @@
#include <cstring> // memcpy()
-#include <libbuild2/filesystem.hxx> // mtime()
-
#include <libbuild2/export.hxx>
namespace build2
{
+ LIBBUILD2_SYMEXPORT timestamp
+ mtime (const char*); // filesystem.cxx
+
+ // target_key
+ //
+ inline const string& target_key::
+ effective_name (string& r, bool force_ext) const
+ {
+ const target_type& tt (*type);
+
+ // Note that if the name is not empty, then we always use that, even
+ // if the type is dir/fsdir.
+ //
+ if (name->empty () && (tt.is_a<build2::dir> () || tt.is_a<fsdir> ()))
+ {
+ r = dir->leaf ().string ();
+ }
+ // If we have the extension and the type expects the extension to be
+ // always specified explicitly by the user, then add it to the name.
+ //
+ // Overall, we have the following cases:
+ //
+ // 1. Extension is fixed: man1{}.
+ //
+ // 2. Extension is always specified by the user: file{}.
+ //
+ // 3. Default extension that may be overridden by the user: hxx{}.
+ //
+ // 4. Extension assigned by the rule but may be overridden by the
+ // user: obje{}.
+ //
+ // By default we only include the extension for (2).
+ //
+ else if (ext && !ext->empty () &&
+ (force_ext ||
+ tt.fixed_extension == &target_extension_none ||
+ tt.fixed_extension == &target_extension_must))
+ {
+ r = *name + '.' + *ext;
+ }
+ else
+ return *name; // Use name as is.
+
+ return r;
+ }
+
+ // rule_hints
+ //
+ inline const string& rule_hints::
+ find (const target_type& tt, operation_id o, bool ut) const
+ {
+ // Look for fallback during the same iteration.
+ //
+ const value_type* f (nullptr);
+
+ for (const value_type& v: map)
+ {
+ if (!(v.type == nullptr ? ut : tt.is_a (*v.type)))
+ continue;
+
+ if (v.operation == o)
+ return v.hint;
+
+ if (f == nullptr &&
+ v.operation == default_id &&
+ (o == update_id || o == clean_id))
+ f = &v;
+ }
+
+ return f != nullptr ? f->hint : empty_string;
+ }
+
+ inline void rule_hints::
+ insert (const target_type* tt, operation_id o, string h)
+ {
+ auto i (find_if (map.begin (), map.end (),
+ [tt, o] (const value_type& v)
+ {
+ return v.operation == o && v.type == tt;
+ }));
+
+ if (i == map.end ())
+ map.push_back (value_type {tt, o, move (h)});
+ else
+ i->hint = move (h);
+ }
+
+ inline const string& target::
+ find_hint (operation_id o) const
+ {
+ using flag = target_type::flag;
+
+ const target_type& tt (type ());
+
+ // First check the target itself.
+ //
+ if (!rule_hints.empty ())
+ {
+ // If this is a group that "gave" its untyped hints to the members, then
+ // ignore untyped entries.
+ //
+ bool ut ((tt.flags & flag::member_hint) != flag::member_hint);
+
+ const string& r (rule_hints.find (tt, o, ut));
+ if (!r.empty ())
+ return r;
+ }
+
+ // Then check the group.
+ //
+ if (const target* g = group)
+ {
+ if (!g->rule_hints.empty ())
+ {
+ // If the group "gave" its untyped hints to the members, then don't
+ // ignore untyped entries.
+ //
+ bool ut ((g->type ().flags & flag::member_hint) == flag::member_hint);
+
+ return g->rule_hints.find (tt, o, ut);
+ }
+ }
+
+ return empty_string;
+ }
+
// match_extra
//
inline void match_extra::
init (bool f)
{
+ clear_data ();
fallback = f;
- buffer.clear ();
}
inline void match_extra::
free ()
{
- string s;
- buffer.swap (s);
+ clear_data ();
}
// target
@@ -112,19 +235,31 @@ namespace build2
}
inline bool target::
- matched (action a) const
+ matched (action a, memory_order mo) const
{
- assert (ctx.phase == run_phase::execute);
+ assert (ctx.phase == run_phase::match ||
+ ctx.phase == run_phase::execute);
- const opstate& s (state[a]);
-
- // Note that while the target could be being executed, we should see at
- // least offset_matched since it must have been "achieved" before the
- // phase switch.
- //
- size_t c (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
+ size_t c (state[a].task_count.load (mo));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
- return c >= offset_matched;
+ if (ctx.phase == run_phase::match)
+ {
+ // While it will normally be applied, it could also be already executed.
+ //
+ // Note that we can't do >= offset_applied since offset_busy means it is
+ // being matched.
+ //
+ return c == (b + offset_applied) || c == (b + offset_executed);
+ }
+ else
+ {
+ // Note that while the target could be being executed, we should see at
+ // least offset_matched since it must have been "achieved" before the
+ // phase switch.
+ //
+ return c >= (b + offset_matched);
+ }
}
LIBBUILD2_SYMEXPORT target_state
@@ -137,16 +272,19 @@ namespace build2
// raw state is not group provided the recipe is group_recipe and the
// state is unknown (see mtime() for a discussion on why we do it).
//
+ // Note that additionally s.state may not be target_state::group even
+ // after execution due to deferment (see execute_impl() for details).
+ //
+ // @@ Hm, I wonder why not just return s.recipe_group_action now that we
+ // cache it.
+ //
const opstate& s (state[a]);
if (s.state == target_state::group)
return true;
if (s.state == target_state::unknown && group != nullptr)
- {
- if (recipe_function* const* f = s.recipe.target<recipe_function*> ())
- return *f == &group_action;
- }
+ return s.recipe_group_action;
return false;
}
@@ -160,15 +298,18 @@ namespace build2
// Note: already synchronized.
//
- size_t o (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
+ size_t c (s.task_count.load (memory_order_relaxed));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
- if (o == offset_tried)
+ if (c == (b + offset_tried))
return make_pair (false, target_state::unknown);
else
{
- // Normally applied but can also be already executed.
+ // Normally applied but can also be already executed. Note that in the
+ // latter case we are guaranteed to be synchronized since we are in the
+ // match phase.
//
- assert (o == offset_applied || o == offset_executed);
+ assert (c == (b + offset_applied) || c == (b + offset_executed));
return make_pair (true, (group_state (a) ? group->state[a] : s).state);
}
}
@@ -287,15 +428,27 @@ namespace build2
// include()
//
LIBBUILD2_SYMEXPORT include_type
- include_impl (action, const target&, const prerequisite&, const target*);
+ include_impl (action, const target&,
+ const prerequisite&, const target*,
+ lookup*);
inline include_type
- include (action a, const target& t, const prerequisite& p, const target* m)
+ include (action a, const target& t, const prerequisite& p, lookup* l)
{
// Most of the time no prerequisite-specific variables will be specified,
// so let's optimize for that.
//
- return p.vars.empty () ? include_type (true) : include_impl (a, t, p, m);
+ return p.vars.empty ()
+ ? include_type (true)
+ : include_impl (a, t, p, nullptr, l);
+ }
+
+ inline include_type
+ include (action a, const target& t, const prerequisite_member& pm, lookup* l)
+ {
+ return pm.prerequisite.vars.empty ()
+ ? include_type (true)
+ : include_impl (a, t, pm.prerequisite, pm.member, l);
}
// group_prerequisites
@@ -380,7 +533,12 @@ namespace build2
//
assert (!member->adhoc_group_member ());
- return prerequisite_type (*member);
+ // Feels like copying the prerequisite's variables to member is more
+ // correct than not (consider for_install, for example).
+ //
+ prerequisite_type p (*member);
+ p.vars = prerequisite.vars;
+ return p;
}
inline prerequisite_key prerequisite_member::
@@ -413,6 +571,25 @@ namespace build2
}
template <typename T>
+ inline void prerequisite_members_range<T>::iterator::
+ switch_mode ()
+ {
+ g_ = resolve_members (*i_);
+
+ if (g_.members != nullptr)
+ {
+ // See empty see through groups as groups.
+ //
+ for (j_ = 1; j_ <= g_.count && g_.members[j_ - 1] == nullptr; ++j_) ;
+
+ if (j_ > g_.count)
+ g_.count = 0;
+ }
+ else
+ assert (r_->mode_ != members_mode::always); // Group can't be resolved.
+ }
+
+ template <typename T>
inline auto prerequisite_members_range<T>::iterator::
operator++ () -> iterator&
{
@@ -437,7 +614,7 @@ namespace build2
if (r_->mode_ != members_mode::never &&
i_ != r_->e_ &&
- i_->type.see_through)
+ i_->type.see_through ())
switch_mode ();
}
@@ -544,15 +721,20 @@ namespace build2
inline timestamp mtime_target::
load_mtime (const path& p) const
{
- assert (ctx.phase == run_phase::execute &&
- !group_state (action () /* inner */));
+ // We can only enforce "not group state" during the execute phase. During
+ // match (e.g., the target is being matched), we will just have to pay
+ // attention.
+ //
+ assert (ctx.phase == run_phase::match ||
+ (ctx.phase == run_phase::execute &&
+ !group_state (action () /* inner */)));
duration::rep r (mtime_.load (memory_order_consume));
if (r == timestamp_unknown_rep)
{
assert (!p.empty ());
- r = build2::mtime (p).time_since_epoch ().count ();
+ r = build2::mtime (p.string ().c_str ()).time_since_epoch ().count ();
mtime_.store (r, memory_order_release);
}
@@ -582,13 +764,13 @@ namespace build2
// path_target
//
inline const path& path_target::
- path () const
+ path (memory_order mo) const
{
// You may be wondering why don't we spin the transition out? The reason
// is it shouldn't matter since were we called just a moment earlier, we
// wouldn't have seen it.
//
- return path_state_.load (memory_order_acquire) == 2 ? path_ : empty_path;
+ return path_state_.load (mo) == 2 ? path_ : empty_path;
}
inline const path& path_target::
diff --git a/libbuild2/target.txx b/libbuild2/target.txx
index 5b48ad1..976d204 100644
--- a/libbuild2/target.txx
+++ b/libbuild2/target.txx
@@ -1,46 +1,11 @@
// file : libbuild2/target.txx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <libbutl/filesystem.hxx> // dir_iterator
-
#include <libbuild2/scope.hxx>
#include <libbuild2/diagnostics.hxx>
namespace build2
{
- // prerequisite_members_range
- //
- template <typename T>
- void prerequisite_members_range<T>::iterator::
- switch_mode ()
- {
- // A group could be empty, so we may have to iterate.
- //
- do
- {
- g_ = resolve_members (*i_);
-
- // Group could not be resolved.
- //
- if (g_.members == nullptr)
- {
- assert (r_->mode_ != members_mode::always);
- return;
- }
-
- // Skip empty see through groups.
- //
- for (j_ = 1; j_ <= g_.count && g_.members[j_ - 1] == nullptr; ++j_) ;
- if (j_ <= g_.count)
- break;
-
- g_.count = 0;
- }
- while (++i_ != r_->e_ && i_->type.see_through);
- }
-
- //
- //
template <const char* ext>
const char*
target_extension_fix (const target_key& tk, const scope*)
diff --git a/libbuild2/test/init.cxx b/libbuild2/test/init.cxx
index 539cdec..b7cf25f 100644
--- a/libbuild2/test/init.cxx
+++ b/libbuild2/test/init.cxx
@@ -30,15 +30,14 @@ namespace build2
l5 ([&]{trace << "for " << rs;});
- // Register our operations.
- //
- rs.insert_operation (test_id, op_test);
- rs.insert_operation (update_for_test_id, op_update_for_test);
-
// Enter module variables. Do it during boot in case they get assigned
// in bootstrap.build.
//
- auto& vp (rs.var_pool ());
+ // Most of the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
+ auto& pvp (rs.var_pool ()); // For `test` and `for_test`.
common_data d {
@@ -69,7 +68,7 @@ namespace build2
// The test variable is a name which can be a path (with the
// true/false special values) or a target name.
//
- vp.insert<name> ("test", variable_visibility::target),
+ pvp.insert<name> ("test", variable_visibility::target),
vp.insert<strings> ("test.options"),
vp.insert<strings> ("test.arguments"),
@@ -111,12 +110,12 @@ namespace build2
// This one is used by other modules/rules.
//
- vp.insert<bool> ("for_test", variable_visibility::prereq);
+ pvp.insert<bool> ("for_test", variable_visibility::prereq);
// These are only used in testscript.
//
- vp.insert<strings> ("test.redirects");
- vp.insert<strings> ("test.cleanups");
+ vp.insert<cmdline> ("test.redirects");
+ vp.insert<cmdline> ("test.cleanups");
// Unless already set, default test.target to build.host. Note that it
// can still be overriden by the user, e.g., in root.build.
@@ -125,9 +124,14 @@ namespace build2
value& v (rs.assign (d.test_target));
if (!v || v.empty ())
- v = cast<target_triplet> (rs.ctx.global_scope["build.host"]);
+ v = *rs.ctx.build_host;
}
+ // Register our operations.
+ //
+ rs.insert_operation (test_id, op_test, &d.var_test);
+ rs.insert_operation (update_for_test_id, op_update_for_test, &d.var_test);
+
extra.set_module (new module (move (d)));
}
diff --git a/libbuild2/test/operation.cxx b/libbuild2/test/operation.cxx
index 0a65bed..2535adb 100644
--- a/libbuild2/test/operation.cxx
+++ b/libbuild2/test/operation.cxx
@@ -17,11 +17,8 @@ namespace build2
namespace test
{
static operation_id
- test_pre (const values& params, meta_operation_id mo, const location& l)
+ pre_test (context&, const values&, meta_operation_id mo, const location&)
{
- if (!params.empty ())
- fail (l) << "unexpected parameters for operation test";
-
// Run update as a pre-operation, unless we are disfiguring.
//
return mo != disfigure_id ? update_id : 0;
@@ -67,7 +64,9 @@ namespace build2
"has nothing to test", // We cannot "be tested".
execution_mode::first,
1 /* concurrency */,
- &test_pre,
+ &pre_test,
+ nullptr,
+ nullptr,
nullptr,
nullptr,
&adhoc_apply
@@ -85,8 +84,10 @@ namespace build2
op_update.name_done,
op_update.mode,
op_update.concurrency,
- op_update.pre,
- op_update.post,
+ op_update.pre_operation,
+ op_update.post_operation,
+ op_update.operation_pre,
+ op_update.operation_post,
op_update.adhoc_match,
op_update.adhoc_apply
};
diff --git a/libbuild2/test/rule.cxx b/libbuild2/test/rule.cxx
index 06fb12f..81bf50a 100644
--- a/libbuild2/test/rule.cxx
+++ b/libbuild2/test/rule.cxx
@@ -30,7 +30,7 @@ namespace build2
namespace test
{
bool rule::
- match (action, target&, const string&) const
+ match (action, target&) const
{
// We always match, even if this target is not testable (so that we can
// ignore it; see apply()).
@@ -66,11 +66,11 @@ namespace build2
// Resolve group members.
//
- if (!see_through || t.type ().see_through)
+ if (!see_through_only || t.type ().see_through ())
{
// Remember that we are called twice: first during update for test
// (pre-operation) and then during test. During the former, we rely on
- // the normall update rule to resolve the group members. During the
+ // the normal update rule to resolve the group members. During the
// latter, there will be no rule to do this but the group will already
// have been resolved by the pre-operation.
//
@@ -540,11 +540,19 @@ namespace build2
if (verb)
{
- diag_record dr (text);
- dr << "test " << ts;
-
- if (!t.is_a<alias> ())
- dr << ' ' << t;
+ // If the target is an alias, then testscript itself is the
+ // target.
+ //
+ if (t.is_a<alias> ())
+ print_diag ("test", ts);
+ else
+ {
+ // In this case the test is really a combination of the target
+ // and testscript and using "->" feels off. Also, let's list the
+ // testscript after the target even though its a source.
+ //
+ print_diag ("test", t, ts, "+");
+ }
}
res.push_back (ctx.dry_run
@@ -555,22 +563,22 @@ namespace build2
{
scope_state& r (res.back ());
- if (!ctx.sched.async (ctx.count_busy (),
- t[a].task_count,
- [this] (const diag_frame* ds,
- scope_state& r,
- const target& t,
- const testscript& ts,
- const dir_path& wd)
- {
- diag_frame::stack_guard dsg (ds);
- r = perform_script_impl (t, ts, wd, *this);
- },
- diag_frame::stack (),
- ref (r),
- cref (t),
- cref (ts),
- cref (wd)))
+ if (!ctx.sched->async (ctx.count_busy (),
+ t[a].task_count,
+ [this] (const diag_frame* ds,
+ scope_state& r,
+ const target& t,
+ const testscript& ts,
+ const dir_path& wd)
+ {
+ diag_frame::stack_guard dsg (ds);
+ r = perform_script_impl (t, ts, wd, *this);
+ },
+ diag_frame::stack (),
+ ref (r),
+ cref (t),
+ cref (ts),
+ cref (wd)))
{
// Executed synchronously. If failed and we were not asked to
// keep going, bail out.
@@ -641,25 +649,50 @@ namespace build2
// Stack-allocated linked list of information about the running pipeline
// processes.
//
+ // Note: constructed incrementally.
+ //
struct pipe_process
{
- process& proc;
- const char* prog; // Only for diagnostics.
+ // Initially NULL. Set to the address of the process object when it is
+ // created. Reset back to NULL when the process is executed and its exit
+ // status is collected (see complete_pipe() for details).
+ //
+ process* proc = nullptr;
+
+ char const** args; // Only for diagnostics.
+
+ diag_buffer dbuf;
+ bool force_dbuf;
// True if this process has been terminated.
//
bool terminated = false;
- pipe_process* prev; // NULL for the left-most program.
+ // True if this process has been terminated but we failed to read out
+ // its stderr stream in the reasonable timeframe (2 seconds) after the
+ // termination.
+ //
+ // Note that this may happen if there is a still running child process
+ // of the terminated process which has inherited the parent's stderr
+ // file descriptor.
+ //
+ bool unread_stderr = false;
- pipe_process (process& p, const char* g, pipe_process* r)
- : proc (p), prog (g), prev (r) {}
+ pipe_process* prev; // NULL for the left-most program.
+ pipe_process* next; // Left-most program for the right-most program.
+
+ pipe_process (context& x,
+ char const** as,
+ bool fb,
+ pipe_process* p,
+ pipe_process* f)
+ : args (as), dbuf (x), force_dbuf (fb), prev (p), next (f) {}
};
- static bool
+ static void
run_test (const target& t,
- diag_record& dr,
char const** args,
+ int ofd,
const optional<timestamp>& deadline,
pipe_process* prev = nullptr)
{
@@ -669,14 +702,28 @@ namespace build2
for (next++; *next != nullptr; next++) ;
next++;
+ bool last (*next == nullptr);
+
// Redirect stdout to a pipe unless we are last.
//
- int out (*next != nullptr ? -1 : 1);
- bool pr;
+ int out (last ? ofd : -1);
- // Absent if the process misses the deadline.
+ // Propagate the pointer to the left-most program.
//
- optional<process_exit> pe;
+ // Also force diag buffering for the trailing diff process, so it's
+ // stderr is never printed if the test program fails (see
+ // complete_pipe() for details).
+ //
+ pipe_process pp (t.ctx,
+ args,
+ last && ofd == 2,
+ prev,
+ prev != nullptr ? prev->next : nullptr);
+
+ if (prev != nullptr)
+ prev->next = &pp;
+ else
+ pp.next = &pp; // Points to itself.
try
{
@@ -707,11 +754,11 @@ namespace build2
{
try
{
- p->proc.term ();
+ p->proc->term ();
}
catch (const process_error& e)
{
- dr << fail << "unable to terminate " << p->prog << ": " << e;
+ dr << fail << "unable to terminate " << p->args[0] << ": " << e;
}
p->terminated = true;
@@ -724,7 +771,7 @@ namespace build2
for (pipe_process* p (pp); p != nullptr; p = p->prev)
{
- process& pr (p->proc);
+ process& pr (*p->proc);
try
{
@@ -736,26 +783,310 @@ namespace build2
}
catch (const process_error& e)
{
- dr << fail << "unable to wait/kill " << p->prog << ": " << e;
+ dr << fail << "unable to wait/kill " << p->args[0] << ": " << e;
+ }
+ }
+ };
+
+ // Read out all the pipeline's buffered strerr streams watching for
+ // the deadline, if specified. If the deadline is reached, then
+ // terminate the whole pipeline, move the deadline by another 2
+ // seconds, and continue reading.
+ //
+ // Note that we assume that this timeout increment is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_process for the possible reasons), then we just set
+ // unread_stderr flag to true for such processes and bail out.
+ //
+ // Also note that this implementation is inspired by the
+ // script::run_pipe::read_pipe() lambda.
+ //
+ auto read_pipe = [&pp, &deadline, &term_pipe] ()
+ {
+ fdselect_set fds;
+ for (pipe_process* p (&pp); p != nullptr; p = p->prev)
+ {
+ diag_buffer& b (p->dbuf);
+
+ if (b.is.is_open ())
+ fds.emplace_back (b.is.fd (), p);
+ }
+
+ optional<timestamp> dl (deadline);
+ bool terminated (false);
+
+ for (size_t unread (fds.size ()); unread != 0;)
+ {
+ try
+ {
+ // If a deadline is specified, then pass the timeout to
+ // fdselect().
+ //
+ if (dl)
+ {
+ timestamp now (system_clock::now ());
+
+ if (*dl <= now || ifdselect (fds, *dl - now) == 0)
+ {
+ if (!terminated)
+ {
+ term_pipe (&pp);
+ terminated = true;
+
+ dl = system_clock::now () + chrono::seconds (2);
+ continue;
+ }
+ else
+ {
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ pipe_process* p (static_cast<pipe_process*> (s.data));
+
+ p->unread_stderr = true;
+
+ // Let's also close the stderr stream not to confuse
+ // diag_buffer::close() (see script::read() for
+ // details).
+ //
+ try
+ {
+ p->dbuf.is.close ();
+ }
+ catch (const io_error&) {}
+ }
+ }
+
+ break;
+ }
+ }
+ }
+ else
+ ifdselect (fds);
+
+ for (fdselect_state& s: fds)
+ {
+ if (s.ready)
+ {
+ pipe_process* p (static_cast<pipe_process*> (s.data));
+
+ if (!p->dbuf.read (p->force_dbuf))
+ {
+ s.fd = nullfd;
+ --unread;
+ }
+ }
+ }
+ }
+ catch (const io_error& e)
+ {
+ fail << "io error reading pipeline streams: " << e;
+ }
+ }
+ };
+
+ // Wait for the pipeline processes to complete, watching for the
+ // deadline, if specified. If the deadline is reached, then terminate
+ // the whole pipeline.
+ //
+ // Note: must be called after read_pipe().
+ //
+ auto wait_pipe = [&pp, &deadline, &timed_wait, &term_pipe] ()
+ {
+ for (pipe_process* p (&pp); p != nullptr; p = p->prev)
+ {
+ try
+ {
+ if (!deadline)
+ p->proc->wait ();
+ else if (!timed_wait (*p->proc, *deadline))
+ term_pipe (p);
+ }
+ catch (const process_error& e)
+ {
+ fail << "unable to wait " << p->args[0] << ": " << e;
+ }
+ }
+ };
+
+ // Iterate over the pipeline processes left to right, printing their
+ // stderr if buffered and issuing the diagnostics if the exit code is
+ // not available (terminated abnormally or due to a deadline), is
+ // non-zero, or stderr was not fully read. Afterwards, fail if any of
+ // such a faulty processes were encountered.
+ //
+ // Note that we only issue diagnostics for the first failure.
+ //
+ // Note: must be called after wait_pipe() and only once.
+ //
+ auto complete_pipe = [&pp, &t] ()
+ {
+ pipe_process* b (pp.next); // Left-most program.
+ assert (b != nullptr); // The lambda can only be called once.
+ pp.next = nullptr;
+
+ bool fail (false);
+ for (pipe_process* p (b); p != nullptr; p = p->next)
+ {
+ assert (p->proc != nullptr); // The lambda can only be called once.
+
+ // Collect the exit status, if present.
+ //
+ // Absent if the process misses the deadline.
+ //
+ optional<process_exit> pe;
+
+ const process& pr (*p->proc);
+
+#ifndef _WIN32
+ if (!(p->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->signal () == SIGTERM))
+#else
+ if (!(p->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->status == DBG_TERMINATE_PROCESS))
+#endif
+ pe = pr.exit;
+
+ p->proc = nullptr;
+
+ // Verify the exit status and issue the diagnostics on failure.
+ //
+ // Note that we only issue diagnostics for the first failure but
+ // continue iterating to reset process pointers to NULL. Also note
+ // that if the test program fails, then the potential diff's
+ // diagnostics is suppressed since it is always buffered.
+ //
+ if (!fail)
+ {
+ diag_record dr;
+
+ // Note that there can be a race, so that the process we have
+ // terminated due to reaching the deadline has in fact exited
+ // normally. Thus, the 'unread stderr' situation can also happen
+ // to a successfully terminated process. If that's the case, we
+ // report this problem as the main error and the secondary error
+ // otherwise.
+ //
+ if (!pe ||
+ !pe->normal () ||
+ pe->code () != 0 ||
+ p->unread_stderr)
+ {
+ fail = true;
+
+ dr << error << "test " << t << " failed" // Multi test: test 1.
+ << error << "process " << p->args[0] << ' ';
+
+ if (!pe)
+ {
+ dr << "terminated: execution timeout expired";
+
+ if (p->unread_stderr)
+ dr << error << "stderr not closed after exit";
+ }
+ else if (!pe->normal () || pe->code () != 0)
+ {
+ dr << *pe;
+
+ if (p->unread_stderr)
+ dr << error << "stderr not closed after exit";
+ }
+ else
+ {
+ assert (p->unread_stderr);
+
+ dr << "stderr not closed after exit";
+ }
+
+ if (verb == 1)
+ {
+ dr << info << "test command line: ";
+
+ for (pipe_process* p (b); p != nullptr; p = p->next)
+ {
+ if (p != b)
+ dr << " | ";
+
+ print_process (dr, p->args);
+ }
+ }
+ }
+
+ // Now print the buffered stderr, if present, and/or flush the
+ // diagnostics, if issued.
+ //
+ if (p->dbuf.is_open ())
+ p->dbuf.close (move (dr));
}
}
+
+ if (fail)
+ throw failed ();
};
- process p (prev == nullptr
- ? process (args, 0, out) // First process.
- : process (args, prev->proc, out)); // Next process.
+ process p;
+ {
+ process::pipe ep;
+ {
+ fdpipe p;
+ if (diag_buffer::pipe (t.ctx, pp.force_dbuf) == -1) // Buffering?
+ {
+ try
+ {
+ p = fdopen_pipe ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to redirect stderr: " << e;
+ }
+
+ // Note that we must return non-owning fd to our end of the pipe
+ // (see the process class for details).
+ //
+ ep = process::pipe (p.in.get (), move (p.out));
+ }
+ else
+ ep = process::pipe (-1, 2);
+
+ // Note that we must open the diag buffer regardless of the
+ // diag_buffer::pipe() result.
+ //
+ pp.dbuf.open (args[0], move (p.in), fdstream_mode::non_blocking);
+ }
+
+ p = (prev == nullptr
+ ? process (args, 0, out, move (ep)) // First process.
+ : process (args, *prev->proc, out, move (ep))); // Next process.
+ }
- pipe_process pp (p, args[0], prev);
+ pp.proc = &p;
- // If the deadline is specified, then make sure we don't miss it
- // waiting indefinitely in the process destructor on the right-hand
- // part of the pipe failure.
+ // If the right-hand part of the pipe fails, then make sure we don't
+ // wait indefinitely in the process destructor if the deadline is
+ // specified or just because a process is blocked on stderr.
//
- auto g (make_exception_guard ([&deadline, &pp, &term_pipe] ()
+ auto g (make_exception_guard ([&pp, &term_pipe] ()
{
- if (deadline)
+ if (pp.proc != nullptr)
try
{
+ // Close all buffered pipeline stderr streams ignoring io_error
+ // exceptions.
+ //
+ for (pipe_process* p (&pp); p != nullptr; p = p->prev)
+ {
+ if (p->dbuf.is.is_open ())
+ try
+ {
+ p->dbuf.is.close();
+ }
+ catch (const io_error&) {}
+ }
+
term_pipe (&pp);
}
catch (const failed&)
@@ -764,25 +1095,17 @@ namespace build2
}
}));
- pr = *next == nullptr || run_test (t, dr, next, deadline, &pp);
-
- if (!deadline)
- p.wait ();
- else if (!timed_wait (p, *deadline))
- term_pipe (&pp);
+ if (!last)
+ run_test (t, next, ofd, deadline, &pp);
- assert (p.exit);
-
-#ifndef _WIN32
- if (!(pp.terminated &&
- !p.exit->normal () &&
- p.exit->signal () == SIGTERM))
-#else
- if (!(pp.terminated &&
- !p.exit->normal () &&
- p.exit->status == DBG_TERMINATE_PROCESS))
-#endif
- pe = *p.exit;
+ // Complete the pipeline execution, if not done yet.
+ //
+ if (pp.proc != nullptr)
+ {
+ read_pipe ();
+ wait_pipe ();
+ complete_pipe ();
+ }
}
catch (const process_error& e)
{
@@ -793,24 +1116,6 @@ namespace build2
throw failed ();
}
-
- bool wr (pe && pe->normal () && pe->code () == 0);
-
- if (!wr)
- {
- if (pr) // First failure?
- dr << fail << "test " << t << " failed"; // Multi test: test 1.
-
- dr << error;
- print_process (dr, args);
-
- if (pe)
- dr << " " << *pe;
- else
- dr << " terminated: execution timeout expired";
- }
-
- return pr && wr;
}
target_state rule::
@@ -986,10 +1291,19 @@ namespace build2
// Do we have stdout?
//
+ // If we do, then match it using diff. Also redirect the diff's stdout
+ // to stderr, similar to how we do that for the script (see
+ // script::check_output() for the reasoning). That will also prevent the
+ // diff's output from interleaving with any other output.
+ //
path dp ("diff");
process_path dpp;
+ int ofd (1);
+
if (pass_n != pts_n && pts[pass_n + 1] != nullptr)
{
+ ofd = 2;
+
const file& ot (pts[pass_n + 1]->as<file> ());
const path& op (ot.path ());
assert (!op.empty ()); // Should have been assigned by update.
@@ -1035,25 +1349,29 @@ namespace build2
args.push_back (nullptr); // Second.
if (verb >= 2)
- print_process (args);
+ print_process (args); // Note: prints the whole pipeline.
else if (verb)
- text << "test " << tt;
+ print_diag ("test", tt);
if (!ctx.dry_run)
{
- diag_record dr;
- pipe_process pp (cat, "cat", nullptr);
-
- if (!run_test (tt,
- dr,
- args.data () + (sin ? 3 : 0), // Skip cat.
- test_deadline (tt),
- sin ? &pp : nullptr))
+ pipe_process pp (tt.ctx,
+ args.data (), // Note: only cat's args are considered.
+ false /* force_dbuf */,
+ nullptr /* prev */,
+ nullptr /* next */);
+
+ if (sin)
{
- dr << info << "test command line: ";
- print_process (dr, args);
- dr << endf; // return
+ pp.next = &pp; // Points to itself.
+ pp.proc = &cat;
}
+
+ run_test (tt,
+ args.data () + (sin ? 3 : 0), // Skip cat.
+ ofd,
+ test_deadline (tt),
+ sin ? &pp : nullptr);
}
return target_state::changed;
diff --git a/libbuild2/test/rule.hxx b/libbuild2/test/rule.hxx
index e96b68b..6fcf208 100644
--- a/libbuild2/test/rule.hxx
+++ b/libbuild2/test/rule.hxx
@@ -20,7 +20,7 @@ namespace build2
{
public:
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual recipe
apply (action, target&) const override;
@@ -34,10 +34,10 @@ namespace build2
target_state
perform_script (action, const target&, size_t) const;
- rule (common_data&& d, bool see_through_only)
- : common (move (d)), see_through (see_through_only) {}
+ rule (common_data&& d, bool sto)
+ : common (move (d)), see_through_only (sto) {}
- bool see_through;
+ bool see_through_only;
};
class default_rule: public rule
diff --git a/libbuild2/test/script/lexer+for-loop.test.testscript b/libbuild2/test/script/lexer+for-loop.test.testscript
new file mode 100644
index 0000000..fcd12f7
--- /dev/null
+++ b/libbuild2/test/script/lexer+for-loop.test.testscript
@@ -0,0 +1,231 @@
+# file : libbuild2/test/script/lexer+for-loop.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+test.arguments = for-loop
+
+: semi
+{
+ : immediate
+ :
+ $* <"cmd;" >>EOO
+ 'cmd'
+ ;
+ <newline>
+ EOO
+
+ : separated
+ :
+ $* <"cmd ;" >>EOO
+ 'cmd'
+ ;
+ <newline>
+ EOO
+
+ : only
+ :
+ $* <";" >>EOO
+ ;
+ <newline>
+ EOO
+}
+
+: colon
+:
+{
+ : immediate
+ :
+ $* <"cmd: dsc" >>EOO
+ 'cmd'
+ :
+ 'dsc'
+ <newline>
+ EOO
+
+ : separated
+ :
+ $* <"cmd :dsc" >>EOO
+ 'cmd'
+ :
+ 'dsc'
+ <newline>
+ EOO
+
+ : only
+ :
+ $* <":" >>EOO
+ :
+ <newline>
+ EOO
+}
+
+: redirect
+:
+{
+ : pass
+ :
+ $* <"cmd <| 1>|" >>EOO
+ 'cmd'
+ <|
+ '1'
+ >|
+ <newline>
+ EOO
+
+ : null
+ :
+ $* <"cmd <- 1>-" >>EOO
+ 'cmd'
+ <-
+ '1'
+ >-
+ <newline>
+ EOO
+
+ : trace
+ :
+ $* <"cmd 1>!" >>EOO
+ 'cmd'
+ '1'
+ >!
+ <newline>
+ EOO
+
+ : merge
+ :
+ $* <"cmd 1>&2" >>EOO
+ 'cmd'
+ '1'
+ >&
+ '2'
+ <newline>
+ EOO
+
+ : str
+ :
+ $* <"cmd <a 1>b" >>EOO
+ 'cmd'
+ <
+ 'a'
+ '1'
+ >
+ 'b'
+ <newline>
+ EOO
+
+ : str-nn
+ :
+ $* <"cmd <:a 1>:b" >>EOO
+ 'cmd'
+ <:
+ 'a'
+ '1'
+ >:
+ 'b'
+ <newline>
+ EOO
+
+ : doc
+ :
+ $* <"cmd <<EOI 1>>EOO" >>EOO
+ 'cmd'
+ <<
+ 'EOI'
+ '1'
+ >>
+ 'EOO'
+ <newline>
+ EOO
+
+ : doc-nn
+ :
+ $* <"cmd <<:EOI 1>>:EOO" >>EOO
+ 'cmd'
+ <<:
+ 'EOI'
+ '1'
+ >>:
+ 'EOO'
+ <newline>
+ EOO
+
+ : file-cmp
+ :
+ $* <"cmd <<<in >>>out 2>>>err" >>EOO
+ 'cmd'
+ <<<
+ 'in'
+ >>>
+ 'out'
+ '2'
+ >>>
+ 'err'
+ <newline>
+ EOO
+
+ : file-write
+ :
+ $* <"cmd >=out 2>+err" >>EOO
+ 'cmd'
+ >=
+ 'out'
+ '2'
+ >+
+ 'err'
+ <newline>
+ EOO
+}
+
+: cleanup
+:
+{
+ : always
+ :
+ $* <"cmd &file" >>EOO
+ 'cmd'
+ &
+ 'file'
+ <newline>
+ EOO
+
+ : maybe
+ :
+ $* <"cmd &?file" >>EOO
+ 'cmd'
+ &?
+ 'file'
+ <newline>
+ EOO
+
+ : never
+ :
+ $* <"cmd &!file" >>EOO
+ 'cmd'
+ &!
+ 'file'
+ <newline>
+ EOO
+}
+
+: for
+:
+{
+ : form-1
+ :
+ $* <"for x: a" >>EOO
+ 'for'
+ 'x'
+ :
+ 'a'
+ <newline>
+ EOO
+
+ : form-3
+ :
+ $* <"for <<<a x" >>EOO
+ 'for'
+ <<<
+ 'a'
+ 'x'
+ <newline>
+ EOO
+}
diff --git a/libbuild2/test/script/lexer.cxx b/libbuild2/test/script/lexer.cxx
index f9c8ac6..aec91fc 100644
--- a/libbuild2/test/script/lexer.cxx
+++ b/libbuild2/test/script/lexer.cxx
@@ -34,13 +34,16 @@ namespace build2
bool q (true); // quotes
if (!esc)
- {
- assert (!state_.empty ());
- esc = state_.top ().escapes;
- }
+ esc = current_state ().escapes;
switch (m)
{
+ case lexer_mode::for_loop:
+ {
+ // Leading tokens of the for-loop. Like command_line but also
+ // recognizes lsbrace like value.
+ }
+ // Fall through.
case lexer_mode::command_line:
{
s1 = ":;=!|&<> $(#\t\n";
@@ -107,7 +110,7 @@ namespace build2
}
assert (ps == '\0');
- state_.push (
+ mode_impl (
state {m, data, nullopt, false, false, ps, s, n, q, *esc, s1, s2});
}
@@ -116,12 +119,13 @@ namespace build2
{
token r;
- switch (state_.top ().mode)
+ switch (mode ())
{
case lexer_mode::command_line:
case lexer_mode::first_token:
case lexer_mode::second_token:
case lexer_mode::variable_line:
+ case lexer_mode::for_loop:
r = next_line ();
break;
case lexer_mode::description_line:
@@ -144,7 +148,7 @@ namespace build2
xchar c (get ());
uint64_t ln (c.line), cn (c.column);
- state st (state_.top ()); // Make copy (see first/second_token).
+ state st (current_state ()); // Make copy (see first/second_token).
lexer_mode m (st.mode);
auto make_token = [&sep, ln, cn] (type t)
@@ -157,9 +161,10 @@ namespace build2
//
if (st.lsbrace)
{
- assert (m == lexer_mode::variable_line);
+ assert (m == lexer_mode::variable_line ||
+ m == lexer_mode::for_loop);
- state_.top ().lsbrace = false; // Note: st is a copy.
+ current_state ().lsbrace = false; // Note: st is a copy.
if (c == '[' && (!st.lsbrace_unsep || !sep))
return make_token (type::lsbrace);
@@ -172,7 +177,7 @@ namespace build2
// we push any new mode (e.g., double quote).
//
if (m == lexer_mode::first_token || m == lexer_mode::second_token)
- state_.pop ();
+ expire_mode ();
// NOTE: remember to update mode() if adding new special characters.
@@ -183,7 +188,7 @@ namespace build2
// Expire variable value mode at the end of the line.
//
if (m == lexer_mode::variable_line)
- state_.pop ();
+ expire_mode ();
sep = true; // Treat newline as always separated.
return make_token (type::newline);
@@ -197,10 +202,11 @@ namespace build2
// Line separators.
//
- if (m == lexer_mode::command_line ||
- m == lexer_mode::first_token ||
- m == lexer_mode::second_token ||
- m == lexer_mode::variable_line)
+ if (m == lexer_mode::command_line ||
+ m == lexer_mode::first_token ||
+ m == lexer_mode::second_token ||
+ m == lexer_mode::variable_line ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -210,7 +216,8 @@ namespace build2
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -222,7 +229,8 @@ namespace build2
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -244,7 +252,8 @@ namespace build2
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
if (optional<token> t = next_cmd_op (c, sep))
return move (*t);
@@ -310,7 +319,7 @@ namespace build2
if (c == '\n')
{
get ();
- state_.pop (); // Expire the description mode.
+ expire_mode (); // Expire the description mode.
return token (type::newline, true, ln, cn, token_printer);
}
@@ -330,15 +339,17 @@ namespace build2
}
token lexer::
- word (state st, bool sep)
+ word (const state& st, bool sep)
{
- lexer_mode m (st.mode);
+ lexer_mode m (st.mode); // Save.
token r (base_lexer::word (st, sep));
if (m == lexer_mode::variable)
{
- if (r.value.size () == 1 && digit (r.value[0])) // $N
+ if (r.type == type::word &&
+ r.value.size () == 1 &&
+ digit (r.value[0])) // $N
{
xchar c (peek ());
diff --git a/libbuild2/test/script/lexer.hxx b/libbuild2/test/script/lexer.hxx
index 452e794..39b950a 100644
--- a/libbuild2/test/script/lexer.hxx
+++ b/libbuild2/test/script/lexer.hxx
@@ -24,10 +24,11 @@ namespace build2
enum
{
command_line = base_type::value_next,
- first_token, // Expires at the end of the token.
- second_token, // Expires at the end of the token.
- variable_line, // Expires at the end of the line.
- description_line // Expires at the end of the line.
+ first_token, // Expires at the end of the token.
+ second_token, // Expires at the end of the token.
+ variable_line, // Expires at the end of the line.
+ description_line, // Expires at the end of the line.
+ for_loop // Used for sensing the for-loop leading tokens.
};
lexer_mode () = default;
@@ -67,6 +68,8 @@ namespace build2
static redirect_aliases_type redirect_aliases;
private:
+ using build2::script::lexer::mode; // Getter.
+
token
next_line ();
@@ -74,7 +77,7 @@ namespace build2
next_description ();
virtual token
- word (state, bool) override;
+ word (const state&, bool) override;
};
}
}
diff --git a/libbuild2/test/script/lexer.test.cxx b/libbuild2/test/script/lexer.test.cxx
index 76f102d..ef3ce4d 100644
--- a/libbuild2/test/script/lexer.test.cxx
+++ b/libbuild2/test/script/lexer.test.cxx
@@ -36,6 +36,7 @@ namespace build2
else if (s == "variable-line") m = lexer_mode::variable_line;
else if (s == "description-line") m = lexer_mode::description_line;
else if (s == "variable") m = lexer_mode::variable;
+ else if (s == "for-loop") m = lexer_mode::for_loop;
else assert (false);
}
diff --git a/libbuild2/test/script/parser+command-if.test.testscript b/libbuild2/test/script/parser+command-if.test.testscript
index 0b72b4a..9e223dd 100644
--- a/libbuild2/test/script/parser+command-if.test.testscript
+++ b/libbuild2/test/script/parser+command-if.test.testscript
@@ -315,6 +315,7 @@
}
: end
+:
{
: without-if
:
@@ -322,7 +323,7 @@
cmd
end
EOI
- testscript:2:1: error: 'end' without preceding 'if'
+ testscript:2:1: error: 'end' without preceding 'if', 'for', or 'while'
EOE
: without-if-semi
@@ -331,10 +332,11 @@
cmd;
end
EOI
- testscript:2:1: error: 'end' without preceding 'if'
+ testscript:2:1: error: 'end' without preceding 'if', 'for', or 'while'
EOE
: before
+ :
{
: semi
:
diff --git a/libbuild2/test/script/parser+command-re-parse.test.testscript b/libbuild2/test/script/parser+command-re-parse.test.testscript
index 84465b3..5a082eb 100644
--- a/libbuild2/test/script/parser+command-re-parse.test.testscript
+++ b/libbuild2/test/script/parser+command-re-parse.test.testscript
@@ -4,7 +4,7 @@
: double-quote
:
$* <<EOI >>EOO
-x = cmd \">-\" "'<-'"
+x = [cmdline] cmd \">-\" "'<-'"
$x
EOI
cmd '>-' '<-'
diff --git a/libbuild2/test/script/parser+description.test.testscript b/libbuild2/test/script/parser+description.test.testscript
index cee540f..f656b7d 100644
--- a/libbuild2/test/script/parser+description.test.testscript
+++ b/libbuild2/test/script/parser+description.test.testscript
@@ -313,7 +313,7 @@
x = y
end
EOI
- testscript:2:1: error: description before/after setup/teardown variable-if
+ testscript:2:1: error: description before/after setup/teardown variable-only 'if'
EOE
: var-if-after
@@ -323,7 +323,7 @@
x = y
end : foo
EOI
- testscript:1:1: error: description before/after setup/teardown variable-if
+ testscript:1:1: error: description before/after setup/teardown variable-only 'if'
EOE
: test
diff --git a/libbuild2/test/script/parser+expansion.test.testscript b/libbuild2/test/script/parser+expansion.test.testscript
index 77a7d6d..c31b0ad 100644
--- a/libbuild2/test/script/parser+expansion.test.testscript
+++ b/libbuild2/test/script/parser+expansion.test.testscript
@@ -27,7 +27,7 @@ EOE
: invalid-redirect
:
$* <<EOI 2>>EOE != 0
-x = "1>&a"
+x = [cmdline] "1>&a"
cmd $x
EOI
<string>:1:4: error: stdout merge redirect file descriptor must be 2
diff --git a/libbuild2/test/script/parser+for.test.testscript b/libbuild2/test/script/parser+for.test.testscript
new file mode 100644
index 0000000..985f9c9
--- /dev/null
+++ b/libbuild2/test/script/parser+for.test.testscript
@@ -0,0 +1,1029 @@
+# file : libbuild2/test/script/parser+for.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: form-1
+:
+: for x: ...
+:
+{
+ : for
+ :
+ {
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for
+ cmd
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for x: a b
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : null
+ :
+ $* <<EOI >:''
+ for x: [null]
+ cmd $x
+ end
+ EOI
+
+ : empty
+ :
+ $* <<EOI >:''
+ for x:
+ cmd $x
+ end
+ EOI
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x: $vs
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : typed-values
+ :
+ $* <<EOI >>~%EOO%
+ for x: [dir_paths] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem-value
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: [strings] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : scope-var
+ :
+ $* <<EOI >>EOO
+ x = x
+
+ for x: a b
+ cmd $x
+ end
+
+ -cmd $x
+ EOI
+ cmd a
+ cmd b
+ -cmd x
+ EOO
+ }
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ for x: a b
+ cmd2 $x
+ end
+ EOI
+ {
+ {
+ cmd1
+ cmd2 a
+ cmd2 b
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +for x: a b
+ cmd $x
+ end
+ EOI
+ {
+ +cmd a
+ +cmd b
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -for x: a b
+ cmd $x
+ end
+ EOI
+ {
+ -cmd a
+ -cmd b
+ }
+ EOO
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for x: a b
+ cmd1 $x # 1
+ if ($x == "a") # 2
+ cmd2 # 3
+ for y: x y
+ cmd3 # 4
+ end
+ else
+ cmd4 # 5
+ end
+ cmd5 # 6
+ end;
+ cmd6 # 7
+ EOI
+ cmd1 a # 1 i1
+ ? true # 2 i1
+ cmd2 # 3 i1
+ cmd3 # 4 i1 i1
+ cmd3 # 4 i1 i2
+ cmd5 # 6 i1
+ cmd1 b # 1 i2
+ ? false # 2 i2
+ cmd4 # 5 i2
+ cmd5 # 6 i2
+ cmd6 # 7
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'for'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'for'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'for'
+ EOE
+ }
+
+ : var
+ :
+ $* <<EOI >>EOO
+ for x: a b
+ cmd1 $x
+ end;
+ cmd2 $x
+ EOI
+ cmd1 a
+ cmd1 b
+ cmd2 b
+ EOO
+
+ : leading-and-trailing-description
+ :
+ $* <<EOI 2>>EOE != 0
+ : foo
+ for x: a b
+ cmd
+ end : bar
+ EOI
+ testscript:4:1: error: both leading and trailing descriptions
+ EOE
+}
+
+: form-2
+:
+: ... | for x
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x != 0
+ cmd
+ end
+ EOI
+ testscript:1:20: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x | echo x
+ cmd
+ end
+ EOI
+ testscript:1:20: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x|echo x
+ cmd
+ end
+ EOI
+ testscript:1:19: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x && echo x
+ cmd
+ end
+ EOI
+ testscript:1:20: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&&echo x
+ cmd
+ end
+ EOI
+ testscript:1:19: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x | for x
+ cmd
+ end
+ EOI
+ testscript:1:24: error: command expression involving for-loop
+ EOE
+
+ : expression-before-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x|for x
+ cmd
+ end
+ EOI
+ testscript:1:22: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x &f
+ cmd
+ end
+ EOI
+ testscript:1:20: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&f
+ cmd
+ end
+ EOI
+ testscript:1:19: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x >a
+ cmd
+ end
+ EOI
+ testscript:1:20: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x>a
+ cmd
+ end
+ EOI
+ testscript:1:19: error: output redirect in for-loop
+ EOE
+
+ : stdin-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x <a
+ cmd
+ end
+ EOI
+ testscript:1:20: error: stdin is both piped and redirected
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for
+ cmd
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ echo $vs | for x
+ cmd $x
+ end
+ EOI
+ echo a b | for x
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x [dir_path]
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x [dir_path]
+ EOO
+ }
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ echo 'a b' | for x
+ cmd2 $x
+ end
+ EOI
+ {
+ {
+ cmd1
+ echo 'a b' | for x
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +echo 'a b' | for x
+ cmd $x
+ end
+ EOI
+ {
+ +echo 'a b' | for x
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -echo 'a b' | for x
+ cmd $x
+ end
+ EOI
+ {
+ -echo 'a b' | for x
+ }
+ EOO
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ echo 'a b' | for x # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ echo x y | for y # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end;
+ cmd6 # 9
+ EOI
+ echo 'a b' | for x # 1
+ cmd6 # 9
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'for'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'for'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'for'
+ EOE
+ }
+
+ : leading-and-trailing-description
+ :
+ $* <<EOI 2>>EOE != 0
+ : foo
+ echo 'a b' | for x
+ cmd
+ end : bar
+ EOI
+ testscript:4:1: error: both leading and trailing descriptions
+ EOE
+}
+
+: form-3
+:
+: for x <...
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a != 0
+ cmd
+ end
+ EOI
+ testscript:1:10: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a | echo x
+ cmd
+ end
+ EOI
+ testscript:1:10: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x|echo x
+ cmd
+ end
+ EOI
+ testscript:1:9: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a && echo x
+ cmd
+ end
+ EOI
+ testscript:1:10: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&&echo x
+ cmd
+ end
+ EOI
+ testscript:1:9: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && for x <a
+ cmd
+ end
+ EOI
+ testscript:1:15: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a &f
+ cmd
+ end
+ EOI
+ testscript:1:10: error: cleanup in for-loop
+ EOE
+
+ : cleanup-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for &f x <a
+ cmd
+ end
+ EOI
+ testscript:1:5: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&f
+ cmd
+ end
+ EOI
+ testscript:1:9: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ for x >a
+ cmd
+ end
+ EOI
+ testscript:1:7: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for >a x
+ cmd
+ end
+ EOI
+ testscript:1:5: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for x>a
+ cmd
+ end
+ EOI
+ testscript:1:6: error: output redirect in for-loop
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a
+ cmd
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ EOE
+
+ : quoted-opt
+ :
+ $* <<EOI >>EOO
+ o = -w
+ for "$o" x <'a b'
+ cmd $x
+ end;
+ for "($o)" x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ for -w x <'a b'
+ EOO
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for -w x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x <$vs
+ cmd $x
+ end
+ EOI
+ for x b <a
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ for -w x [dir_path] <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x [dir_path] <'a b'
+ EOO
+ }
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ for x <'a b'
+ cmd2 $x
+ end
+ EOI
+ {
+ {
+ cmd1
+ for x <'a b'
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +for x <'a b'
+ cmd $x
+ end
+ EOI
+ {
+ +for x <'a b'
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -for x <'a b'
+ cmd $x
+ end
+ EOI
+ {
+ -for x <'a b'
+ }
+ EOO
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for -w x <'a b' # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ for -w y <'x y' # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end;
+ cmd6 # 9
+ EOI
+ for -w x <'a b' # 1
+ cmd6 # 9
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'for'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'for'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'for'
+ EOE
+ }
+
+ : leading-and-trailing-description
+ :
+ $* <<EOI 2>>EOE != 0
+ : foo
+ for x <'a b'
+ cmd
+ end : bar
+ EOI
+ testscript:4:1: error: both leading and trailing descriptions
+ EOE
+}
diff --git a/libbuild2/test/script/parser+while.test.testscript b/libbuild2/test/script/parser+while.test.testscript
new file mode 100644
index 0000000..b1a2b44
--- /dev/null
+++ b/libbuild2/test/script/parser+while.test.testscript
@@ -0,0 +1,265 @@
+# file : libbuild2/test/script/parser+while.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: while
+:
+{
+ : true
+ :
+ $* <<EOI >>EOO
+ while ($v != "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? true
+ cmd ''
+ ? true
+ cmd a
+ ? false
+ EOO
+
+ : false
+ :
+ $* <<EOI >>EOO
+ while ($v == "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? false
+ EOO
+
+ : without-command
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd
+ end
+ EOI
+ testscript:1:6: error: missing program
+ EOE
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ while ($v != "aa")
+ cmd2 "$v"
+ v = "$(v)a"
+ end
+ EOI
+ {
+ {
+ cmd1
+ ? true
+ cmd2 ''
+ ? true
+ cmd2 a
+ ? false
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +while ($v != "aa")
+ cmd2 "$v"
+ v = "$(v)a"
+ end
+ EOI
+ {
+ ? true
+ +cmd2 ''
+ ? true
+ +cmd2 a
+ ? false
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -while ($v != "aa")
+ cmd2 "$v"
+ v = "$(v)a"
+ end
+ EOI
+ {
+ ? true
+ -cmd2 ''
+ ? true
+ -cmd2 a
+ ? false
+ }
+ EOO
+}
+
+: end
+:
+{
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ while true
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+}
+
+: elif
+:
+{
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ while false
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+}
+
+: nested
+:
+{
+ $* -l -r <<EOI >>EOO
+ while ($v != "aa") # 1
+ cmd1 "$v" # 2
+ if ($v == "a") # 3
+ cmd2 # 4
+ while ($v2 != "$v") # 5
+ cmd3 # 6
+ v2=$v
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ v = "$(v)a"
+ end;
+ cmd6
+ EOI
+ ? true # 1 i1
+ cmd1 '' # 2 i1
+ ? false # 3 i1
+ cmd4 # 7 i1
+ cmd5 # 8 i1
+ ? true # 1 i2
+ cmd1 a # 2 i2
+ ? true # 3 i2
+ cmd2 # 4 i2
+ ? true # 5 i2 i1
+ cmd3 # 6 i2 i1
+ ? false # 5 i2 i2
+ cmd5 # 8 i2
+ ? false # 1 i3
+ cmd6 # 9
+ EOO
+}
+
+: contained
+:
+{
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'while'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'while'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'while'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'while'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'while'
+ EOE
+}
+
+: var
+:
+$* <<EOI >>EOO
+while ($v1 != "a")
+ v1 = "$(v1)a"
+ v2 = "$v1"
+end
+cmd $v1
+EOI
+? true
+? false
+cmd a
+EOO
+
+: leading-and-trailing-description
+:
+$* <<EOI 2>>EOE != 0
+: foo
+while false
+ cmd
+end : bar
+EOI
+testscript:4:1: error: both leading and trailing descriptions
+EOE
diff --git a/libbuild2/test/script/parser.cxx b/libbuild2/test/script/parser.cxx
index 9e92f3b..b712c21 100644
--- a/libbuild2/test/script/parser.cxx
+++ b/libbuild2/test/script/parser.cxx
@@ -293,22 +293,30 @@ namespace build2
}
// Parse a logical line (as well as scope-if since the only way to
- // recognize it is to parse the if line).
+ // recognize it is to parse the if line), handling the flow control
+ // constructs recursively.
//
// If one is true then only parse one line returning an indication of
- // whether the line ended with a semicolon. If if_line is true then this
- // line can be an if-else construct flow control line (else, end, etc).
+ // whether the line ended with a semicolon. If the flow control
+ // construct type is specified, then this line is assumed to belong to
+ // such construct.
//
bool parser::
pre_parse_line (token& t, type& tt,
optional<description>& d,
lines* ls,
bool one,
- bool if_line)
+ optional<line_type> fct)
{
// enter: next token is peeked at (type in tt)
// leave: newline
+ assert (!fct ||
+ *fct == line_type::cmd_if ||
+ *fct == line_type::cmd_while ||
+ *fct == line_type::cmd_for_stream ||
+ *fct == line_type::cmd_for_args);
+
// Note: token is only peeked at.
//
const location ll (get_location (peeked ()));
@@ -317,6 +325,52 @@ namespace build2
//
line_type lt;
type st (type::eos); // Later, can only be set to plus or minus.
+ bool semi (false);
+
+ // Parse the command line tail, starting from the newline or the
+ // potential colon/semicolon token.
+ //
+ // Note that colon and semicolon are only valid in test command lines
+ // and after 'end' in flow control constructs. Note that we always
+ // recognize them lexically, even when they are not valid tokens per
+ // the grammar.
+ //
+ auto parse_command_tail = [&t, &tt, &st, &lt, &d, &semi, &ll, this] ()
+ {
+ if (tt != type::newline)
+ {
+ if (lt != line_type::cmd && lt != line_type::cmd_end)
+ fail (t) << "expected newline instead of " << t;
+
+ switch (st)
+ {
+ case type::plus: fail (t) << t << " after setup command" << endf;
+ case type::minus: fail (t) << t << " after teardown command" << endf;
+ }
+ }
+
+ switch (tt)
+ {
+ case type::colon:
+ {
+ if (d)
+ fail (ll) << "both leading and trailing descriptions";
+
+ d = parse_trailing_description (t, tt);
+ break;
+ }
+ case type::semi:
+ {
+ semi = true;
+ replay_pop (); // See above for the reasoning.
+ next (t, tt); // Get newline.
+ break;
+ }
+ }
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t;
+ };
switch (tt)
{
@@ -364,8 +418,12 @@ namespace build2
{
const string& n (t.value);
- if (n == "if") lt = line_type::cmd_if;
- else if (n == "if!") lt = line_type::cmd_ifn;
+ // Handle the for-loop consistently with pre_parse_line_start().
+ //
+ if (n == "if") lt = line_type::cmd_if;
+ else if (n == "if!") lt = line_type::cmd_ifn;
+ else if (n == "while") lt = line_type::cmd_while;
+ else if (n == "for") lt = line_type::cmd_for_stream;
}
break;
@@ -379,8 +437,6 @@ namespace build2
// Pre-parse the line keeping track of whether it ends with a semi.
//
- bool semi (false);
-
line ln;
switch (lt)
{
@@ -407,76 +463,147 @@ namespace build2
mode (lexer_mode::variable_line);
parse_variable_line (t, tt);
+ // Note that the semicolon token is only required during
+ // pre-parsing to decide which line list the current line should
+ // go to and provides no additional semantics during the
+ // execution. Moreover, build2::script::parser::exec_lines()
+ // doesn't expect this token to be present. Thus, we just drop
+ // this token from the saved tokens.
+ //
semi = (tt == type::semi);
- if (tt == type::semi)
+ if (semi)
+ {
+ replay_pop ();
next (t, tt);
+ }
if (tt != type::newline)
fail (t) << "expected newline instead of " << t;
break;
}
+ //
+ // See pre_parse_line_start() for details.
+ //
+ case line_type::cmd_for_args: assert (false); break;
+ case line_type::cmd_for_stream:
+ {
+ // First we need to sense the next few tokens and detect which
+ // form of the for-loop that actually is (see
+ // libbuild2/build/script/parser.cxx for details).
+ //
+ token pt (t);
+ assert (pt.type == type::word && pt.value == "for");
+
+ mode (lexer_mode::for_loop);
+ next (t, tt);
+
+ string& n (t.value);
+
+ if (tt == type::word && t.qtype == quote_type::unquoted &&
+ (n[0] == '_' || alpha (n[0]) || // Variable.
+ n == "*" || n == "~" || n == "@")) // Special variable.
+ {
+ // Detect patterns analogous to parse_variable_name() (so we
+ // diagnose `for x[string]: ...`).
+ //
+ if (n.find_first_of ("[*?") != string::npos)
+ fail (t) << "expected variable name instead of " << n;
+
+ if (special_variable (n))
+ fail (t) << "attempt to set '" << n << "' variable directly";
+
+ if (lexer_->peek_char ().first == '[')
+ {
+ token vt (move (t));
+ next_with_attributes (t, tt);
+
+ attributes_push (t, tt,
+ true /* standalone */,
+ false /* next_token */);
+
+ t = move (vt);
+ tt = t.type;
+ }
+
+ if (lexer_->peek_char ().first == ':')
+ lt = line_type::cmd_for_args;
+ }
+
+ if (lt == line_type::cmd_for_stream) // for x <...
+ {
+ ln.var = nullptr;
+
+ expire_mode ();
+
+ parse_command_expr_result r (
+ parse_command_expr (t, tt,
+ lexer::redirect_aliases,
+ move (pt)));
+
+ assert (r.for_loop);
+
+ parse_command_tail ();
+ parse_here_documents (t, tt, r);
+ }
+ else // for x: ...
+ {
+ ln.var = &script_->var_pool.insert (move (n));
+
+ next (t, tt);
+
+ assert (tt == type::colon);
+
+ expire_mode ();
+
+ // Parse the value similar to the var line type (see above),
+ // except for the fact that we don't expect a trailing semicolon.
+ //
+ mode (lexer_mode::variable_line);
+ parse_variable_line (t, tt);
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t << " after for";
+ }
+
+ break;
+ }
case line_type::cmd_elif:
case line_type::cmd_elifn:
case line_type::cmd_else:
- case line_type::cmd_end:
{
- if (!if_line)
- {
+ if (!fct || *fct != line_type::cmd_if)
fail (t) << lt << " without preceding 'if'";
- }
+ }
+ // Fall through.
+ case line_type::cmd_end:
+ {
+ if (!fct)
+ fail (t) << lt << " without preceding 'if', 'for', or 'while'";
}
// Fall through.
case line_type::cmd_if:
case line_type::cmd_ifn:
+ case line_type::cmd_while:
next (t, tt); // Skip to start of command.
// Fall through.
case line_type::cmd:
{
- pair<command_expr, here_docs> p;
+ parse_command_expr_result r;
if (lt != line_type::cmd_else && lt != line_type::cmd_end)
- p = parse_command_expr (t, tt, lexer::redirect_aliases);
+ r = parse_command_expr (t, tt, lexer::redirect_aliases);
- // Colon and semicolon are only valid in test command lines and
- // after 'end' in if-else. Note that we still recognize them
- // lexically, they are just not valid tokens per the grammar.
- //
- if (tt != type::newline)
+ if (r.for_loop)
{
- if (lt != line_type::cmd && lt != line_type::cmd_end)
- fail (t) << "expected newline instead of " << t;
-
- switch (st)
- {
- case type::plus: fail (t) << t << " after setup command" << endf;
- case type::minus: fail (t) << t << " after teardown command" << endf;
- }
+ lt = line_type::cmd_for_stream;
+ ln.var = nullptr;
}
- switch (tt)
- {
- case type::colon:
- {
- if (d)
- fail (ll) << "both leading and trailing descriptions";
-
- d = parse_trailing_description (t, tt);
- break;
- }
- case type::semi:
- {
- semi = true;
- next (t, tt); // Get newline.
- break;
- }
- }
-
- if (tt != type::newline)
- fail (t) << "expected newline instead of " << t;
+ parse_command_tail ();
+ parse_here_documents (t, tt, r);
- parse_here_documents (t, tt, p);
break;
}
}
@@ -494,24 +621,39 @@ namespace build2
ln.tokens = replay_data ();
ls->push_back (move (ln));
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
+ switch (lt)
{
- semi = pre_parse_if_else (t, tt, d, *ls);
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ {
+ semi = pre_parse_if_else (t, tt, d, *ls);
- // If this turned out to be scope-if, then ls is empty, semi is
- // false, and none of the below logic applies.
- //
- if (ls->empty ())
- return semi;
+ // If this turned out to be scope-if, then ls is empty, semi is
+ // false, and none of the below logic applies.
+ //
+ if (ls->empty ())
+ return semi;
+
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ semi = pre_parse_loop (t, tt, lt, d, *ls);
+ break;
+ }
+ default: break;
}
// Unless we were told where to put it, decide where it actually goes.
//
if (ls == &ls_data)
{
- // First pre-check variable and variable-if: by themselves (i.e.,
- // without a trailing semicolon) they are treated as either setup or
- // teardown without plus/minus. Also handle illegal line types.
+ // First pre-check variables and variable-only flow control
+ // constructs: by themselves (i.e., without a trailing semicolon)
+ // they are treated as either setup or teardown without
+ // plus/minus. Also handle illegal line types.
//
switch (lt)
{
@@ -524,8 +666,11 @@ namespace build2
}
case line_type::cmd_if:
case line_type::cmd_ifn:
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
{
- // See if this is a variable-only command-if.
+ // See if this is a variable-only flow control construct.
//
if (find_if (ls_data.begin (), ls_data.end (),
[] (const line& l) {
@@ -549,7 +694,7 @@ namespace build2
fail (ll) << "description before setup/teardown variable";
else
fail (ll) << "description before/after setup/teardown "
- << "variable-if";
+ << "variable-only " << lt;
}
// If we don't have any nested scopes or teardown commands,
@@ -793,7 +938,7 @@ namespace build2
td,
&ls,
true /* one */,
- true /* if_line */));
+ line_type::cmd_if));
assert (ls.size () == 1 && ls.back ().type == lt);
assert (tt == type::newline);
@@ -831,6 +976,99 @@ namespace build2
return false; // We never end with a semi.
}
+ // Pre-parse the flow control construct block line. Fail if the line is
+ // unexpectedly followed with a semicolon or test description.
+ //
+ bool parser::
+ pre_parse_block_line (token& t, type& tt,
+ line_type bt,
+ optional<description>& d,
+ lines& ls)
+ {
+ // enter: peeked first token of the line (type in tt)
+ // leave: newline
+
+ const location ll (get_location (peeked ()));
+
+ switch (tt)
+ {
+ case type::colon:
+ fail (ll) << "description inside " << bt << endf;
+ case type::eos:
+ case type::rcbrace:
+ case type::lcbrace:
+ fail (ll) << "expected closing 'end'" << endf;
+ case type::plus:
+ fail (ll) << "setup command inside " << bt << endf;
+ case type::minus:
+ fail (ll) << "teardown command inside " << bt << endf;
+ }
+
+ // Parse one line. Note that this one line can still be multiple lines
+ // in case of a flow control construct. In this case we want to view
+ // it as, for example, cmd_if, not cmd_end. Thus remember the start
+ // position of the next logical line.
+ //
+ size_t i (ls.size ());
+
+ line_type fct; // Flow control construct type the block type relates to.
+
+ switch (bt)
+ {
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_else:
+ {
+ fct = line_type::cmd_if;
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ fct = bt;
+ break;
+ }
+ default: assert(false);
+ }
+
+ optional<description> td;
+ bool semi (pre_parse_line (t, tt, td, &ls, true /* one */, fct));
+
+ assert (tt == type::newline);
+
+ line_type lt (ls[i].type);
+
+ // First take care of 'end'.
+ //
+ if (lt == line_type::cmd_end)
+ {
+ if (td)
+ {
+ if (d)
+ fail (ll) << "both leading and trailing descriptions";
+
+ d = move (td);
+ }
+
+ return semi;
+ }
+
+ // For any other line trailing semi or description is illegal.
+ //
+ // @@ Not the exact location of semi/colon.
+ //
+ if (semi)
+ fail (ll) << "';' inside " << bt;
+
+ if (td)
+ fail (ll) << "description inside " << bt;
+
+ return false;
+ }
+
bool parser::
pre_parse_if_else_command (token& t, type& tt,
optional<description>& d,
@@ -839,70 +1077,23 @@ namespace build2
// enter: peeked first token of next line (type in tt)
// leave: newline
- // Parse lines until we see closing 'end'. Nested if-else blocks are
- // handled recursively.
+ // Parse lines until we see closing 'end'.
//
for (line_type bt (line_type::cmd_if); // Current block.
;
tt = peek (lexer_mode::first_token))
{
const location ll (get_location (peeked ()));
-
- switch (tt)
- {
- case type::colon:
- fail (ll) << "description inside " << bt << endf;
- case type::eos:
- case type::rcbrace:
- case type::lcbrace:
- fail (ll) << "expected closing 'end'" << endf;
- case type::plus:
- fail (ll) << "setup command inside " << bt << endf;
- case type::minus:
- fail (ll) << "teardown command inside " << bt << endf;
- }
-
- // Parse one line. Note that this one line can still be multiple
- // lines in case of if-else. In this case we want to view it as
- // cmd_if, not cmd_end. Thus remember the start position of the
- // next logical line.
- //
size_t i (ls.size ());
- optional<description> td;
- bool semi (pre_parse_line (t, tt,
- td,
- &ls,
- true /* one */,
- true /* if_line */));
- assert (tt == type::newline);
+ bool semi (pre_parse_block_line (t, tt, bt, d, ls));
line_type lt (ls[i].type);
// First take care of 'end'.
//
if (lt == line_type::cmd_end)
- {
- if (td)
- {
- if (d)
- fail (ll) << "both leading and trailing descriptions";
-
- d = move (td);
- }
-
return semi;
- }
-
- // For any other line trailing semi or description is illegal.
- //
- // @@ Not the exact location of semi/colon.
- //
- if (semi)
- fail (ll) << "';' inside " << bt;
-
- if (td)
- fail (ll) << "description inside " << bt;
// Check if-else block sequencing.
//
@@ -924,6 +1115,40 @@ namespace build2
default: break;
}
}
+
+ assert (false); // Can't be here.
+ return false;
+ }
+
+ bool parser::
+ pre_parse_loop (token& t, type& tt,
+ line_type lt,
+ optional<description>& d,
+ lines& ls)
+ {
+ // enter: <newline> (previous line)
+ // leave: <newline>
+
+ assert (lt == line_type::cmd_while ||
+ lt == line_type::cmd_for_stream ||
+ lt == line_type::cmd_for_args);
+
+ tt = peek (lexer_mode::first_token);
+
+ // Parse lines until we see closing 'end'.
+ //
+ for (;; tt = peek (lexer_mode::first_token))
+ {
+ size_t i (ls.size ());
+
+ bool semi (pre_parse_block_line (t, tt, lt, d, ls));
+
+ if (ls[i].type == line_type::cmd_end)
+ return semi;
+ }
+
+ assert (false); // Can't be here.
+ return false;
}
void parser::
@@ -1057,7 +1282,7 @@ namespace build2
diag_record dr (fail (dl));
dr << "invalid testscript include path ";
- to_stream (dr.os, n, true); // Quote.
+ to_stream (dr.os, n, quote_mode::normal);
}
}
@@ -1266,21 +1491,18 @@ namespace build2
// Note: this one is only used during execution.
- pair<command_expr, here_docs> p (
+ parse_command_expr_result pr (
parse_command_expr (t, tt, lexer::redirect_aliases));
- switch (tt)
- {
- case type::colon: parse_trailing_description (t, tt); break;
- case type::semi: next (t, tt); break; // Get newline.
- }
+ if (tt == type::colon)
+ parse_trailing_description (t, tt);
assert (tt == type::newline);
- parse_here_documents (t, tt, p);
+ parse_here_documents (t, tt, pr);
assert (tt == type::newline);
- command_expr r (move (p.first));
+ command_expr r (move (pr.expr));
// If the test program runner is specified, then adjust the
// expressions to run test programs via this runner.
@@ -1402,9 +1624,6 @@ namespace build2
mode (lexer_mode::variable_line);
value rhs (parse_variable_line (t, tt));
- if (tt == type::semi)
- next (t, tt);
-
assert (tt == type::newline);
// Assign.
@@ -1424,8 +1643,9 @@ namespace build2
command_type ct;
auto exec_cmd = [&ct, this] (token& t, build2::script::token_type& tt,
- size_t li,
+ const iteration_index* ii, size_t li,
bool single,
+ const function<command_function>& cf,
const location& ll)
{
// We use the 0 index to signal that this is the only command.
@@ -1437,19 +1657,35 @@ namespace build2
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- runner_->run (*scope_, ce, ct, li, ll);
+ runner_->run (*scope_, ce, ct, ii, li, cf, ll);
};
- auto exec_if = [this] (token& t, build2::script::token_type& tt,
- size_t li,
- const location& ll)
+ auto exec_cond = [this] (token& t, build2::script::token_type& tt,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- // Assume if-else always involves multiple commands.
+ // Assume a flow control construct always involves multiple
+ // commands.
//
- return runner_->run_if (*scope_, ce, li, ll);
+ return runner_->run_cond (*scope_, ce, ii, li, ll);
+ };
+
+ auto exec_for = [this] (const variable& var,
+ value&& val,
+ const attributes& val_attrs,
+ const location&)
+ {
+ value& lhs (scope_->assign (var));
+
+ attributes_.push_back (val_attrs);
+
+ apply_value_attributes (&var, lhs, move (val), type::assign);
+
+ if (script_->test_command_var (var.name))
+ scope_->reset_special ();
};
size_t li (1);
@@ -1459,16 +1695,17 @@ namespace build2
ct = command_type::test;
exec_lines (t->tests_.begin (), t->tests_.end (),
- exec_set, exec_cmd, exec_if,
- li);
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */, li);
}
else if (group* g = dynamic_cast<group*> (scope_))
{
ct = command_type::setup;
- bool exec_scope (exec_lines (g->setup_.begin (), g->setup_.end (),
- exec_set, exec_cmd, exec_if,
- li));
+ bool exec_scope (
+ exec_lines (g->setup_.begin (), g->setup_.end (),
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */, li));
if (exec_scope)
{
@@ -1526,7 +1763,8 @@ namespace build2
try
{
- take = runner_->run_if (*scope_, ce, li++, ll);
+ take = runner_->run_cond (
+ *scope_, ce, nullptr /* iteration_index */, li++, ll);
}
catch (const exit_scope& e)
{
@@ -1593,24 +1831,24 @@ namespace build2
// UBSan workaround.
//
const diag_frame* df (diag_frame::stack ());
- if (!ctx.sched.async (task_count,
- [] (const diag_frame* ds,
- scope& s,
- script& scr,
- runner& r)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (s, scr, r);
- },
- df,
- ref (*chain),
- ref (*script_),
- ref (*runner_)))
+ if (!ctx->sched->async (task_count,
+ [] (const diag_frame* ds,
+ scope& s,
+ script& scr,
+ runner& r)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (s, scr, r);
+ },
+ df,
+ ref (*chain),
+ ref (*script_),
+ ref (*runner_)))
{
// Bail out if the scope has failed and we weren't instructed
// to keep going.
//
- if (chain->state == scope_state::failed && !ctx.keep_going)
+ if (chain->state == scope_state::failed && !ctx->keep_going)
throw failed ();
}
}
@@ -1637,8 +1875,8 @@ namespace build2
ct = command_type::teardown;
exec_lines (g->tdown_.begin (), g->tdown_.end (),
- exec_set, exec_cmd, exec_if,
- li);
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */, li);
}
else
assert (false);
@@ -1652,7 +1890,8 @@ namespace build2
// The rest.
//
- // When add a special variable don't forget to update lexer::word().
+ // When adding a special variable don't forget to update lexer::word() and
+ // for-loop parsing in pre_parse_line().
//
bool parser::
special_variable (const string& n) noexcept
@@ -1661,7 +1900,7 @@ namespace build2
}
lookup parser::
- lookup_variable (name&& qual, string&& name, const location& loc)
+ lookup_variable (names&& qual, string&& name, const location& loc)
{
if (pre_parse_)
return lookup ();
diff --git a/libbuild2/test/script/parser.hxx b/libbuild2/test/script/parser.hxx
index c63bce6..6fe46e2 100644
--- a/libbuild2/test/script/parser.hxx
+++ b/libbuild2/test/script/parser.hxx
@@ -30,7 +30,7 @@ namespace build2
// Pre-parse. Issue diagnostics and throw failed in case of an error.
//
public:
- parser (context& c): build2::script::parser (c, true /* relex */) {}
+ parser (context& c): build2::script::parser (c) {}
void
pre_parse (script&);
@@ -62,7 +62,13 @@ namespace build2
optional<description>&,
lines* = nullptr,
bool one = false,
- bool if_line = false);
+ optional<line_type> flow_control_type = nullopt);
+
+ bool
+ pre_parse_block_line (token&, token_type&,
+ line_type block_type,
+ optional<description>&,
+ lines&);
bool
pre_parse_if_else (token&, token_type&,
@@ -79,6 +85,12 @@ namespace build2
optional<description>&,
lines&);
+ bool
+ pre_parse_loop (token&, token_type&,
+ line_type,
+ optional<description>&,
+ lines&);
+
void
pre_parse_directive (token&, token_type&);
@@ -117,7 +129,7 @@ namespace build2
//
protected:
virtual lookup
- lookup_variable (name&&, string&&, const location&) override;
+ lookup_variable (names&&, string&&, const location&) override;
// Insert id into the id map checking for duplicates.
//
diff --git a/libbuild2/test/script/parser.test.cxx b/libbuild2/test/script/parser.test.cxx
index 47d56ce..6838e47 100644
--- a/libbuild2/test/script/parser.test.cxx
+++ b/libbuild2/test/script/parser.test.cxx
@@ -33,8 +33,11 @@ namespace build2
class print_runner: public runner
{
public:
- print_runner (bool scope, bool id, bool line)
- : scope_ (scope), id_ (id), line_ (line) {}
+ print_runner (bool scope, bool id, bool line, bool iterations)
+ : scope_ (scope),
+ id_ (id),
+ line_ (line),
+ iterations_ (iterations) {}
virtual bool
test (scope&) const override
@@ -97,11 +100,32 @@ namespace build2
}
virtual void
- run (scope&,
+ run (scope& env,
const command_expr& e, command_type t,
- size_t i,
- const location&) override
+ const iteration_index* ii, size_t i,
+ const function<command_function>& cf,
+ const location& ll) override
{
+ // If the function is specified, then just execute it with an empty
+ // stdin so it can perform the housekeeping (stop replaying tokens,
+ // increment line index, etc).
+ //
+ if (cf != nullptr)
+ {
+ assert (e.size () == 1 && !e[0].pipe.empty ());
+
+ const command& c (e[0].pipe.back ());
+
+ // Must be enforced by the caller.
+ //
+ assert (!c.out && !c.err && !c.exit);
+
+ cf (env, c.arguments,
+ fdopen_null (), nullptr /* pipe */,
+ nullopt /* deadline */,
+ ll);
+ }
+
const char* s (nullptr);
switch (t)
@@ -113,22 +137,22 @@ namespace build2
cout << ind_ << s << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
}
virtual bool
- run_if (scope&,
- const command_expr& e,
- size_t i,
- const location&) override
+ run_cond (scope&,
+ const command_expr& e,
+ const iteration_index* ii, size_t i,
+ const location&) override
{
cout << ind_ << "? " << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
@@ -146,13 +170,33 @@ namespace build2
}
private:
+ void
+ print_line_info (const iteration_index* ii, size_t i) const
+ {
+ cout << " #";
+
+ if (line_)
+ cout << ' ' << i;
+
+ if (iterations_ && ii != nullptr)
+ {
+ string s;
+ for (const iteration_index* i (ii); i != nullptr; i = i->prev)
+ s.insert (0, " i" + to_string (i->index));
+
+ cout << s;
+ }
+ }
+
+ private:
bool scope_;
bool id_;
bool line_;
+ bool iterations_;
string ind_;
};
- // Usage: argv[0] [-s] [-i] [-l] [<testscript-name>]
+ // Usage: argv[0] [-s] [-i] [-l] [-r] [<testscript-name>]
//
int
main (int argc, char* argv[])
@@ -162,18 +206,19 @@ namespace build2
// Fake build system driver, default verbosity.
//
init_diag (1);
- init (nullptr, argv[0]);
+ init (nullptr, argv[0], true);
// Serial execution.
//
scheduler sched (1);
global_mutexes mutexes (1);
- file_cache fcache;
+ file_cache fcache (true);
context ctx (sched, mutexes, fcache);
bool scope (false);
bool id (false);
bool line (false);
+ bool iterations (false);
path name;
for (int i (1); i != argc; ++i)
@@ -186,6 +231,8 @@ namespace build2
id = true;
else if (a == "-l")
line = true;
+ else if (a == "-r")
+ iterations = true;
else
{
name = path (move (a));
@@ -218,7 +265,7 @@ namespace build2
tt.assign (
ctx.var_pool.rw ().insert<target_triplet> ("test.target")));
- v = cast<target_triplet> (ctx.global_scope["build.host"]);
+ v = *ctx.build_host;
testscript& st (
ctx.targets.insert<testscript> (work,
@@ -236,7 +283,7 @@ namespace build2
script s (tt, st, dir_path (work) /= "test-driver");
p.pre_parse (cin, s);
- print_runner r (scope, id, line);
+ print_runner r (scope, id, line, iterations);
p.execute (s, r);
}
catch (const failed&)
diff --git a/libbuild2/test/script/runner.cxx b/libbuild2/test/script/runner.cxx
index af5f30a..98d6868 100644
--- a/libbuild2/test/script/runner.cxx
+++ b/libbuild2/test/script/runner.cxx
@@ -3,6 +3,8 @@
#include <libbuild2/test/script/runner.hxx>
+#include <libbuild2/filesystem.hxx>
+
#include <libbuild2/script/run.hxx>
#include <libbuild2/test/common.hxx>
@@ -140,7 +142,9 @@ namespace build2
void default_runner::
run (scope& sp,
const command_expr& expr, command_type ct,
- size_t li, const location& ll)
+ const iteration_index* ii, size_t li,
+ const function<command_function>& cf,
+ const location& ll)
{
// Noop for teardown commands if keeping tests output is requested.
//
@@ -162,40 +166,55 @@ namespace build2
text << ": " << c << expr;
}
- // Print test id once per test expression.
+ // Print test id once per test expression and only for the topmost
+ // one.
//
auto df = make_diag_frame (
- [&sp](const diag_record& dr)
+ [&sp, print = (sp.exec_level == 0)](const diag_record& dr)
{
- // Let's not depend on how the path representation can be improved
- // for readability on printing.
- //
- dr << info << "test id: " << sp.id_path.posix_string ();
+ if (print)
+ {
+ // Let's not depend on how the path representation can be
+ // improved for readability on printing.
+ //
+ dr << info << "test id: " << sp.id_path.posix_string ();
+ }
});
- build2::script::run (sp, expr, li, ll);
+ ++sp.exec_level;
+ build2::script::run (sp, expr, ii, li, ll, cf);
+ --sp.exec_level;
}
bool default_runner::
- run_if (scope& sp,
- const command_expr& expr,
- size_t li, const location& ll)
+ run_cond (scope& sp,
+ const command_expr& expr,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
if (verb >= 3)
text << ": ?" << expr;
- // Print test id once per test expression.
+ // Print test id once per test expression and only for the topmost
+ // one.
//
auto df = make_diag_frame (
- [&sp](const diag_record& dr)
+ [&sp, print = (sp.exec_level == 0)](const diag_record& dr)
{
- // Let's not depend on how the path representation can be improved
- // for readability on printing.
- //
- dr << info << "test id: " << sp.id_path.posix_string ();
+ if (print)
+ {
+ // Let's not depend on how the path representation can be
+ // improved for readability on printing.
+ //
+ dr << info << "test id: " << sp.id_path.posix_string ();
+ }
});
- return build2::script::run_if (sp, expr, li, ll);
+ ++sp.exec_level;
+ bool r (build2::script::run_cond (sp, expr, ii, li, ll));
+ --sp.exec_level;
+
+ return r;
}
}
}
diff --git a/libbuild2/test/script/runner.hxx b/libbuild2/test/script/runner.hxx
index b6a038d..687d991 100644
--- a/libbuild2/test/script/runner.hxx
+++ b/libbuild2/test/script/runner.hxx
@@ -48,14 +48,21 @@ namespace build2
// Location is the start position of this command line in the
// testscript. It can be used in diagnostics.
//
+ // Optionally, execute the specified function instead of the last
+ // pipe command.
+ //
virtual void
run (scope&,
const command_expr&, command_type,
- size_t index,
+ const iteration_index*, size_t index,
+ const function<command_function>&,
const location&) = 0;
virtual bool
- run_if (scope&, const command_expr&, size_t, const location&) = 0;
+ run_cond (scope&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) = 0;
// Location is the scope end location (for diagnostics, etc).
//
@@ -84,11 +91,15 @@ namespace build2
virtual void
run (scope&,
const command_expr&, command_type,
- size_t,
+ const iteration_index*, size_t,
+ const function<command_function>&,
const location&) override;
virtual bool
- run_if (scope&, const command_expr&, size_t, const location&) override;
+ run_cond (scope&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) override;
virtual void
leave (scope&, const location&) override;
diff --git a/libbuild2/test/script/script.cxx b/libbuild2/test/script/script.cxx
index 3a8ceac..05dc7b0 100644
--- a/libbuild2/test/script/script.cxx
+++ b/libbuild2/test/script/script.cxx
@@ -30,7 +30,7 @@ namespace build2
scope_base::
scope_base (script& s)
: root (s),
- vars (s.test_target.ctx, false /* global */)
+ vars (s.test_target.ctx, false /* shared */) // Note: managed.
{
vars.assign (root.wd_var) = dir_path ();
}
@@ -115,7 +115,7 @@ namespace build2
}
void scope::
- set_variable (string&& nm,
+ set_variable (string nm,
names&& val,
const string& attrs,
const location& ll)
@@ -197,12 +197,12 @@ namespace build2
test_var (var_pool.insert<path> ("test")),
options_var (var_pool.insert<strings> ("test.options")),
arguments_var (var_pool.insert<strings> ("test.arguments")),
- redirects_var (var_pool.insert<strings> ("test.redirects")),
- cleanups_var (var_pool.insert<strings> ("test.cleanups")),
+ redirects_var (var_pool.insert<cmdline> ("test.redirects")),
+ cleanups_var (var_pool.insert<cmdline> ("test.cleanups")),
wd_var (var_pool.insert<dir_path> ("~")),
id_var (var_pool.insert<path> ("@")),
- cmd_var (var_pool.insert<strings> ("*")),
+ cmd_var (var_pool.insert<cmdline> ("*")),
cmdN_var {
&var_pool.insert<path> ("0"),
&var_pool.insert<string> ("1"),
@@ -355,7 +355,7 @@ namespace build2
// in parallel). Plus, if there is no such variable, then we cannot
// possibly find any value.
//
- const variable* pvar (context.var_pool.find (n));
+ const variable* pvar (root.target_scope.var_pool ().find (n));
if (pvar == nullptr)
return lookup_type ();
@@ -410,11 +410,12 @@ namespace build2
// First assemble the $* value and save the test variable value into
// the test program set.
//
- strings s;
+ cmdline s;
- auto append = [&s] (const strings& v)
+ auto append = [&s] (const strings& vs)
{
- s.insert (s.end (), v.begin (), v.end ());
+ for (const string& v: vs)
+ s.push_back (name (v)); // Simple name.
};
// If the test variable can't be looked up for any reason (is NULL,
@@ -423,7 +424,7 @@ namespace build2
if (auto l = lookup (root.test_var))
{
const path& p (cast<path> (l));
- s.push_back (p.representation ());
+ s.push_back (name (p.representation ()));
test_programs[0] = &p;
@@ -441,10 +442,16 @@ namespace build2
size_t n (s.size ());
if (auto l = lookup (root.redirects_var))
- append (cast<strings> (l));
+ {
+ const auto& v (cast<cmdline> (l));
+ s.insert (s.end (), v.begin (), v.end ());
+ }
if (auto l = lookup (root.cleanups_var))
- append (cast<strings> (l));
+ {
+ const auto& v (cast<cmdline> (l));
+ s.insert (s.end (), v.begin (), v.end ());
+ }
// Set the $N values if present.
//
@@ -455,9 +462,9 @@ namespace build2
if (i < n)
{
if (i == 0)
- v = path (s[i]);
+ v = path (s[i].value);
else
- v = s[i];
+ v = s[i].value;
}
else
v = nullptr; // Clear any old values.
@@ -465,6 +472,88 @@ namespace build2
// Set $*.
//
+ // We need to effective-quote the $test $test.options, $test.arguments
+ // part of it since they will be re-lexed. See the Testscript manual
+ // for details on quoting semantics. In particular, we cannot escape
+ // the special character (|<>&) so we have to rely on quoting. We can
+ // use single-quoting for everything except if the value contains a
+ // single quote. In which case we should probably just do separately-
+ // quoted regions (similar to shell), for example:
+ //
+ // <''>
+ //
+ // Can be quoted as:
+ //
+ // '<'"''"'>'
+ //
+ for (size_t i (0); i != n; ++i)
+ {
+ string& v (s[i].value);
+
+ // Check if the quoting is required for this value.
+ //
+ if (!parser::need_cmdline_relex (v))
+ continue;
+
+ // If the value doesn't contain the single-quote character, then
+ // single-quote it.
+ //
+ size_t p (v.find ('\''));
+
+ if (p == string::npos)
+ {
+ v = '\'' + v + '\'';
+ continue;
+ }
+
+ // Otherwise quote the regions.
+ //
+ // Note that we double-quote the single-quote character sequences
+ // and single-quote all the other regions.
+ //
+ string r;
+ char q (p == 0 ? '"' : '\''); // Current region quoting mode.
+
+ r += q; // Open the first region.
+
+ for (char c: v)
+ {
+ // If we are in the double-quoting mode, then switch to the
+ // single-quoting mode if a non-single-quote character is
+ // encountered.
+ //
+ if (q == '"')
+ {
+ if (c != '\'')
+ {
+ r += q; // Close the double-quoted region.
+ q = '\''; // Set the single-quoting mode.
+ r += q; // Open the single-quoted region.
+ }
+ }
+ //
+ // If we are in the single-quoting mode, then switch to the
+ // double-quoting mode if the single-quote character is
+ // encountered.
+ //
+ else
+ {
+ if (c == '\'')
+ {
+ r += q; // Close the single-quoted region.
+ q = '"'; // Set the double-quoting mode.
+ r += q; // Open the double-quoted region.
+ }
+ }
+
+ r += c;
+ }
+
+ r += q; // Close the last region.
+
+ v = move (r);
+ }
+
assign (root.cmd_var) = move (s);
}
diff --git a/libbuild2/test/script/script.hxx b/libbuild2/test/script/script.hxx
index 22f6725..9409b01 100644
--- a/libbuild2/test/script/script.hxx
+++ b/libbuild2/test/script/script.hxx
@@ -21,16 +21,19 @@ namespace build2
namespace script
{
using build2::script::line;
+ using build2::script::line_type;
using build2::script::lines;
using build2::script::redirect;
using build2::script::redirect_type;
- using build2::script::line_type;
- using build2::script::command_expr;
- using build2::script::expr_term;
using build2::script::command;
+ using build2::script::expr_term;
+ using build2::script::command_expr;
+ using build2::script::iteration_index;
using build2::script::environment_vars;
using build2::script::deadline;
using build2::script::timeout;
+ using build2::script::pipe_command;
+ using build2::script::command_function;
class parser; // Required by VC for 'friend class parser' declaration.
@@ -94,6 +97,22 @@ namespace build2
scope_state state = scope_state::unknown;
+ // The command expression execution nesting level. Can be maintained
+ // by the runner to, for example, only perform some housekeeping on
+ // the topmost level (add the test id to the diagnostics, etc).
+ //
+ // Note that the command expression execution can be nested, so that
+ // the outer expression execution is not completed before all the
+ // inner expressions are executed. As for example in:
+ //
+ // echo 'a b' | for x
+ // echo 'c d' | for y
+ // test $x $y
+ // end
+ // end
+ //
+ size_t exec_level = 0;
+
// Test program paths.
//
// Currently always contains a single element (see test_program() for
@@ -103,8 +122,8 @@ namespace build2
//
small_vector<const path*, 1> test_programs;
- void
- set_variable (string&& name,
+ virtual void
+ set_variable (string name,
names&&,
const string& attrs,
const location&) override;
diff --git a/libbuild2/test/target.cxx b/libbuild2/test/target.cxx
index ce88baa..852abdf 100644
--- a/libbuild2/test/target.cxx
+++ b/libbuild2/test/target.cxx
@@ -56,7 +56,7 @@ namespace build2
&testscript_target_pattern,
nullptr,
&file_search,
- false
+ target_type::flag::none
};
}
}
diff --git a/libbuild2/test/target.hxx b/libbuild2/test/target.hxx
index 1dd7307..e6c549f 100644
--- a/libbuild2/test/target.hxx
+++ b/libbuild2/test/target.hxx
@@ -18,11 +18,14 @@ namespace build2
class LIBBUILD2_SYMEXPORT testscript: public file
{
public:
- using file::file;
+ testscript (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
public:
static const target_type static_type;
- virtual const target_type& dynamic_type () const {return static_type;}
};
}
}
diff --git a/libbuild2/token.cxx b/libbuild2/token.cxx
index ab14388..cc102cc 100644
--- a/libbuild2/token.cxx
+++ b/libbuild2/token.cxx
@@ -29,21 +29,30 @@ namespace build2
os << (r ? "\n" : "<newline>");
break;
}
- case token_type::pair_separator:
+ case token_type::word:
{
if (r)
- os << t.value[0];
+ os << t.value;
else
- os << "<pair separator " << t.value[0] << ">";
+ os << '\'' << t.value << '\'';
break;
}
- case token_type::word:
+ case token_type::escape:
{
if (r)
- os << t.value;
+ os << '\\' << t.value;
else
- os << '\'' << t.value << '\'';
+ os << "<escape sequence \\" << t.value << ">";
+
+ break;
+ }
+ case token_type::pair_separator:
+ {
+ if (r)
+ os << t.value[0];
+ else
+ os << "<pair separator " << t.value[0] << ">";
break;
}
diff --git a/libbuild2/token.hxx b/libbuild2/token.hxx
index fca888c..f9ede65 100644
--- a/libbuild2/token.hxx
+++ b/libbuild2/token.hxx
@@ -30,6 +30,7 @@ namespace build2
eos,
newline,
word,
+ escape, // token::value is <...> in $\<...>
pair_separator, // token::value[0] is the pair separator char.
colon, // :
@@ -159,16 +160,13 @@ namespace build2
token (string v, bool s,
quote_type qt, bool qc, bool qf,
uint64_t l, uint64_t c)
- : token (token_type::word, move (v), s,
- qt, qc, qf,
- l, c,
- &token_printer) {}
+ : token (token_type::word, move (v), s, qt, qc, qf, l, c) {}
token (token_type t,
string v, bool s,
quote_type qt, bool qc, bool qf,
uint64_t l, uint64_t c,
- printer_type* p)
+ printer_type* p = &token_printer)
: type (t), separated (s),
qtype (qt), qcomp (qc), qfirst (qf),
value (move (v)),
diff --git a/libbuild2/types-parsers.cxx b/libbuild2/types-parsers.cxx
new file mode 100644
index 0000000..9c3dc52
--- /dev/null
+++ b/libbuild2/types-parsers.cxx
@@ -0,0 +1,153 @@
+// file : libbuild2/types-parsers.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/types-parsers.hxx>
+
+#include <sstream>
+
+#include <libbuild2/lexer.hxx>
+#include <libbuild2/parser.hxx>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ template <typename T>
+ static void
+ parse_path (T& x, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ x = T (v);
+
+ if (x.empty ())
+ throw invalid_value (o, v);
+ }
+ catch (const invalid_path&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
+ void parser<path>::
+ parse (path& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ parse_path (x, s);
+ }
+
+ void parser<dir_path>::
+ parse (dir_path& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ parse_path (x, s);
+ }
+
+ static names
+ parse_names (const char* o, const char* v)
+ {
+ using build2::parser;
+ using std::istringstream;
+
+ istringstream is (v);
+ is.exceptions (istringstream::failbit | istringstream::badbit);
+
+ // @@ TODO: currently this issues diagnostics to diag_stream.
+ // Perhaps we should redirect it? Also below.
+ //
+ path_name in (o);
+ lexer l (is, in, 1 /* line */, "\'\"\\$("); // Effective.
+ parser p (nullptr);
+ return p.parse_names (l, nullptr, parser::pattern_mode::preserve);
+ }
+
+ void parser<name>::
+ parse (name& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ names r (parse_names (o, v));
+
+ if (r.size () != 1)
+ throw invalid_value (o, v);
+
+ x = move (r.front ());
+ xs = true;
+ }
+ catch (const failed&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
+ void parser<pair<name, optional<name>>>::
+ parse (pair<name, optional<name>>& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ names r (parse_names (o, v));
+
+ if (r.size () == 1)
+ {
+ x.first = move (r.front ());
+ x.second = nullopt;
+ }
+ else if (r.size () == 2 && r.front ().pair == '@')
+ {
+ x.first = move (r.front ());
+ x.second = move (r.back ());
+ }
+ else
+ throw invalid_value (o, v);
+
+ xs = true;
+ }
+ catch (const failed&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
+ void parser<structured_result_format>::
+ parse (structured_result_format& x, bool& xs, scanner& s)
+ {
+ xs = true;
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const string v (s.next ());
+ if (v == "lines")
+ x = structured_result_format::lines;
+ else if (v == "json")
+ x = structured_result_format::json;
+ else
+ throw invalid_value (o, v);
+ }
+ }
+ }
+}
diff --git a/libbuild2/types-parsers.hxx b/libbuild2/types-parsers.hxx
new file mode 100644
index 0000000..42fc60d
--- /dev/null
+++ b/libbuild2/types-parsers.hxx
@@ -0,0 +1,83 @@
+// file : libbuild2/types-parsers.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+// CLI parsers, included into the generated source files.
+//
+
+#ifndef LIBBUILD2_TYPES_PARSERS_HXX
+#define LIBBUILD2_TYPES_PARSERS_HXX
+
+#include <libbuild2/types.hxx>
+
+#include <libbuild2/common-options.hxx> // build2::build::cli namespace
+#include <libbuild2/options-types.hxx>
+
+namespace build2
+{
+ namespace build
+ {
+ namespace cli
+ {
+ class scanner;
+
+ template <typename T>
+ struct parser;
+
+ template <>
+ struct parser<path>
+ {
+ static void
+ parse (path&, bool&, scanner&);
+
+ static void
+ merge (path& b, const path& a) {b = a;}
+ };
+
+ template <>
+ struct parser<dir_path>
+ {
+ static void
+ parse (dir_path&, bool&, scanner&);
+
+ static void
+ merge (dir_path& b, const dir_path& a) {b = a;}
+ };
+
+ template <>
+ struct parser<name>
+ {
+ static void
+ parse (name&, bool&, scanner&);
+
+ static void
+ merge (name& b, const name& a) {b = a;}
+ };
+
+ template <>
+ struct parser<pair<name, optional<name>>>
+ {
+ static void
+ parse (pair<name, optional<name>>&, bool&, scanner&);
+
+ static void
+ merge (pair<name, optional<name>>& b,
+ const pair<name, optional<name>>& a) {b = a;}
+ };
+
+ template <>
+ struct parser<structured_result_format>
+ {
+ static void
+ parse (structured_result_format&, bool&, scanner&);
+
+ static void
+ merge (structured_result_format& b, const structured_result_format& a)
+ {
+ b = a;
+ }
+ };
+ }
+ }
+}
+
+#endif // LIBBUILD2_TYPES_PARSERS_HXX
diff --git a/libbuild2/types.hxx b/libbuild2/types.hxx
index 99cc0dc..ea84701 100644
--- a/libbuild2/types.hxx
+++ b/libbuild2/types.hxx
@@ -15,6 +15,7 @@
#include <map>
#include <set>
+#include <list>
#include <array>
#include <tuple>
#include <regex>
@@ -29,14 +30,22 @@
#include <functional> // hash, function, reference_wrapper
#include <initializer_list>
-#include <mutex>
#include <atomic>
-#include <thread>
-#include <condition_variable>
-#include <libbutl/ft/shared_mutex.hxx>
-#if defined(__cpp_lib_shared_mutex) || defined(__cpp_lib_shared_timed_mutex)
-# include <shared_mutex>
+#ifndef LIBBUTL_MINGW_STDTHREAD
+# include <mutex>
+# include <thread>
+# include <condition_variable>
+
+# include <libbutl/ft/shared_mutex.hxx>
+# if defined(__cpp_lib_shared_mutex) || defined(__cpp_lib_shared_timed_mutex)
+# include <shared_mutex>
+# endif
+#else
+# include <libbutl/mingw-mutex.hxx>
+# include <libbutl/mingw-thread.hxx>
+# include <libbutl/mingw-condition_variable.hxx>
+# include <libbutl/mingw-shared_mutex.hxx>
#endif
#include <ios> // ios_base::failure
@@ -59,6 +68,7 @@
#include <libbutl/target-triplet.hxx>
#include <libbutl/semantic-version.hxx>
#include <libbutl/standard-version.hxx>
+#include <libbutl/move-only-function.hxx>
#include <libbuild2/export.hxx>
@@ -82,9 +92,12 @@ namespace build2
using std::pair;
using std::tuple;
using std::string;
- using std::function;
using std::reference_wrapper;
+ using std::function;
+ using butl::move_only_function;
+ using butl::move_only_function_ex;
+
using strings = std::vector<string>;
using cstrings = std::vector<const char*>;
@@ -102,6 +115,7 @@ namespace build2
using std::multiset;
using std::array;
using std::vector;
+ using std::list;
using butl::vector_view; // <libbutl/vector-view.hxx>
using butl::small_vector; // <libbutl/small-vector.hxx>
@@ -185,20 +199,27 @@ namespace build2
}
#endif
+#ifndef LIBBUTL_MINGW_STDTHREAD
using std::mutex;
using mlock = std::unique_lock<mutex>;
using std::condition_variable;
-#if defined(__cpp_lib_shared_mutex)
+ using std::defer_lock;
+ using std::adopt_lock;
+
+ using std::thread;
+ namespace this_thread = std::this_thread;
+
+# if defined(__cpp_lib_shared_mutex)
using shared_mutex = std::shared_mutex;
using ulock = std::unique_lock<shared_mutex>;
using slock = std::shared_lock<shared_mutex>;
-#elif defined(__cpp_lib_shared_timed_mutex)
+# elif defined(__cpp_lib_shared_timed_mutex)
using shared_mutex = std::shared_timed_mutex;
using ulock = std::unique_lock<shared_mutex>;
using slock = std::shared_lock<shared_mutex>;
-#else
+# else
// Because we have this fallback, we need to be careful not to create
// multiple shared locks in the same thread.
//
@@ -213,13 +234,23 @@ namespace build2
using ulock = std::unique_lock<shared_mutex>;
using slock = ulock;
-#endif
+# endif
+#else // LIBBUTL_MINGW_STDTHREAD
+ using mingw_stdthread::mutex;
+ using mlock = mingw_stdthread::unique_lock<mutex>;
- using std::defer_lock;
- using std::adopt_lock;
+ using mingw_stdthread::condition_variable;
- using std::thread;
- namespace this_thread = std::this_thread;
+ using mingw_stdthread::defer_lock;
+ using mingw_stdthread::adopt_lock;
+
+ using mingw_stdthread::thread;
+ namespace this_thread = mingw_stdthread::this_thread;
+
+ using shared_mutex = mingw_stdthread::shared_mutex;
+ using ulock = mingw_stdthread::unique_lock<shared_mutex>;
+ using slock = mingw_stdthread::shared_lock<shared_mutex>;
+#endif
// Global, MT-safe information cache. Normally used for caching information
// (versions, target triplets, search paths, etc) extracted from other
@@ -292,6 +323,8 @@ namespace build2
using butl::path_map;
using butl::dir_path_map;
+ using butl::path_multimap;
+ using butl::dir_path_multimap;
// Absolute directory path. Note that for now we don't do any checking that
// the path is in fact absolute.
@@ -315,6 +348,20 @@ namespace build2
using paths = std::vector<path>;
using dir_paths = std::vector<dir_path>;
+ // Path printing potentially relative with trailing slash for directories.
+ //
+ LIBBUILD2_SYMEXPORT ostream&
+ operator<< (ostream&, const path&); // utility.cxx
+
+ inline ostream&
+ operator<< (ostream& os, const dir_path& d) // For overload resolution.
+ {
+ return build2::operator<< (os, static_cast<const path&> (d));
+ }
+
+ LIBBUILD2_SYMEXPORT ostream&
+ operator<< (ostream&, const path_name_view&); // utility.cxx
+
// <libbutl/timestamp.hxx>
//
using butl::system_clock;
@@ -332,8 +379,10 @@ namespace build2
using butl::sha256;
// <libbutl/process.hxx>
+ //
using butl::process;
using butl::process_env;
+ using butl::process_exit;
using butl::process_path;
using butl::process_error;
@@ -379,8 +428,14 @@ namespace build2
process_path_ex () = default;
};
+ // Print as recall[@effect].
+ //
+ LIBBUILD2_SYMEXPORT ostream&
+ operator<< (ostream&, const process_path&); // utility.cxx
+
// <libbutl/fdstream.hxx>
//
+ using butl::nullfd;
using butl::auto_fd;
using butl::fdpipe;
using butl::ifdstream;
@@ -462,9 +517,9 @@ namespace build2
location_value (const location&);
- location_value (location_value&&);
+ location_value (location_value&&) noexcept;
location_value (const location_value&);
- location_value& operator= (location_value&&);
+ location_value& operator= (location_value&&) noexcept;
location_value& operator= (const location_value&);
};
@@ -476,26 +531,6 @@ namespace build2
operator<< (ostream&, run_phase); // utility.cxx
}
-// In order to be found (via ADL) these have to be either in std:: or in
-// butl::. The latter is a bad idea since libbutl includes the default
-// implementation. They are defined in utility.cxx.
-//
-namespace std
-{
- // Path printing potentially relative with trailing slash for directories.
- //
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::path&);
-
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::path_name_view&);
-
- // Print as recall[@effect].
- //
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::process_path&);
-}
-
// <libbuild2/name.hxx>
//
#include <libbuild2/name.hxx>
diff --git a/libbuild2/types.ixx b/libbuild2/types.ixx
index 750c8c7..ee2a605 100644
--- a/libbuild2/types.ixx
+++ b/libbuild2/types.ixx
@@ -10,7 +10,7 @@ namespace build2
{
if (!l.empty ())
{
- o << l.file;
+ build2::operator<< (o, l.file); // Disambiguate.
if (l.line != 0)
{
@@ -43,7 +43,7 @@ namespace build2
}
inline location_value::
- location_value (location_value&& l)
+ location_value (location_value&& l) noexcept
: location (l.line, l.column),
file (std::move (l.file))
{
@@ -58,7 +58,7 @@ namespace build2
}
inline location_value& location_value::
- operator= (location_value&& l)
+ operator= (location_value&& l) noexcept
{
if (this != &l)
{
diff --git a/libbuild2/utility-installed.cxx b/libbuild2/utility-installed.cxx
index 441e31b..decc71d 100644
--- a/libbuild2/utility-installed.cxx
+++ b/libbuild2/utility-installed.cxx
@@ -14,6 +14,10 @@ namespace build2
#ifdef BUILD2_INSTALL_LIB
const dir_path build_install_lib (BUILD2_INSTALL_LIB);
#endif
+
+#ifdef BUILD2_INSTALL_BUILDFILE
+ const dir_path build_install_buildfile (BUILD2_INSTALL_BUILDFILE);
+#endif
}
#endif
diff --git a/libbuild2/utility-uninstalled.cxx b/libbuild2/utility-uninstalled.cxx
index a6bad55..69908f9 100644
--- a/libbuild2/utility-uninstalled.cxx
+++ b/libbuild2/utility-uninstalled.cxx
@@ -7,4 +7,10 @@ namespace build2
{
const bool build_installed = false;
const dir_path build_install_lib; // Empty.
+
+#ifdef BUILD2_INSTALL_BUILDFILE
+ const dir_path build_install_buildfile (BUILD2_INSTALL_BUILDFILE);
+#else
+ const dir_path build_install_buildfile; // Empty.
+#endif
}
diff --git a/libbuild2/utility.cxx b/libbuild2/utility.cxx
index f7f3d41..909db07 100644
--- a/libbuild2/utility.cxx
+++ b/libbuild2/utility.cxx
@@ -3,8 +3,18 @@
#include <libbuild2/utility.hxx>
+#ifndef _WIN32
+# include <signal.h> // signal()
+#else
+# include <libbutl/win32-utility.hxx>
+#endif
+
#include <time.h> // tzset() (POSIX), _tzset() (Windows)
+#ifdef __GLIBCXX__
+# include <locale>
+#endif
+
#include <cerrno> // ENOENT
#include <cstring> // strlen(), str[n]cmp()
#include <iostream> // cerr
@@ -19,7 +29,6 @@
using namespace std;
using namespace butl;
-//
// <libbuild2/types.hxx>
//
namespace build2
@@ -31,12 +40,9 @@ namespace build2
{
return os << run_phase_[static_cast<uint8_t> (p)];
}
-}
-namespace std
-{
ostream&
- operator<< (ostream& os, const ::butl::path& p)
+ operator<< (ostream& os, const path& p)
{
using namespace build2;
@@ -47,7 +53,7 @@ namespace std
}
ostream&
- operator<< (ostream& os, const ::butl::path_name_view& v)
+ operator<< (ostream& os, const path_name_view& v)
{
assert (!v.empty ());
@@ -55,7 +61,7 @@ namespace std
}
ostream&
- operator<< (ostream& os, const ::butl::process_path& p)
+ operator<< (ostream& os, const process_path& p)
{
using namespace build2;
@@ -76,11 +82,10 @@ namespace std
}
}
+// <libbuild2/utility.hxx>
+//
namespace build2
{
- //
- // <libbuild2/utility.hxx>
- //
void (*terminate) (bool);
process_path argv0;
@@ -126,13 +131,13 @@ namespace build2
if (p.absolute ())
{
if (p == b)
- return cur ? "." + p.separator_string () : string ();
+ return cur ? '.' + p.separator_string () : string ();
#ifndef _WIN32
if (!home.empty ())
{
if (p == home)
- return "~" + p.separator_string ();
+ return '~' + p.separator_string ();
}
#endif
@@ -210,11 +215,10 @@ namespace build2
process
run_start (uint16_t verbosity,
const process_env& pe,
- const char* args[],
+ const char* const* args,
int in,
int out,
- bool err,
- const dir_path& cwd,
+ int err,
const location& l)
try
{
@@ -228,17 +232,15 @@ namespace build2
args,
in,
out,
- (err ? 2 : 1),
- (!cwd.empty ()
- ? cwd.string ().c_str ()
- : pe.cwd != nullptr ? pe.cwd->string ().c_str () : nullptr),
+ err,
+ pe.cwd != nullptr ? pe.cwd->string ().c_str () : nullptr,
pe.vars);
}
catch (const process_error& e)
{
if (e.child)
{
- // Note: run_finish() expects this exact message.
+ // Note: run_finish_impl() below expects this exact message.
//
cerr << "unable to execute " << args[0] << ": " << e << endl;
@@ -253,7 +255,7 @@ namespace build2
}
bool
- run_wait (const char* args[], process& pr, const location& loc)
+ run_wait (const char* const* args, process& pr, const location& loc)
try
{
return pr.wait ();
@@ -264,55 +266,330 @@ namespace build2
}
bool
- run_finish_impl (const char* args[],
+ run_finish_impl (const char* const* args,
process& pr,
- bool err,
+ bool f,
const string& l,
+ uint16_t v,
+ bool omit_normal,
const location& loc)
- try
{
tracer trace ("run_finish");
- if (pr.wait ())
- return true;
-
- const process_exit& e (*pr.exit);
-
- if (!e.normal ())
- fail (loc) << "process " << args[0] << " " << e;
+ try
+ {
+ if (pr.wait ())
+ return true;
+ }
+ catch (const process_error& e)
+ {
+ fail (loc) << "unable to execute " << args[0] << ": " << e << endf;
+ }
- // Normall but non-zero exit status.
+ // Note: see similar code in diag_buffer::close().
+ //
+ const process_exit& pe (*pr.exit);
+ bool ne (pe.normal ());
+
+ // Even if the user redirected the diagnostics, one error that we want to
+ // let through is the inability to execute the program itself. We cannot
+ // reserve a special exit status to signal this so we will just have to
+ // compare the output. In a sense, we treat this as a special case of
+ // abnormal termination. This particular situation will result in a single
+ // error line printed by run_start() above.
//
- if (err)
+ if (ne && l.compare (0, 18, "unable to execute ") == 0)
+ fail (loc) << l;
+
+ if (omit_normal && ne)
{
- // While we assuming diagnostics has already been issued (to STDERR), if
- // that's not the case, it's a real pain to debug. So trace it.
+ // While we assume diagnostics has already been issued (to stderr), if
+ // that's not the case, it's a real pain to debug. So trace it. (And
+ // if you think that doesn't happen in sensible programs, check GCC
+ // bug #107448).
+ //
+ l4 ([&]{trace << "process " << args[0] << " " << pe;});
+ }
+ else
+ {
+ // It's unclear whether we should print this only if printing the
+ // command line (we could also do things differently for normal/abnormal
+ // exit). Let's print this always and see how it wears. Note that we now
+ // rely on this in, for example, process_finish(), extract_metadata().
+ //
+ // Note: make sure to keep the above trace if we decide not to print.
//
- l4 ([&]{trace << "process " << args[0] << " " << e;});
+ diag_record dr;
+ dr << error (loc) << "process " << args[0] << " " << pe;
- throw failed ();
+ if (verb >= 1 && verb <= v)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
}
- // Even if the user asked to suppress diagnostiscs, one error that we
- // want to let through is the inability to execute the program itself.
- // We cannot reserve a special exit status to signal this so we will
- // just have to compare the output. This particular situation will
- // result in a single error line printed by run_start() above.
- //
- if (l.compare (0, 18, "unable to execute ") == 0)
- fail (loc) << l;
+ if (f || !ne)
+ throw failed ();
return false;
}
- catch (const process_error& e)
+
+ bool
+ run_finish_impl (diag_buffer& dbuf,
+ const char* const* args,
+ process& pr,
+ bool f,
+ uint16_t v,
+ bool on,
+ const location& loc)
{
- fail (loc) << "unable to execute " << args[0] << ": " << e << endf;
+ try
+ {
+ pr.wait ();
+ }
+ catch (const process_error& e)
+ {
+ fail (loc) << "unable to execute " << args[0] << ": " << e << endf;
+ }
+
+ const process_exit& pe (*pr.exit);
+
+ dbuf.close (args, pe, v, on, loc);
+
+ if (pe)
+ return true;
+
+ if (f || !pe.normal ())
+ throw failed ();
+
+ return false;
}
void
- run_io_error (const char* args[], const io_error& e)
+ run (context& ctx,
+ const process_env& pe,
+ const char* const* args,
+ uint16_t v)
+ {
+ if (ctx.phase == run_phase::load)
+ {
+ process pr (run_start (pe, args));
+ run_finish (args, pr, v);
+ }
+ else
+ {
+ process pr (run_start (pe,
+ args,
+ 0 /* stdin */,
+ 1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+ diag_buffer dbuf (ctx, args[0], pr);
+ dbuf.read ();
+ run_finish (dbuf, args, pr, v);
+ }
+ }
+
+ bool
+ run (context& ctx,
+ uint16_t verbosity,
+ const process_env& pe,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ const function<bool (string&, bool)>& f,
+ bool tr,
+ bool err,
+ bool ignore_exit,
+ sha256* checksum)
+ {
+ assert (!err || !ignore_exit);
+
+ if (!err || ctx.phase == run_phase::load)
+ {
+ process pr (run_start (verbosity,
+ pe,
+ args,
+ 0 /* stdin */,
+ -1 /* stdout */,
+ err ? 2 : 1 /* stderr */));
+
+ string l; // Last line of output.
+ try
+ {
+ ifdstream is (move (pr.in_ofd), fdstream_mode::skip);
+
+ bool empty (true);
+
+ // Make sure we keep the last line.
+ //
+ for (bool last (is.peek () == ifdstream::traits_type::eof ());
+ !last && getline (is, l); )
+ {
+ last = (is.peek () == ifdstream::traits_type::eof ());
+
+ if (tr)
+ trim (l);
+
+ if (checksum != nullptr)
+ checksum->append (l);
+
+ if (empty)
+ {
+ empty = f (l, last);
+
+ if (!empty && checksum == nullptr)
+ break;
+ }
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ if (run_wait (args, pr))
+ fail << "io error reading " << args[0] << " output: " << e << endf;
+
+ // If the child process has failed then assume the io error was
+ // caused by that and let run_finish() deal with it.
+ }
+
+ // Omit normal exit code diagnostics if err is false.
+ //
+ if (!(run_finish_impl (args, pr, err, l, finish_verbosity, !err) ||
+ ignore_exit))
+ return false;
+ }
+ else
+ {
+ // We have to use the non-blocking setup since we have to read from stdout
+ // and stderr simultaneously.
+ //
+ process pr (run_start (verbosity,
+ pe,
+ args,
+ 0 /* stdin */,
+ -1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+
+ // Note that while we read both streams until eof in the normal
+ // circumstances, we cannot use fdstream_mode::skip for the exception
+ // case on both of them: we may end up being blocked trying to read one
+ // stream while the process may be blocked writing to the other. So in
+ // case of an exception we only skip the diagnostics and close stdout
+ // hard. The latter should happen first so the order of the dbuf/is
+ // variables is important.
+ //
+ diag_buffer dbuf (ctx, args[0], pr, (fdstream_mode::non_blocking |
+ fdstream_mode::skip));
+ try
+ {
+ ifdstream is (move (pr.in_ofd),
+ fdstream_mode::non_blocking,
+ ifdstream::badbit);
+
+ bool empty (true);
+
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically get an
+ // inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
+
+ // To detect the last line we are going to keep the previous line and
+ // only call the function once we've read the next.
+ //
+ optional<string> pl;
+
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
+ {
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ {
+ if (pl && empty)
+ f (*pl, true /* last */);
+
+ ist.fd = nullfd;
+ }
+ else
+ {
+ if (checksum != nullptr || empty)
+ {
+ if (tr)
+ trim (l);
+
+ if (checksum != nullptr)
+ checksum->append (l);
+
+ if (empty)
+ {
+ if (pl)
+ {
+ if ((empty = f (*pl, false /* last */)))
+ swap (l, *pl);
+
+ // Note that we cannot bail out like in the other version
+ // since we don't have the skip mode on is. Plus, we might
+ // still have the diagnostics.
+ }
+ else
+ pl = move (l);
+ }
+ }
+
+ l.clear ();
+ }
+
+ continue;
+ }
+
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ if (run_wait (args, pr))
+ {
+ // Note that we will drop the diagnostics in this case since reading
+ // it could have been the cause of this error.
+ //
+ fail << "io error reading " << args[0] << " output: " << e << endf;
+ }
+
+ // If the child process has failed then assume the io error was caused
+ // by that and let run_finish() deal with it.
+ }
+
+ run_finish_impl (dbuf, args, pr, true /* fail */, finish_verbosity);
+ }
+
+ return true;
+ }
+
+ cstrings
+ process_args (const char* program, const strings& args)
{
- fail << "io error reading " << args[0] << " output: " << e << endf;
+ cstrings r;
+ r.reserve (args.size () + 2);
+
+ r.push_back (program);
+
+ for (const string& a: args)
+ r.push_back (a.c_str ());
+
+ r.push_back (nullptr);
+ return r;
}
fdpipe
@@ -556,8 +833,73 @@ namespace build2
}
void
+ init_process ()
+ {
+ // This is a little hack to make our baseutils for Windows work when
+ // called with absolute path. In a nutshell, MSYS2's exec*p() doesn't
+ // search in the parent's executable directory, only in PATH. And since we
+ // are running without a shell (that would read /etc/profile which sets
+ // PATH to some sensible values), we are only getting Win32 PATH values.
+ // And MSYS2 /bin is not one of them. So what we are going to do is add
+ // /bin at the end of PATH (which will be passed as is by the MSYS2
+ // machinery). This will make MSYS2 search in /bin (where our baseutils
+ // live). And for everyone else this should be harmless since it is not a
+ // valid Win32 path.
+ //
+#ifdef _WIN32
+ {
+ string mp;
+ if (optional<string> p = getenv ("PATH"))
+ {
+ mp = move (*p);
+ mp += ';';
+ }
+ mp += "/bin";
+
+ setenv ("PATH", mp);
+ }
+#endif
+
+ // On POSIX ignore SIGPIPE which is signaled to a pipe-writing process if
+ // the pipe reading end is closed. Note that by default this signal
+ // terminates a process. Also note that there is no way to disable this
+ // behavior on a file descriptor basis or for the write() function call.
+ //
+#ifndef _WIN32
+ if (signal (SIGPIPE, SIG_IGN) == SIG_ERR)
+ fail << "unable to ignore broken pipe (SIGPIPE) signal: "
+ << system_error (errno, generic_category ()); // Sanitize.
+#endif
+
+ // Initialize time conversion data that is used by localtime_r().
+ //
+#ifndef _WIN32
+ tzset ();
+#else
+ _tzset ();
+#endif
+
+ // A data race happens in the libstdc++ (as of GCC 7.2) implementation of
+ // the ctype<char>::narrow() function (bug #77704). The issue is easily
+ // triggered by the testscript runner that indirectly (via regex) uses
+ // ctype<char> facet of the global locale (and can potentially be
+ // triggered by other locale-aware code). We work around this by
+ // pre-initializing the global locale facet internal cache.
+ //
+#ifdef __GLIBCXX__
+ {
+ const ctype<char>& ct (use_facet<ctype<char>> (locale ()));
+
+ for (size_t i (0); i != 256; ++i)
+ ct.narrow (static_cast<char> (i), '\0');
+ }
+#endif
+ }
+
+ void
init (void (*t) (bool),
const char* a0,
+ bool ss,
optional<bool> mc,
optional<path> cs,
optional<path> cg)
@@ -592,6 +934,18 @@ namespace build2
}
script::regex::init ();
+
+ if (!ss)
+ {
+#ifdef _WIN32
+ // On Windows disable displaying error reporting dialog box for the
+ // current and child processes unless we are in the stop mode. Failing
+ // that, we may have multiple dialog boxes popping up.
+ //
+ SetErrorMode (SetErrorMode (0) | // Returns the current mode.
+ SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
+#endif
+ }
}
optional<uint64_t>
diff --git a/libbuild2/utility.hxx b/libbuild2/utility.hxx
index b62d2ab..43cb904 100644
--- a/libbuild2/utility.hxx
+++ b/libbuild2/utility.hxx
@@ -4,14 +4,15 @@
#ifndef LIBBUILD2_UTILITY_HXX
#define LIBBUILD2_UTILITY_HXX
-#include <tuple> // make_tuple()
-#include <memory> // make_shared()
-#include <string> // to_string()
-#include <utility> // move(), forward(), declval(), make_pair(), swap()
-#include <cassert> // assert()
-#include <iterator> // make_move_iterator()
-#include <algorithm> // *
-#include <functional> // ref(), cref()
+#include <tuple> // make_tuple()
+#include <memory> // make_shared()
+#include <string> // to_string()
+#include <utility> // move(), forward(), declval(), make_pair(), swap()
+#include <cassert> // assert()
+#include <iterator> // make_move_iterator(), back_inserter()
+#include <algorithm> // *
+#include <functional> // ref(), cref()
+#include <type_traits>
#include <libbutl/ft/lang.hxx>
@@ -50,6 +51,7 @@ namespace build2
using std::make_tuple;
using std::make_shared;
using std::make_move_iterator;
+ using std::back_inserter;
using std::to_string;
using std::stoul;
using std::stoull;
@@ -69,6 +71,7 @@ namespace build2
using butl::alpha;
using butl::alnum;
using butl::digit;
+ using butl::wspace;
using butl::trim;
using butl::next_word;
@@ -90,12 +93,27 @@ namespace build2
// <libbutl/fdstream.hxx>
//
+ using butl::fdopen_null;
using butl::open_file_or_stdin;
using butl::open_file_or_stdout;
// <libbutl/path-pattern.hxx>
//
using butl::path_pattern;
+ using butl::path_match;
+
+ // Perform process-wide initializations/adjustments/workarounds. Should be
+ // called once early in main(). In particular, besides other things, this
+ // function does the following:
+ //
+ // - Sets PATH to include baseutils /bin on Windows.
+ //
+ // - Ignores SIGPIPE.
+ //
+ // - Calls tzset().
+ //
+ LIBBUILD2_SYMEXPORT void
+ init_process ();
// Diagnostics state (verbosity level, etc; see <libbuild2/diagnostics.hxx>).
//
@@ -113,6 +131,7 @@ namespace build2
init_diag (uint16_t verbosity,
bool silent = false,
optional<bool> progress = nullopt,
+ optional<bool> diag_color = nullopt,
bool no_lines = false,
bool no_columns = false,
bool stderr_term = false);
@@ -122,13 +141,23 @@ namespace build2
LIBBUILD2_SYMEXPORT extern bool silent;
// --[no-]progress
+ // --[no-]diag-color
//
LIBBUILD2_SYMEXPORT extern optional<bool> diag_progress_option;
+ LIBBUILD2_SYMEXPORT extern optional<bool> diag_color_option;
LIBBUILD2_SYMEXPORT extern bool diag_no_line; // --no-line
LIBBUILD2_SYMEXPORT extern bool diag_no_column; // --no-column
- LIBBUILD2_SYMEXPORT extern bool stderr_term; // True if stderr is a terminal.
+ // If stderr is not a terminal, then the value is absent (so can be used as
+ // bool). Otherwise, it is the value of the TERM environment variable (which
+ // can be NULL).
+ //
+ LIBBUILD2_SYMEXPORT extern optional<const char*> stderr_term;
+
+ // True if the color can be used on the stderr terminal.
+ //
+ LIBBUILD2_SYMEXPORT extern bool stderr_term_color;
// Global state (verbosity, home/work directories, etc).
@@ -138,6 +167,7 @@ namespace build2
LIBBUILD2_SYMEXPORT void
init (void (*terminate) (bool),
const char* argv0,
+ bool serial_stop,
optional<bool> mtime_check = nullopt,
optional<path> config_sub = nullopt,
optional<path> config_guess = nullopt);
@@ -156,11 +186,13 @@ namespace build2
LIBBUILD2_SYMEXPORT extern const standard_version build_version;
LIBBUILD2_SYMEXPORT extern const string build_version_interface;
- // Whether running installed build and, if so, the library installation
- // directory (empty otherwise).
+ // Whether running installed build as well as the library installation
+ // directory (only if installed, empty otherwise) and the exported buildfile
+ // installation directory (only if configured, empty otherwise).
//
LIBBUILD2_SYMEXPORT extern const bool build_installed;
LIBBUILD2_SYMEXPORT extern const dir_path build_install_lib; // $install.lib
+ LIBBUILD2_SYMEXPORT extern const dir_path build_install_buildfile; // $install.buildfile
// --[no-]mtime-check
//
@@ -211,7 +243,7 @@ namespace build2
// Basic process utilities.
//
- // The run*() functions with process_path assume that you are printing
+ // The run*() functions with process_path/_env assume that you are printing
// the process command line yourself.
// Search for a process executable. Issue diagnostics and throw failed in
@@ -245,126 +277,55 @@ namespace build2
[[noreturn]] LIBBUILD2_SYMEXPORT void
run_search_fail (const path&, const location& = location ());
- // Wait for process termination returning true if the process exited
- // normally with a zero code and false otherwise. The latter case is
- // normally followed up with a call to run_finish().
- //
- LIBBUILD2_SYMEXPORT bool
- run_wait (const char* args[], process&, const location& = location ());
-
- bool
- run_wait (cstrings& args, process&, const location& = location ());
-
- // Wait for process termination. Issue diagnostics and throw failed in case
- // of abnormal termination. If the process has terminated normally but with
- // a non-zero exit status, then assume the diagnostics has already been
- // issued and just throw failed. The last argument is used in cooperation
- // with run_start() in case STDERR is redirected to STDOUT.
- //
- void
- run_finish (const char* args[],
- process&,
- const string& = string (),
- const location& = location ());
-
- void
- run_finish (cstrings& args, process& pr, const location& l = location ());
-
- // As above but if the process has exited normally with a non-zero code,
- // then return false rather than throwing.
- //
- bool
- run_finish_code (const char* args[],
- process&,
- const string& = string (),
- const location& = location ());
-
- // Start a process with the specified arguments. If in is -1, then redirect
- // STDIN to a pipe (can also be -2 to redirect to /dev/null or equivalent).
- // If out is -1, redirect STDOUT to a pipe. If error is false, then
- // redirecting STDERR to STDOUT (this can be used to suppress diagnostics
- // from the child process). Issue diagnostics and throw failed in case of an
- // error.
+ // Start a process with the specified arguments. Issue diagnostics and throw
+ // failed in case of an error. If in is -1, then redirect stdin to a pipe
+ // (can also be -2 to redirect it to /dev/null or equivalent). If out is -1,
+ // then redirect stdout to a pipe. If stderr is redirected to stdout (can
+ // be used to analyze diagnostics from the child process), then, in case of
+ // an error, the last line read from stdout must be passed to run_finish()
+ // below.
//
LIBBUILD2_SYMEXPORT process
run_start (uint16_t verbosity,
const process_env&, // Implicit-constructible from process_path.
- const char* args[],
+ const char* const* args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& = location ());
+ int err = 2,
+ const location& = {});
inline process
run_start (uint16_t verbosity,
const process_env& pe,
- cstrings& args,
+ const cstrings& args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& l = location ())
+ int err = 2,
+ const location& l = {})
{
- return run_start (verbosity, pe, args.data (), in, out, error, cwd, l);
+ return run_start (verbosity, pe, args.data (), in, out, err, l);
}
inline process
run_start (const process_env& pe,
- const char* args[],
+ const char* const* args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& l = location ())
+ int err = 2,
+ const location& l = {})
{
- return run_start (verb_never, pe, args, in, out, error, cwd, l);
+ return run_start (verb_never, pe, args, in, out, err, l);
}
inline process
run_start (const process_env& pe,
- cstrings& args,
+ const cstrings& args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& l = location ())
- {
- return run_start (pe, args.data (), in, out, error, cwd, l);
- }
-
- inline void
- run (const process_env& pe, // Implicit-constructible from process_path.
- const char* args[])
+ int err = 2,
+ const location& l = {})
{
- process pr (run_start (pe, args));
- run_finish (args, pr);
- }
-
- inline void
- run (const process_env& pe, // Implicit-constructible from process_path.
- cstrings& args)
- {
- run (pe, args.data ());
- }
-
- inline void
- run (const process_path& p,
- const char* args[],
- const dir_path& cwd,
- const char* const* env = nullptr)
- {
- process pr (run_start (process_env (p, env), args, 0, 1, true, cwd));
- run_finish (args, pr);
- }
-
- inline void
- run (const process_path& p,
- cstrings& args,
- const dir_path& cwd,
- const char* const* env = nullptr)
- {
- run (p, args.data (), cwd, env);
+ return run_start (pe, args.data (), in, out, err, l);
}
// As above, but search for the process (including updating args[0]) and
@@ -375,16 +336,16 @@ namespace build2
const char* args[],
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
+ int err = 2,
const char* const* env = nullptr,
- const location& l = location ())
+ const dir_path& cwd = {},
+ const location& l = {})
{
process_path pp (run_search (args[0], l));
return run_start (verbosity,
- process_env (pp, env), args,
- in, out, error,
- cwd, l);
+ process_env (pp, cwd, env), args,
+ in, out, err,
+ l);
}
inline process
@@ -392,55 +353,215 @@ namespace build2
cstrings& args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
+ int err = 2,
const char* const* env = nullptr,
- const location& l = location ())
+ const dir_path& cwd = {},
+ const location& l = {})
{
- return run_start (verbosity, args.data (), in, out, error, cwd, env, l);
+ return run_start (verbosity, args.data (), in, out, err, env, cwd, l);
}
+ // Wait for process termination returning true if the process exited
+ // normally with a zero code and false otherwise. The latter case is
+ // normally followed up with a call to run_finish().
+ //
+ LIBBUILD2_SYMEXPORT bool
+ run_wait (const char* const* args, process&, const location& = location ());
+
+ bool
+ run_wait (const cstrings& args, process&, const location& = location ());
+
+ // Wait for process termination, issue diagnostics, and throw failed.
+ //
+ // If the child process exited abnormally or normally with non-0 code, then
+ // print the error diagnostics to this effect. Additionally, if the
+ // verbosity level is between 1 and the specified value, then print the
+ // command line as info after the error. If omit_normal is true, then don't
+ // print either for the normal exit (usually used for custom diagnostics or
+ // when process failure can be tolerated).
+ //
+ // Normally the specified verbosity will be 1 and the command line args
+ // represent the verbosity level 2 (logical) command line. Or, to put it
+ // another way, it should be 1 less than what gets passed to run_start().
+ // Note that args should only represent a single command in a pipe (see
+ // print_process() for details).
+ //
+ // See also diag_buffer::close().
+ //
+ // The line argument is used in cooperation with run_start() to diagnose a
+ // failure to exec in case stderr is redirected to stdout (see the
+ // implementation for details).
+ //
+ void
+ run_finish (const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ void
+ run_finish (const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ void
+ run_finish (const char* const* args,
+ process&,
+ const string& line,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ // As above but if the process has exited normally with a non-zero code,
+ // then return false rather than throwing.
+ //
+ // Note that the normal non-0 exit diagnostics is omitted by default
+ // assuming appropriate custom diagnostics will be issued, if required.
+ //
+ bool
+ run_finish_code (const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ bool
+ run_finish_code (const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ bool
+ run_finish_code (const char* const* args,
+ process&,
+ const string&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ // As above but with diagnostics buffering.
+ //
+ // Specifically, this version first waits for the process termination, then
+ // calls diag_buffer::close(verbosity, omit_normal), and finally throws
+ // failed if the process didn't exit with 0 code.
+ //
+ class diag_buffer;
+
+ void
+ run_finish (diag_buffer&,
+ const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ void
+ run_finish (diag_buffer&,
+ const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ // As above but if the process has exited normally with a non-zero code,
+ // then return false rather than throwing.
+ //
+ // Note that the normal non-0 exit diagnostics is omitted by default
+ // assuming appropriate custom diagnostics will be issued, if required.
+ //
+ bool
+ run_finish_code (diag_buffer&,
+ const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ bool
+ run_finish_code (diag_buffer&,
+ const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ // Run the process with the specified arguments by calling the above start
+ // and finish functions. Buffer diagnostics unless in the load phase.
+ //
+ LIBBUILD2_SYMEXPORT void
+ run (context&,
+ const process_env& pe, // Implicit-constructible from process_path.
+ const char* const* args,
+ uint16_t finish_verbosity);
+
inline void
- run (uint16_t verbosity,
- const char* args[],
- const dir_path& cwd = dir_path (),
- const char* const* env = nullptr)
+ run (context& ctx,
+ const process_env& pe,
+ const cstrings& args,
+ uint16_t finish_verbosity)
{
- process pr (run_start (verbosity, args, 0, 1, true, cwd, env));
- run_finish (args, pr);
+ run (ctx, pe, args.data (), finish_verbosity);
}
+ // As above but pass cwd/env vars as arguments rather than as part of
+ // process_env.
+ //
inline void
- run (uint16_t verbosity,
- cstrings& args,
- const dir_path& cwd = dir_path (),
- const char* const* env = nullptr)
+ run (context& ctx,
+ const process_path& p,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ const char* const* env,
+ const dir_path& cwd = {})
+ {
+ run (ctx, process_env (p, cwd, env), args, finish_verbosity);
+ }
+
+ inline void
+ run (context& ctx,
+ const process_path& p,
+ const cstrings& args,
+ uint16_t finish_verbosity,
+ const char* const* env,
+ const dir_path& cwd = {})
{
- run (verbosity, args.data (), cwd, env);
+ run (ctx, p, args.data (), finish_verbosity, env, cwd);
}
// Start the process as above and then call the specified function on each
// trimmed line of the output until it returns a non-empty object T (tested
// with T::empty()) which is then returned to the caller.
//
+ // If verbosity is specified, print the process command line at that level
+ // (with the verbosity-1 value passed to run_finish()).
+ //
+ // If error is false, then redirect stderr to stdout and don't fail if
+ // the process exits normally but with non-0 code (can be used to suppress
+ // and/or analyze diagnostics from the child process). Otherwise, buffer
+ // diagnostics unless in the load phase.
+ //
// The predicate can move the value out of the passed string but, if error
// is false, only in case of a "content match" (so that any diagnostics
// lines are left intact). The function signature should be:
//
// T (string& line, bool last)
//
- // If ignore_exit is true, then the program's exit status is ignored (if it
- // is false and the program exits with the non-zero status, then an empty T
- // instance is returned).
+ // If, in addition to error being false, ignore_exit is true, then the
+ // program's normal exit status is ignored (if it is false and the program
+ // exits with the non-zero status, then an empty T instance is returned).
//
// If checksum is not NULL, then feed it the content of each trimmed line
// (including those that come after the callback returns non-empty object).
//
template <typename T, typename F>
T
- run (uint16_t verbosity,
+ run (context&,
+ uint16_t verbosity,
const process_env&, // Implicit-constructible from process_path.
- const char* args[],
+ const char* const* args,
F&&,
bool error = true,
bool ignore_exit = false,
@@ -448,20 +569,55 @@ namespace build2
template <typename T, typename F>
inline T
- run (const process_env& pe, // Implicit-constructible from process_path.
- const char* args[],
+ run (context& ctx,
+ uint16_t verbosity,
+ const process_env& pe,
+ const cstrings& args,
+ F&& f,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr)
+ {
+ return run<T> (ctx,
+ verbosity,
+ pe, args.data (),
+ forward<F> (f),
+ error, ignore_exit, checksum);
+ }
+
+ template <typename T, typename F>
+ inline T
+ run (context&,
+ const process_env&,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ F&&,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr);
+
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ const process_env& pe,
+ const cstrings& args,
+ uint16_t finish_verbosity,
F&& f,
bool error = true,
bool ignore_exit = false,
sha256* checksum = nullptr)
{
- return run<T> (
- verb_never, pe, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ pe, args.data (),
+ finish_verbosity,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const char* args[],
F&& f,
bool error = true,
@@ -469,15 +625,38 @@ namespace build2
sha256* checksum = nullptr)
{
process_path pp (run_search (args[0]));
- return run<T> (
- verbosity, pp, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ pp, args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ uint16_t verbosity,
+ cstrings& args,
+ F&& f,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr)
+ {
+ return run<T> (ctx,
+ verbosity,
+ args.data (),
+ forward<F> (f),
+ error, ignore_exit, checksum);
+ }
+
+ // As above but run a program without any arguments or with one argument.
+ //
// run <prog>
//
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const path& prog,
F&& f,
bool error = true,
@@ -485,13 +664,20 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {prog.string ().c_str (), nullptr};
- return run<T> (
- verbosity, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
template <typename T, typename F>
- inline T
- run (uint16_t verbosity,
+ inline typename std::enable_if<
+ (!std::is_same<typename std::decay<F>::type, const char**>::value &&
+ !std::is_same<typename std::remove_reference<F>::type, cstrings>::value),
+ T>::type
+ run (context& ctx,
+ uint16_t verbosity,
const process_env& pe, // Implicit-constructible from process_path.
F&& f,
bool error = true,
@@ -499,15 +685,19 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {pe.path->recall_string (), nullptr};
- return run<T> (
- verbosity, pe, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ pe, args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
// run <prog> <arg>
//
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const path& prog,
const char* arg,
F&& f,
@@ -516,13 +706,17 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {prog.string ().c_str (), arg, nullptr};
- return run<T> (
- verbosity, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const process_env& pe, // Implicit-constructible from process_path.
const char* arg,
F&& f,
@@ -531,8 +725,47 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {pe.path->recall_string (), arg, nullptr};
- return run<T> (
- verbosity, pe, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ pe, args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
+ }
+
+ // As above but a lower-level interface that erases T and F and can also be
+ // used to suppress trimming.
+ //
+ // The passed function should return true if it should be called again
+ // (i.e., the object is still empty in the T & F interface) and false
+ // otherwise.
+ //
+ // The first version returns true if the result is usable and false
+ // otherwise, depending on the process exit code and error/ignore_exit
+ // values. (In the latter case, the T & F interface makes the resulting
+ // object empty).
+ //
+ LIBBUILD2_SYMEXPORT bool
+ run (context&,
+ uint16_t verbosity,
+ const process_env&,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ const function<bool (string& line, bool last)>&,
+ bool trim = true,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr);
+
+ // Concatenate the program path and arguments into a shallow NULL-terminated
+ // vector of C-strings.
+ //
+ LIBBUILD2_SYMEXPORT cstrings
+ process_args (const char* program, const strings& args);
+
+ inline cstrings
+ process_args (const string& program, const strings& args)
+ {
+ return process_args (program.c_str (), args);
}
// File descriptor streams.
diff --git a/libbuild2/utility.ixx b/libbuild2/utility.ixx
index aedfc94..58ea8db 100644
--- a/libbuild2/utility.ixx
+++ b/libbuild2/utility.ixx
@@ -6,42 +6,195 @@
namespace build2
{
inline bool
- run_wait (cstrings& args, process& pr, const location& loc)
+ run_wait (const cstrings& args, process& pr, const location& loc)
{
return run_wait (args.data (), pr, loc);
}
- // Note: currently this function is also used in a run() implementations.
+ // Note: these functions are also used in the run() implementations.
//
LIBBUILD2_SYMEXPORT bool
- run_finish_impl (const char*[],
+ run_finish_impl (const char* const*,
process&,
- bool error,
+ bool fail,
const string&,
- const location& = location ());
+ uint16_t,
+ bool = false,
+ const location& = {});
+
+ LIBBUILD2_SYMEXPORT bool
+ run_finish_impl (diag_buffer&,
+ const char* const*,
+ process&,
+ bool fail,
+ uint16_t,
+ bool = false,
+ const location& = {});
inline void
- run_finish (const char* args[],
+ run_finish (const char* const* args,
process& pr,
- const string& l,
+ uint16_t v,
+ bool on,
const location& loc)
{
- run_finish_impl (args, pr, true /* error */, l, loc);
+ run_finish_impl (args, pr, true /* fail */, string (), v, on, loc);
}
inline void
- run_finish (cstrings& args, process& pr, const location& loc)
+ run_finish (const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish (args.data (), pr, v, on, loc);
+ }
+
+ inline void
+ run_finish (const char* const* args,
+ process& pr,
+ const string& l,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish_impl (args, pr, true, l, v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (const char* const* args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ return run_finish_impl (args, pr, false, string (), v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
{
- run_finish (args.data (), pr, string (), loc);
+ return run_finish_code (args.data (), pr, v, on, loc);
}
inline bool
- run_finish_code (const char* args[],
+ run_finish_code (const char* const* args,
process& pr,
const string& l,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ return run_finish_impl (args, pr, false, l, v, on, loc);
+ }
+
+ inline void
+ run_finish (diag_buffer& dbuf,
+ const char* const* args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish_impl (dbuf, args, pr, true /* fail */, v, on, loc);
+ }
+
+ inline void
+ run_finish (diag_buffer& dbuf,
+ const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish_impl (dbuf, args.data (), pr, true, v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (diag_buffer& dbuf,
+ const char* const* args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ return run_finish_impl (dbuf, args, pr, false, v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (diag_buffer& dbuf,
+ const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
const location& loc)
{
- return run_finish_impl (args, pr, false /* error */, l, loc);
+ return run_finish_impl (dbuf, args.data (), pr, false, v, on, loc);
+ }
+
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ uint16_t verbosity,
+ const process_env& pe,
+ const char* const* args,
+ F&& f,
+ bool err,
+ bool ignore_exit,
+ sha256* checksum)
+ {
+ T r;
+ if (!run (ctx,
+ verbosity,
+ pe, args,
+ verbosity - 1,
+ [&r, &f] (string& l, bool last) // Small function optimization.
+ {
+ r = f (l, last);
+ return r.empty ();
+ },
+ true /* trim */,
+ err,
+ ignore_exit,
+ checksum))
+ r = T ();
+
+ return r;
+ }
+
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ const process_env& pe,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ F&& f,
+ bool err,
+ bool ignore_exit,
+ sha256* checksum)
+ {
+ T r;
+ if (!run (ctx,
+ verb_never,
+ pe, args,
+ finish_verbosity,
+ [&r, &f] (string& l, bool last)
+ {
+ r = f (l, last);
+ return r.empty ();
+ },
+ true /* trim */,
+ err,
+ ignore_exit,
+ checksum))
+ r = T ();
+
+ return r;
}
inline void
diff --git a/libbuild2/utility.txx b/libbuild2/utility.txx
index bb25288..d2fc29c 100644
--- a/libbuild2/utility.txx
+++ b/libbuild2/utility.txx
@@ -54,68 +54,4 @@ namespace build2
return p;
}
-
- [[noreturn]] LIBBUILD2_SYMEXPORT void
- run_io_error (const char*[], const io_error&);
-
- template <typename T, typename F>
- T
- run (uint16_t verbosity,
- const process_env& pe,
- const char* args[],
- F&& f,
- bool err,
- bool ignore_exit,
- sha256* checksum)
- {
- process pr (run_start (verbosity,
- pe,
- args,
- 0 /* stdin */,
- -1 /* stdout */,
- err));
- T r;
- string l; // Last line of output.
-
- try
- {
- ifdstream is (move (pr.in_ofd), butl::fdstream_mode::skip);
-
- // Make sure we keep the last line.
- //
- for (bool last (is.peek () == ifdstream::traits_type::eof ());
- !last && getline (is, l); )
- {
- last = (is.peek () == ifdstream::traits_type::eof ());
-
- trim (l);
-
- if (checksum != nullptr)
- checksum->append (l);
-
- if (r.empty ())
- {
- r = f (l, last);
-
- if (!r.empty () && checksum == nullptr)
- break;
- }
- }
-
- is.close ();
- }
- catch (const io_error& e)
- {
- if (run_wait (args, pr))
- run_io_error (args, e);
-
- // If the child process has failed then assume the io error was
- // caused by that and let run_finish() deal with it.
- }
-
- if (!(run_finish_impl (args, pr, err, l) || ignore_exit))
- r = T ();
-
- return r;
- }
}
diff --git a/libbuild2/variable.cxx b/libbuild2/variable.cxx
index 6f2812c..8c52e22 100644
--- a/libbuild2/variable.cxx
+++ b/libbuild2/variable.cxx
@@ -3,7 +3,7 @@
#include <libbuild2/variable.hxx>
-#include <cstring> // memcmp()
+#include <cstring> // memcmp(), memcpy()
#include <libbutl/path-pattern.hxx>
@@ -47,7 +47,7 @@ namespace build2
}
value::
- value (value&& v)
+ value (value&& v) noexcept
: type (v.type), null (v.null), extra (v.extra)
{
if (!null)
@@ -57,7 +57,7 @@ namespace build2
else if (type->copy_ctor != nullptr)
type->copy_ctor (*this, v, true);
else
- data_ = v.data_; // Copy as POD.
+ memcpy (data_, v.data_, size_); // Copy as POD.
}
}
@@ -72,7 +72,7 @@ namespace build2
else if (type->copy_ctor != nullptr)
type->copy_ctor (*this, v, false);
else
- data_ = v.data_; // Copy as POD.
+ memcpy (data_, v.data_, size_); // Copy as POD.
}
}
@@ -99,12 +99,14 @@ namespace build2
if (null)
new (&data_) names (move (v).as<names> ());
else
+ // Note: can throw (see small_vector for details).
+ //
as<names> () = move (v).as<names> ();
}
else if (auto f = null ? type->copy_ctor : type->copy_assign)
f (*this, v, true);
else
- data_ = v.data_; // Assign as POD.
+ memcpy (data_, v.data_, size_); // Assign as POD.
null = v.null;
}
@@ -143,7 +145,7 @@ namespace build2
else if (auto f = null ? type->copy_ctor : type->copy_assign)
f (*this, v, false);
else
- data_ = v.data_; // Assign as POD.
+ memcpy (data_, v.data_, size_); // Assign as POD.
null = v.null;
}
@@ -367,8 +369,8 @@ namespace build2
// Typification is kind of like caching so we reuse that mutex shard.
//
shared_mutex& m (
- ctx.mutexes.variable_cache[
- hash<value*> () (&v) % ctx.mutexes.variable_cache_size]);
+ ctx.mutexes->variable_cache[
+ hash<value*> () (&v) % ctx.mutexes->variable_cache_size]);
// Note: v.type is rechecked by typify() under lock.
//
@@ -377,7 +379,7 @@ namespace build2
}
void
- untypify (value& v)
+ untypify (value& v, bool reduce)
{
if (v.type == nullptr)
return;
@@ -389,7 +391,7 @@ namespace build2
}
names ns;
- names_view nv (v.type->reverse (v, ns));
+ names_view nv (v.type->reverse (v, ns, reduce));
if (nv.empty () || nv.data () == ns.data ())
{
@@ -451,11 +453,11 @@ namespace build2
m = "invalid " + t + " value ";
if (n.simple ())
- m += "'" + n.value + "'";
+ m += '\'' + n.value + '\'';
else if (n.directory ())
- m += "'" + n.dir.representation () + "'";
+ m += '\'' + n.dir.representation () + '\'';
else
- m += "name '" + to_string (n) + "'";
+ m += "name '" + to_string (n) + '\'';
}
throw invalid_argument (m);
@@ -470,7 +472,7 @@ namespace build2
bool value_traits<bool>::
convert (const name& n, const name* r)
{
- if (r == nullptr && !n.pattern && n.simple () )
+ if (r == nullptr && !n.pattern && n.simple ())
{
const string& s (n.value);
@@ -493,6 +495,7 @@ namespace build2
type_name,
sizeof (bool),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -515,13 +518,22 @@ namespace build2
{
try
{
- // May throw invalid_argument or out_of_range.
- //
- size_t i;
- int64_t r (stoll (n.value, &i));
+ const string& v (n.value);
+
+ if (!wspace (v[0]))
+ {
+ // Note that unlike uint64, we don't support hex notation for int64.
+
+ // May throw invalid_argument or out_of_range.
+ //
+ size_t i;
+ int64_t r (stoll (v, &i));
+
+ if (i == v.size ())
+ return r;
- if (i == n.value.size ())
- return r;
+ // Fall through.
+ }
// Fall through.
}
@@ -541,6 +553,7 @@ namespace build2
type_name,
sizeof (int64_t),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -563,13 +576,22 @@ namespace build2
{
try
{
- // May throw invalid_argument or out_of_range.
- //
- size_t i;
- uint64_t r (stoull (n.value, &i));
+ const string& v (n.value);
+
+ if (!wspace (v[0]))
+ {
+ int b (v[0] == '0' && (v[1] == 'x' || v[1] == 'X') ? 16 : 10);
+
+ // May throw invalid_argument or out_of_range.
+ //
+ size_t i;
+ uint64_t r (stoull (v, &i, b));
- if (i == n.value.size ())
- return r;
+ if (i == v.size ())
+ return r;
+
+ // Fall through.
+ }
// Fall through.
}
@@ -589,6 +611,7 @@ namespace build2
type_name,
sizeof (uint64_t),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -612,28 +635,31 @@ namespace build2
// the common cases (unqualified, unpaired simple name or directory).
//
- // We can only convert project-qualified simple and directory names.
+ // We can only convert project-qualified untyped names.
//
- if (n.pattern ||
- !(n.simple (true) || n.directory (true)))
+ if (n.pattern || n.typed ())
throw_invalid_argument (n, nullptr, "string");
if (r != nullptr)
{
- if (r->pattern ||
- !(r->simple (true) || r->directory (true)))
+ if (r->pattern || r->typed ())
throw_invalid_argument (*r, nullptr, "string");
}
string s;
- if (n.directory (true))
+ if (n.simple (true))
+ s.swap (n.value);
+ else
+ {
// Note that here we cannot assume what's in dir is really a
// path (think s/foo/bar/) so we have to reverse it exactly.
//
s = move (n.dir).representation (); // Move out of path.
- else
- s.swap (n.value);
+
+ if (!n.value.empty ())
+ s += n.value; // Separator is already there.
+ }
// Convert project qualification to its string representation.
//
@@ -657,10 +683,15 @@ namespace build2
s += '%';
}
- if (r->directory (true))
- s += move (r->dir).representation ();
- else
+ if (r->simple (true))
s += r->value;
+ else
+ {
+ s += move (r->dir).representation ();
+
+ if (!r->value.empty ())
+ s += r->value;
+ }
}
return s;
@@ -675,6 +706,7 @@ namespace build2
type_name,
sizeof (string),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<string>,
&default_copy_ctor<string>,
@@ -742,6 +774,7 @@ namespace build2
type_name,
sizeof (path),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<path>,
&default_copy_ctor<path>,
@@ -809,6 +842,7 @@ namespace build2
sizeof (dir_path),
&value_traits<path>::value_type, // Base (assuming direct cast works for
// both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<dir_path>,
&default_copy_ctor<dir_path>,
@@ -857,6 +891,7 @@ namespace build2
sizeof (abs_dir_path),
&value_traits<dir_path>::value_type, // Base (assuming direct cast works
// for both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<abs_dir_path>,
&default_copy_ctor<abs_dir_path>,
@@ -882,10 +917,10 @@ namespace build2
}
static names_view
- name_reverse (const value& v, names&)
+ name_reverse (const value& v, names&, bool reduce)
{
const name& n (v.as<name> ());
- return n.empty () ? names_view (nullptr, 0) : names_view (&n, 1);
+ return reduce && n.empty () ? names_view (nullptr, 0) : names_view (&n, 1);
}
const char* const value_traits<name>::type_name = "name";
@@ -895,6 +930,7 @@ namespace build2
type_name,
sizeof (name),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<name>,
&default_copy_ctor<name>,
@@ -949,13 +985,13 @@ namespace build2
}
static names_view
- name_pair_reverse (const value& v, names& ns)
+ name_pair_reverse (const value& v, names& ns, bool reduce)
{
const name_pair& p (v.as<name_pair> ());
const name& f (p.first);
const name& s (p.second);
- if (f.empty () && s.empty ())
+ if (reduce && f.empty () && s.empty ())
return names_view (nullptr, 0);
if (f.empty ())
@@ -977,6 +1013,7 @@ namespace build2
type_name,
sizeof (name_pair),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<name_pair>,
&default_copy_ctor<name_pair>,
@@ -1107,10 +1144,14 @@ namespace build2
}
static names_view
- process_path_reverse (const value& v, names& s)
+ process_path_reverse (const value& v, names& s, bool)
{
const auto& x (v.as<process_path> ());
+ // Note that strictly speaking process_path doesn't have empty
+ // representation (see convert() above). Thus we always return reduced
+ // representation.
+ //
if (!x.empty ())
{
s.reserve (x.effect.empty () ? 1 : 2);
@@ -1127,6 +1168,7 @@ namespace build2
type_name,
sizeof (process_path),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<process_path>,
&process_path_copy_ctor<process_path>,
@@ -1275,10 +1317,13 @@ namespace build2
}
static names_view
- process_path_ex_reverse (const value& v, names& s)
+ process_path_ex_reverse (const value& v, names& s, bool)
{
const auto& x (v.as<process_path_ex> ());
+ // Note that process_path_ex only has reduced empty representation (see
+ // convert() above).
+ //
if (!x.empty ())
{
s.reserve ((x.effect.empty () ? 1 : 2) +
@@ -1322,6 +1367,7 @@ namespace build2
sizeof (process_path_ex),
&value_traits< // Base (assuming direct cast works
process_path>::value_type, // for both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<process_path_ex>,
&process_path_ex_copy_ctor,
@@ -1363,6 +1409,7 @@ namespace build2
type_name,
sizeof (target_triplet),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<target_triplet>,
&default_copy_ctor<target_triplet>,
@@ -1407,6 +1454,7 @@ namespace build2
type_name,
sizeof (project_name),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<project_name>,
&default_copy_ctor<project_name>,
@@ -1420,6 +1468,139 @@ namespace build2
&default_empty<project_name>
};
+ // cmdline
+ //
+ cmdline value_traits<cmdline>::
+ convert (names&& ns)
+ {
+ return cmdline (make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ void value_traits<cmdline>::
+ assign (value& v, cmdline&& x)
+ {
+ if (v)
+ v.as<cmdline> () = move (x);
+ else
+ new (&v.data_) cmdline (move (x));
+ }
+
+ void value_traits<cmdline>::
+ append (value& v, cmdline&& x)
+ {
+ if (v)
+ {
+ cmdline& p (v.as<cmdline> ());
+
+ if (p.empty ())
+ p.swap (x);
+ else
+ p.insert (p.end (),
+ make_move_iterator (x.begin ()),
+ make_move_iterator (x.end ()));
+ }
+ else
+ new (&v.data_) cmdline (move (x));
+ }
+
+ void value_traits<cmdline>::
+ prepend (value& v, cmdline&& x)
+ {
+ if (v)
+ {
+ cmdline& p (v.as<cmdline> ());
+
+ if (!p.empty ())
+ x.insert (x.end (),
+ make_move_iterator (p.begin ()),
+ make_move_iterator (p.end ()));
+
+ p.swap (x);
+ }
+ else
+ new (&v.data_) cmdline (move (x));
+ }
+
+ void
+ cmdline_assign (value& v, names&& ns, const variable*)
+ {
+ if (!v)
+ {
+ new (&v.data_) cmdline ();
+ v.null = false;
+ }
+
+ v.as<cmdline> ().assign (make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ void
+ cmdline_append (value& v, names&& ns, const variable*)
+ {
+ if (!v)
+ {
+ new (&v.data_) cmdline ();
+ v.null = false;
+ }
+
+ auto& x (v.as<cmdline> ());
+ x.insert (x.end (),
+ make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ void
+ cmdline_prepend (value& v, names&& ns, const variable*)
+ {
+ if (!v)
+ {
+ new (&v.data_) cmdline ();
+ v.null = false;
+ }
+
+ auto& x (v.as<cmdline> ());
+ x.insert (x.begin (),
+ make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ static names_view
+ cmdline_reverse (const value& v, names&, bool)
+ {
+ const auto& x (v.as<cmdline> ());
+ return names_view (x.data (), x.size ());
+ }
+
+ static int
+ cmdline_compare (const value& l, const value& r)
+ {
+ return vector_compare<name> (l, r);
+ }
+
+ const cmdline value_traits<cmdline>::empty_instance;
+
+ const char* const value_traits<cmdline>::type_name = "cmdline";
+
+ const value_type value_traits<cmdline>::value_type
+ {
+ type_name,
+ sizeof (cmdline),
+ nullptr, // No base.
+ true, // Container.
+ &value_traits<string>::value_type, // Element type.
+ &default_dtor<cmdline>,
+ &default_copy_ctor<cmdline>,
+ &default_copy_assign<cmdline>,
+ &cmdline_assign,
+ &cmdline_append,
+ &cmdline_prepend,
+ &cmdline_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &cmdline_compare,
+ &default_empty<cmdline>
+ };
+
// variable_pool
//
void variable_pool::
@@ -1428,6 +1609,17 @@ namespace build2
const variable_visibility* v,
const bool* o) const
{
+ assert (var.owner == this);
+
+ if (outer_ != nullptr)
+ {
+ // Project-private variable. Assert visibility/overridability, the same
+ // as in insert().
+ //
+ assert ((o == nullptr || !*o) &&
+ (v == nullptr || *v >= variable_visibility::project));
+ }
+
// Check overridability (all overrides, if any, should already have
// been entered; see context ctor for details).
//
@@ -1509,7 +1701,7 @@ namespace build2
}
static inline void
- merge_pattern (const variable_pool::pattern& p,
+ merge_pattern (const variable_patterns::pattern& p,
const build2::value_type*& t,
const variable_visibility*& v,
const bool*& o)
@@ -1560,20 +1752,68 @@ namespace build2
const bool* o,
bool pat)
{
- assert (!global_ || global_->phase == run_phase::load);
+ if (outer_ != nullptr)
+ {
+ // Project-private pool.
+ //
+ if (n.find ('.') != string::npos) // Qualified.
+ return outer_->insert (move (n), t, v, o, pat);
+
+ // Unqualified.
+ //
+ // The pool chaining semantics for insertion: first check the outer pool
+ // then, if not found, insert in own pool.
+ //
+ if (const variable* var = outer_->find (n))
+ {
+ // Verify type/visibility/overridability.
+ //
+ // Should we assert or fail? Currently the buildfile parser goes
+ // through update() to set these so let's do assert for now. We also
+ // require equality (these are a handful of special variables).
+ //
+ assert ((t == nullptr || t == var->type) &&
+ (v == nullptr || *v == var->visibility) &&
+ (o == nullptr || *o || var->overrides == nullptr));
+
+ return pair<variable&, bool> (const_cast<variable&> (*var), false);
+ }
+
+ // Project-private variable. Assert visibility/overridability and fall
+ // through. Again, we expect the buildfile parser to verify and diagnose
+ // these.
+ //
+ // Note: similar code in update().
+ //
+ assert ((o == nullptr || !*o) &&
+ (v == nullptr || *v >= variable_visibility::project));
+ }
+ else if (shared_)
+ {
+ // Public pool.
+ //
+ // Make sure all the unqualified variables are pre-entered during
+ // initialization.
+ //
+ assert (shared_->load_generation == 0 || n.find ('.') != string::npos);
+ }
+
+ assert (!shared_ || shared_->phase == run_phase::load);
// Apply pattern.
//
+ using pattern = variable_patterns::pattern;
+
const pattern* pa (nullptr);
auto pt (t); auto pv (v); auto po (o);
- if (pat)
+ if (pat && patterns_ != nullptr)
{
if (n.find ('.') != string::npos)
{
// Reverse means from the "largest" (most specific).
//
- for (const pattern& p: reverse_iterate (patterns_))
+ for (const pattern& p: reverse_iterate (patterns_->patterns_))
{
if (match_pattern (n, p.prefix, p.suffix, p.multi))
{
@@ -1590,6 +1830,7 @@ namespace build2
variable {
move (n),
nullptr,
+ nullptr,
pt,
nullptr,
pv != nullptr ? *pv : variable_visibility::project}));
@@ -1597,7 +1838,10 @@ namespace build2
variable& var (r.first->second);
if (r.second)
+ {
+ var.owner = this;
var.aliases = &var;
+ }
else // Note: overridden variable will always exist.
{
// This is tricky: if the pattern does not require a match, then we
@@ -1625,7 +1869,15 @@ namespace build2
const variable& variable_pool::
insert_alias (const variable& var, string n)
{
- assert (var.aliases != nullptr && var.overrides == nullptr);
+ if (outer_ != nullptr)
+ {
+ assert (n.find ('.') != string::npos); // Qualified.
+ return outer_->insert_alias (var, move (n));
+ }
+
+ assert (var.owner == this &&
+ var.aliases != nullptr &&
+ var.overrides == nullptr);
variable& a (insert (move (n),
var.type,
@@ -1646,15 +1898,15 @@ namespace build2
return a;
}
- void variable_pool::
- insert_pattern (const string& p,
- optional<const value_type*> t,
- optional<bool> o,
- optional<variable_visibility> v,
- bool retro,
- bool match)
+ void variable_patterns::
+ insert (const string& p,
+ optional<const value_type*> t,
+ optional<bool> o,
+ optional<variable_visibility> v,
+ bool retro,
+ bool match)
{
- assert (!global_ || global_->phase == run_phase::load);
+ assert (!shared_ || shared_->phase == run_phase::load);
size_t pn (p.size ());
@@ -1688,9 +1940,9 @@ namespace build2
// Apply retrospectively to existing variables.
//
- if (retro)
+ if (retro && pool_ != nullptr)
{
- for (auto& p: map_)
+ for (auto& p: pool_->map_)
{
variable& var (p.second);
@@ -1707,10 +1959,10 @@ namespace build2
}
if (j == e)
- update (var,
- t ? *t : nullptr,
- v ? &*v : nullptr,
- o ? &*o : nullptr); // Not changing the key.
+ pool_->update (var,
+ t ? *t : nullptr,
+ v ? &*v : nullptr,
+ o ? &*o : nullptr); // Not changing the key.
}
}
}
@@ -1718,7 +1970,66 @@ namespace build2
// variable_map
//
- const variable_map empty_variable_map (nullptr /* context */);
+ const variable_map empty_variable_map (variable_map::owner::empty);
+
+ // Need scope/target definition thus not inline.
+ //
+ variable_map::
+ variable_map (const scope& s, bool shared)
+ : shared_ (shared), owner_ (owner::scope), scope_ (&s), ctx (&s.ctx)
+ {
+ }
+
+ variable_map::
+ variable_map (const target& t, bool shared)
+ : shared_ (shared), owner_ (owner::target), target_ (&t), ctx (&t.ctx)
+ {
+ }
+
+ variable_map::
+ variable_map (const prerequisite& p, bool shared)
+ : shared_ (shared),
+ owner_ (owner::prereq), prereq_ (&p),
+ ctx (&p.scope.ctx)
+ {
+ }
+
+ variable_map::
+ variable_map (variable_map&& v, const prerequisite& p, bool shared)
+ : shared_ (shared),
+ owner_ (owner::scope), prereq_ (&p),
+ ctx (&p.scope.ctx),
+ m_ (move (v.m_))
+ {
+ }
+
+ variable_map::
+ variable_map (const variable_map& v, const prerequisite& p, bool shared)
+ : shared_ (shared),
+ owner_ (owner::scope), prereq_ (&p),
+ ctx (&p.scope.ctx),
+ m_ (v.m_)
+ {
+ }
+
+ lookup variable_map::
+ lookup (const string& name) const
+ {
+ lookup_type r;
+
+ const scope* bs (owner_ == owner::scope ? scope_ :
+ owner_ == owner::target ? &target_->base_scope () :
+ owner_ == owner::prereq ? &prereq_->scope :
+ nullptr);
+
+ if (const variable* var = bs->var_pool ().find (name))
+ {
+ auto p (lookup (*var));
+ r = lookup_type (p.first, &p.second, this);
+ }
+
+ return r;
+ }
auto variable_map::
lookup (const variable& var, bool typed, bool aliased) const ->
@@ -1761,24 +2072,43 @@ namespace build2
auto* r (const_cast<value_data*> (p.first));
if (r != nullptr)
+ {
+ r->extra = 0;
r->version++;
+ }
return pair<value_data*, const variable&> (r, p.second);
}
+ value& variable_map::
+ assign (const string& name)
+ {
+ assert (owner_ != owner::context);
+
+ const scope* bs (owner_ == owner::scope ? scope_ :
+ owner_ == owner::target ? &target_->base_scope () :
+ owner_ == owner::prereq ? &prereq_->scope :
+ nullptr);
+
+ return insert (bs->var_pool ()[name]).first;
+ }
+
pair<value&, bool> variable_map::
- insert (const variable& var, bool typed)
+ insert (const variable& var, bool typed, bool reset_extra)
{
- assert (!global_ || ctx->phase == run_phase::load);
+ assert (!shared_ || ctx->phase == run_phase::load);
auto p (m_.emplace (var, value_data (typed ? var.type : nullptr)));
value_data& r (p.first->second);
if (!p.second)
{
+ if (reset_extra)
+ r.extra = 0;
+
// Check if this is the first access after being assigned a type.
//
- // Note: we still need atomic in case this is not a global state.
+ // Note: we still need atomic in case this is not a shared state.
//
if (typed && var.type != nullptr)
typify (r, var);
@@ -1789,21 +2119,47 @@ namespace build2
return pair<value&, bool> (r, p.second);
}
+ auto variable_map::
+ find (const string& name) const -> const_iterator
+ {
+ assert (owner_ != owner::context);
+
+ const scope* bs (owner_ == owner::scope ? scope_ :
+ owner_ == owner::target ? &target_->base_scope () :
+ owner_ == owner::prereq ? &prereq_->scope :
+ nullptr);
+
+
+ const variable* var (bs->var_pool ().find (name));
+ return var != nullptr ? find (*var) : end ();
+ }
+
bool variable_map::
erase (const variable& var)
{
- assert (!global_ || ctx->phase == run_phase::load);
+ assert (!shared_ || ctx->phase == run_phase::load);
return m_.erase (var) != 0;
}
+ variable_map::const_iterator variable_map::
+ erase (const_iterator i)
+ {
+ assert (!shared_ || ctx->phase == run_phase::load);
+
+ return const_iterator (m_.erase (i), *this);
+ }
+
// variable_pattern_map
//
variable_map& variable_pattern_map::
insert (pattern_type type, string&& text)
{
+ // Note that this variable map is special and we use context as its owner
+ // (see variable_map for details).
+ //
auto r (map_.emplace (pattern {type, false, move (text), {}},
- variable_map (ctx, global_)));
+ variable_map (ctx, shared_)));
// Compile the regex.
//
@@ -1857,39 +2213,8 @@ namespace build2
{
if (!oname)
{
- const target_type& tt (*tk.type);
-
- // Note that if the name is not empty, then we always use that, even
- // if the type is dir/fsdir.
- //
- if (tk.name->empty () && (tt.is_a<dir> () || tt.is_a<fsdir> ()))
- {
- oname = tk.dir->leaf ().string ();
- }
- // If we have the extension and the type expects the extension to be
- // always specified explicitly by the user, then add it to the name.
- //
- // Overall, we have the following cases:
- //
- // 1. Extension is fixed: man1{}.
- //
- // 2. Extension is always specified by the user: file{}.
- //
- // 3. Default extension that may be overridden by the user: hxx{}.
- //
- // 4. Extension assigned by the rule but may be overridden by the
- // user: obje{}.
- //
- // By default we only match the extension for (2).
- //
- else if (tk.ext && !tk.ext->empty () &&
- (tt.fixed_extension == &target_extension_none ||
- tt.fixed_extension == &target_extension_must))
- {
- oname = *tk.name + '.' + *tk.ext;
- }
- else
- oname = string (); // Use tk.name as is.
+ oname = string ();
+ tk.effective_name (*oname);
}
return oname->empty () ? *tk.name : *oname;
diff --git a/libbuild2/variable.hxx b/libbuild2/variable.hxx
index 54d573b..2d7f8ba 100644
--- a/libbuild2/variable.hxx
+++ b/libbuild2/variable.hxx
@@ -4,7 +4,8 @@
#ifndef LIBBUILD2_VARIABLE_HXX
#define LIBBUILD2_VARIABLE_HXX
-#include <type_traits> // aligned_storage
+#include <cstddef> // max_align_t
+#include <type_traits> // is_*
#include <unordered_map>
#include <libbutl/prefix-map.hxx>
@@ -16,6 +17,7 @@
#include <libbuild2/context.hxx>
#include <libbuild2/target-type.hxx>
+#include <libbuild2/diagnostics.hxx>
#include <libbuild2/export.hxx>
@@ -47,7 +49,11 @@ namespace build2
template <typename T> const value_type* is_a () const;
- // Element type, if this is a vector.
+ // True if the type is a container.
+ //
+ bool container;
+
+ // Element type, if this is a container and the element type is named.
//
const value_type* element_type;
@@ -74,9 +80,11 @@ namespace build2
void (*const prepend) (value&, names&&, const variable*);
// Reverse the value back to a vector of names. Storage can be used by the
- // implementation if necessary. Cannot be NULL.
+ // implementation if necessary. If reduce is true, then for an empty
+ // simple value return an empty list rather than a list of one empty name.
+ // Note that the value cannot be NULL.
//
- names_view (*const reverse) (const value&, names& storage);
+ names_view (*const reverse) (const value&, names& storage, bool reduce);
// Cast value::data_ storage to value type so that the result can be
// static_cast to const T*. If it is NULL, then cast data_ directly. Note
@@ -106,6 +114,9 @@ namespace build2
scope, // This scope (no outer scopes).
target, // Target and target type/pattern-specific.
prereq // Prerequisite-specific.
+
+ // Note: remember to update the visibility attribute parsing if adding
+ // any new values here.
};
// VC14 reports ambiguity but seems to work if we don't provide any.
@@ -145,13 +156,27 @@ namespace build2
return o << to_string (v);
}
- // variable
+ // A variable.
+ //
+ // A variable can be public, project-private, or script-private, which
+ // corresponds to the variable pool it belongs to (see variable_pool). The
+ // two variables from the same pool are considered the same if they have the
+ // same name. The variable access (public/private) rules are:
+ //
+ // - Qualified variables are by default public while unqualified -- private.
+ //
+ // - Private must have project or lesser visibility and not be overridable.
+ //
+ // - An unqualified public variable can only be pre-entered during the
+ // context construction (to make sure it is not entered as private).
//
- // The two variables are considered the same if they have the same name.
+ // - There are no scope-private variables in our model due to side-loading,
+ // target type/pattern-specific append, etc.
//
// Variables can be aliases of each other in which case they form a circular
// linked list (the aliases pointer for variable without any aliases points
- // to the variable itself).
+ // to the variable itself). This mechanism should only be used for variables
+ // of the same access (normally public).
//
// If the variable is overridden on the command line, then override is the
// linked list of the special override variables. Their names are derived
@@ -198,6 +223,7 @@ namespace build2
struct variable
{
string name;
+ const variable_pool* owner;
const variable* aliases; // Circular linked list.
const value_type* type; // If NULL, then not (yet) typed.
unique_ptr<const variable> overrides;
@@ -276,7 +302,13 @@ namespace build2
// Extra data that is associated with the value that can be used to store
// flags, etc. It is initialized to 0 and copied (but not assigned) from
// one value to another but is otherwise untouched (not even when the
- // value is reset to NULL).
+ // value is reset to NULL) unless it is part of variable_map::value_data,
+ // in which case it is reset to 0 on each modification (version
+ // increment; however, see reset_extra flag in variable_map::insert()).
+ //
+ // (The reset on each modification semantics is used to implement the
+ // default value distinction as currently done in the config module but
+ // later probably will be done for ?= and $origin()).
//
// Note: if deciding to use for something make sure it is not overlapping
// with an existing usage.
@@ -327,9 +359,13 @@ namespace build2
value&
operator= (nullptr_t) {if (!null) reset (); return *this;}
- value (value&&);
+ // Note that we have the noexcept specification even though copy_ctor()
+ // could potentially throw (for example, for std::map).
+ //
+ value (value&&) noexcept;
+
explicit value (const value&);
- value& operator= (value&&);
+ value& operator= (value&&); // Note: can throw for untyped RHS.
value& operator= (const value&);
value& operator= (reference_wrapper<value>);
value& operator= (reference_wrapper<const value>);
@@ -338,8 +374,8 @@ namespace build2
//
public:
// Assign/append/prepend a typed value. For assign, LHS should be either
- // of the same type or untyped. For append, LHS should be either of the
- // same type or untyped and NULL.
+ // of the same type or untyped. For append/prepend, LHS should be either
+ // of the same type or untyped and NULL.
//
template <typename T> value& operator= (T);
template <typename T> value& operator+= (T);
@@ -388,8 +424,8 @@ namespace build2
// specialization below). Types that don't fit will have to be handled
// with an extra dynamic allocation.
//
- static constexpr size_t size_ = sizeof (name_pair);
- std::aligned_storage<size_>::type data_;
+ static constexpr size_t size_ = sizeof (name_pair);
+ alignas (std::max_align_t) unsigned char data_[size_];
// Make sure we have sufficient storage for untyped values.
//
@@ -429,38 +465,37 @@ namespace build2
template <typename T> T& cast (value&);
template <typename T> T&& cast (value&&);
template <typename T> const T& cast (const value&);
- template <typename T> const T& cast (const lookup&);
+ template <typename T> const T& cast (lookup);
// As above but returns NULL if the value is NULL (or not defined, in
// case of lookup).
//
template <typename T> T* cast_null (value&);
template <typename T> const T* cast_null (const value&);
- template <typename T> const T* cast_null (const lookup&);
+ template <typename T> const T* cast_null (lookup);
// As above but returns empty value if the value is NULL (or not defined, in
// case of lookup).
//
template <typename T> const T& cast_empty (const value&);
- template <typename T> const T& cast_empty (const lookup&);
+ template <typename T> const T& cast_empty (lookup);
// As above but returns the specified default if the value is NULL (or not
// defined, in case of lookup). Note that the return is by value, not by
// reference.
//
template <typename T> T cast_default (const value&, const T&);
- template <typename T> T cast_default (const lookup&, const T&);
+ template <typename T> T cast_default (lookup, const T&);
// As above but returns false/true if the value is NULL (or not defined,
// in case of lookup). Note that the template argument is only for
// documentation and should be bool (or semantically compatible).
//
template <typename T> T cast_false (const value&);
- template <typename T> T cast_false (const lookup&);
+ template <typename T> T cast_false (lookup);
template <typename T> T cast_true (const value&);
- template <typename T> T cast_true (const lookup&);
-
+ template <typename T> T cast_true (lookup);
// Assign value type to the value. The variable is optional and is only used
// for diagnostics.
@@ -473,20 +508,22 @@ namespace build2
typify_atomic (context&, value&, const value_type&, const variable*);
// Remove value type from the value reversing it to names. This is similar
- // to reverse() below except that it modifies the value itself.
+ // to reverse() below except that it modifies the value itself. Note that
+ // the reduce semantics applies to empty but not null.
//
- LIBBUILD2_SYMEXPORT void untypify (value&);
+ LIBBUILD2_SYMEXPORT void untypify (value&, bool reduce);
// Reverse the value back to names. The value should not be NULL and storage
- // should be empty.
+ // should be empty. If reduce is true, then for an empty simple value return
+ // an empty list rather than a list of one empty name.
//
vector_view<const name>
- reverse (const value&, names& storage);
+ reverse (const value&, names& storage, bool reduce);
vector_view<name>
- reverse (value&, names& storage);
+ reverse (value&, names& storage, bool reduce);
- // Variable lookup result, AKA, binding of a name to a value.
+ // Variable lookup result, AKA, binding of a variable to a value.
//
// A variable can be undefined, NULL, or contain a (potentially empty)
// value.
@@ -907,7 +944,7 @@ namespace build2
// pair of two empties).
//
// @@ Maybe we should redo this with optional<> to signify which half can
- // be missing?
+ // be missing? See also dump_value(json).
//
template <>
struct LIBBUILD2_SYMEXPORT value_traits<name_pair>
@@ -1141,12 +1178,43 @@ namespace build2
static const map_value_type<K, V> value_type;
};
+ // Canned command line to be re-lexed (used in {Build,Test}scripts).
+ //
+ // Note that because the executable can be specified as a target or as
+ // process_path_ex, this is a list of names rather than a list of strings.
+ // Note also that unlike vector<name> this type allows name pairs.
+ //
+ struct cmdline: vector<name>
+ {
+ using vector<name>::vector;
+
+ cmdline () {} // For Clang.
+ };
+
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<cmdline>
+ {
+ static_assert (sizeof (cmdline) <= value::size_, "insufficient space");
+
+ static cmdline convert (names&&);
+ static void assign (value&, cmdline&&);
+ static void append (value&, cmdline&&);
+ static void prepend (value&, cmdline&&);
+ static bool empty (const cmdline& x) {return x.empty ();}
+
+ static const cmdline empty_instance;
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
// Explicitly pre-instantiate and export value_traits templates for
// vector/map value types used in the build2 project. Note that this is not
// merely an optimization since not doing so we may end up with multiple
// value type objects for the same traits type (and we use their addressed
// as identity; see cast(const value&) for an example).
//
+ // NOTE: REMEMBER TO UPDATE dump_value(json) IF CHANGING ANYTHING HERE!
+ //
extern template struct LIBBUILD2_DECEXPORT value_traits<strings>;
extern template struct LIBBUILD2_DECEXPORT value_traits<vector<name>>;
extern template struct LIBBUILD2_DECEXPORT value_traits<paths>;
@@ -1196,9 +1264,12 @@ namespace build2
// Variable pool.
//
- // The global (as in, context-wide) version is protected by the phase mutex.
+ // The shared versions (as in, context or project-wide) are protected by the
+ // phase mutex and thus can only be modified during the load phase.
//
- class variable_pool
+ class variable_patterns;
+
+ class LIBBUILD2_SYMEXPORT variable_pool
{
public:
// Find existing (assert exists).
@@ -1218,7 +1289,7 @@ namespace build2
//
// Note also that a pattern and later insertions may restrict (but not
// relax) visibility and overridability.
-
+ //
const variable&
insert (string name)
{
@@ -1276,6 +1347,12 @@ namespace build2
}
const variable&
+ insert (string name, const value_type* type)
+ {
+ return insert (move (name), type, nullptr, nullptr).first;
+ }
+
+ const variable&
insert (string name,
const value_type* type,
bool overridable,
@@ -1296,70 +1373,74 @@ namespace build2
// Overridable aliased variables are most likely a bad idea: without a
// significant effort, the overrides will only be applied along the alias
// names (i.e., there would be no cross-alias overriding). So for now we
- // don't allow this (use the common variable mechanism instead).
+ // don't allow this (manually handle multiple names by merging their
+ // values instead).
+ //
+ // Note: currently only public variables can be aliased.
//
- LIBBUILD2_SYMEXPORT const variable&
+ const variable&
insert_alias (const variable& var, string name);
- // Insert a variable pattern. Any variable that matches this pattern will
- // have the specified type, visibility, and overridability. If match is
- // true, then individual insertions of the matching variable must match
- // the specified type/visibility/overridability. Otherwise, individual
- // insertions can provide alternative values and the pattern values are a
- // fallback (if you specify false you better be very clear about what you
- // are trying to achieve).
+ // Iteration.
//
- // The pattern must be in the form [<prefix>.](*|**)[.<suffix>] where '*'
- // matches single component stems (i.e., 'foo' but not 'foo.bar') and '**'
- // matches single and multi-component stems. Note that only multi-
- // component variables are considered for pattern matching (so just '*'
- // won't match anything).
+ public:
+ using key = butl::map_key<string>;
+ using map = std::unordered_map<key, variable>;
+
+ using const_iterator = butl::map_iterator_adapter<map::const_iterator>;
+
+ const_iterator begin () const {return const_iterator (map_.begin ());}
+ const_iterator end () const {return const_iterator (map_.end ());}
+
+ // Construction.
//
- // The patterns are matched in the more-specific-first order where the
- // pattern is considered more specific if it has a greater sum of its
- // prefix and suffix lengths. If the prefix and suffix are equal, then the
- // '*' pattern is considered more specific than '**'. If neither is more
- // specific, then they are matched in the reverse order of insertion.
+ // There are three specific variable pool instances:
//
- // If retro is true then a newly inserted pattern is also applied
- // retrospectively to all the existing variables that match but only
- // if no more specific pattern already exists (which is then assumed
- // to have been applied). So if you use this functionality, watch out
- // for the insertion order (you probably want more specific first).
+ // shared outer
+ // ----------------
+ // true null -- public variable pool in context
+ // true not null -- project-private pool in scope::root_extra
+ // with outer pointing to context::var_pool
+ // false not null -- temporary scope-private pool in temp_scope
+ // with outer pointing to context::var_pool
+ // false null -- script-private pool in script::environment
//
- public:
- LIBBUILD2_SYMEXPORT void
- insert_pattern (const string& pattern,
- optional<const value_type*> type,
- optional<bool> overridable,
- optional<variable_visibility>,
- bool retro = false,
- bool match = true);
+ // Notice that the script-private pool doesn't rely on outer and does
+ // its own pool chaining. So currently we assume that if outer is not
+ // NULL, then this is a project-private pool.
+ //
+ private:
+ friend class context;
+ friend class temp_scope;
- template <typename T>
- void
- insert_pattern (const string& p,
- optional<bool> overridable,
- optional<variable_visibility> v,
- bool retro = false,
- bool match = true)
- {
- insert_pattern (
- p, &value_traits<T>::value_type, overridable, v, retro, match);
- }
+ // Shared pool (public or project-private). The shared argument is
+ // flag/context.
+ //
+ variable_pool (context* shared,
+ variable_pool* outer,
+ const variable_patterns* patterns)
+ : shared_ (shared), outer_ (outer), patterns_ (patterns) {}
public:
- void
- clear () {map_.clear ();}
+ // Script-private pool.
+ //
+ explicit
+ variable_pool (const variable_patterns* patterns = nullptr)
+ : shared_ (nullptr), outer_ (nullptr), patterns_ (patterns) {}
+
+ variable_pool (variable_pool&&) = delete;
+ variable_pool& operator= (variable_pool&&) = delete;
- variable_pool (): variable_pool (nullptr) {}
+ variable_pool (const variable_pool&) = delete;
+ variable_pool& operator= (const variable_pool&) = delete;
- // RW access (only for the global pool).
+ public:
+ // RW access (only for shared pools plus the temp_scope special case).
//
variable_pool&
rw () const
{
- assert (global_->phase == run_phase::load);
+ assert (shared_ == nullptr || shared_->phase == run_phase::load);
return const_cast<variable_pool&> (*this);
}
@@ -1375,14 +1456,16 @@ namespace build2
// Note that in insert() NULL overridable is interpreted as false unless
// overridden by a pattern while in update() NULL overridable is ignored.
//
- LIBBUILD2_SYMEXPORT pair<variable&, bool>
+ pair<variable&, bool>
insert (string name,
const value_type*,
const variable_visibility*,
const bool* overridable,
bool pattern = true);
- LIBBUILD2_SYMEXPORT void
+ // Note: the variable must belong to this pool.
+ //
+ void
update (variable&,
const value_type*,
const variable_visibility*,
@@ -1391,9 +1474,6 @@ namespace build2
// Variable map.
//
private:
- using key = butl::map_key<string>;
- using map = std::unordered_map<key, variable>;
-
pair<map::iterator, bool>
insert (variable&& var)
{
@@ -1402,19 +1482,127 @@ namespace build2
// gets hairy very quickly (there is no std::hash for C-strings). So
// let's rely on small object-optimized std::string for now.
//
- string n (var.name);
+ string n (var.name); // @@ PERF (maybe keep reuse buffer at least?)
auto r (map_.insert (map::value_type (&n, move (var))));
if (r.second)
+ {
+#if 0
+ if (shared_ && outer_ == nullptr) // Global pool in context.
+ {
+ size_t n (map_.bucket_count ());
+ if (n > buckets_)
+ {
+ text << "variable_pool buckets: " << buckets_ << " -> " << n
+ << " (" << map_.size () << ")";
+ buckets_ = n;
+ }
+ }
+#endif
r.first->first.p = &r.first->second.name;
+ }
return r;
}
+ private:
+ friend class variable_patterns;
+
+ context* shared_;
+ variable_pool* outer_;
+ const variable_patterns* patterns_;
map map_;
- // Patterns.
+#if 0
+ size_t buckets_ = 0;
+#endif
+ };
+
+ // Variable patterns.
+ //
+ // This mechanism is used to assign variable types/visibility/overridability
+ // based on the variable name pattern. This mechanism can only be used for
+ // qualified variables and is thus only provided for the public variable
+ // pool.
+ //
+ // Similar to variable_pool, the shared versions are protected by the phase
+ // mutex and thus can only be modified during the load phase.
+ //
+ class LIBBUILD2_SYMEXPORT variable_patterns
+ {
+ public:
+ // Insert a variable pattern. Any variable that matches this pattern will
+ // have the specified type, visibility, and overridability. If match is
+ // true, then individual insertions of the matching variable must match
+ // the specified type/visibility/overridability. Otherwise, individual
+ // insertions can provide alternative values and the pattern values are a
+ // fallback (if you specify false you better be very clear about what you
+ // are trying to achieve).
+ //
+ // The pattern must be in the form [<prefix>.](*|**)[.<suffix>] where '*'
+ // matches single component stems (i.e., 'foo' but not 'foo.bar') and '**'
+ // matches single and multi-component stems. Note that only multi-
+ // component variables are considered for pattern matching (so just '*'
+ // won't match anything).
+ //
+ // The patterns are matched in the more-specific-first order where the
+ // pattern is considered more specific if it has a greater sum of its
+ // prefix and suffix lengths. If the prefix and suffix are equal, then the
+ // '*' pattern is considered more specific than '**'. If neither is more
+ // specific, then they are matched in the reverse order of insertion.
+ //
+ // If retro is true then a newly inserted pattern is also applied
+ // retrospectively to all the existing variables that match but only
+ // if no more specific pattern already exists (which is then assumed
+ // to have been applied). So if you use this functionality, watch out
+ // for the insertion order (you probably want more specific first).
+ //
+ void
+ insert (const string& pattern,
+ optional<const value_type*> type,
+ optional<bool> overridable,
+ optional<variable_visibility>,
+ bool retro = false,
+ bool match = true);
+
+ template <typename T>
+ void
+ insert (const string& p,
+ optional<bool> overridable,
+ optional<variable_visibility> v,
+ bool retro = false,
+ bool match = true)
+ {
+ insert (p, &value_traits<T>::value_type, overridable, v, retro, match);
+ }
+
+ public:
+ // The shared argument is flag/context. The pool argument is for
+ // retrospective pattern application.
+ //
+ explicit
+ variable_patterns (context* shared, variable_pool* pool)
+ : shared_ (shared), pool_ (pool) {}
+
+ variable_patterns (variable_patterns&&) = delete;
+ variable_patterns& operator= (variable_patterns&&) = delete;
+
+ variable_patterns (const variable_patterns&) = delete;
+ variable_patterns& operator= (const variable_patterns&) = delete;
+
+ public:
+ // RW access (only for shared pools).
//
+ variable_patterns&
+ rw () const
+ {
+ assert (shared_->phase == run_phase::load);
+ return const_cast<variable_patterns&> (*this);
+ }
+
+ variable_patterns&
+ rw (scope&) const {return const_cast<variable_patterns&> (*this);}
+
public:
struct pattern
{
@@ -1442,17 +1630,11 @@ namespace build2
};
private:
- multiset<pattern> patterns_;
+ friend class variable_pool;
- // Global pool flag/context.
- //
- private:
- friend class context;
-
- explicit
- variable_pool (context* global): global_ (global) {}
-
- context* global_;
+ context* shared_;
+ variable_pool* pool_;
+ multiset<pattern> patterns_;
};
}
@@ -1493,7 +1675,10 @@ namespace build2
using value::value;
using value::operator=;
- size_t version = 0; // Incremented on each modification (variable_cache).
+ // Incremented on each modification, at which point we also reset
+ // value::extra to 0.
+ //
+ size_t version = 0;
};
// Note that we guarantee ascending iteration order (e.g., for predictable
@@ -1535,8 +1720,13 @@ namespace build2
lookup_type
operator[] (const variable& var) const
{
- auto p (lookup (var));
- return lookup_type (p.first, &p.second, this);
+ lookup_type r;
+ if (!empty ())
+ {
+ auto p (lookup (var));
+ r = lookup_type (p.first, &p.second, this);
+ }
+ return r;
}
lookup_type
@@ -1549,12 +1739,17 @@ namespace build2
lookup_type
operator[] (const string& name) const
{
- const variable* var (ctx != nullptr
- ? ctx->var_pool.find (name)
- : nullptr);
- return var != nullptr ? operator[] (*var) : lookup_type ();
+ assert (owner_ != owner::context);
+
+ lookup_type r;
+ if (!empty ())
+ r = lookup (name);
+ return r;
}
+ lookup_type
+ lookup (const string& name) const;
+
// If typed is false, leave the value untyped even if the variable is. If
// aliased is false, then don't consider aliases (used by the variable
  // override machinery where the aliases chain is repurposed for something
@@ -1574,6 +1769,18 @@ namespace build2
const_iterator (r.second, *this));
}
+ pair<const_iterator, const_iterator>
+ lookup_namespace (string ns) const
+ {
+ // It's ok to use the temporary here since we compare names and don't
+ // insert anything.
+ //
+ return lookup_namespace (variable {
+ move (ns),
+ nullptr, nullptr, nullptr, nullptr,
+ variable_visibility::project});
+ }
+
// Convert a lookup pointing to a value belonging to this variable map
// to its non-const version. Note that this is only safe on the original
// values (see lookup_original()).
@@ -1583,6 +1790,7 @@ namespace build2
{
assert (l.vars == this);
value& r (const_cast<value&> (*l.value));
+ r.extra = 0;
static_cast<value_data&> (r).version++;
return r;
}
@@ -1599,24 +1807,37 @@ namespace build2
return assign (*var);
}
- // Note that the variable is expected to have already been registered.
+ // Note that the variable is expected to have already been inserted.
//
value&
- assign (const string& name) {return insert (ctx->var_pool[name]).first;}
+ assign (const string& name);
// As above but also return an indication of whether the new value (which
// will be NULL) was actually inserted. Similar to find(), if typed is
- // false, leave the value untyped even if the variable is.
+ // false, leave the value untyped even if the variable is. If reset_extra
+ // is false, then don't reset the existing value's value::extra.
//
pair<value&, bool>
- insert (const variable&, bool typed = true);
+ insert (const variable&, bool typed = true, bool reset_extra = true);
- // Note: does not deal with aliases.
+ // Note: the following functions do not deal with aliases.
//
+ const_iterator
+ find (const variable& var) const
+ {
+ return const_iterator (m_.find (var), *this);
+ }
+
+ const_iterator
+ find (const string& name) const;
+
bool
erase (const variable&);
const_iterator
+ erase (const_iterator);
+
+ const_iterator
begin () const {return const_iterator (m_.begin (), *this);}
const_iterator
@@ -1629,21 +1850,58 @@ namespace build2
size () const {return m_.size ();}
public:
- // Global should be true if this map is part of the global build state
- // (e.g., scopes, etc).
+ // Shared should be true if this map is part of the shared build state
+ // (e.g., scopes) and thus should only be modified during the load phase.
//
explicit
- variable_map (context& c, bool global = false)
- : ctx (&c), global_ (global) {}
+ variable_map (const scope& owner, bool shared = false);
+
+ explicit
+ variable_map (const target& owner, bool shared = false);
+
+ explicit
+ variable_map (const prerequisite& owner, bool shared = false);
+
+ variable_map (variable_map&&, const prerequisite&, bool shared = false);
+ variable_map (const variable_map&, const prerequisite&, bool shared = false);
+
+ variable_map&
+ operator= (variable_map&& v) noexcept {m_ = move (v.m_); return *this;}
+
+ variable_map&
+ operator= (const variable_map& v) {m_ = v.m_; return *this;}
+
+ // The context owner is for special "managed" variable maps. Note that
+ // such maps cannot lookup/insert variable names specified as strings.
+ //
+ variable_map (context& c, bool shared)
+ : shared_ (shared), owner_ (owner::context), ctx (&c) {}
+
+ // Note: std::map's move constructor can throw.
+ //
+ variable_map (variable_map&& v)
+ : shared_ (v.shared_), owner_ (v.owner_), ctx (v.ctx), m_ (move (v.m_))
+ {
+ assert (owner_ == owner::context);
+ }
+
+ variable_map (const variable_map& v)
+ : shared_ (v.shared_), owner_ (v.owner_), ctx (v.ctx), m_ (v.m_)
+ {
+ assert (v.owner_ == owner::context);
+ }
void
clear () {m_.clear ();}
- // Implementation details (only used for empty_variable_map).
+ // Implementation details.
//
public:
+ enum class owner {empty, context, scope, target, prereq};
+
explicit
- variable_map (context* c): ctx (c) {}
+ variable_map (owner o, context* c = nullptr, bool shared = false)
+ : shared_ (shared), owner_ (o), ctx (c) {}
private:
friend class variable_type_map;
@@ -1652,9 +1910,18 @@ namespace build2
typify (const value_data&, const variable&) const;
private:
+ friend class target_set;
+
+ bool shared_;
+ owner owner_;
+ union
+ {
+ const scope* scope_;
+ const target* target_;
+ const prerequisite* prereq_;
+ };
context* ctx;
map_type m_;
- bool global_;
};
LIBBUILD2_SYMEXPORT extern const variable_map empty_variable_map;
@@ -1787,8 +2054,8 @@ namespace build2
using const_iterator = map_type::const_iterator;
using const_reverse_iterator = map_type::const_reverse_iterator;
- variable_pattern_map (context& c, bool global)
- : ctx (c), global_ (global) {}
+ variable_pattern_map (context& c, bool shared)
+ : ctx (c), shared_ (shared) {}
// Note that here we assume the "outer" pattern format (delimiters, flags,
// etc) is valid.
@@ -1804,7 +2071,7 @@ namespace build2
operator[] (string text)
{
return map_.emplace (pattern {pattern_type::path, false, move (text), {}},
- variable_map (ctx, global_)).first->second;
+ variable_map (ctx, shared_)).first->second;
}
const_iterator begin () const {return map_.begin ();}
@@ -1816,7 +2083,7 @@ namespace build2
private:
context& ctx;
map_type map_;
- bool global_;
+ bool shared_;
};
class LIBBUILD2_SYMEXPORT variable_type_map
@@ -1826,13 +2093,13 @@ namespace build2
variable_pattern_map>;
using const_iterator = map_type::const_iterator;
- variable_type_map (context& c, bool global): ctx (c), global_ (global) {}
+ variable_type_map (context& c, bool shared): ctx (c), shared_ (shared) {}
variable_pattern_map&
operator[] (const target_type& t)
{
return map_.emplace (
- t, variable_pattern_map (ctx, global_)).first->second;
+ t, variable_pattern_map (ctx, shared_)).first->second;
}
const_iterator begin () const {return map_.begin ();}
@@ -1862,7 +2129,7 @@ namespace build2
private:
context& ctx;
map_type map_;
- bool global_;
+ bool shared_;
};
}
diff --git a/libbuild2/variable.ixx b/libbuild2/variable.ixx
index a84c012..51c35fd 100644
--- a/libbuild2/variable.ixx
+++ b/libbuild2/variable.ixx
@@ -224,7 +224,7 @@ namespace build2
template <typename T>
inline const T&
- cast (const lookup& l)
+ cast (lookup l)
{
return cast<T> (*l);
}
@@ -245,7 +245,7 @@ namespace build2
template <typename T>
inline const T*
- cast_null (const lookup& l)
+ cast_null (lookup l)
{
return l ? &cast<T> (*l) : nullptr;
}
@@ -259,7 +259,7 @@ namespace build2
template <typename T>
inline const T&
- cast_empty (const lookup& l)
+ cast_empty (lookup l)
{
return l ? cast<T> (l) : value_traits<T>::empty_instance;
}
@@ -273,7 +273,7 @@ namespace build2
template <typename T>
inline T
- cast_default (const lookup& l, const T& d)
+ cast_default (lookup l, const T& d)
{
return l ? cast<T> (l) : d;
}
@@ -287,7 +287,7 @@ namespace build2
template <typename T>
inline T
- cast_false (const lookup& l)
+ cast_false (lookup l)
{
return l && cast<T> (l);
}
@@ -301,7 +301,7 @@ namespace build2
template <typename T>
inline T
- cast_true (const lookup& l)
+ cast_true (lookup l)
{
return !l || cast<T> (l);
}
@@ -326,18 +326,21 @@ namespace build2
}
inline vector_view<const name>
- reverse (const value& v, names& storage)
+ reverse (const value& v, names& storage, bool reduce)
{
assert (v &&
storage.empty () &&
(v.type == nullptr || v.type->reverse != nullptr));
- return v.type == nullptr ? v.as<names> () : v.type->reverse (v, storage);
+
+ return v.type == nullptr
+ ? v.as<names> ()
+ : v.type->reverse (v, storage, reduce);
}
inline vector_view<name>
- reverse (value& v, names& storage)
+ reverse (value& v, names& storage, bool reduce)
{
- names_view cv (reverse (static_cast<const value&> (v), storage));
+ names_view cv (reverse (static_cast<const value&> (v), storage, reduce));
return vector_view<name> (const_cast<name*> (cv.data ()), cv.size ());
}
@@ -905,6 +908,26 @@ namespace build2
// variable_pool
//
+ inline const variable* variable_pool::
+ find (const string& n) const
+ {
+ // The pool chaining semantics for lookup: first check own pool then, if
+ // not found, check the outer pool.
+ //
+ auto i (map_.find (&n));
+ if (i != map_.end ())
+ return &i->second;
+
+ if (outer_ != nullptr)
+ {
+ i = outer_->map_.find (&n);
+ if (i != outer_->map_.end ())
+ return &i->second;
+ }
+
+ return nullptr;
+ }
+
inline const variable& variable_pool::
operator[] (const string& n) const
{
@@ -913,13 +936,6 @@ namespace build2
return *r;
}
- inline const variable* variable_pool::
- find (const string& n) const
- {
- auto i (map_.find (&n));
- return i != map_.end () ? &i->second : nullptr;
- }
-
// variable_map
//
inline void variable_map::
diff --git a/libbuild2/variable.txx b/libbuild2/variable.txx
index b1c4112..2c1265a 100644
--- a/libbuild2/variable.txx
+++ b/libbuild2/variable.txx
@@ -229,13 +229,13 @@ namespace build2
template <typename T>
names_view
- simple_reverse (const value& v, names& s)
+ simple_reverse (const value& v, names& s, bool reduce)
{
const T& x (v.as<T> ());
- // Represent an empty simple value as empty name sequence rather than
- // a single empty name. This way, for example, during serialization we
- // end up with a much saner looking:
+ // Unless requested otherwise, represent an empty simple value as empty
+ // name sequence rather than a single empty name. This way, for example,
+ // during serialization we end up with a much saner looking:
//
// config.import.foo =
//
@@ -245,6 +245,8 @@ namespace build2
//
if (!value_traits<T>::empty (x))
s.emplace_back (value_traits<T>::reverse (x));
+ else if (!reduce)
+ s.push_back (name ());
return s;
}
@@ -492,7 +494,7 @@ namespace build2
if (n.pair != '@')
throw invalid_argument (
- string ("invalid pair character: '") + n.pair + "'");
+ string ("invalid pair character: '") + n.pair + '\'');
}
v.push_back (value_traits<T>::convert (move (n), r));
@@ -590,7 +592,7 @@ namespace build2
template <typename T>
static names_view
- vector_reverse (const value& v, names& s)
+ vector_reverse (const value& v, names& s, bool)
{
auto& vv (v.as<vector<T>> ());
s.reserve (vv.size ());
@@ -651,7 +653,8 @@ namespace build2
nullptr, // Patched above.
sizeof (vector<T>),
nullptr, // No base.
- &value_traits<T>::value_type,
+ true, // Container.
+ &value_traits<T>::value_type, // Element type.
&default_dtor<vector<T>>,
&default_copy_ctor<vector<T>>,
&default_copy_assign<vector<T>>,
@@ -702,7 +705,7 @@ namespace build2
template <typename K, typename V>
static names_view
- pair_vector_reverse (const value& v, names& s)
+ pair_vector_reverse (const value& v, names& s, bool)
{
auto& vv (v.as<vector<pair<K, V>>> ());
s.reserve (2 * vv.size ());
@@ -803,7 +806,8 @@ namespace build2
nullptr, // Patched above.
sizeof (vector<pair<K, V>>),
nullptr, // No base.
- nullptr, // No element.
+ true, // Container.
+ nullptr, // No element (not named).
&default_dtor<vector<pair<K, V>>>,
&default_copy_ctor<vector<pair<K, V>>>,
&default_copy_assign<vector<pair<K, V>>>,
@@ -882,7 +886,7 @@ namespace build2
template <typename K, typename V>
static names_view
- map_reverse (const value& v, names& s)
+ map_reverse (const value& v, names& s, bool)
{
auto& vm (v.as<map<K, V>> ());
s.reserve (2 * vm.size ());
@@ -983,7 +987,8 @@ namespace build2
nullptr, // Patched above.
sizeof (map<K, V>),
nullptr, // No base.
- nullptr, // No element.
+ true, // Container.
+ nullptr, // No element (not named).
&default_dtor<map<K, V>>,
&default_copy_ctor<map<K, V>>,
&default_copy_assign<map<K, V>>,
@@ -1014,8 +1019,8 @@ namespace build2
: 0);
shared_mutex& m (
- ctx.mutexes.variable_cache[
- hash<variable_cache*> () (this) % ctx.mutexes.variable_cache_size]);
+ ctx.mutexes->variable_cache[
+ hash<variable_cache*> () (this) % ctx.mutexes->variable_cache_size]);
slock sl (m);
ulock ul (m, defer_lock);
@@ -1070,6 +1075,7 @@ namespace build2
e.stem_version = sver;
+ e.value.extra = 0; // For consistency (we don't really use it).
e.value.version++; // Value changed.
}
else
diff --git a/libbuild2/version/init.cxx b/libbuild2/version/init.cxx
index 05d5fe0..b3657bc 100644
--- a/libbuild2/version/init.cxx
+++ b/libbuild2/version/init.cxx
@@ -3,6 +3,8 @@
#include <libbuild2/version/init.hxx>
+#include <cstring> // strchr()
+
#include <libbutl/manifest-parser.hxx>
#include <libbuild2/scope.hxx>
@@ -143,61 +145,98 @@ namespace build2
}
else if (nv.name == "depends")
{
- // According to the package manifest spec, the format of the
- // 'depends' value is as follows:
- //
- // depends: [?][*] <alternatives> [; <comment>]
- //
- // <alternatives> := <dependency> [ '|' <dependency>]*
- // <dependency> := <name> [<constraint>]
- // <constraint> := <comparison> | <range>
- // <comparison> := ('==' | '>' | '<' | '>=' | '<=') <version>
- // <range> := ('(' | '[') <version> <version> (')' | ']')
- //
- // Note that we don't do exhaustive validation here leaving it
- // to the package manager.
- //
string v (move (nv.value));
- size_t p;
+ // Parse the dependency and add it to the map (see
+ // bpkg::dependency_alternatives class for dependency syntax).
+ //
+ // Note that currently we only consider simple dependencies:
+ // single package without alternatives, clauses, or newlines.
+ // In the future, if/when we add full support, we will likely
+ // keep this as a fast path.
+ //
+ // Also note that we don't do exhaustive validation here leaving
+ // it to the package manager.
// Get rid of the comment.
//
+ // Note that we can potentially mis-detect the comment
+ // separator, since ';' can be a part of some of the dependency
+ // alternative clauses. If that's the case, we will skip the
+ // dependency later.
+ //
+ size_t p;
if ((p = v.find (';')) != string::npos)
v.resize (p);
- // Get rid of conditional/runtime markers. Note that enither of
- // them is valid in the rest of the value.
+ // Skip the dependency if it is not a simple one.
+ //
+ // Note that we will check for the presence of the reflect
+ // clause later since `=` can also be in the constraint.
+ //
+ if (v.find_first_of ("{?|\n") != string::npos)
+ continue;
+
+ // Find the beginning of the dependency package name, skipping
+ // the build-time marker, if present.
//
- if ((p = v.find_last_of ("?*")) != string::npos)
- v.erase (0, p + 1);
+ bool buildtime (v[0] == '*');
+ size_t b (buildtime ? v.find_first_not_of (" \t", 1) : 0);
- // Parse as |-separated "words".
+ if (b == string::npos)
+ fail (l) << "invalid dependency " << v << ": no package name";
+
+ // Find the end of the dependency package name.
+ //
+ p = v.find_first_of (" \t=<>[(~^", b);
+
+ // Dependency name (without leading/trailing white-spaces).
//
- for (size_t b (0), e (0); next_word (v, b, e, '|'); )
+ string n (v, b, p == string::npos ? p : p - b);
+
+ string vc; // Empty if no constraint is specified
+
+ // Position to the first non-whitespace character after the
+ // dependency name, which, if present, can be a part of the
+ // version constraint or the reflect clause.
+ //
+ if (p != string::npos)
+ p = v.find_first_not_of (" \t", p);
+
+ if (p != string::npos)
+ {
+ // Check if this is definitely not a version constraint and
+ // drop this dependency if that's the case.
+ //
+ if (strchr ("=<>[(~^", v[p]) == nullptr)
+ continue;
+
+ // Ok, we have a constraint, check that there is no reflect
+ // clause after it (the only other valid `=` in a constraint
+ // is in the immediately following character as part of
+ // `==`, `<=`, or `>=`).
+ //
+ if (v.size () > p + 2 && v.find ('=', p + 2) != string::npos)
+ continue;
+
+ vc.assign (v, p, string::npos);
+ trim (vc);
+ }
+
+ // Finally, add the dependency to the map.
+ //
+ try
+ {
+ package_name pn (move (n));
+ string v (pn.variable ());
+
+ ds.emplace (move (v),
+ dependency {move (pn), move (vc), buildtime});
+ }
+ catch (const invalid_argument& e)
{
- string d (v, b, e - b);
- trim (d);
-
- p = d.find_first_of (" \t=<>[(~^");
- string n (d, 0, p);
- string c (p != string::npos ? string (d, p) : string ());
-
- trim (n);
- trim (c);
-
- try
- {
- package_name pn (move (n));
- string v (pn.variable ());
-
- ds.emplace (move (v), dependency {move (pn), move (c)});
- }
- catch (const invalid_argument& e)
- {
- fail (l) << "invalid package name for dependency "
- << d << ": " << e;
- }
+ fail (l) << "invalid dependency package name '" << n << "': "
+ << e;
}
}
}
@@ -246,7 +285,9 @@ namespace build2
{
auto i (ds.find ("build2"));
- if (i != ds.end () && !i->second.constraint.empty ())
+ if (i != ds.end () &&
+ i->second.buildtime &&
+ !i->second.constraint.empty ())
try
{
check_build_version (
@@ -349,7 +390,7 @@ namespace build2
if (cast_false<bool> (rs["install.booted"]))
{
rs.insert_rule<manifest> (
- perform_install_id, "version.manifest", manifest_install_rule_);
+ perform_install_id, "version.install", manifest_install_rule_);
}
return true;
diff --git a/libbuild2/version/module.hxx b/libbuild2/version/module.hxx
index e80870e..8549e03 100644
--- a/libbuild2/version/module.hxx
+++ b/libbuild2/version/module.hxx
@@ -22,6 +22,7 @@ namespace build2
{
package_name name;
string constraint;
+ bool buildtime;
};
using dependencies = map<string, dependency>;
diff --git a/libbuild2/version/rule.cxx b/libbuild2/version/rule.cxx
index 919dfcf..98dc2da 100644
--- a/libbuild2/version/rule.cxx
+++ b/libbuild2/version/rule.cxx
@@ -46,12 +46,31 @@ namespace build2
// in_rule
//
+
+ // Wrap the in::rule's perform_update recipe into a data-carrying recipe.
+ //
+ // To optimize this a bit further (i.e., to avoid the dynamic memory
+ // allocation) we are going to call in::rule::perform_update() directly
+ // (after all it's virtual and thus part of the in_rule's interface).
+ //
+ struct match_data
+ {
+ const module& mod;
+ const in_rule& rule;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ return rule.perform_update (a, t);
+ }
+ };
+
bool in_rule::
- match (action a, target& xt, const string&) const
+ match (action a, target& xt) const
{
tracer trace ("version::in_rule::match");
- file& t (static_cast<file&> (xt));
+ file& t (xt.as<file> ());
const scope& rs (t.root_scope ());
bool fm (false); // Found manifest.
@@ -74,14 +93,20 @@ namespace build2
if (!fi)
l5 ([&]{trace << "no in file prerequisite for target " << t;});
- bool r (fm && fi);
+ return fm && fi;
+ }
- // If we match, lookup and cache the module for the update operation.
- //
- if (r && a == perform_update_id)
- t.data (rs.find_module<module> (module::name));
+ recipe in_rule::
+ apply (action a, target& t) const
+ {
+ recipe r (rule::apply (a, t));
- return r;
+ // Lookup and cache the module for the update operation.
+ //
+ return a == perform_update_id
+ ? match_data {*t.root_scope ().find_module<module> (module::name),
+ *this}
+ : move (r);
}
string in_rule::
@@ -89,12 +114,16 @@ namespace build2
action a,
const target& t,
const string& n,
+ optional<uint64_t> flags,
+ const substitution_map* smap,
const optional<string>& null) const
{
+ assert (!flags);
+
// Note that this code will be executed during up-to-date check for each
// substitution so let's try not to do anything overly sub-optimal here.
//
- const module& m (*t.data<const module*> ());
+ const module& m (t.data<match_data> (a).mod);
// Split it into the package name and the variable/condition name.
//
@@ -110,7 +139,7 @@ namespace build2
a,
t,
p == string::npos ? n : string (n, p + 1),
- null);
+ nullopt, smap, null);
}
string pn (n, 0, p);
@@ -212,13 +241,13 @@ namespace build2
if (mav->snapshot ())
{
- r += (p ? "(" : "");
+ if (p) r += '(';
r += cmp (vm, " < ", mav->version) + " || (";
r += cmp (vm, " == ", mav->version) + " && ";
- r += cmp (sm, (mao ? " < " : " <= "), mav->snapshot_sn) + ")";
+ r += cmp (sm, (mao ? " < " : " <= "), mav->snapshot_sn) + ')';
- r += (p ? ")" : "");
+ if (p) r += ')';
}
else
r = cmp (vm, (mao ? " < " : " <= "), mav->version);
@@ -232,13 +261,13 @@ namespace build2
if (miv->snapshot ())
{
- r += (p ? "(" : "");
+ if (p) r += '(';
r += cmp (vm, " > ", miv->version) + " || (";
r += cmp (vm, " == ", miv->version) + " && ";
- r += cmp (sm, (mio ? " > " : " >= "), miv->snapshot_sn) + ")";
+ r += cmp (sm, (mio ? " > " : " >= "), miv->snapshot_sn) + ')';
- r += (p ? ")" : "");
+ if (p) r += ')';
}
else
r = cmp (vm, (mio ? " > " : " >= "), miv->version);
@@ -298,7 +327,7 @@ namespace build2
// manifest_install_rule
//
bool manifest_install_rule::
- match (action a, target& t, const string&) const
+ match (action a, target& t) const
{
// We only match project's manifest.
//
@@ -311,7 +340,7 @@ namespace build2
if (s.root_scope () != &s || s.src_path () != t.dir)
return false;
- return file_rule::match (a, t, "");
+ return file_rule::match (a, t);
}
auto_rmfile manifest_install_rule::
diff --git a/libbuild2/version/rule.hxx b/libbuild2/version/rule.hxx
index ddc5e11..0bdc090 100644
--- a/libbuild2/version/rule.hxx
+++ b/libbuild2/version/rule.hxx
@@ -20,16 +20,21 @@ namespace build2
class in_rule: public in::rule
{
public:
- in_rule (): rule ("version.in 2", "version.in") {}
+ in_rule (): rule ("version.in 2", "version") {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
+
+ virtual recipe
+ apply (action, target&) const override;
virtual string
lookup (const location&,
action,
const target&,
const string&,
+ optional<uint64_t>,
+ const substitution_map*,
const optional<string>&) const override;
};
@@ -41,7 +46,7 @@ namespace build2
manifest_install_rule () {}
virtual bool
- match (action, target&, const string&) const override;
+ match (action, target&) const override;
virtual auto_rmfile
install_pre (const file&, const install_dir&) const override;
diff --git a/libbuild2/version/snapshot-git.cxx b/libbuild2/version/snapshot-git.cxx
index 2ae3f5b..ab0224a 100644
--- a/libbuild2/version/snapshot-git.cxx
+++ b/libbuild2/version/snapshot-git.cxx
@@ -21,7 +21,7 @@ namespace build2
static global_cache<snapshot, dir_path> cache;
snapshot
- extract_snapshot_git (dir_path rep_root)
+ extract_snapshot_git (context& ctx, dir_path rep_root)
{
if (const snapshot* r = cache.find (rep_root))
return *r;
@@ -82,7 +82,11 @@ namespace build2
args[args_i + 1] = "--porcelain";
args[args_i + 2] = nullptr;
+ // @@ PERF: redo with custom stream reading code (then could also
+ // get rid of context).
+ //
r.committed = run<string> (
+ ctx,
3 /* verbosity */,
pp,
args,
@@ -108,7 +112,8 @@ namespace build2
// (reluctantly) assume that the only reason git cat-file fails is if
// there is no HEAD (that we equal with the "new repository" condition
// which is, strictly speaking, might not be the case either). So we
- // suppress any diagnostics, and handle non-zero exit code.
+ // suppress any diagnostics, and handle non-zero exit code (and so no
+ // diagnostics buffering is needed, plus we are in the load phase).
//
string data;
@@ -117,12 +122,12 @@ namespace build2
args[args_i + 2] = "HEAD";
args[args_i + 3] = nullptr;
- process pr (run_start (3 /* verbosity */,
+ process pr (run_start (3 /* verbosity */,
pp,
args,
- 0 /* stdin */,
- -1 /* stdout */,
- false /* error */));
+ 0 /* stdin */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
string l;
try
@@ -201,7 +206,7 @@ namespace build2
// that.
}
- if (run_finish_code (args, pr, l))
+ if (run_finish_code (args, pr, l, 2 /* verbosity */))
{
if (r.sn == 0)
fail << "unable to extract git commit id/date for " << rep_root;
diff --git a/libbuild2/version/snapshot.cxx b/libbuild2/version/snapshot.cxx
index d20e633..000bcba 100644
--- a/libbuild2/version/snapshot.cxx
+++ b/libbuild2/version/snapshot.cxx
@@ -12,7 +12,7 @@ namespace build2
namespace version
{
snapshot
- extract_snapshot_git (dir_path);
+ extract_snapshot_git (context&, dir_path);
static const path git (".git");
@@ -46,7 +46,7 @@ namespace build2
if (butl::entry_exists (d / git,
true /* follow_symlinks */,
true /* ignore_errors */))
- return extract_snapshot_git (move (d));
+ return extract_snapshot_git (rs.ctx, move (d));
}
return snapshot ();
diff --git a/manifest b/manifest
index f1a4620..02cfd7d 100644
--- a/manifest
+++ b/manifest
@@ -1,6 +1,6 @@
: 1
name: build2
-version: 0.14.0-a.0.z
+version: 0.16.0-a.0.z
summary: build2 build system
license: MIT
topics: build system, build toolchain
@@ -14,9 +14,9 @@ email: users@build2.org
build-warning-email: builds@build2.org
builds: host
requires: c++14
-depends: * build2 >= 0.13.0
-depends: * bpkg >= 0.13.0
-# @@ Should probably become conditional dependency.
-requires: ? cli ; Only required if changing .cli files.
-depends: libbutl [0.14.0-a.0.1 0.14.0-a.1)
-depends: libpkgconf [1.4.2 1.7.0-)
+depends: * build2 >= 0.15.0-
+depends: * bpkg >= 0.15.0-
+# @@ DEP Should probably become conditional dependency.
+#requires: ? cli ; Only required if changing .cli files.
+depends: libbutl [0.16.0-a.0.1 0.16.0-a.1)
+depends: libpkg-config ~0.1.0
diff --git a/old-tests/variable/override/buildfile b/old-tests/variable/override/buildfile
index 2889f69..c0330cb 100644
--- a/old-tests/variable/override/buildfile
+++ b/old-tests/variable/override/buildfile
@@ -1,57 +1,61 @@
-if ($t != [null])
+if ($p.t != [null])
{
- [$t] v = [null]
+ [$p.t] p.v = [null]
}
-print "/ :" $(/: v)
+/:
-if ($a == as)
+print "/ :" $(/: p.v)
+
+if ($p.a == as)
{
- v = x
+ p.v = x
}
-elif ($a == ap)
+elif ($p.a == ap)
{
- v += s
+ p.v += s
}
-elif ($a == pr)
+elif ($p.a == pr)
{
- v =+ p
+ p.v =+ p
}
-print ". :" $v
+print ". :" $p.v
d/
{
- if ($d_a == as)
+ file{t}:
+
+ if ($p.d_a == as)
{
- v = x
+ p.v = x
}
- elif ($d_a == ap)
+ elif ($p.d_a == ap)
{
- v += s
+ p.v += s
}
- elif ($d_a == pr)
+ elif ($p.d_a == pr)
{
- v =+ p
+ p.v =+ p
}
- print "d :" $v
+ print "d :" $p.v
- if ($d_t_a == as)
+ if ($p.d_t_a == as)
{
- file{t}: v = x
+ file{t}: p.v = x
}
- elif ($d_t_a == ap)
+ elif ($p.d_t_a == ap)
{
- file{t}: v += s
+ file{t}: p.v += s
}
- elif ($d_t_a == pr)
+ elif ($p.d_t_a == pr)
{
- file{t}: v =+ p
+ file{t}: p.v =+ p
}
- print "d/t :" $(file{t}: v)
+ print "d/t :" $(file{t}: p.v)
}
include p/
diff --git a/old-tests/variable/override/p/buildfile b/old-tests/variable/override/p/buildfile
index 5b84925..8f4df28 100644
--- a/old-tests/variable/override/p/buildfile
+++ b/old-tests/variable/override/p/buildfile
@@ -1,49 +1,51 @@
-if ($p_a == as)
+if ($p.p_a == as)
{
- v = x
+ p.v = x
}
-elif ($p_a == ap)
+elif ($p.p_a == ap)
{
- v += s
+ p.v += s
}
-elif ($p_a == pr)
+elif ($p.p_a == pr)
{
- v =+ p
+ p.v =+ p
}
-print "p :" $v
+print "p :" $p.v
d/
{
- if ($p_d_a == as)
+ file{t}:
+
+ if ($p.p_d_a == as)
{
- v = x
+ p.v = x
}
- elif ($p_d_a == ap)
+ elif ($p.p_d_a == ap)
{
- v += s
+ p.v += s
}
- elif ($p_d_a == pr)
+ elif ($p.p_d_a == pr)
{
- v =+ p
+ p.v =+ p
}
- print "p/d :" $v
+ print "p/d :" $p.v
- if ($p_d_t_a == as)
+ if ($p.p_d_t_a == as)
{
- file{t}: v = x
+ file{t}: p.v = x
}
- elif ($p_d_t_a == ap)
+ elif ($p.p_d_t_a == ap)
{
- file{t}: v += s
+ file{t}: p.v += s
}
- elif ($p_d_t_a == pr)
+ elif ($p.p_d_t_a == pr)
{
- file{t}: v =+ p
+ file{t}: p.v =+ p
}
- print "p/d/t :" $(file{t}: v)
+ print "p/d/t :" $(file{t}: p.v)
}
./:
diff --git a/old-tests/variable/override/simple b/old-tests/variable/override/simple
index 899daa2..983401a 100644
--- a/old-tests/variable/override/simple
+++ b/old-tests/variable/override/simple
@@ -1,3 +1,3 @@
-print $foo
+print $p.foo
./:
diff --git a/old-tests/variable/override/test.sh b/old-tests/variable/override/test.sh
index 4675b7e..fe89c56 100755
--- a/old-tests/variable/override/test.sh
+++ b/old-tests/variable/override/test.sh
@@ -53,16 +53,16 @@ function test ()
fi
}
-fail "foo= [string] bar" # error: typed override of variable foo
-#fail "!foo=bar" "!foo=BAR" # error: multiple global overrides of variable foo
-#fail "foo=bar" "foo=BAR" # error: multiple project overrides of variable foo
-#fail "%foo=bar" "%foo=BAR" # error: multiple project overrides of variable foo
+fail "p.foo= [string] bar" # error: typed override of variable p.foo
+#fail "!p.foo=bar" "!p.foo=BAR" # error: multiple global overrides of variable p.foo
+#fail "p.foo=bar" "p.foo=BAR" # error: multiple project overrides of variable p.foo
+#fail "%p.foo=bar" "%p.foo=BAR" # error: multiple project overrides of variable p.foo
-test --buildfile simple foo=bar ./ ./ <<< "bar" # Multiple bootstraps of the same project.
+test --buildfile simple p.foo=bar ./ ./ <<< "bar" # Multiple bootstraps of the same project.
# Visibility/qualification.
#
-test !v=X <<EOF
+test !p.v=X <<EOF
/ : X
. : X
d : X
@@ -72,7 +72,7 @@ p/d : X
p/d/t : X
EOF
-test v=X <<EOF
+test p.v=X <<EOF
/ :
. : X
d : X
@@ -82,7 +82,7 @@ p/d : X
p/d/t : X
EOF
-test ./v=X <<EOF
+test ./p.v=X <<EOF
/ :
. : X
d : X
@@ -92,7 +92,7 @@ p/d : X
p/d/t : X
EOF
-test .../v=X <<EOF
+test .../p.v=X <<EOF
/ :
. : X
d : X
@@ -102,7 +102,7 @@ p/d : X
p/d/t : X
EOF
-test ./p/v=X <<EOF
+test ./p/p.v=X <<EOF
/ :
. :
d :
@@ -112,7 +112,7 @@ p/d : X
p/d/t : X
EOF
-test .../p/v=X <<EOF
+test .../p/p.v=X <<EOF
/ :
. :
d :
@@ -122,7 +122,7 @@ p/d : X
p/d/t : X
EOF
-test v=X --buildfile loader ./p/ <<EOF
+test p.v=X --buildfile loader ./p/ <<EOF
/ :
. : X
d : X
@@ -132,7 +132,7 @@ p/d : X
p/d/t : X
EOF
-test .../v=X --buildfile loader ./p/ <<EOF
+test .../p.v=X --buildfile loader ./p/ <<EOF
/ :
. :
d :
@@ -142,7 +142,7 @@ p/d : X
p/d/t : X
EOF
-test /v=X <<EOF
+test /p.v=X <<EOF
/ :
. : X
d : X
@@ -152,7 +152,7 @@ p/d : X
p/d/t : X
EOF
-test v=X p_a=as <<EOF
+test p.v=X p.p_a=as <<EOF
/ :
. : X
d : X
@@ -162,7 +162,7 @@ p/d : X
p/d/t : X
EOF
-test %v=X <<EOF
+test %p.v=X <<EOF
/ :
. : X
d : X
@@ -172,7 +172,7 @@ p/d : X
p/d/t : X
EOF
-test %v=X p_a=as <<EOF
+test %p.v=X p.p_a=as <<EOF
/ :
. : X
d : X
@@ -182,7 +182,7 @@ p/d : x
p/d/t : x
EOF
-test /v=X d_a=as p_d_a=as <<EOF
+test /p.v=X p.d_a=as p.p_d_a=as <<EOF
/ :
. : X
d : x
@@ -192,17 +192,17 @@ p/d : x
p/d/t : x
EOF
-test %v+=S %v=+P a=as <<EOF
+test %p.v+=S %p.v=+P p.a=as <<EOF
/ :
. : P x S
d : P x S
d/t : P x S
-p : P x S
-p/d : P x S
-p/d/t : P x S
+p : P S
+p/d : P S
+p/d/t : P S
EOF
-test %v+=S %v=+P a=as p_a=as <<EOF
+test %p.v+=S %p.v=+P p.a=as p.p_a=as <<EOF
/ :
. : P x S
d : P x S
@@ -214,7 +214,7 @@ EOF
# Append/Prepend in override.
#
-test v+=S <<EOF
+test p.v+=S <<EOF
/ :
. : S
d : S
@@ -224,17 +224,17 @@ p/d : S
p/d/t : S
EOF
-test v+=S a=as <<EOF
+test p.v+=S p.a=as <<EOF
/ :
. : x S
d : x S
d/t : x S
-p : x S
-p/d : x S
-p/d/t : x S
+p : S
+p/d : S
+p/d/t : S
EOF
-test %v=+P a=as p_a=as <<EOF
+test %p.v=+P p.a=as p.p_a=as <<EOF
/ :
. : P x
d : P x
@@ -244,7 +244,7 @@ p/d : x
p/d/t : x
EOF
-test %v+=S v=+P a=as p_a=as <<EOF
+test %p.v+=S p.v=+P p.a=as p.p_a=as <<EOF
/ :
. : P x S
d : P x S
@@ -256,7 +256,7 @@ EOF
# Append/Prepend in both.
#
-test v=X a=ap d_a=ap p_a=ap p_d_a=ap <<EOF
+test p.v=X p.a=ap p.d_a=ap p.p_a=ap p.p_d_a=ap <<EOF
/ :
. : X
d : X
@@ -266,52 +266,52 @@ p/d : X
p/d/t : X
EOF
-test v+=S v=+P a=as d_a=ap d_t_a=ap p_a=ap p_d_a=ap p_d_t_a=ap <<EOF
+test p.v+=S p.v=+P p.a=as p.d_a=ap p.d_t_a=ap p.p_a=ap p.p_d_a=ap p.p_d_t_a=ap <<EOF
/ :
. : P x S
d : P x s S
d/t : P x s s S
-p : P x s S
-p/d : P x s s S
-p/d/t : P x s s s S
+p : P s S
+p/d : P s s S
+p/d/t : P s s s S
EOF
# These ones are surprising. I guess the moral is we shouldn't do "blind"
# cross-project append/prepend.
#
-test %v=X a=as d_a=ap p_a=ap p_d_a=ap <<EOF
+test %p.v=X p.a=as p.d_a=ap p.p_a=ap p.p_d_a=ap <<EOF
/ :
. : X
d : X
d/t : X
-p : x s
-p/d : x s s
-p/d/t : x s s
+p : s
+p/d : s s
+p/d/t : s s
EOF
-test %v+=S a=as d_a=ap p_a=ap p_d_a=ap <<EOF
+test %p.v+=S p.a=as p.d_a=ap p.p_a=ap p.p_d_a=ap <<EOF
/ :
. : x S
d : x s S
d/t : x s S
-p : x s
-p/d : x s s
-p/d/t : x s s
+p : s
+p/d : s s
+p/d/t : s s
EOF
-test %v+=S a=as d_a=ap p_a=ap p_d_a=ap ./ p/ <<EOF
+test %p.v+=S p.a=as p.d_a=ap p.p_a=ap p.p_d_a=ap ./ p/ <<EOF
/ :
. : x S
d : x s S
d/t : x s S
-p : x s S
-p/d : x s s S
-p/d/t : x s s S
+p : s S
+p/d : s s S
+p/d/t : s s S
EOF
# Typed override.
#
-test v+=S v=+P t=string <<EOF
+test p.v+=S p.v=+P p.t=string <<EOF
/ :
. : PS
d : PS
@@ -321,12 +321,12 @@ p/d : PS
p/d/t : PS
EOF
-test v+=S v=+P t=string a=as d_a=ap d_t_a=ap p_a=ap p_d_a=ap p_d_t_a=ap <<EOF
+test p.v+=S p.v=+P p.t=string p.a=as p.d_a=ap p.d_t_a=ap p.p_a=ap p.p_d_a=ap p.p_d_t_a=ap <<EOF
/ :
. : PxS
d : PxsS
d/t : PxssS
-p : PxsS
-p/d : PxssS
-p/d/t : PxsssS
+p : PsS
+p/d : PssS
+p/d/t : PsssS
EOF
diff --git a/old-tests/variable/type-pattern-append/buildfile b/old-tests/variable/type-pattern-append/buildfile
index 348f70f..3077c32 100644
--- a/old-tests/variable/type-pattern-append/buildfile
+++ b/old-tests/variable/type-pattern-append/buildfile
@@ -1,3 +1,5 @@
+./ sub/:
+
# Typed append/prepend.
#
#dir{a*}: x += [bool] true
diff --git a/repositories.manifest b/repositories.manifest
index 6bacad3..f2e3fa4 100644
--- a/repositories.manifest
+++ b/repositories.manifest
@@ -1,10 +1,8 @@
: 1
summary: build2 build system repository
-:
-role: prerequisite
-location: ../libbutl.git##HEAD
:
role: prerequisite
-location: https://git.build2.org/packaging/pkgconf/pkgconf.git##HEAD
+location: https://stage.build2.org/1
+trust: EC:50:13:E2:3D:F7:92:B4:50:0B:BF:2A:1F:7D:31:04:C6:57:6F:BC:BE:04:2E:E0:58:14:FA:66:66:21:1F:14
diff --git a/tests/bash/testscript b/tests/bash/testscript
index 1e5665c..05f48a1 100644
--- a/tests/bash/testscript
+++ b/tests/bash/testscript
@@ -119,7 +119,7 @@ if ($test.target == $build.host && $build.host.class != 'windows')
}
fi
- @import sub/foo@
+ @import sub.bash/foo@
EOI
cat <<EOI >=buildfile;
@@ -140,7 +140,7 @@ if ($test.target == $build.host && $build.host.class != 'windows')
:
{
cat <<EOI >=test.bash.in;
- @import sub/foo@
+ @import sub.bash/foo@
EOI
cat <<EOI >=driver.in;
diff --git a/tests/build/root.build b/tests/build/root.build
index 8f9a482..712e73c 100644
--- a/tests/build/root.build
+++ b/tests/build/root.build
@@ -14,9 +14,16 @@ if ($cxx.target.system == 'win32-msvc')
if ($cxx.class == 'msvc')
cxx.coptions += /wd4251 /wd4275 /wd4800
elif ($cxx.id == 'gcc')
+{
cxx.coptions += -Wno-maybe-uninitialized -Wno-free-nonheap-object \
-Wno-stringop-overread # libbutl
+ if ($cxx.version.major >= 13)
+ cxx.coptions += -Wno-dangling-reference
+}
+elif ($cxx.id.type == 'clang' && $cxx.version.major >= 15)
+ cxx.coptions += -Wno-unqualified-std-cast-call
+
# Setup the build system driver that we are testing (which may not be the same
# as our $build.path). We also need to disable importation using the built-in
# path.
diff --git a/tests/cc/libu/testscript b/tests/cc/libu/testscript
index 9db3406..b562157 100644
--- a/tests/cc/libu/testscript
+++ b/tests/cc/libu/testscript
@@ -2,7 +2,7 @@
# license : MIT; see accompanying LICENSE file
crosstest = false
-test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode)
.include ../../common.testscript
diff --git a/tests/cc/modules/common.testscript b/tests/cc/modules/common.testscript
index 6f09c62..50dc865 100644
--- a/tests/cc/modules/common.testscript
+++ b/tests/cc/modules/common.testscript
@@ -2,7 +2,7 @@
# license : MIT; see accompanying LICENSE file
crosstest = false
-test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode)
.include ../../common.testscript
@@ -47,3 +47,10 @@ EOI
+$* noop <<EOI | set headers
print ($cxx.features.modules && $cxx.id == 'gcc')
EOI
+
+# @@ TMP: modules support is broken in MinGW GCC (not just symexport).
+#
+if ($cxx.target.class == 'windows' && $cxx.id == 'gcc')
+ modules = false
+ headers = false
+end
diff --git a/tests/cc/modules/modules.testscript b/tests/cc/modules/modules.testscript
index 681238a..8762885 100644
--- a/tests/cc/modules/modules.testscript
+++ b/tests/cc/modules/modules.testscript
@@ -354,7 +354,7 @@ $* test --verbose 1 <<EOI 2>>EOE;
exe{test}: cxx{driver} {mxx}{foo-core}
exe{test}: test.arguments = two
EOI
- c++ cxx{driver}
+ c++ cxx{driver} -> obje{driver}
ld exe{test}
test exe{test}
EOE
diff --git a/tests/cc/preprocessed/testscript b/tests/cc/preprocessed/testscript
index 269cafe..507a92d 100644
--- a/tests/cc/preprocessed/testscript
+++ b/tests/cc/preprocessed/testscript
@@ -2,7 +2,7 @@
# license : MIT; see accompanying LICENSE file
crosstest = false
-test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true) update
+test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode) update
.include ../../common.testscript
@@ -10,7 +10,7 @@ test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true) up
#
# trace: cxx::compile::apply extracting (headers|modules) from: .../obje{(test).o...}
#
-filter = sed -n -e \
+filter = [cmdline] sed -n -e \
\''s/^trace: cxx::compile_rule::apply: extracting ([^ ]+) from[^{]+\{([^.]+).*/\1 \2/p'\'
+cat <<EOI >=build/root.build
diff --git a/tests/dependency/recipe/testscript b/tests/dependency/recipe/testscript
index ebcdfdb..f43111e 100644
--- a/tests/dependency/recipe/testscript
+++ b/tests/dependency/recipe/testscript
@@ -611,18 +611,42 @@ EOE
{
: weight-0
:
- $* <<EOI 2>>EOE != 0
- alias{x}:
- {{
-
- exit
- }}
- dump alias{x}
- EOI
- <stdin>:3:1: error: unable to deduce low-verbosity script diagnostics name
- info: consider specifying it explicitly with the 'diag' recipe attribute
- info: or provide custom low-verbosity diagnostics with the 'diag' builtin
- EOE
+ {
+ : single-operation
+ :
+ {
+ $* <<EOI 2>>~%EOE%
+ alias{x}:
+ {{
+
+ exit
+ }}
+ dump alias{x}
+ EOI
+ %.{2}
+ % [diag=update] perform(update)
+ %.{3}
+ EOE
+ }
+
+ : multiple-operations
+ :
+ {
+ $* <<EOI 2>>EOE != 0
+ alias{x}:
+ % update clean
+ {{
+
+ exit
+ }}
+ dump alias{x}
+ EOI
+ <stdin>:4:1: error: unable to deduce low-verbosity script diagnostics name
+ info: consider specifying it explicitly with the 'diag' recipe attribute
+ info: or provide custom low-verbosity diagnostics with the 'diag' builtin
+ EOE
+ }
+ }
: weight-1
:
@@ -675,7 +699,7 @@ EOE
: process-path-ex
:
{
- config_cxx = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+ config_cxx = config.cxx=$quote($recall($cxx.path) $cxx.config.mode)
mkdir build;
cat <<EOI >=build/bootstrap.build;
@@ -691,7 +715,7 @@ EOE
EOI
$* $config_cxx <<EOI 2>>~%EOE%
- c = $cxx.path --version
+ c = [cmdline] $cxx.path --version
alias{x}:
{{
$c
@@ -729,7 +753,7 @@ EOE
$* <<EOI 2>>EOE != 0
alias{x}:
{{
- foo = bar
+ foo = [cmdline] bar
$foo
}}
dump alias{x}
@@ -845,8 +869,11 @@ EOE
% .+alias\{x\}:%
% perform(update)
{{
+ rm a
+ echo b | set c
diag bar
- %.{4}
+ fo$v
+ }}
EOE
}
}
diff --git a/tests/directive/config.testscript b/tests/directive/config.testscript
index 4049fa0..fba858f 100644
--- a/tests/directive/config.testscript
+++ b/tests/directive/config.testscript
@@ -14,7 +14,7 @@ test.arguments =
EOI
+cat <<EOI >=build/root.build
- config [bool] config.test.fancy ?= false
+ config [bool, null] config.test.fancy ?= false
print ($defined(config.test.fancy) ? $config.test.fancy : undefined)
EOI
@@ -114,7 +114,6 @@ test.arguments =
EOE
}
-
: default-none
:
{
@@ -125,7 +124,7 @@ test.arguments =
EOI
+cat <<EOI >=build/root.build
- config [bool] config.test.fancy
+ config [bool, null] config.test.fancy
print ($defined(config.test.fancy) ? $config.test.fancy : undefined)
EOI
@@ -166,6 +165,37 @@ test.arguments =
EOE
}
+: non-nullable
+:
+{
+ .include ../common.testscript
+
+ +cat <<EOI >+build/bootstrap.build
+ using config
+ EOI
+
+ +cat <<EOI >=build/root.build
+ config [bool] config.test.fancy ?= false
+ print ($defined(config.test.fancy) ? $config.test.fancy : undefined)
+ EOI
+
+ # This must be a single, serial test since we are sharing config.build.
+ #
+ : test
+ :
+ cat <<EOI >=buildfile;
+ ./:
+ EOI
+
+ $* noop >'false' ;
+ $* noop config.test.fancy=false >'false' ;
+ $* noop config.test.fancy=true >'true' ;
+
+ $* noop config.test.fancy=[null] 2>>~/EOE/ != 0
+ /.+root.build:1:1: error: null value in non-nullable variable config\.test\.fancy/
+ EOE
+}
+
: report
:
{
diff --git a/tests/directive/parsing.testscript b/tests/directive/parsing.testscript
index 04dd054..3f180f0 100644
--- a/tests/directive/parsing.testscript
+++ b/tests/directive/parsing.testscript
@@ -1,4 +1,4 @@
-# file : tests/directive/assert.testscript
+# file : tests/directive/parsing.testscript
# license : MIT; see accompanying LICENSE file
# Test overall directive parsing.
diff --git a/tests/directive/run.testscript b/tests/directive/run.testscript
index 199dd5f..ecff0fe 100644
--- a/tests/directive/run.testscript
+++ b/tests/directive/run.testscript
@@ -25,8 +25,9 @@ EOI
: bad-exit
:
cat <'assert false' >=buildfile;
-$* <"$run" 2>>EOE != 0
+$* <"$run" 2>>~/EOE/ != 0
buildfile:1:1: error: assertion failed
+/<stdin>:1:5: error: process .+ exited with code 1/
EOE
: bad-output
diff --git a/tests/eval/qual.testscript b/tests/eval/qual.testscript
index 29f6340..88339fd 100644
--- a/tests/eval/qual.testscript
+++ b/tests/eval/qual.testscript
@@ -5,8 +5,9 @@
.include ../common.testscript
-$* <'print (file{foo}: bar)' >'file{foo}:bar' : target
-$* <'print (foo/dir{}: bar)' >'dir{foo/}:bar' : scope
+$* <'print (file{foo}: bar)' >'bar:file{foo}' : target
+$* <'print (file{foo}@./: bar)' >'bar:file{foo}@./' : target-out
+$* <'print (foo/dir{}: bar)' >'bar:dir{foo/}' : target-dir
: attribute
:
diff --git a/tests/expansion/escape.testscript b/tests/expansion/escape.testscript
new file mode 100644
index 0000000..1140032
--- /dev/null
+++ b/tests/expansion/escape.testscript
@@ -0,0 +1,17 @@
+# file      : tests/expansion/escape.testscript
+# license : MIT; see accompanying LICENSE file
+
+# Test escape sequence expansion.
+
+.include ../common.testscript
+
+: simple
+:
+$* <<EOI >>EOO
+print "foo$\nbar"
+print $size([string] "foo$\0bar")
+EOI
+foo
+bar
+7
+EOO
diff --git a/tests/function/builtin/testscript b/tests/function/builtin/testscript
index 3d31ca2..714a38d 100644
--- a/tests/function/builtin/testscript
+++ b/tests/function/builtin/testscript
@@ -86,8 +86,8 @@
: likely is set at the time of login, and on Windows it is set by build2 on
: startup.
:
- : @@ Use a custom variable, when an ability to set environment variables in
- : testscript is implemented.
+ : @@ TMP Use a custom variable, when an ability to set environment variables
+ : in testscript is implemented. It is now!
:
{
: string
diff --git a/tests/function/integer/buildfile b/tests/function/integer/buildfile
new file mode 100644
index 0000000..308fe09
--- /dev/null
+++ b/tests/function/integer/buildfile
@@ -0,0 +1,4 @@
+# file : tests/function/integer/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/function/integer/testscript b/tests/function/integer/testscript
new file mode 100644
index 0000000..ad2d3bb
--- /dev/null
+++ b/tests/function/integer/testscript
@@ -0,0 +1,41 @@
+# file : tests/function/integer/testscript
+# license : MIT; see accompanying LICENSE file
+
+.include ../../common.testscript
+
+: integer-sequence
+:
+{
+ $* <'print $integer_sequence(1, 3)' >'1 2' : basics
+ $* <'print $integer_sequence(1, 0)' >'' : empty
+ $* <'print $integer_sequence(0, 8, 2)' >'0 2 4 6' : step
+}
+
+: string
+:
+{
+ $* <'print $string([uint64] 0xffff)' >'65535' : uint
+ $* <'print $string([uint64] 0xffff, 16)' >'0xffff' : uint-hex
+ $* <'print $string([uint64] 0xffff, 16, 8)' >'0x0000ffff' : uint-hex-width
+}
+
+: sort
+:
+{
+ $* <'print $sort([uint64s] 0 2 1 000)' >'0 0 1 2' : basics
+ $* <'print $sort([uint64s] 0 2 1 000, dedup)' >'0 1 2' : dedup
+}
+
+: find
+:
+{
+ $* <'print $find([uint64s] 1 2 3, 2)' >'true' : basics-true
+ $* <'print $find([uint64s] 1 2 3, 0)' >'false' : basics-false
+}
+
+: find_index
+:
+{
+ $* <'print $find_index([int64s] -1 -2 -3, -2)' >'1' : basics-true
+ $* <'print $find_index([int64s] -1 -2 -3, 0)' >'3' : basics-false
+}
diff --git a/tests/function/name/buildfile b/tests/function/name/buildfile
new file mode 100644
index 0000000..48be4c3
--- /dev/null
+++ b/tests/function/name/buildfile
@@ -0,0 +1,4 @@
+# file : tests/function/name/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/function/name/testscript b/tests/function/name/testscript
new file mode 100644
index 0000000..4588e1d
--- /dev/null
+++ b/tests/function/name/testscript
@@ -0,0 +1,68 @@
+# file : tests/function/name/testscript
+# license : MIT; see accompanying LICENSE file
+
+.include ../../common.testscript
+
+: is_a
+:
+{
+ $* <'print $is_a(file{foo}, path_target)' >'true' : basics-true
+ $* <'print $is_a(alias{foo}, path_target)' >'false' : basics-false
+ $* <'print $is_a(file{foo}@./, path_target)' >'true' : out
+ $* <<EOI >'true' : derived
+ define txt: file
+ print $is_a(txt{foo}, path_target)
+ EOI
+}
+
+: filter
+:
+{
+ $* <<EOI >'file{foo}@./ txt{baz}' : basics
+ define txt: file
+ print $filter(file{foo}@./ alias{bar} dir{./} txt{baz}, file)
+ EOI
+
+ $* <<EOI >'file{foo}@./ txt{baz}' : basics-out
+ define txt: file
+ print $filter_out(file{foo}@./ alias{bar} dir{./} txt{baz}, alias)
+ EOI
+
+ $* <<EOI >'file{foo}@./ dir{./} txt{baz}' : multiple
+ define txt: file
+ print $filter(file{foo}@./ alias{bar} dir{./} txt{baz}, file dir)
+ EOI
+
+ $* <<EOI >'file{foo}@./ alias{bar}' : multiple-out
+ define txt: file
+ print $filter_out(file{foo}@./ alias{bar} dir{./} txt{baz}, txt dir)
+ EOI
+}
+
+: size
+:
+{
+ $* <'print $size(a b c@./)' >'3' : basics
+ $* <'print $type($size(a))' >'uint64' : type
+}
+
+: sort
+:
+{
+ $* <'print $sort(d/t{a} t{c b} d/t{a})' >'t{b} t{c} d/t{a} d/t{a}' : basics
+ $* <'print $sort(d/t{a} t{c b} d/t{a}, dedup)' >'t{b} t{c} d/t{a}' : dedup
+}
+
+: find
+:
+{
+ $* <'print $find([names] d/t{a} t{a b}, t{a})' >'true' : basics-true
+ $* <'print $find([names] d/t{a} t{a b}, d/t{b})' >'false' : basics-false
+}
+
+: find_index
+:
+{
+ $* <'print $find_index([names] d/t{a} t{a b}, t{a})' >'1' : basics-true
+ $* <'print $find_index([names] d/t{a} t{a b}, d/t{b})' >'3' : basics-false
+}
diff --git a/tests/function/path/testscript b/tests/function/path/testscript
index ad76513..1ed89ca 100644
--- a/tests/function/path/testscript
+++ b/tests/function/path/testscript
@@ -3,10 +3,83 @@
.include ../../common.testscript
-posix = ($cxx.target.class != 'windows')
+windows = ($cxx.target.class == 'windows')
+posix = (!$windows)
s = ($posix ? '/' : '\')
+: posix-string
+:
+{
+ : relative
+ :
+ {
+ s = ($posix ? '/' : '\\')
+
+ $* <"print \$posix_string\([path] a$(s)b)" >'a/b' : path
+ $* <"print \$posix_string\([paths] a$(s)b a$(s)c$(s))" >'a/b a/c' : paths
+ $* <"print \$posix_string\([dir_path] a$(s)b)" >'a/b' : dir-path
+ $* <"print \$posix_string\([dir_paths] a$(s)b a$(s)c$(s))" >'a/b a/c' : dir-paths
+ $* <"print \$path.posix_string\(a$(s)b a$(s)c$(s))" >'a/b a/c' : untyped
+ }
+
+ : absolute
+ :
+ {
+ if $posix
+ {
+ $* <'print $posix_string([paths] /a/b /a/c/)' >'/a/b /a/c' : paths
+ $* <'print $posix_string([dir_paths] /a/b /a/c/)' >'/a/b /a/c' : dir-paths
+ $* <'print $posix_string([dir_path] /)' >'/' : root-dir
+ $* <'print $path.posix_string(/a/b /a/c/)' >'/a/b /a/c' : untyped
+ }
+ else
+ {
+ $* <'print $posix_string([paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c' : paths
+ $* <'print $posix_string([dir_paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c' : dir-paths
+ $* <'print $posix_string([dir_paths] "c:\\" "C:")' >'c:/ C:/' : root-dir
+ $* <'print $path.posix_string("c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c' : untyped
+ $* <'print $path.posix_string("c:\\" "C:")' >'c:/ C:/' : untyped-root
+ }
+ }
+}
+
+: posix-representation
+:
+{
+ : relative
+ :
+ {
+ s = ($posix ? '/' : '\\')
+
+ $* <"print \$posix_representation\([path] a$(s)b)" >'a/b' : path
+ $* <"print \$posix_representation\([paths] a$(s)b a$(s)c$(s))" >'a/b a/c/' : paths
+ $* <"print \$posix_representation\([dir_path] a$(s)b)" >'a/b/' : dir-path
+ $* <"print \$posix_representation\([dir_paths] a$(s)b a$(s)c$(s))" >'a/b/ a/c/' : dir-paths
+ $* <"print \$path.posix_representation\(a$(s)b a$(s)c$(s))" >'a/b a/c/' : untyped
+ }
+
+ : absolute
+ :
+ {
+ if $posix
+ {
+ $* <'print $posix_representation([paths] /a/b /a/c/)' >'/a/b /a/c/' : paths
+ $* <'print $posix_representation([dir_paths] /a/b /a/c/)' >'/a/b/ /a/c/' : dir-paths
+ $* <'print $posix_representation([dir_path] /)' >'/' : root-dir
+ $* <'print $path.posix_representation(/a/b /a/c/)' >'/a/b /a/c/' : untyped
+ }
+ else
+ {
+ $* <'print $posix_representation([paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c/' : paths
+ $* <'print $posix_representation([dir_paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b/ C:/a/c/' : dir-paths
+ $* <'print $posix_representation([dir_paths] "c:\\" "C:")' >'c:/ C:/' : root-dir
+ $* <'print $path.posix_representation("c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c/' : untyped
+ $* <'print $path.posix_representation("c:\\" "C:")' >'c:/ C:/' : untyped-root
+ }
+ }
+}
+
: canonicalize
:
{
@@ -48,6 +121,28 @@ if! $posix
$* <'print $path.directory(a/b c/d/)' >"a/ c/" : dir-names
}
+: root_directory
+:
+{
+ : posix
+ :
+ if $posix
+ {
+ $* <'print $root_directory([path] /a/b)' >'/' : basics
+ $* <'print $root_directory([path] /)' >'/' : root
+ }
+
+ : windows
+ :
+ if $windows
+ {
+ $* <'print $root_directory([path] "c:\\a\\b")' >'c:\' : basics
+ $* <'print $root_directory([path] "c:")' >'c:\' : root
+ }
+
+ $* <'print $root_directory([path] a/b)' >'' : relative
+}
+
: base
:
{
@@ -78,6 +173,12 @@ if! $posix
EOE
}
+: relative
+:
+{
+ $* <'print $relative([path] a/b/c, [dir_path] a/x/y)' >"..$s..$(s)b/c" : basics
+}
+
: extension
:
{
@@ -104,6 +205,45 @@ if! $posix
EOO
}
+: sort
+:
+{
+ $* <'print $sort([paths] a c b a)' >'a a b c' : basics
+ $* <'print $sort([paths] a c b a, dedup)' >'a b c' : dedup
+
+ : icase
+ :
+ if $windows
+ {
+ $* <'print $sort([paths] a C B a)' >'a a B C'
+ }
+}
+
+: size
+:
+{
+ $* <'print $size([path] abc)' >'3' : basics
+ $* <'print $size([path] )' >'0' : zero
+
+ $* <'print $size([dir_path] abc)' >'3' : dir-basics
+ $* <'print $size([dir_path] abc/)' >'3' : dir-separator
+ $* <'print $size([dir_path] )' >'0' : dir-zero
+}
+
+: find
+:
+{
+ $* <'print $find([paths] x y z, y)' >'true' : basics-true
+ $* <'print $find([paths] x y z, a)' >'false' : basics-false
+}
+
+: find_index
+:
+{
+ $* <'print $find_index([dir_paths] x y z, y)' >'1' : basics-true
+ $* <'print $find_index([dir_paths] x y z, a)' >'3' : basics-false
+}
+
: invalid-path
:
p = ($posix ? /../foo : 'c:/../foo');
diff --git a/tests/function/regex/testscript b/tests/function/regex/testscript
index 5167390..538bdab 100644
--- a/tests/function/regex/testscript
+++ b/tests/function/regex/testscript
@@ -478,6 +478,64 @@
}
}
+: filter-match
+:
+{
+ : match
+ :
+ {
+ : string
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_match(-g -O2 -O3, [string] '-O[23]')
+ EOI
+
+ : untyped
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_match(-g -O2 -O3, '-O[23]')
+ EOI
+
+ : strings
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_match([strings] -g -O2 -O3, '-O[23]')
+ EOI
+
+ : nomatch
+ :
+ $* <<EOI >''
+ print $regex.filter_match(-g -O1, '-O[23]')
+ EOI
+ }
+
+ : filter-out
+ :
+ {
+ : untyped
+ :
+ $* <<EOI >'-g'
+ print $regex.filter_out_match(-g -O2 -O3, '-O[23]')
+ EOI
+
+ : all-match
+ :
+ $* <<EOI >''
+ print $regex.filter_out_match(-O2 -O3, '-O[23]')
+ EOI
+ }
+
+ : flags
+ :
+ {
+ : icase
+ :
+ $* <<EOI >'Foo.cxx'
+ print $regex.filter_match(Foo.cxx, 'f[^.]+.*', icase)
+ EOI
+ }
+}
+
: find-search
:
{
@@ -520,6 +578,64 @@
}
}
+: filter-search
+:
+{
+ : match
+ :
+ {
+ : string
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_search(-g -O2 -O3, [string] '-O')
+ EOI
+
+ : untyped
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_search(-g -O2 -O3, '-O')
+ EOI
+
+ : strings
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_search([strings] -g -O2 -O3, '-O')
+ EOI
+
+ : nomatch
+ :
+ $* <<EOI >''
+ print $regex.filter_search(-g, '-O')
+ EOI
+ }
+
+ : filter-out
+ :
+ {
+ : untyped
+ :
+ $* <<EOI >'-g'
+ print $regex.filter_out_search(-g -O2 -O3, '-O')
+ EOI
+
+ : all-match
+ :
+ $* <<EOI >''
+ print $regex.filter_out_search(-O2 -O3, '-O')
+ EOI
+ }
+
+ : flags
+ :
+ {
+ : icase
+ :
+ $* <<EOI >'Foo.cxx'
+ print $regex.filter_search(Foo.cxx, 'f', icase)
+ EOI
+ }
+}
+
: merge
:
{
diff --git a/tests/function/string/testscript b/tests/function/string/testscript
index 9275fe5..364ce42 100644
--- a/tests/function/string/testscript
+++ b/tests/function/string/testscript
@@ -31,3 +31,34 @@
$* <'print $trim([string] " a ")' >'a' : string
$* <'print $string.trim( " a ")' >'a' : untyped
}
+
+: sort
+:
+{
+ $* <'print $sort([strings] a c b a)' >'a a b c' : basics
+ $* <'print $sort([strings] a c b a, dedup)' >'a b c' : dedup
+ $* <'print $sort([strings] a C B a, icase)' >'a a B C' : icase
+}
+
+: size
+:
+{
+ $* <'print $size([string] abc)' >'3' : basics
+ $* <'print $size([string] )' >'0' : zero
+}
+
+: find
+:
+{
+ $* <'print $find([strings] x y z, y)' >'true' : basics-true
+ $* <'print $find([strings] x y z, Y)' >'false' : basics-false
+ $* <'print $find([strings] x y z, Y, icase)' >'true' : icase
+}
+
+: find_index
+:
+{
+ $* <'print $find_index([strings] x y z, y)' >'1' : basics-true
+ $* <'print $find_index([strings] x y z, Y)' >'3' : basics-false
+ $* <'print $find_index([strings] x y z, Y, icase)' >'1' : icase
+}
diff --git a/tests/in/testscript b/tests/in/testscript
index 0d5be48..5dce1d3 100644
--- a/tests/in/testscript
+++ b/tests/in/testscript
@@ -17,7 +17,9 @@ cat <<EOI >=test.in;
EOI
cat <<EOI >=buildfile;
file{test}: in{test}
- file{test}: foo = FOO
+ {
+ foo = FOO
+ }
EOI
$* <<<buildfile;
cat test >>EOO;
@@ -25,6 +27,27 @@ cat test >>EOO;
EOO
$* clean <<<buildfile
+: substitution-map
+:
+cat <<EOI >=test.in;
+ foo = $_foo$
+ bar = $bar$
+ EOI
+cat <<EOI >=buildfile;
+ file{test}: in{test}
+ {
+ in.substitutions = _foo@FOO
+ in.substitutions += bar@BAR
+ bar = wrong
+ }
+ EOI
+$* <<<buildfile;
+cat test >>EOO;
+ foo = FOO
+ bar = BAR
+ EOO
+$* clean <<<buildfile
+
: lax
:
cat <<EOI >=test.in;
@@ -33,7 +56,9 @@ cat <<EOI >=test.in;
EOI
$* <<EOI &test &test.d;
file{test}: in{test}
- file{test}: in.substitution = lax
+ {
+ in.mode = lax
+ }
EOI
cat test >>EOO
$10
diff --git a/tests/libbuild2/driver.cxx b/tests/libbuild2/driver.cxx
index 9a8db9e..6201a6c 100644
--- a/tests/libbuild2/driver.cxx
+++ b/tests/libbuild2/driver.cxx
@@ -4,16 +4,21 @@
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
+#include <libbuild2/module.hxx>
#include <libbuild2/context.hxx>
#include <libbuild2/scheduler.hxx>
#include <libbuild2/file-cache.hxx>
+#include <libbuild2/dist/init.hxx>
+#include <libbuild2/test/init.hxx>
+#include <libbuild2/config/init.hxx>
+#include <libbuild2/install/init.hxx>
+
#include <libbuild2/in/init.hxx>
#include <libbuild2/bin/init.hxx>
#include <libbuild2/c/init.hxx>
#include <libbuild2/cc/init.hxx>
#include <libbuild2/cxx/init.hxx>
-#include <libbuild2/bash/init.hxx>
#include <libbuild2/version/init.hxx>
#undef NDEBUG
@@ -27,21 +32,25 @@ main (int, char* argv[])
// Fake build system driver, default verbosity.
//
init_diag (1);
- init (nullptr, argv[0]);
+ init (nullptr, argv[0], true);
+
+ load_builtin_module (&config::build2_config_load);
+ load_builtin_module (&dist::build2_dist_load);
+ load_builtin_module (&test::build2_test_load);
+ load_builtin_module (&install::build2_install_load);
- bin::build2_bin_load ();
- cc::build2_cc_load ();
- c::build2_c_load ();
- cxx::build2_cxx_load ();
- version::build2_version_load ();
- in::build2_in_load ();
- bash::build2_bash_load ();
+ load_builtin_module (&bin::build2_bin_load);
+ load_builtin_module (&cc::build2_cc_load);
+ load_builtin_module (&c::build2_c_load);
+ load_builtin_module (&cxx::build2_cxx_load);
+ load_builtin_module (&version::build2_version_load);
+ load_builtin_module (&in::build2_in_load);
// Serial execution.
//
scheduler sched (1);
global_mutexes mutexes (1);
- file_cache fcache;
+ file_cache fcache (true);
context ctx (sched, mutexes, fcache);
return 0;
diff --git a/tests/loop/for.testscript b/tests/loop/for.testscript
index 5376029..e043ec0 100644
--- a/tests/loop/for.testscript
+++ b/tests/loop/for.testscript
@@ -116,3 +116,17 @@ a@1
b@2
c@3
EOO
+
+: elem-attribute
+:
+$* <<EOI >>EOO
+for i [uint64]: 0 1 2
+{
+ i += 1
+ print $i
+}
+EOI
+1
+2
+3
+EOO
diff --git a/tests/name/extension.testscript b/tests/name/extension.testscript
index 1583109..6a542fe 100644
--- a/tests/name/extension.testscript
+++ b/tests/name/extension.testscript
@@ -131,7 +131,7 @@ EOI
EOI
f.oo
- txt{f.oo.}
+
EOO
: default-extension
diff --git a/tests/name/pattern.testscript b/tests/name/pattern.testscript
index 91fb98d..c1a4ce4 100644
--- a/tests/name/pattern.testscript
+++ b/tests/name/pattern.testscript
@@ -18,6 +18,35 @@ pat = '*'
print "$(pat).txt"
EOI
+: typed-concat
+:
+{
+ : dir-path
+ :
+ touch foo.txt;
+ $* <'print {$src_base/*.txt}' >/~'%.+/foo\.txt%'
+
+ : path
+ :
+ touch foo.txt;
+ $* <<EOI >/~'%.+/foo\.txt%'
+ p = [path] $src_base
+ print {$p/*.txt}
+ EOI
+
+ : string
+ :
+ touch foo.txt;
+ $* <<EOI >~'%.+/\*\.txt%'
+ p = [string] "$src_base"
+ print {$p/*.txt}
+ EOI
+
+ : not-pattern
+ :
+ $* <'print {$src_base/foo.txt}' >/~'%.+/foo\.txt%'
+}
+
: detect
:
: Test pattern_mode parsing logic.
@@ -332,13 +361,13 @@ EOI
:
{
mkdir dir;
- $* <'print $d' 'd=*/' >/'dir/' : dir
+ $* <'print $p.d' 'p.d=*/' >/'dir/' : dir
mkdir dir;
- $* <'print $d' 'd=dir{*}' >/'dir{dir/}' : dir-type
+ $* <'print $p.d' 'p.d=dir{*}' >/'dir{dir/}' : dir-type
touch foo.txt;
- $* <'print $f' 'f=*.txt' >'foo.txt' : feil
+ $* <'print $p.f' 'p.f=*.txt' >'foo.txt' : feil
}
: buildspec
diff --git a/tests/recipe/buildscript/testscript b/tests/recipe/buildscript/testscript
index 12c5717..cded5ea 100644
--- a/tests/recipe/buildscript/testscript
+++ b/tests/recipe/buildscript/testscript
@@ -31,7 +31,7 @@ posix = ($cxx.target.class != 'windows')
}}
EOI
- $* 2>'cp file{foo}';
+ $* 2>'cp file{bar} -> file{foo}';
cat <<<foo >'bar';
@@ -65,9 +65,9 @@ posix = ($cxx.target.class != 'windows')
EOI
$* 2>>~%EOE% != 0;
- concat file{bar.}
+ concat file{bar}
%cat: unable to print '.+bar.baz': .+%
- buildfile:10:3: error: cat exited with code 1
+ buildfile:10:3: error: builtin cat exited with code 1
%.+
EOE
@@ -75,7 +75,7 @@ posix = ($cxx.target.class != 'windows')
echo 'baz' >=bar.baz;
- $* 2>'concat file{bar.}';
+ $* 2>'concat file{bar}';
cat <<<foo >>EOO;
bar
@@ -100,7 +100,7 @@ posix = ($cxx.target.class != 'windows')
EOI
$* 2>>~%EOE% != 0;
- cp file{foo}
+ cp file{bar} -> file{foo}
buildfile:4:3: error: stdout and stderr redirected to each other
%.+
EOE
@@ -108,7 +108,7 @@ posix = ($cxx.target.class != 'windows')
$* clean 2>-
}
- : untracked-var
+ : computed-var
:
{
cat <<EOI >=buildfile;
@@ -121,9 +121,29 @@ posix = ($cxx.target.class != 'windows')
}}
EOI
+ $* 2>>EOE != 0
+ buildfile:6:10: error: expansion of computed variable is only allowed in depdb preamble
+ info: consider using 'depdb' builtin to track its value changes
+ EOE
+ }
+
+ : untracked-var
+ :
+ {
+ cat <<EOI >=buildfile;
+ a = a
+ b = b
+ foo:
+ {{
+ x = true
+ y = $($x ? a : b)
+ depdb env BOGUS
+ echo $y >$path($>)
+ }}
+ EOI
+
$* 2>>~%EOE% != 0;
- echo file{foo}
- buildfile:6:10: error: use of untracked variable 'a'
+ buildfile:6:8: error: use of untracked variable 'a'
info: use the 'depdb' builtin to manually track it
%.+
EOE
@@ -155,13 +175,32 @@ posix = ($cxx.target.class != 'windows')
EOI
$* test 2>>EOE;
- cp exe{foo}
- test exe{foo.}
+ cp file{bar} -> exe{foo}
+ test exe{foo}
EOE
$* clean 2>-
}
+ : diag
+ :
+ {
+ cat <<EOI >=buildfile;
+ foo:
+ {{
+ v1 = foo
+ echo bar | set v2
+ diag echo "$v1 $v2" -> $>
+ echo "$v1 $v2" >$path($>)
+ }}
+ EOI
+
+ $* 2>'echo foo bar -> file{foo}';
+ cat <<<foo >'foo bar';
+
+ $* clean 2>-
+ }
+
: depdb
:
{
@@ -201,17 +240,19 @@ posix = ($cxx.target.class != 'windows')
a = $process.run(cat baz)
foo: bar
{{
+ x = true
+ y = $($x ? a : b)
depdb hash "$a"
+
diag compose $>
cp $path($<) $path($>)
- x = true
- echo "$($x ? a : b)" >>$path($>)
+ echo $y >>$path($>)
}}
EOI
- $* 2>'compose file{foo.}';
+ $* 2>'compose file{foo}';
cat <<<foo >>EOO;
bar
@@ -227,7 +268,7 @@ posix = ($cxx.target.class != 'windows')
echo 'BAR' >=bar;
- $* 2>'compose file{foo.}';
+ $* 2>'compose file{foo}';
cat <<<foo >>EOO;
BAR
@@ -238,7 +279,7 @@ posix = ($cxx.target.class != 'windows')
echo 'BAZ' >=baz;
- $* 2>'compose file{foo.}';
+ $* 2>'compose file{foo}';
cat <<<foo >>EOO;
BAR
@@ -250,6 +291,97 @@ posix = ($cxx.target.class != 'windows')
$* clean 2>-
}
+ : preamble
+ :
+ {
+ : valid
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ s = $process.run(cat bar)
+ foo:
+ {{
+ depdb clear
+
+ s1 = 'abc'
+ s2 = 'xyz'
+
+ if echo "$s" >>>? 'bar'
+ v = "$s1"
+ else
+ echo "$s2" | set v
+ end
+
+ depdb string "$v"
+
+ echo "$v" >$path($>)
+ }}
+ EOI
+
+ $* 2>'echo file{foo}';
+ cat <<<foo >'abc';
+
+ $* 2>/'info: dir{./} is up to date';
+
+ echo 'baz' >=bar;
+ $* 2>'echo file{foo}';
+ cat <<<foo >'xyz';
+
+ $* clean 2>-
+ }
+
+ : invalid
+ :
+ {
+ cat <<EOI >=buildfile;
+ foo:
+ {{
+ v = 'abc'
+ echo "$v" >$path($>)
+ depdb string "$v"
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ buildfile:4:3: error: disallowed command in depdb preamble
+ info: only variable assignments are allowed in depdb preamble
+ buildfile:5:3: info: depdb preamble ends here
+ %.+
+ EOE
+
+ $* clean 2>-
+ }
+
+ : temp-dir
+ :
+ {
+ cat <<EOI >=buildfile;
+ foo:
+ {{
+ touch $~/f | set dummy
+
+ if test -f $~/f
+ v = "yes"
+ else
+ v = "no"
+ end
+
+ depdb string "$v"
+ diag echo $>
+
+ test -f $~/f
+ echo "$v" >$path($>)
+ }}
+ EOI
+
+ $* 2>'echo file{foo}';
+
+ $* clean 2>-
+ }
+ }
+
: string
:
{
@@ -368,92 +500,96 @@ posix = ($cxx.target.class != 'windows')
}
}
- : preamble
+ : dyndep
:
{
- : valid
+ : normal
:
{
- echo 'bar' >=bar;
+ cat <<EOI >=bar.h;
+ bar
+ EOI
cat <<EOI >=buildfile;
- s = $process.run(cat bar)
- foo:
- {{
- depdb clear
+ define h: file
+ h{*}: extension = h
- s1 = 'abc'
- s2 = 'xyz'
+ ./: h{foo baz}
- if echo "$s" >>>? 'bar'
- v = "$s1"
- else
- echo "$s2" | set v
- end
+ h{foo}:
+ {{
+ # Note that strictly speaking we should return $out_base/baz.h
+ # on the second invocation (since it now exists). But our dyndep
+ # machinery skips the entry which it has already seen, so this
+ # works for now.
+ #
+ depdb dyndep "-I$out_base" --what=header --default-type=h -- \
+ echo "$out_base/foo.h: $src_base/bar.h baz.h"
- depdb string "$v"
+ diag gen $>
- echo "$v" >$path($>)
+ cat $path($<) >$path($>)
}}
- EOI
- $* 2>'echo file{foo}';
- cat <<<foo >'abc';
-
- $* 2>/'info: dir{./} is up to date';
-
- echo 'baz' >=bar;
- $* 2>'echo file{foo}';
- cat <<<foo >'xyz';
-
- $* clean 2>-
- }
-
- : invalid
- :
- {
- cat <<EOI >=buildfile;
- foo:
+ h{baz}:
{{
- v = 'abc'
- echo "$v" >$path($>)
- depdb string "$v"
+ diag gen $>
+ echo baz >$path($>)
}}
EOI
- $* 2>>~%EOE% != 0;
- buildfile:4:3: error: disallowed command in depdb preamble
- info: only variable assignments are allowed in depdb preamble
- buildfile:5:3: info: depdb preamble ends here
- %.+
+ $* 2>>EOE;
+ gen h{baz}
+ gen h{foo}
EOE
+ cat foo.h >>EOO;
+ bar
+ baz
+ EOO
+
$* clean 2>-
}
- : temp-dir
+ : byproduct
:
{
+ cat <<EOI >=bar.h;
+ bar
+ EOI
+
cat <<EOI >=buildfile;
- foo:
+ define h: file
+ h{*}: extension = h
+
+ h{foo}: h{baz}
{{
- touch $~/f | set dummy
+ o = $path($>)
+ t = $path($>).t
- if test -f $~/f
- v = "yes"
- else
- v = "no"
- end
+ depdb dyndep --byproduct --what=header --default-type=h --file $t
- depdb string "$v"
- diag echo $>
+ diag gen $>
+ cat $src_base/bar.h $out_base/baz.h >$o
+ echo "$out_base/foo.h: $src_base/bar.h $out_base/baz.h" >$t
+ }}
- test -f $~/f
- echo "$v" >$path($>)
+ h{baz}:
+ {{
+ diag gen $>
+ echo baz >$path($>)
}}
EOI
- $* 2>'echo file{foo.}';
+ $* 2>>EOE;
+ gen h{baz}
+ gen h{foo}
+ EOE
+
+ cat foo.h >>EOO;
+ bar
+ baz
+ EOO
$* clean 2>-
}
@@ -506,7 +642,7 @@ posix = ($cxx.target.class != 'windows')
EOI
$* test 2>>EOE;
- cp file{foo}
+ cp file{bar} -> file{foo}
test file{foo}
EOE
@@ -568,7 +704,7 @@ posix = ($cxx.target.class != 'windows')
EOI
$* test 2>>EOE;
- cp file{foo}
+ cp file{bar} -> file{foo}
test file{foo}
bar
EOE
@@ -605,6 +741,30 @@ posix = ($cxx.target.class != 'windows')
$* clean 2>-
}
+: canned-cmdline
+:
+{
+ cat <<EOI >=buildfile;
+ ./:
+ {{
+ x = echo >|
+ y = [cmdline] echo >|
+ diag update $>
+ $x foo
+ $y bar
+ ([cmdline] $x) baz
+ }}
+ EOI
+
+ $* >> EOO 2>>/EOE
+ bar
+ baz
+ EOO
+ update dir{./}
+ >| foo
+ EOE
+}
+
: timeout
:
if $posix
@@ -628,8 +788,9 @@ if $posix
EOI
$* 2>>~%EOE% != 0;
- update file{foo}
- buildfile:6:3: error: ^sleep terminated: execution timeout expired
+ update file{bar} -> file{foo}
+ buildfile:6:3: error: process ^sleep terminated: execution timeout expired
+ info: command line: sleep 5
info: while updating file{foo}
%.+
EOE
@@ -653,7 +814,7 @@ if $posix
EOI
$* 2>>EOE;
- update file{foo}
+ update file{bar} -> file{foo}
EOE
$* clean 2>-
@@ -680,16 +841,18 @@ if $posix
EOI
$* test config.test.timeout=1 2>>~%EOE% != 0;
- cp file{foo}
+ cp file{bar} -> file{foo}
test file{foo}
- buildfile:7:3: error: ^sleep terminated: execution timeout expired
+ buildfile:7:3: error: process ^sleep terminated: execution timeout expired
+ info: command line: sleep 5
info: while testing file{foo}
%.+
EOE
$* test config.test.timeout=/1 2>>~%EOE% != 0;
test file{foo}
- buildfile:7:3: error: ^sleep terminated: execution timeout expired
+ buildfile:7:3: error: process ^sleep terminated: execution timeout expired
+ info: command line: sleep 5
info: while testing file{foo}
%.+
EOE
@@ -716,7 +879,7 @@ if $posix
EOI
$* test config.test.timeout=3 2>>EOE;
- cp file{foo}
+ cp file{bar} -> file{foo}
test file{foo}
EOE
@@ -736,11 +899,933 @@ if $posix
alias{~'/f(.+)/'}: alias{~'/b\1/'}
{{
- diag $< $>
+ diag frob $< -> $>
}}
EOI
$* 2>>EOE
- alias{bar} alias{far}
+ frob alias{bar} -> alias{far}
EOE
}
+
+: loop
+:
+{
+ : while
+ :
+ {
+ : basics
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ while test -f $p != 0
+ cp $path($<) $p
+ end
+ }}
+ EOI
+
+ $* 2>'cp file{bar} -> file{foo}';
+
+ cat <<<foo >'bar';
+
+ $* clean 2>-
+ }
+
+ : exit
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ while test -f $p != 0
+ touch $p
+ exit
+ cp $path($<) $p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+
+ cat <<<foo >:'';
+
+ $* clean 2>-
+ }
+
+ : error
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ while test -f $p != 0
+ touch $p
+ exit 'fed up'
+ cp $path($<) $p
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ gen file{foo}
+ buildfile:8:5: error: fed up
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : depdb
+ :
+ {
+ : inside
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ while test -f $p != 0
+ depdb hash $p
+ cp $path($<) $p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:5:5: error: 'depdb' call inside flow control construct
+ EOE
+ }
+
+ : after-commands
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ while test -f $p != 0
+ cp $path($<) $p
+ end
+
+ depdb hash $p
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ buildfile:5:5: error: disallowed command in depdb preamble
+ info: only variable assignments are allowed in depdb preamble
+ buildfile:8:3: info: depdb preamble ends here
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : after-vars
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($<)
+
+ h =
+ while test -f $p != 0
+ h += $p
+ end
+
+ depdb hash $p
+
+ cat $p >$path($>)
+ }}
+ EOI
+
+ $* 2>'cat file{bar} -> file{foo}';
+ $* clean 2>-
+ }
+ }
+ }
+
+ : for
+ :
+ {
+ : form-1
+ :
+ : for x: ...
+ :
+ {
+ : basics
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar baz
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'cat file{bar} -> file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ baz
+ EOO
+
+ $* clean 2>-
+ }
+
+ : pair
+ :
+ {
+ mkdir -p src/build;
+ echo 'bar' >=src/bar;
+ echo 'baz' >=src/baz;
+
+ echo 'project =' >=src/build/bootstrap.build;
+
+ cat <<EOI >=src/buildfile;
+ foo: file{bar}@./ file{baz}@./
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* src/@out/ 2>>/EOE;
+ mkdir fsdir{out/}
+ cat src/file{bar} -> out/file{foo}
+ EOE
+
+ cat <<<out/foo >>EOO;
+ bar
+ baz
+ EOO
+
+ $* 'clean:' src/@out/ 2>-
+ }
+
+ : special-var
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for ~: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:6:7: error: attempt to set '~' special variable
+ EOE
+ }
+
+ : exit
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ exit
+ end
+ }}
+ EOI
+
+ $* 2>'cat file{bar} -> file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ EOO
+
+ $* clean 2>-
+ }
+
+ : error
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ exit 'fed up'
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ cat file{bar} -> file{foo}
+ buildfile:8:5: error: fed up
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : depdb
+ :
+ {
+ : inside
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ for f: $<
+ depdb hash $f
+ end
+
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:4:5: error: 'depdb' call inside flow control construct
+ EOE
+ }
+
+ : after-commands
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ for f: $<
+ echo $path($f) >-
+ end
+
+ depdb hash a
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ buildfile:4:5: error: disallowed command in depdb preamble
+ info: only variable assignments are allowed in depdb preamble
+ buildfile:7:3: info: depdb preamble ends here
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : after-vars
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ h =
+ for f: $<
+ h += $path($f)
+ end
+
+ depdb hash $h
+
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'cat file{bar} -> file{foo}';
+ $* clean 2>-
+ }
+ }
+ }
+
+ : form-2
+ :
+ : ... | for x
+ :
+ {
+ : basics
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar baz
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for -w f
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ baz
+ EOO
+
+ $* clean 2>-
+ }
+
+ : special-var
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for ~
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ gen file{foo}
+ buildfile:8:3: error: attempt to set '~' special variable
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : misuse
+ :
+ {
+ : after-var
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for x:
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ gen file{foo}
+ buildfile:8:3: error: for: ':' after variable name
+ %.+
+ EOE
+
+ $* clean 2>-
+ }
+
+ : after-attrs
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for x [path]:
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ gen file{foo}
+ <attributes>:1:7: error: whitespace required after attributes
+ <attributes>:1:1: info: use the '\[' escape sequence if this is a wildcard pattern
+ buildfile:8:3: info: while parsing attributes '[path]:'
+ %.+
+ EOE
+
+ $* clean 2>-
+ }
+ }
+
+ : exit
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for -w f
+ cat $f >>$p
+ exit
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ EOO
+
+ $* clean 2>-
+ }
+
+ : error
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for -w f
+ cat $f >>$p
+ exit 'fed up'
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ gen file{foo}
+ buildfile:10:5: error: fed up
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : depdb
+ :
+ {
+ : inside
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ echo $path($<) | for -w f
+ depdb hash $f
+ end
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for -w f
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:4:5: error: 'depdb' call inside flow control construct
+ EOE
+ }
+
+ : after-commands
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ echo $path($<) | for -w f
+ echo $f >-
+ end
+
+ depdb hash $p
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ buildfile:4:5: error: disallowed command in depdb preamble
+ info: only variable assignments are allowed in depdb preamble
+ buildfile:7:3: info: depdb preamble ends here
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : after-vars
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ h =
+ echo $path($<) | for -w f
+ h += $f
+ end
+
+ depdb hash $h
+
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+ $* clean 2>-
+ }
+ }
+ }
+
+ : form-3
+ :
+ : for x <...
+ :
+ {
+ : basics
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar baz
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ for -w f <<"EOF"
+ $path($<)
+ EOF
+ cat $f >>$p
+ end
+
+ for <<"EOF" -w f
+ $path($<)
+ EOF
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ baz
+ bar
+ baz
+ EOO
+
+ $* clean 2>-
+ }
+
+ : quoting
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar baz
+ {{
+ n = 'gen'
+ diag "($n)" ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ o = -w
+ for "$o" f <<"EOF"
+ $path($<)
+ EOF
+ cat $f >>$p
+ end
+
+ o = -n
+ for "($o)" f <<"EOF"
+ $path($<)
+ EOF
+ echo $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+
+ cat <<<foo >>~%EOO%;
+ bar
+ baz
+ %.+bar .+baz%
+ EOO
+
+ $* clean 2>-
+ }
+
+ : special-var
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for ~ <<<$path($<)
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:6:6: error: attempt to set '~' special variable
+ EOE
+ }
+
+ : exit
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f <<<$path($<)
+ cat $f >>$p
+ exit
+ end
+ }}
+ EOI
+
+ $* 2>'cat file{bar} -> file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ EOO
+
+ $* clean 2>-
+ }
+
+ : error
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f <<<$path($<)
+ cat $f >>$p
+ exit 'fed up'
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ cat file{bar} -> file{foo}
+ buildfile:8:5: error: fed up
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : depdb
+ :
+ {
+ : inside
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ for -w f <<<$path($<)
+ depdb hash $f
+ end
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for -w f
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:4:5: error: 'depdb' call inside flow control construct
+ EOE
+ }
+
+ : after-commands
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ for -w f <<<$path($<)
+ echo $f >-
+ end
+
+ depdb hash a
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ buildfile:4:5: error: disallowed command in depdb preamble
+ info: only variable assignments are allowed in depdb preamble
+ buildfile:7:3: info: depdb preamble ends here
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : after-vars
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ h =
+ for -w f <<<$path($<)
+ h += $f
+ end
+
+ depdb hash $h
+
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+ $* clean 2>-
+ }
+ }
+ }
+ }
+}
diff --git a/tests/recipe/cxx/testscript b/tests/recipe/cxx/testscript
index c94148e..323d671 100644
--- a/tests/recipe/cxx/testscript
+++ b/tests/recipe/cxx/testscript
@@ -81,7 +81,7 @@ if (!$static && $test.target == $build.host)
return r;
if (verb == 1)
- text << "cp " << t;
+ print_diag ("cp", s, t);
else if (verb >= 2)
text << "cp " << sp << ' ' << tp;
@@ -93,7 +93,7 @@ if (!$static && $test.target == $build.host)
$* 2>>~%EOE%;
%^(c\+\+|ld).*%+
- cp file{foo}
+ cp file{bar} -> file{foo}
EOE
cat <<<foo >'bar';
@@ -141,7 +141,7 @@ if (!$static && $test.target == $build.host)
const path& tp (t.path ());
if (verb == 1)
- text << "test " << t;
+ print_diag ("test", t);
else if (verb >= 2)
text << "cat " << tp;
@@ -157,7 +157,7 @@ if (!$static && $test.target == $build.host)
$* test 2>>~%EOE%;
%^(c\+\+|ld).*%+
- cp file{foo}
+ cp file{bar} -> file{foo}
test file{foo}
bar
EOE
@@ -180,9 +180,9 @@ if (!$static && $test.target == $build.host)
--
recipe
- apply (action, target& t) const override
+ apply (action a, target& t) const override
{
- const auto& mrs (t.data<regex_match_results> ());
+ const auto& mrs (t.data<regex_match_results> (a));
return [this, mr = mrs.str (1)] (action a, const target& t)
{
diff --git a/tests/test/script/builtin/sleep.testscript b/tests/test/script/builtin/sleep.testscript
index e1410ac..c044027 100644
--- a/tests/test/script/builtin/sleep.testscript
+++ b/tests/test/script/builtin/sleep.testscript
@@ -13,7 +13,7 @@ $c <'sleep 1' && $b
: failure
:
$c <'env -t 1 -- sleep 86400' && $b 2>>~%EOE% != 0
- %testscript:.*: error: sleep terminated: execution timeout expired%
+ %testscript:.*: error: builtin sleep terminated: execution timeout expired%
%.
EOE
diff --git a/tests/test/script/common.testscript b/tests/test/script/common.testscript
index 4469d1c..651e056 100644
--- a/tests/test/script/common.testscript
+++ b/tests/test/script/common.testscript
@@ -31,7 +31,7 @@ end
# Note that the buildfile is clever hack that relies on the first target
# automatically becoming dir{./}'s prerequisite.
#
-c = cat >=testscript
-b = $0 --no-default-options --serial-stop --quiet --buildfile - test \
+c = [cmdline] cat >=testscript
+b = [cmdline] $0 --no-default-options --serial-stop --quiet --buildfile - test \
<"'testscript{testscript}: \$target'" \
&?test/***
diff --git a/tests/test/script/runner/expr.testscript b/tests/test/script/runner/expr.testscript
index 98e495f..95d4bed 100644
--- a/tests/test/script/runner/expr.testscript
+++ b/tests/test/script/runner/expr.testscript
@@ -20,7 +20,7 @@
true = '$* >| -o'
false = '$* -s 1 >| -o'
- bf = $b 2>-
+ bf = [cmdline] $b 2>-
: true
:
diff --git a/tests/test/script/runner/for.testscript b/tests/test/script/runner/for.testscript
new file mode 100644
index 0000000..f43fcc2
--- /dev/null
+++ b/tests/test/script/runner/for.testscript
@@ -0,0 +1,502 @@
+# File : tests/test/script/runner/for.testscript
+# license : MIT; see accompanying LICENSE file
+
+.include ../common.testscript
+
+: form-1
+:
+: for x: ...
+:
+{
+ : basics
+ :
+ $c <<EOI && $b >>EOO
+ for x: a b
+ echo "$x" >|
+ end
+ EOI
+ a
+ b
+ EOO
+
+ : test-options
+ :
+ $c <<EOI && $b >>~%EOO%
+ for test.options: -a -b
+ echo $* >|
+ end
+ EOI
+ %.+ -a%
+ %.+ -b%
+ EOO
+
+ : special-var
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ for ~: -a -b
+ echo $~ >|
+ end
+ EOI
+ testscript:1:5: error: attempt to set '~' variable directly
+ EOE
+
+ : exit
+ :
+ $c <<EOI && $b >>EOO
+ for x: a b
+ echo "$x" >|
+ exit
+ end
+ EOI
+ a
+ EOO
+
+ : error
+ :
+ $c <<EOI && $b >>EOO 2>>EOE != 0
+ for x: a b
+ echo "$x" >|
+ exit 'fed up'
+ end
+ EOI
+ a
+ EOO
+ testscript:3:3: error: fed up
+ info: test id: 1
+ EOE
+}
+
+: form-2
+:
+: ... | for x
+:
+{
+ : whitespace-split
+ :
+ $c <<EOI && $b >>EOO
+ echo " a b " | for -w x
+ echo "'$x'" >|
+ end
+ EOI
+ 'a'
+ 'b'
+ EOO
+
+ : newline-split
+ :
+ $c <<EOI && $b >>EOO
+ cat <<EOF | for -n x
+
+
+ a
+
+
+ b
+
+ EOF
+ echo "'$x'" >|
+ end
+ EOI
+ ''
+ ''
+ 'a'
+ ''
+ ''
+ 'b'
+ ''
+ EOO
+
+ : typed
+ :
+ $c <<EOI && $b >>/EOO
+ echo "a b" | for -w x [dir_path]
+ echo $x >|
+ end
+ EOI
+ a/
+ b/
+ EOO
+
+ : nested
+ :
+ $c <<EOI && $b >>EOO
+ echo "a b" | for -w x
+ echo "x y" | for -w y
+ echo "'$x $y'" >|
+ end
+ end
+ EOI
+ 'a x'
+ 'a y'
+ 'b x'
+ 'b y'
+ EOO
+
+ : nested-diag
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ echo "a b" | for -w x
+ echo "x y" | for -w y
+ echo "'$x $y'" >"'a x'"
+ end
+ end
+ EOI
+ testscript:3:5: error: echo stdout doesn't match expected
+ info: stdout: test/1/stdout-i1-i2-n3
+ info: expected stdout: test/1/stdout-i1-i2-n3.orig
+ info: stdout diff: test/1/stdout-i1-i2-n3.diff
+ %.+
+ EOE
+
+ : nested-diag-test-id
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ echo "a b" | for -w x
+ echo "x y" | for -w y
+ test -f $x$y
+ end
+ end
+ EOI
+ testscript:3:5: error: builtin test exited with code 1
+ info: test id: 1
+ EOE
+
+ : var-value
+ :
+ $c <<EOI && $b >>EOO
+ x = 'x';
+ echo "a b" | for -w x
+ end;
+ echo $x >|
+ EOI
+ b
+ EOO
+
+ : both-sep-options
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ echo "a b" | for -n -w x
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: both -n|--newline and -w|--whitespace specified
+ %.+
+ EOE
+
+ : invalid-option
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ echo "a b" | for -a x
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: unknown option '-a'
+ %.+
+ EOE
+
+ : no-variable
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ echo "a b" | for -w
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ %.+
+ EOE
+
+ : special-var
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ echo "a b" | for -w ~
+ echo $* >|
+ end
+ EOI
+ testscript:1:1: error: attempt to set '~' variable directly
+ info: test id: 1
+ EOE
+
+ : unsep-attrs
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ echo "a b" | for -w x[string]
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: expected variable name instead of x[string]
+ info: test id: 1
+ EOE
+
+ : misuse
+ :
+ {
+ : after-var
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ echo "a b" | for v:
+ echo $v >|
+ end
+ EOI
+ testscript:1:19: error: expected newline instead of ':'
+ EOE
+
+ : after-attrs
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ echo "a b" | for v [string]:
+ echo $v >|
+ end
+ EOI
+ testscript:1:28: error: expected newline instead of ':'
+ EOE
+ }
+
+ : exit
+ :
+ $c <<EOI && $b >>EOO
+ echo "a b" | for x
+ echo "$x" >|
+ exit
+ end
+ EOI
+ a
+ EOO
+
+ : error
+ :
+ $c <<EOI && $b >>EOO 2>>EOE != 0
+ echo "a b" | for x
+ echo "$x" >|
+ exit 'fed up'
+ end
+ EOI
+ a
+ EOO
+ testscript:3:3: error: fed up
+ info: test id: 1
+ EOE
+}
+
+: form-3
+:
+: for x <...
+:
+{
+ : whitespace-split
+ :
+ $c <<EOI && $b >>EOO
+ for -w x <" a b "
+ echo "'$x'" >|
+ end
+ EOI
+ 'a'
+ 'b'
+ EOO
+
+ : quoted-opt
+ :
+ $c <<EOI && $b >>EOO
+ o = -n
+ for "$o" x <<EOF
+ a
+ b
+ EOF
+ echo "'$x'" >|
+ end;
+ for "($o)" x <<EOF
+ c
+ d
+ EOF
+ echo "'$x'" >|
+ end
+ EOI
+ 'a'
+ 'b'
+ 'c'
+ 'd'
+ EOO
+
+ : newline-split
+ :
+ $c <<EOI && $b >>EOO
+ for -n x <<EOF
+
+
+ a
+
+
+ b
+
+ EOF
+ echo "'$x'" >|
+ end
+ EOI
+ ''
+ ''
+ 'a'
+ ''
+ ''
+ 'b'
+ ''
+ EOO
+
+ : string-before-var
+ :
+ $c <<EOI && $b >>EOO
+ for <"a b" -w x
+ echo "'$x'" >|
+ end
+ EOI
+ 'a'
+ 'b'
+ EOO
+
+ : here-doc-before-var
+ :
+ $c <<EOI && $b >>EOO
+ for <<EOF -n x
+ a
+ b
+ EOF
+ echo "'$x'" >|
+ end
+ EOI
+ 'a'
+ 'b'
+ EOO
+
+ : typed
+ :
+ $c <<EOI && $b >>/EOO
+ for -w x [dir_path] <"a b"
+ echo $x >|
+ end
+ EOI
+ a/
+ b/
+ EOO
+
+ : typed-no-ops
+ :
+ $c <<EOI && $b >>/EOO
+ for x [dir_path] <"a b"
+ echo $x >|
+ end
+ EOI
+ a/
+ b/
+ EOO
+
+ : nested
+ :
+ $c <<EOI && $b >>EOO
+ for -w x <"a b"
+ for -w y <"x y"
+ echo "'$x $y'" >|
+ end
+ end
+ EOI
+ 'a x'
+ 'a y'
+ 'b x'
+ 'b y'
+ EOO
+
+ : nested-diag
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ for -w x <"a b"
+ for -w y <"x y"
+ echo "'$x $y'" >"'a x'"
+ end
+ end
+ EOI
+ testscript:3:5: error: echo stdout doesn't match expected
+ info: stdout: test/1/stdout-i1-i2-n3
+ info: expected stdout: test/1/stdout-i1-i2-n3.orig
+ info: stdout diff: test/1/stdout-i1-i2-n3.diff
+ %.+
+ EOE
+
+ : nested-diag-test-id
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ for -w x <"a b"
+ for -w y <"x y"
+ test -f $x$y
+ end
+ end
+ EOI
+ testscript:3:5: error: builtin test exited with code 1
+ info: test id: 1
+ EOE
+
+ : var-value
+ :
+ $c <<EOI && $b >>EOO
+ x = 'x';
+ for -w x <"a b"
+ end;
+ echo $x >|
+ EOI
+ b
+ EOO
+
+ : invalid-option
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ for -a x <"a b"
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: unknown option '-a'
+ %.
+ EOE
+
+
+ : no-variable
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ for -w <"a b"
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ %.
+ EOE
+
+ : special-var
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ for ~ <"a b"
+ echo $~ >|
+ end
+ EOI
+ testscript:1:5: error: attempt to set '~' variable directly
+ EOE
+
+ : exit
+ :
+ $c <<EOI && $b >>EOO
+ for x <"a b"
+ echo "$x" >|
+ exit
+ end
+ EOI
+ a
+ EOO
+
+ : error
+ :
+ $c <<EOI && $b >>EOO 2>>EOE != 0
+ for x <"a b"
+ echo "$x" >|
+ exit 'fed up'
+ end
+ EOI
+ a
+ EOO
+ testscript:3:3: error: fed up
+ info: test id: 1
+ EOE
+}
diff --git a/tests/test/script/runner/pipe.testscript b/tests/test/script/runner/pipe.testscript
index 205fd55..cdd30a6 100644
--- a/tests/test/script/runner/pipe.testscript
+++ b/tests/test/script/runner/pipe.testscript
@@ -6,7 +6,6 @@
$c <'cat <foo | $* -i 1 >foo' && $b : builtin-to-process
$c <'$* -o foo | cat >foo' && $b : process-to-builtin
-
: failure
:
: Note that while both commands for the pipe are faulty the diagnostics for
@@ -15,19 +14,28 @@ $c <'$* -o foo | cat >foo' && $b : process-to-builtin
{
: exit-code
:
- $c <'$* -o foo -s 1 | $* -i 1 >foo -s 2' && $b 2>>/~%EOE% != 0
- %testscript:1:1: error: .+ exited with code 2%
- info: stdout: test/1/stdout-2
+ : Also verify that the command line is printed.
+ :
+ $c <'$* -o foo -s 1 | $* -i 1 -s 2 >foo' && $b --verbose 1 2>>/~%EOE% != 0
+ %.
+ %testscript:1:1: error: process .+ exited with code 1%
+ % info: command line: .+driver.* -o foo -s 1%
+ info: test id: 1
+ %.
+ %testscript:1:1: error: process .+ exited with code 2%
+ % info: command line: .+driver.* -i 1 -s 2%
+ info: stdout: test/1/stdout-c2
info: test id: 1
+ %.{2}
EOE
: stderr
:
$c <'$* -o foo -e foo 2>bar | $* -i 2 2>baz' && $b 2>>/~%EOE% != 0
%testscript:1:1: error: .+ stderr doesn't match expected%
- info: stderr: test/1/stderr-2
- info: expected stderr: test/1/stderr-2.orig
- info: stderr diff: test/1/stderr-2.diff
+ info: stderr: test/1/stderr-c2
+ info: expected stderr: test/1/stderr-c2.orig
+ info: stderr diff: test/1/stderr-c2.diff
%.{3}
-baz
+foo
diff --git a/tests/test/script/runner/redirect.testscript b/tests/test/script/runner/redirect.testscript
index 0fe3aa3..209c4ce 100644
--- a/tests/test/script/runner/redirect.testscript
+++ b/tests/test/script/runner/redirect.testscript
@@ -654,9 +654,9 @@ psr = ($cxx.target.class != 'windows' ? '/' : '\\') # Path separator in regex.
$* -o bar >?out
EOI
%testscript:2: error: ../../../../../driver(.exe)? stdout doesn't match expected%
- info: stdout: test/1/stdout-2
+ info: stdout: test/1/stdout-n2
info: expected stdout: test/1/out
- info: stdout diff: test/1/stdout-2.diff
+ info: stdout diff: test/1/stdout-n2.diff
%--- \.*%
%\+\+\+ \.*%
%@@ \.*%
diff --git a/tests/test/script/runner/set.testscript b/tests/test/script/runner/set.testscript
index b2944a3..1800a7d 100644
--- a/tests/test/script/runner/set.testscript
+++ b/tests/test/script/runner/set.testscript
@@ -76,7 +76,7 @@
: empty-attrs
:
- $c <"set '' baz" && $b 2>>EOE != 0
+ $c <"set baz ''" && $b 2>>EOE != 0
testscript:1:1: error: set: empty variable attributes
info: test id: 1
EOE
@@ -300,7 +300,7 @@
$c <<EOI && $b 2>>~%EOE% != 0
$* -o 'foo' -l 10 | env -t 1 -- set bar
EOI
- %testscript:.*: error: set terminated: execution timeout expired%
+ %testscript:.*: error: .+driver.* terminated: execution timeout expired%
%.
EOE
@@ -326,23 +326,218 @@
echo "$s" >=f;
$* -o 'foo' -l 10 | cat f - | env -t 2 -- set bar
EOI
- %testscript:.*: error: set terminated: execution timeout expired%
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: builtin cat terminated: execution timeout expired%
%.
EOE
: success
:
- : Note that the cat builtin ends up with the 'broken pipe' diagnostics or
- : similar.
- :
$c <<EOI && $b
echo "$s" >=f;
timeout --success 2;
- $* -o 'foo' -l 10 | cat f - 2>>~%EOE% | set bar
- %cat: .+%
- EOE
+
+ # Suppress cat's 'broken pipe' diagnostics.
+ #
+ $* -o 'foo' -l 10 | cat f - 2>- | set bar
EOI
}
+
+ : split
+ :
+ : Test various splitting modes as above, but now reading the stream in the
+ : non-blocking mode.
+ :
+ {
+ : whitespace-separated-list
+ :
+ {
+ : non-exact
+ :
+ {
+ : non-empty
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -w baz <' foo bar ';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"foo" "bar"'
+ EOI
+
+ : empty
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -w baz <:'';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >''
+ EOI
+
+ : spaces
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -w baz <' ';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >''
+ EOI
+ }
+
+ : exact
+ :
+ {
+ : trailing-ws
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set --exact --whitespace baz <' foo bar ';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"foo" "bar" ""'
+ EOI
+
+ : no-trailing-ws
+ :
+ : Note that we need to strip the default trailing newline as well with the
+ : ':' modifier.
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -e -w baz <:' foo bar';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"foo" "bar"'
+ EOI
+
+ : empty
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -e -w baz <:'';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >''
+ EOI
+
+ : spaces
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -e -w baz <' ';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'""'
+ EOI
+ }
+ }
+
+ : newline-separated-list
+ :
+ {
+ : non-exact
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -n baz <<EOF;
+
+ foo
+
+ bar
+
+ EOF
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"" "foo" "" "bar" ""'
+ EOI
+
+ : exact
+ :
+ {
+ : trailing-newline
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set --exact --newline baz <<EOF;
+
+ foo
+
+ bar
+
+ EOF
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"" "foo" "" "bar" "" ""'
+ EOI
+
+ : no-trailing-newline
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set --exact --newline baz <<:EOF;
+
+ foo
+
+ bar
+ EOF
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"" "foo" "" "bar"'
+ EOI
+ }
+ }
+
+ : string
+ :
+ {
+ : non-exact
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set baz <<EOF;
+
+ foo
+
+ bar
+
+ EOF
+ echo ($baz[0]) >>EOO
+
+ foo
+
+ bar
+
+ EOO
+ EOI
+
+ : exact
+ :
+ : Note that echo adds the trailing newline, so EOF and EOO here-documents
+ : differ by this newline.
+ :
+ {
+ : trailing-newline
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -e baz <<EOF;
+
+ foo
+
+ bar
+ EOF
+ echo ($baz[0]) >>EOO
+
+ foo
+
+ bar
+
+ EOO
+ EOI
+
+ : no-trailing-newline
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -e baz <<:EOF;
+
+ foo
+
+ bar
+ EOF
+ echo ($baz[0]) >>EOO
+
+ foo
+
+ bar
+ EOO
+ EOI
+ }
+ }
+ }
}
: attributes
@@ -351,14 +546,14 @@
: dir_path
:
$c <<EOI && $b
- set [dir_path] bar <'foo';
+ set bar [dir_path] <'foo';
echo $bar >/'foo/'
EOI
: null
:
$c <<EOI && $b
- set [null] foo <-;
+ set foo [null] <-;
echo $foo >''
EOI
@@ -376,7 +571,7 @@
: empty-brackets
:
$c <<EOI && $b 2>>EOE != 0
- set -w '[]' baz <'foo bar';
+ set -w baz '[]' <'foo bar';
echo "$baz"
EOI
testscript:2:8: error: concatenating variable expansion contains multiple values
@@ -385,7 +580,7 @@
: no-left-bracket
:
$c <<EOI && $b 2>>EOE != 0
- set -w x baz
+ set -w baz x
EOI
<attributes>:1:1: error: expected '[' instead of 'x'
testscript:1:1: info: while parsing attributes 'x'
@@ -395,7 +590,7 @@
: unknown
:
$c <<EOI && $b 2>>EOE != 0
- set -w [x] baz
+ set -w baz [x]
EOI
<attributes>:1:1: error: unknown value attribute x
testscript:1:1: info: while parsing attributes '[x]'
@@ -405,7 +600,7 @@
: junk
:
$c <<EOI && $b 2>>EOE != 0
- set -w '[string] x' baz
+ set -w baz '[string] x'
EOI
<attributes>:1:10: error: trailing junk after ']'
testscript:1:1: info: while parsing attributes '[string] x'
diff --git a/tests/test/script/runner/status.testscript b/tests/test/script/runner/status.testscript
index e4586d9..461fd5c 100644
--- a/tests/test/script/runner/status.testscript
+++ b/tests/test/script/runner/status.testscript
@@ -15,7 +15,7 @@ b += --no-column
: false
:
$c <'$* -s 1 == 0' && $b 2>>/~%EOE%d != 0
- %testscript:1: error: ../../../../driver(.exe)? exit code 1 != 0%
+ %testscript:1: error: process ../../../../driver(.exe)? exit code 1 != 0%
info: test id: 1
EOE
}
@@ -30,7 +30,7 @@ b += --no-column
: false
:
$c <'$* -s 1 != 1' && $b 2>>/~%EOE% != 0
- %testscript:1: error: ../../../../driver(.exe)? exit code 1 == 1%
+ %testscript:1: error: process ../../../../driver(.exe)? exit code 1 == 1%
info: test id: 1
EOE
}
@@ -38,7 +38,7 @@ b += --no-column
: error
:
$c <'$* -s 1 -e "Error"' && $b 2>>/~%EOE% != 0
-%testscript:1: error: ../../../driver(.exe)? exited with code 1%
+%testscript:1: error: process ../../../driver(.exe)? exited with code 1%
info: stderr: test/1/stderr
Error
info: test id: 1
@@ -47,7 +47,7 @@ EOE
: error-check
:
$c <'$* -s 1 -e "Error" == 0' && $b 2>>/~%EOE% != 0
-%testscript:1: error: ../../../driver(.exe)? exit code 1 != 0%
+%testscript:1: error: process ../../../driver(.exe)? exit code 1 != 0%
info: stderr: test/1/stderr
Error
info: test id: 1
diff --git a/tests/test/script/runner/timeout.testscript b/tests/test/script/runner/timeout.testscript
index 5f87d39..f9b6ec7 100644
--- a/tests/test/script/runner/timeout.testscript
+++ b/tests/test/script/runner/timeout.testscript
@@ -424,7 +424,9 @@
$c <<EOI && $b 2>>~%EOE% != 0
env -t 1 -- $* -l 86400 -o 'foo' | touch $~/foo/bar
EOI
- %testscript:.*: error: touch exited with code 1%
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: builtin touch exited with code 1%
%.+
EOE
}
@@ -435,42 +437,54 @@
: prog-tm-prog
:
$c <'$* -l 10 | env -t 1 -- $* -i 0' && $b 2>>~%EOE% != 0
- %testscript:.*: error: .+driver.* terminated: execution timeout expired%
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
%.
EOE
: tm-prog-prog
:
$c <'env -t 1 -- $* -l 10 | $* -i 0' && $b 2>>~%EOE% != 0
- %testscript:.*: error: .+driver.* terminated: execution timeout expired%
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
%.
EOE
: tm-cat-prog
:
- $c <'env -t 1 -- cat <"test" | $* -l 10' && $b 2>>~%EOE% != 0
- %testscript:.*: error: cat terminated: execution timeout expired%
+ $c <'env -t 3 -- cat <"test" | $* -l 10' && $b 2>>~%EOE% != 0
+ %testscript:.*: error: builtin cat terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
%.
EOE
: cat-tm-prog
:
$c <'cat <"test" | env -t 1 -- $* -l 10' && $b 2>>~%EOE% != 0
- %testscript:.*: error: .+driver.* terminated: execution timeout expired%
+ %testscript:.*: error: builtin cat terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
%.
EOE
: tm-prog-cat
:
$c <'env -t 1 -- $* -l 10 | cat >-' && $b 2>>~%EOE% != 0
- %testscript:.*: error: .+driver.* terminated: execution timeout expired%
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: builtin cat terminated: execution timeout expired%
%.
EOE
: tm-echo-prog
:
- $c <'env -t 1 -- echo "test" | $* -l 10' && $b 2>>~%EOE% != 0
- %testscript:.*: error: echo terminated: execution timeout expired%
+ $c <'env -t 3 -- echo "test" | $* -l 10' && $b 2>>~%EOE% != 0
+ %testscript:.*: error: builtin echo terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
%.
EOE
diff --git a/tests/test/script/runner/while.testscript b/tests/test/script/runner/while.testscript
new file mode 100644
index 0000000..1c58827
--- /dev/null
+++ b/tests/test/script/runner/while.testscript
@@ -0,0 +1,16 @@
+# file : tests/test/script/runner/while.testscript
+# license : MIT; see accompanying LICENSE file
+
+.include ../common.testscript
+
+: basics
+:
+$c <<EOI && $b >>EOO
+ while ($v != "aa")
+ echo "$v" >|
+ v = "$(v)a"
+ end
+ EOI
+
+ a
+ EOO
diff --git a/tests/test/simple/generated/driver.cxx b/tests/test/simple/generated/driver.cxx
index 89dacca..ca6dfcb 100644
--- a/tests/test/simple/generated/driver.cxx
+++ b/tests/test/simple/generated/driver.cxx
@@ -49,13 +49,23 @@ main (int argc, char* argv[])
}
else
{
- ifstream ifs (argv[i]);
+ istream* is;
+ ifstream ifs;
- if (!ifs.is_open ())
- cerr << "unable to open " << argv[1] << endl;
+ if (argv[i] != string ("-"))
+ {
+ ifs.open (argv[i]);
+
+ if (!ifs.is_open ())
+ cerr << "unable to open " << argv[1] << endl;
+
+ is = &ifs;
+ }
+ else
+ is = &cin;
string s;
- r = getline (ifs, s) && s == "1.2.3" ? 0 : 1;
+ r = getline (*is, s) && s == "1.2.3" ? 0 : 1;
}
return r;
diff --git a/tests/test/simple/generated/testscript b/tests/test/simple/generated/testscript
index 9ce40ba..49ddbbd 100644
--- a/tests/test/simple/generated/testscript
+++ b/tests/test/simple/generated/testscript
@@ -43,6 +43,123 @@ driver = $src_root/../exe{driver}
file{output}: in{output} $src_root/manifest #@@ in module
EOI
+: output-mismatch
+:
+{
+ # Get rid of --serial-stop --quiet.
+ #
+ test.options = $regex.apply($test.options, '^(--serial-stop|--quiet)$', '')
+
+ : verbose-0
+ :
+ {
+ echo '1.2.3' >=input;
+ echo '3.4.5' >=output;
+ $* -q <<EOI 2>>/~%EOE% != 0
+ driver = $src_root/../exe{driver}
+ ./: test = $driver
+ ./: $driver
+ ./: test.arguments = '-'
+ ./: file{input}: test.stdin = true
+ ./: file{output}: test.stdout = true
+ EOI
+ %.+
+ -3.4.5
+ error: test dir{./} failed
+ error: process diff exited with code 1
+ EOE
+ }
+
+ : verbose-1
+ :
+ {
+ echo '1.2.3' >=input;
+ echo '3.4.5' >=output;
+ $* <<EOI 2>>/~%EOE% != 0
+ driver = $src_root/../exe{driver}
+ ./: test = $driver
+ ./: $driver
+ ./: test.arguments = '-'
+ ./: file{input}: test.stdin = true
+ ./: file{output}: test.stdout = true
+ EOI
+ test dir{./}
+ %.+
+ -3.4.5
+ error: test dir{./} failed
+ error: process diff exited with code 1
+ % info: test command line: cat .+/input \| .+/driver.* - \| diff -u .+%
+ info: while testing dir{./}
+ info: failed to test dir{./}
+ EOE
+ }
+
+ : verbose-2
+ :
+ {
+ echo '1.2.3' >=input;
+ echo '3.4.5' >=output;
+ $* --verbose 2 <<EOI 2>>/~%EOE% != 0
+ driver = $src_root/../exe{driver}
+ ./: test = $driver
+ ./: $driver
+ ./: test.arguments = '-'
+ ./: file{input}: test.stdin = true
+ ./: file{output}: test.stdout = true
+ EOI
+ %cat .+/input \| .+/driver.* - \| diff -u .+%
+ %.+
+ -3.4.5
+ error: test dir{./} failed
+ error: process diff exited with code 1
+ info: while testing dir{./}
+ info: failed to test dir{./}
+ EOE
+ }
+
+ : verbose-3
+ :
+ {
+ echo '1.2.3' >=input;
+ echo '3.4.5' >=output;
+ $* --verbose 3 <<EOI 2>>/~%EOE% != 0
+ driver = $src_root/../exe{driver}
+ ./: test = $driver
+ ./: $driver
+ ./: test.arguments = '-'
+ ./: file{input}: test.stdin = true
+ ./: file{output}: test.stdout = true
+ EOI
+ %cat .+/input \| .+/driver.* - \| diff -u .+%
+ %.+
+ -3.4.5
+ %error: test .+dir\{.+\} failed%
+ error: process diff exited with code 1
+ % info: while testing .+dir\{.+\}%
+ %info: failed to test .+dir\{.+\}%
+ EOE
+ }
+
+ : input-not-found
+ :
+ {
+ echo '1.2.3' >=input;
+ echo '3.4.5' >=output;
+ $* -q <<EOI 2>>/~%EOE% != 0
+ driver = $src_root/../exe{driver}
+ ./: test = $driver
+ ./: $driver
+ ./: test.arguments = 'foo'
+ ./: file{input}: test.stdin = true
+ ./: file{output}: test.stdout = true
+ EOI
+ unable to open foo
+ error: test dir{./} failed
+ % error: process .+/driver.* exited with code 1%
+ EOE
+ }
+}
+
: timeout
:
{
@@ -59,25 +176,30 @@ EOI
./: $driver
EOI
error: test dir{./} failed
- % error: .+ -s terminated: execution timeout expired%
- % info: test command line: .+%
+ % error: process .+driver.* terminated: execution timeout expired%
EOE
- : output
+ : stdin-stdout
:
+ ln -s $src_base/input.in ./;
ln -s $src_base/output.in ./;
- $* config.test.timeout=1 &output &output.d <<EOI 2>>/~%EOE% != 0
+ $* config.test.timeout=1 --verbose 1 &input &input.d &output &output.d <<EOI 2>>/~%EOE% != 0
driver = $src_root/../exe{driver}
./: test = $driver
./: test.options = -s
./: $driver
+ ./: file{input}: test.stdin = true
./: file{output}: test.stdout = true
+ file{input}: in{input} $src_root/manifest #@@ in module
file{output}: in{output} $src_root/manifest #@@ in module
EOI
+ %version in\{.+\} -> .+%{2}
+ test dir{./}
error: test dir{./} failed
- % error: diff .+ terminated: execution timeout expired%
- % error: .+ -s terminated: execution timeout expired%
- % info: test command line: .+%
+ % error: process .+driver.* terminated: execution timeout expired%
+ % info: test command line: cat .+/input \| .+driver.* -s \| diff -u .+%
+ info: while testing dir{./}
+ info: failed to test dir{./}
EOE
}
@@ -94,8 +216,7 @@ EOI
./: $driver
EOI
error: test dir{./} failed
- % error: .+ -s terminated: execution timeout expired%
- % info: test command line: .+%
+ % error: process .+driver.* terminated: execution timeout expired%
EOE
}
}
diff --git a/tests/value/concat.testscript b/tests/value/concat.testscript
index 97391c4..69ec9fc 100644
--- a/tests/value/concat.testscript
+++ b/tests/value/concat.testscript
@@ -3,6 +3,48 @@
.include ../common.testscript
+: null
+:
+{
+ : untyped
+ :
+ $* <<EOI >>/EOO
+ x = [null]
+
+ print y "$x x"
+ print "x $x" y
+
+ print $x"x"
+ print "x"$x
+ print $x$x
+ EOI
+ y x
+ x y
+ x
+ x
+ {}
+ EOO
+
+ : string
+ :
+ $* <<EOI >>/EOO
+ x = [string,null]
+
+ print y "$x x"
+ print "x $x" y
+
+ print $x"x"
+ print "x"$x
+ print $x$x
+ EOI
+ y x
+ x y
+ x
+ x
+ {}
+ EOO
+}
+
: dir_path
:
{
diff --git a/tests/value/reverse.testscript b/tests/value/reverse.testscript
index 9f73981..921d14b 100644
--- a/tests/value/reverse.testscript
+++ b/tests/value/reverse.testscript
@@ -89,3 +89,58 @@
EOO
}
}
+
+: reduce
+:
+: Test empty simple value reduction heuristics.
+:
+{
+ : typed
+ :
+ $* <<EOI >>"EOO"
+ x = [string]
+ n = [string,null]
+ y = [strings] $x
+ y += $x
+ y += $n
+ print $size($y)
+
+ file{*}: y += $x
+ file{x}:
+ print $size($(file{x}: y))
+
+ for i: $x
+ print iteration
+
+ print $null($x[0])
+ EOI
+ 2
+ 3
+ iteration
+ false
+ EOO
+
+ : untyped
+ :
+ $* <<EOI >>"EOO"
+ x =
+ n = [null]
+ y = $x
+ y += $x
+ y += $n
+ print $size($y)
+
+ file{*}: y += $x
+ file{x}:
+ print $size($(file{x}: y))
+
+ for i: $x
+ print iteration
+
+ print $null($x[0])
+ EOI
+ 0
+ 0
+ true
+ EOO
+}
diff --git a/tests/variable/override/testscript b/tests/variable/override/testscript
index 0c8ef5b..7b973c0 100644
--- a/tests/variable/override/testscript
+++ b/tests/variable/override/testscript
@@ -8,18 +8,18 @@
{
: value-version
:
- $* x+=01 y+=01 <<EOI >>EOO
- x = [string] 0
- print $x
+ $* p.x+=01 p.y+=01 <<EOI >>EOO
+ p.x = [string] 0
+ print $p.x
- x = [uint64] 1
- print $x
+ p.x = [uint64] 1
+ print $p.x
- y = 0
- print $y
+ p.y = 0
+ print $p.y
- [uint64] y = [null]
- print $y
+ [uint64] p.y = [null]
+ print $p.y
EOI
001
2
@@ -29,21 +29,21 @@
: value-position
:
- $* x+=01 <<EOI >>EOO
- x = [string] 0
+ $* p.x+=01 <<EOI >>EOO
+ p.x = [string] 0
- print $x
+ print $p.x
dir/
{
- print $x
+ print $p.x
}
- dir/ x = [uint64] 1
+ dir/ p.x = [uint64] 1
- print $x
+ print $p.x
dir/
{
- print $x
+ print $p.x
}
EOI
@@ -59,17 +59,19 @@
: Test overriding cached target type/pattern-specific prepend/append
:
{
- $* x+=X <<EOI >>EOO
- x = 0
- file{*}: x += a
+ $* p.x+=X <<EOI >>EOO
+ p.x = 0
+ file{*}: p.x += a
- print $(file{foo}:x)
+ file{foo}:
- x = 1 # Should invalidate both caches.
- print $(file{foo}:x)
+ print $(file{foo}:p.x)
- file{*}: x += b # Should invalidate both caches.
- print $(file{foo}:x)
+ p.x = 1 # Should invalidate both caches.
+ print $(file{foo}:p.x)
+
+ file{*}: p.x += b # Should invalidate both caches.
+ print $(file{foo}:p.x)
EOI
0 a X
1 a X
@@ -82,24 +84,24 @@
{
: after
:
- $* x=1 x+=2 x=+0 <<EOI >>EOO
- print $x
+ $* p.x=1 p.x+=2 p.x=+0 <<EOI >>EOO
+ print $p.x
EOI
0 1 2
EOO
: before
:
- $* x+=2 x=+0 x=1 <<EOI >>EOO
- print $x
+ $* p.x+=2 p.x=+0 p.x=1 <<EOI >>EOO
+ print $p.x
EOI
1
EOO
: both
:
- $* x=+0 x=1 x+=2 <<EOI >>EOO
- print $x
+ $* p.x=+0 p.x=1 p.x+=2 <<EOI >>EOO
+ print $p.x
EOI
1 2
EOO
@@ -110,9 +112,9 @@
{
: assign
:
- $* x=0 !y=0 x=1 !y=1 <<EOI >>EOO
- print $x
- print $y
+ $* p.x=0 !p.y=0 p.x=1 !p.y=1 <<EOI >>EOO
+ print $p.x
+ print $p.y
EOI
1
1
@@ -120,16 +122,16 @@
: append
:
- $* x=0 x+=1 x+=2 <<EOI >>EOO
- print $x
+ $* p.x=0 p.x+=1 p.x+=2 <<EOI >>EOO
+ print $p.x
EOI
0 1 2
EOO
: prepend
:
- $* x=2 x=+1 x=+0 <<EOI >>EOO
- print $x
+ $* p.x=2 p.x=+1 p.x=+0 <<EOI >>EOO
+ print $p.x
EOI
0 1 2
EOO
diff --git a/tests/variable/private/buildfile b/tests/variable/private/buildfile
new file mode 100644
index 0000000..3b0d20c
--- /dev/null
+++ b/tests/variable/private/buildfile
@@ -0,0 +1,4 @@
+# file : tests/variable/private/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/variable/private/testscript b/tests/variable/private/testscript
new file mode 100644
index 0000000..ddb78fd
--- /dev/null
+++ b/tests/variable/private/testscript
@@ -0,0 +1,46 @@
+# file : tests/variable/private/testscript
+# license : MIT; see accompanying LICENSE file
+
+# Test public/private variable mode.
+
+buildfile = true
+test.arguments = 'noop(../)'
+
+.include ../../common.testscript
+
++cat <<EOI >=build/bootstrap.build
+project = test
+amalgamation =
+subprojects = subproj
+
+using install
+EOI
++cat <<EOI >=buildfile
+[string] foo = abc
+print $type($foo) $foo
+
+subproj/: install = false
+print $type($(subproj/: install)) $(subproj/: install)
+
+include subproj/
+EOI
+
+: subproj
+:
+mkdir build;
+cat <<EOI >=build/bootstrap.build;
+project = subproj
+EOI
+cat <<EOI >=buildfile;
+[uint64] foo = 0123
+print $type($foo) $foo
+
+[bool] install = true
+print $type($install) $install
+EOI
+$* >>EOO
+string abc
+path false
+uint64 123
+bool true
+EOO
diff --git a/tests/variable/target-specific/testscript b/tests/variable/target-specific/testscript
index 627d8ab..c52712b 100644
--- a/tests/variable/target-specific/testscript
+++ b/tests/variable/target-specific/testscript
@@ -65,13 +65,15 @@ print (foo: bar)
print (foo : bar)
print (foo/: bar)
print (foo/file{fox}: bar)
+print (file{fox}@./: bar)
EOI
-foo:bar
-foo:bar
-foo:bar
-foo:bar
-foo/:bar
-foo/file{fox}:bar
+bar:foo
+bar:foo
+bar:foo
+bar:foo
+bar:foo/
+bar:foo/file{fox}
+bar:file{fox}@./
EOO
: eval-qual-name-expected
diff --git a/tests/variable/target-type-pattern-specific/testscript b/tests/variable/target-type-pattern-specific/testscript
index 016380b..9c600ca 100644
--- a/tests/variable/target-type-pattern-specific/testscript
+++ b/tests/variable/target-type-pattern-specific/testscript
@@ -12,6 +12,9 @@ x = x
y = y
dir{*}: x = X
dir{*}: y += Y
+
+./:
+
print $(./: x)
print $(./: y)
EOI
@@ -26,6 +29,7 @@ dir{*}: x = y
x = z
dir{*-foo}: x = $x # 'z'
+bar-foo/:
print $(bar-foo/: x)
x = G
@@ -59,6 +63,7 @@ print $(file{x-foz}: x)
*: x1 = X1
{*}: x2 = X2
target{*}: x3 = X3
+file{x}:
print $(file{x}: x1)
print $(file{x}: x2)
print $(file{x}: x3)
@@ -89,6 +94,9 @@ dir{*}:
y += Y
z = $x # Note: from scope.
}
+
+./:
+
print $(./: x)
print $(./: y)
print $(./: z)
@@ -108,6 +116,9 @@ file{f*} file{b*}:
x = X
y += Y
}
+
+file{foo bar}:
+
print $(file{foo}: x)
print $(file{bar}: y)
EOI
@@ -123,6 +134,8 @@ EOO
$* <<EOI >>EOO
file{~/'.+\.txt'/i}: x = 1
+ file{foo.txt foo.TXT}:
+
print $(file{foo.txt}: x)
print $(file{foo.TXT}: x)
EOI
@@ -140,6 +153,8 @@ EOO
txt{~/'.+\.tx'/e}: x = 2
txt{~/'.+\.txt'/e}: x = 3
+ txt{foo.x foo.tx foo.txt foo.bar...}:
+
print $(txt{foo.x}: x)
print $(txt{foo.tx}: x)
print $(txt{foo.txt}: x)
@@ -157,6 +172,8 @@ EOO
x = 0
file{~/'(.+)-\1'/}: x = 1
+ file{foo-foo foo-bar}:
+
print $(file{foo-foo}: x)
print $(file{foo-bar}: x)
EOI
@@ -169,6 +186,8 @@ EOO
$* <<EOI >>EOO
foo/dir{~/b.+/}: x = 1
+ foo/dir{bar}:
+
print $(foo/dir{bar}: x)
EOI
1