aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore6
-rw-r--r--LICENSE2
-rw-r--r--NEWS694
-rw-r--r--bootstrap-mingw.bat2
-rw-r--r--bootstrap.gmake4
-rw-r--r--build/root.build15
-rw-r--r--build2/b.cxx333
-rw-r--r--build2/buildfile4
m---------config0
-rw-r--r--doc/.gitignore4
-rw-r--r--doc/buildfile278
-rwxr-xr-xdoc/cli.sh28
-rw-r--r--doc/manual.cli2628
-rw-r--r--doc/testscript.cli489
-rw-r--r--libbuild2/adhoc-rule-buildscript.cxx1529
-rw-r--r--libbuild2/adhoc-rule-buildscript.hxx27
-rw-r--r--libbuild2/adhoc-rule-cxx.cxx67
-rw-r--r--libbuild2/adhoc-rule-cxx.hxx26
-rw-r--r--libbuild2/adhoc-rule-regex-pattern.cxx135
-rw-r--r--libbuild2/adhoc-rule-regex-pattern.hxx4
-rw-r--r--libbuild2/algorithm.cxx1973
-rw-r--r--libbuild2/algorithm.hxx354
-rw-r--r--libbuild2/algorithm.ixx279
-rw-r--r--libbuild2/b-cmdline.cxx38
-rw-r--r--libbuild2/b-cmdline.hxx1
-rw-r--r--libbuild2/b-options.cxx404
-rw-r--r--libbuild2/b-options.hxx74
-rw-r--r--libbuild2/b-options.ixx104
-rw-r--r--libbuild2/b.cli175
-rw-r--r--libbuild2/bash/rule.cxx34
-rw-r--r--libbuild2/bash/rule.hxx9
-rw-r--r--libbuild2/bin/def-rule.cxx172
-rw-r--r--libbuild2/bin/guess.cxx73
-rw-r--r--libbuild2/bin/guess.hxx8
-rw-r--r--libbuild2/bin/init.cxx195
-rw-r--r--libbuild2/bin/init.hxx6
-rw-r--r--libbuild2/bin/rule.cxx143
-rw-r--r--libbuild2/bin/rule.hxx25
-rw-r--r--libbuild2/bin/target.cxx6
-rw-r--r--libbuild2/bin/target.hxx26
-rw-r--r--libbuild2/bin/utility.cxx9
-rw-r--r--libbuild2/build/script/builtin-options.cxx113
-rw-r--r--libbuild2/build/script/builtin-options.hxx68
-rw-r--r--libbuild2/build/script/builtin-options.ixx120
-rw-r--r--libbuild2/build/script/builtin.cli72
-rw-r--r--libbuild2/build/script/lexer+for-loop.test.testscript188
-rw-r--r--libbuild2/build/script/lexer.cxx48
-rw-r--r--libbuild2/build/script/lexer.hxx9
-rw-r--r--libbuild2/build/script/lexer.test.cxx1
-rw-r--r--libbuild2/build/script/parser+command-if.test.testscript2
-rw-r--r--libbuild2/build/script/parser+command-re-parse.test.testscript6
-rw-r--r--libbuild2/build/script/parser+diag.test.testscript106
-rw-r--r--libbuild2/build/script/parser+expansion.test.testscript6
-rw-r--r--libbuild2/build/script/parser+for.test.testscript656
-rw-r--r--libbuild2/build/script/parser+while.test.testscript133
-rw-r--r--libbuild2/build/script/parser.cxx1693
-rw-r--r--libbuild2/build/script/parser.hxx137
-rw-r--r--libbuild2/build/script/parser.test.cxx166
-rw-r--r--libbuild2/build/script/runner.cxx59
-rw-r--r--libbuild2/build/script/runner.hxx31
-rw-r--r--libbuild2/build/script/script.cxx54
-rw-r--r--libbuild2/build/script/script.hxx33
-rw-r--r--libbuild2/buildfile148
-rw-r--r--libbuild2/buildspec.cxx4
-rw-r--r--libbuild2/c/init.cxx333
-rw-r--r--libbuild2/c/init.hxx19
-rw-r--r--libbuild2/c/target.hxx3
-rw-r--r--libbuild2/cc/buildfile30
-rw-r--r--libbuild2/cc/common.cxx695
-rw-r--r--libbuild2/cc/common.hxx112
-rw-r--r--libbuild2/cc/common.txx19
-rw-r--r--libbuild2/cc/compile-rule.cxx2299
-rw-r--r--libbuild2/cc/compile-rule.hxx25
-rw-r--r--libbuild2/cc/functions.cxx105
-rw-r--r--libbuild2/cc/gcc.cxx318
-rw-r--r--libbuild2/cc/guess.cxx371
-rw-r--r--libbuild2/cc/guess.hxx3
-rw-r--r--libbuild2/cc/init.cxx52
-rw-r--r--libbuild2/cc/install-rule.cxx532
-rw-r--r--libbuild2/cc/install-rule.hxx45
-rw-r--r--libbuild2/cc/lexer+comment.test.testscript5
-rw-r--r--libbuild2/cc/lexer+raw-string-literal.test.testscript2
-rw-r--r--libbuild2/cc/lexer.cxx34
-rw-r--r--libbuild2/cc/lexer.hxx30
-rw-r--r--libbuild2/cc/lexer.test.cxx2
-rw-r--r--libbuild2/cc/link-rule.cxx657
-rw-r--r--libbuild2/cc/link-rule.hxx5
-rw-r--r--libbuild2/cc/module.cxx165
-rw-r--r--libbuild2/cc/module.hxx15
-rw-r--r--libbuild2/cc/msvc.cxx162
-rw-r--r--libbuild2/cc/parser.cxx24
-rw-r--r--libbuild2/cc/parser.hxx11
-rw-r--r--libbuild2/cc/parser.test.cxx2
-rw-r--r--libbuild2/cc/pkgconfig-libpkg-config.cxx271
-rw-r--r--libbuild2/cc/pkgconfig-libpkgconf.cxx355
-rw-r--r--libbuild2/cc/pkgconfig.cxx1641
-rw-r--r--libbuild2/cc/pkgconfig.hxx129
-rw-r--r--libbuild2/cc/predefs-rule.cxx379
-rw-r--r--libbuild2/cc/predefs-rule.hxx45
-rw-r--r--libbuild2/cc/std.cppm6781
-rw-r--r--libbuild2/cc/target.cxx46
-rw-r--r--libbuild2/cc/target.hxx51
-rw-r--r--libbuild2/cc/types.cxx15
-rw-r--r--libbuild2/cc/windows-rpath.cxx31
-rw-r--r--libbuild2/cli/buildfile71
-rw-r--r--libbuild2/cli/export.hxx37
-rw-r--r--libbuild2/cli/init.cxx (renamed from build2/cli/init.cxx)14
-rw-r--r--libbuild2/cli/init.hxx (renamed from build2/cli/init.hxx)12
-rw-r--r--libbuild2/cli/module.hxx (renamed from build2/cli/module.hxx)10
-rw-r--r--libbuild2/cli/rule.cxx (renamed from build2/cli/rule.cxx)14
-rw-r--r--libbuild2/cli/rule.hxx (renamed from build2/cli/rule.hxx)13
-rw-r--r--libbuild2/cli/target.cxx (renamed from build2/cli/target.cxx)4
-rw-r--r--libbuild2/cli/target.hxx (renamed from build2/cli/target.hxx)15
-rw-r--r--libbuild2/common-options.cxx112
-rw-r--r--libbuild2/common-options.hxx26
-rw-r--r--libbuild2/config/functions.cxx40
-rw-r--r--libbuild2/config/init.cxx163
-rw-r--r--libbuild2/config/module.hxx2
-rw-r--r--libbuild2/config/operation.cxx104
-rw-r--r--libbuild2/config/operation.hxx6
-rw-r--r--libbuild2/config/types.hxx25
-rw-r--r--libbuild2/config/utility.cxx61
-rw-r--r--libbuild2/config/utility.hxx38
-rw-r--r--libbuild2/config/utility.txx4
-rw-r--r--libbuild2/context.cxx656
-rw-r--r--libbuild2/context.hxx315
-rw-r--r--libbuild2/context.ixx8
-rw-r--r--libbuild2/cxx/init.cxx621
-rw-r--r--libbuild2/cxx/init.hxx16
-rw-r--r--libbuild2/cxx/target.cxx31
-rw-r--r--libbuild2/cxx/target.hxx36
-rw-r--r--libbuild2/diagnostics.cxx798
-rw-r--r--libbuild2/diagnostics.hxx532
-rw-r--r--libbuild2/diagnostics.ixx126
-rw-r--r--libbuild2/dist/init.cxx70
-rw-r--r--libbuild2/dist/module.hxx14
-rw-r--r--libbuild2/dist/operation.cxx676
-rw-r--r--libbuild2/dist/rule.cxx88
-rw-r--r--libbuild2/dist/rule.hxx20
-rw-r--r--libbuild2/dist/types.hxx41
-rw-r--r--libbuild2/dump.cxx1047
-rw-r--r--libbuild2/dump.hxx32
-rw-r--r--libbuild2/dyndep.cxx421
-rw-r--r--libbuild2/dyndep.hxx100
-rw-r--r--libbuild2/file-cache.cxx2
-rw-r--r--libbuild2/file-cache.hxx12
-rw-r--r--libbuild2/file-cache.ixx16
-rw-r--r--libbuild2/file.cxx755
-rw-r--r--libbuild2/file.hxx133
-rw-r--r--libbuild2/file.ixx38
-rw-r--r--libbuild2/filesystem.cxx83
-rw-r--r--libbuild2/filesystem.hxx8
-rw-r--r--libbuild2/filesystem.txx30
-rw-r--r--libbuild2/forward.hxx1
-rw-r--r--libbuild2/function.cxx13
-rw-r--r--libbuild2/function.hxx12
-rw-r--r--libbuild2/function.test.cxx6
-rw-r--r--libbuild2/functions-bool.cxx26
-rw-r--r--libbuild2/functions-builtin.cxx161
-rw-r--r--libbuild2/functions-filesystem.cxx38
-rw-r--r--libbuild2/functions-integer.cxx156
-rw-r--r--libbuild2/functions-json.cxx335
-rw-r--r--libbuild2/functions-name.cxx341
-rw-r--r--libbuild2/functions-path.cxx354
-rw-r--r--libbuild2/functions-process-path.cxx53
-rw-r--r--libbuild2/functions-process.cxx86
-rw-r--r--libbuild2/functions-project-name.cxx39
-rw-r--r--libbuild2/functions-regex.cxx473
-rw-r--r--libbuild2/functions-string.cxx145
-rw-r--r--libbuild2/functions-target-triplet.cxx30
-rw-r--r--libbuild2/functions-target.cxx108
-rw-r--r--libbuild2/in/init.cxx27
-rw-r--r--libbuild2/in/rule.cxx90
-rw-r--r--libbuild2/in/rule.hxx10
-rw-r--r--libbuild2/in/target.cxx20
-rw-r--r--libbuild2/install/functions.cxx116
-rw-r--r--libbuild2/install/init.cxx321
-rw-r--r--libbuild2/install/operation.cxx361
-rw-r--r--libbuild2/install/operation.hxx64
-rw-r--r--libbuild2/install/rule.cxx822
-rw-r--r--libbuild2/install/rule.hxx185
-rw-r--r--libbuild2/install/utility.cxx261
-rw-r--r--libbuild2/install/utility.hxx47
-rw-r--r--libbuild2/json.cxx904
-rw-r--r--libbuild2/json.hxx369
-rw-r--r--libbuild2/json.ixx349
-rw-r--r--libbuild2/lexer.cxx177
-rw-r--r--libbuild2/lexer.hxx70
-rw-r--r--libbuild2/module.cxx312
-rw-r--r--libbuild2/module.hxx63
-rw-r--r--libbuild2/name.hxx3
-rw-r--r--libbuild2/operation.cxx579
-rw-r--r--libbuild2/operation.hxx117
-rw-r--r--libbuild2/parser.cxx3873
-rw-r--r--libbuild2/parser.hxx225
-rw-r--r--libbuild2/prerequisite.cxx6
-rw-r--r--libbuild2/prerequisite.hxx38
-rw-r--r--libbuild2/recipe.cxx1
-rw-r--r--libbuild2/recipe.hxx3
-rw-r--r--libbuild2/rule.cxx124
-rw-r--r--libbuild2/rule.hxx80
-rw-r--r--libbuild2/scheduler.cxx25
-rw-r--r--libbuild2/scheduler.hxx68
-rw-r--r--libbuild2/scheduler.ixx14
-rw-r--r--libbuild2/scheduler.txx42
-rw-r--r--libbuild2/scope.cxx123
-rw-r--r--libbuild2/scope.hxx163
-rw-r--r--libbuild2/scope.ixx12
-rw-r--r--libbuild2/script/builtin-options.cxx394
-rw-r--r--libbuild2/script/builtin-options.hxx84
-rw-r--r--libbuild2/script/builtin-options.ixx57
-rw-r--r--libbuild2/script/builtin.cli7
-rw-r--r--libbuild2/script/lexer.cxx11
-rw-r--r--libbuild2/script/lexer.hxx2
-rw-r--r--libbuild2/script/parser.cxx753
-rw-r--r--libbuild2/script/parser.hxx71
-rw-r--r--libbuild2/script/regex.cxx18
-rw-r--r--libbuild2/script/regex.hxx20
-rw-r--r--libbuild2/script/run.cxx1698
-rw-r--r--libbuild2/script/run.hxx54
-rw-r--r--libbuild2/script/script.cxx45
-rw-r--r--libbuild2/script/script.hxx42
-rw-r--r--libbuild2/search.cxx80
-rw-r--r--libbuild2/search.hxx7
-rw-r--r--libbuild2/target-key.hxx17
-rw-r--r--libbuild2/target-type.hxx31
-rw-r--r--libbuild2/target.cxx462
-rw-r--r--libbuild2/target.hxx497
-rw-r--r--libbuild2/target.ixx70
-rw-r--r--libbuild2/test/common.cxx6
-rw-r--r--libbuild2/test/init.cxx44
-rw-r--r--libbuild2/test/operation.cxx16
-rw-r--r--libbuild2/test/rule.cxx510
-rw-r--r--libbuild2/test/script/lexer+for-loop.test.testscript231
-rw-r--r--libbuild2/test/script/lexer.cxx55
-rw-r--r--libbuild2/test/script/lexer.hxx13
-rw-r--r--libbuild2/test/script/lexer.test.cxx1
-rw-r--r--libbuild2/test/script/parser+command-if.test.testscript6
-rw-r--r--libbuild2/test/script/parser+command-re-parse.test.testscript2
-rw-r--r--libbuild2/test/script/parser+description.test.testscript4
-rw-r--r--libbuild2/test/script/parser+expansion.test.testscript2
-rw-r--r--libbuild2/test/script/parser+for.test.testscript1029
-rw-r--r--libbuild2/test/script/parser+while.test.testscript265
-rw-r--r--libbuild2/test/script/parser.cxx545
-rw-r--r--libbuild2/test/script/parser.hxx18
-rw-r--r--libbuild2/test/script/parser.test.cxx79
-rw-r--r--libbuild2/test/script/runner.cxx53
-rw-r--r--libbuild2/test/script/runner.hxx19
-rw-r--r--libbuild2/test/script/script.cxx119
-rw-r--r--libbuild2/test/script/script.hxx29
-rw-r--r--libbuild2/token.cxx21
-rw-r--r--libbuild2/token.hxx8
-rw-r--r--libbuild2/types-parsers.cxx84
-rw-r--r--libbuild2/types-parsers.hxx21
-rw-r--r--libbuild2/types.hxx21
-rw-r--r--libbuild2/types.ixx6
-rw-r--r--libbuild2/utility-installed.cxx8
-rw-r--r--libbuild2/utility-uninstalled.cxx12
-rw-r--r--libbuild2/utility.cxx397
-rw-r--r--libbuild2/utility.hxx529
-rw-r--r--libbuild2/utility.ixx177
-rw-r--r--libbuild2/utility.txx64
-rw-r--r--libbuild2/variable.cxx1529
-rw-r--r--libbuild2/variable.hxx644
-rw-r--r--libbuild2/variable.ixx237
-rw-r--r--libbuild2/variable.txx523
-rw-r--r--libbuild2/version/rule.cxx22
-rw-r--r--libbuild2/version/rule.hxx3
-rw-r--r--libbuild2/version/snapshot-git.cxx19
-rw-r--r--libbuild2/version/snapshot.cxx4
-rw-r--r--manifest12
-rw-r--r--old-tests/variable/override/buildfile52
-rw-r--r--old-tests/variable/override/p/buildfile44
-rw-r--r--old-tests/variable/override/simple2
-rwxr-xr-xold-tests/variable/override/test.sh104
-rw-r--r--old-tests/variable/type-pattern-append/buildfile2
-rw-r--r--repositories.manifest2
-rw-r--r--tests/build/root.build7
-rw-r--r--tests/cc/libu/testscript2
-rw-r--r--tests/cc/modules/common.testscript20
-rw-r--r--tests/cc/modules/modules.testscript21
-rw-r--r--tests/cc/preprocessed/testscript5
-rw-r--r--tests/dependency/recipe/testscript61
-rw-r--r--tests/directive/config.testscript4
-rw-r--r--tests/directive/parsing.testscript2
-rw-r--r--tests/directive/run.testscript3
-rw-r--r--tests/eval/qual.testscript5
-rw-r--r--tests/expansion/escape.testscript17
-rw-r--r--tests/function/builtin/testscript39
-rw-r--r--tests/function/integer/buildfile4
-rw-r--r--tests/function/integer/testscript41
-rw-r--r--tests/function/json/buildfile4
-rw-r--r--tests/function/json/testscript257
-rw-r--r--tests/function/name/testscript54
-rw-r--r--tests/function/path/testscript86
-rw-r--r--tests/function/regex/testscript116
-rw-r--r--tests/function/string/testscript27
-rw-r--r--tests/in/testscript29
-rw-r--r--tests/loop/for.testscript14
-rw-r--r--tests/name/extension.testscript2
-rw-r--r--tests/name/pattern.testscript35
-rw-r--r--tests/recipe/buildscript/testscript1080
-rw-r--r--tests/recipe/cxx/testscript8
-rw-r--r--tests/test/script/builtin/sleep.testscript2
-rw-r--r--tests/test/script/common.testscript4
-rw-r--r--tests/test/script/runner/expr.testscript2
-rw-r--r--tests/test/script/runner/for.testscript502
-rw-r--r--tests/test/script/runner/pipe.testscript22
-rw-r--r--tests/test/script/runner/redirect.testscript4
-rw-r--r--tests/test/script/runner/set.testscript225
-rw-r--r--tests/test/script/runner/status.testscript8
-rw-r--r--tests/test/script/runner/timeout.testscript32
-rw-r--r--tests/test/script/runner/while.testscript16
-rw-r--r--tests/test/simple/generated/driver.cxx18
-rw-r--r--tests/test/simple/generated/testscript139
-rw-r--r--tests/type/json/buildfile4
-rw-r--r--tests/type/json/testscript504
-rw-r--r--tests/type/map/buildfile4
-rw-r--r--tests/type/map/testscript70
-rw-r--r--tests/type/set/buildfile4
-rw-r--r--tests/type/set/testscript55
-rw-r--r--tests/type/vector/buildfile4
-rw-r--r--tests/type/vector/testscript57
-rw-r--r--tests/value/concat.testscript42
-rw-r--r--tests/value/reverse.testscript55
-rw-r--r--tests/variable/override/testscript76
-rw-r--r--tests/variable/private/buildfile4
-rw-r--r--tests/variable/private/testscript46
-rw-r--r--tests/variable/target-specific/testscript14
-rw-r--r--tests/variable/target-type-pattern-specific/testscript19
330 files changed, 55828 insertions, 10052 deletions
diff --git a/.gitignore b/.gitignore
index 5a9e741..dfb9bab 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,10 +5,16 @@
*.d
*.t
*.i
+*.i.*
*.ii
+*.ii.*
*.o
*.obj
+*.gcm
+*.pcm
+*.ifc
*.so
+*.dylib
*.dll
*.a
*.lib
diff --git a/LICENSE b/LICENSE
index 6d28197..f2f9ac7 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2014-2022 the build2 authors (see the AUTHORS file).
+Copyright (c) 2014-2024 the build2 authors (see the AUTHORS file).
Copyright (c) Microsoft Corporation for the libbuild2/cc/msvc-setup.h file.
Permission is hereby granted, free of charge, to any person obtaining a copy
diff --git a/NEWS b/NEWS
index ee9c009..597156e 100644
--- a/NEWS
+++ b/NEWS
@@ -1,5 +1,450 @@
+Version 0.16.0
+
+ * Support for Objective-C/C++ compilation.
+
+ Specifically, the c and cxx modules now provide the c.objc and cxx.objcxx
+ submodules which can be loaded in order to register the m{}/mm{} target
+ types and enable Objective-C/C++ compilation in the c and cxx compile
+ rules. Note that c.objc and cxx.objcxx must be loaded after the c and cxx
+ modules, respectively, and while the m{}/mm{} target types are registered
+ unconditionally, compilation is only enabled if the C/C++ compiler
+ supports Objective-C/C++ for this target platform. Typical usage:
+
+ # root.build
+ #
+ using cxx
+ using cxx.objcxx
+
+ # buildfile
+ #
+ lib{hello}: {hxx cxx}{*}
+ lib{hello}: mm{*}: include = ($cxx.target.class == 'macos')
+
+ Note also that while there is support for linking Objective-C/C++
+ executables and libraries, this is done using the C/C++ compiler driver
+ and no attempt to automatically link any necessary Objective-C runtime
+ (such as -lobjc) is made. For details, refer to "Objective-C Compilation"
+ and "Objective-C++ Compilation" in the manual.
+
+ * Support for Assembler with C Preprocessor (.S) compilation.
+
+ Specifically, the c module now provides the c.as-cpp submodule which can
+ be loaded in order to register the S{} target type and enable Assembler
+ with C Preprocessor compilation in the c compile rule. For details, refer
+ to "Assembler with C Preprocessor Compilation" in the manual.
+
+ * Support for buildfile importation.
+
+ A project can now export buildfiles that can then be imported by other
+ projects. This mechanism is primarily useful for exporting target type
+ definitions and ad hoc rules.
+
+ Specifically, a project can now place *.build files into its build/export/
+ subdirectory (or *.build2 and build2/export/ in the alternative naming
+ scheme). Such files can then be imported by other projects as buildfile{}
+ targets. For example:
+
+ import thrift%buildfile{thrift-cxx}
+
+ While for other target types the semantics of import is to load the
+ project's export stub and return the exported target, for buildfile{} the
+ semantics is to source the imported buildfile at the point of importation.
+
+ Note that care must be taken when authoring exported buildfiles since they
+ will be sourced by other projects in unpredictable circumstances. In
+ particular, the import directive by default does not prevent sourcing the
+ same buildfile multiple times (neither in the same project nor in the same
+ scope). As a result, if certain parts must only be sourced once per
+ project (such as target type definitions), then they must be factored into
+ a separate buildfile (in build/export/) that is imported by the "main"
+ exported buildfile with the `once` attribute. For example, the above
+ thrift-cxx.build may contain:
+
+ import [once] thrift%buildfile{thrift-cxx-target-type}
+
+ See also "install Module" in the manual for details on the exported
+ buildfile installation.
+
+ * Support for defining explicit (as opposed to ad hoc) target groups.
+
+ A user-defined explicit target group must be derived from the group base
+ target type. If desired, it can be marked as "see-through", meaning that
+ when it is listed as a prerequisite of a target, the matching rule will
+ "see" its members, rather than the group itself. For example:
+
+ define [see_through] thrift_cxx: group
+
+ define thrift: file
+ thrift{*}: extension = thrift
+
+ exe{hello}: cxx{hello} thrift_cxx{data}
+ thrift_cxx{data}: thrift{data}
+
+ Explicit group members can be specified statically, injected by an ad hoc
+ rule, or extracted dynamically by the depdb-dyndep builtin (see the next
+ NEWS item). For example:
+
+ thrift_cxx{data}<{hxx cxx}{data_constants}>: thrift{data} # Static.
+
+ thrift_cxx{~'/(.+)/'}<{hxx cxx}{^'/\1_types/'}>: thrift{~'/\1/'} # Inject.
+ {{
+ depdb dyndep --dyn-target ... # Dynamic.
+ }}
+
+ * Support for dynamic target extraction in addition to prerequisites.
+
+ This functionality is enabled with the depdb-dyndep --dyn-target option.
+ If the recipe target is an explicit group (see the previous NEWS item),
+ then the dynamically extracted targets are added as its members.
+ Otherwise, the listed targets are added as ad hoc group members. In both
+ cases the dynamically extracted target is ignored if it is already
+ specified as a static member or injected by a rule. Note that this
+ functionality is not available in the --byproduct mode. See the
+ depdb-dyndep builtin options description for details.
+
+ * New `lines` depdb-dyndep dependency format in addition to `make`.
+
+ The `lines` format lists targets and/or prerequisites one per line. See
+ the depdb-dyndep builtin options description for details.
+
+ * Low verbosity diagnostics rework.
+
+ The low verbosity (level 1) rule diagnostics format has been adjusted to
+ include the output target where appropriate. The implementation has also
+ been redesigned to go through the uniform print_diag() API, including for
+ the `diag` pseudo-builtin in ad hoc recipes.
+
+ Specifically, the `diag` builtin now expects its arguments to be in one of
+ the following two forms (which correspond to the two print_diag() forms):
+
+ diag <prog> <l-target> <comb> <r-target>...
+ diag <prog> <r-target>...
+
+ If the `diag` builtin is not specified, the default diagnostics is now
+ equivalent to:
+
+ For update:
+
+ diag <prog> ($<[0]) -> $>
+
+ And for other operations:
+
+ diag <prog> $>
+
+ For details, see the print_diag() API description in diagnostics.hxx. See
+ also GitHub issue #40 for additional background/details.
+
+ * Buffering of diagnostics from child processes.
+
+ By default, unless running serially or --no-diag-buffer is specified,
+ diagnostics issued by child processes (compilers, etc) is buffered and
+ printed all at once after each child exits in order to prevent
+ interleaving. See also the new --[no-]diag-color options.
+
+ * New $path.posix_string() and $path.posix_representation() functions.
+
+ These functions are similar to $path.string() and $path.representation()
+ except that they always return the string/representation of a path in the
+ POSIX notation, that is, using forward slashes.
+
+ * New $regex.filter[_out]_{match,search}(<vals>, <pat>) functions.
+
+ The match versions return elements of a list that match (filter) or do not
+ match (filter_out) the regular expression. The search versions do the same
+ except for the search instead of match regex semantics.
+
+ * New $find(<sequence>, <value>), $find_index(<sequence>, <value>) functions.
+
+ The $find() function returns true if the sequence contains the specified
+ value. The $find_index() function returns the index of the first element
+ in the sequence that is equal to the specified value or $size(<sequence>)
+ if none is found. For string sequences, it's possible to request case-
+ insensitive comparison with a flag, for example:
+
+ if ($find ($values, 'foo', icase))
+ ...
+
+ * New $integer_sequence(<begin>, <end>[, <step>]) function.
+
+ This function returns the list of uint64 integers starting from <begin>
+ (including) to <end> (excluding) with the specified <step> or 1 if
+ unspecified. For example:
+
+ hdr = foo.hxx bar.hxx baz.hxx
+ src = foo.cxx bar.cxx baz.cxx
+
+ assert ($size($hdr) == $size($src)) "hdr and src expected to be parallel"
+
+ for i: $integer_sequence(0, $size($hdr))
+ {
+ h = ($hdr[$i])
+ s = ($src[$i])
+ ...
+ }
+
+ * New $is_a(<name>, <target-type>), $filter[_out](<names>, <target-types>)
+ functions.
+
+ $is_a() returns true if the <name>'s target type is-a <target-type>. Note
+ that this is a dynamic type check that takes into account target type
+ inheritance.
+
+ $filter[_out]() return names with target types which are-a (filter) or
+ not are-a (filter_out) one of <target-types>.
+
+ In particular, these functions are useful for filtering prerequisite
+ targets ($<) in ad hoc recipes and rules.
+
+ * Support for the hex notation for the uint64 type.
+
+ Specifically, now we can do:
+
+ x = [uint64] 0x0000ffff
+
+ cxx.poptions += "-DOFFSET=$x" # -DOFFSET=65535
+ cxx.poptions += "-DOFFSET=$string($x, 16)" # -DOFFSET=0xffff
+ cxx.poptions += "-DOFFSET=$string($x, 16, 8)" # -DOFFSET=0x0000ffff
+
+ Note that there is no hex notation support for the int64 (signed) type.
+
+ * Support for the `for` and `while` loops in Buildscript recipes and
+ Testscript.
+
+ For example:
+
+ for v: $values
+ ...
+ end
+
+ cat values.txt | for -n v
+ ...
+ end
+
+ while (!$regex.match(...))
+ ...
+ end
+
+ See "Command-For" and "Command-While" in the Testscript manual for
+ details.
+
+ * New `find` builtin in Buildscript recipes and Testscript.
+
+ For example:
+
+ find gen/ -type f -name '*.?xx' | for -n f
+ ...
+ end
+
+ See "find" in the Testscript manual for details.
+
+ * Improvements to escape sequence support.
+
+ In the double-quoted strings we now only do effective escaping of the
+ special [$("\] characters, line continuations, plus [)] for symmetry.
+
+ There is now support for "escape sequence expansion" in the $\X form where
+ \X can be any of the C/C++ simple escape sequences (\n, \t, etc) plus \0
+ (which in C/C++ is an octal escape sequence). For example:
+
+ info "foo$\n$\tbar$\n$\tbaz"
+
+ Will print:
+
+ buildfile:1:1: info: foo
+ bar
+ baz
+
+ * New include_arch installation location and the corresponding
+ config.install.include_arch configuration variable.
+
+ This location is meant for architecture-specific files, such as
+ configuration headers. By default it's the same as the standard include
+ location but can be configured by the user to a different value (for
+ example, /usr/include/x86_64-linux-gnu/) for platforms that support
+ multiple architectures from the same installation location. This is how
+ one would normally use it from a buildfile:
+
+ # The generated configuration header may contain target architecture-
+ # specific information so install it into include_arch/ instead of
+ # include/.
+ #
+ h{*}: install = include/libhello/
+ h{config}: install = include_arch/libhello/
+
+ * Support for installation filtering.
+
+ While project authors determine what gets installed at the buildfile
+ level, the users of the project can now further filter the installation
+ using the config.install.filter variable. For details, see "Installation
+ Filtering" in the manual.
+
+ * Support for relocatable installations.
+
+ A relocatable installation can be moved to a directory other than its
+ original installation location. To request a relocatable installation, set
+ the config.install.relocatable variable to true. For details, see
+ "Relocatable Installation" in the manual.
+
+ * Support for installation manifest.
+
+ During the install operation, the config.install.manifest variable can be
+ set to a file path (or `-`) in order to write the information about all
+ the filesystem entries being installed into the specified file (or
+ stdout). The format of the installation manifest is "JSON lines". For
+ details, see the config.install.manifest variable documentation in the
+ install module.
+
+ * Ability to remap paths in source distributions.
+
+ The dist target-specific variable can now specify a path besides true or
+ false. This path is the "imaginary" source location which is used to
+ derive the corresponding distribution location. This location can be
+ either a directory path (to remap with the same file name) or a file path
+ (to remap with a different name). If the path is relative, then it is
+ treated relative to the target directory. Note that to make things less
+ error-prone, simple paths without any directory separators are not allowed
+ (use ./<name> instead).
+
+ Note that if multiple targets end up with the same source location, the
+ behavior is undefined and no diagnostics is issued. Note also that such
+ remapping has naturally no effect in the bootstrap distribution mode.
+
+ * The in.substitution variable has been renamed to in.mode.
+
+ The original name is still recognized for backwards compatibility.
+
+ * Ability to specify `in` rule substitutions as key-value pairs.
+
+ See "in Module" in the manual for details.
+
+ * New public/private variables model.
+
+ Now unqualified variables are project-private and can be typed, meaning
+ that a value assigned to a variable with such a name anywhere within the
+ project will have this type. For example:
+
+ [uint64] priority = [null]
+ [uint64] stack_size = [null]
+
+ priority = 1 # Ok.
+ stack_size = abc # Error.
+
+ Besides the type, variable attributes can specify visibility (project by
+ default) and overridability (false by default). For example:
+
+ thread{*}:
+ {
+ [uint64, visibility=target] priority = [null]
+ [uint64, visibility=target] stack_size = [null]
+ }
+
+ thread{foo}: priority = 1 # Ok.
+ priority = 1 # Error.
+
+ * Support for post hoc prerequisites.
+
+ Unlike normal and ad hoc prerequisites, a post hoc prerequisite is built
+ after the target, not before. It may also form a dependency cycle together
+ with normal/ad hoc prerequisites. In other words, all this form of
+ dependency guarantees is that a post hoc prerequisite will be built if its
+ dependent target is built.
+
+ A canonical example where this can be useful is a library with a plugin:
+ the plugin depends on the library while the library would like to make
+ sure the plugin is built whenever the library is built so that programs
+ that link the library can be executed without having to specify explicit
+ dependency on the plugin (at least for the dynamic linking case):
+
+ lib{hello}: ...
+ lib{hello-plugin}: ... lib{hello}
+ libs{hello}: libs{hello-plugin}: include = posthoc
+
+ Note that there is no guarantee that post hoc prerequisites will be built
+ before the dependents of the target "see" it as built. Rather, it is
+ guaranteed that post hoc prerequisites will be built before the end of the
+ overall build (more precisely, before the current operation completes).
+ As a result, post hoc prerequisites should not be relied upon if the
+ result (for example, a source code generator) is expected to be used
+ during build (more precisely, within the same operation).
+
+ Note also that the post hoc semantics is not the same as order-only in
+ GNU make. In fact, it is an even more "relaxed" form of dependency.
+ Specifically, while order-only prerequisite is guaranteed to be built
+ before the target, post hoc prerequisite is only guaranteed to be built
+ before the end of the overall build.
+
+ * Support for dumping build system state in the JSON format.
+
+ The new --dump-format option can be used to select the desired format.
+ Its valid values are `buildfile` and `json-v0.1`. For details on the JSON
+ dump format see "Appendix A - JSON Dump Format" in the manual.
+
+ * Change to the --dump option semantics.
+
+ This option now recognizes two additional values: `match-pre` and
+ `match-post` to dump the state of pre/post-operations. The `match` value
+ now only triggers dumping of the main operation.
+
+ * New --dump-scope and --dump-target options to limit --dump output.
+
+ * New --load-only option in addition to --match-only.
+
+ This option has the effect of loading all the subdirectory buildfiles that
+ are not explicitly included and is primarily useful in combination with
+ --dump.
+
+ * Quoted/display target names in the JSON structured result are now
+ consistent with the JSON dump.
+
+ Specifically, before we had `target` (display) and `quoted_target` and now
+ we have `target` (quoted) and `display_target`. Note that this is a
+ backwards-incompatible change.
+
+ * The dist meta-operation no longer invokes the install program.
+
+ This results in a substantial speedup, especially on Windows. The use of
+ install (or another install-like program) can still be forced with
+ explicit config.dist.cmd=install.
+
+ * Clang -Wunqualified-std-cast-call warning was remapped to -Wextra.
+
+ Clang 15 introduced the -Wunqualified-std-cast-call warning which warns
+ about unqualified calls to std::move() and std::forward() (because they
+ can be "hijacked" via ADL). Surprisingly, this warning is enabled by
+ default, as opposed to with -Wextra or at least -Wall. It has also proven
+ to be quite disruptive, causing a large number of warnings in a large
+ number of packages. So we have "remapped" it to -Wextra for now and in the
+ future may "relax" it to -Wall and potentially to being enabled by
+ default. See GitHub issue #259 for background and details.
+
Version 0.15.0
+ * Generated C/C++ headers and ad hoc sources are now updated during match.
+
+ Specifically, all headers as well as ad hoc headers and sources are now
+ treated by the cc::link_rule as if they had update=match unless explicit
+ update=execute is specified (see below on the update operation-specific
+ variable).
+
+ This change should be transparent to most projects. For background and
+ discussion of rare cases where you may wish to disable this, see:
+
+ https://github.com/build2/HOWTO/blob/master/entries/handle-auto-generated-headers.md
+
+ * Support for rule hints.
+
+ A rule hint is a target attribute, for example:
+
+ [rule_hint=cxx] exe{hello}: c{hello}
+
+ Rule hints can be used to resolve ambiguity when multiple rules match the
+ same target as well as to override an unambiguous match.
+
+ In cc::link_rule we now support "linking" libraries without any sources or
+ headers with a hint. This can be useful for creating "metadata libraries"
+ whose only purpose is to convey metadata (options to use and/or libraries
+ to link).
+
* UTF-8 is now the default input/source character set for C/C++ compilation.
Specifically, the cc module now passes the appropriate compiler option
@@ -13,6 +458,81 @@ Version 0.15.0
https://github.com/build2/HOWTO/blob/master/entries/convert-source-files-to-utf8.md
+ * Project configuration variables are now non-nullable by default.
+
+ A project configuration variable with the NULL default value is naturally
+ assumed nullable, for example:
+
+ config [string] config.libhello.fallback_name ?= [null]
+
+ Otherwise, to make a project configuration nullable use the `null`
+ variable attribute, for example:
+
+ config [string, null] config.libhello.fallback_name ?= "World"
+
+ * New $relative(<path>, <dir-path>) function.
+
+ * New $root_directory(<path>) function.
+
+ * New $size() function to get the size of string, path, dir_path.
+
+ * New $size() function to get the size of a sequence (strings, paths, etc).
+
+ * New $sort() function to sort a sequence (strings, paths, etc).
+
+ The function has the following signature:
+
+ $sort(<sequence> [, <flags>])
+
+ The following flag is supported by all the overloads:
+
+ dedup - in addition to sorting also remove duplicates
+
+ Additionally, the strings overload also supports the following flag:
+
+ icase - sort ignoring case
+
+ Note that on case-insensitive filesystems the paths and dir_paths
+ overloads' order is case-insensitive.
+
+ * New $config.origin() function for querying configuration value origin.
+
+ Given a config.* variable name, this function returns one of `undefined`,
+ `default`, `buildfile`, or `override`.
+
+ * Recognition of -pthread as a special -l option in *.libs.
+
+ For background, see:
+
+ https://github.com/build2/HOWTO/blob/master/entries/link-pthread.md
+
+ * The bin.whole (whole archive) value is now saved in generated pkg-config
+ files.
+
+ * Ability to customize header and library search paths in generated
+ pkg-config files.
+
+ Specifically, {cc,c,cxx}.pkgconfig.{include,lib} variables specify header
+ (-I) and library (-L) search paths to use in the generated pkg-config
+ files instead of the default install.{include,lib}. Relative paths are
+ resolved as installation paths. For example:
+
+ lib{Qt6Core}: cxx.pkgconfig.include = include/qt6/
+
+ * Ability to save user metadata in C/C++ libraries, including in generated
+ pkg-config files.
+
+ For background and details, see:
+
+ https://github.com/build2/HOWTO/blob/master/entries/convey-additional-information-with-exe-lib.md
+
+ * Support for rule-specific search in immediate import.
+
+ We can now nominate a rule to perform the rule-specific search (if
+ required) using the rule_hint attribute. For example:
+
+ import! [metadata, rule_hint=cxx.link] lib = libhello%lib{hello}
+
* Support for dynamic dependencies in ad hoc recipes.
Specifically, the `depdb` builtin now has the new `dyndep` command that
@@ -28,7 +548,7 @@ Version 0.15.0
coptions = $cc.coptions $cxx.coptions
depdb dyndep $poptions --what=header --default-type=h -- \
- $cxx.path $poptions $coptions $cxx.mode -M -MG $s
+ $cxx.path $poptions $coptions $cxx.mode -M -MG $s
diag c++ ($<[0])
@@ -39,7 +559,7 @@ Version 0.15.0
t = $(o).t
depdb dyndep $poptions --what=header --default-type=h --file $t -- \
- $cxx.path $poptions $coptions $cxx.mode -M -MG $s >$t
+ $cxx.path $poptions $coptions $cxx.mode -M -MG $s >$t
The above depdb-dyndep commands will run the C++ compiler with the -M -MG
options to extract the header dependency information, parse the resulting
@@ -130,22 +650,174 @@ Version 0.15.0
s = $path($<[0])
o = $path($>)
- poptions = $cxx.poptions $cc.poptions
- poptions += $cxx.lib_poptions(libue{hello-meta}, obje)
+ lib_poptions = $cxx.lib_poptions(libue{hello-meta}, obje)
+ depdb hash $lib_poptions
+
+ poptions = $cxx.poptions $cc.poptions $lib_poptions
coptions = $cc.coptions $cxx.coptions
depdb dyndep $poptions --what=header --default-type=h \
--update-exclude libue{hello-meta} -- \
- $cxx.path $poptions $coptions $cxx.mode -M -MG $s
+ $cxx.path $poptions $coptions $cxx.mode -M -MG $s
diag c++ ($<[0])
$cxx.path $poptions $coptions $cxx.mode -o $o -c $s
}}
- Planned future improvements include support the `lines` (list of files,
- one per line) input format in addition to `make` and support for dynamic
- targets in addition to prerequisites.
+ As another example, sometimes we need to extract the "common interface"
+ preprocessor options that are independent of the library type (static
+ or shared). For example, the Qt moc compiler needs to "see" the C/C++
+ preprocessor options from imported libraries if they could affect its
+ input. Here is how we can implement this:
+
+ import libs = libhello%lib{hello}
+
+ libul{hello-meta}: $libs
+
+ cxx{hello-moc}: hxx{hello} libul{hello-meta} $moc
+ {{
+ s = $path($<[0])
+ o = $path($>[0])
+ t = $(o).t
+
+ lib_poptions = $cxx.lib_poptions(libul{hello-meta})
+ depdb hash $lib_poptions
+
+ depdb dyndep --byproduct --drop-cycles --what=header --default-type=h \
+ --update-exclude libul{hello-meta} --file $t
+
+ diag moc ($<[0])
+
+ $moc $cc.poptions $cxx.poptions $lib_poptions \
+ -f $leaf($s) --output-dep-file --dep-file-path $t -o $o $s
+ }}
+
+ Planned future improvements include support for the `lines` (list of
+ files, one per line) input format in addition to `make` and support for
+ dynamic targets in addition to prerequisites.
+
+ * Support for specifying custom ad hoc pattern rule names.
+
+ Besides improving diagnostics, this allows us to use such a name in the
+ rule hints, for example:
+
+ [rule_name=hello.link] exe{~'/(.*)/'}: obje{~'/\1/'}
+ {{
+ $cxx.path -o $path($>) $path($<[0])
+ }}
+
+ [rule_hint=hello] exe{hello}: obje{hello}
+
+ obje{hello}: c{hello-c}
+
+ * Ability to disfigure specific configuration variables.
+
+ The new config.config.disfigure variable can be used to specify the list
+ of variables to ignore when loading config.build (and any files specified
+ in config.config.load), letting them take on the default values. For
+ example:
+
+ $ b configure config.config.disfigure=config.hello.fancy
+
+ Besides names, variables can also be specified as patterns in the
+ config.<prefix>.(*|**)[<suffix>] form where `*` matches single
+ component names (i.e., `foo` but not `foo.bar`), and `**` matches
+ single and multi-component names. Currently only single wildcard (`*` or
+ `**`) is supported. Additionally, a pattern in the config.<prefix>(*|**)
+ form (i.e., without `.` after <prefix>) matches config.<prefix>.(*|**)
+ plus config.<prefix> itself (but not config.<prefix>foo).
+
+ For example, to disfigure all the project configuration variables (while
+ preserving all the module configuration variables; note quoting to prevent
+ pattern expansion):
+
+ $ b config.config.disfigure="'config.hello**'"
+
+ * Ability to omit loading config.build.
+
+ If the new config.config.unload variable is set to true, then omit loading
+ the project's configuration from the config.build file. Note that the
+ configuration is still loaded from config.config.load if specified. Note
+ also that similar to config.config.load, only overrides specified on this
+ project's root scope and global scope are considered.
+
+ * Ability to match libul{} targets.
+
+ The bin.libul rule picks, matches, and unmatches (if possible) a member
+ for the purpose of making its metadata (for example, library's poptions,
+ if it's one of the cc libraries) available.
+
+ * Ability to get common interface options via ${c,cxx}.lib_poptions().
+
+ Specifically, the output target type may now be omitted for utility
+ libraries (libul{} and libu[eas]{}). In this case, only "common interface"
+ options will be returned for lib{} dependencies. This is primarily useful
+ for obtaining poptions to be passed to tools other than C/C++ compilers
+ (for example, Qt moc).
+
+ * Ability to control -I translation to -isystem or /external:I in
+ ${c,cxx}.lib_poptions().
+
+ See the function documentation for details.
+
+ * New `update` operation-specific variable.
+
+ This variable is similar to the already existing `clean` and `test`
+ variables but besides the standard `true` and `false` values, it can also
+ be set to `unmatch` (match but do not update) and `match` (update during
+ match) and `execute` (update during execute, as is normal; this value is
+ primarily useful if the rule has the `match` semantics by default).
+
+ Note that the unmatch (match but do not update) and match (update during
+ match) values are only supported by certain rules (and potentially only
+ for certain prerequisite types).
+
+ Additionally:
+
+ - All the operation-specific variables are now checked for `false` as an
+ override for the prerequisite-specific `include` variable. This can now
+ be used to disable a prerequisite for update, for example:
+
+ ./: exe{test}: update = false
+
+ - Ad hoc Buildscript recipes now support update=unmatch|match.
+
+ - The cc::link_rule now supports the `match` value for headers and ad hoc
+ prerequisites. This can be used to make sure all the library headers are
+ updated before matching any of its (or dependent's) object files.
+
+ * New build.mode global scope variable.
+
+ This variable signals the mode the build system may be running in. The two
+ core modes are `no-external-modules` (bootstrapping of external modules is
+ disabled, see --no-external-modules for details) and `normal` (normal
+ execution). Other build system drivers may invent additional modes (for
+ example, the bpkg `skeleton` mode; see "Package Build System Skeleton" in
+ the package manager manual for details).
+
+ * New cmdline value type for canned command lines.
+
+ The Testscript and Buildscript languages now use the special cmdline value
+ type for canned command lines. Specifically, the re-lexing after expansion
+ now only happens if the expanded value is of the cmdline type. See
+ "Lexical Structure" in the Testscript manual for details.
+
+ * The bash build system module now installs bash modules into
+ bin/<project>.bash/ instead of bin/<project>/ to avoid clashes.
+
+ * New --trace-{match,execute} options.
+
+ These options can be used to understand which dependency chain causes
+ matching or execution of a particular target. See b(1) for details.
+
+ * JSON format support for the --structured-result option and the info meta
+ operation.
+
+ See b(1) for details.
+
+ * Switch to using libpkg-config instead of libpkgconf for loading pkg-config
+ files.
Version 0.14.0
@@ -1299,10 +1971,10 @@ Version 0.8.0
The alternative variable substitution symbol can be specified with the
in.symbol variable and lax (instead of the default strict) mode with
- in.substitution. For example:
+ in.mode. For example:
file{test}: in.symbol = '@'
- file{test}: in.substitution = lax
+ file{test}: in.mode = lax
* New 'bash' build system module that provides modularization support for
bash scripts. See the build system manual for all the details.
@@ -1477,7 +2149,7 @@ Version 0.7.0
* Support for forwarded configurations with target backlinking. See the
configure meta-operation discussion in b(1) for details.
- * Improvements to the in module (in.symbol, in.substitution={strict|lax}).
+ * Improvements to the in module (in.symbol, in.mode={strict|lax}).
* New $directory(), $base(), $leaf() and $extension() path functions.
diff --git a/bootstrap-mingw.bat b/bootstrap-mingw.bat
index cfd9d7c..5638659 100644
--- a/bootstrap-mingw.bat
+++ b/bootstrap-mingw.bat
@@ -107,7 +107,7 @@ for %%d in (%src%) do (
)
echo on
-%cxx% -I%libbutl% -I. -DBUILD2_BOOTSTRAP -DBUILD2_HOST_TRIPLET=\"x86_64-w64-mingw32\" %ops% -o build2\b-boot.exe %r% -limagehlp
+%cxx% -I%libbutl% -I. -DBUILD2_BOOTSTRAP -DBUILD2_HOST_TRIPLET=\"x86_64-w64-mingw32\" %ops% -o build2\b-boot.exe %r% -pthread -limagehlp
@echo off
if errorlevel 1 goto error
diff --git a/bootstrap.gmake b/bootstrap.gmake
index e5ab285..a2c9779 100644
--- a/bootstrap.gmake
+++ b/bootstrap.gmake
@@ -51,7 +51,7 @@ ifeq ($(OS),Windows_NT)
ifneq ($(filter %-w64-mingw32,$(target)),)
host := x86_64-w64-mingw32
chost := $(host)
- override LIBS += -limagehlp
+ override LIBS += -pthread -limagehlp
else ifneq ($(filter %-windows-msvc,$(target)),)
host := x86_64-microsoft-win32-msvc
chost := $(host)
@@ -65,7 +65,7 @@ ifeq ($(OS),Windows_NT)
$(error unsupported target $(target))
endif
else
- override LIBS += -lpthread
+ override LIBS += -pthread
endif
# Remove all the built-in rules, enable second expansion, etc.
diff --git a/build/root.build b/build/root.build
index 3afdcf9..ffc1a0f 100644
--- a/build/root.build
+++ b/build/root.build
@@ -22,9 +22,16 @@ if ($cxx.target.system == 'win32-msvc')
if ($cxx.class == 'msvc')
cxx.coptions += /wd4251 /wd4275 /wd4800
elif ($cxx.id == 'gcc')
+{
cxx.coptions += -Wno-maybe-uninitialized -Wno-free-nonheap-object \
-Wno-stringop-overread # libbutl
+ if ($cxx.version.major >= 13)
+ cxx.coptions += -Wno-dangling-reference
+}
+elif ($cxx.id.type == 'clang' && $cxx.version.major >= 15)
+ cxx.coptions += -Wno-unqualified-std-cast-call
+
cxx.poptions =+ "-I$out_root" "-I$src_root"
# While we don't have any C sources to compile, we need to get the C compiler
@@ -49,3 +56,11 @@ using? cli
# Specify the test target for cross-testing.
#
test.target = $cxx.target
+
+# Temporary ability to build with now deprecated libpkgconf instead of
+# libpkg-config. Note that libpkgconf is known to have issues on Windows and
+# Mac OS so this should only be used on Linux and maybe BSDs. Also note that
+# we will only keep this until upstream (again) breaks backwards compatibility
+# at which point we will drop this support.
+#
+config [bool, config.report=false] config.build2.libpkgconf ?= false
diff --git a/build2/b.cxx b/build2/b.cxx
index 470aade..f0c9338 100644
--- a/build2/b.cxx
+++ b/build2/b.cxx
@@ -54,8 +54,7 @@
#ifndef BUILD2_BOOTSTRAP
# include <libbuild2/bash/init.hxx>
-
-# include <build2/cli/init.hxx>
+# include <libbuild2/cli/init.hxx>
#endif
using namespace butl;
@@ -149,32 +148,20 @@ namespace build2
s.begin_object ();
- // Target.
- //
- {
- // Change the stream verbosity (see print_lines() for details).
- //
- ostringstream os;
- stream_verb (os, stream_verbosity (1, 0));
- os << t;
- s.member ("target", os.str ());
- }
-
// Quoted target.
//
- {
- names ns (t.as_name ()); // Note: potentially adds an extension.
+ s.member_name ("target");
+ dump_quoted_target_name (s, t);
- ostringstream os;
- stream_verb (os, stream_verbosity (1, 0));
- to_stream (os, ns, quote_mode::effective, '@');
- s.member ("quoted_target", os.str ());
- }
+ // Display target.
+ //
+ s.member_name ("display_target");
+ dump_display_target_name (s, t);
- s.member ("target_type", t.key ().type->name, false /* check */);
+ s.member ("target_type", t.type ().name, false /* check */);
if (t.is_a<dir> ())
- s.member ("target_path", t.key ().dir->string ());
+ s.member ("target_path", t.dir.string ());
else if (const auto* pt = t.is_a<path_target> ())
s.member ("target_path", pt->path ().string ());
@@ -329,6 +316,7 @@ main (int argc, char* argv[])
init_diag (cmdl.verbosity,
ops.silent (),
cmdl.progress,
+ cmdl.diag_color,
ops.no_line (),
ops.no_column (),
fdterm (stderr_fd ()));
@@ -383,8 +371,8 @@ main (int argc, char* argv[])
load_builtin_module (&in::build2_in_load);
#ifndef BUILD2_BOOTSTRAP
- load_builtin_module (&cli::build2_cli_load);
load_builtin_module (&bash::build2_bash_load);
+ load_builtin_module (&cli::build2_cli_load);
#endif
// Start up the scheduler and allocate lock shards.
@@ -428,14 +416,25 @@ main (int argc, char* argv[])
pctx = nullptr; // Free first to reuse memory.
}
+ optional<match_only_level> mo;
+ if (ops.load_only ()) mo = match_only_level::alias;
+ else if (ops.match_only ()) mo = match_only_level::all;
+
pctx.reset (new context (sched,
mutexes,
fcache,
- ops.match_only (),
+ mo,
ops.no_external_modules (),
ops.dry_run (),
+ ops.no_diag_buffer (),
!ops.serial_stop () /* keep_going */,
cmdl.cmd_vars));
+
+ if (ops.trace_match_specified ())
+ pctx->trace_match = &ops.trace_match ();
+
+ if (ops.trace_execute_specified ())
+ pctx->trace_execute = &ops.trace_execute ();
};
new_context ();
@@ -443,13 +442,14 @@ main (int argc, char* argv[])
// Parse the buildspec.
//
buildspec bspec;
+ path_name bspec_name ("<buildspec>");
try
{
istringstream is (cmdl.buildspec);
is.exceptions (istringstream::failbit | istringstream::badbit);
parser p (*pctx);
- bspec = p.parse_buildspec (is, path_name ("<buildspec>"));
+ bspec = p.parse_buildspec (is, bspec_name);
}
catch (const io_error&)
{
@@ -461,18 +461,194 @@ main (int argc, char* argv[])
if (bspec.empty ())
bspec.push_back (metaopspec ()); // Default meta-operation.
+ // The reserve values were picked experimentally. They allow building a
+ // sample application that depends on Qt and Boost without causing a
+ // rehash.
+ //
+ // Note: omit reserving anything for the info meta-operation since it
+ // won't be loading the buildfiles and needs to be as fast as possible.
+ //
+ bool mo_info (bspec.size () == 1 &&
+ bspec.front ().size () == 1 &&
+ (bspec.front ().name == "info" ||
+ (bspec.front ().name.empty () &&
+ bspec.front ().front ().name == "info")));
+
+ if (!mo_info)
+ {
+ // Note: also adjust in bpkg if adjusting here.
+ //
+ pctx->reserve (context::reserves {
+ 30000 /* targets */,
+ 1100 /* variables */});
+ }
+
+ bool load_only (ops.load_only ());
+
const path& buildfile (ops.buildfile_specified ()
? ops.buildfile ()
: empty_path);
bool dump_load (false);
bool dump_match (false);
- if (ops.dump_specified ())
+ bool dump_match_pre (false);
+ bool dump_match_post (false);
+ for (const string& p: ops.dump ())
+ {
+ if (p == "load") dump_load = true;
+ else if (p == "match") dump_match = true;
+ else if (p == "match-pre") dump_match_pre = true;
+ else if (p == "match-post") dump_match_post = true;
+ else fail << "unknown phase '" << p << "' specified with --dump";
+ }
+
+ dump_format dump_fmt (dump_format::buildfile);
+ if (ops.dump_format_specified ())
{
- dump_load = ops.dump ().find ("load") != ops.dump ().end ();
- dump_match = ops.dump ().find ("match") != ops.dump ().end ();
+ const string& f (ops.dump_format ());
+
+ if (f == "json-v0.1")
+ {
+#ifdef BUILD2_BOOTSTRAP
+ fail << "json dump not supported in bootstrap build system";
+#endif
+ dump_fmt = dump_format::json;
+ }
+ else if (f != "buildfile")
+ {
+ diag_record dr (fail);
+
+ dr << "unsupported format '" << f << "' specified with --dump-format";
+
+ if (f.compare (0, 4, "json") == 0)
+ dr << info << "supported json format version is json-v0.1";
+ }
}
+ auto dump = [&trace, &ops, dump_fmt] (context& ctx, optional<action> a)
+ {
+ const dir_paths& scopes (ops.dump_scope ());
+ const vector<pair<name, optional<name>>>& targets (ops.dump_target ());
+
+ if (scopes.empty () && targets.empty ())
+ build2::dump (ctx, a, dump_fmt);
+ else
+ {
+ auto comp_norm = [] (dir_path& d, const char* what)
+ {
+ try
+ {
+ if (d.relative ())
+ d.complete ();
+
+ d.normalize ();
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid path '" << e.path << "' specified with " << what;
+ }
+ };
+
+ // If exact is false then return any outer scope that contains this
+ // directory except for the global scope.
+ //
+ auto find_scope = [&ctx, &comp_norm] (dir_path& d,
+ bool exact,
+ const char* what) -> const scope*
+ {
+ comp_norm (d, what);
+
+ // This is always the output directory (specifically, see the target
+ // case below).
+ //
+ const scope& s (ctx.scopes.find_out (d));
+
+ return ((exact ? s.out_path () == d : s != ctx.global_scope)
+ ? &s
+ : nullptr);
+ };
+
+ // Dump scopes.
+ //
+ for (dir_path d: scopes)
+ {
+ const scope* s (find_scope (d, true, "--dump-scope"));
+
+ if (s == nullptr)
+ l5 ([&]{trace << "unknown target scope " << d
+ << " specified with --dump-scope";});
+
+ build2::dump (s, a, dump_fmt);
+ }
+
+ // Dump targets.
+ //
+ for (const pair<name, optional<name>>& p: targets)
+ {
+ const target* t (nullptr);
+
+ // Find the innermost known scope that contains this target. This
+ // is where we are going to resolve its type.
+ //
+ dir_path d (p.second ? p.second->dir : p.first.dir);
+
+ if (const scope* s = find_scope (d, false, "--dump-target"))
+ {
+ // Complete relative directories in names.
+ //
+ name n (p.first), o;
+
+ if (p.second)
+ {
+ comp_norm (n.dir, "--dump-target");
+ o.dir = move (d);
+ }
+ else
+ n.dir = move (d);
+
+ // Similar logic to parser::enter_target::find_target() as used by
+ // the dump directive. Except here we treat unknown target type as
+ // unknown target.
+ //
+ auto r (s->find_target_type (n, location ()));
+
+ if (r.first != nullptr)
+ {
+ t = ctx.targets.find (*r.first, // target type
+ n.dir,
+ o.dir,
+ n.value,
+ r.second, // extension
+ trace);
+
+ if (t == nullptr)
+ l5 ([&]
+ {
+ // @@ TODO: default_extension?
+ //
+ target::combine_name (n.value, r.second, false);
+ names ns {move (n)};
+ if (p.second)
+ ns.push_back (move (o));
+
+ trace << "unknown target " << ns
+ << " specified with --dump-target";
+ });
+ }
+ else
+ l5 ([&]{trace << "unknown target type '" << n.type << "' in "
+ << *s << " specified with --dump-target";});
+
+ }
+ else
+ l5 ([&]{trace << "unknown target scope " << d
+ << " specified with --dump-target";});
+
+ build2::dump (t, a, dump_fmt);
+ }
+ }
+ };
+
// If not NULL, then lifted points to the operation that has been "lifted"
// to the meta-operation (see the logic below for details). Skip is the
// position of the next operation.
@@ -489,7 +665,10 @@ main (int argc, char* argv[])
// Note that this constructor is cheap and so we rather call it always
// instead of resorting to dynamic allocations.
//
- json::stream_serializer js (cout);
+ // Note also that we disable pretty-printing if there is also the JSON
+ // dump and thus we need to combine the two in the JSON Lines format.
+ //
+ json::stream_serializer js (cout, dump_fmt == dump_format::json ? 0 : 2);
if (ops.structured_result_specified () &&
ops.structured_result () == structured_result_format::json)
@@ -539,8 +718,7 @@ main (int argc, char* argv[])
context& ctx (*pctx);
- const path p ("<buildspec>");
- const location l (p, 0, 0); //@@ TODO
+ const location l (bspec_name, 0, 0); //@@ TODO (also bpkg::pkg_configure())
meta_operation_id mid (0); // Not yet translated.
const meta_operation_info* mif (nullptr);
@@ -869,8 +1047,13 @@ main (int argc, char* argv[])
// Now that we have src_root, load the src_root bootstrap file,
// if there is one.
//
+ // As an optimization, omit discovering subprojects for the info
+ // meta-operation if not needed.
+ //
bootstrap_pre (rs, altn);
- bootstrap_src (rs, altn);
+ bootstrap_src (rs, altn,
+ nullopt /* amalgamation */,
+ !mo_info || info_subprojects (mparams) /*subprojects*/);
// If this is a simple project, then implicitly load the test and
// install modules.
@@ -1087,23 +1270,38 @@ main (int argc, char* argv[])
if (oif->outer_id != 0)
outer_oif = lookup (oif->outer_id);
+ if (!oparams.empty ())
+ {
+ // Operation parameters belong to outer operation, if any.
+ //
+ auto* i (outer_oif != nullptr ? outer_oif : oif);
+
+ if (i->operation_pre == nullptr)
+ fail (l) << "unexpected parameters for operation " << i->name;
+ }
+
// Handle pre/post operations.
//
if (auto po = oif->pre_operation)
{
- if ((orig_pre_oid = po (ctx, oparams, mid, l)) != 0)
+ if ((orig_pre_oid = po (
+ ctx,
+ outer_oif == nullptr ? oparams : values {},
+ mid,
+ l)) != 0)
{
assert (orig_pre_oid != default_id);
pre_oif = lookup (orig_pre_oid);
pre_oid = pre_oif->id; // De-alias.
}
}
- else if (!oparams.empty ())
- fail (l) << "unexpected parameters for operation " << oif->name;
if (auto po = oif->post_operation)
{
- if ((orig_post_oid = po (ctx, oparams, mid)) != 0)
+ if ((orig_post_oid = po (
+ ctx,
+ outer_oif == nullptr ? oparams : values {},
+ mid)) != 0)
{
assert (orig_post_oid != default_id);
post_oif = lookup (orig_post_oid);
@@ -1149,6 +1347,9 @@ main (int argc, char* argv[])
// defined there (common with non-intrusive project conversions
// where everything is built from a single root buildfile).
//
+ // Note: we use find_plausible_buildfile() and not find_buildfile()
+ // to look in outer directories.
+ //
optional<path> bf (
find_buildfile (src_base, src_base, altn, buildfile));
@@ -1214,6 +1415,9 @@ main (int argc, char* argv[])
break;
}
+ if (load_only && (mid != perform_id || oid != update_id))
+ fail << "--load-only requires perform(update) action";
+
// Now load the buildfiles and search the targets.
//
action_targets tgs;
@@ -1245,6 +1449,9 @@ main (int argc, char* argv[])
if (tt == nullptr)
fail (l) << "unknown target type " << tn.type;
+ if (load_only && !tt->is_a<alias> ())
+ fail << "--load-only requires alias target";
+
if (mif->search != nullptr)
{
// If the directory is relative, assume it is relative to work
@@ -1275,8 +1482,10 @@ main (int argc, char* argv[])
}
} // target
- if (dump_load)
- dump (ctx);
+ // Delay until after match in the --load-only mode (see below).
+ //
+ if (dump_load && !load_only)
+ dump (ctx, nullopt /* action */);
// Finally, match the rules and perform the operation.
//
@@ -1290,6 +1499,12 @@ main (int argc, char* argv[])
ctx.current_operation (*pre_oif, oif);
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, oparams, false /* inner */, l);
+
+ if (pre_oif->operation_pre != nullptr)
+ pre_oif->operation_pre (ctx, {}, true /* inner */, l);
+
action a (mid, pre_oid, oid);
{
@@ -1301,13 +1516,19 @@ main (int argc, char* argv[])
if (mif->match != nullptr)
mif->match (mparams, a, tgs, diag, true /* progress */);
- if (dump_match)
+ if (dump_match_pre)
dump (ctx, a);
if (mif->execute != nullptr && !ctx.match_only)
mif->execute (mparams, a, tgs, diag, true /* progress */);
}
+ if (pre_oif->operation_post != nullptr)
+ pre_oif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, oparams, false /* inner */);
+
if (mif->operation_post != nullptr)
mif->operation_post (ctx, mparams, pre_oid);
@@ -1319,6 +1540,15 @@ main (int argc, char* argv[])
ctx.current_operation (*oif, outer_oif);
+ if (outer_oif != nullptr && outer_oif->operation_pre != nullptr)
+ outer_oif->operation_pre (ctx, oparams, false /* inner */, l);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx,
+ outer_oif == nullptr ? oparams : values {},
+ true /* inner */,
+ l);
+
action a (mid, oid, oif->outer_id);
{
@@ -1337,6 +1567,14 @@ main (int argc, char* argv[])
mif->execute (mparams, a, tgs, diag, true /* progress */);
}
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx,
+ outer_oif == nullptr ? oparams : values {},
+ true /* inner */);
+
+ if (outer_oif != nullptr && outer_oif->operation_post != nullptr)
+ outer_oif->operation_post (ctx, oparams, false /* inner */);
+
if (post_oid != 0)
{
tgs.reset ();
@@ -1349,6 +1587,12 @@ main (int argc, char* argv[])
ctx.current_operation (*post_oif, oif);
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, oparams, false /* inner */, l);
+
+ if (post_oif->operation_pre != nullptr)
+ post_oif->operation_pre (ctx, {}, true /* inner */, l);
+
action a (mid, post_oid, oid);
{
@@ -1360,13 +1604,19 @@ main (int argc, char* argv[])
if (mif->match != nullptr)
mif->match (mparams, a, tgs, diag, true /* progress */);
- if (dump_match)
+ if (dump_match_post)
dump (ctx, a);
if (mif->execute != nullptr && !ctx.match_only)
mif->execute (mparams, a, tgs, diag, true /* progress */);
}
+ if (post_oif->operation_post != nullptr)
+ post_oif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, oparams, false /* inner */);
+
if (mif->operation_post != nullptr)
mif->operation_post (ctx, mparams, post_oid);
@@ -1374,6 +1624,9 @@ main (int argc, char* argv[])
<< ", id " << static_cast<uint16_t> (post_oid);});
}
+ if (dump_load && load_only)
+ dump (ctx, nullopt /* action */);
+
if (mif->operation_post != nullptr)
mif->operation_post (ctx, mparams, oid);
diff --git a/build2/buildfile b/build2/buildfile
index 0c21388..0111ed2 100644
--- a/build2/buildfile
+++ b/build2/buildfile
@@ -11,7 +11,7 @@ libs = $libbutl
include ../libbuild2/
libs += ../libbuild2/lib{build2}
-for m: bash bin c cc cxx in version
+for m: bash bin c cc cli cxx in version
{
include ../libbuild2/$m/
libs += ../libbuild2/$m/lib{build2-$m}
@@ -45,6 +45,8 @@ copyright = $process.run_regex( \
obj{b}: cxx.poptions += -DBUILD2_COPYRIGHT=\"$copyright\"
+# NOTE: remember to update bpkg buildfile if changing anything here.
+#
switch $cxx.target.class
{
case 'linux'
diff --git a/config b/config
-Subproject 191bcb948f7191c36eefe634336f5fc5c0c4c2b
+Subproject 4ad4bb7c30aca1e705448ba8d51a210bbd47bb5
diff --git a/doc/.gitignore b/doc/.gitignore
index 9d45a89..d33dca5 100644
--- a/doc/.gitignore
+++ b/doc/.gitignore
@@ -3,3 +3,7 @@ b.1
build2-*-manual.xhtml
*.ps
*.pdf
+
+# Auto-extracted documentation.
+#
+functions-*.cli
diff --git a/doc/buildfile b/doc/buildfile
index c797761..5508ddb 100644
--- a/doc/buildfile
+++ b/doc/buildfile
@@ -11,8 +11,286 @@ xhtml{*}: extension = xhtml
css{common pre-box man} \
file{man-*}
+# @@ TODO: why no testscript manual?
+
./: doc{build2-build-system-manual*} \
css{code-box common doc pre-box toc} \
file{manual.cli doc-* *.html2ps}
./: file{cli.sh}
+
+# The build2 function documentation format for auto-extraction.
+#
+# Each listed .cxx file is expected to provide functions for one function
+# family. In order to plug a new family/file, perform the following steps:
+#
+# 1. List the corresponding functions-<family>.cxx file stem below.
+# 2. Add a section and source the generated .cli file in manual.cli.
+#
+# The functions-<family>.cxx file is expected to contain one or more comments
+# in the following form:
+#
+# // <synopsis-line>+
+# // <blank-line>
+# // (<paragraph-line>+|<code-block-line>+
+# // <blank-line>)+
+#
+# That is, the comment starts with one or more synopsis lines followed by a
+# blank line followed by a mixture of text paragraphs and/or preformatted code
+# blocks separated by blank lines. The comment must be terminated with a blank
+# line. See functions-regex.cxx for comprehensive examples.
+#
+# The synopsis line should be in the form:
+#
+# // $[<family>.]<name>(...)
+#
+# Each synopsis line may or may not be qualified with <family>. The rule is
+# as follows: If the function can only be called qualified, then the synopsis
+# should contain a single qualified line. If the function can be called
+# unqualified, then the synopsis should contain a single unqualified line.
+# If some signatures can be called unqualified while some -- only qualified,
+# then there should be both qualified and unqualified lines. Note that there
+# can also be functions with different <name>s in a single synopsis block.
+#
+# The text paragraphs may contain `...` and <...> fragments which are
+# translated to \c{} and \ci{}, respectively. Note that these fragments cannot
+# span multiple lines.
+#
+# The preformatted code blocks must be indented four spaces (not counting
+# the space after //).
+#
+# There is a problem with distinguishing blanks within a code block and a blank
+# that separates the code block from the subsequent paragraph (or another code
+# block). Strictly speaking, such a blank should be indented with four spaces
+# but trailing spaces at the end of the line are generally frowned upon and in
+# our code should be automatically zapped on save.
+#
+# So what we are going to do is treat a single blank line between two code
+# lines as belonging to the code block rather than separating two code
+# blocks. The latter can be achieved with a double blank line. Note that this
+# means we cannot have double blank lines within a code block.
+
+# @@ TODO: using file{.cli}, change to cli{} once switch to ad hoc recipes.
+# @@ TODO: get rid of backlink below once switch to ad hoc recipes.
+
+for ff: functions-builtin \
+ functions-string \
+ functions-integer \
+ functions-json \
+ functions-bool \
+ functions-path \
+ functions-name \
+ functions-target \
+ functions-regex \
+ functions-process \
+ functions-filesystem \
+ functions-project-name \
+ functions-process-path \
+ functions-target-triplet
+{
+ alias{functions}: file{$(ff).cli}: $src_root/libbuild2/cxx{$ff}
+ file{$(ff).cli}: backlink = true # @@ TMP until migrate to recipe (see cli.sh)
+}
+
+file{~'/(functions-.+)\.cli/'}: cxx{~'/\1/'}
+{{
+ diag doc $< -> $>
+
+ i = $path($<) # Input.
+ o = $path($>) # Output.
+
+ # Extract the family name.
+ #
+ family = $regex.replace($name($<), 'functions-(.+)', '\1')
+ family = $regex.replace($family, '-', '_')
+
+ echo "// Auto-extracted from $leaf($i) for \$$(family).*\(\)" >$o
+
+ # The overall plan is as follows: read the file line by line recognizing the
+ # function documentation comments and maintaining the parsing state.
+ #
+ # Parsing state, one of:
+ #
+ # none -- outside of a documentation comment
+ # syno -- inside synopsis
+ # para -- inside text
+ # code -- inside preformatted code block
+ # blnk -- blank line separating synopsis/para/code
+ #
+ s = none # Current state.
+ p = none # Previous state.
+
+ ln = [uint64] 0 # Line number.
+ for -n l <=$i
+ ln += 1
+
+  # Look for a C++ comment and extract its text.
+ #
+ t = $regex.match($l, '\s*// ?(.*)', return_subs)
+
+ # Note that when writing the output we use the "leading blank line" rather
+ # than trailing approach. That is, we write the blank before starting the
+ # next block rather than after.
+
+ if ($t == [null])
+ if ($s != 'none')
+ if ($s != 'blnk')
+ exit "$i:$ln: blank line expected after description"
+ end
+
+ # Close delayed code block (see below for details).
+ #
+ if ($p == 'code')
+ echo "\\" >>$o # end code
+ end
+
+ echo "\"" >>$o # end cli doc string
+ end
+
+ p = $s
+ s = 'none'
+ else
+ # This is a comment. What we do next depends on which state we are in.
+ #
+ if ($s == 'none' || $s == 'syno')
+ p = $s
+
+      # See if this is a synopsis line.
+ #
+ if $regex.match($t, '\$.+\(.+\)')
+ if ($s == 'none')
+ synopsis = [strings] # Accumulate synopsis lines.
+ s = 'syno'
+ end
+
+ synopsis += $t
+ elif ($s == 'syno')
+ if ($t != '')
+ exit "$i:$ln: blank line expected after synopsis"
+ end
+
+ echo "$\n\"" >>$o # start cli doc string
+
+ # Write the heading. Use the first function name as id.
+ #
+ # Note that while the functions in the synopsis could be
+ # unqualified, in the heading we always write them qualified. We
+ # also have to suppress duplicates since the same function can be
+ # mentioned in the synopsis both qualified and unqualified.
+ #
+ id = [null]
+ hs = [strings]
+ for t: $synopsis
+ t = $regex.replace($t, '\$(.+)\(.+\)', '\1') # Extract func name.
+ f = $regex.match($t, '(.+)\..+', return_subs) # Extract family.
+
+ if ($f == [null])
+ t = "$(family).$t" # Qualify.
+ elif ($f != $family)
+ exit "$i:$ln: function family in $t does not match $family"
+ end
+
+ if ($id == [null]) # First.
+ id = $regex.replace($t, '\.', '-')
+ end
+
+ # Suppress duplicates.
+ #
+ if! $find($hs, $t)
+ hs += $t
+ end
+ end
+
+ h = $regex.merge($hs, '(.+)', '\\c{$\1()}', ', ')
+
+ echo "\\h2#functions-$id|$h|$\n" >>$o # heading
+
+ echo "\\" >>$o # start synopsis
+ for t: $synopsis
+ echo $t >>$o # synopsis line
+ end
+ echo "\\" >>$o # end synopsis
+
+ s = 'blnk'
+ end
+ else # para|code|blnk
+ # See if this is a code line.
+ #
+ c = $regex.match($t, ' (.+)', return_subs)
+
+ if ($c != [null])
+ # Code line.
+ #
+ if ($s == 'para')
+ exit "$i:$ln: blank line expected before code block"
+ end
+
+ # Treat a single blank line between two code lines as belonging to
+ # the code block rather than separating two code blocks (see above
+ # for details).
+ #
+ if ($s == 'blnk')
+ if ($p == 'code')
+ echo '' >>$o # continue code, write preceding blank
+ s = 'code'
+ else
+ echo "$\n\\" >>$o # start code
+ end
+ end
+
+ echo $regex.replace($c, '"', '\\"') >>$o # write code line
+
+ p = $s
+ s = 'code'
+ elif ($t != '')
+ # Paragraph line.
+ #
+ if ($s == 'code')
+ exit "$i:$ln: blank line expected after code block"
+ end
+
+ # Close delayed code block (see above for details).
+ #
+ if ($p == 'code')
+ echo "\\" >>$o # end code
+ end
+
+ if ($s == 'blnk')
+ echo '' >>$o # start para
+ end
+
+          t = $regex.replace($t, '\\', '\\\\') # Escape backslashes.
+ t = $regex.replace($t, '"', '\\"') # Escape double quotes.
+
+ # Convert `` to \c{} and <> to \ci{}.
+ #
+ t = $regex.replace($t, '`([^`]+)`', '\\c{\1}')
+ t = $regex.replace($t, '<([^\s<>]+)>', '\\ci{\1}')
+
+ echo $t >>$o # write para line
+
+ p = $s
+ s = 'para'
+ else
+ # Blank line.
+ #
+
+ # Note that we delay closing the code block in case this blank line
+ # is followed by another code line (and is therefore treated as
+ # belonging to the code block; see above for details).
+ #
+ if ($s != 'code' && $p == 'code')
+ echo "\\" >>$o # end code
+ end
+
+ #if ($s == 'para')
+ # end para
+ #end
+
+ p = $s
+ s = 'blnk'
+ end
+ end
+ end
+ end
+}}
diff --git a/doc/cli.sh b/doc/cli.sh
index 011f482..398371c 100755
--- a/doc/cli.sh
+++ b/doc/cli.sh
@@ -1,6 +1,6 @@
#! /usr/bin/env bash
-version=0.15.0-a.0.z
+version=0.17.0-a.0.z
trap 'exit 1' ERR
set -o errtrace # Trap in functions.
@@ -47,6 +47,7 @@ function compile ()
--generate-html --html-suffix .xhtml \
--html-prologue-file man-prologue.xhtml \
--html-epilogue-file man-epilogue.xhtml \
+--link-regex '%b(#.+)?%build2-build-system-manual.xhtml$1%' \
../libbuild2/$n.cli
cli -I .. \
@@ -58,6 +59,7 @@ function compile ()
--generate-man --man-suffix .1 --ascii-tree \
--man-prologue-file man-prologue.1 \
--man-epilogue-file man-epilogue.1 \
+--link-regex '%b(#.+)?%$1%' \
../libbuild2/$n.cli
}
@@ -91,6 +93,13 @@ function xhtml_to_ps () # <from> <to> [<html2ps-options>]
function compile_doc () # <file> <prefix> <suffix>
{
+ local file="$1"
+ shift
+ local prefix="$1"
+ shift
+ local suffix="$1"
+ shift
+
cli -I .. \
-v version="$(echo "$version" | sed -e 's/^\([^.]*\.[^.]*\).*/\1/')" \
-v date="$date" \
@@ -104,11 +113,12 @@ function compile_doc () # <file> <prefix> <suffix>
--link-regex '%bdep([-.].+)%../../bdep/doc/bdep$1%' \
--link-regex '%testscript(#.+)?%build2-testscript-manual.xhtml$1%' \
--link-regex '%build2(#.+)?%build2-build-system-manual.xhtml$1%' \
---output-prefix "$2" \
---output-suffix "$3" \
-"$1"
+--output-prefix "$prefix" \
+--output-suffix "$suffix" \
+"${@}" \
+"$file"
- local n="$2$(basename -s .cli $1)$3"
+ local n="$prefix$(basename -s .cli $file)$suffix"
xhtml_to_ps "$n.xhtml" "$n-a4.ps" -f doc.html2ps:a4.html2ps
ps2pdf14 -sPAPERSIZE=a4 -dOptimize=true -dEmbedAllFonts=true "$n-a4.ps" "$n-a4.pdf"
@@ -117,7 +127,13 @@ function compile_doc () # <file> <prefix> <suffix>
ps2pdf14 -sPAPERSIZE=letter -dOptimize=true -dEmbedAllFonts=true "$n-letter.ps" "$n-letter.pdf"
}
-compile_doc manual.cli 'build2-build-system-'
+# @@ TODO: replace -I. with $out_base and get rid of backlinking once
+#    migrated to recipes.
+#
+# Note: we have to manually map \h to h2 since we break the doc string.
+#
+b update: alias{functions}
+compile_doc manual.cli 'build2-build-system-' '' --html-heading-map h=h2 -I .
compile_doc testscript.cli 'build2-' '-manual'
# Generate INSTALL in ../
diff --git a/doc/manual.cli b/doc/manual.cli
index b72700d..847691d 100644
--- a/doc/manual.cli
+++ b/doc/manual.cli
@@ -156,7 +156,7 @@ Let's now try to build and run our program (\c{b} is the build system driver):
$ cd hello/ # Change to project root.
$ b
-c++ cxx{hello}
+c++ cxx{hello} -> obje{hello}
ld exe{hello}
$ ls -1
@@ -177,7 +177,7 @@ Or, if we are on Windows and using Visual Studio:
> cd hello
> b
-c++ cxx{hello}
+c++ cxx{hello} -> obje{hello}
ld exe{hello}
> dir /b
@@ -235,7 +235,7 @@ noise, the commands being executed are by default shown abbreviated and with
the same target type notation as we used in the \c{buildfile}. For example:
\
-c++ cxx{hello}
+c++ cxx{hello} -> obje{hello}
ld exe{hello}
\
@@ -275,7 +275,8 @@ above listings. In our \c{buildfile} we refer to the executable target as
extension, if any, will be determined based on the compiler's target platform
by the rule doing the linking. In this sense, target types are a
platform-independent replacement of file extensions (though they do have other
-benefits, such as allowing non-file targets as well as being hierarchical).
+benefits, such as allowing non-file targets as well as being hierarchical;
+see \l{#targets-types Target Types} for details).
Let's revisit the dependency declaration line from our \c{buildfile}:
@@ -311,6 +312,16 @@ it searches for a target for the \c{cxx{hello\}} prerequisite. During this
search, the \c{extension} variable is looked up and its value is used to end
up with the \c{hello.cxx} file.
+\N|To resolve a rule match ambiguity or to override a default match \c{build2}
+uses \i{rule hints}. For example, if we wanted to link a C executable using the
+C++ link rule:
+
+\
+[rule_hint=cxx] exe{hello}: c{hello}
+\
+
+|
+
Here is our new dependency declaration again:
\
@@ -361,7 +372,7 @@ Nothing really new here: we've specified the default extension for the
prerequisites. If you have experience with other build systems, then
explicitly listing headers might seem strange to you. As will be discussed
later, in \c{build2} we have to explicitly list all the prerequisites of a
-target that should end up in a distribution of our project.
+target that should end up in a source distribution of our project.
\N|You don't have to list \i{all} headers that you include, only the ones
belonging to your project. Like all modern C/C++ build systems, \c{build2}
@@ -411,11 +422,11 @@ exe{hello}: {hxx cxx}{**}
development more pleasant and less error prone: you don't need to update your
\c{buildfile} every time you add, remove, or rename a source file and you
won't forget to explicitly list headers, a mistake that is often only detected
-when trying to build a distribution of a project. On the other hand, there is
-the possibility of including stray source files into your build without
-noticing. And, for more complex projects, name patterns can become fairly
-complex (see \l{#name-patterns Name Patterns} for details). Note also that on
-modern hardware the performance of wildcard searches hardly warrants a
+when trying to build a source distribution of a project. On the other hand,
+there is the possibility of including stray source files into your build
+without noticing. And, for more complex projects, name patterns can become
+fairly complex (see \l{#name-patterns Name Patterns} for details). Note also
+that on modern hardware the performance of wildcard searches hardly warrants a
consideration.
In our experience, when combined with modern version control systems like
@@ -448,7 +459,7 @@ invocation. In other words, expect an experience similar to a plain
\c{Makefile}.
One notable example where simple projects are handy is a \i{glue
-\c{buildfiles}} that \"pulls\" together several other projects, usually for
+\c{buildfile}} that \"pulls\" together several other projects, usually for
convenience of development. See \l{#intro-import Target Importation} for
details.|
@@ -587,7 +598,7 @@ configuration \i{persistent}. We will see an example of this shortly.
Next up are the \c{test}, \c{install}, and \c{dist} modules. As their names
suggest, they provide support for testing, installation and preparation of
-distributions. Specifically, the \c{test} module defines the \c{test}
+source distributions. Specifically, the \c{test} module defines the \c{test}
operation, the \c{install} module defines the \c{install} and \c{uninstall}
operations, and the \c{dist} module defines the \c{dist}
(meta-)operation. Again, we will try them out in a moment.
@@ -671,7 +682,8 @@ notation for \c{dir{\}}. As we will see shortly, it fits naturally with other
uses of directories in \c{buildfiles} (for example, in scopes).
The \c{dir{\}} target type is an \i{alias} (and, in fact, is derived from more
-general \c{alias{\}}). Building it means building all its prerequisites.
+general \c{alias{\}}; see \l{#targets-types Target Types} for
+details). Building it means building all its prerequisites.
\N|If you are familiar with \c{make}, then you can probably see the similarity
with the ubiquitous \c{all} pseudo-target. In \c{build2} we instead use
@@ -746,7 +758,7 @@ Let's take a look at a slightly more realistic root \c{buildfile}:
Here we have the customary \c{README.md} and \c{LICENSE} files as well as the
package \c{manifest}. Listing them as prerequisites achieves two things: they
will be installed if/when our project is installed and, as mentioned earlier,
-they will be included into the project distribution.
+they will be included into the project source distribution.
The \c{README.md} and \c{LICENSE} files use the \c{doc{\}} target type. We
could have used the generic \c{file{\}} but using the more precise \c{doc{\}}
@@ -805,7 +817,7 @@ in this new layout:
\
$ cd hello/ # Change to project root.
$ b
-c++ hello/cxx{hello}
+c++ hello/cxx{hello} -> hello/obje{hello}
ld hello/exe{hello}
$ tree ./
@@ -882,8 +894,8 @@ libhello/
└── README.md
\
-The overall layout (\c{build/}, \c{libhello/} source directory) as well as the
-contents of the root files (\c{bootstrap.build}, \c{root.build}, root
+The overall layout (\c{build/}, \c{libhello/} source subdirectory) as well as
+the contents of the root files (\c{bootstrap.build}, \c{root.build}, root
\c{buildfile}) are exactly the same. There is, however, the new file
\c{export.build} in \c{build/}, the new subdirectory \c{tests/}, and the
contents of the project's source subdirectory \c{libhello/} look quite a bit
@@ -963,7 +975,7 @@ To start, let's build it in the \c{hello-out/} directory next to the project:
$ b hello/@hello-out/
mkdir fsdir{hello-out/}
mkdir hello-out/fsdir{hello/}
-c++ hello/hello/cxx{hello}@hello-out/hello/
+c++ hello/hello/cxx{hello} -> hello-out/hello/obje{hello}
ld hello-out/hello/exe{hello}
$ ls -1
@@ -1043,21 +1055,25 @@ We could have also specified out for an in source build, but that's redundant:
$ b hello/@hello/
\
-There is another example of this elaborate target specification in the build
-diagnostics:
+There is another example of this elaborate target specification that can be
+seen in the build diagnostics, for instance, when installing headers of a
+library (the \c{install} operation is discussed in the next section):
\
-c++ hello/hello/cxx{hello}@hello-out/hello/
+$ b install: libhello/@libhello-out/
+...
+install libhello/libhello/hxx{hello}@libhello-out/libhello/ ->
+ /usr/local/include/
\
-Notice, however, that now the target (\c{cxx{hello\}}) is on the left of
+Notice, however, that now the target (\c{hxx{hello\}}) is on the left of
\c{@}, that is, in the src directory. It does, however, make sense if you
-think about it: our \c{hello.cxx} is a \i{source file}, it is not built and it
-resides in the project's source directory. This is in contrast, for example,
-to the \c{exe{hello\}} target which is the output of the build system and goes
-to the out directory. So in \c{build2} targets can be either in src or in out
-(there can also be \i{out of any project} targets, for example, installed
-files).
+think about it: our \c{hello.hxx} is a \i{source file}, in a sense that it is
+not built and it resides in the project's source directory. This is in
+contrast, for example, to the \c{exe{hello\}} target which is the output of
+the build system and goes to the out directory. So in \c{build2} targets can
+be either in src or in out (there can also be \i{out of any project} targets,
+for example, installed files).
The elaborate target specification can also be used in \c{buildfiles}. We
haven't encountered any so far because targets mentioned without explicit
@@ -1350,7 +1366,7 @@ prerequisite-specific). These and other variable-related topics will be
covered in subsequent sections.|
One typical place to find \c{src/out_root} expansions is in the include search
-path options. For example, the source directory \c{buildfile} generated by
+path options. For example, the source subdirectory \c{buildfile} generated by
\l{bdep-new(1)} for an executable project actually looks like this
(\c{poptions} stands for \i{preprocessor options}):
@@ -1418,7 +1434,7 @@ if ($cc.class == 'gcc')
}
if ($c.target.class != 'windows')
- c.libs += -lpthread # only C
+ c.libs += -ldl # only C
\
Additionally, as we will see in \l{#intro-operations-config Configuring},
@@ -1490,7 +1506,7 @@ In the few cases that we do, we use the following syntax:
If the scope directory is relative, then it is assumed to be relative to the
current scope. As an exercise for understanding, let's reimplement our
\c{hello} project as a single \c{buildfile}. That is, we move the contents of
-the source directory \c{buildfile} into the root \c{buildfile}:
+the source subdirectory \c{buildfile} into the root \c{buildfile}:
\
$ tree hello/
@@ -1520,7 +1536,7 @@ which explicitly opens scopes to define the build over the upstream project's
subdirectory structure.|
Seeing this merged \c{buildfile} may make you wonder what exactly caused the
-loading of the source directory \c{buildfile} in our normal setup. In other
+loading of the source subdirectory \c{buildfile} in our normal setup. In other
words, when we build our \c{hello} from the project root, who loads
\c{hello/buildfile} and why?
@@ -1634,6 +1650,15 @@ $ b update: hello/exe{hello} # Update specific target
$ b update: libhello/ tests/ # Update two targets.
\
+\N|If you are running \c{build2} from PowerShell, then you will need to use
+quoting when updating specific targets, for example:
+
+\
+$ b update: 'hello/exe{hello}'
+\
+
+|
+
Let's revisit \c{build/bootstrap.build} from our \c{hello} project:
\
@@ -1857,7 +1882,7 @@ $ b configure: hello/@hello-gcc/,forward
$ cd hello/ # Change to project root.
$ b
-c++ hello/cxx{hello}@../hello-gcc/hello/
+c++ hello/cxx{hello} -> ../hello-gcc/hello/obje{hello}
ld ../hello-gcc/hello/exe{hello}
ln ../hello-gcc/hello/exe{hello} -> hello/
\
@@ -1967,7 +1992,7 @@ actual output:
\
$ b test
-c++ hello/cxx{hello}
+c++ hello/cxx{hello} -> hello/obje{hello}
ld hello/exe{hello}
test hello/exe{hello}
--- test.out
@@ -2069,9 +2094,9 @@ expect to see when running the tests:
\
b test
-c++ hello/cxx{hello}
+c++ hello/cxx{hello} -> hello/obje{hello}
ld hello/exe{hello}
-test hello/testscript{testscript} hello/exe{hello}
+test hello/exe{hello} + hello/testscript{testscript}
hello/testscript:7:1: error: hello/hello exit code 0 == 0
info: stdout: hello/test-hello/missing-name/stdout
\
@@ -2118,7 +2143,7 @@ libhello/
\
Specifically, there is no \c{testscript} in \c{libhello/}, the project's
-source directory. Instead, we have the \c{tests/} subdirectory which itself
+source subdirectory. Instead, we have the \c{tests/} subdirectory which itself
looks like a project: it contains the \c{build/} subdirectory with all the
familiar files, etc. In fact, \c{tests} is a \i{subproject} of our
\c{libhello} project.
@@ -2293,36 +2318,40 @@ If the value of the \c{install} variable is not \c{false}, then it is normally
a relative path with the first path component being one of these names:
\
-name default override
----- ------- --------
-root config.install.root
+name default override
+---- ------- --------
+root config.install.root
-data_root root/ config.install.data_root
-exec_root root/ config.install.exec_root
+data_root root/ config.install.data_root
+exec_root root/ config.install.exec_root
-bin exec_root/bin/ config.install.bin
-sbin exec_root/sbin/ config.install.sbin
-lib exec_root/lib/ config.install.lib
-libexec exec_root/libexec/<project>/ config.install.libexec
-pkgconfig lib/pkgconfig/ config.install.pkgconfig
+bin exec_root/bin/ config.install.bin
+sbin exec_root/sbin/ config.install.sbin
+lib exec_root/lib/ config.install.lib
+libexec exec_root/libexec/<project>/ config.install.libexec
+pkgconfig lib/pkgconfig/ config.install.pkgconfig
-etc data_root/etc/ config.install.etc
-include data_root/include/ config.install.include
-share data_root/share/ config.install.share
-data share/<project>/ config.install.data
+etc data_root/etc/ config.install.etc
+include data_root/include/ config.install.include
+include_arch include/ config.install.include_arch
+share data_root/share/ config.install.share
+data share/<project>/ config.install.data
+buildfile share/build2/export/<project>/ config.install.buildfile
-doc share/doc/<project>/ config.install.doc
-legal doc/ config.install.legal
-man share/man/ config.install.man
-man<N> man/man<N>/ config.install.man<N>
+doc share/doc/<project>/ config.install.doc
+legal doc/ config.install.legal
+man share/man/ config.install.man
+man<N> man/man<N>/ config.install.man<N>
\
Let's see what's going on here: The default install directory tree is derived
from the \c{config.install.root} value but the location of each node in this
tree can be overridden by the user that installs our project using the
-corresponding \c{config.install.*} variables. In our \c{buildfiles}, in turn,
-we use the node names instead of actual directories. As an example, here is a
-\c{buildfile} fragment from the source directory of our \c{libhello} project:
+corresponding \c{config.install.*} variables (see the \l{#module-install
+\c{install}} module documentation for details on their meaning). In our
+\c{buildfiles}, in turn, we use the node names instead of actual
+directories. As an example, here is a \c{buildfile} fragment from the source
+subdirectory of our \c{libhello} project:
\
hxx{*}:
@@ -2348,7 +2377,7 @@ root/include/libhello/
In the above \c{buildfile} fragment we also see the use of the
\c{install.subdirs} variable. Setting it to \c{true} instructs the \c{install}
module to recreate subdirectories starting from this point in the project's
-directory hierarchy. For example, if our \c{libhello/} source directory had
+directory hierarchy. For example, if our \c{libhello/} source subdirectory had
the \c{details/} subdirectory with the \c{utility.hxx} header, then this
header would have been installed as
\c{.../include/libhello/details/utility.hxx}.
@@ -2378,11 +2407,11 @@ lib{hello}: cxx.pkgconfig.include = include/hello/
\h2#intro-operations-dist|Distributing|
The last module that we load in our \c{bootstrap.build} is \c{dist} which
-provides support for the preparation of distributions and defines the \c{dist}
-meta-operation. Similar to \c{configure}, \c{dist} is a meta-operation rather
-than an operation because, conceptually, we are preparing a distribution for
-performing operations (like \c{update}, \c{test}) on targets rather than
-targets themselves.
+provides support for the preparation of source distributions and defines the
+\c{dist} meta-operation. Similar to \c{configure}, \c{dist} is a
+meta-operation rather than an operation because, conceptually, we are
+preparing a distribution for performing operations (like \c{update}, \c{test})
+on targets rather than targets themselves.
The preparation of a correct distribution requires that all the necessary
project files (sources, documentation, etc) be listed as prerequisites in the
@@ -2462,7 +2491,7 @@ $ b dist
Let's now take a look at an example of customizing what gets distributed.
Most of the time you will be using this mechanism to include certain targets
-from out. Here is a fragment from the \c{libhello} source directory
+from out. Here is a fragment from the \c{libhello} source subdirectory
\c{buildfile}:
\
@@ -2500,9 +2529,9 @@ state identical to distributed.
\h#intro-import|Target Importation|
Recall that if we need to depend on a target defined in another \c{buildfile}
-within our project, then we simply include said \c{buildfile} and reference
-the target. For example, if our \c{hello} included both an executable and a
-library in separate subdirectories next to each other:
+within our project, then we simply include the said \c{buildfile} and
+reference the target. For example, if our \c{hello} included both an
+executable and a library in separate subdirectories next to each other:
\
hello/
@@ -2577,9 +2606,9 @@ source directory to use its in source build (\c{out_root\ ==\ src_root}):
\
$ b hello/ config.import.libhello=libhello/
-c++ libhello/libhello/cxx{hello}
+c++ libhello/libhello/cxx{hello} -> libhello/libhello/objs{hello}
ld libhello/libhello/libs{hello}
-c++ hello/hello/cxx{hello}
+c++ hello/hello/cxx{hello} -> hello/hello/obje{hello}
ld hello/hello/exe{hello}
\
@@ -2594,9 +2623,9 @@ $ b configure: hello/@hello-clang/ config.cxx=clang++ \
config.import.libhello=libhello-clang/
$ b hello-clang/
-c++ libhello/libhello/cxx{hello}@libhello-clang/libhello/
+c++ libhello/libhello/cxx{hello} -> libhello-clang/libhello/objs{hello}
ld libhello-clang/libhello/libs{hello}
-c++ hello/hello/cxx{hello}@hello-clang/hello/
+c++ hello/hello/cxx{hello} -> hello-clang/hello/obje{hello}
ld hello-clang/hello/exe{hello}
\
@@ -2634,7 +2663,8 @@ subproject, or from an installation directory.
\N|Importation of an installed library will work even if it is not a
\c{build2} project. Besides finding the library itself, the link rule will
also try to locate its \c{pkg-config(1)} file and, if present, extract
-additional compile/link flags from it. The link rule also automatically
+additional compile/link flags from it (see \l{#cc-import-installed Importation
+of Installed Libraries} for details). The link rule also automatically
produces \c{pkg-config(1)} files for libraries that it installs.|
\N|A common problem with importing and using third-party C/C++ libraries is
@@ -2872,7 +2902,16 @@ recursively. And when the library is installed, this information is carried
over to its \c{pkg-config(1)} file.
\N|Similar to the \c{c.*} and \c{cc.*} sets discussed earlier, there are also
-\c{c.export.*} and \c{cc.export.*} sets.|
+\c{c.export.*} and \c{cc.export.*} sets.
+
+Note, however, that there is no \c{*.export.coptions} since a library imposing
+compilation options on its consumers is bad practice (too coarse-grained, does
+not compose, etc). Instead, the recommended approach is to specify in the
+library documentation that it expects its consumers to use a certain
+compilation option. And if your library is unusable without exporting a
+compilation option and you are sure benefits outweigh the drawbacks, then you
+can specify it as part of \c{*.export.poptions} (it is still a good idea to
+prominently document this).|
Here are the parts relevant to the library metadata protocol in the above
\c{buildfile}:
@@ -2970,7 +3009,12 @@ memory.
Note also that this only applies to shared libraries. In case of static
libraries, both interface and implementation dependencies are always linked,
-recursively.|
+recursively. Specifically, when linking a shared library, only libraries
+specified in its \c{*.export.libs} are linked. While when linking a static
+library, all its library prerequisites as well as those specified in
+\c{*.libs} are linked. Note that \c{*.export.libs} is not used when linking a
+static library since it is naturally assumed that all such libraries are also
+specified as library prerequisites or in \c{*.libs}.|
The remaining lines in the library metadata fragment are:
@@ -3029,6 +3073,16 @@ a binary-ful (\i{binful}) shared variants. Note also that binless libraries
can depend on binful libraries and are fully supported where the
\c{pkg-config(1)} functionality is concerned.
+One counter-intuitive aspect of having a binless library that depends on a
+system binful library, for example, \c{-lm}, is that you still have to specify
+the system library in both \c{*.export.libs} and \c{*.libs} because the latter
+is used when linking the static variant of the binless library. For example:
+
+\
+cxx.libs = -lm
+lib{hello}: cxx.export.libs = -lm
+\
+
If you are creating a new library with \l{bdep-new(1)} and are certain that it
will always be binless and in all configurations, then you can produce a
simplified \c{buildfile} by specifying the \c{binless} option, for example:
@@ -3143,9 +3197,10 @@ hello/
└── buildfile
$ b hello/
-c++ hello/libhello/libhello/cxx{hello}
+c++ hello/libhello/libhello/cxx{hello} ->
+ hello/libhello/libhello/objs{hello}
ld hello/libhello/libhello/libs{hello}
-c++ hello/hello/cxx{hello}
+c++ hello/hello/cxx{hello} -> hello/hello/obje{hello}
ld hello/hello/exe{hello}
\
@@ -3200,7 +3255,7 @@ configuration inheritance. As an example, let's configure the above bundled
\
$ b configure: hello/ config.cxx=clang++ config.cxx.coptions=-g
-$ b tree
+$ tree
hello/
├── build/
│ ├── config.build
@@ -3449,7 +3504,7 @@ example:
h{config}: in{config}
{
in.symbol = '@'
- in.substitution = lax
+ in.mode = lax
SYSTEM_NAME = $c.target.system
SYSTEM_PROCESSOR = $c.target.cpu
@@ -3583,8 +3638,9 @@ info $path.directory($src_base) # $src_base
info $path.base($path.leaf($src_base)) # foo
\
-Note that functions in \c{build2} are \i{pure} in a sense that they do not
-alter the build state in any way.
+Note that the majority of functions in \c{build2} are \i{pure} in a sense that
+they do not alter the build state in any way (see \l{#functions Functions} for
+details).
\N|Functions in \c{build2} are currently defined either by the build system
core or build system modules and are implemented in C++. In the future it will
@@ -4103,7 +4159,7 @@ source subdirectory \c{buildfile} of an executable created with this option:
# Unit tests.
#
-exe{*.test}
+exe{*.test}:
{
test = true
install = false
@@ -4192,18 +4248,8 @@ files as belonging to unit tests. Because it is a second-level extension, we
have to indicate this fact to the pattern matching machinery with the trailing
triple dot (meaning \"there are more extensions coming\"). If we didn't do
that, \c{.test} would have been treated as a first-level extension explicitly
-specified for our source files.
-
-\N|If you need to specify a name that does not have an extension, then end it
-with a single dot. For example, for a header \c{utility} you would write
-\c{hxx{utility.\}}. If you need to specify a name with an actual trailing dot,
-then escape it with a double dot, for example, \c{hxx{utility..\}}.
-
-More generally, anywhere in a name, a double dot can be used to specify a dot
-that should not be considered the extension separator while a triple dot \-
-which should. For example, in \c{obja{foo.a.o\}} the extension is \c{.o} and
-if instead we wanted \c{.a.o} to be considered the extension, then we could
-rewrite it either as \c{obja{foo.a..o\}} or as \c{obja{foo...a.o\}}.|
+specified for our source files (see \l{#targets-types Target Types} for
+details).
The next couple of lines set target type/pattern-specific variables to treat
all unit test executables as tests that should not be installed:
@@ -4310,10 +4356,10 @@ latter is used to update generated source code (such as headers) that is
required to complete the match.|
Debugging issues in each phase requires different techniques. Let's start with
-the load phase. As mentioned in \l{#intro-lang Build Language}, \c{buildfiles}
-are processed linearly with directives executed and variables expanded as they
-are encountered. As we have already seen, to print a variable value we can use
-the \c{info} directive. For example:
+the load phase. As mentioned in \l{#intro-lang Buildfile Language},
+\c{buildfiles} are processed linearly with directives executed and variables
+expanded as they are encountered. As we have already seen, to print a variable
+value we can use the \c{info} directive. For example:
\
x = X
@@ -4408,7 +4454,7 @@ scopes. For that we use the \c{dump} directive.
Without any arguments, \c{dump} prints (to \c{stderr}) the contents of the
scope it was encountered in and at that point of processing the \c{buildfile}.
Its output includes variables, targets and their prerequisites, as well as
-nested scopes, recursively. As an example, let's print the source directory
+nested scopes, recursively. As an example, let's print the source subdirectory
scope of our \c{hello} executable project. Here is its \c{buildfile} with
the \c{dump} directive at the end:
@@ -4443,12 +4489,12 @@ Instead of printing the entire scope, we can also print individual targets by
specifying one or more target names in \c{dump}. To make things more
interesting, let's convert our \c{hello} project to use a utility library,
similar to the unit testing setup (\l{#intro-unit-test Implementing Unit
-Testing}). We will also link to the \c{pthread} library to see an example of a
+Testing}). We will also link to the \c{dl} library to see an example of a
target-specific variable being dumped:
\
exe{hello}: libue{hello}: bin.whole = false
-exe{hello}: cxx.libs += -lpthread
+exe{hello}: cxx.libs += -ldl
libue{hello}: {hxx cxx}{**}
dump exe{hello}
@@ -4460,7 +4506,7 @@ The output will look along these lines:
buildfile:5:1: dump:
/tmp/hello/hello/exe{hello.?}:
{
- [strings] cxx.libs = -lpthread
+ [strings] cxx.libs = -ldl
}
/tmp/hello/hello/exe{hello.?}: /tmp/hello/hello/:libue{hello.?}:
{
@@ -4471,7 +4517,8 @@ buildfile:5:1: dump:
The output of \c{dump} might look familiar: in \l{#intro-dirs-scopes Output
Directories and Scopes} we've used the \c{--dump} option to print the entire
build state, which looks pretty similar. In fact, the \c{dump} directive uses
-the same mechanism but allows us to print individual scopes and targets.
+the same mechanism but allows us to print individual scopes and targets from
+within a \c{buildfile}.
There is, however, an important difference to keep in mind: \c{dump} prints
the state of a target or scope at the point in the \c{buildfile} load phase
@@ -4485,6 +4532,9 @@ a result, while the \c{dump} directive should be sufficient in most cases,
sometimes you may need to use the \c{--dump} option to examine the build state
just before rule execution.
+\N|It is possible to limit the output of \c{--dump} to specific scopes and/or
+targets with the \c{--dump-scope} and \c{--dump-target} options.|
+
Let's now move from state to behavior. As we already know, to see the
underlying commands executed by the build system we use the \c{-v} options
(which is equivalent to \c{--verbose\ 2}). Note, however, that these are
@@ -4561,6 +4611,25 @@ Higher verbosity levels result in more and more tracing statements being
printed. These include \c{buildfile} loading and parsing, prerequisite to
target resolution, as well as build system module and rule-specific logic.
+While the tracing statements can be helpful in understanding what is
+happening, they don't make it easy to see why things are happening a
+certain way. In particular, one question that is often encountered during
+build troubleshooting is which dependency chain causes matching or execution
+of a particular target. These questions can be answered with the help of
+the \c{--trace-match} and \c{--trace-execute} options. For example, if we
+want to understand what causes the update of \c{obje{hello\}} in the
+\c{hello} project above:
+
+\
+$ b -s --trace-execute 'obje{hello}'
+info: updating hello/obje{hello}
+ info: using rule cxx.compile
+ info: while updating hello/libue{hello}
+ info: while updating hello/exe{hello}
+ info: while updating dir{hello/}
+ info: while updating dir{./}
+\
+
Another useful diagnostics option is \c{--mtime-check}. When specified, the
build system performs a number of file modification time sanity checks that
can be helpful in diagnosing spurious rebuilds.
@@ -4605,15 +4674,20 @@ cross-compilation (specifically, inability to run tests).
As a result, we recommend using \i{expectation-based} configuration where your
project assumes a feature to be available if certain conditions are
-met. Examples of such conditions at the source code level include feature
-test macros, platform macros, runtime library macros, compiler macros, etc.,
-with the build system modules exposing some of the same information via
-variables to allow making similar decisions in \c{buildfiles}. Another
-alternative is to automatically adapt to missing features using more advanced
-techniques such as C++ SFINAE. And in situations where none of this is
-possible, we recommend delegating the decision to the user via a configuration
-value. Our experience with \c{build2} as well as those of other large
-cross-platform projects such as Boost show that this is a viable strategy.
+met. Examples of such conditions at the source code level include feature test
+macros, platform macros, runtime library macros, compiler macros, etc., with
+the build system modules exposing some of the same information via variables
+to allow making similar decisions in \c{buildfiles}. The standard
+pre-installed \l{https://github.com/build2/libbuild2-autoconf/ \c{autoconf}}
+build system module provides emulation of GNU \c{autoconf} using this
+approach.
+
+Another alternative is to automatically adapt to missing features using more
+advanced techniques such as C++ SFINAE. And in situations where none of this
+is possible, we recommend delegating the decision to the user via a
+configuration value. Our experience with \c{build2} as well as those of other
+large cross-platform projects such as Boost show that this is a viable
+strategy.
Having said that, \c{build2} does provide the ability to extract configuration
information from the environment (\c{$getenv()} function) or other tools
@@ -4728,6 +4802,9 @@ executable target in the \c{<project>%exe{<project>\}} form, the
\c{config.<project>} variable is treated as an alias for
\c{config.import.<project>.<project>.exe}.
+For an imported \c{buildfile}, \c{<project>} may refer to either the importing
+project or the project from which the said \c{buildfile} was imported.
+
The build system core reserves \c{build} and \c{import} as the second
component in configuration variables as well as \c{configured} as the third
and subsequent components.|
@@ -4889,6 +4966,32 @@ if! $defined(config.libhello.database)
fail 'config.libhello.database must be specified'
\
+\N|A configuration variable without a default value is omitted from
+\c{config.build} unless the value is specified by the user. This semantics is
+useful for values that are normally derived from other configuration values
+but could also be specified by the user. If the value is derived, then we
+don't want it saved in \c{config.build} since that would prevent it from
+being re-derived if the configuration values it is based on are changed.
+For example:
+
+\
+config [strings] config.hello.database
+
+assert ($size($config.hello.database) > 0) \
+ 'database must be specified with config.hello.database'
+
+config [bool, config.report.variable=multi] config.hello.multi_database
+
+multi = ($defined(config.hello.multi_database) \
+ ? $config.hello.multi_database \
+         : $size($config.hello.database) > 1)
+
+assert ($multi || $size(config.hello.database) == 1) \
+ 'one database can be specified if config.hello.multi_database=false'
+\
+
+|
+
If computing the default value is expensive or requires elaborate logic, then
the handling of a configuration variable can be broken down into two steps
along these lines:
@@ -4989,9 +5092,96 @@ $ b -v config.libhello.woptions=-Wno-extra
g++ ... -Wall -Wextra -Wno-extra -Werror ...
\
-While we have already seen some examples of how to propagate the configuration
-values to our source code, \l{#proj-config-propag Configuration Propagation}
-discusses this topic in more detail.
+If you do not plan to package your project, then the above rules are the only
+constraints you have. However, if your project is also a package, then other
+projects that use it as a dependency may have preferences and requirements
+regarding its configuration. And it becomes the job of the package manager
+(\c{bpkg}) to negotiate a suitable configuration between all the dependents of
+your project (see \l{bpkg#dep-config-negotiation Dependency Configuration
+Negotiation} for details). This can be a difficult problem to solve optimally
+in a reasonable time and to help the package manager come up with the best
+configuration quickly you should follow the below additional rules and
+recommendations for configuration of packages (but which are also generally
+good ideas):
+
+\ol|
+
+\li|Prefer \c{bool} configuration variables. For example, if your project
+ supports a fixed number of backends, then provide a \c{bool} variable to
+ enable each rather than a single variable that lists all the backends to
+ be enabled.|
+
+\li|Avoid project configuration variable dependencies, that is, where the
+ default value of one variable depends on the value of another. But if you
+ do need such a dependency, make sure it is expressed using the original
+ \c{config.<project>.*} variables rather than any intermediate/computed
+ values. For example:
+
+ \
+ # Enable Y only if X is enabled.
+ #
+ config [bool] config.hello.x ?= false
+   config [bool] config.hello.y ?= $config.hello.x
+ \
+
+ |
+
+\li|Do not make project configuration variables conditional. In other words,
+ the set of configuration variables and their types should be a static
+ property of the project. If you do need to make a certain configuration
+ variable \"unavailable\" or \"disabled\" if certain conditions are met
+ (for example, on a certain platform or based on the value of another
+ configuration variable), then express this with a default value and/or a
+ check. For example:
+
+ \
+ windows = ($cxx.target.class == 'windows')
+
+ # Y should only be enabled if X is enabled and we are not on
+ # Windows.
+ #
+ config [bool] config.hello.x ?= false
+ config [bool] config.hello.y ?= ($config.hello.x && !$windows)
+
+   if $config.hello.y
+ {
+ assert $config.hello.x \"Y can only be enabled if X is enabled\"
+ assert (!$windows) \"Y cannot be enabled on Windows\"
+ }
+ \
+
+ |
+
+|
+
+Additionally, if you wish to factor some \c{config} directives into a separate
+file (for example, if you have a large number of them or you would like to
+share them with subprojects) and source it from your \c{build/root.build},
+then it is recommended that you place this file into the \c{build/config/}
+subdirectory, where the package manager expects to find such files (see
+\l{bpkg#package-skeleton Package Build System Skeleton} for background). For
+example:
+
+\
+# root.build
+#
+
+...
+
+source $src_root/build/config/common.build
+\
+
+\N|If you would prefer to keep such a file in a different location (for
+example, because it contains things other than \c{config} directives), then
+you will need to manually list it in your package's \c{manifest} file, see the
+\l{bpkg#manifest-package-build-file \c{build-file}} value for details.|
+
+Another effect of the \c{config} directive is to print the configuration
+variable in the project's configuration report. This functionality is
+discussed in the following section. While we have already seen some examples
+of how to propagate the configuration values to our source code,
+\l{#proj-config-propag Configuration Propagation} discusses this topic in more
+detail.
\h#proj-config-report|Configuration Report|
@@ -5106,6 +5296,29 @@ config libhello@/tmp/libhello/
woptions -Wall -Wextra -Wno-extra -Werror
\
+The \c{config.report.module} attribute can be used to override the reporting
+module name, that is, \c{config} in the \c{config\ libhello@/tmp/libhello/}
+line above. It is primarily useful in imported \c{buildfiles} that wish to
+report non-\c{config.*} variables under their own name. For example:
+
+\
+config [string] config.rtos.board
+
+# Load the board description and report key information such as the
+# capability revoker.
+#
+...
+revoker = ...
+
+config [config.report.module=rtos] revoker
+\
+
+\
+$ b config.rtos.board=ibex-safe-simulator -v
+rtos hello@/tmp/hello/
+ board ibex-safe-simulator
+ revoker hardware
+\
\h#proj-config-propag|Configuration Propagation|
@@ -5375,6 +5588,684 @@ configuration header into two, one public and installed while the other
private.|
+\h1#targets|Targets and Target Types|
+
+\N{This chapter is a work in progress and is incomplete.}
+
+
+\h#targets-types|Target Types|
+
+A target type is part of a target's identity. The core idea behind the concept
+of target types is to abstract away from file extensions which can vary from
+project to project (for example, C++ source files extensions) or from platform
+to platform (for example, executable file extensions). It also allows us to
+have non-file-based targets.
+
+Target types form a \i{base-derived} inheritance tree. The root of this tree
+is the abstract \c{target{\}} type. The \c{build2} core defines a number of
+standard target types, such as \c{file{\}}, \c{doc{\}}, and \c{exe{\}}. Build
+system modules can define additional target types that are based on the
+standard ones (or on types defined by other modules). For example, the \c{c}
+module that provides the C compilation support defines the \c{h{\}} and
+\c{c{\}} target types. Finally, \c{buildfiles} can derive project-local target
+types using the \c{define} directive.
+
+\N|If a target type represents a file type with a well-established extension,
+then by convention such an extension is used as the target type name. For
+example, the C language header and source files use the \c{.h} and \c{.c}
+extensions and the target types are called \c{h{\}} and \c{c{\}}.
+
+Speaking of conventions, as you may have noticed, when mentioning a target
+type we customarily add \c{{\}} after its name. We found that this helps with
+comprehension since target type names are often short (you can also search for
+\c{<type>{} to narrow it down to target types). In a way this is a similar
+approach to adding \c{()} after a function name except here we use \c{{\}},
+which mimics target type usage in target names, for example \c{c{hello\}} for
+\c{hello.c}.|
+
+The following listing shows the hierarchy of the standard target types defined
+by the \c{build2} core (the abstract target types are marked with \c{*}) while
+the following sections describe each standard target type in detail. For
+target types defined by a module refer to the respective module documentation.
+
+\
+ .-----target*------------.
+ | | |
+ mtime_target*---. alias fsdir
+ | | |
+ path_target* group dir
+ |
+ .---------file----.
+ | | |
+ .----doc-----. exe buildfile
+ | | |
+legal man manifest
+ |
+ man<N>
+\
+
+While target types replace (potentially variable) extensions, there still
+needs to be a mechanism for specifying them since in most cases targets have
+to be mapped to files. There are several ways this can be achieved.
+
+If a target type represents a file type with a well-established extension,
+then such an extension is normally used by default and we don't need to take
+any extra steps. For example, the \c{h{\}} and \c{c{\}} target types for C
+header and source files default to the \c{.h} and \c{.c} extensions,
+respectively, and if our project follows this convention, then we can simply
+write:
+
+\
+exe{utility}: c{utility} h{utility}
+\
+
+And \c{c{utility\}} will be mapped to \c{utility.c} and \c{h{utility\}} \-
+to \c{utility.h}.
+
+There are two variants of this default extension case: fixed extension and
+customizable extension. A target type may choose to fix the default extension
+if it's a bad idea to deviate from the default extension. A good example of
+such a target is \c{man1{\}}, which fixes the default extension to be
+\c{.1}. More commonly, however, a target will have a default extension but
+will allow customizing it with the \c{extension} variable.
+
+A good example where extension customization is often required are the
+\c{hxx{\}} and \c{cxx{\}} target types for C++ header and source files, which
+default to the \c{.hxx} and \c{.cxx} extensions, respectively. If our project
+uses other extensions, for example, \c{.hpp} and \c{.cpp}, then we can adjust
+the defaults (typically done in \c{root.build}, after loading the \c{cxx}
+module):
+
+\
+hxx{*}: extension = hpp
+cxx{*}: extension = cpp
+\
+
+Then we can write:
+
+\
+exe{utility}: cxx{utility} hxx{utility}
+\
+
+And \c{cxx{utility\}} will be mapped to \c{utility.cpp} and \c{hxx{utility\}}
+\- to \c{utility.hpp}.
+
+What about \c{exe{utility\}}, where does its extension come from? This is an
+example of a target type with an extension that varies from platform to
+platform. In such cases the extension is expected to be assigned by the rule
+that matches the target. In the above example, the link rule from the \c{cxx}
+module that matches updating \c{exe{utility\}} will assign a suitable
+extension based on the target platform of the C++ compiler that it was
+instructed to use.
+
+Finally, it is always possible to specify the file extension explicitly as
+part of the target name. For example:
+
+\
+exe{utility}: cxx{utility.cc} hxx{utility.hh}
+\
+
+This is normally only needed if the default extension is not appropriate or if
+the target type does not have a default extension, as is the case, for
+example, for the \l{#targets-types-file \c{file{\}}} and \l{#targets-types-doc
+\c{doc{\}}} target types. This mechanism can also be used to override the
+automatically derived extension. For example:
+
+\
+exe{($cxx.target.class == 'windows' ? utility.com : utility)}: ...
+\
+
+\N|If you need to specify a name that does not have an extension, then end it
+with a single dot. For example, for a header \c{utility} you would write
+\c{hxx{utility.\}}. If you need to specify a name with an actual trailing dot,
+then escape it with a double dot, for example, \c{hxx{utility..\}}.
+
+More generally, anywhere in a name, a double dot can be used to specify a dot
+that should not be considered the extension separator while a triple dot \-
+which should. For example, in \c{obja{foo.a.o\}} the extension is \c{.o} and
+if instead we wanted \c{.a.o} to be considered the extension, then we could
+rewrite it either as \c{obja{foo.a..o\}} or as \c{obja{foo...a.o\}}.|
+
+To derive a new target type in a \c{buildfile} we use the \c{define}
+directive. Such target types are project-local, meaning they cannot be
+exported to other projects. Typically this is used to provide a more
+meaningful name to a set of files and also avoid having to specify their
+extensions explicitly. Compare:
+
+\
+./: doc{README.md PACKAGE-README.md INSTALL.md}
+\
+
+To:
+
+\
+define md: doc
+doc{*}: extension = md
+
+./: md{README PACKAGE-README INSTALL}
+\
+
+
+\h2#targets-types-target|\c{target{\}}|
+
+The \c{target{\}} target type is a root of the target type hierarchy. It is
+abstract and is not commonly used directly, except perhaps in patterns (target
+type/pattern-specific variable, pattern rules).
+
+
+\h2#targets-types-alias|\c{alias{\}} and \c{dir{\}}|
+
+The \c{alias{\}} target type is used for non-file-based targets that serve as
+aliases for their prerequisites.
+
+\N|Alias targets in \c{build2} are roughly equivalent to phony targets in
+\c{make}.|
+
+For example:
+
+\
+alias{tests}: exe{test1 test2 test3}
+\
+
+\
+$ b test: alias{tests}
+\
+
+An \c{alias{\}} target can also serve as an \"action\" if supplied with an ad
+hoc recipe (or matched by an ad hoc pattern rule). For example:
+
+\
+alias{strip}: exe{hello}
+{{
+ diag strip $<
+ strip $path($<)
+}}
+\
+
+The \c{dir{\}} target type is a special kind of alias that represents a
+directory. Building it means building everything inside the directory. See
+\l{#intro-proj-struct Project Structure} for background.
+
+A target without a type that ends with a directory separator (\c{/}) is
+automatically treated as \c{dir{\}}. For example, the following two lines are
+equivalent:
+
+\
+./: exe{test1 test2}
+dir{./}: exe{test1 test2}
+\
+
+Omitting the target type in such situations is customary.
+
+
+\h2#targets-types-fsdir|\c{fsdir{\}}|
+
+The \c{fsdir{\}} target type represents a filesystem directory. Unlike
+\c{dir{\}} above, it is not an alias and listing an \c{fsdir{\}} directory as
+a prerequisite of a target will cause that directory to be created on
+\c{update} and removed on \c{clean}.
+
+While we usually don't need to list explicit \c{fsdir{\}} prerequisites for
+our targets, one situation where this is necessary is when the target resides
+in a subdirectory that does not correspond to an existing source directory. A
+typical example of this situation is placing object files into subdirectories.
+Compare:
+
+\
+obj{foo}: c{foo}
+sub/obj{bar}: c{bar} fsdir{sub/}
+\
+
+
+\h2#targets-types-mtime-path|\c{mtime_target{\}} and \c{path_target{\}}|
+
+The \c{mtime_target{\}} target type represents a target that uses modification
+times to determine if it is out of date. The \c{path_target{\}} target type
+represents a target that has a corresponding filesystem entry. It is derived
+from \c{mtime_target{\}} and uses the modification time of that filesystem
+entry to determine if the target is out of date.
+
+Both of these target types are abstract and are not commonly used directly,
+except perhaps in patterns (target type/pattern-specific variable, pattern
+rules).
+
+
+\h2#targets-types-group|\c{group{\}}|
+
+The \c{group{\}} target type represents a user-defined explicit target group,
+that is, a target that has multiple member targets that are all built together
+with a single recipe.
+
+Normally this target type is not used to declare targets or prerequisites but
+rather as a base of a derived group. If desired, such a derived group can be
+marked with an attribute as \"see-through\", meaning that when the group is
+listed as a prerequisite of a target, the matching rule \"sees\" its members,
+rather than the group itself. For example:
+
+\
+define [see_through] thrift_cxx: group
+\
+
+
+\h2#targets-types-file|\c{file{\}}|
+
+The \c{file{\}} target type represents a generic file. This target type is
+used as a base for most of the file-based targets and can also be used to
+declare targets and prerequisites when there are no more specific target
+types.
+
+A target or prerequisite without a target type is automatically treated as
+\c{file{\}}. However, omitting a target type in such situations is not
+customary.
+
+The \c{file{\}} target type has no default extension and one cannot be
+assigned with the \c{extension} variable. As a result, if a \c{file{\}} target
+has an extension, then it must be specified explicitly as part of the target
+name. For example:
+
+\
+./: file{example.conf}
+\
+
+\h2#targets-types-doc|\c{doc{\}}, \c{legal{\}}, and \c{man{\}}|
+
+The \c{doc{\}} target type represents a generic documentation file. It has
+semantics similar to \c{file{\}} (from which it derives): it can be used as a
+base or to declare targets/prerequisites and there is no default extension. One
+notable difference, however, is that \c{doc{\}} targets are by default
+installed into the \c{doc/} installation location (see \l{#module-install
+\c{install} Module}). For example:
+
+\
+./: doc{README.md ChangeLog.txt}
+\
+
+The \c{legal{\}} target type is derived from \c{doc{\}} and represents a legal
+documentation file, such as a license, copyright notice, authorship
+information, etc. The main purpose of having a separate target type like this
+is to help with installing licensing-related files into a different
+location. To this effect, \c{legal{\}} targets are installed into the
+\c{legal/} installation location, which by default is the same as \c{doc/} but
+can be customized. For example:
+
+\
+./: legal{COPYRIGHT LICENSE AUTHORS.md}
+\
+
+The \c{man{\}} target type is derived from \c{doc{\}} and represents a manual
+page. This target type requires an explicit extension specification and is
+installed into the \c{man/} installation location.
+
+\N|If you are using the \c{man{\}} target type directly (instead of one of
+\c{man<N>{\}} described below), for example, to install a localized version of
+a man page, then you will likely need to adjust the installation location
+on the per target basis.|
+
+The \c{man<N>{\}} target types (where \c{<N>} is an integer between 1 and 9)
+are derived from \c{man{\}} and represent manual pages in the respective
+sections. These target types have fixed default extensions \c{.<N>} (but an
+explicit extension can still be specified, for example \c{man1{foo.1p\}}) and
+are installed into the \c{man<N>/} installation locations. For example:
+
+\
+./: man1{foo}
+\
+
+
+\h2#targets-types-exe|\c{exe{\}}|
+
+The \c{exe{\}} target type represents an executable file. Executables in
+\c{build2} appear in two distinct but sometimes overlapping contexts: We can
+build an executable target, for example from C source files. Or we can list an
+executable target as a prerequisite in order to execute it as part of a
+recipe. And sometimes this can be the same executable target. For example,
+one project may build an executable target that is a source code generator and
+another project may import this executable target and use it in its recipes in
+order to generate some source code.
+
+To support this semantics the \c{exe{\}} target type has a peculiar default
+extension logic. Specifically, if the \c{exe{\}} target is \"output\", then
+the extension is expected to be assigned by the matching rule according to the
+target platform for which this executable is built. But if the rule does not
+assign one, then we fall back to no extension (for example, a script). If,
+however, the
+\c{exe{\}} target is \"input\" (that is, it's listed as a prerequisite and
+there is no corresponding \"output\" target), then the extension of the host
+platform is used as the default.
+
+In all these cases the extension can also be specified explicitly. This, for
+example, would be necessary if the executable were a batch file:
+
+\
+h{generate}: exe{generate.bat}
+{{
+ diag $< -> $>
+ $< -o $path($>)
+}}
+\
+
+Here, without the explicit extension, the \c{.exe} extension would have been
+used by default.
+
+
+\h1#variables|Variables|
+
+\N{This chapter is a work in progress and is incomplete.}
+
+The following variable/value types can currently be used in \c{buildfiles}:
+
+\
+bool
+
+int64
+int64s
+
+uint64
+uint64s
+
+string
+strings
+string_set
+string_map
+
+path
+paths
+dir_path
+dir_paths
+
+json
+json_array
+json_object
+json_set
+json_map
+
+name
+names
+name_pair
+
+cmdline
+project_name
+target_triplet
+\
+
+Note that while expansions in the target and prerequisite-specific assignments
+happen in the corresponding target and prerequisite contexts, respectively,
+for type/pattern-specific assignments they happen in the scope context. Plus,
+a type/pattern-specific prepend/append is applied at the time of expansion for
+the actual target. For example:
+
+\
+x = s
+
+file{foo}: # target
+{
+ x += t # s t
+ y = $x y # s t y
+}
+
+file{foo}: file{bar} # prerequisite
+{
+ x += p # x t p
+ y = $x y # x t p y
+}
+
+file{b*}: # type/pattern
+{
+ x += w # <append w>
+ y = $x w # <assign s w>
+}
+
+x = S
+
+info $(file{bar}: x) # S w
+info $(file{bar}: y) # s w
+\
+
+
+\h1#functions|Functions|
+
+\N{This chapter is a work in progress and is incomplete.}
+
+
+Functions in \c{build2} are organized into families, such as the
+\c{$string.*()} family for manipulating strings or \c{$regex.*()} for working
+with regular expressions. Most functions are pure and those that are not,
+such as \c{$builtin.getenv()}, are explicitly documented as such.
+
+Some functions, such as from the \c{$regex.*()} family, can only be called
+fully qualified with their family name. For example:
+
+\
+if $regex.match($name, '(.+)-(.+)')
+ ...
+\
+
+While other functions can be called without explicit qualification. For
+example:
+
+\
+path = $getenv('PATH')
+\
+
+There are also functions that can be called unqualified only for certain types
+of arguments (this fact will be reflected in their synopsis and/or
+documentation). Note, however, that every function can always be called
+qualified.
+"
+
+// $builtin.*()
+//
+"
+\h#functions-builtin|Builtin Functions|
+
+The \c{$builtin.*()} function family contains fundamental \c{build2}
+functions.
+"
+source <functions-builtin.cli>;
+
+// $string.*()
+//
+"
+\h#functions-string|String Functions|
+"
+source <functions-string.cli>;
+
+
+// $integer.*()
+//
+"
+\h#functions-integer|Integer Functions|
+"
+source <functions-integer.cli>;
+
+
+// $bool.*()
+//
+"
+\h#functions-bool|Bool Functions|
+"
+source <functions-bool.cli>;
+
+
+// $path.*()
+//
+"
+\h#functions-path|Path Functions|
+
+The \c{$path.*()} function family contains functions that manipulate
+filesystem paths.
+"
+source <functions-path.cli>;
+
+
+// $name.*()
+//
+"
+\h#functions-name|Name Functions|
+
+The \c{$name.*()} function family contains functions that operate on target and
+prerequisite names. See also the \l{#functions-target \c{$target.*()} function
+family} for functions that operate on actual targets.
+"
+source <functions-name.cli>;
+
+
+// $target.*()
+//
+"
+\h#functions-target|Target Functions|
+
+The \c{$target.*()} function family contains functions that operate on
+targets. See also the \l{#functions-name \c{$name.*()} function family} for
+functions that operate on target (and prerequisite) names.
+"
+source <functions-target.cli>;
+
+
+// $regex.*()
+//
+"
+\h#functions-regex|Regex Functions|
+
+The \c{$regex.*()} function family contains functions that provide
+comprehensive regular expression matching and substitution facilities. The
+supported regular expression flavor is ECMAScript (more specifically,
+ECMA-262-based C++11 regular expressions).
+
+In the \c{$regex.*()} functions the substitution escape sequences in the
+format string (the \ci{fmt} argument) are extended with a subset of the Perl
+escape sequences: \c{\\n}, \c{\\u}, \c{\\l}, \c{\\U}, \c{\\L}, \c{\\E},
+\c{\\1} ... \c{\\9}, and \c{\\\\}. Note that the standard ECMAScript escape
+sequences (\c{$1}, \c{$2}, \c{$&}, etc) are still supported.
+
+Note that functions from the \c{$regex.*()} family can only be called fully
+qualified with their family name. For example:
+
+\
+if $regex.match($name, '(.+)-(.+)')
+ ...
+\
+
+"
+source <functions-regex.cli>;
+
+// $json.*()
+//
+"
+\h#functions-json|JSON Functions|
+
+The \c{$json.*()} function family contains functions that operate on the JSON
+types: \c{json}, \c{json_array}, and \c{json_object}. For example:
+
+\
+j = [json] one@1 two@abc three@([json] x@1 y@-1)
+
+for m: $j
+{
+ n = $member_name($m)
+ v = $member_value($m)
+
+ info $n $value_type($v) $v
+}
+\
+
+"
+source <functions-json.cli>;
+
+
+// $process.*()
+//
+"
+\h#functions-process|Process Functions|
+"
+source <functions-process.cli>;
+
+
+// $filesystem.*()
+//
+"
+\h#functions-filesystem|Filesystem Functions|
+"
+source <functions-filesystem.cli>;
+
+
+// $project_name.*()
+//
+"
+\h#functions-project_name|Project Name Functions|
+
+The \c{$project_name.*()} function family contains functions that operate on
+the \c{project_name} type.
+"
+source <functions-project-name.cli>;
+
+
+// $process_path.*()
+//
+"
+\h#functions-process-path|Process Path Functions|
+
+The \c{$process_path.*()} function family contains functions that operate on
+the \c{process_path} type and its extended \c{process_path_ex} variant. These
+types describe a path to an executable that, if necessary, has been found in
+\c{PATH}, completed with an extension, etc. The \c{process_path_ex} variant
+includes additional metadata, such as the stable process name for diagnostics
+and the executable checksum for change tracking.
+"
+source <functions-process-path.cli>;
+
+
+// $target_triplet.*()
+//
+"
+\h#functions-target-triplet|Target Triplet Functions|
+
+The \c{$target_triplet.*()} function family contains functions that operate on
+the \c{target_triplet} type that represents the ubiquitous
+\c{\i{cpu}-\i{vendor}-\i{os}} target platform triplet.
+"
+source <functions-target-triplet.cli>;
+
+
+"
+\h1#directives|Directives|
+
+\N{This chapter is a work in progress and is incomplete.}
+
+\h#directives-define|\c{define}|
+
+\
+define <derived>: <base>
+\
+
+Define a new target type \c{<derived>} by inheriting from existing target type
+\c{<base>}. See \l{#targets-types Target Types} for details.
+
+
+\h#directives-include|\c{include}|
+
+\
+include <file>
+include <directory>
+\
+
+Load the specified file (the first form) or \c{buildfile} in the specified
+directory (the second form). In both cases the file is loaded in the scope
+corresponding to its directory. Subsequent inclusions of the same file are
+automatically ignored. See also \l{#directives-source \c{source}}.
+
+
+\h#directives-source|\c{source}|
+
+
+\
+source <file>
+\
+
+Load the specified file in the current scope as if its contents were copied
+and pasted in place of the \c{source} directive. Note that subsequent sourcing
+of the same file in the same scope are not automatically ignored. See also
+\l{#directives-include \c{include}}.
+
\h1#attributes|Attributes|
@@ -5530,7 +6421,7 @@ exe{hello}: cxx{+{f* b*} -{foo bar}}
This is particularly useful if you would like to list the names to include or
exclude in a variable. For example, this is how we can exclude certain files
from compilation but still include them as ordinary file prerequisites (so
-that they are still included into the distribution):
+that they are still included into the source distribution):
\
exc = foo.cxx bar.cxx
@@ -5607,8 +6498,8 @@ patterns/matches that do not already contain an extension. Then the filesystem
search is performed for matching files.
For example, the \c{cxx{\}} target type obtains the default extension from the
-\c{extension} variable. Assuming we have the following line in our
-\c{root.build}:
+\c{extension} variable (see \l{#targets-types Target Types} for background).
+Assuming we have the following line in our \c{root.build}:
\
cxx{*}: extension = cxx
@@ -5632,101 +6523,6 @@ file-based, then the name pattern is returned as is (that is, as an ordinary
name). Project-qualified names are never considered to be patterns.
-\h1#variables|Variables|
-
-\N{This chapter is a work in progress and is incomplete.}
-
-The following variable/value types can currently be used in \c{buildfiles}:
-
-\
-bool
-
-int64
-int64s
-
-uint64
-uint64s
-
-string
-strings
-
-path
-paths
-dir_path
-dir_paths
-
-name
-names
-name_pair
-
-project_name
-target_triplet
-\
-
-Note that while expansions in the target and prerequisite-specific assignments
-happen in the corresponding target and prerequisite contexts, respectively,
-for type/pattern-specific assignments they happen in the scope context. Plus,
-a type/pattern-specific prepend/append is applied at the time of expansion for
-the actual target. For example:
-
-\
-x = s
-
-file{foo}: # target
-{
- x += t # s t
- y = $x y # s t y
-}
-
-file{foo}: file{bar} # prerequisite
-{
- x += p # x t p
- y = $x y # x t p y
-}
-
-file{b*}: # type/pattern
-{
- x += w # <append w>
- y = $x w # <assign s w>
-}
-
-x = S
-
-info $(file{bar}: x) # S w
-info $(file{bar}: y) # s w
-\
-
-
-\h1#directives|Directives|
-
-\N{This chapter is a work in progress and is incomplete.}
-
-\h#directives-include|\c{include}|
-
-\
-include <file>
-include <directory>
-\
-
-Load the specified file (the first form) or \c{buildfile} in the specified
-directory (the second form). In both cases the file is loaded in the scope
-corresponding to its directory. Subsequent inclusions of the same file are
-automatically ignored. See also \l{#directives-source \c{source}}.
-
-
-\h#directives-source|\c{source}|
-
-
-\
-source <file>
-\
-
-Load the specified file in the current scope as if its contents were copied
-and pasted in place of the \c{source} directive. Note that subsequent sourcing
-of the same file in the same scope are not automatically ignored. See also
-\l{#directives-include \c{include}}.
-
-
\h1#module-config|\c{config} Module|
\N{This chapter is a work in progress and is incomplete.}
@@ -6003,30 +6799,54 @@ of the Introduction, the \c{install} module defines the following standard
installation locations:
\
-name default config.* override
----- ------- -----------------
-root install.root
+name default config.install.*
+ (c.i.*) override
+---- ------- ----------------
+root c.i.root
+
+data_root root/ c.i.data_root
+exec_root root/ c.i.exec_root
-data_root root/ install.data_root
-exec_root root/ install.exec_root
+bin exec_root/bin/ c.i.bin
+sbin exec_root/sbin/ c.i.sbin
+lib exec_root/lib/<private>/ c.i.lib
+libexec exec_root/libexec/<private>/<project>/ c.i.libexec
+pkgconfig lib/pkgconfig/ c.i.pkgconfig
-bin exec_root/bin/ install.bin
-sbin exec_root/sbin/ install.sbin
-lib exec_root/lib/<private>/ install.lib
-libexec exec_root/libexec/<private>/<project>/ install.libexec
-pkgconfig lib/pkgconfig/ install.pkgconfig
+etc data_root/etc/ c.i.etc
+include data_root/include/<private>/ c.i.include
+include_arch include/ c.i.include_arch
+share data_root/share/ c.i.share
+data share/<private>/<project>/ c.i.data
+buildfile share/build2/export/<project>/ c.i.buildfile
-etc data_root/etc/ install.etc
-include data_root/include/<private>/ install.include
-share data_root/share/ install.share
-data share/<private>/<project>/ install.data
+doc share/doc/<private>/<project>/ c.i.doc
+legal doc/ c.i.legal
+man share/man/ c.i.man
+man<N> man/man<N>/ c.i.man<N>
+\
+
+The \c{include_arch} location is meant for architecture-specific files, such
+as configuration headers. By default it's the same as \c{include} but can be
+configured by the user to a different value (for example,
+\c{/usr/include/x86_64-linux-gnu/}) for platforms that support multiple
+architectures from the same installation location. This is how one would
+normally use it from a \c{buildfile}:
-doc share/doc/<private>/<project>/ install.doc
-legal doc/ install.legal
-man share/man/ install.man
-man<N> man/man<N>/ install.man<N>
+\
+# The configuration header may contain target architecture-specific
+# information so install it into include_arch/ instead of include/.
+#
+h{*}: install = include/libhello/
+h{config}: install = include_arch/libhello/
\
+The \c{buildfile} location is meant for exported buildfiles that can be
+imported by other projects. If a project contains any \c{**.build} buildfiles
+in its \c{build/export/} directory (or \c{**.build2} and \c{build2/export/} in
+the alternative naming scheme), then they are automatically installed into
+this location (recreating subdirectories).
+
The \c{<project>}, \c{<version>}, and \c{<private>} substitutions in these
\c{config.install.*} values are replaced with the project name, version, and
private subdirectory, respectively. If either is empty, then the corresponding
@@ -6045,7 +6865,9 @@ The private installation subdirectory is specified with the
directory and may include multiple components. For example:
\
-$ b install config.install.root=/usr/local/ config.install.private=hello/
+$ b install \
+ config.install.root=/usr/local/ \
+ config.install.private=hello/
\
\N|If you are relying on your system's dynamic linker defaults to
@@ -6063,6 +6885,153 @@ $ b install \
|
+
+\h#install-reloc|Relocatable Installation|
+
+A relocatable installation can be moved to a directory other than its original
+installation location. Note that the installation should be moved as a whole
+preserving the directory structure under its root (\c{config.install.root}).
+To request a relocatable installation, set the \c{config.install.relocatable}
+variable to \c{true}. For example:
+
+\
+$ b install \
+ config.install.root=/tmp/install \
+ config.install.relocatable=true
+\
+
+A relocatable installation is achieved by using paths relative to one
+filesystem entry within the installation to locate another. Some examples
+include:
+
+\ul|
+
+\li|Paths specified in \c{config.bin.rpath} are made relative using the
+\c{$ORIGIN} (Linux, BSD) or \c{@loader_path} (Mac OS) mechanisms.|
+
+\li|Paths in the generated \c{pkg-config} files are made relative to the
+\c{${pcfiledir\}} built-in variable.|
+
+\li|Paths in the generated installation manifest (\c{config.install.manifest})
+are made relative to the location of the manifest file.||
+
+While these common aspects are handled automatically, if a project relies on
+knowing its installation location, then it will most likely need to add manual
+support for relocatable installations.
+
+As an example, consider an executable that supports loading plugins and
+requires the plugin installation directory to be embedded into the executable
+during the build. The common way to support relocatable installations for such
+cases is to embed a path relative to the executable and complete it at
+runtime, normally by resolving the executable's path and using its directory
+as a base.
+
+If you would like to always use the relative path, regardless of whether the
+installation is relocatable or not, then you can obtain the library
+installation directory relative to the executable installation directory like
+this:
+
+\
+plugin_dir = $install.resolve($install.lib, $install.bin)
+\
+
+Alternatively, if you would like to continue using absolute paths for
+non-relocatable installations, then you can use something like this:
+
+\
+plugin_dir = $install.resolve( \
+ $install.lib, \
+ ($install.relocatable ? $install.bin : [dir_path] ))
+\
+
+Finally, if you are unable to support relocatable installations, the correct
+way to handle this is to assert this fact in \c{root.build} of your project,
+for example:
+
+\
+assert (!$install.relocatable) 'relocatable installation not supported'
+\
+
+
+\h#install-filter|Installation Filtering|
+
+While project authors determine what gets installed at the \c{buildfile}
+level, the users of the project can further filter the installation using the
+\c{config.install.filter} variable.
+
+The value of this variable is a list of key-value pairs that specify the
+filesystem entries to include or exclude from the installation. For example,
+the following filters will omit installing headers and static libraries
+(notice the quoting of the wildcard):
+
+\
+$ b install config.install.filter='include/@false \"*.a\"@false'
+\
+
+The key in each pair is a file or directory path or a path wildcard pattern.
+If a key is relative and contains a directory component or is a directory,
+then it is treated relative to the corresponding \c{config.install.*}
+location. Otherwise (simple path, normally a pattern), it is matched against
+the leaf of any path. Note that if an absolute path is specified, it should be
+without the \c{config.install.chroot} prefix.
+
+The value in each pair is either \c{true} (include) or \c{false} (exclude).
+The filters are evaluated in the order specified and the first match that is
+found determines the outcome. If no match is found, the default is to
+include. For a directory, while \c{false} means exclude all the sub-paths
+inside this directory, \c{true} does not mean that all the sub-paths will be
+included wholesale. Rather, the matched component of the sub-path is treated
+as included with the rest of the components matched against the following
+sub-filters. For example:
+
+\
+$ b install config.install.filter='
+ include/x86_64-linux-gnu/@true
+ include/x86_64-linux-gnu/details/@false
+ include/@false'
+\
+
+The \c{true} or \c{false} value may be followed by a comma and the \c{symlink}
+modifier to only apply to symlink filesystem entries. For example:
+
+\
+$ b config.install.filter='\"*.so\"@false,symlink'
+\
+
+A filter can be negated by specifying \c{!} as the first pair. For example:
+
+\
+$ b install config.install.filter='! include/@false \"*.a\"@false'
+\
+
+Note that the filtering mechanism only affects what gets physically copied to
+the installation directory without affecting what gets built for install or
+the view of what gets installed at the \c{buildfile} level. For example, given
+the \c{include/@false *.a@false} filters, static libraries will still be built
+(unless arranged not to with \c{config.bin.lib}) and the \c{pkg-config} files
+will still end up with \c{-I} options pointing to the header installation
+directory. Note also that this mechanism applies to both \c{install} and
+\c{uninstall} operations.
+
+\N|If you are familiar with the Debian or Fedora packaging, this mechanism is
+somewhat similar to (and can be used for a similar purpose as) the Debian's
+\c{.install} files and Fedora's \c{%files} spec file sections, which are used
+to split the installation into multiple binary packages.|
+
+As another example, the following filters will omit all the
+development-related files (headers, \c{pkg-config} files, static libraries,
+and shared library symlinks; assuming the platform uses the \c{.a}/\c{.so}
+extensions for the libraries):
+
+\
+$ b install config.install.filter='
+ include/@false
+ pkgconfig/@false
+ \"lib/*.a\"@false
+ \"lib/*.so\"@false,symlink'
+\
+
+
\h1#module-version|\c{version} Module|
A project can use any version format as long as it meets the package version
@@ -6333,7 +7302,7 @@ just not ordered correctly. As a result, we feel that the risks are justified
when the only alternative is manual version management (which is always an
option, nevertheless).
-When we prepare a distribution of a snapshot, the \c{version} module
+When we prepare a source distribution of a snapshot, the \c{version} module
automatically adjusts the package name to include the snapshot information as
well as patches the manifest file in the distribution with the snapshot number
and id (that is, replacing \c{.z} in the version value with the actual
@@ -6525,6 +7494,116 @@ depends: libprint [3.0.0-b.2.1 3.0.0-b.3)
\N{This chapter is a work in progress and is incomplete.}
+\h#module-bin-target-types|Binary Target Types|
+
+The following listing shows the hierarchy of the target types defined by the
+\c{bin} module while the following sections describe each target type in
+detail (\c{target{\}} and \c{file{\}} are standard target types defined by the
+\c{build2} core; see \l{#targets-types Target Types} for details).
+
+\
+ target----------------.
+ | |
+ ... |
+ | |
+ .---------------file------------. lib
+ | | | | | | libul
+ | libue obje bmie hbmie def obj
+liba libua obja bmia hbmia bmi
+libs libus objs bmis hbmis hbmi
+\
+
+
+\h2#module-bin-target-types-lib|\c{lib{\}}, \c{liba{\}}, \c{libs{\}}|
+
+The \c{liba{\}} and \c{libs{\}} target types represent static (archive) and
+shared libraries, respectively.
+
+The \c{lib{\}} target type is a group with the \c{liba{\}} and/or \c{libs{\}}
+members. A rule that encounters a \c{lib{\}} prerequisite may pick a member
+appropriate for the target being built or it may build all the members
+according to the \c{bin.lib} variable. See \l{#intro-lib Library Exportation
+and Versioning} for background.
+
+The \c{lib*{\}} file extensions are normally automatically assigned by the
+matching rules based on the target platform.
+
+
+\h2#module-bin-target-types-libu|\c{libul{\}}, \c{libue{\}}, \c{libua{\}},
+\c{libus{\}}|
+
+The \c{libu*{\}} target types represent utility libraries. Utility libraries
+are static libraries with object files appropriate for linking an executable
+(\c{libue{\}}), static library (\c{libua{\}}), or shared library
+(\c{libus{\}}). Where possible, utility libraries are built in the
+\"thin archive\" mode.
+
+The \c{libul{\}} target type is a group with the \c{libua{\}} and/or
+\c{libus{\}} members. A rule that encounters a \c{libul{\}} prerequisite picks
+a member appropriate for the target being built.
+
+The \c{libu*{\}} file extensions are normally automatically assigned by the
+matching rules based on the target platform.
+
+
+\h2#module-bin-target-types-obj|\c{obj{\}}, \c{obje{\}}, \c{obja{\}},
+\c{objs{\}}|
+
+The \c{obj*{\}} target types represent object files appropriate for linking an
+executable (\c{obje{\}}), static library (\c{obja{\}}), or shared library
+(\c{objs{\}}).
+
+\N|In \c{build2} we use distinct object files for the three types of binaries
+(executable, static library, and shared library). The distinction between
+static and shared libraries is made to accommodate build differences such as
+the need for position-independent code (\c{-fPIC}) in shared libraries. While
+in most cases the same object file can be used for executables and static
+libraries, they are kept separate for consistency and generality.|
+
+The \c{obj{\}} target type is a group with the \c{obje{\}}, and/or
+\c{obja{\}}, and/or \c{objs{\}} members. A rule that encounters an \c{obj{\}}
+prerequisite picks a member appropriate for the target being built.
+
+The \c{obj*{\}} file extensions are normally automatically assigned by the
+matching rules based on the target platform.
+
+
+\h2#module-bin-target-types-bmi|\c{bmi{\}}, \c{bmie{\}}, \c{bmia{\}},
+\c{bmis{\}}|
+
+The \c{bmi*{\}} target types represent binary module interfaces (BMI) for
+C++20 named modules appropriate for linking an executable (\c{bmie{\}}),
+static library (\c{bmia{\}}), or shared library (\c{bmis{\}}).
+
+The \c{bmi{\}} target type is a group with the \c{bmie{\}}, and/or
+\c{bmia{\}}, and/or \c{bmis{\}} members. A rule that encounters a \c{bmi{\}}
+prerequisite picks a member appropriate for the target being built.
+
+The \c{bmi*{\}} file extensions are normally automatically assigned by the
+matching rules based on the target platform.
+
+
+\h2#module-bin-target-types-hbmi|\c{hbmi{\}}, \c{hbmie{\}}, \c{hbmia{\}},
+\c{hbmis{\}}|
+
+The \c{hbmi*{\}} target types represent binary module interfaces (BMI) for
+C++20 header units appropriate for linking an executable (\c{hbmie{\}}),
+static library (\c{hbmia{\}}), or shared library (\c{hbmis{\}}).
+
+The \c{hbmi{\}} target type is a group with the \c{hbmie{\}}, and/or
+\c{hbmia{\}}, and/or \c{hbmis{\}} members. A rule that encounters an
+\c{hbmi{\}} prerequisite picks a member appropriate for the target being
+built.
+
+The \c{hbmi*{\}} file extensions are normally automatically assigned by the
+matching rules based on the target platform.
+
+
+\h2#module-bin-target-types-def|\c{def{\}}|
+
+The \c{def{\}} target type represents Windows module definition files and has
+the fixed default extension \c{.def}.
+
\h1#module-cc|\c{cc} Module|
@@ -6564,6 +7643,11 @@ config.cc.libs
config.cc.internal.scope
cc.internal.scope
+
+config.cc.reprocess
+ cc.reprocess
+
+config.cc.pkgconfig.sysroot
\
Note that the compiler mode options are \"cross-hinted\" between \c{config.c}
@@ -6579,6 +7663,41 @@ $ b config.cxx=\"g++ -m32\"
$ b config.cxx=\"clang++ -stdlib=libc++\"
\
+\h#cc-target-types|C-Common Target Types|
+
+The following listing shows the hierarchy of the target types defined by the
+\c{cc} module while the following sections describe each target type in detail
+(\c{file{\}} is a standard target type defined by the \c{build2} core; see
+\l{#targets-types Target Types} for details). Every \c{cc}-based module (such
+as \c{c} and \c{cxx}) will have these common target types defined in addition
+to the language-specific ones.
+
+\
+.--file--.
+| |
+h pc
+ |
+ pca
+ pcs
+\
+
+\N|While the \c{h{\}} target type represents a C header file, there is hardly
+a C-family compilation without a C header inclusion. As a result, this target
+type is defined by all \c{cc}-based modules.|
+
+For the description of the \c{h{\}} target type refer to \l{#c-target-types-c
+\c{c{\}}, \c{h{\}}} in the C module documentation.
+
+\h2#cc-target-types-pc|\c{pc{\}}, \c{pca{\}}, \c{pcs{\}}|
+
+The \c{pc*{\}} target types represent \c{pkg-config} files. The \c{pc{\}}
+target type represents the common file and has the fixed default extension
+\c{.pc}. The \c{pca{\}} and \c{pcs{\}} target types represent the static and
+shared files and have the fixed default extensions \c{.static.pc} and
+\c{.shared.pc}, respectively. See \l{#cc-import-installed Importation of
+Installed Libraries} for background.
+
+
\h#cc-internal-scope|Compilation Internal Scope|
\N|While this section uses the \c{cxx} module and C++ compilation as an
@@ -6742,6 +7861,9 @@ if ($cxx.target.system == 'mingw32')
That is, we use the \c{.def} file approach for MSVC (including when building
with Clang) and the built-in support (\c{--export-all-symbols}) for MinGW.
+\N|You will likely also want to add the generated \c{.def} file (or the
+blanket \c{*.def}) to your \c{.gitignore} file.|
+
Note that it is also possible to use the \c{.def} file approach for MinGW. In
this case we need to explicitly load the \c{bin.def} module (which should be
done after loading \c{c} or \c{cxx}) and can use the following arrangement:
@@ -6769,6 +7891,169 @@ to the symbol auto-importing support in Windows linkers. Note, however, that
auto-importing only works for functions and not for global variables.
+\h#cc-import-installed|Importation of Installed Libraries|
+
+As discussed in \l{#intro-import Target Importation}, searching for installed
+C/C++ libraries is seamlessly integrated into the general target importation
+mechanism. This section provides more details on the installed library search
+semantics and \c{pkg-config} integration. These details can be particularly
+useful when dealing with libraries that were not built with \c{build2} and
+which often use idiosyncratic \c{pkg-config} file names.
+
+The \c{cc}-based modules use the common installed library search
+implementation with the following semantics. To illustrate the finer points,
+we assume the following import:
+
+\
+import libs = libbar%lib{Xfoo}
+\
+
+\ol|
+
+\li|First, the ordered list of library search directories is obtained by
+combining two lists: the lists of the compiler's system library search
+directories (extracted, for example, with \c{-print-search-dirs} GCC/Clang
+options) and the list of user library search directories (specified, for
+example, with the \c{-L} options in \c{*.loptions}).
+
+The key property of this combined list is that it matches the search semantics
+that would be used by the compiler to find libraries specified with the \c{-l}
+option during linking.|
+
+\li|Given the list obtained in the previous step, a library binary (shared
+and/or static library) is searched for in the correct order and using the
+target platform-appropriate library prefix and extension (for example, \c{lib}
+prefix and the \c{.so}/\c{.a} extensions if targeting Linux).
+
+For example (continuing with the above import and assuming Linux), each
+directory will be checked for the presence of \c{libXfoo.so} and \c{libXfoo.a}
+(where the \c{Xfoo} stem is the imported target name).
+
+If only a shared or static binary is found in a given directory, no further
+directories are checked for the missing variant. Instead, the missing
+variant is assumed to be unavailable.
+
+If neither a shared nor static library is found in a given directory, then
+it is also checked for the presence of the corresponding \c{pkg-config}
+file as in the following step. If such a file is found, then the library is
+assumed to be \i{binless} (header-only, etc).|
+
+\li|If a static and/or shared library is found (or if looking for a binless
+library), the corresponding \c{pkg-config} subdirectory (normally just
+\c{pkgconfig/}) is searched for the library's \c{.pc} file.
+
+More precisely, we first look for the \c{.static.pc} file for a static
+library and for the \c{.shared.pc} file for a shared library falling back
+to the common \c{.pc} if they don't exist.
+
+\N|It is often required to use different options for consuming static and
+shared libraries. While there is the \c{Libs.private} and \c{Cflags.private}
+mechanism in \c{pkg-config}, its semantics is to append options to \c{Libs}
+and \c{Cflags} rather than to provide alternative options. And often the
+required semantics is to provide different options for static and shared
+libraries, such as to provide a macro which indicates whether linking static
+or shared in order to setup symbol exporting.
+
+As a result, in \c{build2} we produce separate \c{.pc} files for static and
+shared libraries in addition to the \"best effort\" common \c{.pc} file for
+compatibility with other build systems. Similarly, when consuming a library
+we first look for the \c{.static.pc} and \c{.shared.pc} files falling back
+to the common \c{.pc} if they are not available.|
+
+To deal with idiosyncrasies in \c{pkg-config} file names, the following base
+names are tried in order, where \ci{name} is the imported target name
+(\c{Xfoo} in the above import), \ci{proj} is the imported project name
+(\c{libbar} in the above import), and \ci{ext} is one of the above-mentioned
+\c{pkg-config} extensions (\c{static.pc}, \c{shared.pc}, or \c{pc}). The
+concrete name tried for the above import is shown in parenthesis as an
+example.
+
+\ol|
+
+\li|\c{lib\i{name}.\i{ext}} (\c{libXfoo.pc})|
+
+\li|\c{\i{name}.\i{ext}} (\c{Xfoo.pc})|
+
+\li|lowercase \c{lib\i{name}.\i{ext}} (\c{libxfoo.pc})|
+
+\li|lowercase \c{\i{name}.\i{ext}} (\c{xfoo.pc})|
+
+\li|\c{\i{proj}.\i{ext}} (\c{libbar.pc}; this test is omitted if not project-qualified)||
+
+||
+
+In particular, the last try (for \c{\i{proj}.\i{ext}}) serves as an escape
+hatch for cases where the \c{.pc} file name does not have anything to do with
+the names of library binaries. The canonical example of this is \c{zlib} which
+names its library binaries \c{libz.so}/\c{libz.a} while its \c{.pc} file \-
+\c{zlib.pc}. To be able to import \c{zlib} that was not built with \c{build2},
+we have to use the following import:
+
+\
+import libs = zlib%lib{z}
+\
+
+Note also that these complex rules (which are unfortunately necessary to deal
+with the lack of any consistency in \c{.pc} file naming) can sometimes produce
+surprising interactions. For example, a clearly incorrect import may
+nevertheless appear to somehow work, as in the following example:
+
+\
+import libs = zlib%lib{znonsense}
+\
+
+What happens here is that while no library binary is found, \c{zlib.pc} is
+found and as a result the library ends up being considered binless with the
+\c{-lz} (that is found in the \c{Libs} value of \c{zlib.pc}) treated as a
+prerequisite library, resolved using the above algorithm, and linked. In other
+words, in this case we end up with a binless library \c{lib{znonsense\}} that
+depends on \c{lib{z\}} instead of a single \c{lib{z\}} library.
+
+\h2#cc-import-installed-sysroot|Rewriting Installed Libraries System Root (sysroot)|
+
+Sometimes the installed libraries are moved to a different location after the
+installation. This is especially common in embedded development where the code
+is normally cross-compiled and the libraries for the target platform are
+placed into a host directory, called system root or \i{sysroot}, that doesn't
+match where these libraries were originally installed to. For example, the
+libraries might have been installed into \c{/usr/} but on the host machine
+they may reside in \c{/opt/target/usr/}. In this example, \c{/opt/target/} is
+the sysroot.
+
+While such relocations usually do not affect the library headers or binaries,
+they do break the \c{pkg-config}'s \c{.pc} files which often contain \c{-I}
+and \c{-L} options with absolute paths. Continuing with the above example, a
+\c{.pc} file as originally installed may contain \c{-I/usr/include} and
+\c{-L/usr/lib} while now, that the libraries have been relocated to
+\c{/opt/target/}, they somehow need to be adjusted to
+\c{-I/opt/target/usr/include} and \c{-L/opt/target/usr/lib}.
+
+While it is possible (and perhaps correct) to accomplish this by fixing the
+\c{.pc} files to match the new location, it is not always possible or easy.
+As a result, \c{build2} provides a mechanism for automatically adjusting the
+system root in the \c{-I} and \c{-L} options extracted from \c{.pc} files.
+
+\N|This functionality is roughly equivalent to that provided with the
+\c{PKG_CONFIG_SYSROOT_DIR} environment variable by the \c{pkg-config}
+utility.|
+
+Specifically, the \c{config.cc.pkgconfig.sysroot} variable can be used to
+specify an alternative system root. When specified, all absolute paths in the
+\c{-I} and \c{-L} options that are not already in this directory will be
+rewritten to start with this sysroot.
+
+\N|Note that this mechanism is a workaround rather than a proper solution since
+it is limited to the \c{-I} and \c{-L} options. In particular, it does not
+handle any other options that may contain absolute paths nor \c{pkg-config}
+variables that may be queried.
+
+As a result, it should only be used for dealing with issues in third-party
+\c{.pc} files that do not handle relocation (for example, using the
+\c{${pcfiledir\}} built-in \c{pkg-config} variable). In particular, for
+\c{build2}-generated \c{.pc} files a \l{#install-reloc relocatable
+installation} should be used instead.|
+
+
\h#cc-gcc|GCC Compiler Toolchain|
The GCC compiler id is \c{gcc}.
@@ -6997,6 +8282,185 @@ config.c.internal.scope
c.internal.scope
\
+\h#c-target-types|C Target Types|
+
+The following listing shows the hierarchy of the target types defined by the
+\c{c} module while the following sections describe each target type in detail
+(\c{file{\}} is a standard target type defined by the \c{build2} core; see
+\l{#targets-types Target Types} for details). See also \l{#cc-target-types
+C-Common Target Types} for target types defined by all the \c{cc}-based
+modules.
+
+\
+.--file--.
+| | |
+c m S
+h
+\
+
+The \c{m{\}} target type represents an Objective-C source file, see
+\l{#c-objc Objective-C Compilation} for details.
+
+The \c{S{\}} target type represents an Assembler with C Preprocessor file, see
+\l{#c-as-cpp Assembler with C Preprocessor Compilation} for details.
+
+\h2#c-target-types-c|\c{c{\}}, \c{h{\}}|
+
+The \c{c{\}} and \c{h{\}} target types represent C source and header files.
+They have the default extensions \c{.c} and \c{.h}, respectively, which can
+be customized with the \c{extension} variable.
+
+
+\h#c-objc|Objective-C Compilation|
+
+The \c{c} module provides the \c{c.objc} submodule which can be loaded in
+order to register the \c{m{\}} target type and enable Objective-C compilation
+in the \c{C} compile rule. Note that \c{c.objc} must be loaded after the \c{c}
+module and while the \c{m{\}} target type is registered unconditionally,
+compilation is only enabled if the C compiler supports Objective-C for the
+target platform. Typical usage:
+
+\
+# root.build
+#
+using c
+using c.objc
+\
+
+\
+# buildfile
+#
+lib{hello}: {h c}{*}
+lib{hello}: m{*}: include = ($c.target.class == 'macos')
+\
+
+Note also that while there is support for linking Objective-C executables and
+libraries, this is done using the C compiler driver and no attempt is made to
+automatically link any necessary Objective-C runtime library (such as
+\c{-lobjc}).
+
+
+\h#c-as-cpp|Assembler with C Preprocessor Compilation|
+
+The \c{c} module provides the \c{c.as-cpp} submodule which can be loaded in
+order to register the \c{S{\}} target type and enable Assembler with C
+Preprocessor compilation in the \c{C} compile rule. Note that \c{c.as-cpp}
+must be loaded after the \c{c} module and while the \c{S{\}} target type is
+registered unconditionally, compilation is only enabled if the C compiler
+supports Assembler with C Preprocessor compilation. Typical usage:
+
+\
+# root.build
+#
+using c
+using c.as-cpp
+\
+
+\
+# buildfile
+#
+exe{hello}: {h c}{* -hello.c}
+
+# Use C implementation as a fallback if no assembler.
+#
+assembler = ($c.class == 'gcc' && $c.target.cpu == 'x86_64')
+
+exe{hello}: S{hello}: include = $assembler
+exe{hello}: c{hello}: include = (!$assembler)
+\
+
+\
+/* hello.S
+ */
+#ifndef HELLO_RESULT
+# define HELLO_RESULT 0
+#endif
+
+text
+
+.global hello
+hello:
+ /* ... */
+ movq $HELLO_RESULT, %rax
+ ret
+
+#ifdef __ELF__
+.section .note.GNU-stack, \"\", @progbits
+#endif
+\
+
+The default file extension for the \c{S{\}} target type is \c{.S} (capital)
+but that can be customized using the standard mechanisms. For example:
+
+\
+# root.build
+#
+using c
+using c.as-cpp
+
+h{*}: extension = h
+c{*}: extension = c
+S{*}: extension = sx
+\
+
+Note that \c{*.coptions} are passed to the C compiler when compiling Assembler
+with C Preprocessor files because compile options may cause additional
+preprocessor macros to be defined. Plus, some of them (such as \c{-g}) are
+passed (potentially translated) to the underlying assembler. To pass
+additional options when compiling Assembler files use \c{c.poptions} and
+\c{c.coptions}. For example (continuing with the previous example):
+
+\
+if $assembler
+{
+ obj{hello}:
+ {
+ c.poptions += -DHELLO_RESULT=1
+ c.coptions += -Wa,--no-pad-sections
+ }
+}
+\
+
+\h#c-predefs|C Compiler Predefined Macro Extraction|
+
+The \c{c} module provides the \c{c.predefs} submodule which can be loaded in
+order to register a rule that generates a C header with predefined compiler
+macros. Note that the \c{c.predefs} module must be loaded after the \c{c}
+module and the rule will only match with an explicit rule hint. Typical usage:
+
+\
+# root.build
+#
+using c
+using c.predefs
+\
+
+\
+# buildfile
+#
+[rule_hint=c.predefs] h{predefs}:
+\
+
+Note also that the MSVC compiler only supports the predefined macro extraction
+starting from Visual Studio 2019 (16.0; \c{cl.exe} version 19.20). If support
+for earlier versions is required, then you will need to provide a fallback
+implementation appropriate for your project. For example:
+
+\
+[rule_hint=c.predefs] h{predefs}:
+% update
+if ($c.id == 'msvc' && \
+ ($c.version.major < 19 || \
+ ($c.version.major == 19 && $c.version.minor < 20)))
+{{
+ diag c-predefs $>
+
+ cat <<EOF >$path($>)
+ #define _WIN32
+ EOF
+}}
+\
+
\h1#module-cxx|\c{cxx} Module|
@@ -7065,6 +8529,45 @@ config.cxx.translate_include
\
+\h#cxx-target-types|C++ Target Types|
+
+The following listing shows the hierarchy of the target types defined by the
+\c{cxx} module while the following sections describe each target type in
+detail (\c{file{\}} is a standard target type defined by the \c{build2} core;
+see \l{#targets-types Target Types} for details). See also \l{#cc-target-types
+C-Common Target Types} for target types defined by all the \c{cc}-based
+modules.
+
+\
+ .--file--.
+ | |
+cxx mm
+hxx
+ixx
+txx
+mxx
+\
+
+The \c{mm{\}} target type represents an Objective-C++ source file, see
+\l{#cxx-objcxx Objective-C++ Compilation} for details.
+
+\h2#cxx-target-types-cxx|\c{cxx{\}}, \c{hxx{\}}, \c{ixx{\}}, \c{txx{\}},
+\c{mxx{\}}|
+
+The \c{cxx{\}}, \c{hxx{\}}, \c{ixx{\}}, \c{txx{\}}, and \c{mxx{\}} target
+types represent C++ source, header, inline, template, and module interface
+files. They have the default extensions \c{.cxx}, \c{.hxx}, \c{.ixx},
+\c{.txx}, and \c{.mxx}, respectively, which can be customized with the
+\c{extension} variable. For example (normally done in \c{root.build}):
+
+\
+using cxx
+
+cxx{*}: extension = cpp
+hxx{*}: extension = hpp
+mxx{*}: extension = cppm
+\
+
\h#cxx-modules|C++ Modules Support|
This section describes the build system support for C++ modules.
@@ -7202,7 +8705,7 @@ module implementation units appears reasonable and that's what we recommend.
A module declaration (exporting or non-exporting) starts a \i{module purview}
that extends until the end of the module translation unit. Any name declared
-in a module's purview \i{belongs} to said module. For example:
+in a module's purview \i{belongs} to the said module. For example:
\
#include <string> // Not in purview.
@@ -7372,7 +8875,7 @@ say_hello (const std::string&);
\
One way to think of a re-export is \i{as if} an import of a module also
-\"injects\" all the imports said module re-exports, recursively. That's
+\"injects\" all the imports the said module re-exports, recursively. That's
essentially how most compilers implement it.
Module re-export is the mechanism for assembling bigger modules out of
@@ -7770,7 +9273,7 @@ header-like search mechanism (\c{-I} paths, etc.), an explicit list of
exported modules is provided for each library in its \c{.pc} (\c{pkg-config})
file.
-Specifically, the library's \c{.pc} file contains the \c{cxx_modules} variable
+Specifically, the library's \c{.pc} file contains the \c{cxx.modules} variable
that lists all the exported C++ modules in the \c{<name>=<path>} form with
\c{<name>} being the module's C++ name and \c{<path>} \- the module interface
file's absolute path. For example:
@@ -7781,15 +9284,15 @@ Version: 1.0.0
Cflags:
Libs: -L/usr/lib -lhello
-cxx_modules = hello.core=/usr/include/hello/core.mxx hello.extra=/usr/include/hello/extra.mxx
+cxx.modules = hello.core=/usr/include/hello/core.mxx hello.extra=/usr/include/hello/extra.mxx
\
Additional module properties are specified with variables in the
-\c{cxx_module_<property>.<name>} form, for example:
+\c{cxx.module_<property>.<name>} form, for example:
\
-cxx_module_symexport.hello.core = true
-cxx_module_preprocessed.hello.core = all
+cxx.module_symexport.hello.core = true
+cxx.module_preprocessed.hello.core = all
\
Currently, two properties are defined. The \c{symexport} property with the
@@ -8650,6 +10153,77 @@ macros may not be needed by all consumers. This way we can also keep the
header macro-only which means it can be included freely, in or out of module
purviews.
+\h#cxx-objcxx|Objective-C++ Compilation|
+
+The \c{cxx} module provides the \c{cxx.objcxx} submodule which can be loaded
+in order to register the \c{mm{\}} target type and enable Objective-C++
+compilation in the \c{C++} compile rule. Note that \c{cxx.objcxx} must be
+loaded after the \c{cxx} module and while the \c{mm{\}} target type is
+registered unconditionally, compilation is only enabled if the C++ compiler
+supports Objective-C++ for the target platform. Typical usage:
+
+\
+# root.build
+#
+using cxx
+using cxx.objcxx
+\
+
+\
+# buildfile
+#
+lib{hello}: {hxx cxx}{*}
+lib{hello}: mm{*}: include = ($cxx.target.class == 'macos')
+\
+
+Note also that while there is support for linking Objective-C++ executables
+and libraries, this is done using the C++ compiler driver and no attempt is
+made to automatically link any necessary Objective-C runtime library (such as
+\c{-lobjc}).
+
+
+\h#cxx-predefs|C++ Compiler Predefined Macro Extraction|
+
+The \c{cxx} module provides the \c{cxx.predefs} submodule which can be loaded
+in order to register a rule that generates a C++ header with predefined
+compiler macros. Note that the \c{cxx.predefs} module must be loaded after the
+\c{cxx} module and the rule will only match with an explicit rule
+hint. Typical usage:
+
+\
+# root.build
+#
+using cxx
+using cxx.predefs
+\
+
+\
+# buildfile
+#
+[rule_hint=cxx.predefs] hxx{predefs}:
+\
+
+Note also that the MSVC compiler only supports the predefined macro extraction
+starting from Visual Studio 2019 (16.0; \c{cl.exe} version 19.20). If support
+for earlier versions is required, then you will need to provide a fallback
+implementation appropriate for your project. For example:
+
+\
+[rule_hint=cxx.predefs] hxx{predefs}:
+% update
+if ($cxx.id == 'msvc' && \
+ ($cxx.version.major < 19 || \
+ ($cxx.version.major == 19 && $cxx.version.minor < 20)))
+{{
+ diag c++-predefs $>
+
+ cat <<EOF >$path($>)
+ #define _WIN32
+ #define __cplusplus 201402L
+ EOF
+}}
+\
+
\h1#module-in|\c{in} Module|
@@ -8717,13 +10291,13 @@ symbol is expected to start a substitution with unresolved (to a variable
value) names treated as errors. The double substitution symbol (for example,
\c{$$}) serves as an escape sequence.
-The substitution mode can be relaxed using the \c{in.substitution} variable.
-Its valid values are \c{strict} (default) and \c{lax}. In the lax mode a pair
-of substitution symbols is only treated as a substitution if what's between
-them looks like a build system variable name (that is, it doesn't contain
-spaces, etc). Everything else, including unterminated substitution symbols, is
-copied as is. Note also that in this mode the double substitution symbol is
-not treated as an escape sequence.
+The substitution mode can be relaxed using the \c{in.mode} variable. Its
+valid values are \c{strict} (default) and \c{lax}. In the lax mode a pair of
+substitution symbols is only treated as a substitution if what's between them
+looks like a build system variable name (that is, it doesn't contain spaces,
+etc). Everything else, including unterminated substitution symbols, is copied
+as is. Note also that in this mode the double substitution symbol is not
+treated as an escape sequence.
The lax mode is mostly useful when trying to reuse existing \c{.in} files from
other build systems, such as \c{autoconf}. Note, however, that the lax mode is
@@ -8736,7 +10310,7 @@ substitutions as is. For example:
h{config}: in{config} # config.h.in
{
in.symbol = '@'
- in.substitution = lax
+ in.mode = lax
CMAKE_SYSTEM_NAME = $c.target.system
CMAKE_SYSTEM_PROCESSOR = $c.target.cpu
@@ -8750,6 +10324,42 @@ target-specific variables. Typed variable values are converted to string
using the corresponding \c{builtin.string()} function overload before
substitution.
+While specifying substitution values as \c{buildfile} variables is usually
+natural, sometimes this may not be possible or convenient. Specifically, we
+may have substitution names that cannot be specified as \c{buildfile}
+variables, for example, because they start with an underscore (and are thus
+reserved) or because they refer to one of the predefined variables. Also, we
+may need to have different groups of substitution values for different cases,
+for example, for different platforms, and it would be convenient to pass such
+groups around as a single value.
+
+To support these requirements the substitution values can alternatively be
+specified as key-value pairs in the \c{in.substitutions} variable. Note that
+entries in this substitution map take precedence over the \c{buildfile}
+variables. For example:
+
+\
+/* config.h.in */
+
+#define _GNU_SOURCE @_GNU_SOURCE@
+#define _POSIX_SOURCE @_POSIX_SOURCE@
+\
+
+\
+# buildfile
+
+h{config}: in{config}
+{
+ in.symbol = '@'
+ in.mode = lax
+
+ in.substitutions = _GNU_SOURCE@0 _POSIX_SOURCE@1
+}
+\
+
+\N|In the above example, the \c{@} characters in \c{in.symbol} and
+\c{in.substitutions} are unrelated.|
+
Using an undefined variable in a substitution is an error. Using a \c{null}
value in a substitution is also an error unless the fallback value is
specified with the \c{in.null} variable. For example:
@@ -8763,11 +10373,21 @@ h{config}: in{config}
}
\
-A number of other build system modules, for example, \l{#module-version
-\c{version}} and \l{#module-bash \c{bash}}, are based on the \c{in} module and
-provide extended functionality. The \c{in} preprocessing rule matches any
-\c{file{\}}-based target that has the corresponding \c{in{\}} prerequisite
-provided none of the extended rules match.
+\N|To specify a \c{null} value using the \c{in.substitutions} mechanism omit
+the value, for example:
+
+\
+in.substitutions = _GNU_SOURCE
+\
+
+|
+
+A number of other build system modules, for example,
+\l{https://github.com/build2/libbuild2-autoconf/ \c{autoconf}},
+\l{#module-version \c{version}}, and \l{#module-bash \c{bash}}, are based on
+the \c{in} module and provide extended functionality. The \c{in} preprocessing
+rule matches any \c{file{\}}-based target that has the corresponding \c{in{\}}
+prerequisite provided none of the extended rules match.
\h1#module-bash|\c{bash} Module|
@@ -8914,11 +10534,11 @@ by searching in the \c{PATH} environment variable.
By convention, \c{bash} module libraries should use the \c{lib} name prefix,
for example, \c{libhello}. If there is also a native library (that is, one
written in C/C++) that provides the same functionality (or the \c{bash}
-library is a language binding for said library), then it is customary to add
-the \c{.bash} extension to the \c{bash} library name, for example,
+library is a language binding for the said library), then it is customary to
+add the \c{.bash} extension to the \c{bash} library name, for example,
\c{libhello.bash}. Note that in this case the top-level subdirectory within
-the project is expected to be called without the \c{bash} extension,
-for example, \c{libhello}.
+the project is expected to be called without the \c{bash} extension, for
+example, \c{libhello}.
Modules can be \i{private} or \i{public}. Private modules are implementation
details of a specific project and are not expected to be imported from other
@@ -8965,4 +10585,498 @@ corresponding \c{in{\}} and one or more \c{bash{\}} prerequisites as well as
\c{bash{\}} targets that have the corresponding \c{in{\}} prerequisite (if you
need to preprocess a script that does not depend on any modules, you can use
the \c{in} module's rule).
+
+
+\h1#json-dump|Appendix A \- JSON Dump Format|
+
+This appendix describes the machine-readable, JSON-based build system state
+dump format that can be requested with the \c{--dump-format=json-v0.1} build
+system driver option (see \l{b(1)} for details).
+
+The format is specified in terms of the serialized representation of C++
+\c{struct} instances. See \l{b.xhtml#json-output JSON OUTPUT} for details on
+the overall properties of this format and the semantics of the \c{struct}
+serialization.
+
+\N|This format is currently unstable (thus the temporary \c{-v0.1} suffix)
+and may be changed in ways other than as described in \l{b.xhtml#json-output
+JSON OUTPUT}. In case of such changes the format version will be incremented
+to allow detecting incompatibilities but no support for older versions is
+guaranteed.|
+
+The build system state can be dumped after the load phase (\c{--dump=load}),
+once the build state has been loaded, and/or after the match phase
+(\c{--dump=match}), after rules have been matched to targets to execute the
+desired action. The JSON format differs depending on after which phase it is
+produced. After the load phase the format aims to describe the
+action-independent state, essentially as specified in the \c{buildfiles}.
+While after the match phase it aims to describe the state for executing the
+specified action, as determined by the rules that have been matched. The
+former state would be more appropriate, for example, for an IDE that tries to
+use \c{buildfiles} as project files. While the latter state could be used to
+determine the actual build graph for a certain action, for example, in order
+to infer which executable targets are considered tests by the \c{test}
+operation.
+
+While it's possible to dump the build state as a byproduct of executing an
+action (for example, performing an update), it's often desirable to only dump
+the build state and do it as quickly as possible. For such cases the
+recommended option combinations are as follows (see the \c{--load-only} and
+\c{--match-only} documentation for details):
+
+\
+$ b --load-only --dump=load --dump-format=json-v0.1 .../dir/
+
+$ b --match-only --dump=match --dump-format=json-v0.1 .../dir/
+$ b --match-only --dump=match --dump-format=json-v0.1 .../dir/type{name}
+\
+
+\N|Note that a match dump for a large project can produce a large amount of
+data, especially for the \c{update} operation (tens and even hundreds of
+megabytes is not uncommon). To reduce this size it is possible to limit the
+dump to specific scopes and/or targets with the \c{--dump-scope} and
+\c{--dump-target} options.|
+
+The complete dump (that is, not of a specific scope or target) is a tree of
+nested scope objects (see \l{#intro-dirs-scopes Output Directories and Scopes}
+for background). The scope object has the serialized representation of the
+following C++ \c{struct} \c{scope}. It is the same for both load and match
+dumps except for the type of the \c{targets} member:
+
+\
+struct scope
+{
+ string out_path;
+ optional<string> src_path;
+
+ vector<variable> variables; // Non-type/pattern scope variables.
+
+ vector<scope> scopes; // Immediate children.
+
+ vector<loaded_target|matched_target> targets;
+};
+\
+
+For example (parts of the output are omitted for brevity):
+
+\N|The actual output is produced unindented to reduce the size.|
+
+\
+$ cd /tmp
+$ bdep new hello
+$ cd hello
+$ bdep new -C @gcc cc
+$ b --load-only --dump=load --dump-format=json-v0.1
+{
+ \"out_path\": \"\",
+ \"variables\": [ ... ],
+ \"scopes\": [
+ {
+ \"out_path\": \"/tmp/hello-gcc\",
+ \"variables\": [ ... ],
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello\",
+ \"variables\": [ ... ],
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"variables\": [ ... ],
+ \"targets\": [ ... ]
+ }
+ ],
+ \"targets\": [ ... ]
+ }
+ ],
+ \"targets\": [ ... ]
+ }
+ ]
+}
+\
+
+The \c{out_path} member is relative to the parent scope. It is empty for the
+special global scope, which is the root of the tree. The \c{src_path} member
+is absent if it is the same as \c{out_path} (an in-source build or a scope
+outside of a project).
+
+\N|For the match dump, targets that have not been matched for the specified
+action are omitted.|
+
+In the load dump, the target object has the serialized representation of the
+following C++ \c{struct} \c{loaded_target}:
+
+\
+struct loaded_target
+{
+ string name; // Relative quoted/qualified name.
+ string display_name; // Relative display name.
+ string type; // Target type.
+ optional<string> group; // Absolute quoted/qualified group target.
+
+ vector<variable> variables; // Target variables.
+
+ vector<prerequisite> prerequisites;
+};
+\
+
+For example (continuing with the previous \c{hello} setup):
+
+\
+{
+ \"out_path\": \"\",
+ \"scopes\": [
+ {
+ \"out_path\": \"/tmp/hello-gcc\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"targets\": [
+ {
+ \"name\": \"exe{hello}\",
+ \"display_name\": \"exe{hello}\",
+ \"type\": \"exe\",
+ \"prerequisites\": [
+ {
+ \"name\": \"cxx{hello}\",
+ \"type\": \"cxx\"
+ },
+ {
+ \"name\": \"testscript{testscript}\",
+ \"type\": \"testscript\"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+\
+
+The target \c{name} member is the target name that is qualified with the
+extension (if applicable and known) and, if required, is quoted so that it can
+be passed back to the build system driver on the command line. The
+\c{display_name} member is unqualified and unquoted. Note that both the target
+\c{name} and \c{display_name} members are normally relative to the containing
+scope (if any).
+
+The prerequisite object has the serialized representation of the following C++
+\c{struct} \c{prerequisite}:
+
+\
+struct prerequisite
+{
+ string name; // Quoted/qualified name.
+ string type;
+ vector<variable> variables; // Prerequisite variables.
+};
+\
+
+The prerequisite \c{name} member is normally relative to the containing scope.
+
+In the match dump, the target object has the serialized representation of the
+following C++ \c{struct} \c{matched_target}:
+
+\
+struct matched_target
+{
+ string name;
+ string display_name;
+ string type;
+ optional<string> group;
+
+ optional<path> path; // Absent if not path target, not assigned.
+
+ vector<variable> variables;
+
+ optional<operation_state> outer_operation; // null if not matched.
+ operation_state inner_operation; // null if not matched.
+};
+\
+
+For example (outer scopes removed for brevity):
+
+\
+$ b --match-only --dump=match --dump-format=json-v0.1
+{
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"targets\": [
+ {
+ \"name\": \"/tmp/hello/hello/cxx{hello.cxx}@./\",
+ \"display_name\": \"/tmp/hello/hello/cxx{hello}@./\",
+ \"type\": \"cxx\",
+ \"path\": \"/tmp/hello/hello/hello.cxx\",
+ \"inner_operation\": {
+ \"rule\": \"build.file\",
+ \"state\": \"unchanged\"
+ }
+ },
+ {
+ \"name\": \"obje{hello.o}\",
+ \"display_name\": \"obje{hello}\",
+ \"type\": \"obje\",
+ \"group\": \"/tmp/hello-gcc/hello/hello/obj{hello}\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello.o\",
+ \"inner_operation\": {
+ \"rule\": \"cxx.compile\",
+ \"prerequisite_targets\": [
+ {
+ \"name\": \"/tmp/hello/hello/cxx{hello.cxx}@./\",
+ \"type\": \"cxx\"
+ },
+ {
+ \"name\": \"/usr/include/c++/12/h{iostream.}\",
+ \"type\": \"h\"
+ },
+ ...
+ ]
+ }
+ },
+ {
+ \"name\": \"exe{hello.}\",
+ \"display_name\": \"exe{hello}\",
+ \"type\": \"exe\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello\",
+ \"inner_operation\": {
+ \"rule\": \"cxx.link\",
+ \"prerequisite_targets\": [
+ {
+ \"name\": \"/tmp/hello-gcc/hello/hello/obje{hello.o}\",
+ \"type\": \"obje\"
+ }
+ ]
+ }
+ }
+ ]
+}
+\
+
+The first four members in \c{matched_target} have the same semantics as in
+\c{loaded_target}.
+
+The \c{outer_operation} member is only present if the action has an outer
+operation. For example, when performing \c{update-for-test}, \c{test} is the
+outer operation while \c{update} is the inner operation.
+
+The operation state object has the serialized representation of the following
+C++ \c{struct} \c{operation_state}:
+
+\
+struct operation_state
+{
+ string rule; // null if direct recipe match.
+
+ optional<string> state; // One of unchanged|changed|group.
+
+ vector<variable> variables; // Rule variables.
+
+ vector<prerequisite_target> prerequisite_targets;
+};
+\
+
+The \c{rule} member is the matched rule name. The \c{state} member is the
+target state, if known after match. The \c{prerequisite_targets} array is a
+subset of prerequisites resolved to targets that are in effect for this
+action. The matched rule may add additional targets, for example, dynamically
+extracted additional dependencies, like \c{/usr/include/c++/12/h{iostream.\}}
+in the above listing.
+
+The prerequisite target object has the serialized representation of the
+following C++ \c{struct} \c{prerequisite_target}:
+
+\
+struct prerequisite_target
+{
+ string name; // Absolute quoted/qualified target name.
+ string type;
+ bool adhoc;
+};
+\
+
+The \c{variables} array in the scope, target, prerequisite, and prerequisite
+target objects contains scope, target, prerequisite, and rule variables,
+respectively.
+
+The variable object has the serialized representation of the following C++
+\c{struct} \c{variable}:
+
+\
+struct variable
+{
+ string name;
+ optional<string> type;
+ json_value value; // null|boolean|number|string|object|array
+};
+\
+
+For example:
+
+\
+{
+ \"out_path\": \"\",
+ \"variables\": [
+ {
+ \"name\": \"build.show_progress\",
+ \"type\": \"bool\",
+ \"value\": true
+ },
+ {
+ \"name\": \"build.verbosity\",
+ \"type\": \"uint64\",
+ \"value\": 1
+ },
+ ...
+ ],
+ \"scopes\": [
+ {
+ \"out_path\": \"/tmp/hello-gcc\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello\",
+ \"scopes\": [
+ {
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"variables\": [
+ {
+ \"name\": \"out_base\",
+ \"type\": \"dir_path\",
+ \"value\": \"/tmp/hello-gcc/hello/hello\"
+ },
+ {
+ \"name\": \"src_base\",
+ \"type\": \"dir_path\",
+ \"value\": \"/tmp/hello/hello\"
+ },
+ {
+ \"name\": \"cxx.poptions\",
+ \"type\": \"strings\",
+ \"value\": [
+ \"-I/tmp/hello-gcc/hello\",
+ \"-I/tmp/hello\"
+ ]
+ },
+ {
+ \"name\": \"libs\",
+ \"value\": \"/tmp/hello-gcc/libhello/libhello/lib{hello}\"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
+\
+
+The \c{type} member is absent if the variable value is untyped.
+
+The \c{value} member contains the variable value in a suitable JSON
+representation. Specifically:
+
+\ul|
+
+\li|\c{null} values are represented as JSON \c{null}.|
+
+\li|\c{bool} values are represented as JSON \c{boolean}.|
+
+\li|\c{int64} and \c{uint64} values are represented as JSON \c{number}.|
+
+\li|\c{string}, \c{path}, \c{dir_path} values are represented as JSON
+ \c{string}.|
+
+\li|Untyped simple name values are represented as JSON \c{string}.|
+
+\li|Pairs of above values are represented as JSON objects with the \c{first}
+ and \c{second} members corresponding to the pair elements.|
+
+\li|Untyped complex name values are serialized as target names and represented
+ as JSON \c{string}.|
+
+\li|Containers of above values are represented as JSON arrays corresponding to
+ the container elements.|
+
+\li|An empty value is represented as an empty JSON object if it's a typed
+ pair, as an empty JSON array if it's a typed container or is untyped, and
+ as an empty string otherwise.||
+
+One expected use-case for the match dump is to determine the set of targets
+for which a given action is applicable. For example, we may want to determine
+all the executables in a project that can be tested with the \c{test}
+operation in order to present this list to the user in an IDE plugin or
+some such. To further illuminate the problem, consider the following
+\c{buildfile} which declares a number of executable targets, some are
+tests and some are not:
+
+\
+exe{hello1}: ... testscript # Test because of testscript prerequisite.
+
+exe{hello2}: test = true # Test because of test=true.
+
+exe{hello3}: ... testscript # Not a test because of test=false.
+{
+ test = false
+}
+\
+
+As can be seen, trying to infer this information is not straightforward and
+doing so manually by examining prerequisites, variables, etc., while possible,
+will be complex and likely brittle. Instead, the recommended approach is to
+use the match dump and base the decision on the \c{state} target object
+member. Specifically, a rule which matched the target but determined that
+nothing needs to be done for this target, returns the special \c{noop}
+recipe. The \c{build2} core recognizes this situation and sets such target's
+state to \c{unchanged} during match. Here is what the match dump will look
+like for the above three executables:
+
+\
+$ b --match-only --dump=match --dump-format=json-v0.1 test
+{
+ \"out_path\": \"hello\",
+ \"src_path\": \"/tmp/hello/hello\",
+ \"targets\": [
+ {
+ \"name\": \"exe{hello1.}\",
+ \"display_name\": \"exe{hello1}\",
+ \"type\": \"exe\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello1\",
+ \"inner_operation\": {
+ \"rule\": \"test\"
+ }
+ },
+ {
+ \"name\": \"exe{hello2.}\",
+ \"display_name\": \"exe{hello2}\",
+ \"type\": \"exe\",
+ \"path\": \"/tmp/hello-gcc/hello/hello/hello2\",
+ \"inner_operation\": {
+ \"rule\": \"test\"
+ }
+ },
+ {
+ \"name\": \"exe{hello3}\",
+ \"display_name\": \"exe{hello3}\",
+ \"type\": \"exe\",
+ \"inner_operation\": {
+ \"rule\": \"test\",
+ \"state\": \"unchanged\"
+ }
+ }
+ ]
+}
+\
+
"
diff --git a/doc/testscript.cli b/doc/testscript.cli
index 1395363..20d9c2d 100644
--- a/doc/testscript.cli
+++ b/doc/testscript.cli
@@ -622,15 +622,27 @@ By convention, the testscript file should be called either \c{testscript} if
you only have one or have the \c{.testscript} extension, for example,
\c{basics.testscript}. The \c{test} module registers the \c{testscript{\}}
target type to be used for testscript files. We don't have to use explicit
-target type for the \c{testscript} file.
+target type for the \c{testscript} file. For example:
+
+\
+exe{hello}: testscript{basics advanced}
+\
A testscript prerequisite can be specified for any target. For example, if
-our directory contains a bunch of shell scripts that we want to test together,
+our directory contains a bunch of executables that we want to test together,
then it makes sense to specify the testscript prerequisite for the directory
target:
\
-./: testscript{basics}
+./: testscript
+\
+
+Similarly, the same testscript can be used to test multiple targets. For
+example:
+
+\
+exe{hello}: testscript{basics advanced}
+exe{hello-lite}: testscript{basics}
\
During variable lookup if a variable is not found in one of the testscript
@@ -741,6 +753,68 @@ Note also that these \c{test.*} variables only establish a convention. You
could also put everything into, say \c{test.arguments}, and it will still work
as expected.
+\N|The \c{test.redirects}, \c{test.cleanups}, and \c{$*} variables are of the
+special \c{cmdline} type, see \l{#lexical Lexical Structure} for details.|
+
+The special \c{test.*} variables make it fairly easy to arrange the testing of
+a single executable. What if we need to run multiple executables from a single
+testscript file? For example, we may have a pair of executables, such as
+\c{reader} and \c{writer}, that must be tested together. Or we may have a
+number of test executables that all require a common setup, for example,
+cryptographic key generation, which we would like not to repeat for each
+test. While it is possible to achieve this with target-specific variables
+similar to \c{test}, things will be less automatic. In particular, there
+will be no automatic translation of target names to paths and we will have
+to do it manually. For example:
+
+\
+# buildfile
+
+./: exe{reader}: cxx{reader} ...
+./: exe{writer}: cxx{writer} ...
+
+./: testscript
+{
+ reader = exe{reader}
+ writer = exe{writer}
+}
+\
+
+\
+# testscript
+
+# Translate targets to paths.
+#
+reader = $path($reader)
+writer = $path($writer)
+
+: pipe
+:
+$writer | $reader
+
+: file
+:
+$writer output;
+$reader output
+\
+
+\N|Strictly speaking, for local executables, there is no need to pass the
+target names from \c{buildfile} to \c{testscript} and instead we could just
+list them literally in \c{testscript}. In particular, this could be an
+attractive approach if we have a large number of such executables. For
+example:
+
+\
+# testscript
+
+$path(exe{test1}) : test1
+$path(exe{test2}) : test2
+$path(exe{test3}) : test3
+...
+\
+
+|
+
Another pre-defined variable is \c{test.target}. It is used to specify the
test target platform when cross-testing (for example, when running Windows
test on Linux under Wine). Normally, you would set it in your
@@ -964,6 +1038,9 @@ the teardown commands are executed sequentially and in the order specified.
Again, if any of them fail, the group execution is terminated and the group is
considered to have failed.
+\N|Currently, the only way to run several executables serially is to place
+them into a single compound test. See \l{#syntax-test Test} for details.|
+
As an example, consider the following version of \c{basics.testscript}:
\
@@ -1285,62 +1362,54 @@ here-document single-quoted here_line_single
here-document double-quoted here_line_double expansions
\
-Finally, unquoted expansions in command lines (test, setup, and teardown) are
-re-lexed in the \c{command_expansion} mode in order to recognize command line
-syntax tokens (redirects, pipes, etc). To illustrate why this re-lexing is
-necessary, consider the following example of a \"canned\" command line:
+Finally, unquoted expansions in command lines (test, setup, and teardown) of
+the special \c{cmdline} type are re-lexed in the \c{command_expansion} mode in
+order to recognize command line syntax tokens (redirects, pipes, etc). To
+illustrate this mechanism, consider the following example of a \"canned\"
+command line:
\
-x = echo >-
-$x foo
+cmd = [cmdline] echo >-
+$cmd foo
\
-The test command line token sequence will be \c{$}, \c{x}, \c{foo}. After the
-expansion we have \c{echo}, \c{>-}, \c{foo}, however, the second element
-(\c{>-}) is not (yet) recognized as a redirect. To recognize it we re-lex
-the result of the expansion.
+The test command line token sequence will be \c{$}, \c{cmd}, \c{foo}. After
+the expansion we have \c{echo}, \c{>-}, \c{foo}, however, the second element
+(\c{>-}) is not (yet) recognized as a redirect. To recognize it, the result of
+the expansion is re-lexed.
Note that besides the few command line syntax characters, re-lexing will also
\"consume\" quotes and escapes, for example:
\
-args = \"'foo'\" # 'foo'
-echo $args # echo foo
+cmd = [cmdline] echo \"'foo'\" # echo 'foo'
+$cmd # echo foo
\
To preserve quotes in this context we need to escape them:
\
-args = \"\\\\'foo\\\\'\" # \'foo\'
-echo $args # echo 'foo'
-\
-
-Alternatively, for a single value, we could quote the expansion (in order
-to suppress re-lexing; note, however, that quoting will also inhibit
-word-splitting):
-
-\
-arg = \"'foo'\" # 'foo'
-echo \"$arg\" # echo 'foo'
+cmd = [cmdline] echo \"\\\\'foo\\\\'\" # echo \'foo\'
+$cmd # echo 'foo'
\
To minimize unhelpful consumption of escape sequences (for example, in Windows
paths), re-lexing only performs the \i{effective escaping} for the \c{'\"\\}
characters. All other escape sequences are passed through uninterpreted. Note
-that this means there is no way to escape command line syntax characters. The
-recommendation is to use quoting except for passing literal quotes, for
-example:
+that this means there is no way to escape command line syntax characters in
+canned commands. The recommendation is to use quoting except for passing
+literal quotes, for example:
\
-args = \'&foo\' # '&foo'
-echo $args # echo &foo
+cmd = [cmdline] echo \'&foo\' # echo '&foo'
+$cmd # echo &foo
\
To make sure that a string is passed as is through both expansions use the
\i{doubled single-quoting} idiom, for example:
\
-filter = sed -e \''s/foo (bar|baz)/$&/'\'
+filter = [cmdline] sed -e \''s/foo (bar|baz)/$&/'\'
$* <<EOI | $filter >>EOO
...
EOI
@@ -1423,6 +1492,7 @@ while potentially spanning several physical lines. The \c{-line} suffix
here signifies a \i{logical line}, for example, a command line plus its
here-document fragments.
+
\h#syntax-grammar|Grammar|
The complete grammar of the Testscript language is presented next with the
@@ -1479,33 +1549,58 @@ test:
+(variable-line|command-like)
variable-like:
- variable-line|variable-if
+ variable-line|variable-flow
variable-line:
<variable-name> ('='|'+='|'=+') value-attributes? <value> ';'?
value-attributes: '[' <key-value-pairs> ']'
+variable-flow:
+ variable-if|variable-for|variable-while
+
variable-if:
('if'|'if!') command-line
- variable-if-body
+ variable-flow-body
*variable-elif
?variable-else
- 'end'
+ 'end' ';'?
variable-elif:
('elif'|'elif!') command-line
- variable-if-body
+ variable-flow-body
variable-else:
'else'
- variable-if-body
+ variable-flow-body
-variable-if-body:
+variable-flow-body:
*variable-like
+variable-for:
+ variable-for-args|variable-for-stream
+
+variable-for-args:
+ 'for' <variable-name> element-attributes? ':' \
+ value-attributes? <value>
+ variable-flow-body
+ 'end' ';'?
+
+element-attributes: value-attributes
+
+variable-for-stream:
+ (command-pipe '|')? \
+ 'for' (<opt>|stdin)* <variable-name> element-attributes? (stdin)*
+ variable-flow-body
+ 'end' ';'?
+
+variable-while:
+ 'while' command-line
+ variable-flow-body
+ 'end' ';'?
+
command-like:
- command-line|command-if
+ command-line|command-flow
command-line: command-expr (';'|(':' <text>))?
*here-document
@@ -1518,24 +1613,47 @@ command: <path>(' '+(<arg>|redirect|cleanup))* command-exit?
command-exit: ('=='|'!=') <exit-status>
+command-flow:
+ command-if|command-for|command-while
+
command-if:
('if'|'if!') command-line
- command-if-body
+ command-flow-body
*command-elif
?command-else
'end' (';'|(':' <text>))?
command-elif:
('elif'|'elif!') command-line
- command-if-body
+ command-flow-body
command-else:
'else'
- command-if-body
+ command-flow-body
-command-if-body:
+command-flow-body:
*(variable-line|command-like)
+command-for:
+ command-for-args|command-for-stream
+
+command-for-args:
+ 'for' <variable-name> element-attributes? ':' \
+ value-attributes? <value>
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
+command-for-stream:
+ (command-pipe '|')? \
+ 'for' (<opt>|stdin)* <variable-name> element-attributes? (stdin)*
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
+command-while:
+ 'while' command-line
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
redirect: stdin|stdout|stderr
stdin: '0'?(in-redirect)
@@ -1568,6 +1686,12 @@ description:
+(':' <text>)
\
+Note that the only purpose of having separate (from the command flow control
+constructs) variable-only flow control constructs is to remove the error-prone
+requirement of having to specify \c{+} and \c{-} prefixes in group
+setup/teardown.
+
+
\h#syntax-script|Script|
\
@@ -1578,6 +1702,7 @@ script:
A testscript file is an implicit group scope (see \l{#model Model and
Execution} for details).
+
\h#syntax-scope|Scope|
\
@@ -1627,6 +1752,7 @@ the scopes in an \c{if-else} chain are alternative implementations of the same
group/test (thus the single description). If at least one of them is a group
scope, then all the others are treated as groups as well.
+
\h#syntax-directive|Directive|
\
@@ -1640,7 +1766,7 @@ variable is assigned. You can, however, use variables assigned in the
buildfile. For example:
\
-include common-$(cxx.target.class).testscript
+.include common-$(cxx.target.class).testscript
\
\h2#syntax-directive-include|Include|
@@ -1659,6 +1785,7 @@ this scope should not be included again. The implementation is not required to
handle links when determining if two paths are to the same file. Relative
paths are assumed to be relative to the including testscript file.
+
\h#syntax-setup-teardown|Setup and Teardown|
\
@@ -1672,11 +1799,12 @@ setup-line: '+' command-like
tdown-line: '-' command-like
\
-Note that variable assignments (including \c{variable-if}) do not use the
+Note that variable assignments (including \c{variable-flow}) do not use the
\c{'+'} and \c{'-'} prefixes. A standalone (not part of a test) variable
assignment is automatically treated as a setup if no tests have yet been
encountered in this scope and as a teardown otherwise.
+
\h#syntax-test|Test|
\
@@ -1695,11 +1823,16 @@ cat <'verbose = true' >=$conf;
test1 $conf
\
+\N|As discussed in \l{#model Model and Execution}, tests are executed in
+parallel. Currently, the only way to run several executables serially is to
+place them into a single compound test.|
+
+
\h#syntax-variable|Variable|
\
variable-like:
- variable-line|variable-if
+ variable-line|variable-flow
variable-line:
<variable-name> ('='|'+='|'=+') value-attributes? <value> ';'?
@@ -1718,25 +1851,26 @@ echo $args # foo bar fox baz
The value can only be followed by \c{;} inside a test to signal the test
continuation.
+
\h#syntax-variable-if|Variable-If|
\
variable-if:
('if'|'if!') command-line
- variable-if-body
+ variable-flow-body
*variable-elif
?variable-else
- 'end'
+ 'end' ';'?
variable-elif:
('elif'|'elif!') command-line
- variable-if-body
+ variable-flow-body
variable-else:
'else'
- variable-if-body
+ variable-flow-body
-variable-if-body:
+variable-flow-body:
*variable-like
\
@@ -1760,15 +1894,90 @@ with a ternary operator is often more concise:
slash = ($cxx.target.class == 'windows' ? \\\\ : /)
\
-Note also that the only purpose of having a separate (from \c{command-if})
-variable-only if-block is to remove the error-prone requirement of having to
-specify \c{+} and \c{-} prefixes in group setup/teardown.
+
+\h#syntax-variable-for|Variable-For|
+
+\
+variable-for:
+ variable-for-args|variable-for-stream
+
+variable-for-args:
+ 'for' <variable-name> element-attributes? ':' \
+ value-attributes? <value>
+ variable-flow-body
+ 'end' ';'?
+
+variable-for-stream:
+ (command-pipe '|')? \
+ 'for' (<opt>|stdin)* <variable-name> element-attributes? (stdin)*
+ variable-flow-body
+ 'end' ';'?
+
+variable-flow-body:
+ *variable-like
+\
+
+A group of variables can be set in a loop while iterating over elements of a
+list. The iteration semantics is the same as in \c{command-for}. For example:
+
+\
+uvalues =
+for v: $values
+ uvalues += $string.ucase($v)
+end
+\
+
+Another example:
+
+\
+uvalues =
+cat values.txt | for -n v
+ uvalues += $string.ucase($v)
+end
+\
+
+Or using the \c{stdin} redirect:
+
+\
+uvalues =
+for -n v <=values.txt
+ uvalues += $string.ucase($v)
+end
+\
+
+
+\h#syntax-variable-while|Variable-While|
+
+\
+variable-while:
+ 'while' command-line
+ variable-flow-body
+ 'end' ';'?
+
+variable-flow-body:
+ *variable-like
+\
+
+A group of variables can be set in a loop while the condition evaluates to
+\c{true}. The condition \c{command-line} semantics is the same as in
+\c{scope-if}. For example:
+
+\
+uvalues =
+i = [uint64] 0
+n = $size($values)
+while ($i != $n)
+ uvalues += $string.ucase($values[$i])
+ i += 1
+end
+\
+
\h#syntax-command|Command|
\
command-like:
- command-line|command-if
+ command-line|command-flow
command-line: command-expr (';'|(':' <text>))?
*here-document
@@ -1783,7 +1992,7 @@ command-exit: ('=='|'!=') <exit-status>
\
A command line is a command expression. If it appears directly (as opposed to
-inside \c{command-if}) in a test, then it can be followed by \c{;} to signal
+inside \c{command-flow}) in a test, then it can be followed by \c{;} to signal
the test continuation or by \c{:} and the trailing description.
A command expression can combine several command pipes with logical AND and OR
@@ -1808,25 +2017,26 @@ to succeed (0 exit code). The logical result of executing a command is
therefore a boolean value which is used in the higher-level constructs (pipe
and expression).
+
\h#syntax-command-if|Command-If|
\
command-if:
('if'|'if!') command-line
- command-if-body
+ command-flow-body
*command-elif
?command-else
'end' (';'|(':' <text>))?
command-elif:
('elif'|'elif!') command-line
- command-if-body
+ command-flow-body
command-else:
'else'
- command-if-body
+ command-flow-body
-command-if-body:
+command-flow-body:
*(variable-line|command-like)
\
@@ -1846,6 +2056,108 @@ end;
test1 $foo
\
+
+\h#syntax-command-for|Command-For|
+
+\
+command-for:
+ command-for-args|command-for-stream
+
+command-for-args:
+ 'for' <variable-name> element-attributes? ':' \
+ value-attributes? <value>
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
+command-for-stream:
+ (command-pipe '|')? \
+ 'for' (<opt>|stdin)* <variable-name> element-attributes? (stdin)*
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
+command-flow-body:
+ *(variable-line|command-like)
+\
+
+A group of commands can be executed in a loop while iterating over elements of
+a list and setting the specified variable (called \i{loop variable}) to the
+corresponding element on each iteration. At the end of the iteration the loop
+variable contains the value of the last element, if any. Note that in a
+compound test, commands inside \c{command-for} must not end with
+\c{;}. Rather, \c{;} may follow \c{end}.
+
+The \c{for} loop has two forms: In the first form the list is specified as
+arguments. Similar to the \c{for} loop in the Buildfile language, it can
+contain variable expansions, function calls, evaluation contexts, and/or
+literal values. For example:
+
+\
+for v: $values
+ test1 $v
+end;
+test2
+\
+
+In the second form the list is read from the \c{stdin} input. The input data
+is split into elements either at whitespaces (default) or newlines, which can
+be controlled with the \c{-n|--newline} and \c{-w|--whitespace} options.
+Overall, this form supports the same set of options as the \l{#builtins-set
+\c{set}} pseudo-builtin. For example:
+
+\
+cat values.txt | for -n v
+ test1 $v
+end
+\
+
+Or using the \c{stdin} redirect:
+
+\
+for -n v <=values.txt
+ test1 $v
+end
+\
+
+Both forms can include value attributes enclosed in \c{[]} to be applied to
+each element, again similar to the \l{#builtins-set \c{set}} pseudo-builtin.
+
+
+\h#syntax-command-while|Command-While|
+
+\
+command-while:
+ 'while' command-line
+ command-flow-body
+ 'end' (';'|(':' <text>))?
+
+command-flow-body:
+ *(variable-line|command-like)
+\
+
+A group of commands can be executed in a loop while a condition evaluates to
+\c{true}. The condition \c{command-line} semantics is the same as in
+\c{scope-if}. Note that in a compound test, commands inside \c{command-while}
+must not end with \c{;}. Rather, \c{;} may follow \c{end}. For example:
+
+\
+i = [uint64] 0;
+n = $size($values);
+while ($i != $n)
+ test1 ($values[$i])
+ i += 1
+end;
+test2
+\
+
+Another example:
+
+\
+while test -f $file
+ test1 $file
+end
+\
+
+
\h#syntax-redirect|Redirect|
\
@@ -1974,6 +2286,7 @@ Similar to the input redirects, an output here-document redirect must be
specified literally on the command line. See \l{#syntax-here-document Here
Document} for details.
+
\h#syntax-here-document|Here-Document|
\
@@ -2536,6 +2849,56 @@ false
Do nothing and terminate normally with the 1 exit code (indicating failure).
+\h#builtins-find|\c{find}|
+
+\
+find <start-path>... [<expression>]
+\
+
+Search for filesystem entries in a filesystem hierarchy. Traverse filesystem
+hierarchies from each \i{start-path} specified on the command line, evaluate
+for each filesystem entry the boolean \i{expression} consisting of the
+options-like arguments called \i{primaries}, and print the filesystem entry
+path if it evaluates to \c{true}, one path per line. The primaries are
+combined into the expression with an implicit logical AND operator. The empty
+expression always evaluates to \c{true}.
+
+Note that the implementation deviates from POSIX in a number of ways. It only
+supports a small subset of primaries and doesn't support compound expressions,
+negations, logical OR and (explicit) AND operators, and the \c{-type} primary
+values other than \c{f}, \c{d}, and \c{l}. It, however, supports the
+\c{-mindepth} and \c{-maxdepth} primaries which are not specified by POSIX but
+are supported by the major \c{find} utility implementations.
+
+The following primaries are supported:
+
+\dl|
+
+\li|\n\c{-name <pattern>}
+
+ Evaluates to \c{true} if a filesystem entry base name matches the specified
+ wildcard pattern.|
+
+\li|\n\c{-type <type>}
+
+ Evaluates to \c{true} if a filesystem entry type matches the specified type:
+ \c{f} for a regular file, \c{d} for a directory, and \c{l} for a symbolic
+ link.|
+
+\li|\n\c{-mindepth <depth>}
+
+ Evaluates to \c{true} if a filesystem entry directory level is not less than
+ the specified depth. The level of the \i{start-path} entries specified on
+ the command line is 0.|
+
+\li|\n\c{-maxdepth <depth>}
+
+ Evaluates to \c{true} if a filesystem entry directory level is not greater
+ than the specified depth. The level of the \i{start-path} entries specified
+ on the command line is 0. Note that the implementation is smart enough not
+ to traverse a directory when the maximum depth is reached.||
+
+
\h#builtins-ln|\c{ln}|
\
@@ -2783,7 +3146,7 @@ are supported.
\h#builtins-set|\c{set}|
\
-set [-e] [-n|-w] [<attr>] <var>
+set [-e] [-n|-w] <var> [<attr>]
\
Set variable from the \c{stdin} input.
@@ -2820,7 +3183,7 @@ If the \i{attr} argument is specified, then it must contain a list of value
attributes enclosed in \c{[]}, for example:
\
-sed -e 's/foo/bar/' input | set [string] x
+sed -e 's/foo/bar/' input | set x [string]
\
Note that this is also the only way to set a variable with a computed name,
@@ -2828,7 +3191,7 @@ for example:
\
foo = FOO
-set [null] $foo <-
+set $foo [null] <-
\
\dl|
diff --git a/libbuild2/adhoc-rule-buildscript.cxx b/libbuild2/adhoc-rule-buildscript.cxx
index 03e810d..3e868a6 100644
--- a/libbuild2/adhoc-rule-buildscript.cxx
+++ b/libbuild2/adhoc-rule-buildscript.cxx
@@ -5,6 +5,8 @@
#include <sstream>
+#include <libbutl/filesystem.hxx> // try_rm_file(), path_entry()
+
#include <libbuild2/depdb.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
@@ -27,10 +29,11 @@ namespace build2
static inline void
hash_script_vars (sha256& cs,
const build::script::script& s,
+ const scope& bs,
const target& t,
names& storage)
{
- context& ctx (t.ctx);
+ auto& vp (bs.var_pool ());
for (const string& n: s.vars)
{
@@ -38,7 +41,7 @@ namespace build2
lookup l;
- if (const variable* var = ctx.var_pool.find (n))
+ if (const variable* var = vp.find (n))
l = t[var];
cs.append (!l.defined () ? '\x1' : l->null ? '\x2' : '\x3');
@@ -46,7 +49,7 @@ namespace build2
if (l)
{
storage.clear ();
- names_view ns (reverse (*l, storage));
+ names_view ns (reverse (*l, storage, true /* reduce */));
for (const name& n: ns)
to_checksum (cs, n);
@@ -198,11 +201,7 @@ namespace build2
os << ind << "depdb clear" << endl;
script::dump (os, ind, script.depdb_preamble);
-
- if (script.diag_line)
- {
- os << ind; script::dump (os, *script.diag_line, true /* newline */);
- }
+ script::dump (os, ind, script.diag_preamble);
script::dump (os, ind, script.body);
ind.resize (ind.size () - 2);
@@ -212,22 +211,28 @@ namespace build2
bool adhoc_buildscript_rule::
reverse_fallback (action a, const target_type& tt) const
{
- // We can provide clean for a file target if we are providing update.
+ // We can provide clean for a file or group target if we are providing
+ // update.
//
- return a == perform_clean_id && tt.is_a<file> () &&
- find (actions.begin (), actions.end (),
- perform_update_id) != actions.end ();
+ return (a == perform_clean_id &&
+ (tt.is_a<file> () || tt.is_a<group> ()) &&
+ find (actions.begin (), actions.end (),
+ perform_update_id) != actions.end ());
}
+ using dynamic_target = build::script::parser::dynamic_target;
+ using dynamic_targets = build::script::parser::dynamic_targets;
+
struct adhoc_buildscript_rule::match_data
{
- match_data (action a, const target& t, bool temp_dir)
- : env (a, t, temp_dir) {}
+ match_data (action a, const target& t, const scope& bs, bool temp_dir)
+ : env (a, t, bs, temp_dir) {}
build::script::environment env;
build::script::default_runner run;
path dd;
+ dynamic_targets dyn_targets;
const scope* bs;
timestamp mt;
@@ -236,8 +241,10 @@ namespace build2
struct adhoc_buildscript_rule::match_data_byproduct
{
- match_data_byproduct (action a, const target& t, bool temp_dir)
- : env (a, t, temp_dir) {}
+ match_data_byproduct (action a, const target& t,
+ const scope& bs,
+ bool temp_dir)
+ : env (a, t, bs, temp_dir) {}
build::script::environment env;
build::script::default_runner run;
@@ -253,22 +260,27 @@ namespace build2
};
bool adhoc_buildscript_rule::
- match (action a, target& t, const string& h, match_extra& me) const
+ match (action a, target& xt, const string& h, match_extra& me) const
{
+ const target& t (xt); // See adhoc_rule::match().
+
// We pre-parsed the script with the assumption it will be used on a
- // non/file-based target. Note that this should not be possible with
- // patterns.
+ // non/file-based (or file group-based) target. Note that this should not
+ // be possible with patterns.
//
if (pattern == nullptr)
{
- if ((t.is_a<file> () != nullptr) != ttype->is_a<file> ())
- {
+ // Let's not allow mixing file/group.
+ //
+ if ((t.is_a<file> () != nullptr) == ttype->is_a<file> () ||
+ (t.is_a<group> () != nullptr) == ttype->is_a<group> ())
+ ;
+ else
fail (loc) << "incompatible target types used with shared recipe" <<
- info << "all targets must be file-based or non-file-based";
- }
+ info << "all targets must be file- or file group-based or non";
}
- return adhoc_rule::match (a, t, h, me);
+ return adhoc_rule::match (a, xt, h, me);
}
recipe adhoc_buildscript_rule::
@@ -279,17 +291,28 @@ namespace build2
recipe adhoc_buildscript_rule::
apply (action a,
- target& xt,
+ target& t,
match_extra& me,
- const optional<timestamp>& d) const
+ const optional<timestamp>& deadline) const
{
tracer trace ("adhoc_buildscript_rule::apply");
+ // Handle matching group members (see adhoc_rule::match() for background).
+ //
+ if (const group* g = t.group != nullptr ? t.group->is_a<group> () : nullptr)
+ {
+ // Note: this looks very similar to how we handle ad hoc group members.
+ //
+ match_sync (a, *g, 0 /* options */);
+ return group_recipe; // Execute the group's recipe.
+ }
+
// We don't support deadlines for any of these cases (see below).
//
- if (d && (a.outer () ||
- me.fallback ||
- (a == perform_update_id && xt.is_a<file> ())))
+ if (deadline && (a.outer () ||
+ me.fallback ||
+ (a == perform_update_id &&
+ (t.is_a<file> () || t.is_a<group> ()))))
return empty_recipe;
// If this is an outer operation (e.g., update-for-test), then delegate to
@@ -297,26 +320,98 @@ namespace build2
//
if (a.outer ())
{
- match_inner (a, xt);
- return execute_inner;
+ match_inner (a, t);
+ return inner_recipe;
}
- context& ctx (xt.ctx);
- const scope& bs (xt.base_scope ());
+ context& ctx (t.ctx);
+ const scope& bs (t.base_scope ());
- // Inject pattern's ad hoc group members, if any.
+ group* g (t.is_a<group> ()); // Explicit group.
+
+ // Inject pattern's ad hoc group members, if any (explicit group members
+ // are injected after reset below).
//
- if (pattern != nullptr)
- pattern->apply_adhoc_members (a, xt, bs, me);
+ if (g == nullptr && pattern != nullptr)
+ pattern->apply_group_members (a, t, bs, me);
- // Derive file names for the target and its ad hoc group members, if any.
+ // Derive file names for the target and its static/ad hoc group members,
+ // if any.
//
if (a == perform_update_id || a == perform_clean_id)
{
- for (target* m (&xt); m != nullptr; m = m->adhoc_member)
+ if (g != nullptr)
+ {
+ g->reset_members (a); // See group::group_members() for background.
+
+ // Note that we rely on the fact that if the group has static members,
+ // then they always come first in members and the first static member
+ // is a file.
+ //
+ for (const target& m: g->static_members)
+ g->members.push_back (&m);
+
+ g->members_static = g->members.size ();
+
+ if (pattern != nullptr)
+ {
+ pattern->apply_group_members (a, *g, bs, me);
+ g->members_static = g->members.size ();
+ }
+
+ if (g->members_static == 0)
+ {
+ if (!script.depdb_dyndep_dyn_target)
+ fail << "group " << *g << " has no static or dynamic members";
+ }
+ else
+ {
+ if (!g->members.front ()->is_a<file> ())
+ {
+ // We use the first static member to derive depdb path, get mtime,
+ // etc. So it must be file-based.
+ //
+ fail << "first static member " << g->members.front ()
+ << " of group " << *g << " is not a file";
+ }
+
+ // Derive paths for all the static members.
+ //
+ for (const target* m: g->members)
+ if (auto* p = m->is_a<path_target> ())
+ p->derive_path ();
+ }
+ }
+ else
+ {
+ for (target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (auto* p = m->is_a<path_target> ())
+ p->derive_path ();
+ }
+ }
+ }
+ else if (g != nullptr)
+ {
+ // This could be, for example, configure/dist update which could need a
+ // "representative sample" of members (in order to be able to match the
+ // rules). So add static members unless we already have something
+ // cached.
+ //
+ if (g->group_members (a).members == nullptr) // Note: not g->member.
{
- if (auto* p = m->is_a<path_target> ())
- p->derive_path ();
+ g->reset_members (a);
+
+ for (const target& m: g->static_members)
+ g->members.push_back (&m);
+
+ g->members_static = g->members.size ();
+
+ if (pattern != nullptr)
+ {
+ pattern->apply_group_members (a, *g, bs, me);
+ g->members_static = g->members.size ();
+ }
}
}
@@ -325,22 +420,26 @@ namespace build2
// We do it always instead of only if one of the targets is path-based in
// case the recipe creates temporary files or some such.
//
- const fsdir* dir (inject_fsdir (a, xt));
+ // Note that we disable the prerequisite search for fsdir{} because of the
+ // prerequisites injected by the pattern. So we have to handle this ad hoc
+ // below.
+ //
+ const fsdir* dir (inject_fsdir (a, t, true /*match*/, false /*prereq*/));
// Match prerequisites.
//
// This is essentially match_prerequisite_members() but with support
// for update=unmatch|match.
//
- auto& pts (xt.prerequisite_targets[a]);
+ auto& pts (t.prerequisite_targets[a]);
{
// Re-create the clean semantics as in match_prerequisite_members().
//
- bool clean (a.operation () == clean_id && !xt.is_a<alias> ());
+ bool clean (a.operation () == clean_id && !t.is_a<alias> ());
// Add target's prerequisites.
//
- for (prerequisite_member p: group_prerequisite_members (a, xt))
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
// Note that we have to recognize update=unmatch|match for *(update),
// not just perform(update). But only actually do anything about it
@@ -348,10 +447,10 @@ namespace build2
//
lookup l; // The `update` variable value, if any.
include_type pi (
- include (a, xt, p, a.operation () == update_id ? &l : nullptr));
+ include (a, t, p, a.operation () == update_id ? &l : nullptr));
- // Use bit 2 of prerequisite_target::include to signal update during
- // match and bit 3 -- unmatch.
+ // Use prerequisite_target::include to signal update during match or
+ // unmatch.
//
uintptr_t mask (0);
if (l)
@@ -361,12 +460,12 @@ namespace build2
if (v == "match")
{
if (a == perform_update_id)
- mask = 2;
+ mask = prerequisite_target::include_udm;
}
else if (v == "unmatch")
{
if (a == perform_update_id)
- mask = 4;
+ mask = include_unmatch;
}
else if (v != "false" && v != "true" && v != "execute")
{
@@ -380,7 +479,10 @@ namespace build2
if (!pi)
continue;
- const target& pt (p.search (xt));
+ const target& pt (p.search (t));
+
+ if (&pt == dir) // Don't add injected fsdir{} twice.
+ continue;
if (clean && !pt.in (*bs.root_scope ()))
continue;
@@ -396,19 +498,19 @@ namespace build2
// Inject pattern's prerequisites, if any.
//
if (pattern != nullptr)
- pattern->apply_prerequisites (a, xt, bs, me);
+ pattern->apply_prerequisites (a, t, bs, me);
// Start asynchronous matching of prerequisites. Wait with unlocked
// phase to allow phase switching.
//
- wait_guard wg (ctx, ctx.count_busy (), xt[a].task_count, true);
+ wait_guard wg (ctx, ctx.count_busy (), t[a].task_count, true);
for (const prerequisite_target& pt: pts)
{
- if (pt.target == dir)
+ if (pt.target == dir) // Don't match injected fsdir{} twice.
continue;
- match_async (a, *pt.target, ctx.count_busy (), xt[a].task_count);
+ match_async (a, *pt.target, ctx.count_busy (), t[a].task_count);
}
wg.wait ();
@@ -417,12 +519,14 @@ namespace build2
//
for (prerequisite_target& pt: pts)
{
- if (pt.target == dir)
+ if (pt.target == dir) // See above.
continue;
// Handle update=unmatch.
//
- unmatch um ((pt.include & 4) != 0 ? unmatch::safe : unmatch::none);
+ unmatch um ((pt.include & include_unmatch) != 0
+ ? unmatch::safe
+ : unmatch::none);
pair<bool, target_state> mr (match_complete (a, *pt.target, um));
@@ -431,7 +535,7 @@ namespace build2
l6 ([&]{trace << "unmatch " << *pt.target << ": " << mr.first;});
// If we managed to unmatch, blank it out so that it's not executed,
- // etc. Otherwise, convert it to ad hoc (we also automatically avoid
+ // etc. Otherwise, leave it as is (but we still automatically avoid
// hashing it, updating it during match in exec_depdb_dyndep(), and
// making us out of date in execute_update_prerequisites()).
//
@@ -441,18 +545,213 @@ namespace build2
// information (e.g., poptions from a library) and those will be
// change-tracked.
//
+ // Note: set the include_target flag for the updated_during_match()
+ // check.
+ //
if (mr.first)
+ {
+ pt.data = reinterpret_cast<uintptr_t> (pt.target);
pt.target = nullptr;
- else
- pt.include |= 1;
+ pt.include |= prerequisite_target::include_target;
+
+ // Note that this prerequisite could also be ad hoc and we must
+ // clear that flag if we managed to unmatch (failed that we will
+ // treat it as ordinary ad hoc since it has the target pointer in
+ // data).
+ //
+ // But that makes it impossible to distinguish ad hoc unmatch from
+ // ordinary unmatch prerequisites later when setting $<. Another
+ // flag to the rescue.
+ //
+ if ((pt.include & prerequisite_target::include_adhoc) != 0)
+ {
+ pt.include &= ~prerequisite_target::include_adhoc;
+ pt.include |= include_unmatch_adhoc;
+ }
+ }
}
}
}
+ // Read the list of dynamic targets and, optionally, fsdir{} prerequisites
+ // from depdb, if exists (used in a few depdb-dyndep --dyn-target handling
+ // places below).
+ //
+ auto read_dyn_targets = [] (path ddp, bool fsdir)
+ -> pair<dynamic_targets, dir_paths>
+ {
+ depdb dd (move (ddp), true /* read_only */);
+
+ pair<dynamic_targets, dir_paths> r;
+ while (dd.reading ()) // Breakout loop.
+ {
+ string* l;
+ auto read = [&dd, &l] () -> bool
+ {
+ return (l = dd.read ()) != nullptr;
+ };
+
+ if (!read ()) // Rule id.
+ break;
+
+ // We can omit this for as long as we don't break our blank line
+ // anchors semantics.
+ //
+#if 0
+ if (*l != rule_id_)
+ fail << "unable to clean dynamic target group " << t
+ << " with old depdb";
+#endif
+
+ // Note that we cannot read out expected lines since there can be
+ // custom depdb builtins. So we use the blank lines as anchors to
+ // skip to the parts we need.
+ //
+ // Skip until the first blank that separated custom depdb entries from
+ // the prerequisites list.
+ {
+ bool g;
+ while ((g = read ()) && !l->empty ()) ;
+ if (!g)
+ break;
+ }
+
+ // Next read the prerequisites, detecting fsdir{} entries if asked.
+ //
+ {
+ bool g;
+ while ((g = read ()) && !l->empty ())
+ {
+ if (fsdir)
+ {
+ path p (*l);
+ if (p.to_directory ())
+ r.second.push_back (path_cast<dir_path> (move (p)));
+ }
+ }
+
+ if (!g)
+ break;
+ }
+
+ // Read the dynamic target files. We should always end with a blank
+ // line.
+ //
+ for (;;)
+ {
+ if (!read () || l->empty ())
+ break;
+
+ // Split into type and path.
+ //
+ size_t p (l->find (' '));
+ if (p == string::npos || // Invalid format.
+ p == 0 || // Empty type.
+ p + 1 == l->size ()) // Empty path.
+ break;
+
+ r.first.push_back (
+ dynamic_target {string (*l, 0, p), path (*l, p + 1, string::npos)});
+ }
+
+ break;
+ }
+
+ return r;
+ };
+
+ // Target path to derive the depdb path, query mtime (if file), etc.
+ //
+ // To derive the depdb path for a group with at least one static member we
+ // use the path of the first member. For a group without any static
+ // members we use the group name with the target type name as the
+ // second-level extension.
+ //
+ auto target_path = [&t, g, p = path ()] () mutable -> const path&
+ {
+ return
+ g == nullptr ? t.as<file> ().path () :
+ g->members_static != 0 ? g->members.front ()->as<file> ().path () :
+ (p = g->dir / (g->name + '.' + g->type ().name));
+ };
+
// See if we are providing the standard clean as a fallback.
//
if (me.fallback)
- return &perform_clean_file;
+ {
+ // For depdb-dyndep --dyn-target use depdb to clean dynamic targets.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ {
+ // Note that only removing the relevant filesystem entries is not
+ // enough: we actually have to populate the group with members since
+ // this information could be used to clean derived targets (for
+ // example, object files). So we just do that and let the standard
+ // clean logic take care of them the same as static members.
+ //
+ // NOTE that this logic should be consistent with what we have in
+ // exec_depdb_dyndep().
+ //
+ using dyndep = dyndep_rule;
+
+ function<dyndep::group_filter_func> filter;
+ if (g != nullptr)
+ {
+ filter = [] (mtime_target& g, const build2::file& m)
+ {
+ auto& ms (g.as<group> ().members);
+ return find (ms.begin (), ms.end (), &m) == ms.end ();
+ };
+ }
+
+ pair<dynamic_targets, dir_paths> p (
+ read_dyn_targets (target_path () + ".d", true));
+
+ for (dynamic_target& dt: p.first)
+ {
+ path& f (dt.path);
+
+ // Resolve target type. Clean it as file if unable to.
+ //
+ const target_type* tt (bs.find_target_type (dt.type));
+ if (tt == nullptr)
+ tt = &file::static_type;
+
+ if (g != nullptr)
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_group_member (a, bs, *g, move (f), *tt, filter));
+
+ if (r.second)
+ g->members.push_back (&r.first);
+ }
+ else
+ {
+ // Note that here we don't bother cleaning any old dynamic targets
+ // -- the more we can clean, the merrier.
+ //
+ dyndep::inject_adhoc_group_member (a, bs, t, move (f), *tt);
+ }
+ }
+
+ // Enter fsdir{} prerequisites.
+ //
+ // See the add lambda in exec_depdb_dyndep() for background.
+ //
+ for (dir_path& d: p.second)
+ {
+ dir_path o; string n; // For GCC 13 -Wdangling-reference.
+ const fsdir& dt (search<fsdir> (t,
+ move (d),
+ move (o),
+ move (n), nullptr, nullptr));
+ match_sync (a, dt);
+ pts.push_back (prerequisite_target (&dt, true /* adhoc */));
+ }
+ }
+
+ return g == nullptr ? perform_clean_file : perform_clean_group;
+ }
// If we have any update during match prerequisites, now is the time to
// update them.
@@ -464,17 +763,17 @@ namespace build2
// prerequisite_target::data.
//
if (a == perform_update_id)
- update_during_match_prerequisites (trace, a, xt, 2 /* mask */);
+ update_during_match_prerequisites (trace, a, t);
- // See if this is not update or not on a file-based target.
+ // See if this is not update or not on a file/group-based target.
//
- if (a != perform_update_id || !xt.is_a<file> ())
+ if (a != perform_update_id || !(g != nullptr || t.is_a<file> ()))
{
// Make sure we get small object optimization.
//
- if (d)
+ if (deadline)
{
- return [dv = *d, this] (action a, const target& t)
+ return [dv = *deadline, this] (action a, const target& t)
{
return default_action (a, t, dv);
};
@@ -488,20 +787,22 @@ namespace build2
}
}
+ // This is a perform update on a file or group target.
+ //
// See if this is the simple case with only static dependencies.
//
if (!script.depdb_dyndep)
{
return [this] (action a, const target& t)
{
- return perform_update_file (a, t);
+ return perform_update_file_or_group (a, t);
};
}
- // This is a perform update on a file target with extraction of dynamic
- // dependency information either in the depdb preamble (depdb-dyndep
- // without --byproduct) or as a byproduct of the recipe body execution
- // (depdb-dyndep with --byproduct).
+ // This is a perform update on a file or group target with extraction of
+ // dynamic dependency information either in the depdb preamble
+ // (depdb-dyndep without --byproduct) or as a byproduct of the recipe body
+ // execution (depdb-dyndep with --byproduct).
//
// For the former case, we may need to add additional prerequisites (or
// even target group members). We also have to save any such additional
@@ -519,18 +820,38 @@ namespace build2
// example and all this logic is based on the prior work in the cc module
// where you can often find more detailed rationale for some of the steps
// performed (like the fsdir update below).
+
+ // Re-acquire fsdir{} specified by the user, similar to inject_fsdir()
+ // (which we have disabled; see above).
//
- file& t (xt.as<file> ());
- const path& tp (t.path ());
+ if (dir == nullptr)
+ {
+ for (const target* pt: pts)
+ {
+ if (pt != nullptr)
+ {
+ if (const fsdir* dt = pt->is_a<fsdir> ())
+ {
+ if (dt->dir == t.dir)
+ {
+ dir = dt;
+ break;
+ }
+ }
+ }
+ }
+ }
if (dir != nullptr)
- fsdir_rule::perform_update_direct (a, t);
+ fsdir_rule::perform_update_direct (a, *dir);
// Because the depdb preamble can access $<, we have to blank out all the
// ad hoc prerequisites. Since we will still need them later, we "move"
// them to the auxiliary data member in prerequisite_target (see
// execute_update_prerequisites() for details).
//
+ // Note: set the include_target flag for the updated_during_match() check.
+ //
for (prerequisite_target& p: pts)
{
// Note that fsdir{} injected above is adhoc.
@@ -539,13 +860,57 @@ namespace build2
{
p.data = reinterpret_cast<uintptr_t> (p.target);
p.target = nullptr;
+ p.include |= prerequisite_target::include_target;
}
}
+ const path& tp (target_path ());
+
+ // Note that while it's tempting to turn match_data* into recipes, some of
+ // their members are not movable. And in the end we will have the same
+ // result: one dynamic memory allocation.
+ //
+ unique_ptr<match_data> md;
+ unique_ptr<match_data_byproduct> mdb;
+
+ dynamic_targets old_dyn_targets;
+
+ if (script.depdb_dyndep_byproduct)
+ {
+ mdb.reset (new match_data_byproduct (
+ a, t, bs, script.depdb_preamble_temp_dir));
+ }
+ else
+ {
+ md.reset (new match_data (a, t, bs, script.depdb_preamble_temp_dir));
+
+ // If the set of dynamic targets can change based on changes to the
+ // inputs (say, each entity, such as a type, in the input file gets its
+ // own output file), then we can end up with a large number of old
+ // output files laying around because they are not part of the new
+ // dynamic target set. So we try to clean them up based on the old depdb
+ // information, similar to how we do it for perform_clean above (except
+ // here we will just keep the list of old files).
+ //
+ // Note: do before opening depdb, which can start over-writing it.
+ //
+ // We also have to do this speculatively, without knowing whether we
+ // will need to update. Oh, well, being dynamic ain't free.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ old_dyn_targets = read_dyn_targets (tp + ".d", false).first;
+ }
+
depdb dd (tp + ".d");
// NOTE: see the "static dependencies" version (with comments) below.
//
+ // NOTE: We use blank lines as anchors to skip directly to certain entries
+ // (e.g., dynamic targets). So make sure none of the other entries
+ // can be blank (for example, see `depdb string` builtin).
+ //
+ // NOTE: KEEP IN SYNC WITH read_dyn_targets ABOVE!
+ //
if (dd.expect ("<ad hoc buildscript recipe> 1") != nullptr)
l4 ([&]{trace << "rule mismatch forcing update of " << t;});
@@ -565,7 +930,7 @@ namespace build2
p.adhoc () ? reinterpret_cast<target*> (p.data) :
nullptr))
{
- if ((p.include & 4) != 0) // Skip update=unmatch.
+ if ((p.include & include_unmatch) != 0) // Skip update=unmatch.
continue;
hash_prerequisite_target (prq_cs, exe_cs, env_cs, *pt, storage);
@@ -574,16 +939,38 @@ namespace build2
{
sha256 cs;
- hash_script_vars (cs, script, t, storage);
+ hash_script_vars (cs, script, bs, t, storage);
if (dd.expect (cs.string ()) != nullptr)
l4 ([&]{trace << "recipe variable change forcing update of " << t;});
}
+ // Static targets and prerequisites (there can also be dynamic targets;
+ // see dyndep --dyn-target).
+ //
{
sha256 tcs;
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
- hash_target (tcs, *m, storage);
+ if (g == nullptr)
+ {
+ // There is a nuance: in an operation batch (e.g., `b update
+ // update`) we will already have the dynamic targets as members on
+ // the subsequent operations and we need to make sure we don't treat
+ // them as static. Using target_decl to distinguish the two seems
+ // like a natural way.
+ //
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (m->decl == target_decl::real)
+ hash_target (tcs, *m, storage);
+ }
+ }
+ else
+ {
+ // Feels like there is not much sense in hashing the group itself.
+ //
+ for (const target* m: g->members)
+ hash_target (tcs, *m, storage);
+ }
if (dd.expect (tcs.string ()) != nullptr)
l4 ([&]{trace << "target set change forcing update of " << t;});
@@ -601,22 +988,8 @@ namespace build2
}
}
- // Note that while it's tempting to turn match_data* into recipes, some of
- // their members are not movable. And in the end we will have the same
- // result: one dynamic memory allocation.
+ // Get ready to run the depdb preamble.
//
- unique_ptr<match_data> md;
- unique_ptr<match_data_byproduct> mdb;
-
- if (script.depdb_dyndep_byproduct)
- {
- mdb.reset (new match_data_byproduct (
- a, t, script.depdb_preamble_temp_dir));
- }
- else
- md.reset (new match_data (a, t, script.depdb_preamble_temp_dir));
-
-
build::script::environment& env (mdb != nullptr ? mdb->env : md->env);
build::script::default_runner& run (mdb != nullptr ? mdb->run : md->run);
@@ -627,21 +1000,51 @@ namespace build2
{
build::script::parser p (ctx);
p.execute_depdb_preamble (a, bs, t, env, script, run, dd);
+
+ // Write a blank line after the custom depdb entries and before
+ // prerequisites, which we use as an anchor (see read_dyn_targets
+ // above). We only do it for the new --dyn-target mode in order not to
+ // invalidate the existing depdb instances.
+ //
+ if (script.depdb_dyndep_dyn_target)
+ dd.expect ("");
}
// Determine if we need to do an update based on the above checks.
//
- bool update;
+ bool update (false);
timestamp mt;
if (dd.writing ())
update = true;
else
{
- if ((mt = t.mtime ()) == timestamp_unknown)
- t.mtime (mt = mtime (tp)); // Cache.
+ if (g == nullptr)
+ {
+ const file& ft (t.as<file> ());
- update = dd.mtime > mt;
+ if ((mt = ft.mtime ()) == timestamp_unknown)
+ ft.mtime (mt = mtime (tp)); // Cache.
+ }
+ else
+ {
+ // Use static member, old dynamic, or force update.
+ //
+ const path* p (
+ g->members_static != 0
+ ? &tp /* first static member path */
+ : (!old_dyn_targets.empty ()
+ ? &old_dyn_targets.front ().path
+ : nullptr));
+
+ if (p != nullptr)
+ mt = g->load_mtime (*p);
+ else
+ update = true;
+ }
+
+ if (!update)
+ update = dd.mtime > mt;
}
if (update)
@@ -665,7 +1068,8 @@ namespace build2
// update on encountering any non-existent files in depbd, we may
// actually incorrectly "validate" some number of depdb entires while
// having an out-of-date main source file. We could probably avoid the
- // update if we are already updating.
+ // update if we are already updating (or not: there is pre-generation
+ // to consider; see inject_existing_file() for details).
//
{
build::script::parser p (ctx);
@@ -701,7 +1105,7 @@ namespace build2
size_t& skip_count (mdb->skip_count);
auto add = [&trace, what,
- a, &bs, &t,
+ a, &bs, &t, pts_n = mdb->pts_n,
&byp, &map_ext,
&skip_count, mt] (path fp) -> optional<bool>
{
@@ -716,7 +1120,7 @@ namespace build2
//
if (optional<bool> u = dyndep::inject_existing_file (
trace, what,
- a, t,
+ a, t, pts_n,
*ft, mt,
false /* fail */,
false /* adhoc */,
@@ -776,7 +1180,7 @@ namespace build2
// Note that in case of dry run we will have an incomplete (but valid)
// database which will be updated on the next non-dry run.
//
- if (!update || ctx.dry_run)
+ if (!update || ctx.dry_run_option)
dd.close (false /* mtime_check */);
else
mdb->dd = dd.close_to_reopen ();
@@ -788,59 +1192,107 @@ namespace build2
return [this, md = move (mdb)] (action a, const target& t)
{
- return perform_update_file_dyndep_byproduct (a, t, *md);
+ return perform_update_file_or_group_dyndep_byproduct (a, t, *md);
};
}
else
{
// Run the second half of the preamble (depdb-dyndep commands) to update
- // our prerequisite targets and extract dynamic dependencies.
+ // our prerequisite targets and extract dynamic dependencies (targets
+ // and prerequisites).
//
// Note that this should be the last update to depdb (the invalidation
// order semantics).
//
- bool deferred_failure (false);
+ md->deferred_failure = false;
{
build::script::parser p (ctx);
p.execute_depdb_preamble_dyndep (a, bs, t,
env, script, run,
dd,
+ md->dyn_targets,
update,
mt,
- deferred_failure);
+ md->deferred_failure);
}
- if (update && dd.reading () && !ctx.dry_run)
+ if (update && dd.reading () && !ctx.dry_run_option)
dd.touch = timestamp_unknown;
dd.close (false /* mtime_check */);
- md->dd = move (dd.path);
- // Pass on base scope and update/mtime.
+ // Remove previous dynamic targets since their set may change with
+ // changes to the inputs.
+ //
+ // The dry-run mode complicates things: if we don't remove the old
+ // files, then that information will be gone (since we update depdb even
+ // in the dry-run mode). But if we remove everything in the dry-run
+ // mode, then we may also remove some of the current files, which would
+ // be incorrect. So let's always remove but only files that are not in
+ // the current set.
+ //
+ // Note that we used to do this in perform_update_file_or_group_dyndep()
+ // but that had a tricky issue: if we end up performing match but not
+ // execute (e.g., via the resolve_members() logic), then we will not
+  // cleanup old targets but lose this information (since the depdb has
+  // been updated). So now we do it here, which is a bit strange, but it
+ // sort of fits into that dry-run logic above. Note also that we do this
+ // unconditionally, update or not, since if everything is up to date,
+ // then old and new sets should be the same.
+ //
+ for (const dynamic_target& dt: old_dyn_targets)
+ {
+ const path& f (dt.path);
+
+ if (find_if (md->dyn_targets.begin (), md->dyn_targets.end (),
+ [&f] (const dynamic_target& dt)
+ {
+ return dt.path == f;
+ }) == md->dyn_targets.end ())
+ {
+ // This is an optimization so best effort.
+ //
+ if (optional<rmfile_status> s = butl::try_rmfile_ignore_error (f))
+ {
+ if (s == rmfile_status::success && verb >= 2)
+ text << "rm " << f;
+ }
+ }
+ }
+
+ // Pass on the base scope, depdb path, and update/mtime.
//
md->bs = &bs;
+ md->dd = move (dd.path);
md->mt = update ? timestamp_nonexistent : mt;
- md->deferred_failure = deferred_failure;
return [this, md = move (md)] (action a, const target& t)
{
- return perform_update_file_dyndep (a, t, *md);
+ return perform_update_file_or_group_dyndep (a, t, *md);
};
}
}
target_state adhoc_buildscript_rule::
- perform_update_file_dyndep_byproduct (action a,
- const target& xt,
- match_data_byproduct& md) const
+ perform_update_file_or_group_dyndep_byproduct (
+ action a, const target& t, match_data_byproduct& md) const
{
// Note: using shared function name among the three variants.
//
- tracer trace ("adhoc_buildscript_rule::perform_update_file");
+ tracer trace (
+ "adhoc_buildscript_rule::perform_update_file_or_group_dyndep_byproduct");
- context& ctx (xt.ctx);
+ context& ctx (t.ctx);
- const file& t (xt.as<file> ());
+ // For a group we use the first (for now static) member as a source of
+ // mtime.
+ //
+ // @@ TODO: expl: byproduct: Note that until we support dynamic targets in
+ // the byproduct mode, we verify there is at least one static member in
+ // apply() above. Once we do support this, we will need to verify after
+ // the dependency extraction below.
+ //
+ const group* g (t.is_a<group> ());
// Note that even if we've updated all our prerequisites in apply(), we
// still need to execute them here to keep the dependency counts straight.
@@ -869,7 +1321,14 @@ namespace build2
if (!ctx.dry_run || verb != 0)
{
- execute_update_file (bs, a, t, env, run);
+ if (g == nullptr)
+ execute_update_file (bs, a, t.as<file> (), env, run);
+ else
+ {
+ // Note: no dynamic members yet.
+ //
+ execute_update_group (bs, a, *g, env, run);
+ }
}
// Extract the dynamic dependency information as byproduct of the recipe
@@ -909,15 +1368,25 @@ namespace build2
const auto& pts (t.prerequisite_targets[a]);
auto add = [&trace, what,
- a, &bs, &t, &pts, pts_n = md.pts_n,
+ a, &bs, &t, g, &pts, pts_n = md.pts_n,
&byp, &map_ext, &dd, &skip] (path fp)
{
normalize_external (fp, what);
+ // Note that unless we take into account dynamic targets, the skip
+ // logic below falls apart since we neither see targets entered via
+ // prerequsites (skip static prerequisites) nor by the cache=true code
+ // above (skip depdb entries).
+ //
+ // If this turns out to be racy (which is the reason we would skip
+      // dynamic targets; see the find_file() implementation for details),
+ // then the only answer for now is to not use the byproduct mode.
+ //
if (const build2::file* ft = dyndep::find_file (
trace, what,
a, bs, t,
fp, false /* cache */, true /* normalized */,
+ true /* dynamic */,
map_ext, *byp.default_type).first)
{
// Skip if this is one of the static prerequisites provided it was
@@ -937,15 +1406,25 @@ namespace build2
}
}
- // Skip if this is one of the targets.
+ // Skip if this is one of the targets (see the non-byproduct version
+ // for background).
//
if (byp.drop_cycles)
{
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ if (g != nullptr)
{
- if (ft == m)
+ auto& ms (g->members);
+ if (find (ms.begin (), ms.end (), ft) != ms.end ())
return;
}
+ else
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ return;
+ }
+ }
}
// Skip until where we left off.
@@ -961,7 +1440,7 @@ namespace build2
// @@ Currently we will issue an imprecise diagnostics if this is
// a static prerequisite that was not updated (see above).
//
- dyndep::verify_existing_file (trace, what, a, t, *ft);
+ dyndep::verify_existing_file (trace, what, a, t, pts_n, *ft);
}
dd.write (fp);
@@ -1022,7 +1501,7 @@ namespace build2
if (r.second.empty ())
continue;
- // @@ TODO: what should we do about targets?
+ // Note: no support for dynamic targets in byproduct mode.
//
if (r.first == make_type::target)
continue;
@@ -1032,10 +1511,11 @@ namespace build2
if (f.relative ())
{
if (!byp.cwd)
- fail (il) << "relative path '" << f << "' in make dependency"
- << " declaration" <<
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in make dependency declaration" <<
info << "consider using --cwd to specify relative path "
- << "base";
+ << "base";
f = *byp.cwd / f;
}
@@ -1050,6 +1530,52 @@ namespace build2
break;
}
+ case dyndep_format::lines:
+ {
+ for (string l;; ++il.line) // Reuse the buffer.
+ {
+ if (eof (getline (is, l)))
+ break;
+
+ if (l.empty ())
+ fail (il) << "blank line in prerequisites list";
+
+ if (l.front () == ' ')
+ fail (il) << "non-existent prerequisite in --byproduct mode";
+
+ path f;
+ try
+ {
+ f = path (l);
+
+ // fsdir{} prerequisites only make sense with dynamic targets.
+ //
+ if (f.to_directory ())
+ throw invalid_path ("");
+
+ if (f.relative ())
+ {
+ if (!byp.cwd)
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --cwd to specify "
+ << "relative path base";
+
+ f = *byp.cwd / f;
+ }
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what << " prerequisite path '"
+ << l << "'";
+ }
+
+ add (move (f));
+ }
+
+ break;
+ }
}
// Add the terminating blank line.
@@ -1057,6 +1583,8 @@ namespace build2
dd.expect ("");
dd.close ();
+ //@@ TODO: expl: byproduct: verify have at least one member.
+
md.dd.path = move (dd.path); // For mtime check below.
}
@@ -1065,20 +1593,36 @@ namespace build2
timestamp now (system_clock::now ());
if (!ctx.dry_run)
- depdb::check_mtime (start, md.dd.path, t.path (), now);
+ {
+ // Only now we know for sure there must be a member in the group.
+ //
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+
+ depdb::check_mtime (start, md.dd.path, ft.path (), now);
+ }
+
+ (g == nullptr
+ ? static_cast<const mtime_target&> (t.as<file> ())
+ : static_cast<const mtime_target&> (*g)).mtime (now);
- t.mtime (now);
return target_state::changed;
}
target_state adhoc_buildscript_rule::
- perform_update_file_dyndep (action a, const target& xt, match_data& md) const
+ perform_update_file_or_group_dyndep (
+ action a, const target& t, match_data& md) const
{
- tracer trace ("adhoc_buildscript_rule::perform_update_file");
+ tracer trace (
+ "adhoc_buildscript_rule::perform_update_file_or_group_dyndep");
- context& ctx (xt.ctx);
+ context& ctx (t.ctx);
- const file& t (xt.as<file> ());
+ // For a group we use the first (static or dynamic) member as a source of
+ // mtime. Note that in this case there must be at least one since we fail
+ // if we were unable to extract any dynamic members and there are no
+ // static (see exec_depdb_dyndep()).
+ //
+ const group* g (t.is_a<group> ());
// Note that even if we've updated all our prerequisites in apply(), we
// still need to execute them here to keep the dependency counts straight.
@@ -1107,7 +1651,11 @@ namespace build2
if (!ctx.dry_run || verb != 0)
{
- execute_update_file (*md.bs, a, t, env, run, md.deferred_failure);
+ if (g == nullptr)
+ execute_update_file (
+ *md.bs, a, t.as<file> (), env, run, md.deferred_failure);
+ else
+ execute_update_group (*md.bs, a, *g, env, run, md.deferred_failure);
}
run.leave (env, script.end_loc);
@@ -1115,26 +1663,67 @@ namespace build2
timestamp now (system_clock::now ());
if (!ctx.dry_run)
- depdb::check_mtime (start, md.dd, t.path (), now);
+ {
+ // Note: in case of deferred failure we may not have any members.
+ //
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+ depdb::check_mtime (start, md.dd, ft.path (), now);
+ }
+
+ (g == nullptr
+ ? static_cast<const mtime_target&> (t)
+ : static_cast<const mtime_target&> (*g)).mtime (now);
- t.mtime (now);
return target_state::changed;
}
target_state adhoc_buildscript_rule::
- perform_update_file (action a, const target& xt) const
+ perform_update_file_or_group (action a, const target& t) const
{
- tracer trace ("adhoc_buildscript_rule::perform_update_file");
+ tracer trace ("adhoc_buildscript_rule::perform_update_file_or_group");
- context& ctx (xt.ctx);
+ context& ctx (t.ctx);
+ const scope& bs (t.base_scope ());
+
+ // For a group we use the first (static) member to derive depdb path, as a
+ // source of mtime, etc. Note that in this case there must be a static
+ // member since in this version of perform_update we don't extract dynamic
+ // dependencies (see apply() details).
+ //
+ const group* g (t.is_a<group> ());
- const file& t (xt.as<file> ());
- const path& tp (t.path ());
+ const file& ft ((g == nullptr ? t : *g->members.front ()).as<file> ());
+ const path& tp (ft.path ());
+
+ // Support creating file symlinks using ad hoc recipes.
+ //
+ auto path_symlink = [&tp] ()
+ {
+ pair<bool, butl::entry_stat> r (
+ butl::path_entry (tp,
+ false /* follow_symlinks */,
+ true /* ignore_errors */));
+
+ return r.first && r.second.type == butl::entry_type::symlink;
+ };
// Update prerequisites and determine if any of them render this target
// out-of-date.
//
- timestamp mt (t.load_mtime ());
+  // If the file entry exists, check if it's a symlink.
+ //
+ bool symlink (false);
+ timestamp mt;
+
+ if (g == nullptr)
+ {
+ mt = ft.load_mtime ();
+
+ if (mt != timestamp_nonexistent)
+ symlink = path_symlink ();
+ }
+ else
+ mt = g->load_mtime (tp);
// This is essentially ps=execute_prerequisites(a, t, mt) which we
// cannot use because we need to see ad hoc prerequisites.
@@ -1156,7 +1745,7 @@ namespace build2
p.adhoc () ? reinterpret_cast<target*> (p.data)
: nullptr))
{
- if ((p.include & 4) != 0) // Skip update=unmatch.
+ if ((p.include & include_unmatch) != 0) // Skip update=unmatch.
continue;
hash_prerequisite_target (prq_cs, exe_cs, env_cs, *pt, storage);
@@ -1218,7 +1807,7 @@ namespace build2
//
{
sha256 cs;
- hash_script_vars (cs, script, t, storage);
+ hash_script_vars (cs, script, bs, t, storage);
if (dd.expect (cs.string ()) != nullptr)
l4 ([&]{trace << "recipe variable change forcing update of " << t;});
@@ -1228,8 +1817,18 @@ namespace build2
//
{
sha256 tcs;
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
- hash_target (tcs, *m, storage);
+ if (g == nullptr)
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ hash_target (tcs, *m, storage);
+ }
+ else
+ {
+ // Feels like there is not much sense in hashing the group itself.
+ //
+ for (const target* m: g->members)
+ hash_target (tcs, *m, storage);
+ }
if (dd.expect (tcs.string ()) != nullptr)
l4 ([&]{trace << "target set change forcing update of " << t;});
@@ -1249,8 +1848,6 @@ namespace build2
}
}
- const scope* bs (nullptr);
-
// Execute the custom dependency change tracking commands, if present.
//
// Note that we share the environment between the execute_depdb_preamble()
@@ -1265,7 +1862,10 @@ namespace build2
if (!depdb_preamble)
{
- if (dd.writing () || dd.mtime > mt)
+ // If this is a symlink, depdb mtime could be greater than the symlink
+ // target.
+ //
+ if (dd.writing () || (dd.mtime > mt && !symlink))
update = true;
if (!update)
@@ -1275,25 +1875,23 @@ namespace build2
}
}
- build::script::environment env (a, t, false /* temp_dir */);
+ build::script::environment env (a, t, bs, false /* temp_dir */);
build::script::default_runner run;
if (depdb_preamble)
{
- bs = &t.base_scope ();
-
if (script.depdb_preamble_temp_dir)
env.set_temp_dir_variable ();
build::script::parser p (ctx);
run.enter (env, script.start_loc);
- p.execute_depdb_preamble (a, *bs, t, env, script, run, dd);
+ p.execute_depdb_preamble (a, bs, t, env, script, run, dd);
}
// Update if depdb mismatch.
//
- if (dd.writing () || dd.mtime > mt)
+ if (dd.writing () || (dd.mtime > mt && !symlink))
update = true;
dd.close ();
@@ -1315,22 +1913,44 @@ namespace build2
bool r (false);
if (!ctx.dry_run || verb != 0)
{
- // Prepare to execute the script diag line and/or body.
+ // Prepare to execute the script diag preamble and/or body.
//
- if (bs == nullptr)
- bs = &t.base_scope ();
+ r = g == nullptr
+ ? execute_update_file (bs, a, ft, env, run)
+ : execute_update_group (bs, a, *g, env, run);
- if ((r = execute_update_file (*bs, a, t, env, run)))
+ if (r)
{
if (!ctx.dry_run)
- dd.check_mtime (tp);
+ {
+ if (g == nullptr)
+ symlink = path_symlink ();
+
+ // Again, if this is a symlink, depdb mtime will be greater than
+ // the symlink target.
+ //
+ if (!symlink)
+ dd.check_mtime (tp);
+ }
}
}
if (r || depdb_preamble)
run.leave (env, script.end_loc);
- t.mtime (system_clock::now ());
+ // Symlinks don't play well with dry-run: we can't extract accurate target
+ // timestamp without creating the symlink. Overriding the dry-run doesn't
+ // seem to be an option since we don't know whether it will be a symlink
+ // until it's created. At least we are being pessimistic rather than
+ // optimistic here.
+ //
+ (g == nullptr
+ ? static_cast<const mtime_target&> (ft)
+ : static_cast<const mtime_target&> (*g)).mtime (
+ symlink
+ ? build2::mtime (tp)
+ : system_clock::now ());
+
return target_state::changed;
}
@@ -1345,6 +1965,7 @@ namespace build2
// !NULL false 1 - normal prerequisite already updated
// !NULL true 0 - ad hoc prerequisite to be updated and blanked
// NULL true !NULL - ad hoc prerequisite already updated and blanked
+ // NULL false !NULL - unmatched prerequisite (ignored by this function)
//
// Note that we still execute already updated prerequisites to keep the
// dependency counts straight. But we don't consider them for the "renders
@@ -1352,6 +1973,8 @@ namespace build2
//
// See also environment::set_special_variables().
//
+ // See also perform_execute() which has to deal with these shenanigans.
+ //
optional<target_state> adhoc_buildscript_rule::
execute_update_prerequisites (action a, const target& t, timestamp mt) const
{
@@ -1396,7 +2019,7 @@ namespace build2
// Compare our timestamp to this prerequisite's skipping
// update=unmatch.
//
- if (!e && (p.include & 4) == 0)
+ if (!e && (p.include & include_unmatch) == 0)
{
// If this is an mtime-based target, then compare timestamps.
//
@@ -1417,10 +2040,14 @@ namespace build2
// Blank out adhoc.
//
+ // Note: set the include_target flag for the updated_during_match()
+ // check.
+ //
if (p.adhoc ())
{
p.data = reinterpret_cast<uintptr_t> (p.target);
p.target = nullptr;
+ p.include |= prerequisite_target::include_target;
}
}
}
@@ -1429,16 +2056,18 @@ namespace build2
return e ? nullopt : optional<target_state> (rs);
}
- // Return true if execute_body() was called and thus the caller should call
- // run.leave().
+ // Return true if execute_diag_preamble() and/or execute_body() were called
+ // and thus the caller should call run.leave().
//
bool adhoc_buildscript_rule::
execute_update_file (const scope& bs,
- action, const file& t,
+ action a, const file& t,
build::script::environment& env,
build::script::default_runner& run,
bool deferred_failure) const
{
+ // NOTE: similar to execute_update_group() below.
+ //
context& ctx (t.ctx);
const scope& rs (*bs.root_scope ());
@@ -1449,28 +2078,73 @@ namespace build2
//
build::script::parser p (ctx);
- if (verb == 1)
+ bool exec_body (!ctx.dry_run || verb >= 2);
+ bool exec_diag (!script.diag_preamble.empty () && (exec_body || verb == 1));
+ bool exec_depdb (!script.depdb_preamble.empty ());
+
+ if (script.diag_name)
{
- if (script.diag_line)
- {
- text << p.execute_special (rs, bs, env, *script.diag_line);
- }
- else
+ if (verb == 1)
{
- // @@ TODO (and in default_action() below):
+ // By default we print the first non-ad hoc prerequisite target as the
+ // "main" prerequisite, unless there isn't any or it's not file-based,
+ // in which case we fallback to the second form without the
+ // prerequisite. Potential future improvements:
//
- // - we are printing target, not source (like in most other places)
+ // - Somehow detect that the first prerequisite target is a tool being
+ // executed and fallback to the second form. It's tempting to just
+ // exclude all exe{} targets, but this could be a rule for something
+ // like strip.
//
- // - printing of ad hoc target group (the {hxx cxx}{foo} idea)
- //
- // - if we are printing prerequisites, should we print all of them
- // (including tools)?
- //
- text << *script.diag_name << ' ' << t;
+ const file* pt (nullptr);
+ for (const prerequisite_target& p: t.prerequisite_targets[a])
+ {
+ // See execute_update_prerequisites().
+ //
+ if (p.target != nullptr && !p.adhoc ())
+ {
+ pt = p.target->is_a<file> ();
+ break;
+ }
+ }
+
+ if (t.adhoc_member == nullptr)
+ {
+ if (pt != nullptr)
+ print_diag (script.diag_name->c_str (), *pt, t);
+ else
+ print_diag (script.diag_name->c_str (), t);
+ }
+ else
+ {
+ vector<target_key> ts;
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ ts.push_back (m->key ());
+
+ if (pt != nullptr)
+ print_diag (script.diag_name->c_str (), pt->key (), move (ts));
+ else
+ print_diag (script.diag_name->c_str (), move (ts));
+ }
}
}
+ else if (exec_diag)
+ {
+ if (script.diag_preamble_temp_dir && !script.depdb_preamble_temp_dir)
+ env.set_temp_dir_variable ();
+
+ pair<names, location> diag (
+ p.execute_diag_preamble (rs, bs,
+ env, script, run,
+ verb == 1 /* diag */,
+ !exec_depdb /* enter */,
+ false /* leave */));
- if (!ctx.dry_run || verb >= 2)
+ if (verb == 1)
+ print_custom_diag (bs, move (diag.first), diag.second);
+ }
+
+ if (exec_body)
{
// On failure remove the target files that may potentially exist but
// be invalid.
@@ -1486,13 +2160,15 @@ namespace build2
}
}
- if (script.body_temp_dir && !script.depdb_preamble_temp_dir)
+ if (script.body_temp_dir &&
+ !script.depdb_preamble_temp_dir &&
+ !script.diag_preamble_temp_dir)
env.set_temp_dir_variable ();
p.execute_body (rs, bs,
env, script, run,
- script.depdb_preamble.empty () /* enter */,
- false /* leave */);
+ !exec_depdb && !exec_diag /* enter */,
+ false /* leave */);
if (!ctx.dry_run)
{
@@ -1521,11 +2197,131 @@ namespace build2
for (auto& rm: rms)
rm.cancel ();
}
+ }
+
+ return exec_diag || exec_body;
+ }
- return true;
+ bool adhoc_buildscript_rule::
+ execute_update_group (const scope& bs,
+ action a, const group& g,
+ build::script::environment& env,
+ build::script::default_runner& run,
+ bool deferred_failure) const
+ {
+ // Note: similar to execute_update_file() above (see there for comments).
+ //
+ // NOTE: when called from perform_update_file_or_group_dyndep_byproduct(),
+ // the group does not contain dynamic members yet and thus could
+ // have no members at all.
+ //
+ context& ctx (g.ctx);
+
+ const scope& rs (*bs.root_scope ());
+
+ build::script::parser p (ctx);
+
+ bool exec_body (!ctx.dry_run || verb >= 2);
+ bool exec_diag (!script.diag_preamble.empty () && (exec_body || verb == 1));
+ bool exec_depdb (!script.depdb_preamble.empty ());
+
+ if (script.diag_name)
+ {
+ if (verb == 1)
+ {
+ const file* pt (nullptr);
+ for (const prerequisite_target& p: g.prerequisite_targets[a])
+ {
+ if (p.target != nullptr && !p.adhoc ())
+ {
+ pt = p.target->is_a<file> ();
+ break;
+ }
+ }
+
+ if (pt != nullptr)
+ print_diag (script.diag_name->c_str (), *pt, g);
+ else
+ print_diag (script.diag_name->c_str (), g);
+ }
}
- else
- return false;
+ else if (exec_diag)
+ {
+ if (script.diag_preamble_temp_dir && !script.depdb_preamble_temp_dir)
+ env.set_temp_dir_variable ();
+
+ pair<names, location> diag (
+ p.execute_diag_preamble (rs, bs,
+ env, script, run,
+ verb == 1 /* diag */,
+ !exec_depdb /* enter */,
+ false /* leave */));
+ if (verb == 1)
+ print_custom_diag (bs, move (diag.first), diag.second);
+ }
+
+ if (exec_body)
+ {
+ // On failure remove the target files that may potentially exist but
+ // be invalid.
+ //
+ // Note: we may leave dynamic members if we don't know about them yet.
+ // Feels natural enough.
+ //
+ small_vector<auto_rmfile, 8> rms;
+
+ if (!ctx.dry_run)
+ {
+ for (const target* m: g.members)
+ {
+ if (auto* f = m->is_a<file> ())
+ rms.emplace_back (f->path ());
+ }
+ }
+
+ if (script.body_temp_dir &&
+ !script.depdb_preamble_temp_dir &&
+ !script.diag_preamble_temp_dir)
+ env.set_temp_dir_variable ();
+
+ p.execute_body (rs, bs,
+ env, script, run,
+ !exec_depdb && !exec_diag /* enter */,
+ false /* leave */);
+
+ if (!ctx.dry_run)
+ {
+ if (deferred_failure)
+ fail << "expected error exit status from recipe body";
+
+ // @@ TODO: expl: byproduct
+ //
+ // Note: will not work for dynamic members if we don't know about them
+ // yet. Could probably fix by doing this later, after the dynamic
+ // dependency extraction.
+ //
+#ifndef _WIN32
+ auto chmod = [] (const path& p)
+ {
+ path_perms (p,
+ (path_perms (p) |
+ permissions::xu |
+ permissions::xg |
+ permissions::xo));
+ };
+
+ for (const target* m: g.members)
+ {
+ if (auto* p = m->is_a<exe> ())
+ chmod (p->path ());
+ }
+#endif
+ for (auto& rm: rms)
+ rm.cancel ();
+ }
+ }
+
+ return exec_diag || exec_body;
}
target_state adhoc_buildscript_rule::
@@ -1540,7 +2336,41 @@ namespace build2
// temporary directory ($~) is that it's next to other output which makes
// it easier to examine during recipe troubleshooting.
//
- return perform_clean_extra (a, t.as<file> (), {".d", ".t"});
+ // Finally, we print the entire ad hoc group at verbosity level 1, similar
+ // to the default update diagnostics.
+ //
+ // @@ TODO: .t may also be a temporary directory (and below).
+ //
+ return perform_clean_extra (a,
+ t.as<file> (),
+ {".d", ".t"},
+ {},
+ true /* show_adhoc_members */);
+ }
+
+ target_state adhoc_buildscript_rule::
+ perform_clean_group (action a, const target& xt)
+ {
+ const group& g (xt.as<group> ());
+
+ path d, t;
+ if (g.members_static != 0)
+ {
+ const path& p (g.members.front ()->as<file> ().path ());
+ d = p + ".d";
+ t = p + ".t";
+ }
+ else
+ {
+ // See target_path lambda in apply().
+ //
+ t = g.dir / (g.name + '.' + g.type ().name);
+ d = t + ".d";
+ t += ".t";
+ }
+
+ return perform_clean_group_extra (a, g, {d.string ().c_str (),
+ t.string ().c_str ()});
}
target_state adhoc_buildscript_rule::
@@ -1552,37 +2382,340 @@ namespace build2
context& ctx (t.ctx);
- execute_prerequisites (a, t);
+ target_state ts (target_state::unchanged);
- if (!ctx.dry_run || verb != 0)
+ if (ctx.current_mode == execution_mode::first)
+ ts |= straight_execute_prerequisites (a, t);
+
+ bool exec (!ctx.dry_run || verb != 0);
+
+ // Special handling for fsdir{} (which is the recommended if somewhat
+ // hackish way to represent directory symlinks). See fsdir_rule for
+ // background.
+ //
+ // @@ Note that because there is no depdb, we cannot detect the target
+ // directory change (or any other changes in the script).
+ //
+ if (exec &&
+ (a == perform_update_id || a == perform_clean_id) &&
+ t.is_a<fsdir> ())
+ {
+ // For update we only want to skip if it's a directory. For clean we
+ // want to (try) to clean up any filesystem entry, including a dangling
+ // symlink.
+ //
+ exec = a == perform_update_id
+ ? !exists (t.dir, true /* ignore_errors */)
+ : build2::entry_exists (t.dir, false /* follow_symlinks */);
+ }
+
+ if (exec)
{
const scope& bs (t.base_scope ());
const scope& rs (*bs.root_scope ());
- build::script::environment e (a, t, script.body_temp_dir, deadline);
+ build::script::environment e (a, t, bs, false /* temp_dir */, deadline);
build::script::parser p (ctx);
+ build::script::default_runner r;
- if (verb == 1)
+ bool exec_body (!ctx.dry_run || verb >= 2);
+ bool exec_diag (!script.diag_preamble.empty () &&
+ (exec_body || verb == 1));
+
+ if (script.diag_name)
{
- if (script.diag_line)
+ if (verb == 1)
{
- text << p.execute_special (rs, bs, e, *script.diag_line);
+ // For operations other than update (as well as for non-file
+ // targets), we default to the second form (without the
+ // prerequisite). Think test.
+ //
+ if (t.adhoc_member == nullptr)
+ print_diag (script.diag_name->c_str (), t);
+ else
+ {
+ vector<target_key> ts;
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ ts.push_back (m->key ());
+
+ print_diag (script.diag_name->c_str (), move (ts));
+ }
}
- else
+ }
+ else if (exec_diag)
+ {
+ if (script.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ pair<names, location> diag (
+ p.execute_diag_preamble (rs, bs,
+ e, script, r,
+ verb == 1 /* diag */,
+ true /* enter */,
+ !exec_body /* leave */));
+
+ if (verb == 1)
+ print_custom_diag (bs, move (diag.first), diag.second);
+ }
+
+ if (exec_body)
+ {
+ if (script.body_temp_dir && !script.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ p.execute_body (rs, bs, e, script, r, !exec_diag /* enter */);
+ }
+
+ ts |= target_state::changed;
+ }
+
+ if (ctx.current_mode == execution_mode::last)
+ ts |= reverse_execute_prerequisites (a, t);
+
+ return ts;
+ }
+
+ void adhoc_buildscript_rule::
+ print_custom_diag (const scope& bs, names&& ns, const location& l) const
+ {
+ // The straightforward thing to do would be to just print the diagnostics
+ // as specified by the user. But that will make some of the tidying up
+ // done by print_diag() unavailable to custom diagnostics. Things like
+ // omitting the out-qualification as well as compact printing of the
+ // groups. Also, in the future we may want to support colorization of the
+ // diagnostics, which will be difficult to achive with such a "just print"
+ // approach.
+ //
+ // So instead we are going to parse the custom diagnostics, translate
+ // names back to targets (where appropriate), and call one of the
+ // print_diag() functions. Specifically, we expect the custom diagnostics
+ // to be in one of the following two forms (which correspond to the two
+ // forms of pring_diag()):
+ //
+ // diag <prog> <l-target> <comb> <r-target>...
+ // diag <prog> <r-target>...
+ //
+ // And the way we are going to disambiguate this is by analyzing name
+ // types. Specifically, we expect <comb> to be a simple name that also
+ // does not contain any directory separators (so we can distinguish it
+ // from both target names as well as paths, which can be specified on
+ // either side). We will also recognize `-` as the special stdout path
+ // name (so <comb> cannot be `-`). Finally, <l-target> (but not
+ // <r-target>) can be a string (e.g., an argument) but that should not
+ // pose an ambiguity.
+ //
+ // With this approach, the way to re-create the default diagnostics would
+ // be:
+ //
+ // diag <prog> ($<[0]) -> $>
+ // diag <prog> $>
+ //
+ auto i (ns.begin ()), e (ns.end ());
+
+ // <prog>
+ //
+ if (i == e)
+ fail (l) << "missing program name in diag builtin";
+
+ if (!i->simple () || i->empty ())
+ fail (l) << "expected simple name as program name in diag builtin";
+
+ const char* prog (i->value.c_str ());
+ ++i;
+
+ // <l-target>
+ //
+ const target* l_t (nullptr);
+ path l_p;
+ string l_s;
+
+ auto parse_target = [&bs, &l, &i, &e] () -> const target&
+ {
+ name& n (*i++);
+ name o;
+
+ if (n.pair)
+ {
+ if (i == e)
+ fail (l) << "invalid target name pair in diag builtin";
+
+ o = move (*i++);
+ }
+
+ // Similar to to_target() in $target.*().
+ //
+ if (const target* r = search_existing (n, bs, o.dir))
+ return *r;
+
+ fail (l) << "target "
+ << (n.pair ? names {move (n), move (o)} : names {move (n)})
+ << " not found in diag builtin" << endf;
+ };
+
+ auto parse_first = [&l, &i, &e,
+ &parse_target] (const target*& t, path& p, string& s,
+ const char* after)
+ {
+ if (i == e)
+ fail (l) << "missing target after " << after << " in diag builtin";
+
+ try
+ {
+ if (i->typed ())
{
- // @@ TODO: as above (execute_update_file()).
- //
- text << *script.diag_name << ' ' << t;
+ t = &parse_target ();
+ return; // i is already incremented.
+ }
+ else if (!i->dir.empty ())
+ {
+ p = move (i->dir);
+ p /= i->value;
}
+ else if (path_traits::find_separator (i->value) != string::npos)
+ {
+ p = path (move (i->value));
+ }
+ else if (!i->value.empty ())
+ {
+ s = move (i->value);
+ }
+ else
+ fail (l) << "expected target, path, or argument after "
+ << after << " in diag builtin";
}
+ catch (const invalid_path& e)
+ {
+ fail (l) << "invalid path '" << e.path << "' after "
+ << after << " in diag builtin";
+ }
+
+ ++i;
+ };
+
+ parse_first (l_t, l_p, l_s, "program name");
- if (!ctx.dry_run || verb >= 2)
+ // Now detect which form it is.
+ //
+ if (i != e &&
+ i->simple () &&
+ !i->empty () &&
+ path_traits::find_separator (i->value) == string::npos)
+ {
+ // The first form.
+
+ // <comb>
+ //
+ const char* comb (i->value.c_str ());
+ ++i;
+
+ // <r-target>
+ //
+ const target* r_t (nullptr);
+ path r_p;
+ string r_s;
+
+ parse_first (r_t, r_p, r_s, "combiner");
+
+ path_name r_pn;
+
+ if (r_t != nullptr)
+ ;
+ else if (!r_p.empty ())
+ r_pn = path_name (&r_p);
+ else
{
- build::script::default_runner r;
- p.execute_body (rs, bs, e, script, r);
+ if (r_s != "-")
+ fail (l) << "expected target or path instead of '" << r_s
+ << "' after combiner in diag builtin";
+
+ r_pn = path_name (move (r_s));
}
+
+ if (i == e)
+ {
+ if (r_t != nullptr)
+ {
+ if (l_t != nullptr) print_diag (prog, *l_t, *r_t, comb);
+ else if (!l_p.empty ()) print_diag (prog, l_p, *r_t, comb);
+ else print_diag (prog, l_s, *r_t, comb);
+ }
+ else
+ {
+ if (l_t != nullptr) print_diag (prog, *l_t, r_pn, comb);
+ else if (!l_p.empty ()) print_diag (prog, l_p, r_pn, comb);
+ else print_diag (prog, l_s, r_pn, comb);
+ }
+
+ return;
+ }
+
+ // We can only have multiple targets, not paths.
+ //
+ if (r_t == nullptr)
+ fail (l) << "unexpected name after path in diag builtin";
+
+ // <r-target>...
+ //
+ vector<target_key> r_ts {r_t->key ()};
+
+ do r_ts.push_back (parse_target ().key ()); while (i != e);
+
+ if (l_t != nullptr) print_diag (prog, l_t->key (), move (r_ts), comb);
+ else if (!l_p.empty ()) print_diag (prog, l_p, move (r_ts), comb);
+ else print_diag (prog, l_s, move (r_ts), comb);
}
+ else
+ {
+ // The second form.
- return target_state::changed;
+ // First "absorb" the l_* values as the first <r-target>.
+ //
+ const target* r_t (nullptr);
+ path_name r_pn;
+
+ if (l_t != nullptr)
+ r_t = l_t;
+ else if (!l_p.empty ())
+ r_pn = path_name (&l_p);
+ else
+ {
+ if (l_s != "-")
+ {
+ diag_record dr (fail (l));
+
+ dr << "expected target or path instead of '" << l_s
+ << "' after program name in diag builtin";
+
+ if (i != e)
+ dr << info << "alternatively, missing combiner after '"
+ << l_s << "'";
+ }
+
+ r_pn = path_name (move (l_s));
+ }
+
+ if (i == e)
+ {
+ if (r_t != nullptr)
+ print_diag (prog, *r_t);
+ else
+ print_diag (prog, r_pn);
+
+ return;
+ }
+
+ // We can only have multiple targets, not paths.
+ //
+ if (r_t == nullptr)
+ fail (l) << "unexpected name after path in diag builtin";
+
+ // <r-target>...
+ //
+ vector<target_key> r_ts {r_t->key ()};
+
+ do r_ts.push_back (parse_target ().key ()); while (i != e);
+
+ print_diag (prog, move (r_ts));
+ }
}
}
diff --git a/libbuild2/adhoc-rule-buildscript.hxx b/libbuild2/adhoc-rule-buildscript.hxx
index e7b18e2..336dceb 100644
--- a/libbuild2/adhoc-rule-buildscript.hxx
+++ b/libbuild2/adhoc-rule-buildscript.hxx
@@ -36,16 +36,17 @@ namespace build2
const optional<timestamp>&) const override;
target_state
- perform_update_file (action, const target&) const;
+ perform_update_file_or_group (action, const target&) const;
struct match_data;
struct match_data_byproduct;
target_state
- perform_update_file_dyndep (action, const target&, match_data&) const;
+ perform_update_file_or_group_dyndep (
+ action, const target&, match_data&) const;
target_state
- perform_update_file_dyndep_byproduct (
+ perform_update_file_or_group_dyndep_byproduct (
action, const target&, match_data_byproduct&) const;
optional<target_state>
@@ -58,9 +59,19 @@ namespace build2
build::script::default_runner&,
bool deferred_failure = false) const;
+ bool
+ execute_update_group (const scope&,
+ action a, const group&,
+ build::script::environment&,
+ build::script::default_runner&,
+ bool deferred_failure = false) const;
+
static target_state
perform_clean_file (action, const target&);
+ static target_state
+ perform_clean_group (action, const target&);
+
target_state
default_action (action, const target&, const optional<timestamp>&) const;
@@ -79,9 +90,19 @@ namespace build2
virtual void
dump_text (ostream&, string&) const override;
+ void
+ print_custom_diag (const scope&, names&&, const location&) const;
+
public:
using script_type = build::script::script;
+ // The prerequisite_target::include bits that indicate update=unmatch and
+ // an ad hoc version of that.
+ //
+ static const uintptr_t include_unmatch = 0x100;
+ static const uintptr_t include_unmatch_adhoc = 0x200;
+
+
script_type script;
string checksum; // Script text hash.
const target_type* ttype; // First target/pattern type.
diff --git a/libbuild2/adhoc-rule-cxx.cxx b/libbuild2/adhoc-rule-cxx.cxx
index dc795df..2ac97eb 100644
--- a/libbuild2/adhoc-rule-cxx.cxx
+++ b/libbuild2/adhoc-rule-cxx.cxx
@@ -25,6 +25,13 @@ namespace build2
return true;
}
+ recipe cxx_rule_v1::
+ apply (action, target&) const
+ {
+ assert (false); // This (or the match_extra version) must be overriden.
+ return empty_recipe;
+ }
+
// adhoc_cxx_rule
//
adhoc_cxx_rule::
@@ -95,8 +102,10 @@ namespace build2
load_module_library (const path& lib, const string& sym, string& err);
bool adhoc_cxx_rule::
- match (action a, target& t, const string& hint, match_extra& me) const
+ match (action a, target& xt, const string& hint, match_extra& me) const
{
+ const target& t (xt); // See adhoc_rule::match() for background.
+
if (pattern != nullptr && !pattern->match (a, t, hint, me))
return false;
@@ -302,9 +311,9 @@ namespace build2
//
auto_thread_env penv (nullptr);
context& ctx (*t.ctx.module_context);
- scheduler::phase_guard pg (ctx.sched);
+ scheduler::phase_guard pg (*ctx.sched);
- const uint16_t verbosity (3); // Project creation command verbosity.
+ uint16_t verbosity (3); // Project creation command verbosity.
// Project and location signatures.
//
@@ -326,6 +335,17 @@ namespace build2
if (!create && (create = !check_sig (bf, psig)))
rmdir_r (ctx, pd, false, verbosity); // Never dry-run.
+ auto diag = [verbosity] (const path& f)
+ {
+ if (verb >= verbosity)
+ {
+ if (verb >= 2)
+ text << "cat >" << f;
+ else if (verb)
+ print_diag ("save", f);
+ }
+ };
+
path of;
ofdstream ofs;
@@ -356,8 +376,7 @@ namespace build2
//
of = path (pd / "rule.cxx");
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << of;
+ diag (of);
ofs.open (of);
@@ -377,6 +396,8 @@ namespace build2
<< "#include <libbuild2/depdb.hxx>" << '\n'
<< "#include <libbuild2/scope.hxx>" << '\n'
<< "#include <libbuild2/target.hxx>" << '\n'
+ << "#include <libbuild2/recipe.hxx>" << '\n'
+ << "#include <libbuild2/dyndep.hxx>" << '\n'
<< "#include <libbuild2/context.hxx>" << '\n'
<< "#include <libbuild2/variable.hxx>" << '\n'
<< "#include <libbuild2/algorithm.hxx>" << '\n'
@@ -486,8 +507,7 @@ namespace build2
//
of = bf;
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << of;
+ diag (of);
ofs.open (of);
@@ -559,8 +579,7 @@ namespace build2
entry_time et (file_time (of));
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << of;
+ diag (of);
ofs.open (of);
@@ -665,13 +684,41 @@ namespace build2
}
}
- return impl->match (a, t, hint, me);
+ return impl->match (a, xt, hint, me);
}
#endif // BUILD2_BOOTSTRAP || LIBBUILD2_STATIC_BUILD
recipe adhoc_cxx_rule::
apply (action a, target& t, match_extra& me) const
{
+ // Handle matching explicit group member (see adhoc_rule::match() for
+ // background).
+ //
+ if (const group* g = (t.group != nullptr
+ ? t.group->is_a<group> ()
+ : nullptr))
+ {
+ // @@ Hm, this looks very similar to how we handle ad hoc group members.
+ // Shouldn't impl be given a chance to translate options or some
+ // such?
+ //
+ match_sync (a, *g, 0 /* options */);
+ return group_recipe; // Execute the group's recipe.
+ }
+
+ // Note that while we probably could call pattern's apply_group_members()
+ // here, apply_group_prerequisites() is normally called after adding
+ // prerequisites but before matching, which can only be done from the
+ // rule's implementation. Also, for apply_group_members(), there is the
+ // explicit group special case which may also require custom logic.
+ // So it feels best to leave both to the implementation.
+
return impl.load (memory_order_relaxed)->apply (a, t, me);
}
+
+ void adhoc_cxx_rule::
+ reapply (action a, target& t, match_extra& me) const
+ {
+ return impl.load (memory_order_relaxed)->reapply (a, t, me);
+ }
}
diff --git a/libbuild2/adhoc-rule-cxx.hxx b/libbuild2/adhoc-rule-cxx.hxx
index 9a17447..2ac2281 100644
--- a/libbuild2/adhoc-rule-cxx.hxx
+++ b/libbuild2/adhoc-rule-cxx.hxx
@@ -36,11 +36,6 @@ namespace build2
// cannot be injected as a real prerequisite since it's from a different
// build context).
//
- // If pattern is not NULL then this recipe belongs to an ad hoc pattern
- // rule and apply() may need to call the pattern's apply_*() functions if
- // the pattern has any ad hoc group member substitutions or prerequisite
- // substitutions/non-patterns, respectively.
- //
const location recipe_loc; // Buildfile location of the recipe.
const target_state recipe_state; // State of recipe library target.
const adhoc_rule_pattern* pattern; // Ad hoc pattern rule of recipe.
@@ -52,8 +47,26 @@ namespace build2
// Return true by default.
//
+ // Note: must treat target as const (unless known to match a non-group).
+ // See adhoc_rule::match() for background.
+ //
virtual bool
match (action, target&) const override;
+
+ using simple_rule::match; // Unhide the match_extra version.
+
+ // Either this version or the one with match_extra must be overridden.
+ //
+ // If the pattern member above is not NULL then this recipe belongs to an
+ // ad hoc pattern rule and the implementation may need to call the
+ // pattern's apply_*() functions if the pattern has any ad hoc group
+ // member substitutions or prerequisite substitutions/non-patterns,
+ // respectively.
+ //
+ virtual recipe
+ apply (action, target&) const override;
+
+ using simple_rule::apply; // Unhide the match_extra version.
};
// Note: not exported.
@@ -67,6 +80,9 @@ namespace build2
virtual recipe
apply (action, target&, match_extra&) const override;
+ virtual void
+ reapply (action, target&, match_extra&) const override;
+
adhoc_cxx_rule (string, const location&, size_t,
uint64_t ver,
optional<string> sep);
diff --git a/libbuild2/adhoc-rule-regex-pattern.cxx b/libbuild2/adhoc-rule-regex-pattern.cxx
index 59a63bc..2d60520 100644
--- a/libbuild2/adhoc-rule-regex-pattern.cxx
+++ b/libbuild2/adhoc-rule-regex-pattern.cxx
@@ -86,7 +86,9 @@ namespace build2
tt = n.untyped () ? &file::static_type : s.find_target_type (n.type);
if (tt == nullptr)
- fail (loc) << "unknown target type " << n.type;
+ fail (loc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *s.root_scope ();
}
bool e (n.pattern &&
@@ -126,10 +128,13 @@ namespace build2
}
bool adhoc_rule_regex_pattern::
- match (action a, target& t, const string&, match_extra& me) const
+ match (action a, const target& t, const string&, match_extra& me) const
{
tracer trace ("adhoc_rule_regex_pattern::match");
+ // Note: target may not be locked in which case we should not modify
+ // target or match_extra (see adhoc_rule::match() for background).
+
// The plan is as follows: First check the "type signature" of the target
// and its prerequisites (the primary target type has already been matched
// by the rule matching machinery). If there is a match, then concatenate
@@ -161,12 +166,17 @@ namespace build2
// implementation. Except we support the unmatch and match values in
// the update variable.
//
+ // Note: assuming group prerequisites are immutable (not locked).
+ //
for (prerequisite_member p: group_prerequisite_members (a, t))
{
// Note that here we don't validate the update operation override
// value (since we may not match). Instead the rule does this in
// apply().
//
+ // Note: assuming include()'s use of target only relied on immutable
+ // data (not locked).
+ //
lookup l;
if (include (a, t, p, a.operation () == update_id ? &l : nullptr) ==
include_type::normal && p.is_a (tt))
@@ -205,10 +215,13 @@ namespace build2
// So the plan is to store the string in match_extra::data() and
// regex_match_results (which we can move) in the auxiliary data storage.
//
+ // Note: only cache if locked.
+ //
static_assert (sizeof (string) <= match_extra::data_size,
"match data too large");
- string& ns (me.data (string ()));
+ string tmp;
+ string& ns (me.locked ? me.data (string ()) : tmp);
auto append_name = [&ns,
first = true,
@@ -226,10 +239,12 @@ namespace build2
// Primary target (always a pattern).
//
auto te (targets_.end ()), ti (targets_.begin ());
- append_name (t.key (), *ti);
+ append_name (t.key (), *ti); // Immutable (not locked).
// Match ad hoc group members.
//
+ // Note: shouldn't be in effect for an explicit group (not locked).
+ //
while ((ti = find_if (ti + 1, te, pattern)) != te)
{
const target* at (find_adhoc_member (t, ti->type));
@@ -279,7 +294,9 @@ namespace build2
return false;
}
- t.data (a, move (mr));
+ if (me.locked)
+ t.data (a, move (mr));
+
return true;
}
@@ -304,8 +321,14 @@ namespace build2
}
void adhoc_rule_regex_pattern::
- apply_adhoc_members (action a, target& t, const scope&, match_extra&) const
+ apply_group_members (action a, target& t, const scope& bs,
+ match_extra&) const
{
+ if (targets_.size () == 1) // The group/primary target is always present.
+ return;
+
+ group* g (t.is_a<group> ());
+
const auto& mr (t.data<regex_match_results> (a));
for (auto i (targets_.begin () + 1); i != targets_.end (); ++i)
@@ -333,14 +356,86 @@ namespace build2
d.normalize ();
}
- // @@ TODO: currently this uses type as the ad hoc member identity.
+ string n (substitute (
+ t,
+ mr,
+ e.name.value,
+ (g != nullptr
+ ? "explicit target group member"
+ : "ad hoc target group member")));
+
+ // @@ TODO: save location in constructor?
//
- add_adhoc_member (
- t,
- e.type,
- move (d),
- dir_path () /* out */,
- substitute (t, mr, e.name.value, "ad hoc target group member"));
+ location loc;
+
+ optional<string> ext (target::split_name (n, loc));
+
+ if (g != nullptr)
+ {
+ auto& ms (g->members);
+
+ // These are conceptually static but they behave more like dynamic in
+ // that we likely need to insert the target, set its group, etc. In a
+ // sense, they are rule-static, but group-dynamic.
+ //
+ // Note: a custom version of the dyndep_rule::inject_group_member()
+ // logic.
+ //
+ auto l (search_new_locked (
+ bs.ctx,
+ e.type,
+ move (d),
+ dir_path (), // Always in out.
+ move (n),
+ ext ? &*ext : nullptr,
+ &bs));
+
+ const target& t (l.first); // Note: non-const only if have lock.
+
+ if (l.second)
+ {
+ l.first.group = g;
+ l.second.unlock ();
+ }
+ else
+ {
+ if (find (ms.begin (), ms.end (), &t) != ms.end ())
+ continue;
+
+ if (t.group != g) // Note: atomic.
+ {
+ // We can only update the group under lock.
+ //
+ target_lock tl (lock (a, t));
+
+ if (!tl)
+ fail << "group " << *g << " member " << t << " is already matched" <<
+ info << "static group members specified by pattern rules cannot "
+ << "be used as prerequisites directly, only via group";
+
+ if (t.group == nullptr)
+ tl.target->group = g;
+ else if (t.group != g)
+ {
+ fail << "group " << *g << " member " << t
+ << " is already member of group " << *t.group;
+ }
+ }
+ }
+
+ ms.push_back (&t);
+ }
+ else
+ {
+ add_adhoc_member_identity (
+ t,
+ e.type,
+ move (d),
+ dir_path (), // Always in out.
+ move (n),
+ move (ext),
+ loc);
+ }
}
}
@@ -357,6 +452,18 @@ namespace build2
auto& pts (t.prerequisite_targets[a]);
+ // Avoid duplicating fsdir{} that may have already been injected by
+ // inject_fsdir() (in which case it is expected to be first).
+ //
+ const target* dir (nullptr);
+ if (!pts.empty ())
+ {
+ const prerequisite_target& pt (pts.front ());
+
+ if (pt.target != nullptr && pt.adhoc () && pt.target->is_a<fsdir> ())
+ dir = pt.target;
+ }
+
for (const element& e: prereqs_)
{
// While it would be nice to avoid copying here, the semantics of
@@ -393,7 +500,7 @@ namespace build2
const target& pt (search (t, move (n), *s, &e.type));
- if (clean && !pt.in (*bs.root_scope ()))
+ if (&pt == dir || (clean && !pt.in (*bs.root_scope ())))
continue;
// @@ TODO: it could be handy to mark a prerequisite (e.g., a tool)
diff --git a/libbuild2/adhoc-rule-regex-pattern.hxx b/libbuild2/adhoc-rule-regex-pattern.hxx
index 597f30d..9cb7874 100644
--- a/libbuild2/adhoc-rule-regex-pattern.hxx
+++ b/libbuild2/adhoc-rule-regex-pattern.hxx
@@ -32,10 +32,10 @@ namespace build2
names&&, const location&);
virtual bool
- match (action, target&, const string&, match_extra&) const override;
+ match (action, const target&, const string&, match_extra&) const override;
virtual void
- apply_adhoc_members (action, target&,
+ apply_group_members (action, target&,
const scope&,
match_extra&) const override;
diff --git a/libbuild2/algorithm.cxx b/libbuild2/algorithm.cxx
index 355e633..62c500d 100644
--- a/libbuild2/algorithm.cxx
+++ b/libbuild2/algorithm.cxx
@@ -54,29 +54,45 @@ namespace build2
const target&
search (const target& t, const prerequisite_key& pk)
{
- assert (t.ctx.phase == run_phase::match);
+ context& ctx (t.ctx);
+
+ assert (ctx.phase == run_phase::match);
// If this is a project-qualified prerequisite, then this is import's
- // business.
+ // business (phase 2).
//
if (pk.proj)
- return import (t.ctx, pk);
+ return import2 (ctx, pk);
- if (const target* pt = pk.tk.type->search (t, pk))
+ if (const target* pt = pk.tk.type->search (ctx, &t, pk))
return *pt;
- return create_new_target (t.ctx, pk);
+ if (pk.tk.out->empty ())
+ return create_new_target (ctx, pk);
+
+ // If this is triggered, then you are probably not passing scope to
+ // search() (which leads to search_existing_file() being skipped).
+ //
+ fail << "no existing source file for prerequisite " << pk << endf;
}
pair<target&, ulock>
search_locked (const target& t, const prerequisite_key& pk)
{
- assert (t.ctx.phase == run_phase::match && !pk.proj);
+ context& ctx (t.ctx);
- if (const target* pt = pk.tk.type->search (t, pk))
+ assert (ctx.phase == run_phase::match && !pk.proj);
+
+ if (const target* pt = pk.tk.type->search (ctx, &t, pk))
return {const_cast<target&> (*pt), ulock ()};
- return create_new_target_locked (t.ctx, pk);
+ if (pk.tk.out->empty ())
+ return create_new_target_locked (ctx, pk);
+
+ // If this is triggered, then you are probably not passing scope to
+ // search() (which leads to search_existing_file() being skipped).
+ //
+ fail << "no existing source file for prerequisite " << pk << endf;
}
const target*
@@ -84,7 +100,7 @@ namespace build2
{
return pk.proj
? import_existing (ctx, pk)
- : search_existing_target (ctx, pk);
+ : pk.tk.type->search (ctx, nullptr /* existing */, pk);
}
const target&
@@ -92,7 +108,7 @@ namespace build2
{
assert (ctx.phase == run_phase::load || ctx.phase == run_phase::match);
- if (const target* pt = search_existing_target (ctx, pk))
+ if (const target* pt = search_existing_target (ctx, pk, true /*out_only*/))
return *pt;
return create_new_target (ctx, pk);
@@ -103,14 +119,14 @@ namespace build2
{
assert (ctx.phase == run_phase::load || ctx.phase == run_phase::match);
- if (const target* pt = search_existing_target (ctx, pk))
+ if (const target* pt = search_existing_target (ctx, pk, true /*out_only*/))
return {const_cast<target&> (*pt), ulock ()};
return create_new_target_locked (ctx, pk);
}
const target&
- search (const target& t, name n, const scope& s, const target_type* tt)
+ search (const target& t, name&& n, const scope& s, const target_type* tt)
{
assert (t.ctx.phase == run_phase::match);
@@ -164,16 +180,12 @@ namespace build2
}
bool q (cn.qualified ());
-
- // @@ OUT: for now we assume the prerequisite's out is undetermined.
- // Would need to pass a pair of names.
- //
prerequisite_key pk {
n.proj, {tt, &n.dir, q ? &empty_dir_path : &out, &n.value, ext}, &s};
return q
? import_existing (s.ctx, pk)
- : search_existing_target (s.ctx, pk);
+ : tt->search (s.ctx, nullptr /* existing */, pk);
}
const target*
@@ -220,8 +232,14 @@ namespace build2
// If the work_queue is absent, then we don't wait.
//
+ // While already applied or executed targets are normally not locked, if
+ // options contain any bits that are not already in cur_options, then the
+ // target is locked even in these states.
+ //
target_lock
- lock_impl (action a, const target& ct, optional<scheduler::work_queue> wq)
+ lock_impl (action a, const target& ct,
+ optional<scheduler::work_queue> wq,
+ uint64_t options)
{
context& ctx (ct.ctx);
@@ -236,7 +254,8 @@ namespace build2
size_t appl (b + target::offset_applied);
size_t busy (b + target::offset_busy);
- atomic_count& task_count (ct[a].task_count);
+ const target::opstate& cs (ct[a]);
+ atomic_count& task_count (cs.task_count);
while (!task_count.compare_exchange_strong (
e,
@@ -256,7 +275,7 @@ namespace build2
fail << "dependency cycle detected involving target " << ct;
if (!wq)
- return target_lock {a, nullptr, e - b};
+ return target_lock {a, nullptr, e - b, false};
// We also unlock the phase for the duration of the wait. Why?
// Consider this scenario: we are trying to match a dir{} target whose
@@ -265,14 +284,20 @@ namespace build2
// to switch the phase to load. Which would result in a deadlock
// unless we release the phase.
//
- phase_unlock u (ct.ctx, true /* unlock */, true /* delay */);
- e = ctx.sched.wait (busy - 1, task_count, u, *wq);
+ phase_unlock u (ct.ctx, true /* delay */);
+ e = ctx.sched->wait (busy - 1, task_count, u, *wq);
}
- // We don't lock already applied or executed targets.
+ // We don't lock already applied or executed targets unless there
+ // are new options.
+ //
+ // Note: we don't have the lock yet so we must use atomic cur_options.
+ // We also have to re-check this once we've grabbed the lock.
//
- if (e >= appl)
- return target_lock {a, nullptr, e - b};
+ if (e >= appl &&
+ (cs.match_extra.cur_options_.load (memory_order_relaxed) & options)
+ == options)
+ return target_lock {a, nullptr, e - b, false};
}
// We now have the lock. Analyze the old value and decide what to do.
@@ -281,24 +306,41 @@ namespace build2
target::opstate& s (t[a]);
size_t offset;
- if (e <= b)
+ bool first;
+ if ((first = (e <= b)))
{
// First lock for this operation.
//
+ // Note that we use 0 match_extra::cur_options_ as an indication of not
+ // being applied yet. In particular, in the match phase, this is used to
+ // distinguish between the "busy because not applied yet" and "busy
+ // because relocked to reapply match options" cases. See
+ // target::matched() for details.
+ //
s.rule = nullptr;
s.dependents.store (0, memory_order_release);
+ s.match_extra.cur_options_.store (0, memory_order_relaxed);
offset = target::offset_touched;
}
else
{
+ // Re-check the options if already applied or worse.
+ //
+ if (e >= appl && (s.match_extra.cur_options & options) == options)
+ {
+ // Essentially unlock_impl().
+ //
+ task_count.store (e, memory_order_release);
+ ctx.sched->resume (task_count);
+
+ return target_lock {a, nullptr, e - b, false};
+ }
+
offset = e - b;
- assert (offset == target::offset_touched ||
- offset == target::offset_tried ||
- offset == target::offset_matched);
}
- return target_lock {a, &t, offset};
+ return target_lock {a, &t, offset, first};
}
void
@@ -314,7 +356,7 @@ namespace build2
// this target.
//
task_count.store (offset + ctx.count_base (), memory_order_release);
- ctx.sched.resume (task_count);
+ ctx.sched->resume (task_count);
}
target&
@@ -322,7 +364,8 @@ namespace build2
const target_type& tt,
dir_path dir,
dir_path out,
- string n)
+ string n,
+ optional<string> ext)
{
tracer trace ("add_adhoc_member");
@@ -332,113 +375,347 @@ namespace build2
if (*mp != nullptr) // Might already be there.
return **mp;
- target* m (nullptr);
+ pair<target&, ulock> r (
+ t.ctx.targets.insert_locked (tt,
+ move (dir),
+ move (out),
+ move (n),
+ move (ext),
+ target_decl::implied,
+ trace,
+ true /* skip_find */));
+
+ target& m (r.first);
+
+ if (!r.second)
+ fail << "target " << m << " already exists and cannot be made "
+ << "ad hoc member of group " << t;
+
+ m.group = &t;
+ *mp = &m;
+
+ return m;
+ };
+
+ pair<target&, bool>
+ add_adhoc_member_identity (target& t,
+ const target_type& tt,
+ dir_path dir,
+ dir_path out,
+ string n,
+ optional<string> ext,
+ const location& loc)
+ {
+ // NOTE: see similar code in parser::enter_adhoc_members().
+
+ tracer trace ("add_adhoc_member_identity");
+
+ pair<target&, ulock> r (
+ t.ctx.targets.insert_locked (tt,
+ move (dir),
+ move (out),
+ move (n),
+ move (ext),
+ target_decl::implied,
+ trace,
+ true /* skip_find */));
+ target& m (r.first);
+
+ // Add as an ad hoc member at the end of the chain skipping duplicates.
+ //
+ const_ptr<target>* mp (&t.adhoc_member);
+ for (; *mp != nullptr; mp = &(*mp)->adhoc_member)
{
- pair<target&, ulock> r (
- t.ctx.targets.insert_locked (tt,
- move (dir),
- move (out),
- move (n),
- nullopt /* ext */,
- target_decl::implied,
- trace,
- true /* skip_find */));
+ if (*mp == &m)
+ return {m, false};
+ }
+
+ if (!r.second)
+ fail (loc) << "target " << m << " already exists and cannot be made "
+ << "ad hoc member of group " << t;
+
+ m.group = &t;
+ *mp = &m;
- if (r.second) // Inserted.
+ return {m, true};
+ }
+
+ static bool
+ trace_target (const target& t, const vector<name>& ns)
+ {
+ for (const name& n: ns)
+ {
+ if (n.untyped () || n.qualified () || n.pattern)
+ fail << "unsupported trace target name '" << n << "'" <<
+ info << "unqualified, typed, non-pattern name expected";
+
+ if (!n.dir.empty ())
{
- m = &r.first;
- m->group = &t;
+ if (n.dir.relative () || !n.dir.normalized ())
+ fail << "absolute and normalized trace target directory expected";
+
+ if (t.dir != n.dir)
+ continue;
}
- }
- assert (m != nullptr);
- *mp = m;
+ if (n.type == t.type ().name && n.value == t.name)
+ return true;
+ }
- return *m;
- };
+ return false;
+ }
- // Return the matching rule or NULL if no match and try_match is true.
- //
- const rule_match*
- match_rule (action a, target& t, const rule* skip, bool try_match)
+ void
+ set_rule_trace (target_lock& l, const rule_match* rm)
{
- const scope& bs (t.base_scope ());
+ action a (l.action);
+ target& t (*l.target);
- // Match rules in project environment.
+ // Note: see similar code in execute_impl() for execute.
//
- auto_project_env penv;
- if (const scope* rs = bs.root_scope ())
- penv = auto_project_env (*rs);
+ if (trace_target (t, *t.ctx.trace_match))
+ {
+ diag_record dr (info);
- match_extra& me (t[a].match_extra);
+ dr << "matching to " << diag_do (a, t);
- // First check for an ad hoc recipe.
- //
- // Note that a fallback recipe is preferred over a non-fallback rule.
- //
- if (!t.adhoc_recipes.empty ())
- {
- auto df = make_diag_frame (
- [a, &t](const diag_record& dr)
+ if (rm != nullptr)
+ {
+ const rule& r (rm->second);
+
+ if (const adhoc_rule* ar = dynamic_cast<const adhoc_rule*> (&r))
{
- if (verb != 0)
- dr << info << "while matching ad hoc recipe to " << diag_do (a, t);
- });
+ dr << info (ar->loc);
+
+ if (ar->pattern != nullptr)
+ dr << "using ad hoc pattern rule ";
+ else
+ dr << "using ad hoc recipe ";
+ }
+ else
+ dr << info << "using rule ";
- auto match = [a, &t, &me] (const adhoc_rule& r, bool fallback) -> bool
+ dr << rm->first;
+ }
+ else
+ dr << info << "using directly-assigned recipe";
+ }
+
+ t[a].rule = rm;
+ }
+
+ // Note: not static since also called by rule::sub_match().
+ //
+ const rule_match*
+ match_adhoc_recipe (action a, target& t, match_extra& me)
+ {
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
{
- me.init (fallback);
+ if (verb != 0)
+ dr << info << "while matching ad hoc recipe to " << diag_do (a, t);
+ });
- if (auto* f = (a.outer ()
- ? t.ctx.current_outer_oif
- : t.ctx.current_inner_oif)->adhoc_match)
- return f (r, a, t, string () /* hint */, me);
- else
- return r.match (a, t, string () /* hint */, me);
- };
+ auto match = [a, &t, &me] (const adhoc_rule& r, bool fallback) -> bool
+ {
+ me.reinit (fallback);
- // The action could be Y-for-X while the ad hoc recipes are always for
- // X. So strip the Y-for part for comparison (but not for the match()
- // calls; see below for the hairy inner/outer semantics details).
- //
- action ca (a.inner ()
- ? a
- : action (a.meta_operation (), a.outer_operation ()));
+ if (auto* f = (a.outer ()
+ ? t.ctx.current_outer_oif
+ : t.ctx.current_inner_oif)->adhoc_match)
+ return f (r, a, t, string () /* hint */, me);
+ else
+ return r.match (a, t, string () /* hint */, me);
+ };
+
+ // The action could be Y-for-X while the ad hoc recipes are always for
+ // X. So strip the Y-for part for comparison (but not for the match()
+ // calls; see below for the hairy inner/outer semantics details).
+ //
+ action ca (a.inner ()
+ ? a
+ : action (a.meta_operation (), a.outer_operation ()));
+ // If returned rule_match is NULL, then the second half indicates whether
+ // the rule was found (but did not match).
+ //
+ auto find_match = [&t, &match] (action ca) -> pair<const rule_match*, bool>
+ {
+ // Note that there can be at most one recipe for any action.
+ //
auto b (t.adhoc_recipes.begin ()), e (t.adhoc_recipes.end ());
auto i (find_if (
b, e,
- [&match, ca] (const shared_ptr<adhoc_rule>& r)
+ [ca] (const shared_ptr<adhoc_rule>& r)
{
auto& as (r->actions);
- return (find (as.begin (), as.end (), ca) != as.end () &&
- match (*r, false));
+ return find (as.begin (), as.end (), ca) != as.end ();
}));
- if (i == e)
+ bool f (i != e);
+ if (f)
+ {
+ if (!match (**i, false /* fallback */))
+ i = e;
+ }
+ else
{
// See if we have a fallback implementation.
//
// See the adhoc_rule::reverse_fallback() documentation for details on
// what's going on here.
//
+ // Note that it feels natural not to look for a fallback if a custom
+ // recipe was provided but did not match.
+ //
+ const target_type& tt (t.type ());
i = find_if (
b, e,
- [&match, ca, &t] (const shared_ptr<adhoc_rule>& r)
+ [ca, &tt] (const shared_ptr<adhoc_rule>& r)
{
- auto& as (r->actions);
-
- // Note that the rule could be there but not match (see above),
- // thus this extra check.
+ // Only the rule that provides the "forward" action can provide
+ // "reverse", so there can be at most one such rule.
//
- return (find (as.begin (), as.end (), ca) == as.end () &&
- r->reverse_fallback (ca, t.type ()) &&
- match (*r, true));
+ return r->reverse_fallback (ca, tt);
});
+
+ f = (i != e);
+ if (f)
+ {
+ if (!match (**i, true /* fallback */))
+ i = e;
+ }
+ }
+
+ return pair<const rule_match*, bool> (
+ i != e ? &(*i)->rule_match : nullptr,
+ f);
+ };
+
+ pair<const rule_match*, bool> r (find_match (ca));
+
+ // Provide the "add dist_* and configure_* actions for every perform_*
+ // action unless there is a custom one" semantics (see the equivalent ad
+ // hoc rule registration code in the parser for background).
+ //
+ // Note that handling this in the parser by adding the extra actions is
+ // difficult because we store recipe actions in the recipe itself (
+ // adhoc_rule::actions) and a recipe could be shared among multiple
+ // targets, some of which may provide a "custom one" as another recipe. On
+ // the other hand, handling it here is relatively straightforward.
+ //
+ if (r.first == nullptr && !r.second)
+ {
+ meta_operation_id mo (ca.meta_operation ());
+ if (mo == configure_id || mo == dist_id)
+ {
+ action pa (perform_id, ca.operation ());
+ r = find_match (pa);
+ }
+ }
+
+ return r.first;
+ }
+
+ // Return the matching rule or NULL if no match and try_match is true.
+ //
+ const rule_match*
+ match_rule_impl (action a, target& t,
+ uint64_t options,
+ const rule* skip,
+ bool try_match,
+ match_extra* pme)
+ {
+ using fallback_rule = adhoc_rule_pattern::fallback_rule;
+
+ auto adhoc_rule_match = [] (const rule_match& r)
+ {
+ return dynamic_cast<const adhoc_rule*> (&r.second.get ());
+ };
+
+ auto fallback_rule_match = [] (const rule_match& r)
+ {
+ return dynamic_cast<const fallback_rule*> (&r.second.get ());
+ };
+
+ // Note: we copy the options value to me.new_options after successfully
+ // matching the rule to make sure rule::match() implementations don't rely
+ // on it.
+ //
+ match_extra& me (pme == nullptr ? t[a].match_extra : *pme);
+
+ if (const target* g = t.group)
+ {
+ // If this is a group with dynamic members, then match it with the
+ // group's rule automatically. See dyndep_rule::inject_group_member()
+ // for background.
+ //
+ if ((g->type ().flags & target_type::flag::dyn_members) ==
+ target_type::flag::dyn_members)
+ {
+ if (g->matched (a, memory_order_acquire))
+ {
+ const rule_match* r (g->state[a].rule);
+ assert (r != nullptr); // Shouldn't happen with dyn_members.
+
+ me.new_options = options;
+ return r;
+ }
+
+ // Assume static member and fall through.
}
- if (i != e)
- return &(*i)->rule_match;
+ // If this is a member of group-based target, then first try to find a
+ // matching ad hoc recipe/rule by matching (to an ad hoc recipe/rule)
+ // the group but applying to the member. See adhoc_rule::match() for
+ // background, including for why const_cast should be safe.
+ //
+ // To put it another way, if a group is matched by an ad hoc
+ // recipe/rule, then we want all the member to be matched to the same
+ // recipe/rule.
+ //
+ // Note that such a group is dyn_members so we would have tried the
+ // "already matched" case above.
+ //
+ if (g->is_a<group> ())
+ {
+ // We cannot init match_extra from the target if it's unlocked so use
+ // a temporary (it shouldn't be modified if unlocked).
+ //
+ match_extra gme (false /* locked */);
+ if (const rule_match* r = match_rule_impl (a, const_cast<target&> (*g),
+ 0 /* options */,
+ skip,
+ true /* try_match */,
+ &gme))
+ {
+ me.new_options = options;
+ return r;
+ }
+
+ // Fall through to normal match of the member.
+ }
+ }
+
+ const scope& bs (t.base_scope ());
+
+ // Match rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ // First check for an ad hoc recipe.
+ //
+ // Note that a fallback recipe is preferred over a non-fallback rule.
+ //
+ if (!t.adhoc_recipes.empty ())
+ {
+ if (const rule_match* r = match_adhoc_recipe (a, t, me))
+ {
+ me.new_options = options;
+ return r;
+ }
}
// If this is an outer operation (Y-for-X), then we look for rules
@@ -534,8 +811,6 @@ namespace build2
// reverse_fallback() rather than it returning (a list) of
// reverse actions, which would be necessary to register them.
//
- using fallback_rule = adhoc_rule_pattern::fallback_rule;
-
auto find_fallback = [mo, o, tt] (const fallback_rule& fr)
-> const rule_match*
{
@@ -548,21 +823,27 @@ namespace build2
if (oi == 0)
{
- if (auto* fr =
- dynamic_cast<const fallback_rule*> (&r->second.get ()))
+ if (const fallback_rule* fr = fallback_rule_match (*r))
{
if ((r = find_fallback (*fr)) == nullptr)
continue;
}
}
+ // Skip non-ad hoc rules if the target is not locked (see above;
+ // note that in this case match_extra is a temporary which we
+ // can reinit).
+ //
+ if (!me.locked && !adhoc_rule_match (*r))
+ continue;
+
const string& n (r->first);
const rule& ru (r->second);
if (&ru == skip)
continue;
- me.init (oi == 0 /* fallback */);
+ me.reinit (oi == 0 /* fallback */);
{
auto df = make_diag_frame (
[a, &t, &n](const diag_record& dr)
@@ -587,14 +868,16 @@ namespace build2
if (oi == 0)
{
- if (auto* fr =
- dynamic_cast<const fallback_rule*> (&r1->second.get ()))
+ if (const fallback_rule* fr = fallback_rule_match (*r1))
{
if ((r1 = find_fallback (*fr)) == nullptr)
continue;
}
}
+ if (!me.locked && !adhoc_rule_match (*r1))
+ continue;
+
const string& n1 (r1->first);
const rule& ru1 (r1->second);
@@ -613,8 +896,7 @@ namespace build2
//
// @@ Can't we temporarily swap things out in target?
//
- match_extra me1;
- me1.init (oi == 0);
+ match_extra me1 (me.locked, oi == 0 /* fallback */);
if (!ru1.match (a, t, *hint, me1))
continue;
}
@@ -630,7 +912,10 @@ namespace build2
}
if (!ambig)
+ {
+ me.new_options = options;
return r;
+ }
else
dr << info << "use rule hint to disambiguate this match";
}
@@ -743,66 +1028,267 @@ namespace build2
recipe re (ar != nullptr ? f (*ar, a, t, me) : ru.apply (a, t, me));
- me.free ();
+ me.free (); // Note: cur_options are still in use.
+ assert (me.cur_options != 0); // Match options cannot be 0 after apply().
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
return re;
}
+ static void
+ apply_posthoc_impl (
+ action a, target& t,
+ const pair<const string, reference_wrapper<const rule>>& m,
+ context::posthoc_target& pt)
+ {
+ const scope& bs (t.base_scope ());
+
+ // Apply rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ const rule& ru (m.second);
+ match_extra& me (t[a].match_extra);
+ me.posthoc_prerequisite_targets = &pt.prerequisite_targets;
+
+ auto df = make_diag_frame (
+ [a, &t, &m](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while applying rule " << m.first << " to "
+ << diag_do (a, t) << " for post hoc prerequisites";
+ });
+
+ // Note: for now no adhoc_apply_posthoc().
+ //
+ ru.apply_posthoc (a, t, me);
+ }
+
+ static void
+ reapply_impl (action a,
+ target& t,
+ const pair<const string, reference_wrapper<const rule>>& m)
+ {
+ const scope& bs (t.base_scope ());
+
+ // Reapply rules in project environment.
+ //
+ auto_project_env penv;
+ if (const scope* rs = bs.root_scope ())
+ penv = auto_project_env (*rs);
+
+ const rule& ru (m.second);
+ match_extra& me (t[a].match_extra);
+ // Note: me.posthoc_prerequisite_targets carried over.
+
+ auto df = make_diag_frame (
+ [a, &t, &m](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while reapplying rule " << m.first << " to "
+ << diag_do (a, t);
+ });
+
+ // Note: for now no adhoc_reapply().
+ //
+ ru.reapply (a, t, me);
+ assert (me.cur_options != 0); // Match options cannot be 0 after reapply().
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
+ }
+
+ // If anything goes wrong, set target state to failed and return nullopt.
+ // Otherwise return the pointer to the new posthoc_target entry if any post
+ // hoc prerequisites were present or NULL otherwise. Note that the returned
+ // entry is stable (because we use a list) and should only be accessed
+ // during the match phase if the holding the target lock.
+ //
+ // Note: must be called while holding target_lock.
+ //
+ static optional<context::posthoc_target*>
+ match_posthoc (action a, target& t)
+ {
+ // The plan is to, while holding the lock, search and collect all the post
+ // hoc prerequisited and add an entry to context::current_posthoc_targets.
+ // The actual matching happens as post-pass in the meta-operation's match
+ // function.
+ //
+ // While it may seem like we could do matching here by unlocking (or
+ // unstacking) the lock for this target, that will only work for simple
+ // cases. In particular, consider:
+ //
+ // lib{foo}: ...
+ // lib{plug}: ... lib{foo}
+ // libs{foo}: libs{plug}: include = posthoc
+ //
+ // The chain we will end up with:
+ //
+ // lib{foo}->libs{foo}=>libs{plug}->lib{foo}
+ //
+ // This will trip up the cycle detection for group lib{foo}, not for
+ // libs{foo}.
+ //
+ // In the end, matching (and execution) "inline" (i.e., as we match/
+ // execute the corresponding target) appears to be unworkable in the
+ // face of cycles.
+ //
+ // Note also that this delayed match also helps with allowing the rule to
+ // adjust match options of post hoc prerequisites without needing the
+ // rematch support (see match_extra::posthoc_prerequisites).
+ //
+ // @@ Anything we need to do for group members (see through)? Feels quite
+ // far-fetched.
+ //
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
+ vector<posthoc_prerequisite_target> pts;
+ try
+ {
+ for (const prerequisite& p: group_prerequisites (t))
+ {
+ // Note that we have to ignore any operation-specific values for
+ // non-posthoc prerequisites. See include_impl() for details.
+ //
+ lookup l;
+ if (include (a, t, p, &l) == include_type::posthoc)
+ {
+ if (l)
+ {
+ const string& v (cast<string> (l));
+
+ // The only valid values are true and false and the latter would
+ // have been translated to include_type::exclude.
+ //
+ if (v != "true")
+ {
+ fail << "unrecognized " << *l.var << " variable value "
+ << "'" << v << "' specified for prerequisite " << p;
+ }
+ }
+
+ pts.push_back (
+ posthoc_prerequisite_target {
+ &search (t, p), // May fail.
+ match_extra::all_options});
+ }
+ }
+ }
+ catch (const failed&)
+ {
+ t.state[a].state = target_state::failed;
+ return nullopt;
+ }
+
+ if (!pts.empty ())
+ {
+ context& ctx (t.ctx);
+
+ mlock l (ctx.current_posthoc_targets_mutex);
+ ctx.current_posthoc_targets.push_back (posthoc_target {a, t, move (pts)});
+ return &ctx.current_posthoc_targets.back (); // Stable.
+ }
+
+ return nullptr;
+ }
+
// If step is true then perform only one step of the match/apply sequence.
//
// If try_match is true, then indicate whether there is a rule match with
// the first half of the result.
//
static pair<bool, target_state>
- match_impl (target_lock& l,
- bool step = false,
- bool try_match = false)
+ match_impl_impl (target_lock& l,
+ uint64_t options,
+ bool step = false,
+ bool try_match = false)
{
+ // With regards to options, the semantics that we need to achieve for each
+ // target::offeset_*:
+ //
+ // tried -- nothing to do (no match)
+ // touched -- set to new_options
+ // matched -- add to new_options
+ // applied -- reapply if any new options
+ // executed -- check and fail if any new options
+ // busy -- postpone until *_complete() call
+ //
+ // Note that if options is 0 (see resolve_{members,group}_impl()), then
+ // all this can be skipped.
+
assert (l.target != nullptr);
action a (l.action);
target& t (*l.target);
target::opstate& s (t[a]);
- // Intercept and handle matching an ad hoc group member.
- //
- if (t.adhoc_group_member ())
+ try
{
- assert (!step);
+ // Intercept and handle matching an ad hoc group member.
+ //
+ if (t.adhoc_group_member ())
+ {
+ // It feels natural to "convert" this call to the one for the group,
+ // including the try_match part. Semantically, we want to achieve the
+ // following:
+ //
+ // [try_]match (a, g);
+ // match_recipe (l, group_recipe);
+ //
+ // Currently, ad hoc group members cannot have options. An alternative
+ // semantics could be to call the goup's rule to translate member
+ // options to group options and then (re)match the group with that.
+ // The implementation of this semantics could look like this:
+ //
+ // 1. Lock the group.
+ // 2. If not already offset_matched, do one step to get the rule.
+ // 3. Call the rule to translate options.
+ // 4. Continue matching the group passing the translated options.
+ // 5. Keep track of member options in member's cur_options to handle
+ // member rematches (if already offset_{applied,executed}).
+ //
+ // Note: see also similar semantics but for explicit groups in
+ // adhoc-rule-*.cxx.
- const target& g (*t.group);
+ assert (!step && options == match_extra::all_options);
- // It feels natural to "convert" this call to the one for the group,
- // including the try_match part. Semantically, we want to achieve the
- // following:
- //
- // [try_]match (a, g);
- // match_recipe (l, group_recipe);
- //
- auto df = make_diag_frame (
- [a, &t](const diag_record& dr)
- {
- if (verb != 0)
- dr << info << "while matching group rule to " << diag_do (a, t);
- });
+ const target& g (*t.group);
+
+ // What should we do with options? After some rumination it fells most
+ // natural to treat options for the group and for its ad hoc member as
+ // the same entity ... or not.
+ //
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching group rule to " << diag_do (a, t);
+ });
- pair<bool, target_state> r (match_impl (a, g, 0, nullptr, try_match));
+ pair<bool, target_state> r (
+ match_impl (a, g, 0 /* options */, 0, nullptr, try_match));
- if (r.first)
- {
- if (r.second != target_state::failed)
+ if (r.first)
{
- match_inc_dependents (a, g);
- match_recipe (l, group_recipe);
+ if (r.second != target_state::failed)
+ {
+ // Note: in particular, passing all_options makes sure we will
+ // never re-lock this member if already applied/executed.
+ //
+ match_inc_dependents (a, g);
+ match_recipe (l, group_recipe, match_extra::all_options);
+
+ // Note: no need to call match_posthoc() since an ad hoc member
+ // has no own prerequisites and the group's ones will be matched
+ // by the group.
+ }
}
- }
- else
- l.offset = target::offset_tried;
+ else
+ l.offset = target::offset_tried;
- return r; // Group state (must be consistent with matched_state()).
- }
+ return r; // Group state (must be consistent with matched_state()).
+ }
- try
- {
// Continue from where the target has been left off.
//
switch (l.offset)
@@ -827,7 +1313,8 @@ namespace build2
//
clear_target (a, t);
- const rule_match* r (match_rule (a, t, nullptr, try_match));
+ const rule_match* r (
+ match_rule_impl (a, t, options, nullptr, try_match));
assert (l.offset != target::offset_tried); // Should have failed.
@@ -837,7 +1324,7 @@ namespace build2
return make_pair (false, target_state::unknown);
}
- s.rule = r;
+ set_rule (l, r);
l.offset = target::offset_matched;
if (step)
@@ -849,25 +1336,86 @@ namespace build2
// Fall through.
case target::offset_matched:
{
+ // Add any new options.
+ //
+ s.match_extra.new_options |= options;
+
// Apply.
//
set_recipe (l, apply_impl (a, t, *s.rule));
l.offset = target::offset_applied;
+
+ if (t.has_group_prerequisites ()) // Ok since already matched.
+ {
+ if (optional<context::posthoc_target*> p = match_posthoc (a, t))
+ {
+ if (*p != nullptr)
+ {
+ // It would have been more elegant to do this before calling
+ // apply_impl() and then expose the post hoc prerequisites to
+ // apply(). The problem is the group may not be resolved until
+ // the call to apply(). And so we resort to the separate
+ // apply_posthoc() function.
+ //
+ apply_posthoc_impl (a, t, *s.rule, **p);
+ }
+ }
+ else
+ s.state = target_state::failed;
+ }
+
break;
}
+ case target::offset_applied:
+ {
+ // Reapply if any new options.
+ //
+ match_extra& me (s.match_extra);
+ me.new_options = options & ~me.cur_options; // Clear existing.
+ assert (me.new_options != 0); // Otherwise should not have locked.
+
+ // Feels like this can only be a logic bug since to end up with a
+ // subset of options requires a rule (see match_extra for details).
+ //
+ assert (s.rule != nullptr);
+
+ reapply_impl (a, t, *s.rule);
+ break;
+ }
+ case target::offset_executed:
+ {
+ // Diagnose new options after execute.
+ //
+ match_extra& me (s.match_extra);
+ assert ((me.cur_options & options) != options); // Otherwise no lock.
+
+ fail << "change of match options after " << diag_do (a, t)
+ << " has been executed" <<
+ info << "executed options 0x" << hex << me.cur_options <<
+ info << "requested options 0x" << hex << options << endf;
+ }
default:
assert (false);
}
}
catch (const failed&)
{
+ s.state = target_state::failed;
+ l.offset = target::offset_applied;
+
+ // Make sure we don't relock a failed target.
+ //
+ match_extra& me (s.match_extra);
+ me.cur_options = match_extra::all_options;
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
+ }
+
+ if (s.state == target_state::failed)
+ {
// As a sanity measure clear the target data since it can be incomplete
// or invalid (mark()/unmark() should give you some ideas).
//
clear_target (a, t);
-
- s.state = target_state::failed;
- l.offset = target::offset_applied;
}
return make_pair (true, s.state);
@@ -877,10 +1425,9 @@ namespace build2
// the first half of the result.
//
pair<bool, target_state>
- match_impl (action a,
- const target& ct,
- size_t start_count,
- atomic_count* task_count,
+ match_impl (action a, const target& ct,
+ uint64_t options,
+ size_t start_count, atomic_count* task_count,
bool try_match)
{
// If we are blocking then work our own queue one task at a time. The
@@ -902,17 +1449,16 @@ namespace build2
ct,
task_count == nullptr
? optional<scheduler::work_queue> (scheduler::work_none)
- : nullopt));
+ : nullopt,
+ options));
if (l.target != nullptr)
{
- assert (l.offset < target::offset_applied); // Shouldn't lock otherwise.
-
if (try_match && l.offset == target::offset_tried)
return make_pair (false, target_state::unknown);
if (task_count == nullptr)
- return match_impl (l, false /* step */, try_match);
+ return match_impl_impl (l, options, false /* step */, try_match);
// Pass "disassembled" lock since the scheduler queue doesn't support
// task destruction.
@@ -922,12 +1468,18 @@ namespace build2
// Also pass our diagnostics and lock stacks (this is safe since we
// expect the caller to wait for completion before unwinding its stack).
//
- if (ct.ctx.sched.async (
+ // Note: pack captures and arguments a bit to reduce the storage space
+ // requrements.
+ //
+ bool first (ld.first);
+
+ if (ct.ctx.sched->async (
start_count,
*task_count,
- [a, try_match] (const diag_frame* ds,
- const target_lock* ls,
- target& t, size_t offset)
+ [a, try_match, first] (const diag_frame* ds,
+ const target_lock* ls,
+ target& t, size_t offset,
+ uint64_t options)
{
// Switch to caller's diag and lock stacks.
//
@@ -938,17 +1490,18 @@ namespace build2
{
phase_lock pl (t.ctx, run_phase::match); // Throws.
{
- target_lock l {a, &t, offset}; // Reassemble.
- match_impl (l, false /* step */, try_match);
- // Unlock within the match phase.
+ // Note: target_lock must be unlocked within the match phase.
+ //
+ target_lock l {a, &t, offset, first}; // Reassemble.
+ match_impl_impl (l, options, false /* step */, try_match);
}
}
catch (const failed&) {} // Phase lock failure.
},
diag_frame::stack (),
target_lock::stack (),
- ref (*ld.target),
- ld.offset))
+ ref (*ld.target), ld.offset,
+ options))
return make_pair (true, target_state::postponed); // Queued.
// Matched synchronously, fall through.
@@ -966,9 +1519,39 @@ namespace build2
return ct.try_matched_state (a, false);
}
+ void
+ match_only_sync (action a, const target& t, uint64_t options)
+ {
+ assert (t.ctx.phase == run_phase::match);
+
+ target_lock l (lock_impl (a, t, scheduler::work_none, options));
+
+ if (l.target != nullptr)
+ {
+ if (l.offset != target::offset_matched)
+ {
+ if (match_impl_impl (l,
+ options,
+ true /* step */).second == target_state::failed)
+ throw failed ();
+ }
+ else
+ {
+ // If the target is already matched, then we need to add any new
+ // options but not call apply() (thus cannot use match_impl_impl()).
+ //
+ (*l.target)[a].match_extra.new_options |= options;
+ }
+ }
+ }
+
+ // Note: lock is a reference to avoid the stacking overhead.
+ //
static group_view
- resolve_members_impl (action a, const target& g, target_lock l)
+ resolve_members_impl (action a, const target& g, target_lock&& l)
{
+ assert (a.inner ());
+
// Note that we will be unlocked if the target is already applied.
//
group_view r;
@@ -982,7 +1565,9 @@ namespace build2
{
// Match (locked).
//
- if (match_impl (l, true).second == target_state::failed)
+ if (match_impl_impl (l,
+ 0 /* options */,
+ true /* step */).second == target_state::failed)
throw failed ();
if ((r = g.group_members (a)).members != nullptr)
@@ -993,34 +1578,52 @@ namespace build2
// Fall through.
case target::offset_matched:
{
- // @@ Doing match without execute messes up our target_count. Does
- // not seem like it will be easy to fix (we don't know whether
- // someone else will execute this target).
- //
- // What if we always do match & execute together? After all,
- // if a group can be resolved in apply(), then it can be
- // resolved in match()! Feels a bit drastic.
- //
- // But, this won't be a problem if the target returns noop_recipe.
- // And perhaps it's correct to fail if it's not noop_recipe but
- // nobody executed it? Maybe not.
- //
- // Another option would be to have a count for such "matched but
- // may not be executed" targets and then make sure target_count
- // is less than that at the end. Though this definitelt makes it
- // less exact (since we can end up executed this target but not
- // some other). Maybe we can increment and decrement such targets
- // in a separate count (i.e., mark their recipe as special or some
- // such).
- //
-
// Apply (locked).
//
- if (match_impl (l, true).second == target_state::failed)
+ pair<bool, target_state> s (
+ match_impl_impl (l, 0 /* options */, true /* step */));
+
+ if (s.second == target_state::failed)
throw failed ();
if ((r = g.group_members (a)).members != nullptr)
+ {
+ // Doing match without execute messes up our target_count. There
+ // doesn't seem to be a clean way to solve this. Well, just always
+ // executing if we've done the match would have been clean but quite
+ // heavy-handed (it would be especially surprising if otherwise
+ // there is nothing else to do, which can happen, for example,
+ // during update-for-test when there are no tests to run).
+ //
+ // So our solution is as follows:
+ //
+ // 1. Keep track both of the targets that ended up in this situation
+ // (the target::resolve_counted flag) as well as their total
+ // count (the context::resolve_count member). Only do this if
+ // set_recipe() (called by match_impl()) would have incremented
+ // target_count.
+ //
+ // 2. If we happen to execute such a target (common case), then
+ // clear the flag and decrement the count.
+ //
+ // 3. When it's time to assert that target_count==0 (i.e., all the
+ // matched targets have been executed), check if resolve_count is
+ // 0. If it's not, then find every target with the flag set,
+ // pretend-execute it, and decrement both counts. See
+ // perform_execute() for further details on this step.
+ //
+ if (s.second != target_state::unchanged)
+ {
+ target::opstate& s (l.target->state[a]); // Inner.
+
+ if (!s.recipe_group_action)
+ {
+ s.resolve_counted = true;
+ g.ctx.resolve_count.fetch_add (1, memory_order_relaxed);
+ }
+ }
break;
+ }
// Unlock and to execute ...
//
@@ -1037,6 +1640,10 @@ namespace build2
// we would have already known the members list) and we really do need
// to execute it now.
//
+ // Note that while it might be tempting to decrement resolve_count
+ // here, there is no guarantee that we were the ones who have matched
+ // this target.
+ //
{
phase_switch ps (g.ctx, run_phase::execute);
execute_direct_sync (a, g);
@@ -1085,10 +1692,23 @@ namespace build2
return r;
}
+ // Note: lock is a reference to avoid the stacking overhead.
+ //
void
- resolve_group_impl (action, const target&, target_lock l)
+ resolve_group_impl (target_lock&& l)
{
- match_impl (l, true /* step */, true /* try_match */);
+ assert (l.action.inner ());
+
+ pair<bool, target_state> r (
+ match_impl_impl (l,
+ 0 /* options */,
+ true /* step */,
+ true /* try_match */));
+
+ l.unlock ();
+
+ if (r.first && r.second == target_state::failed)
+ throw failed ();
}
template <typename R, typename S>
@@ -1096,16 +1716,33 @@ namespace build2
match_prerequisite_range (action a, target& t,
R&& r,
const S& ms,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
auto& pts (t.prerequisite_targets[a]);
+ size_t i (pts.size ()); // Index of the first to be added.
+
+ // Avoid duplicating fsdir{} that may have already been injected by
+ // inject_fsdir() (in which case it is expected to be first).
+ //
+ const target* dir (nullptr);
+ if (i != 0)
+ {
+ const prerequisite_target& pt (pts.front ());
+
+ if (pt.target != nullptr && pt.adhoc () && pt.target->is_a<fsdir> ())
+ dir = pt.target;
+ }
+
// Start asynchronous matching of prerequisites. Wait with unlocked phase
// to allow phase switching.
//
- wait_guard wg (t.ctx, t.ctx.count_busy (), t[a].task_count, true);
+ wait_guard wg (
+ search_only
+ ? wait_guard ()
+ : wait_guard (t.ctx, t.ctx.count_busy (), t[a].task_count, true));
- size_t i (pts.size ()); // Index of the first to be added.
for (auto&& p: forward<R> (r))
{
// Ignore excluded.
@@ -1119,13 +1756,20 @@ namespace build2
? ms (a, t, p, pi)
: prerequisite_target (&search (t, p), pi));
- if (pt.target == nullptr || (s != nullptr && !pt.target->in (*s)))
+ if (pt.target == nullptr ||
+ pt.target == dir ||
+ (s != nullptr && !pt.target->in (*s)))
continue;
- match_async (a, *pt.target, t.ctx.count_busy (), t[a].task_count);
+ if (!search_only)
+ match_async (a, *pt.target, t.ctx.count_busy (), t[a].task_count);
+
pts.push_back (move (pt));
}
+ if (search_only)
+ return;
+
wg.wait ();
// Finish matching all the targets that we have started.
@@ -1140,22 +1784,31 @@ namespace build2
void
match_prerequisites (action a, target& t,
const match_search& ms,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
- match_prerequisite_range (a, t, group_prerequisites (t), ms, s);
+ match_prerequisite_range (a, t,
+ group_prerequisites (t),
+ ms,
+ s,
+ search_only);
}
void
match_prerequisite_members (action a, target& t,
const match_search_member& msm,
- const scope* s)
+ const scope* s,
+ bool search_only)
{
- match_prerequisite_range (a, t, group_prerequisite_members (a, t), msm, s);
+ match_prerequisite_range (a, t,
+ group_prerequisite_members (a, t),
+ msm,
+ s,
+ search_only);
}
- template <typename T>
void
- match_members (action a, target& t, T const* ts, size_t n)
+ match_members (action a, const target& t, const target* const* ts, size_t n)
{
// Pretty much identical to match_prerequisite_range() except we don't
// search.
@@ -1187,18 +1840,48 @@ namespace build2
}
}
- // Instantiate only for what we need.
- //
- template LIBBUILD2_SYMEXPORT void
- match_members<const target*> (action, target&,
- const target* const*, size_t);
+ void
+ match_members (action a,
+ const target& t,
+ prerequisite_targets& ts,
+ size_t s,
+ pair<uintptr_t, uintptr_t> imv)
+ {
+ size_t n (ts.size ());
+
+ wait_guard wg (t.ctx, t.ctx.count_busy (), t[a].task_count, true);
+
+ for (size_t i (s); i != n; ++i)
+ {
+ const prerequisite_target& pt (ts[i]);
+ const target* m (pt.target);
+
+ if (m == nullptr ||
+ marked (m) ||
+ (imv.first != 0 && (pt.include & imv.first) != imv.second))
+ continue;
+
+ match_async (a, *m, t.ctx.count_busy (), t[a].task_count);
+ }
+
+ wg.wait ();
- template LIBBUILD2_SYMEXPORT void
- match_members<prerequisite_target> (action, target&,
- prerequisite_target const*, size_t);
+ for (size_t i (s); i != n; ++i)
+ {
+ const prerequisite_target& pt (ts[i]);
+ const target* m (pt.target);
+
+ if (m == nullptr ||
+ marked (m) ||
+ (imv.first != 0 && (pt.include & imv.first) != imv.second))
+ continue;
+
+ match_complete (a, *m);
+ }
+ }
const fsdir*
- inject_fsdir (action a, target& t, bool parent)
+ inject_fsdir_impl (target& t, bool prereq, bool parent)
{
tracer trace ("inject_fsdir");
@@ -1219,6 +1902,7 @@ namespace build2
// subprojects (e.g., tests/).
//
const fsdir* r (nullptr);
+
if (rs != nullptr && !d.sub (rs->src_path ()))
{
l6 ([&]{trace << d << " for " << t;});
@@ -1227,7 +1911,7 @@ namespace build2
//
r = &search<fsdir> (t, d, dir_path (), string (), nullptr, nullptr);
}
- else
+ else if (prereq)
{
// See if one was mentioned explicitly.
//
@@ -1246,13 +1930,45 @@ namespace build2
}
}
+ return r;
+ }
+
+ const fsdir*
+ inject_fsdir (action a, target& t, bool match, bool prereq, bool parent)
+ {
+ auto& pts (t.prerequisite_targets[a]);
+
+ assert (!prereq || pts.empty ()); // This prerequisite target must be first.
+
+ const fsdir* r (inject_fsdir_impl (t, prereq, parent));
+
if (r != nullptr)
{
+ if (match)
+ match_sync (a, *r);
+
// Make it ad hoc so that it doesn't end up in prerequisite_targets
// after execution.
//
- match_sync (a, *r);
- t.prerequisite_targets[a].emplace_back (r, include_type::adhoc);
+ pts.emplace_back (r, include_type::adhoc);
+ }
+
+ return r;
+ }
+
+ const fsdir*
+ inject_fsdir_direct (action a, target& t, bool prereq, bool parent)
+ {
+ auto& pts (t.prerequisite_targets[a]);
+
+ assert (!prereq || pts.empty ()); // This prerequisite target must be first.
+
+ const fsdir* r (inject_fsdir_impl (t, prereq, parent));
+
+ if (r != nullptr)
+ {
+ match_direct_sync (a, *r);
+ pts.emplace_back (r, include_type::adhoc);
}
return r;
@@ -1365,11 +2081,26 @@ namespace build2
return ts;
}
- void
- update_backlink (const file& f, const path& l, bool changed, backlink_mode m)
+ static inline const char*
+ update_backlink_name (backlink_mode m, bool to_dir)
{
using mode = backlink_mode;
+ const char* r (nullptr);
+ switch (m)
+ {
+ case mode::link:
+ case mode::symbolic: r = verb >= 3 ? "ln -sf" : verb >= 2 ? "ln -s" : "ln"; break;
+ case mode::hard: r = verb >= 3 ? "ln -f" : "ln"; break;
+ case mode::copy:
+ case mode::overwrite: r = to_dir ? "cp -r" : "cp"; break;
+ }
+ return r;
+ }
+
+ void
+ update_backlink (const file& f, const path& l, bool changed, backlink_mode m)
+ {
const path& p (f.path ());
dir_path d (l.directory ());
@@ -1381,28 +2112,20 @@ namespace build2
// actually updated to signal to the user that the updated out target is
// now available in src.
//
- if (verb <= 2)
+ if (verb == 1 || verb == 2)
{
if (changed || !butl::entry_exists (l,
false /* follow_symlinks */,
true /* ignore_errors */))
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = verb >= 2 ? "ln -s" : "ln"; break;
- case mode::hard: c = "ln"; break;
- case mode::copy:
- case mode::overwrite: c = l.to_directory () ? "cp -r" : "cp"; break;
- }
+ const char* c (update_backlink_name (m, l.to_directory ()));
- // Note: 'ln foo/ bar/' means a different thing.
+ // Note: 'ln foo/ bar/' means a different thing (and below).
//
- if (verb >= 2)
+ if (verb == 2)
text << c << ' ' << p.string () << ' ' << l.string ();
else
- text << c << ' ' << f << " -> " << d;
+ print_diag (c, f, d);
}
}
@@ -1422,30 +2145,25 @@ namespace build2
{
// As above but with a slightly different diagnostics.
- using mode = backlink_mode;
-
dir_path d (l.directory ());
- if (verb <= 2)
+ if (verb == 1 || verb == 2)
{
if (changed || !butl::entry_exists (l,
false /* follow_symlinks */,
true /* ignore_errors */))
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = verb >= 2 ? "ln -s" : "ln"; break;
- case mode::hard: c = "ln"; break;
- case mode::copy:
- case mode::overwrite: c = l.to_directory () ? "cp -r" : "cp"; break;
- }
+ const char* c (update_backlink_name (m, l.to_directory ()));
+ // Note: 'ln foo/ bar/' means a different thing (and above) so strip
+ // trailing directory separator (but keep as path for relative).
+ //
if (verb >= 2)
text << c << ' ' << p.string () << ' ' << l.string ();
else
- text << c << ' ' << p.string () << " -> " << d;
+ print_diag (c,
+ p.to_directory () ? path (p.string ()) : p,
+ d);
}
}
@@ -1497,6 +2215,8 @@ namespace build2
const path& p, const path& l, backlink_mode om,
uint16_t verbosity)
{
+ assert (verbosity >= 2);
+
using mode = backlink_mode;
bool d (l.to_directory ());
@@ -1506,17 +2226,8 @@ namespace build2
{
if (verb >= verbosity)
{
- const char* c (nullptr);
- switch (m)
- {
- case mode::link:
- case mode::symbolic: c = "ln -sf"; break;
- case mode::hard: c = "ln -f"; break;
- case mode::copy:
- case mode::overwrite: c = d ? "cp -r" : "cp"; break;
- }
-
- text << c << ' ' << p.string () << ' ' << l.string ();
+ text << update_backlink_name (m, d) << ' ' << p.string () << ' '
+ << l.string ();
}
};
@@ -1578,8 +2289,7 @@ namespace build2
try_mkdir (to);
- for (const auto& de:
- dir_iterator (fr, false /* ignore_dangling */))
+ for (const auto& de: dir_iterator (fr, dir_iterator::no_follow))
{
path f (fr / de.path ());
path t (to / de.path ());
@@ -1632,6 +2342,11 @@ namespace build2
//
// Note that here the dry-run mode is handled by the filesystem functions.
+ // Note that if we ever need to support level 1 for some reason, maybe
+ // consider showing the target, for example, `unlink exe{hello} <- dir/`?
+ //
+ assert (v >= 2);
+
using mode = backlink_mode;
if (l.to_directory ())
@@ -1666,9 +2381,15 @@ namespace build2
struct backlink: auto_rm<path>
{
using path_type = build2::path;
+ using target_type = build2::target;
reference_wrapper<const path_type> target;
- backlink_mode mode;
+ backlink_mode mode;
+
+ // Ad hoc group-specific information for diagnostics (see below).
+ //
+ const target_type* member = nullptr;
+ bool print = true;
backlink (const path_type& t, path_type&& l, backlink_mode m, bool active)
: auto_rm<path_type> (move (l), active), target (t), mode (m)
@@ -1690,33 +2411,65 @@ namespace build2
};
// Normally (i.e., on sane platforms that don't have things like PDBs, etc)
- // there will be just one backlink so optimize for that.
+ // there will be just one or two backlinks so optimize for that.
//
- using backlinks = small_vector<backlink, 1>;
+ using backlinks = small_vector<backlink, 2>;
- static optional<backlink_mode>
- backlink_test (const target& t, const lookup& l)
+ static optional<pair<backlink_mode, bool>>
+ backlink_test (const target& t, const lookup& l, optional<backlink_mode> gm)
{
using mode = backlink_mode;
- optional<mode> r;
- const string& v (cast<string> (l));
+ const names& ns (cast<names> (l));
- if (v == "true") r = mode::link;
- else if (v == "symbolic") r = mode::symbolic;
- else if (v == "hard") r = mode::hard;
- else if (v == "copy") r = mode::copy;
- else if (v == "overwrite") r = mode::overwrite;
- else if (v != "false")
- fail << "invalid backlink variable value '" << v << "' "
+ if (ns.size () != 1 && ns.size () != 2)
+ {
+ fail << "invalid backlink variable value '" << ns << "' "
<< "specified for target " << t;
+ }
- return r;
+ optional<mode> m;
+ for (;;) // Breakout loop.
+ {
+ const name& n (ns.front ());
+
+ if (n.simple ())
+ {
+ const string& v (n.value);
+
+ if (v == "true") {m = mode::link; break;}
+ else if (v == "symbolic") {m = mode::symbolic; break;}
+ else if (v == "hard") {m = mode::hard; break;}
+ else if (v == "copy") {m = mode::copy; break;}
+ else if (v == "overwrite") {m = mode::overwrite; break;}
+ else if (v == "false") { break;}
+ else if (v == "group") {if ((m = gm)) break;}
+ }
+
+ fail << "invalid backlink variable value mode component '" << n << "' "
+ << "specified for target " << t << endf;
+ }
+
+ bool np (false); // "not print"
+ if (ns.size () == 2)
+ {
+ const name& n (ns.back ());
+
+ if (n.simple () && (n.value == "true" || (np = (n.value == "false"))))
+ ;
+ else
+ fail << "invalid backlink variable value print component '" << n
+ << "' specified for target " << t;
+ }
+
+ return m ? optional<pair<mode, bool>> (make_pair (*m, !np)) : nullopt;
}
static optional<backlink_mode>
backlink_test (action a, target& t)
{
+ using mode = backlink_mode;
+
context& ctx (t.ctx);
// Note: the order of these checks is from the least to most expensive.
@@ -1726,9 +2479,20 @@ namespace build2
if (a.outer () || (a != perform_update_id && a != perform_clean_id))
return nullopt;
- // Only file-based targets in the out tree can be backlinked.
+ // Only targets in the out tree can be backlinked.
+ //
+ if (!t.out.empty ())
+ return nullopt;
+
+ // Only file-based targets or groups containing file-based targets can be
+ // backlinked. Note that we don't do the "file-based" check of the latter
+ // case here since they can still be execluded. So instead we are prepared
+ // to handle the empty backlinks list.
+ //
+ // @@ Potentially members could only be resolved in execute. I guess we
+ // don't support backlink for such groups at the moment.
//
- if (!t.out.empty () || !t.is_a<file> ())
+ if (!t.is_a<file> () && t.group_members (a).members == nullptr)
return nullopt;
// Neither an out-of-project nor in-src configuration can be forwarded.
@@ -1752,7 +2516,13 @@ namespace build2
if (!l.defined ())
l = ctx.global_scope.lookup (*ctx.var_backlink, t.key ());
- return l ? backlink_test (t, l) : nullopt;
+ optional<pair<mode, bool>> r (l ? backlink_test (t, l, nullopt) : nullopt);
+
+ if (r && !r->second)
+ fail << "backlink variable value print component cannot be false "
+ << "for primary target " << t;
+
+ return r ? optional<mode> (r->first) : nullopt;
}
static backlinks
@@ -1760,58 +2530,104 @@ namespace build2
{
using mode = backlink_mode;
+ context& ctx (t.ctx);
const scope& s (t.base_scope ());
backlinks bls;
- auto add = [&bls, &s] (const path& p, mode m)
+ auto add = [&bls, &s] (const path& p,
+ mode m,
+ const target* mt = nullptr,
+ bool print = true)
{
bls.emplace_back (p,
s.src_path () / p.leaf (s.out_path ()),
m,
!s.ctx.dry_run /* active */);
+
+ if (mt != nullptr)
+ {
+ backlink& bl (bls.back ());
+ bl.member = mt;
+ bl.print = print;
+ }
};
- // First the target itself.
+ // Check for a custom backlink mode for this member. If none, then
+ // inherit the one from the group (so if the user asked to copy
+ // .exe, we will also copy .pdb).
//
- add (t.as<file> ().path (), m);
+ // Note that we want to avoid group or tt/patter-spec lookup. And
+ // since this is an ad hoc member (which means it was either declared
+ // in the buildfile or added by the rule), we assume that the value,
+ // if any, will be set as a target or rule-specific variable.
+ //
+ auto member_mode = [a, m, &ctx] (const target& mt)
+ -> optional<pair<mode, bool>>
+ {
+ lookup l (mt.state[a].vars[ctx.var_backlink]);
- // Then ad hoc group file/fsdir members, if any.
+ if (!l)
+ l = mt.vars[ctx.var_backlink];
+
+ return l ? backlink_test (mt, l, m) : make_pair (m, true);
+ };
+
+ // @@ Currently we don't handle the following cases:
+ //
+ // 1. File-based explicit groups.
+ //
+ // 2. Ad hoc subgroups in explicit groups.
//
- for (const target* mt (t.adhoc_member);
- mt != nullptr;
- mt = mt->adhoc_member)
+ // Note: see also the corresponding code in backlink_update_post().
+ //
+ if (file* f = t.is_a<file> ())
{
- const path* p (nullptr);
+ // First the target itself.
+ //
+ add (f->path (), m, f, true); // Note: always printed.
- if (const file* f = mt->is_a<file> ())
+ // Then ad hoc group file/fsdir members, if any.
+ //
+ for (const target* mt (t.adhoc_member);
+ mt != nullptr;
+ mt = mt->adhoc_member)
{
- p = &f->path ();
+ const path* p (nullptr);
- if (p->empty ()) // The "trust me, it's somewhere" case.
- p = nullptr;
- }
- else if (const fsdir* d = mt->is_a<fsdir> ())
- p = &d->dir;
+ if (const file* f = mt->is_a<file> ())
+ {
+ p = &f->path ();
- if (p != nullptr)
- {
- // Check for a custom backlink mode for this member. If none, then
- // inherit the one from the group (so if the user asked to copy .exe,
- // we will also copy .pdb).
- //
- // Note that we want to avoid group or tt/patter-spec lookup. And
- // since this is an ad hoc member (which means it was either declared
- // in the buildfile or added by the rule), we assume that the value,
- // if any, will be set as a rule-specific variable (since setting it
- // as a target-specific wouldn't be MT-safe). @@ Don't think this
- // applies to declared ad hoc members.
- //
- lookup l (mt->state[a].vars[t.ctx.var_backlink]);
+ if (p->empty ()) // The "trust me, it's somewhere" case.
+ p = nullptr;
+ }
+ else if (const fsdir* d = mt->is_a<fsdir> ())
+ p = &d->dir;
- optional<mode> bm (l ? backlink_test (*mt, l) : m);
+ if (p != nullptr)
+ {
+ if (optional<pair<mode, bool>> m = member_mode (*mt))
+ add (*p, m->first, mt, m->second);
+ }
+ }
+ }
+ else
+ {
+ // Explicit group.
+ //
+ group_view gv (t.group_members (a));
+ assert (gv.members != nullptr);
- if (bm)
- add (*p, *bm);
+ for (size_t i (0); i != gv.count; ++i)
+ {
+ if (const target* mt = gv.members[i])
+ {
+ if (const file* f = mt->is_a<file> ())
+ {
+ if (optional<pair<mode, bool>> m = member_mode (*mt))
+ add (f->path (), m->first);
+ }
+ }
}
}
@@ -1825,29 +2641,89 @@ namespace build2
}
static void
- backlink_update_post (target& t, target_state ts, backlinks& bls)
+ backlink_update_post (target& t, target_state ts,
+ backlink_mode m, backlinks& bls)
{
if (ts == target_state::failed)
return; // Let auto rm clean things up.
- // Make backlinks.
- //
- for (auto b (bls.begin ()), i (b); i != bls.end (); ++i)
+ context& ctx (t.ctx);
+
+ file* ft (t.is_a<file> ());
+
+ if (ft != nullptr && bls.size () == 1)
{
- const backlink& bl (*i);
+ // Single file-based target.
+ //
+ const backlink& bl (bls.front ());
- if (i == b)
- update_backlink (t.as<file> (),
- bl.path,
- ts == target_state::changed,
- bl.mode);
- else
- update_backlink (t.ctx, bl.target, bl.path, bl.mode);
+ update_backlink (*ft,
+ bl.path,
+ ts == target_state::changed,
+ bl.mode);
+ }
+ else
+ {
+ // Explicit or ad hoc group.
+ //
+ // What we have below is a custom variant of update_backlink(file).
+ //
+ dir_path d (bls.front ().path.directory ());
+
+ // First print the verbosity level 1 diagnostics. Level 2 and higher are
+ // handled by the update_backlink() calls below.
+ //
+ if (verb == 1)
+ {
+ bool changed (ts == target_state::changed);
+
+ if (!changed)
+ {
+ for (const backlink& bl: bls)
+ {
+ changed = !butl::entry_exists (bl.path,
+ false /* follow_symlinks */,
+ true /* ignore_errors */);
+ if (changed)
+ break;
+ }
+ }
+
+ if (changed)
+ {
+ const char* c (update_backlink_name (m, false /* to_dir */));
+
+ // For explicit groups we only print the group target. For ad hoc
+ // groups we print all the members except those explicitly excluded.
+ //
+ if (ft == nullptr)
+ print_diag (c, t, d);
+ else
+ {
+ vector<target_key> tks;
+ tks.reserve (bls.size ());
+
+ for (const backlink& bl: bls)
+ if (bl.print)
+ tks.push_back (bl.member->key ());
+
+ print_diag (c, move (tks), d);
+ }
+ }
+ }
+
+ if (!exists (d))
+ mkdir_p (d, 2 /* verbosity */);
+
+ // Make backlinks.
+ //
+ for (const backlink& bl: bls)
+ update_backlink (ctx, bl.target, bl.path, bl.mode, 2 /* verbosity */);
}
// Cancel removal.
//
- if (!t.ctx.dry_run)
+ if (!ctx.dry_run)
{
for (backlink& bl: bls)
bl.cancel ();
@@ -1888,15 +2764,57 @@ namespace build2
// which is ok since such targets are probably not interesting for
// backlinking.
//
+ // Note also that for group members (both ad hoc and non) backlinking
+ // is handled when updating/cleaning the group.
+ //
backlinks bls;
- optional<backlink_mode> blm (backlink_test (a, t));
+ optional<backlink_mode> blm;
- if (blm)
+ if (t.group == nullptr) // Matched so must be already resolved.
{
- if (a == perform_update_id)
- bls = backlink_update_pre (a, t, *blm);
+ blm = backlink_test (a, t);
+
+ if (blm)
+ {
+ if (a == perform_update_id)
+ {
+ bls = backlink_update_pre (a, t, *blm);
+ if (bls.empty ())
+ blm = nullopt;
+ }
+ else
+ backlink_clean_pre (a, t, *blm);
+ }
+ }
+
+ // Note: see similar code in set_rule_trace() for match.
+ //
+ if (ctx.trace_execute != nullptr && trace_target (t, *ctx.trace_execute))
+ {
+ diag_record dr (info);
+
+ dr << diag_doing (a, t);
+
+ if (s.rule != nullptr)
+ {
+ const rule& r (s.rule->second);
+
+ if (const adhoc_rule* ar = dynamic_cast<const adhoc_rule*> (&r))
+ {
+ dr << info (ar->loc);
+
+ if (ar->pattern != nullptr)
+ dr << "using ad hoc pattern rule ";
+ else
+ dr << "using ad hoc recipe ";
+ }
+ else
+ dr << info << "using rule ";
+
+ dr << s.rule->first;
+ }
else
- backlink_clean_pre (a, t, *blm);
+ dr << info << "using directly-assigned recipe";
}
ts = execute_recipe (a, t, s.recipe);
@@ -1904,7 +2822,7 @@ namespace build2
if (blm)
{
if (a == perform_update_id)
- backlink_update_post (t, ts, bls);
+ backlink_update_post (t, ts, *blm, bls);
}
}
catch (const failed&)
@@ -1919,7 +2837,6 @@ namespace build2
// s.recipe_group_action may be used further (see, for example,
// group_state()) and should retain its value.
//
- //
if (!s.recipe_keep)
s.recipe = nullptr;
@@ -1929,7 +2846,17 @@ namespace build2
// postponment logic (see excute_recipe() for details).
//
if (a.inner () && !s.recipe_group_action)
+ {
+ // See resolve_members_impl() for background.
+ //
+ if (s.resolve_counted)
+ {
+ s.resolve_counted = false;
+ ctx.resolve_count.fetch_sub (1, memory_order_relaxed);
+ }
+
ctx.target_count.fetch_sub (1, memory_order_relaxed);
+ }
// Decrement the task count (to count_executed) and wake up any threads
// that might be waiting for this target.
@@ -1938,7 +2865,7 @@ namespace build2
target::offset_busy - target::offset_executed,
memory_order_release));
assert (tc == ctx.count_busy ());
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
return ts;
}
@@ -1949,6 +2876,8 @@ namespace build2
size_t start_count,
atomic_count* task_count)
{
+ // NOTE: see also pretend_execute lambda in perform_execute().
+
target& t (const_cast<target&> (ct)); // MT-aware.
target::opstate& s (t[a]);
@@ -1991,6 +2920,7 @@ namespace build2
size_t exec (ctx.count_executed ());
size_t busy (ctx.count_busy ());
+ optional<target_state> r;
if (s.task_count.compare_exchange_strong (
tc,
busy,
@@ -2003,32 +2933,35 @@ namespace build2
{
// There could still be scope operations.
//
- if (t.is_a<dir> ())
- execute_recipe (a, t, nullptr /* recipe */);
+ r = t.is_a<dir> ()
+ ? execute_recipe (a, t, nullptr /* recipe */)
+ : s.state;
s.task_count.store (exec, memory_order_release);
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
}
else
{
if (task_count == nullptr)
- return execute_impl (a, t);
-
- // Pass our diagnostics stack (this is safe since we expect the
- // caller to wait for completion before unwinding its diag stack).
- //
- if (ctx.sched.async (start_count,
- *task_count,
- [a] (const diag_frame* ds, target& t)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (a, t);
- },
- diag_frame::stack (),
- ref (t)))
- return target_state::unknown; // Queued.
-
- // Executed synchronously, fall through.
+ r = execute_impl (a, t);
+ else
+ {
+ // Pass our diagnostics stack (this is safe since we expect the
+ // caller to wait for completion before unwinding its diag stack).
+ //
+ if (ctx.sched->async (start_count,
+ *task_count,
+ [a] (const diag_frame* ds, target& t)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (a, t);
+ },
+ diag_frame::stack (),
+ ref (t)))
+ return target_state::unknown; // Queued.
+
+ // Executed synchronously, fall through.
+ }
}
}
else
@@ -2039,7 +2972,7 @@ namespace build2
else assert (tc == exec);
}
- return t.executed_state (a, false);
+ return r ? *r : t.executed_state (a, false /* fail */);
}
target_state
@@ -2060,6 +2993,7 @@ namespace build2
size_t exec (ctx.count_executed ());
size_t busy (ctx.count_busy ());
+ optional<target_state> r;
if (s.task_count.compare_exchange_strong (
tc,
busy,
@@ -2069,34 +3003,34 @@ namespace build2
if (s.state == target_state::unknown)
{
if (task_count == nullptr)
- return execute_impl (a, t);
-
- if (ctx.sched.async (start_count,
- *task_count,
- [a] (const diag_frame* ds, target& t)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (a, t);
- },
- diag_frame::stack (),
- ref (t)))
- return target_state::unknown; // Queued.
-
- // Executed synchronously, fall through.
+ r = execute_impl (a, t);
+ else
+ {
+ if (ctx.sched->async (start_count,
+ *task_count,
+ [a] (const diag_frame* ds, target& t)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (a, t);
+ },
+ diag_frame::stack (),
+ ref (t)))
+ return target_state::unknown; // Queued.
+
+ // Executed synchronously, fall through.
+ }
}
else
{
assert (s.state == target_state::unchanged ||
s.state == target_state::failed);
- if (s.state == target_state::unchanged)
- {
- if (t.is_a<dir> ())
- execute_recipe (a, t, nullptr /* recipe */);
- }
+ r = s.state == target_state::unchanged && t.is_a<dir> ()
+ ? execute_recipe (a, t, nullptr /* recipe */)
+ : s.state;
s.task_count.store (exec, memory_order_release);
- ctx.sched.resume (s.task_count);
+ ctx.sched->resume (s.task_count);
}
}
else
@@ -2107,12 +3041,14 @@ namespace build2
else assert (tc == exec);
}
- return t.executed_state (a, false);
+ return r ? *r : t.executed_state (a, false /* fail */);
}
bool
update_during_match (tracer& trace, action a, const target& t, timestamp ts)
{
+ // NOTE: see also clean_during_match() if changing anything here.
+
assert (a == perform_update_id);
// Note: this function is used to make sure header dependencies are up to
@@ -2184,6 +3120,11 @@ namespace build2
action a, target& t,
uintptr_t mask)
{
+ // NOTE: see also clean_during_match_prerequisites() if changing anything
+ // here.
+
+ assert (a == perform_update_id);
+
prerequisite_targets& pts (t.prerequisite_targets[a]);
// On the first pass detect and handle unchanged tragets. Note that we
@@ -2194,7 +3135,7 @@ namespace build2
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0)
+ if (mask == 0 || (p.include & mask) != 0)
{
if (p.target != nullptr)
{
@@ -2219,6 +3160,16 @@ namespace build2
if (n == 0)
return false;
+ // Provide additional information on what's going on.
+ //
+ auto df = make_diag_frame (
+ [&t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while updating during match prerequisites of "
+ << "target " << t;
+ });
+
context& ctx (t.ctx);
phase_switch ps (ctx, run_phase::execute);
@@ -2231,7 +3182,7 @@ namespace build2
#if 0
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0 && p.data != 0)
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
{
const target& pt (*p.target);
@@ -2266,7 +3217,7 @@ namespace build2
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0 && p.data != 0)
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
{
execute_direct_async (a, *p.target, busy, tc);
}
@@ -2278,7 +3229,7 @@ namespace build2
//
for (prerequisite_target& p: pts)
{
- if ((p.include & mask) != 0 && p.data != 0)
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
{
const target& pt (*p.target);
target_state ns (execute_complete (a, pt));
@@ -2300,6 +3251,188 @@ namespace build2
return r;
}
+ bool
+ clean_during_match (tracer& trace, action a, const target& t)
+ {
+ // Let's keep this as close to update_during_match() semantically as
+ // possible until we see a clear reason to deviate.
+
+ // We have a problem with fsdir{}: if the directory is not empty because
+ // there are other targets that depend on it and we execute it here and
+ // now, it will not remove the directory (because it's not yet empty) but
+ // will cause the target to be in the executed state, which means that
+ // when other targets try to execute it, it will be a noop and the
+ // directory will be left behind.
+
+ assert (a == perform_clean_id && !t.is_a<fsdir> ());
+
+ target_state os (t.matched_state (a));
+
+ if (os == target_state::unchanged)
+ return false;
+ else
+ {
+ target_state ns;
+ if (os != target_state::changed)
+ {
+ phase_switch ps (t.ctx, run_phase::execute);
+ ns = execute_direct_sync (a, t);
+ }
+ else
+ ns = os;
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << t
+ << "; old state " << os
+ << "; new state " << ns;});
+ return true;
+ }
+ else
+ return false;
+ }
+ }
+
+ bool
+ clean_during_match_prerequisites (tracer& trace,
+ action a, target& t,
+ uintptr_t mask)
+ {
+ // Let's keep this as close to update_during_match_prerequisites()
+ // semantically as possible until we see a clear reason to deviate.
+ //
+ // Currently the only substantial change is the reverse iteration order.
+
+ assert (a == perform_clean_id);
+
+ prerequisite_targets& pts (t.prerequisite_targets[a]);
+
+ // On the first pass detect and handle unchanged tragets. Note that we
+ // have to do it in a separate pass since we cannot call matched_state()
+ // once we've switched the phase.
+ //
+ size_t n (0);
+
+ for (prerequisite_target& p: pts)
+ {
+ if (mask == 0 || (p.include & mask) != 0)
+ {
+ if (p.target != nullptr)
+ {
+ const target& pt (*p.target);
+
+ assert (!pt.is_a<fsdir> ()); // See above.
+
+ target_state os (pt.matched_state (a));
+
+ if (os != target_state::unchanged)
+ {
+ ++n;
+ p.data = static_cast<uintptr_t> (os);
+ continue;
+ }
+ }
+
+ p.data = 0;
+ }
+ }
+
+ // If all unchanged, we are done.
+ //
+ if (n == 0)
+ return false;
+
+ // Provide additional information on what's going on.
+ //
+ auto df = make_diag_frame (
+ [&t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while cleaning during match prerequisites of "
+ << "target " << t;
+ });
+
+ context& ctx (t.ctx);
+
+ phase_switch ps (ctx, run_phase::execute);
+
+ bool r (false);
+
+ // @@ Maybe we should optimize for n == 1? Maybe we should just call
+ // smarter clean_during_match() in this case?
+ //
+#if 0
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ const target& pt (*p.target);
+
+ target_state os (static_cast<target_state> (p.data));
+ target_state ns (execute_direct_sync (a, pt));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
+ }
+#else
+
+ // Start asynchronous execution of prerequisites. Similar logic to
+ // straight_execute_members().
+ //
+ // Note that the target's task count is expected to be busy (since this
+ // function is called during match). And there don't seem to be any
+ // problems in using it for execute.
+ //
+ atomic_count& tc (t[a].task_count);
+
+ size_t busy (ctx.count_busy ());
+
+ wait_guard wg (ctx, busy, tc);
+
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ execute_direct_async (a, *p.target, busy, tc);
+ }
+ }
+
+ wg.wait ();
+
+ // Finish execution and process the result.
+ //
+ for (prerequisite_target& p: reverse_iterate (pts))
+ {
+ if ((mask == 0 || (p.include & mask) != 0) && p.data != 0)
+ {
+ const target& pt (*p.target);
+ target_state ns (execute_complete (a, pt));
+ target_state os (static_cast<target_state> (p.data));
+
+ if (ns != os && ns != target_state::unchanged)
+ {
+ l6 ([&]{trace << "cleaned " << pt
+ << "; old state " << os
+ << "; new state " << ns;});
+ r = true;
+ }
+
+ p.data = 0;
+ }
+ }
+#endif
+
+ return r;
+ }
+
static inline void
blank_adhoc_member (const target*&)
{
@@ -2611,7 +3744,7 @@ namespace build2
target_state
noop_action (action a, const target& t)
{
- text << "noop action triggered for " << diag_doing (a, t);
+ error << "noop action triggered for " << diag_doing (a, t);
assert (false); // We shouldn't be called (see set_recipe()).
return target_state::unchanged;
}
@@ -2631,7 +3764,7 @@ namespace build2
target_state gs (execute_impl (a, g, 0, nullptr));
if (gs == target_state::busy)
- ctx.sched.wait (ctx.count_executed (),
+ ctx.sched->wait (ctx.count_executed (),
g[a].task_count,
scheduler::work_none);
@@ -2711,7 +3844,7 @@ namespace build2
case rmdir_status::not_empty:
{
if (verb >= 3)
- text << dp << " is current working directory, not removing";
+ info << dp << " is current working directory, not removing";
break;
}
case rmdir_status::not_exist:
@@ -2739,7 +3872,8 @@ namespace build2
target_state
perform_clean_extra (action a, const file& ft,
const clean_extras& extras,
- const clean_adhoc_extras& adhoc_extras)
+ const clean_adhoc_extras& adhoc_extras,
+ bool show_adhoc)
{
context& ctx (ft.ctx);
@@ -2771,6 +3905,12 @@ namespace build2
// Now clean the ad hoc group file members, if any.
//
+ // While at it, also collect the group target keys if we are showing
+ // the members. But only those that exist (since we don't want to
+ // print any diagnostics if none of them exist).
+ //
+ vector<target_key> tks;
+
for (const target* m (ft.adhoc_member);
m != nullptr;
m = m->adhoc_member)
@@ -2811,18 +3951,38 @@ namespace build2
? target_state::changed
: target_state::unchanged);
- if (r == target_state::changed && ep.empty ())
- ep = *mp;
-
- er |= r;
+ if (r == target_state::changed)
+ {
+ if (show_adhoc && verb == 1)
+ tks.push_back (mf->key ());
+ else if (ep.empty ())
+ {
+ ep = *mp;
+ er |= r;
+ }
+ }
}
}
// Now clean the primary target and its prerequisited in the reverse order
// of update: first remove the file, then clean the prerequisites.
//
- if (clean && !fp.empty () && rmfile (fp, ft))
- tr = target_state::changed;
+ if (clean && !fp.empty ())
+ {
+ if (show_adhoc && verb == 1 && !tks.empty ())
+ {
+ if (rmfile (fp, ft, 2 /* verbosity */))
+ tks.insert (tks.begin (), ft.key ());
+
+ print_diag ("rm", move (tks));
+ tr = target_state::changed;
+ }
+ else
+ {
+ if (rmfile (fp, ft))
+ tr = target_state::changed;
+ }
+ }
// Update timestamp in case there are operations after us that could use
// the information.
@@ -2842,10 +4002,20 @@ namespace build2
{
if (verb > (ctx.current_diag_noise ? 0 : 1) && verb < 3)
{
- if (ed)
- text << "rm -r " << path_cast<dir_path> (ep);
- else
- text << "rm " << ep;
+ if (verb >= 2)
+ {
+ if (ed)
+ text << "rm -r " << path_cast<dir_path> (ep);
+ else
+ text << "rm " << ep;
+ }
+ else if (verb)
+ {
+ if (ed)
+ print_diag ("rm -r", path_cast<dir_path> (ep));
+ else
+ print_diag ("rm", ep);
+ }
}
}
@@ -2878,10 +4048,17 @@ namespace build2
{
if (const target* m = gv.members[gv.count - 1])
{
- if (rmfile (m->as<file> ().path (), *m))
+ // Note that at the verbosity level 1 we don't show the removal of
+ // each group member. This is consistent with what is normally shown
+ // during update.
+ //
+ if (rmfile (m->as<file> ().path (), *m, 2 /* verbosity */))
tr |= target_state::changed;
}
}
+
+ if (tr == target_state::changed && verb == 1)
+ print_diag ("rm", g);
}
g.mtime (timestamp_nonexistent);
@@ -2890,10 +4067,20 @@ namespace build2
{
if (verb > (ctx.current_diag_noise ? 0 : 1) && verb < 3)
{
- if (ed)
- text << "rm -r " << path_cast<dir_path> (ep);
- else
- text << "rm " << ep;
+ if (verb >= 2)
+ {
+ if (ed)
+ text << "rm -r " << path_cast<dir_path> (ep);
+ else
+ text << "rm " << ep;
+ }
+ else if (verb)
+ {
+ if (ed)
+ print_diag ("rm -r", path_cast<dir_path> (ep));
+ else
+ print_diag ("rm", ep);
+ }
}
}
diff --git a/libbuild2/algorithm.hxx b/libbuild2/algorithm.hxx
index 75976bf..a4feaea 100644
--- a/libbuild2/algorithm.hxx
+++ b/libbuild2/algorithm.hxx
@@ -78,6 +78,9 @@ namespace build2
pair<target&, ulock>
search_locked (const target&, const target_type&, const prerequisite_key&);
+ const target*
+ search_existing (context&, const target_type&, const prerequisite_key&);
+
const target&
search_new (context&, const target_type&, const prerequisite_key&);
@@ -145,6 +148,15 @@ namespace build2
const string* ext = nullptr,
const scope* = nullptr);
+ template <typename T>
+ const T*
+ search_existing (context&,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext = nullptr,
+ const scope* = nullptr);
+
// Search for a target identified by the name. The semantics is "as if" we
// first created a prerequisite based on this name in exactly the same way
// as the parser would and then searched based on this prerequisite. If the
@@ -152,15 +164,13 @@ namespace build2
// argument.
//
LIBBUILD2_SYMEXPORT const target&
- search (const target&, name, const scope&, const target_type* = nullptr);
+ search (const target&, name&&, const scope&, const target_type* = nullptr);
- // Note: returns NULL for unknown target types. Note that unlike the above
- // version, these ones can be called during the load and execute phases.
+ // Note: returns NULL for unknown target types. Note also that unlike the
+ // above version, these can be called during the load and execute phases.
//
LIBBUILD2_SYMEXPORT const target*
- search_existing (const name&,
- const scope&,
- const dir_path& out = dir_path ());
+ search_existing (const name&, const scope&, const dir_path& out = dir_path ());
LIBBUILD2_SYMEXPORT const target*
search_existing (const names&, const scope&);
@@ -179,17 +189,20 @@ namespace build2
action_type action;
target_type* target = nullptr;
size_t offset = 0;
+ bool first;
explicit operator bool () const {return target != nullptr;}
+ // Note: achieved offset is preserved.
+ //
void
unlock ();
// Movable-only type with move-assignment only to NULL lock.
//
target_lock () = default;
- target_lock (target_lock&&);
- target_lock& operator= (target_lock&&);
+ target_lock (target_lock&&) noexcept;
+ target_lock& operator= (target_lock&&) noexcept;
target_lock (const target_lock&) = delete;
target_lock& operator= (const target_lock&) = delete;
@@ -197,13 +210,14 @@ namespace build2
// Implementation details.
//
~target_lock ();
- target_lock (action_type, target_type*, size_t);
+ target_lock (action_type, target_type*, size_t, bool);
struct data
{
action_type action;
target_type* target;
size_t offset;
+ bool first;
};
data
@@ -241,10 +255,10 @@ namespace build2
// If the target is already applied (for this action) or executed, then no
// lock is acquired. Otherwise, unless matched is true, the target must not
- // be matched but not yet applied for this action (and if that's the case
- // and matched is true, then you get a locked target that you should
- // probably check for consistency, for exmaple, by comparing the matched
- // rule).
+ // be in the matched but not yet applied state for this action (and if
+ // that's the case and matched is true, then you get a locked target that
+ // you should probably check for consistency, for example, by comparing the
+ // matched rule).
//
// @@ MT fuzzy: what if it is already in the desired state, why assert?
// Currently we only use it with match_recipe/rule() and if it is matched
@@ -260,21 +274,27 @@ namespace build2
//
// Note that here and in find_adhoc_member() below (as well as in
// perform_clean_extra()) we use target type (as opposed to, say, type and
- // name) as the member's identity. This fits our current needs where every
+ // name) as the member's identity. This fits common needs where every
// (rule-managed) ad hoc member has a unique target type and we have no need
// for multiple members of the same type. This also allows us to support
// things like changing the ad hoc member name by declaring it in a
- // buildfile.
+ // buildfile. However, if this semantics is not appropriate, use the
+ // add_adhoc_member_identity() version below.
+ //
+ // Note that the current implementation asserts if the member target already
+ // exists but is not already a member.
//
LIBBUILD2_SYMEXPORT target&
add_adhoc_member (target&,
const target_type&,
dir_path dir,
dir_path out,
- string name);
+ string name,
+ optional<string> ext);
// If the extension is specified then it is added to the member's target
- // name.
+ // name as a second-level extension (the first-level extension, if any,
+ // comes from the target type).
//
target&
add_adhoc_member (target&, const target_type&, const char* ext = nullptr);
@@ -293,6 +313,24 @@ namespace build2
return add_adhoc_member<T> (g, T::static_type, e);
}
+ // Add an ad hoc member using the member identity (as opposed to only its
+ // type as in add_adhoc_member() above) to suppress duplicates. See also
+ // dyndep::inject_adhoc_group_member().
+ //
+ // Return the member target as well as an indication of whether it was added
+ // or was already a member. Fail if the member target already exists but is
+ // not a member since it's not possible to make it a member in an MT-safe
+ // manner.
+ //
+ LIBBUILD2_SYMEXPORT pair<target&, bool>
+ add_adhoc_member_identity (target&,
+ const target_type&,
+ dir_path dir,
+ dir_path out,
+ string name,
+ optional<string> ext,
+ const location& = location ());
+
// Find an ad hoc member of the specified target type returning NULL if not
// found.
//
@@ -347,18 +385,34 @@ namespace build2
// to be unchanged after match. If it is unmatch::safe, then unmatch the
// target if it is safe (this includes unchanged or if we know that someone
// else will execute this target). Return true in first half of the pair if
- // unmatch succeeded. Always throw if failed.
+ // unmatch succeeded. Always throw if failed. Note that unmatching may not
+ // play well with options -- if unmatch succeeds, the options that have been
+ // passed to match will not be cleared.
//
enum class unmatch {none, unchanged, safe};
target_state
- match_sync (action, const target&, bool fail = true);
+ match_sync (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
pair<bool, target_state>
- try_match_sync (action, const target&, bool fail = true);
+ try_match_sync (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
pair<bool, target_state>
- match_sync (action, const target&, unmatch);
+ match_sync (action, const target&,
+ unmatch,
+ uint64_t options = match_extra::all_options);
+
+ // As above but only match the target (unless already matched) without
+ // applying the match (which is normally done with match_sync()). You will
+ // most likely regret using this function.
+ //
+ LIBBUILD2_SYMEXPORT void
+ match_only_sync (action, const target&,
+ uint64_t options = match_extra::all_options);
// Start asynchronous match. Return target_state::postponed if the
// asynchronous operation has been started and target_state::busy if the
@@ -370,28 +424,60 @@ namespace build2
// failed. Otherwise, throw the failed exception if keep_going is false and
// return target_state::failed otherwise.
//
+ // Note: same options must be passed to match_async() and match_complete().
+ //
target_state
match_async (action, const target&,
size_t start_count, atomic_count& task_count,
+ uint64_t options = match_extra::all_options,
bool fail = true);
target_state
- match_complete (action, const target&, bool fail = true);
+ match_complete (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
pair<bool, target_state>
- match_complete (action, const target&, unmatch);
+ match_complete (action, const target&,
+ unmatch,
+ uint64_t options = match_extra::all_options);
+
+ // As above but without incrementing the target's dependents count. Should
+ // be executed with execute_direct_*().
+ //
+ // For async, call match_async() followed by match_direct_complete().
+ //
+ target_state
+ match_direct_sync (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
+
+ target_state
+ match_direct_complete (action, const target&,
+ uint64_t options = match_extra::all_options,
+ bool fail = true);
// Apply the specified recipe directly and without incrementing the
- // dependency counts. The target must be locked.
+ // dependency counts. The target must be locked (and it remains locked
+ // after this function returns).
+ //
+ // Note that there will be no way to rematch on options change (since there
+ // is no rule), so passing anything other than all_options is most likely a
+ // bad idea. Passing 0 for options is illegal.
//
void
- match_recipe (target_lock&, recipe);
+ match_recipe (target_lock&,
+ recipe,
+ uint64_t options = match_extra::all_options);
// Match (but do not apply) the specified rule directly and without
- // incrementing the dependency counts. The target must be locked.
+ // incrementing the dependency counts. The target must be locked (and it
+ // remains locked after this function returns).
//
void
- match_rule (target_lock&, const rule_match&);
+ match_rule (target_lock&,
+ const rule_match&,
+ uint64_t options = match_extra::all_options);
// Match a "delegate rule" from withing another rules' apply() function
// avoiding recursive matches (thus the third argument). Unless try_match is
@@ -400,7 +486,10 @@ namespace build2
// See also the companion execute_delegate().
//
recipe
- match_delegate (action, target&, const rule&, bool try_match = false);
+ match_delegate (action, target&,
+ const rule&,
+ uint64_t options = match_extra::all_options,
+ bool try_match = false);
// Incrementing the dependency counts of the specified target.
//
@@ -408,13 +497,43 @@ namespace build2
match_inc_dependents (action, const target&);
// Match (synchronously) a rule for the inner operation from withing the
- // outer rule's apply() function. See also the companion execute_inner().
+ // outer rule's apply() function. See also the companion execute_inner()
+ // and inner_recipe.
//
target_state
- match_inner (action, const target&);
+ match_inner (action, const target&,
+ uint64_t options = match_extra::all_options);
pair<bool, target_state>
- match_inner (action, const target&, unmatch);
+ match_inner (action, const target&,
+ unmatch,
+ uint64_t options = match_extra::all_options);
+
+ // Re-match with new options a target that has already been matched with one
+ // of the match_*() functions. Note that naturally you cannot rematch a
+ // target that you have unmatched.
+ //
+ // Note also that there is no way to check if the rematch is unnecessary
+ // (i.e., because the target is already matched with this option) because
+ // that would require MT-safety considerations (since there could be a
+ // concurrent rematch). Instead, you should rematch unconditionally and if
+ // the option is already present, it will be a cheap noop.
+ //
+ target_state
+ rematch_sync (action, const target&,
+ uint64_t options,
+ bool fail = true);
+
+ target_state
+ rematch_async (action, const target&,
+ size_t start_count, atomic_count& task_count,
+ uint64_t options,
+ bool fail = true);
+
+ target_state
+ rematch_complete (action, const target&,
+ uint64_t options,
+ bool fail = true);
// The standard prerequisite search and match implementations. They call
// search() (unless a custom is provided) and then match() (unless custom
@@ -444,6 +563,19 @@ namespace build2
void
match_prerequisites (action, target&, const match_search& = nullptr);
+ // As above but only do search. The match part can be performed later, for
+ // example, with the match_members() function below. The typical call
+ // sequence would be:
+ //
+ // inject_fsdir (a, t, false /* match */);
+ // search_prerequisite_members (a, t); // Potentially with filter.
+ // pattern->apply_prerequisites (a, t, bs, me); // If ad hoc pattern.
+ // <dependency synthesis> // Optional.
+ // match_members (a, t, t.prerequisite_targets[a]);
+ //
+ void
+ search_prerequisites (action, target&, const match_search& = nullptr);
+
// As above but go into group members.
//
// Note that if we are cleaning, this function doesn't go into group
@@ -459,39 +591,48 @@ namespace build2
match_prerequisite_members (action, target&,
const match_search_member& = nullptr);
+ void
+ search_prerequisite_members (action, target&,
+ const match_search_member& = nullptr);
+
// As above but omit prerequisites that are not in the specified scope.
//
void
match_prerequisites (action, target&, const scope&);
void
+ search_prerequisites (action, target&, const scope&);
+
+ void
match_prerequisite_members (action, target&, const scope&);
+ void
+ search_prerequisite_members (action, target&, const scope&);
+
// Match (already searched) members of a group or similar prerequisite-like
// dependencies. Similar in semantics to match_prerequisites(). Any marked
// target pointers are skipped.
//
- // T can only be const target* or prerequisite_target.
- //
- template <typename T>
- void
- match_members (action, target&, T const*, size_t);
+ LIBBUILD2_SYMEXPORT void
+ match_members (action, const target&, const target* const*, size_t);
template <size_t N>
inline void
- match_members (action a, target& t, const target* (&ts)[N])
+ match_members (action a, const target& t, const target* (&ts)[N])
{
match_members (a, t, ts, N);
}
- inline void
- match_members (action a,
- target& t,
- prerequisite_targets& ts,
- size_t start = 0)
- {
- match_members (a, t, ts.data () + start, ts.size () - start);
- }
+ // As above plus if the include mask (first) and value (second) are
+ // specified, then only match prerequisites that satisfy the
+ // ((prerequisite_target::include & mask) == value) condition.
+ //
+ LIBBUILD2_SYMEXPORT void
+ match_members (action,
+ const target&,
+ prerequisite_targets&,
+ size_t start = 0,
+ pair<uintptr_t, uintptr_t> include = {0, 0});
// Unless already known, match, and, if necessary, execute the group in
// order to resolve its members list. Note that even after that the member's
@@ -531,17 +672,35 @@ namespace build2
// Inject dependency on the target's directory fsdir{}, unless it is in the
// src tree or is outside of any project (say, for example, an installation
// directory). If the parent argument is true, then inject the parent
- // directory of a target that is itself a directory (name is empty). Return
- // the injected target or NULL. Normally this function is called from the
- // rule's apply() function.
+ // directory of a target that is itself a directory (name is empty). Match
+ // unless match is false and return the injected target or NULL. Normally
+ // this function is called from the rule's apply() function.
+ //
+ // The match=false semantics is useful when you wish to first collect all
+ // the prerequisites targets and then match them all as a separate step, for
+ // example, with match_members().
+ //
+ // As an extension, unless prereq is false, this function will also search
+ // for an existing fsdir{} prerequisite for the directory and if one exists,
+ // return that (even if the target is in the src tree). In this case, the
+ // injected fsdir{} (if any) must be the first prerequisite in this target's
+ // prerequisite_targets, which is relied upon by the match_prerequisite*()
+ // family of functions to suppress the duplicate addition.
//
- // As an extension, this function will also search for an existing fsdir{}
- // prerequisite for the directory and if one exists, return that (even if
- // the target is in src tree). This can be used, for example, to place
- // output into an otherwise non-existent directory.
+ // Note that the explicit fsdir{} prerequisite is used to place output into an
+ // otherwise non-existent (in src) directory.
//
LIBBUILD2_SYMEXPORT const fsdir*
- inject_fsdir (action, target&, bool parent = true);
+ inject_fsdir (action, target&,
+ bool match = true,
+ bool prereq = true,
+ bool parent = true);
+
+ // As above, but match the injected fsdir{} target directly (that is,
+ // without incrementing the dependency counts).
+ //
+ LIBBUILD2_SYMEXPORT const fsdir*
+ inject_fsdir_direct (action, target&, bool prereq = true, bool parent = true);
// Execute the action on target, assuming a rule has been matched and the
// recipe for this action has been set. This is the synchrounous executor
@@ -583,7 +742,8 @@ namespace build2
// Note that the returned target state is for the inner operation. The
// appropriate usage is to call this function from the outer operation's
// recipe and to factor the obtained state into the one returned (similar to
- // how we do it for prerequisites).
+ // how we do it for prerequisites). Or, if factoring is not needed, simply
+ // return inner_recipe as outer recipe.
//
// Note: waits for the completion if the target is busy and translates
// target_state::failed to the failed exception.
@@ -600,7 +760,7 @@ namespace build2
// translates target_state::failed to the failed exception.
//
target_state
- execute_direct_sync (action, const target&);
+ execute_direct_sync (action, const target&, bool fail = true);
target_state
execute_direct_async (action, const target&,
@@ -614,7 +774,8 @@ namespace build2
//
// Note that such a target must still be updated normally during the execute
// phase in order to keep the dependency counts straight (at which point the
- // target state/timestamp will be re-incorporated into the result).
+ // target state/timestamp will be re-incorporated into the result). Unless
+ // it was matched direct.
//
LIBBUILD2_SYMEXPORT bool
update_during_match (tracer&,
@@ -623,13 +784,35 @@ namespace build2
// As above, but update all the targets in prerequisite_targets that have
// the specified mask in prerequisite_target::include. Return true if any of
- // them have changed.
+ // them have changed. If mask is 0, then update all the targets.
//
// Note that this function spoils prerequisite_target::data (which is used
// for temporary storage). But it resets data to 0 once done.
//
LIBBUILD2_SYMEXPORT bool
- update_during_match_prerequisites (tracer&, action, target&, uintptr_t mask);
+ update_during_match_prerequisites (
+ tracer&,
+ action, target&,
+ uintptr_t mask = prerequisite_target::include_udm);
+
+ // Equivalent functions for clean. Note that if possible you should leave
+ // cleaning to normal execute and these functions should only be used in
+ // special cases where this is not possible.
+ //
+ // Note also that neither function should be called on fsdir{} since it's
+ // hard to guarantee such an execution won't be too early (see the
+ // implementation for details). If you do need to clean fsdir{} during
+ // match, use fsdir_rule::perform_clean_direct() instead.
+ //
+ LIBBUILD2_SYMEXPORT bool
+ clean_during_match (tracer&,
+ action, const target&);
+
+ LIBBUILD2_SYMEXPORT bool
+ clean_during_match_prerequisites (
+ tracer&,
+ action, target&,
+ uintptr_t mask = prerequisite_target::include_udm);
// The default prerequisite execute implementation. Call execute_async() on
// each non-ignored (non-NULL) prerequisite target in a loop and then wait
@@ -779,8 +962,9 @@ namespace build2
// Call straight or reverse depending on the current mode.
//
+ template <typename T>
target_state
- execute_members (action, const target&, const target*[], size_t);
+ execute_members (action, const target&, T[], size_t);
template <size_t N>
inline target_state
@@ -820,8 +1004,8 @@ namespace build2
LIBBUILD2_SYMEXPORT target_state
group_action (action, const target&);
- // Standard perform(clean) action implementation for the file target
- // (or derived).
+ // Standard perform(clean) action implementation for the file target (or
+ // derived). Note: also cleans ad hoc group members, if any.
//
LIBBUILD2_SYMEXPORT target_state
perform_clean (action, const target&);
@@ -831,8 +1015,8 @@ namespace build2
LIBBUILD2_SYMEXPORT target_state
perform_clean_depdb (action, const target&);
- // As above but clean the target group. The group should be an mtime_target
- // and members should be files.
+ // As above but clean the (non-ad hoc) target group. The group should be an
+ // mtime_target and members should be files.
//
LIBBUILD2_SYMEXPORT target_state
perform_clean_group (action, const target&);
@@ -843,21 +1027,22 @@ namespace build2
LIBBUILD2_SYMEXPORT target_state
perform_clean_group_depdb (action, const target&);
- // Helper for custom perform(clean) implementations that cleans extra files
- // and directories (recursively) specified as a list of either absolute
- // paths or "path derivation directives". The directive string can be NULL,
- // or empty in which case it is ignored. If the last character in a
- // directive is '/', then the resulting path is treated as a directory
- // rather than a file. The directive can start with zero or more '-'
- // characters which indicate the number of extensions that should be
- // stripped before the new extension (if any) is added (so if you want to
- // strip the extension, specify just "-"). For example:
+ // Helpers for custom perform(clean) implementations that, besides the
+ // target and group members, can also clean extra files and directories
+ // (recursively) specified as a list of either absolute paths or "path
+ // derivation directives". The directive string can be NULL, or empty in
+ // which case it is ignored. If the last character in a directive is '/',
+ // then the resulting path is treated as a directory rather than a file. The
+ // directive can start with zero or more '-' characters which indicate the
+ // number of extensions that should be stripped before the new extension (if
+ // any) is added (so if you want to strip the extension, specify just
+ // "-"). For example:
//
// perform_clean_extra (a, t, {".d", ".dlls/", "-.dll"});
//
// The extra files/directories are removed first in the specified order
- // followed by the ad hoc group member, then target itself, and, finally,
- // the prerequisites in the reverse order.
+ // followed by the group member, then target itself, and, finally, the
+ // prerequisites in the reverse order.
//
// You can also clean extra files derived from ad hoc group members that are
// "indexed" using their target types (see add/find_adhoc_member() for
@@ -876,16 +1061,25 @@ namespace build2
using clean_adhoc_extras = small_vector<clean_adhoc_extra, 2>;
+ // If show_adhoc_members is true, then print the entire ad hoc group instead
+ // of just the primary member at verbosity level 1 (see print_diag() for
+ // details). Note that the default is false because normally a rule
+ // implemented in C++ would only use an ad hoc group for subordinate members
+ // (.pdb, etc) and would use a dedicated target group type if the members
+ // are equal.
+ //
LIBBUILD2_SYMEXPORT target_state
perform_clean_extra (action, const file&,
const clean_extras&,
- const clean_adhoc_extras& = {});
+ const clean_adhoc_extras& = {},
+ bool show_adhoc_members = false);
inline target_state
perform_clean_extra (action a, const file& f,
- initializer_list<const char*> e)
+ initializer_list<const char*> e,
+ bool show_adhoc_members = false)
{
- return perform_clean_extra (a, f, clean_extras (e));
+ return perform_clean_extra (a, f, clean_extras (e), {}, show_adhoc_members);
}
// Similar to perform_clean_group() but with extras similar to
@@ -905,6 +1099,8 @@ namespace build2
// Update/clean a backlink issuing appropriate diagnostics at appropriate
// levels depending on the overload and the changed argument.
//
+ // Note that these functions assume (target.leaf() == link.leaf ()).
+ //
enum class backlink_mode
{
link, // Make a symbolic link if possible, hard otherwise.
@@ -927,6 +1123,8 @@ namespace build2
bool changed,
backlink_mode = backlink_mode::link);
+ // Note: verbosity should be 2 or greater.
+ //
LIBBUILD2_SYMEXPORT void
update_backlink (context&,
const path& target,
@@ -934,6 +1132,8 @@ namespace build2
backlink_mode = backlink_mode::link,
uint16_t verbosity = 3);
+ // Note: verbosity should be 2 or greater.
+ //
LIBBUILD2_SYMEXPORT void
clean_backlink (context&,
const path& link,
diff --git a/libbuild2/algorithm.ixx b/libbuild2/algorithm.ixx
index 2637349..836dbed 100644
--- a/libbuild2/algorithm.ixx
+++ b/libbuild2/algorithm.ixx
@@ -45,6 +45,17 @@ namespace build2
k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
}
+ inline const target*
+ search_existing (context& ctx,
+ const target_type& tt,
+ const prerequisite_key& k)
+ {
+ return search_existing (
+ ctx,
+ prerequisite_key {
+ k.proj, {&tt, k.tk.dir, k.tk.out, k.tk.name, k.tk.ext}, k.scope});
+ }
+
inline const target&
search_new (context& ctx,
const target_type& tt,
@@ -187,15 +198,32 @@ namespace build2
t, T::static_type, dir, out, name, ext, scope).template as<T> ();
}
+ template <typename T>
+ inline const T*
+ search_existing (context& ctx,
+ const dir_path& dir,
+ const dir_path& out,
+ const string& name,
+ const string* ext,
+ const scope* scope)
+ {
+ const target* r (
+ search_existing (
+ ctx, T::static_type, dir, out, name, ext, scope));
+ return r != nullptr ? &r->template as<T> () : nullptr;
+ }
+
LIBBUILD2_SYMEXPORT target_lock
- lock_impl (action, const target&, optional<scheduler::work_queue>);
+ lock_impl (action, const target&,
+ optional<scheduler::work_queue>,
+ uint64_t = 0);
LIBBUILD2_SYMEXPORT void
unlock_impl (action, target&, size_t);
inline target_lock::
- target_lock (action_type a, target_type* t, size_t o)
- : action (a), target (t), offset (o)
+ target_lock (action_type a, target_type* t, size_t o, bool f)
+ : action (a), target (t), offset (o), first (f)
{
if (target != nullptr)
prev = stack (this);
@@ -234,7 +262,7 @@ namespace build2
inline auto target_lock::
release () -> data
{
- data r {action, target, offset};
+ data r {action, target, offset, first};
if (target != nullptr)
{
@@ -258,7 +286,7 @@ namespace build2
}
inline target_lock::
- target_lock (target_lock&& x)
+ target_lock (target_lock&& x) noexcept
: action (x.action), target (x.target), offset (x.offset)
{
if (target != nullptr)
@@ -278,7 +306,7 @@ namespace build2
}
inline target_lock& target_lock::
- operator= (target_lock&& x)
+ operator= (target_lock&& x) noexcept
{
if (this != &x)
{
@@ -346,7 +374,7 @@ namespace build2
n += e;
}
- return add_adhoc_member (t, tt, t.dir, t.out, move (n));
+ return add_adhoc_member (t, tt, t.dir, t.out, move (n), nullopt /* ext */);
}
inline target*
@@ -366,13 +394,18 @@ namespace build2
}
LIBBUILD2_SYMEXPORT const rule_match*
- match_rule (action, target&, const rule* skip, bool try_match = false);
+ match_rule_impl (action, target&,
+ uint64_t options,
+ const rule* skip,
+ bool try_match = false,
+ match_extra* = nullptr);
LIBBUILD2_SYMEXPORT recipe
apply_impl (action, target&, const rule_match&);
LIBBUILD2_SYMEXPORT pair<bool, target_state>
match_impl (action, const target&,
+ uint64_t options,
size_t, atomic_count*,
bool try_match = false);
@@ -384,11 +417,11 @@ namespace build2
}
inline target_state
- match_sync (action a, const target& t, bool fail)
+ match_sync (action a, const target& t, uint64_t options, bool fail)
{
assert (t.ctx.phase == run_phase::match);
- target_state r (match_impl (a, t, 0, nullptr).second);
+ target_state r (match_impl (a, t, options, 0, nullptr).second);
if (r != target_state::failed)
match_inc_dependents (a, t);
@@ -399,12 +432,12 @@ namespace build2
}
inline pair<bool, target_state>
- try_match_sync (action a, const target& t, bool fail)
+ try_match_sync (action a, const target& t, uint64_t options, bool fail)
{
assert (t.ctx.phase == run_phase::match);
pair<bool, target_state> r (
- match_impl (a, t, 0, nullptr, true /* try_match */));
+ match_impl (a, t, options, 0, nullptr, true /* try_match */));
if (r.first)
{
@@ -418,11 +451,11 @@ namespace build2
}
inline pair<bool, target_state>
- match_sync (action a, const target& t, unmatch um)
+ match_sync (action a, const target& t, unmatch um, uint64_t options)
{
assert (t.ctx.phase == run_phase::match);
- target_state s (match_impl (a, t, 0, nullptr).second);
+ target_state s (match_impl (a, t, options, 0, nullptr).second);
if (s == target_state::failed)
throw failed ();
@@ -449,7 +482,7 @@ namespace build2
// cannot change their mind).
//
if ((s == target_state::unchanged && t.group == nullptr) ||
- t[a].dependents.load (memory_order_consume) != 0)
+ t[a].dependents.load (memory_order_relaxed) != 0)
return make_pair (true, s);
break;
@@ -463,39 +496,76 @@ namespace build2
inline target_state
match_async (action a, const target& t,
size_t sc, atomic_count& tc,
+ uint64_t options,
bool fail)
{
context& ctx (t.ctx);
assert (ctx.phase == run_phase::match);
- target_state r (match_impl (a, t, sc, &tc).second);
+ target_state r (match_impl (a, t, options, sc, &tc).second);
- if (fail && !ctx.keep_going && r == target_state::failed)
+ if (r == target_state::failed && fail && !ctx.keep_going)
throw failed ();
return r;
}
inline target_state
- match_complete (action a, const target& t, bool fail)
+ match_complete (action a, const target& t, uint64_t options, bool fail)
{
- return match_sync (a, t, fail);
+ return match_sync (a, t, options, fail);
}
inline pair<bool, target_state>
- match_complete (action a, const target& t, unmatch um)
+ match_complete (action a, const target& t, unmatch um, uint64_t options)
+ {
+ return match_sync (a, t, um, options);
+ }
+
+ inline target_state
+ match_direct_sync (action a, const target& t, uint64_t options, bool fail)
+ {
+ assert (t.ctx.phase == run_phase::match);
+
+ target_state r (match_impl (a, t, options, 0, nullptr).second);
+
+ if (r == target_state::failed && fail)
+ throw failed ();
+
+ return r;
+ }
+
+ inline target_state
+ match_direct_complete (action a, const target& t,
+ uint64_t options,
+ bool fail)
{
- return match_sync (a, t, um);
+ return match_direct_sync (a, t, options, fail);
}
- // Clear rule match-specific target data.
+ // Clear rule match-specific target data (except match_extra).
//
inline void
clear_target (action a, target& t)
{
- t[a].vars.clear ();
+ target::opstate& s (t.state[a]);
+ s.recipe = nullptr;
+ s.recipe_keep = false;
+ s.resolve_counted = false;
+ s.vars.clear ();
t.prerequisite_targets[a].clear ();
- t.clear_data (a);
+ }
+
+ LIBBUILD2_SYMEXPORT void
+ set_rule_trace (target_lock&, const rule_match*);
+
+ inline void
+ set_rule (target_lock& l, const rule_match* r)
+ {
+ if (l.target->ctx.trace_match == nullptr)
+ (*l.target)[l.action].rule = r;
+ else
+ set_rule_trace (l, r);
}
inline void
@@ -542,60 +612,101 @@ namespace build2
}
inline void
- match_recipe (target_lock& l, recipe r)
+ match_recipe (target_lock& l, recipe r, uint64_t options)
{
- assert (l.target != nullptr &&
- l.offset != target::offset_matched &&
+ assert (options != 0 &&
+ l.target != nullptr &&
+ l.offset < target::offset_matched &&
l.target->ctx.phase == run_phase::match);
+ match_extra& me ((*l.target)[l.action].match_extra);
+
+ me.reinit (false /* fallback */);
+ me.cur_options = options; // Already applied, so cur_, not new_options.
+ me.cur_options_.store (me.cur_options, memory_order_relaxed);
clear_target (l.action, *l.target);
- (*l.target)[l.action].rule = nullptr; // No rule.
+ set_rule (l, nullptr); // No rule.
set_recipe (l, move (r));
l.offset = target::offset_applied;
}
inline void
- match_rule (target_lock& l, const rule_match& r)
+ match_rule (target_lock& l, const rule_match& r, uint64_t options)
{
assert (l.target != nullptr &&
- l.offset != target::offset_matched &&
+ l.offset < target::offset_matched &&
l.target->ctx.phase == run_phase::match);
+ match_extra& me ((*l.target)[l.action].match_extra);
+
+ me.reinit (false /* fallback */);
+ me.new_options = options;
clear_target (l.action, *l.target);
- (*l.target)[l.action].rule = &r;
+ set_rule (l, &r);
l.offset = target::offset_matched;
}
inline recipe
- match_delegate (action a, target& t, const rule& dr, bool try_match)
+ match_delegate (action a, target& t,
+ const rule& dr,
+ uint64_t options,
+ bool try_match)
{
assert (t.ctx.phase == run_phase::match);
// Note: we don't touch any of the t[a] state since that was/will be set
// for the delegating rule.
//
- const rule_match* r (match_rule (a, t, &dr, try_match));
+ const rule_match* r (match_rule_impl (a, t, options, &dr, try_match));
return r != nullptr ? apply_impl (a, t, *r) : empty_recipe;
}
inline target_state
- match_inner (action a, const target& t)
+ match_inner (action a, const target& t, uint64_t options)
{
// In a sense this is like any other dependency.
//
assert (a.outer ());
- return match_sync (a.inner_action (), t);
+ return match_sync (a.inner_action (), t, options);
}
inline pair<bool, target_state>
- match_inner (action a, const target& t, unmatch um)
+ match_inner (action a, const target& t, unmatch um, uint64_t options)
{
assert (a.outer ());
- return match_sync (a.inner_action (), t, um);
+ return match_sync (a.inner_action (), t, um, options);
+ }
+
+ // Note: rematch is basically normal match but without the counts increment,
+ // so we just delegate to match_direct_*().
+ //
+ inline target_state
+ rematch_sync (action a, const target& t,
+ uint64_t options,
+ bool fail)
+ {
+ return match_direct_sync (a, t, options, fail);
+ }
+
+ inline target_state
+ rematch_async (action a, const target& t,
+ size_t start_count, atomic_count& task_count,
+ uint64_t options,
+ bool fail)
+ {
+ return match_async (a, t, start_count, task_count, options, fail);
+ }
+
+ inline target_state
+ rematch_complete (action a, const target& t,
+ uint64_t options,
+ bool fail)
+ {
+ return match_direct_complete (a, t, options, fail);
}
LIBBUILD2_SYMEXPORT void
- resolve_group_impl (action, const target&, target_lock);
+ resolve_group_impl (target_lock&&);
inline const target*
resolve_group (action a, const target& t)
@@ -615,7 +726,7 @@ namespace build2
// then unlock and return.
//
if (t.group == nullptr && l.offset < target::offset_tried)
- resolve_group_impl (a, t, move (l));
+ resolve_group_impl (move (l));
break;
}
@@ -634,12 +745,16 @@ namespace build2
}
LIBBUILD2_SYMEXPORT void
- match_prerequisites (action, target&, const match_search&, const scope*);
+ match_prerequisites (action, target&,
+ const match_search&,
+ const scope*,
+ bool search_only);
LIBBUILD2_SYMEXPORT void
match_prerequisite_members (action, target&,
const match_search_member&,
- const scope*);
+ const scope*,
+ bool search_only);
inline void
match_prerequisites (action a, target& t, const match_search& ms)
@@ -650,7 +765,21 @@ namespace build2
ms,
(a.operation () != clean_id || t.is_a<alias> ()
? nullptr
- : &t.root_scope ()));
+ : &t.root_scope ()),
+ false);
+ }
+
+ inline void
+ search_prerequisites (action a, target& t, const match_search& ms)
+ {
+ match_prerequisites (
+ a,
+ t,
+ ms,
+ (a.operation () != clean_id || t.is_a<alias> ()
+ ? nullptr
+ : &t.root_scope ()),
+ true);
}
inline void
@@ -658,13 +787,16 @@ namespace build2
const match_search_member& msm)
{
if (a.operation () != clean_id || t.is_a<alias> ())
- match_prerequisite_members (a, t, msm, nullptr);
+ match_prerequisite_members (a, t, msm, nullptr, false);
else
{
// Note that here we don't iterate over members even for see-through
// groups since the group target should clean eveything up. A bit of an
// optimization.
//
+ // @@ TMP: I wonder if this still holds for the new group semantics
+ // we have in Qt automoc? Also below.
+ //
match_search ms (
msm
? [&msm] (action a,
@@ -676,20 +808,62 @@ namespace build2
}
: match_search ());
- match_prerequisites (a, t, ms, &t.root_scope ());
+ match_prerequisites (a, t, ms, &t.root_scope (), false);
+ }
+ }
+
+ inline void
+ search_prerequisite_members (action a, target& t,
+ const match_search_member& msm)
+ {
+ if (a.operation () != clean_id || t.is_a<alias> ())
+ match_prerequisite_members (a, t, msm, nullptr, true);
+ else
+ {
+ // Note that here we don't iterate over members even for see-through
+ // groups since the group target should clean eveything up. A bit of an
+ // optimization.
+ //
+ // @@ TMP: I wonder if this still holds for the new group semantics
+ // we have in Qt automoc? Also above.
+ //
+ match_search ms (
+ msm
+ ? [&msm] (action a,
+ const target& t,
+ const prerequisite& p,
+ include_type i)
+ {
+ return msm (a, t, prerequisite_member {p, nullptr}, i);
+ }
+ : match_search ());
+
+ match_prerequisites (a, t, ms, &t.root_scope (), true);
}
}
inline void
match_prerequisites (action a, target& t, const scope& s)
{
- match_prerequisites (a, t, nullptr, &s);
+ match_prerequisites (a, t, nullptr, &s, false);
+ }
+
+ inline void
+ search_prerequisites (action a, target& t, const scope& s)
+ {
+ match_prerequisites (a, t, nullptr, &s, true);
}
inline void
match_prerequisite_members (action a, target& t, const scope& s)
{
- match_prerequisite_members (a, t, nullptr, &s);
+ match_prerequisite_members (a, t, nullptr, &s, false);
+ }
+
+ inline void
+ search_prerequisite_members (action a, target& t, const scope& s)
+ {
+ match_prerequisite_members (a, t, nullptr, &s, true);
}
LIBBUILD2_SYMEXPORT target_state
@@ -702,7 +876,7 @@ namespace build2
if (r == target_state::busy)
{
- t.ctx.sched.wait (t.ctx.count_executed (),
+ t.ctx.sched->wait (t.ctx.count_executed (),
t[a].task_count,
scheduler::work_none);
@@ -738,7 +912,7 @@ namespace build2
// If the target is still busy, wait for its completion.
//
- ctx.sched.wait (ctx.count_executed (),
+ ctx.sched->wait (ctx.count_executed (),
t[a].task_count,
scheduler::work_none);
@@ -749,20 +923,20 @@ namespace build2
execute_direct_impl (action, const target&, size_t, atomic_count*);
inline target_state
- execute_direct_sync (action a, const target& t)
+ execute_direct_sync (action a, const target& t, bool fail)
{
target_state r (execute_direct_impl (a, t, 0, nullptr));
if (r == target_state::busy)
{
- t.ctx.sched.wait (t.ctx.count_executed (),
+ t.ctx.sched->wait (t.ctx.count_executed (),
t[a].task_count,
scheduler::work_none);
r = t.executed_state (a, false);
}
- if (r == target_state::failed)
+ if (r == target_state::failed && fail)
throw failed ();
return r;
@@ -922,8 +1096,9 @@ namespace build2
p.first, static_cast<const T&> (p.second));
}
+ template <typename T>
inline target_state
- execute_members (action a, const target& t, const target* ts[], size_t n)
+ execute_members (action a, const target& t, T ts[], size_t n)
{
return t.ctx.current_mode == execution_mode::first
? straight_execute_members (a, t, ts, n, 0)
diff --git a/libbuild2/b-cmdline.cxx b/libbuild2/b-cmdline.cxx
index 2e2deb8..206c9de 100644
--- a/libbuild2/b-cmdline.cxx
+++ b/libbuild2/b-cmdline.cxx
@@ -286,8 +286,25 @@ namespace build2
{
optional<dir_path> extra;
if (ops.default_options_specified ())
+ {
extra = ops.default_options ();
+ // Note that load_default_options() expects absolute and normalized
+ // directory.
+ //
+ try
+ {
+ if (extra->relative ())
+ extra->complete ();
+
+ extra->normalize ();
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid --default-options value " << e.path;
+ }
+ }
+
// Load default options files.
//
default_options<b_options> def_ops (
@@ -396,8 +413,26 @@ namespace build2
if (ops.progress () && ops.no_progress ())
fail << "both --progress and --no-progress specified";
+ if (ops.diag_color () && ops.no_diag_color ())
+ fail << "both --diag-color and --no-diag-color specified";
+
if (ops.mtime_check () && ops.no_mtime_check ())
fail << "both --mtime-check and --no-mtime-check specified";
+
+ if (ops.match_only () && ops.load_only ())
+ fail << "both --match-only and --load-only specified";
+
+ if (!ops.dump_specified ())
+ {
+ // Note: let's allow specifying --dump-format without --dump in case
+ // it comes from a default options file or some such.
+
+ if (ops.dump_target_specified ())
+ fail << "--dump-target requires --dump";
+
+ if (ops.dump_scope_specified ())
+ fail << "--dump-scope requires --dump";
+ }
}
catch (const cli::exception& e)
{
@@ -416,6 +451,9 @@ namespace build2
r.progress = (ops.progress () ? optional<bool> (true) :
ops.no_progress () ? optional<bool> (false) : nullopt);
+ r.diag_color = (ops.diag_color () ? optional<bool> (true) :
+ ops.no_diag_color () ? optional<bool> (false) : nullopt);
+
r.mtime_check = (ops.mtime_check () ? optional<bool> (true) :
ops.no_mtime_check () ? optional<bool> (false) : nullopt);
diff --git a/libbuild2/b-cmdline.hxx b/libbuild2/b-cmdline.hxx
index c5c82fc..8ccbb20 100644
--- a/libbuild2/b-cmdline.hxx
+++ b/libbuild2/b-cmdline.hxx
@@ -24,6 +24,7 @@ namespace build2
//
uint16_t verbosity = 1;
optional<bool> progress;
+ optional<bool> diag_color;
optional<bool> mtime_check;
optional<path> config_sub;
optional<path> config_guess;
diff --git a/libbuild2/b-options.cxx b/libbuild2/b-options.cxx
index 223233a..c107b44 100644
--- a/libbuild2/b-options.cxx
+++ b/libbuild2/b-options.cxx
@@ -19,6 +19,7 @@
#include <utility>
#include <ostream>
#include <sstream>
+#include <cstring>
namespace build2
{
@@ -59,10 +60,31 @@ namespace build2
struct parser<bool>
{
static void
- parse (bool& x, scanner& s)
+ parse (bool& x, bool& xs, scanner& s)
{
- s.next ();
- x = true;
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
}
static void
@@ -211,6 +233,66 @@ namespace build2
}
};
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+
+ static void
+ merge (std::multimap<K, V, C>& b, const std::multimap<K, V, C>& a)
+ {
+ for (typename std::multimap<K, V, C>::const_iterator i (a.begin ());
+ i != a.end ();
+ ++i)
+ b.insert (typename std::multimap<K, V, C>::value_type (i->first,
+ i->second));
+ }
+ };
+
template <typename X, typename T, T X::*M>
void
thunk (X& x, scanner& s)
@@ -218,6 +300,14 @@ namespace build2
parser<T>::parse (x.*M, s);
}
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
template <typename X, typename T, T X::*M, bool X::*S>
void
thunk (X& x, scanner& s)
@@ -229,7 +319,6 @@ namespace build2
}
#include <map>
-#include <cstring>
namespace build2
{
@@ -247,10 +336,10 @@ namespace build2
verbose_ (1),
verbose_specified_ (false),
stat_ (),
- dump_ (),
- dump_specified_ (false),
progress_ (),
no_progress_ (),
+ diag_color_ (),
+ no_diag_color_ (),
jobs_ (),
jobs_specified_ (false),
max_jobs_ (),
@@ -263,12 +352,26 @@ namespace build2
max_stack_specified_ (false),
serial_stop_ (),
dry_run_ (),
+ no_diag_buffer_ (),
match_only_ (),
+ load_only_ (),
no_external_modules_ (),
structured_result_ (),
structured_result_specified_ (false),
mtime_check_ (),
no_mtime_check_ (),
+ dump_ (),
+ dump_specified_ (false),
+ dump_format_ (),
+ dump_format_specified_ (false),
+ dump_scope_ (),
+ dump_scope_specified_ (false),
+ dump_target_ (),
+ dump_target_specified_ (false),
+ trace_match_ (),
+ trace_match_specified_ (false),
+ trace_execute_ (),
+ trace_execute_specified_ (false),
no_column_ (),
no_line_ (),
buildfile_ (),
@@ -403,13 +506,6 @@ namespace build2
this->stat_, a.stat_);
}
- if (a.dump_specified_)
- {
- ::build2::build::cli::parser< std::set<string>>::merge (
- this->dump_, a.dump_);
- this->dump_specified_ = true;
- }
-
if (a.progress_)
{
::build2::build::cli::parser< bool>::merge (
@@ -422,6 +518,18 @@ namespace build2
this->no_progress_, a.no_progress_);
}
+ if (a.diag_color_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->diag_color_, a.diag_color_);
+ }
+
+ if (a.no_diag_color_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_diag_color_, a.no_diag_color_);
+ }
+
if (a.jobs_specified_)
{
::build2::build::cli::parser< size_t>::merge (
@@ -469,12 +577,24 @@ namespace build2
this->dry_run_, a.dry_run_);
}
+ if (a.no_diag_buffer_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->no_diag_buffer_, a.no_diag_buffer_);
+ }
+
if (a.match_only_)
{
::build2::build::cli::parser< bool>::merge (
this->match_only_, a.match_only_);
}
+ if (a.load_only_)
+ {
+ ::build2::build::cli::parser< bool>::merge (
+ this->load_only_, a.load_only_);
+ }
+
if (a.no_external_modules_)
{
::build2::build::cli::parser< bool>::merge (
@@ -500,6 +620,48 @@ namespace build2
this->no_mtime_check_, a.no_mtime_check_);
}
+ if (a.dump_specified_)
+ {
+ ::build2::build::cli::parser< strings>::merge (
+ this->dump_, a.dump_);
+ this->dump_specified_ = true;
+ }
+
+ if (a.dump_format_specified_)
+ {
+ ::build2::build::cli::parser< string>::merge (
+ this->dump_format_, a.dump_format_);
+ this->dump_format_specified_ = true;
+ }
+
+ if (a.dump_scope_specified_)
+ {
+ ::build2::build::cli::parser< dir_paths>::merge (
+ this->dump_scope_, a.dump_scope_);
+ this->dump_scope_specified_ = true;
+ }
+
+ if (a.dump_target_specified_)
+ {
+ ::build2::build::cli::parser< vector<pair<name, optional<name>>>>::merge (
+ this->dump_target_, a.dump_target_);
+ this->dump_target_specified_ = true;
+ }
+
+ if (a.trace_match_specified_)
+ {
+ ::build2::build::cli::parser< vector<name>>::merge (
+ this->trace_match_, a.trace_match_);
+ this->trace_match_specified_ = true;
+ }
+
+ if (a.trace_execute_specified_)
+ {
+ ::build2::build::cli::parser< vector<name>>::merge (
+ this->trace_execute_, a.trace_execute_);
+ this->trace_execute_specified_ = true;
+ }
+
if (a.no_column_)
{
::build2::build::cli::parser< bool>::merge (
@@ -628,12 +790,6 @@ namespace build2
<< "\033[1m--stat\033[0m Display build statistics." << ::std::endl;
os << std::endl
- << "\033[1m--dump\033[0m \033[4mphase\033[0m Dump the build system state after the specified phase." << ::std::endl
- << " Valid \033[4mphase\033[0m values are \033[1mload\033[0m (after loading \033[1mbuildfiles\033[0m)" << ::std::endl
- << " and \033[1mmatch\033[0m (after matching rules to targets). Repeat" << ::std::endl
- << " this option to dump the state after multiple phases." << ::std::endl;
-
- os << std::endl
<< "\033[1m--progress\033[0m Display build progress. If printing to a terminal the" << ::std::endl
<< " progress is displayed by default for low verbosity" << ::std::endl
<< " levels. Use \033[1m--no-progress\033[0m to suppress." << ::std::endl;
@@ -642,6 +798,19 @@ namespace build2
<< "\033[1m--no-progress\033[0m Don't display build progress." << ::std::endl;
os << std::endl
+ << "\033[1m--diag-color\033[0m Use color in diagnostics. If printing to a terminal the" << ::std::endl
+ << " color is used by default provided the terminal is not" << ::std::endl
+ << " dumb. Use \033[1m--no-diag-color\033[0m to suppress." << ::std::endl
+ << ::std::endl
+ << " This option affects the diagnostics printed by the" << ::std::endl
+ << " build system itself. Some rules may also choose to" << ::std::endl
+ << " propagate its value to tools (such as compilers) that" << ::std::endl
+ << " they invoke." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--no-diag-color\033[0m Don't use color in diagnostics." << ::std::endl;
+
+ os << std::endl
<< "\033[1m--jobs\033[0m|\033[1m-j\033[0m \033[4mnum\033[0m Number of active jobs to perform in parallel. This" << ::std::endl
<< " includes both the number of active threads inside the" << ::std::endl
<< " build system as well as the number of external commands" << ::std::endl
@@ -693,7 +862,10 @@ namespace build2
<< " build system errors rather than compilation errors." << ::std::endl
<< " Note that if you don't want to keep going but still" << ::std::endl
<< " want parallel execution, add \033[1m--jobs|-j\033[0m (for example \033[1m-j" << ::std::endl
- << " 0\033[0m for default concurrency)." << ::std::endl;
+ << " 0\033[0m for default concurrency). Note also that during" << ::std::endl
+ << " serial execution there is no diagnostics buffering and" << ::std::endl
+ << " child process' \033[1mstderr\033[0m is a terminal (unless redirected;" << ::std::endl
+ << " see \033[1m--no-diag-buffer\033[0m for details)." << ::std::endl;
os << std::endl
<< "\033[1m--dry-run\033[0m|\033[1m-n\033[0m Print commands without actually executing them. Note" << ::std::endl
@@ -706,8 +878,30 @@ namespace build2
<< " meta-operation supports this mode." << ::std::endl;
os << std::endl
- << "\033[1m--match-only\033[0m Match the rules but do not execute the operation. This" << ::std::endl
- << " mode is primarily useful for profiling." << ::std::endl;
+ << "\033[1m--no-diag-buffer\033[0m Do not buffer diagnostics from child processes. By" << ::std::endl
+ << " default, unless running serially, such diagnostics is" << ::std::endl
+ << " buffered and printed all at once after each child exits" << ::std::endl
+ << " in order to prevent interleaving. However, this can" << ::std::endl
+ << " have side-effects since the child process' \033[1mstderr\033[0m is no" << ::std::endl
+ << " longer a terminal. Most notably, the use of color in" << ::std::endl
+ << " diagnostics may be disabled by some programs. On the" << ::std::endl
+ << " other hand, depending on the platform and programs" << ::std::endl
+ << " invoked, the interleaving diagnostics may not break" << ::std::endl
+ << " lines and thus could be tolerable." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--match-only\033[0m Match the rules without executing the operation. This" << ::std::endl
+ << " mode is primarily useful for profiling and dumping the" << ::std::endl
+ << " build system state." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--load-only\033[0m Match the rules only to \033[1malias{}\033[0m targets ignoring other" << ::std::endl
+ << " targets and without executing the operation. In" << ::std::endl
+ << " particular, this has the effect of loading all the" << ::std::endl
+ << " subdirectory \033[1mbuildfiles\033[0m that are not explicitly" << ::std::endl
+ << " included. Note that this option can only be used with" << ::std::endl
+ << " the \033[1mperform(update)\033[0m action on an \033[1malias{}\033[0m target," << ::std::endl
+ << " usually \033[1mdir{}\033[0m." << ::std::endl;
os << std::endl
<< "\033[1m--no-external-modules\033[0m Don't load external modules during project bootstrap." << ::std::endl
@@ -736,8 +930,9 @@ namespace build2
<< " the outer operation is specified in parenthesis. For" << ::std::endl
<< " example:" << ::std::endl
<< ::std::endl
- << " unchanged perform update(test) /tmp/dir{hello/}" << ::std::endl
- << " changed perform test /tmp/hello/exe{test}" << ::std::endl
+ << " unchanged perform update(test)" << ::std::endl
+ << " /tmp/hello/hello/exe{hello}" << ::std::endl
+ << " changed perform test /tmp/hello/hello/exe{hello}" << ::std::endl
<< ::std::endl
<< " If the output format is \033[1mjson\033[0m, then the output is a JSON" << ::std::endl
<< " array of objects which are the serialized" << ::std::endl
@@ -747,7 +942,7 @@ namespace build2
<< " struct target_action_result" << ::std::endl
<< " {" << ::std::endl
<< " string target;" << ::std::endl
- << " string quoted_target;" << ::std::endl
+ << " string display_target;" << ::std::endl
<< " string target_type;" << ::std::endl
<< " optional<string> target_path;" << ::std::endl
<< " string meta_operation;" << ::std::endl
@@ -760,20 +955,20 @@ namespace build2
<< ::std::endl
<< " [" << ::std::endl
<< " {" << ::std::endl
- << " \"target\": \"/tmp/dir{hello/}\"," << ::std::endl
- << " \"quoted_target\": \"/tmp/dir{hello/}\"," << ::std::endl
- << " \"target_type\": \"dir\"," << ::std::endl
- << " \"target_path\": \"/tmp/hello\"," << ::std::endl
+ << " \"target\": \"/tmp/hello/hello/exe{hello.}\"," << ::std::endl
+ << " \"display_target\": \"/tmp/hello/hello/exe{hello}\"," << ::std::endl
+ << " \"target_type\": \"exe\"," << ::std::endl
+ << " \"target_path\": \"/tmp/hello/hello/hello\"," << ::std::endl
<< " \"meta_operation\": \"perform\"," << ::std::endl
<< " \"operation\": \"update\"," << ::std::endl
<< " \"outer_operation\": \"test\"," << ::std::endl
<< " \"state\": \"unchanged\"" << ::std::endl
<< " }," << ::std::endl
<< " {" << ::std::endl
- << " \"target\": \"/tmp/dir{hello/}\"," << ::std::endl
- << " \"quoted_target\": \"/tmp/dir{hello/}\"," << ::std::endl
- << " \"target_type\": \"dir\"," << ::std::endl
- << " \"target_path\": \"/tmp/hello\"," << ::std::endl
+ << " \"target\": \"/tmp/hello/hello/exe{hello.}\"," << ::std::endl
+ << " \"display_target\": \"/tmp/hello/hello/exe{hello}\"," << ::std::endl
+ << " \"target_type\": \"exe\"," << ::std::endl
+ << " \"target_path\": \"/tmp/hello/hello/hello\"," << ::std::endl
<< " \"meta_operation\": \"perform\"," << ::std::endl
<< " \"operation\": \"test\"," << ::std::endl
<< " \"state\": \"changed\"" << ::std::endl
@@ -784,13 +979,15 @@ namespace build2
<< " overall properties of this format and the semantics of" << ::std::endl
<< " the \033[1mstruct\033[0m serialization." << ::std::endl
<< ::std::endl
- << " The \033[1mtarget\033[0m member is a \"display\" target name, the same" << ::std::endl
- << " as in the \033[1mlines\033[0m format. The \033[1mquoted_target\033[0m member is a" << ::std::endl
- << " target name that, if required, is quoted so that it can" << ::std::endl
- << " be passed back to the driver on the command line. The" << ::std::endl
- << " \033[1mtarget_type\033[0m member is the type of target. The" << ::std::endl
- << " \033[1mtarget_path\033[0m member is an absolute path to the target if" << ::std::endl
- << " the target type is path-based or \033[1mdir\033[0m." << ::std::endl;
+ << " The \033[1mtarget\033[0m member is the target name that is qualified" << ::std::endl
+ << " with the extension (if applicable) and, if required, is" << ::std::endl
+ << " quoted so that it can be passed back to the build" << ::std::endl
+ << " system driver on the command line. The \033[1mdisplay_target\033[0m" << ::std::endl
+ << " member is the unqualified and unquoted \"display\" target" << ::std::endl
+ << " name, the same as in the \033[1mlines\033[0m format. The \033[1mtarget_type\033[0m" << ::std::endl
+ << " member is the type of target. The \033[1mtarget_path\033[0m member" << ::std::endl
+ << " is an absolute path to the target if the target type is" << ::std::endl
+ << " path-based or \033[1mdir\033[0m." << ::std::endl;
os << std::endl
<< "\033[1m--mtime-check\033[0m Perform file modification time sanity checks. These" << ::std::endl
@@ -805,6 +1002,62 @@ namespace build2
<< " \033[1m--mtime-check\033[0m for details." << ::std::endl;
os << std::endl
+ << "\033[1m--dump\033[0m \033[4mphase\033[0m Dump the build system state after the specified phase." << ::std::endl
+ << " Valid \033[4mphase\033[0m values are \033[1mload\033[0m (after loading \033[1mbuildfiles\033[0m)" << ::std::endl
+ << " and \033[1mmatch\033[0m (after matching rules to targets). The \033[1mmatch\033[0m" << ::std::endl
+ << " value also has the \033[1mmatch-pre\033[0m and \033[1mmatch-post\033[0m variants to" << ::std::endl
+ << " dump the state for the pre/post-operations (\033[1mmatch\033[0m dumps" << ::std::endl
+ << " the main operation only). Repeat this option to dump" << ::std::endl
+ << " the state after multiple phases/variants. By default" << ::std::endl
+ << " the entire build state is dumped but this behavior can" << ::std::endl
+ << " be altered with the \033[1m--dump-scope\033[0m and \033[1m--dump-target\033[0m" << ::std::endl
+ << " options. See also the \033[1m--match-only\033[0m and \033[1m--load-only\033[0m" << ::std::endl
+ << " options." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-format\033[0m \033[4mformat\033[0m Representation format and output stream to use when" << ::std::endl
+ << " dumping the build system state. Valid values for this" << ::std::endl
+ << " option are \033[1mbuildfile\033[0m (a human-readable, Buildfile-like" << ::std::endl
+ << " format written to \033[1mstderr\033[0m; this is the default), and" << ::std::endl
+ << " \033[1mjson-v0.1\033[0m (machine-readable, JSON-based format written" << ::std::endl
+ << " to \033[1mstdout\033[0m). For details on the \033[1mbuildfile\033[0m format, see" << ::std::endl
+ << " Diagnostics and Debugging (b#intro-diag-debug). For" << ::std::endl
+ << " details on the \033[1mjson-v0.1\033[0m format, see the JSON OUTPUT" << ::std::endl
+ << " section below (overall properties) and JSON Dump Format" << ::std::endl
+ << " (b#json-dump) (format specifics). Note that the JSON" << ::std::endl
+ << " format is currently unstable (thus the temporary \033[1m-v0.1\033[0m" << ::std::endl
+ << " suffix)." << ::std::endl
+ << ::std::endl
+ << " Note that because it's possible to end up with multiple" << ::std::endl
+ << " dumps (for example, by specifying the \033[1m--dump-scope\033[0m" << ::std::endl
+ << " and/or \033[1m--dump-target\033[0m options multiple times), the JSON" << ::std::endl
+ << " output is in the \"JSON Lines\" form, that is, without" << ::std::endl
+ << " pretty-printing and with the top-level JSON objects" << ::std::endl
+ << " delimited by newlines. Note also that if the JSON dump" << ::std::endl
+ << " output is combined with \033[1m--structured-result=json\033[0m, then" << ::std::endl
+ << " the structured result is the last line." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-scope\033[0m \033[4mdir\033[0m Dump the build system state for the specified scope" << ::std::endl
+ << " only. Repeat this option to dump the state of multiple" << ::std::endl
+ << " scopes." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--dump-target\033[0m \033[4mtarget\033[0m Dump the build system state for the specified target" << ::std::endl
+ << " only. Repeat this option to dump the state of multiple" << ::std::endl
+ << " targets." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--trace-match\033[0m \033[4mtarget\033[0m Trace rule matching for the specified target. This is" << ::std::endl
+ << " primarily useful during troubleshooting. Repeat this" << ::std::endl
+ << " option to trace multiple targets." << ::std::endl;
+
+ os << std::endl
+ << "\033[1m--trace-execute\033[0m \033[4mtarget\033[0m Trace rule execution for the specified target. This is" << ::std::endl
+ << " primarily useful during troubleshooting. Repeat this" << ::std::endl
+ << " option to trace multiple targets." << ::std::endl;
+
+ os << std::endl
<< "\033[1m--no-column\033[0m Don't print column numbers in diagnostics." << ::std::endl;
os << std::endl
@@ -901,27 +1154,28 @@ namespace build2
&::build2::build::cli::thunk< b_options, uint64_t, &b_options::build2_metadata_,
&b_options::build2_metadata_specified_ >;
_cli_b_options_map_["-v"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::v_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::v_ >;
_cli_b_options_map_["-V"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::V_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::V_ >;
_cli_b_options_map_["--quiet"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::quiet_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::quiet_ >;
_cli_b_options_map_["-q"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::quiet_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::quiet_ >;
_cli_b_options_map_["--silent"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::silent_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::silent_ >;
_cli_b_options_map_["--verbose"] =
&::build2::build::cli::thunk< b_options, uint16_t, &b_options::verbose_,
&b_options::verbose_specified_ >;
_cli_b_options_map_["--stat"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::stat_ >;
- _cli_b_options_map_["--dump"] =
- &::build2::build::cli::thunk< b_options, std::set<string>, &b_options::dump_,
- &b_options::dump_specified_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::stat_ >;
_cli_b_options_map_["--progress"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::progress_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::progress_ >;
_cli_b_options_map_["--no-progress"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::no_progress_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::no_progress_ >;
+ _cli_b_options_map_["--diag-color"] =
+ &::build2::build::cli::thunk< b_options, &b_options::diag_color_ >;
+ _cli_b_options_map_["--no-diag-color"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_diag_color_ >;
_cli_b_options_map_["--jobs"] =
&::build2::build::cli::thunk< b_options, size_t, &b_options::jobs_,
&b_options::jobs_specified_ >;
@@ -947,28 +1201,50 @@ namespace build2
&::build2::build::cli::thunk< b_options, size_t, &b_options::max_stack_,
&b_options::max_stack_specified_ >;
_cli_b_options_map_["--serial-stop"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::serial_stop_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::serial_stop_ >;
_cli_b_options_map_["-s"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::serial_stop_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::serial_stop_ >;
_cli_b_options_map_["--dry-run"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::dry_run_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::dry_run_ >;
_cli_b_options_map_["-n"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::dry_run_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::dry_run_ >;
+ _cli_b_options_map_["--no-diag-buffer"] =
+ &::build2::build::cli::thunk< b_options, &b_options::no_diag_buffer_ >;
_cli_b_options_map_["--match-only"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::match_only_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::match_only_ >;
+ _cli_b_options_map_["--load-only"] =
+ &::build2::build::cli::thunk< b_options, &b_options::load_only_ >;
_cli_b_options_map_["--no-external-modules"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::no_external_modules_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::no_external_modules_ >;
_cli_b_options_map_["--structured-result"] =
&::build2::build::cli::thunk< b_options, structured_result_format, &b_options::structured_result_,
&b_options::structured_result_specified_ >;
_cli_b_options_map_["--mtime-check"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::mtime_check_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::mtime_check_ >;
_cli_b_options_map_["--no-mtime-check"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::no_mtime_check_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::no_mtime_check_ >;
+ _cli_b_options_map_["--dump"] =
+ &::build2::build::cli::thunk< b_options, strings, &b_options::dump_,
+ &b_options::dump_specified_ >;
+ _cli_b_options_map_["--dump-format"] =
+ &::build2::build::cli::thunk< b_options, string, &b_options::dump_format_,
+ &b_options::dump_format_specified_ >;
+ _cli_b_options_map_["--dump-scope"] =
+ &::build2::build::cli::thunk< b_options, dir_paths, &b_options::dump_scope_,
+ &b_options::dump_scope_specified_ >;
+ _cli_b_options_map_["--dump-target"] =
+ &::build2::build::cli::thunk< b_options, vector<pair<name, optional<name>>>, &b_options::dump_target_,
+ &b_options::dump_target_specified_ >;
+ _cli_b_options_map_["--trace-match"] =
+ &::build2::build::cli::thunk< b_options, vector<name>, &b_options::trace_match_,
+ &b_options::trace_match_specified_ >;
+ _cli_b_options_map_["--trace-execute"] =
+ &::build2::build::cli::thunk< b_options, vector<name>, &b_options::trace_execute_,
+ &b_options::trace_execute_specified_ >;
_cli_b_options_map_["--no-column"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::no_column_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::no_column_ >;
_cli_b_options_map_["--no-line"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::no_line_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::no_line_ >;
_cli_b_options_map_["--buildfile"] =
&::build2::build::cli::thunk< b_options, path, &b_options::buildfile_,
&b_options::buildfile_specified_ >;
@@ -991,11 +1267,11 @@ namespace build2
&::build2::build::cli::thunk< b_options, dir_path, &b_options::default_options_,
&b_options::default_options_specified_ >;
_cli_b_options_map_["--no-default-options"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::no_default_options_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::no_default_options_ >;
_cli_b_options_map_["--help"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::help_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::help_ >;
_cli_b_options_map_["--version"] =
- &::build2::build::cli::thunk< b_options, bool, &b_options::version_ >;
+ &::build2::build::cli::thunk< b_options, &b_options::version_ >;
}
};
diff --git a/libbuild2/b-options.hxx b/libbuild2/b-options.hxx
index d8d85d3..48dd35f 100644
--- a/libbuild2/b-options.hxx
+++ b/libbuild2/b-options.hxx
@@ -13,8 +13,6 @@
//
// End prologue.
-#include <set>
-
#include <libbuild2/common-options.hxx>
namespace build2
@@ -98,18 +96,18 @@ namespace build2
const bool&
stat () const;
- const std::set<string>&
- dump () const;
-
- bool
- dump_specified () const;
-
const bool&
progress () const;
const bool&
no_progress () const;
+ const bool&
+ diag_color () const;
+
+ const bool&
+ no_diag_color () const;
+
const size_t&
jobs () const;
@@ -147,9 +145,15 @@ namespace build2
dry_run () const;
const bool&
+ no_diag_buffer () const;
+
+ const bool&
match_only () const;
const bool&
+ load_only () const;
+
+ const bool&
no_external_modules () const;
const structured_result_format&
@@ -164,6 +168,42 @@ namespace build2
const bool&
no_mtime_check () const;
+ const strings&
+ dump () const;
+
+ bool
+ dump_specified () const;
+
+ const string&
+ dump_format () const;
+
+ bool
+ dump_format_specified () const;
+
+ const dir_paths&
+ dump_scope () const;
+
+ bool
+ dump_scope_specified () const;
+
+ const vector<pair<name, optional<name>>>&
+ dump_target () const;
+
+ bool
+ dump_target_specified () const;
+
+ const vector<name>&
+ trace_match () const;
+
+ bool
+ trace_match_specified () const;
+
+ const vector<name>&
+ trace_execute () const;
+
+ bool
+ trace_execute_specified () const;
+
const bool&
no_column () const;
@@ -249,10 +289,10 @@ namespace build2
uint16_t verbose_;
bool verbose_specified_;
bool stat_;
- std::set<string> dump_;
- bool dump_specified_;
bool progress_;
bool no_progress_;
+ bool diag_color_;
+ bool no_diag_color_;
size_t jobs_;
bool jobs_specified_;
size_t max_jobs_;
@@ -265,12 +305,26 @@ namespace build2
bool max_stack_specified_;
bool serial_stop_;
bool dry_run_;
+ bool no_diag_buffer_;
bool match_only_;
+ bool load_only_;
bool no_external_modules_;
structured_result_format structured_result_;
bool structured_result_specified_;
bool mtime_check_;
bool no_mtime_check_;
+ strings dump_;
+ bool dump_specified_;
+ string dump_format_;
+ bool dump_format_specified_;
+ dir_paths dump_scope_;
+ bool dump_scope_specified_;
+ vector<pair<name, optional<name>>> dump_target_;
+ bool dump_target_specified_;
+ vector<name> trace_match_;
+ bool trace_match_specified_;
+ vector<name> trace_execute_;
+ bool trace_execute_specified_;
bool no_column_;
bool no_line_;
path buildfile_;
diff --git a/libbuild2/b-options.ixx b/libbuild2/b-options.ixx
index b7944ba..34b0d39 100644
--- a/libbuild2/b-options.ixx
+++ b/libbuild2/b-options.ixx
@@ -68,28 +68,28 @@ namespace build2
return this->stat_;
}
- inline const std::set<string>& b_options::
- dump () const
+ inline const bool& b_options::
+ progress () const
{
- return this->dump_;
+ return this->progress_;
}
- inline bool b_options::
- dump_specified () const
+ inline const bool& b_options::
+ no_progress () const
{
- return this->dump_specified_;
+ return this->no_progress_;
}
inline const bool& b_options::
- progress () const
+ diag_color () const
{
- return this->progress_;
+ return this->diag_color_;
}
inline const bool& b_options::
- no_progress () const
+ no_diag_color () const
{
- return this->no_progress_;
+ return this->no_diag_color_;
}
inline const size_t& b_options::
@@ -165,12 +165,24 @@ namespace build2
}
inline const bool& b_options::
+ no_diag_buffer () const
+ {
+ return this->no_diag_buffer_;
+ }
+
+ inline const bool& b_options::
match_only () const
{
return this->match_only_;
}
inline const bool& b_options::
+ load_only () const
+ {
+ return this->load_only_;
+ }
+
+ inline const bool& b_options::
no_external_modules () const
{
return this->no_external_modules_;
@@ -200,6 +212,78 @@ namespace build2
return this->no_mtime_check_;
}
+ inline const strings& b_options::
+ dump () const
+ {
+ return this->dump_;
+ }
+
+ inline bool b_options::
+ dump_specified () const
+ {
+ return this->dump_specified_;
+ }
+
+ inline const string& b_options::
+ dump_format () const
+ {
+ return this->dump_format_;
+ }
+
+ inline bool b_options::
+ dump_format_specified () const
+ {
+ return this->dump_format_specified_;
+ }
+
+ inline const dir_paths& b_options::
+ dump_scope () const
+ {
+ return this->dump_scope_;
+ }
+
+ inline bool b_options::
+ dump_scope_specified () const
+ {
+ return this->dump_scope_specified_;
+ }
+
+ inline const vector<pair<name, optional<name>>>& b_options::
+ dump_target () const
+ {
+ return this->dump_target_;
+ }
+
+ inline bool b_options::
+ dump_target_specified () const
+ {
+ return this->dump_target_specified_;
+ }
+
+ inline const vector<name>& b_options::
+ trace_match () const
+ {
+ return this->trace_match_;
+ }
+
+ inline bool b_options::
+ trace_match_specified () const
+ {
+ return this->trace_match_specified_;
+ }
+
+ inline const vector<name>& b_options::
+ trace_execute () const
+ {
+ return this->trace_execute_;
+ }
+
+ inline bool b_options::
+ trace_execute_specified () const
+ {
+ return this->trace_execute_specified_;
+ }
+
inline const bool& b_options::
no_column () const
{
diff --git a/libbuild2/b.cli b/libbuild2/b.cli
index 3ae6e0b..f58b869 100644
--- a/libbuild2/b.cli
+++ b/libbuild2/b.cli
@@ -1,8 +1,6 @@
// file : libbuild2/b.cli
// license : MIT; see accompanying LICENSE file
-include <set>;
-
include <libbuild2/common.cli>;
"\section=1"
@@ -328,11 +326,20 @@ namespace build2
version: 1.0.0
src_root: /tmp/libfoo
out_root: /tmp/libfoo
+ subprojects: @tests
project: libbar
version: 2.0.0
src_root: /tmp/libbar
out_root: /tmp/libbar-out
+ subprojects: @tests
+ \
+
+ To omit discovering and printing subprojects information, use the
+ \cb{no_subprojects} parameter, for example:
+
+ \
+ $ b info: libfoo/,no_subprojects
\
To instead print this information in the JSON format, use the
@@ -471,7 +478,7 @@ namespace build2
\
$ b config.install.root=c:\projects\install
- $ b \"config.install.root='c:\Program Files (x86)\test\'\"
+ $ b \"config.install.root='c:\Program Files\test\'\"
$ b 'config.cxx.poptions=-DFOO_STR=\"foo\"'
\
"
@@ -537,15 +544,6 @@ namespace build2
"Display build statistics."
}
- std::set<string> --dump
- {
- "<phase>",
- "Dump the build system state after the specified phase. Valid <phase>
- values are \cb{load} (after loading \cb{buildfiles}) and \cb{match}
- (after matching rules to targets). Repeat this option to dump the
- state after multiple phases."
- }
-
bool --progress
{
"Display build progress. If printing to a terminal the progress is
@@ -558,6 +556,22 @@ namespace build2
"Don't display build progress."
}
+ bool --diag-color
+ {
+ "Use color in diagnostics. If printing to a terminal the color is used
+ by default provided the terminal is not dumb. Use \cb{--no-diag-color}
+ to suppress.
+
+ This option affects the diagnostics printed by the build system itself.
+ Some rules may also choose to propagate its value to tools (such as
+ compilers) that they invoke."
+ }
+
+ bool --no-diag-color
+ {
+ "Don't use color in diagnostics."
+ }
+
size_t --jobs|-j
{
"<num>",
@@ -617,7 +631,10 @@ namespace build2
investigate build failures that are caused by build system errors
rather than compilation errors. Note that if you don't want to keep
going but still want parallel execution, add \cb{--jobs|-j} (for
- example \cb{-j\ 0} for default concurrency)."
+ example \cb{-j\ 0} for default concurrency). Note also that during
+ serial execution there is no diagnostics buffering and child
+ process' \cb{stderr} is a terminal (unless redirected; see
+ \cb{--no-diag-buffer} for details)."
}
bool --dry-run|-n
@@ -631,10 +648,33 @@ namespace build2
this mode."
}
+ bool --no-diag-buffer
+ {
+ "Do not buffer diagnostics from child processes. By default, unless
+ running serially, such diagnostics is buffered and printed all at
+ once after each child exits in order to prevent interleaving.
+ However, this can have side-effects since the child process'
+ \cb{stderr} is no longer a terminal. Most notably, the use of
+ color in diagnostics may be disabled by some programs. On the
+ other hand, depending on the platform and programs invoked, the
+ interleaving diagnostics may not break lines and thus could be
+ tolerable."
+ }
+
bool --match-only
{
- "Match the rules but do not execute the operation. This mode is primarily
- useful for profiling."
+ "Match the rules without executing the operation. This mode is primarily
+ useful for profiling and dumping the build system state."
+ }
+
+ bool --load-only
+ {
+ "Match the rules only to \cb{alias{\}} targets ignoring other targets
+ and without executing the operation. In particular, this has the
+ effect of loading all the subdirectory \cb{buildfiles} that are not
+ explicitly included. Note that this option can only be used with the
+ \cb{perform(update)} action on an \cb{alias{\}} target, usually
+ \cb{dir{\}}."
}
bool --no-external-modules
@@ -666,8 +706,8 @@ namespace build2
outer operation is specified in parenthesis. For example:
\
- unchanged perform update(test) /tmp/dir{hello/}
- changed perform test /tmp/hello/exe{test}
+ unchanged perform update(test) /tmp/hello/hello/exe{hello}
+ changed perform test /tmp/hello/hello/exe{hello}
\
If the output format is \cb{json}, then the output is a JSON array of
@@ -678,7 +718,7 @@ namespace build2
struct target_action_result
{
string target;
- string quoted_target;
+ string display_target;
string target_type;
optional<string> target_path;
string meta_operation;
@@ -693,20 +733,20 @@ namespace build2
\
[
{
- \"target\": \"/tmp/dir{hello/}\",
- \"quoted_target\": \"/tmp/dir{hello/}\",
- \"target_type\": \"dir\",
- \"target_path\": \"/tmp/hello\",
+ \"target\": \"/tmp/hello/hello/exe{hello.}\",
+ \"display_target\": \"/tmp/hello/hello/exe{hello}\",
+ \"target_type\": \"exe\",
+ \"target_path\": \"/tmp/hello/hello/hello\",
\"meta_operation\": \"perform\",
\"operation\": \"update\",
\"outer_operation\": \"test\",
\"state\": \"unchanged\"
},
{
- \"target\": \"/tmp/dir{hello/}\",
- \"quoted_target\": \"/tmp/dir{hello/}\",
- \"target_type\": \"dir\",
- \"target_path\": \"/tmp/hello\",
+ \"target\": \"/tmp/hello/hello/exe{hello.}\",
+ \"display_target\": \"/tmp/hello/hello/exe{hello}\",
+ \"target_type\": \"exe\",
+ \"target_path\": \"/tmp/hello/hello/hello\",
\"meta_operation\": \"perform\",
\"operation\": \"test\",
\"state\": \"changed\"
@@ -718,12 +758,14 @@ namespace build2
properties of this format and the semantics of the \cb{struct}
serialization.
- The \cb{target} member is a \"display\" target name, the same as in the
- \cb{lines} format. The \cb{quoted_target} member is a target name that,
- if required, is quoted so that it can be passed back to the driver on
- the command line. The \cb{target_type} member is the type of target.
- The \cb{target_path} member is an absolute path to the target if the
- target type is path-based or \cb{dir}.
+ The \cb{target} member is the target name that is qualified with the
+ extension (if applicable) and, if required, is quoted so that it can be
+ passed back to the build system driver on the command line. The
+ \cb{display_target} member is the unqualified and unquoted \"display\"
+ target name, the same as in the \cb{lines} format. The \cb{target_type}
+ member is the type of target. The \cb{target_path} member is an
+ absolute path to the target if the target type is path-based or
+ \cb{dir}.
"
}
@@ -742,6 +784,73 @@ namespace build2
\cb{--mtime-check} for details."
}
+ strings --dump
+ {
+ "<phase>",
+ "Dump the build system state after the specified phase. Valid <phase>
+ values are \cb{load} (after loading \cb{buildfiles}) and \cb{match}
+ (after matching rules to targets). The \cb{match} value also has the
+ \cb{match-pre} and \cb{match-post} variants to dump the state for the
+ pre/post-operations (\cb{match} dumps the main operation only). Repeat
+ this option to dump the state after multiple phases/variants. By
+ default the entire build state is dumped but this behavior can be
+ altered with the \cb{--dump-scope} and \cb{--dump-target} options.
+ See also the \cb{--match-only} and \cb{--load-only} options."
+ }
+
+ string --dump-format
+ {
+ // NOTE: fix all references to json-v0.1, including the manual.
+ //
+ "<format>",
+ "Representation format and output stream to use when dumping the build
+ system state. Valid values for this option are \cb{buildfile} (a
+ human-readable, Buildfile-like format written to \cb{stderr}; this is
+ the default), and \cb{json-v0.1} (machine-readable, JSON-based format
+ written to \cb{stdout}). For details on the \cb{buildfile} format, see
+ \l{b#intro-diag-debug Diagnostics and Debugging}. For details on the
+ \cb{json-v0.1} format, see the JSON OUTPUT section below (overall
+ properties) and \l{b#json-dump JSON Dump Format} (format specifics).
+ Note that the JSON format is currently unstable (thus the temporary
+ \cb{-v0.1} suffix).
+
+ Note that because it's possible to end up with multiple dumps (for
+ example, by specifying the \cb{--dump-scope} and/or \cb{--dump-target}
+ options multiple times), the JSON output is in the \"JSON Lines\" form,
+ that is, without pretty-printing and with the top-level JSON objects
+ delimited by newlines. Note also that if the JSON dump output is
+ combined with \cb{--structured-result=json}, then the structured
+ result is the last line."
+ }
+
+ dir_paths --dump-scope
+ {
+ "<dir>",
+ "Dump the build system state for the specified scope only. Repeat this
+ option to dump the state of multiple scopes."
+ }
+
+ vector<pair<name, optional<name>>> --dump-target
+ {
+ "<target>",
+ "Dump the build system state for the specified target only. Repeat this
+ option to dump the state of multiple targets."
+ }
+
+ vector<name> --trace-match
+ {
+ "<target>",
+ "Trace rule matching for the specified target. This is primarily useful
+ during troubleshooting. Repeat this option to trace multiple targets."
+ }
+
+ vector<name> --trace-execute
+ {
+ "<target>",
+ "Trace rule execution for the specified target. This is primarily useful
+ during troubleshooting. Repeat this option to trace multiple targets."
+ }
+
bool --no-column
{
"Don't print column numbers in diagnostics."
@@ -874,7 +983,7 @@ namespace build2
The order in which default options files are loaded is traced at the
verbosity level 3 (\cb{-V} option) or higher.
- \h|JSON OUTPUT|
+ \h#json-output|JSON OUTPUT|
Commands that support the JSON output specify their formats as a
serialized representation of a C++ \cb{struct} or an array thereof. For
diff --git a/libbuild2/bash/rule.cxx b/libbuild2/bash/rule.cxx
index 55f3cf0..6e96b34 100644
--- a/libbuild2/bash/rule.cxx
+++ b/libbuild2/bash/rule.cxx
@@ -63,16 +63,18 @@ namespace build2
// in_rule
//
bool in_rule::
- match (action a, target& t) const
+ match (action a, target& xt, const string& hint, match_extra&) const
{
tracer trace ("bash::in_rule::match");
- // Note that for bash{} we match even if the target does not depend on
- // any modules (while it could have been handled by the in module, that
- // would require loading it).
+ file& t (xt.as<file> ()); // Only registered for exe{} and bash{}.
+
+ // Note that for bash{} and for exe{} with hint we match even if the
+ // target does not depend on any modules (while it could have been
+ // handled by the in module, that would require loading it).
//
- bool fi (false); // Found in.
- bool fm (t.is_a<bash> ()); // Found module.
+ bool fi (false); // Found in.
+ bool fm (!hint.empty () || t.is_a<bash> ()); // Found module.
for (prerequisite_member p: group_prerequisite_members (a, t))
{
if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
@@ -86,7 +88,14 @@ namespace build2
l4 ([&]{trace << "no in file prerequisite for target " << t;});
if (!fm)
- l4 ([&]{trace << "no bash module prerequisite for target " << t;});
+ l4 ([&]{trace << "no bash module prerequisite or hint for target "
+ << t;});
+
+ // If we match, derive the file name early as recommended by the in
+ // rule.
+ //
+ if (fi && fm)
+ t.derive_path ();
return fi && fm;
}
@@ -213,13 +222,14 @@ namespace build2
const string& n,
optional<uint64_t> flags,
bool strict,
+ const substitution_map* smap,
const optional<string>& null) const
{
assert (!flags);
return n.compare (0, 6, "import") == 0 && (n[6] == ' ' || n[6] == '\t')
? substitute_import (l, a, t, trim (string (n, 7)))
- : rule::substitute (l, a, t, n, nullopt, strict, null);
+ : rule::substitute (l, a, t, n, nullopt, strict, smap, null);
}
string in_rule::
@@ -404,7 +414,7 @@ namespace build2
"source \"$(dirname"
" \"$(readlink -f"
" \"${BASH_SOURCE[0]}\")\")/"
- + iip.string () + "\"";
+ + iip.string () + '"';
}
else
{
@@ -425,7 +435,7 @@ namespace build2
return
"source \"$(dirname"
" \"${BASH_SOURCE[0]}\")/"
- + o + iip.string () + "\"";
+ + o + iip.string () + '"';
}
}
else
@@ -445,9 +455,9 @@ namespace build2
}
recipe install_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
{
- recipe r (file_rule::apply_impl (a, t));
+ recipe r (file_rule::apply_impl (a, t, me));
if (r == nullptr)
return noop_recipe;
diff --git a/libbuild2/bash/rule.hxx b/libbuild2/bash/rule.hxx
index 3da0c73..3f9618f 100644
--- a/libbuild2/bash/rule.hxx
+++ b/libbuild2/bash/rule.hxx
@@ -29,10 +29,12 @@ namespace build2
class LIBBUILD2_BASH_SYMEXPORT in_rule: public in::rule
{
public:
- in_rule (): rule ("bash.in 1", "bash.in", '@', false /* strict */) {}
+ in_rule (): rule ("bash.in 1", "bash", '@', false /* strict */) {}
virtual bool
- match (action, target&) const override;
+ match (action, target&, const string&, match_extra&) const override;
+
+ using in::rule::match; // Make Clang happy.
virtual recipe
apply (action, target&) const override;
@@ -50,6 +52,7 @@ namespace build2
const string&,
optional<uint64_t>,
bool,
+ const substitution_map*,
const optional<string>&) const override;
string
@@ -71,7 +74,7 @@ namespace build2
match (action, target&) const override;
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
protected:
const in_rule& in_;
diff --git a/libbuild2/bin/def-rule.cxx b/libbuild2/bin/def-rule.cxx
index c0e82fb..143cc35 100644
--- a/libbuild2/bin/def-rule.cxx
+++ b/libbuild2/bin/def-rule.cxx
@@ -33,8 +33,10 @@ namespace build2
};
static void
- read_dumpbin (istream& is, symbols& syms)
+ read_dumpbin (diag_buffer& dbuf, ifdstream& is, symbols& syms)
{
+ // Note: io_error is handled by the caller.
+
// Lines that describe symbols look like:
//
// 0 1 2 3 4 5 6
@@ -72,29 +74,27 @@ namespace build2
//
// Note that an UNDEF data symbol with non-zero OFFSET is a "common
// symbol", equivalent to the nm `C` type.
-
- // Map of read-only (.rdata, .xdata) and uninitialized (.bss) sections
- // to their types (R and B, respectively). If a section is not found in
- // this map, then it's assumed to be normal data (.data).
//
- map<string, char> sections;
-
- string l;
- while (!eof (getline (is, l)))
+ // We keep a map of read-only (.rdata, .xdata) and uninitialized (.bss)
+ // sections to their types (R and B, respectively). If a section is not
+ // found in this map, then it's assumed to be normal data (.data).
+ //
+ auto parse_line = [&syms,
+ secs = map<string, char> ()] (const string& l) mutable
{
size_t b (0), e (0), n;
// IDX (note that it can be more than 3 characters).
//
if (next_word (l, b, e) == 0)
- continue;
+ return;
// OFFSET (always 8 characters).
//
n = next_word (l, b, e);
if (n != 8)
- continue;
+ return;
string off (l, b, n);
@@ -103,7 +103,7 @@ namespace build2
n = next_word (l, b, e);
if (n == 0)
- continue;
+ return;
string sec (l, b, n);
@@ -112,7 +112,7 @@ namespace build2
n = next_word (l, b, e);
if (l.compare (b, n, "notype") != 0)
- continue;
+ return;
bool dat;
if (l[e] == ' ' && l[e + 1] == '(' && l[e + 2] == ')')
@@ -128,7 +128,7 @@ namespace build2
n = next_word (l, b, e);
if (n == 0)
- continue;
+ return;
string vis (l, b, n);
@@ -137,14 +137,14 @@ namespace build2
n = next_word (l, b, e);
if (n != 1 || l[b] != '|')
- continue;
+ return;
// SYMNAME
//
n = next_word (l, b, e);
if (n == 0)
- continue;
+ return;
string s (l, b, n);
@@ -162,23 +162,23 @@ namespace build2
};
if (cmp (".rdata", 6) ||
- cmp (".xdata", 6)) sections.emplace (move (sec), 'R');
- else if (cmp (".bss", 4)) sections.emplace (move (sec), 'B');
+ cmp (".xdata", 6)) secs.emplace (move (sec), 'R');
+ else if (cmp (".bss", 4)) secs.emplace (move (sec), 'B');
- continue;
+ return;
}
// We can only export extern symbols.
//
if (vis != "External")
- continue;
+ return;
if (dat)
{
if (sec != "UNDEF")
{
- auto i (sections.find (sec));
- switch (i == sections.end () ? 'D' : i->second)
+ auto i (secs.find (sec));
+ switch (i == secs.end () ? 'D' : i->second)
{
case 'D': syms.d.insert (move (s)); break;
case 'R': syms.r.insert (move (s)); break;
@@ -196,20 +196,54 @@ namespace build2
if (sec != "UNDEF")
syms.t.insert (move (s));
}
+ };
+
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically get an
+ // inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
+
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
+ {
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ ist.fd = nullfd;
+ else
+ {
+ parse_line (l);
+ l.clear ();
+ }
+
+ continue;
+ }
+
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
}
}
static void
- read_posix_nm (istream& is, symbols& syms)
+ read_posix_nm (diag_buffer& dbuf, ifdstream& is, symbols& syms)
{
+ // Note: io_error is handled by the caller.
+
// Lines that describe symbols look like:
//
// <NAME> <TYPE> <VALUE> <SIZE>
//
// The types that we are interested in are T, D, R, and B.
//
- string l;
- while (!eof (getline (is, l)))
+ auto parse_line = [&syms] (const string& l)
{
size_t b (0), e (0), n;
@@ -218,7 +252,7 @@ namespace build2
n = next_word (l, b, e);
if (n == 0)
- continue;
+ return;
string s (l, b, n);
@@ -227,7 +261,7 @@ namespace build2
n = next_word (l, b, e);
if (n != 1)
- continue;
+ return;
switch (l[b])
{
@@ -238,6 +272,39 @@ namespace build2
case 'C': syms.c.insert (move (s)); break;
case 'T': syms.t.insert (move (s)); break;
}
+ };
+
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically get an
+ // inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
+
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
+ {
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ ist.fd = nullfd;
+ else
+ {
+ parse_line (l);
+ l.clear ();
+ }
+
+ continue;
+ }
+
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
}
}
@@ -350,6 +417,8 @@ namespace build2
// we will try to recognize C/C++ identifiers plus the special symbols
// that we need to export (e.g., vtable).
//
+ // Note that it looks like rdata should not be declared DATA. It is
+ // known to break ??_7 (vtable) exporting (see GH issue 315).
//
for (const string& s: syms.r)
{
@@ -357,7 +426,7 @@ namespace build2
(s[0] == '?' && s[1] != '?') || // C++
s.compare (0, 4, "??_7") == 0) // vtable
{
- os << " " << strip (s) << " DATA\n";
+ os << " " << strip (s) << '\n';
}
}
}
@@ -429,6 +498,12 @@ namespace build2
// we will try to recognize C/C++ identifiers plus the special symbols
// that we need to export (e.g., vtable and typeinfo).
//
+ // For the description of GNU binutils .def format, see:
+ //
+ // https://sourceware.org/binutils/docs/binutils/def-file-format.html
+ //
+ // @@ Maybe CONSTANT is more appropriate than DATA?
+ //
for (const string& s: syms.r)
{
if (s.find_first_of (".") != string::npos) // Special (.refptr.*)
@@ -653,8 +728,12 @@ namespace build2
const char*& arg (*(args.end () - 2));
+ // We could print the prerequisite if it's a single obj{}/libu{} (with
+ // the latter being the common case). But it doesn't feel like that's
+ // worth the variability and the associated possibility of confusion.
+ //
if (verb == 1)
- text << "def " << t;
+ print_diag ("def", t);
// Extract symbols from each object file.
//
@@ -674,22 +753,37 @@ namespace build2
// Both dumpbin.exe and nm send their output to stdout. While nm sends
// diagnostics to stderr, dumpbin sends it to stdout together with the
- // output.
+ // output. To keep things uniform we will buffer stderr in both cases.
+ //
+ process pr (
+ run_start (nm,
+ args,
+ 0 /* stdin */,
+ -1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+
+ // Note that while we read both streams until eof in the normal
+ // circumstances, we cannot use fdstream_mode::skip for the exception
+ // case on both of them: we may end up being blocked trying to read
+ // one stream while the process may be blocked writing to the other.
+ // So in case of an exception we only skip the diagnostics and close
+ // stdout hard. The latter should happen first so the order of the
+ // dbuf/is variables is important.
//
- process pr (run_start (nm,
- args,
- 0 /* stdin */,
- -1 /* stdout */));
+ diag_buffer dbuf (ctx, args[0], pr, (fdstream_mode::non_blocking |
+ fdstream_mode::skip));
+
bool io (false);
try
{
- ifdstream is (
- move (pr.in_ofd), fdstream_mode::skip, ifdstream::badbit);
+ ifdstream is (move (pr.in_ofd),
+ fdstream_mode::non_blocking,
+ ifdstream::badbit);
if (lid == "msvc" || nid == "msvc")
- read_dumpbin (is, syms);
+ read_dumpbin (dbuf, is, syms);
else
- read_posix_nm (is, syms);
+ read_posix_nm (dbuf, is, syms);
is.close ();
}
@@ -701,7 +795,7 @@ namespace build2
io = true;
}
- if (!run_finish_code (args.data (), pr) || io)
+ if (!run_finish_code (dbuf, args, pr, 1 /* verbosity */) || io)
fail << "unable to extract symbols from " << arg;
}
diff --git a/libbuild2/bin/guess.cxx b/libbuild2/bin/guess.cxx
index 905bd0a..e9759b8 100644
--- a/libbuild2/bin/guess.cxx
+++ b/libbuild2/bin/guess.cxx
@@ -34,9 +34,12 @@ namespace build2
// Return 0-version if the version is invalid.
//
static inline semantic_version
- parse_version (const string& s, size_t p = 0, const char* bs = ".-+~ ")
+ parse_version (const string& s, size_t p = 0,
+ semantic_version::flags f = semantic_version::allow_omit_patch |
+ semantic_version::allow_build,
+ const char* bs = ".-+~ ")
{
- optional<semantic_version> v (parse_semantic_version (s, p, bs));
+ optional<semantic_version> v (parse_semantic_version (s, p, f, bs));
return v ? *v : semantic_version ();
}
@@ -89,7 +92,7 @@ namespace build2
static global_cache<ar_info> ar_cache;
const ar_info&
- guess_ar (const path& ar, const path* rl, const char* paths)
+ guess_ar (context& ctx, const path& ar, const path* rl, const char* paths)
{
tracer trace ("bin::guess_ar");
@@ -177,7 +180,11 @@ namespace build2
// "LLVM version 3.5.2"
// "LLVM version 5.0.0"
//
- if (l.compare (0, 13, "LLVM version ") == 0)
+ // But it can also be prefixed with some stuff, for example:
+ //
+ // "Debian LLVM version 14.0.6"
+ //
+ if (l.find ("LLVM version ") != string::npos)
{
semantic_version v (parse_version (l, l.rfind (' ') + 1));
return guess_result ("llvm", move (l), move (v));
@@ -227,7 +234,11 @@ namespace build2
// (yes, it goes to stdout) but that seems harmless.
//
sha256 cs;
- arr = run<guess_result> (3, are, "--version", f, false, false, &cs);
+ arr = run<guess_result> (ctx,
+ 3,
+ are, "--version",
+ f,
+ false , false, &cs);
if (!arr.empty ())
arr.checksum = cs.string ();
@@ -247,10 +258,10 @@ namespace build2
: guess_result ();
};
- // Redirect STDERR to STDOUT and ignore exit status.
+ // Redirect stderr to stdout and ignore exit status.
//
sha256 cs;
- arr = run<guess_result> (3, are, f, false, true, &cs);
+ arr = run<guess_result> (ctx, 3, are, f, false, true, &cs);
if (!arr.empty ())
{
@@ -280,7 +291,7 @@ namespace build2
// "LLVM version ".
//
- if (l.compare (0, 13, "LLVM version ") == 0)
+ if (l.find ("LLVM version ") != string::npos)
return guess_result ("llvm", move (l), semantic_version ());
// On FreeBSD we get "ranlib" rather than "BSD ranlib" for some
@@ -293,7 +304,11 @@ namespace build2
};
sha256 cs;
- rlr = run<guess_result> (3, rle, "--version", f, false, false, &cs);
+ rlr = run<guess_result> (ctx,
+ 3,
+ rle, "--version",
+ f,
+ false, false, &cs);
if (!rlr.empty ())
rlr.checksum = cs.string ();
@@ -310,10 +325,10 @@ namespace build2
: guess_result ();
};
- // Redirect STDERR to STDOUT and ignore exit status.
+ // Redirect stderr to stdout and ignore exit status.
//
sha256 cs;
- rlr = run<guess_result> (3, rle, f, false, true, &cs);
+ rlr = run<guess_result> (ctx, 3, rle, f, false, true, &cs);
if (!rlr.empty ())
{
@@ -378,7 +393,7 @@ namespace build2
static global_cache<ld_info> ld_cache;
const ld_info&
- guess_ld (const path& ld, const char* paths)
+ guess_ld (context& ctx, const path& ld, const char* paths)
{
tracer trace ("bin::guess_ld");
@@ -437,17 +452,22 @@ namespace build2
string id;
optional<semantic_version> ver;
+ size_t p;
+
// Microsoft link.exe output starts with "Microsoft (R) ".
//
if (l.compare (0, 14, "Microsoft (R) ") == 0)
{
id = "msvc";
}
- // LLD prints a line in the form "LLD X.Y.Z ...".
+ // LLD prints a line in the form "LLD X.Y.Z ...". But it can also
+ // be prefixed with some stuff, for example:
//
- else if (l.compare (0, 4, "LLD ") == 0)
+ // Debian LLD 14.0.6 (compatible with GNU linkers)
+ //
+ else if ((p = l.find ("LLD ")) != string::npos)
{
- ver = parse_version (l, 4);
+ ver = parse_version (l, p + 4);
// The only way to distinguish between various LLD drivers is via
// their name. Handle potential prefixes (say a target) and
@@ -485,12 +505,12 @@ namespace build2
: guess_result (move (id), move (l), move (ver)));
};
- // Redirect STDERR to STDOUT and ignore exit status. Note that in case
+ // Redirect stderr to stdout and ignore exit status. Note that in case
// of link.exe we will hash the diagnostics (yes, it goes to stdout)
// but that seems harmless.
//
sha256 cs;
- r = run<guess_result> (3, env, "--version", f, false, true, &cs);
+ r = run<guess_result> (ctx, 3, env, "--version", f, false, true, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -521,7 +541,7 @@ namespace build2
};
sha256 cs;
- r = run<guess_result> (3, env, "-v", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "-v", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -548,7 +568,7 @@ namespace build2
// option.
//
sha256 cs;
- r = run<guess_result> (3, env, "-version", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "-version", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -586,7 +606,7 @@ namespace build2
static global_cache<rc_info> rc_cache;
const rc_info&
- guess_rc (const path& rc, const char* paths)
+ guess_rc (context& ctx, const path& rc, const char* paths)
{
tracer trace ("bin::guess_rc");
@@ -642,7 +662,7 @@ namespace build2
// option.
//
sha256 cs;
- r = run<guess_result> (3, env, "--version", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "--version", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -675,7 +695,7 @@ namespace build2
};
sha256 cs;
- r = run<guess_result> (3, env, "/?", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "/?", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
@@ -703,7 +723,7 @@ namespace build2
static global_cache<nm_info> nm_cache;
const nm_info&
- guess_nm (const path& nm, const char* paths)
+ guess_nm (context& ctx, const path& nm, const char* paths)
{
tracer trace ("bin::guess_nm");
@@ -764,7 +784,10 @@ namespace build2
// LLVM nm --version output has a line that starts with
// "LLVM version" followed by a version.
//
- if (l.compare (0, 13, "LLVM version ") == 0)
+ // But let's assume it can be prefixed with some stuff like the rest
+ // of the LLVM tools (see above).
+ //
+ if (l.find ("LLVM version ") != string::npos)
return guess_result ("llvm", move (l), semantic_version ());
if (l.compare (0, 14, "Microsoft (R) ") == 0)
@@ -784,7 +807,7 @@ namespace build2
// option.
//
sha256 cs;
- r = run<guess_result> (3, env, "--version", f, false, false, &cs);
+ r = run<guess_result> (ctx, 3, env, "--version", f, false, false, &cs);
if (!r.empty ())
r.checksum = cs.string ();
diff --git a/libbuild2/bin/guess.hxx b/libbuild2/bin/guess.hxx
index 52c0e1b..7dc7b33 100644
--- a/libbuild2/bin/guess.hxx
+++ b/libbuild2/bin/guess.hxx
@@ -54,7 +54,7 @@ namespace build2
// attemplated and the returned ranlib_* members will be left empty.
//
const ar_info&
- guess_ar (const path& ar, const path* ranlib, const char* paths);
+ guess_ar (context&, const path& ar, const path* ranlib, const char* paths);
// ld information.
//
@@ -100,7 +100,7 @@ namespace build2
};
const ld_info&
- guess_ld (const path& ld, const char* paths);
+ guess_ld (context&, const path& ld, const char* paths);
// rc information.
//
@@ -132,7 +132,7 @@ namespace build2
};
const rc_info&
- guess_rc (const path& rc, const char* paths);
+ guess_rc (context&, const path& rc, const char* paths);
// nm information.
//
@@ -166,7 +166,7 @@ namespace build2
};
const nm_info&
- guess_nm (const path& nm, const char* paths);
+ guess_nm (context&, const path& nm, const char* paths);
}
}
diff --git a/libbuild2/bin/init.cxx b/libbuild2/bin/init.cxx
index 2b1df97..610082e 100644
--- a/libbuild2/bin/init.cxx
+++ b/libbuild2/bin/init.cxx
@@ -41,24 +41,30 @@ namespace build2
bool
vars_init (scope& rs,
- scope&,
- const location&,
- bool first,
+ scope& bs,
+ const location& loc,
+ bool,
bool,
module_init_extra&)
{
tracer trace ("bin::vars_init");
l5 ([&]{trace << "for " << rs;});
- assert (first);
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "bin.vars module must be loaded in project root";
// Enter variables.
//
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
+
// Target is a string and not target_triplet because it can be
// specified by the user.
//
- auto& vp (rs.var_pool ());
-
vp.insert<string> ("config.bin.target");
vp.insert<string> ("config.bin.pattern");
@@ -76,6 +82,9 @@ namespace build2
// example, addition of rpaths for prerequisite libraries (see the cc
// module for an example). Default is true.
//
+ // Note also that a rule may need to make rpath relative if
+ // install.relocatable is true.
+ //
vp.insert<dir_paths> ("config.bin.rpath");
vp.insert<bool> ("config.bin.rpath.auto");
@@ -104,12 +113,12 @@ namespace build2
// Link whole archive. Note: with target visibility.
//
// The lookup semantics is as follows: we first look for a prerequisite-
- // specific value, then for a target-specific value in the library being
- // linked, and then for target type/pattern-specific value starting from
- // the scope of the target being linked-to. In that final lookup we do
- // not look in the target being linked-to itself since that is used to
- // indicate how this target should be linked to other targets. For
- // example:
+ // specific value, then for a target-specific value in the prerequisite
+ // library, and then for target type/pattern-specific value starting
+ // from the scope of the target being linked. In that final lookup we do
+ // not look in the target being linked itself since that is used to
+ // indicate how this target should be used as a prerequisite of other
+ // targets. For example:
//
// exe{test}: liba{foo}
// liba{foo}: libua{foo1 foo2}
@@ -150,6 +159,68 @@ namespace build2
return true;
}
+ bool
+ types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("bin::types_init");
+ l5 ([&]{trace << "for " << rs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "bin.types module must be loaded in project root";
+
+ // Register target types.
+ //
+ // Note that certain platform-specific and toolchain-specific types are
+ // registered in bin and bin.ld.
+ //
+ // Note also that it would make sense to configure their default
+ // "installability" here but that requires the knowledge of the platform
+ // in some cases. So we do it all in bin for now. One way to support
+ // both use-cases would be to detect if we are loaded after bin.guess
+ // and then decide whether to do it here or delay to bin.
+ //
+ // NOTE: remember to update the documentation if changing anything here!
+ //
+ rs.insert_target_type<obj> ();
+ rs.insert_target_type<obje> ();
+ rs.insert_target_type<obja> ();
+ rs.insert_target_type<objs> ();
+
+ rs.insert_target_type<bmi> ();
+ rs.insert_target_type<bmie> ();
+ rs.insert_target_type<bmia> ();
+ rs.insert_target_type<bmis> ();
+
+ rs.insert_target_type<hbmi> ();
+ rs.insert_target_type<hbmie> ();
+ rs.insert_target_type<hbmia> ();
+ rs.insert_target_type<hbmis> ();
+
+ rs.insert_target_type<libul> ();
+ rs.insert_target_type<libue> ();
+ rs.insert_target_type<libua> ();
+ rs.insert_target_type<libus> ();
+
+ rs.insert_target_type<lib> ();
+ rs.insert_target_type<liba> ();
+ rs.insert_target_type<libs> ();
+
+ // Register the def{} target type. Note that we do it here since it is
+ // input and can be specified unconditionally (i.e., not only when
+ // building for Windows).
+ //
+ rs.insert_target_type<def> ();
+
+ return true;
+ }
+
void
functions (function_map&); // functions.cxx
@@ -195,6 +266,8 @@ namespace build2
//
const target_triplet* tgt (nullptr);
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (ctx.var_pool["config.bin.target"]);
// We first see if the value was specified via the configuration
@@ -231,9 +304,9 @@ namespace build2
//
if (!hint && config_sub)
{
- s = run<string> (3,
- *config_sub,
- s.c_str (),
+ s = run<string> (ctx,
+ 3,
+ *config_sub, s.c_str (),
[] (string& l, bool) {return move (l);});
l5 ([&]{trace << "config.sub target: '" << s << "'";});
}
@@ -272,6 +345,8 @@ namespace build2
//
const string* pat (nullptr);
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (ctx.var_pool["config.bin.pattern"]);
// We first see if the value was specified via the configuration
@@ -440,53 +515,22 @@ namespace build2
tracer trace ("bin::init");
l5 ([&]{trace << "for " << bs;});
- // Load bin.config.
+ // Load bin.{config,types}.
//
load_module (rs, rs, "bin.config", loc, extra.hints);
+ load_module (rs, rs, "bin.types", loc);
// Cache some config values we will be needing below.
//
const target_triplet& tgt (cast<target_triplet> (rs["bin.target"]));
- // Register target types and configure their default "installability".
+ // Configure target type default "installability". Also register
+ // additional platform-specific types.
//
bool install_loaded (cast_false<bool> (rs["install.loaded"]));
{
using namespace install;
- if (first)
- {
- rs.insert_target_type<obj> ();
- rs.insert_target_type<obje> ();
- rs.insert_target_type<obja> ();
- rs.insert_target_type<objs> ();
-
- rs.insert_target_type<bmi> ();
- rs.insert_target_type<bmie> ();
- rs.insert_target_type<bmia> ();
- rs.insert_target_type<bmis> ();
-
- rs.insert_target_type<hbmi> ();
- rs.insert_target_type<hbmie> ();
- rs.insert_target_type<hbmia> ();
- rs.insert_target_type<hbmis> ();
-
- rs.insert_target_type<libul> ();
- rs.insert_target_type<libue> ();
- rs.insert_target_type<libua> ();
- rs.insert_target_type<libus> ();
-
- rs.insert_target_type<lib> ();
- rs.insert_target_type<liba> ();
- rs.insert_target_type<libs> ();
-
- // Register the def{} target type. Note that we do it here since it
- // is input and can be specified unconditionally (i.e., not only
- // when building for Windows).
- //
- rs.insert_target_type<def> ();
- }
-
// Note: libu*{} members are not installable.
//
if (install_loaded)
@@ -536,6 +580,8 @@ namespace build2
if (tgt.cpu == "wasm32" || tgt.cpu == "wasm64")
{
+ // @@ TODO: shouldn't this be wrapped in if(first) somehow?
+
const target_type& wasm (
rs.derive_target_type(
target_type {
@@ -546,7 +592,7 @@ namespace build2
nullptr, /* default_extension */
&target_pattern_fix<wasm_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
- &file_search,
+ &target_search, // Note: don't look for an existing file.
target_type::flag::none}));
if (install_loaded)
@@ -598,6 +644,18 @@ namespace build2
if (rs.find_module ("dist"))
{
+      // Note that without custom dist rules in setups along the following
+ // lines the source file will be unreachable by dist:
+ //
+ // lib{foo}: obj{foo}
+ // obja{foo}: cxx{foo}
+ // objs{foo}: cxx{foo}
+ //
+ r.insert<obj> (dist_id, 0, "bin.obj", obj_);
+ r.insert<bmi> (dist_id, 0, "bin.bmi", obj_);
+ r.insert<hbmi> (dist_id, 0, "bin.hbmi", obj_);
+ r.insert<libul> (dist_id, 0, "bin.libul", libul_);
+
r.insert<lib> (dist_id, 0, "bin.lib", lib_);
}
}
@@ -624,7 +682,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.ar");
vp.insert<path> ("config.bin.ranlib");
@@ -682,7 +743,7 @@ namespace build2
nullptr,
config::save_default_commented)));
- const ar_info& ari (guess_ar (ar, ranlib, pat.paths));
+ const ar_info& ari (guess_ar (rs.ctx, ar, ranlib, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -798,7 +859,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.ld");
}
@@ -830,7 +894,7 @@ namespace build2
path (apply_pattern (ld_d, pat.pattern)),
config::save_default_commented)));
- const ld_info& ldi (guess_ld (ld, pat.paths));
+ const ld_info& ldi (guess_ld (rs.ctx, ld, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -914,6 +978,8 @@ namespace build2
if (lid == "msvc")
{
+ // @@ TODO: shouldn't this be wrapped in if(first) somehow?
+
const target_type& pdb (
rs.derive_target_type(
target_type {
@@ -924,7 +990,7 @@ namespace build2
nullptr, /* default_extension */
&target_pattern_fix<pdb_ext>,
&target_print_0_ext_verb, // Fixed extension, no use printing.
- &file_search,
+ &target_search, // Note: don't look for an existing file.
target_type::flag::none}));
if (cast_false<bool> (rs["install.loaded"]))
@@ -956,7 +1022,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.rc");
}
@@ -988,7 +1057,7 @@ namespace build2
path (apply_pattern (rc_d, pat.pattern)),
config::save_default_commented)));
- const rc_info& rci (guess_rc (rc, pat.paths));
+ const rc_info& rci (guess_rc (rs.ctx, rc, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -1055,7 +1124,10 @@ namespace build2
//
if (first)
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
vp.insert<path> ("config.bin.nm");
}
@@ -1097,7 +1169,7 @@ namespace build2
path (apply_pattern (nm_d, pat.pattern)),
config::save_default_commented)));
- const nm_info& nmi (guess_nm (nm, pat.paths));
+ const nm_info& nmi (guess_nm (rs.ctx, nm, pat.paths));
// If this is a configuration with new values, then print the report
// at verbosity level 2 and up (-v).
@@ -1182,8 +1254,8 @@ namespace build2
// changing anything here.
{"bin.vars", nullptr, vars_init},
+ {"bin.types", nullptr, types_init},
{"bin.config", nullptr, config_init},
- {"bin", nullptr, init},
{"bin.ar.config", nullptr, ar_config_init},
{"bin.ar", nullptr, ar_init},
{"bin.ld.config", nullptr, ld_config_init},
@@ -1193,6 +1265,7 @@ namespace build2
{"bin.nm.config", nullptr, nm_config_init},
{"bin.nm", nullptr, nm_init},
{"bin.def", nullptr, def_init},
+ {"bin", nullptr, init},
{nullptr, nullptr, nullptr}
};
diff --git a/libbuild2/bin/init.hxx b/libbuild2/bin/init.hxx
index 4eb0f10..b163bf5 100644
--- a/libbuild2/bin/init.hxx
+++ b/libbuild2/bin/init.hxx
@@ -20,9 +20,11 @@ namespace build2
// Submodules:
//
// `bin.vars` -- registers some variables.
+ // `bin.types` -- registers target types.
// `bin.config` -- loads bin.vars and sets some variables.
- // `bin` -- loads bin.config and registers target types and
- // rules.
+ // `bin` -- loads bin.{types,config} and registers rules and
+ // functions.
+ //
// `bin.ar.config` -- loads bin.config and registers/sets more variables.
// `bin.ar` -- loads bin and bin.ar.config.
//
diff --git a/libbuild2/bin/rule.cxx b/libbuild2/bin/rule.cxx
index 38a3d98..c7147bf 100644
--- a/libbuild2/bin/rule.cxx
+++ b/libbuild2/bin/rule.cxx
@@ -17,11 +17,29 @@ namespace build2
{
namespace bin
{
+ // Search for an existing (declared real) member and match it if found.
+ //
+ static void
+ dist_match (action a, target& t, const target_type& tt)
+ {
+ if (const target* m = search_existing (t.ctx, tt, t.dir, t.out, t.name))
+ {
+ // Only a real target declaration can have prerequisites (which is
+ // the reason we are doing this).
+ //
+ if (m->decl == target_decl::real)
+ match_sync (a, *m);
+ }
+ }
+
// obj_rule
//
bool obj_rule::
match (action a, target& t) const
{
+ if (a.meta_operation () == dist_id)
+ return true;
+
const char* n (t.dynamic_type->name); // Ignore derived type.
fail << diag_doing (a, t) << " target group" <<
@@ -30,25 +48,140 @@ namespace build2
}
recipe obj_rule::
- apply (action, target&) const {return empty_recipe;}
+ apply (action a, target& t) const
+ {
+ // We only get here for dist.
+ //
+ const target_type* ett (nullptr);
+ const target_type* att (nullptr);
+ const target_type* stt (nullptr);
+
+ if (t.is_a<obj> ())
+ {
+ ett = &obje::static_type;
+ att = &obja::static_type;
+ stt = &objs::static_type;
+ }
+ else if (t.is_a<bmi> ())
+ {
+ ett = &bmie::static_type;
+ att = &bmia::static_type;
+ stt = &bmis::static_type;
+ }
+ else if (t.is_a<hbmi> ())
+ {
+ ett = &hbmie::static_type;
+ att = &hbmia::static_type;
+ stt = &hbmis::static_type;
+ }
+ else
+ assert (false);
+
+ dist_match (a, t, *ett);
+ dist_match (a, t, *att);
+ dist_match (a, t, *stt);
+
+ // Delegate to the default dist rule to match prerequisites.
+ //
+ return dist::rule::apply (a, t);
+ }
// libul_rule
//
bool libul_rule::
- match (action a, target& t) const
+ match (action, target&) const
{
- fail << diag_doing (a, t) << " target group" <<
- info << "explicitly select libua{} or libus{} member" << endf;
+ return true;
}
recipe libul_rule::
- apply (action, target&) const {return empty_recipe;}
+ apply (action a, target& t) const
+ {
+ if (a.meta_operation () == dist_id)
+ {
+ dist_match (a, t, libua::static_type);
+ dist_match (a, t, libus::static_type);
+
+ // Delegate to the default dist rule to match prerequisites.
+ //
+ return dist::rule::apply (a, t);
+ }
+
+ // Pick one of the members. First looking for the one already matched.
+ //
+ const target* m (nullptr);
+
+ const libus* ls (nullptr);
+ {
+ ls = search_existing<libus> (t.ctx, t.dir, t.out, t.name);
+
+ if (ls != nullptr && ls->matched (a))
+ m = ls;
+ }
+
+ const libua* la (nullptr);
+ if (m == nullptr)
+ {
+ la = search_existing<libua> (t.ctx, t.dir, t.out, t.name);
+
+ if (la != nullptr && la->matched (a))
+ m = la;
+ }
+
+ if (m == nullptr)
+ {
+ const scope& bs (t.base_scope ());
+
+ lmembers lm (link_members (*bs.root_scope ()));
+
+ if (lm.s && lm.a)
+ {
+ // Use the bin.exe.lib order as a heuristics to pick the library
+ // (i.e., the most likely utility library to be built is the one
+ // most likely to be linked).
+ //
+ lorder lo (link_order (bs, otype::e));
+
+ (lo == lorder::s_a || lo == lorder::s ? lm.a : lm.s) = false;
+ }
+
+ if (lm.s)
+ m = ls != nullptr ? ls : &search<libus> (t, t.dir, t.out, t.name);
+ else
+ m = la != nullptr ? la : &search<libua> (t, t.dir, t.out, t.name);
+ }
+
+ // Save the member we picked in case others (e.g., $x.lib_poptions())
+ // need this information.
+ //
+ t.prerequisite_targets[a].push_back (m);
+
+ if (match_sync (a, *m, unmatch::safe).first)
+ return noop_recipe;
+
+ return [] (action a, const target& t)
+ {
+ const target* m (t.prerequisite_targets[a].back ());
+
+ // For update always return unchanged so we are consistent whether we
+        // managed to unmatch or not. Note that for clean we may get postponed
+ // so let's return the actual target state.
+ //
+ target_state r (execute_sync (a, *m));
+ return a == perform_update_id ? target_state::unchanged : r;
+ };
+ }
// lib_rule
//
// The whole logic is pretty much as if we had our two group members as
// our prerequisites.
//
+ // Note also that unlike the obj and libul rules above, we don't need to
+ // delegate to the default dist rule since any group prerequisites will be
+ // matched by one of the members (the key difference here is that unlike
+ // those rules, we insert and match members unconditionally).
+ //
bool lib_rule::
match (action a, target& xt) const
{
diff --git a/libbuild2/bin/rule.hxx b/libbuild2/bin/rule.hxx
index 8bc30c7..9dd1d14 100644
--- a/libbuild2/bin/rule.hxx
+++ b/libbuild2/bin/rule.hxx
@@ -9,6 +9,8 @@
#include <libbuild2/rule.hxx>
+#include <libbuild2/dist/rule.hxx>
+
#include <libbuild2/bin/export.hxx>
namespace build2
@@ -18,7 +20,10 @@ namespace build2
// "Fail rule" for obj{} and [h]bmi{} that issues diagnostics if someone
// tries to build these groups directly.
//
- class obj_rule: public simple_rule
+ // Note that for dist it acts as a pass-through to all existing (declared)
+ // members.
+ //
+ class obj_rule: public dist::rule
{
public:
obj_rule () {}
@@ -30,12 +35,22 @@ namespace build2
apply (action, target&) const override;
};
- // "Fail rule" for libul{} that issues diagnostics if someone tries to
- // build this group directly.
+ // This rule picks, matches, and unmatches (if possible) a member for the
+ // purpose of making its metadata (for example, library's poptions, if
+ // it's one of the cc libraries) available.
+ //
+ // The underlying idea here is that someone else (e.g., cc::link_rule)
+ // makes a more informed choice and we piggy back on that decision,
+ // falling back to making our own based on bin.lib and bin.exe.lib. Note
+ // that for update this rule always returns target_state::unchanged.
//
- class libul_rule: public simple_rule
+ // Note also that for dist it acts as a pass-through to all existing
+ // (declared) members.
+ //
+ class libul_rule: public dist::rule
{
public:
+ explicit
libul_rule () {}
virtual bool
@@ -47,6 +62,8 @@ namespace build2
// Pass-through to group members rule, similar to alias.
//
+ // Note that for dist it always passes to both members.
+ //
class LIBBUILD2_BIN_SYMEXPORT lib_rule: public simple_rule
{
public:
diff --git a/libbuild2/bin/target.cxx b/libbuild2/bin/target.cxx
index 38572ef..7e4875a 100644
--- a/libbuild2/bin/target.cxx
+++ b/libbuild2/bin/target.cxx
@@ -374,7 +374,7 @@ namespace build2
&target_extension_var<nullptr>,
&target_pattern_var<nullptr>,
nullptr,
- &file_search,
+ &target_search, // Note: not _file(); don't look for an existing file.
target_type::flag::none
};
@@ -387,7 +387,7 @@ namespace build2
&target_extension_var<nullptr>,
&target_pattern_var<nullptr>,
nullptr,
- &file_search,
+ &target_search, // Note: not _file(); don't look for an existing file.
target_type::flag::none
};
@@ -452,7 +452,7 @@ namespace build2
&target_extension_var<nullptr>,
&target_pattern_var<nullptr>,
nullptr,
- &file_search,
+ &target_search, // Note: not _file(); don't look for an existing file.
target_type::flag::none
};
diff --git a/libbuild2/bin/target.hxx b/libbuild2/bin/target.hxx
index 89e0f17..8f2a92e 100644
--- a/libbuild2/bin/target.hxx
+++ b/libbuild2/bin/target.hxx
@@ -71,6 +71,8 @@ namespace build2
static const target_type static_type;
};
+ // Note: this is a "choice" target group.
+ //
class LIBBUILD2_BIN_SYMEXPORT obj: public target
{
public:
@@ -219,6 +221,8 @@ namespace build2
static const target_type static_type;
};
+ // Note: this is a "choice" target group (similar to obj{}).
+ //
class LIBBUILD2_BIN_SYMEXPORT bmi: public target
{
public:
@@ -232,6 +236,8 @@ namespace build2
static const target_type static_type;
};
+ // Note: this is a "choice" target group (similar to bmi{} and obj{}).
+ //
class LIBBUILD2_BIN_SYMEXPORT hbmi: public target
{
public:
@@ -340,6 +346,11 @@ namespace build2
static const target_type static_type;
};
+ // Note: this is a "choice" target group.
+ //
+ // @@ Ideally this shouldn't derive from mtime_target (via libx). Maybe
+ // get rid of libx?
+ //
class LIBBUILD2_BIN_SYMEXPORT libul: public libx
{
public:
@@ -401,6 +412,21 @@ namespace build2
virtual group_view
group_members (action) const override;
+ // Match options for the install operation on the liba{}/libs{} and
+ // libua{}/libus{} target types (note: not lib{}/libul{} nor libue{}).
+ //
+ // If only install_runtime option is specified, then only install the
+ // runtime files omitting everything buildtime (headers, pkg-config
+ // files, shared library version-related symlinks, etc).
+ //
+ // Note that it's either runtime-only or runtime and buildtime (i.e.,
+ // everything), so match with install_all instead of install_buildtime
+ // (the latter is only useful in the rule implementations).
+ //
+ static constexpr uint64_t option_install_runtime = 0x01;
+ static constexpr uint64_t option_install_buildtime = 0x02;
+ static constexpr uint64_t option_install_all = match_extra::all_options;
+
public:
static const target_type static_type;
};
diff --git a/libbuild2/bin/utility.cxx b/libbuild2/bin/utility.cxx
index cb06287..a03ea50 100644
--- a/libbuild2/bin/utility.cxx
+++ b/libbuild2/bin/utility.cxx
@@ -57,6 +57,11 @@ namespace build2
// prefer static over shared since it could be faster (but I am sure
// someone will probably want this configurable).
//
+ // Maybe we should use the bin.exe.lib order as a heuristics (i.e.,
+ // the most likely utility library to be built is the one most likely
+ // to be linked)? Will need the variables rs-only, similar to
+ // bin.lib, which probably is a good thing. See also libul_rule.
+ //
if (li.type == otype::e)
{
// Utility libraries are project-local which means the primarily
@@ -84,7 +89,9 @@ namespace build2
// Make sure group members are resolved.
//
group_view gv (resolve_members (a, l));
- assert (gv.members != nullptr);
+
+ if (gv.members == nullptr)
+ fail << "group " << l << " has no members";
pair<otype, bool> p (
link_member (lmembers {l.a != nullptr, l.s != nullptr}, li.order));
diff --git a/libbuild2/build/script/builtin-options.cxx b/libbuild2/build/script/builtin-options.cxx
index 6fae0b4..dba3c59 100644
--- a/libbuild2/build/script/builtin-options.cxx
+++ b/libbuild2/build/script/builtin-options.cxx
@@ -19,6 +19,7 @@
#include <utility>
#include <ostream>
#include <sstream>
+#include <cstring>
namespace build2
{
@@ -53,10 +54,31 @@ namespace build2
struct parser<bool>
{
static void
- parse (bool& x, scanner& s)
+ parse (bool& x, bool& xs, scanner& s)
{
- s.next ();
- x = true;
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
}
};
@@ -166,6 +188,56 @@ namespace build2
}
};
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
template <typename X, typename T, T X::*M>
void
thunk (X& x, scanner& s)
@@ -173,6 +245,14 @@ namespace build2
parser<T>::parse (x.*M, s);
}
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
template <typename X, typename T, T X::*M, bool X::*S>
void
thunk (X& x, scanner& s)
@@ -184,7 +264,6 @@ namespace build2
}
#include <map>
-#include <cstring>
namespace build2
{
@@ -210,7 +289,15 @@ namespace build2
adhoc_ (),
cwd_ (),
cwd_specified_ (false),
- drop_cycles_ ()
+ drop_cycles_ (),
+ target_what_ (),
+ target_what_specified_ (false),
+ target_default_type_ (),
+ target_default_type_specified_ (false),
+ target_extension_type_ (),
+ target_extension_type_specified_ (false),
+ target_cwd_ (),
+ target_cwd_specified_ (false)
{
}
@@ -306,12 +393,24 @@ namespace build2
&::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::default_type_,
&depdb_dyndep_options::default_type_specified_ >;
_cli_depdb_dyndep_options_map_["--adhoc"] =
- &::build2::build::cli::thunk< depdb_dyndep_options, bool, &depdb_dyndep_options::adhoc_ >;
+ &::build2::build::cli::thunk< depdb_dyndep_options, &depdb_dyndep_options::adhoc_ >;
_cli_depdb_dyndep_options_map_["--cwd"] =
&::build2::build::cli::thunk< depdb_dyndep_options, dir_path, &depdb_dyndep_options::cwd_,
&depdb_dyndep_options::cwd_specified_ >;
_cli_depdb_dyndep_options_map_["--drop-cycles"] =
- &::build2::build::cli::thunk< depdb_dyndep_options, bool, &depdb_dyndep_options::drop_cycles_ >;
+ &::build2::build::cli::thunk< depdb_dyndep_options, &depdb_dyndep_options::drop_cycles_ >;
+ _cli_depdb_dyndep_options_map_["--target-what"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::target_what_,
+ &depdb_dyndep_options::target_what_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-default-type"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, string, &depdb_dyndep_options::target_default_type_,
+ &depdb_dyndep_options::target_default_type_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-extension-type"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, map<string, string>, &depdb_dyndep_options::target_extension_type_,
+ &depdb_dyndep_options::target_extension_type_specified_ >;
+ _cli_depdb_dyndep_options_map_["--target-cwd"] =
+ &::build2::build::cli::thunk< depdb_dyndep_options, dir_path, &depdb_dyndep_options::target_cwd_,
+ &depdb_dyndep_options::target_cwd_specified_ >;
}
};
diff --git a/libbuild2/build/script/builtin-options.hxx b/libbuild2/build/script/builtin-options.hxx
index 590d3b2..a8c3440 100644
--- a/libbuild2/build/script/builtin-options.hxx
+++ b/libbuild2/build/script/builtin-options.hxx
@@ -174,6 +174,66 @@ namespace build2
void
drop_cycles (const bool&);
+ const string&
+ target_what () const;
+
+ string&
+ target_what ();
+
+ void
+ target_what (const string&);
+
+ bool
+ target_what_specified () const;
+
+ void
+ target_what_specified (bool);
+
+ const string&
+ target_default_type () const;
+
+ string&
+ target_default_type ();
+
+ void
+ target_default_type (const string&);
+
+ bool
+ target_default_type_specified () const;
+
+ void
+ target_default_type_specified (bool);
+
+ const map<string, string>&
+ target_extension_type () const;
+
+ map<string, string>&
+ target_extension_type ();
+
+ void
+ target_extension_type (const map<string, string>&);
+
+ bool
+ target_extension_type_specified () const;
+
+ void
+ target_extension_type_specified (bool);
+
+ const dir_path&
+ target_cwd () const;
+
+ dir_path&
+ target_cwd ();
+
+ void
+ target_cwd (const dir_path&);
+
+ bool
+ target_cwd_specified () const;
+
+ void
+ target_cwd_specified (bool);
+
// Implementation details.
//
protected:
@@ -201,6 +261,14 @@ namespace build2
dir_path cwd_;
bool cwd_specified_;
bool drop_cycles_;
+ string target_what_;
+ bool target_what_specified_;
+ string target_default_type_;
+ bool target_default_type_specified_;
+ map<string, string> target_extension_type_;
+ bool target_extension_type_specified_;
+ dir_path target_cwd_;
+ bool target_cwd_specified_;
};
}
}
diff --git a/libbuild2/build/script/builtin-options.ixx b/libbuild2/build/script/builtin-options.ixx
index ea06a0f..20847c2 100644
--- a/libbuild2/build/script/builtin-options.ixx
+++ b/libbuild2/build/script/builtin-options.ixx
@@ -233,6 +233,126 @@ namespace build2
{
this->drop_cycles_ = x;
}
+
+ inline const string& depdb_dyndep_options::
+ target_what () const
+ {
+ return this->target_what_;
+ }
+
+ inline string& depdb_dyndep_options::
+ target_what ()
+ {
+ return this->target_what_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_what (const string& x)
+ {
+ this->target_what_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_what_specified () const
+ {
+ return this->target_what_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_what_specified (bool x)
+ {
+ this->target_what_specified_ = x;
+ }
+
+ inline const string& depdb_dyndep_options::
+ target_default_type () const
+ {
+ return this->target_default_type_;
+ }
+
+ inline string& depdb_dyndep_options::
+ target_default_type ()
+ {
+ return this->target_default_type_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_default_type (const string& x)
+ {
+ this->target_default_type_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_default_type_specified () const
+ {
+ return this->target_default_type_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_default_type_specified (bool x)
+ {
+ this->target_default_type_specified_ = x;
+ }
+
+ inline const map<string, string>& depdb_dyndep_options::
+ target_extension_type () const
+ {
+ return this->target_extension_type_;
+ }
+
+ inline map<string, string>& depdb_dyndep_options::
+ target_extension_type ()
+ {
+ return this->target_extension_type_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_extension_type (const map<string, string>& x)
+ {
+ this->target_extension_type_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_extension_type_specified () const
+ {
+ return this->target_extension_type_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_extension_type_specified (bool x)
+ {
+ this->target_extension_type_specified_ = x;
+ }
+
+ inline const dir_path& depdb_dyndep_options::
+ target_cwd () const
+ {
+ return this->target_cwd_;
+ }
+
+ inline dir_path& depdb_dyndep_options::
+ target_cwd ()
+ {
+ return this->target_cwd_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_cwd (const dir_path& x)
+ {
+ this->target_cwd_ = x;
+ }
+
+ inline bool depdb_dyndep_options::
+ target_cwd_specified () const
+ {
+ return this->target_cwd_specified_;
+ }
+
+ inline void depdb_dyndep_options::
+ target_cwd_specified (bool x)
+ {
+ this->target_cwd_specified_ = x;
+ }
}
}
}
diff --git a/libbuild2/build/script/builtin.cli b/libbuild2/build/script/builtin.cli
index 7d0936f..5aea034 100644
--- a/libbuild2/build/script/builtin.cli
+++ b/libbuild2/build/script/builtin.cli
@@ -17,8 +17,8 @@ namespace build2
//
class depdb_dyndep_options
{
- // Note that --byproduct, if any, must be the first option and is
- // handled ad hoc, kind of as a sub-command.
+ // Note that --byproduct or --dyn-target, if any, must be the first
+ // option and is handled ad hoc.
//
// Similarly, --update-{include,exclude} are handled ad hoc and must
// be literals, similar to the -- separator. They specify prerequisite
@@ -40,23 +40,48 @@ namespace build2
// with support for generated files (and thus -I) at least in the make
// format where we use relative paths for non-existent files.
//
+ // Currently supported dependency formats (--format) are `make`
+ // (default) and `lines`.
+ //
+ // The `make` format is the make dependency declaration in the
+ // `<target>...: [<prerequisite>...]` form. In the non-byproduct mode
+ // a relative prerequisite path is considered non-existent.
+ //
+ // The `lines` format lists targets and/or prerequisites one per line.
+ // If the --dyn-target option is specified then the target list is
+ // expected to come first separated from the prerequisites list with a
+ // blank line. If there are no prerequisites, then the blank line can
+ // be omitted. If the --dyn-target option is not specified, then all
+ // lines are treated as prerequisites and there should be no blank
+ // lines. In the non-byproduct mode a prerequisite line that starts
+ // with a leading space is considered a non-existent prerequisite.
+ // Currently only relative non-existent prerequisites are supported.
+ // Finally, in this mode, if the prerequisite is syntactically a
+ // directory (that is, it ends with a trailing directory separator),
+ // then it is added as fsdir{}. This can be used to handle situations
+ // where the dynamic targets are placed into subdirectories.
+ //
// Note on naming: whenever we (may) have two options, one for target
// and the other for prerequisite, we omit "prerequisite" as that's
// what we extract by default and most commonly. For example:
//
- // --what --what-target
- // --default-type --default-target-type
+ // --what --target-what
+ // --default-type --target-default-type
//
path --file; // Read from file rather than stdin.
- string --format; // Dependency format: make (default).
+ string --format; // Dependency format: `make` (default),
+ // or `lines`.
- string --what; // Dependency kind, e.g., "header".
+ // Dynamic dependency extraction options.
+ //
+ string --what; // Prerequisite kind, e.g., "header".
- dir_paths --include-path|-I; // Search paths for generated files.
+ dir_paths --include-path|-I; // Search paths for generated
+ // prerequisites.
- string --default-type; // Default prerequisite type to use
- // if none could be derived from ext.
+ string --default-type; // Default prerequisite type to use if
+ // none could be derived from extension.
bool --adhoc; // Treat dynamically discovered
// prerequisites as ad hoc (so they
@@ -64,14 +89,39 @@ namespace build2
// normal mode).
dir_path --cwd; // Builtin's working directory used
- // to complete relative paths (only
- // in --byproduct mode).
+ // to complete relative paths of
+ // prerequisites (only in --byproduct
+ // mode, lines format for existing
+ // paths).
bool --drop-cycles; // Drop prerequisites that are also
// targets. Only use if you are sure
// such cycles are harmless, that is,
// the output is not affected by such
// prerequisites' content.
+
+ // Dynamic target extraction options.
+ //
+ // This functionality is enabled with the --dyn-target option. Only
+ // the make format is supported, where the listed targets are added as
+ // ad hoc group members (unless already specified as static members).
+ // This functionality is not available in the byproduct mode.
+ //
+ string --target-what; // Target kind, e.g., "source".
+
+ string --target-default-type; // Default target type to use if none
+ // could be derived from extension.
+
+ map<string, string> // Extension to target type mapping in
+ --target-extension-type; // the <ext>=<type> form, for example,
+ // h=hxx. This mapping is considered
+ // before attempting to automatically
+ // map the extension and so can be used
+ // to resolve ambiguities.
+
+ dir_path --target-cwd; // Builtin's working directory used to
+ // complete relative paths of targets.
+
};
}
}
diff --git a/libbuild2/build/script/lexer+for-loop.test.testscript b/libbuild2/build/script/lexer+for-loop.test.testscript
new file mode 100644
index 0000000..3f8e6b5
--- /dev/null
+++ b/libbuild2/build/script/lexer+for-loop.test.testscript
@@ -0,0 +1,188 @@
+# file : libbuild2/build/script/lexer+for-loop.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+test.arguments = for-loop
+
+: redirect
+:
+{
+ : pass
+ :
+ $* <"cmd <| 1>|" >>EOO
+ 'cmd'
+ <|
+ '1'
+ >|
+ <newline>
+ EOO
+
+ : null
+ :
+ $* <"cmd <- 1>-" >>EOO
+ 'cmd'
+ <-
+ '1'
+ >-
+ <newline>
+ EOO
+
+ : trace
+ :
+ $* <"cmd 1>!" >>EOO
+ 'cmd'
+ '1'
+ >!
+ <newline>
+ EOO
+
+ : merge
+ :
+ $* <"cmd 1>&2" >>EOO
+ 'cmd'
+ '1'
+ >&
+ '2'
+ <newline>
+ EOO
+
+ : str
+ :
+ $* <"cmd <<<=a 1>>>?b" >>EOO
+ 'cmd'
+ <<<=
+ 'a'
+ '1'
+ >>>?
+ 'b'
+ <newline>
+ EOO
+
+ : str-nn
+ :
+ $* <"cmd <<<=:a 1>>>?:b" >>EOO
+ 'cmd'
+ <<<=:
+ 'a'
+ '1'
+ >>>?:
+ 'b'
+ <newline>
+ EOO
+
+ : str-nn-alias
+ :
+ $* <"cmd <<<:a 1>>>?:b" >>EOO
+ 'cmd'
+ <<<:
+ 'a'
+ '1'
+ >>>?:
+ 'b'
+ <newline>
+ EOO
+
+ : doc
+ :
+ $* <"cmd <<EOI 1>>EOO" >>EOO
+ 'cmd'
+ <<
+ 'EOI'
+ '1'
+ >>
+ 'EOO'
+ <newline>
+ EOO
+
+ : doc-nn
+ :
+ $* <"cmd <<:EOI 1>>?:EOO" >>EOO
+ 'cmd'
+ <<:
+ 'EOI'
+ '1'
+ >>?:
+ 'EOO'
+ <newline>
+ EOO
+
+ : file-cmp
+ :
+ $* <"cmd <=in >?out 2>?err" >>EOO
+ 'cmd'
+ <=
+ 'in'
+ >?
+ 'out'
+ '2'
+ >?
+ 'err'
+ <newline>
+ EOO
+
+ : file-write
+ :
+ $* <"cmd >=out 2>+err" >>EOO
+ 'cmd'
+ >=
+ 'out'
+ '2'
+ >+
+ 'err'
+ <newline>
+ EOO
+}
+
+: cleanup
+:
+{
+ : always
+ :
+ $* <"cmd &file" >>EOO
+ 'cmd'
+ &
+ 'file'
+ <newline>
+ EOO
+
+ : maybe
+ :
+ $* <"cmd &?file" >>EOO
+ 'cmd'
+ &?
+ 'file'
+ <newline>
+ EOO
+
+ : never
+ :
+ $* <"cmd &!file" >>EOO
+ 'cmd'
+ &!
+ 'file'
+ <newline>
+ EOO
+}
+
+: for
+:
+{
+ : form-1
+ :
+ $* <"for x: a" >>EOO
+ 'for'
+ 'x'
+ :
+ 'a'
+ <newline>
+ EOO
+
+ : form-3
+ :
+ $* <"for <<<a x" >>EOO
+ 'for'
+ <<<
+ 'a'
+ 'x'
+ <newline>
+ EOO
+}
diff --git a/libbuild2/build/script/lexer.cxx b/libbuild2/build/script/lexer.cxx
index d849ac9..e0d87fe 100644
--- a/libbuild2/build/script/lexer.cxx
+++ b/libbuild2/build/script/lexer.cxx
@@ -35,10 +35,7 @@ namespace build2
bool q (true); // quotes
if (!esc)
- {
- assert (!state_.empty ());
- esc = state_.top ().escapes;
- }
+ esc = current_state ().escapes;
switch (m)
{
@@ -78,6 +75,19 @@ namespace build2
s2 = " ";
break;
}
+ case lexer_mode::for_loop:
+ {
+ // Leading tokens of the for-loop. Like command_line but
+ // recognizes colon as a separator and lsbrace like value.
+ //
+ // Note that while sensing the form of the for-loop (`for x:...`
+ // vs `for x <...`) we need to make sure that the pre-parsed token
+ // types are valid for the execution phase.
+ //
+ s1 = ":=!|&<> $(#\t\n";
+ s2 = " == ";
+ break;
+ }
default:
{
// Recognize special variable names ($>, $<, $~).
@@ -94,7 +104,7 @@ namespace build2
}
assert (ps == '\0');
- state_.push (
+ mode_impl (
state {m, data, nullopt, false, false, ps, s, n, q, *esc, s1, s2});
}
@@ -103,12 +113,13 @@ namespace build2
{
token r;
- switch (state_.top ().mode)
+ switch (mode ())
{
case lexer_mode::command_line:
case lexer_mode::first_token:
case lexer_mode::second_token:
case lexer_mode::variable_line:
+ case lexer_mode::for_loop:
r = next_line ();
break;
default: return base_lexer::next ();
@@ -128,7 +139,7 @@ namespace build2
xchar c (get ());
uint64_t ln (c.line), cn (c.column);
- state st (state_.top ()); // Make copy (see first/second_token).
+ state st (current_state ()); // Make copy (see first/second_token).
lexer_mode m (st.mode);
auto make_token = [&sep, ln, cn] (type t)
@@ -141,9 +152,10 @@ namespace build2
//
if (st.lsbrace)
{
- assert (m == lexer_mode::variable_line);
+ assert (m == lexer_mode::variable_line ||
+ m == lexer_mode::for_loop);
- state_.top ().lsbrace = false; // Note: st is a copy.
+ current_state ().lsbrace = false; // Note: st is a copy.
if (c == '[' && (!st.lsbrace_unsep || !sep))
return make_token (type::lsbrace);
@@ -156,7 +168,7 @@ namespace build2
// we push any new mode (e.g., double quote).
//
if (m == lexer_mode::first_token || m == lexer_mode::second_token)
- state_.pop ();
+ expire_mode ();
// NOTE: remember to update mode() if adding new special characters.
@@ -167,7 +179,7 @@ namespace build2
// Expire variable value mode at the end of the line.
//
if (m == lexer_mode::variable_line)
- state_.pop ();
+ expire_mode ();
sep = true; // Treat newline as always separated.
return make_token (type::newline);
@@ -179,11 +191,20 @@ namespace build2
case '(': return make_token (type::lparen);
}
+ if (m == lexer_mode::for_loop)
+ {
+ switch (c)
+ {
+ case ':': return make_token (type::colon);
+ }
+ }
+
// Command line operator/separators.
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -205,7 +226,8 @@ namespace build2
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
if (optional<token> t = next_cmd_op (c, sep))
return move (*t);
diff --git a/libbuild2/build/script/lexer.hxx b/libbuild2/build/script/lexer.hxx
index 646d3b9..3f51493 100644
--- a/libbuild2/build/script/lexer.hxx
+++ b/libbuild2/build/script/lexer.hxx
@@ -24,9 +24,10 @@ namespace build2
enum
{
command_line = base_type::value_next,
- first_token, // Expires at the end of the token.
- second_token, // Expires at the end of the token.
- variable_line // Expires at the end of the line.
+ first_token, // Expires at the end of the token.
+ second_token, // Expires at the end of the token.
+ variable_line, // Expires at the end of the line.
+ for_loop // Used for sensing the for-loop leading tokens.
};
lexer_mode () = default;
@@ -67,6 +68,8 @@ namespace build2
static redirect_aliases_type redirect_aliases;
private:
+ using build2::script::lexer::mode; // Getter.
+
token
next_line ();
};
diff --git a/libbuild2/build/script/lexer.test.cxx b/libbuild2/build/script/lexer.test.cxx
index e496f94..d8733ba 100644
--- a/libbuild2/build/script/lexer.test.cxx
+++ b/libbuild2/build/script/lexer.test.cxx
@@ -35,6 +35,7 @@ namespace build2
else if (s == "second-token") m = lexer_mode::second_token;
else if (s == "variable-line") m = lexer_mode::variable_line;
else if (s == "variable") m = lexer_mode::variable;
+ else if (s == "for-loop") m = lexer_mode::for_loop;
else assert (false);
}
diff --git a/libbuild2/build/script/parser+command-if.test.testscript b/libbuild2/build/script/parser+command-if.test.testscript
index a18a885..8b19186 100644
--- a/libbuild2/build/script/parser+command-if.test.testscript
+++ b/libbuild2/build/script/parser+command-if.test.testscript
@@ -279,7 +279,7 @@
cmd
end
EOI
- buildfile:12:1: error: 'end' without preceding 'if'
+ buildfile:12:1: error: 'end' without preceding 'if', 'for', or 'while'
EOE
: before
diff --git a/libbuild2/build/script/parser+command-re-parse.test.testscript b/libbuild2/build/script/parser+command-re-parse.test.testscript
index 56e05b5..3dbdc16 100644
--- a/libbuild2/build/script/parser+command-re-parse.test.testscript
+++ b/libbuild2/build/script/parser+command-re-parse.test.testscript
@@ -1,18 +1,14 @@
# file : libbuild2/build/script/parser+command-re-parse.test.testscript
# license : MIT; see accompanying LICENSE file
-# @@ TMP
-#
-#\
: double-quote
:
$* <<EOI >>EOO
-x = [cmd_line] cmd \">-\" "'<-'"
+x = [cmdline] cmd \">-\" "'<-'"
$x
EOI
cmd '>-' '<-'
EOO
-#\
: literal-re-parse
:
diff --git a/libbuild2/build/script/parser+diag.test.testscript b/libbuild2/build/script/parser+diag.test.testscript
index 30eb859..504c9a4 100644
--- a/libbuild2/build/script/parser+diag.test.testscript
+++ b/libbuild2/build/script/parser+diag.test.testscript
@@ -19,17 +19,99 @@ $* <<EOI >>EOO
name: echo
EOO
-: diag
+: name-operation
:
-$* <<EOI >>~%EOO%
- echo abc
- cat abc
- diag copy >= $>
- cp <- $>
+$* <<EOI >>EOO
+ a = 'b'
EOI
- %diag: copy >= .+file\{driver\.\}%
+ name: update
EOO
+: preamble
+:
+{
+ : disambiguate
+ :
+ $* <<EOI >>~%EOO%
+ echo abc | set v
+ cat abc | set v
+ diag copy >= $>
+ cp <- $>
+ EOI
+ echo abc | set v
+ cat abc | set v
+ %diag: copy >= .+file\{driver\.\}%
+ EOO
+
+ : name
+ :
+ $* <<EOI >>EOO
+ n = foo
+ diag copy $n
+ cp $n $>
+ EOI
+ diag: copy foo
+ EOO
+
+ : quoted
+ :
+ $* <<EOI >'diag: foo'
+ f = foo
+ diag "$f"
+ EOI
+
+ : quoted-eval
+ :
+ $* <<EOI >'diag: foo'
+ f = foo
+ diag "($f)"
+ EOI
+
+ : temp_dir
+ :
+ {
+ test.options += -t
+
+ : no
+ :
+ $* <<EOI >false
+ f = foo
+ diag $f
+ f = $~/f
+ foo "$f"
+ EOI
+
+ : no-depdb
+ :
+ $* <<EOI >false
+ f = $~/f
+ depdb hash "$f"
+ diag $f
+ f = $~/f
+ foo "$f"
+ EOI
+
+ : yes
+ :
+ $* <<EOI >true
+ f = $~/f
+ diag $f
+ foo $f
+ EOI
+
+ : yes-depdb
+ :
+ $* <<EOI >true
+ f = $~/f
+ depdb hash "$f"
+ f = $~/t
+ diag $f
+ f = $~/f
+ foo "$f"
+ EOI
+ }
+}
+
: ambiguity
:
{
@@ -67,16 +149,6 @@ $* <<EOI >>~%EOO%
info: consider specifying it explicitly with the 'diag' recipe attribute
info: or provide custom low-verbosity diagnostics with the 'diag' builtin
EOE
-
- : none
- :
- $* <<EOI 2>>EOE != 0
- a = 'b'
- EOI
- buildfile:11:1: error: unable to deduce low-verbosity script diagnostics name
- info: consider specifying it explicitly with the 'diag' recipe attribute
- info: or provide custom low-verbosity diagnostics with the 'diag' builtin
- EOE
}
: inside-if
diff --git a/libbuild2/build/script/parser+expansion.test.testscript b/libbuild2/build/script/parser+expansion.test.testscript
index 086ec8f..eb99ae2 100644
--- a/libbuild2/build/script/parser+expansion.test.testscript
+++ b/libbuild2/build/script/parser+expansion.test.testscript
@@ -24,19 +24,15 @@ EOI
buildfile:12:5: info: while parsing string 'xy'a bc'
EOE
-# @@ TMP
-#
-#\
: invalid-redirect
:
$* <<EOI 2>>EOE != 0
-x = [cmd_line] "1>&a"
+x = [cmdline] "1>&a"
cmd $x
EOI
<string>:1:4: error: stdout merge redirect file descriptor must be 2
buildfile:12:5: info: while parsing string '1>&a'
EOE
-#\
: expansion-re-parse
:
diff --git a/libbuild2/build/script/parser+for.test.testscript b/libbuild2/build/script/parser+for.test.testscript
new file mode 100644
index 0000000..847b253
--- /dev/null
+++ b/libbuild2/build/script/parser+for.test.testscript
@@ -0,0 +1,656 @@
+# file : libbuild2/build/script/parser+for.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: form-1
+:
+: for x: ...
+:
+{
+ : for
+ :
+ {
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for
+ cmd
+ end
+ EOI
+ buildfile:11:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for x: a b
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : null
+ :
+ $* <<EOI >:''
+ for x: [null]
+ cmd $x
+ end
+ EOI
+
+ : empty
+ :
+ $* <<EOI >:''
+ for x:
+ cmd $x
+ end
+ EOI
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x: $vs
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : typed-values
+ :
+ $* <<EOI >>~%EOO%
+ for x: [dir_paths] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem-value
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: [strings] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : defined-var
+ :
+ $* <<EOI >>EOO
+ x = x
+
+ for x: a b
+ cmd $x
+ end
+
+ cmd $x
+ EOI
+ cmd a
+ cmd b
+ cmd b
+ EOO
+ }
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for x: a b
+ cmd1 $x # 1
+ if ($x == "a") # 2
+ cmd2 # 3
+ for y: x y
+ cmd3 # 4
+ end
+ else
+ cmd4 # 5
+ end
+ cmd5 # 6
+ end
+ cmd6 # 7
+ EOI
+ cmd1 a # 1 i1
+ ? true # 2 i1
+ cmd2 # 3 i1
+ cmd3 # 4 i1 i1
+ cmd3 # 4 i1 i2
+ cmd5 # 6 i1
+ cmd1 b # 1 i2
+ ? false # 2 i2
+ cmd4 # 5 i2
+ cmd5 # 6 i2
+ cmd6 # 7
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ EOI
+ buildfile:12:1: error: expected closing 'end'
+ EOE
+ }
+}
+
+: form-2
+:
+: ... | for x
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x != 0
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x | echo x
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x|echo x
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x && echo x
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&&echo x
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x | for x
+ cmd
+ end
+ EOI
+ buildfile:11:24: error: command expression involving for-loop
+ EOE
+
+ : expression-before-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x|for x
+ cmd
+ end
+ EOI
+ buildfile:11:22: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x &f
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&f
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x >a
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x>a
+ cmd
+ end
+ EOI
+ buildfile:11:19: error: output redirect in for-loop
+ EOE
+
+ : stdin-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x <a
+ cmd
+ end
+ EOI
+ buildfile:11:20: error: stdin is both piped and redirected
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for
+ cmd
+ end
+ EOI
+ buildfile:11:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ echo $vs | for x
+ cmd $x
+ end
+ EOI
+ echo a b | for x
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x [dir_path]
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x [dir_path]
+ EOO
+ }
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ echo 'a b' | for x # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ echo x y | for y # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end
+ cmd6 # 9
+ EOI
+ echo 'a b' | for x # 1
+ cmd6 # 9
+ EOO
+ }
+}
+
+: form-3
+:
+: for x <...
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a != 0
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a | echo x
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x|echo x
+ cmd
+ end
+ EOI
+ buildfile:11:9: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a && echo x
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&&echo x
+ cmd
+ end
+ EOI
+ buildfile:11:9: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && for x <a
+ cmd
+ end
+ EOI
+ buildfile:11:15: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a &f
+ cmd
+ end
+ EOI
+ buildfile:11:10: error: cleanup in for-loop
+ EOE
+
+ : cleanup-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for &f x <a
+ cmd
+ end
+ EOI
+ buildfile:11:5: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&f
+ cmd
+ end
+ EOI
+ buildfile:11:9: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ for x >a
+ cmd
+ end
+ EOI
+ buildfile:11:7: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for >a x
+ cmd
+ end
+ EOI
+ buildfile:11:5: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for x>a
+ cmd
+ end
+ EOI
+ buildfile:11:6: error: output redirect in for-loop
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a
+ cmd
+ end
+ EOI
+ buildfile:11:1: error: for: missing variable name
+ EOE
+
+ : quoted-opt
+ :
+ $* <<EOI >>EOO
+ o = -w
+ for "$o" x <'a b'
+ cmd $x
+ end
+ for "($o)" x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ for -w x <'a b'
+ EOO
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for -w x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x <$vs
+ cmd $x
+ end
+ EOI
+ for x b <a
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ for -w x [dir_path] <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x [dir_path] <'a b'
+ EOO
+ }
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for -w x <'a b' # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ for -w y <'x y' # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end
+ cmd6 # 9
+ EOI
+ for -w x <'a b' # 1
+ cmd6 # 9
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ EOI
+ buildfile:12:1: error: expected closing 'end'
+ EOE
+ }
+}
diff --git a/libbuild2/build/script/parser+while.test.testscript b/libbuild2/build/script/parser+while.test.testscript
new file mode 100644
index 0000000..5587291
--- /dev/null
+++ b/libbuild2/build/script/parser+while.test.testscript
@@ -0,0 +1,133 @@
+# file : libbuild2/build/script/parser+while.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: while
+:
+{
+ : true
+ :
+ $* <<EOI >>EOO
+ while ($v != "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? true
+ cmd ''
+ ? true
+ cmd a
+ ? false
+ EOO
+
+ : false
+ :
+ $* <<EOI >>EOO
+ while ($v == "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? false
+ EOO
+
+ : without-command
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd
+ end
+ EOI
+ buildfile:11:6: error: missing program
+ EOE
+}
+
+: end
+:
+{
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ while true
+ cmd
+ EOI
+ buildfile:13:1: error: expected closing 'end'
+ EOE
+}
+
+: elif
+:
+{
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ while false
+ elif true
+ cmd
+ end
+ end
+ EOI
+ buildfile:12:3: error: 'elif' without preceding 'if'
+ EOE
+}
+
+: nested
+:
+{
+ $* -l -r <<EOI >>EOO
+ while ($v != "aa") # 1
+ cmd1 "$v" # 2
+ if ($v == "a") # 3
+ cmd2 # 4
+ while ($v2 != "$v") # 5
+ cmd3 # 6
+ v2=$v
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ v = "$(v)a"
+ end
+ EOI
+ ? true # 1 i1
+ cmd1 '' # 2 i1
+ ? false # 3 i1
+ cmd4 # 7 i1
+ cmd5 # 8 i1
+ ? true # 1 i2
+ cmd1 a # 2 i2
+ ? true # 3 i2
+ cmd2 # 4 i2
+ ? true # 5 i2 i1
+ cmd3 # 6 i2 i1
+ ? false # 5 i2 i2
+ cmd5 # 8 i2
+ ? false # 1 i3
+ EOO
+}
+
+: contained
+:
+{
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ EOI
+ buildfile:12:1: error: expected closing 'end'
+ EOE
+}
+
+: var
+:
+$* <<EOI >>EOO
+while ($v1 != "a")
+ v1 = "$(v1)a"
+ v2 = "$v1"
+end
+cmd $v1
+EOI
+? true
+? false
+cmd a
+EOO
diff --git a/libbuild2/build/script/parser.cxx b/libbuild2/build/script/parser.cxx
index d9bfef6..3ecf23d 100644
--- a/libbuild2/build/script/parser.cxx
+++ b/libbuild2/build/script/parser.cxx
@@ -15,6 +15,8 @@
#include <libbuild2/algorithm.hxx>
#include <libbuild2/make-parser.hxx>
+#include <libbuild2/adhoc-rule-buildscript.hxx>
+
#include <libbuild2/script/run.hxx>
#include <libbuild2/build/script/lexer.hxx>
@@ -45,7 +47,7 @@ namespace build2
{
path_ = &pn;
- pre_parse_ = true;
+ top_pre_parse_ = pre_parse_ = true;
lexer l (is, *path_, line, lexer_mode::command_line);
set_lexer (&l);
@@ -59,7 +61,7 @@ namespace build2
pbase_ = scope_->src_path_;
- file_based_ = tt.is_a<file> ();
+ file_based_ = tt.is_a<file> () || tt.is_a<group> ();
perform_update_ = find (as.begin (), as.end (), perform_update_id) !=
as.end ();
@@ -91,15 +93,31 @@ namespace build2
info << "consider using 'depdb' builtin to track its result "
<< "changes";
- // Diagnose absent/ambigous script name.
+ // Diagnose computed variable expansions.
+ //
+ if (computed_var_)
+ fail (*computed_var_)
+ << "expansion of computed variable is only allowed in depdb "
+ << "preamble" <<
+ info << "consider using 'depdb' builtin to track its value "
+ << "changes";
+
+ // Diagnose absent/ambiguous script name. But try to deduce an absent
+ // name from the script operation first.
//
{
diag_record dr;
- if (!diag_name_ && !diag_line_)
+ if (!diag_name_ && diag_preamble_.empty ())
{
- dr << fail (s.start_loc)
- << "unable to deduce low-verbosity script diagnostics name";
+ if (as.size () == 1)
+ {
+ diag_name_ = make_pair (ctx->operation_table[as[0].operation ()],
+ location ());
+ }
+ else
+ dr << fail (s.start_loc)
+ << "unable to deduce low-verbosity script diagnostics name";
}
else if (diag_name2_)
{
@@ -125,20 +143,22 @@ namespace build2
// Save the script name or custom diagnostics line.
//
- assert (diag_name_.has_value () != diag_line_.has_value ());
+ assert (diag_name_.has_value () == diag_preamble_.empty ());
if (diag_name_)
s.diag_name = move (diag_name_->first);
else
- s.diag_line = move (diag_line_->first);
+ s.diag_preamble = move (diag_preamble_);
// Save the custom dependency change tracking lines, if present.
//
s.depdb_clear = depdb_clear_.has_value ();
+ s.depdb_value = depdb_value_;
if (depdb_dyndep_)
{
s.depdb_dyndep = depdb_dyndep_->second;
s.depdb_dyndep_byproduct = depdb_dyndep_byproduct_;
+ s.depdb_dyndep_dyn_target = depdb_dyndep_dyn_target_;
}
s.depdb_preamble = move (depdb_preamble_);
@@ -181,13 +201,27 @@ namespace build2
}
}
+ // Parse a logical line, handling the flow control constructs
+ // recursively.
+ //
+ // If the flow control construct type is specified, then this line is
+ // assumed to belong to such a construct.
+ //
void parser::
- pre_parse_line (token& t, type& tt, bool if_line)
+ pre_parse_line (token& t, type& tt, optional<line_type> fct)
{
+ // enter: next token is peeked at (type in tt)
+ // leave: newline
+
+ assert (!fct ||
+ *fct == line_type::cmd_if ||
+ *fct == line_type::cmd_while ||
+ *fct == line_type::cmd_for_stream ||
+ *fct == line_type::cmd_for_args);
+
// Determine the line type/start token.
//
- line_type lt (
- pre_parse_line_start (t, tt, lexer_mode::second_token));
+ line_type lt (pre_parse_line_start (t, tt, lexer_mode::second_token));
line ln;
@@ -220,22 +254,148 @@ namespace build2
break;
}
+ //
+ // See pre_parse_line_start() for details.
+ //
+ case line_type::cmd_for_args: assert (false); break;
+ case line_type::cmd_for_stream:
+ {
+ // First we need to sense the next few tokens and detect which
+ // form of the loop we are dealing with, the first (for x: ...)
+ // or the third (x <...) one. Note that the second form (... | for
+ // x) is handled separately.
+ //
+ // If the next token doesn't look like a variable name, then this
+ // is the third form. Otherwise, if colon follows the variable
+ // name, potentially after the attributes, then this is the first
+ // form and the third form otherwise.
+ //
+ // Note that for the third form we will need to pass the 'for'
+ // token as a program name to the command expression parsing
+ // function since it will be gone from the token stream by that
+ // time. Thus, we save it. We also need to make sure the sensing
+ // always leaves the variable name token in t/tt.
+ //
+ // Note also that in this model it won't be possible to support
+ // options in the first form.
+ //
+ token pt (t);
+ assert (pt.type == type::word && pt.value == "for");
+
+ mode (lexer_mode::for_loop);
+ next (t, tt);
+
+ // Note that we also consider special variable names (those that
+ // don't clash with the command line elements like redirects, etc)
+ // to later fail gracefully.
+ //
+ string& n (t.value);
+
+ if (tt == type::word && t.qtype == quote_type::unquoted &&
+ (n[0] == '_' || alpha (n[0]) || // Variable.
+ n == "~")) // Special variable.
+ {
+ // Detect patterns analogous to parse_variable_name() (so we
+ // diagnose `for x[string]: ...`).
+ //
+ if (n.find_first_of ("[*?") != string::npos)
+ fail (t) << "expected variable name instead of " << n;
+
+ if (special_variable (n))
+ fail (t) << "attempt to set '" << n << "' special variable";
+
+ // Parse out the element attributes, if present.
+ //
+ if (lexer_->peek_char ().first == '[')
+ {
+ // Save the variable name token before the attributes parsing
+ // and restore it afterwards. Also make sure that the token
+ // which follows the attributes stays in the stream.
+ //
+ token vt (move (t));
+ next_with_attributes (t, tt);
+
+ attributes_push (t, tt,
+ true /* standalone */,
+ false /* next_token */);
+
+ t = move (vt);
+ tt = t.type;
+ }
+
+ if (lexer_->peek_char ().first == ':')
+ lt = line_type::cmd_for_args;
+ }
+
+ if (lt == line_type::cmd_for_stream) // for x <...
+ {
+ // At this point t/tt contains the variable name token. Now
+ // pre-parse the command expression in the command_line lexer
+ // mode starting from this position and also passing the 'for'
+ // token as a program name.
+ //
+ // Note that the fact that the potential attributes are already
+ // parsed doesn't affect the command expression pre-parsing.
+ // Also note that they will be available during the execution
+ // phase being replayed.
+ //
+ expire_mode (); // Expire the for-loop lexer mode.
+
+ parse_command_expr_result r (
+ parse_command_expr (t, tt,
+ lexer::redirect_aliases,
+ move (pt)));
+
+ assert (r.for_loop);
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t;
+
+ parse_here_documents (t, tt, r);
+ }
+ else // for x: ...
+ {
+ next (t, tt);
+
+ assert (tt == type::colon);
+
+ expire_mode (); // Expire the for-loop lexer mode.
+
+ // Parse the value similar to the var line type (see above).
+ //
+ mode (lexer_mode::variable_line);
+ parse_variable_line (t, tt);
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t << " after for";
+ }
+
+ ln.var = nullptr;
+ ++level_;
+ break;
+ }
case line_type::cmd_elif:
case line_type::cmd_elifn:
case line_type::cmd_else:
- case line_type::cmd_end:
{
- if (!if_line)
- {
+ if (!fct || *fct != line_type::cmd_if)
fail (t) << lt << " without preceding 'if'";
- }
+ }
+ // Fall through.
+ case line_type::cmd_end:
+ {
+ if (!fct)
+ fail (t) << lt << " without preceding 'if', 'for', or 'while'";
}
// Fall through.
case line_type::cmd_if:
case line_type::cmd_ifn:
+ case line_type::cmd_while:
next (t, tt); // Skip to start of command.
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
+ if (lt == line_type::cmd_if ||
+ lt == line_type::cmd_ifn ||
+ lt == line_type::cmd_while)
++level_;
else if (lt == line_type::cmd_end)
--level_;
@@ -243,15 +403,24 @@ namespace build2
// Fall through.
case line_type::cmd:
{
- pair<command_expr, here_docs> p;
+ parse_command_expr_result r;
if (lt != line_type::cmd_else && lt != line_type::cmd_end)
- p = parse_command_expr (t, tt, lexer::redirect_aliases);
+ r = parse_command_expr (t, tt, lexer::redirect_aliases);
+
+ if (r.for_loop)
+ {
+ lt = line_type::cmd_for_stream;
+ ln.var = nullptr;
+
+ ++level_;
+ }
if (tt != type::newline)
fail (t) << "expected newline instead of " << t;
- parse_here_documents (t, tt, p);
+ parse_here_documents (t, tt, r);
+
break;
}
}
@@ -269,22 +438,76 @@ namespace build2
*save_line_ = move (ln);
}
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
+ switch (lt)
{
- tt = peek (lexer_mode::first_token);
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ {
+ tt = peek (lexer_mode::first_token);
+
+ pre_parse_if_else (t, tt);
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ tt = peek (lexer_mode::first_token);
- pre_parse_if_else (t, tt);
+ pre_parse_loop (t, tt, lt);
+ break;
+ }
+ default: break;
}
}
+ // Pre-parse the flow control construct block line.
+ //
+ void parser::
+ pre_parse_block_line (token& t, type& tt, line_type bt)
+ {
+ // enter: peeked first token of the line (type in tt)
+ // leave: newline
+
+ const location ll (get_location (peeked ()));
+
+ if (tt == type::eos)
+ fail (ll) << "expected closing 'end'";
+
+ line_type fct; // Flow control type the block type relates to.
+
+ switch (bt)
+ {
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_else:
+ {
+ fct = line_type::cmd_if;
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ fct = bt;
+ break;
+ }
+ default: assert(false);
+ }
+
+ pre_parse_line (t, tt, fct);
+ assert (tt == type::newline);
+ }
+
void parser::
pre_parse_if_else (token& t, type& tt)
{
// enter: peeked first token of next line (type in tt)
// leave: newline
- // Parse lines until we see closing 'end'. Nested if-else blocks are
- // handled recursively.
+ // Parse lines until we see closing 'end'.
//
for (line_type bt (line_type::cmd_if); // Current block.
;
@@ -292,25 +515,21 @@ namespace build2
{
const location ll (get_location (peeked ()));
- if (tt == type::eos)
- fail (ll) << "expected closing 'end'";
-
// Parse one line. Note that this one line can still be multiple
- // lines in case of if-else. In this case we want to view it as
- // cmd_if, not cmd_end. Thus remember the start position of the
- // next logical line.
+ // lines in case of a flow control construct. In this case we want
+ // to view it as cmd_if, not cmd_end. Thus remember the start
+ // position of the next logical line.
//
size_t i (script_->body.size ());
- pre_parse_line (t, tt, true /* if_line */);
- assert (tt == type::newline);
+ pre_parse_block_line (t, tt, bt);
line_type lt (script_->body[i].type);
// First take care of 'end'.
//
if (lt == line_type::cmd_end)
- return;
+ break;
// Check if-else block sequencing.
//
@@ -334,6 +553,29 @@ namespace build2
}
}
+ void parser::
+ pre_parse_loop (token& t, type& tt, line_type lt)
+ {
+ // enter: peeked first token of next line (type in tt)
+ // leave: newline
+
+ assert (lt == line_type::cmd_while ||
+ lt == line_type::cmd_for_stream ||
+ lt == line_type::cmd_for_args);
+
+ // Parse lines until we see closing 'end'.
+ //
+ for (;; tt = peek (lexer_mode::first_token))
+ {
+ size_t i (script_->body.size ());
+
+ pre_parse_block_line (t, tt, lt);
+
+ if (script_->body[i].type == line_type::cmd_end)
+ break;
+ }
+ }
+
command_expr parser::
parse_command_line (token& t, type& tt)
{
@@ -344,12 +586,12 @@ namespace build2
//
assert (!pre_parse_);
- pair<command_expr, here_docs> p (
+ parse_command_expr_result pr (
parse_command_expr (t, tt, lexer::redirect_aliases));
assert (tt == type::newline);
- parse_here_documents (t, tt, p);
+ parse_here_documents (t, tt, pr);
assert (tt == type::newline);
// @@ Note that currently running programs via a runner (e.g., see
@@ -362,7 +604,7 @@ namespace build2
// passed to the environment constructor, similar to passing the
// script deadline.
//
- return move (p.first);
+ return move (pr.expr);
}
//
@@ -423,6 +665,12 @@ namespace build2
fail (l) << "'" << v << "' call via 'env' builtin";
};
+ auto diag_loc = [this] ()
+ {
+ assert (!diag_preamble_.empty ());
+ return diag_preamble_.back ().tokens[0].location ();
+ };
+
if (v == "diag")
{
verify ();
@@ -439,24 +687,41 @@ namespace build2
}
else // Custom diagnostics.
{
- assert (diag_line_);
-
fail (l) << "multiple 'diag' builtin calls" <<
- info (diag_line_->second) << "previous call is here";
+ info (diag_loc ()) << "previous call is here";
}
}
- // Instruct the parser to save the diag builtin line separately
- // from the script lines, when it is fully parsed. Note that it
- // will be executed prior to the script body execution to obtain
- // the custom diagnostics.
+ // Move the script body to the end of the diag preamble.
//
- diag_line_ = make_pair (line (), l);
- save_line_ = &diag_line_->first;
- diag_weight_ = 4;
+ // Note that we move into the preamble whatever is there and delay
+ // the check until the execution (see the depdb preamble
+ // collecting for the reasoning).
+ //
+ lines& ls (script_->body);
+ diag_preamble_.insert (diag_preamble_.end (),
+ make_move_iterator (ls.begin ()),
+ make_move_iterator (ls.end ()));
+ ls.clear ();
+
+ // Also move the body_temp_dir flag, if it is true.
+ //
+ if (script_->body_temp_dir)
+ {
+ script_->diag_preamble_temp_dir = true;
+ script_->body_temp_dir = false;
+ }
- diag_name_ = nullopt;
- diag_name2_ = nullopt;
+ // Similar to the depdb preamble collection, instruct the parser
+ // to save the depdb builtin line separately from the script
+ // lines.
+ //
+ diag_preamble_.push_back (line ());
+ save_line_ = &diag_preamble_.back ();
+
+ diag_weight_ = 4;
+ diag_name_ = nullopt;
+ diag_name2_ = nullopt;
// Note that the rest of the line contains the builtin argument to
// be printed, thus we parse it in the value lexer mode.
@@ -478,17 +743,16 @@ namespace build2
{
if (a != perform_update_id)
fail (l) << "'depdb' builtin cannot be used to "
- << ctx.meta_operation_table[a.meta_operation ()].name
- << ' ' << ctx.operation_table[a.operation ()];
+ << ctx->meta_operation_table[a.meta_operation ()].name
+ << ' ' << ctx->operation_table[a.operation ()];
}
if (!file_based_)
- fail (l) << "'depdb' builtin can only be used for file-based "
- << "targets";
+ fail (l) << "'depdb' builtin can only be used for file- or "
+ << "file group-based targets";
- if (diag_line_)
- fail (diag_line_->second)
- << "'diag' builtin call before 'depdb' call" <<
+ if (!diag_preamble_.empty ())
+ fail (diag_loc ()) << "'diag' builtin call before 'depdb' call" <<
info (l) << "'depdb' call is here";
// Note that the rest of the line contains the builtin command
@@ -567,8 +831,18 @@ namespace build2
fail (l) << "multiple 'depdb dyndep' calls" <<
info (depdb_dyndep_->first) << "previous call is here";
- if (peek () == type::word && peeked ().value == "--byproduct")
- depdb_dyndep_byproduct_ = true;
+ if (peek () == type::word)
+ {
+ const string& v (peeked ().value);
+
+ // Note: --byproduct and --dyn-target are mutually
+ // exclusive.
+ //
+ if (v == "--byproduct")
+ depdb_dyndep_byproduct_ = true;
+ else if (v == "--dyn-target")
+ depdb_dyndep_dyn_target_ = true;
+ }
}
else
{
@@ -577,6 +851,8 @@ namespace build2
info (depdb_dyndep_->first) << "'depdb dyndep' call is here";
}
+ depdb_value_ = depdb_value_ || (v == "string" || v == "hash");
+
// Move the script body to the end of the depdb preamble.
//
// Note that at this (pre-parsing) stage we cannot evaluate if
@@ -601,10 +877,12 @@ namespace build2
script_->body_temp_dir = false;
}
- // Reset the impure function call info since it's valid for the
- // depdb preamble.
+ // Reset the impure function call and computed variable
+ // expansion tracking since both are valid for the depdb
+ // preamble.
//
impure_func_ = nullopt;
+ computed_var_ = nullopt;
// Instruct the parser to save the depdb builtin line separately
// from the script lines, when it is fully parsed. Note that the
@@ -656,10 +934,53 @@ namespace build2
//
// This is also the reason why we add a diag frame.
//
+ // The problem turned out to be worse than originally thought: we
+ // may call a function (for example, as part of if) with invalid
+ // arguments. And this could happen in the depdb preamble, which
+ // means we cannot fix this by moving the depdb builtin (which must
+ // come after the preamble). So let's peek at what's ahead and omit
+ // the expansion if it's anything iffy, namely, eval context or
+ // function call.
+ //
+ bool skip_diag (false);
if (pre_parse_ && diag_weight_ != 4)
{
- pre_parse_ = false; // Make parse_names() perform expansions.
- pre_parse_suspended_ = true;
+ // Based on the buildfile expansion parsing logic.
+ //
+ if (tt == type::lparen) // Evaluation context.
+ skip_diag = true;
+ else if (tt == type::dollar)
+ {
+ type ptt (peek (lexer_mode::variable));
+
+ if (!peeked ().separated)
+ {
+ if (ptt == type::lparen)
+ {
+ // While strictly speaking this can also be a function call,
+ // this is highly unusual and we will assume it's a variable
+ // expansion.
+ }
+ else if (ptt == type::word)
+ {
+ pair<char, bool> r (lexer_->peek_char ());
+
+ if (r.first == '(' && !r.second) // Function call.
+ skip_diag = true;
+ }
+ }
+ }
+
+ if (!skip_diag)
+ {
+ // Sanity check: we should not be suspending the pre-parse mode
+ // turned on by the base parser.
+ //
+ assert (top_pre_parse_);
+
+ pre_parse_ = false; // Make parse_names() perform expansions.
+ pre_parse_suspended_ = true;
+ }
}
auto df = make_diag_frame (
@@ -687,7 +1008,7 @@ namespace build2
pre_parse_ = true;
}
- if (pre_parse_ && diag_weight_ == 4)
+ if (pre_parse_ && (diag_weight_ == 4 || skip_diag))
return nullopt;
}
@@ -709,6 +1030,19 @@ namespace build2
return nullopt;
}
+ // If this is a value of the special cmdline type, then only do
+ // certain tests below if the value is not quoted and doesn't contain
+ // any characters that would be consumed by re-lexing.
+ //
+ // This is somewhat of a hack but handling this properly would not
+ // only require unquoting but also keeping track of which special
+ // characters were quoted (and thus should be treated literally) and
+ // which were not (and thus should act as separators, etc).
+ //
+ bool qs (pr.type != nullptr &&
+ pr.type->is_a<cmdline> () &&
+ need_cmdline_relex (ns[0].value));
+
// We have to handle process_path[_ex] and executable target. The
// process_path[_ex] we may have to recognize syntactically because
// of the loss of type, for example:
@@ -742,10 +1076,14 @@ namespace build2
pp_vt = pr.type;
ns.clear ();
}
- else if (ns[0].file ())
+ else if (ns[0].file () && !qs)
{
// Find the end of the value.
//
+ // Note that here we ignore the whole cmdline issue (see above)
+ // for the further values assuming that they are unquoted and
+ // don't contain any special characters.
+ //
auto b (ns.begin ());
auto i (value_traits<process_path_ex>::find_end (ns));
@@ -812,40 +1150,45 @@ namespace build2
//
else if (!ns[0].simple ())
{
- if (const target* t = search_existing (
- ns[0], *scope_, ns[0].pair ? ns[1].dir : empty_dir_path))
+ if (!qs)
{
- if (const auto* et = t->is_a<exe> ())
+ // This could be a script from src so search like a prerequisite.
+ //
+ if (const target* t = search_existing (
+ ns[0], *scope_, ns[0].pair ? ns[1].dir : empty_dir_path))
{
- if (pre_parse_)
+ if (const auto* et = t->is_a<exe> ())
{
- if (auto* n = et->lookup_metadata<string> ("name"))
+ if (pre_parse_)
{
- set_diag (*n, 3);
- return nullopt;
+ if (auto* n = et->lookup_metadata<string> ("name"))
+ {
+ set_diag (*n, 3);
+ return nullopt;
+ }
+ // Fall through.
}
- // Fall through.
- }
- else
- {
- process_path pp (et->process_path ());
+ else
+ {
+ process_path pp (et->process_path ());
- if (pp.empty ())
- fail (l) << "target " << *et << " is out of date" <<
- info << "consider specifying it as a prerequisite of "
- << environment_->target;
+ if (pp.empty ())
+ fail (l) << "target " << *et << " is out of date" <<
+ info << "consider specifying it as a prerequisite of "
+ << environment_->target;
- ns.erase (ns.begin (), ns.begin () + (ns[0].pair ? 2 : 1));
- return optional<process_path> (move (pp));
+ ns.erase (ns.begin (), ns.begin () + (ns[0].pair ? 2 : 1));
+ return optional<process_path> (move (pp));
+ }
}
- }
- if (pre_parse_)
- {
- diag_record dr (fail (l));
- dr << "unable to deduce low-verbosity script diagnostics name "
- << "from target " << *t;
- suggest_diag (dr);
+ if (pre_parse_)
+ {
+ diag_record dr (fail (l));
+ dr << "unable to deduce low-verbosity script diagnostics name "
+ << "from target " << *t;
+ suggest_diag (dr);
+ }
}
}
@@ -863,26 +1206,29 @@ namespace build2
{
// If we are here, the name is simple and is not part of a pair.
//
- string& v (ns[0].value);
+ if (!qs)
+ {
+ string& v (ns[0].value);
- // Try to interpret the name as a builtin.
- //
- const builtin_info* bi (builtins.find (v));
+ // Try to interpret the name as a builtin.
+ //
+ const builtin_info* bi (builtins.find (v));
- if (bi != nullptr)
- {
- set_diag (move (v), bi->weight);
- return nullopt;
- }
- //
- // Try to interpret the name as a pseudo-builtin.
- //
- // Note that both of them has the zero weight and cannot be picked
- // up as a script name.
- //
- else if (v == "set" || v == "exit")
- {
- return nullopt;
+ if (bi != nullptr)
+ {
+ set_diag (move (v), bi->weight);
+ return nullopt;
+ }
+ //
+ // Try to interpret the name as a pseudo-builtin.
+ //
+ // Note that both of them has the zero weight and cannot be picked
+ // up as a script name.
+ //
+ else if (v == "set" || v == "exit")
+ {
+ return nullopt;
+ }
}
diag_record dr (fail (l));
@@ -907,8 +1253,9 @@ namespace build2
// Note that we rely on "small function object" optimization here.
//
auto exec_cmd = [this] (token& t, build2::script::token_type& tt,
- size_t li,
+ const iteration_index* ii, size_t li,
bool single,
+ const function<command_function>& cf,
const location& ll)
{
// We use the 0 index to signal that this is the only command.
@@ -919,7 +1266,7 @@ namespace build2
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- runner_->run (*environment_, ce, li, ll);
+ runner_->run (*environment_, ce, ii, li, cf, ll);
};
exec_lines (s.body, exec_cmd);
@@ -928,11 +1275,30 @@ namespace build2
runner_->leave (e, s.end_loc);
}
+ // Return true if the specified expression executes the set builtin or
+ // is a for-loop.
+ //
+ static bool
+ valid_preamble_cmd (const command_expr& ce,
+ const function<command_function>& cf)
+ {
+ return find_if (
+ ce.begin (), ce.end (),
+ [&cf] (const expr_term& et)
+ {
+ const process_path& p (et.pipe.back ().program);
+ return p.initial == nullptr &&
+ (p.recall.string () == "set" ||
+ (cf != nullptr && p.recall.string () == "for"));
+ }) != ce.end ();
+ }
+
void parser::
- exec_depdb_preamble (action a, const scope& bs, const file& t,
+ exec_depdb_preamble (action a, const scope& bs, const target& t,
environment& e, const script& s, runner& r,
lines_iterator begin, lines_iterator end,
depdb& dd,
+ dynamic_targets* dyn_targets,
bool* update,
optional<timestamp> mt,
bool* deferred_failure,
@@ -955,28 +1321,34 @@ namespace build2
action a;
const scope& bs;
- const file& t;
+ const target& t;
environment& env;
const script& scr;
depdb& dd;
+ dynamic_targets* dyn_targets;
bool* update;
bool* deferred_failure;
optional<timestamp> mt;
dyndep_byproduct* byp;
- } data {trace, a, bs, t, e, s, dd, update, deferred_failure, mt, byp};
+ } data {
+ trace,
+ a, bs, t,
+ e, s,
+ dd, dyn_targets, update, deferred_failure, mt, byp};
auto exec_cmd = [this, &data] (token& t,
build2::script::token_type& tt,
- size_t li,
+ const iteration_index* ii, size_t li,
bool /* single */,
+ const function<command_function>& cf,
const location& ll)
{
// Note that we never reset the line index to zero (as we do in
- // execute_body()) assuming that there are some script body
- // commands to follow.
+ // execute_body()) assuming that there are some script body commands
+ // to follow.
//
if (tt == type::word && t.value == "depdb")
{
@@ -995,8 +1367,9 @@ namespace build2
//
exec_depdb_dyndep (t, tt,
li, ll,
- data.a, data.bs, const_cast<file&> (data.t),
+ data.a, data.bs, const_cast<target&> (data.t),
data.dd,
+ *data.dyn_targets,
*data.update,
*data.mt,
*data.deferred_failure,
@@ -1006,35 +1379,29 @@ namespace build2
{
names ns (exec_special (t, tt, true /* skip <cmd> */));
+ string v;
+ const char* w (nullptr);
if (cmd == "hash")
{
sha256 cs;
for (const name& n: ns)
to_checksum (cs, n);
- if (data.dd.expect (cs.string ()) != nullptr)
- l4 ([&] {
- data.trace (ll)
- << "'depdb hash' argument change forcing update of "
- << data.t;});
+ v = cs.string ();
+ w = "argument";
}
else if (cmd == "string")
{
- string s;
try
{
- s = convert<string> (move (ns));
+ v = convert<string> (move (ns));
}
catch (const invalid_argument& e)
{
fail (ll) << "invalid 'depdb string' argument: " << e;
}
- if (data.dd.expect (s) != nullptr)
- l4 ([&] {
- data.trace (ll)
- << "'depdb string' argument change forcing update of "
- << data.t;});
+ w = "argument";
}
else if (cmd == "env")
{
@@ -1055,14 +1422,32 @@ namespace build2
fail (ll) << pf << e;
}
- if (data.dd.expect (cs.string ()) != nullptr)
- l4 ([&] {
- data.trace (ll)
- << "'depdb env' environment change forcing update of "
- << data.t;});
+ v = cs.string ();
+ w = "environment";
}
else
assert (false);
+
+ // Prefix the value with the type letter. This serves two
+ // purposes:
+ //
+ // 1. It makes sure the result is never a blank line. We use
+ // blank lines as anchors to skip directly to certain entries
+ // (e.g., dynamic targets).
+ //
+ // 2. It allows us to detect the beginning of prerequisites
+ // since an absolute path will be distinguishable from these
+ // entries (in the future we may want to add an explicit
+ // blank after such custom entries to make this easier).
+ //
+ v.insert (0, 1, ' ');
+ v.insert (0, 1, cmd[0]); // `h`, `s`, or `e`
+
+ if (data.dd.expect (v) != nullptr)
+ l4 ([&] {
+ data.trace (ll)
+ << "'depdb " << cmd << "' " << w << " change forcing "
+ << "update of " << data.t;});
}
}
else
@@ -1070,15 +1455,7 @@ namespace build2
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- // Verify that this expression executes the set builtin.
- //
- if (find_if (ce.begin (), ce.end (),
- [] (const expr_term& et)
- {
- const process_path& p (et.pipe.back ().program);
- return p.initial == nullptr &&
- p.recall.string () == "set";
- }) == ce.end ())
+ if (!valid_preamble_cmd (ce, cf))
{
const replay_tokens& rt (data.scr.depdb_preamble.back ().tokens);
assert (!rt.empty ());
@@ -1089,20 +1466,93 @@ namespace build2
info (rt[0].location ()) << "depdb preamble ends here";
}
- runner_->run (*environment_, ce, li, ll);
+ runner_->run (*environment_, ce, ii, li, cf, ll);
}
};
exec_lines (begin, end, exec_cmd);
}
+ pair<names, location> parser::
+ execute_diag_preamble (const scope& rs, const scope& bs,
+ environment& e, const script& s, runner& r,
+ bool diag, bool enter, bool leave)
+ {
+ tracer trace ("execute_diag_preamble");
+
+ assert (!s.diag_preamble.empty ());
+
+ const line& dl (s.diag_preamble.back ()); // Diag builtin line.
+
+ pre_exec (rs, bs, e, &s, &r);
+
+ if (enter)
+ runner_->enter (e, s.start_loc);
+
+ // Perform the variable assignments.
+ //
+ auto exec_cmd = [&dl, this] (token& t,
+ build2::script::token_type& tt,
+ const iteration_index* ii, size_t li,
+ bool /* single */,
+ const function<command_function>& cf,
+ const location& ll)
+ {
+ // Note that we never reset the line index to zero (as we do in
+ // execute_body()) assuming that there are some script body commands
+ // to follow.
+ //
+ command_expr ce (
+ parse_command_line (t, static_cast<token_type&> (tt)));
+
+ if (!valid_preamble_cmd (ce, cf))
+ {
+ const replay_tokens& rt (dl.tokens);
+ assert (!rt.empty ());
+
+ fail (ll) << "disallowed command in diag preamble" <<
+ info << "only variable assignments are allowed in diag preamble"
+ << info (rt[0].location ()) << "diag preamble ends here";
+ }
+
+ runner_->run (*environment_, ce, ii, li, cf, ll);
+ };
+
+ exec_lines (s.diag_preamble.begin (), s.diag_preamble.end () - 1,
+ exec_cmd);
+
+ // Execute the diag line, if requested.
+ //
+ names ns;
+
+ if (diag)
+ {
+ // Copy the tokens and start playing.
+ //
+ replay_data (replay_tokens (dl.tokens));
+
+ token t;
+ build2::script::token_type tt;
+ next (t, tt);
+
+ ns = exec_special (t, tt, true /* skip_first */);
+
+ replay_stop ();
+ }
+
+ if (leave)
+ runner_->leave (e, s.end_loc);
+
+ return make_pair (ns, dl.tokens.front ().location ());
+ }
+
void parser::
pre_exec (const scope& rs, const scope& bs,
environment& e, const script* s, runner* r)
{
path_ = nullptr; // Set by replays.
- pre_parse_ = false;
+ top_pre_parse_ = pre_parse_ = false;
set_lexer (nullptr);
@@ -1152,22 +1602,37 @@ namespace build2
apply_value_attributes (&var, lhs, move (rhs), kind);
};
- auto exec_if = [this] (token& t, build2::script::token_type& tt,
- size_t li,
- const location& ll)
+ auto exec_cond = [this] (token& t, build2::script::token_type& tt,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- // Assume if-else always involves multiple commands.
+ // Assume a flow control construct always involves multiple
+ // commands.
//
- return runner_->run_if (*environment_, ce, li, ll);
+ return runner_->run_cond (*environment_, ce, ii, li, ll);
};
- build2::script::parser::exec_lines (begin, end,
- exec_set, exec_cmd, exec_if,
- environment_->exec_line,
- &environment_->var_pool);
+ auto exec_for = [this] (const variable& var,
+ value&& val,
+ const attributes& val_attrs,
+ const location&)
+ {
+ value& lhs (environment_->assign (var));
+
+ attributes_.push_back (val_attrs);
+
+ apply_value_attributes (&var, lhs, move (val), type::assign);
+ };
+
+ build2::script::parser::exec_lines (
+ begin, end,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */,
+ environment_->exec_line,
+ &environment_->var_pool);
}
names parser::
@@ -1184,33 +1649,12 @@ namespace build2
: names ();
}
- names parser::
- execute_special (const scope& rs, const scope& bs,
- environment& e,
- const line& ln,
- bool omit_builtin)
- {
- pre_exec (rs, bs, e, nullptr /* script */, nullptr /* runner */);
-
- // Copy the tokens and start playing.
- //
- replay_data (replay_tokens (ln.tokens));
-
- token t;
- build2::script::token_type tt;
- next (t, tt);
-
- names r (exec_special (t, tt, omit_builtin));
-
- replay_stop ();
- return r;
- }
-
void parser::
exec_depdb_dyndep (token& lt, build2::script::token_type& ltt,
size_t li, const location& ll,
- action a, const scope& bs, file& t,
+ action a, const scope& bs, target& t,
depdb& dd,
+ dynamic_targets& dyn_targets,
bool& update,
timestamp mt,
bool& deferred_failure,
@@ -1223,6 +1667,7 @@ namespace build2
depdb_dyndep_options ops;
bool prog (false);
bool byprod (false);
+ bool dyn_tgt (false);
// Prerequisite update filter (--update-*).
//
@@ -1265,11 +1710,9 @@ namespace build2
next (t, tt); // Skip the 'dyndep' command.
- if (tt == type::word && t.value == "--byproduct")
- {
- byprod = true;
+ if (tt == type::word && ((byprod = (t.value == "--byproduct")) ||
+ (dyn_tgt = (t.value == "--dyn-target"))))
next (t, tt);
- }
assert (byprod == (byprod_result != nullptr));
@@ -1488,10 +1931,23 @@ namespace build2
continue;
}
- // Handle --byproduct in the wrong place.
+ // Handle --byproduct and --dyn-target in the wrong place.
//
if (strcmp (a, "--byproduct") == 0)
- fail (ll) << "depdb dyndep: --byproduct must be first option";
+ {
+ fail (ll) << "depdb dyndep: "
+ << (dyn_tgt
+ ? "--byproduct specified with --dyn-target"
+ : "--byproduct must be first option");
+ }
+
+ if (strcmp (a, "--dyn-target") == 0)
+ {
+ fail (ll) << "depdb dyndep: "
+ << (byprod
+ ? "--dyn-target specified with --byproduct"
+ : "--dyn-target must be first option");
+ }
// Handle non-literal --update-*.
//
@@ -1516,29 +1972,31 @@ namespace build2
}
}
- // --what
- //
- const char* what (ops.what_specified ()
- ? ops.what ().c_str ()
- : "file");
-
// --format
//
dyndep_format format (dyndep_format::make);
-
if (ops.format_specified ())
{
const string& f (ops.format ());
- if (f != "make")
+ if (f == "lines") format = dyndep_format::lines;
+ else if (f != "make")
fail (ll) << "depdb dyndep: invalid --format option value '"
<< f << "'";
}
+ // Prerequisite-specific options.
+ //
+
+ // --what
+ //
+ const char* what (ops.what_specified ()
+ ? ops.what ().c_str ()
+ : "file");
+
// --cwd
//
optional<dir_path> cwd;
-
if (ops.cwd_specified ())
{
if (!byprod)
@@ -1558,28 +2016,6 @@ namespace build2
fail (ll) << "depdb dyndep: -I specified with --byproduct";
}
- // --file
- //
- // Note that if --file is specified without a program, then we assume
- // it is one of the static prerequisites.
- //
- optional<path> file;
-
- if (ops.file_specified ())
- {
- file = move (ops.file ());
-
- if (file->relative ())
- {
- if (!cwd)
- fail (ll) << "depdb dyndep: relative path specified with --file";
-
- *file = *cwd / *file;
- }
- }
- else if (!prog)
- fail (ll) << "depdb dyndep: program or --file expected";
-
// --default-type
//
// Get the default prerequisite type falling back to file{} if not
@@ -1591,7 +2027,7 @@ namespace build2
// translation unit would want to make sure it resolves extracted
// system headers to h{} targets analogous to the c module's rule.
//
- const target_type* def_pt;
+ const target_type* def_pt (&file::static_type);
if (ops.default_type_specified ())
{
const string& t (ops.default_type ());
@@ -1599,10 +2035,8 @@ namespace build2
def_pt = bs.find_target_type (t);
if (def_pt == nullptr)
fail (ll) << "depdb dyndep: unknown target type '" << t
- << "' specific with --default-type";
+ << "' specified with --default-type";
}
- else
- def_pt = &file::static_type;
// --adhoc
//
@@ -1612,6 +2046,93 @@ namespace build2
fail (ll) << "depdb dyndep: --adhoc specified with --byproduct";
}
+ // Target-specific options.
+ //
+
+ // --target-what
+ //
+ const char* what_tgt ("file");
+ if (ops.target_what_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-what specified without "
+ << "--dyn-target";
+
+ what_tgt = ops.target_what ().c_str ();
+ }
+
+ // --target-cwd
+ //
+ optional<dir_path> cwd_tgt;
+ if (ops.target_cwd_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-cwd specified without "
+ << "--dyn-target";
+
+ cwd_tgt = move (ops.target_cwd ());
+
+ if (cwd_tgt->relative ())
+ fail (ll) << "depdb dyndep: relative path specified with "
+ << "--target-cwd";
+ }
+
+ // --target-default-type
+ //
+ const target_type* def_tt (&file::static_type);
+ if (ops.target_default_type_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-default-type specified "
+ << "without --dyn-target";
+
+ const string& t (ops.target_default_type ());
+
+ def_tt = bs.find_target_type (t);
+ if (def_tt == nullptr)
+ fail (ll) << "depdb dyndep: unknown target type '" << t
+ << "' specified with --target-default-type";
+ }
+
+ map<string, const target_type*> map_tt;
+ if (ops.target_extension_type_specified ())
+ {
+ if (!dyn_tgt)
+ fail (ll) << "depdb dyndep: --target-extension-type specified "
+ << "without --dyn-target";
+
+ for (pair<const string, string>& p: ops.target_extension_type ())
+ {
+ const target_type* tt (bs.find_target_type (p.second));
+ if (tt == nullptr)
+ fail (ll) << "depdb dyndep: unknown target type '" << p.second
+ << "' specified with --target-extension-type";
+
+ map_tt[p.first] = tt;
+ }
+ }
+
+ // --file (last since need --*cwd)
+ //
+ // Note that if --file is specified without a program, then we assume
+ // it is one of the static prerequisites.
+ //
+ optional<path> file;
+ if (ops.file_specified ())
+ {
+ file = move (ops.file ());
+
+ if (file->relative ())
+ {
+ if (!cwd && !cwd_tgt)
+ fail (ll) << "depdb dyndep: relative path specified with --file";
+
+ *file = (cwd ? *cwd : *cwd_tgt) / *file;
+ }
+ }
+ else if (!prog)
+ fail (ll) << "depdb dyndep: program or --file expected";
+
// Update prerequisite targets.
//
using dyndep = dyndep_rule;
@@ -1632,7 +2153,7 @@ namespace build2
// So there is a nuanced interaction between update=match and
// --update-*.
//
- if ((p.include & 4) != 0)
+ if ((p.include & adhoc_buildscript_rule::include_unmatch) != 0)
{
l6 ([&]{trace << "skipping unmatched " << *pt;});
continue;
@@ -1705,6 +2226,11 @@ namespace build2
update = dyndep::update (
trace, a, *pt, update ? timestamp_unknown : mt) || update;
+ // While implicit, it is for a static prerequisite, so marking it
+ // feels correct.
+ //
+ p.include |= prerequisite_target::include_udm;
+
// Mark as updated (see execute_update_prerequisites() for
// details.
//
@@ -1737,6 +2263,10 @@ namespace build2
return;
}
+ const scope& rs (*bs.root_scope ());
+
+ group* g (t.is_a<group> ()); // If not group then file.
+
// This code is based on the prior work in the cc module (specifically
// extract_headers()) where you can often find more detailed rationale
// for some of the steps performed.
@@ -1834,9 +2364,29 @@ namespace build2
command_expr cmd;
srcout_map so_map;
+ // Save/restore script cleanups.
+ //
+ struct cleanups
+ {
+ build2::script::cleanups ordinary;
+ paths special;
+ };
+ optional<cleanups> script_cleanups;
+
+ auto cleanups_guard = make_guard (
+ [this, &script_cleanups] ()
+ {
+ if (script_cleanups)
+ {
+ swap (environment_->cleanups, script_cleanups->ordinary);
+ swap (environment_->special_cleanups, script_cleanups->special);
+ }
+ });
+
auto init_run = [this, &ctx,
&lt, &ltt, &ll,
- prog, &file, &ops, &cmd, &so_map] ()
+ prog, &file, &ops,
+ &cmd, &so_map, &script_cleanups] ()
{
// Populate the srcout map with the -I$out_base -I$src_base pairs.
//
@@ -1849,6 +2399,10 @@ namespace build2
if (prog)
{
+ script_cleanups = cleanups {};
+ swap (environment_->cleanups, script_cleanups->ordinary);
+ swap (environment_->special_cleanups, script_cleanups->special);
+
cmd = parse_command_line (lt, static_cast<token_type&> (ltt));
// If the output goes to stdout, then this should be a single
@@ -1864,15 +2418,10 @@ namespace build2
// they include the line index in their names to avoid clashes
// between lines).
//
- // Cleanups are not an issue, they will simply replaced. And
+ // Cleanups are not an issue, they will simply be replaced. And
// overriding the contents of the special files seems harmless and
// consistent with what would happen if the command redirects its
// output to a non-special file.
- //
- if (file)
- environment_->clean (
- {build2::script::cleanup_type::always, *file},
- true /* implicit */);
}
};
@@ -1882,7 +2431,7 @@ namespace build2
size_t skip_count (0);
auto add = [this, &trace, what,
- a, &bs, &t, &pts, pts_n = pts.size (),
+ a, &bs, &t, g, &pts, pts_n = pts.size (),
&ops, &map_ext, def_pt, &pfx_map, &so_map,
&dd, &skip_count] (path fp,
size_t* skip,
@@ -1892,6 +2441,61 @@ namespace build2
bool cache (skip == nullptr);
+ // Handle fsdir{} prerequisite separately.
+ //
+ // Note: inspired by inject_fsdir().
+ //
+ if (fp.to_directory ())
+ {
+ if (!cache)
+ {
+ // Note: already absolute since cannot be non-existent.
+ //
+ fp.normalize ();
+ }
+
+ const fsdir* dt (&search<fsdir> (t,
+ path_cast<dir_path> (fp),
+ dir_path (),
+ string (), nullptr, nullptr));
+
+ // Subset of code for file below.
+ //
+ if (!cache)
+ {
+ for (size_t i (0); i != pts_n; ++i)
+ {
+ const prerequisite_target& p (pts[i]);
+
+ if (const target* pt =
+ (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr))
+ {
+ if (dt == pt)
+ return false;
+ }
+ }
+
+ if (*skip != 0)
+ {
+ --(*skip);
+ return false;
+ }
+ }
+
+ match_sync (a, *dt);
+ pts.push_back (
+ prerequisite_target (
+ nullptr, true /* adhoc */, reinterpret_cast<uintptr_t> (dt)));
+
+ if (!cache)
+ dd.expect (fp.representation ());
+
+ skip_count++;
+ return false;
+ }
+
// We can only defer the failure if we will be running the recipe
// body.
//
@@ -1946,13 +2550,26 @@ namespace build2
// Skip if this is one of the targets.
//
+ // Note that for dynamic targets this only works if we see the
+ // targets before prerequisites (like in the make dependency
+ // format).
+ //
if (ops.drop_cycles ())
{
- for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ if (g != nullptr)
{
- if (ft == m)
+ auto& ms (g->members);
+ if (find (ms.begin (), ms.end (), ft) != ms.end ())
return false;
}
+ else
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ return false;
+ }
+ }
}
// Skip until where we left off.
@@ -1981,10 +2598,15 @@ namespace build2
{
prerequisite_target& pt (pts.back ());
+ // Note: set the include_target flag for consistency (the
+ // updated_during_match() check does not apply since it's a
+ // dynamic prerequisite).
+ //
if (pt.adhoc ())
{
pt.data = reinterpret_cast<uintptr_t> (pt.target);
pt.target = nullptr;
+ pt.include |= prerequisite_target::include_target;
}
else
pt.data = 1; // Already updated.
@@ -2018,13 +2640,27 @@ namespace build2
<< t;
});
+ // While in the make format targets come before prerequisites, in
+ // depdb we store them after since any change to prerequisites can
+ // invalidate the set of targets. So we save them first and process
+ // later.
+ //
+ // Note also that we need to return them to the caller in case we are
+ // updating.
+
// If nothing so far has invalidated the dependency database, then try
// the cached data before running the program.
//
bool cache (!update);
+ bool skip_blank (false);
for (bool restart (true), first_run (true); restart; cache = false)
{
+ // Clear the state in case we are restarting.
+ //
+ if (dyn_tgt)
+ dyn_targets.clear ();
+
restart = false;
if (cache)
@@ -2033,7 +2669,8 @@ namespace build2
//
assert (skip_count == 0);
- // We should always end with a blank line.
+ // We should always end with a blank line after the list of
+ // dynamic prerequisites.
//
for (;;)
{
@@ -2047,8 +2684,11 @@ namespace build2
break;
}
- if (l->empty ()) // Done, nothing changed.
- return;
+ if (l->empty ()) // Done with prerequisites, nothing changed.
+ {
+ skip_blank = true;
+ break;
+ }
if (optional<bool> r = add (path (move (*l)), nullptr, mt))
{
@@ -2070,6 +2710,52 @@ namespace build2
return;
}
}
+
+ if (!restart) // Nothing changed.
+ {
+ if (dyn_tgt)
+ {
+ // We should always end with a blank line after the list of
+ // dynamic targets.
+ //
+ for (;;)
+ {
+ string* l (dd.read ());
+
+ // If the line is invalid, run the compiler.
+ //
+ if (l == nullptr)
+ {
+ restart = true;
+ break;
+ }
+
+ if (l->empty ()) // Done with targets.
+ break;
+
+ // Split into type and path (see below for background).
+ //
+ size_t p (l->find (' '));
+ if (p == string::npos || // Invalid format.
+ p == 0 || // Empty type.
+ p + 1 == l->size ()) // Empty path.
+ {
+ dd.write (); // Invalidate this line.
+ restart = true;
+ break;
+ }
+
+ string t (*l, 0, p);
+ l->erase (0, p + 1);
+
+ dyn_targets.push_back (
+ dynamic_target {move (t), path (move (*l))});
+ }
+ }
+
+ if (!restart) // Done, nothing changed.
+ break; // Break early to keep cache=true.
+ }
}
else
{
@@ -2078,9 +2764,16 @@ namespace build2
init_run ();
first_run = false;
}
- else if (!prog)
+ else
{
- fail (ll) << "generated " << what << " without program to retry";
+ if (!prog)
+ fail (ll) << "generated " << what << " without program to retry";
+
+ // Drop dyndep cleanups accumulated on the previous run.
+ //
+ assert (script_cleanups); // Sanity check.
+ environment_->cleanups.clear ();
+ environment_->special_cleanups.clear ();
}
// Save the timestamp just before we run the command. If we depend
@@ -2101,18 +2794,50 @@ namespace build2
istringstream iss;
if (prog)
{
- string s;
- build2::script::run (*environment_,
- cmd,
- li,
- ll,
- !file ? &s : nullptr);
-
+ // Note: depdb is disallowed inside flow control constructs.
+ //
if (!file)
{
- iss.str (move (s));
+ function<command_function> cf (
+ [&iss]
+ (build2::script::environment&,
+ const strings&,
+ auto_fd in,
+ pipe_command* pipe,
+ const optional<deadline>& dl,
+ const location& ll)
+ {
+ read (move (in),
+ false /* whitespace */,
+ false /* newline */,
+ true /* exact */,
+ [&iss] (string&& s) {iss.str (move (s));},
+ pipe,
+ dl,
+ ll,
+ "depdb-dyndep");
+ });
+
+ build2::script::run (*environment_,
+ cmd,
+ nullptr /* iteration_index */, li,
+ ll,
+ cf, false /* last_cmd */);
+
iss.exceptions (istream::badbit);
}
+ else
+ {
+ build2::script::run (
+ *environment_, cmd, nullptr /* iteration_index */, li, ll);
+
+ // Note: make it a maybe-cleanup in case the command cleans it
+ // up itself.
+ //
+ environment_->clean (
+ {build2::script::cleanup_type::maybe, *file},
+ true /* implicit */);
+ }
}
ifdstream ifs (ifdstream::badbit);
@@ -2180,32 +2905,72 @@ namespace build2
if (r.second.empty ())
continue;
- // @@ TODO: what should we do about targets?
+ // Skip targets unless requested to extract.
//
- // Note that if we take GCC as an example, things are
+ // BTW, if you are wondering why don't we extract targets
+ // by default, take GCC as an example, where things are
// quite messed up: by default it ignores -o and just
// takes the source file name and replaces the extension
// with a platform-appropriate object file extension. One
// can specify a custom target (or even multiple targets)
- // with -MT or with -MQ (quoting). Though MinGW GCC still
- // does not quote `:` with -MQ. So in this case it's
+ // with -MT or with -MQ (quoting). So in this case it's
// definitely easier for the user to ignore the targets
// and just specify everything in the buildfile.
//
- // On the other hand, other tools are likely to produce
- // more sensible output (except perhaps for quoting).
- //
- // @@ Maybe in the lax mode we should only recognize `:`
- // if it's separated on at least one side?
- //
- // Alternatively, we could detect Windows drives in
- // paths and "handle" them (I believe this is what GNU
- // make does). Maybe we should have three formats:
- // make-lax, make, make-strict?
- //
if (r.first == make_type::target)
+ {
+ // NOTE: similar code below.
+ //
+ if (dyn_tgt)
+ {
+ path& f (r.second);
+
+ if (f.relative ())
+ {
+ if (!cwd_tgt)
+ fail (il) << "relative " << what_tgt
+ << " target path '" << f
+ << "' in make dependency declaration" <<
+ info << "consider using --target-cwd to specify "
+ << "relative path base";
+
+ f = *cwd_tgt / f;
+ }
+
+ // Note that unlike prerequisites, here we don't need
+ // normalize_external() since we expect the targets to
+ // be within this project.
+ //
+ try
+ {
+ f.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what_tgt << " target "
+ << "path '" << f.string () << "'";
+ }
+
+ // The target must be within this project.
+ //
+ if (!f.sub (rs.out_path ()))
+ {
+ fail (il) << what_tgt << " target path " << f
+ << " must be inside project output "
+ << "directory " << rs.out_path ();
+ }
+
+ // Note: type is resolved later.
+ //
+ dyn_targets.push_back (
+ dynamic_target {string (), move (f)});
+ }
+
continue;
+ }
+ // NOTE: similar code below.
+ //
if (optional<bool> u = add (move (r.second), &skip, rmt))
{
restart = *u;
@@ -2233,20 +2998,380 @@ namespace build2
break;
}
- break;
+ break; // case
+ }
+ case dyndep_format::lines:
+ {
+ bool tgt (dyn_tgt); // Reading targets or prerequisites.
+
+ for (string l; !restart; ++il.line) // Reuse the buffer.
+ {
+ if (eof (getline (is, l)))
+ break;
+
+ if (l.empty ())
+ {
+ if (!tgt)
+ fail (il) << "blank line in prerequisites list";
+
+ tgt = false; // Targets/prerequisites separating blank.
+ continue;
+ }
+
+ // See if this line starts with a space to indicate a non-
+ // existent prerequisite. This variable serves both as a
+ // flag and as a position of the beginning of the path.
+ //
+ size_t n (l.front () == ' ' ? 1 : 0);
+
+ if (tgt)
+ {
+ // NOTE: similar code above.
+ //
+ path f;
+ try
+ {
+ // Non-existent target doesn't make sense.
+ //
+ if (n)
+ throw invalid_path ("");
+
+ f = path (l);
+
+ if (f.relative ())
+ {
+ if (!cwd_tgt)
+ fail (il) << "relative " << what_tgt
+ << " target path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --target-cwd to specify "
+ << "relative path base";
+
+ f = *cwd_tgt / f;
+ }
+
+ // Note that unlike prerequisites, here we don't need
+ // normalize_external() since we expect the targets to
+ // be within this project.
+ //
+ f.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what_tgt << " target path '"
+ << l << "'";
+ }
+
+ // The target must be within this project.
+ //
+ if (!f.sub (rs.out_path ()))
+ {
+ fail (il) << what_tgt << " target path " << f
+ << " must be inside project output directory "
+ << rs.out_path ();
+ }
+
+ // Note: type is resolved later.
+ //
+ dyn_targets.push_back (
+ dynamic_target {string (), move (f)});
+ }
+ else
+ {
+ path f;
+ try
+ {
+ f = path (l.c_str () + n, l.size () - n);
+
+ if (f.empty () ||
+ (n && f.to_directory ())) // Non-existent fsdir{}.
+ throw invalid_path ("");
+
+ if (f.relative ())
+ {
+ if (!n)
+ {
+ if (!cwd)
+ fail (il) << "relative " << what
+ << " prerequisite path '" << f
+ << "' in lines dependency declaration" <<
+ info << "consider using --cwd to specify "
+ << "relative path base";
+
+ f = *cwd / f;
+ }
+ }
+ else if (n)
+ {
+ // @@ TODO: non-existent absolute paths.
+ //
+ throw invalid_path ("");
+ }
+ }
+ catch (const invalid_path&)
+ {
+ fail (il) << "invalid " << what << " prerequisite path '"
+ << l << "'";
+ }
+
+ // NOTE: similar code above.
+ //
+ if (optional<bool> u = add (move (f), &skip, rmt))
+ {
+ restart = *u;
+
+ if (restart)
+ {
+ update = true;
+ l6 ([&]{trace << "restarting";});
+ }
+ }
+ else
+ {
+ // Trigger recompilation, mark as expected to fail, and
+ // bail out.
+ //
+ update = true;
+ deferred_failure = true;
+ break;
+ }
+ }
+ }
+
+ break; // case
}
}
+ if (file)
+ ifs.close ();
+
// Bail out early if we have deferred a failure.
//
if (deferred_failure)
return;
+
+ // Clean after each depdb-dyndep execution.
+ //
+ if (prog)
+ clean (*environment_, ll);
}
}
- // Add the terminating blank line (we are updating depdb).
+ // Add the dynamic prerequisites terminating blank line if we are
+ // updating depdb and unless it's already there.
+ //
+ if (!cache && !skip_blank)
+ dd.expect ("");
+
+ // Handle dynamic targets.
//
- dd.expect ("");
+ if (dyn_tgt)
+ {
+ if (g != nullptr && g->members_static == 0 && dyn_targets.empty ())
+ fail (ll) << "group " << *g << " has no static or dynamic members";
+
+ // There is one more level (at least that we know of) to this rabbit
+ // hole: if the set of dynamic targets changes between clean and
+ // update and we do a `clean update` batch, then we will end up with
+ // old targets (as entered by clean from old depdb information)
+ // being present during update. So we need to clean them out.
+ //
+ // Optimize this for a first/single batch (common case) by noticing
+ // that there are only real targets to start with.
+ //
+ // Note that this doesn't affect explicit groups where we reset the
+ // members on each update (see adhoc_rule_buildscript::apply()).
+ //
+ optional<vector<const target*>> dts;
+ if (g == nullptr)
+ {
+ for (const target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (m->decl != target_decl::real)
+ dts = vector<const target*> ();
+ }
+ }
+
+ struct map_ext_data
+ {
+ const char* what_tgt;
+ const map<string, const target_type*>& map_tt;
+ const path* f; // Updated on each iteration.
+ } d {what_tgt, map_tt, nullptr};
+
+ function<dyndep::map_extension_func> map_ext (
+ [this, &d] (const scope& bs, const string& n, const string& e)
+ {
+ small_vector<const target_type*, 2> tts;
+
+ // Check the custom mapping first.
+ //
+ auto i (d.map_tt.find (e));
+ if (i != d.map_tt.end ())
+ tts.push_back (i->second);
+ else
+ {
+ tts = dyndep::map_extension (bs, n, e, nullptr);
+
+ // Issue custom diagnostics suggesting --target-extension-type.
+ //
+ if (tts.size () > 1)
+ {
+ diag_record dr (fail);
+
+ dr << "mapping of " << d.what_tgt << " target path " << *d.f
+ << " to target type is ambiguous";
+
+ for (const target_type* tt: tts)
+ dr << info << "can be " << tt->name << "{}";
+
+ dr << info << "use --target-extension-type to provide custom "
+ << "mapping";
+ }
+ }
+
+ return tts;
+ });
+
+ function<dyndep::group_filter_func> filter;
+ if (g != nullptr)
+ {
+ // Skip static/duplicate members in explicit group.
+ //
+ filter = [] (mtime_target& g, const build2::file& m)
+ {
+ auto& ms (g.as<group> ().members);
+ return find (ms.begin (), ms.end (), &m) == ms.end ();
+ };
+ }
+
+ // Unlike for prerequisites, for targets we store in depdb both the
+ // resolved target type and path. The target type is used in clean
+ // (see adhoc_rule_buildscript::apply()) where we cannot easily get
+ // hold of all the dyndep options to map the path to target type.
+ // So the format of the target line is:
+ //
+ // <type> <path>
+ //
+ string l; // Reuse the buffer.
+ for (dynamic_target& dt: dyn_targets)
+ {
+ const path& f (dt.path);
+
+ d.f = &f; // Current file being mapped.
+
+ // Note that this logic should be consistent with what we have in
+ // adhoc_buildscript_rule::apply() for perform_clean.
+ //
+ const build2::file* ft (nullptr);
+ if (g != nullptr)
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_group_member (
+ what_tgt,
+ a, bs, *g,
+ f, // Can't move since need to return dyn_targets.
+ map_ext, *def_tt, filter));
+
+ // Note: no target_decl shenanigans since reset the members on
+ // each update.
+ //
+ if (!r.second)
+ {
+ dt.type.clear (); // Static indicator.
+ continue;
+ }
+
+ ft = &r.first;
+
+ // Note: we only currently support dynamic file members so it
+ // will be file if first.
+ //
+ g->members.push_back (ft);
+ }
+ else
+ {
+ pair<const build2::file&, bool> r (
+ dyndep::inject_adhoc_group_member (
+ what_tgt,
+ a, bs, t,
+ f, // Can't move since need to return dyn_targets.
+ map_ext, *def_tt));
+
+ // Note that we have to track the dynamic target even if it was
+ // already a member (think `b update && b clean update`).
+ //
+ if (!r.second && r.first.decl == target_decl::real)
+ {
+ dt.type.clear (); // Static indicator.
+ continue;
+ }
+
+ ft = &r.first;
+
+ if (dts)
+ dts->push_back (ft);
+ }
+
+ const char* tn (ft->type ().name);
+
+ if (dt.type.empty ())
+ dt.type = tn;
+ else if (dt.type != tn)
+ {
+ // This can, for example, happen if the user changed the
+ // extension to target type mapping. Say swapped extension
+ // variable values of two target types.
+ //
+ fail << "mapping of " << what_tgt << " target path " << f
+ << " to target type has changed" <<
+ info << "previously mapped to " << dt.type << "{}" <<
+ info << "now mapped to " << tn << "{}" <<
+ info << "perform from scratch rebuild of " << t;
+ }
+
+ if (!cache)
+ {
+ l = dt.type;
+ l += ' ';
+ l += f.string ();
+ dd.expect (l);
+ }
+ }
+
+ // Add the dynamic targets terminating blank line.
+ //
+ if (!cache)
+ dd.expect ("");
+
+ // Clean out old dynamic targets (skip the primary member).
+ //
+ if (dts)
+ {
+ assert (g == nullptr);
+
+ for (target* p (&t); p->adhoc_member != nullptr; )
+ {
+ target* m (p->adhoc_member);
+
+ if (m->decl != target_decl::real)
+ {
+ // While there could be quite a few dynamic targets (think
+ // something like Doxygen), this will hopefully be optimized
+ // down to a contiguous memory region scan for an integer and
+ // so should be fast.
+ //
+ if (find (dts->begin (), dts->end (), m) == dts->end ())
+ {
+ p->adhoc_member = m->adhoc_member; // Drop m.
+ continue;
+ }
+ }
+
+ p = m;
+ }
+ }
+ }
// Reload $< and $> to make sure they contain the newly discovered
// prerequisites and targets.
@@ -2255,7 +3380,8 @@ namespace build2
environment_->set_special_variables (a);
}
- // When add a special variable don't forget to update lexer::word().
+ // When adding a special variable don't forget to update lexer::word() and
+ // for-loop parsing in pre_parse_line().
//
bool parser::
special_variable (const string& n) noexcept
@@ -2264,15 +3390,24 @@ namespace build2
}
lookup parser::
- lookup_variable (name&& qual, string&& name, const location& loc)
+ lookup_variable (names&& qual, string&& name, const location& loc)
{
// In the pre-parse mode collect the referenced variable names for the
// script semantics change tracking.
//
+ // Note that during pre-parse a computed (including qualified) name
+ // is signalled as an empty name.
+ //
if (pre_parse_ || pre_parse_suspended_)
{
lookup r;
+ // Note that pre-parse can be switched on by the base parser even
+ // during execute.
+ //
+ if (!top_pre_parse_)
+ return r;
+
// Add the variable name skipping special variables and suppressing
// duplicates, unless the default variables change tracking is
// canceled with `depdb clear`. While at it, check if the script
@@ -2288,10 +3423,8 @@ namespace build2
{
if (pre_parse_suspended_)
{
- const variable* pvar (scope_->ctx.var_pool.find (name));
-
- if (pvar != nullptr)
- r = (*scope_)[*pvar];
+ if (const variable* var = scope_->var_pool ().find (name))
+ r = (*scope_)[*var];
}
if (!depdb_clear_)
@@ -2302,12 +3435,27 @@ namespace build2
vars.push_back (move (name));
}
}
+ else
+ {
+ // What about pre_parse_suspended_? Don't think it makes sense to
+ // diagnose this since it can be indirect (that is, via an
+ // intermediate variable).
+ //
+ if (perform_update_ && file_based_ && !computed_var_)
+ computed_var_ = loc;
+ }
return r;
}
if (!qual.empty ())
- fail (loc) << "qualified variable name";
+ {
+ // Qualified variable is computed and we expect the user to track
+ // its changes manually.
+ //
+ return build2::script::parser::lookup_variable (
+ move (qual), move (name), loc);
+ }
lookup r (environment_->lookup (name));
@@ -2318,13 +3466,13 @@ namespace build2
// diag builtin argument change (which can be affected by such a
// variable expansion) doesn't affect the script semantics and the
// depdb argument is specifically used for the script semantics change
- // tracking. We also omit this check it the depdb builtin is used in
- // the script, assuming that such variables are tracked manually, if
- // required.
+ // tracking. We also omit this check if the depdb "value" (string,
+ // hash) builtin is used in the script, assuming that such variables
+ // are tracked manually, if required.
//
if (script_ != nullptr &&
!script_->depdb_clear &&
- script_->depdb_preamble.empty ())
+ !script_->depdb_value)
{
if (r.defined () && !r.belongs (*environment_))
{
@@ -2342,9 +3490,12 @@ namespace build2
void parser::
lookup_function (string&& name, const location& loc)
{
- if (perform_update_ && file_based_ && !impure_func_)
+ // Note that pre-parse can be switched on by the base parser even
+ // during execute.
+ //
+ if (top_pre_parse_ && perform_update_ && file_based_ && !impure_func_)
{
- const function_overloads* f (ctx.functions.find (name));
+ const function_overloads* f (ctx->functions.find (name));
if (f != nullptr && !f->pure)
impure_func_ = make_pair (move (name), loc);
diff --git a/libbuild2/build/script/parser.hxx b/libbuild2/build/script/parser.hxx
index 362c834..8f86b24 100644
--- a/libbuild2/build/script/parser.hxx
+++ b/libbuild2/build/script/parser.hxx
@@ -28,7 +28,7 @@ namespace build2
// Pre-parse. Issue diagnostics and throw failed in case of an error.
//
public:
- parser (context& c): build2::script::parser (c, false /* relex */) {}
+ parser (context& c): build2::script::parser (c) {}
// Note that the returned script object references the passed path
// name.
@@ -65,11 +65,18 @@ namespace build2
pre_parse_script ();
void
- pre_parse_line (token&, token_type&, bool if_line = false);
+ pre_parse_line (token&, token_type&,
+ optional<line_type> flow_control_type = nullopt);
+
+ void
+ pre_parse_block_line (token&, token_type&, line_type block_type);
void
pre_parse_if_else (token&, token_type&);
+ void
+ pre_parse_loop (token&, token_type&, line_type);
+
command_expr
parse_command_line (token&, token_type&);
@@ -89,7 +96,6 @@ namespace build2
environment&, const script&, runner&,
bool enter = true, bool leave = true);
-
// Execute the first or the second (dyndep) half of the depdb
// preamble.
//
@@ -97,8 +103,10 @@ namespace build2
// runner's enter() function is called before the first preamble/body
// command execution and leave() -- after the last command.
//
+ // Note: target must be file or group.
+ //
void
- execute_depdb_preamble (action a, const scope& base, const file& t,
+ execute_depdb_preamble (action a, const scope& base, const target& t,
environment& e, const script& s, runner& r,
depdb& dd)
{
@@ -113,18 +121,28 @@ namespace build2
dd);
}
+ struct dynamic_target
+ {
+ string type; // Target type name (absent if static member).
+ build2::path path;
+ };
+
+ using dynamic_targets = vector<dynamic_target>;
+
void
execute_depdb_preamble_dyndep (
- action a, const scope& base, file& t,
+ action a, const scope& base, target& t,
environment& e, const script& s, runner& r,
- depdb& dd, bool& update, timestamp mt, bool& deferred_failure)
+ depdb& dd,
+ dynamic_targets& dyn_targets,
+ bool& update, timestamp mt, bool& deferred_failure)
{
exec_depdb_preamble (
a, base, t,
e, s, r,
s.depdb_preamble.begin () + *s.depdb_dyndep,
s.depdb_preamble.end (),
- dd, &update, mt, &deferred_failure);
+ dd, &dyn_targets, &update, mt, &deferred_failure);
}
// This version doesn't actually execute the depdb-dyndep builtin (but
@@ -133,7 +151,7 @@ namespace build2
// depdb-dyndep --byproduct logic (which fits better into the rule
// implementation).
//
- enum class dyndep_format {make};
+ enum class dyndep_format {make, lines};
struct dyndep_byproduct
{
@@ -148,14 +166,17 @@ namespace build2
dyndep_byproduct
execute_depdb_preamble_dyndep_byproduct (
- action a, const scope& base, const file& t,
+ action a, const scope& base, const target& t,
environment& e, const script& s, runner& r,
depdb& dd, bool& update, timestamp mt)
{
+ // Dummies.
+ //
// This is getting a bit ugly (we also don't really need to pass
// depdb here). One day we will find a better way...
//
- bool deferred_failure; // Dymmy.
+ dynamic_targets dyn_targets;
+ bool deferred_failure;
dyndep_byproduct v;
exec_depdb_preamble (
@@ -163,19 +184,26 @@ namespace build2
e, s, r,
s.depdb_preamble.begin () + *s.depdb_dyndep,
s.depdb_preamble.end (),
- dd, &update, mt, &deferred_failure, &v);
+ dd, &dyn_targets, &update, mt, &deferred_failure, &v);
return v;
}
- // Parse a special builtin line into names, performing the variable
- // and pattern expansions. If omit_builtin is true, then omit the
- // builtin name from the result.
+ // If the diag argument is true, then execute the preamble including
+ // the (trailing) diagnostics line and return the resulting names and
+ // its location (see exec_special() for the diagnostics line execution
+ // semantics). Otherwise, execute the preamble excluding the
+ // diagnostics line and return an empty names list and location. If
+ // requested, call the runner's enter() and leave() functions that
+ // initialize/clean up the environment before/after the preamble
+ // execution.
//
- names
- execute_special (const scope& root, const scope& base,
- environment&,
- const line&,
- bool omit_builtin = true);
+ // Note: having both root and base scopes for testing (where we pass
+ // global scope for both).
+ //
+ pair<names, location>
+ execute_diag_preamble (const scope& root, const scope& base,
+ environment&, const script&, runner&,
+ bool diag, bool enter, bool leave);
protected:
// Setup the parser for subsequent exec_*() function calls.
@@ -196,24 +224,34 @@ namespace build2
exec_lines (l.begin (), l.end (), c);
}
+ // Parse a special builtin line into names, performing the variable
+ // and pattern expansions. Optionally, skip the first token (builtin
+ // name, etc).
+ //
names
exec_special (token&, build2::script::token_type&, bool skip_first);
+ // Note: target must be file or group.
+ //
void
- exec_depdb_preamble (action, const scope& base, const file&,
+ exec_depdb_preamble (action, const scope& base, const target&,
environment&, const script&, runner&,
lines_iterator begin, lines_iterator end,
depdb&,
+ dynamic_targets* dyn_targets = nullptr,
bool* update = nullptr,
optional<timestamp> mt = nullopt,
bool* deferred_failure = nullptr,
dyndep_byproduct* = nullptr);
+ // Note: target must be file or group.
+ //
void
exec_depdb_dyndep (token&, build2::script::token_type&,
size_t line_index, const location&,
- action, const scope& base, file&,
+ action, const scope& base, target&,
depdb&,
+ dynamic_targets& dyn_targets,
bool& update,
timestamp,
bool& deferred_failure,
@@ -229,7 +267,7 @@ namespace build2
//
protected:
virtual lookup
- lookup_variable (name&&, string&&, const location&) override;
+ lookup_variable (names&&, string&&, const location&) override;
virtual void
lookup_function (string&&, const location&) override;
@@ -254,9 +292,9 @@ namespace build2
script* script_;
const small_vector<action, 1>* actions_; // Non-NULL during pre-parse.
- // True if this script is for file-based targets and performing update
- // is one of the actions, respectively. Only set for the pre-parse
- // mode.
+ // True if this script is for file- or file group-based targets and
+ // performing update is one of the actions, respectively. Only set for
+ // the pre-parse mode.
//
bool file_based_;
bool perform_update_;
@@ -286,18 +324,24 @@ namespace build2
//
// If the diag builtin is encountered, then its whole line is saved
// (including the leading 'diag' word) for later execution and the
- // diagnostics weight is set to 4.
+ // diagnostics weight is set to 4. The preceding lines, which can only
+ // contain variable assignments (including via the set builtin,
+ // potentially inside the flow control constructs), are also saved.
//
// Any attempt to manually set the custom diagnostics twice (the diag
// builtin after the script name or after another diag builtin) is
// reported as ambiguity.
//
- // At the end of pre-parsing either diag_name_ or diag_line_ (but not
- // both) are present.
+ // If no script name is deduced by the end of pre-parsing and the
+ // script is used for a single operation, then use this operation's
+ // name as a script name.
+ //
+ // At the end of pre-parsing either diag_name_ is present or
+ // diag_preamble_ is not empty (but not both).
//
optional<pair<string, location>> diag_name_;
optional<pair<string, location>> diag_name2_; // Ambiguous script name.
- optional<pair<line, location>> diag_line_;
+ lines diag_preamble_;
uint8_t diag_weight_ = 0;
// Custom dependency change tracking.
@@ -327,10 +371,12 @@ namespace build2
// recipe should be provided.
//
//
- optional<location> depdb_clear_; // depdb-clear location.
+ optional<location> depdb_clear_; // depdb-clear location.
+ bool depdb_value_ = false; // depdb-{string,hash}
optional<pair<location, size_t>>
depdb_dyndep_; // depdb-dyndep location/position.
bool depdb_dyndep_byproduct_ = false; // --byproduct
+ bool depdb_dyndep_dyn_target_ = false; // --dyn-target
lines depdb_preamble_; // Note: excluding depdb-clear.
// If present, the first impure function called in the body of the
@@ -344,7 +390,18 @@ namespace build2
//
optional<pair<string, location>> impure_func_;
- // True during pre-parsing when the pre-parse mode is temporarily
+ // Similar to the impure function above but for a computed (e.g.,
+ // target-qualified) variable expansion. In this case we don't have a
+ // name (it's computed).
+ //
+ optional<location> computed_var_;
+
+ // True if we (rather than the base parser) turned on the pre-parse
+ // mode.
+ //
+ bool top_pre_parse_;
+
+ // True during top-pre-parsing when the pre-parse mode is temporarily
// suspended to perform expansion.
//
bool pre_parse_suspended_ = false;
@@ -354,19 +411,19 @@ namespace build2
// Before the script line gets parsed, it is set to a temporary value
// that will by default be appended to the script. However,
// parse_program() can point it to a different location where the line
- // should be saved instead (e.g., diag_line_, etc) or set it to NULL
- // if the line is handled in an ad-hoc way and should be dropped
- // (e.g., depdb_clear_, etc).
+ // should be saved instead (e.g., diag_preamble_ back, etc) or set it
+ // to NULL if the line is handled in an ad-hoc way and should be
+ // dropped (e.g., depdb_clear_, etc).
//
line* save_line_;
- // The if-else nesting level (and in the future for other flow
- // control constructs).
+ // The flow control constructs nesting level.
//
- // Maintained during pre-parsing and is incremented when the cmd_if or
- // cmd_ifn lines are encountered, which in particular means that it is
- // already incremented by the time the if-condition expression is
- // pre-parsed. Decremented when the cmd_end line is encountered.
+ // Maintained during pre-parsing and is incremented when flow control
+ // construct condition lines are encountered, which in particular
+ // means that it is already incremented by the time the condition
+ // expression is pre-parsed. Decremented when the cmd_end line is
+ // encountered.
//
size_t level_ = 0;
diff --git a/libbuild2/build/script/parser.test.cxx b/libbuild2/build/script/parser.test.cxx
index 5808015..97eac22 100644
--- a/libbuild2/build/script/parser.test.cxx
+++ b/libbuild2/build/script/parser.test.cxx
@@ -29,35 +29,58 @@ namespace build2
class print_runner: public runner
{
public:
- print_runner (bool line): line_ (line) {}
+ print_runner (bool line, bool iterations):
+ line_ (line),
+ iterations_ (iterations) {}
virtual void
enter (environment&, const location&) override {}
virtual void
- run (environment&,
+ run (environment& env,
const command_expr& e,
- size_t i,
- const location&) override
+ const iteration_index* ii, size_t i,
+ const function<command_function>& cf,
+ const location& ll) override
{
+ // If the function is specified, then just execute it with an empty
+ // stdin so it can perform the housekeeping (stop replaying tokens,
+ // increment line index, etc).
+ //
+ if (cf != nullptr)
+ {
+ assert (e.size () == 1 && !e[0].pipe.empty ());
+
+ const command& c (e[0].pipe.back ());
+
+ // Must be enforced by the caller.
+ //
+ assert (!c.out && !c.err && !c.exit);
+
+ cf (env, c.arguments,
+ fdopen_null (), nullptr /* pipe */,
+ nullopt /* deadline */,
+ ll);
+ }
+
cout << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
}
virtual bool
- run_if (environment&,
- const command_expr& e,
- size_t i,
- const location&) override
+ run_cond (environment&,
+ const command_expr& e,
+ const iteration_index* ii, size_t i,
+ const location&) override
{
cout << "? " << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
@@ -68,16 +91,36 @@ namespace build2
leave (environment&, const location&) override {}
private:
+ void
+ print_line_info (const iteration_index* ii, size_t i) const
+ {
+ cout << " #";
+
+ if (line_)
+ cout << ' ' << i;
+
+ if (iterations_ && ii != nullptr)
+ {
+ string s;
+ for (const iteration_index* i (ii); i != nullptr; i = i->prev)
+ s.insert (0, " i" + to_string (i->index));
+
+ cout << s;
+ }
+ }
+
+ private:
bool line_;
+ bool iterations_;
};
// Usages:
//
- // argv[0] [-l]
+ // argv[0] [-l] [-r]
// argv[0] -b [-t]
// argv[0] -d [-t]
+ // argv[0] -g [-t] [<diag-name>]
// argv[0] -q
- // argv[0] -g [<diag-name>]
//
// In the first form read the script from stdin and trace the script
// body execution to stdout using the custom print runner.
@@ -88,26 +131,33 @@ namespace build2
// In the third form read the script from stdin, parse it and dump the
// depdb preamble lines to stdout.
//
- // In the forth form read the script from stdin, parse it and print
- // line tokens quoting information to stdout.
- //
- // In the fifth form read the script from stdin, parse it and print the
+ // In the forth form read the script from stdin, parse it and print the
// low-verbosity script diagnostics name or custom low-verbosity
// diagnostics to stdout. If the script doesn't deduce any of them, then
// print the diagnostics and exit with non-zero code.
//
+ // In the fifth form read the script from stdin, parse it and print
+ // line tokens quoting information to stdout.
+ //
// -l
// Print the script line number for each executed expression.
//
+ // -r
+ // Print the loop iteration numbers for each executed expression.
+ //
// -b
// Dump the parsed script body to stdout.
//
// -d
// Dump the parsed script depdb preamble to stdout.
//
+ // -g
+ // Dump the low-verbosity script diagnostics name or custom
+ // low-verbosity diagnostics to stdout.
+ //
// -t
- // Print true if the body (-b) or depdb preamble (-d) references the
- // temporary directory and false otherwise.
+ // Print true if the body (-b), depdb preamble (-d), or diag preamble
+ // (-g) references the temporary directory and false otherwise.
//
// -q
// Print the parsed script tokens quoting information to sdout. If a
@@ -117,10 +167,6 @@ namespace build2
// <quoting> := 'S' | 'D' | 'M'
// <completeness> := 'C' | 'P'
//
- // -g
- // Dump the low-verbosity script diagnostics name or custom
- // low-verbosity diagnostics to stdout.
- //
int
main (int argc, char* argv[])
{
@@ -131,11 +177,12 @@ namespace build2
run,
body,
depdb_preamble,
- quoting,
- diag
+ diag,
+ quoting
} m (mode::run);
bool print_line (false);
+ bool print_iterations (false);
optional<string> diag_name;
bool temp_dir (false);
@@ -145,19 +192,23 @@ namespace build2
if (a == "-l")
print_line = true;
+ else if (a == "-r")
+ print_iterations = true;
else if (a == "-b")
m = mode::body;
else if (a == "-d")
m = mode::depdb_preamble;
+ else if (a == "-g")
+ m = mode::diag;
else if (a == "-t")
{
- assert (m == mode::body || m == mode::depdb_preamble);
+ assert (m == mode::body ||
+ m == mode::depdb_preamble ||
+ m == mode::diag);
temp_dir = true;
}
else if (a == "-q")
m = mode::quoting;
- else if (a == "-g")
- m = mode::diag;
else
{
if (m == mode::diag)
@@ -170,8 +221,9 @@ namespace build2
}
}
- assert (!print_line || m == mode::run);
- assert (!diag_name || m == mode::diag);
+ assert (!print_line || m == mode::run || m == mode::diag);
+ assert (!print_iterations || m == mode::run || m == mode::diag);
+ assert (!diag_name || m == mode::diag);
// Fake build system driver, default verbosity.
//
@@ -203,6 +255,8 @@ namespace build2
tt.path (path ("driver"));
+ const scope& bs (tt.base_scope ());
+
small_vector<action, 1> acts {perform_update_id};
// Parse and run.
@@ -210,7 +264,7 @@ namespace build2
parser p (ctx);
path_name nm ("buildfile");
- script s (p.pre_parse (tt.base_scope (), tt.type (), acts,
+ script s (p.pre_parse (bs, tt.type (), acts,
cin, nm,
11 /* line */,
(m != mode::diag
@@ -222,9 +276,29 @@ namespace build2
{
case mode::run:
{
- environment e (perform_update_id, tt, s.body_temp_dir);
- print_runner r (print_line);
- p.execute_body (ctx.global_scope, ctx.global_scope, e, s, r);
+ environment e (perform_update_id, tt, bs, false /* temp_dir */);
+ print_runner r (print_line, print_iterations);
+
+ bool exec_diag (!s.diag_preamble.empty ());
+
+ if (exec_diag)
+ {
+ if (s.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ p.execute_diag_preamble (ctx.global_scope, ctx.global_scope,
+ e, s, r,
+ false /* diag */,
+ true /* enter */,
+ false /* leave */);
+ }
+
+ if (s.body_temp_dir && !s.diag_preamble_temp_dir)
+ e.set_temp_dir_variable ();
+
+ p.execute_body (ctx.global_scope, ctx.global_scope,
+ e, s, r,
+ !exec_diag /* enter */);
break;
}
case mode::diag:
@@ -235,14 +309,26 @@ namespace build2
}
else
{
- assert (s.diag_line);
+ if (!temp_dir)
+ {
+ environment e (perform_update_id,
+ tt,
+ bs,
+ s.diag_preamble_temp_dir);
- environment e (perform_update_id, tt, false /* temp_dir */);
+ print_runner r (print_line, print_iterations);
- cout << "diag: " << p.execute_special (ctx.global_scope,
+ names diag (p.execute_diag_preamble (ctx.global_scope,
ctx.global_scope,
- e,
- *s.diag_line) << endl;
+ e, s, r,
+ true /* diag */,
+ true /* enter */,
+ true /* leave */).first);
+
+ cout << "diag: " << diag << endl;
+ }
+ else
+ cout << (s.diag_preamble_temp_dir ? "true" : "false") << endl;
}
break;
diff --git a/libbuild2/build/script/runner.cxx b/libbuild2/build/script/runner.cxx
index 51139d4..5d9764b 100644
--- a/libbuild2/build/script/runner.cxx
+++ b/libbuild2/build/script/runner.cxx
@@ -28,12 +28,37 @@ namespace build2
//
for (auto i (env.cleanups.begin ()); i != env.cleanups.end (); )
{
- const target* m (&env.target);
- for (; m != nullptr; m = m->adhoc_member)
+ const target* m (nullptr);
+ if (const group* g = env.target.is_a<group> ())
{
- if (const path_target* pm = m->is_a<path_target> ())
- if (i->path == pm->path ())
- break;
+ for (const target* gm: g->members)
+ {
+ if (const path_target* pm = gm->is_a<path_target> ())
+ {
+ if (i->path == pm->path ())
+ {
+ m = gm;
+ break;
+ }
+ }
+ }
+ }
+ else if (const fsdir* fd = env.target.is_a<fsdir> ())
+ {
+ // Compare ignoring the trailing directory separator.
+ //
+ if (path_traits::compare (i->path.string (),
+ fd->dir.string ()) == 0)
+ m = fd;
+ }
+ else
+ {
+ for (m = &env.target; m != nullptr; m = m->adhoc_member)
+ {
+ if (const path_target* pm = m->is_a<path_target> ())
+ if (i->path == pm->path ())
+ break;
+ }
}
if (m != nullptr)
@@ -96,39 +121,43 @@ namespace build2
void default_runner::
run (environment& env,
const command_expr& expr,
- size_t li,
+ const iteration_index* ii, size_t li,
+ const function<command_function>& cf,
const location& ll)
{
if (verb >= 3)
text << ": " << expr;
// Run the expression if we are not in the dry-run mode or if it
- // executes the set or exit builtin and just print the expression
- // otherwise at verbosity level 2 and up.
+ // executes the set or exit builtin or it is a for-loop. Otherwise,
+ // just print the expression otherwise at verbosity level 2 and up.
//
if (!env.context.dry_run ||
find_if (expr.begin (), expr.end (),
- [] (const expr_term& et)
+ [&cf] (const expr_term& et)
{
const process_path& p (et.pipe.back ().program);
return p.initial == nullptr &&
(p.recall.string () == "set" ||
- p.recall.string () == "exit");
+ p.recall.string () == "exit" ||
+ (cf != nullptr &&
+ p.recall.string () == "for"));
}) != expr.end ())
- build2::script::run (env, expr, li, ll);
+ build2::script::run (env, expr, ii, li, ll, cf);
else if (verb >= 2)
text << expr;
}
bool default_runner::
- run_if (environment& env,
- const command_expr& expr,
- size_t li, const location& ll)
+ run_cond (environment& env,
+ const command_expr& expr,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
if (verb >= 3)
text << ": ?" << expr;
- return build2::script::run_if (env, expr, li, ll);
+ return build2::script::run_cond (env, expr, ii, li, ll);
}
}
}
diff --git a/libbuild2/build/script/runner.hxx b/libbuild2/build/script/runner.hxx
index 558de9b..ec8a948 100644
--- a/libbuild2/build/script/runner.hxx
+++ b/libbuild2/build/script/runner.hxx
@@ -32,17 +32,21 @@ namespace build2
// Location is the start position of this command line in the script.
// It can be used in diagnostics.
//
+ // Optionally, execute the specified function instead of the last
+ // pipe command.
+ //
virtual void
run (environment&,
const command_expr&,
- size_t index,
+ const iteration_index*, size_t index,
+ const function<command_function>&,
const location&) = 0;
virtual bool
- run_if (environment&,
- const command_expr&,
- size_t,
- const location&) = 0;
+ run_cond (environment&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) = 0;
// Location is the script end location (for diagnostics, etc).
//
@@ -52,9 +56,9 @@ namespace build2
// Run command expressions.
//
- // In dry-run mode don't run the expressions unless they are if-
- // conditions or execute the set or exit builtins, but print them at
- // verbosity level 2 and up.
+ // In dry-run mode don't run the expressions unless they are flow
+ // control construct conditions or execute the set or exit builtins, but
+ // print them at verbosity level 2 and up.
//
class default_runner: public runner
{
@@ -65,14 +69,15 @@ namespace build2
virtual void
run (environment&,
const command_expr&,
- size_t,
+ const iteration_index*, size_t,
+ const function<command_function>&,
const location&) override;
virtual bool
- run_if (environment&,
- const command_expr&,
- size_t,
- const location&) override;
+ run_cond (environment&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) override;
virtual void
leave (environment&, const location&) override;
diff --git a/libbuild2/build/script/script.cxx b/libbuild2/build/script/script.cxx
index b230ca5..0d96cc3 100644
--- a/libbuild2/build/script/script.cxx
+++ b/libbuild2/build/script/script.cxx
@@ -7,6 +7,8 @@
#include <libbuild2/target.hxx>
+#include <libbuild2/adhoc-rule-buildscript.hxx> // include_unmatch*
+
#include <libbuild2/script/timeout.hxx>
#include <libbuild2/build/script/parser.hxx>
@@ -28,18 +30,20 @@ namespace build2
environment::
environment (action a,
const target_type& t,
+ const scope_type& s,
bool temp,
const optional<timestamp>& dl)
: build2::script::environment (
t.ctx,
- cast<target_triplet> (t.ctx.global_scope["build.host"]),
+ *t.ctx.build_host,
dir_name_view (&work, &wd_name),
temp_dir.path, false /* temp_dir_keep */,
redirect (redirect_type::none),
redirect (redirect_type::merge, 2),
redirect (redirect_type::pass)),
target (t),
- vars (context, false /* global */),
+ scope (s),
+ vars (context, false /* shared */), // Note: managed.
var_ts (var_pool.insert (">")),
var_ps (var_pool.insert ("<")),
script_deadline (to_deadline (dl, false /* success */))
@@ -56,11 +60,27 @@ namespace build2
{
// $>
//
+ // What should it contain for an explicit group? While it may seem
+ // that just the members should be enough (and analogous to the ad
+ // hoc case), this won't let us get the group name for diagnostics.
+ // So the group name followed by all the members seems like the
+ // logical choice.
+ //
names ns;
- for (const target_type* m (&target);
- m != nullptr;
- m = m->adhoc_member)
- m->as_name (ns);
+
+ if (const group* g = target.is_a<group> ())
+ {
+ g->as_name (ns);
+ for (const target_type* m: g->members)
+ m->as_name (ns);
+ }
+ else
+ {
+ for (const target_type* m (&target);
+ m != nullptr;
+ m = m->adhoc_member)
+ m->as_name (ns);
+ }
assign (var_ts) = move (ns);
}
@@ -73,13 +93,25 @@ namespace build2
// much sense, they could be handy to exclude certain prerequisites
// from $< while still treating them as such, especially in rule.
//
+ // While initially we treated update=unmatch prerequisites as
+ // implicitly ad hoc, this turned out to be not quite correct, so
+ // now we add them unless they are explicitly marked ad hoc.
+ //
names ns;
- for (const prerequisite_target& pt: target.prerequisite_targets[a])
+ for (const prerequisite_target& p: target.prerequisite_targets[a])
{
// See adhoc_buildscript_rule::execute_update_prerequisites().
//
- if (pt.target != nullptr && !pt.adhoc ())
- pt.target->as_name (ns);
+ if (const target_type* pt =
+ p.target != nullptr ? (p.adhoc () ? nullptr : p.target) :
+ (p.include & adhoc_buildscript_rule::include_unmatch) != 0 &&
+ (p.include & prerequisite_target::include_adhoc) == 0 &&
+ (p.include & adhoc_buildscript_rule::include_unmatch_adhoc) == 0
+ ? reinterpret_cast<target_type*> (p.data)
+ : nullptr)
+ {
+ pt->as_name (ns);
+ }
}
assign (var_ps) = move (ns);
@@ -154,7 +186,7 @@ namespace build2
}
void environment::
- set_variable (string&& nm,
+ set_variable (string nm,
names&& val,
const string& attrs,
const location& ll)
@@ -233,7 +265,7 @@ namespace build2
// in parallel). Plus, if there is no such variable, then we cannot
// possibly find any value.
//
- const variable* pvar (context.var_pool.find (n));
+ const variable* pvar (scope.var_pool ().find (n));
if (pvar == nullptr)
return lookup_type ();
diff --git a/libbuild2/build/script/script.hxx b/libbuild2/build/script/script.hxx
index 0619253..08f1bf4 100644
--- a/libbuild2/build/script/script.hxx
+++ b/libbuild2/build/script/script.hxx
@@ -20,14 +20,18 @@ namespace build2
namespace script
{
using build2::script::line;
- using build2::script::lines;
using build2::script::line_type;
+ using build2::script::lines;
using build2::script::redirect;
using build2::script::redirect_type;
+ using build2::script::command;
using build2::script::expr_term;
using build2::script::command_expr;
+ using build2::script::iteration_index;
using build2::script::deadline;
using build2::script::timeout;
+ using build2::script::pipe_command;
+ using build2::script::command_function;
// Forward declarations.
//
@@ -44,13 +48,11 @@ namespace build2
class script
{
public:
- using lines_type = build::script::lines;
-
// Note that the variables are not pre-entered into a pool during the
// parsing phase, so the line variable pointers are NULL.
//
- lines_type body;
- bool body_temp_dir = false; // True if the body references $~.
+ lines body;
+ bool body_temp_dir = false; // True if the body references $~.
// Referenced ordinary (non-special) variables.
//
@@ -65,20 +67,24 @@ namespace build2
small_vector<string, 2> vars; // 2 for command and options.
// Command name for low-verbosity diagnostics and custom low-verbosity
- // diagnostics line. Note: cannot be both (see the script parser for
+ // diagnostics line, potentially preceded with the variable
+ // assignments. Note: cannot be both (see the script parser for
// details).
//
optional<string> diag_name;
- optional<line> diag_line;
+ lines diag_preamble;
+ bool diag_preamble_temp_dir = false; // True if refs $~.
// The script's custom dependency change tracking lines (see the
// script parser for details).
//
bool depdb_clear;
+ bool depdb_value; // String or hash.
optional<size_t> depdb_dyndep; // Pos of first dyndep.
bool depdb_dyndep_byproduct = false; // dyndep --byproduct
- lines_type depdb_preamble;
- bool depdb_preamble_temp_dir = false; // True if refs $~.
+ bool depdb_dyndep_dyn_target = false;// dyndep --dyn-target
+ lines depdb_preamble; // Note include vars.
+ bool depdb_preamble_temp_dir = false;// True if refs $~.
location start_loc;
location end_loc;
@@ -87,10 +93,12 @@ namespace build2
class environment: public build2::script::environment
{
public:
+ using scope_type = build2::scope;
using target_type = build2::target;
environment (action,
const target_type&,
+ const scope_type&,
bool temp_dir,
const optional<timestamp>& deadline = nullopt);
@@ -111,11 +119,12 @@ namespace build2
environment& operator= (const environment&) = delete;
public:
- // Primary target this environment is for.
+ // Primary target this environment is for and its base scope;
//
const target_type& target;
+ const scope_type& scope;
- // Script-local variable pool and map.
+ // Script-private variable pool and map.
//
// Note that it may be tempting to reuse the rule-specific variables
// for this but they should not be modified during execution (i.e.,
@@ -161,7 +170,7 @@ namespace build2
size_t exec_line = 1;
virtual void
- set_variable (string&& name,
+ set_variable (string name,
names&&,
const string& attrs,
const location&) override;
diff --git a/libbuild2/buildfile b/libbuild2/buildfile
index 52252d6..6289bbf 100644
--- a/libbuild2/buildfile
+++ b/libbuild2/buildfile
@@ -4,7 +4,7 @@
# NOTE: remember to update bundled_modules in libbuild2/module.cxx if adding a
# new module.
#
-bundled_modules = bash/ bin/ c/ cc/ cxx/ in/ version/
+bundled_modules = bash/ bin/ c/ cc/ cli/ cxx/ in/ version/
./: lib{build2} $bundled_modules
@@ -59,42 +59,76 @@ lib{build2}: cxx{utility-uninstalled}: for_install = false
libul{build2}: config/{hxx ixx txx cxx}{** -host-config -**.test...} \
config/cxx{host-config}
+# Derive ~host and ~build2 configurations from current configuration.
+#
# This will of course blow up spectacularly if we are cross-compiling. But
# let's wait and enjoy the fireworks (and get a sense of why someone would
# want to cross-compile a build system).
#
-config/cxx{host-config}: config/in{host-config}
+# For the ~host configuration we only want c/cxx/cc and bin that they load.
+# For ~build2 we want to keep everything except dist.
+#
+# We also remove comment lines which could be confused with preprocessor
+# directives by some lesser compilers and blank lines between groups of
+# options which could cause spurious rebuilds when we filter out entire
+# groups.
+#
+# For ~host also filter out config.bin.lib/config.bin.*.lib (static/shared
+# library build/link preferences). In particular, we don't want to force
+# config.bin.lib=shared since that will cause static libraries to link shared
+# versions of their prerequisites (see mysql-client for a case where this can
+# make a difference).
+#
+# For ~build2 also filter out config.install.chroot -- we definitely don't
+# want it carried through. Also filter out variables that control tests
+# execution.
+#
+# Finally, for both ~host and ~build2 we keep config.config.environment
+# but strip config.config.hermetic* (we shouldn't be forcing hermiticity
+# on the users of ~host/~build2; they can decide for themselves if they
+# want it).
+#
+build2_config_lines = [strings]
+host_config_lines = [strings]
+
+for l: $regex.replace_lines( \
+ $config.save(), \
+ '^( *(#|(config\.(test[. ]|dist\.|install\.chroot|config\.hermetic))).*|)$', \
+ [null])
{
- # For the ~host configuration we only want c/cxx/cc and bin that they load.
- # For ~build2 we want to keep everything except dist.
- #
- # We also remove comment lines which could be confused with preprocessor
- # directives by some lesser compilers and blank lines between groups of
- # options which could cause spurious rebuilds when we filter out entire
- # groups.
- #
- # For ~build2 also filter out config.install.chroot -- we definitely don't
- # want it carried through. Also filter out variables that control tests
- # execution.
- #
- # Finally, for both ~host and ~build2 we keep config.config.environment
- # but strip config.config.hermetic* (we shouldn't be forcing hermiticity
- # on the users of ~host/~build2; they can decide for themselves if they
- # want it).
- #
- build2_config = $regex.replace_lines( \
- $config.save(), \
- '^( *(#|(config\.(test[. ]|dist\.|install\.chroot|config\.hermetic))).*|)$', \
- [null], \
- return_lines)
+ build2_config_lines += $l
- # Also preserve config.version.
+ # Note: also preserve config.version.
#
- host_config = $regex.replace_lines( \
- $build2_config, \
- '^ *config\.(c[. ]|cxx[. ]|cc[.]|bin[.]|config.environment |version ).*$', \
- '$&', \
- format_no_copy return_lines)
+ if $regex.match( \
+ $l, \
+ ' *config\.(c[. ]|cxx[. ]|cc[.]|bin[.]|config.environment |version ).*')
+ {
+ if! ($regex.match(\
+ $l, \
+ ' *config\.bin\.(lib|exe\.lib|liba\.lib|libs\.lib)[ =].*'))
+ {
+ # Filter out sanitizer options in ~host. We run the toolchain with
+ # various sanitizers on CI but sanitizers cause issues in some packages.
+ # Note that we can have both -fsanitize and -fno-sanitize forms. For
+ # example:
+ #
+ # -fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all
+ #
+ if $regex.match($l, ' *config\.(c|cxx|cc)\.(coptions|loptions)[ =].*')
+ {
+ l = $regex.replace($l, ' ?-f(no-)?sanitize[=-][^ ]+', '')
+ }
+
+ host_config_lines += $l
+ }
+ }
+}
+
+config/cxx{host-config}: config/in{host-config}
+{
+ build2_config = $regex.merge($build2_config_lines, '(.+)', '\1\n')
+ host_config = $regex.merge($host_config_lines, '(.+)', '\1\n')
}
libul{build2}: dist/{hxx ixx txx cxx}{** -**.test...}
@@ -169,14 +203,48 @@ if! $cross
{
{obja objs}{context}: cxx.poptions += \
-DBUILD2_IMPORT_PATH=\"$regex.replace($out_root, '\\', '\\\\')\"
+}
+
+# Note that while the -installed object file should only be linked when we
+# are installing, it will be compiled even in the uninstalled case.
+#
+if ($install.root != [null])
+{
+ # Only if installed.
+ #
+ {obja objs}{utility-installed}: cxx.poptions += \
+ -DBUILD2_INSTALL_LIB=\"$regex.replace(\
+ $install.resolve($install.lib), '\\', '\\\\')\"
- # While this object file should only be linked when we are installing, it
- # will be compiled even in the uninstalled case.
+ # Only if configured.
+ #
+ # Note: strip the last directory component (<project>).
+ #
+ # @@ TMP drop after 0.16.0 release.
#
- if ($install.root != [null])
- {obja objs}{utility-installed}: cxx.poptions += \
- -DBUILD2_INSTALL_LIB=\"$regex.replace(\
- $install.resolve($install.lib), '\\', '\\\\')\"
+ install_buildfile = ($install.buildfile != [null] \
+ ? $directory($install.resolve($install.buildfile)) \
+ :)
+ {obja objs}{utility-installed utility-uninstalled}: cxx.poptions += \
+ -DBUILD2_INSTALL_BUILDFILE=\"$regex.replace($install_buildfile, '\\', '\\\\')\"
+
+ #\
+ {obja objs}{utility-installed utility-uninstalled}: cxx.poptions += \
+ -DBUILD2_INSTALL_BUILDFILE=\"$regex.replace(\
+ $directory($install.resolve($install.buildfile)), '\\', '\\\\')\"
+ #\
+
+ # Data directory or src_root if not installed.
+ #
+ # Note: normalized in both cases.
+ #
+ {obja objs}{utility-installed}: cxx.poptions += \
+ -DBUILD2_INSTALL_DATA=\"$regex.replace(\
+ $install.resolve($install.data), '\\', '\\\\')\"
+
+ {obja objs}{utility-uninstalled}: cxx.poptions += \
+ -DBUILD2_INSTALL_DATA=\"$regex.replace(\
+ $src_root, '\\', '\\\\')\"
}
if ($cxx.target.class != 'windows')
@@ -304,11 +372,11 @@ else
{
# No install for the pre-generated case.
#
- script/hxx{builtin-options}@./ \
- script/ixx{builtin-options}@./: install = false
+ script/hxx{builtin-options}@script/ \
+ script/ixx{builtin-options}@script/: install = false
- build/script/hxx{builtin-options}@./ \
- build/script/ixx{builtin-options}@./: install = false
+ build/script/hxx{builtin-options}@build/script/ \
+ build/script/ixx{builtin-options}@build/script/: install = false
}
# Install into the libbuild2/ subdirectory of, say, /usr/include/
diff --git a/libbuild2/buildspec.cxx b/libbuild2/buildspec.cxx
index bd580ca..2eeaf31 100644
--- a/libbuild2/buildspec.cxx
+++ b/libbuild2/buildspec.cxx
@@ -53,7 +53,7 @@ namespace build2
if (v)
{
names storage;
- os << reverse (v, storage);
+ os << reverse (v, storage, true /* reduce */);
}
else
os << "[null]";
@@ -86,7 +86,7 @@ namespace build2
if (v)
{
names storage;
- os << reverse (v, storage);
+ os << reverse (v, storage, true /* reduce */);
}
else
os << "[null]";
diff --git a/libbuild2/c/init.cxx b/libbuild2/c/init.cxx
index 4c051b0..8bc2f7d 100644
--- a/libbuild2/c/init.cxx
+++ b/libbuild2/c/init.cxx
@@ -6,9 +6,12 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/install/utility.hxx>
+
#include <libbuild2/cc/guess.hxx>
#include <libbuild2/cc/module.hxx>
+#include <libbuild2/cc/target.hxx> // pc*
#include <libbuild2/c/target.hxx>
#ifndef BUILD2_DEFAULT_C
@@ -27,6 +30,7 @@ namespace build2
namespace c
{
using cc::compiler_id;
+ using cc::compiler_type;
using cc::compiler_class;
using cc::compiler_info;
@@ -53,6 +57,26 @@ namespace build2
strings& mode,
const string* v) const
{
+ // The standard is `NN` but can also be `gnuNN`.
+
+ // This helper helps recognize both NN and [cC]NN to avoid an endless
+ // stream of user questions. It can also be used to recognize Nx in
+ // addition to NN (e.g., "23" and "2x").
+ //
+ auto stdcmp = [v] (const char* nn, const char* nx = nullptr)
+ {
+ if (v != nullptr)
+ {
+ const char* s (v->c_str ());
+ if (s[0] == 'c' || s[0] == 'C')
+ s += 1;
+
+ return strcmp (s, nn) == 0 || (nx != nullptr && strcmp (s, nx) == 0);
+ }
+
+ return false;
+ };
+
switch (ci.class_)
{
case compiler_class::msvc:
@@ -77,7 +101,12 @@ namespace build2
// C17/18 is a bug-fix version of C11 so here we assume it is the
// same as C11.
//
- // And it's still early days for C2X.
+ // And it's still early days for C2X. Specifically, there is not
+ // much about C2X in MSVC in the official places and the following
+ // page shows that it's pretty much unimplement at the time of the
+ // MSVC 17.6 release:
+ //
+ // https://en.cppreference.com/w/c/compiler_support/23
//
// From version 16.8 VC now supports /std:c11 and /std:c17 options
// which enable C11/17 conformance. However, as of version 16.10,
@@ -86,17 +115,17 @@ namespace build2
//
if (v == nullptr)
;
- else if (*v != "90")
+ else if (!stdcmp ("90"))
{
uint64_t cver (ci.version.major);
- if ((*v == "99" && cver < 16) || // Since VS2010/10.0.
- ((*v == "11" ||
- *v == "17" ||
- *v == "18") && cver < 18) ||
- (*v == "2x" ))
+ if ((stdcmp ("99") && cver < 16) || // Since VS2010/10.0.
+ ((stdcmp ("11") ||
+ stdcmp ("17") ||
+ stdcmp ("18")) && cver < 18) || // Since VS????/11.0.
+ (stdcmp ("23", "2x") ))
{
- fail << "C" << *v << " is not supported by " << ci.signature <<
+ fail << "C " << *v << " is not supported by " << ci.signature <<
info << "required by " << project (rs) << '@' << rs;
}
}
@@ -113,12 +142,12 @@ namespace build2
{
string o ("-std=");
- if (*v == "2x") o += "c2x"; // GCC 9, Clang 9 (8?).
- else if (*v == "17" ||
- *v == "18") o += "c17"; // GCC 8, Clang 6.
- else if (*v == "11") o += "c1x";
- else if (*v == "99") o += "c9x";
- else if (*v == "90") o += "c90";
+ if (stdcmp ("23", "2x")) o += "c2x"; // GCC 9, Clang 9 (8?).
+ else if (stdcmp ("17") ||
+ stdcmp ("18")) o += "c17"; // GCC 8, Clang 6.
+ else if (stdcmp ("11")) o += "c1x";
+ else if (stdcmp ("99")) o += "c9x";
+ else if (stdcmp ("90")) o += "c90";
else o += *v; // In case the user specifies `gnuNN` or some such.
mode.insert (mode.begin (), move (o));
@@ -128,6 +157,79 @@ namespace build2
}
}
+ // See cc::data::x_{hdr,inc} for background.
+ //
+ static const target_type* const hdr[] =
+ {
+ &h::static_type,
+ nullptr
+ };
+
+ // Note that we include S{} here because .S files can include each other.
+ // (And maybe from inline assembler instructions?)
+ //
+ static const target_type* const inc[] =
+ {
+ &h::static_type,
+ &c::static_type,
+ &m::static_type,
+ &S::static_type,
+ &c_inc::static_type,
+ nullptr
+ };
+
+ bool
+ types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.types module must be loaded in project root";
+
+ // Register target types and configure their "installability".
+ //
+ using namespace install;
+
+ bool install_loaded (cast_false<bool> (rs["install.loaded"]));
+
+ // Note: not registering m{} or S{} (they are registered seperately
+ // by the respective optional .types submodules).
+ //
+ rs.insert_target_type<c> ();
+
+ auto insert_hdr = [&rs, install_loaded] (const target_type& tt)
+ {
+ rs.insert_target_type (tt);
+
+ // Install headers into install.include.
+ //
+ if (install_loaded)
+ install_path (rs, tt, dir_path ("include"));
+ };
+
+ for (const target_type* const* ht (hdr); *ht != nullptr; ++ht)
+ insert_hdr (**ht);
+
+ // @@ PERF: maybe factor this to cc.types?
+ //
+ rs.insert_target_type<cc::pc> ();
+ rs.insert_target_type<cc::pca> ();
+ rs.insert_target_type<cc::pcs> ();
+
+ if (install_loaded)
+ install_path<cc::pc> (rs, dir_path ("pkgconfig"));
+
+ return true;
+ }
+
static const char* const hinters[] = {"cxx", nullptr};
// See cc::module for details on guess_init vs config_init.
@@ -154,15 +256,20 @@ namespace build2
// Enter all the variables and initialize the module data.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
cc::config_data d {
cc::lang::c,
"c",
"c",
+ "obj-c",
BUILD2_DEFAULT_C,
".i",
+ ".mi",
hinters,
@@ -225,8 +332,8 @@ namespace build2
vp["cc.export.libs"],
vp["cc.export.impl_libs"],
- vp["cc.pkconfig.include"],
- vp["cc.pkconfig.lib"],
+ vp["cc.pkgconfig.include"],
+ vp["cc.pkgconfig.lib"],
vp.insert_alias (vp["cc.stdlib"], "c.stdlib"), // Same as cc.stdlib.
@@ -238,6 +345,7 @@ namespace build2
vp["cc.module_name"],
vp["cc.importable"],
vp["cc.reprocess"],
+ vp["cc.serialize"],
vp.insert<string> ("c.preprocessed"), // See cxx.preprocessed.
nullptr, // No __symexport (no modules).
@@ -312,19 +420,6 @@ namespace build2
return true;
}
- static const target_type* const hdr[] =
- {
- &h::static_type,
- nullptr
- };
-
- static const target_type* const inc[] =
- {
- &h::static_type,
- &c::static_type,
- nullptr
- };
-
bool
init (scope& rs,
scope& bs,
@@ -353,8 +448,7 @@ namespace build2
"c.link",
"c.install",
- cm.x_info->id.type,
- cm.x_info->id.variant,
+ cm.x_info->id,
cm.x_info->class_,
cm.x_info->version.major,
cm.x_info->version.minor,
@@ -387,6 +481,7 @@ namespace build2
c::static_type,
nullptr, // No C modules yet.
+ c_inc::static_type,
hdr,
inc
};
@@ -397,15 +492,181 @@ namespace build2
return true;
}
+ bool
+ objc_types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::objc_types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.objc.types module must be loaded in project root";
+
+ // Register the m{} target type.
+ //
+ rs.insert_target_type<m> ();
+
+ return true;
+ }
+
+ bool
+ objc_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::objc_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.objc module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("c"));
+
+ if (mod == nullptr)
+ fail (loc) << "c.objc module must be loaded after c module";
+
+ // Register the target type and "enable" it in the module.
+ //
+ // Note that we must register the target type regardless of whether the
+ // C compiler is capable of compiling Objective-C. But we enable only
+ // if it is.
+ //
+ // Note: see similar code in the cxx module.
+ //
+ load_module (rs, rs, "c.objc.types", loc);
+
+ // Note that while Objective-C is supported by MinGW GCC, it's unlikely
+ // Clang supports it when targeting MSVC or Emscripten. But let's keep
+ // the check simple for now.
+ //
+ if (mod->ctype == compiler_type::gcc ||
+ mod->ctype == compiler_type::clang)
+ mod->x_obj = &m::static_type;
+
+ return true;
+ }
+
+ bool
+ as_cpp_types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::as_cpp_types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.as-cpp.types module must be loaded in project root";
+
+ // Register the S{} target type.
+ //
+ rs.insert_target_type<S> ();
+
+ return true;
+ }
+
+ bool
+ as_cpp_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::as_cpp_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.as-cpp module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("c"));
+
+ if (mod == nullptr)
+ fail (loc) << "c.as-cpp module must be loaded after c module";
+
+ // Register the target type and "enable" it in the module.
+ //
+ // Note that we must register the target type regardless of whether the
+ // C compiler is capable of compiling Assember with C preprocessor. But
+ // we enable only if it is.
+ //
+ load_module (rs, rs, "c.as-cpp.types", loc);
+
+ if (mod->ctype == compiler_type::gcc ||
+ mod->ctype == compiler_type::clang)
+ mod->x_asp = &S::static_type;
+
+ return true;
+ }
+
+ bool
+ predefs_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("c::predefs_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "c.predefs module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("c"));
+
+ if (mod == nullptr)
+ fail (loc) << "c.predefs module must be loaded after c module";
+
+ // Register the c.predefs rule.
+ //
+ // Why invent a separate module instead of just always registering it in
+ // the c module? The reason is performance: this rule will be called for
+ // every C header.
+ //
+ cc::predefs_rule& r (*mod);
+
+ rs.insert_rule<h> (perform_update_id, r.rule_name, r);
+ rs.insert_rule<h> (perform_clean_id, r.rule_name, r);
+ rs.insert_rule<h> (configure_update_id, r.rule_name, r);
+
+ return true;
+ }
+
static const module_functions mod_functions[] =
{
// NOTE: don't forget to also update the documentation in init.hxx if
// changing anything here.
- {"c.guess", nullptr, guess_init},
- {"c.config", nullptr, config_init},
- {"c", nullptr, init},
- {nullptr, nullptr, nullptr}
+ {"c.types", nullptr, types_init},
+ {"c.guess", nullptr, guess_init},
+ {"c.config", nullptr, config_init},
+ {"c.objc.types", nullptr, objc_types_init},
+ {"c.objc", nullptr, objc_init},
+ {"c.as-cpp.types", nullptr, as_cpp_types_init},
+ {"c.as-cpp", nullptr, as_cpp_init},
+ {"c.predefs", nullptr, predefs_init},
+ {"c", nullptr, init},
+ {nullptr, nullptr, nullptr}
};
const module_functions*
diff --git a/libbuild2/c/init.hxx b/libbuild2/c/init.hxx
index 2662bb1..38515c1 100644
--- a/libbuild2/c/init.hxx
+++ b/libbuild2/c/init.hxx
@@ -19,9 +19,22 @@ namespace build2
//
// Submodules:
//
- // `c.guess` -- registers and sets some variables.
- // `c.config` -- loads c.guess and sets more variables.
- // `c` -- loads c.config and registers target types and rules.
+ // `c.types` -- registers target types.
+ // `c.guess` -- registers and sets some variables.
+ // `c.config` -- loads c.guess and sets more variables.
+ // `c` -- loads c.{types,config} and registers rules and
+ // functions.
+ //
+ // `c.objc.types` -- registers m{} target type.
+ // `c.objc` -- loads c.objc.types and enables Objective-C
+ // compilation. Must be loaded after c.
+ //
+ // `c.as-cpp.types` -- registers S{} target type.
+ // `c.as-cpp` -- loads c.as-cpp.types and enables Assembler with C
+ // preprocessor compilation. Must be loaded after c.
+ //
+ // `c.predefs` -- registers rule for generating a C header with
+ // predefined compiler macros. Must be loaded after c.
//
extern "C" LIBBUILD2_C_SYMEXPORT const module_functions*
build2_c_load ();
diff --git a/libbuild2/c/target.hxx b/libbuild2/c/target.hxx
index 333d39f..c9955e3 100644
--- a/libbuild2/c/target.hxx
+++ b/libbuild2/c/target.hxx
@@ -15,6 +15,9 @@ namespace build2
{
using cc::h;
using cc::c;
+ using cc::m;
+ using cc::S;
+ using cc::c_inc;
}
}
diff --git a/libbuild2/cc/buildfile b/libbuild2/cc/buildfile
index e98e3de..7dcd811 100644
--- a/libbuild2/cc/buildfile
+++ b/libbuild2/cc/buildfile
@@ -6,14 +6,33 @@
include ../
impl_libs = ../lib{build2} # Implied interface dependency.
-import impl_libs += libpkgconf%lib{pkgconf}
+libpkgconf = $config.build2.libpkgconf
+
+if $libpkgconf
+ import impl_libs += libpkgconf%lib{pkgconf}
+else
+ import impl_libs += libpkg-config%lib{pkg-config}
include ../bin/
intf_libs = ../bin/lib{build2-bin}
-./: lib{build2-cc}: libul{build2-cc}: {hxx ixx txx cxx}{** -**.test...} \
- h{msvc-setup} \
- $intf_libs $impl_libs
+./: lib{build2-cc}: libul{build2-cc}: \
+ {hxx ixx txx cxx}{** -pkgconfig-lib* -**.test...} \
+ h{msvc-setup}
+
+libul{build2-cc}: cxx{pkgconfig-libpkgconf}: include = $libpkgconf
+libul{build2-cc}: cxx{pkgconfig-libpkg-config}: include = (!$libpkgconf)
+
+libul{build2-cc}: $intf_libs $impl_libs
+
+# libc++ std module interface translation unit.
+#
+# Hopefully temporary, see llvm-project GH issues #73089.
+#
+# @@ TMP: make sure sync'ed with upstream before release (keep this note).
+#
+lib{build2-cc}: file{std.cppm}
+file{std.cppm}@./: install = data/libbuild2/cc/
# Unit tests.
#
@@ -38,6 +57,9 @@ for t: cxx{**.test...}
obja{*}: cxx.poptions += -DLIBBUILD2_CC_STATIC_BUILD
objs{*}: cxx.poptions += -DLIBBUILD2_CC_SHARED_BUILD
+if $libpkgconf
+ cxx.poptions += -DBUILD2_LIBPKGCONF
+
if ($cxx.target.class == 'windows')
cxx.libs += $regex.apply(advapi32 ole32 oleaut32, \
'(.+)', \
diff --git a/libbuild2/cc/common.cxx b/libbuild2/cc/common.cxx
index cd89c79..2a8bc50 100644
--- a/libbuild2/cc/common.cxx
+++ b/libbuild2/cc/common.cxx
@@ -39,6 +39,11 @@ namespace build2
// 3. dependency libs (prerequisite_targets, left to right, depth-first)
// 4. dependency libs (*.libs variables).
//
+ // If proc_opt_group is true, then pass to proc_opt the group rather than
+ // the member if a member was picked (according to linfo) from a group.
+ // This is useful when we only want to see the common options set on the
+ // group.
+ //
// If either proc_opt or proc_lib return false, then any further
// processing of this library or its dependencies is skipped. This can be
// used to "prune" the graph traversal in case of duplicates. Note that
@@ -72,10 +77,14 @@ namespace build2
// not to pick the liba/libs{} member for installed libraries instead
// passing the lib{} group itself. This can be used to match the semantics
// of file_rule which, when matching prerequisites, does not pick the
- // liba/libs{} member (naturally) but just matches the lib{} group.
+ // liba/libs{} member (naturally) but just matches the lib{} group. Note
+ // that currently this truly only works for installed lib{} since non-
+ // installed ones don't have cc.type set. See proc_opt_group for an
+ // alternative way to (potentially) achieve the desired semantics.
//
// Note that if top_li is present, then the target passed to proc_impl,
- // proc_lib, and proc_opt is always a file.
+ // proc_lib, and proc_opt (unless proc_opt_group is true) is always a
+ // file.
//
// The dedup argument is part of the interface dependency deduplication
// functionality, similar to $x.deduplicate_export_libs(). Note, however,
@@ -87,7 +96,7 @@ namespace build2
const scope& top_bs,
optional<linfo> top_li,
const dir_paths& top_sysd,
- const mtime_target& l, // liba/libs{} or lib{}
+ const mtime_target& l, // liba/libs{}, libux{}, or lib{}
bool la,
lflags lf,
const function<bool (const target&,
@@ -102,7 +111,8 @@ namespace build2
const string& lang, // lang from cc.type
bool com, // cc. or x.
bool exp)>& proc_opt, // *.export.
- bool self /*= false*/, // Call proc_lib on l?
+ bool self, // Call proc_lib on l?
+ bool proc_opt_group, // Call proc_opt on group instead of member?
library_cache* cache) const
{
library_cache cache_storage;
@@ -115,8 +125,9 @@ namespace build2
chain.push_back (nullptr);
process_libraries_impl (a, top_bs, top_li, top_sysd,
- l, la, lf,
- proc_impl, proc_lib, proc_opt, self,
+ nullptr, l, la, lf,
+ proc_impl, proc_lib, proc_opt,
+ self, proc_opt_group,
cache, &chain, nullptr);
}
@@ -126,6 +137,7 @@ namespace build2
const scope& top_bs,
optional<linfo> top_li,
const dir_paths& top_sysd,
+ const target* lg,
const mtime_target& l,
bool la,
lflags lf,
@@ -142,6 +154,7 @@ namespace build2
bool com,
bool exp)>& proc_opt,
bool self,
+ bool proc_opt_group,
library_cache* cache,
small_vector<const target*, 32>* chain,
small_vector<const target*, 32>* dedup) const
@@ -149,8 +162,16 @@ namespace build2
// Add the library to the chain.
//
if (self && proc_lib)
+ {
+ if (find (chain->begin (), chain->end (), &l) != chain->end ())
+ fail << "dependency cycle detected involving library " << l;
+
chain->push_back (&l);
+ }
+ // We only lookup public variables so go straight for the public
+ // variable pool.
+ //
auto& vp (top_bs.ctx.var_pool);
do // Breakout loop.
@@ -237,12 +258,14 @@ namespace build2
//
if (proc_opt)
{
+ const target& ol (proc_opt_group && lg != nullptr ? *lg : l);
+
// If all we know is it's a C-common library, then in both cases
// we only look for cc.export.*.
//
if (cc)
{
- if (!proc_opt (l, t, true, true)) break;
+ if (!proc_opt (ol, t, true, true)) break;
}
else
{
@@ -259,24 +282,24 @@ namespace build2
//
// Note: options come from *.export.* variables.
//
- if (!proc_opt (l, t, false, true) ||
- !proc_opt (l, t, true, true)) break;
+ if (!proc_opt (ol, t, false, true) ||
+ !proc_opt (ol, t, true, true)) break;
}
else
{
// For default export we use the same options as were used
// to build the library.
//
- if (!proc_opt (l, t, false, false) ||
- !proc_opt (l, t, true, false)) break;
+ if (!proc_opt (ol, t, false, false) ||
+ !proc_opt (ol, t, true, false)) break;
}
}
else
{
// Interface: only add *.export.* (interface dependencies).
//
- if (!proc_opt (l, t, false, true) ||
- !proc_opt (l, t, true, true)) break;
+ if (!proc_opt (ol, t, false, true) ||
+ !proc_opt (ol, t, true, true)) break;
}
}
}
@@ -332,7 +355,7 @@ namespace build2
// Find system search directories corresponding to this library, i.e.,
// from its project and for its type (C, C++, etc).
//
- auto find_sysd = [&top_sysd, t, cc, same, &bs, &sysd, this] ()
+ auto find_sysd = [&top_sysd, &vp, t, cc, same, &bs, &sysd, this] ()
{
// Use the search dirs corresponding to this library scope/type.
//
@@ -341,7 +364,7 @@ namespace build2
: &cast<dir_paths> (
bs.root_scope ()->vars[same
? x_sys_lib_dirs
- : bs.ctx.var_pool[t + ".sys_lib_dirs"]]);
+ : vp[t + ".sys_lib_dirs"]]);
};
auto find_linfo = [top_li, t, cc, &bs, &l, &li] ()
@@ -364,9 +387,10 @@ namespace build2
for (const prerequisite_target& pt: l.prerequisite_targets[a])
{
// Note: adhoc prerequisites are not part of the library metadata
- // protocol (and we should check for adhoc first to avoid races).
+ // protocol (and we should check for adhoc first to avoid races
+ // during execute).
//
- if (pt == nullptr || pt.adhoc ())
+ if (pt.adhoc () || pt == nullptr)
continue;
if (marked (pt))
@@ -380,12 +404,19 @@ namespace build2
(la = (f = pt->is_a<libux> ())) ||
( f = pt->is_a<libs> ()))
{
+ // See link_rule for details.
+ //
+ const target* g ((pt.include & include_group) != 0
+ ? f->group
+ : nullptr);
+
if (sysd == nullptr) find_sysd ();
if (!li) find_linfo ();
process_libraries_impl (a, bs, *li, *sysd,
- *f, la, pt.data,
- proc_impl, proc_lib, proc_opt, true,
+ g, *f, la, pt.data /* lflags */,
+ proc_impl, proc_lib, proc_opt,
+ true /* self */, proc_opt_group,
cache, chain, nullptr);
}
}
@@ -480,7 +511,7 @@ namespace build2
return make_pair (n, s);
};
- auto proc_intf = [&l, cache, chain,
+ auto proc_intf = [&l, proc_opt_group, cache, chain,
&proc_impl, &proc_lib, &proc_lib_name, &proc_opt,
&sysd, &usrd,
&find_sysd, &find_linfo, &sense_fragment,
@@ -530,77 +561,125 @@ namespace build2
if (sysd == nullptr) find_sysd ();
if (!li) find_linfo ();
- const mtime_target& t (
- resolve_library (a,
+ const mtime_target* t;
+ const target* g;
+
+ const char* w (nullptr);
+ try
+ {
+ pair<const mtime_target&, const target*> p (
+ resolve_library (a,
bs,
- n,
- (n.pair ? (++i)->dir : dir_path ()),
- *li,
- *sysd, usrd,
- cache));
+ n,
+ (n.pair ? (++i)->dir : dir_path ()),
+ *li,
+ *sysd, usrd,
+ cache));
+
+ t = &p.first;
+ g = p.second;
+
+ // Deduplicate.
+ //
+ // Note that dedup_start makes sure we only consider our
+ // interface dependencies while maintaining the "through"
+ // list.
+ //
+ if (dedup != nullptr)
+ {
+ if (find (dedup->begin () + dedup_start,
+ dedup->end (),
+ t) != dedup->end ())
+ {
+ ++i;
+ continue;
+ }
+
+ dedup->push_back (t);
+ }
+ }
+ catch (const non_existent_library& e)
+ {
+ // This is another manifestation of the "mentioned in
+ // *.export.libs but not in prerequisites" case (see below).
+ //
+ t = &e.target;
+ w = "unknown";
+ }
- // Deduplicate.
+ // This can happen if the target is mentioned in *.export.libs
+ // (i.e., it is an interface dependency) but not in the
+ // library's prerequisites (i.e., it is not an implementation
+ // dependency).
//
- // Note that dedup_start makes sure we only consider our
- // interface dependencies while maintaining the "through"
- // list.
+ // Note that we used to just check for path being assigned but
+ // on Windows import-installed DLLs may legally have empty
+ // paths.
//
- if (dedup != nullptr)
+ if (w != nullptr)
+ ; // See above.
+ else if (l.ctx.phase == run_phase::match)
{
- if (find (dedup->begin () + dedup_start,
- dedup->end (),
- &t) != dedup->end ())
+ // We allow not matching installed libraries if all we need
+ // is their options (see compile_rule::apply()).
+ //
+ if (proc_lib || t->base_scope ().root_scope () != nullptr)
{
- ++i;
- continue;
+ if (!t->matched (a))
+ w = "not matched";
}
-
- dedup->push_back (&t);
}
-
- if (proc_lib)
+ else
{
- // This can happen if the target is mentioned in
- // *.export.libs (i.e., it is an interface dependency) but
- // not in the library's prerequisites (i.e., it is not an
- // implementation dependency).
- //
- // Note that we used to just check for path being assigned
- // but on Windows import-installed DLLs may legally have
- // empty paths.
+ // Note that this check we only do if there is proc_lib
+ // (since it's valid to process library's options before
+ // updating it).
//
- const char* w (nullptr);
- if (t.ctx.phase == run_phase::match)
+ if (proc_lib)
{
- size_t o (
- t.state[a].task_count.load (memory_order_consume) -
- t.ctx.count_base ());
-
- if (o != target::offset_applied &&
- o != target::offset_executed)
- w = "not matched";
+ if (t->mtime () == timestamp_unknown)
+ w = "out of date";
}
- else if (t.mtime () == timestamp_unknown)
- w = "out of date";
-
- if (w != nullptr)
- fail << (impl ? "implementation" : "interface")
- << " dependency " << t << " is " << w <<
- info << "mentioned in *.export." << (impl ? "impl_" : "")
- << "libs of target " << l <<
- info << "is it a prerequisite of " << l << "?";
+ }
+
+ if (w != nullptr)
+ {
+ fail << (impl ? "implementation" : "interface")
+ << " dependency " << *t << " is " << w <<
+ info << "mentioned in *.export." << (impl ? "impl_" : "")
+ << "libs of target " << l <<
+ info << "is it a prerequisite of " << l << "?" << endf;
}
// Process it recursively.
//
- // @@ Where can we get the link flags? Should we try to find
- // them in the library's prerequisites? What about
- // installed stuff?
+ bool u;
+ bool la ((u = t->is_a<libux> ()) || t->is_a<liba> ());
+ lflags lf (0);
+
+ // If this is a static library, see if we need to link it
+ // whole.
//
+ if (la && proc_lib)
+ {
+ // Note: go straight for the public variable pool.
+ //
+ const variable& var (t->ctx.var_pool["bin.whole"]);
+
+ // See the link rule for the lookup semantics.
+ //
+ lookup l (
+ t->lookup_original (var, true /* target_only */).first);
+
+ if (l ? cast<bool> (*l) : u)
+ lf |= lflag_whole;
+ }
+
process_libraries_impl (
a, bs, *li, *sysd,
- t, t.is_a<liba> () || t.is_a<libux> (), 0,
- proc_impl, proc_lib, proc_opt, true,
+ g, *t, la, lf,
+ proc_impl, proc_lib, proc_opt,
+ true /* self */, proc_opt_group,
cache, chain, dedup);
}
@@ -746,9 +825,14 @@ namespace build2
//
// If li is absent, then don't pick the liba/libs{} member, returning the
// lib{} target itself. If li is present, then the returned target is
- // always a file.
+ // always a file. The second half of the returned pair is the group, if
+ // the member was picked.
+ //
+ // Note: paths in sysd/usrd are expected to be absolute and normalized.
//
- const mtime_target& common::
+ // Note: may throw non_existent_library.
+ //
+ pair<const mtime_target&, const target*> common::
resolve_library (action a,
const scope& s,
const name& cn,
@@ -769,7 +853,8 @@ namespace build2
// large number of times (see Boost for an extreme example of this).
//
// Note also that for non-utility libraries we know that only the link
- // order from linfo is used.
+ // order from linfo is used. While not caching it and always picking an
+ // alternative could also work, we cache it to avoid the lookup.
//
if (cache != nullptr)
{
@@ -789,7 +874,7 @@ namespace build2
}));
if (i != cache->end ())
- return i->lib;
+ return pair<const mtime_target&, const target*> {i->lib, i->group};
}
else
cache = nullptr; // Do not cache.
@@ -831,26 +916,33 @@ namespace build2
// If this is lib{}/libul{}, pick appropriate member unless we were
// instructed not to.
//
+ const target* g (nullptr);
if (li)
{
if (const libx* l = xt->is_a<libx> ())
+ {
+ g = xt;
xt = link_member (*l, a, *li); // Pick lib*{e,a,s}{}.
+ }
}
auto& t (xt->as<mtime_target> ());
if (cache != nullptr)
- cache->push_back (library_cache_entry {lo, cn.type, cn.value, t});
+ cache->push_back (library_cache_entry {lo, cn.type, cn.value, t, g});
- return t;
+ return pair<const mtime_target&, const target*> {t, g};
}
- // Note that pk's scope should not be NULL (even if dir is absolute).
+ // Action should be absent if called during the load phase. Note that pk's
+ // scope should not be NULL (even if dir is absolute).
+ //
+ // Note: paths in sysd/usrd are expected to be absolute and normalized.
//
// Note: see similar logic in find_system_library().
//
target* common::
- search_library (action act,
+ search_library (optional<action> act,
const dir_paths& sysd,
optional<dir_paths>& usrd,
const prerequisite_key& p,
@@ -858,7 +950,7 @@ namespace build2
{
tracer trace (x, "search_library");
- assert (p.scope != nullptr);
+ assert (p.scope != nullptr && (!exist || act));
context& ctx (p.scope->ctx);
const scope& rs (*p.scope->root_scope ());
@@ -975,6 +1067,21 @@ namespace build2
{
context& ctx (p.scope->ctx);
+ // Whether to look for a binless variant using the common .pc file
+ // (see below).
+ //
+ // Normally we look for a binless version if the binful one was not
+ // found. However, sometimes we may find what looks like a binful
+ // library but on a closer examination realize that there is something
+ // wrong with it (for example, it's not a Windows import library). In
+ // such cases we want to omit looking for a binless library using the
+ // common .pc file since it most likely corresponds to the binful
+ // library (and we may end up in an infinite loop trying to resolve
+ // itself).
+ //
+ bool ba (true);
+ bool bs (true);
+
timestamp mt;
// libs
@@ -1046,6 +1153,31 @@ namespace build2
s->path_mtime (move (f), mt);
}
}
+ else if (!ext && tsys == "darwin")
+ {
+ // Besides .dylib, Mac OS now also has "text-based stub libraries"
+ // that use the .tbd extension. They appear to be similar to
+ // Windows import libraries and contain information such as the
+ // location of the .dylib library, its symbols, etc. For example,
+ // there is /Library/.../MacOSX13.3.sdk/usr/lib/libsqlite3.tbd
+ // which points to /usr/lib/libsqlite3.dylib (but which itself is
+ // invisible/inaccessible, presumably for security).
+ //
+ // Note that for now we are treating the .tbd library as the
+ // shared library but could probably do the more elaborate dance
+ // with ad hoc members like on Windows if really necessary.
+ //
+ se = string ("tbd");
+ f = f.base (); // Remove .dylib.
+ f += ".tbd";
+ mt = mtime (f);
+
+ if (mt != timestamp_nonexistent)
+ {
+ insert_library (ctx, s, name, d, ld, se, exist, trace);
+ s->path_mtime (move (f), mt);
+ }
+ }
}
// liba
@@ -1075,10 +1207,24 @@ namespace build2
if (tsys == "win32-msvc")
{
if (s == nullptr && !sn.empty ())
- s = msvc_search_shared (ld, d, p, exist);
+ {
+ pair<libs*, bool> r (msvc_search_shared (ld, d, p, exist));
+
+ if (r.first != nullptr)
+ s = r.first;
+ else if (!r.second)
+ bs = false;
+ }
if (a == nullptr && !an.empty ())
- a = msvc_search_static (ld, d, p, exist);
+ {
+ pair<liba*, bool> r (msvc_search_static (ld, d, p, exist));
+
+ if (r.first != nullptr)
+ a = r.first;
+ else if (!r.second)
+ ba = false;
+ }
}
// Look for binary-less libraries via pkg-config .pc files. Note that
@@ -1095,7 +1241,10 @@ namespace build2
// is no binful variant.
//
pair<path, path> r (
- pkgconfig_search (d, p.proj, name, na && ns /* common */));
+ pkgconfig_search (d,
+ p.proj,
+ name,
+ na && ns && ba && bs /* common */));
if (na && !r.first.empty ())
{
@@ -1148,6 +1297,8 @@ namespace build2
// making it the only one to allow things to be overriden (e.g.,
// if build2 was moved or some such).
//
+ // Note: build_install_lib is already normalized.
+ //
usrd->insert (usrd->begin (), build_install_lib);
}
}
@@ -1200,20 +1351,87 @@ namespace build2
if (exist)
return r;
- // If we cannot acquire the lock then this mean the target has already
- // been matched and we assume all of this has already been done.
+ // Try to extract library information from pkg-config. We only add the
+ // default macro if we could not extract more precise information. The
+ // idea is that in .pc files that we generate, we copy those macros (or
+ // custom ones) from *.export.poptions.
+ //
+ // @@ Should we add .pc files as ad hoc members so pkgconfig_save() can
+ // use their names when deriving -l-names (this would be especially
+ // helpful for binless libraries to get hold of prefix/suffix, etc).
//
- auto lock = [act] (const target* t) -> target_lock
+ auto load_pc = [this, &trace,
+ act, &p, &name,
+ &sysd, &usrd,
+ pd, &pc, lt, a, s] (pair<bool, bool> metaonly)
{
- auto l (t != nullptr ? build2::lock (act, *t, true) : target_lock ());
+ l5 ([&]{trace << "loading pkg-config information during "
+ << (act ? "match" : "load") << " for "
+ << (a != nullptr ? "static " : "")
+ << (s != nullptr ? "shared " : "")
+ << "member(s) of " << *lt << "; metadata only: "
+ << metaonly.first << " " << metaonly.second;});
+
+ // Add the "using static/shared library" macro (used, for example, to
+ // handle DLL export). The absence of either of these macros would
+ // mean some other build system that cannot distinguish between the
+ // two (and no pkg-config information).
+ //
+ auto add_macro = [this] (target& t, const char* suffix)
+ {
+ // If there is already a value (either in cc.export or x.export),
+ // don't add anything: we don't want to be accumulating defines nor
+ // messing with custom values. And if we are adding, then use the
+ // generic cc.export.
+ //
+ // The only way we could already have this value is if this same
+ // library was also imported as a project (as opposed to installed).
+ // Unlikely but possible. In this case the values were set by the
+ // export stub and we shouldn't touch them.
+ //
+ if (!t.vars[x_export_poptions])
+ {
+ auto p (t.vars.insert (c_export_poptions));
- if (l && l.offset == target::offset_matched)
+ if (p.second)
+ {
+ // The "standard" macro name will be LIB<NAME>_{STATIC,SHARED},
+ // where <name> is the target name. Here we want to strike a
+ // balance between being unique and not too noisy.
+ //
+ string d ("-DLIB");
+
+ d += sanitize_identifier (
+ ucase (const_cast<const string&> (t.name)));
+
+ d += '_';
+ d += suffix;
+
+ strings o;
+ o.push_back (move (d));
+ p.first = move (o);
+ }
+ }
+ };
+
+ if (pc.first.empty () && pc.second.empty ())
{
- assert ((*t)[act].rule == &file_rule::rule_match);
- l.unlock ();
+ if (!pkgconfig_load (act, *p.scope,
+ *lt, a, s,
+ p.proj, name,
+ *pd, sysd, *usrd,
+ metaonly))
+ {
+ if (a != nullptr && !metaonly.first) add_macro (*a, "STATIC");
+ if (s != nullptr && !metaonly.second) add_macro (*s, "SHARED");
+ }
}
-
- return l;
+ else
+ pkgconfig_load (act, *p.scope,
+ *lt, a, s,
+ pc,
+ *pd, sysd, *usrd,
+ metaonly);
};
// Mark as a "cc" library (unless already marked) and set the system
@@ -1234,6 +1452,85 @@ namespace build2
return p.second;
};
+ // Deal with the load phase case. The rest is already hairy enough so
+ // let's not try to weave this logic into that.
+ //
+ if (!act)
+ {
+ assert (ctx.phase == run_phase::load);
+
+ // The overall idea here is to set everything up so that the default
+ // file_rule matches the returned targets, the same way as it would if
+ // multiple operations were executed for the match phase (see below).
+ //
+ // Note however, that there is no guarantee that we won't end up in
+ // the match phase code below even after loading things here. For
+ // example, the same library could be searched from pkgconfig_load()
+ // if specified as -l. And if we try to re-assign group members, then
+ // that would be a race condition. So we use the cc mark to detect
+ // this.
+ //
+ timestamp mt (timestamp_nonexistent);
+ if (a != nullptr) {lt->a = a; a->group = lt; mt = a->mtime ();}
+ if (s != nullptr) {lt->s = s; s->group = lt; mt = s->mtime ();}
+
+ // @@ TODO: we currently always reload pkgconfig for lt (and below).
+ //
+ mark_cc (*lt);
+ lt->mtime (mt); // Note: problematic, see below for details.
+
+ // We can only load metadata from here since we can only do this
+ // during the load phase. But it's also possible that a racing match
+ // phase already found and loaded this library without metadata. So
+ // looks like the only way is to load the metadata incrementally. We
+ // can base this decision on the presence/absence of cc.type and
+ // export.metadata.
+ //
+ pair<bool, bool> metaonly {false, false};
+
+ if (a != nullptr && !mark_cc (*a))
+ {
+ if (a->vars[ctx.var_export_metadata])
+ a = nullptr;
+ else
+ metaonly.first = true;
+ }
+
+ if (s != nullptr && !mark_cc (*s))
+ {
+ if (s->vars[ctx.var_export_metadata])
+ s = nullptr;
+ else
+ metaonly.second = true;
+ }
+
+ // Try to extract library information from pkg-config.
+ //
+ if (a != nullptr || s != nullptr)
+ load_pc (metaonly);
+
+ return r;
+ }
+
+ // If we cannot acquire the lock then this mean the target has already
+ // been matched and we assume all of this has already been done.
+ //
+ auto lock = [a = *act] (const target* t) -> target_lock
+ {
+ auto l (t != nullptr ? build2::lock (a, *t, true) : target_lock ());
+
+ if (l && l.offset == target::offset_matched)
+ {
+ assert ((*t)[a].rule == &file_rule::rule_match);
+ l.unlock ();
+ }
+
+ return l;
+ };
+
+ target_lock al (lock (a));
+ target_lock sl (lock (s));
+
target_lock ll (lock (lt));
// Set lib{} group members to indicate what's available. Note that we
@@ -1243,101 +1540,41 @@ namespace build2
timestamp mt (timestamp_nonexistent);
if (ll)
{
- if (s != nullptr) {lt->s = s; mt = s->mtime ();}
- if (a != nullptr) {lt->a = a; mt = a->mtime ();}
-
// Mark the group since sometimes we use it itself instead of one of
// the liba/libs{} members (see process_libraries_impl() for details).
//
- mark_cc (*lt);
+ // If it's already marked, then it could have been imported during
+ // load (see above).
+ //
+ // @@ TODO: we currently always reload pkgconfig for lt (and above).
+ // Maybe pass NULL lt to pkgconfig_load() in this case?
+ //
+ if (mark_cc (*lt))
+ {
+ if (a != nullptr) {lt->a = a; mt = a->mtime ();}
+ if (s != nullptr) {lt->s = s; mt = s->mtime ();}
+ }
+ else
+ ll.unlock ();
}
- target_lock al (lock (a));
- target_lock sl (lock (s));
-
if (!al) a = nullptr;
if (!sl) s = nullptr;
- if (a != nullptr) a->group = lt;
- if (s != nullptr) s->group = lt;
-
- // If the library already has cc.type, then assume it was either
- // already imported or was matched by a rule.
+ // If the library already has cc.type, then assume it was either already
+ // imported (e.g., during load) or was matched by a rule.
//
if (a != nullptr && !mark_cc (*a)) a = nullptr;
if (s != nullptr && !mark_cc (*s)) s = nullptr;
- // Add the "using static/shared library" macro (used, for example, to
- // handle DLL export). The absence of either of these macros would
- // mean some other build system that cannot distinguish between the
- // two (and no pkg-config information).
- //
- auto add_macro = [this] (target& t, const char* suffix)
- {
- // If there is already a value (either in cc.export or x.export),
- // don't add anything: we don't want to be accumulating defines nor
- // messing with custom values. And if we are adding, then use the
- // generic cc.export.
- //
- // The only way we could already have this value is if this same
- // library was also imported as a project (as opposed to installed).
- // Unlikely but possible. In this case the values were set by the
- // export stub and we shouldn't touch them.
- //
- if (!t.vars[x_export_poptions])
- {
- auto p (t.vars.insert (c_export_poptions));
-
- if (p.second)
- {
- // The "standard" macro name will be LIB<NAME>_{STATIC,SHARED},
- // where <name> is the target name. Here we want to strike a
- // balance between being unique and not too noisy.
- //
- string d ("-DLIB");
-
- d += sanitize_identifier (
- ucase (const_cast<const string&> (t.name)));
-
- d += '_';
- d += suffix;
-
- strings o;
- o.push_back (move (d));
- p.first = move (o);
- }
- }
- };
+ if (a != nullptr) a->group = lt;
+ if (s != nullptr) s->group = lt;
if (ll && (a != nullptr || s != nullptr))
{
- // Try to extract library information from pkg-config. We only add the
- // default macro if we could not extract more precise information. The
- // idea is that in .pc files that we generate, we copy those macros
- // (or custom ones) from *.export.poptions.
+ // Try to extract library information from pkg-config.
//
- // @@ Should we add .pc files as ad hoc members so pkconfig_save() can
- // use their names when deriving -l-names (this would be especially
- // helpful for binless libraries to get hold of prefix/suffix, etc).
- //
- if (pc.first.empty () && pc.second.empty ())
- {
- if (!pkgconfig_load (act, *p.scope,
- *lt, a, s,
- p.proj, name,
- *pd, sysd, *usrd,
- false /* metadata */))
- {
- if (a != nullptr) add_macro (*a, "STATIC");
- if (s != nullptr) add_macro (*s, "SHARED");
- }
- }
- else
- pkgconfig_load (act, *p.scope,
- *lt, a, s,
- pc,
- *pd, sysd, *usrd,
- false /* metadata */);
+ load_pc ({false, false} /* metaonly */);
}
// If we have the lock (meaning this is the first time), set the matched
@@ -1350,10 +1587,38 @@ namespace build2
//
// Note also that these calls clear target data.
//
- if (al) match_rule (al, file_rule::rule_match);
- if (sl) match_rule (sl, file_rule::rule_match);
+ if (a != nullptr) match_rule (al, file_rule::rule_match);
+ if (s != nullptr) match_rule (sl, file_rule::rule_match);
if (ll)
{
+ // @@ Turns out this has a problem: file_rule won't match/execute
+ // group members. So what happens is that if we have two installed
+ // libraries, say lib{build2} that depends on lib{butl}, then
+ // lib{build2} will have lib{butl} as a prerequisite and file_rule
+ // that matches lib{build2} will update lib{butl} (also matched by
+ // file_rule), but not its members. Later, someone (for example,
+ // the newer() call in append_libraries()) will pick one of the
+ // members assuming it is executed and things will go sideways.
+ //
+ // For now we hacked around the issue but the long term solution is
+ // probably to add to the bin module a special rule that is
+ // registered on the global scope and matches the installed lib{}
+ // targets. This rule will have to both update prerequisites like
+ // the file_rule and group members like the lib_rule (or maybe it
+ // can skip prerequisites since one of the member will do that; in
+ // which case maybe we will be able to reuse lib_rule maybe with
+ // the "all members" flag or some such). A few additional
+ // notes/thoughts:
+ //
+ // - Will be able to stop inheriting lib{} from mtime_target.
+ //
+ // - Will need to register for perform_update/clean like in context
+ // as well as for configure as in the config module (feels like
+ // shouldn't need to register for dist).
+ //
+ // - Will need to test batches, immediate import thoroughly (this
+ // stuff is notoriously tricky to get right in all situations).
+ //
match_rule (ll, file_rule::rule_match);
// Also bless the library group with a "trust me it exists" timestamp.
@@ -1362,6 +1627,8 @@ namespace build2
// won't match.
//
lt->mtime (mt);
+
+ ll.unlock (); // Unlock group before members, for good measure.
}
return r;
@@ -1403,5 +1670,85 @@ namespace build2
return r;
}
+
+ void common::
+ append_diag_color_options (cstrings& args) const
+ {
+ switch (cclass)
+ {
+ case compiler_class::msvc:
+ {
+ // MSVC has the /diagnostics: option which has an undocumented value
+ // `color`. It's unclear from which version of MSVC this value is
+ // supported, but it works in 17.0, so let's start from there.
+ //
+ // Note that there is currently no way to disable color in the MSVC
+ // diagnostics specifically (the /diagnostics:* option values are
+ // cumulative and there doesn't seem to be a `color-` value). This
+ // is probably not a big deal since one can just disable the color
+ // globally (--no-diag-color).
+ //
+ // Note that clang-cl appears to use -fansi-escape-codes. See GH
+ // issue #312 for background.
+ //
+ if (show_diag_color ())
+ {
+ if (cvariant.empty () &&
+ (cmaj > 19 || (cmaj == 19 && cmin >= 30)))
+ {
+ // Check for the prefix in case /diagnostics:color- gets added
+ // eventually.
+ //
+ if (!find_option_prefixes ({"/diagnostics:color",
+ "-diagnostics:color"}, args))
+ {
+ args.push_back ("/diagnostics:color");
+ }
+ }
+ }
+
+ break;
+ }
+ case compiler_class::gcc:
+ {
+ // Enable/disable diagnostics color unless a custom option is
+ // specified.
+ //
+ // Supported from GCC 4.9 (8.1 on Windows) and (at least) from Clang
+ // 3.5. Clang supports -f[no]color-diagnostics in addition to the
+ // GCC's spelling.
+ //
+ if (
+#ifndef _WIN32
+ ctype == compiler_type::gcc ? cmaj > 4 || (cmaj == 4 && cmin >= 9) :
+#else
+ ctype == compiler_type::gcc ? cmaj > 8 || (cmaj == 8 && cmin >= 1) :
+#endif
+ ctype == compiler_type::clang ? cmaj > 3 || (cmaj == 3 && cmin >= 5) :
+ false)
+ {
+ if (!(find_option_prefix ("-fdiagnostics-color", args) ||
+ find_option ("-fno-diagnostics-color", args) ||
+ find_option ("-fdiagnostics-plain-output", args) ||
+ (ctype == compiler_type::clang &&
+ (find_option ("-fcolor-diagnostics", args) ||
+ find_option ("-fno-color-diagnostics", args)))))
+ {
+ // Omit -fno-diagnostics-color if stderr is not a terminal (we
+ // know there will be no color in this case and the option will
+ // just add noise, for example, in build logs).
+ //
+ if (const char* o = (
+ show_diag_color () ? "-fdiagnostics-color" :
+ stderr_term ? "-fno-diagnostics-color" :
+ nullptr))
+ args.push_back (o);
+ }
+ }
+
+ break;
+ }
+ }
+ }
}
}
diff --git a/libbuild2/cc/common.hxx b/libbuild2/cc/common.hxx
index 1e74b22..cb85632 100644
--- a/libbuild2/cc/common.hxx
+++ b/libbuild2/cc/common.hxx
@@ -32,10 +32,12 @@ namespace build2
{
lang x_lang;
- const char* x; // Module name ("c", "cxx").
- const char* x_name; // Compiler name ("c", "c++").
- const char* x_default; // Compiler default ("gcc", "g++").
- const char* x_pext; // Preprocessed source extension (".i", ".ii").
+ const char* x; // Module name ("c", "cxx").
+ const char* x_name; // Compiler name ("c", "c++").
+ const char* x_obj_name; // Same for Objective-X ("obj-c", "obj-c++").
+ const char* x_default; // Compiler default ("gcc", "g++").
+ const char* x_pext; // Preprocessed source extension (".i", ".ii").
+ const char* x_obj_pext; // Same for Objective-X (".mi", ".mii").
// Array of modules that can hint us the toolchain, terminate with
// NULL.
@@ -115,6 +117,7 @@ namespace build2
const variable& c_module_name; // cc.module_name
const variable& c_importable; // cc.importable
const variable& c_reprocess; // cc.reprocess
+ const variable& c_serialize; // cc.serialize
const variable& x_preprocessed; // x.preprocessed
const variable* x_symexport; // x.features.symexport
@@ -163,6 +166,7 @@ namespace build2
// Cached values for some commonly-used variables/values.
//
+ const compiler_id& cid; // x.id
compiler_type ctype; // x.id.type
const string& cvariant; // x.id.variant
compiler_class cclass; // x.class
@@ -196,34 +200,68 @@ namespace build2
build2::cc::importable_headers* importable_headers;
// The order of sys_*_dirs is the mode entries first, followed by the
- // compiler built-in entries, and finished off with any extra entries
- // (e.g., fallback directories such as /usr/local/*).
+ // extra entries (e.g., /usr/local/*), followed by the compiler built-in
+ // entries.
+ //
+ // Note that even if we wanted to, we wouldn't be able to support extra
+ // trailing (after built-in) directories since we would need a portable
+ // equivalent of -idirafter for both headers and libraries.
//
const dir_paths& sys_lib_dirs; // x.sys_lib_dirs
const dir_paths& sys_hdr_dirs; // x.sys_hdr_dirs
const dir_paths* sys_mod_dirs; // compiler_info::sys_mod_dirs
- size_t sys_lib_dirs_mode; // Number of leading mode entries (0 if none).
+ size_t sys_lib_dirs_mode; // Number of mode entries (0 if none).
size_t sys_hdr_dirs_mode;
size_t sys_mod_dirs_mode;
- size_t sys_lib_dirs_extra; // First trailing extra entry (size if none).
+ size_t sys_lib_dirs_extra; // Number of extra entries (0 if none).
size_t sys_hdr_dirs_extra;
+ // Note that x_obj is patched in by the x.objx module. So it stays NULL
+ // if Objective-X compilation is not enabled. Similarly for x_asp except
+ // here we don't have duality and it's purely to signal (by the c.as-cpp
+ // module) that it's enabled.
+ //
const target_type& x_src; // Source target type (c{}, cxx{}).
const target_type* x_mod; // Module target type (mxx{}), if any.
+ const target_type& x_inc; // Includable base target type (e.g., c_inc{}).
+ const target_type* x_obj; // Objective-X target type (m{}, mm{}).
+ const target_type* x_asp; // Assembler with CPP target type (S{}).
+
+ // Check if an object (target, prerequisite, etc) is an Objective-X
+ // source.
+ //
+ template <typename T>
+ bool
+ x_objective (const T& t) const
+ {
+ return x_obj != nullptr && t.is_a (*x_obj);
+ }
+
+ // Check if an object (target, prerequisite, etc) is an Assembler with
+ // C preprocessor source.
+ //
+ template <typename T>
+ bool
+ x_assembler_cpp (const T& t) const
+ {
+ return x_asp != nullptr && t.is_a (*x_asp);
+ }
// Array of target types that are considered the X-language headers
// (excluding h{} except for C). Keep them in the most likely to appear
// order with the "real header" first and terminated with NULL.
//
- const target_type* const* x_hdr;
+ const target_type* const* x_hdrs;
+ // Check if an object (target, prerequisite, etc) is a header.
+ //
template <typename T>
bool
x_header (const T& t, bool c_hdr = true) const
{
- for (const target_type* const* ht (x_hdr); *ht != nullptr; ++ht)
+ for (const target_type* const* ht (x_hdrs); *ht != nullptr; ++ht)
if (t.is_a (**ht))
return true;
@@ -234,7 +272,7 @@ namespace build2
// extensions to target types. Keep them in the most likely to appear
// order and terminate with NULL.
//
- const target_type* const* x_inc;
+ const target_type* const* x_incs;
// Aggregate-like constructor with from-base support.
//
@@ -242,8 +280,7 @@ namespace build2
const char* compile,
const char* link,
const char* install,
- compiler_type ct,
- const string& cv,
+ const compiler_id& ci,
compiler_class cl,
uint64_t mj, uint64_t mi,
uint64_t vmj, uint64_t vmi,
@@ -262,13 +299,14 @@ namespace build2
size_t sle, size_t she,
const target_type& src,
const target_type* mod,
- const target_type* const* hdr,
- const target_type* const* inc)
+ const target_type& inc,
+ const target_type* const* hdrs,
+ const target_type* const* incs)
: config_data (cd),
x_compile (compile),
x_link (link),
x_install (install),
- ctype (ct), cvariant (cv), cclass (cl),
+ cid (ci), ctype (ci.type), cvariant (ci.variant), cclass (cl),
cmaj (mj), cmin (mi),
cvmaj (vmj), cvmin (vmi),
cpath (path), cmode (mode),
@@ -283,7 +321,9 @@ namespace build2
sys_lib_dirs_mode (slm), sys_hdr_dirs_mode (shm),
sys_mod_dirs_mode (smm),
sys_lib_dirs_extra (sle), sys_hdr_dirs_extra (she),
- x_src (src), x_mod (mod), x_hdr (hdr), x_inc (inc) {}
+ x_src (src), x_mod (mod), x_inc (inc),
+ x_obj (nullptr), x_asp (nullptr),
+ x_hdrs (hdrs), x_incs (incs) {}
};
class LIBBUILD2_CC_SYMEXPORT common: public data
@@ -300,10 +340,16 @@ namespace build2
string type; // name::type
string value; // name::value
reference_wrapper<const mtime_target> lib;
+ const target* group;
};
using library_cache = small_vector<library_cache_entry, 32>;
+ // The prerequisite_target::include bit that indicates a library
+ // member has been picked from the group.
+ //
+ static const uintptr_t include_group = 0x100;
+
void
process_libraries (
action,
@@ -319,6 +365,7 @@ namespace build2
lflags, const string*, bool)>&,
const function<bool (const target&, const string&, bool, bool)>&,
bool = false,
+ bool = false,
library_cache* = nullptr) const;
void
@@ -327,6 +374,7 @@ namespace build2
const scope&,
optional<linfo>,
const dir_paths&,
+ const target*,
const mtime_target&,
bool,
lflags,
@@ -336,6 +384,7 @@ namespace build2
lflags, const string*, bool)>&,
const function<bool (const target&, const string&, bool, bool)>&,
bool,
+ bool,
library_cache*,
small_vector<const target*, 32>*,
small_vector<const target*, 32>*) const;
@@ -365,7 +414,7 @@ namespace build2
}
public:
- const mtime_target&
+ pair<const mtime_target&, const target*>
resolve_library (action,
const scope&,
const name&,
@@ -375,6 +424,11 @@ namespace build2
optional<dir_paths>&,
library_cache* = nullptr) const;
+ struct non_existent_library
+ {
+ const mtime_target& target;
+ };
+
template <typename T>
static ulock
insert_library (context&,
@@ -387,7 +441,7 @@ namespace build2
tracer&);
target*
- search_library (action,
+ search_library (optional<action>,
const dir_paths&,
optional<dir_paths>&,
const prerequisite_key&,
@@ -407,13 +461,16 @@ namespace build2
// Alternative search logic for VC (msvc.cxx).
//
- bin::liba*
+ // The second half is false if we should poison the binless search via
+ // the common .pc file.
+ //
+ pair<bin::liba*, bool>
msvc_search_static (const process_path&,
const dir_path&,
const prerequisite_key&,
bool existing) const;
- bin::libs*
+ pair<bin::libs*, bool>
msvc_search_shared (const process_path&,
const dir_path&,
const prerequisite_key&,
@@ -433,23 +490,28 @@ namespace build2
bool) const;
void
- pkgconfig_load (action, const scope&,
+ pkgconfig_load (optional<action>, const scope&,
bin::lib&, bin::liba*, bin::libs*,
const pair<path, path>&,
const dir_path&,
const dir_paths&,
const dir_paths&,
- bool) const;
+ pair<bool, bool>) const;
bool
- pkgconfig_load (action, const scope&,
+ pkgconfig_load (optional<action>, const scope&,
bin::lib&, bin::liba*, bin::libs*,
const optional<project_name>&,
const string&,
const dir_path&,
const dir_paths&,
const dir_paths&,
- bool) const;
+ pair<bool, bool>) const;
+
+ // Append compiler-specific diagnostics color options as necessary.
+ //
+ void
+ append_diag_color_options (cstrings&) const;
};
}
}
diff --git a/libbuild2/cc/common.txx b/libbuild2/cc/common.txx
index d14f966..8c80686 100644
--- a/libbuild2/cc/common.txx
+++ b/libbuild2/cc/common.txx
@@ -19,15 +19,18 @@ namespace build2
bool exist,
tracer& trace)
{
- auto p (ctx.targets.insert_locked (T::static_type,
- move (dir),
- path_cast<dir_path> (out.effect),
- name,
- move (ext),
- target_decl::implied,
- trace));
+ auto p (ctx.targets.insert_locked (
+ T::static_type,
+ move (dir),
+ dir_path (out.effect_string ()).normalize (),
+ name,
+ move (ext),
+ target_decl::implied,
+ trace));
+
+ if (exist && p.second)
+ throw non_existent_library {p.first.template as<mtime_target> ()};
- assert (!exist || !p.second);
r = &p.first.template as<T> ();
return move (p.second);
}
diff --git a/libbuild2/cc/compile-rule.cxx b/libbuild2/cc/compile-rule.cxx
index f5d50ec..2e4775e 100644
--- a/libbuild2/cc/compile-rule.cxx
+++ b/libbuild2/cc/compile-rule.cxx
@@ -3,6 +3,7 @@
#include <libbuild2/cc/compile-rule.hxx>
+#include <cerrno>
#include <cstdlib> // exit()
#include <cstring> // strlen(), strchr(), strncmp()
@@ -175,7 +176,7 @@ namespace build2
if (s == "includes") return preprocessed::includes;
if (s == "modules") return preprocessed::modules;
if (s == "all") return preprocessed::all;
- throw invalid_argument ("invalid preprocessed value '" + s + "'");
+ throw invalid_argument ("invalid preprocessed value '" + s + '\'');
}
// Return true if the compiler supports -isystem (GCC class) or
@@ -231,7 +232,8 @@ namespace build2
// Note that we don't really need this for clean (where we only need
// unrefined unit type) so we could make this update-only. But let's keep
- // it simple for now.
+ // it simple for now. Note that now we do need the source prerequisite
+ // type in clean to deal with Objective-X.
//
struct compile_rule::match_data
{
@@ -293,24 +295,25 @@ namespace build2
void compile_rule::
append_sys_hdr_options (T& args) const
{
- assert (sys_hdr_dirs_extra <= sys_hdr_dirs.size ());
+ assert (sys_hdr_dirs_mode + sys_hdr_dirs_extra <= sys_hdr_dirs.size ());
// Note that the mode options are added as part of cmode.
//
auto b (sys_hdr_dirs.begin () + sys_hdr_dirs_mode);
- auto m (sys_hdr_dirs.begin () + sys_hdr_dirs_extra);
- auto e (sys_hdr_dirs.end ());
+ auto x (b + sys_hdr_dirs_extra);
+ // Add extras.
+ //
// Note: starting from 16.10, MSVC gained /external:I option though it
// doesn't seem to affect the order, only "system-ness".
//
append_option_values (
args,
- cclass == compiler_class::gcc ? "-idirafter" :
+ cclass == compiler_class::gcc ? "-isystem" :
cclass == compiler_class::msvc ? (isystem (*this)
? "/external:I"
: "/I") : "-I",
- m, e,
+ b, x,
[] (const dir_path& d) {return d.string ().c_str ();});
// For MSVC if we have no INCLUDE environment variable set, then we
@@ -326,7 +329,7 @@ namespace build2
{
append_option_values (
args, "/I",
- b, m,
+ x, sys_hdr_dirs.end (),
[] (const dir_path& d) {return d.string ().c_str ();});
}
}
@@ -351,6 +354,35 @@ namespace build2
case lang::c: o1 = "/TC"; break;
case lang::cxx: o1 = "/TP"; break;
}
+
+ // Note: /interface and /internalPartition are in addition to /TP.
+ //
+ switch (md.type)
+ {
+ case unit_type::non_modular:
+ case unit_type::module_impl:
+ {
+ break;
+ }
+ case unit_type::module_intf:
+ case unit_type::module_intf_part:
+ {
+ o2 = "/interface";
+ break;
+ }
+ case unit_type::module_impl_part:
+ {
+ o2 = "/internalPartition";
+ break;
+ }
+ case unit_type::module_header:
+ {
+ //@@ MODHDR TODO: /exportHeader
+ assert (false);
+ break;
+ }
+ }
+
break;
}
case compiler_class::gcc:
@@ -369,11 +401,20 @@ namespace build2
case unit_type::module_impl:
{
o1 = "-x";
- switch (x_lang)
+
+ if (x_assembler_cpp (md.src))
+ o2 = "assembler-with-cpp";
+ else
{
- case lang::c: o2 = "c"; break;
- case lang::cxx: o2 = "c++"; break;
+ bool obj (x_objective (md.src));
+
+ switch (x_lang)
+ {
+ case lang::c: o2 = obj ? "objective-c" : "c"; break;
+ case lang::cxx: o2 = obj ? "objective-c++" : "c++"; break;
+ }
}
+
break;
}
case unit_type::module_intf:
@@ -413,9 +454,11 @@ namespace build2
default:
assert (false);
}
+
break;
}
}
+
break;
}
}
@@ -472,9 +515,11 @@ namespace build2
// For a header unit we check the "real header" plus the C header.
//
- if (ut == unit_type::module_header ? p.is_a (**x_hdr) || p.is_a<h> () :
- ut == unit_type::module_intf ? p.is_a (*x_mod) :
- p.is_a (x_src))
+ if (ut == unit_type::module_header ? p.is_a (**x_hdrs) || p.is_a<h> () :
+ ut == unit_type::module_intf ? p.is_a (*x_mod) :
+ p.is_a (x_src) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj)))
{
// Save in the target's auxiliary storage.
//
@@ -489,13 +534,16 @@ namespace build2
// Append or hash library options from a pair of *.export.* variables
// (first is x.* then cc.*) recursively, prerequisite libraries first.
+ // If common is true, then only append common options from the lib{}
+ // groups.
//
template <typename T>
void compile_rule::
append_library_options (appended_libraries& ls, T& args,
const scope& bs,
const scope* is, // Internal scope.
- action a, const file& l, bool la, linfo li,
+ action a, const file& l, bool la,
+ linfo li, bool common,
library_cache* lib_cache) const
{
struct data
@@ -509,7 +557,7 @@ namespace build2
//
auto imp = [] (const target& l, bool la) {return la && l.is_a<libux> ();};
- auto opt = [&d, this] (const target& lt,
+ auto opt = [&d, this] (const target& l, // Note: could be lib{}
const string& t, bool com, bool exp)
{
// Note that in our model *.export.poptions are always "interface",
@@ -518,8 +566,6 @@ namespace build2
if (!exp) // Ignore libux.
return true;
- const file& l (lt.as<file> ());
-
// Suppress duplicates.
//
// Compilation is the simple case: we can add the options on the first
@@ -529,6 +575,8 @@ namespace build2
if (find (d.ls.begin (), d.ls.end (), &l) != d.ls.end ())
return false;
+ // Note: go straight for the public variable pool.
+ //
const variable& var (
com
? c_export_poptions
@@ -678,16 +726,24 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0, // lflags unused.
- imp, nullptr, opt, false /* self */, lib_cache);
+ imp, nullptr, opt,
+ false /* self */,
+ common /* proc_opt_group */,
+ lib_cache);
}
void compile_rule::
append_library_options (appended_libraries& ls, strings& args,
const scope& bs,
- action a, const file& l, bool la, linfo li) const
+ action a, const file& l, bool la,
+ linfo li,
+ bool common,
+ bool original) const
{
- const scope* is (isystem (*this) ? effective_iscope (bs) : nullptr);
- append_library_options (ls, args, bs, is, a, l, la, li, nullptr);
+ const scope* is (!original && isystem (*this)
+ ? effective_iscope (bs)
+ : nullptr);
+ append_library_options (ls, args, bs, is, a, l, la, li, common, nullptr);
}
template <typename T>
@@ -728,7 +784,9 @@ namespace build2
append_library_options (ls,
args,
bs, iscope (),
- a, *f, la, li,
+ a, *f, la,
+ li,
+ false /* common */,
&lc);
}
}
@@ -770,6 +828,8 @@ namespace build2
//
if (const scope* rs = l.base_scope ().root_scope ())
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (
com
? c_export_poptions
@@ -810,7 +870,9 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
pt->as<file> (), la, 0, // lflags unused.
- impf, nullptr, optf, false /* self */,
+ impf, nullptr, optf,
+ false /* self */,
+ false /* proc_opt_group */,
&lib_cache);
}
}
@@ -916,7 +978,9 @@ namespace build2
//
// Note: ut is still unrefined.
//
- if (ut == unit_type::module_intf && cast_true<bool> (t[b_binless]))
+ if ((ut == unit_type::module_intf ||
+ ut == unit_type::module_intf_part ||
+ ut == unit_type::module_impl_part) && cast_true<bool> (t[b_binless]))
{
// The module interface unit can be the same as an implementation
// (e.g., foo.mxx and foo.cxx) which means obj*{} targets could
@@ -977,6 +1041,12 @@ namespace build2
// to match it if we may need its modules or importable headers
// (see search_modules(), make_header_sidebuild() for details).
//
+ // Well, that was the case until we've added support for immediate
+ // importation of libraries, which happens during the load phase
+ // and natually leaves the library unmatched. While we could have
+ // returned from search_library() an indication of whether the
+ // library has been matched, this doesn't seem worth the trouble.
+ //
if (p.proj ())
{
pt = search_library (a,
@@ -984,8 +1054,10 @@ namespace build2
usr_lib_dirs,
p.prerequisite);
+#if 0
if (pt != nullptr && !modules)
continue;
+#endif
}
if (pt == nullptr)
@@ -1013,7 +1085,8 @@ namespace build2
{
pt = &p.search (t);
- if (a.operation () == clean_id && !pt->dir.sub (rs.out_path ()))
+ if (pt == dir ||
+ (a.operation () == clean_id && !pt->dir.sub (rs.out_path ())))
continue;
}
@@ -1105,12 +1178,14 @@ namespace build2
// this can very well be happening in parallel. But that's not a
// problem since fsdir{}'s update is idempotent.
//
- fsdir_rule::perform_update_direct (a, t);
+ fsdir_rule::perform_update_direct (a, *dir);
}
// Note: the leading '@' is reserved for the module map prefix (see
// extract_modules()) and no other line must start with it.
//
+ // NOTE: see also the predefs rule if changing anything here.
+ //
depdb dd (tp + ".d");
// First should come the rule name/version.
@@ -1275,7 +1350,7 @@ namespace build2
//
l5 ([&]{trace << "extracting headers from " << src;});
auto& is (tu.module_info.imports);
- psrc = extract_headers (a, bs, t, li, src, md, dd, u, mt, is);
+ extract_headers (a, bs, t, li, src, md, dd, u, mt, is, psrc);
is.clear (); // No longer needed.
}
@@ -1330,6 +1405,10 @@ namespace build2
//
if (mt != timestamp_nonexistent)
{
+ // Appended to by to_module_info() below.
+ //
+ tu.module_info.imports.clear ();
+
u = false;
md.touch = true;
}
@@ -1414,24 +1493,6 @@ namespace build2
extract_modules (a, bs, t, li,
tts, src,
md, move (tu.module_info), dd, u);
-
- // Currently in VC module interface units must be compiled from
- // the original source (something to do with having to detect and
- // store header boundaries in the .ifc files).
- //
- // @@ MODHDR MSVC: should we do the same for header units? I guess
- // we will figure it out when MSVC supports header units.
- //
- // @@ TMP: probably outdated. Probably the same for partitions.
- //
- // @@ See also similar check in extract_headers(), existing entry
- // case.
- //
- if (ctype == compiler_type::msvc)
- {
- if (ut == unit_type::module_intf)
- psrc.second = false;
- }
}
}
@@ -1450,7 +1511,7 @@ namespace build2
// to keep re-validating the file on every subsequent dry-run as well
// on the real run).
//
- if (u && dd.reading () && !ctx.dry_run)
+ if (u && dd.reading () && !ctx.dry_run_option)
dd.touch = timestamp_unknown;
dd.close (false /* mtime_check */);
@@ -1512,10 +1573,13 @@ namespace build2
switch (a)
{
case perform_update_id: return move (md);
- case perform_clean_id: return [this] (action a, const target& t)
+ case perform_clean_id:
{
- return perform_clean (a, t);
- };
+ return [this, srct = &md.src.type ()] (action a, const target& t)
+ {
+ return perform_clean (a, t, *srct);
+ };
+ }
default: return noop_recipe; // Configure update.
}
}
@@ -1612,72 +1676,6 @@ namespace build2
return pm;
}
- // @@ TMP
- //
-#if 0
- // Return the next make prerequisite starting from the specified
- // position and update position to point to the start of the
- // following prerequisite or l.size() if there are none left.
- //
- static string
- next_make (const string& l, size_t& p)
- {
- size_t n (l.size ());
-
- // Skip leading spaces.
- //
- for (; p != n && l[p] == ' '; p++) ;
-
- // Lines containing multiple prerequisites are 80 characters max.
- //
- string r;
- r.reserve (n);
-
- // Scan the next prerequisite while watching out for escape sequences.
- //
- for (; p != n && l[p] != ' '; p++)
- {
- char c (l[p]);
-
- if (p + 1 != n)
- {
- if (c == '$')
- {
- // Got to be another (escaped) '$'.
- //
- if (l[p + 1] == '$')
- ++p;
- }
- else if (c == '\\')
- {
- // This may or may not be an escape sequence depending on whether
- // what follows is "escapable".
- //
- switch (c = l[++p])
- {
- case '\\': break;
- case ' ': break;
- default: c = '\\'; --p; // Restore.
- }
- }
- }
-
- r += c;
- }
-
- // Skip trailing spaces.
- //
- for (; p != n && l[p] == ' '; p++) ;
-
- // Skip final '\'.
- //
- if (p == n - 1 && l[p] == '\\')
- p++;
-
- return r;
- }
-#endif
-
// VC /showIncludes output. The first line is the file being compiled
// (unless clang-cl; handled by our caller). Then we have the list of
// headers, one per line, in this form (text can presumably be
@@ -1873,7 +1871,7 @@ namespace build2
// Any unhandled io_error is handled by the caller as a generic module
// mapper io error. Returning false terminates the communication.
//
- struct compile_rule::module_mapper_state //@@ gcc_module_mapper_state
+ struct compile_rule::gcc_module_mapper_state
{
size_t skip; // Number of depdb entries to skip.
size_t header_units = 0; // Number of header units imported.
@@ -1884,15 +1882,20 @@ namespace build2
optional<const build2::cc::translatable_headers*> translatable_headers;
small_vector<string, 2> batch; // Reuse buffers.
+ size_t batch_n = 0;
- module_mapper_state (size_t s, module_imports& i)
+ gcc_module_mapper_state (size_t s, module_imports& i)
: skip (s), imports (i) {}
};
- bool compile_rule::
- gcc_module_mapper (module_mapper_state& st,
+ // The module mapper is called on one line of input at a time. It should
+ // return nullopt if another line is expected (batch), false if the mapper
+ // interaction should be terminated, and true if it should be continued.
+ //
+ optional<bool> compile_rule::
+ gcc_module_mapper (gcc_module_mapper_state& st,
action a, const scope& bs, file& t, linfo li,
- ifdstream& is,
+ const string& l,
ofdstream& os,
depdb& dd, bool& update, bool& bad_error,
optional<prefix_map>& pfx_map, srcout_map& so_map) const
@@ -1908,35 +1911,40 @@ namespace build2
// Read in the entire batch trying hard to reuse the buffers.
//
- auto& batch (st.batch);
- size_t batch_n (0);
+ small_vector<string, 2>& batch (st.batch);
+ size_t& batch_n (st.batch_n);
- for (;;)
+ // Add the next line.
+ //
{
if (batch.size () == batch_n)
- batch.push_back (string ());
-
- string& r (batch[batch_n]);
-
- if (eof (getline (is, r)))
- break;
+ batch.push_back (l);
+ else
+ batch[batch_n] = l;
batch_n++;
+ }
- if (r.back () != ';')
- break;
+ // Check if more is expected in this batch.
+ //
+ {
+ string& r (batch[batch_n - 1]);
- // Strip the trailing `;` word.
- //
- r.pop_back ();
- r.pop_back ();
- }
+ if (r.back () == ';')
+ {
+ // Strip the trailing `;` word.
+ //
+ r.pop_back ();
+ r.pop_back ();
- if (batch_n == 0) // EOF
- return false;
+ return nullopt;
+ }
+ }
if (verb >= 3)
{
+ // It doesn't feel like buffering this would be useful.
+ //
// Note that we show `;` in requests/responses so that the result
// could be replayed.
//
@@ -1958,23 +1966,211 @@ namespace build2
for (size_t i (0); i != batch_n; ++i)
{
string& r (batch[i]);
+ size_t rn (r.size ());
+
+ // The protocol uses a peculiar quoting/escaping scheme that can be
+ // summarized as follows (see the libcody documentation for details):
+ //
+ // - Words are seperated with spaces and/or tabs.
+ //
+ // - Words need not be quoted if they only containing characters from
+ // the [-+_/%.A-Za-z0-9] set.
+ //
+ // - Otherwise words need to be single-quoted.
+ //
+ // - Inside single-quoted words, the \n \t \' and \\ escape sequences
+ // are recognized.
+ //
+ // Note that we currently don't treat abutted quotes (as in a' 'b) as
+ // a single word (it doesn't seem plausible that we will ever receive
+ // something like this).
+ //
+ size_t b (0), e (0), n; bool q; // Next word.
+
+ auto next = [&r, rn, &b, &e, &n, &q] () -> size_t
+ {
+ if (b != e)
+ b = e;
+
+ // Skip leading whitespaces.
+ //
+ for (; b != rn && (r[b] == ' ' || r[b] == '\t'); ++b) ;
+
+ if (b != rn)
+ {
+ q = (r[b] == '\'');
+
+ // Find first trailing whitespace or closing quote.
+ //
+ for (e = b + 1; e != rn; ++e)
+ {
+ // Note that we deal with invalid quoting/escaping in unquote().
+ //
+ switch (r[e])
+ {
+ case ' ':
+ case '\t':
+ if (q)
+ continue;
+ else
+ break;
+ case '\'':
+ if (q)
+ {
+ ++e; // Include closing quote (hopefully).
+ break;
+ }
+ else
+ {
+ assert (false); // Abutted quote.
+ break;
+ }
+ case '\\':
+ if (++e != rn) // Skip next character (hopefully).
+ continue;
+ else
+ break;
+ default:
+ continue;
+ }
- // @@ TODO: quoting and escaping.
+ break;
+ }
+
+ n = e - b;
+ }
+ else
+ {
+ q = false;
+ e = rn;
+ n = 0;
+ }
+
+ return n;
+ };
+
+ // Unquote into tmp the current word returning false if malformed.
+ //
+ auto unquote = [&r, &b, &n, &q, &tmp] (bool clear = true) -> bool
+ {
+ if (q && n > 1)
+ {
+ size_t e (b + n - 1);
+
+ if (r[b] == '\'' && r[e] == '\'')
+ {
+ if (clear)
+ tmp.clear ();
+
+ size_t i (b + 1);
+ for (; i != e; ++i)
+ {
+ char c (r[i]);
+ if (c == '\\')
+ {
+ if (++i == e)
+ {
+ i = 0;
+ break;
+ }
+
+ c = r[i];
+ if (c == 'n') c = '\n';
+ else if (c == 't') c = '\t';
+ }
+ tmp += c;
+ }
+
+ if (i == e)
+ return true;
+ }
+ }
+
+ return false;
+ };
+
+#if 0
+#define UNQUOTE(x, y) \
+ r = x; rn = r.size (); b = e = 0; \
+ assert (next () && unquote () && tmp == y)
+
+ UNQUOTE ("'foo bar'", "foo bar");
+ UNQUOTE (" 'foo bar' ", "foo bar");
+ UNQUOTE ("'foo\\\\bar'", "foo\\bar");
+ UNQUOTE ("'\\'foo bar'", "'foo bar");
+ UNQUOTE ("'foo bar\\''", "foo bar'");
+ UNQUOTE ("'\\'foo\\\\bar\\''", "'foo\\bar'");
+
+ fail << "all good";
+#endif
+
+ // Escape if necessary the specified string and append to r.
//
- size_t b (0), e (0), n; // Next word.
+ auto escape = [&r] (const string& s)
+ {
+ size_t b (0), e, n (s.size ());
+ while (b != n && (e = s.find_first_of ("\\'\n\t", b)) != string::npos)
+ {
+ r.append (s, b, e - b); // Preceding chunk.
+
+ char c (s[e]);
+ r += '\\';
+ r += (c == '\n' ? 'n' : c == '\t' ? 't' : c);
+ b = e + 1;
+ }
- auto next = [&r, &b, &e, &n] () -> size_t
+ if (b != n)
+ r.append (s, b, e); // Final chunk.
+ };
+
+ // Quote and escape if necessary the specified string and append to r.
+ //
+ auto quote = [&r, &escape] (const string& s)
{
- return (n = next_word (r, b, e, ' ', '\t'));
+ if (find_if (s.begin (), s.end (),
+ [] (char c)
+ {
+ return !((c >= 'a' && c <= 'z') ||
+ (c >= '0' && c <= '9') ||
+ (c >= 'A' && c <= 'Z') ||
+ c == '-' || c == '_' || c == '/' ||
+ c == '.' || c == '+' || c == '%');
+ }) == s.end ())
+ {
+ r += s;
+ }
+ else
+ {
+ r += '\'';
+ escape (s);
+ r += '\'';
+ }
};
+#if 0
+#define QUOTE(x, y) \
+ r.clear (); quote (x); \
+ assert (r == y)
+
+ QUOTE ("foo/Bar-7.h", "foo/Bar-7.h");
+
+ QUOTE ("foo bar", "'foo bar'");
+ QUOTE ("foo\\bar", "'foo\\\\bar'");
+ QUOTE ("'foo bar", "'\\'foo bar'");
+ QUOTE ("foo bar'", "'foo bar\\''");
+ QUOTE ("'foo\\bar'", "'\\'foo\\\\bar\\''");
+
+ fail << "all good";
+#endif
+
next (); // Request name.
- auto name = [&r, b, n] (const char* c) -> bool
+ auto name = [&r, b, n, q] (const char* c) -> bool
{
// We can reasonably assume a command will never be quoted.
//
- return (r.compare (b, n, c) == 0 &&
+ return (!q &&
+ r.compare (b, n, c) == 0 &&
(r[n] == ' ' || r[n] == '\t' || r[n] == '\0'));
};
@@ -2023,7 +2219,17 @@ namespace build2
if (next ())
{
- path f (r, b, n);
+ path f;
+ if (!q)
+ f = path (r, b, n);
+ else if (unquote ())
+ f = path (tmp);
+ else
+ {
+ r = "ERROR 'malformed quoting/escaping in request'";
+ continue;
+ }
+
bool exists (true);
// The TU path we pass to the compiler is always absolute so any
@@ -2034,8 +2240,9 @@ namespace build2
//
if (exists && f.relative ())
{
- tmp.assign (r, b, n);
- r = "ERROR relative header path '"; r += tmp; r += '\'';
+ r = "ERROR 'relative header path ";
+ escape (f.string ());
+ r += '\'';
continue;
}
@@ -2082,7 +2289,7 @@ namespace build2
if (remapped)
{
- r = "ERROR remapping of headers not supported";
+ r = "ERROR 'remapping of headers not supported'";
continue;
}
@@ -2142,8 +2349,8 @@ namespace build2
//
// Note: if ht is NULL, f is still valid.
//
- r = "ERROR unable to update header '";
- r += (ht != nullptr ? ht->path () : f).string ();
+ r = "ERROR 'unable to update header ";
+ escape ((ht != nullptr ? ht->path () : f).string ());
r += '\'';
continue;
}
@@ -2278,17 +2485,27 @@ namespace build2
// original (which we may need to normalize when we read
// this mapping in extract_headers()).
//
- tmp = "@ "; tmp.append (r, b, n); tmp += ' '; tmp += bp;
+ // @@ This still breaks if the header path contains spaces.
+ // GCC bug 110153.
+ //
+ tmp = "@ ";
+ if (!q) tmp.append (r, b, n);
+ else unquote (false /* clear */); // Can't fail.
+ tmp += ' ';
+ tmp += bp;
+
dd.expect (tmp);
st.header_units++;
}
- r = "PATHNAME "; r += bp;
+ r = "PATHNAME ";
+ quote (bp);
}
catch (const failed&)
{
r = "ERROR 'unable to update header unit for ";
- r += hs; r += '\'';
+ escape (hs);
+ r += '\'';
continue;
}
}
@@ -2314,7 +2531,7 @@ namespace build2
// Truncate the response batch and terminate the communication (see
// also libcody issue #22).
//
- tmp.assign (r, b, n);
+ tmp.assign (r, b, n); // Request name (unquoted).
r = "ERROR '"; r += w; r += ' '; r += tmp; r += '\'';
batch_n = i + 1;
term = true;
@@ -2330,6 +2547,9 @@ namespace build2
// Write the response batch.
//
+ // @@ It's theoretically possible that we get blocked writing the
+ // response while the compiler gets blocked writing the diagnostics.
+ //
for (size_t i (0);; )
{
string& r (batch[i]);
@@ -2350,6 +2570,8 @@ namespace build2
os.flush ();
+ batch_n = 0; // Start a new batch.
+
return !term;
}
@@ -2704,7 +2926,7 @@ namespace build2
rs = !exists
? string ("INCLUDE")
: ("ERROR unable to update header '" +
- (ht != nullptr ? ht->path () : f).string () + "'");
+ (ht != nullptr ? ht->path () : f).string () + '\'');
bad_error = true;
break;
@@ -2782,7 +3004,7 @@ namespace build2
}
catch (const failed&)
{
- rs = "ERROR unable to update header unit '" + hp + "'";
+ rs = "ERROR unable to update header unit '" + hp + '\'';
bad_error = true;
break;
}
@@ -2892,7 +3114,7 @@ namespace build2
// single "version" of a header. Seems reasonable.
//
// Note also that while it would have been nice to have a unified cc
- // cache, the map_extension() call is passed x_inc which is module-
+ // cache, the map_extension() call is passed x_incs which is module-
// specific. In other words, we may end up mapping the same header to
// two different targets depending on whether it is included from, say,
// C or C++ translation unit. We could have used a unified cache for
@@ -2956,7 +3178,7 @@ namespace build2
fp, cache, norm,
[this] (const scope& bs, const string& n, const string& e)
{
- return map_extension (bs, n, e, x_inc);
+ return map_extension (bs, n, e, x_incs);
},
h::static_type,
[this, &d] (action a, const scope& bs, const target& t)
@@ -3010,16 +3232,18 @@ namespace build2
return inject_file (trace, "header", a, t, pt, mt, fail);
}
- // Extract and inject header dependencies. Return the preprocessed source
- // file as well as an indication if it is usable for compilation (see
- // below for details).
+ // Extract and inject header dependencies. Return (in result) the
+ // preprocessed source file as well as an indication if it is usable for
+ // compilation (see below for details). Note that result is expected to
+ // be initialized to {entry (), false}. Not using return type due to
+ // GCC bug #107555.
//
// This is also the place where we handle header units which are a lot
// more like auto-generated headers than modules. In particular, if a
// header unit BMI is out-of-date, then we have to re-preprocess this
// translation unit.
//
- pair<file_cache::entry, bool> compile_rule::
+ void compile_rule::
extract_headers (action a,
const scope& bs,
file& t,
@@ -3029,7 +3253,8 @@ namespace build2
depdb& dd,
bool& update,
timestamp mt,
- module_imports& imports) const
+ module_imports& imports,
+ pair<file_cache::entry, bool>& result) const
{
tracer trace (x, "compile_rule::extract_headers");
@@ -3042,9 +3267,16 @@ namespace build2
file_cache::entry psrc;
bool puse (true);
+ // Preprocessed file extension.
+ //
+ const char* pext (x_assembler_cpp (src) ? ".Si" :
+ x_objective (src) ? x_obj_pext :
+ x_pext);
+
// Preprocessor mode that preserves as much information as possible while
// still performing inclusions. Also serves as a flag indicating whether
- // this compiler uses the separate preprocess and compile setup.
+ // this (non-MSVC) compiler uses the separate preprocess and compile
+ // setup.
//
const char* pp (nullptr);
@@ -3055,7 +3287,16 @@ namespace build2
// -fdirectives-only is available since GCC 4.3.0.
//
if (cmaj > 4 || (cmaj == 4 && cmin >= 3))
- pp = "-fdirectives-only";
+ {
+ // Note that for assembler-with-cpp GCC currently forces full
+ // preprocessing in (what appears to be) an attempt to paper over
+ // a deeper issue (see GCC bug 109534). If/when that bug gets
+ // fixed, we can enable this on our side. Note that Clang's
+ // -frewrite-includes also has issues (see below).
+ //
+ if (!x_assembler_cpp (src))
+ pp = "-fdirectives-only";
+ }
break;
}
@@ -3064,7 +3305,16 @@ namespace build2
// -frewrite-includes is available since Clang 3.2.0.
//
if (cmaj > 3 || (cmaj == 3 && cmin >= 2))
- pp = "-frewrite-includes";
+ {
+ // While Clang's -frewrite-includes appears to work, there are
+ // some issues with correctly tracking location information
+ // (manifests itself as wrong line numbers in debug info, for
+ // example). The result also appears to reference the .Si file
+ // instead of the original source file for some reason.
+ //
+ if (!x_assembler_cpp (src))
+ pp = "-frewrite-includes";
+ }
break;
}
@@ -3145,7 +3395,7 @@ namespace build2
//
// GCC's -fdirective-only, on the other hand, processes all the
// directives so they are gone from the preprocessed source. Here is
- // what we are going to do to work around this: we will detect if any
+ // what we are going to do to work around this: we will sense if any
// diagnostics has been written to stderr on the -E run. If that's the
// case (but the compiler indicated success) then we assume they are
// warnings and disable the use of the preprocessed output for
@@ -3183,7 +3433,9 @@ namespace build2
// not found, and there is no problem with outdated generated headers
// since we update/remap them before the compiler has a chance to read
// them. Overall, this "dependency mapper" approach is how it should
- // have been done from the beginning.
+ // have been done from the beginning. Note: that's the ideal world,
+ // the reality is that the required mapper extensions are not (yet)
+ // in libcody/GCC.
// Note: diagnostics sensing is currently only supported if dependency
// info is written to a file (see above).
@@ -3232,13 +3484,13 @@ namespace build2
// The gen argument to init_args() is in/out. The caller signals whether
// to force the generated header support and on return it signals
- // whether this support is enabled. The first call to init_args is
- // expected to have gen false.
+ // whether this support is enabled. If gen is false, then stderr is
- // expected to be either discarded or merged with stdout.
//
// Return NULL if the dependency information goes to stdout and a
// pointer to the temporary file path otherwise.
//
- auto init_args = [a, &t, ot, li, reprocess,
+ auto init_args = [a, &t, ot, li, reprocess, pext,
&src, &md, &psrc, &sense_diag, &mod_mapper, &bs,
pp, &env, &args, &args_gen, &args_i, &out, &drm,
&so_map, this]
@@ -3384,16 +3636,6 @@ namespace build2
// Some compile options (e.g., -std, -m) affect the preprocessor.
//
- // Currently Clang supports importing "header modules" even when in
- // the TS mode. And "header modules" support macros which means
- // imports have to be resolved during preprocessing. Which poses a
- // bit of a chicken and egg problem for us. For now, the workaround
- // is to remove the -fmodules-ts option when preprocessing. Hopefully
- // there will be a "pure modules" mode at some point.
- //
- // @@ MODHDR Clang: should be solved with the dynamic module mapper
- // if/when Clang supports it?
- //
// Don't treat warnings as errors.
//
@@ -3422,11 +3664,18 @@ namespace build2
append_options (args, cmode);
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // Note that for MSVC stderr is merged with stdout and is then
+ // parsed, so no append_diag_color_options() call.
+
// See perform_update() for details on the choice of options.
//
+ // NOTE: see also the predefs rule if adding anything here.
+ //
{
- bool sc (find_option_prefix ("/source-charset:", args));
- bool ec (find_option_prefix ("/execution-charset:", args));
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
if (!sc && !ec)
args.push_back ("/utf-8");
@@ -3442,15 +3691,18 @@ namespace build2
if (cvariant != "clang" && isystem (*this))
{
- if (find_option_prefix ("/external:I", args) &&
- !find_option_prefix ("/external:W", args))
+ if (find_option_prefixes ({"/external:I", "-external:I"}, args) &&
+ !find_option_prefixes ({"/external:W", "-external:W"}, args))
args.push_back ("/external:W0");
}
- if (x_lang == lang::cxx && !find_option_prefix ("/EH", args))
+ if (x_lang == lang::cxx &&
+ !find_option_prefixes ({"/EH", "-EH"}, args))
args.push_back ("/EHsc");
- if (!find_option_prefixes ({"/MD", "/MT"}, args))
+ // NOTE: see similar code in search_modules().
+ //
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
args.push_back ("/MD");
args.push_back ("/P"); // Preprocess to file.
@@ -3461,7 +3713,7 @@ namespace build2
msvc_sanitize_cl (args);
- psrc = ctx.fcache.create (t.path () + x_pext, !modules);
+ psrc = ctx.fcache->create (t.path () + pext, !modules);
if (fc)
{
@@ -3480,12 +3732,18 @@ namespace build2
}
case compiler_class::gcc:
{
- append_options (args, cmode,
- cmode.size () - (modules && clang ? 1 : 0));
+ append_options (args, cmode);
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // If not gen, then stderr is discarded.
+ //
+ if (gen)
+ append_diag_color_options (args);
+
// See perform_update() for details on the choice of options.
//
+ // NOTE: see also the predefs rule if adding anything here.
+ //
if (!find_option_prefix ("-finput-charset=", args))
args.push_back ("-finput-charset=UTF-8");
@@ -3497,8 +3755,7 @@ namespace build2
if (ctype == compiler_type::clang && tsys == "win32-msvc")
{
- initializer_list<const char*> os {"-nostdlib", "-nostartfiles"};
- if (!find_options (os, cmode) && !find_options (os, args))
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
{
args.push_back ("-D_MT");
args.push_back ("-D_DLL");
@@ -3608,7 +3865,7 @@ namespace build2
// Preprocessor output.
//
- psrc = ctx.fcache.create (t.path () + x_pext, !modules);
+ psrc = ctx.fcache->create (t.path () + pext, !modules);
args.push_back ("-o");
args.push_back (psrc.path ().string ().c_str ());
}
@@ -3898,13 +4155,13 @@ namespace build2
// If modules are enabled, then we keep the preprocessed output
// around (see apply() for details).
//
- // See apply() for details on the extra MSVC check.
- //
- return modules && (ctype != compiler_type::msvc ||
- md.type != unit_type::module_intf)
- ? make_pair (ctx.fcache.create_existing (t.path () + x_pext),
- true)
- : make_pair (file_cache::entry (), false);
+ if (modules)
+ {
+ result.first = ctx.fcache->create_existing (t.path () + pext);
+ result.second = true;
+ }
+
+ return;
}
// This can be a header or a header unit (mapping).
@@ -3957,7 +4214,7 @@ namespace build2
// Bail out early if we have deferred a failure.
//
- return make_pair (file_cache::entry (), false);
+ return;
}
}
}
@@ -3983,6 +4240,12 @@ namespace build2
process pr;
+ // We use the fdstream_mode::skip mode on stdout (cannot be used
+ // on both) and so dbuf must be destroyed (closed) first.
+ //
+ ifdstream is (ifdstream::badbit);
+ diag_buffer dbuf (ctx);
+
try
{
// Assume the preprocessed output (if produced) is usable
@@ -4003,217 +4266,229 @@ namespace build2
//
bool good_error (false), bad_error (false);
- // If we have no generated header support, then suppress all
- // diagnostics (if things go badly we will restart with this
- // support).
- //
- if (drmp == nullptr) // Dependency info goes to stdout.
+ if (mod_mapper) // Dependency info is implied by mapper requests.
{
- assert (!sense_diag); // Note: could support with fdselect().
+ assert (gen && !sense_diag); // Not used in this mode.
- // For VC with /P the dependency info and diagnostics all go
- // to stderr so redirect it to stdout.
+ // Note that here we use the skip mode on the diagnostics
+ // stream which means we have to use own instance of stdout
+ // stream for the correct destruction order (see below).
//
- pr = process (
- cpath,
- args.data (),
- 0,
- -1,
- cclass == compiler_class::msvc ? 1 : gen ? 2 : -2,
- nullptr, // CWD
- env.empty () ? nullptr : env.data ());
- }
- else // Dependency info goes to a temporary file.
- {
pr = process (cpath,
- args.data (),
- mod_mapper ? -1 : 0,
- mod_mapper ? -1 : 2, // Send stdout to stderr.
- gen ? 2 : sense_diag ? -1 : -2,
+ args,
+ -1,
+ -1,
+ diag_buffer::pipe (ctx),
nullptr, // CWD
env.empty () ? nullptr : env.data ());
- // Monitor for module mapper requests and/or diagnostics. If
- // diagnostics is detected, mark the preprocessed output as
- // unusable for compilation.
- //
- if (mod_mapper || sense_diag)
+ dbuf.open (args[0],
+ move (pr.in_efd),
+ fdstream_mode::non_blocking |
+ fdstream_mode::skip);
+ try
{
- module_mapper_state mm_state (skip_count, imports);
+ gcc_module_mapper_state mm_state (skip_count, imports);
+
+ // Note that while we read both streams until eof in normal
+ // circumstances, we cannot use fdstream_mode::skip for the
+ // exception case on both of them: we may end up being
+ // blocked trying to read one stream while the process may
+ // be blocked writing to the other. So in case of an
+ // exception we only skip the diagnostics and close the
+ // mapper stream hard. The latter (together with closing of
+ // the stdin stream) should happen first so the order of
+ // the following variable is important.
+ //
+ // Note also that we open the stdin stream in the blocking
+ // mode.
+ //
+ ifdstream is (move (pr.in_ofd),
+ fdstream_mode::non_blocking,
+ ifdstream::badbit); // stdout
+ ofdstream os (move (pr.out_fd)); // stdin (badbit|failbit)
+
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically
+ // get an inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
- const char* w (nullptr);
- try
+ bool more (false);
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
{
- // For now we don't need to do both so let's use a simpler
- // blocking implementation. Note that the module mapper
- // also needs to be adjusted when switching to the
- // non-blocking version.
+ // @@ Currently we will accept a (potentially truncated)
+ // line that ends with EOF rather than newline.
//
-#if 1
- assert (mod_mapper != sense_diag);
-
- if (mod_mapper)
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
{
- w = "module mapper request";
-
- // Note: the order is important (see the non-blocking
- // verison for details).
- //
- ifdstream is (move (pr.in_ofd),
- fdstream_mode::skip,
- ifdstream::badbit);
- ofdstream os (move (pr.out_fd));
-
- do
+ if (eof (is))
{
- if (!gcc_module_mapper (mm_state,
- a, bs, t, li,
- is, os,
- dd, update, bad_error,
- pfx_map, so_map))
- break;
-
- } while (!is.eof ());
-
- os.close ();
- is.close ();
- }
-
- if (sense_diag)
- {
- w = "diagnostics";
- ifdstream is (move (pr.in_efd), fdstream_mode::skip);
- puse = puse && (is.peek () == ifdstream::traits_type::eof ());
- is.close ();
- }
-#else
- fdselect_set fds;
- auto add = [&fds] (const auto_fd& afd) -> fdselect_state*
- {
- int fd (afd.get ());
- fdmode (fd, fdstream_mode::non_blocking);
- fds.push_back (fd);
- return &fds.back ();
- };
-
- // Note that while we read both streams until eof in
- // normal circumstances, we cannot use fdstream_mode::skip
- // for the exception case on both of them: we may end up
- // being blocked trying to read one stream while the
- // process may be blocked writing to the other. So in case
- // of an exception we only skip the diagnostics and close
- // the mapper stream hard. The latter should happen first
- // so the order of the following variable is important.
- //
- ifdstream es;
- ofdstream os;
- ifdstream is;
-
- fdselect_state* ds (nullptr);
- if (sense_diag)
- {
- w = "diagnostics";
- ds = add (pr.in_efd);
- es.open (move (pr.in_efd), fdstream_mode::skip);
- }
-
- fdselect_state* ms (nullptr);
- if (mod_mapper)
- {
- w = "module mapper request";
- ms = add (pr.in_ofd);
- is.open (move (pr.in_ofd));
- os.open (move (pr.out_fd)); // Note: blocking.
- }
+ os.close ();
+ is.close ();
- // Set each state pointer to NULL when the respective
- // stream reaches eof.
- //
- while (ds != nullptr || ms != nullptr)
- {
- w = "output";
- ifdselect (fds);
+ if (more)
+ throw_generic_ios_failure (EIO, "unexpected EOF");
- // First read out the diagnostics in case the mapper
- // interaction produces more. To make sure we don't get
- // blocked by full stderr, the mapper should only handle
- // one request at a time.
- //
- if (ds != nullptr && ds->ready)
+ ist.fd = nullfd;
+ }
+ else
{
- w = "diagnostics";
-
- for (char buf[4096];;)
- {
- streamsize c (sizeof (buf));
- streamsize n (es.readsome (buf, c));
-
- if (puse && n > 0)
- puse = false;
-
- if (n < c)
- break;
- }
+ optional<bool> r (
+ gcc_module_mapper (mm_state,
+ a, bs, t, li,
+ l, os,
+ dd, update, bad_error,
+ pfx_map, so_map));
- if (es.eof ())
- {
- es.close ();
- ds->fd = nullfd;
- ds = nullptr;
- }
- }
+ more = !r.has_value ();
- if (ms != nullptr && ms->ready)
- {
- w = "module mapper request";
-
- gcc_module_mapper (mm_state,
- a, bs, t, li,
- is, os,
- dd, update, bad_error,
- pfx_map, so_map);
- if (is.eof ())
+ if (more || *r)
+ l.clear ();
+ else
{
os.close ();
is.close ();
- ms->fd = nullfd;
- ms = nullptr;
+ ist.fd = nullfd;
}
}
+
+ continue;
}
-#endif
- }
- catch (const io_error& e)
- {
- if (pr.wait ())
- fail << "io error handling " << x_lang << " compiler "
- << w << ": " << e;
- // Fall through.
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
}
- if (mod_mapper)
- md.header_units += mm_state.header_units;
+ md.header_units += mm_state.header_units;
+ }
+ catch (const io_error& e)
+ {
+ // Note that diag_buffer handles its own io errors so this
+ // is about mapper stdin/stdout.
+ //
+ if (pr.wait ())
+ fail << "io error handling " << x_lang << " compiler "
+ << "module mapper request: " << e;
+
+ // Fall through.
}
// The idea is to reduce this to the stdout case.
//
- pr.wait ();
-
- // With -MG we want to read dependency info even if there is
- // an error (in case an outdated header file caused it). But
- // with the GCC module mapper an error is non-negotiable, so
- // to speak, and so we want to skip all of that. In fact, we
- // now write directly to depdb without generating and then
+ // We now write directly to depdb without generating and then
// parsing an intermediate dependency makefile.
//
- pr.in_ofd = (ctype == compiler_type::gcc && mod_mapper)
- ? auto_fd (nullfd)
- : fdopen (*drmp, fdopen_mode::in);
+ pr.wait ();
+ pr.in_ofd = nullfd;
+ }
+ else
+ {
+ // If we have no generated header support, then suppress all
+ // diagnostics (if things go badly we will restart with this
+ // support).
+ //
+ if (drmp == nullptr) // Dependency info goes to stdout.
+ {
+ assert (!sense_diag); // Note: could support if necessary.
+
+ // For VC with /P the dependency info and diagnostics all go
+ // to stderr so redirect it to stdout.
+ //
+ int err (
+ cclass == compiler_class::msvc ? 1 : // stdout
+ !gen ? -2 : // /dev/null
+ diag_buffer::pipe (ctx, sense_diag /* force */));
+
+ pr = process (
+ cpath,
+ args,
+ 0,
+ -1,
+ err,
+ nullptr, // CWD
+ env.empty () ? nullptr : env.data ());
+
+ if (cclass != compiler_class::msvc && gen)
+ {
+ dbuf.open (args[0],
+ move (pr.in_efd),
+ fdstream_mode::non_blocking); // Skip on stdout.
+ }
+ }
+ else // Dependency info goes to temporary file.
+ {
+ // Since we only need to read from one stream (dbuf) let's
+ // use the simpler blocking setup.
+ //
+ int err (
+ !gen && !sense_diag ? -2 : // /dev/null
+ diag_buffer::pipe (ctx, sense_diag /* force */));
+
+ pr = process (cpath,
+ args,
+ 0,
+ 2, // Send stdout to stderr.
+ err,
+ nullptr, // CWD
+ env.empty () ? nullptr : env.data ());
+
+ if (gen || sense_diag)
+ {
+ dbuf.open (args[0], move (pr.in_efd));
+ dbuf.read (sense_diag /* force */);
+ }
+
+ if (sense_diag)
+ {
+ if (!dbuf.buf.empty ())
+ {
+ puse = false;
+ dbuf.buf.clear (); // Discard.
+ }
+ }
+
+ // The idea is to reduce this to the stdout case.
+ //
+ // Note that with -MG we want to read dependency info even
+ // if there is an error (in case an outdated header file
+ // caused it).
+ //
+ pr.wait ();
+ pr.in_ofd = fdopen (*drmp, fdopen_mode::in);
+ }
}
+ // Read and process dependency information, if any.
+ //
if (pr.in_ofd != nullfd)
{
+ // We have two cases here: reading from stdout and potentially
+ // stderr (dbuf) or reading from file (see the process startup
+ // code above for details). If we have to read from two
+ // streams, then we have to use the non-blocking setup. But we
+ // cannot use the non-blocking setup uniformly because on
+ // Windows it's only supported for pipes. So things are going
+ // to get a bit hairy.
+ //
+ // And there is another twist to this: for MSVC we redirect
+ // stderr to stdout since the header dependency information is
+ // part of the diagnostics. If, however, there is some real
+ // diagnostics, we need to pass it through, potentially with
+ // buffering. The way we achieve this is by later opening dbuf
+ // in the EOF state and using it to buffer or stream the
+ // diagnostics.
+ //
+ bool nb (dbuf.is.is_open ());
+
// We may not read all the output (e.g., due to a restart).
// Before we used to just close the file descriptor to signal
// to the other end that we are not interested in the rest.
@@ -4221,20 +4496,69 @@ namespace build2
// impolite and complains, loudly (broken pipe). So now we are
// going to skip until the end.
//
- ifdstream is (move (pr.in_ofd),
- fdstream_mode::text | fdstream_mode::skip,
- ifdstream::badbit);
+ // Note that this means we are not using skip on dbuf (see
+ // above for the destruction order details).
+ //
+ {
+ fdstream_mode m (fdstream_mode::text |
+ fdstream_mode::skip);
+
+ if (nb)
+ m |= fdstream_mode::non_blocking;
+
+ is.open (move (pr.in_ofd), m);
+ }
+
+ fdselect_set fds;
+ if (nb)
+ fds = {is.fd (), dbuf.is.fd ()};
size_t skip (skip_count);
string l, l2; // Reuse.
for (bool first (true), second (false); !restart; )
{
- if (eof (getline (is, l)))
+ if (nb)
{
- if (bad_error && !l2.empty ())
- text << l2;
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
- break;
+ // We read until we reach EOF on both streams.
+ //
+ if (ist.fd == nullfd && dst.fd == nullfd)
+ break;
+
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ {
+ ist.fd = nullfd;
+ continue;
+ }
+
+ // Fall through to parse (and clear) the line.
+ }
+ else
+ {
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
+
+ continue;
+ }
+ }
+ else
+ {
+ if (eof (getline (is, l)))
+ {
+ if (bad_error && !l2.empty ()) // MSVC only (see below).
+ dbuf.write (l2, true /* newline */);
+
+ break;
+ }
}
l6 ([&]{trace << "header dependency line '" << l << "'";});
@@ -4285,9 +4609,15 @@ namespace build2
else
{
l2 = l;
- bad_error = true;
+
+ if (!bad_error)
+ {
+ dbuf.open_eof (args[0]);
+ bad_error = true;
+ }
}
+ l.clear ();
continue;
}
@@ -4297,6 +4627,7 @@ namespace build2
}
first = false;
+ l.clear ();
continue;
}
@@ -4304,8 +4635,13 @@ namespace build2
if (f.empty ()) // Some other diagnostics.
{
- text << l;
- bad_error = true;
+ if (!bad_error)
+ {
+ dbuf.open_eof (args[0]);
+ bad_error = true;
+ }
+
+ dbuf.write (l, true /* newline */);
break;
}
@@ -4399,12 +4735,9 @@ namespace build2
if (l.empty () ||
l[0] != '^' || l[1] != ':' || l[2] != ' ')
{
- // @@ Hm, we don't seem to redirect stderr to stdout
- // for this class of compilers so I wonder why
- // we are doing this?
- //
if (!l.empty ())
- text << l;
+ l5 ([&]{trace << "invalid header dependency line '"
+ << l << "'";});
bad_error = true;
break;
@@ -4419,7 +4752,10 @@ namespace build2
// "^: \".
//
if (l.size () == 4 && l[3] == '\\')
+ {
+ l.clear ();
continue;
+ }
else
pos = 3; // Skip "^: ".
@@ -4434,10 +4770,8 @@ namespace build2
if (pos != l.size () && l[pos] == ':')
{
- // @@ Hm, the same as above.
- //
- text << l;
-
+ l5 ([&]{trace << "invalid header dependency line '"
+ << l << "'";});
bad_error = true;
break;
}
@@ -4492,19 +4826,56 @@ namespace build2
}
if (bad_error || md.deferred_failure)
+ {
+ // Note that it may be tempting to finish reading out the
+ // diagnostics before bailing out. But that may end up in
+ // a deadlock if the process gets blocked trying to write
+ // to stdout.
+ //
break;
+ }
+
+ l.clear ();
+ }
+
+ // We may bail out early from the above loop in case of a
+ // restart or error. Which means the stderr stream (dbuf) may
+ // still be open and we need to close it before closing the
+ // stdout stream (which may try to skip).
+ //
+ // In this case we may also end up with incomplete diagnostics
+ // so discard it.
+ //
+ // Generally, it may be tempting to start thinking if we
+ // should discard buffered diagnostics in other cases, such as
+ // restart. But remember that during serial execution it will
+ // go straight to stderr so for consistency (and simplicity)
+ // we should just print it unless there are good reasons not
+ // to (also remember that in the restartable modes we normally
+ // redirect stderr to /dev/null; see the process startup code
+ // for details).
+ //
+ if (dbuf.is.is_open ())
+ {
+ dbuf.is.close ();
+ dbuf.buf.clear ();
}
// Bail out early if we have deferred a failure.
//
+ // Let's ignore any buffered diagnostics in this case since
+ // it would appear after the deferred failure note.
+ //
if (md.deferred_failure)
{
is.close ();
- return make_pair (file_cache::entry (), false);
+ return;
}
- // In case of VC, we are parsing stderr and if things go
- // south, we need to copy the diagnostics for the user to see.
+ // In case of VC, we are parsing redirected stderr and if
+ // things go south, we need to copy the diagnostics for the
+ // user to see. Note that we should have already opened dbuf
+ // at EOF above.
//
if (bad_error && cclass == compiler_class::msvc)
{
@@ -4519,7 +4890,7 @@ namespace build2
l.compare (p.first, 4, "1083") != 0 &&
msvc_header_c1083 (l, p))
{
- diag_stream_lock () << l << endl;
+ dbuf.write (l, true /* newline */);
}
}
}
@@ -4542,27 +4913,42 @@ namespace build2
if (pr.wait ())
{
- if (!bad_error) // Ignore expected successes (we are done).
{
- if (!restart && psrc)
- psrcw.close ();
+ diag_record dr;
- continue;
+ if (bad_error)
+ dr << fail << "expected error exit status from "
+ << x_lang << " compiler";
+
+ if (dbuf.is_open ())
+ dbuf.close (move (dr)); // Throws if error.
}
- fail << "expected error exit status from " << x_lang
- << " compiler";
+ // Ignore expected successes (we are done).
+ //
+ if (!restart && psrc)
+ psrcw.close ();
+
+ continue;
}
else if (pr.exit->normal ())
{
if (good_error) // Ignore expected errors (restart).
+ {
+ if (dbuf.is_open ())
+ dbuf.close ();
+
continue;
+ }
}
// Fall through.
}
catch (const io_error& e)
{
+ // Ignore buffered diagnostics (since reading it could be the
+ // cause of this failure).
+ //
if (pr.wait ())
fail << "unable to read " << x_lang << " compiler header "
<< "dependency output: " << e;
@@ -4571,18 +4957,23 @@ namespace build2
}
assert (pr.exit && !*pr.exit);
- const process_exit& e (*pr.exit);
+ const process_exit& pe (*pr.exit);
// For normal exit we assume the child process issued some
// diagnostics.
//
- if (e.normal ())
+ if (pe.normal ())
{
- // If this run was with the generated header support then we
- // have issued diagnostics and it's time to give up.
+ // If this run was with the generated header support then it's
+ // time to give up.
//
if (gen)
+ {
+ if (dbuf.is_open ())
+ dbuf.close (args, pe, 2 /* verbosity */);
+
throw failed ();
+ }
// Just to recap, being here means something is wrong with the
// source: it can be a missing generated header, it can be an
@@ -4600,7 +4991,12 @@ namespace build2
// or will issue diagnostics.
//
if (restart)
+ {
+ if (dbuf.is_open ())
+ dbuf.close ();
+
l6 ([&]{trace << "trying again without generated headers";});
+ }
else
{
// In some pathological situations we may end up switching
@@ -4625,19 +5021,24 @@ namespace build2
// example, because we have removed all the partially
// preprocessed source files).
//
- if (force_gen_skip && *force_gen_skip == skip_count)
{
- diag_record dr (fail);
+ diag_record dr;
+ if (force_gen_skip && *force_gen_skip == skip_count)
+ {
+ dr <<
+ fail << "inconsistent " << x_lang << " compiler behavior" <<
+ info << "run the following two commands to investigate";
- dr << "inconsistent " << x_lang << " compiler behavior" <<
- info << "run the following two commands to investigate";
+ dr << info;
+ print_process (dr, args.data ()); // No pipes.
- dr << info;
- print_process (dr, args.data ()); // No pipes.
+ init_args ((gen = true));
+ dr << info << "";
+ print_process (dr, args.data ()); // No pipes.
+ }
- init_args ((gen = true));
- dr << info << "";
- print_process (dr, args.data ()); // No pipes.
+ if (dbuf.is_open ())
+ dbuf.close (move (dr)); // Throws if error.
}
restart = true;
@@ -4648,7 +5049,15 @@ namespace build2
continue;
}
else
- run_finish (args, pr); // Throws.
+ {
+ if (dbuf.is_open ())
+ {
+ dbuf.close (args, pe, 2 /* verbosity */);
+ throw failed ();
+ }
+ else
+ run_finish (args, pr, 2 /* verbosity */);
+ }
}
catch (const process_error& e)
{
@@ -4674,7 +5083,9 @@ namespace build2
dd.expect ("");
puse = puse && !reprocess && psrc;
- return make_pair (move (psrc), puse);
+
+ result.first = move (psrc);
+ result.second = puse;
}
// Return the translation unit information (last argument) and its
@@ -4693,6 +5104,18 @@ namespace build2
{
tracer trace (x, "compile_rule::parse_unit");
+ // Scanning .S files with our parser is hazardous since such files
+ // sometimes use `#`-style comments. Presumably real compilers just
+ // ignore them in some way, but it doesn't seem worth it to bother in
+ // our case. Also, the checksum calculation over assembler tokens feels
+ // iffy.
+ //
+ if (x_assembler_cpp (src))
+ {
+ tu.type = unit_type::non_modular;
+ return "";
+ }
+
otype ot (li.type);
// If things go wrong give the user a bit extra context. Let's call it
@@ -4771,8 +5194,6 @@ namespace build2
case compiler_class::msvc: werror = "/WX"; break;
}
- bool clang (ctype == compiler_type::clang);
-
append_options (args, t, c_coptions, werror);
append_options (args, t, x_coptions, werror);
@@ -4787,11 +5208,16 @@ namespace build2
append_options (args, cmode);
append_sys_hdr_options (args);
+ // Note: no append_diag_color_options() call since the
+ // diagnostics is discarded.
+
// See perform_update() for details on the choice of options.
//
{
- bool sc (find_option_prefix ("/source-charset:", args));
- bool ec (find_option_prefix ("/execution-charset:", args));
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
if (!sc && !ec)
args.push_back ("/utf-8");
@@ -4807,15 +5233,16 @@ namespace build2
if (cvariant != "clang" && isystem (*this))
{
- if (find_option_prefix ("/external:I", args) &&
- !find_option_prefix ("/external:W", args))
+ if (find_option_prefixes ({"/external:I", "-external:I"}, args) &&
+ !find_option_prefixes ({"/external:W", "-external:W"}, args))
args.push_back ("/external:W0");
}
- if (x_lang == lang::cxx && !find_option_prefix ("/EH", args))
+ if (x_lang == lang::cxx &&
+ !find_option_prefixes ({"/EH", "-EH"}, args))
args.push_back ("/EHsc");
- if (!find_option_prefixes ({"/MD", "/MT"}, args))
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
args.push_back ("/MD");
args.push_back ("/E");
@@ -4829,10 +5256,12 @@ namespace build2
}
case compiler_class::gcc:
{
- append_options (args, cmode,
- cmode.size () - (modules && clang ? 1 : 0));
+ append_options (args, cmode);
append_sys_hdr_options (args);
+ // Note: no append_diag_color_options() call since the
+ // diagnostics is discarded.
+
// See perform_update() for details on the choice of options.
//
if (!find_option_prefix ("-finput-charset=", args))
@@ -4846,8 +5275,7 @@ namespace build2
if (ctype == compiler_type::clang && tsys == "win32-msvc")
{
- initializer_list<const char*> os {"-nostdlib", "-nostartfiles"};
- if (!find_options (os, cmode) && !find_options (os, args))
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
{
args.push_back ("-D_MT");
args.push_back ("-D_DLL");
@@ -4874,12 +5302,36 @@ namespace build2
//
if (ps)
{
- if (ctype == compiler_type::gcc)
+ switch (ctype)
{
- // Note that only these two *plus* -x do the trick.
- //
- args.push_back ("-fpreprocessed");
- args.push_back ("-fdirectives-only");
+ case compiler_type::gcc:
+ {
+ // Note that only these two *plus* -x do the trick.
+ //
+ args.push_back ("-fpreprocessed");
+ args.push_back ("-fdirectives-only");
+ break;
+ }
+ case compiler_type::clang:
+ {
+ // See below for details.
+ //
+ if (ctype == compiler_type::clang &&
+ cmaj >= (cvariant != "apple" ? 15 : 16))
+ {
+ if (find_options ({"-pedantic", "-pedantic-errors",
+ "-Wpedantic", "-Werror=pedantic"},
+ args))
+ {
+ args.push_back ("-Wno-gnu-line-marker");
+ }
+ }
+
+ break;
+ }
+ case compiler_type::msvc:
+ case compiler_type::icc:
+ assert (false);
}
}
@@ -4933,10 +5385,10 @@ namespace build2
print_process (args);
// We don't want to see warnings multiple times so ignore all
- // diagnostics.
+ // diagnostics (thus no need for diag_buffer).
//
pr = process (cpath,
- args.data (),
+ args,
0, -1, -2,
nullptr, // CWD
env.empty () ? nullptr : env.data ());
@@ -4948,7 +5400,7 @@ namespace build2
fdstream_mode::binary | fdstream_mode::skip);
parser p;
- p.parse (is, path_name (*sp), tu);
+ p.parse (is, path_name (*sp), tu, cid);
is.close ();
@@ -4963,7 +5415,9 @@ namespace build2
if (!modules)
{
if (ut != unit_type::non_modular || !mi.imports.empty ())
- fail << "modules support required by " << src;
+ fail << "modules support required by " << src <<
+ info << "consider enabling modules with "
+ << x << ".features.modules=true in root.build";
}
else
{
@@ -4988,18 +5442,6 @@ namespace build2
ut = md.type;
mi.name = src.path ().string ();
}
-
- // Prior to 15.5 (19.12) VC was not using the 'export module M;'
- // syntax so we use the preprequisite type to distinguish
- // between interface and implementation units.
- //
- // @@ TMP: probably outdated.
- //
- if (ctype == compiler_type::msvc && cmaj == 19 && cmin <= 11)
- {
- if (ut == unit_type::module_impl && src.is_a (*x_mod))
- ut = unit_type::module_intf;
- }
}
// If we were forced to reprocess, assume the checksum is not
@@ -5045,7 +5487,7 @@ namespace build2
info << "then run failing command to display compiler diagnostics";
}
else
- run_finish (args, pr); // Throws.
+ run_finish (args, pr, 2 /* verbosity */); // Throws.
}
catch (const process_error& e)
{
@@ -5214,6 +5656,9 @@ namespace build2
{
tracer trace (x, "compile_rule::search_modules");
+ context& ctx (bs.ctx);
+ const scope& rs (*bs.root_scope ());
+
// NOTE: currently we don't see header unit imports (they are handled by
// extract_headers() and are not in imports).
@@ -5249,7 +5694,7 @@ namespace build2
// So, the fuzzy match: the idea is that each match gets a score, the
// number of characters in the module name that got matched. A match
// with the highest score is used. And we use the (length + 1) for a
- // match against an actual module name.
+ // match against an actual (extracted) module name.
//
// Actually, the scoring system is a bit more elaborate than that.
// Consider module name core.window and two files, window.mxx and
@@ -5277,10 +5722,10 @@ namespace build2
// module (or partition) component. Failed that, we will match `format`
// to `print` because the last character (`t`) is the same.
//
- // For std.* modules we only accept non-fuzzy matches (think std.core vs
- // some core.mxx). And if such a module is unresolved, then we assume it
- // is pre-built and will be found by some other means (e.g., VC's
- // IFCPATH).
+ // For std.* modules we only accept non-fuzzy matches (think std.compat
+ // vs some compat.mxx). And if such a module is unresolved, then we
+ // assume it is pre-built and will be found by some other means (e.g.,
+ // VC's IFCPATH).
//
// Note also that we handle module partitions the same as submodules. In
// other words, for matching, `.` and `:` are treated the same.
@@ -5293,7 +5738,7 @@ namespace build2
// PPPPABBBB
//
// Where PPPP is the primary score, A is the A) score, and BBBB is
- // the B) scope described above. Zero signifies no match.
+ // the B) score described above. Zero signifies no match.
//
// We use decimal instead of binary packing to make it easier for the
// human to separate fields in the trace messages, during debugging,
@@ -5399,6 +5844,31 @@ namespace build2
if (!match)
return 0;
+ // Here is another corner case, the module is async_simple:IOExecutor
+ // and the file names are:
+ //
+ // IOExecutor.mxx
+ // SimpleIOExecutor.mxx
+ //
+ // The above implementation treats the latter as better because
+ // `Simple` in SimpleIOExecutor matches `simple` in async_simple. It's
+ // unclear what we can do about it without potentially breaking other
+ // legitimate cases (think Boost_Simple:IOExecutor). Maybe we could
+ // boost the exact partition name match score, similar to the exact
+ // module match, as some sort of a heuristics? Let's try.
+ //
+ if (fi == 0 && mi != 0 && m[mi - 1] == ':')
+ {
+ // Pretend we matched one short of the next module component. This
+ // way AsyncSimpleIOExecutor.mxx would still be a better match.
+ //
+ while (--mi != 0 && m[mi - 1] != '.')
+ ;
+
+ msep = (mi != 0); // For uncount logic below.
+ mi++; // One short.
+ }
+
// "Uncount" real separators.
//
if (fsep) fi++;
@@ -5427,6 +5897,20 @@ namespace build2
return ps * 100000 + as * 10000 + bs;
};
+#if 0
+ assert (match ("IOExecutor", "async_simple:IOExecutor") >
+ match ("SimpleIOExecutor", "async_simple:IOExecutor"));
+
+ assert (match ("IOExecutor", "async_simple:IOExecutor") <
+ match ("AsyncSimpleIOExecutor", "async_simple:IOExecutor"));
+
+ assert (match ("IOExecutor", "x.async_simple:IOExecutor") >
+ match ("SimpleIOExecutor", "x.async_simple:IOExecutor"));
+
+ assert (match ("IOExecutor", "x.async_simple:IOExecutor") <
+ match ("AsyncSimpleIOExecutor", "x.async_simple:IOExecutor"));
+#endif
+
auto& pts (t.prerequisite_targets[a]);
size_t start (pts.size ()); // Index of the first to be added.
@@ -5441,7 +5925,7 @@ namespace build2
// promise. It has to do with module re-exporting (export import M;).
// In this case (currently) all implementations simply treat it as a
// shallow (from the BMI's point of view) reference to the module (or an
- // implicit import, if you will). Do you see where it's going? Nowever
+ // implicit import, if you will). Do you see where it's going? Nowhere
// good, that's right. This shallow reference means that the compiler
// should be able to find BMIs for all the re-exported modules,
// recursively. The good news is we are actually in a pretty good shape
@@ -5487,6 +5971,7 @@ namespace build2
// so we actually don't need to pass any extra options (unless things
// get moved) but they still need access to the BMIs (and things will
// most likely have to be done differenly for distributed compilation).
+ // @@ Note: no longer the case for Clang either.
//
// So the revised plan: on the off chance that some implementation will
// do it differently we will continue maintaing the imported/re-exported
@@ -5580,6 +6065,8 @@ namespace build2
continue; // Scan the rest to detect if all done.
}
}
+ else
+ assert (name != m.name); // No duplicates.
done = false;
}
@@ -5607,10 +6094,18 @@ namespace build2
//
if (pt->is_a<bmix> ())
{
- const string& n (cast<string> (pt->state[a].vars[c_module_name]));
-
- if (const target** p = check_exact (n))
- *p = pt;
+ // If the extraction of the module information for this BMI failed
+ // and we have deferred failure to compiler diagnostics, then
+ // there will be no module name assigned. It would have been
+ // better to make sure that's the cause, but that won't be easy.
+ //
+ const string* n (cast_null<string> (
+ pt->state[a].vars[c_module_name]));
+ if (n != nullptr)
+ {
+ if (const target** p = check_exact (*n))
+ *p = pt;
+ }
}
else if (pt->is_a (*x_mod))
{
@@ -5619,7 +6114,8 @@ namespace build2
// rule puts them into prerequisite_targets for us).
//
// The module names should be specified but if not assume
- // something else is going on and ignore.
+ // something else is going on (like a deferred failure) and
+ // ignore.
//
// Note also that besides modules, prerequisite_targets may
// contain libraries which are interface dependencies of this
@@ -5632,7 +6128,15 @@ namespace build2
continue;
if (const target** p = check_exact (*n))
- *p = &this->make_module_sidebuild (a, bs, l, *pt, *n); // GCC 4.9
+ {
+ // It seems natural to build a BMI type that corresponds to the
+ // library type. After all, this is where the object file part
+ // of the BMI is going to come from (unless it's a module
+ // interface-only library).
+ //
+ *p = &this->make_module_sidebuild (
+ a, bs, &l, link_type (l).type, *pt, *n).first; // GCC 4.9
+ }
}
// Note that in prerequisite targets we will have the libux{}
// members, not the group.
@@ -5647,112 +6151,295 @@ namespace build2
}
};
- for (prerequisite_member p: group_prerequisite_members (a, t))
+ // Pre-resolve std modules in an ad hoc way for certain compilers.
+ //
+ // @@ TODO: cache x_stdlib value.
+ //
+ if ((ctype == compiler_type::msvc) ||
+ (ctype == compiler_type::clang &&
+ cmaj >= 17 &&
+ cast<string> (rs[x_stdlib]) == "libc++"))
{
- if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
- continue;
-
- const target* pt (p.load ()); // Should be cached for libraries.
+ // Similar logic to check_exact() above.
+ //
+ done = true;
- if (pt != nullptr)
+ for (size_t i (0); i != n; ++i)
{
- const file* lt (nullptr);
-
- if (const libx* l = pt->is_a<libx> ())
- lt = link_member (*l, a, li);
- else if (pt->is_a<liba> () || pt->is_a<libs> () || pt->is_a<libux> ())
- lt = &pt->as<file> ();
+ module_import& m (imports[i]);
- // If this is a library, check its bmi{}s and mxx{}s.
- //
- if (lt != nullptr)
+ if (m.name == "std" || m.name == "std.compat")
{
- find (*lt, find);
+ otype ot (otype::e);
+ const target* mt (nullptr);
- if (done)
- break;
+ switch (ctype)
+ {
+ case compiler_type::clang:
+ {
+ if (m.name != "std")
+ fail << "module " << m.name << " not yet provided by libc++";
- continue;
- }
+ // Find or insert std.cppm (similar code to pkgconfig.cxx).
+ //
+ // Note: build_install_data is absolute and normalized.
+ //
+ mt = &ctx.targets.insert_locked (
+ *x_mod,
+ (dir_path (build_install_data) /= "libbuild2") /= "cc",
+ dir_path (),
+ "std",
+ string ("cppm"), // For C++14 during bootstrap.
+ target_decl::implied,
+ trace).first;
+
+ // Which output type should we use, static or shared? The
+ // correct way would be to detect whether static or shared
+ // version of libc++ is to be linked and use the corresponding
+ // type. And we could do that by looking for -static-libstdc++
+ // in loption (and no, it's not -static-libc++).
+ //
+ // But, looking at the object file produced from std.cppm, it
+ // only contains one symbol, the static object initializer.
+ // And this is unlikely to change since all other non-inline
+ // or template symbols should be in libc++. So feels like it's
+ // not worth the trouble and one variant should be good enough
+ // for both cases. Let's use the shared one for less
+ // surprising diagnostics (as in, "why are you linking obje{}
+ // to a shared library?")
+ //
+ // (Of course, theoretically, std.cppm could detect via a
+ // macro whether it's being compiled with -fPIC or not and do
+ // things differently, but this seems far-fetched).
+ //
+ ot = otype::s;
- // Fall through.
- }
+ break;
+ }
+ case compiler_type::msvc:
+ {
+ // For MSVC, the source files std.ixx and std.compat.ixx are
+ // found in the modules/ subdirectory which is a sibling of
+ // include/ in the MSVC toolset (and "that is a contract with
+ // customers" to quote one of the developers).
+ //
+ // The problem of course is that there are multiple system
+ // header search directories (for example, as specified in the
+ // INCLUDE environment variable) and which one of them is for
+ // the MSVC toolset is not specified. So what we are going to
+ // do is search for one of the well-known standard C++ headers
+ // and assume that the directory where we found it is the one
+ // we are looking for. Or we could look for something
+ // MSVC-specific like vcruntime.h.
+ //
+ dir_path modules;
+ if (optional<path> p = find_system_header (path ("vcruntime.h")))
+ {
+ p->make_directory (); // Strip vcruntime.h.
+ if (p->leaf () == path ("include")) // Sanity check.
+ {
+ modules = path_cast<dir_path> (move (p->make_directory ()));
+ modules /= "modules";
+ }
+ }
- // While it would have been even better not to search for a target, we
- // need to get hold of the corresponding mxx{} (unlikely but possible
- // for bmi{} to have a different name).
- //
- // While we want to use group_prerequisite_members() below, we cannot
- // call resolve_group() since we will be doing it "speculatively" for
- // modules that we may use but also for modules that may use us. This
- // quickly leads to deadlocks. So instead we are going to perform an
- // ad hoc group resolution.
- //
- const target* pg;
- if (p.is_a<bmi> ())
- {
- pg = pt != nullptr ? pt : &p.search (t);
- pt = &search (t, btt, p.key ()); // Same logic as in picking obj*{}.
- }
- else if (p.is_a (btt))
- {
- pg = &search (t, bmi::static_type, p.key ());
- if (pt == nullptr) pt = &p.search (t);
+ if (modules.empty ())
+ fail << "unable to locate MSVC standard modules directory";
+
+ mt = &ctx.targets.insert_locked (
+ *x_mod,
+ move (modules),
+ dir_path (),
+ m.name,
+ string ("ixx"), // For C++14 during bootstrap.
+ target_decl::implied,
+ trace).first;
+
+ // For MSVC it's easier to detect the runtime being used since
+ // it's specified with the compile options (/MT[d], /MD[d]).
+ //
+ // Similar semantics as in extract_headers() except here we
+ // use options visible from the root scope. Note that
+ // find_option_prefixes() looks in reverse, so look in the
+ // cmode, x_coptions, c_coptions order.
+ //
+ initializer_list<const char*> os {"/MD", "/MT", "-MD", "-MT"};
+
+ const string* o;
+ if ((o = find_option_prefixes (os, cmode)) != nullptr ||
+ (o = find_option_prefixes (os, rs, x_coptions)) != nullptr ||
+ (o = find_option_prefixes (os, rs, c_coptions)) != nullptr)
+ {
+ ot = (*o)[2] == 'D' ? otype::s : otype::a;
+ }
+ else
+ ot = otype::s; // The default is /MD.
+
+ break;
+ }
+ case compiler_type::gcc:
+ case compiler_type::icc:
+ assert (false);
+ };
+
+ pair<target&, ulock> tl (
+ this->make_module_sidebuild ( // GCC 4.9
+ a, bs, nullptr, ot, *mt, m.name));
+
+ if (tl.second.owns_lock ())
+ {
+ // Special compile options for the std modules.
+ //
+ if (ctype == compiler_type::clang)
+ {
+ value& v (tl.first.append_locked (x_coptions));
+
+ if (v.null)
+ v = strings {};
+
+ strings& cops (v.as<strings> ());
+
+ switch (ctype)
+ {
+ case compiler_type::clang:
+ {
+ cops.push_back ("-Wno-reserved-module-identifier");
+ break;
+ }
+ case compiler_type::msvc:
+ // It appears nothing special is needed to compile MSVC
+ // standard modules.
+ case compiler_type::gcc:
+ case compiler_type::icc:
+ assert (false);
+ };
+ }
+
+ tl.second.unlock ();
+ }
+
+ pts[start + i].target = &tl.first;
+ m.score = match_max (m.name) + 1;
+ continue; // Scan the rest to detect if all done.
+ }
+
+ done = false;
}
- else
- continue;
+ }
- // Find the mxx{} prerequisite and extract its "file name" for the
- // fuzzy match unless the user specified the module name explicitly.
- //
- for (prerequisite_member p:
- prerequisite_members (a, t, group_prerequisites (*pt, pg)))
+ // Go over prerequisites and try to resolve imported modules with them.
+ //
+ if (!done)
+ {
+ for (prerequisite_member p: group_prerequisite_members (a, t))
{
if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
continue;
- if (p.is_a (*x_mod))
+ const target* pt (p.load ()); // Should be cached for libraries.
+
+ if (pt != nullptr)
{
- // Check for an explicit module name. Only look for an existing
- // target (which means the name can only be specified on the
- // target itself, not target type/pattern-spec).
+ const file* lt (nullptr);
+
+ if (const libx* l = pt->is_a<libx> ())
+ lt = link_member (*l, a, li);
+ else if (pt->is_a<liba> () ||
+ pt->is_a<libs> () ||
+ pt->is_a<libux> ())
+ lt = &pt->as<file> ();
+
+ // If this is a library, check its bmi{}s and mxx{}s.
//
- const target* t (p.search_existing ());
- const string* n (t != nullptr
- ? cast_null<string> (t->vars[c_module_name])
- : nullptr);
- if (n != nullptr)
+ if (lt != nullptr)
{
- if (const target** p = check_exact (*n))
- *p = pt;
+ find (*lt, find);
+
+ if (done)
+ break;
+
+ continue;
}
- else
+
+ // Fall through.
+ }
+
+ // While it would have been even better not to search for a target,
+ // we need to get hold of the corresponding mxx{} (unlikely but
+ // possible for bmi{} to have a different name).
+ //
+ // While we want to use group_prerequisite_members() below, we
+ // cannot call resolve_group() since we will be doing it
+ // "speculatively" for modules that we may use but also for modules
+ // that may use us. This quickly leads to deadlocks. So instead we
+ // are going to perform an ad hoc group resolution.
+ //
+ const target* pg;
+ if (p.is_a<bmi> ())
+ {
+ pg = pt != nullptr ? pt : &p.search (t);
+ pt = &search (t, btt, p.key ()); // Same logic as in picking obj*{}.
+ }
+ else if (p.is_a (btt))
+ {
+ pg = &search (t, bmi::static_type, p.key ());
+ if (pt == nullptr) pt = &p.search (t);
+ }
+ else
+ continue;
+
+ // Find the mxx{} prerequisite and extract its "file name" for the
+ // fuzzy match unless the user specified the module name explicitly.
+ //
+ for (prerequisite_member p:
+ prerequisite_members (a, t, group_prerequisites (*pt, pg)))
+ {
+ if (include (a, t, p) != include_type::normal) // Excluded/ad hoc.
+ continue;
+
+ if (p.is_a (*x_mod))
{
- // Fuzzy match.
+ // Check for an explicit module name. Only look for an existing
+ // target (which means the name can only be specified on the
+ // target itself, not target type/pattern-spec).
//
- string f;
+ const target* mt (p.search_existing ());
+ const string* n (mt != nullptr
+ ? cast_null<string> (mt->vars[c_module_name])
+ : nullptr);
+ if (n != nullptr)
+ {
+ if (const target** p = check_exact (*n))
+ *p = pt;
+ }
+ else
+ {
+ // Fuzzy match.
+ //
+ string f;
- // Add the directory part if it is relative. The idea is to
- // include it into the module match, say hello.core vs
- // hello/mxx{core}.
- //
- // @@ MOD: Why not for absolute? Good question. What if it
- // contains special components, say, ../mxx{core}?
- //
- const dir_path& d (p.dir ());
+ // Add the directory part if it is relative. The idea is to
+ // include it into the module match, say hello.core vs
+ // hello/mxx{core}.
+ //
+ // @@ MOD: Why not for absolute? Good question. What if it
+ // contains special components, say, ../mxx{core}?
+ //
+ const dir_path& d (p.dir ());
- if (!d.empty () && d.relative ())
- f = d.representation (); // Includes trailing slash.
+ if (!d.empty () && d.relative ())
+ f = d.representation (); // Includes trailing slash.
- f += p.name ();
- check_fuzzy (pt, f);
+ f += p.name ();
+ check_fuzzy (pt, f);
+ }
+ break;
}
- break;
}
- }
- if (done)
- break;
+ if (done)
+ break;
+ }
}
// Diagnose unresolved modules.
@@ -5822,9 +6509,12 @@ namespace build2
if (m.score <= match_max (in))
{
- const string& mn (cast<string> (bt->state[a].vars[c_module_name]));
+      // As above (deferred failure).
+ //
+ const string* mn (
+ cast_null<string> (bt->state[a].vars[c_module_name]));
- if (in != mn)
+ if (mn != nullptr && in != *mn)
{
// Note: matched, so the group should be resolved.
//
@@ -5838,7 +6528,7 @@ namespace build2
fail (relative (src))
<< "failed to correctly guess module name from " << p <<
info << "guessed: " << in <<
- info << "actual: " << mn <<
+ info << "actual: " << *mn <<
info << "consider adjusting module interface file names or" <<
info << "consider specifying module name with " << x
<< ".module_name";
@@ -5866,12 +6556,15 @@ namespace build2
if (et == nullptr)
continue; // Unresolved (std.*).
- const string& mn (cast<string> (et->state[a].vars[c_module_name]));
+ // As above (deferred failure).
+ //
+ const string* mn (cast_null<string> (et->state[a].vars[c_module_name]));
- if (find_if (imports.begin (), imports.end (),
- [&mn] (const module_import& i)
+ if (mn != nullptr &&
+ find_if (imports.begin (), imports.end (),
+ [mn] (const module_import& i)
{
- return i.name == mn;
+ return i.name == *mn;
}) == imports.end ())
{
pts.push_back (et);
@@ -5882,10 +6575,10 @@ namespace build2
// but it's probably not worth it if we have a small string
// optimization.
//
- import_type t (mn.find (':') != string::npos
+ import_type t (mn->find (':') != string::npos
? import_type::module_part
: import_type::module_intf);
- imports.push_back (module_import {t, mn, true, 0});
+ imports.push_back (module_import {t, *mn, true, 0});
}
}
}
@@ -5905,6 +6598,10 @@ namespace build2
// Find or create a modules sidebuild subproject returning its root
// directory.
//
+ // @@ Could we omit creating a subproject if the sidebuild scope is the
+ // project scope itself? This would speed up simple examples (and
+ // potentially direct compilation that we may support).
+ //
pair<dir_path, const scope&> compile_rule::
find_modules_sidebuild (const scope& rs) const
{
@@ -6009,13 +6706,18 @@ namespace build2
return pair<dir_path, const scope&> (move (pd), *as);
}
- // Synthesize a dependency for building a module binary interface on
- // the side.
+ // Synthesize a dependency for building a module binary interface of a
+ // library on the side. If library is missing, then assume it's some
+ // ad hoc/system library case (in which case we assume it's binless,
+ // for now).
//
- const file& compile_rule::
+ // The return value semantics is as in target_set::insert_locked().
+ //
+ pair<target&, ulock> compile_rule::
make_module_sidebuild (action a,
const scope& bs,
- const file& lt,
+ const file* lt,
+ otype ot,
const target& mt,
const string& mn) const
{
@@ -6036,24 +6738,20 @@ namespace build2
back_inserter (mf),
[] (char c) {return c == '.' ? '-' : c == ':' ? '+' : c;});
- // It seems natural to build a BMI type that corresponds to the library
- // type. After all, this is where the object file part of the BMI is
- // going to come from (unless it's a module interface-only library).
- //
- const target_type& tt (compile_types (link_type (lt).type).bmi);
+ const target_type& tt (compile_types (ot).bmi);
// Store the BMI target in the subproject root. If the target already
// exists then we assume all this is already done (otherwise why would
// someone have created such a target).
//
- if (const file* bt = bs.ctx.targets.find<file> (
+ if (const target* bt = bs.ctx.targets.find (
tt,
pd,
dir_path (), // Always in the out tree.
mf,
nullopt, // Use default extension.
trace))
- return *bt;
+ return pair<target&, ulock> (const_cast<target&> (*bt), ulock ());
prerequisites ps;
ps.push_back (prerequisite (mt));
@@ -6066,19 +6764,22 @@ namespace build2
//
// Note: lt is matched and so the group is resolved.
//
- ps.push_back (prerequisite (lt));
- for (prerequisite_member p: group_prerequisite_members (a, lt))
+ if (lt != nullptr)
{
- // Ignore update=match.
- //
- lookup l;
- if (include (a, lt, p, &l) != include_type::normal) // Excluded/ad hoc.
- continue;
-
- if (p.is_a<libx> () ||
- p.is_a<liba> () || p.is_a<libs> () || p.is_a<libux> ())
+ ps.push_back (prerequisite (*lt));
+ for (prerequisite_member p: group_prerequisite_members (a, *lt))
{
- ps.push_back (p.as_prerequisite ());
+ // Ignore update=match.
+ //
+ lookup l;
+ if (include (a, *lt, p, &l) != include_type::normal) // Excluded/ad hoc.
+ continue;
+
+ if (p.is_a<libx> () ||
+ p.is_a<liba> () || p.is_a<libs> () || p.is_a<libux> ())
+ {
+ ps.push_back (p.as_prerequisite ());
+ }
}
}
@@ -6091,22 +6792,22 @@ namespace build2
target_decl::implied,
trace,
true /* skip_find */));
- file& bt (p.first.as<file> ());
// Note that this is racy and someone might have created this target
// while we were preparing the prerequisite list.
//
if (p.second)
{
- bt.prerequisites (move (ps));
+ p.first.prerequisites (move (ps));
// Unless this is a binless library, we don't need the object file
// (see config_data::b_binless for details).
//
- bt.vars.assign (b_binless) = (lt.mtime () == timestamp_unreal);
+ p.first.vars.assign (b_binless) = (lt == nullptr ||
+ lt->mtime () == timestamp_unreal);
}
- return bt;
+ return p;
}
// Synthesize a dependency for building a header unit binary interface on
@@ -6222,7 +6923,9 @@ namespace build2
//
process_libraries (a, bs, nullopt, sys_lib_dirs,
*f, la, 0, // lflags unused.
- imp, lib, nullptr, true /* self */,
+ imp, lib, nullptr,
+ true /* self */,
+ false /* proc_opt_group */,
&lib_cache);
if (lt != nullptr)
@@ -6368,7 +7071,7 @@ namespace build2
// Filter cl.exe noise (msvc.cxx).
//
void
- msvc_filter_cl (ifdstream&, const path& src);
+ msvc_filter_cl (diag_buffer&, const path& src);
// Append header unit-related options.
//
@@ -6419,7 +7122,7 @@ namespace build2
// options).
//
void compile_rule::
- append_module_options (environment& env,
+ append_module_options (environment&,
cstrings& args,
small_vector<string, 2>& stor,
action a,
@@ -6430,8 +7133,6 @@ namespace build2
unit_type ut (md.type);
const module_positions& ms (md.modules);
- dir_path stdifc; // See the VC case below.
-
switch (ctype)
{
case compiler_type::gcc:
@@ -6460,15 +7161,12 @@ namespace build2
if (ms.start == 0)
return;
- // Clang embeds module file references so we only need to specify
- // our direct imports.
- //
- // If/when we get the ability to specify the mapping in a file, we
- // will pass the whole list.
+ // If/when we get the ability to specify the mapping in a file.
//
#if 0
// In Clang the module implementation's unit .pcm is special and
- // must be "loaded".
+ // must be "loaded". Note: not anymore, not from Clang 16 and is
+ // deprecated in 17.
//
if (ut == unit_type::module_impl)
{
@@ -6485,10 +7183,7 @@ namespace build2
stor.push_back (move (s));
#else
auto& pts (t.prerequisite_targets[a]);
- for (size_t i (ms.start),
- n (ms.copied != 0 ? ms.copied : pts.size ());
- i != n;
- ++i)
+ for (size_t i (ms.start), n (pts.size ()); i != n; ++i)
{
const target* pt (pts[i]);
@@ -6501,17 +7196,9 @@ namespace build2
const file& f (pt->as<file> ());
string s (relative (f.path ()).string ());
- // In Clang the module implementation's unit .pcm is special and
- // must be "loaded".
- //
- if (ut == unit_type::module_impl && i == ms.start)
- s.insert (0, "-fmodule-file=");
- else
- {
- s.insert (0, 1, '=');
- s.insert (0, cast<string> (f.state[a].vars[c_module_name]));
- s.insert (0, "-fmodule-file=");
- }
+ s.insert (0, 1, '=');
+ s.insert (0, cast<string> (f.state[a].vars[c_module_name]));
+ s.insert (0, "-fmodule-file=");
stor.push_back (move (s));
}
@@ -6523,10 +7210,11 @@ namespace build2
if (ms.start == 0)
return;
+ // MSVC requires a transitive set of interfaces, including
+ // implementation partitions.
+ //
auto& pts (t.prerequisite_targets[a]);
- for (size_t i (ms.start), n (pts.size ());
- i != n;
- ++i)
+ for (size_t i (ms.start), n (pts.size ()); i != n; ++i)
{
const target* pt (pts[i]);
@@ -6537,34 +7225,14 @@ namespace build2
// of these are bmi's.
//
const file& f (pt->as<file> ());
+ string s (relative (f.path ()).string ());
- // In VC std.* modules can only come from a single directory
- // specified with the IFCPATH environment variable or the
- // /module:stdIfcDir option.
- //
- if (std_module (cast<string> (f.state[a].vars[c_module_name])))
- {
- dir_path d (f.path ().directory ());
+ s.insert (0, 1, '=');
+ s.insert (0, cast<string> (f.state[a].vars[c_module_name]));
- if (stdifc.empty ())
- {
- // Go one directory up since /module:stdIfcDir will look in
- // either Release or Debug subdirectories. Keeping the result
- // absolute feels right.
- //
- stor.push_back ("/module:stdIfcDir");
- stor.push_back (d.directory ().string ());
- stdifc = move (d);
- }
- else if (d != stdifc) // Absolute and normalized.
- fail << "multiple std.* modules in different directories";
- }
- else
- {
- stor.push_back ("/module:reference");
- stor.push_back (relative (f.path ()).string ());
- }
+ stor.push_back (move (s));
}
+
break;
}
case compiler_type::icc:
@@ -6575,25 +7243,11 @@ namespace build2
// into storage? Because of potential reallocations.
//
for (const string& a: stor)
- args.push_back (a.c_str ());
-
- if (getenv ("IFCPATH"))
- {
- // VC's IFCPATH takes precedence over /module:stdIfcDir so unset it if
- // we are using our own std modules. Note: IFCPATH saved in guess.cxx.
- //
- if (!stdifc.empty ())
- env.push_back ("IFCPATH");
- }
- else if (stdifc.empty ())
{
- // Add the VC's default directory (should be only one).
- //
- if (sys_mod_dirs != nullptr && !sys_mod_dirs->empty ())
- {
- args.push_back ("/module:stdIfcDir");
- args.push_back (sys_mod_dirs->front ().string ().c_str ());
- }
+ if (ctype == compiler_type::msvc)
+ args.push_back ("/reference");
+
+ args.push_back (a.c_str ());
}
}
@@ -6666,7 +7320,8 @@ namespace build2
// If we are building a module interface or partition, then the target
// is bmi*{} and it may have an ad hoc obj*{} member. For header units
// there is no obj*{} (see the corresponding add_adhoc_member() call in
- // apply()).
+ // apply()). For named modules there may be no obj*{} if this is a
+ // sidebuild (obj*{} is already in the library binary).
//
path relm;
path relo;
@@ -6714,9 +7369,6 @@ namespace build2
small_vector<string, 2> header_args; // Header unit options storage.
small_vector<string, 2> module_args; // Module options storage.
- size_t out_i (0); // Index of the -o option.
- size_t lang_n (0); // Number of lang options.
-
switch (cclass)
{
case compiler_class::msvc:
@@ -6736,14 +7388,20 @@ namespace build2
if (md.pp != preprocessed::all)
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // Note: could be overridden in mode.
+ //
+ append_diag_color_options (args);
+
// Set source/execution charsets to UTF-8 unless a custom charset
// is specified.
//
// Note that clang-cl supports /utf-8 and /*-charset.
//
{
- bool sc (find_option_prefix ("/source-charset:", args));
- bool ec (find_option_prefix ("/execution-charset:", args));
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
if (!sc && !ec)
args.push_back ("/utf-8");
@@ -6762,8 +7420,8 @@ namespace build2
//
if (cvariant != "clang" && isystem (*this))
{
- if (find_option_prefix ("/external:I", args) &&
- !find_option_prefix ("/external:W", args))
+ if (find_option_prefixes ({"/external:I", "-external:I"}, args) &&
+ !find_option_prefixes ({"/external:W", "-external:W"}, args))
args.push_back ("/external:W0");
}
@@ -6777,7 +7435,9 @@ namespace build2
// For C looks like no /EH* (exceptions supported but no C++ objects
// destroyed) is a reasonable default.
//
- if (x_lang == lang::cxx && !find_option_prefix ("/EH", args))
+
+ if (x_lang == lang::cxx &&
+ !find_option_prefixes ({"/EH", "-EH"}, args))
args.push_back ("/EHsc");
// The runtime is a bit more interesting. At first it may seem like
@@ -6799,7 +7459,7 @@ namespace build2
// unreasonable thing to do). So by default we will always use the
// release runtime.
//
- if (!find_option_prefixes ({"/MD", "/MT"}, args))
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
args.push_back ("/MD");
msvc_sanitize_cl (args);
@@ -6822,9 +7482,8 @@ namespace build2
// Note also that what we are doing here appears to be incompatible
// with PCH (/Y* options) and /Gm (minimal rebuild).
//
- // @@ MOD: TODO deal with absent relo.
- //
- if (find_options ({"/Zi", "/ZI"}, args))
+ if (!relo.empty () &&
+ find_options ({"/Zi", "/ZI", "-Zi", "-ZI"}, args))
{
if (fc)
args.push_back ("/Fd:");
@@ -6837,27 +7496,38 @@ namespace build2
args.push_back (out1.c_str ());
}
- if (fc)
- {
- args.push_back ("/Fo:");
- args.push_back (relo.string ().c_str ());
- }
- else
+ if (ut == unit_type::module_intf ||
+ ut == unit_type::module_intf_part ||
+ ut == unit_type::module_impl_part ||
+ ut == unit_type::module_header)
{
- out = "/Fo" + relo.string ();
- args.push_back (out.c_str ());
- }
+ assert (ut != unit_type::module_header); // @@ MODHDR
- // @@ MODHDR MSVC
- // @@ MODPART MSVC
- //
- if (ut == unit_type::module_intf)
- {
relm = relative (tp);
- args.push_back ("/module:interface");
- args.push_back ("/module:output");
+ args.push_back ("/ifcOutput");
args.push_back (relm.string ().c_str ());
+
+ if (relo.empty ())
+ args.push_back ("/ifcOnly");
+ else
+ {
+ args.push_back ("/Fo:");
+ args.push_back (relo.string ().c_str ());
+ }
+ }
+ else
+ {
+ if (fc)
+ {
+ args.push_back ("/Fo:");
+ args.push_back (relo.string ().c_str ());
+ }
+ else
+ {
+ out = "/Fo" + relo.string ();
+ args.push_back (out.c_str ());
+ }
}
// Note: no way to indicate that the source if already preprocessed.
@@ -6872,9 +7542,53 @@ namespace build2
{
append_options (args, cmode);
+ // Clang 15 introduced the unqualified-std-cast-call warning which
+ // warns about unqualified calls to std::move() and std::forward()
+ // (because they can be "hijacked" via ADL). Surprisingly, this
+ // warning is enabled by default, as opposed to with -Wextra or at
+ // least -Wall. It has also proven to be quite disruptive, causing a
+ // large number of warnings in a large number of packages. So we are
+ // going to "remap" it to -Wextra for now and in the future may
+ // "relax" it to -Wall and potentially to being enabled by default.
+ // See GitHub issue #259 for background and details.
+ //
+ if (x_lang == lang::cxx &&
+ ctype == compiler_type::clang &&
+ cmaj >= 15)
+ {
+ bool w (false); // Seen -W[no-]unqualified-std-cast-call
+ optional<bool> extra; // Seen -W[no-]extra
+
+ for (const char* s: reverse_iterate (args))
+ {
+ if (s != nullptr)
+ {
+ if (strcmp (s, "-Wunqualified-std-cast-call") == 0 ||
+ strcmp (s, "-Wno-unqualified-std-cast-call") == 0)
+ {
+ w = true;
+ break;
+ }
+
+ if (!extra) // Last seen option wins.
+ {
+ if (strcmp (s, "-Wextra") == 0) extra = true;
+ else if (strcmp (s, "-Wno-extra") == 0) extra = false;
+ }
+ }
+ }
+
+ if (!w && (!extra || !*extra))
+ args.push_back ("-Wno-unqualified-std-cast-call");
+ }
+
if (md.pp != preprocessed::all)
append_sys_hdr_options (args); // Extra system header dirs (last).
+ // Note: could be overridden in mode.
+ //
+ append_diag_color_options (args);
+
// Set the input charset to UTF-8 unless a custom one is specified.
//
// Note that the execution charset (-fexec-charset) is UTF-8 by
@@ -6928,8 +7642,7 @@ namespace build2
// either -nostdlib or -nostartfiles is specified. Let's do
// the same.
//
- initializer_list<const char*> os {"-nostdlib", "-nostartfiles"};
- if (!find_options (os, cmode) && !find_options (os, args))
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
{
args.push_back ("-D_MT");
args.push_back ("-D_DLL");
@@ -6991,10 +7704,6 @@ namespace build2
append_header_options (env, args, header_args, a, t, md, md.dd);
append_module_options (env, args, module_args, a, t, md, md.dd);
- // Note: the order of the following options is relied upon below.
- //
- out_i = args.size (); // Index of the -o option.
-
if (ut == unit_type::module_intf ||
ut == unit_type::module_intf_part ||
ut == unit_type::module_impl_part ||
@@ -7033,21 +7742,35 @@ namespace build2
}
case compiler_type::clang:
{
- // @@ MOD TODO: deal with absent relo.
+ assert (ut != unit_type::module_header); // @@ MODHDR
relm = relative (tp);
- args.push_back ("-o");
- args.push_back (relm.string ().c_str ());
- args.push_back ("--precompile");
-
// Without this option Clang's .pcm will reference source
- // files. In our case this file may be transient (.ii). Plus,
+ // files. In our case this file may be transient (.ii). Plus,
// it won't play nice with distributed compilation.
//
+ // Note that this sort of appears to be the default from Clang
+ // 17, but not quite, see llvm-project issued #72383.
+ //
args.push_back ("-Xclang");
args.push_back ("-fmodules-embed-all-files");
+ if (relo.empty ())
+ {
+ args.push_back ("-o");
+ args.push_back (relm.string ().c_str ());
+ args.push_back ("--precompile");
+ }
+ else
+ {
+ out1 = "-fmodule-output=" + relm.string ();
+ args.push_back (out1.c_str ());
+ args.push_back ("-o");
+ args.push_back (relo.string ().c_str ());
+ args.push_back ("-c");
+ }
+
break;
}
case compiler_type::msvc:
@@ -7062,7 +7785,7 @@ namespace build2
args.push_back ("-c");
}
- lang_n = append_lang_options (args, md);
+ append_lang_options (args, md);
if (md.pp == preprocessed::all)
{
@@ -7107,23 +7830,44 @@ namespace build2
if (!env.empty ())
env.push_back (nullptr);
+ // We have no choice but to serialize early if we want the command line
+ // printed shortly before actually executing the compiler. Failed that,
+ // it may look like we are still executing in parallel.
+ //
+ scheduler::alloc_guard jobs_ag;
+ if (!ctx.dry_run && cast_false<bool> (t[c_serialize]))
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, phase_unlock (nullptr));
+
// With verbosity level 2 print the command line as if we are compiling
// the source file, not its preprocessed version (so that it's easy to
// copy and re-run, etc). Only at level 3 and above print the real deal.
//
+ // @@ TODO: why don't we print env (here and/or below)? Also link rule.
+ //
if (verb == 1)
- text << x_name << ' ' << s;
+ {
+ const char* name (x_assembler_cpp (s) ? "as-cpp" :
+ x_objective (s) ? x_obj_name :
+ x_name);
+
+ print_diag (name, s, t);
+ }
else if (verb == 2)
print_process (args);
// If we have the (partially) preprocessed output, switch to that.
//
- bool psrc (md.psrc);
+ // But we remember the original source/position to restore later.
+ //
+ bool psrc (md.psrc); // Note: false if cc.reprocess.
bool ptmp (psrc && md.psrc.temporary);
+ pair<size_t, const char*> osrc;
if (psrc)
{
args.pop_back (); // nullptr
+ osrc.second = args.back ();
args.pop_back (); // sp
+ osrc.first = args.size ();
sp = &md.psrc.path ();
@@ -7133,25 +7877,40 @@ namespace build2
{
case compiler_type::gcc:
{
- // The -fpreprocessed is implied by .i/.ii. But not when compiling
- // a header unit (there is no .hi/.hii).
+ // -fpreprocessed is implied by .i/.ii unless compiling a header
+ // unit (there is no .hi/.hii). Also, we would need to pop -x
+ // since it takes precedence over the extension, which would mess
+ // up our osrc logic. So in the end it feels like always passing
+ // explicit -fpreprocessed is the way to go.
//
- if (ut == unit_type::module_header)
- args.push_back ("-fpreprocessed");
- else
- // Pop -x since it takes precedence over the extension.
- //
- // @@ I wonder why bother and not just add -fpreprocessed? Are
- // we trying to save an option or does something break?
- //
- for (; lang_n != 0; --lang_n)
- args.pop_back ();
-
+ // Also note that similarly there is no .Si for .S files.
+ //
+ args.push_back ("-fpreprocessed");
args.push_back ("-fdirectives-only");
break;
}
case compiler_type::clang:
{
+ // Clang 15 and later with -pedantic warns about GNU-style line
+ // markers that it wrote itself in the -frewrite-includes output
+ // (llvm-project issue 63284). So we suppress this warning unless
+ // compiling from source.
+ //
+ // In Apple Clang this warning/option are absent in 14.0.3 (which
+ // is said to be based on vanilla Clang 15.0.5) for some reason
+ // (let's hope it's because they patched it out rather than due to
+ // a misleading _LIBCPP_VERSION value).
+ //
+ if (ctype == compiler_type::clang &&
+ cmaj >= (cvariant != "apple" ? 15 : 16))
+ {
+ if (find_options ({"-pedantic", "-pedantic-errors",
+ "-Wpedantic", "-Werror=pedantic"}, args))
+ {
+ args.push_back ("-Wno-gnu-line-marker");
+ }
+ }
+
// Note that without -x Clang will treat .i/.ii as fully
// preprocessed.
//
@@ -7200,45 +7959,38 @@ namespace build2
file_cache::read psrcr (psrc ? md.psrc.open () : file_cache::read ());
// VC cl.exe sends diagnostics to stdout. It also prints the file
- // name being compiled as the first line. So for cl.exe we redirect
- // stdout to a pipe, filter that noise out, and send the rest to
- // stderr.
+ // name being compiled as the first line. So for cl.exe we filter
+ // that noise out.
//
- // For other compilers redirect stdout to stderr, in case any of
- // them tries to pull off something similar. For sane compilers this
- // should be harmless.
+ // For other compilers also redirect stdout to stderr, in case any
+ // of them tries to pull off something similar. For sane compilers
+ // this should be harmless.
//
bool filter (ctype == compiler_type::msvc);
process pr (cpath,
- args.data (),
- 0, (filter ? -1 : 2), 2,
+ args,
+ 0, 2, diag_buffer::pipe (ctx, filter /* force */),
nullptr, // CWD
env.empty () ? nullptr : env.data ());
- if (filter)
- {
- try
- {
- ifdstream is (
- move (pr.in_ofd), fdstream_mode::text, ifdstream::badbit);
+ diag_buffer dbuf (ctx, args[0], pr);
- msvc_filter_cl (is, *sp);
+ if (filter)
+ msvc_filter_cl (dbuf, *sp);
- // If anything remains in the stream, send it all to stderr.
- // Note that the eof check is important: if the stream is at
- // eof, this and all subsequent writes to the diagnostics stream
- // will fail (and you won't see a thing).
- //
- if (is.peek () != ifdstream::traits_type::eof ())
- diag_stream_lock () << is.rdbuf ();
+ dbuf.read ();
- is.close ();
- }
- catch (const io_error&) {} // Assume exits with error.
+ // Restore the original source if we switched to preprocessed.
+ //
+ if (psrc)
+ {
+ args.resize (osrc.first);
+ args.push_back (osrc.second);
+ args.push_back (nullptr);
}
- run_finish (args, pr);
+ run_finish (dbuf, args, pr, 1 /* verbosity */);
}
catch (const process_error& e)
{
@@ -7250,6 +8002,8 @@ namespace build2
throw failed ();
}
+ jobs_ag.deallocate ();
+
if (md.deferred_failure)
fail << "expected error exit status from " << x_lang << " compiler";
}
@@ -7259,57 +8013,6 @@ namespace build2
if (ptmp && verb >= 3)
md.psrc.temporary = true;
- // Clang's module compilation requires two separate compiler
- // invocations.
- //
- // @@ MODPART: Clang (all of this is probably outdated).
- //
- if (ctype == compiler_type::clang && ut == unit_type::module_intf)
- {
- // Adjust the command line. First discard everything after -o then
- // build the new "tail".
- //
- args.resize (out_i + 1);
- args.push_back (relo.string ().c_str ()); // Produce .o.
- args.push_back ("-c"); // By compiling .pcm.
- args.push_back ("-Wno-unused-command-line-argument");
- args.push_back (relm.string ().c_str ());
- args.push_back (nullptr);
-
- if (verb >= 2)
- print_process (args);
-
- if (!ctx.dry_run)
- {
- // Remove the target file if this fails. If we don't do that, we
- // will end up with a broken build that is up-to-date.
- //
- auto_rmfile rm (relm);
-
- try
- {
- process pr (cpath,
- args.data (),
- 0, 2, 2,
- nullptr, // CWD
- env.empty () ? nullptr : env.data ());
-
- run_finish (args, pr);
- }
- catch (const process_error& e)
- {
- error << "unable to execute " << args[0] << ": " << e;
-
- if (e.child)
- exit (1);
-
- throw failed ();
- }
-
- rm.cancel ();
- }
- }
-
timestamp now (system_clock::now ());
if (!ctx.dry_run)
@@ -7325,25 +8028,27 @@ namespace build2
}
target_state compile_rule::
- perform_clean (action a, const target& xt) const
+ perform_clean (action a, const target& xt, const target_type& srct) const
{
const file& t (xt.as<file> ());
+ // Preprocessed file extension.
+ //
+ const char* pext (x_assembler_cpp (srct) ? ".Si" :
+ x_objective (srct) ? x_obj_pext :
+ x_pext);
+
// Compressed preprocessed file extension.
//
- auto cpext = [this, &t, s = string ()] () mutable -> const char*
- {
- return (s = t.ctx.fcache.compressed_extension (x_pext)).c_str ();
- };
+ string cpext (t.ctx.fcache->compressed_extension (pext));
clean_extras extras;
-
switch (ctype)
{
- case compiler_type::gcc: extras = {".d", x_pext, cpext (), ".t"}; break;
- case compiler_type::clang: extras = {".d", x_pext, cpext ()}; break;
- case compiler_type::msvc: extras = {".d", x_pext, cpext (), ".idb", ".pdb"};break;
- case compiler_type::icc: extras = {".d"}; break;
+ case compiler_type::gcc: extras = {".d", pext, cpext.c_str (), ".t"}; break;
+ case compiler_type::clang: extras = {".d", pext, cpext.c_str ()}; break;
+ case compiler_type::msvc: extras = {".d", pext, cpext.c_str (), ".idb", ".pdb"}; break;
+ case compiler_type::icc: extras = {".d"}; break;
}
return perform_clean_extra (a, t, extras);
diff --git a/libbuild2/cc/compile-rule.hxx b/libbuild2/cc/compile-rule.hxx
index 49d33eb..0886b4b 100644
--- a/libbuild2/cc/compile-rule.hxx
+++ b/libbuild2/cc/compile-rule.hxx
@@ -58,7 +58,7 @@ namespace build2
perform_update (action, const target&, match_data&) const;
target_state
- perform_clean (action, const target&) const;
+ perform_clean (action, const target&, const target_type&) const;
public:
using appended_libraries = small_vector<const target*, 256>;
@@ -66,7 +66,8 @@ namespace build2
void
append_library_options (appended_libraries&, strings&,
const scope&,
- action, const file&, bool, linfo) const;
+ action, const file&, bool, linfo,
+ bool, bool) const;
optional<path>
find_system_header (const path&) const;
@@ -87,7 +88,7 @@ namespace build2
append_library_options (appended_libraries&, T&,
const scope&,
const scope*,
- action, const file&, bool, linfo,
+ action, const file&, bool, linfo, bool,
library_cache*) const;
template <typename T>
@@ -112,12 +113,12 @@ namespace build2
prefix_map
build_prefix_map (const scope&, action, const target&, linfo) const;
- struct module_mapper_state;
+ struct gcc_module_mapper_state;
- bool
- gcc_module_mapper (module_mapper_state&,
+ optional<bool>
+ gcc_module_mapper (gcc_module_mapper_state&,
action, const scope&, file&, linfo,
- ifdstream&, ofdstream&,
+ const string&, ofdstream&,
depdb&, bool&, bool&,
optional<prefix_map>&, srcout_map&) const;
@@ -129,10 +130,11 @@ namespace build2
optional<bool>
inject_header (action, file&, const file&, timestamp, bool) const;
- pair<file_cache::entry, bool>
+ void
extract_headers (action, const scope&, file&, linfo,
const file&, match_data&,
- depdb&, bool&, timestamp, module_imports&) const;
+ depdb&, bool&, timestamp, module_imports&,
+ pair<file_cache::entry, bool>&) const;
string
parse_unit (action, file&, linfo,
@@ -154,8 +156,9 @@ namespace build2
pair<dir_path, const scope&>
find_modules_sidebuild (const scope&) const;
- const file&
- make_module_sidebuild (action, const scope&, const file&,
+ pair<target&, ulock>
+ make_module_sidebuild (action, const scope&,
+ const file*, otype,
const target&, const string&) const;
const file&
diff --git a/libbuild2/cc/functions.cxx b/libbuild2/cc/functions.cxx
index 9b7a3d9..9d408af 100644
--- a/libbuild2/cc/functions.cxx
+++ b/libbuild2/cc/functions.cxx
@@ -52,7 +52,7 @@ namespace build2
//
if (bs->ctx.phase != run_phase::match &&
bs->ctx.phase != run_phase::execute)
- fail << f.name << " can only be called during execution";
+ fail << f.name << " can only be called from recipe";
const module* m (rs->find_module<module> (d.x));
@@ -61,6 +61,9 @@ namespace build2
// We can assume these are present due to function's types signature.
//
+ if (vs[0].null)
+ throw invalid_argument ("null value");
+
names& ts_ns (vs[0].as<names> ()); // <targets>
// In a somewhat hackish way strip the outer operation to match how we
@@ -85,14 +88,29 @@ namespace build2
return value (move (r));
}
- // Common thunk for $x.lib_*(<targets>, <otype> [, ...]) functions.
+ // Common thunk for $x.lib_*(...) functions.
+ //
+ // The two supported function signatures are:
+ //
+ // $x.lib_*(<targets>, <otype> [, ...]])
+ //
+ // $x.lib_*(<targets>)
+ //
+ // For the first signature, the passed targets cannot be library groups
+ // (so they are always file-based) and linfo is always present.
+ //
+ // For the second signature, targets can only be utility libraries
+ // (including the libul{} group).
+ //
+ // If <otype> in the first signature is NULL, then it is treated as
+ // the second signature.
//
struct lib_thunk_data
{
const char* x;
void (*f) (void*, strings&,
const vector_view<value>&, const module&, const scope&,
- action, const file&, bool, linfo);
+ action, const target&, bool, optional<linfo>);
};
static value
@@ -113,20 +131,25 @@ namespace build2
if (bs->ctx.phase != run_phase::match && // See above.
bs->ctx.phase != run_phase::execute)
- fail << f.name << " can only be called during execution";
+ fail << f.name << " can only be called from recipe";
const module* m (rs->find_module<module> (d.x));
if (m == nullptr)
fail << f.name << " called without " << d.x << " module loaded";
- // We can assume these are present due to function's types signature.
+ // We can assume this is present due to function's types signature.
//
+ if (vs[0].null)
+ throw invalid_argument ("null value");
+
names& ts_ns (vs[0].as<names> ()); // <targets>
- names& ot_ns (vs[1].as<names> ()); // <otype>
- linfo li;
+ optional<linfo> li;
+ if (vs.size () > 1 && !vs[1].null)
{
+ names& ot_ns (vs[1].as<names> ()); // <otype>
+
string t (convert<string> (move (ot_ns)));
const target_type* tt (bs->find_target_type (t));
@@ -172,21 +195,22 @@ namespace build2
name& n (*i), o;
const target& t (to_target (*bs, move (n), move (n.pair ? *++i : o)));
- const file* f;
bool la (false);
-
- if ((la = (f = t.is_a<libux> ())) ||
- (la = (f = t.is_a<liba> ())) ||
- ( (f = t.is_a<libs> ())))
+ if (li
+ ? ((la = t.is_a<libux> ()) ||
+ (la = t.is_a<liba> ()) ||
+ ( t.is_a<libs> ()))
+ : ((la = t.is_a<libux> ()) ||
+ ( t.is_a<libul> ())))
{
if (!t.matched (a))
fail << t << " is not matched" <<
info << "make sure this target is listed as prerequisite";
- d.f (ls, r, vs, *m, *bs, a, *f, la, li);
+ d.f (ls, r, vs, *m, *bs, a, t, la, li);
}
else
- fail << t << " is not a library target";
+ fail << t << " is not a library of expected type";
}
return value (move (r));
@@ -213,12 +237,24 @@ namespace build2
void compile_rule::
functions (function_family& f, const char* x)
{
- // $<module>.lib_poptions(<lib-targets>, <otype>)
+ // $<module>.lib_poptions(<lib-targets>[, <otype>[, <original>]])
//
// Return the preprocessor options that should be passed when compiling
// sources that depend on the specified libraries. The second argument
// is the output target type (obje, objs, etc).
//
+ // The output target type may be omitted for utility libraries (libul{}
+ // or libu[eas]{}). In this case, only "common interface" options will
+ // be returned for lib{} dependencies. This is primarily useful for
+ // obtaining poptions to be passed to tools other than C/C++ compilers
+ // (for example, Qt moc).
+ //
+ // If <original> is true, then return the original -I options without
+ // performing any translation (for example, to -isystem or /external:I).
+ // This is the default if <otype> is omitted. To get the translation for
+ // the common interface options, pass [null] for <otype> and true for
+ // <original>.
+ //
// Note that passing multiple targets at once is not a mere convenience:
// this also allows for more effective duplicate suppression.
//
@@ -230,17 +266,32 @@ namespace build2
// Note that this function is not pure.
//
f.insert (".lib_poptions", false).
- insert<lib_thunk_data, names, names> (
+ insert<lib_thunk_data, names, optional<names*>, optional<names>> (
&lib_thunk<appended_libraries>,
lib_thunk_data {
x,
[] (void* ls, strings& r,
- const vector_view<value>&, const module& m, const scope& bs,
- action a, const file& l, bool la, linfo li)
+ const vector_view<value>& vs, const module& m, const scope& bs,
+ action a, const target& l, bool la, optional<linfo> li)
{
+ // If this is libul{}, get the matched member (see bin::libul_rule
+ // for details).
+ //
+ const file& f (
+ la || li
+ ? l.as<file> ()
+ : (la = true,
+ l.prerequisite_targets[a].back ().target->as<file> ()));
+
+ bool common (!li);
+ bool original (vs.size () > 2 ? convert<bool> (vs[2]) : !li);
+
+ if (!li)
+ li = link_info (bs, link_type (f).type);
+
m.append_library_options (
*static_cast<appended_libraries*> (ls), r,
- bs, a, l, la, li);
+ bs, a, f, la, *li, common, original);
}});
// $<module>.find_system_header(<name>)
@@ -318,12 +369,15 @@ namespace build2
x,
[] (void* ls, strings& r,
const vector_view<value>& vs, const module& m, const scope& bs,
- action a, const file& l, bool la, linfo li)
+ action a, const target& l, bool la, optional<linfo> li)
{
lflags lf (0);
bool rel (true);
if (vs.size () > 2)
{
+ if (vs[2].null)
+ throw invalid_argument ("null value");
+
for (const name& f: vs[2].as<names> ())
{
string s (convert<string> (name (f)));
@@ -342,7 +396,8 @@ namespace build2
m.append_libraries (
*static_cast<appended_libraries*> (ls), r,
nullptr /* sha256 */, nullptr /* update */, timestamp_unknown,
- bs, a, l, la, lf, li, nullopt /* for_install */, self, rel);
+ bs, a, l.as<file> (), la, lf, *li,
+ nullopt /* for_install */, self, rel);
}});
// $<module>.lib_rpaths(<lib-targets>, <otype> [, <link> [, <self>]])
@@ -374,13 +429,12 @@ namespace build2
x,
[] (void* ls, strings& r,
const vector_view<value>& vs, const module& m, const scope& bs,
- action a, const file& l, bool la, linfo li)
+ action a, const target& l, bool la, optional<linfo> li)
{
bool link (vs.size () > 2 ? convert<bool> (vs[2]) : false);
bool self (vs.size () > 3 ? convert<bool> (vs[3]) : true);
m.rpath_libraries (*static_cast<rpathed_libraries*> (ls), r,
- bs,
- a, l, la, li, link, self);
+ bs, a, l.as<file> (), la, *li, link, self);
}});
// $cxx.obj_modules(<obj-targets>)
@@ -475,6 +529,9 @@ namespace build2
// We can assume the argument is present due to function's types
// signature.
//
+ if (vs[0].null)
+ throw invalid_argument ("null value");
+
names& r (vs[0].as<names> ());
m->deduplicate_export_libs (*bs,
vector<name> (r.begin (), r.end ()),
diff --git a/libbuild2/cc/gcc.cxx b/libbuild2/cc/gcc.cxx
index 2c844b6..286ba10 100644
--- a/libbuild2/cc/gcc.cxx
+++ b/libbuild2/cc/gcc.cxx
@@ -45,6 +45,13 @@ namespace build2
d = dir_path (o, 2, string::npos);
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -52,10 +59,7 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
+ r.push_back (move (d));
}
}
@@ -78,6 +82,71 @@ namespace build2
}
#endif
+ // Parse color/semicolon-separated list of search directories (from
+ // -print-search-dirs output, environment variables).
+ //
+ static void
+ parse_search_dirs (const string& v, dir_paths& r,
+ const char* what, const char* what2 = "")
+ {
+ // Now the fun part: figuring out which delimiter is used. Normally it
+ // is ':' but on Windows it is ';' (or can be; who knows for sure). Also
+ // note that these paths are absolute (or should be). So here is what we
+ // are going to do: first look for ';'. If found, then that's the
+ // delimiter. If not found, then there are two cases: it is either a
+ // single Windows path or the delimiter is ':'. To distinguish these two
+ // cases we check if the path starts with a Windows drive.
+ //
+ char d (';');
+ string::size_type e (v.find (d));
+
+ if (e == string::npos &&
+ (v.size () < 2 || v[0] == '/' || v[1] != ':'))
+ {
+ d = ':';
+ e = v.find (d);
+ }
+
+ // Now chop it up. We already have the position of the first delimiter
+ // (if any).
+ //
+ for (string::size_type b (0);; e = v.find (d, (b = e + 1)))
+ {
+ dir_path d;
+ try
+ {
+ string ds (v, b, (e != string::npos ? e - b : e));
+
+ // Skip empty entries (sometimes found in random MinGW toolchains).
+ //
+ if (!ds.empty ())
+ {
+#ifdef _WIN32
+ if (path_traits::is_separator (ds[0]))
+ add_current_drive (ds);
+#endif
+ d = dir_path (move (ds));
+
+ if (d.relative ())
+ throw invalid_path (move (d).string ());
+
+ d.normalize ();
+ }
+ }
+ catch (const invalid_path& e)
+ {
+ fail << "invalid directory '" << e.path << "'" << " in "
+ << what << what2;
+ }
+
+ if (!d.empty () && find (r.begin (), r.end (), d) == r.end ())
+ r.push_back (move (d));
+
+ if (e == string::npos)
+ break;
+ }
+ }
+
// Extract system header search paths from GCC (gcc/g++) or compatible
// (Clang, Intel) using the `-v -E </dev/null` method.
//
@@ -88,14 +157,15 @@ namespace build2
// do this is to run the compiler twice.
//
pair<dir_paths, size_t> config_module::
- gcc_header_search_dirs (const process_path& xc, scope& rs) const
+ gcc_header_search_dirs (const compiler_info& xi, scope& rs) const
{
dir_paths r;
// Note also that any -I and similar that we may specify on the command
- // line are factored into the output.
+ // line are factored into the output. As well as the CPATH, etc.,
+ // environment variable values.
//
- cstrings args {xc.recall_string ()};
+ cstrings args {xi.path.recall_string ()};
append_options (args, rs, x_mode);
// Compile as.
@@ -119,7 +189,7 @@ namespace build2
args.push_back ("-");
args.push_back (nullptr);
- process_env env (xc);
+ process_env env (xi.path);
// For now let's assume that all the platforms other than Windows
// recognize LC_ALL.
@@ -132,119 +202,109 @@ namespace build2
if (verb >= 3)
print_process (env, args);
+ bool found_q (false); // Found `#include "..." ...` marker.
+ bool found_b (false); // Found `#include <...> ...` marker.
+
+ // Open pipe to stderr, redirect stdin and stdout to /dev/null.
+ //
+ process pr (run_start (
+ env,
+ args,
+ -2, /* stdin */
+ -2, /* stdout */
+ -1 /* stderr */));
try
{
- //@@ TODO: why don't we use run_start() here? Because it's unable to
- // open pipe for stderr and we need to change it first, for example,
- // making the err parameter a file descriptor rather than a flag.
- //
+ ifdstream is (
+ move (pr.in_efd), fdstream_mode::skip, ifdstream::badbit);
- // Open pipe to stderr, redirect stdin and stdout to /dev/null.
+ // Normally the system header paths appear between the following
+ // lines:
//
- process pr (xc,
- args.data (),
- -2, /* stdin */
- -2, /* stdout */
- -1, /* stderr */
- nullptr /* cwd */,
- env.vars);
-
- try
+ // #include <...> search starts here:
+ // End of search list.
+ //
+ // The exact text depends on the current locale. What we can rely on
+ // is the presence of the "#include <...>" marker in the "opening"
+ // line and the fact that the paths are indented with a single space
+ // character, unlike the "closing" line.
+ //
+ // Note that on Mac OS we will also see some framework paths among
+ // system header paths, followed with a comment. For example:
+ //
+ // /Library/Frameworks (framework directory)
+ //
+ // For now we ignore framework paths and to filter them out we will
+ // only consider valid paths to existing directories, skipping those
+ // which we fail to normalize or stat. @@ Maybe this is a bit too
+ // loose, especially compared to gcc_library_search_dirs()?
+ //
+ // Note that when there are no paths (e.g., because of -nostdinc),
+ // then GCC prints both #include markers while Clang -- only "...".
+ //
+ for (string s; getline (is, s); )
{
- ifdstream is (
- move (pr.in_efd), fdstream_mode::skip, ifdstream::badbit);
-
- // Normally the system header paths appear between the following
- // lines:
- //
- // #include <...> search starts here:
- // End of search list.
- //
- // The exact text depends on the current locale. What we can rely on
- // is the presence of the "#include <...>" substring in the
- // "opening" line and the fact that the paths are indented with a
- // single space character, unlike the "closing" line.
- //
- // Note that on Mac OS we will also see some framework paths among
- // system header paths, followed with a comment. For example:
- //
- // /Library/Frameworks (framework directory)
- //
- // For now we ignore framework paths and to filter them out we will
- // only consider valid paths to existing directories, skipping those
- // which we fail to normalize or stat. @@ Maybe this is a bit too
- // loose, especially compared to gcc_library_search_dirs()?
- //
- string s;
- for (bool found (false); getline (is, s); )
+ if (!found_q)
+ found_q = s.find ("#include \"...\"") != string::npos;
+ else if (!found_b)
+ found_b = s.find ("#include <...>") != string::npos;
+ else
{
- if (!found)
- found = s.find ("#include <...>") != string::npos;
- else
- {
- if (s[0] != ' ')
- break;
+ if (s[0] != ' ')
+ break;
- dir_path d;
- try
- {
- string ds (s, 1, s.size () - 1);
+ dir_path d;
+ try
+ {
+ string ds (s, 1, s.size () - 1);
#ifdef _WIN32
- if (path_traits::is_separator (ds[0]))
- add_current_drive (ds);
+ if (path_traits::is_separator (ds[0]))
+ add_current_drive (ds);
#endif
- d = dir_path (move (ds));
+ d = dir_path (move (ds));
- if (d.relative () || !exists (d, true))
- continue;
-
- d.normalize ();
- }
- catch (const invalid_path&)
- {
+ if (d.relative () || !exists (d, true))
continue;
- }
- if (find (r.begin (), r.end (), d) == r.end ())
- r.emplace_back (move (d));
+ d.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ continue;
}
+
+ if (find (r.begin (), r.end (), d) == r.end ())
+ r.emplace_back (move (d));
}
+ }
- is.close (); // Don't block.
+ is.close (); // Don't block.
- if (!pr.wait ())
- {
- // We have read stderr so better print some diagnostics.
- //
- diag_record dr (fail);
+ if (!run_wait (args, pr))
+ {
+ // We have read stderr so better print some diagnostics.
+ //
+ diag_record dr (fail);
- dr << "failed to extract " << x_lang << " header search paths" <<
- info << "command line: ";
+ dr << "failed to extract " << x_lang << " header search paths" <<
+ info << "command line: ";
- print_process (dr, args);
- }
- }
- catch (const io_error&)
- {
- pr.wait ();
- fail << "error reading " << x_lang << " compiler -v -E output";
+ print_process (dr, args);
}
}
- catch (const process_error& e)
+ catch (const io_error&)
{
- error << "unable to execute " << args[0] << ": " << e;
-
- if (e.child)
- exit (1);
-
- throw failed ();
+ run_wait (args, pr);
+ fail << "error reading " << x_lang << " compiler -v -E output";
}
- // It's highly unlikely not to have any system directories. More likely
- // we misinterpreted the compiler output.
+ // Note that it's possible that we will have no system directories, for
+ // example, if the user specified -nostdinc. But we must have still seen
+ // at least one marker. Failed that we assume we misinterpreted the
+ // compiler output.
//
- if (r.empty ())
+ if (!found_b && !found_q)
fail << "unable to extract " << x_lang << " compiler system header "
<< "search paths";
@@ -255,7 +315,7 @@ namespace build2
// (Clang, Intel) using the -print-search-dirs option.
//
pair<dir_paths, size_t> config_module::
- gcc_library_search_dirs (const process_path& xc, scope& rs) const
+ gcc_library_search_dirs (const compiler_info& xi, scope& rs) const
{
// The output of -print-search-dirs are a bunch of lines that start with
// "<name>: =" where name can be "install", "programs", or "libraries".
@@ -282,12 +342,12 @@ namespace build2
gcc_extract_library_search_dirs (cast<strings> (rs[x_mode]), r);
size_t rn (r.size ());
- cstrings args {xc.recall_string ()};
+ cstrings args {xi.path.recall_string ()};
append_options (args, rs, x_mode);
args.push_back ("-print-search-dirs");
args.push_back (nullptr);
- process_env env (xc);
+ process_env env (xi.path);
// For now let's assume that all the platforms other than Windows
// recognize LC_ALL.
@@ -302,6 +362,9 @@ namespace build2
// Open pipe to stdout.
//
+ // Note: this function is called in the serial load phase and so no
+ // diagnostics buffering is needed.
+ //
process pr (run_start (env,
args,
0, /* stdin */
@@ -336,63 +399,22 @@ namespace build2
// by that and let run_finish() deal with it.
}
- run_finish (args, pr);
+ run_finish (args, pr, 2 /* verbosity */);
if (l.empty ())
fail << "unable to extract " << x_lang << " compiler system library "
<< "search paths";
- // Now the fun part: figuring out which delimiter is used. Normally it
- // is ':' but on Windows it is ';' (or can be; who knows for sure). Also
- // note that these paths are absolute (or should be). So here is what we
- // are going to do: first look for ';'. If found, then that's the
- // delimiter. If not found, then there are two cases: it is either a
- // single Windows path or the delimiter is ':'. To distinguish these two
- // cases we check if the path starts with a Windows drive.
- //
- char d (';');
- string::size_type e (l.find (d));
-
- if (e == string::npos &&
- (l.size () < 2 || l[0] == '/' || l[1] != ':'))
- {
- d = ':';
- e = l.find (d);
- }
+ parse_search_dirs (l, r, args[0], " -print-search-dirs output");
- // Now chop it up. We already have the position of the first delimiter
- // (if any).
+ // While GCC incorporates the LIBRARY_PATH environment variable value
+ // into the -print-search-dirs output, Clang does not. Also, unlike GCC,
+ // it appears to consider such paths last.
//
- for (string::size_type b (0);; e = l.find (d, (b = e + 1)))
+ if (xi.id.type == compiler_type::clang)
{
- dir_path d;
- try
- {
- string ds (l, b, (e != string::npos ? e - b : e));
-
-#ifdef _WIN32
- if (path_traits::is_separator (ds[0]))
- add_current_drive (ds);
-#endif
-
- d = dir_path (move (ds));
-
- if (d.relative ())
- throw invalid_path (move (d).string ());
-
- d.normalize ();
- }
- catch (const invalid_path& e)
- {
- fail << "invalid directory '" << e.path << "'" << " in "
- << args[0] << " -print-search-dirs output";
- }
-
- if (find (r.begin (), r.end (), d) == r.end ())
- r.emplace_back (move (d));
-
- if (e == string::npos)
- break;
+ if (optional<string> v = getenv ("LIBRARY_PATH"))
+ parse_search_dirs (*v, r, "LIBRARY_PATH environment variable");
}
return make_pair (move (r), rn);
diff --git a/libbuild2/cc/guess.cxx b/libbuild2/cc/guess.cxx
index 9409604..d7e9c63 100644
--- a/libbuild2/cc/guess.cxx
+++ b/libbuild2/cc/guess.cxx
@@ -106,7 +106,7 @@ namespace build2
else if (id.compare (0, p, "icc" ) == 0) type = compiler_type::icc;
else
throw invalid_argument (
- "invalid compiler type '" + string (id, 0, p) + "'");
+ "invalid compiler type '" + string (id, 0, p) + '\'');
if (p != string::npos)
{
@@ -181,12 +181,12 @@ namespace build2
// could also be because there is something wrong with the compiler or
// options but that we simply leave to blow up later).
//
- process pr (run_start (3 /* verbosity */,
+ process pr (run_start (3 /* verbosity */,
xp,
args,
- -1 /* stdin */,
- -1 /* stdout */,
- false /* error */));
+ -1 /* stdin */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
string l, r;
try
{
@@ -222,7 +222,7 @@ namespace build2
// that.
}
- if (!run_finish_code (args.data (), pr, l))
+ if (!run_finish_code (args.data (), pr, l, 2 /* verbosity */))
r = "none";
if (r.empty ())
@@ -412,11 +412,13 @@ namespace build2
//
// Note that Visual Studio versions prior to 15.0 are not supported.
//
+ // Note also the directories are absolute and normalized.
+ //
struct msvc_info
{
- dir_path msvc_dir; // VC directory (...\Tools\MSVC\<ver>\).
- dir_path psdk_dir; // Platfor SDK version (under Include/, Lib/, etc).
- string psdk_ver; // Platfor SDK directory (...\Windows Kits\<ver>\).
+ dir_path msvc_dir; // VC tools directory (...\Tools\MSVC\<ver>\).
+ dir_path psdk_dir; // Platform SDK directory (...\Windows Kits\<ver>\).
+ string psdk_ver; // Platform SDK version (under Include/, Lib/, etc).
};
#if defined(_WIN32) && !defined(BUILD2_BOOTSTRAP)
@@ -458,13 +460,16 @@ namespace build2
{0x87, 0xBF, 0xD5, 0x77, 0x83, 0x8F, 0x1D, 0x5C}};
// If cl is not empty, then find an installation that contains this cl.exe
- // path.
+ // path. In this case the path must be absolute and normalized.
//
static optional<msvc_info>
- find_msvc (const path& cl = path ())
+ find_msvc (const path& cl = path ())
{
using namespace butl;
+ assert (cl.empty () ||
+ (cl.absolute () && cl.normalized (false /* sep */)));
+
msvc_info r;
// Try to obtain the MSVC directory.
@@ -530,7 +535,7 @@ namespace build2
// Note: we cannot use bstr_t due to the Clang 9.0 bug #42842.
//
BSTR p;
- if (vs->ResolvePath (L"VC", &p) != S_OK)
+ if (vs->ResolvePath (L"VC", &p) != S_OK)
return dir_path ();
unique_ptr<wchar_t, bstr_deleter> deleter (p);
@@ -636,36 +641,73 @@ namespace build2
return nullopt;
}
- // Read the VC version from the file and bail out on error.
+ // If cl.exe path is not specified, then deduce the default VC tools
+ // directory for this Visual Studio instance. Otherwise, extract the
+ // tools directory from this path.
//
- string vc_ver; // For example, 14.23.28105.
+ // Note that in the latter case we could potentially avoid the above
+ // iterating over the VS instances, but let's make sure that the
+ // specified cl.exe path actually belongs to one of them as a sanity
+ // check.
+ //
+ if (cl.empty ())
+ {
+ // Read the VC version from the file and bail out on error.
+ //
+ string vc_ver; // For example, 14.23.28105.
- path vp (
- r.msvc_dir /
- path ("Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"));
+ path vp (
+ r.msvc_dir /
+ path ("Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"));
- try
- {
- ifdstream is (vp);
- vc_ver = trim (is.read_text ());
- }
- catch (const io_error&) {}
+ try
+ {
+ ifdstream is (vp);
+ vc_ver = trim (is.read_text ());
+ }
+ catch (const io_error&) {}
- // Make sure that the VC version directory exists.
- //
- if (!vc_ver.empty ())
- try
- {
- ((r.msvc_dir /= "Tools") /= "MSVC") /= vc_ver;
+ if (vc_ver.empty ())
+ return nullopt;
+
+ // Make sure that the VC version directory exists.
+ //
+ try
+ {
+ ((r.msvc_dir /= "Tools") /= "MSVC") /= vc_ver;
- if (!dir_exists (r.msvc_dir))
- r.msvc_dir.clear ();
+ if (!dir_exists (r.msvc_dir))
+ return nullopt;
+ }
+ catch (const invalid_path&) {return nullopt;}
+ catch (const system_error&) {return nullopt;}
}
- catch (const invalid_path&) {}
- catch (const system_error&) {}
+ else
+ {
+ (r.msvc_dir /= "Tools") /= "MSVC";
- if (r.msvc_dir.empty ())
- return nullopt;
+ // Extract the VC tools version from the cl.exe path and append it
+ // to r.msvc_dir.
+ //
+ if (!cl.sub (r.msvc_dir))
+ return nullopt;
+
+ // For example, 14.23.28105\bin\Hostx64\x64\cl.exe.
+ //
+ path p (cl.leaf (r.msvc_dir)); // Can't throw.
+
+ auto i (p.begin ()); // Tools version.
+ if (i == p.end ())
+ return nullopt;
+
+ r.msvc_dir /= *i; // Can't throw.
+
+ // For good measure, make sure that the tools version is not the
+ // last component in the cl.exe path.
+ //
+ if (++i == p.end ())
+ return nullopt;
+ }
}
// Try to obtain the latest Platform SDK directory and version.
@@ -719,7 +761,7 @@ namespace build2
//
for (const dir_entry& de:
dir_iterator (r.psdk_dir / dir_path ("Include"),
- false /* ignore_dangling */))
+ dir_iterator::no_follow))
{
if (de.type () == entry_type::directory)
{
@@ -737,6 +779,16 @@ namespace build2
return nullopt;
}
+ try
+ {
+ r.msvc_dir.normalize ();
+ r.psdk_dir.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ return nullopt;
+ }
+
return r;
}
#endif
@@ -777,7 +829,8 @@ namespace build2
// Note: allowed to change pre if succeeds.
//
static guess_result
- guess (const char* xm,
+ guess (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const strings& x_mo,
@@ -928,10 +981,12 @@ namespace build2
// We try to find the matching installation only for MSVC (for Clang
// we extract this information from the compiler).
//
- if (xc.absolute () &&
- (pt == type::msvc && !pv))
+ if (xc.absolute () && (pt == type::msvc && !pv))
{
- if (optional<msvc_info> mi = find_msvc (xc))
+ path cl (xc); // Absolute but may not be normalized.
+ cl.normalize (); // Can't throw since this is an existing path.
+
+ if (optional<msvc_info> mi = find_msvc (cl))
{
search_info = info_ptr (
new msvc_info (move (*mi)), msvc_info_deleter);
@@ -967,7 +1022,7 @@ namespace build2
#endif
string cache;
- auto run = [&cs, &env, &args, &cache] (
+ auto run = [&ctx, &cs, &env, &args, &cache] (
const char* o,
auto&& f,
bool checksum = false) -> guess_result
@@ -975,9 +1030,10 @@ namespace build2
args[args.size () - 2] = o;
cache.clear ();
return build2::run<guess_result> (
+ ctx,
3 /* verbosity */,
env,
- args.data (),
+ args,
forward<decltype (f)> (f),
false /* error */,
false /* ignore_exit */,
@@ -1024,7 +1080,7 @@ namespace build2
// The gcc -v output will have a last line in the form:
//
- // "gcc version X.Y[.Z][...] ..."
+ // "gcc version X[.Y[.Z]][...] ..."
//
// The "version" word can probably be translated. For example:
//
@@ -1036,6 +1092,7 @@ namespace build2
// gcc version 5.1.0 (Ubuntu 5.1.0-0ubuntu11~14.04.1)
// gcc version 6.0.0 20160131 (experimental) (GCC)
// gcc version 9.3-win32 20200320 (GCC)
+ // gcc version 10-win32 20220324 (GCC)
//
if (cache.empty ())
{
@@ -1275,7 +1332,11 @@ namespace build2
//
const char* evars[] = {"CL=", "_CL_=", nullptr};
- r = build2::run<guess_result> (3, process_env (xp, evars), f, false);
+ r = build2::run<guess_result> (ctx,
+ 3,
+ process_env (xp, evars),
+ f,
+ false);
if (r.empty ())
{
@@ -1426,10 +1487,12 @@ namespace build2
// And VC 16 seems to have the runtime version 14.1 (and not 14.2, as
// one might expect; DLLs are still *140.dll but there are now _1 and _2
// variants for, say, msvcp140.dll). We will, however, call it 14.2
- // (which is the version of the "toolset") in our target triplet.
+ // (which is the version of the "toolset") in our target triplet. And we
+ // will call VC 17 14.3 (which is also the version of the "toolset").
//
// year ver cl crt/dll toolset
//
+ // 2022 17.X 19.3X 14.?/140 14.3X
// 2019 16.X 19.2X 14.2/140 14.2X
// 2017 15.9 19.16 14.1/140 14.16
// 2017 15.8 19.15 14.1/140
@@ -1448,7 +1511,8 @@ namespace build2
//
// _MSC_VER is the numeric cl version, e.g., 1921 for 19.21.
//
- /**/ if (v.major == 19 && v.minor >= 20) return "14.2";
+ /**/ if (v.major == 19 && v.minor >= 30) return "14.3";
+ else if (v.major == 19 && v.minor >= 20) return "14.2";
else if (v.major == 19 && v.minor >= 10) return "14.1";
else if (v.major == 19 && v.minor == 0) return "14.0";
else if (v.major == 18 && v.minor == 0) return "12.0";
@@ -1472,8 +1536,8 @@ namespace build2
// Studio command prompt puts into INCLUDE) including any paths from the
// compiler mode and their count.
//
- // Note that currently we don't add any ATL/MFC or WinRT paths (but could
- // do that probably first checking if they exist/empty).
+ // Note that currently we don't add any ATL/MFC paths (but could do that
+ // probably first checking if they exist/empty).
//
static pair<dir_paths, size_t>
msvc_hdr (const msvc_info& mi, const strings& mo)
@@ -1485,6 +1549,8 @@ namespace build2
msvc_extract_header_search_dirs (mo, r);
size_t rn (r.size ());
+ // Note: the resulting directories are normalized by construction.
+ //
r.push_back (dir_path (mi.msvc_dir) /= "include");
// This path structure only appeared in Platform SDK 10 (if anyone wants
@@ -1498,6 +1564,7 @@ namespace build2
r.push_back (dir_path (d) /= "ucrt" );
r.push_back (dir_path (d) /= "shared");
r.push_back (dir_path (d) /= "um" );
+ r.push_back (dir_path (d) /= "winrt" );
}
return make_pair (move (r), rn);
@@ -1533,6 +1600,8 @@ namespace build2
msvc_extract_library_search_dirs (mo, r);
size_t rn (r.size ());
+ // Note: the resulting directories are normalized by construction.
+ //
r.push_back ((dir_path (mi.msvc_dir) /= "lib") /= cpu);
// This path structure only appeared in Platform SDK 10 (if anyone wants
@@ -1587,7 +1656,8 @@ namespace build2
"LIB", "LINK", "_LINK_", nullptr};
static compiler_info
- guess_msvc (const char* xm,
+ guess_msvc (context&,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -1706,7 +1776,7 @@ namespace build2
//
// OS-ABI is where things are not as clear cut. The OS part shouldn't
// probably be just 'windows' since we have Win32 and WinCE. And
- // WinRT. And Universal Windows Platform (UWP). So perhaps the
+ // WinRT. And Universal Windows Platform (UWP). So perhaps the
// following values for OS: 'win32', 'wince', 'winrt', 'winup'.
//
// For 'win32' the ABI part could signal the Microsoft C/C++ runtime
@@ -1858,7 +1928,8 @@ namespace build2
"SDKROOT", "MACOSX_DEPLOYMENT_TARGET", nullptr};
static compiler_info
- guess_gcc (const char* xm,
+ guess_gcc (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -1877,7 +1948,7 @@ namespace build2
// though language words can be translated and even rearranged (see
// examples above).
//
- // "gcc version X.Y[.Z][...]"
+ // "gcc version X[.Y[.Z]][...]"
//
compiler_version ver;
{
@@ -1916,7 +1987,10 @@ namespace build2
//
try
{
- semantic_version v (string (s, b, e - b), ".-+");
+ semantic_version v (string (s, b, e - b),
+ semantic_version::allow_omit_minor |
+ semantic_version::allow_build,
+ ".-+");
ver.major = v.major;
ver.minor = v.minor;
ver.patch = v.patch;
@@ -1968,7 +2042,7 @@ namespace build2
//
auto f = [] (string& l, bool) {return move (l);};
- t = run<string> (3, xp, args.data (), f, false);
+ t = run<string> (ctx, 3, xp, args, f, false);
if (t.empty ())
{
@@ -1976,7 +2050,7 @@ namespace build2
<< "falling back to -dumpmachine";});
args[args.size () - 2] = "-dumpmachine";
- t = run<string> (3, xp, args.data (), f, false);
+ t = run<string> (ctx, 3, xp, args, f, false);
}
if (t.empty ())
@@ -2119,9 +2193,9 @@ namespace build2
process pr (run_start (3 /* verbosity */,
xp,
args,
- -2 /* stdin (/dev/null) */,
- -1 /* stdout */,
- false /* error (2>&1) */));
+ -2 /* stdin (to /dev/null) */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
clang_msvc_info r;
@@ -2273,7 +2347,7 @@ namespace build2
// that.
}
- if (!run_finish_code (args.data (), pr, l))
+ if (!run_finish_code (args.data (), pr, l, 2 /* verbosity */))
fail << "unable to extract MSVC information from " << xp;
if (const char* w = (
@@ -2291,23 +2365,27 @@ namespace build2
// These are derived from gcc_* plus the sparse documentation (clang(1))
// and source code.
//
+ // Note that for now for Clang targeting MSVC we use msvc_env but should
+ // probably use a combined list.
+ //
// See also the note on environment and caching below if adding any new
// variables.
//
static const char* clang_c_env[] = {
- "CPATH", "C_INCLUDE_PATH",
+ "CPATH", "C_INCLUDE_PATH", "CCC_OVERRIDE_OPTIONS",
"LIBRARY_PATH", "LD_RUN_PATH",
"COMPILER_PATH",
nullptr};
static const char* clang_cxx_env[] = {
- "CPATH", "CPLUS_INCLUDE_PATH",
+ "CPATH", "CPLUS_INCLUDE_PATH", "CCC_OVERRIDE_OPTIONS",
"LIBRARY_PATH", "LD_RUN_PATH",
"COMPILER_PATH",
nullptr};
static compiler_info
- guess_clang (const char* xm,
+ guess_clang (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -2346,6 +2424,12 @@ namespace build2
//
// emcc (...) 2.0.8
//
+ // Pre-releases of the vanilla Clang append `rc` or `git` to the
+ // version, unfortunately without a separator. So we will handle these
+ // ad hoc. For example:
+ //
+ // FreeBSD clang version 18.1.0rc (https://github.com/llvm/llvm-project.git llvmorg-18-init-18361-g22683463740e)
+ //
auto extract_version = [] (const string& s, bool patch, const char* what)
-> compiler_version
{
@@ -2360,8 +2444,28 @@ namespace build2
// end of the word position (first space). In fact, we can just
// check if it is >= e.
//
- if (s.find_first_not_of ("1234567890.", b, 11) >= e)
+ size_t p (s.find_first_not_of ("1234567890.", b, 11));
+ if (p >= e)
break;
+
+ // Handle the unseparated `rc` and `git` suffixes.
+ //
+ if (p != string::npos)
+ {
+ if (p + 2 == e && (e - b) > 2 &&
+ s[p] == 'r' && s[p + 1] == 'c')
+ {
+ e -= 2;
+ break;
+ }
+
+ if (p + 3 == e && (e - b) > 3 &&
+ s[p] == 'g' && s[p + 1] == 'i' && s[p + 2] == 't')
+ {
+ e -= 3;
+ break;
+ }
+ }
}
if (b == e)
@@ -2397,7 +2501,14 @@ namespace build2
ver.patch = next ("patch", patch);
if (e != s.size ())
- ver.build.assign (s, e + 1, string::npos);
+ {
+ // Skip the separator (it could also be unseparated `rc` or `git`).
+ //
+ if (s[e] == ' ' || s[e] == '-')
+ e++;
+
+ ver.build.assign (s, e, string::npos);
+ }
return ver;
};
@@ -2421,7 +2532,10 @@ namespace build2
// Some overrides for testing.
//
+ //string s (xv != nullptr ? *xv : "");
+ //
//s = "clang version 3.7.0 (tags/RELEASE_370/final)";
+ //s = "FreeBSD clang version 18.1.0rc (https://github.com/llvm/llvm-project.git llvmorg-18-init-18361-g22683463740e)";
//
//gr.id.variant = "apple";
//s = "Apple LLVM version 7.3.0 (clang-703.0.16.1)";
@@ -2449,10 +2563,21 @@ namespace build2
//
// Specifically, we now look in the libc++'s __config file for the
// _LIBCPP_VERSION and use the previous version as a conservative
- // estimate (note that there could be multiple __config files with
- // estimate (NOTE: there could be multiple __config files with
// potentially different versions so compile with -v to see which one
// gets picked up).
//
+ // Also, lately, we started seeing _LIBCPP_VERSION values like 15.0.6
+ // or 16.0.2 which would suggest the base is 15.0.5 or 16.0.1. But
+ // that assumption did not check out with the actual usage. For
+ // example, vanilla Clang 16 should no longer require -fmodules-ts but
+ // the Apple's version (that is presumably based on it) still does. So
+ // the theory here is that Apple upgrades to newer libc++ while
+ // keeping the old compiler. Which means we must be more conservative
+ // and assume something like 15.0.6 is still 14-based. But then you
+ // get -Wunqualified-std-cast-call in 14, which was supposedly only
+ // introduced in Clang 15. So maybe not.
+ //
// Note that this is Apple Clang version and not XCode version.
//
// 4.2 -> 3.2svn
@@ -2472,34 +2597,41 @@ namespace build2
// 12.0.0 -> 9.0
// 12.0.5 -> 10.0 (yes, seriously!)
// 13.0.0 -> 11.0
+ // 13.1.6 -> 12.0
+ // 14.0.0 -> 12.0 (_LIBCPP_VERSION=130000)
+ // 14.0.3 -> 15.0 (_LIBCPP_VERSION=150006)
+ // 15.0.0 -> 16.0 (_LIBCPP_VERSION=160002)
//
uint64_t mj (var_ver->major);
uint64_t mi (var_ver->minor);
uint64_t pa (var_ver->patch);
- if (mj >= 13) {mj = 11; mi = 0;}
- else if (mj == 12 && (mi > 0 || pa >= 5)) {mj = 10; mi = 0;}
- else if (mj == 12) {mj = 9; mi = 0;}
- else if (mj == 11 && (mi > 0 || pa >= 3)) {mj = 8; mi = 0;}
- else if (mj == 11) {mj = 7; mi = 0;}
- else if (mj == 10) {mj = 6; mi = 0;}
- else if (mj == 9 && mi >= 1) {mj = 5; mi = 0;}
- else if (mj == 9) {mj = 4; mi = 0;}
- else if (mj == 8) {mj = 3; mi = 9;}
- else if (mj == 7 && mi >= 3) {mj = 3; mi = 8;}
- else if (mj == 7) {mj = 3; mi = 7;}
- else if (mj == 6 && mi >= 1) {mj = 3; mi = 5;}
- else if (mj == 6) {mj = 3; mi = 4;}
- else if (mj == 5 && mi >= 1) {mj = 3; mi = 3;}
- else if (mj == 5) {mj = 3; mi = 2;}
- else if (mj == 4 && mi >= 2) {mj = 3; mi = 1;}
- else {mj = 3; mi = 0;}
+ if (mj >= 15) {mj = 16; mi = 0; pa = 0;}
+ else if (mj == 14 && (mi > 0 || pa >= 3)) {mj = 15; mi = 0; pa = 0;}
+ else if (mj == 14 || (mj == 13 && mi >= 1)) {mj = 12; mi = 0; pa = 0;}
+ else if (mj == 13) {mj = 11; mi = 0; pa = 0;}
+ else if (mj == 12 && (mi > 0 || pa >= 5)) {mj = 10; mi = 0; pa = 0;}
+ else if (mj == 12) {mj = 9; mi = 0; pa = 0;}
+ else if (mj == 11 && (mi > 0 || pa >= 3)) {mj = 8; mi = 0; pa = 0;}
+ else if (mj == 11) {mj = 7; mi = 0; pa = 0;}
+ else if (mj == 10) {mj = 6; mi = 0; pa = 0;}
+ else if (mj == 9 && mi >= 1) {mj = 5; mi = 0; pa = 0;}
+ else if (mj == 9) {mj = 4; mi = 0; pa = 0;}
+ else if (mj == 8) {mj = 3; mi = 9; pa = 0;}
+ else if (mj == 7 && mi >= 3) {mj = 3; mi = 8; pa = 0;}
+ else if (mj == 7) {mj = 3; mi = 7; pa = 0;}
+ else if (mj == 6 && mi >= 1) {mj = 3; mi = 5; pa = 0;}
+ else if (mj == 6) {mj = 3; mi = 4; pa = 0;}
+ else if (mj == 5 && mi >= 1) {mj = 3; mi = 3; pa = 0;}
+ else if (mj == 5) {mj = 3; mi = 2; pa = 0;}
+ else if (mj == 4 && mi >= 2) {mj = 3; mi = 1; pa = 0;}
+ else {mj = 3; mi = 0; pa = 0;}
ver = compiler_version {
- to_string (mj) + '.' + to_string (mi) + ".0",
+ to_string (mj) + '.' + to_string (mi) + '.' + to_string (pa),
mj,
mi,
- 0,
+ pa,
""};
}
else if (emscr)
@@ -2552,7 +2684,7 @@ namespace build2
// for LC_ALL.
//
auto f = [] (string& l, bool) {return move (l);};
- t = run<string> (3, xp, args.data (), f, false);
+ t = run<string> (ctx, 3, xp, args, f, false);
if (t.empty ())
fail << "unable to extract target architecture from " << xc
@@ -2612,7 +2744,7 @@ namespace build2
const char* cpu (msvc_cpu (tt.cpu));
// Come up with the system library search paths. Ideally we would want
- // to extract this from Clang and -print-search-paths would have been
+ // to extract this from Clang and -print-search-dirs would have been
// the natural way for Clang to report it. But no luck.
//
lib_dirs = msvc_lib (mi, x_mo, cpu);
@@ -2780,7 +2912,8 @@ namespace build2
}
static compiler_info
- guess_icc (const char* xm,
+ guess_icc (context& ctx,
+ const char* xm,
lang xl,
const path& xc,
const string* xv,
@@ -2844,7 +2977,7 @@ namespace build2
//
// @@ TODO: running without the mode options.
//
- s = run<string> (3, env, "-V", f, false);
+ s = run<string> (ctx, 3, env, "-V", f, false);
if (s.empty ())
fail << "unable to extract signature from " << xc << " -V output";
@@ -2970,7 +3103,7 @@ namespace build2
// The -V output is sent to STDERR.
//
- t = run<string> (3, env, args.data (), f, false);
+ t = run<string> (ctx, 3, env, args, f, false);
if (t.empty ())
fail << "unable to extract target architecture from " << xc
@@ -3021,7 +3154,7 @@ namespace build2
//
{
auto f = [] (string& l, bool) {return move (l);};
- t = run<string> (3, xp, "-dumpmachine", f);
+ t = run<string> (ctx, 3, xp, "-dumpmachine", f);
}
if (t.empty ())
@@ -3102,7 +3235,8 @@ namespace build2
static global_cache<compiler_info> cache;
const compiler_info&
- guess (const char* xm,
+ guess (context& ctx,
+ const char* xm,
lang xl,
const string& ec,
const path& xc,
@@ -3176,7 +3310,7 @@ namespace build2
if (pre.type != invalid_compiler_type)
{
- gr = guess (xm, xl, xc, x_mo, xi, pre, cs);
+ gr = guess (ctx, xm, xl, xc, x_mo, xi, pre, cs);
if (gr.empty ())
{
@@ -3192,13 +3326,14 @@ namespace build2
}
if (gr.empty ())
- gr = guess (xm, xl, xc, x_mo, xi, pre, cs);
+ gr = guess (ctx, xm, xl, xc, x_mo, xi, pre, cs);
if (gr.empty ())
fail << "unable to guess " << xl << " compiler type of " << xc <<
info << "use config." << xm << ".id to specify explicitly";
compiler_info (*gf) (
+ context&,
const char*, lang, const path&, const string*, const string*,
const strings&,
const strings*, const strings*,
@@ -3218,7 +3353,8 @@ namespace build2
case compiler_type::icc: gf = &guess_icc; break;
}
- compiler_info r (gf (xm, xl, xc, xv, xt,
+ compiler_info r (gf (ctx,
+ xm, xl, xc, xv, xt,
x_mo, c_po, x_po, c_co, x_co, c_lo, x_lo,
move (gr), cs));
@@ -3376,6 +3512,7 @@ namespace build2
// In the future we will probably have to maintain per-standard additions.
//
static const char* std_importable[] = {
+ "<initializer_list>", // Note: keep first (present in freestanding).
"<algorithm>",
"<any>",
"<array>",
@@ -3400,7 +3537,6 @@ namespace build2
"<fstream>",
"<functional>",
"<future>",
- "<initializer_list>",
"<iomanip>",
"<ios>",
"<iosfwd>",
@@ -3499,6 +3635,9 @@ namespace build2
// is currently not provided by GCC. Though entering missing headers
// should be harmless.
//
+ // Plus, a freestanding implementation may only have a subset of such
+ // headers (see [compliance]).
+ //
pair<const path, importable_headers::groups>* p;
auto add_groups = [&p] (bool imp)
{
@@ -3520,29 +3659,39 @@ namespace build2
}
else
{
+ // While according to [compliance] a freestanding implementation
+ // should provide a subset of headers, including <initializer_list>,
+ // there seem to be cases where no headers are provided at all (see GH
+ // issue #219). So if we cannot find <initializer_list>, we just skip
+ // the whole thing.
+ //
p = hs.insert_angle (sys_hdr_dirs, std_importable[0]);
- assert (p != nullptr);
- add_groups (true);
+ if (p != nullptr)
+ {
+ assert (p != nullptr);
- dir_path d (p->first.directory ());
+ add_groups (true);
- auto add_header = [&hs, &d, &p, add_groups] (const char* f, bool imp)
- {
- path fp (d);
- fp.combine (f + 1, strlen (f) - 2, '\0'); // Assuming simple.
+ dir_path d (p->first.directory ());
- p = &hs.insert_angle (move (fp), f);
- add_groups (imp);
- };
+ auto add_header = [&hs, &d, &p, add_groups] (const char* f, bool imp)
+ {
+ path fp (d);
+ fp.combine (f + 1, strlen (f) - 2, '\0'); // Assuming simple.
- for (size_t i (1);
- i != sizeof (std_importable) / sizeof (std_importable[0]);
- ++i)
- add_header (std_importable[i], true);
+ p = &hs.insert_angle (move (fp), f);
+ add_groups (imp);
+ };
- for (const char* f: std_non_importable)
- add_header (f, false);
+ for (size_t i (1);
+ i != sizeof (std_importable) / sizeof (std_importable[0]);
+ ++i)
+ add_header (std_importable[i], true);
+
+ for (const char* f: std_non_importable)
+ add_header (f, false);
+ }
}
}
}
diff --git a/libbuild2/cc/guess.hxx b/libbuild2/cc/guess.hxx
index 53acc15..7cbbd87 100644
--- a/libbuild2/cc/guess.hxx
+++ b/libbuild2/cc/guess.hxx
@@ -253,7 +253,8 @@ namespace build2
// that most of it will be the same, at least for C and C++.
//
const compiler_info&
- guess (const char* xm, // Module (for var names in diagnostics).
+ guess (context&,
+ const char* xm, // Module (for var names in diagnostics).
lang xl, // Language.
const string& ec, // Environment checksum.
const path& xc, // Compiler path.
diff --git a/libbuild2/cc/init.cxx b/libbuild2/cc/init.cxx
index 75b32bb..e124450 100644
--- a/libbuild2/cc/init.cxx
+++ b/libbuild2/cc/init.cxx
@@ -86,7 +86,10 @@ namespace build2
// Enter variables.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
auto v_t (variable_visibility::target);
@@ -97,13 +100,19 @@ namespace build2
vp.insert<strings> ("config.cc.loptions");
vp.insert<strings> ("config.cc.aoptions");
vp.insert<strings> ("config.cc.libs");
- vp.insert<string> ("config.cc.internal.scope");
+
+ vp.insert<string> ("config.cc.internal.scope");
+
+ vp.insert<bool> ("config.cc.reprocess"); // See cc.preprocess below.
+
+ vp.insert<abs_dir_path> ("config.cc.pkgconfig.sysroot");
vp.insert<strings> ("cc.poptions");
vp.insert<strings> ("cc.coptions");
vp.insert<strings> ("cc.loptions");
vp.insert<strings> ("cc.aoptions");
vp.insert<strings> ("cc.libs");
+
vp.insert<string> ("cc.internal.scope");
vp.insert<strings> ("cc.internal.libs");
@@ -117,8 +126,8 @@ namespace build2
// files instead of the default install.{include,lib}. Relative paths
// are resolved as install paths.
//
- vp.insert<dir_paths> ("cc.pkconfig.include");
- vp.insert<dir_paths> ("cc.pkconfig.lib");
+ vp.insert<dir_paths> ("cc.pkgconfig.include");
+ vp.insert<dir_paths> ("cc.pkgconfig.lib");
// Hint variables (not overridable).
//
@@ -139,13 +148,14 @@ namespace build2
// values are "binless" (library is binless) and "recursively-binless"
// (library and all its prerequisite libraries are binless). Note that
// another indication of a binless library is an empty path, which could
- // easier/faster to check. Note also that there should be no whitespaces
- // of any kind and <lang> is always first.
+ // be easier/faster to check. Note also that there should be no
+ // whitespaces of any kind and <lang> is always first.
//
// This value should be set on the library target as a rule-specific
- // variable by the matching rule. Currently <lang> is used to decide
- // which *.libs to use during static linking. The "cc" language is used
- // in the import installed logic.
+ // variable by the matching rule. It is also saved in the generated
+ // pkg-config files. Currently <lang> is used to decide which *.libs to
+ // use during static linking. The "cc" language is used in the import
+ // installed logic.
//
// Note that this variable cannot be set via the target type/pattern-
// specific mechanism (see process_libraries()).
@@ -173,9 +183,15 @@ namespace build2
// Ability to disable using preprocessed output for compilation.
//
- vp.insert<bool> ("config.cc.reprocess");
vp.insert<bool> ("cc.reprocess");
+ // Execute serially with regards to any other recipe. This is primarily
+ // useful when compiling large translation units or linking large
+ // binaries that require so much memory that doing that in parallel with
+ // other compilation/linking jobs is likely to summon the OOM killer.
+ //
+ vp.insert<bool> ("cc.serialize");
+
// Register scope operation callback.
//
// It feels natural to clean up sidebuilds as a post operation but that
@@ -333,14 +349,24 @@ namespace build2
if (lookup l = lookup_config (rs, "config.cc.reprocess"))
rs.assign ("cc.reprocess") = *l;
+ // config.cc.pkgconfig.sysroot
+ //
+ // Let's look it up instead of just marking for saving to make sure the
+ // path is valid.
+ //
+ // Note: save omitted.
+ //
+ lookup_config (rs, "config.cc.pkgconfig.sysroot");
+
// Load the bin.config module.
//
if (!cast_false<bool> (rs["bin.config.loaded"]))
{
- // Prepare configuration hints. They are only used on the first load
- // of bin.config so we only populate them on our first load.
+ // Prepare configuration hints (pretend it belongs to root scope).
+ // They are only used on the first load of bin.config so we only
+ // populate them on our first load.
//
- variable_map h (rs.ctx);
+ variable_map h (rs);
if (first)
{
diff --git a/libbuild2/cc/install-rule.cxx b/libbuild2/cc/install-rule.cxx
index 0b4d1e1..6758e03 100644
--- a/libbuild2/cc/install-rule.cxx
+++ b/libbuild2/cc/install-rule.cxx
@@ -18,20 +18,67 @@ namespace build2
{
using namespace bin;
+ using posthoc_prerequisite_target =
+ context::posthoc_target::prerequisite_target;
+
// install_rule
//
install_rule::
install_rule (data&& d, const link_rule& l)
: common (move (d)), link_ (l) {}
- const target* install_rule::
+ // Wrap the file_rule's recipe into a data-carrying recipe.
+ //
+ struct install_match_data
+ {
+ build2::recipe recipe;
+ uint64_t options; // Match options.
+ link_rule::libs_paths libs_paths;
+
+ target_state
+ operator() (action a, const target& t)
+ {
+ return recipe (a, t);
+ }
+ };
+
+ bool install_rule::
+ filter (action a, const target& t, const target& m) const
+ {
+ if (!t.is_a<exe> ())
+ {
+ // If runtime-only, filter out all known buildtime target types.
+ //
+ const auto& md (t.data<install_match_data> (a));
+
+ if ((md.options & lib::option_install_buildtime) == 0)
+ {
+ if (m.is_a<liba> () || // Static library.
+ m.is_a<pc> () || // pkg-config file.
+ m.is_a<libi> ()) // Import library.
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ pair<const target*, uint64_t> install_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
// NOTE: see libux_install_rule::filter() if changing anything here.
const prerequisite& p (i->prerequisite);
+ uint64_t options (match_extra::all_options);
+
+ otype ot (link_type (t).type);
+
+ // @@ TMP: drop eventually.
+ //
+#if 0
// If this is a shared library prerequisite, install it as long as it is
// in the installation scope.
//
@@ -43,10 +90,14 @@ namespace build2
//
// Note: we install ad hoc prerequisites by default.
//
- otype ot (link_type (t).type);
+ // Note: at least one must be true since we only register this rule for
+ // exe{}, and lib[as]{} (this makes sure the following if-condition will
+ // always be true for libx{}).
+ //
bool st (t.is_a<exe> () || t.is_a<libs> ()); // Target needs shared.
bool at (t.is_a<liba> () || t.is_a<libs> ()); // Target needs static.
+ assert (st || at);
if ((st && (p.is_a<libx> () || p.is_a<libs> ())) ||
(at && (p.is_a<libx> () || p.is_a<liba> ())))
@@ -59,26 +110,115 @@ namespace build2
if (const libx* l = pt->is_a<libx> ())
pt = link_member (*l, a, link_info (t.base_scope (), ot));
- // Note: not redundant since we are returning a member.
+ // Note: not redundant since we could be returning a member.
//
if ((st && pt->is_a<libs> ()) || (at && pt->is_a<liba> ()))
- return is == nullptr || pt->in (*is) ? pt : nullptr;
+ {
+ // Adjust match options.
+ //
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<exe> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ // This is a library prerequisite of a library target and
+ // runtime-only begets runtime-only.
+ //
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
// See through to libu*{} members. Note that we are always in the same
// project (and thus amalgamation).
//
if (pt->is_a<libux> ())
- return pt;
+ {
+ // Adjust match options (similar to above).
+ //
+ if (a.operation () != update_id && !pt->is_a<libue> ())
+ {
+ if (t.is_a<exe> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (pt, options);
+ }
+ }
+#else
+ // Note that at first it may seem like we don't need to install static
+ // library prerequisites of executables. But such libraries may still
+ // have prerequisites that are needed at runtime (say, some data files).
+ // So we install all libraries as long as they are in the installation
+ // scope and deal with runtime vs buildtime distiction using match
+ // options.
+ //
+ // Note: for now we assume these prerequisites never come from see-
+ // through groups.
+ //
+ // Note: we install ad hoc prerequisites by default.
+ //
+ if (p.is_a<libx> () || p.is_a<libs> () || p.is_a<liba> ())
+ {
+ const target* pt (&search (t, p));
+
+ // If this is the lib{}/libu*{} group, pick a member which we would
+ // link. For libu*{} we want the "see through" logic.
+ //
+ if (const libx* l = pt->is_a<libx> ())
+ pt = link_member (*l, a, link_info (t.base_scope (), ot));
+
+ // Adjust match options.
+ //
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<exe> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ // This is a library prerequisite of a library target and
+ // runtime-only begets runtime-only.
+ //
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ // Note: not redundant since we could be returning a member.
+ //
+ if (pt->is_a<libs> () || pt->is_a<liba> ())
+ {
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
+ else // libua{} or libus{}
+ {
+ // See through to libu*{} members. Note that we are always in the
+ // same project (and thus amalgamation).
+ //
+ return make_pair (pt, options);
+ }
}
+#endif
// The rest of the tests only succeed if the base filter() succeeds.
//
- const target* pt (file_rule::filter (is, a, t, p));
+ const target* pt (file_rule::filter (is, a, t, p, me).first);
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
- // Don't install executable's prerequisite headers and module
- // interfaces.
+ // Don't install executable's or runtime-only library's prerequisite
+ // headers and module interfaces.
//
// Note that if they come from a group, then we assume the entire
// group is not to be installed.
@@ -88,12 +228,18 @@ namespace build2
//
auto header_source = [this] (const auto& p)
{
- return (x_header (p) ||
- p.is_a (x_src) ||
- (x_mod != nullptr && p.is_a (*x_mod)));
+ return (x_header (p) ||
+ p.is_a (x_src) ||
+ p.is_a (c::static_type) ||
+ p.is_a (S::static_type) ||
+ (x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_obj != nullptr && (p.is_a (*x_obj) ||
+ p.is_a (m::static_type))));
};
- if (t.is_a<exe> ())
+ if (t.is_a<exe> () ||
+ (a.operation () != update_id &&
+ me.cur_options == lib::option_install_runtime))
{
if (header_source (p))
pt = nullptr;
@@ -108,7 +254,7 @@ namespace build2
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
// Here is a problem: if the user spells the obj*/bmi*{} targets
@@ -138,16 +284,16 @@ namespace build2
{
pt = t.is_a<exe> ()
? nullptr
- : file_rule::filter (is, a, *pt, pm.prerequisite);
+ : file_rule::filter (is, a, *pt, pm.prerequisite, me).first;
break;
}
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
- return pt;
+ return make_pair (pt, options);
}
bool install_rule::
@@ -160,27 +306,34 @@ namespace build2
file_rule::match (a, t);
}
- // Wrap the file_rule's recipe into a data-carrying recipe.
- //
- struct install_match_data
+ recipe install_rule::
+ apply (action a, target& t, match_extra& me) const
{
- build2::recipe recipe;
- link_rule::libs_paths libs_paths;
-
- target_state
- operator() (action a, const target& t)
+ // Handle match options.
+ //
+ // Do it before calling apply_impl() since we need this information
+ // in the filter() callbacks.
+ //
+ if (a.operation () != update_id)
{
- return recipe (a, t);
+ if (!t.is_a<exe> ())
+ {
+ if (me.new_options == 0)
+ me.new_options = lib::option_install_runtime; // Minimum we can do.
+
+ me.cur_options = me.new_options;
+ }
}
- };
- recipe install_rule::
- apply (action a, target& t) const
- {
- recipe r (file_rule::apply_impl (a, t));
+ recipe r (file_rule::apply_impl (
+ a, t, me,
+ me.cur_options != match_extra::all_options /* reapply */));
if (r == nullptr)
+ {
+ me.cur_options = match_extra::all_options; // Noop for all options.
return noop_recipe;
+ }
if (a.operation () == update_id)
{
@@ -202,29 +355,109 @@ namespace build2
}
else // install or uninstall
{
- // Derive shared library paths and cache them in the target's aux
- // storage if we are un/installing (used in the *_extra() functions
- // below).
- //
- if (file* f = t.is_a<libs> ())
+ file* ls;
+ if ((ls = t.is_a<libs> ()) || t.is_a<liba> ())
{
- if (!f->path ().empty ()) // Not binless.
+ // Derive shared library paths and cache them in the target's aux
+ // storage if we are un/installing (used in the *_extra() functions
+ // below).
+ //
+ link_rule::libs_paths lsp;
+ if (ls != nullptr && !ls->path ().empty ()) // Not binless.
{
const string* p (cast_null<string> (t["bin.lib.prefix"]));
const string* s (cast_null<string> (t["bin.lib.suffix"]));
- return install_match_data {
- move (r),
- link_.derive_libs_paths (*f,
- p != nullptr ? p->c_str (): nullptr,
- s != nullptr ? s->c_str (): nullptr)};
+ lsp = link_.derive_libs_paths (*ls,
+ p != nullptr ? p->c_str (): nullptr,
+ s != nullptr ? s->c_str (): nullptr);
}
+
+ return install_match_data {move (r), me.cur_options, move (lsp)};
}
}
return r;
}
+ void install_rule::
+ apply_posthoc (action a, target& t, match_extra& me) const
+ {
+ // Similar semantics to filter() above for shared libraries specified as
+ // post hoc prerequisites (e.g., plugins).
+ //
+ if (a.operation () != update_id)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ if (t.is_a<exe> ())
+ p.match_options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ p.match_options = lib::option_install_runtime;
+ }
+ }
+ }
+ }
+ }
+
+ void install_rule::
+ reapply (action a, target& t, match_extra& me) const
+ {
+ tracer trace ("cc::install_rule::reapply");
+
+ assert (a.operation () != update_id && !t.is_a<exe> ());
+
+ l6 ([&]{trace << "rematching " << t
+ << ", current options " << me.cur_options
+ << ", new options " << me.new_options;});
+
+ me.cur_options |= me.new_options;
+
+ // We also need to update options in install_match_data.
+ //
+ t.data<install_match_data> (a).options = me.cur_options;
+
+ if ((me.new_options & lib::option_install_buildtime) != 0)
+ {
+ // If we are rematched with the buildtime option, propagate it to our
+ // prerequisite libraries.
+ //
+ for (const target* pt: t.prerequisite_targets[a])
+ {
+ if (pt != nullptr && (pt->is_a<liba> () || pt->is_a<libs> () ||
+ pt->is_a<libua> () || pt->is_a<libus> ()))
+ {
+ // Go for all options instead of just install_buildtime to avoid
+ // any further relocking/reapply (we only support runtime-only or
+ // everything).
+ //
+ rematch_sync (a, *pt, match_extra::all_options);
+ }
+ }
+
+ // Also to post hoc.
+ //
+ if (me.posthoc_prerequisite_targets != nullptr)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ p.match_options = match_extra::all_options;
+ }
+ }
+ }
+
+ // Also match any additional prerequisites (e.g., headers).
+ //
+ file_rule::reapply_impl (a, t, me);
+ }
+ }
+
bool install_rule::
install_extra (const file& t, const install_dir& id) const
{
@@ -232,14 +465,19 @@ namespace build2
if (t.is_a<libs> ())
{
+ const auto& md (t.data<install_match_data> (perform_install_id));
+
// Here we may have a bunch of symlinks that we need to install.
//
+ // Note that for runtime-only install we only omit the name that is
+ // used for linking (e.g., libfoo.so).
+ //
const scope& rs (t.root_scope ());
- auto& lp (t.data<install_match_data> (perform_install_id).libs_paths);
+ const link_rule::libs_paths& lp (md.libs_paths);
- auto ln = [&rs, &id] (const path& f, const path& l)
+ auto ln = [&t, &rs, &id] (const path& f, const path& l)
{
- install_l (rs, id, f.leaf (), l.leaf (), 2 /* verbosity */);
+ install_l (rs, id, l.leaf (), t, f.leaf (), 2 /* verbosity */);
return true;
};
@@ -253,7 +491,10 @@ namespace build2
if (!in.empty ()) {r = ln (*f, in) || r; f = &in;}
if (!so.empty ()) {r = ln (*f, so) || r; f = &so;}
if (!ld.empty ()) {r = ln (*f, ld) || r; f = &ld;}
- if (!lk.empty ()) {r = ln (*f, lk) || r; }
+ if ((md.options & lib::option_install_buildtime) != 0)
+ {
+ if (!lk.empty ()) {r = ln (*f, lk) || r;}
+ }
}
return r;
@@ -266,14 +507,16 @@ namespace build2
if (t.is_a<libs> ())
{
+ const auto& md (t.data<install_match_data> (perform_uninstall_id));
+
// Here we may have a bunch of symlinks that we need to uninstall.
//
const scope& rs (t.root_scope ());
- auto& lp (t.data<install_match_data> (perform_uninstall_id).libs_paths);
+ const link_rule::libs_paths& lp (md.libs_paths);
- auto rm = [&rs, &id] (const path& l)
+ auto rm = [&rs, &id] (const path& f, const path& l)
{
- return uninstall_f (rs, id, nullptr, l.leaf (), 2 /* verbosity */);
+ return uninstall_l (rs, id, l.leaf (), f.leaf (), 2 /* verbosity */);
};
const path& lk (lp.link);
@@ -281,10 +524,15 @@ namespace build2
const path& so (lp.soname);
const path& in (lp.interm);
- if (!lk.empty ()) r = rm (lk) || r;
- if (!ld.empty ()) r = rm (ld) || r;
- if (!so.empty ()) r = rm (so) || r;
- if (!in.empty ()) r = rm (in) || r;
+ const path* f (lp.real);
+
+ if (!in.empty ()) {r = rm (*f, in) || r; f = &in;}
+ if (!so.empty ()) {r = rm (*f, so) || r; f = &so;}
+ if (!ld.empty ()) {r = rm (*f, ld) || r; f = &ld;}
+ if ((md.options & lib::option_install_buildtime) != 0)
+ {
+ if (!lk.empty ()) {r = rm (*f, lk) || r;}
+ }
}
return r;
@@ -296,22 +544,30 @@ namespace build2
libux_install_rule (data&& d, const link_rule& l)
: common (move (d)), link_ (l) {}
- const target* libux_install_rule::
+ pair<const target*, uint64_t> libux_install_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
using file_rule = install::file_rule;
const prerequisite& p (i->prerequisite);
+ uint64_t options (match_extra::all_options);
+
+ otype ot (link_type (t).type);
+
// The "see through" semantics that should be parallel to install_rule
// above. In particular, here we use libue/libua/libus{} as proxies for
// exe/liba/libs{} there.
//
- otype ot (link_type (t).type);
+ // @@ TMP: drop eventually.
+ //
+#if 0
bool st (t.is_a<libue> () || t.is_a<libus> ()); // Target needs shared.
bool at (t.is_a<libua> () || t.is_a<libus> ()); // Target needs static.
+ assert (st || at);
if ((st && (p.is_a<libx> () || p.is_a<libs> ())) ||
(at && (p.is_a<libx> () || p.is_a<liba> ())))
@@ -322,24 +578,85 @@ namespace build2
pt = link_member (*l, a, link_info (t.base_scope (), ot));
if ((st && pt->is_a<libs> ()) || (at && pt->is_a<liba> ()))
- return is == nullptr || pt->in (*is) ? pt : nullptr;
+ {
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<libue> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
if (pt->is_a<libux> ())
- return pt;
+ {
+ if (a.operation () != update_id && !pt->is_a<libue> ())
+ {
+ if (t.is_a<libue> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ return make_pair (pt, options);
+ }
}
+#else
+ if (p.is_a<libx> () || p.is_a<libs> () || p.is_a<liba> ())
+ {
+ const target* pt (&search (t, p));
- const target* pt (file_rule::instance.filter (is, a, t, p));
+ if (const libx* l = pt->is_a<libx> ())
+ pt = link_member (*l, a, link_info (t.base_scope (), ot));
+
+ if (a.operation () != update_id)
+ {
+ if (t.is_a<libue> ())
+ options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ options = lib::option_install_runtime;
+ }
+ }
+
+ if (pt->is_a<libs> () || pt->is_a<liba> ())
+ {
+ return make_pair (is == nullptr || pt->in (*is) ? pt : nullptr,
+ options);
+ }
+ else
+ return make_pair (pt, options);
+ }
+#endif
+
+ const target* pt (file_rule::instance.filter (is, a, t, p, me).first);
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
auto header_source = [this] (const auto& p)
{
- return (x_header (p) ||
- p.is_a (x_src) ||
- (x_mod != nullptr && p.is_a (*x_mod)));
+ return (x_header (p) ||
+ p.is_a (x_src) ||
+ p.is_a (c::static_type) ||
+ p.is_a (S::static_type) ||
+ (x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_obj != nullptr && (p.is_a (*x_obj) ||
+ p.is_a (m::static_type))));
};
- if (t.is_a<libue> ())
+ if (t.is_a<libue> () ||
+ (a.operation () != update_id &&
+ me.cur_options == lib::option_install_runtime))
{
if (header_source (p))
pt = nullptr;
@@ -354,7 +671,7 @@ namespace build2
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
bool g (false);
@@ -370,16 +687,17 @@ namespace build2
{
pt = t.is_a<libue> ()
? nullptr
- : file_rule::instance.filter (is, a, *pt, pm.prerequisite);
+ : file_rule::instance.filter (
+ is, a, *pt, pm.prerequisite, me).first;
break;
}
}
if (pt == nullptr)
- return pt;
+ return make_pair (pt, options);
}
- return pt;
+ return make_pair (pt, options);
}
bool libux_install_rule::
@@ -391,5 +709,81 @@ namespace build2
return link_.sub_match (x_link, update_id, a, t, me) &&
alias_rule::match (a, t);
}
+
+ recipe libux_install_rule::
+ apply (action a, target& t, match_extra& me) const
+ {
+ if (a.operation () != update_id)
+ {
+ if (!t.is_a<libue> ())
+ {
+ if (me.new_options == 0)
+ me.new_options = lib::option_install_runtime;
+
+ me.cur_options = me.new_options;
+ }
+ }
+
+ return alias_rule::apply_impl (
+ a, t, me, me.cur_options != match_extra::all_options /* reapply */);
+ }
+
+ void libux_install_rule::
+ apply_posthoc (action a, target& t, match_extra& me) const
+ {
+ if (a.operation () != update_id)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ if (t.is_a<libue> ())
+ p.match_options = lib::option_install_runtime;
+ else
+ {
+ if (me.cur_options == lib::option_install_runtime)
+ p.match_options = lib::option_install_runtime;
+ }
+ }
+ }
+ }
+ }
+
+ void libux_install_rule::
+ reapply (action a, target& t, match_extra& me) const
+ {
+ tracer trace ("cc::linux_install_rule::reapply");
+
+ assert (a.operation () != update_id && !t.is_a<libue> ());
+
+ l6 ([&]{trace << "rematching " << t
+ << ", current options " << me.cur_options
+ << ", new options " << me.new_options;});
+
+ me.cur_options |= me.new_options;
+
+ if ((me.new_options & lib::option_install_buildtime) != 0)
+ {
+ for (const target* pt: t.prerequisite_targets[a])
+ {
+ if (pt != nullptr && (pt->is_a<liba> () || pt->is_a<libs> () ||
+ pt->is_a<libua> () || pt->is_a<libus> ()))
+ rematch_sync (a, *pt, match_extra::all_options);
+ }
+
+ if (me.posthoc_prerequisite_targets != nullptr)
+ {
+ for (posthoc_prerequisite_target& p: *me.posthoc_prerequisite_targets)
+ {
+ if (p.target != nullptr && p.target->is_a<libs> ())
+ {
+ p.match_options = match_extra::all_options;
+ }
+ }
+ }
+
+ alias_rule::reapply_impl (a, t, me);
+ }
+ }
}
}
diff --git a/libbuild2/cc/install-rule.hxx b/libbuild2/cc/install-rule.hxx
index 6998d63..771c33b 100644
--- a/libbuild2/cc/install-rule.hxx
+++ b/libbuild2/cc/install-rule.hxx
@@ -20,7 +20,7 @@ namespace build2
{
class link_rule;
- // Installation rule for exe{} and lib*{}. Here we do:
+ // Installation rule for exe{} and lib[as]{}. Here we do:
//
// 1. Signal to the link rule that this is update for install.
//
@@ -28,17 +28,23 @@ namespace build2
//
// 3. Extra un/installation (e.g., libs{} symlinks).
//
+ // 4. Handling runtime/buildtime match options for lib[as]{}.
+ //
class LIBBUILD2_CC_SYMEXPORT install_rule: public install::file_rule,
virtual common
{
public:
install_rule (data&&, const link_rule&);
- virtual const target*
+ virtual bool
+ filter (action, const target&, const target&) const override;
+
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const override;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const override;
- // Note: rule::match() override.
+ // Note: rule::match() override (with hint and match_extra).
//
virtual bool
match (action, target&, const string&, match_extra&) const override;
@@ -46,7 +52,13 @@ namespace build2
using file_rule::match; // Make Clang happy.
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
+
+ virtual void
+ apply_posthoc (action, target&, match_extra&) const override;
+
+ virtual void
+ reapply (action, target&, match_extra&) const override;
virtual bool
install_extra (const file&, const install_dir&) const override;
@@ -58,22 +70,24 @@ namespace build2
const link_rule& link_;
};
- // Installation rule for libu*{}.
+ // Installation rule for libu[eas]{}.
//
// While libu*{} members themselves are not installable, we need to see
// through them in case they depend on stuff that we need to install
// (e.g., headers). Note that we use the alias_rule as a base.
//
- class LIBBUILD2_CC_SYMEXPORT libux_install_rule:
- public install::alias_rule,
- virtual common
+ class LIBBUILD2_CC_SYMEXPORT libux_install_rule: public install::alias_rule,
+ virtual common
{
public:
libux_install_rule (data&&, const link_rule&);
- virtual const target*
+ // Note: utility libraries currently have no ad hoc members.
+
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const override;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const override;
// Note: rule::match() override.
//
@@ -82,6 +96,15 @@ namespace build2
using alias_rule::match; // Make Clang happy.
+ virtual recipe
+ apply (action, target&, match_extra&) const override;
+
+ virtual void
+ apply_posthoc (action, target&, match_extra&) const override;
+
+ virtual void
+ reapply (action, target&, match_extra&) const override;
+
private:
const link_rule& link_;
};
diff --git a/libbuild2/cc/lexer+comment.test.testscript b/libbuild2/cc/lexer+comment.test.testscript
index 358865c..381e479 100644
--- a/libbuild2/cc/lexer+comment.test.testscript
+++ b/libbuild2/cc/lexer+comment.test.testscript
@@ -16,6 +16,11 @@ four
/**
six /*
*/
+/* */
+/*
+
+*/
+/**/
EOI
: cxx-comment
diff --git a/libbuild2/cc/lexer+raw-string-literal.test.testscript b/libbuild2/cc/lexer+raw-string-literal.test.testscript
index bca489a..a6455eb 100644
--- a/libbuild2/cc/lexer+raw-string-literal.test.testscript
+++ b/libbuild2/cc/lexer+raw-string-literal.test.testscript
@@ -16,6 +16,7 @@ R"X(a
b)X"
R"X(a\
b)X"
+R""(a)""
EOI
<string literal>
<string literal>
@@ -24,6 +25,7 @@ EOI
<string literal>
<string literal>
<string literal>
+<string literal>
EOO
: prefix
diff --git a/libbuild2/cc/lexer.cxx b/libbuild2/cc/lexer.cxx
index beeb970..d20e0dc 100644
--- a/libbuild2/cc/lexer.cxx
+++ b/libbuild2/cc/lexer.cxx
@@ -214,7 +214,7 @@ namespace build2
// #line <integer> [<string literal>] ...
// # <integer> [<string literal>] ...
//
- // Also diagnose #include while at it.
+ // Also diagnose #include while at it if preprocessed.
//
if (!(c >= '0' && c <= '9'))
{
@@ -222,10 +222,13 @@ namespace build2
if (t.type == type::identifier)
{
- if (t.value == "include")
- fail (l) << "unexpected #include directive";
- else if (t.value != "line")
+ if (t.value != "line")
+ {
+ if (preprocessed_ && t.value == "include")
+ fail (l) << "unexpected #include directive";
+
continue;
+ }
}
else
continue;
@@ -734,8 +737,8 @@ namespace build2
// R"<delimiter>(<raw_characters>)<delimiter>"
//
// Where <delimiter> is a potentially-empty character sequence made of
- // any source character but parentheses, backslash and spaces. It can be
- // at most 16 characters long.
+ // any source character but parentheses, backslash, and spaces (in
+ // particular, it can be `"`). It can be at most 16 characters long.
//
// Note that the <raw_characters> are not processed in any way, not even
// for line continuations.
@@ -750,7 +753,7 @@ namespace build2
{
c = geth ();
- if (eos (c) || c == '\"' || c == ')' || c == '\\' || c == ' ')
+ if (eos (c) || c == ')' || c == '\\' || c == ' ')
fail (l) << "invalid raw string literal";
if (c == '(')
@@ -1108,21 +1111,18 @@ namespace build2
if (eos (c))
fail (p) << "unterminated comment";
- if (c == '*' && (c = peek ()) == '/')
+ if (c == '*')
{
- get (c);
- break;
+ if ((c = peek ()) == '/')
+ {
+ get (c);
+ break;
+ }
}
-
- if (c != '*' && c != '\\')
+ else
{
// Direct buffer scan.
//
- // Note that we should call get() prior to the direct buffer
- // scan (see butl::char_scanner for details).
- //
- get (c);
-
const char* b (gptr_);
const char* e (egptr_);
const char* p (b);
diff --git a/libbuild2/cc/lexer.hxx b/libbuild2/cc/lexer.hxx
index 81e0d97..17d706b 100644
--- a/libbuild2/cc/lexer.hxx
+++ b/libbuild2/cc/lexer.hxx
@@ -12,6 +12,8 @@
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/cc/export.hxx>
+
namespace build2
{
namespace cc
@@ -20,13 +22,15 @@ namespace build2
// sequence of tokens returned is similar to what a real C/C++ compiler
// would see from its preprocessor.
//
- // The input is a (partially-)preprocessed translation unit that may still
- // contain comments, line continuations, and preprocessor directives such
- // as #line, #pragma, but not #include (which is diagnosed). Currently,
- // all preprocessor directives except #line are ignored and no values are
- // saved from literals. The #line directive (and its shorthand notation)
- // is recognized to provide the logical token location. Note that the
- // modules-related pseudo-directives are not recognized or handled.
+ // The input is a potentially (partially-)preprocessed translation unit
+ // that may still contain comments, line continuations, and preprocessor
+ // directives such as #line and #pragma. If the input is said to be
+ // (partially-)preprocessed then #include directives are diagnosed.
+ // Currently, all preprocessor directives except #line are ignored and no
+ // values are saved from literals. The #line directive (and its shorthand
+ // notation) is recognized to provide the logical token location. Note
+ // that the modules-related pseudo-directives are not recognized or
+ // handled.
//
// While at it we also calculate the checksum of the input ignoring
// comments, whitespaces, etc. This is used to detect changes that do not
@@ -80,15 +84,19 @@ namespace build2
// Output the token value in a format suitable for diagnostics.
//
- ostream&
+ LIBBUILD2_CC_SYMEXPORT ostream&
operator<< (ostream&, const token&);
- class lexer: protected butl::char_scanner<>
+ class LIBBUILD2_CC_SYMEXPORT lexer: protected butl::char_scanner<>
{
public:
- lexer (ifdstream& is, const path_name& name)
+ // If preprocessed is true, then assume the input is at least partially
+ // preprocessed and therefore should not contain #include directives.
+ //
+ lexer (ifdstream& is, const path_name& name, bool preprocessed)
: char_scanner (is, false /* crlf */),
name_ (name),
+ preprocessed_ (preprocessed),
fail ("error", &name_),
log_file_ (name)
{
@@ -173,6 +181,8 @@ namespace build2
private:
const path_name& name_;
+ bool preprocessed_;
+
const fail_mark fail;
// Logical file and line as set by the #line directives. Note that the
diff --git a/libbuild2/cc/lexer.test.cxx b/libbuild2/cc/lexer.test.cxx
index 39e4279..82163fe 100644
--- a/libbuild2/cc/lexer.test.cxx
+++ b/libbuild2/cc/lexer.test.cxx
@@ -65,7 +65,7 @@ namespace build2
is.open (fddup (stdin_fd ()));
}
- lexer l (is, in);
+ lexer l (is, in, true /* preprocessed */);
// No use printing eos since we will either get it or loop forever.
//
diff --git a/libbuild2/cc/link-rule.cxx b/libbuild2/cc/link-rule.cxx
index 0c7c0c2..08a60b9 100644
--- a/libbuild2/cc/link-rule.cxx
+++ b/libbuild2/cc/link-rule.cxx
@@ -20,6 +20,8 @@
#include <libbuild2/bin/target.hxx>
#include <libbuild2/bin/utility.hxx>
+#include <libbuild2/install/utility.hxx>
+
#include <libbuild2/cc/target.hxx> // c, pc*
#include <libbuild2/cc/utility.hxx>
@@ -94,7 +96,7 @@ namespace build2
return false;
}
- if (const target* t = search_existing (n, bs, dir_path () /* out */))
+ if (const target* t = search_existing (n, bs))
{
// The same logic as in process_libraries().
//
@@ -290,12 +292,15 @@ namespace build2
if (p.is_a (x_src) ||
(x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj)) ||
// Header-only X library (or library with C source and X header).
(library && x_header (p, false /* c_hdr */)))
{
r.seen_x = true;
}
- else if (p.is_a<c> () ||
+ else if (p.is_a<c> () || p.is_a<S> () ||
+ (x_obj != nullptr && p.is_a<m> ()) ||
// Header-only C library.
(library && p.is_a<h> ()))
{
@@ -431,9 +436,12 @@ namespace build2
r.seen_lib = true;
}
// Some other c-common header/source (say C++ in a C rule) other than
- // a C header (we assume everyone can hanle that).
+ // a C header (we assume everyone can hanle that) or some other
+ // #include'able target.
//
- else if (p.is_a<cc> () && !(x_header (p, true /* c_hdr */)))
+ else if (p.is_a<cc> () &&
+ !(x_header (p, true /* c_hdr */)) &&
+ !p.is_a (x_inc) && !p.is_a<c_inc> ())
{
r.seen_cc = true;
break;
@@ -839,7 +847,12 @@ namespace build2
// @@ Isn't libul{} member already picked or am I missing something?
// If not, then we may need the same in recursive-binless logic.
//
- assert (false); // @@ TMP
+#if 0
+ // @@ TMP hm, this hasn't actually been enabled. So may actually
+ // enable and see if it trips up (do git-blame for good measure).
+ //
+ assert (false); // @@ TMP (remove before 0.16.0 release)
+#endif
ux = &link_member (*ul, a, li)->as<libux> ();
}
else if ((ux = pt->is_a<libue> ()) ||
@@ -868,7 +881,6 @@ namespace build2
(type[p += 19] == '\0' || type[p] == ','));
}
-
recipe link_rule::
apply (action a, target& xt, match_extra&) const
{
@@ -900,7 +912,7 @@ namespace build2
// for binless libraries since there could be other output (e.g., .pc
// files).
//
- inject_fsdir (a, t);
+ const fsdir* dir (inject_fsdir (a, t));
// Process prerequisites, pass 1: search and match prerequisite
// libraries, search obj/bmi{} targets, and search targets we do rule
@@ -914,7 +926,7 @@ namespace build2
// We do libraries first in order to indicate that we will execute these
// targets before matching any of the obj/bmi{}. This makes it safe for
// compile::apply() to unmatch them and therefore not to hinder
- // parallelism.
+ // parallelism (or mess up for-install'ness).
//
// We also create obj/bmi{} chain targets because we need to add
// (similar to lib{}) all the bmi{} as prerequisites to all the other
@@ -994,16 +1006,18 @@ namespace build2
// By default update ad hoc headers/sources during match (see
// above).
//
+#if 1
if (!um)
- um = (p.is_a (x_src) ||
- p.is_a<c> () ||
- (x_mod != nullptr && p.is_a (*x_mod)) ||
+ um = (p.is_a (x_src) || p.is_a<c> () || p.is_a<S> () ||
+ (x_mod != nullptr && p.is_a (*x_mod)) ||
+ (x_obj != nullptr && (p.is_a (*x_obj) || p.is_a<m> ())) ||
x_header (p, true));
+#endif
if (*um)
{
pto.target = &p.search (t); // mark 0
- pto.include |= 2;
+ pto.include |= prerequisite_target::include_udm;
update_match = true;
}
}
@@ -1014,17 +1028,20 @@ namespace build2
const target*& pt (pto);
// Mark (2 bits):
+ //
// 0 - lib or update during match
// 1 - src
// 2 - mod
// 3 - obj/bmi and also lib not to be cleaned (and other stuff)
//
- uint8_t m (0);
+ uint8_t mk (0);
bool mod (x_mod != nullptr && p.is_a (*x_mod));
bool hdr (false);
- if (mod || p.is_a (x_src) || p.is_a<c> ())
+ if (mod ||
+ p.is_a (x_src) || p.is_a<c> () || p.is_a<S> () ||
+ (x_obj != nullptr && (p.is_a (*x_obj) || p.is_a<m> ())))
{
binless = binless && (mod ? user_binless : false);
@@ -1112,7 +1129,7 @@ namespace build2
}
pt = &r.first;
- m = mod ? 2 : 1;
+ mk = mod ? 2 : 1;
}
else if (p.is_a<libx> () ||
p.is_a<liba> () ||
@@ -1121,12 +1138,8 @@ namespace build2
{
// Handle imported libraries.
//
- // Note that since the search is rule-specific, we don't cache the
- // target in the prerequisite.
- //
if (p.proj ())
- pt = search_library (
- a, sys_lib_dirs, usr_lib_dirs, p.prerequisite);
+ pt = search_library (a, sys_lib_dirs, usr_lib_dirs, p.prerequisite);
// The rest is the same basic logic as in search_and_match().
//
@@ -1134,13 +1147,17 @@ namespace build2
pt = &p.search (t);
if (skip (*pt))
- m = 3; // Mark so it is not matched.
+ mk = 3; // Mark so it is not matched.
// If this is the lib{}/libul{} group, then pick the appropriate
- // member.
+ // member. Also note this in prerequisite_target::include (used
+ // by process_libraries()).
//
if (const libx* l = pt->is_a<libx> ())
+ {
pt = link_member (*l, a, li);
+ pto.include |= include_group;
+ }
}
else
{
@@ -1184,6 +1201,12 @@ namespace build2
}
pt = &p.search (t);
+
+ if (pt == dir)
+ {
+ pt = nullptr;
+ continue;
+ }
}
if (skip (*pt))
@@ -1202,7 +1225,7 @@ namespace build2
!pt->is_a<hbmix> () &&
cast_false<bool> ((*pt)[b_binless])));
- m = 3;
+ mk = 3;
}
if (user_binless && !binless)
@@ -1215,29 +1238,41 @@ namespace build2
{
// By default update headers during match (see above).
//
+#if 1
if (!um)
um = hdr;
+#endif
if (*um)
{
- if (m != 3)
+ if (mk != 3)
fail << "unable to update during match prerequisite " << p <<
info << "updating this type of prerequisites during match is "
<< "not supported by this rule";
- m = 0;
- pto.include |= 2;
+ mk = 0;
+ pto.include |= prerequisite_target::include_udm;
update_match = true;
}
}
- mark (pt, m);
+ mark (pt, mk);
}
- // Match lib{} and update during match (the only unmarked) in parallel
- // and wait for completion.
+ // Match lib{} first and then update during match (the only unmarked) in
+ // parallel and wait for completion. We need to match libraries first
+ // because matching generated headers/sources may lead to matching some
+ // of the libraries (for example, if generation requires some of the
+ // metadata; think poptions needed by Qt moc).
//
- match_members (a, t, pts, start);
+ {
+ auto mask (prerequisite_target::include_udm);
+
+ match_members (a, t, pts, start, {mask, 0});
+
+ if (update_match)
+ match_members (a, t, pts, start, {mask, mask});
+ }
// Check if we have any binful utility libraries.
//
@@ -1372,7 +1407,7 @@ namespace build2
// prerequisite_target::data.
//
if (update_match)
- update_during_match_prerequisites (trace, a, t, 2 /* mask */);
+ update_during_match_prerequisites (trace, a, t);
// Now that we know for sure whether we are binless, derive file name(s)
// and add ad hoc group members. Note that for binless we still need the
@@ -1517,6 +1552,11 @@ namespace build2
if (wasm.path ().empty ())
wasm.derive_path ();
+ // We don't want to print this member at level 1 diagnostics.
+ //
+ wasm.state[a].assign (ctx.var_backlink) = names {
+ name ("group"), name ("false")};
+
// If we have -pthread then we get additional .worker.js file
// which is used for thread startup. In a somewhat hackish way we
// represent it as an exe{} member to make sure it gets installed
@@ -1540,6 +1580,11 @@ namespace build2
if (worker.path ().empty ())
worker.derive_path ();
+
+ // We don't want to print this member at level 1 diagnostics.
+ //
+ worker.state[a].assign (ctx.var_backlink) = names {
+ name ("group"), name ("false")};
}
}
@@ -1548,22 +1593,31 @@ namespace build2
//
if (!binless && ot != otype::a && tsys == "win32-msvc")
{
- if (find_option ("/DEBUG", t, c_loptions, true) ||
- find_option ("/DEBUG", t, x_loptions, true))
+ const string* o;
+ if ((o = find_option_prefix ("/DEBUG", t, c_loptions, true)) != nullptr ||
+ (o = find_option_prefix ("/DEBUG", t, x_loptions, true)) != nullptr)
{
- const target_type& tt (*bs.find_target_type ("pdb"));
+ if (icasecmp (*o, "/DEBUG:NONE") != 0)
+ {
+ const target_type& tt (*bs.find_target_type ("pdb"));
- // We call the target foo.{exe,dll}.pdb rather than just foo.pdb
- // because we can have both foo.exe and foo.dll in the same
- // directory.
- //
- file& pdb (add_adhoc_member<file> (t, tt, e));
+ // We call the target foo.{exe,dll}.pdb rather than just
+ // foo.pdb because we can have both foo.exe and foo.dll in the
+ // same directory.
+ //
+ file& pdb (add_adhoc_member<file> (t, tt, e));
- // Note that the path is derived from the exe/dll path (so it
- // will include the version in case of a dll).
- //
- if (pdb.path ().empty ())
- pdb.derive_path (t.path ());
+ // Note that the path is derived from the exe/dll path (so it
+ // will include the version in case of a dll).
+ //
+ if (pdb.path ().empty ())
+ pdb.derive_path (t.path ());
+
+ // We don't want to print this member at level 1 diagnostics.
+ //
+ pdb.state[a].assign (ctx.var_backlink) = names {
+ name ("group"), name ("false")};
+ }
}
}
@@ -1585,6 +1639,13 @@ namespace build2
// we will use its bin.lib to decide what will be installed and in
// perform_update() we will confirm that it is actually installed.
//
+ // This, of course, works only if we actually have explicit lib{}.
+ // But the user could only have liba{} (common in testing frameworks
+ // that provide main()) or only libs{} (e.g., plugin that can also
+ // be linked). It's also theoretically possible to have both liba{}
+ // and libs{} but no lib{}, in which case it feels correct not to
+ // generate the common file at all.
+ //
if (ot != otype::e)
{
// Note that here we always use the lib name prefix, even on
@@ -1596,7 +1657,13 @@ namespace build2
// Note also that the order in which we are adding these members
// is important (see add_addhoc_member() for details).
//
- if (ot == otype::a || !link_members (rs).a)
+ if (operator>= (t.group->decl, target_decl::implied) // @@ VC14
+ ? ot == (link_members (rs).a ? otype::a : otype::s)
+ : search_existing (ctx,
+ ot == otype::a
+ ? libs::static_type
+ : liba::static_type,
+ t.dir, t.out, t.name) == nullptr)
{
auto& pc (add_adhoc_member<pc> (t));
@@ -1629,14 +1696,13 @@ namespace build2
// exists (windows_rpath_assembly() does take care to clean it up
// if not used).
//
-#ifdef _WIN32
- target& dir =
-#endif
+ target& dir (
add_adhoc_member (t,
fsdir::static_type,
path_cast<dir_path> (t.path () + ".dlls"),
t.out,
- string () /* name */);
+ string () /* name */,
+ nullopt /* ext */));
// By default our backlinking logic will try to symlink the
// directory and it can even be done on Windows using junctions.
@@ -1650,9 +1716,15 @@ namespace build2
// Wine. So we only resort to copy-link'ing if we are running on
// Windows.
//
+ // We also don't want to print this member at level 1 diagnostics.
+ //
+ dir.state[a].assign (ctx.var_backlink) = names {
#ifdef _WIN32
- dir.state[a].assign (ctx.var_backlink) = "copy";
+ name ("copy"), name ("false")
+#else
+ name ("group"), name ("false")
#endif
+ };
}
}
}
@@ -1678,20 +1750,20 @@ namespace build2
// 1 - completion
// 2 - verification
//
- uint8_t m (unmark (pt));
+ uint8_t mk (unmark (pt));
- if (m == 3) // obj/bmi or lib not to be cleaned
+ if (mk == 3) // obj/bmi or lib not to be cleaned
{
- m = 1; // Just completion.
+ mk = 1; // Just completion.
// Note that if this is a library not to be cleaned, we keep it
// marked for completion (see the next phase).
}
- else if (m == 1 || m == 2) // Source/module chain.
+ else if (mk == 1 || mk == 2) // Source/module chain.
{
- bool mod (m == 2);
+ bool mod (mk == 2); // p is_a x_mod
- m = 1;
+ mk = 1;
const target& rt (*pt);
bool group (!p.prerequisite.belongs (t)); // Group's prerequisite.
@@ -1842,7 +1914,10 @@ namespace build2
// Most of the time we will have just a single source so fast-
// path that case.
//
- if (p1.is_a (mod ? *x_mod : x_src) || p1.is_a<c> ())
+ if (mod
+ ? p1.is_a (*x_mod)
+ : (p1.is_a (x_src) || p1.is_a<c> () || p1.is_a<S> () ||
+ (x_obj != nullptr && (p1.is_a (*x_obj) || p1.is_a<m> ()))))
{
src = true;
continue; // Check the rest of the prerequisites.
@@ -1855,8 +1930,12 @@ namespace build2
p1.is_a<libx> () ||
p1.is_a<liba> () || p1.is_a<libs> () || p1.is_a<libux> () ||
p1.is_a<bmi> () || p1.is_a<bmix> () ||
- (p.is_a (mod ? *x_mod : x_src) && x_header (p1)) ||
- (p.is_a<c> () && p1.is_a<h> ()))
+ ((mod ||
+ p.is_a (x_src) ||
+ (x_asp != nullptr && p.is_a (*x_asp)) ||
+ (x_obj != nullptr && p.is_a (*x_obj))) && x_header (p1)) ||
+ ((p.is_a<c> () || p.is_a<S> () ||
+ (x_obj != nullptr && p.is_a<m> ())) && p1.is_a<h> ()))
continue;
fail << "synthesized dependency for prerequisite " << p
@@ -1869,11 +1948,11 @@ namespace build2
if (!src)
fail << "synthesized dependency for prerequisite " << p
<< " would be incompatible with existing target " << *pt <<
- info << "no existing c/" << x_name << " source prerequisite" <<
+ info << "no existing C/" << x_lang << " source prerequisite" <<
info << "specify corresponding " << rtt.name << "{} "
<< "dependency explicitly";
- m = 2; // Needs verification.
+ mk = 2; // Needs verification.
}
}
else // lib*{} or update during match
@@ -1885,6 +1964,8 @@ namespace build2
bool u;
if ((u = pt->is_a<libux> ()) || pt->is_a<liba> ())
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (ctx.var_pool["bin.whole"]); // @@ Cache.
// See the bin module for the lookup semantics discussion. Note
@@ -1913,7 +1994,7 @@ namespace build2
}
}
- mark (pt, m);
+ mark (pt, mk);
}
// Process prerequisites, pass 3: match everything and verify chains.
@@ -1929,7 +2010,7 @@ namespace build2
bool adhoc (pts[i].adhoc ());
const target*& pt (pts[i++]);
- uint8_t m;
+ uint8_t mk;
if (pt == nullptr)
{
@@ -1939,13 +2020,13 @@ namespace build2
continue;
pt = &p.search (t);
- m = 1; // Mark for completion.
+ mk = 1; // Mark for completion.
}
else
{
- m = unmark (pt);
+ mk = unmark (pt);
- if (m == 0)
+ if (mk == 0)
continue; // Already matched.
// If this is a library not to be cleaned, we can finally blank it
@@ -1959,7 +2040,7 @@ namespace build2
}
match_async (a, *pt, ctx.count_busy (), t[a].task_count);
- mark (pt, m);
+ mark (pt, mk);
}
wg.wait ();
@@ -1974,15 +2055,15 @@ namespace build2
// Skipped or not marked for completion.
//
- uint8_t m;
- if (pt == nullptr || (m = unmark (pt)) == 0)
+ uint8_t mk;
+ if (pt == nullptr || (mk = unmark (pt)) == 0)
continue;
match_complete (a, *pt);
// Nothing else to do if not marked for verification.
//
- if (m == 1)
+ if (mk == 1)
continue;
// Finish verifying the existing dependency (which is now matched)
@@ -1994,7 +2075,10 @@ namespace build2
for (prerequisite_member p1: group_prerequisite_members (a, *pt))
{
- if (p1.is_a (mod ? *x_mod : x_src) || p1.is_a<c> ())
+ if (mod
+ ? p1.is_a (*x_mod)
+ : (p1.is_a (x_src) || p1.is_a<c> () || p1.is_a<S> () ||
+ (x_obj != nullptr && (p1.is_a (*x_obj) || p1.is_a<m> ()))))
{
// Searching our own prerequisite is ok, p1 must already be
// resolved.
@@ -2177,17 +2261,47 @@ namespace build2
*type != "cc" &&
type->compare (0, 3, "cc,") != 0)
{
- auto& md (l->data<link_rule::match_data> (d.a));
- assert (md.for_install); // Must have been executed.
+ auto* md (l->try_data<link_rule::match_data> (d.a));
+
+ if (md == nullptr)
+ fail << "library " << *l << " is not built with cc module-based "
+ << "link rule" <<
+ info << "mark it as generic with cc.type=cc target-specific "
+ << "variable";
+
+ assert (md->for_install); // Must have been executed.
// The user will get the target name from the context info.
//
- if (*md.for_install != *d.for_install)
+ if (*md->for_install != *d.for_install)
fail << "incompatible " << *l << " build" <<
- info << "library is built " << (*md.for_install ? "" : "not ")
+ info << "library is built " << (*md->for_install ? "" : "not ")
<< "for install";
}
+ auto newer = [&d, l] ()
+ {
+ // @@ Work around the unexecuted member for installed libraries
+ // issue (see search_library() for details).
+ //
+ // Note that the member may not even be matched, let alone
+ // executed, so we have to go through the group to detect this
+ // case (if the group is not matched, then the member got to be).
+ //
+#if 0
+ return l->newer (d.mt);
+#else
+ const target* g (l->group);
+ target_state s (g != nullptr &&
+ g->matched (d.a, memory_order_acquire) &&
+ g->state[d.a].rule == &file_rule::rule_match
+ ? target_state::unchanged
+ : l->executed_state (d.a));
+
+ return l->newer (d.mt, s);
+#endif
+ };
+
if (d.li.type == otype::a)
{
// Linking a utility library to a static library.
@@ -2215,7 +2329,7 @@ namespace build2
// Check if this library renders us out of date.
//
if (d.update != nullptr)
- *d.update = *d.update || l->newer (d.mt);
+ *d.update = *d.update || newer ();
for (const target* pt: l->prerequisite_targets[d.a])
{
@@ -2254,7 +2368,7 @@ namespace build2
// Check if this library renders us out of date.
//
if (d.update != nullptr)
- *d.update = *d.update || l->newer (d.mt);
+ *d.update = *d.update || newer ();
// On Windows a shared library is a DLL with the import library as
// an ad hoc group member. MinGW though can link directly to DLLs
@@ -2335,6 +2449,8 @@ namespace build2
//
if (const target* g = exp && l.is_a<libs> () ? l.group : &l)
{
+ // Note: go straight for the public variable pool.
+ //
const variable& var (
com
? (exp ? c_export_loptions : c_loptions)
@@ -2353,7 +2469,9 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la,
- lf, imp, lib, opt, self,
+ lf, imp, lib, opt,
+ self,
+ false /* proc_opt_group */,
lib_cache);
}
@@ -2367,9 +2485,14 @@ namespace build2
// Use -rpath-link only on targets that support it (Linux, *BSD). Note
// that we don't really need it for top-level libraries.
//
+ // Note that more recent versions of FreeBSD are using LLVM lld without
+ // any mentioning of -rpath-link in the man pages.
+ //
+ auto have_link = [this] () {return tclass == "linux" || tclass == "bsd";};
+
if (link)
{
- if (tclass != "linux" && tclass != "bsd")
+ if (!have_link ())
return;
}
@@ -2399,8 +2522,56 @@ namespace build2
{
rpathed_libraries& ls;
strings& args;
- bool link;
- } d {ls, args, link};
+ bool rpath;
+ bool rpath_link;
+ } d {ls, args, false, false};
+
+ if (link)
+ d.rpath_link = true;
+ else
+ {
+ // While one would naturally expect -rpath to be a superset of
+ // -rpath-link, according to GNU ld:
+ //
+ // "The -rpath option is also used when locating shared objects which
+ // are needed by shared objects explicitly included in the link; see
+ // the description of the -rpath-link option. Searching -rpath in
+ // this way is only supported by native linkers and cross linkers
+ // which have been configured with the --with-sysroot option."
+ //
+ // So we check if this is cross-compilation and request both options
+ // if that's the case (we have no easy way of detecting whether the
+ // linker has been configured with the --with-sysroot option, whatever
+ // that means, so we will just assume the worst case).
+ //
+ d.rpath = true;
+
+ if (have_link ())
+ {
+ // Detecting cross-compilation is not as easy as it seems. Comparing
+ // complete target triplets proved too strict. For example, we may be
+ // running on x86_64-apple-darwin17.7.0 while the compiler is
+ // targeting x86_64-apple-darwin17.3.0. Also, there is the whole i?86
+ // family of CPUs which, at least for linking, should probably be
+ // considered the same.
+ //
+ const target_triplet& h (*bs.ctx.build_host);
+ const target_triplet& t (ctgt);
+
+ auto x86 = [] (const string& c)
+ {
+ return (c.size () == 4 &&
+ c[0] == 'i' &&
+ (c[1] >= '3' && c[1] <= '6') &&
+ c[2] == '8' &&
+ c[3] == '6');
+ };
+
+ if (t.system != h.system ||
+ (t.cpu != h.cpu && !(x86 (t.cpu) && x86 (h.cpu))))
+ d.rpath_link = true;
+ }
+ }
auto lib = [&d, this] (
const target* const* lc,
@@ -2422,13 +2593,22 @@ namespace build2
auto append = [&d] (const string& f)
{
- string o (d.link ? "-Wl,-rpath-link," : "-Wl,-rpath,");
-
size_t p (path::traits_type::rfind_separator (f));
assert (p != string::npos);
- o.append (f, 0, (p != 0 ? p : 1)); // Don't include trailing slash.
- d.args.push_back (move (o));
+ if (d.rpath)
+ {
+ string o ("-Wl,-rpath,");
+ o.append (f, 0, (p != 0 ? p : 1)); // Don't include trailing slash.
+ d.args.push_back (move (o));
+ }
+
+ if (d.rpath_link)
+ {
+ string o ("-Wl,-rpath-link,");
+ o.append (f, 0, (p != 0 ? p : 1));
+ d.args.push_back (move (o));
+ }
};
if (l != nullptr)
@@ -2505,7 +2685,10 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0 /* lflags */,
- imp, lib, nullptr, false /* self */, lib_cache);
+ imp, lib, nullptr,
+ false /* self */,
+ false /* proc_opt_group */,
+ lib_cache);
}
void link_rule::
@@ -2567,7 +2750,7 @@ namespace build2
// Filter link.exe noise (msvc.cxx).
//
void
- msvc_filter_link (ifdstream&, const file&, otype);
+ msvc_filter_link (diag_buffer&, const file&, otype);
// Translate target CPU to the link.exe/lib.exe /MACHINE option.
//
@@ -2655,7 +2838,7 @@ namespace build2
// those that don't match. Note that we have to do it after updating
// prerequisites to keep the dependency counts straight.
//
- if (const variable* var_fi = ctx.var_pool.find ("for_install"))
+ if (const variable* var_fi = rs.var_pool ().find ("for_install"))
{
// Parallel prerequisites/prerequisite_targets loop.
//
@@ -2681,7 +2864,7 @@ namespace build2
// (Re)generate pkg-config's .pc file. While the target itself might be
// up-to-date from a previous run, there is no guarantee that .pc exists
// or also up-to-date. So to keep things simple we just regenerate it
- // unconditionally (and avoid doing so on uninstall; see pkconfig_save()
+ // unconditionally (and avoid doing so on uninstall; see pkgconfig_save()
// for details).
//
// Also, if you are wondering why don't we just always produce this .pc,
@@ -2691,7 +2874,7 @@ namespace build2
// There is a further complication: we may have no intention of
// installing the library but still need to update it for install (see
// install_scope() for background). In which case we may still not have
- // the installation directories. We handle this in pkconfig_save() by
+ // the installation directories. We handle this in pkgconfig_save() by
// skipping the generation of .pc files (and letting the install rule
// complain if we do end up trying to install them).
//
@@ -2708,8 +2891,12 @@ namespace build2
if (!m->is_a (la ? pca::static_type : pcs::static_type))
{
- if (t.group->matched (a))
+ if (operator>= (t.group->decl, target_decl::implied) // @@ VC14
+ ? t.group->matched (a)
+ : true)
+ {
pkgconfig_save (a, t, la, true /* common */, binless);
+ }
else
// Mark as non-existent not to confuse the install rule.
//
@@ -2821,14 +3008,19 @@ namespace build2
try
{
+ // We assume that what we write to stdin is small enough to
+ // fit into the pipe's buffer without blocking.
+ //
process pr (rc,
args,
- -1 /* stdin */,
- 1 /* stdout */,
- 2 /* stderr */,
- nullptr /* cwd */,
+ -1 /* stdin */,
+ 1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */,
+ nullptr /* cwd */,
env_ptrs.empty () ? nullptr : env_ptrs.data ());
+ diag_buffer dbuf (ctx, args[0], pr);
+
try
{
ofdstream os (move (pr.out_fd));
@@ -2852,7 +3044,8 @@ namespace build2
// was caused by that and let run_finish() deal with it.
}
- run_finish (args, pr);
+ dbuf.read ();
+ run_finish (dbuf, args, pr, 2 /* verbosity */);
}
catch (const process_error& e)
{
@@ -2907,6 +3100,8 @@ namespace build2
{
// For VC we use link.exe directly.
//
+ // Note: go straight for the public variable pool.
+ //
const string& cs (
cast<string> (
rs[tsys == "win32-msvc"
@@ -3116,10 +3311,72 @@ namespace build2
rpath_libraries (sargs, bs, a, t, li, for_install /* link */);
lookup l;
-
if ((l = t["bin.rpath"]) && !l->empty ())
+ {
+ // See if we need to make the specified paths relative using the
+ // $ORIGIN (Linux, BSD) or @loader_path (Mac OS) mechanisms.
+ //
+ optional<dir_path> origin;
+ if (for_install && cast_false<bool> (rs["install.relocatable"]))
+ {
+ // Note that both $ORIGIN and @loader_path will be expanded to
+ // the path of the binary that we are building (executable or
+ // shared library) as opposed to top-level executable.
+ //
+ path p (install::resolve_file (t));
+
+ // If the file is not installable then the install.relocatable
+ // semantics does not apply, naturally.
+ //
+ if (!p.empty ())
+ origin = p.directory ();
+ }
+
+ bool origin_used (false);
for (const dir_path& p: cast<dir_paths> (l))
- sargs.push_back ("-Wl,-rpath," + p.string ());
+ {
+ string o ("-Wl,-rpath,");
+
+ // Note that we only rewrite absolute paths so if the user
+ // specified $ORIGIN or @loader_path manually, we will pass it
+ // through as is.
+ //
+ if (origin && p.absolute ())
+ {
+ dir_path l;
+ try
+ {
+ l = p.relative (*origin);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make rpath " << p << " relative to "
+ << *origin <<
+ info << "required for relocatable installation";
+ }
+
+ o += (tclass == "macos" ? "@loader_path" : "$ORIGIN");
+
+ if (!l.empty ())
+ {
+ o += path_traits::directory_separator;
+ o += l.string ();
+ }
+
+ origin_used = true;
+ }
+ else
+ o += p.string ();
+
+ sargs.push_back (move (o));
+ }
+
+ // According to the Internet, `-Wl,-z,origin` is not needed except
+ // potentially for older BSDs.
+ //
+ if (origin_used && tclass == "bsd")
+ sargs.push_back ("-Wl,-z,origin");
+ }
if ((l = t["bin.rpath_link"]) && !l->empty ())
{
@@ -3153,25 +3410,24 @@ namespace build2
// Extra system library dirs (last).
//
- assert (sys_lib_dirs_extra <= sys_lib_dirs.size ());
+ assert (sys_lib_dirs_mode + sys_lib_dirs_extra <= sys_lib_dirs.size ());
+
+ // Note that the mode options are added as part of cmode.
+ //
+ auto b (sys_lib_dirs.begin () + sys_lib_dirs_mode);
+ auto x (b + sys_lib_dirs_extra);
if (tsys == "win32-msvc")
{
// If we have no LIB environment variable set, then we add all of
// them. But we want extras to come first.
//
- // Note that the mode options are added as part of cmode.
- //
- auto b (sys_lib_dirs.begin () + sys_lib_dirs_mode);
- auto m (sys_lib_dirs.begin () + sys_lib_dirs_extra);
- auto e (sys_lib_dirs.end ());
-
- for (auto i (m); i != e; ++i)
+ for (auto i (b); i != x; ++i)
sargs1.push_back ("/LIBPATH:" + i->string ());
if (!getenv ("LIB"))
{
- for (auto i (b); i != m; ++i)
+ for (auto i (x), e (sys_lib_dirs.end ()); i != e; ++i)
sargs1.push_back ("/LIBPATH:" + i->string ());
}
@@ -3182,7 +3438,7 @@ namespace build2
append_option_values (
args,
"-L",
- sys_lib_dirs.begin () + sys_lib_dirs_extra, sys_lib_dirs.end (),
+ b, x,
[] (const dir_path& d) {return d.string ().c_str ();});
}
}
@@ -3278,7 +3534,7 @@ namespace build2
&cs, &update, mt,
bs, a, *f, la, p.data, li,
for_install, true, true, &lc);
- f = nullptr; // Timestamp checked by hash_libraries().
+ f = nullptr; // Timestamp checked by append_libraries().
}
else
{
@@ -3367,6 +3623,10 @@ namespace build2
//
path relt (relative (tp));
+ path reli; // Import library.
+ if (lt.shared_library () && (tsys == "win32-msvc" || tsys == "mingw32"))
+ reli = relative (find_adhoc_member<libi> (t)->path ());
+
const process_path* ld (nullptr);
if (lt.static_library ())
{
@@ -3498,7 +3758,7 @@ namespace build2
// derived from the import library by changing the extension.
// Lucky for us -- there is no option to name it.
//
- out2 += relative (find_adhoc_member<libi> (t)->path ()).string ();
+ out2 += reli.string ();
}
else
{
@@ -3511,14 +3771,17 @@ namespace build2
// If we have /DEBUG then name the .pdb file. It is an ad hoc group
// member.
//
- if (find_option ("/DEBUG", args, true))
+ if (const char* o = find_option_prefix ("/DEBUG", args, true))
{
- const file& pdb (
- *find_adhoc_member<file> (t, *bs.find_target_type ("pdb")));
+ if (icasecmp (o, "/DEBUG:NONE") != 0)
+ {
+ const file& pdb (
+ *find_adhoc_member<file> (t, *bs.find_target_type ("pdb")));
- out1 = "/PDB:";
- out1 += relative (pdb.path ()).string ();
- args.push_back (out1.c_str ());
+ out1 = "/PDB:";
+ out1 += relative (pdb.path ()).string ();
+ args.push_back (out1.c_str ());
+ }
}
out = "/OUT:" + relt.string ();
@@ -3532,6 +3795,8 @@ namespace build2
{
ld = &cpath;
+ append_diag_color_options (args);
+
// Add the option that triggers building a shared library and
// take care of any extras (e.g., import library).
//
@@ -3547,8 +3812,7 @@ namespace build2
// On Windows libs{} is the DLL and an ad hoc group member
// is the import library.
//
- const file& imp (*find_adhoc_member<libi> (t));
- out = "-Wl,--out-implib=" + relative (imp.path ()).string ();
+ out = "-Wl,--out-implib=" + reli.string ();
args.push_back (out.c_str ());
}
}
@@ -3705,17 +3969,43 @@ namespace build2
try_rmfile (relt, true);
}
+ // We have no choice but to serialize early if we want the command line
+ // printed shortly before actually executing the linker. Failed that, it
+ // may look like we are still executing in parallel.
+ //
+ scheduler::alloc_guard jobs_ag;
+ if (!ctx.dry_run && cast_false<bool> (t[c_serialize]))
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, phase_unlock (nullptr));
+
if (verb == 1)
- text << (lt.static_library () ? "ar " : "ld ") << t;
+ print_diag (lt.static_library () ? "ar" : "ld", t);
else if (verb == 2)
print_process (args);
+ // Do any necessary fixups to the command line to make it runnable.
+ //
+ // Notice the split in the diagnostics: at verbosity level 1 we print
+ // the "logical" command line while at level 2 and above -- what we are
+ // actually executing.
+ //
+ // We also need to save the original for the diag_buffer::close() call
+ // below if at verbosity level 1.
+ //
+ cstrings oargs;
+
// Adjust linker parallelism.
//
+ // Note that we are not going to bother with oargs for this.
+ //
+ // Note also that we now have scheduler::serialize() which allows us to
+ // block until full parallelism is available (this mode can currently
+ // be forced with cc.serialize=true; maybe we should invent something
+ // like config.cc.link_serialize or some such which can be used when
+ // LTO is enabled).
+ //
string jobs_arg;
- scheduler::alloc_guard jobs_extra;
- if (!lt.static_library ())
+ if (!ctx.dry_run && !lt.static_library ())
{
switch (ctype)
{
@@ -3731,8 +4021,10 @@ namespace build2
auto i (find_option_prefix ("-flto", args.rbegin (), args.rend ()));
if (i != args.rend () && strcmp (*i, "-flto=auto") == 0)
{
- jobs_extra = scheduler::alloc_guard (ctx.sched, 0);
- jobs_arg = "-flto=" + to_string (1 + jobs_extra.n);
+ if (jobs_ag.n == 0) // Might already have (see above).
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, 0);
+
+ jobs_arg = "-flto=" + to_string (1 + jobs_ag.n);
*i = jobs_arg.c_str ();
}
break;
@@ -3750,8 +4042,10 @@ namespace build2
strcmp (*i, "-flto=thin") == 0 &&
!find_option_prefix ("-flto-jobs=", args))
{
- jobs_extra = scheduler::alloc_guard (ctx.sched, 0);
- jobs_arg = "-flto-jobs=" + to_string (1 + jobs_extra.n);
+ if (jobs_ag.n == 0) // Might already have (see above).
+ jobs_ag = scheduler::alloc_guard (*ctx.sched, 0);
+
+ jobs_arg = "-flto-jobs=" + to_string (1 + jobs_ag.n);
args.insert (i.base (), jobs_arg.c_str ()); // After -flto=thin.
}
break;
@@ -3762,12 +4056,6 @@ namespace build2
}
}
- // Do any necessary fixups to the command line to make it runnable.
- //
- // Notice the split in the diagnostics: at verbosity level 1 we print
- // the "logical" command line while at level 2 and above -- what we are
- // actually executing.
- //
// On Windows we need to deal with the command line length limit. The
// best workaround seems to be passing (part of) the command line in an
// "options file" ("response file" in Microsoft's terminology). Both
@@ -3853,19 +4141,20 @@ namespace build2
fail << "unable to write to " << f << ": " << e;
}
+ if (verb == 1)
+ oargs = args;
+
// Replace input arguments with @file.
//
targ = '@' + f.string ();
args.resize (args_input);
args.push_back (targ.c_str());
args.push_back (nullptr);
-
- //@@ TODO: leave .t file if linker failed and verb > 2?
}
}
#endif
- if (verb > 2)
+ if (verb >= 3)
print_process (args);
// Remove the target file if any of the subsequent (after the linker)
@@ -3883,52 +4172,51 @@ namespace build2
{
// VC tools (both lib.exe and link.exe) send diagnostics to stdout.
// Also, link.exe likes to print various gratuitous messages. So for
- // link.exe we redirect stdout to a pipe, filter that noise out, and
- // send the rest to stderr.
+ // link.exe we filter that noise out.
//
// For lib.exe (and any other insane linker that may try to pull off
// something like this) we are going to redirect stdout to stderr.
// For sane compilers this should be harmless.
//
// Note that we don't need this for LLD's link.exe replacement which
- // is quiet.
+ // is thankfully quiet.
//
bool filter (tsys == "win32-msvc" &&
!lt.static_library () &&
cast<string> (rs["bin.ld.id"]) != "msvc-lld");
process pr (*ld,
- args.data (),
- 0 /* stdin */,
- (filter ? -1 : 2) /* stdout */,
- 2 /* stderr */,
- nullptr /* cwd */,
+ args,
+ 0 /* stdin */,
+ 2 /* stdout */,
+ diag_buffer::pipe (ctx, filter /* force */) /* stderr */,
+ nullptr /* cwd */,
env_ptrs.empty () ? nullptr : env_ptrs.data ());
+ diag_buffer dbuf (ctx, args[0], pr);
+
if (filter)
+ msvc_filter_link (dbuf, t, ot);
+
+ dbuf.read ();
+
{
- try
- {
- ifdstream is (
- move (pr.in_ofd), fdstream_mode::text, ifdstream::badbit);
+ bool e (pr.wait ());
- msvc_filter_link (is, t, ot);
+#ifdef _WIN32
+ // Keep the options file if we have shown it.
+ //
+ if (!e && verb >= 3)
+ trm.cancel ();
+#endif
- // If anything remains in the stream, send it all to stderr.
- // Note that the eof check is important: if the stream is at
- // eof, this and all subsequent writes to the diagnostics stream
- // will fail (and you won't see a thing).
- //
- if (is.peek () != ifdstream::traits_type::eof ())
- diag_stream_lock () << is.rdbuf ();
+ dbuf.close (oargs.empty () ? args : oargs,
+ *pr.exit,
+ 1 /* verbosity */);
- is.close ();
- }
- catch (const io_error&) {} // Assume exits with error.
+ if (!e)
+ throw failed ();
}
-
- run_finish (args, pr);
- jobs_extra.deallocate ();
}
catch (const process_error& e)
{
@@ -3950,12 +4238,24 @@ namespace build2
throw failed ();
}
- // Clean up executable's import library (see above for details).
+ // Clean up executable's import library (see above for details). And
+ // make sure we have an import library for a shared library.
//
- if (lt.executable () && tsys == "win32-msvc")
+ if (tsys == "win32-msvc")
{
- try_rmfile (relt + ".lib", true /* ignore_errors */);
- try_rmfile (relt + ".exp", true /* ignore_errors */);
+ if (lt.executable ())
+ {
+ try_rmfile (relt + ".lib", true /* ignore_errors */);
+ try_rmfile (relt + ".exp", true /* ignore_errors */);
+ }
+ else if (lt.shared_library ())
+ {
+ if (!file_exists (reli,
+ false /* follow_symlinks */,
+ true /* ignore_error */))
+ fail << "linker did not produce import library " << reli <<
+ info << "perhaps this library does not export any symbols?";
+ }
}
// Set executable bit on the .js file so that it can be run with a
@@ -3987,12 +4287,17 @@ namespace build2
print_process (args);
if (!ctx.dry_run)
- run (rl,
+ {
+ run (ctx,
+ rl,
args,
- dir_path () /* cwd */,
+ 1 /* finish_verbosity */,
env_ptrs.empty () ? nullptr : env_ptrs.data ());
+ }
}
+ jobs_ag.deallocate ();
+
// For Windows generate (or clean up) rpath-emulating assembly.
//
if (tclass == "windows")
@@ -4172,5 +4477,25 @@ namespace build2
return perform_clean_extra (a, t, extras, adhoc_extras);
}
+
+ const target* link_rule::
+ import (const prerequisite_key& pk,
+ const optional<string>&,
+ const location&) const
+ {
+ tracer trace (x, "link_rule::import");
+
+ // @@ TODO: do we want to make metadata loading optional?
+ //
+ optional<dir_paths> usr_lib_dirs;
+ const target* r (search_library (nullopt /* action */,
+ sys_lib_dirs, usr_lib_dirs,
+ pk));
+
+ if (r == nullptr)
+ l4 ([&]{trace << "unable to find installed library " << pk;});
+
+ return r;
+ }
}
}
diff --git a/libbuild2/cc/link-rule.hxx b/libbuild2/cc/link-rule.hxx
index 14a70d2..9b491c2 100644
--- a/libbuild2/cc/link-rule.hxx
+++ b/libbuild2/cc/link-rule.hxx
@@ -59,6 +59,11 @@ namespace build2
target_state
perform_clean (action, const target&, match_data&) const;
+ virtual const target*
+ import (const prerequisite_key&,
+ const optional<string>&,
+ const location&) const override;
+
public:
// Library handling.
//
diff --git a/libbuild2/cc/module.cxx b/libbuild2/cc/module.cxx
index c930d49..cf6c6e4 100644
--- a/libbuild2/cc/module.cxx
+++ b/libbuild2/cc/module.cxx
@@ -11,10 +11,7 @@
#include <libbuild2/bin/target.hxx>
-#include <libbuild2/cc/target.hxx> // pc*
-
#include <libbuild2/config/utility.hxx>
-#include <libbuild2/install/utility.hxx>
#include <libbuild2/cc/guess.hxx>
@@ -30,6 +27,8 @@ namespace build2
{
tracer trace (x, "guess_init");
+ context& ctx (rs.ctx);
+
bool cc_loaded (cast_false<bool> (rs["cc.core.guess.loaded"]));
// Adjust module priority (compiler). Also order cc module before us
@@ -41,7 +40,10 @@ namespace build2
config::save_module (rs, x, 250);
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// Must already exist.
//
@@ -55,7 +57,7 @@ namespace build2
// config.x
//
- strings mode;
+ strings omode; // Original mode.
{
// Normally we will have a persistent configuration and computing the
// default value every time will be a waste. So try without a default
@@ -139,21 +141,34 @@ namespace build2
fail << "invalid path '" << s << "' in " << config_x;
}
- mode.assign (++v.begin (), v.end ());
+ omode.assign (++v.begin (), v.end ());
// Save original path/mode in *.config.path/mode.
//
rs.assign (x_c_path) = xc;
- rs.assign (x_c_mode) = mode;
+ rs.assign (x_c_mode) = omode;
+
+ // Merge the configured mode options into user-specified (which must
+ // be done before loading the *.guess module).
+ //
+ // In particular, this ability to specify the compiler mode in a
+ // buildfile is useful in embedded development where the project may
+ // need to hardcode things like -target, -nostdinc, etc.
+ //
+ const strings& mode (cast<strings> (rs.assign (x_mode) += omode));
// Figure out which compiler we are dealing with, its target, etc.
//
// Note that we could allow guess() to modify mode to support
// imaginary options (such as /MACHINE for cl.exe). Though it's not
// clear what cc.mode would contain (original or modified). Note that
- // we are now folding *.std options into mode options.
+ // we are now adding *.std options into mode options.
+ //
+ // @@ But can't the language standard options alter things like search
+ // directories?
//
x_info = &build2::cc::guess (
+ ctx,
x, x_lang,
rs.root_extra->environment_checksum,
move (xc),
@@ -180,7 +195,8 @@ namespace build2
if (config_sub)
{
- ct = run<string> (3,
+ ct = run<string> (ctx,
+ 3,
*config_sub,
xi.target.c_str (),
[] (string& l, bool) {return move (l);});
@@ -218,9 +234,10 @@ namespace build2
// Assign values to variables that describe the compiler.
//
+ // Note: x_mode is dealt with above.
+ //
rs.assign (x_path) = process_path_ex (
xi.path, x_name, xi.checksum, env_checksum);
- const strings& xm (cast<strings> (rs.assign (x_mode) = move (mode)));
rs.assign (x_id) = xi.id.string ();
rs.assign (x_id_type) = to_string (xi.id.type);
@@ -265,9 +282,9 @@ namespace build2
//
if (!cc_loaded)
{
- // Prepare configuration hints.
+ // Prepare configuration hints (pretend it belongs to root scope).
//
- variable_map h (rs.ctx);
+ variable_map h (rs);
// Note that all these variables have already been registered.
//
@@ -278,8 +295,8 @@ namespace build2
if (!xi.pattern.empty ())
h.assign ("config.cc.pattern") = xi.pattern;
- if (!xm.empty ())
- h.assign ("config.cc.mode") = xm;
+ if (!omode.empty ())
+ h.assign ("config.cc.mode") = move (omode);
h.assign (c_runtime) = xi.runtime;
h.assign (c_stdlib) = xi.c_stdlib;
@@ -350,6 +367,8 @@ namespace build2
# ifdef __APPLE__
static const dir_path a_usr_inc (
"/Library/Developer/CommandLineTools/SDKs/MacOSX*.sdk/usr/include");
+ static const dir_path a_usr_lib (
+ "/Library/Developer/CommandLineTools/SDKs/MacOSX*.sdk/usr/lib");
# endif
#endif
@@ -376,7 +395,9 @@ namespace build2
//
if (!cast_false<bool> (rs["cc.core.config.loaded"]))
{
- variable_map h (rs.ctx);
+ // Prepare configuration hints (pretend it belongs to root scope).
+ //
+ variable_map h (rs);
if (!xi.bin_pattern.empty ())
h.assign ("config.bin.pattern") = xi.bin_pattern;
@@ -602,10 +623,10 @@ namespace build2
switch (xi.class_)
{
case compiler_class::gcc:
- lib_dirs = gcc_library_search_dirs (xi.path, rs);
+ lib_dirs = gcc_library_search_dirs (xi, rs);
break;
case compiler_class::msvc:
- lib_dirs = msvc_library_search_dirs (xi.path, rs);
+ lib_dirs = msvc_library_search_dirs (xi, rs);
break;
}
}
@@ -619,10 +640,10 @@ namespace build2
switch (xi.class_)
{
case compiler_class::gcc:
- hdr_dirs = gcc_header_search_dirs (xi.path, rs);
+ hdr_dirs = gcc_header_search_dirs (xi, rs);
break;
case compiler_class::msvc:
- hdr_dirs = msvc_header_search_dirs (xi.path, rs);
+ hdr_dirs = msvc_header_search_dirs (xi, rs);
break;
}
}
@@ -640,8 +661,8 @@ namespace build2
sys_hdr_dirs_mode = hdr_dirs.second;
sys_mod_dirs_mode = mod_dirs ? mod_dirs->second : 0;
- sys_lib_dirs_extra = lib_dirs.first.size ();
- sys_hdr_dirs_extra = hdr_dirs.first.size ();
+ sys_lib_dirs_extra = 0;
+ sys_hdr_dirs_extra = 0;
#ifndef _WIN32
// Add /usr/local/{include,lib}. We definitely shouldn't do this if we
@@ -657,11 +678,11 @@ namespace build2
// on the next invocation.
//
{
- auto& is (hdr_dirs.first);
+ auto& hs (hdr_dirs.first);
auto& ls (lib_dirs.first);
- bool ui (find (is.begin (), is.end (), usr_inc) != is.end ());
- bool uli (find (is.begin (), is.end (), usr_loc_inc) != is.end ());
+ bool ui (find (hs.begin (), hs.end (), usr_inc) != hs.end ());
+ bool uli (find (hs.begin (), hs.end (), usr_loc_inc) != hs.end ());
#ifdef __APPLE__
// On Mac OS starting from 10.14 there is no longer /usr/include.
@@ -684,15 +705,28 @@ namespace build2
//
// Is Apple's /usr/include.
//
- if (!ui && !uli)
+ // Also, it appears neither Clang nor GCC report MacOSX*.sdk/usr/lib
+ // with -print-search-dirs but they do search in there. So we add it
+ // to our list if we see MacOSX*.sdk/usr/include.
+ //
+ auto aui (find_if (hs.begin (), hs.end (),
+ [] (const dir_path& d)
+ {
+ return path_match (d, a_usr_inc);
+ }));
+
+ if (aui != hs.end ())
{
- for (const dir_path& d: is)
+ if (!ui)
+ ui = true;
+
+ if (find_if (ls.begin (), ls.end (),
+ [] (const dir_path& d)
+ {
+ return path_match (d, a_usr_lib);
+ }) == ls.end ())
{
- if (path_match (d, a_usr_inc))
- {
- ui = true;
- break;
- }
+ ls.push_back (aui->directory () /= "lib");
}
}
#endif
@@ -700,18 +734,29 @@ namespace build2
{
bool ull (find (ls.begin (), ls.end (), usr_loc_lib) != ls.end ());
- // Many platforms don't search in /usr/local/lib by default (but do
- // for headers in /usr/local/include). So add it as the last option.
+ // Many platforms don't search in /usr/local/lib by default but do
+ // for headers in /usr/local/include.
+ //
+ // Note that customarily /usr/local/include is searched before
+ // /usr/include so we add /usr/local/lib before built-in entries
+ // (there isn't really a way to add it after since all we can do is
+ // specify it with -L).
//
if (!ull && exists (usr_loc_lib, true /* ignore_error */))
- ls.push_back (usr_loc_lib);
+ {
+ ls.insert (ls.begin () + sys_lib_dirs_mode, usr_loc_lib);
+ ++sys_lib_dirs_extra;
+ }
// FreeBSD is at least consistent: it searches in neither. Quoting
// its wiki: "FreeBSD can't even find libraries that it installed."
// So let's help it a bit.
//
if (!uli && exists (usr_loc_inc, true /* ignore_error */))
- is.push_back (usr_loc_inc);
+ {
+ hs.insert (hs.begin () + sys_hdr_dirs_mode, usr_loc_inc);
+ ++sys_hdr_dirs_extra;
+ }
}
}
#endif
@@ -815,8 +860,11 @@ namespace build2
dr << "\n hdr dirs";
for (size_t i (0); i != incs.size (); ++i)
{
- if (i == sys_hdr_dirs_extra)
+ if ((sys_hdr_dirs_mode != 0 && i == sys_hdr_dirs_mode) ||
+ (sys_hdr_dirs_extra != 0 &&
+ i == sys_hdr_dirs_extra + sys_hdr_dirs_mode))
dr << "\n --";
+
dr << "\n " << incs[i];
}
}
@@ -826,8 +874,11 @@ namespace build2
dr << "\n lib dirs";
for (size_t i (0); i != libs.size (); ++i)
{
- if (i == sys_lib_dirs_extra)
+ if ((sys_lib_dirs_mode != 0 && i == sys_lib_dirs_mode) ||
+ (sys_lib_dirs_extra != 0 &&
+ i == sys_lib_dirs_extra + sys_lib_dirs_mode))
dr << "\n --";
+
dr << "\n " << libs[i];
}
}
@@ -948,40 +999,7 @@ namespace build2
// Register target types and configure their "installability".
//
- bool install_loaded (cast_false<bool> (rs["install.loaded"]));
-
- {
- using namespace install;
-
- rs.insert_target_type (x_src);
-
- auto insert_hdr = [&rs, install_loaded] (const target_type& tt)
- {
- rs.insert_target_type (tt);
-
- // Install headers into install.include.
- //
- if (install_loaded)
- install_path (rs, tt, dir_path ("include"));
- };
-
- // Note: module (x_mod) is in x_hdr.
- //
- for (const target_type* const* ht (x_hdr); *ht != nullptr; ++ht)
- insert_hdr (**ht);
-
- // Also register the C header for C-derived languages.
- //
- if (*x_hdr != &h::static_type)
- insert_hdr (h::static_type);
-
- rs.insert_target_type<pc> ();
- rs.insert_target_type<pca> ();
- rs.insert_target_type<pcs> ();
-
- if (install_loaded)
- install_path<pc> (rs, dir_path ("pkgconfig"));
- }
+ load_module (rs, rs, (string (x) += ".types"), loc);
// Register rules.
//
@@ -1079,8 +1097,11 @@ namespace build2
// them in case they depend on stuff that we need to install (see the
// install rule implementations for details).
//
- if (install_loaded)
+ if (cast_false<bool> (rs["install.loaded"]))
{
+ // Note: we rely quite heavily in these rule implementations that
+ // these are the only target types they are registered for.
+
const install_rule& ir (*this);
r.insert<exe> (perform_install_id, x_install, ir);
diff --git a/libbuild2/cc/module.hxx b/libbuild2/cc/module.hxx
index 2a8611b..4213516 100644
--- a/libbuild2/cc/module.hxx
+++ b/libbuild2/cc/module.hxx
@@ -17,6 +17,7 @@
#include <libbuild2/cc/compile-rule.hxx>
#include <libbuild2/cc/link-rule.hxx>
#include <libbuild2/cc/install-rule.hxx>
+#include <libbuild2/cc/predefs-rule.hxx>
#include <libbuild2/cc/export.hxx>
@@ -115,18 +116,18 @@ namespace build2
// Defined in gcc.cxx.
//
pair<dir_paths, size_t>
- gcc_header_search_dirs (const process_path&, scope&) const;
+ gcc_header_search_dirs (const compiler_info&, scope&) const;
pair<dir_paths, size_t>
- gcc_library_search_dirs (const process_path&, scope&) const;
+ gcc_library_search_dirs (const compiler_info&, scope&) const;
// Defined in msvc.cxx.
//
pair<dir_paths, size_t>
- msvc_header_search_dirs (const process_path&, scope&) const;
+ msvc_header_search_dirs (const compiler_info&, scope&) const;
pair<dir_paths, size_t>
- msvc_library_search_dirs (const process_path&, scope&) const;
+ msvc_library_search_dirs (const compiler_info&, scope&) const;
};
class LIBBUILD2_CC_SYMEXPORT module: public build2::module,
@@ -134,7 +135,8 @@ namespace build2
public link_rule,
public compile_rule,
public install_rule,
- public libux_install_rule
+ public libux_install_rule,
+ public predefs_rule
{
public:
explicit
@@ -143,7 +145,8 @@ namespace build2
link_rule (move (d)),
compile_rule (move (d), rs),
install_rule (move (d), *this),
- libux_install_rule (move (d), *this) {}
+ libux_install_rule (move (d), *this),
+ predefs_rule (move (d)) {}
void
init (scope&,
diff --git a/libbuild2/cc/msvc.cxx b/libbuild2/cc/msvc.cxx
index f95cab0..d21969c 100644
--- a/libbuild2/cc/msvc.cxx
+++ b/libbuild2/cc/msvc.cxx
@@ -164,18 +164,21 @@ namespace build2
// Filter cl.exe and link.exe noise.
//
+ // Note: must be followed with the dbuf.read() call.
+ //
void
- msvc_filter_cl (ifdstream& is, const path& src)
+ msvc_filter_cl (diag_buffer& dbuf, const path& src)
+ try
{
      // While it appears VC always prints the source name (even if the
// file does not exist), let's do a sanity check. Also handle the
// command line errors/warnings which come before the file name.
//
- for (string l; !eof (getline (is, l)); )
+ for (string l; !eof (getline (dbuf.is, l)); )
{
if (l != src.leaf ().string ())
{
- diag_stream_lock () << l << endl;
+ dbuf.write (l, true /* newline */);
if (msvc_sense_diag (l, 'D').first != string::npos)
continue;
@@ -184,14 +187,19 @@ namespace build2
break;
}
}
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << dbuf.args0 << " stderr: " << e;
+ }
void
- msvc_filter_link (ifdstream& is, const file& t, otype lt)
+ msvc_filter_link (diag_buffer& dbuf, const file& t, otype lt)
+ try
{
// Filter lines until we encounter something we don't recognize. We also
// have to assume the messages can be translated.
//
- for (string l; getline (is, l); )
+ for (string l; getline (dbuf.is, l); )
{
// " Creating library foo\foo.dll.lib and object foo\foo.dll.exp"
//
@@ -216,12 +224,15 @@ namespace build2
// /INCREMENTAL causes linker to sometimes issue messages but now I
// can't quite reproduce it.
- //
- diag_stream_lock () << l << endl;
+ dbuf.write (l, true /* newline */);
break;
}
}
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << dbuf.args0 << " stderr: " << e;
+ }
void
msvc_extract_header_search_dirs (const strings& v, dir_paths& r)
@@ -253,6 +264,13 @@ namespace build2
}
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -260,10 +278,7 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
+ r.push_back (move (d));
}
}
@@ -284,6 +299,13 @@ namespace build2
d = dir_path (o, 9, string::npos);
else
continue;
+
+ // Ignore relative paths. Or maybe we should warn?
+ //
+ if (d.relative ())
+ continue;
+
+ d.normalize ();
}
catch (const invalid_path& e)
{
@@ -291,10 +313,7 @@ namespace build2
<< o << "'";
}
- // Ignore relative paths. Or maybe we should warn?
- //
- if (!d.relative ())
- r.push_back (move (d));
+ r.push_back (move (d));
}
}
@@ -313,7 +332,7 @@ namespace build2
{
try
{
- r.push_back (dir_path (move (d)));
+ r.push_back (dir_path (move (d)).normalize ());
}
catch (const invalid_path&)
{
@@ -326,7 +345,7 @@ namespace build2
// Extract system header search paths from MSVC.
//
pair<dir_paths, size_t> config_module::
- msvc_header_search_dirs (const process_path&, scope& rs) const
+ msvc_header_search_dirs (const compiler_info&, scope& rs) const
{
// MSVC doesn't have any built-in paths and all of them either come from
// the INCLUDE environment variable or are specified explicitly on the
@@ -354,7 +373,7 @@ namespace build2
// Extract system library search paths from MSVC.
//
pair<dir_paths, size_t> config_module::
- msvc_library_search_dirs (const process_path&, scope& rs) const
+ msvc_library_search_dirs (const compiler_info&, scope& rs) const
{
// MSVC doesn't seem to have any built-in paths and all of them either
// come from the LIB environment variable or are specified explicitly on
@@ -379,9 +398,22 @@ namespace build2
// Inspect the file and determine if it is static or import library.
// Return otype::e if it is neither (which we quietly ignore).
//
+ static global_cache<otype> library_type_cache;
+
static otype
library_type (const process_path& ld, const path& l)
{
+ string key;
+ {
+ sha256 cs;
+ cs.append (ld.effect_string ());
+ cs.append (l.string ());
+ key = cs.string ();
+
+ if (const otype* r = library_type_cache.find (key))
+ return *r;
+ }
+
    // There are several reasonably reliable methods to tell whether it is a
// static or import library. One is lib.exe /LIST -- if there aren't any
// .obj members, then it is most likely an import library (it can also
@@ -422,9 +454,9 @@ namespace build2
//
process pr (run_start (ld,
args,
- 0 /* stdin */,
- -1 /* stdout */,
- false /* error */));
+ 0 /* stdin */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
bool obj (false), dll (false);
string s;
@@ -447,14 +479,11 @@ namespace build2
// libhello\hello.lib.obj
// hello-0.1.0-a.0.19700101000000.dll
//
- // Archive member name at 746: [...]hello.dll[/][ ]*
- // Archive member name at 8C70: [...]hello.lib.obj[/][ ]*
- //
size_t n (s.size ());
for (; n != 0 && s[n - 1] == ' '; --n) ; // Skip trailing spaces.
- if (n >= 7) // At least ": X.obj" or ": X.dll".
+ if (n >= 5) // At least "X.obj" or "X.dll".
{
n -= 4; // Beginning of extension.
@@ -480,7 +509,7 @@ namespace build2
io = true;
}
- if (!run_finish_code (args, pr, s) || io)
+ if (!run_finish_code (args, pr, s, 2 /* verbosity */) || io)
{
diag_record dr;
dr << warn << "unable to detect " << l << " library type, ignoring" <<
@@ -489,23 +518,25 @@ namespace build2
return otype::e;
}
- if (obj && dll)
+ otype r;
+ if (obj != dll)
+ r = obj ? otype::a : otype::s;
+ else
{
- warn << l << " looks like hybrid static/import library, ignoring";
- return otype::e;
- }
+ if (obj && dll)
+ warn << l << " looks like hybrid static/import library, ignoring";
- if (!obj && !dll)
- {
- warn << l << " looks like empty static or import library, ignoring";
- return otype::e;
+ if (!obj && !dll)
+ warn << l << " looks like empty static or import library, ignoring";
+
+ r = otype::e;
}
- return obj ? otype::a : otype::s;
+ return library_type_cache.insert (move (key), r);
}
template <typename T>
- static T*
+ static pair<T*, bool>
msvc_search_library (const process_path& ld,
const dir_path& d,
const prerequisite_key& p,
@@ -551,20 +582,26 @@ namespace build2
//
timestamp mt (mtime (f));
- if (mt != timestamp_nonexistent && library_type (ld, f) == lt)
+ pair<T*, bool> r (nullptr, true);
+
+ if (mt != timestamp_nonexistent)
{
- // Enter the target.
- //
- T* t;
- common::insert_library (p.scope->ctx, t, name, d, ld, e, exist, trace);
- t->path_mtime (move (f), mt);
- return t;
+ if (library_type (ld, f) == lt)
+ {
+ // Enter the target.
+ //
+ common::insert_library (
+ p.scope->ctx, r.first, name, d, ld, e, exist, trace);
+ r.first->path_mtime (move (f), mt);
+ }
+ else
+ r.second = false; // Don't search for binless.
}
- return nullptr;
+ return r;
}
- liba* common::
+ pair<bin::liba*, bool> common::
msvc_search_static (const process_path& ld,
const dir_path& d,
const prerequisite_key& p,
@@ -572,14 +609,21 @@ namespace build2
{
tracer trace (x, "msvc_search_static");
- liba* r (nullptr);
+ liba* a (nullptr);
+ bool b (true);
- auto search = [&r, &ld, &d, &p, exist, &trace] (
+ auto search = [&a, &b, &ld, &d, &p, exist, &trace] (
const char* pf, const char* sf) -> bool
{
- r = msvc_search_library<liba> (
- ld, d, p, otype::a, pf, sf, exist, trace);
- return r != nullptr;
+ pair<liba*, bool> r (msvc_search_library<liba> (
+ ld, d, p, otype::a, pf, sf, exist, trace));
+
+ if (r.first != nullptr)
+ a = r.first;
+ else if (!r.second)
+ b = false;
+
+ return a != nullptr;
};
// Try:
@@ -592,10 +636,10 @@ namespace build2
search ("", "") ||
search ("lib", "") ||
search ("", "lib") ||
- search ("", "_static") ? r : nullptr;
+ search ("", "_static") ? make_pair (a, true) : make_pair (nullptr, b);
}
- libs* common::
+ pair<bin::libs*, bool> common::
msvc_search_shared (const process_path& ld,
const dir_path& d,
const prerequisite_key& pk,
@@ -606,12 +650,14 @@ namespace build2
assert (pk.scope != nullptr);
libs* s (nullptr);
+ bool b (true);
- auto search = [&s, &ld, &d, &pk, exist, &trace] (
+ auto search = [&s, &b, &ld, &d, &pk, exist, &trace] (
const char* pf, const char* sf) -> bool
{
- if (libi* i = msvc_search_library<libi> (
- ld, d, pk, otype::s, pf, sf, exist, trace))
+ pair<libi*, bool> r (msvc_search_library<libi> (
+ ld, d, pk, otype::s, pf, sf, exist, trace));
+ if (r.first != nullptr)
{
ulock l (
insert_library (
@@ -619,6 +665,8 @@ namespace build2
if (!exist)
{
+ libi* i (r.first);
+
if (l.owns_lock ())
{
s->adhoc_member = i; // We are first.
@@ -632,6 +680,8 @@ namespace build2
s->path_mtime (path (), i->mtime ());
}
}
+ else if (!r.second)
+ b = false;
return s != nullptr;
};
@@ -644,7 +694,7 @@ namespace build2
return
search ("", "") ||
search ("lib", "") ||
- search ("", "dll") ? s : nullptr;
+ search ("", "dll") ? make_pair (s, true) : make_pair (nullptr, b);
}
}
}
diff --git a/libbuild2/cc/parser.cxx b/libbuild2/cc/parser.cxx
index dc5093f..f62847e 100644
--- a/libbuild2/cc/parser.cxx
+++ b/libbuild2/cc/parser.cxx
@@ -15,9 +15,11 @@ namespace build2
using type = token_type;
void parser::
- parse (ifdstream& is, const path_name& in, unit& u)
+ parse (ifdstream& is, const path_name& in, unit& u, const compiler_id& cid)
{
- lexer l (is, in);
+ cid_ = &cid;
+
+ lexer l (is, in, true /* preprocessed */);
l_ = &l;
u_ = &u;
@@ -82,6 +84,12 @@ namespace build2
// to call it __import) or it can have a special attribute (GCC
// currently marks it with [[__translated]]).
//
+ // Similarly, MSVC drops the `module;` marker and replaces all
+ // other `module` keywords with `__preprocessed_module`.
+ //
+ // Clang doesn't appear to rewrite anything, at least as of
+ // version 18.
+ //
if (bb == 0 && t.first)
{
const string& id (t.value); // Note: tracks t.
@@ -102,7 +110,9 @@ namespace build2
// Fall through.
}
- if (id == "module")
+ if (id == "module" ||
+ (cid_->type == compiler_type::msvc &&
+ id == "__preprocessed_module"))
{
location_value l (get_location (t));
l_->next (t);
@@ -113,7 +123,9 @@ namespace build2
else
n = false;
}
- else if (id == "import" /*|| id == "__import"*/)
+ else if (id == "import" /* ||
+ (cid_->type == compiler_type::gcc &&
+ id == "__import")*/)
{
l_->next (t);
@@ -181,7 +193,7 @@ namespace build2
//
pair<string, bool> np (parse_module_name (t, true /* partition */));
- // Should be {}-balanced.
+ // Skip attributes (should be {}-balanced).
//
for (;
t.type != type::eos && t.type != type::semi && !t.first;
@@ -262,7 +274,7 @@ namespace build2
return;
}
- // Should be {}-balanced.
+ // Skip attributes (should be {}-balanced).
//
for (;
t.type != type::eos && t.type != type::semi && !t.first;
diff --git a/libbuild2/cc/parser.hxx b/libbuild2/cc/parser.hxx
index 1fbf1a3..0c2eb2d 100644
--- a/libbuild2/cc/parser.hxx
+++ b/libbuild2/cc/parser.hxx
@@ -10,6 +10,7 @@
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/cc/types.hxx>
+#include <libbuild2/cc/guess.hxx> // compiler_id
namespace build2
{
@@ -23,16 +24,19 @@ namespace build2
class parser
{
public:
+ // The compiler_id argument should identify the compiler that has done
+ // the preprocessing.
+ //
unit
- parse (ifdstream& is, const path_name& n)
+ parse (ifdstream& is, const path_name& n, const compiler_id& cid)
{
unit r;
- parse (is, n, r);
+ parse (is, n, r, cid);
return r;
}
void
- parse (ifdstream&, const path_name&, unit&);
+ parse (ifdstream&, const path_name&, unit&, const compiler_id&);
private:
void
@@ -54,6 +58,7 @@ namespace build2
string checksum; // Translation unit checksum.
private:
+ const compiler_id* cid_;
lexer* l_;
unit* u_;
diff --git a/libbuild2/cc/parser.test.cxx b/libbuild2/cc/parser.test.cxx
index 1d5930a..2270d32 100644
--- a/libbuild2/cc/parser.test.cxx
+++ b/libbuild2/cc/parser.test.cxx
@@ -44,7 +44,7 @@ namespace build2
}
parser p;
- unit u (p.parse (is, in));
+ unit u (p.parse (is, in, compiler_id (compiler_type::gcc, "")));
switch (u.type)
{
diff --git a/libbuild2/cc/pkgconfig-libpkg-config.cxx b/libbuild2/cc/pkgconfig-libpkg-config.cxx
new file mode 100644
index 0000000..ecbc019
--- /dev/null
+++ b/libbuild2/cc/pkgconfig-libpkg-config.cxx
@@ -0,0 +1,271 @@
+// file : libbuild2/cc/pkgconfig-libpkg-config.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_BOOTSTRAP
+
+#include <libbuild2/cc/pkgconfig.hxx>
+
+#include <new> // std::bad_alloc
+
+#include <libbuild2/diagnostics.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ // The package dependency traversal depth limit.
+ //
+ static const int max_depth = 100;
+
+ static void
+ error_handler (unsigned int,
+ const char* file,
+ size_t line,
+ const char* msg,
+ const pkg_config_client_t*,
+ const void*)
+ {
+ if (file != nullptr)
+ {
+ path_name n (file);
+ const location l (n, static_cast<uint64_t> (line));
+ error (l) << msg;
+ }
+ else
+ error << msg;
+ }
+
+ // Deleters.
+ //
+ struct fragments_deleter
+ {
+ void operator() (pkg_config_list_t* f) const
+ {
+ pkg_config_fragment_free (f);
+ }
+ };
+
+ // Convert fragments to strings. Skip the -I/-L options that refer to
+ // system directories.
+ //
+ static strings
+ to_strings (const pkg_config_list_t& frags,
+ char type,
+ const pkg_config_list_t& sysdirs)
+ {
+ assert (type == 'I' || type == 'L');
+
+ strings r;
+ auto add = [&r] (const pkg_config_fragment_t* frag)
+ {
+ string s;
+ if (frag->type != '\0')
+ {
+ s += '-';
+ s += frag->type;
+ }
+
+ s += frag->data;
+ r.push_back (move (s));
+ };
+
+ // Option that is separated from its value, for example:
+ //
+ // -I /usr/lib
+ //
+ const pkg_config_fragment_t* opt (nullptr);
+
+ pkg_config_node_t *node;
+ LIBPKG_CONFIG_FOREACH_LIST_ENTRY(frags.head, node)
+ {
+ auto frag (static_cast<const pkg_config_fragment_t*> (node->data));
+
+        // Add the separated option and directory, unless the latter is a
+        // system one.
+ //
+ if (opt != nullptr)
+ {
+ assert (frag->type == '\0'); // See pkg_config_fragment_add().
+
+ if (!pkg_config_path_match_list (frag->data, &sysdirs))
+ {
+ add (opt);
+ add (frag);
+ }
+
+ opt = nullptr;
+ continue;
+ }
+
+ // Skip the -I/-L option if it refers to a system directory.
+ //
+ if (frag->type == type)
+ {
+ // The option is separated from a value, that will (presumably)
+ // follow.
+ //
+ if (*frag->data == '\0')
+ {
+ opt = frag;
+ continue;
+ }
+
+ if (pkg_config_path_match_list (frag->data, &sysdirs))
+ continue;
+ }
+
+ add (frag);
+ }
+
+ if (opt != nullptr) // Add the dangling option.
+ add (opt);
+
+ return r;
+ }
+
+ // Note that some libpkg-config functions can potentially return NULL,
+ // failing to allocate the required memory block. However, we will not
+ // check the returned value for NULL as the library doesn't do so, prior
+ // to filling the allocated structures. So such a code complication on our
+ // side would be useless. Also, for some functions the NULL result has a
+ // special semantics, for example "not found". @@ TODO: can we fix this?
+ // This is now somewhat addressed, see the eflags argument in
+ // pkg_config_pkg_find().
+ //
+ pkgconfig::
+ pkgconfig (path_type p,
+ const dir_paths& pc_dirs,
+ const dir_paths& sys_lib_dirs,
+ const dir_paths& sys_hdr_dirs)
+ : path (move (p))
+ {
+ auto add_dirs = [] (pkg_config_list_t& dir_list,
+ const dir_paths& dirs,
+ bool suppress_dups)
+ {
+ for (const auto& d: dirs)
+ pkg_config_path_add (d.string ().c_str (), &dir_list, suppress_dups);
+ };
+
+ // Initialize the client handle.
+ //
+ // Note: omit initializing the filters from environment/defaults.
+ //
+ unique_ptr<pkg_config_client_t, void (*) (pkg_config_client_t*)> c (
+ pkg_config_client_new (&error_handler,
+ nullptr /* handler_data */,
+ false /* init_filters */),
+ [] (pkg_config_client_t* c) {pkg_config_client_free (c);});
+
+ if (c == nullptr)
+ throw std::bad_alloc ();
+
+ add_dirs (c->filter_libdirs, sys_lib_dirs, false /* suppress_dups */);
+ add_dirs (c->filter_includedirs, sys_hdr_dirs, false /* suppress_dups */);
+
+ // Note that the loaded file directory is added to the (for now empty)
+ // .pc file search list. Also note that loading of the dependency
+ // packages is delayed until the flags retrieval, and their file
+ // directories are not added to the search list.
+ //
+ // @@ Hm, is there a way to force this resolution? But we may not
+ // need this (e.g., only loading from variables).
+ //
+ unsigned int e;
+ pkg_ = pkg_config_pkg_find (c.get (), path.string ().c_str (), &e);
+
+ if (pkg_ == nullptr)
+ {
+ if (e == LIBPKG_CONFIG_ERRF_OK)
+ fail << "package '" << path << "' not found";
+ else
+ // Diagnostics should have already been issued except for allocation
+ // errors.
+ //
+ fail << "unable to load package '" << path << "'";
+ }
+
+ // Add the .pc file search directories.
+ //
+ assert (c->dir_list.length == 1); // Package file directory (see above).
+ add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */);
+
+ client_ = c.release ();
+ }
+
+ void pkgconfig::
+ free ()
+ {
+ assert (client_ != nullptr && pkg_ != nullptr);
+
+ pkg_config_pkg_unref (client_, pkg_);
+ pkg_config_client_free (client_);
+ }
+
+ strings pkgconfig::
+ cflags (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ pkg_config_client_set_flags (
+ client_,
+ // Walk through the private package dependencies (Requires.private)
+ // besides the public ones while collecting the flags. Note that we do
+ // this for both static and shared linking. @@ Hm, I wonder why...?
+ //
+ LIBPKG_CONFIG_PKG_PKGF_SEARCH_PRIVATE |
+
+ // Collect flags from Cflags.private besides those from Cflags for the
+ // static linking.
+ //
+ (stat
+ ? LIBPKG_CONFIG_PKG_PKGF_ADD_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkg_config_list_t f = LIBPKG_CONFIG_LIST_INITIALIZER; // Empty list.
+ int e (pkg_config_pkg_cflags (client_, pkg_, &f, max_depth));
+
+ if (e != LIBPKG_CONFIG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkg_config_list_t, fragments_deleter> fd (&f);
+ return to_strings (f, 'I', client_->filter_includedirs);
+ }
+
+ strings pkgconfig::
+ libs (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ pkg_config_client_set_flags (
+ client_,
+ // Additionally collect flags from the private dependency packages
+ // (see above) and from the Libs.private value for the static linking.
+ //
+ (stat
+ ? LIBPKG_CONFIG_PKG_PKGF_SEARCH_PRIVATE |
+ LIBPKG_CONFIG_PKG_PKGF_ADD_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkg_config_list_t f = LIBPKG_CONFIG_LIST_INITIALIZER; // Empty list.
+ int e (pkg_config_pkg_libs (client_, pkg_, &f, max_depth));
+
+ if (e != LIBPKG_CONFIG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkg_config_list_t, fragments_deleter> fd (&f);
+ return to_strings (f, 'L', client_->filter_libdirs);
+ }
+
+ optional<string> pkgconfig::
+ variable (const char* name) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ const char* r (pkg_config_tuple_find (client_, &pkg_->vars, name));
+ return r != nullptr ? optional<string> (r) : nullopt;
+ }
+ }
+}
+
+#endif // BUILD2_BOOTSTRAP
diff --git a/libbuild2/cc/pkgconfig-libpkgconf.cxx b/libbuild2/cc/pkgconfig-libpkgconf.cxx
new file mode 100644
index 0000000..f3754d3
--- /dev/null
+++ b/libbuild2/cc/pkgconfig-libpkgconf.cxx
@@ -0,0 +1,355 @@
+// file : libbuild2/cc/pkgconfig-libpkgconf.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef BUILD2_BOOTSTRAP
+
+#include <libbuild2/cc/pkgconfig.hxx>
+
+#include <libbuild2/diagnostics.hxx>
+
+// Note that the libpkgconf library did not used to provide the version macro
+// that we could use to compile the code conditionally against different API
+// versions. Thus, we need to sense the pkgconf_client_new() function
+// signature ourselves to call it properly.
+//
+namespace details
+{
+ void*
+ pkgconf_cross_personality_default (); // Never called.
+}
+
+using namespace details;
+
+template <typename H>
+static inline pkgconf_client_t*
+call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*),
+ H error_handler,
+ void* error_handler_data)
+{
+ return f (error_handler, error_handler_data);
+}
+
+template <typename H, typename P>
+static inline pkgconf_client_t*
+call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*, P),
+ H error_handler,
+ void* error_handler_data)
+{
+ return f (error_handler,
+ error_handler_data,
+ ::pkgconf_cross_personality_default ());
+}
+
+namespace build2
+{
+ namespace cc
+ {
+ // The libpkgconf library is not thread-safe, even on the pkgconf_client_t
+ // level (see issue #128 for details). While it seems that the obvious
+    // thread-safety issues are fixed, the default personality initialization
+    // is still not thread-safe. So let's keep the mutex for now not to
+ // introduce potential issues.
+ //
+ static mutex pkgconf_mutex;
+
+ // The package dependency traversal depth limit.
+ //
+ static const int pkgconf_max_depth = 100;
+
+ // Normally the error_handler() callback can be called multiple times to
+ // report a single error (once per message line), to produce a multi-line
+ // message like this:
+ //
+ // Package foo was not found in the pkg-config search path.\n
+ // Perhaps you should add the directory containing `foo.pc'\n
+ // to the PKG_CONFIG_PATH environment variable\n
+ // Package 'foo', required by 'bar', not found\n
+ //
+ // For the above example callback will be called 4 times. To suppress all
+ // the junk we will use PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS to get just:
+ //
+ // Package 'foo', required by 'bar', not found\n
+ //
+ // Also disable merging options like -framework into a single fragment, if
+ // possible.
+ //
+ static const int pkgconf_flags =
+ PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS
+ | PKGCONF_PKG_PKGF_SKIP_PROVIDES
+#ifdef PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
+ | PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
+#endif
+ ;
+
+#if defined(LIBPKGCONF_VERSION) && LIBPKGCONF_VERSION >= 10900
+ static bool
+ pkgconf_error_handler (const char* msg,
+ const pkgconf_client_t*,
+ void*)
+#else
+ static bool
+ pkgconf_error_handler (const char* msg,
+ const pkgconf_client_t*,
+ const void*)
+#endif
+ {
+ error << runtime_error (msg); // Sanitize the message (trailing dot).
+ return true;
+ }
+
+ // Deleters. Note that they are thread-safe.
+ //
+ struct fragments_deleter
+ {
+ void operator() (pkgconf_list_t* f) const {pkgconf_fragment_free (f);}
+ };
+
+ // Convert fragments to strings. Skip the -I/-L options that refer to system
+ // directories.
+ //
+ static strings
+ to_strings (const pkgconf_list_t& frags,
+ char type,
+ const pkgconf_list_t& sysdirs)
+ {
+ assert (type == 'I' || type == 'L');
+
+ strings r;
+
+ auto add = [&r] (const pkgconf_fragment_t* frag)
+ {
+ string s;
+ if (frag->type != '\0')
+ {
+ s += '-';
+ s += frag->type;
+ }
+
+ s += frag->data;
+ r.push_back (move (s));
+ };
+
+ // Option that is separated from its value, for example:
+ //
+ // -I /usr/lib
+ //
+ const pkgconf_fragment_t* opt (nullptr);
+
+ pkgconf_node_t *node;
+ PKGCONF_FOREACH_LIST_ENTRY(frags.head, node)
+ {
+ auto frag (static_cast<const pkgconf_fragment_t*> (node->data));
+
+ // Add the separated option and directory, unless the latest is a
+ // system one.
+ //
+ if (opt != nullptr)
+ {
+ // Note that we should restore the directory path that was
+ // (mis)interpreted as an option, for example:
+ //
+ // -I -Ifoo
+ //
+ // In the above example option '-I' is followed by directory
+ // '-Ifoo', which is represented by libpkgconf library as fragment
+ // 'foo' with type 'I'.
+ //
+ if (!pkgconf_path_match_list (
+ frag->type == '\0'
+ ? frag->data
+ : (string ({'-', frag->type}) + frag->data).c_str (),
+ &sysdirs))
+ {
+ add (opt);
+ add (frag);
+ }
+
+ opt = nullptr;
+ continue;
+ }
+
+ // Skip the -I/-L option if it refers to a system directory.
+ //
+ if (frag->type == type)
+ {
+ // The option is separated from a value, that will (presumably)
+ // follow.
+ //
+ if (*frag->data == '\0')
+ {
+ opt = frag;
+ continue;
+ }
+
+ if (pkgconf_path_match_list (frag->data, &sysdirs))
+ continue;
+ }
+
+ add (frag);
+ }
+
+ if (opt != nullptr) // Add the dangling option.
+ add (opt);
+
+ return r;
+ }
+
+ // Note that some libpkgconf functions can potentially return NULL,
+ // failing to allocate the required memory block. However, we will not
+ // check the returned value for NULL as the library doesn't do so, prior
+ // to filling the allocated structures. So such a code complication on our
+ // side would be useless. Also, for some functions the NULL result has a
+ // special semantics, for example "not found".
+ //
+ pkgconfig::
+ pkgconfig (path_type p,
+ const dir_paths& pc_dirs,
+ const dir_paths& sys_lib_dirs,
+ const dir_paths& sys_hdr_dirs)
+ : path (move (p))
+ {
+ auto add_dirs = [] (pkgconf_list_t& dir_list,
+ const dir_paths& dirs,
+ bool suppress_dups,
+ bool cleanup = false)
+ {
+ if (cleanup)
+ {
+ pkgconf_path_free (&dir_list);
+ dir_list = PKGCONF_LIST_INITIALIZER;
+ }
+
+ for (const auto& d: dirs)
+ pkgconf_path_add (d.string ().c_str (), &dir_list, suppress_dups);
+ };
+
+ mlock l (pkgconf_mutex);
+
+ // Initialize the client handle.
+ //
+ unique_ptr<pkgconf_client_t, void (*) (pkgconf_client_t*)> c (
+ call_pkgconf_client_new (&pkgconf_client_new,
+ pkgconf_error_handler,
+ nullptr /* handler_data */),
+ [] (pkgconf_client_t* c) {pkgconf_client_free (c);});
+
+ pkgconf_client_set_flags (c.get (), pkgconf_flags);
+
+ // Note that the system header and library directory lists are
+ // automatically pre-filled by the pkgconf_client_new() call (see
+ // above). We will re-create these lists from scratch.
+ //
+ add_dirs (c->filter_libdirs,
+ sys_lib_dirs,
+ false /* suppress_dups */,
+ true /* cleanup */);
+
+ add_dirs (c->filter_includedirs,
+ sys_hdr_dirs,
+ false /* suppress_dups */,
+ true /* cleanup */);
+
+ // Note that the loaded file directory is added to the (yet empty)
+ // search list. Also note that loading of the prerequisite packages is
+ // delayed until flags retrieval, and their file directories are not
+ // added to the search list.
+ //
+ pkg_ = pkgconf_pkg_find (c.get (), path.string ().c_str ());
+
+ if (pkg_ == nullptr)
+ fail << "package '" << path << "' not found or invalid";
+
+ // Add the .pc file search directories.
+ //
+ assert (c->dir_list.length == 1); // Package file directory (see above).
+ add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */);
+
+ client_ = c.release ();
+ }
+
+ void pkgconfig::
+ free ()
+ {
+ assert (pkg_ != nullptr);
+
+ mlock l (pkgconf_mutex);
+ pkgconf_pkg_unref (client_, pkg_);
+ pkgconf_client_free (client_);
+ }
+
+ strings pkgconfig::
+ cflags (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ mlock l (pkgconf_mutex);
+
+ pkgconf_client_set_flags (
+ client_,
+ pkgconf_flags |
+
+ // Walk through the private package dependencies (Requires.private)
+ // besides the public ones while collecting the flags. Note that we do
+ // this for both static and shared linking.
+ //
+ PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
+
+ // Collect flags from Cflags.private besides those from Cflags for the
+ // static linking.
+ //
+ (stat
+ ? PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
+ int e (pkgconf_pkg_cflags (client_, pkg_, &f, pkgconf_max_depth));
+
+ if (e != PKGCONF_PKG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
+ return to_strings (f, 'I', client_->filter_includedirs);
+ }
+
+ strings pkgconfig::
+ libs (bool stat) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ mlock l (pkgconf_mutex);
+
+ pkgconf_client_set_flags (
+ client_,
+ pkgconf_flags |
+
+ // Additionally collect flags from the private dependency packages
+ // (see above) and from the Libs.private value for the static linking.
+ //
+ (stat
+ ? PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
+ PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
+ : 0));
+
+ pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
+ int e (pkgconf_pkg_libs (client_, pkg_, &f, pkgconf_max_depth));
+
+ if (e != PKGCONF_PKG_ERRF_OK)
+ throw failed (); // Assume the diagnostics is issued.
+
+ unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
+ return to_strings (f, 'L', client_->filter_libdirs);
+ }
+
+ optional<string> pkgconfig::
+ variable (const char* name) const
+ {
+ assert (client_ != nullptr); // Must not be empty.
+
+ mlock l (pkgconf_mutex);
+ const char* r (pkgconf_tuple_find (client_, &pkg_->vars, name));
+ return r != nullptr ? optional<string> (r) : nullopt;
+ }
+ }
+}
+
+#endif // BUILD2_BOOTSTRAP
diff --git a/libbuild2/cc/pkgconfig.cxx b/libbuild2/cc/pkgconfig.cxx
index a48d99c..046fbc8 100644
--- a/libbuild2/cc/pkgconfig.cxx
+++ b/libbuild2/cc/pkgconfig.cxx
@@ -1,13 +1,6 @@
// file : libbuild2/cc/pkgconfig.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-// In order not to complicate the bootstrap procedure with libpkgconf building
-// exclude functionality that involves reading of .pc files.
-//
-#ifndef BUILD2_BOOTSTRAP
-# include <libpkgconf/libpkgconf.h>
-#endif
-
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
#include <libbuild2/context.hxx>
@@ -25,430 +18,14 @@
#include <libbuild2/cc/utility.hxx>
#include <libbuild2/cc/common.hxx>
+#include <libbuild2/cc/pkgconfig.hxx>
#include <libbuild2/cc/compile-rule.hxx>
#include <libbuild2/cc/link-rule.hxx>
-#ifndef BUILD2_BOOTSTRAP
-
-// Note that the libpkgconf library doesn't provide the version macro that we
-// could use to compile the code conditionally against different API versions.
-// Thus, we need to sense the pkgconf_client_new() function signature
-// ourselves to call it properly.
-//
-namespace details
-{
- void*
- pkgconf_cross_personality_default (); // Never called.
-}
-
-using namespace details;
-
-template <typename H>
-static inline pkgconf_client_t*
-call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*),
- H error_handler,
- void* error_handler_data)
-{
- return f (error_handler, error_handler_data);
-}
-
-template <typename H, typename P>
-static inline pkgconf_client_t*
-call_pkgconf_client_new (pkgconf_client_t* (*f) (H, void*, P),
- H error_handler,
- void* error_handler_data)
-{
- return f (error_handler,
- error_handler_data,
- ::pkgconf_cross_personality_default ());
-}
-
-#endif
-
-using namespace std;
-using namespace butl;
+using namespace std; // VC16
namespace build2
{
-#ifndef BUILD2_BOOTSTRAP
-
- // Load package information from a .pc file. Filter out the -I/-L options
- // that refer to system directories. This makes sure all the system search
- // directories are "pushed" to the back which minimizes the chances of
- // picking up wrong (e.g., old installed version) header/library.
- //
- // Note that the prerequisite package .pc files search order is as follows:
- //
- // - in directory of the specified file
- // - in pc_dirs directories (in the natural order)
- //
- class pkgconf
- {
- public:
- using path_type = build2::path;
-
- path_type path;
-
- public:
- explicit
- pkgconf (path_type,
- const dir_paths& pc_dirs,
- const dir_paths& sys_hdr_dirs,
- const dir_paths& sys_lib_dirs);
-
- // Create a special empty object. Querying package information on such
- // an object is illegal.
- //
- pkgconf () = default;
-
- ~pkgconf ();
-
- // Movable-only type.
- //
- pkgconf (pkgconf&& p)
- : path (move (p.path)),
- client_ (p.client_),
- pkg_ (p.pkg_)
- {
- p.client_ = nullptr;
- p.pkg_ = nullptr;
- }
-
- pkgconf&
- operator= (pkgconf&& p)
- {
- if (this != &p)
- {
- this->~pkgconf ();
- new (this) pkgconf (move (p)); // Assume noexcept move-construction.
- }
- return *this;
- }
-
- pkgconf (const pkgconf&) = delete;
- pkgconf& operator= (const pkgconf&) = delete;
-
- strings
- cflags (bool stat) const;
-
- strings
- libs (bool stat) const;
-
- optional<string>
- variable (const char*) const;
-
- optional<string>
- variable (const string& s) const {return variable (s.c_str ());}
-
- private:
- // Keep them as raw pointers not to deal with API thread-unsafety in
- // deleters and introducing additional mutex locks.
- //
- pkgconf_client_t* client_ = nullptr;
- pkgconf_pkg_t* pkg_ = nullptr;
- };
-
- // Currently the library is not thread-safe, even on the pkgconf_client_t
- // level (see issue #128 for details).
- //
- // @@ An update: seems that the obvious thread-safety issues are fixed.
- // However, let's keep mutex locking for now not to introduce potential
- // issues before we make sure that there are no other ones.
- //
- static mutex pkgconf_mutex;
-
- // The package dependency traversal depth limit.
- //
- static const int pkgconf_max_depth = 100;
-
- // Normally the error_handler() callback can be called multiple times to
- // report a single error (once per message line), to produce a multi-line
- // message like this:
- //
- // Package foo was not found in the pkg-config search path.\n
- // Perhaps you should add the directory containing `foo.pc'\n
- // to the PKG_CONFIG_PATH environment variable\n
- // Package 'foo', required by 'bar', not found\n
- //
- // For the above example callback will be called 4 times. To suppress all the
- // junk we will use PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS to get just:
- //
- // Package 'foo', required by 'bar', not found\n
- //
- // Also disable merging options like -framework into a single fragment, if
- // possible.
- //
- static const int pkgconf_flags =
- PKGCONF_PKG_PKGF_SIMPLIFY_ERRORS
-#ifdef PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
- | PKGCONF_PKG_PKGF_DONT_MERGE_SPECIAL_FRAGMENTS
-#endif
- ;
-
- static bool
- pkgconf_error_handler (const char* msg, const pkgconf_client_t*, const void*)
- {
- error << runtime_error (msg); // Sanitize the message.
- return true;
- }
-
- // Deleters. Note that they are thread-safe.
- //
- struct fragments_deleter
- {
- void operator() (pkgconf_list_t* f) const {pkgconf_fragment_free (f);}
- };
-
- // Convert fragments to strings. Skip the -I/-L options that refer to system
- // directories.
- //
- static strings
- to_strings (const pkgconf_list_t& frags,
- char type,
- const pkgconf_list_t& sysdirs)
- {
- assert (type == 'I' || type == 'L');
-
- strings r;
-
- auto add = [&r] (const pkgconf_fragment_t* frag)
- {
- string s;
- if (frag->type != '\0')
- {
- s += '-';
- s += frag->type;
- }
-
- s += frag->data;
- r.push_back (move (s));
- };
-
- // Option that is separated from its value, for example:
- //
- // -I /usr/lib
- //
- const pkgconf_fragment_t* opt (nullptr);
-
- pkgconf_node_t *node;
- PKGCONF_FOREACH_LIST_ENTRY(frags.head, node)
- {
- auto frag (static_cast<const pkgconf_fragment_t*> (node->data));
-
- // Add the separated option and directory, unless the latest is a system
- // one.
- //
- if (opt != nullptr)
- {
- // Note that we should restore the directory path that was
- // (mis)interpreted as an option, for example:
- //
- // -I -Ifoo
- //
- // In the above example option '-I' is followed by directory '-Ifoo',
- // which is represented by libpkgconf library as fragment 'foo' with
- // type 'I'.
- //
- if (!pkgconf_path_match_list (
- frag->type == '\0'
- ? frag->data
- : (string ({'-', frag->type}) + frag->data).c_str (),
- &sysdirs))
- {
- add (opt);
- add (frag);
- }
-
- opt = nullptr;
- continue;
- }
-
- // Skip the -I/-L option if it refers to a system directory.
- //
- if (frag->type == type)
- {
- // The option is separated from a value, that will (presumably) follow.
- //
- if (*frag->data == '\0')
- {
- opt = frag;
- continue;
- }
-
- if (pkgconf_path_match_list (frag->data, &sysdirs))
- continue;
- }
-
- add (frag);
- }
-
- if (opt != nullptr) // Add the dangling option.
- add (opt);
-
- return r;
- }
-
- // Note that some libpkgconf functions can potentially return NULL, failing
- // to allocate the required memory block. However, we will not check the
- // returned value for NULL as the library doesn't do so, prior to filling the
- // allocated structures. So such a code complication on our side would be
- // useless. Also, for some functions the NULL result has a special semantics,
- // for example "not found".
- //
- pkgconf::
- pkgconf (path_type p,
- const dir_paths& pc_dirs,
- const dir_paths& sys_lib_dirs,
- const dir_paths& sys_hdr_dirs)
- : path (move (p))
- {
- auto add_dirs = [] (pkgconf_list_t& dir_list,
- const dir_paths& dirs,
- bool suppress_dups,
- bool cleanup = false)
- {
- if (cleanup)
- {
- pkgconf_path_free (&dir_list);
- dir_list = PKGCONF_LIST_INITIALIZER;
- }
-
- for (const auto& d: dirs)
- pkgconf_path_add (d.string ().c_str (), &dir_list, suppress_dups);
- };
-
- mlock l (pkgconf_mutex);
-
- // Initialize the client handle.
- //
- unique_ptr<pkgconf_client_t, void (*) (pkgconf_client_t*)> c (
- call_pkgconf_client_new (&pkgconf_client_new,
- pkgconf_error_handler,
- nullptr /* handler_data */),
- [] (pkgconf_client_t* c) {pkgconf_client_free (c);});
-
- pkgconf_client_set_flags (c.get (), pkgconf_flags);
-
- // Note that the system header and library directory lists are
- // automatically pre-filled by the pkgconf_client_new() call (see above).
- // We will re-create these lists from scratch.
- //
- add_dirs (c->filter_libdirs,
- sys_lib_dirs,
- false /* suppress_dups */,
- true /* cleanup */);
-
- add_dirs (c->filter_includedirs,
- sys_hdr_dirs,
- false /* suppress_dups */,
- true /* cleanup */);
-
- // Note that the loaded file directory is added to the (yet empty) search
- // list. Also note that loading of the prerequisite packages is delayed
- // until flags retrieval, and their file directories are not added to the
- // search list.
- //
- pkg_ = pkgconf_pkg_find (c.get (), path.string ().c_str ());
-
- if (pkg_ == nullptr)
- fail << "package '" << path << "' not found or invalid";
-
- // Add the .pc file search directories.
- //
- assert (c->dir_list.length == 1); // Package file directory (see above).
- add_dirs (c->dir_list, pc_dirs, true /* suppress_dups */);
-
- client_ = c.release ();
- }
-
- pkgconf::
- ~pkgconf ()
- {
- if (client_ != nullptr) // Not empty.
- {
- assert (pkg_ != nullptr);
-
- mlock l (pkgconf_mutex);
- pkgconf_pkg_unref (client_, pkg_);
- pkgconf_client_free (client_);
- }
- }
-
- strings pkgconf::
- cflags (bool stat) const
- {
- assert (client_ != nullptr); // Must not be empty.
-
- mlock l (pkgconf_mutex);
-
- pkgconf_client_set_flags (
- client_,
- pkgconf_flags |
-
- // Walk through the private package dependencies (Requires.private)
- // besides the public ones while collecting the flags. Note that we do
- // this for both static and shared linking.
- //
- PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
-
- // Collect flags from Cflags.private besides those from Cflags for the
- // static linking.
- //
- (stat
- ? PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
- : 0));
-
- pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
- int e (pkgconf_pkg_cflags (client_, pkg_, &f, pkgconf_max_depth));
-
- if (e != PKGCONF_PKG_ERRF_OK)
- throw failed (); // Assume the diagnostics is issued.
-
- unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
- return to_strings (f, 'I', client_->filter_includedirs);
- }
-
- strings pkgconf::
- libs (bool stat) const
- {
- assert (client_ != nullptr); // Must not be empty.
-
- mlock l (pkgconf_mutex);
-
- pkgconf_client_set_flags (
- client_,
- pkgconf_flags |
-
- // Additionally collect flags from the private dependency packages
- // (see above) and from the Libs.private value for the static linking.
- //
- (stat
- ? PKGCONF_PKG_PKGF_SEARCH_PRIVATE |
- PKGCONF_PKG_PKGF_MERGE_PRIVATE_FRAGMENTS
- : 0));
-
- pkgconf_list_t f = PKGCONF_LIST_INITIALIZER; // Aggregate initialization.
- int e (pkgconf_pkg_libs (client_, pkg_, &f, pkgconf_max_depth));
-
- if (e != PKGCONF_PKG_ERRF_OK)
- throw failed (); // Assume the diagnostics is issued.
-
- unique_ptr<pkgconf_list_t, fragments_deleter> fd (&f); // Auto-deleter.
- return to_strings (f, 'L', client_->filter_libdirs);
- }
-
- optional<string> pkgconf::
- variable (const char* name) const
- {
- assert (client_ != nullptr); // Must not be empty.
-
- mlock l (pkgconf_mutex);
- const char* r (pkgconf_tuple_find (client_, &pkg_->vars, name));
- return r != nullptr ? optional<string> (r) : nullopt;
- }
-
-#endif
-
namespace cc
{
using namespace bin;
@@ -457,6 +34,9 @@ namespace build2
//
// @@ TODO: handle empty values (save as ''?)
//
+    // Note: may contain variable expansions (e.g., ${pcfiledir}) so unclear
+ // if can use quoting.
+ //
static string
escape (const string& s)
{
@@ -507,6 +87,11 @@ namespace build2
return make_pair (r, d);
}
+ // In order not to complicate the bootstrap procedure with libpkg-config
+ // building, exclude functionality that involves reading of .pc files.
+ //
+#ifndef BUILD2_BOOTSTRAP
+
// Try to find a .pc file in the pkgconfig/ subdirectory of libd, trying
// several names derived from stem. If not found, return false. If found,
// load poptions, loptions, libs, and modules, set the corresponding
@@ -523,9 +108,8 @@ namespace build2
// Also note that the bootstrapped version of build2 will not search for
// .pc files, always returning false (see above for the reasoning).
//
-#ifndef BUILD2_BOOTSTRAP
- // Derive pkgconf search directories from the specified library search
+ // Derive pkg-config search directories from the specified library search
// directory passing them to the callback function for as long as it
// returns false (e.g., not found). Return true if the callback returned
// true.
@@ -569,8 +153,8 @@ namespace build2
return false;
}
- // Search for the .pc files in the pkgconf directories that correspond to
- // the specified library directory. If found, return static (first) and
+ // Search for the .pc files in the pkg-config directories that correspond
+ // to the specified library directory. If found, return static (first) and
// shared (second) library .pc files. If common is false, then only
// consider our .static/.shared files.
//
@@ -580,6 +164,8 @@ namespace build2
const string& stem,
bool common) const
{
+ tracer trace (x, "pkgconfig_search");
+
// When it comes to looking for .pc files we have to decide where to
// search (which directory(ies)) as well as what to search for (which
// names). Suffix is our ".shared" or ".static" extension.
@@ -601,28 +187,36 @@ namespace build2
// then you get something like zlib which calls it zlib.pc. So let's
// just do it.
//
- f = dir;
- f /= "lib";
- f += stem;
- f += sfx;
- f += ".pc";
- if (exists (f))
- return f;
+ // And as you think you've covered all the bases, someone decides to
+ // play with the case (libXau.* vs xau.pc). So let's also try the
+ // lower-case versions of the stem unless we are on a case-insensitive
+ // filesystem.
+ //
+ auto check = [&dir, & sfx, &f] (const string& n)
+ {
+ f = dir;
+ f /= n;
+ f += sfx;
+ f += ".pc";
+ return exists (f);
+ };
- f = dir;
- f /= stem;
- f += sfx;
- f += ".pc";
- if (exists (f))
+ if (check ("lib" + stem) || check (stem))
return f;
+#ifndef _WIN32
+ string lstem (lcase (stem));
+
+ if (lstem != stem)
+ {
+ if (check ("lib" + lstem) || check (lstem))
+ return f;
+ }
+#endif
+
if (proj)
{
- f = dir;
- f /= proj->string ();
- f += sfx;
- f += ".pc";
- if (exists (f))
+ if (check (proj->string ()))
return f;
}
@@ -662,15 +256,18 @@ namespace build2
if (pkgconfig_derive (libd, check))
{
+ l6 ([&]{trace << "found " << libd << stem << " in "
+ << (d.a.empty () ? d.a : d.s).directory ();});
+
r.first = move (d.a);
r.second = move (d.s);
}
return r;
- };
+ }
bool common::
- pkgconfig_load (action a,
+ pkgconfig_load (optional<action> act,
const scope& s,
lib& lt,
liba* at,
@@ -680,7 +277,7 @@ namespace build2
const dir_path& libd,
const dir_paths& top_sysd,
const dir_paths& top_usrd,
- bool meta) const
+ pair<bool, bool> metaonly) const
{
assert (at != nullptr || st != nullptr);
@@ -690,12 +287,16 @@ namespace build2
if (p.first.empty () && p.second.empty ())
return false;
- pkgconfig_load (a, s, lt, at, st, p, libd, top_sysd, top_usrd, meta);
+ pkgconfig_load (
+ act, s, lt, at, st, p, libd, top_sysd, top_usrd, metaonly);
return true;
}
+ // Action should be absent if called during the load phase. If metaonly is
+ // true then only load the metadata.
+ //
void common::
- pkgconfig_load (action a,
+ pkgconfig_load (optional<action> act,
const scope& s,
lib& lt,
liba* at,
@@ -704,7 +305,7 @@ namespace build2
const dir_path& libd,
const dir_paths& top_sysd,
const dir_paths& top_usrd,
- bool meta) const
+ pair<bool /* a */, bool /* s */> metaonly) const
{
tracer trace (x, "pkgconfig_load");
@@ -715,24 +316,120 @@ namespace build2
assert (!ap.empty () || !sp.empty ());
- // Extract --cflags and set them as lib?{}:export.poptions. Note that we
- // still pass --static in case this is pkgconf which has Cflags.private.
+ const scope& rs (*s.root_scope ());
+
+ const dir_path* sysroot (
+ cast_null<abs_dir_path> (rs["config.cc.pkgconfig.sysroot"]));
+
+ // Append -I<dir> or -L<dir> option suppressing duplicates. Also handle
+ // the sysroot rewrite.
//
- auto parse_cflags = [&trace, this] (target& t,
- const pkgconf& pc,
- bool la)
+ auto append_dir = [sysroot] (strings& ops, string&& o)
{
+ char c (o[1]);
+
+ // @@ Should we normalize the path for good measure? But on the other
+ // hand, most of the time when it's not normalized, it will likely
+ // be "consistently-relative", e.g., something like
+ // ${prefix}/lib/../include. I guess let's wait and see for some
+ // real-world examples.
+ //
+ // Well, we now support generating relocatable .pc files that have
+ // a bunch of -I${pcfiledir}/../../include and -L${pcfiledir}/.. .
+ //
+ // On the other hand, there could be symlinks involved and just
+ // normalize() may not be correct.
+ //
+ // Note that we do normalize -L paths in the usrd logic later
+ // (but not when setting as *.export.loptions).
+
+ if (sysroot != nullptr)
+ {
+ // Notes:
+ //
+ // - The path might not be absolute (we only rewrite absolute ones).
+ //
+ // - Do this before duplicate suppression since options in ops
+ // already have the sysroot rewritten.
+ //
+ // - Check if the path already starts with sysroot since some .pc
+ // files might already be in a good shape (e.g., because they use
+ // ${pcfiledir} to support relocation properly).
+ //
+ const char* op (o.c_str () + 2);
+ size_t on (o.size () - 2);
+
+ if (path_traits::absolute (op, on))
+ {
+ const string& s (sysroot->string ());
+
+ const char* sp (s.c_str ());
+ size_t sn (s.size ());
+
+ if (!path_traits::sub (op, on, sp, sn)) // Already in sysroot.
+ {
+            // Find the first directory separator that separates the root
+ // component from the rest of the path (think /usr/include,
+ // c:\install\include). We need to replace the root component
+ // with sysroot. If there is no separator (say, -Ic:) or the
+ // path after the separator is empty (say, -I/), then we replace
+ // the entire path.
+ //
+ size_t p (path_traits::find_separator (o, 2));
+ if (p == string::npos || p + 1 == o.size ())
+ p = o.size ();
+
+ o.replace (2, p - 2, s);
+ }
+ }
+ }
+
+ for (const string& x: ops)
+ {
+ if (x.size () > 2 && x[0] == '-' && x[1] == c)
+ {
+ if (path_traits::compare (x.c_str () + 2, x.size () - 2,
+ o.c_str () + 2, o.size () - 2) == 0)
+ return; // Duplicate.
+ }
+ }
+
+ ops.push_back (move (o));
+ };
+
+      // Extract --cflags and set them as lib?{}:export.poptions returning the
+ // pointer to the set value. If [as]pops are not NULL, then only keep
+ // options that are present in both.
+ //
+ auto parse_cflags =[&trace,
+ this,
+ &append_dir] (target& t,
+ const pkgconfig& pc,
+ bool la,
+ const strings* apops = nullptr,
+ const strings* spops = nullptr)
+ -> const strings*
+ {
+ // Note that we normalize `-[IDU] <arg>` to `-[IDU]<arg>`.
+ //
strings pops;
- bool arg (false);
- for (auto& o: pc.cflags (la))
+ char arg ('\0'); // Option with pending argument.
+ for (string& o: pc.cflags (la))
{
if (arg)
{
// Can only be an argument for -I, -D, -U options.
//
- pops.push_back (move (o));
- arg = false;
+ o.insert (0, 1, arg);
+ o.insert (0, 1, '-');
+
+ if (arg == 'I')
+ append_dir (pops, move (o));
+ else
+ pops.push_back (move (o));
+
+ arg = '\0';
continue;
}
@@ -743,8 +440,15 @@ namespace build2
if (n >= 2 &&
o[0] == '-' && (o[1] == 'I' || o[1] == 'D' || o[1] == 'U'))
{
- pops.push_back (move (o));
- arg = (n == 2);
+ if (n > 2)
+ {
+ if (o[1] == 'I')
+ append_dir (pops, move (o));
+ else
+ pops.push_back (move (o));
+ }
+ else
+ arg = o[1];
continue;
}
@@ -753,7 +457,7 @@ namespace build2
}
if (arg)
- fail << "argument expected after " << pops.back () <<
+ fail << "argument expected after -" << arg <<
info << "while parsing pkg-config --cflags " << pc.path;
if (!pops.empty ())
@@ -766,19 +470,45 @@ namespace build2
// export stub and we shouldn't touch them.
//
if (p.second)
+ {
+ // If required, only keep common stuff. While removing the entries
+ // is not the most efficient way, it is simple.
+ //
+ if (apops != nullptr || spops != nullptr)
+ {
+ for (auto i (pops.begin ()); i != pops.end (); )
+ {
+ if ((apops != nullptr && find (
+ apops->begin (), apops->end (), *i) == apops->end ()) ||
+ (spops != nullptr && find (
+ spops->begin (), spops->end (), *i) == spops->end ()))
+ i = pops.erase (i);
+ else
+ ++i;
+ }
+ }
+
p.first = move (pops);
+ return &p.first.as<strings> ();
+ }
}
+
+ return nullptr;
};
// Parse --libs into loptions/libs (interface and implementation). If
// ps is not NULL, add each resolved library target as a prerequisite.
//
- auto parse_libs = [a, &s, top_sysd, this] (target& t,
- bool binless,
- const pkgconf& pc,
- bool la,
- prerequisites* ps)
+ auto parse_libs = [this,
+ &append_dir,
+ act, &s, top_sysd] (target& t,
+ bool binless,
+ const pkgconfig& pc,
+ bool la,
+ prerequisites* ps)
{
+ // Note that we normalize `-L <arg>` to `-L<arg>`.
+ //
strings lops;
vector<name> libs;
@@ -795,15 +525,21 @@ namespace build2
// library. What we do at the moment is stop recognizing just library
// names (without -l) after seeing an unknown option.
//
- bool arg (false), first (true), known (true), have_L;
- for (auto& o: pc.libs (la))
+ bool first (true), known (true), have_L (false);
+
+ string self; // The library itself (-l of just name/path).
+
+ char arg ('\0'); // Option with pending argument.
+ for (string& o: pc.libs (la))
{
if (arg)
{
- // Can only be an argument for an loption.
+ // Can only be an argument for an -L option.
//
- lops.push_back (move (o));
- arg = false;
+ o.insert (0, 1, arg);
+ o.insert (0, 1, '-');
+ append_dir (lops, move (o));
+ arg = '\0';
continue;
}
@@ -813,15 +549,17 @@ namespace build2
//
if (n >= 2 && o[0] == '-' && o[1] == 'L')
{
+ if (n > 2)
+ append_dir (lops, move (o));
+ else
+ arg = o[1];
have_L = true;
- lops.push_back (move (o));
- arg = (n == 2);
continue;
}
// See if that's -l, -pthread, or just the library name/path.
//
- if ((known && o[0] != '-') ||
+ if ((known && n != 0 && o[0] != '-') ||
(n > 2 && o[0] == '-' && (o[1] == 'l' || o == "-pthread")))
{
// Unless binless, the first one is the library itself, which we
@@ -829,28 +567,36 @@ namespace build2
// be some other library, but we haven't encountered such a beast
// yet.
//
+ // What we have enountered (e.g., in the Magick++ library) is the
+ // library itself repeated in Libs.private. So now we save it and
+ // filter all its subsequent occurences.
+ //
+ // @@ To be safe we probably shouldn't rely on the position and
+ // filter out all occurrences of the library itself (by name?)
+ // and complain if none were encountered.
+ //
+ // Note also that the same situation can occur if we have a
+ // binful library for which we could not find the library
+ // binary and are treating it as binless. We now have a diag
+ // frame around the call to search_library() to help diagnose
+ // such situations.
+ //
if (first)
{
first = false;
if (!binless)
+ {
+ self = move (o);
+ continue;
+ }
+ }
+ else
+ {
+ if (!binless && o == self)
continue;
}
- // @@ If by some reason this is the library itself (doesn't go
- // first or libpkgconf parsed libs in some bizarre way) we will
- // have a dependency cycle by trying to lock its target inside
- // search_library() as by now it is already locked. To be safe
- // we probably shouldn't rely on the position and filter out
- // all occurrences of the library itself (by name?) and
- // complain if none were encountered.
- //
- // Note also that the same situation can occur if we have a
- // binful library for which we could not find the library
- // binary and are treating it as binless. We now have a diag
- // frame around the call to search_library() to help diagnose
- // such situations.
- //
libs.push_back (name (move (o)));
continue;
}
@@ -862,7 +608,7 @@ namespace build2
}
if (arg)
- fail << "argument expected after " << lops.back () <<
+ fail << "argument expected after -" << arg <<
info << "while parsing pkg-config --libs " << pc.path;
// Space-separated list of escaped library flags.
@@ -870,7 +616,7 @@ namespace build2
auto lflags = [&pc, la] () -> string
{
string r;
- for (const auto& o: pc.libs (la))
+ for (const string& o: pc.libs (la))
{
if (!r.empty ())
r += ' ';
@@ -879,7 +625,7 @@ namespace build2
return r;
};
- if (first && !binless)
+ if (!binless && self.empty ())
fail << "library expected in '" << lflags () << "'" <<
info << "while parsing pkg-config --libs " << pc.path;
@@ -920,11 +666,15 @@ namespace build2
if (l[0] != '-') // e.g., just shell32.lib
continue;
else if (cmp ("advapi32") ||
+ cmp ("authz") ||
cmp ("bcrypt") ||
+ cmp ("comdlg32") ||
cmp ("crypt32") ||
- cmp ("dbgeng") ||
cmp ("d2d1") ||
cmp ("d3d", 3) || // d3d*
+ cmp ("dbgeng") ||
+ cmp ("dbghelp") ||
+ cmp ("dnsapi") ||
cmp ("dwmapi") ||
cmp ("dwrite") ||
cmp ("dxgi") ||
@@ -935,7 +685,9 @@ namespace build2
cmp ("imm32") ||
cmp ("iphlpapi") ||
cmp ("kernel32") ||
+ cmp ("mincore") ||
cmp ("mpr") ||
+ cmp ("msimg32") ||
cmp ("mswsock") ||
cmp ("msxml", 5) || // msxml*
cmp ("netapi32") ||
@@ -944,9 +696,11 @@ namespace build2
cmp ("ole32") ||
cmp ("oleaut32") ||
cmp ("opengl32") ||
+ cmp ("powrprof") ||
cmp ("psapi") ||
cmp ("rpcrt4") ||
cmp ("secur32") ||
+ cmp ("setupapi") ||
cmp ("shell32") ||
cmp ("shlwapi") ||
cmp ("synchronization") ||
@@ -954,6 +708,8 @@ namespace build2
cmp ("userenv") ||
cmp ("uuid") ||
cmp ("version") ||
+ cmp ("windowscodecs") ||
+ cmp ("winhttp") ||
cmp ("winmm") ||
cmp ("winspool") ||
cmp ("ws2") ||
@@ -1003,7 +759,11 @@ namespace build2
}
else if (tclass == "macos")
{
- if (l == "-lSystem")
+ // Note that Mac OS has libiconv in /usr/lib/ which only comes
+ // in the shared variant. So we treat it as system.
+ //
+ if (l == "-lSystem" ||
+ l == "-liconv")
continue;
}
else if (tclass == "bsd")
@@ -1020,18 +780,13 @@ namespace build2
{
usrd = dir_paths ();
- for (auto i (lops.begin ()); i != lops.end (); ++i)
+ for (const string& o: lops)
{
- const string& o (*i);
-
- if (o.size () >= 2 && o[0] == '-' && o[1] == 'L')
+ // Note: always in the -L<dir> form (see above).
+ //
+ if (o.size () > 2 && o[0] == '-' && o[1] == 'L')
{
- string p;
-
- if (o.size () == 2)
- p = *++i; // We've verified it's there.
- else
- p = string (o, 2);
+ string p (o, 2);
try
{
@@ -1042,6 +797,7 @@ namespace build2
<< lflags () << "'" <<
info << "while parsing pkg-config --libs " << pc.path;
+ d.normalize ();
usrd->push_back (move (d));
}
catch (const invalid_path& e)
@@ -1072,7 +828,7 @@ namespace build2
dr << info (f) << "while resolving pkg-config dependency " << l;
});
- lt = search_library (a, top_sysd, usrd, pk);
+ lt = search_library (act, top_sysd, usrd, pk);
}
if (lt != nullptr)
@@ -1121,24 +877,16 @@ namespace build2
{
// Translate -L to /LIBPATH.
//
- for (auto i (lops.begin ()); i != lops.end (); )
+ for (string& o: lops)
{
- string& o (*i);
size_t n (o.size ());
- if (n >= 2 && o[0] == '-' && o[1] == 'L')
+ // Note: always in the -L<dir> form (see above).
+ //
+ if (n > 2 && o[0] == '-' && o[1] == 'L')
{
o.replace (0, 2, "/LIBPATH:");
-
- if (n == 2)
- {
- o += *++i; // We've verified it's there.
- i = lops.erase (i);
- continue;
- }
}
-
- ++i;
}
}
@@ -1201,16 +949,23 @@ namespace build2
return r;
};
- // Parse user metadata and set extracted variables on the specified
- // target.
+ // Parse the build2.metadata variable value and, if user is true,
+ // extract the user metadata, if any, and set extracted variables on the
+ // specified target.
//
- auto parse_metadata = [&next] (const target&,
- pkgconf& pc,
- const string& md)
+ auto parse_metadata = [&next] (target& t,
+ pkgconfig& pc,
+ const string& md,
+ bool user)
{
const location loc (pc.path);
+ context& ctx (t.ctx);
+
optional<uint64_t> ver;
+ optional<string> pfx;
+
+ variable_pool* vp (nullptr); // Resolve lazily.
string s;
for (size_t b (0), e (0); !(s = next (md, b, e)).empty (); )
@@ -1223,12 +978,25 @@ namespace build2
}
catch (const invalid_argument& e)
{
- fail (loc) << "invalid version in metadata variable; " << e;
+ fail (loc) << "invalid version in build2.metadata variable: "
+ << e;
}
if (*ver != 1)
fail (loc) << "unexpected metadata version " << *ver;
+ if (!user)
+ return;
+
+ continue;
+ }
+
+ if (!pfx)
+ {
+ if (s.empty ())
+ fail (loc) << "empty variable prefix in build2.metadata varible";
+
+ pfx = s;
continue;
}
@@ -1239,17 +1007,13 @@ namespace build2
if (p == string::npos)
fail (loc) << "expected name/type pair instead of '" << s << "'";
- //@@ We cannot insert a variable since we are not in the load phase
- // (but if we were to somehow support immediate importation of
- // libraries, we would be in the load phase).
- //
- string var (s, 0, p);
+ string vn (s, 0, p);
string tn (s, p + 1);
- optional<string> val (pc.variable (var));
+ optional<string> val (pc.variable (vn));
if (!val)
- fail (loc) << "metadata variable " << var << " not set";
+ fail (loc) << "metadata variable " << vn << " not set";
pair<const value_type*, bool> vt (metadata_type (tn));
if (vt.first == nullptr)
@@ -1263,27 +1027,39 @@ namespace build2
: name (move (s)));
}
- value v (vt.first);
- v.assign (move (ns), nullptr /* @@ TODO: var */);
+ // These should be public (qualified) variables so go straight for
+ // the public variable pool.
+ //
+ if (vp == nullptr)
+ vp = &ctx.var_pool.rw (); // Load phase if user==true.
+
+ const variable& var (vp->insert (move (vn)));
- //names storage;
- //text << var << " = " << reverse (v, storage);
+ value& v (t.assign (var));
+ v.assign (move (ns), &var);
+ typify (v, *vt.first, &var);
}
if (!ver)
- fail (loc) << "version expected in metadata variable";
+ fail (loc) << "version expected in build2.metadata variable";
- // @@ TODO: we should probably also set export.metadata?
+ if (!pfx)
+ return; // No user metadata.
+
+ // Set export.metadata to indicate the presence of user metadata.
+ //
+ t.assign (ctx.var_export_metadata) = names {
+ name (std::to_string (*ver)), name (move (*pfx))};
};
// Parse modules, enter them as targets, and add them to the
// prerequisites.
//
auto parse_modules = [&trace, this,
- &next, &s, &lt] (const pkgconf& pc,
+ &next, &s, &lt] (const pkgconfig& pc,
prerequisites& ps)
{
- optional<string> val (pc.variable ("cxx_modules"));
+ optional<string> val (pc.variable ("cxx.modules"));
if (!val)
return;
@@ -1299,17 +1075,25 @@ namespace build2
p == 0 || // Empty name.
p == m.size () - 1) // Empty path.
fail << "invalid module information in '" << *val << "'" <<
- info << "while parsing pkg-config --variable=cxx_modules "
+ info << "while parsing pkg-config --variable=cxx.modules "
<< pc.path;
string mn (m, 0, p);
path mp (m, p + 1, string::npos);
+
+ // Must be absolute but may not be normalized due to a relocatable
+ // .pc file. We assume there are no symlink shenanigans that would
+ // require realize().
+ //
+ if (!mp.normalized ())
+ mp.normalize ();
+
path mf (mp.leaf ());
// Extract module properties, if any.
//
- optional<string> pp (pc.variable ("cxx_module_preprocessed." + mn));
- optional<string> se (pc.variable ("cxx_module_symexport." + mn));
+ optional<string> pp (pc.variable ("cxx.module_preprocessed." + mn));
+ optional<string> se (pc.variable ("cxx.module_symexport." + mn));
// Replace the partition separator.
//
@@ -1328,7 +1112,7 @@ namespace build2
target_decl::implied,
trace));
- target& mt (tl.first);
+ file& mt (tl.first.as<file> ());
// If the target already exists, then setting its variables is not
// MT-safe. So currently we only do it if we have the lock (and thus
@@ -1346,6 +1130,7 @@ namespace build2
//
if (tl.second.owns_lock ())
{
+ mt.path (move (mp));
mt.vars.assign (c_module_name) = move (mn);
// Set module properties. Note that if unspecified we should still
@@ -1381,12 +1166,12 @@ namespace build2
// the prerequisites.
//
auto parse_headers = [&trace, this,
- &next, &s, &lt] (const pkgconf& pc,
+ &next, &s, &lt] (const pkgconfig& pc,
const target_type& tt,
const char* lang,
prerequisites& ps)
{
- string var (string (lang) + "_importable_headers");
+ string var (string (lang) + ".importable_headers");
optional<string> val (pc.variable (var));
if (!val)
@@ -1396,6 +1181,14 @@ namespace build2
for (size_t b (0), e (0); !(h = next (*val, b, e)).empty (); )
{
path hp (move (h));
+
+ // Must be absolute but may not be normalized due to a relocatable
+ // .pc file. We assume there are no symlink shenanigans that would
+ // require realize().
+ //
+ if (!hp.normalized ())
+ hp.normalize ();
+
path hf (hp.leaf ());
auto tl (
@@ -1408,7 +1201,7 @@ namespace build2
target_decl::implied,
trace));
- target& ht (tl.first);
+ file& ht (tl.first.as<file> ());
// If the target already exists, then setting its variables is not
// MT-safe. So currently we only do it if we have the lock (and thus
@@ -1417,6 +1210,7 @@ namespace build2
//
if (tl.second.owns_lock ())
{
+ ht.path (move (hp));
ht.vars.assign (c_importable) = true;
tl.second.unlock ();
}
@@ -1433,19 +1227,10 @@ namespace build2
}
};
- // For now we only populate prerequisites for lib{}. To do it for
- // liba{} would require weeding out duplicates that are already in
- // lib{}.
- //
- // Currently, this information is only used by the modules machinery to
- // resolve module names to module files (but we cannot only do this if
- // modules are enabled since the same installed library can be used by
- // multiple builds).
+ // Load the information from the pkg-config files.
//
- prerequisites prs;
-
- pkgconf apc;
- pkgconf spc;
+ pkgconfig apc;
+ pkgconfig spc;
// Create the .pc files search directory list.
//
@@ -1453,9 +1238,16 @@ namespace build2
// Note that we rely on the "small function object" optimization here.
//
- auto add_pc_dir = [&pc_dirs] (dir_path&& d) -> bool
+ auto add_pc_dir = [&trace, &pc_dirs] (dir_path&& d) -> bool
{
- pc_dirs.emplace_back (move (d));
+ // Suppress duplicated.
+ //
+ if (find (pc_dirs.begin (), pc_dirs.end (), d) == pc_dirs.end ())
+ {
+ l6 ([&]{trace << "search path " << d;});
+ pc_dirs.emplace_back (move (d));
+ }
+
return false;
};
@@ -1465,18 +1257,115 @@ namespace build2
bool pa (at != nullptr && !ap.empty ());
if (pa || sp.empty ())
- apc = pkgconf (ap, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
+ apc = pkgconfig (ap, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
bool ps (st != nullptr && !sp.empty ());
if (ps || ap.empty ())
- spc = pkgconf (sp, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
+ spc = pkgconfig (sp, pc_dirs, sys_lib_dirs, sys_hdr_dirs);
+
+ // Load the user metadata if we are in the load phase. Otherwise just
+ // determine if we have metadata.
+ //
+ // Note also that we are not failing here if the metadata was requested
+ // but not present (potentially only partially) letting the caller
+ // (i.e., the import machinery) verify that the export.metadata was set
+ // on the target being imported. This would also allow supporting
+ // optional metadata.
+ //
+ bool apc_meta (false);
+ bool spc_meta (false);
+ if (!act)
+ {
+ // We can only do it during the load phase.
+ //
+ assert (lt.ctx.phase == run_phase::load);
+
+ pkgconfig& ipc (ps ? spc : apc); // As below.
+
+ // Since it's not easy to say if things are the same, we load a copy
+ // into the group and each member, if any.
+ //
+ // @@ TODO: check if already loaded? Don't we have the same problem
+ // below with reloading the rest for lt? What if we passed NULL
+ // in this case (and I suppose another bool in metaonly)?
+ //
+ if (optional<string> md = ipc.variable ("build2.metadata"))
+ parse_metadata (lt, ipc, *md, true);
+
+ if (pa)
+ {
+ if (optional<string> md = apc.variable ("build2.metadata"))
+ {
+ parse_metadata (*at, apc, *md, true);
+ apc_meta = true;
+ }
+ }
+
+ if (ps)
+ {
+ if (optional<string> md = spc.variable ("build2.metadata"))
+ {
+ parse_metadata (*st, spc, *md, true);
+ spc_meta = true;
+ }
+ }
+
+ // If we only need metadata, then we are done.
+ //
+ if (at != nullptr && metaonly.first)
+ {
+ pa = false;
+ at = nullptr;
+ }
+
+ if (st != nullptr && metaonly.second)
+ {
+ ps = false;
+ st = nullptr;
+ }
+
+ if (at == nullptr && st == nullptr)
+ return;
+ }
+ else
+ {
+ if (pa)
+ {
+ if (optional<string> md = apc.variable ("build2.metadata"))
+ {
+ parse_metadata (*at, apc, *md, false);
+ apc_meta = true;
+ }
+ }
+
+ if (ps)
+ {
+ if (optional<string> md = spc.variable ("build2.metadata"))
+ {
+ parse_metadata (*st, spc, *md, false);
+ spc_meta = true;
+ }
+ }
+ }
// Sort out the interface dependencies (which we are setting on lib{}).
// If we have the shared .pc variant, then we use that. Otherwise --
// static but extract without the --static option (see also the saving
// logic).
//
- pkgconf& ipc (ps ? spc : apc); // Interface package info.
+ pkgconfig& ipc (ps ? spc : apc); // Interface package info.
+ bool ipc_meta (ps ? spc_meta : apc_meta);
+
+ // For now we only populate prerequisites for lib{}. To do it for
+ // liba{} would require weeding out duplicates that are already in
+ // lib{}.
+ //
+ // Currently, this information is only used by the modules machinery to
+ // resolve module names to module files (but we cannot only do this if
+ // modules are enabled since the same installed library can be used by
+ // multiple builds).
+ //
+ prerequisites prs;
parse_libs (
lt,
@@ -1485,18 +1374,34 @@ namespace build2
false,
&prs);
+ const strings* apops (nullptr);
if (pa)
{
- parse_cflags (*at, apc, true);
+ apops = parse_cflags (*at, apc, true);
parse_libs (*at, at->path ().empty (), apc, true, nullptr);
}
+ const strings* spops (nullptr);
if (ps)
- parse_cflags (*st, spc, false);
+ spops = parse_cflags (*st, spc, false);
+
+ // Also set common poptions for the group. In particular, this makes
+ // sure $lib_poptions() in the "common interface" mode works for the
+ // installed libraries.
+ //
+ // Note that if there are no poptions set for either, then we cannot
+ // possibly have a common subset.
+ //
+ if (apops != nullptr || spops != nullptr)
+ parse_cflags (lt, ipc, false, apops, spops);
+
+ // @@ TODO: we can now load cc.type if there is metadata (but need to
+ // return this rather than set, see search_library() for
+ // details).
// Load the bin.whole flag (whole archive).
//
- if (at != nullptr)
+ if (at != nullptr && (pa ? apc_meta : spc_meta))
{
// Note that if unspecified we leave it unset letting the consumer
// override it, if necessary (see the bin.lib lookup semantics for
@@ -1508,52 +1413,19 @@ namespace build2
}
}
- // Load user metadata, if requested.
- //
- // Note that we cannot just load if there is the metadata variable since
- // this could be some third-party .pc file which happens to set the same
- // variable.
- //
- // Note also that we are not failing here if the metadata was requested
- // but not present (potentially only partially) letting the caller
- // (i.e., the import machinery) verify that the export.metadata was set
- // on the target being imported. This would also allow supporting
- // optional metadata.
- //
- if (meta)
- {
- // We can only do it during the load phase.
- //
- assert (lt.ctx.phase == run_phase::load);
-
- // Since it's not easy to say if things are the same, we load a copy
- // into the group and each member, if any.
- //
- if (optional<string> md = ipc.variable ("metadata"))
- parse_metadata (lt, ipc, *md);
-
- if (pa)
- if (optional<string> md = apc.variable ("metadata"))
- parse_metadata (*at, apc, *md);
-
- if (ps)
- if (optional<string> md = spc.variable ("metadata"))
- parse_metadata (*st, spc, *md);
- }
-
// For now we assume static and shared variants export the same set of
// modules/importable headers. While technically possible, having
// different sets will most likely lead to all sorts of complications
// (at least for installed libraries) and life is short.
//
- if (modules)
+ if (modules && ipc_meta)
{
parse_modules (ipc, prs);
// We treat headers outside of any project as C headers (see
// enter_header() for details).
//
- parse_headers (ipc, h::static_type /* **x_hdr */, x, prs);
+ parse_headers (ipc, h::static_type /* **x_hdrs */, x, prs);
parse_headers (ipc, h::static_type, "c", prs);
}
@@ -1574,7 +1446,7 @@ namespace build2
}
bool common::
- pkgconfig_load (action,
+ pkgconfig_load (optional<action>,
const scope&,
lib&,
liba*,
@@ -1584,13 +1456,13 @@ namespace build2
const dir_path&,
const dir_paths&,
const dir_paths&,
- bool) const
+ pair<bool, bool>) const
{
return false;
}
void common::
- pkgconfig_load (action,
+ pkgconfig_load (optional<action>,
const scope&,
lib&,
liba*,
@@ -1599,7 +1471,7 @@ namespace build2
const dir_path&,
const dir_paths&,
const dir_paths&,
- bool) const
+ pair<bool, bool>) const
{
assert (false); // Should never be called.
}
@@ -1613,6 +1485,11 @@ namespace build2
// file must be generated based on the static library to get accurate
// Libs.private.
//
+ // The other things that we omit from the common variant are -l options
+ // for binless libraries (so that it's usable from other build systems) as
+ // well as metadata (which could become incomplete due the previous
+ // omissions; for example, importable headers metadata).
+ //
void link_rule::
pkgconfig_save (action a,
const file& l,
@@ -1647,7 +1524,7 @@ namespace build2
// This is the lib{} group if we are generating the common file and the
// target itself otherwise.
//
- const file& g (common ? l.group->as<file> () : l);
+ const target& g (common ? *l.group : l);
// By default we assume things go into install.{include, lib}.
//
@@ -1668,7 +1545,7 @@ namespace build2
{
bool f (ldirs.empty ());
- ldirs.push_back (resolve_dir (g, d, !f /* fail_unknown */));
+ ldirs.push_back (resolve_dir (g, d, {}, !f /* fail_unknown */));
if (f && ldirs.back ().empty ())
break;
@@ -1677,6 +1554,7 @@ namespace build2
else
ldirs.push_back (resolve_dir (g,
cast<dir_path> (g["install.lib"]),
+ {},
false /* fail_unknown */));
if (!ldirs.empty () && ldirs.front ().empty ())
@@ -1699,14 +1577,79 @@ namespace build2
// Note that generation can take some time if we have a large number of
// prerequisite libraries.
//
- if (verb)
- text << "pc " << *t;
- else if (verb >= 2)
+ if (verb >= 2)
text << "cat >" << p;
+ else if (verb)
+ print_diag ("pc", g, *t);
if (ctx.dry_run)
return;
+ // See if we should be generating a relocatable .pc file and if so get
+ // its installation location. The plan is to make all absolute paths
+ // that we write relative to this location and prefix them with the
+ // built-in ${pcfiledir} variable (which supported by everybody: the
+ // original pkg-config, pkgconf, and our libpkg-config library).
+ //
+ dir_path rel_base;
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ path f (install::resolve_file (*t));
+ if (!f.empty ()) // Shouldn't happen but who knows.
+ rel_base = f.directory ();
+ }
+
+ // Note: reloc_*path() expect absolute and normalized paths.
+ //
+ // Note also that reloc_path() can be used on dir_path to get the path
+ // without the trailing slash.
+ //
+ auto reloc_path = [&rel_base,
+ s = string ()] (const path& p,
+ const char* what) mutable
+ -> const string&
+ {
+ if (rel_base.empty ())
+ return p.string ();
+
+ try
+ {
+ s = p.relative (rel_base).string ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make " << what << " path " << p << " relative to "
+ << rel_base;
+ }
+
+ if (!s.empty ()) s.insert (0, 1, path_traits::directory_separator);
+ s.insert (0, "${pcfiledir}");
+ return s;
+ };
+
+ auto reloc_dir_path = [&rel_base,
+ s = string ()] (const dir_path& p,
+ const char* what) mutable
+ -> const string&
+ {
+ if (rel_base.empty ())
+ return (s = p.representation ());
+
+ try
+ {
+ s = p.relative (rel_base).representation ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make " << what << " path " << p << " relative to "
+ << rel_base;
+ }
+
+ if (!s.empty ()) s.insert (0, 1, path_traits::directory_separator);
+ s.insert (0, "${pcfiledir}");
+ return s;
+ };
+
auto_rmfile arm (p);
try
@@ -1724,6 +1667,20 @@ namespace build2
fail << "no version variable in project " << n <<
info << "while generating " << p;
+ // When comparing versions, pkg-config uses RPM semantics, which is
+ // basically comparing each all-digit/alpha fragments in order.
+ // This means, for example, a semver with a pre-release will be
+ // compared incorrectly (pre-release will be greater than the final
+ // version). We could detect if this project uses stdver and chop
+ // off any pre-release information (so, essentially only saving the
+ // major.minor.patch part). But that means such .pc files will
+ // contain inaccurate version information. And seeing that we don't
+ // recommend using pkg-config (rather primitive) package dependency
+ // support, having complete version information for documentation
+ // seems more important.
+ //
+ // @@ Maybe still makes sense to only save version.project_id?
+ //
const string& v (cast<string> (vl));
os << "Name: " << n << endl;
@@ -1844,7 +1801,7 @@ namespace build2
//
os << "Cflags:";
for (const dir_path& d: idirs)
- os << " -I" << escape (d.string ());
+ os << " -I" << escape (reloc_path (d, "header search"));
save_poptions (x_export_poptions);
save_poptions (c_export_poptions);
os << endl;
@@ -1864,7 +1821,7 @@ namespace build2
// necessary to resolve its binful dependencies.
//
for (const dir_path& d: ldirs)
- os << " -L" << escape (d.string ());
+ os << " -L" << escape (reloc_path (d, "library search"));
// Now process ourselves as if we were being linked to something (so
// pretty similar to link_rule::append_libraries()). We also reuse
@@ -1880,7 +1837,8 @@ namespace build2
appended_libraries* pls; // Previous.
appended_libraries* ls; // Current.
strings& args;
- } d {os, nullptr, &ls, args};
+ bool common;
+ } d {os, nullptr, &ls, args, common};
auto imp = [&priv] (const target&, bool la) {return priv && la;};
@@ -1924,7 +1882,17 @@ namespace build2
if (l != nullptr)
{
if (l->is_a<libs> () || l->is_a<liba> ()) // See through libux.
- d.args.push_back (save_library_target (*l));
+ {
+ // Omit binless libraries from the common .pc file (see
+ // above).
+ //
+ // Note that in this case we still want to recursively
+ // traverse such libraries since they may still link to some
+ // non-binless system libraries (-lm, etc).
+ //
+ if (!d.common || !l->path ().empty ())
+ d.args.push_back (save_library_target (*l));
+ }
}
else
{
@@ -1946,7 +1914,7 @@ namespace build2
//@@ TODO: should we filter -L similar to -I?
//@@ TODO: how will the Libs/Libs.private work?
- //@@ TODO: remember to use escape()
+ //@@ TODO: remember to use reloc_*() and escape().
if (d.pls != nullptr && d.pls->find (l) != nullptr)
return true;
@@ -1967,7 +1935,10 @@ namespace build2
library_cache lib_cache;
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0, // Link flags.
- imp, lib, opt, !binless /* self */, &lib_cache);
+ imp, lib, opt,
+ !binless /* self */,
+ false /* proc_opt_group */, // @@ !priv?
+ &lib_cache);
for (const string& a: args)
os << ' ' << a;
@@ -1989,199 +1960,326 @@ namespace build2
process_libraries (a, bs, li, sys_lib_dirs,
l, la, 0, // Link flags.
- imp, lib, opt, false /* self */, &lib_cache);
+ imp, lib, opt,
+ false /* self */,
+ false /* proc_opt_group */, // @@ !priv?
+ &lib_cache);
for (const string& a: args)
os << ' ' << a;
os << endl;
- // Save the bin.whole (whole archive) flag (see the link rule for
- // details on the lookup semantics).
- //
- if (cast_false<bool> (
- l.lookup_original (ctx.var_pool["bin.whole"],
- true /* target_only */).first))
- {
- os << endl
- << "bin.whole = true"
- << endl;
- }
+ // See also bin.whole below.
}
}
- // Save user metadata.
+ // Save metadata unless this is the common .pc file (see above).
//
+ if (common)
{
- // The export.metadata value should start with the version followed
- // by the metadata variable prefix.
- //
- lookup lu (g[ctx.var_export_metadata]); // Target visibility.
+ os.close ();
+ arm.cancel ();
+ return;
+ }
- if (lu && !lu->empty ())
- {
- const names& ns (cast<names> (lu));
+ // The build2.metadata variable is a general indication of the
+ // metadata being present. Its value is the metadata version
+ // optionally followed by the user metadata variable prefix and
+ // variable list (see below for details). Having only the version
+ // indicates the absense of user metadata.
+ //
+ // See if we have the user metadata.
+ //
+ lookup um (g[ctx.var_export_metadata]); // Target visibility.
+
+ if (um && !um->empty ())
+ {
+ const names& ns (cast<names> (um));
- // First verify the version.
+ // First verify the version.
+ //
+ uint64_t ver;
+ try
+ {
+ // Note: does not change the passed name.
//
- uint64_t ver;
- try
- {
- // Note: does not change the passed name.
- //
- ver = value_traits<uint64_t>::convert (
- ns[0], ns[0].pair ? &ns[1] : nullptr);
- }
- catch (const invalid_argument& e)
- {
- fail << "invalid metadata version in library " << g << ": " << e;
- }
+ ver = value_traits<uint64_t>::convert (
+ ns[0], ns[0].pair ? &ns[1] : nullptr);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "invalid metadata version in library " << g << ": " << e
+ << endf;
+ }
- if (ver != 1)
- fail << "unexpected metadata version " << ver << " in library "
- << g;
+ if (ver != 1)
+ fail << "unexpected metadata version " << ver << " in library "
+ << g;
- // Next verify the metadata variable prefix.
- //
- if (ns.size () != 2 || !ns[1].simple ())
- fail << "invalid metadata variable prefix in library " << g;
+ // Next verify the metadata variable prefix.
+ //
+ if (ns.size () != 2 || !ns[1].simple ())
+ fail << "invalid metadata variable prefix in library " << g;
- const string& pfx (ns[1].value);
+ const string& pfx (ns[1].value);
- // Now find all the target-specific variables with this prefix.
- //
- // If this is the common .pc file, then we only look in the
- // group. Otherwise, in the member and the group.
- //
- // We only expect a handful of variables so let's use a vector and
- // linear search instead of a map.
- //
- using binding = pair<const variable*, const value*>;
- vector<binding> vars;
+ // Now find all the target-specific variables with this prefix.
+ //
+ // If this is the common .pc file, then we only look in the group.
+ // Otherwise, in the member and the group.
+ //
+ // To allow setting different values for the for-install and
+ // development build cases (required when a library comes with
+ // additional "assets"), we recognize the special .for_install
+ // variable name suffix: if there is a both <prefix>.<name> and
+ // <prefix>.<name>.for_install variables, then here we take the
+ // value from the latter. Note that we don't consider just
+ // <prefix>.for_install as special (so it's available to the user).
+ //
+ // We only expect a handful of variables so let's use a vector and
+ // linear search instead of a map.
+ //
+ struct binding
+ {
+ const string* name; // Name to be saved (without .for_install).
+ const variable* var; // Actual variable (potentially .for_install).
+ const value* val; // Actual value.
+ };
+ vector<binding> vars;
- auto append = [&pfx, &vars] (const target& t, bool dup)
+ auto append = [&l, &pfx, &vars,
+ tmp = string ()] (const target& t, bool dup) mutable
+ {
+ for (auto p (t.vars.lookup_namespace (pfx));
+ p.first != p.second;
+ ++p.first)
{
- for (auto p (t.vars.lookup_namespace (pfx));
- p.first != p.second;
- ++p.first)
+ const variable* var (&p.first->first.get ());
+
+ // Handle .for_install.
+ //
+ // The plan is as follows: if this is .for_install, then just
+ // verify we also have the value without the suffix and skip
+ // it. Otherwise, check if there also the .for_install variant
+ // and if so, use that instead. While we could probably do this
+ // more efficiently by remembering what we saw in vars, this is
+ // not performance-sensitive and so we keep it simple for now.
+ //
+ const string* name;
{
- const variable& var (p.first->first);
+ const string& v (var->name);
+ size_t n (v.size ());
- if (dup)
+ if (n > pfx.size () + 1 + 12 && // <prefix>..for_install
+ v.compare (n - 12, 12, ".for_install") == 0)
{
- if (find_if (vars.begin (), vars.end (),
- [&var] (const binding& p)
- {
- return p.first == &var;
- }) != vars.end ())
- continue;
+ tmp.assign (v, 0, n - 12);
+
+ if (t.vars.find (tmp) == t.vars.end ())
+ fail << v << " variant without " << tmp << " in library "
+ << l;
+
+ continue;
}
+ else
+ {
+ name = &v;
- // Re-lookup the value in order to apply target type/pattern
- // specific prepends/appends.
- //
- lookup l (t[var]);
- assert (l.defined ());
+ tmp = v; tmp += ".for_install";
- vars.emplace_back (&var, l.value);
+ auto i (t.vars.find (tmp));
+ if (i != t.vars.end ())
+ var = &i->first.get ();
+ }
}
- };
- append (g, false);
+ if (dup)
+ {
+ if (find_if (vars.begin (), vars.end (),
+ [name] (const binding& p)
+ {
+ return *p.name == *name;
+ }) != vars.end ())
+ continue;
+ }
- if (!common)
- {
- if (l.group != nullptr)
- append (*l.group, true);
- }
+ // Re-lookup the value in order to apply target type/pattern
+ // specific prepends/appends.
+ //
+ lookup l (t[*var]);
+ assert (l.defined ());
- // First write the `metadata` variable with the version and all
- // the variable names/types (which don't require any escaping).
- //
- os << endl
- << "metadata = " << ver;
+ vars.push_back (binding {name, var, l.value});
+ }
+ };
- for (const binding& b: vars)
- {
- const string& n (b.first->name);
- const value& v (*b.second);
+ append (g, false);
- // There is no notion of NULL in pkg-config variables and it's
- // probably best not to conflate them with empty.
- //
- if (v.null)
- fail << "null value in exported variable " << n
- << " of library " << l;
+ if (!common)
+ {
+ if (l.group != nullptr)
+ append (*l.group, true);
+ }
- if (v.type == nullptr)
- fail << "untyped value in exported variable " << n
- << " of library " << l;
+ // First write the build2.metadata variable with the version,
+ // prefix, and all the variable names/types (which should not
+ // require any escaping).
+ //
+ os << endl
+ << "build2.metadata = " << ver << ' ' << pfx;
- // Tighten this to only a sensible subset of types (see
- // parsing/serialization code for some of the potential
- // problems).
- //
- if (!metadata_type (v.type->name).first)
- fail << "unsupported value type " << v.type->name
- << " in exported variable " << n << " of library " << l;
+ for (const binding& b: vars)
+ {
+ const variable& var (*b.var);
+ const value& val (*b.val);
- os << ' ' << n << '/' << v.type->name;
- }
+ // There is no notion of NULL in pkg-config variables and it's
+ // probably best not to conflate them with empty.
+ //
+ if (val.null)
+ fail << "null value in exported variable " << var
+ << " of library " << l;
- os << endl;
+ if (val.type == nullptr)
+ fail << "untyped value in exported variable " << var
+ << " of library " << l;
- // Now the variables themselves.
+ // Tighten this to only a sensible subset of types (see
+ // parsing/serialization code for some of the potential problems).
//
- string s; // Reuse the buffer.
- for (const binding& b: vars)
- {
- const string& n (b.first->name);
- const value& v (*b.second);
+ if (!metadata_type (val.type->name).first)
+ fail << "unsupported value type " << val.type->name
+ << " in exported variable " << var << " of library " << l;
- names ns;
- names_view nv (reverse (v, ns));
+ os << " \\" << endl
+ << *b.name << '/' << val.type->name;
+ }
- os << n << " =";
+ os << endl
+ << endl;
- auto append = [&l, &n, &s] (const name& v)
+ // Now the variables themselves.
+ //
+ string s; // Reuse the buffer.
+ for (const binding& b: vars)
+ {
+ const variable& var (*b.var);
+ const value& val (*b.val);
+
+ names ns;
+ names_view nv (reverse (val, ns, true /* reduce */));
+
+ os << *b.name << " =";
+
+ auto append = [&rel_base,
+ &reloc_path,
+ &reloc_dir_path,
+ &l, &var, &val, &s] (const name& v)
+ {
+ // If this is absolute path or dir_path, then attempt to
+ // relocate. Without that the result will not be relocatable.
+ //
+ if (v.simple ())
{
- if (v.simple ())
- s += v.value;
- else if (v.directory ())
- s += v.dir.representation ();
+ path p;
+ if (!rel_base.empty () &&
+ val.type != nullptr &&
+ (val.type->is_a<path> () || val.type->is_a<paths> ()) &&
+ (p = path (v.value)).absolute ())
+ {
+ p.normalize ();
+ s += reloc_path (p, var.name.c_str ());
+ }
else
- // It seems like we shouldn't end up here due to the type
- // check but let's keep it for good measure.
- //
- fail << "simple or directory value expected instead of '"
- << v << "' in exported variable " << n
- << " of library " << l;
- };
-
- for (auto i (nv.begin ()); i != nv.end (); ++i)
+ s += v.value;
+ }
+ else if (v.directory ())
{
- s.clear ();
- append (*i);
-
- if (i->pair)
+ if (!rel_base.empty () && v.dir.absolute ())
{
- // @@ What if the value contains the pair character? Maybe
- // quote the halves in this case? Note: need to handle in
- // parse_metadata() above if enable here. Note: none of
- // the types currently allowed use pairs.
+ dir_path p (v.dir);
+ p.normalize ();
+ s += reloc_dir_path (p, var.name.c_str ());
+ }
+ else
+ s += v.dir.representation ();
+ }
+ else
+ // It seems like we shouldn't end up here due to the type
+ // check but let's keep it for good measure.
+ //
+ fail << "simple or directory value expected instead of '"
+ << v << "' in exported variable " << var << " of library "
+ << l;
+ };
+
+ for (auto i (nv.begin ()); i != nv.end (); ++i)
+ {
+ s.clear ();
+ append (*i);
+
+ if (i->pair)
+ {
+ // @@ What if the value contains the pair character? Maybe
+ // quote the halves in this case? Note: need to handle in
+ // parse_metadata() above if enable here. Note: none of the
+ // types currently allowed use pairs.
#if 0
- s += i->pair;
- append (*++i);
+ s += i->pair;
+ append (*++i);
#else
- fail << "pair in exported variable " << n << " of library "
- << l;
+ fail << "pair in exported variable " << var << " of library "
+ << l;
#endif
- }
-
- os << ' ' << escape (s);
}
- os << endl;
+ os << ' ' << escape (s);
}
+
+ os << endl;
+ }
+ }
+ else
+ {
+ // No user metadata.
+ //
+ os << endl
+ << "build2.metadata = 1" << endl;
+ }
+
+ // Save cc.type (see init() for the format documentation).
+ //
+ // Note that this value is set by link_rule and therefore should
+ // be there.
+ //
+ {
+ const string& t (
+ cast<string> (
+ l.state[a].lookup_original (
+ c_type, true /* target_only */).first));
+
+ // If common, then only save the language (the rest could be
+ // static/shared-specific; strictly speaking even the language could
+ // be, but that seems far fetched).
+ //
+ os << endl
+ << "cc.type = " << (common ? string (t, 0, t.find (',')) : t)
+ << endl;
+ }
+
+ // Save the bin.whole (whole archive) flag (see the link rule for
+ // details on the lookup semantics).
+ //
+ if (la)
+ {
+ // Note: go straight for the public variable pool.
+ //
+ if (cast_false<bool> (l.lookup_original (
+ ctx.var_pool["bin.whole"],
+ true /* target_only */).first))
+ {
+ os << endl
+ << "bin.whole = true" << endl;
}
}
@@ -2263,7 +2361,7 @@ namespace build2
move (pp),
symexport});
}
- else if (pt->is_a (**x_hdr) || pt->is_a<h> ())
+ else if (pt->is_a (**this->x_hdrs) || pt->is_a<h> ())
{
if (cast_false<bool> ((*pt)[c_importable]))
{
@@ -2288,7 +2386,7 @@ namespace build2
if (size_t n = mods.size ())
{
os << endl
- << "cxx_modules =";
+ << "cxx.modules =";
// The partition separator (`:`) is not a valid character in the
// variable name. In fact, from the pkg-config source we can see
@@ -2306,33 +2404,35 @@ namespace build2
// Module names shouldn't require escaping.
//
os << (n != 1 ? " \\\n" : " ")
- << m.name << '=' << escape (m.file.string ());
+ << m.name << '='
+ << escape (reloc_path (m.file, "module interface"));
}
os << endl;
// Module-specific properties. The format is:
//
- // <lang>_module_<property>.<module> = <value>
+ // <lang>.module_<property>.<module> = <value>
//
for (const module& m: mods)
{
if (!m.preprocessed.empty ())
- os << "cxx_module_preprocessed." << m.name << " = "
+ os << "cxx.module_preprocessed." << m.name << " = "
<< m.preprocessed << endl;
if (m.symexport)
- os << "cxx_module_symexport." << m.name << " = true" << endl;
+ os << "cxx.module_symexport." << m.name << " = true" << endl;
}
}
if (size_t n = c_hdrs.size ())
{
os << endl
- << "c_importable_headers =";
+ << "c.importable_headers =";
for (const path& h: c_hdrs)
- os << (n != 1 ? " \\\n" : " ") << escape (h.string ());
+ os << (n != 1 ? " \\\n" : " ")
+ << escape (reloc_path (h, "header unit"));
os << endl;
}
@@ -2340,10 +2440,11 @@ namespace build2
if (size_t n = x_hdrs.size ())
{
os << endl
- << x << "_importable_headers =";
+ << x << ".importable_headers =";
for (const path& h: x_hdrs)
- os << (n != 1 ? " \\\n" : " ") << escape (h.string ());
+ os << (n != 1 ? " \\\n" : " ")
+ << escape (reloc_path (h, "header unit"));
os << endl;
}
diff --git a/libbuild2/cc/pkgconfig.hxx b/libbuild2/cc/pkgconfig.hxx
new file mode 100644
index 0000000..a1bcdee
--- /dev/null
+++ b/libbuild2/cc/pkgconfig.hxx
@@ -0,0 +1,129 @@
+// file : libbuild2/cc/pkgconfig.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CC_PKGCONFIG_HXX
+#define LIBBUILD2_CC_PKGCONFIG_HXX
+
+// In order not to complicate the bootstrap procedure with libpkg-config
+// building, exclude functionality that involves reading of .pc files.
+//
+#ifndef BUILD2_BOOTSTRAP
+
+#ifndef BUILD2_LIBPKGCONF
+# include <libpkg-config/pkg-config.h>
+#else
+# include <libpkgconf/libpkgconf.h>
+#endif
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ // Load package information from a .pc file. Filter out the -I/-L options
+ // that refer to system directories. This makes sure all the system search
+ // directories are "pushed" to the back which minimizes the chances of
+ // picking up wrong (e.g., old installed version) header/library.
+ //
+ // Note that the prerequisite package .pc files search order is as
+ // follows:
+ //
+ // - in the directory of the specified file
+ // - in pc_dirs directories (in the specified order)
+ //
+ // Issue diagnostics and throw failed on any errors.
+ //
+ class pkgconfig
+ {
+ public:
+ using path_type = build2::path;
+
+ path_type path;
+
+ public:
+ pkgconfig (path_type,
+ const dir_paths& pc_dirs,
+ const dir_paths& sys_hdr_dirs,
+ const dir_paths& sys_lib_dirs);
+
+ // Create an unloaded/empty object. Querying package information on such
+ // an object is illegal.
+ //
+ pkgconfig () = default;
+ ~pkgconfig ();
+
+ // Movable-only type.
+ //
+ pkgconfig (pkgconfig&&) noexcept;
+ pkgconfig& operator= (pkgconfig&&) noexcept;
+
+ pkgconfig (const pkgconfig&) = delete;
+ pkgconfig& operator= (const pkgconfig&) = delete;
+
+ strings
+ cflags (bool static_) const;
+
+ strings
+ libs (bool static_) const;
+
+ optional<string>
+ variable (const char*) const;
+
+ optional<string>
+ variable (const string& s) const {return variable (s.c_str ());}
+
+ private:
+ void
+ free ();
+
+#ifndef BUILD2_LIBPKGCONF
+ pkg_config_client_t* client_ = nullptr;
+ pkg_config_pkg_t* pkg_ = nullptr;
+#else
+ pkgconf_client_t* client_ = nullptr;
+ pkgconf_pkg_t* pkg_ = nullptr;
+#endif
+ };
+
+ inline pkgconfig::
+ ~pkgconfig ()
+ {
+ if (client_ != nullptr) // Not empty.
+ free ();
+ }
+
+ inline pkgconfig::
+ pkgconfig (pkgconfig&& p) noexcept
+ : path (move (p.path)),
+ client_ (p.client_),
+ pkg_ (p.pkg_)
+ {
+ p.client_ = nullptr;
+ p.pkg_ = nullptr;
+ }
+
+ inline pkgconfig& pkgconfig::
+ operator= (pkgconfig&& p) noexcept
+ {
+ if (this != &p)
+ {
+ if (client_ != nullptr) // Not empty.
+ free ();
+
+ path = move (p.path);
+ client_ = p.client_;
+ pkg_ = p.pkg_;
+
+ p.client_ = nullptr;
+ p.pkg_ = nullptr;
+ }
+ return *this;
+ }
+ }
+}
+
+#endif // BUILD2_BOOTSTRAP
+
+#endif // LIBBUILD2_CC_PKGCONFIG_HXX
diff --git a/libbuild2/cc/predefs-rule.cxx b/libbuild2/cc/predefs-rule.cxx
new file mode 100644
index 0000000..e74192d
--- /dev/null
+++ b/libbuild2/cc/predefs-rule.cxx
@@ -0,0 +1,379 @@
+// file : libbuild2/cc/predefs-rule.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/cc/predefs-rule.hxx>
+
+#include <libbuild2/depdb.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/context.hxx>
+#include <libbuild2/algorithm.hxx>
+#include <libbuild2/filesystem.hxx>
+#include <libbuild2/diagnostics.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ predefs_rule::
+ predefs_rule (data&& d)
+ : common (move (d)),
+ rule_name (string (x) += ".predefs"),
+ rule_id (rule_name + " 1")
+ {
+ }
+
+ bool predefs_rule::
+ match (action, target&, const string& hint, match_extra&) const
+ {
+ tracer trace (x, "predefs_rule::match");
+
+ // We only match with an explicit hint (failed that, we will turn every
+ // header into predefs).
+ //
+ if (hint == rule_name)
+ {
+ // Don't match if unsupported compiler. In particular, this allows the
+ // user to provide a fallback rule.
+ //
+ switch (cclass)
+ {
+ case compiler_class::gcc: return true;
+ case compiler_class::msvc:
+ {
+ // Only MSVC 19.20 or later. Not tested with clang-cl.
+ //
+ if (cvariant.empty () && (cmaj > 19 || (cmaj == 19 && cmin >= 20)))
+ return true;
+
+ l4 ([&]{trace << "unsupported compiler/version";});
+ break;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ recipe predefs_rule::
+ apply (action a, target& xt, match_extra&) const
+ {
+ file& t (xt.as<file> ());
+ t.derive_path ();
+
+ // Inject dependency on the output directory.
+ //
+ inject_fsdir (a, t);
+
+ if (a == perform_update_id)
+ {
+ return [this] (action a, const target& xt)
+ {
+ return perform_update (a, xt);
+ };
+ }
+ else if (a == perform_clean_id)
+ {
+ return [] (action a, const target& t)
+ {
+ // Also remove the temporary input source file in case it wasn't
+ // removed at the end of the update.
+ //
+ return perform_clean_extra (a, t.as<file> (), {".d", ".t"});
+ };
+ }
+ else
+ return noop_recipe; // Configure update.
+ }
+
+ // Filter noise, sanitize options (msvc.cxx).
+ //
+ void
+ msvc_filter_cl (diag_buffer&, const path& src);
+
+ void
+ msvc_sanitize_cl (cstrings&);
+
+ target_state predefs_rule::
+ perform_update (action a, const target& xt) const
+ {
+ tracer trace (x, "predefs_rule::perform_update");
+
+ const file& t (xt.as<file> ());
+ const path& tp (t.path ());
+
+ context& ctx (t.ctx);
+
+ const scope& rs (t.root_scope ());
+
+ // Execute prerequisites (the output directory being the only one thus
+ // not mtime checking).
+ //
+ execute_prerequisites (a, t);
+
+ // Use depdb to track changes to options, compiler, etc (similar to
+ // the compile_rule).
+ //
+ depdb dd (tp + ".d");
+ {
+ // First should come the rule name/version.
+ //
+ if (dd.expect (rule_id) != nullptr)
+ l4 ([&]{trace << "rule mismatch forcing update of " << t;});
+
+ // Then the compiler checksum.
+ //
+ if (dd.expect (cast<string> (rs[x_checksum])) != nullptr)
+ l4 ([&]{trace << "compiler mismatch forcing update of " << t;});
+
+ // Then the compiler environment checksum.
+ //
+ if (dd.expect (env_checksum) != nullptr)
+ l4 ([&]{trace << "environment mismatch forcing update of " << t;});
+
+ // Finally the options checksum (as below).
+ //
+ {
+ sha256 cs;
+ append_options (cs, t, c_coptions);
+ append_options (cs, t, x_coptions);
+ append_options (cs, cmode);
+
+ if (dd.expect (cs.string ()) != nullptr)
+ l4 ([&]{trace << "options mismatch forcing update of " << t;});
+ }
+ }
+
+ // Update if depdb mismatch.
+ //
+ bool update (dd.writing () || dd.mtime > t.load_mtime ());
+
+ dd.close ();
+
+ if (!update)
+ return target_state::unchanged; // No mtime-based prerequisites.
+
+ // Prepare the compiler command-line.
+ //
+ cstrings args {cpath.recall_string ()};
+
+ // Append compile options.
+ //
+ // Note that any command line macros that we specify with -D will end up
+ // in the predefs, which is something we don't want. So no poptions.
+ //
+ append_options (args, t, c_coptions);
+ append_options (args, t, x_coptions);
+ append_options (args, cmode);
+
+ // The output and input paths, relative to the working directory for
+ // easier to read diagnostics.
+ //
+ path relo (relative (tp));
+ path reli;
+
+ // Add compiler-specific command-line arguments.
+ //
+ switch (cclass)
+ {
+ case compiler_class::gcc:
+ {
+ // Add implied options which may affect predefs, similar to the
+ // compile rule.
+ //
+ if (!find_option_prefix ("-finput-charset=", args))
+ args.push_back ("-finput-charset=UTF-8");
+
+ if (ctype == compiler_type::clang && tsys == "win32-msvc")
+ {
+ if (!find_options ({"-nostdlib", "-nostartfiles"}, args))
+ {
+ args.push_back ("-D_MT");
+ args.push_back ("-D_DLL");
+ }
+ }
+
+ if (ctype == compiler_type::clang && cvariant == "emscripten")
+ {
+ if (x_lang == lang::cxx)
+ {
+ if (!find_option_prefix ("DISABLE_EXCEPTION_CATCHING=", args))
+ {
+ args.push_back ("-s");
+ args.push_back ("DISABLE_EXCEPTION_CATCHING=0");
+ }
+ }
+ }
+
+ args.push_back ("-E"); // Stop after the preprocessing stage.
+ args.push_back ("-dM"); // Generate #define directives.
+
+ // Output.
+ //
+ args.push_back ("-o");
+ args.push_back (relo.string ().c_str ());
+
+ // Input.
+ //
+ args.push_back ("-x");
+ switch (x_lang)
+ {
+ case lang::c: args.push_back ("c"); break;
+ case lang::cxx: args.push_back ("c++"); break;
+ }
+
+ // With GCC and Clang we can compile /dev/null as stdin by
+ // specifying `-` and thus omitting the temporary file.
+ //
+ args.push_back ("-");
+
+ break;
+ }
+ case compiler_class::msvc:
+ {
+ // Add implied options which may affect predefs, similar to the
+ // compile rule.
+ //
+ {
+ // Note: these affect the _MSVC_EXECUTION_CHARACTER_SET, _UTF8
+ // macros.
+ //
+ bool sc (find_option_prefixes (
+ {"/source-charset:", "-source-charset:"}, args));
+ bool ec (find_option_prefixes (
+ {"/execution-charset:", "-execution-charset:"}, args));
+
+ if (!sc && !ec)
+ args.push_back ("/utf-8");
+ else
+ {
+ if (!sc)
+ args.push_back ("/source-charset:UTF-8");
+
+ if (!ec)
+ args.push_back ("/execution-charset:UTF-8");
+ }
+ }
+
+ if (x_lang == lang::cxx)
+ {
+ if (!find_option_prefixes ({"/EH", "-EH"}, args))
+ args.push_back ("/EHsc");
+ }
+
+ if (!find_option_prefixes ({"/MD", "/MT", "-MD", "-MT"}, args))
+ args.push_back ("/MD");
+
+ msvc_sanitize_cl (args);
+
+ args.push_back ("/nologo");
+
+ // /EP may seem like it contradicts /P but it's the recommended
+ // way to suppress `#line`s from the output of the /P option (see
+ // /P in the "MSVC Compiler Options" documentation).
+ //
+ args.push_back ("/P"); // Write preprocessor output to a file.
+ args.push_back ("/EP"); // Preprocess to stdout without `#line`s.
+
+ args.push_back ("/PD"); // Print all macro definitions.
+ args.push_back ("/Zc:preprocessor"); // Preproc. conformance mode.
+
+ // Output (note that while the /Fi: variant is only availbale
+ // starting with VS2013, /Zc:preprocessor is only available in
+ // starting from VS2019).
+ //
+ args.push_back ("/Fi:");
+ args.push_back (relo.string ().c_str ());
+
+ // Input.
+ //
+ switch (x_lang)
+ {
+ case lang::c: args.push_back ("/TC"); break;
+ case lang::cxx: args.push_back ("/TP"); break;
+ }
+
+ // Input path.
+ //
+ // Note that with MSVC we have to use a temporary file. In
+ // particular compiling `nul` does not work.
+ //
+ reli = relo + ".t";
+ args.push_back (reli.string ().c_str ());
+
+ break;
+ }
+ }
+
+ args.push_back (nullptr);
+
+ // Run the compiler.
+ //
+ if (verb >= 2)
+ print_process (args);
+ else if (verb)
+ print_diag ((string (x_name) + "-predefs").c_str (), t);
+
+ if (!ctx.dry_run)
+ {
+ // Create an empty temporary input source file, if necessary.
+ //
+ auto_rmfile rmi;
+ if (!reli.empty ())
+ {
+ rmi = auto_rmfile (reli);
+
+ if (exists (reli, false /* follow_symlinks */))
+ rmfile (ctx, reli, 3 /* verbosity */);
+
+ touch (ctx, reli, true /* create */, 3 /* verbosity */);
+ }
+
+ try
+ {
+ // VC cl.exe sends diagnostics to stdout. It also prints the file
+ // name being compiled as the first line. So for cl.exe we filter
+ // that noise out.
+ //
+ // For other compilers also redirect stdout to stderr, in case any
+ // of them tries to pull off something similar. For sane compilers
+ // this should be harmless.
+ //
+ // We also redirect stdin to /dev/null in case that's used instead
+ // of the temporary file.
+ //
+ // Note: similar logic as in compile_rule.
+ //
+ bool filter (ctype == compiler_type::msvc);
+
+ process pr (cpath,
+ args,
+ -2, /* stdin */
+ 2, /* stdout */
+ diag_buffer::pipe (ctx, filter /* force */) /* stderr */);
+
+ diag_buffer dbuf (ctx, args[0], pr);
+
+ if (filter)
+ msvc_filter_cl (dbuf, reli);
+
+ dbuf.read ();
+
+ run_finish (dbuf, args, pr, 1 /* verbosity */);
+ dd.check_mtime (tp);
+ }
+ catch (const process_error& e)
+ {
+ error << "unable to execute " << args[0] << ": " << e;
+
+ if (e.child)
+ exit (1);
+
+ throw failed ();
+ }
+ }
+
+ t.mtime (system_clock::now ());
+ return target_state::changed;
+ }
+ }
+}
diff --git a/libbuild2/cc/predefs-rule.hxx b/libbuild2/cc/predefs-rule.hxx
new file mode 100644
index 0000000..60aa063
--- /dev/null
+++ b/libbuild2/cc/predefs-rule.hxx
@@ -0,0 +1,45 @@
+// file : libbuild2/cc/predefs-rule.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CC_PREDEFS_RULE_HXX
+#define LIBBUILD2_CC_PREDEFS_RULE_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/rule.hxx>
+
+#include <libbuild2/cc/types.hxx>
+#include <libbuild2/cc/common.hxx>
+
+#include <libbuild2/cc/export.hxx>
+
+namespace build2
+{
+ namespace cc
+ {
+ class LIBBUILD2_CC_SYMEXPORT predefs_rule: public rule,
+ virtual common
+ {
+ public:
+ const string rule_name;
+
+ explicit
+ predefs_rule (data&&);
+
+ virtual bool
+ match (action, target&, const string&, match_extra&) const override;
+
+ virtual recipe
+ apply (action, target&, match_extra&) const override;
+
+ target_state
+ perform_update (action, const target&) const;
+
+ private:
+ const string rule_id;
+ };
+ }
+}
+
+#endif // LIBBUILD2_CC_PREDEFS_RULE_HXX
diff --git a/libbuild2/cc/std.cppm b/libbuild2/cc/std.cppm
new file mode 100644
index 0000000..5368d1c
--- /dev/null
+++ b/libbuild2/cc/std.cppm
@@ -0,0 +1,6781 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// WARNING, this entire header is generated by
+// utils/generate_std_cppm_in.py
+// DO NOT MODIFY!
+
+module;
+
+#include <__config>
+
+#if _LIBCPP_VERSION < 170000
+#error libc++ version 17.0.0 or later required
+#endif
+
+// The headers of Table 24: C++ library headers [tab:headers.cpp]
+// and the headers of Table 25: C++ headers for C library facilities [tab:headers.cpp.c]
+#include <algorithm>
+#include <any>
+#include <array>
+#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
+# include <atomic>
+#endif
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <barrier>
+#endif
+#include <bit>
+#include <bitset>
+#include <cassert>
+#include <cctype>
+#include <cerrno>
+#include <cfenv>
+#include <cfloat>
+#include <charconv>
+#include <chrono>
+#include <cinttypes>
+#include <climits>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <clocale>
+#endif
+#include <cmath>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <codecvt>
+#endif
+#include <compare>
+#include <complex>
+#include <concepts>
+#include <condition_variable>
+#include <coroutine>
+#include <csetjmp>
+#include <csignal>
+#include <cstdarg>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <cuchar>
+#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)
+# include <cwchar>
+#endif
+#if !defined(_LIBCPP_HAS_NO_WIDE_CHARACTERS)
+# include <cwctype>
+#endif
+#include <deque>
+#include <exception>
+#include <execution>
+#include <expected>
+#include <filesystem>
+#include <format>
+#include <forward_list>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <fstream>
+#endif
+#include <functional>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <future>
+#endif
+#include <initializer_list>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <iomanip>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <ios>
+#endif
+#include <iosfwd>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <iostream>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <istream>
+#endif
+#include <iterator>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <latch>
+#endif
+#include <limits>
+#include <list>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <locale>
+#endif
+#include <map>
+#include <mdspan>
+#include <memory>
+#include <memory_resource>
+#include <mutex>
+#include <new>
+#include <numbers>
+#include <numeric>
+#include <optional>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <ostream>
+#endif
+#include <print>
+#include <queue>
+#include <random>
+#include <ranges>
+#include <ratio>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <regex>
+#endif
+#include <scoped_allocator>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <semaphore>
+#endif
+#include <set>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <shared_mutex>
+#endif
+#include <source_location>
+#include <span>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <sstream>
+#endif
+#include <stack>
+#include <stdexcept>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <stop_token>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <streambuf>
+#endif
+#include <string>
+#include <string_view>
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+# include <strstream>
+#endif
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+#if __has_include(<syncstream>)
+# define _LIPCPP_HAS_YES_SYNCSTREAM
+# include <syncstream>
+#endif
+#endif
+#include <system_error>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+# include <thread>
+#endif
+#include <tuple>
+#include <type_traits>
+#include <typeindex>
+#include <typeinfo>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <valarray>
+#include <variant>
+#include <vector>
+#include <version>
+
+#if 0
+// *** Headers not yet available ***
+#if __has_include(<debugging>)
+# error "update the header information for <debugging> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<debugging>)
+#if __has_include(<flat_map>)
+# error "update the header information for <flat_map> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<flat_map>)
+#if __has_include(<flat_set>)
+# error "update the header information for <flat_set> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<flat_set>)
+#if __has_include(<generator>)
+# error "update the header information for <generator> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<generator>)
+#if __has_include(<hazard_pointer>)
+# error "update the header information for <hazard_pointer> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<hazard_pointer>)
+#if __has_include(<linalg>)
+# error "update the header information for <linalg> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<linalg>)
+#if __has_include(<rcu>)
+# error "update the header information for <rcu> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<rcu>)
+#if __has_include(<spanstream>)
+# error "update the header information for <spanstream> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<spanstream>)
+#if __has_include(<stacktrace>)
+# error "update the header information for <stacktrace> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<stacktrace>)
+#if __has_include(<stdfloat>)
+# error "update the header information for <stdfloat> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<stdfloat>)
+#if __has_include(<text_encoding>)
+# error "update the header information for <text_encoding> in libcxx/utils/generate_std_cppm_in.py"
+#endif // __has_include(<text_encoding>)
+#endif
+
+export module std;
+
+// algorithm.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ namespace ranges {
+ // [algorithms.results], algorithm result types
+ using std::ranges::in_found_result;
+ using std::ranges::in_fun_result;
+ using std::ranges::in_in_out_result;
+ using std::ranges::in_in_result;
+ using std::ranges::in_out_out_result;
+ using std::ranges::in_out_result;
+ // using std::ranges::in_value_result;
+ using std::ranges::min_max_result;
+ // using std::ranges::out_value_result;
+ } // namespace ranges
+
+ // [alg.nonmodifying], non-modifying sequence operations
+ // [alg.all.of], all of
+ using std::all_of;
+ namespace ranges {
+ using std::ranges::all_of;
+ }
+
+ // [alg.any.of], any of
+ using std::any_of;
+ namespace ranges {
+ using std::ranges::any_of;
+ }
+
+ // [alg.none.of], none of
+ using std::none_of;
+ namespace ranges {
+ using std::ranges::none_of;
+ }
+
+ // [alg.contains], contains
+#if 0
+ namespace ranges {
+ using std::ranges::contains;
+ using std::ranges::contains_subrange;
+ } // namespace ranges
+#endif
+
+ // [alg.foreach], for each
+ using std::for_each;
+
+ namespace ranges {
+ using std::ranges::for_each;
+ using std::ranges::for_each_result;
+ } // namespace ranges
+
+ using std::for_each_n;
+
+ namespace ranges {
+ using std::ranges::for_each_n_result;
+
+ using std::ranges::for_each_n;
+ } // namespace ranges
+
+ // [alg.find], find
+ using std::find;
+ using std::find_if;
+ using std::find_if_not;
+
+ namespace ranges {
+ using std::ranges::find;
+ using std::ranges::find_if;
+ using std::ranges::find_if_not;
+ } // namespace ranges
+
+ namespace ranges {
+#if 0
+ using std::ranges::find_last;
+ using std::ranges::find_last_if;
+ using std::ranges::find_last_if_not;
+#endif
+ } // namespace ranges
+
+ // [alg.find.end], find end
+ using std::find_end;
+
+ namespace ranges {
+ using std::ranges::find_end;
+ }
+
+ // [alg.find.first.of], find first
+ using std::find_first_of;
+
+ namespace ranges {
+ using std::ranges::find_first_of;
+ }
+
+ // [alg.adjacent.find], adjacent find
+ using std::adjacent_find;
+
+ namespace ranges {
+ using std::ranges::adjacent_find;
+ }
+
+ // [alg.count], count
+ using std::count;
+ using std::count_if;
+
+ namespace ranges {
+ using std::ranges::count;
+ using std::ranges::count_if;
+ } // namespace ranges
+
+ // [mismatch], mismatch
+ using std::mismatch;
+
+ namespace ranges {
+ using std::ranges::mismatch_result;
+
+ using std::ranges::mismatch;
+ } // namespace ranges
+
+ // [alg.equal], equal
+ using std::equal;
+
+ namespace ranges {
+ using std::ranges::equal;
+ }
+
+ // [alg.is.permutation], is permutation
+ using std::is_permutation;
+
+ namespace ranges {
+ using std::ranges::is_permutation;
+ }
+
+ // [alg.search], search
+ using std::search;
+
+ namespace ranges {
+ using std::ranges::search;
+ }
+
+ using std::search_n;
+
+ namespace ranges {
+ using std::ranges::search_n;
+ }
+
+ namespace ranges {
+#if _LIBCPP_STD_VER >= 23
+ // [alg.starts.with], starts with
+ using std::ranges::starts_with;
+
+#if _LIBCPP_VERSION >= 180000
+ // [alg.ends.with], ends with
+ using std::ranges::ends_with;
+#endif
+
+# if 0
+ // [alg.fold], fold
+ using std::ranges::fold_left;
+ using std::ranges::fold_left_first;
+ using std::ranges::fold_right;
+ using std::ranges::fold_right_last;
+ using std::ranges::fold_left_with_iter;
+ using std::ranges::fold_left_with_iter_result;
+ using std::ranges::fold_left_with_iter;
+ using std::ranges::fold_left_first_with_iter;
+ using std::ranges::fold_left_first_with_iter;
+# endif
+#endif // _LIBCPP_STD_VER >= 23
+ } // namespace ranges
+
+ // [alg.modifying.operations], mutating sequence operations
+ // [alg.copy], copy
+ using std::copy;
+
+ namespace ranges {
+ using std::ranges::copy;
+ using std::ranges::copy_result;
+ } // namespace ranges
+
+ using std::copy_n;
+
+ namespace ranges {
+ using std::ranges::copy_n;
+ using std::ranges::copy_n_result;
+ } // namespace ranges
+
+ using std::copy_if;
+
+ namespace ranges {
+ using std::ranges::copy_if;
+ using std::ranges::copy_if_result;
+ } // namespace ranges
+
+ using std::copy_backward;
+
+ namespace ranges {
+ using std::ranges::copy_backward;
+ using std::ranges::copy_backward_result;
+ } // namespace ranges
+
+ // [alg.move], move
+ using std::move;
+
+ namespace ranges {
+ using std::ranges::move;
+ using std::ranges::move_result;
+ } // namespace ranges
+
+ using std::move_backward;
+
+ namespace ranges {
+ using std::ranges::move_backward;
+ using std::ranges::move_backward_result;
+ } // namespace ranges
+
+ // [alg.swap], swap
+ using std::swap_ranges;
+
+ namespace ranges {
+ using std::ranges::swap_ranges;
+ using std::ranges::swap_ranges_result;
+ } // namespace ranges
+
+ using std::iter_swap;
+
+ // [alg.transform], transform
+ using std::transform;
+
+ namespace ranges {
+ using std::ranges::binary_transform_result;
+ using std::ranges::unary_transform_result;
+
+ using std::ranges::transform;
+
+ } // namespace ranges
+
+ using std::replace;
+ using std::replace_if;
+
+ namespace ranges {
+ using std::ranges::replace;
+ using std::ranges::replace_if;
+ } // namespace ranges
+
+ using std::replace_copy;
+ using std::replace_copy_if;
+
+ namespace ranges {
+ using std::ranges::replace_copy;
+ using std::ranges::replace_copy_if;
+ using std::ranges::replace_copy_if_result;
+ using std::ranges::replace_copy_result;
+ } // namespace ranges
+
+ // [alg.fill], fill
+ using std::fill;
+ using std::fill_n;
+
+ namespace ranges {
+ using std::ranges::fill;
+ using std::ranges::fill_n;
+ } // namespace ranges
+
+ // [alg.generate], generate
+ using std::generate;
+ using std::generate_n;
+
+ namespace ranges {
+ using std::ranges::generate;
+ using std::ranges::generate_n;
+ } // namespace ranges
+
+ // [alg.remove], remove
+ using std::remove;
+ using std::remove_if;
+
+ namespace ranges {
+ using std::ranges::remove;
+ using std::ranges::remove_if;
+ } // namespace ranges
+
+ using std::remove_copy;
+ using std::remove_copy_if;
+ namespace ranges {
+ using std::ranges::remove_copy;
+ using std::ranges::remove_copy_if;
+ using std::ranges::remove_copy_if_result;
+ using std::ranges::remove_copy_result;
+ } // namespace ranges
+
+ // [alg.unique], unique
+ using std::unique;
+
+ namespace ranges {
+ using std::ranges::unique;
+ }
+
+ using std::unique_copy;
+
+ namespace ranges {
+ using std::ranges::unique_copy;
+ using std::ranges::unique_copy_result;
+ } // namespace ranges
+
+ // [alg.reverse], reverse
+ using std::reverse;
+
+ namespace ranges {
+ using std::ranges::reverse;
+ }
+
+ using std::reverse_copy;
+
+ namespace ranges {
+ using std::ranges::reverse_copy;
+ using std::ranges::reverse_copy_result;
+ } // namespace ranges
+
+ // [alg.rotate], rotate
+ using std::rotate;
+
+ namespace ranges {
+ using std::ranges::rotate;
+ }
+
+ using std::rotate_copy;
+
+ namespace ranges {
+ using std::ranges::rotate_copy;
+ using std::ranges::rotate_copy_result;
+ } // namespace ranges
+
+ // [alg.random.sample], sample
+ using std::sample;
+
+ namespace ranges {
+ using std::ranges::sample;
+ }
+
+ // [alg.random.shuffle], shuffle
+ using std::shuffle;
+
+ namespace ranges {
+ using std::ranges::shuffle;
+ }
+
+ // [alg.shift], shift
+ using std::shift_left;
+
+ namespace ranges {
+ // using std::ranges::shift_left;
+ }
+
+ using std::shift_right;
+
+ namespace ranges {
+ // using std::ranges::shift_right;
+ }
+
+ // [alg.sorting], sorting and related operations
+ // [alg.sort], sorting
+ using std::sort;
+
+ namespace ranges {
+ using std::ranges::sort;
+ }
+
+ using std::stable_sort;
+
+ namespace ranges {
+ using std::ranges::stable_sort;
+ }
+
+ using std::partial_sort;
+
+ namespace ranges {
+ using std::ranges::partial_sort;
+ }
+ using std::partial_sort_copy;
+
+ namespace ranges {
+ using std::ranges::partial_sort_copy;
+ using std::ranges::partial_sort_copy_result;
+ } // namespace ranges
+
+ using std::is_sorted;
+ using std::is_sorted_until;
+
+ namespace ranges {
+ using std::ranges::is_sorted;
+ using std::ranges::is_sorted_until;
+ } // namespace ranges
+
+ // [alg.nth.element], Nth element
+ using std::nth_element;
+
+ namespace ranges {
+ using std::ranges::nth_element;
+ }
+
+ // [alg.binary.search], binary search
+ using std::lower_bound;
+
+ namespace ranges {
+ using std::ranges::lower_bound;
+ }
+
+ using std::upper_bound;
+
+ namespace ranges {
+ using std::ranges::upper_bound;
+ }
+
+ using std::equal_range;
+
+ namespace ranges {
+ using std::ranges::equal_range;
+ }
+
+ using std::binary_search;
+
+ namespace ranges {
+ using std::ranges::binary_search;
+ }
+
+ // [alg.partitions], partitions
+ using std::is_partitioned;
+
+ namespace ranges {
+ using std::ranges::is_partitioned;
+ }
+
+ using std::partition;
+
+ namespace ranges {
+ using std::ranges::partition;
+ }
+
+ using std::stable_partition;
+
+ namespace ranges {
+ using std::ranges::stable_partition;
+ }
+
+ using std::partition_copy;
+
+ namespace ranges {
+ using std::ranges::partition_copy;
+ using std::ranges::partition_copy_result;
+ } // namespace ranges
+
+ using std::partition_point;
+
+ namespace ranges {
+ using std::ranges::partition_point;
+ }
+ // [alg.merge], merge
+ using std::merge;
+ namespace ranges {
+ using std::ranges::merge;
+ using std::ranges::merge_result;
+ } // namespace ranges
+
+ using std::inplace_merge;
+
+ namespace ranges {
+ using std::ranges::inplace_merge;
+ }
+
+ // [alg.set.operations], set operations
+ using std::includes;
+ namespace ranges {
+ using std::ranges::includes;
+ }
+
+ using std::set_union;
+
+ namespace ranges {
+ using std::ranges::set_union;
+ using std::ranges::set_union_result;
+ } // namespace ranges
+
+ using std::set_intersection;
+ namespace ranges {
+ using std::ranges::set_intersection;
+ using std::ranges::set_intersection_result;
+ } // namespace ranges
+
+ using std::set_difference;
+
+ namespace ranges {
+ using std::ranges::set_difference;
+ using std::ranges::set_difference_result;
+ } // namespace ranges
+
+ using std::set_symmetric_difference;
+
+ namespace ranges {
+ using std::ranges::set_symmetric_difference_result;
+
+ using std::ranges::set_symmetric_difference;
+ } // namespace ranges
+
+ // [alg.heap.operations], heap operations
+ using std::push_heap;
+
+ namespace ranges {
+ using std::ranges::push_heap;
+ }
+
+ using std::pop_heap;
+
+ namespace ranges {
+ using std::ranges::pop_heap;
+ }
+
+ using std::make_heap;
+
+ namespace ranges {
+ using std::ranges::make_heap;
+ }
+
+ using std::sort_heap;
+
+ namespace ranges {
+ using std::ranges::sort_heap;
+ }
+
+ using std::is_heap;
+
+ namespace ranges {
+ using std::ranges::is_heap;
+ }
+
+ using std::is_heap_until;
+
+ namespace ranges {
+ using std::ranges::is_heap_until;
+ }
+
+ // [alg.min.max], minimum and maximum
+ using std::min;
+
+ namespace ranges {
+ using std::ranges::min;
+ }
+
+ using std::max;
+
+ namespace ranges {
+ using std::ranges::max;
+ }
+
+ using std::minmax;
+
+ namespace ranges {
+ using std::ranges::minmax_result;
+
+ using std::ranges::minmax;
+ } // namespace ranges
+
+ using std::min_element;
+
+ namespace ranges {
+ using std::ranges::min_element;
+ }
+
+ using std::max_element;
+
+ namespace ranges {
+ using std::ranges::max_element;
+ }
+
+ using std::minmax_element;
+
+ namespace ranges {
+ using std::ranges::minmax_element_result;
+
+ using std::ranges::minmax_element;
+ } // namespace ranges
+ // [alg.clamp], bounded value
+ using std::clamp;
+
+ namespace ranges {
+ using std::ranges::clamp;
+ }
+
+ // [alg.lex.comparison], lexicographical comparison
+ using std::lexicographical_compare;
+
+ namespace ranges {
+ using std::ranges::lexicographical_compare;
+ }
+
+ // [alg.three.way], three-way comparison algorithms
+ using std::lexicographical_compare_three_way;
+
+ // [alg.permutation.generators], permutations
+ using std::next_permutation;
+
+ namespace ranges {
+ using std::ranges::next_permutation_result;
+
+ using std::ranges::next_permutation;
+ } // namespace ranges
+
+ using std::prev_permutation;
+
+ namespace ranges {
+ using std::ranges::prev_permutation_result;
+
+ using std::ranges::prev_permutation;
+ } // namespace ranges
+
+} // namespace std
+
+// any.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <any> interface ([any] in the C++ standard) from the
+// 'std' named module.
+export namespace std {
+
+  // [any.bad.any.cast], class bad_any_cast
+  using std::bad_any_cast;
+
+  // [any.class], class any
+  using std::any;
+
+  // [any.nonmembers], non-member functions
+  using std::any_cast;
+  using std::make_any;
+  using std::swap;
+
+} // namespace std
+
+// array.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <array> interface ([array] in the C++ standard) from the
+// 'std' named module.
+export namespace std {
+
+  // [array], class template array
+  using std::array;
+
+  using std::operator==;
+  using std::operator<=>;
+
+  // [array.special], specialized algorithms
+  using std::swap;
+
+  // [array.creation], array creation functions
+  using std::to_array;
+
+  // [array.tuple], tuple interface
+  using std::get;
+  using std::tuple_element;
+  using std::tuple_size;
+
+} // namespace std
+
+// atomic.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <atomic> interface ([atomics] in the C++ standard) from the
+// 'std' named module.
+export namespace std {
+
+  // [atomics.order], order and consistency
+  using std::memory_order;
+  using std::memory_order_acq_rel;
+  using std::memory_order_acquire;
+  using std::memory_order_consume;
+  using std::memory_order_relaxed;
+  using std::memory_order_release;
+  using std::memory_order_seq_cst;
+
+  using std::kill_dependency;
+
+  // [atomics.ref.generic], class template atomic_ref
+  // [atomics.ref.pointer], partial specialization for pointers
+  // atomic_ref is deliberately left commented out here (not exported).
+  // using std::atomic_ref;
+
+  // [atomics.types.generic], class template atomic
+  using std::atomic;
+
+  // [atomics.nonmembers], non-member functions
+  using std::atomic_compare_exchange_strong;
+  using std::atomic_compare_exchange_strong_explicit;
+  using std::atomic_compare_exchange_weak;
+  using std::atomic_compare_exchange_weak_explicit;
+  using std::atomic_exchange;
+  using std::atomic_exchange_explicit;
+  using std::atomic_is_lock_free;
+  using std::atomic_load;
+  using std::atomic_load_explicit;
+  using std::atomic_store;
+  using std::atomic_store_explicit;
+
+  using std::atomic_fetch_add;
+  using std::atomic_fetch_add_explicit;
+  using std::atomic_fetch_and;
+  using std::atomic_fetch_and_explicit;
+  using std::atomic_fetch_or;
+  using std::atomic_fetch_or_explicit;
+  using std::atomic_fetch_sub;
+  using std::atomic_fetch_sub_explicit;
+  using std::atomic_fetch_xor;
+  using std::atomic_fetch_xor_explicit;
+  using std::atomic_notify_all;
+  using std::atomic_notify_one;
+  using std::atomic_wait;
+  using std::atomic_wait_explicit;
+
+  // [atomics.alias], type aliases
+  using std::atomic_bool;
+  using std::atomic_char;
+  using std::atomic_char16_t;
+  using std::atomic_char32_t;
+  using std::atomic_char8_t;
+  using std::atomic_int;
+  using std::atomic_llong;
+  using std::atomic_long;
+  using std::atomic_schar;
+  using std::atomic_short;
+  using std::atomic_uchar;
+  using std::atomic_uint;
+  using std::atomic_ullong;
+  using std::atomic_ulong;
+  using std::atomic_ushort;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+  using std::atomic_wchar_t;
+#endif
+
+  using std::atomic_int16_t;
+  using std::atomic_int32_t;
+  using std::atomic_int64_t;
+  using std::atomic_int8_t;
+  using std::atomic_uint16_t;
+  using std::atomic_uint32_t;
+  using std::atomic_uint64_t;
+  using std::atomic_uint8_t;
+
+  using std::atomic_int_least16_t;
+  using std::atomic_int_least32_t;
+  using std::atomic_int_least64_t;
+  using std::atomic_int_least8_t;
+  using std::atomic_uint_least16_t;
+  using std::atomic_uint_least32_t;
+  using std::atomic_uint_least64_t;
+  using std::atomic_uint_least8_t;
+
+  using std::atomic_int_fast16_t;
+  using std::atomic_int_fast32_t;
+  using std::atomic_int_fast64_t;
+  using std::atomic_int_fast8_t;
+  using std::atomic_uint_fast16_t;
+  using std::atomic_uint_fast32_t;
+  using std::atomic_uint_fast64_t;
+  using std::atomic_uint_fast8_t;
+
+  using std::atomic_intmax_t;
+  using std::atomic_intptr_t;
+  using std::atomic_ptrdiff_t;
+  using std::atomic_size_t;
+  using std::atomic_uintmax_t;
+  using std::atomic_uintptr_t;
+
+  using std::atomic_signed_lock_free;
+  using std::atomic_unsigned_lock_free;
+
+  // [atomics.flag], flag type and operations
+  using std::atomic_flag;
+
+  using std::atomic_flag_clear;
+  using std::atomic_flag_clear_explicit;
+  using std::atomic_flag_test;
+  using std::atomic_flag_test_and_set;
+  using std::atomic_flag_test_and_set_explicit;
+  using std::atomic_flag_test_explicit;
+
+  using std::atomic_flag_notify_all;
+  using std::atomic_flag_notify_one;
+  using std::atomic_flag_wait;
+  using std::atomic_flag_wait_explicit;
+
+  // [atomics.fences], fences
+  using std::atomic_signal_fence;
+  using std::atomic_thread_fence;
+
+  // [depr.atomics.nonmembers]
+  using std::atomic_init;
+
+} // namespace std
+
+// barrier.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <barrier> interface ([thread.barrier] in the C++ standard)
+// from the 'std' named module; empty when the library is built without
+// thread support.
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+  using std::barrier;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// bit.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <bit> interface ([bit] in the C++ standard) from the
+// 'std' named module.
+export namespace std {
+  // [bit.cast], bit_cast
+  using std::bit_cast;
+
+#if _LIBCPP_STD_VER >= 23
+  // [bit.byteswap], byteswap (C++23 and later only)
+  using std::byteswap;
+#endif
+
+  // [bit.pow.two], integral powers of 2
+  using std::bit_ceil;
+  using std::bit_floor;
+  using std::bit_width;
+  using std::has_single_bit;
+
+  // [bit.rotate], rotating
+  using std::rotl;
+  using std::rotr;
+
+  // [bit.count], counting
+  using std::countl_one;
+  using std::countl_zero;
+  using std::countr_one;
+  using std::countr_zero;
+  using std::popcount;
+
+  // [bit.endian], endian
+  using std::endian;
+} // namespace std
+
+// bitset.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <bitset> interface ([bitset] in the C++ standard) from the
+// 'std' named module.
+export namespace std {
+  using std::bitset;
+
+  // [bitset.operators], bitset operators
+  using std::operator&;
+  using std::operator|;
+  using std::operator^;
+  using std::operator>>;
+  using std::operator<<;
+
+  // [bitset.hash], hash support
+  using std::hash;
+
+} // namespace std
+
+// cassert.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <cassert>'s entire interface is the assert macro; macros cannot be
+// exported from a named module, so there is nothing to re-export here.
+export namespace std {
+  // This module exports nothing.
+} // namespace std
+
+// cctype.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <cctype> character-classification and case-mapping
+// functions from the 'std' named module.
+export namespace std {
+  using std::isalnum;
+  using std::isalpha;
+  using std::isblank;
+  using std::iscntrl;
+  using std::isdigit;
+  using std::isgraph;
+  using std::islower;
+  using std::isprint;
+  using std::ispunct;
+  using std::isspace;
+  using std::isupper;
+  using std::isxdigit;
+  using std::tolower;
+  using std::toupper;
+} // namespace std
+
+// cerrno.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <cerrno> provides only macros (errno and the E* error codes); macros
+// cannot be exported from a named module, so there is nothing to re-export.
+export namespace std {
+  // This module exports nothing.
+} // namespace std
+
+// cfenv.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <cfenv> floating-point environment interface from the
+// 'std' named module (the FE_* macros themselves cannot be exported).
+export namespace std {
+  // types
+  using std::fenv_t;
+  using std::fexcept_t;
+
+  // functions
+  using std::feclearexcept;
+  using std::fegetexceptflag;
+  using std::feraiseexcept;
+  using std::fesetexceptflag;
+  using std::fetestexcept;
+
+  using std::fegetround;
+  using std::fesetround;
+
+  using std::fegetenv;
+  using std::feholdexcept;
+  using std::fesetenv;
+  using std::feupdateenv;
+
+} // namespace std
+
+// cfloat.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <cfloat> provides only macros (FLT_*/DBL_*/LDBL_*); macros cannot be
+// exported from a named module, so there is nothing to re-export.
+export namespace std {
+  // This module exports nothing.
+} // namespace std
+
+// charconv.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <charconv> interface ([charconv] in the C++ standard)
+// from the 'std' named module.
+export namespace std {
+
+  // floating-point format for primitive numerical conversion
+  using std::chars_format;
+
+  // chars_format is a bitmask type.
+  // [bitmask.types] specified operators
+  using std::operator&;
+  using std::operator&=;
+  using std::operator^;
+  using std::operator^=;
+  using std::operator|;
+  using std::operator|=;
+  using std::operator~;
+
+  // [charconv.to.chars], primitive numerical output conversion
+  using std::to_chars_result;
+
+  using std::to_chars;
+
+  // [charconv.from.chars], primitive numerical input conversion
+  using std::from_chars_result;
+
+  using std::from_chars;
+} // namespace std
+
+// chrono.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <chrono> interface ([time] in the C++ standard) from the
+// 'std' named module. Note: the two `#endif` trailer comments in the time
+// zone section were swapped relative to their matching conditionals; they
+// are corrected below (comment-only change).
+export namespace std {
+
+  namespace chrono {
+    using std::chrono::duration;
+    using std::chrono::time_point;
+
+  } // namespace chrono
+
+  using std::common_type;
+
+  namespace chrono {
+
+    // [time.traits], customization traits
+    using std::chrono::treat_as_floating_point;
+    using std::chrono::treat_as_floating_point_v;
+
+    using std::chrono::duration_values;
+
+    // using std::chrono::is_clock;
+    // using std::chrono::is_clock_v;
+
+    // [time.duration.nonmember], duration arithmetic
+    using std::chrono::operator+;
+    using std::chrono::operator-;
+    using std::chrono::operator*;
+    using std::chrono::operator/;
+    using std::chrono::operator%;
+
+    // [time.duration.comparisons], duration comparisons
+    using std::chrono::operator==;
+    using std::chrono::operator!=;
+    using std::chrono::operator<;
+    using std::chrono::operator>;
+    using std::chrono::operator<=;
+    using std::chrono::operator>=;
+    using std::chrono::operator<=>;
+
+    // [time.duration.cast], conversions
+    using std::chrono::ceil;
+    using std::chrono::duration_cast;
+    using std::chrono::floor;
+    using std::chrono::round;
+
+    // [time.duration.io], duration I/O
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+    using std::chrono::operator<<;
+#endif
+    // using std::chrono::from_stream;
+
+    // convenience typedefs
+    using std::chrono::days;
+    using std::chrono::hours;
+    using std::chrono::microseconds;
+    using std::chrono::milliseconds;
+    using std::chrono::minutes;
+    using std::chrono::months;
+    using std::chrono::nanoseconds;
+    using std::chrono::seconds;
+    using std::chrono::weeks;
+    using std::chrono::years;
+
+    // [time.point.nonmember], time_point arithmetic
+
+    // [time.point.comparisons], time_point comparisons
+
+    // [time.point.cast], conversions
+    using std::chrono::time_point_cast;
+
+    // [time.duration.alg], specialized algorithms
+    using std::chrono::abs;
+
+    // [time.clock.system], class system_clock
+    using std::chrono::system_clock;
+
+    using std::chrono::sys_days;
+    using std::chrono::sys_seconds;
+    using std::chrono::sys_time;
+
+#if 0
+    // [time.clock.utc], class utc_clock
+    using std::chrono::utc_clock;
+
+    using std::chrono::utc_seconds;
+    using std::chrono::utc_time;
+
+    using std::chrono::leap_second_info;
+
+    using std::chrono::get_leap_second_info;
+    // [time.clock.tai], class tai_clock
+    using std::chrono::tai_clock;
+
+    using std::chrono::tai_seconds;
+    using std::chrono::tai_time;
+
+    // [time.clock.gps], class gps_clock
+    using std::chrono::gps_clock;
+
+    using std::chrono::gps_seconds;
+    using std::chrono::gps_time;
+#endif
+    // [time.clock.file], type file_clock
+    using std::chrono::file_clock;
+
+    using std::chrono::file_time;
+
+#ifndef _LIBCPP_HAS_NO_MONOTONIC_CLOCK
+    // [time.clock.steady], class steady_clock
+    using std::chrono::steady_clock;
+#endif
+
+    // [time.clock.hires], class high_resolution_clock
+    using std::chrono::high_resolution_clock;
+
+    // [time.clock.local], local time
+    using std::chrono::local_days;
+    using std::chrono::local_seconds;
+    using std::chrono::local_t;
+    using std::chrono::local_time;
+
+    // [time.clock.cast], time_point conversions
+    // using std::chrono::clock_time_conversion;
+
+    // using std::chrono::clock_cast;
+
+    // [time.cal.last], class last_spec
+    using std::chrono::last_spec;
+
+    // [time.cal.day], class day
+    using std::chrono::day;
+
+    // [time.cal.month], class month
+    using std::chrono::month;
+
+    // [time.cal.year], class year
+    using std::chrono::year;
+
+    // [time.cal.wd], class weekday
+    using std::chrono::weekday;
+
+    // [time.cal.wdidx], class weekday_indexed
+    using std::chrono::weekday_indexed;
+
+    // [time.cal.wdlast], class weekday_last
+    using std::chrono::weekday_last;
+
+    // [time.cal.md], class month_day
+    using std::chrono::month_day;
+
+    // [time.cal.mdlast], class month_day_last
+    using std::chrono::month_day_last;
+
+    // [time.cal.mwd], class month_weekday
+    using std::chrono::month_weekday;
+
+    // [time.cal.mwdlast], class month_weekday_last
+    using std::chrono::month_weekday_last;
+
+    // [time.cal.ym], class year_month
+    using std::chrono::year_month;
+
+    // [time.cal.ymd], class year_month_day
+    using std::chrono::year_month_day;
+
+    // [time.cal.ymdlast], class year_month_day_last
+    using std::chrono::year_month_day_last;
+
+    // [time.cal.ymwd], class year_month_weekday
+    using std::chrono::year_month_weekday;
+
+    // [time.cal.ymwdlast], class year_month_weekday_last
+    using std::chrono::year_month_weekday_last;
+
+    // [time.cal.operators], civil calendar conventional syntax operators
+
+    // [time.hms], class template hh_mm_ss
+    using std::chrono::hh_mm_ss;
+
+    // [time.12], 12/24 hour functions
+    using std::chrono::is_am;
+    using std::chrono::is_pm;
+    using std::chrono::make12;
+    using std::chrono::make24;
+
+#if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \
+    !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+
+# ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+    // [time.zone.db], time zone database
+    using std::chrono::tzdb;
+    using std::chrono::tzdb_list;
+
+    // [time.zone.db.access], time zone database access
+    // using std::chrono::current_zone;
+    using std::chrono::get_tzdb;
+    using std::chrono::get_tzdb_list;
+    // using std::chrono::locate_zone;
+
+    // [time.zone.db.remote], remote time zone database support
+    using std::chrono::reload_tzdb;
+    using std::chrono::remote_version;
+
+# endif // _LIBCPP_ENABLE_EXPERIMENTAL
+
+# if 0
+    // [time.zone.exception], exception classes
+    using std::chrono::ambiguous_local_time;
+    using std::chrono::nonexistent_local_time;
+
+    // [time.zone.info], information classes
+    using std::chrono::sys_info;
+
+    // [time.zone.timezone], class time_zone
+    using std::chrono::choose;
+    using std::chrono::time_zone;
+
+    // [time.zone.zonedtraits], class template zoned_traits
+    using std::chrono::zoned_traits;
+
+    // [time.zone.zonedtime], class template zoned_time
+    using std::chrono::zoned_time;
+
+    using std::chrono::zoned_seconds;
+
+    // [time.zone.leap], leap second support
+    using std::chrono::leap_second;
+
+    // [time.zone.link], class time_zone_link
+    using std::chrono::time_zone_link;
+
+    // [time.format], formatting
+    using std::chrono::local_time_format;
+# endif
+#endif // !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) &&
+       // !defined(_LIBCPP_HAS_NO_LOCALIZATION)
+  } // namespace chrono
+
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+  using std::formatter;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+  namespace chrono {
+    // using std::chrono::parse;
+
+    // calendrical constants
+    using std::chrono::last;
+
+    using std::chrono::Friday;
+    using std::chrono::Monday;
+    using std::chrono::Saturday;
+    using std::chrono::Sunday;
+    using std::chrono::Thursday;
+    using std::chrono::Tuesday;
+    using std::chrono::Wednesday;
+
+    using std::chrono::April;
+    using std::chrono::August;
+    using std::chrono::December;
+    using std::chrono::February;
+    using std::chrono::January;
+    using std::chrono::July;
+    using std::chrono::June;
+    using std::chrono::March;
+    using std::chrono::May;
+    using std::chrono::November;
+    using std::chrono::October;
+    using std::chrono::September;
+
+  } // namespace chrono
+
+} // namespace std
+// Re-exports the chrono user-defined literal operators. The original
+// section-tag comments here contained search-and-replace residue
+// ("[using std::literals::chrono_literals::.cal...]"); restored to the
+// standard's stable names.
+export namespace std::inline literals::inline chrono_literals {
+  // [time.duration.literals], suffixes for duration literals
+  using std::literals::chrono_literals::operator""h;
+  using std::literals::chrono_literals::operator""min;
+  using std::literals::chrono_literals::operator""s;
+  using std::literals::chrono_literals::operator""ms;
+  using std::literals::chrono_literals::operator""us;
+  using std::literals::chrono_literals::operator""ns;
+
+  // [time.cal.day.nonmembers], non-member functions
+  using std::literals::chrono_literals::operator""d;
+
+  // [time.cal.year.nonmembers], non-member functions
+  using std::literals::chrono_literals::operator""y;
+} // namespace std::inline literals::inline chrono_literals
+
+// cinttypes.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <cinttypes> interface from the 'std' named module
+// (typo "conflicing" fixed in the comments below).
+export namespace std {
+  using std::imaxdiv_t;
+
+  using std::imaxabs;
+  using std::imaxdiv;
+  using std::strtoimax;
+  using std::strtoumax;
+  using std::wcstoimax;
+  using std::wcstoumax;
+
+  // abs is conditionally here, but always present in cmath.cppm. To avoid
+  // conflicting declarations omit the using here.
+
+  // div is conditionally here, but always present in cstdlib.cppm. To avoid
+  // conflicting declarations omit the using here.
+} // namespace std
+
+// climits.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <climits> provides only macros (CHAR_BIT, INT_MAX, ...); macros cannot be
+// exported from a named module, so there is nothing to re-export.
+export namespace std {
+  // This module exports nothing.
+} // namespace std
+
+// clocale.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Re-exports the <clocale> interface from the 'std' named module; empty
+// when the library is built without localization support.
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+  using std::lconv;
+
+  using std::localeconv;
+  using std::setlocale;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// cmath.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ using std::double_t;
+ using std::float_t;
+
+ using std::acos;
+ using std::acosf;
+ using std::acosl;
+
+ using std::asin;
+ using std::asinf;
+ using std::asinl;
+
+ using std::atan;
+ using std::atanf;
+ using std::atanl;
+
+ using std::atan2;
+ using std::atan2f;
+ using std::atan2l;
+
+ using std::cos;
+ using std::cosf;
+ using std::cosl;
+
+ using std::sin;
+ using std::sinf;
+ using std::sinl;
+
+ using std::tan;
+ using std::tanf;
+ using std::tanl;
+
+ using std::acosh;
+ using std::acoshf;
+ using std::acoshl;
+
+ using std::asinh;
+ using std::asinhf;
+ using std::asinhl;
+
+ using std::atanh;
+ using std::atanhf;
+ using std::atanhl;
+
+ using std::cosh;
+ using std::coshf;
+ using std::coshl;
+
+ using std::sinh;
+ using std::sinhf;
+ using std::sinhl;
+
+ using std::tanh;
+ using std::tanhf;
+ using std::tanhl;
+
+ using std::exp;
+ using std::expf;
+ using std::expl;
+
+ using std::exp2;
+ using std::exp2f;
+ using std::exp2l;
+
+ using std::expm1;
+ using std::expm1f;
+ using std::expm1l;
+
+ using std::frexp;
+ using std::frexpf;
+ using std::frexpl;
+
+ using std::ilogb;
+ using std::ilogbf;
+ using std::ilogbl;
+
+ using std::ldexp;
+ using std::ldexpf;
+ using std::ldexpl;
+
+ using std::log;
+ using std::logf;
+ using std::logl;
+
+ using std::log10;
+ using std::log10f;
+ using std::log10l;
+
+ using std::log1p;
+ using std::log1pf;
+ using std::log1pl;
+
+ using std::log2;
+ using std::log2f;
+ using std::log2l;
+
+ using std::logb;
+ using std::logbf;
+ using std::logbl;
+
+ using std::modf;
+ using std::modff;
+ using std::modfl;
+
+ using std::scalbn;
+ using std::scalbnf;
+ using std::scalbnl;
+
+ using std::scalbln;
+ using std::scalblnf;
+ using std::scalblnl;
+
+ using std::cbrt;
+ using std::cbrtf;
+ using std::cbrtl;
+
+ // [c.math.abs], absolute values
+ using std::abs;
+
+ using std::fabs;
+ using std::fabsf;
+ using std::fabsl;
+
+ using std::hypot;
+ using std::hypotf;
+ using std::hypotl;
+
+ // [c.math.hypot3], three-dimensional hypotenuse
+
+ using std::pow;
+ using std::powf;
+ using std::powl;
+
+ using std::sqrt;
+ using std::sqrtf;
+ using std::sqrtl;
+
+ using std::erf;
+ using std::erff;
+ using std::erfl;
+
+ using std::erfc;
+ using std::erfcf;
+ using std::erfcl;
+
+ using std::lgamma;
+ using std::lgammaf;
+ using std::lgammal;
+
+ using std::tgamma;
+ using std::tgammaf;
+ using std::tgammal;
+
+ using std::ceil;
+ using std::ceilf;
+ using std::ceill;
+
+ using std::floor;
+ using std::floorf;
+ using std::floorl;
+
+ using std::nearbyint;
+ using std::nearbyintf;
+ using std::nearbyintl;
+
+ using std::rint;
+ using std::rintf;
+ using std::rintl;
+
+ using std::lrint;
+ using std::lrintf;
+ using std::lrintl;
+
+ using std::llrint;
+ using std::llrintf;
+ using std::llrintl;
+
+ using std::round;
+ using std::roundf;
+ using std::roundl;
+
+ using std::lround;
+ using std::lroundf;
+ using std::lroundl;
+
+ using std::llround;
+ using std::llroundf;
+ using std::llroundl;
+
+ using std::trunc;
+ using std::truncf;
+ using std::truncl;
+
+ using std::fmod;
+ using std::fmodf;
+ using std::fmodl;
+
+ using std::remainder;
+ using std::remainderf;
+ using std::remainderl;
+
+ using std::remquo;
+ using std::remquof;
+ using std::remquol;
+
+ using std::copysign;
+ using std::copysignf;
+ using std::copysignl;
+
+ using std::nan;
+ using std::nanf;
+ using std::nanl;
+
+ using std::nextafter;
+ using std::nextafterf;
+ using std::nextafterl;
+
+ using std::nexttoward;
+ using std::nexttowardf;
+ using std::nexttowardl;
+
+ using std::fdim;
+ using std::fdimf;
+ using std::fdiml;
+
+ using std::fmax;
+ using std::fmaxf;
+ using std::fmaxl;
+
+ using std::fmin;
+ using std::fminf;
+ using std::fminl;
+
+ using std::fma;
+ using std::fmaf;
+ using std::fmal;
+
+ // [c.math.lerp], linear interpolation
+ using std::lerp;
+
+ // [c.math.fpclass], classification / comparison functions
+ using std::fpclassify;
+ using std::isfinite;
+ using std::isgreater;
+ using std::isgreaterequal;
+ using std::isinf;
+ using std::isless;
+ using std::islessequal;
+ using std::islessgreater;
+ using std::isnan;
+ using std::isnormal;
+ using std::isunordered;
+ using std::signbit;
+
+ // [sf.cmath], mathematical special functions
+#if 0
+ // [sf.cmath.assoc.laguerre], associated Laguerre polynomials
+ using std::assoc_laguerre;
+ using std::assoc_laguerref;
+ using std::assoc_laguerrel;
+
+ // [sf.cmath.assoc.legendre], associated Legendre functions
+ using std::assoc_legendre;
+ using std::assoc_legendref;
+ using std::assoc_legendrel;
+
+ // [sf.cmath.beta], beta function
+ using std::beta;
+ using std::betaf;
+ using std::betal;
+
+ // [sf.cmath.comp.ellint.1], complete elliptic integral of the first kind
+ using std::comp_ellint_1;
+ using std::comp_ellint_1f;
+ using std::comp_ellint_1l;
+
+ // [sf.cmath.comp.ellint.2], complete elliptic integral of the second kind
+ using std::comp_ellint_2;
+ using std::comp_ellint_2f;
+ using std::comp_ellint_2l;
+
+ // [sf.cmath.comp.ellint.3], complete elliptic integral of the third kind
+ using std::comp_ellint_3;
+ using std::comp_ellint_3f;
+ using std::comp_ellint_3l;
+
+ // [sf.cmath.cyl.bessel.i], regular modified cylindrical Bessel functions
+ using std::cyl_bessel_i;
+ using std::cyl_bessel_if;
+ using std::cyl_bessel_il;
+
+ // [sf.cmath.cyl.bessel.j], cylindrical Bessel functions of the first kind
+ using std::cyl_bessel_j;
+ using std::cyl_bessel_jf;
+ using std::cyl_bessel_jl;
+
+ // [sf.cmath.cyl.bessel.k], irregular modified cylindrical Bessel functions
+ using std::cyl_bessel_k;
+ using std::cyl_bessel_kf;
+ using std::cyl_bessel_kl;
+
+ // [sf.cmath.cyl.neumann], cylindrical Neumann functions
+ // cylindrical Bessel functions of the second kind
+ using std::cyl_neumann;
+ using std::cyl_neumannf;
+ using std::cyl_neumannl;
+
+ // [sf.cmath.ellint.1], incomplete elliptic integral of the first kind
+ using std::ellint_1;
+ using std::ellint_1f;
+ using std::ellint_1l;
+
+ // [sf.cmath.ellint.2], incomplete elliptic integral of the second kind
+ using std::ellint_2;
+ using std::ellint_2f;
+ using std::ellint_2l;
+
+ // [sf.cmath.ellint.3], incomplete elliptic integral of the third kind
+ using std::ellint_3;
+ using std::ellint_3f;
+ using std::ellint_3l;
+
+ // [sf.cmath.expint], exponential integral
+ using std::expint;
+ using std::expintf;
+ using std::expintl;
+
+ // [sf.cmath.hermite], Hermite polynomials
+ using std::hermite;
+ using std::hermitef;
+ using std::hermitel;
+
+ // [sf.cmath.laguerre], Laguerre polynomials
+ using std::laguerre;
+ using std::laguerref;
+ using std::laguerrel;
+
+ // [sf.cmath.legendre], Legendre polynomials
+ using std::legendre;
+ using std::legendref;
+ using std::legendrel;
+
+ // [sf.cmath.riemann.zeta], Riemann zeta function
+ using std::riemann_zeta;
+ using std::riemann_zetaf;
+ using std::riemann_zetal;
+
+ // [sf.cmath.sph.bessel], spherical Bessel functions of the first kind
+ using std::sph_bessel;
+ using std::sph_besself;
+ using std::sph_bessell;
+
+ // [sf.cmath.sph.legendre], spherical associated Legendre functions
+ using std::sph_legendre;
+ using std::sph_legendref;
+ using std::sph_legendrel;
+
+ // [sf.cmath.sph.neumann], spherical Neumann functions;
+ // spherical Bessel functions of the second kind
+ using std::sph_neumann;
+ using std::sph_neumannf;
+ using std::sph_neumannl;
+#endif
+} // namespace std
+
+// codecvt.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::codecvt_mode;
+
+ using std::codecvt_utf16;
+ using std::codecvt_utf8;
+ using std::codecvt_utf8_utf16;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// compare.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [cmp.categories], comparison category types
+ using std::partial_ordering;
+ using std::strong_ordering;
+ using std::weak_ordering;
+
+ // named comparison functions
+ using std::is_eq;
+ using std::is_gt;
+ using std::is_gteq;
+ using std::is_lt;
+ using std::is_lteq;
+ using std::is_neq;
+
+ // [cmp.common], common comparison category type
+ using std::common_comparison_category;
+ using std::common_comparison_category_t;
+
+ // [cmp.concept], concept three_way_comparable
+ using std::three_way_comparable;
+ using std::three_way_comparable_with;
+
+ // [cmp.result], result of three-way comparison
+ using std::compare_three_way_result;
+
+ using std::compare_three_way_result_t;
+
+ // [comparisons.three.way], class compare_three_way
+ using std::compare_three_way;
+
+ // [cmp.alg], comparison algorithms
+ inline namespace __cpo {
+ using std::__cpo::compare_partial_order_fallback;
+ using std::__cpo::compare_strong_order_fallback;
+ using std::__cpo::compare_weak_order_fallback;
+ using std::__cpo::partial_order;
+ using std::__cpo::strong_order;
+ using std::__cpo::weak_order;
+ } // namespace __cpo
+
+} // namespace std
+
+// complex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [complex], class template complex
+ using std::complex;
+
+ // [complex.ops], operators
+ using std::operator+;
+ using std::operator-;
+ using std::operator*;
+ using std::operator/;
+
+ using std::operator==;
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::operator>>;
+ using std::operator<<;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+ // [complex.value.ops], values
+ using std::imag;
+ using std::real;
+
+ using std::abs;
+ using std::arg;
+ using std::norm;
+
+ using std::conj;
+ using std::polar;
+ using std::proj;
+
+ // [complex.transcendentals], transcendentals
+ using std::acos;
+ using std::asin;
+ using std::atan;
+
+ using std::acosh;
+ using std::asinh;
+ using std::atanh;
+
+ using std::cos;
+ using std::cosh;
+ using std::exp;
+ using std::log;
+ using std::log10;
+
+ using std::pow;
+
+ using std::sin;
+ using std::sinh;
+ using std::sqrt;
+ using std::tan;
+ using std::tanh;
+
+ // [complex.literals], complex literals
+ inline namespace literals {
+ inline namespace complex_literals {
+ using std::operator""il;
+ using std::operator""i;
+ using std::operator""if;
+ } // namespace complex_literals
+ } // namespace literals
+
+} // namespace std
+
+// concepts.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [concepts.lang], language-related concepts
+ // [concept.same], concept same_as
+ using std::same_as;
+
+ // [concept.derived], concept derived_from
+ using std::derived_from;
+
+ // [concept.convertible], concept convertible_to
+ using std::convertible_to;
+
+ // [concept.commonref], concept common_reference_with
+ using std::common_reference_with;
+
+ // [concept.common], concept common_with
+ using std::common_with;
+
+ // [concepts.arithmetic], arithmetic concepts
+ using std::floating_point;
+ using std::integral;
+ using std::signed_integral;
+ using std::unsigned_integral;
+
+ // [concept.assignable], concept assignable_from
+ using std::assignable_from;
+
+ // [concept.swappable], concept swappable
+ namespace ranges {
+ inline namespace __cpo {
+ using std::ranges::__cpo::swap;
+ }
+ } // namespace ranges
+
+ using std::swappable;
+ using std::swappable_with;
+
+ // [concept.destructible], concept destructible
+ using std::destructible;
+
+ // [concept.constructible], concept constructible_from
+ using std::constructible_from;
+
+ // [concept.default.init], concept default_initializable
+ using std::default_initializable;
+
+ // [concept.moveconstructible], concept move_constructible
+ using std::move_constructible;
+
+ // [concept.copyconstructible], concept copy_constructible
+ using std::copy_constructible;
+
+ // [concepts.compare], comparison concepts
+ // [concept.equalitycomparable], concept equality_comparable
+ using std::equality_comparable;
+ using std::equality_comparable_with;
+
+ // [concept.totallyordered], concept totally_ordered
+ using std::totally_ordered;
+ using std::totally_ordered_with;
+
+ // [concepts.object], object concepts
+ using std::copyable;
+ using std::movable;
+ using std::regular;
+ using std::semiregular;
+
+ // [concepts.callable], callable concepts
+ // [concept.invocable], concept invocable
+ using std::invocable;
+
+ // [concept.regularinvocable], concept regular_invocable
+ using std::regular_invocable;
+
+ // [concept.predicate], concept predicate
+ using std::predicate;
+
+ // [concept.relation], concept relation
+ using std::relation;
+
+ // [concept.equiv], concept equivalence_relation
+ using std::equivalence_relation;
+
+ // [concept.strictweakorder], concept strict_weak_order
+ using std::strict_weak_order;
+
+} // namespace std
+
+// condition_variable.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.condition.condvar], class condition_variable
+ using std::condition_variable;
+ // [thread.condition.condvarany], class condition_variable_any
+ using std::condition_variable_any;
+
+ // [thread.condition.nonmember], non-member functions
+ using std::notify_all_at_thread_exit;
+
+ using std::cv_status;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// coroutine.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+
+ // [coroutine.traits], coroutine traits
+ using std::coroutine_traits;
+
+ // [coroutine.handle], coroutine handle
+ using std::coroutine_handle;
+
+ // [coroutine.handle.compare], comparison operators
+ using std::operator==;
+ using std::operator<=>;
+
+ // [coroutine.handle.hash], hash support
+ using std::hash;
+
+ // [coroutine.noop], no-op coroutines
+ using std::noop_coroutine;
+ using std::noop_coroutine_handle;
+ using std::noop_coroutine_promise;
+
+ // [coroutine.trivial.awaitables], trivial awaitables
+ using std::suspend_always;
+ using std::suspend_never;
+} // namespace std
+
+// csetjmp.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::jmp_buf;
+ using std::longjmp;
+} // namespace std
+
+// csignal.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::sig_atomic_t;
+
+ // [support.signal], signal handlers
+ using std::signal;
+
+ using std::raise;
+
+} // namespace std
+
+// cstdarg.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::va_list;
+} // namespace std
+
+// cstddef.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::max_align_t;
+ using std::nullptr_t;
+ using std::ptrdiff_t;
+ using std::size_t;
+
+ using std::byte;
+
+ // [support.types.byteops], byte type operations
+ using std::operator<<=;
+ using std::operator<<;
+ using std::operator>>=;
+ using std::operator>>;
+ using std::operator|=;
+ using std::operator|;
+ using std::operator&=;
+ using std::operator&;
+ using std::operator^=;
+ using std::operator^;
+ using std::operator~;
+ using std::to_integer;
+} // namespace std
+
+// cstdint.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // signed
+ using std::int8_t _LIBCPP_USING_IF_EXISTS;
+ using std::int16_t _LIBCPP_USING_IF_EXISTS;
+ using std::int32_t _LIBCPP_USING_IF_EXISTS;
+ using std::int64_t _LIBCPP_USING_IF_EXISTS;
+
+ using std::int_fast16_t;
+ using std::int_fast32_t;
+ using std::int_fast64_t;
+ using std::int_fast8_t;
+
+ using std::int_least16_t;
+ using std::int_least32_t;
+ using std::int_least64_t;
+ using std::int_least8_t;
+
+ using std::intmax_t;
+
+ using std::intptr_t _LIBCPP_USING_IF_EXISTS;
+
+ // unsigned
+ using std::uint8_t _LIBCPP_USING_IF_EXISTS;
+ using std::uint16_t _LIBCPP_USING_IF_EXISTS;
+ using std::uint32_t _LIBCPP_USING_IF_EXISTS;
+ using std::uint64_t _LIBCPP_USING_IF_EXISTS;
+
+ using std::uint_fast16_t;
+ using std::uint_fast32_t;
+ using std::uint_fast64_t;
+ using std::uint_fast8_t;
+
+ using std::uint_least16_t;
+ using std::uint_least32_t;
+ using std::uint_least64_t;
+ using std::uint_least8_t;
+
+ using std::uintmax_t;
+
+ using std::uintptr_t _LIBCPP_USING_IF_EXISTS;
+} // namespace std
+
+// cstdio.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::FILE;
+ using std::fpos_t;
+ using std::size_t;
+
+ using std::clearerr;
+ using std::fclose;
+ using std::feof;
+ using std::ferror;
+ using std::fflush;
+ using std::fgetc;
+ using std::fgetpos;
+ using std::fgets;
+ using std::fopen;
+ using std::fprintf;
+ using std::fputc;
+ using std::fputs;
+ using std::fread;
+ using std::freopen;
+ using std::fscanf;
+ using std::fseek;
+ using std::fsetpos;
+ using std::ftell;
+ using std::fwrite;
+ using std::getc;
+ using std::getchar;
+ using std::perror;
+ using std::printf;
+ using std::putc;
+ using std::putchar;
+ using std::puts;
+ using std::remove;
+ using std::rename;
+ using std::rewind;
+ using std::scanf;
+ using std::setbuf;
+ using std::setvbuf;
+ using std::snprintf;
+ using std::sprintf;
+ using std::sscanf;
+ using std::tmpfile;
+ using std::tmpnam;
+ using std::ungetc;
+ using std::vfprintf;
+ using std::vfscanf;
+ using std::vprintf;
+ using std::vscanf;
+ using std::vsnprintf;
+ using std::vsprintf;
+ using std::vsscanf;
+} // namespace std
+
+// cstdlib.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::div_t;
+ using std::ldiv_t;
+ using std::lldiv_t;
+ using std::size_t;
+
+ // [support.start.term], start and termination
+ using std::_Exit;
+ using std::abort;
+ using std::at_quick_exit;
+ using std::atexit;
+ using std::exit;
+ using std::quick_exit;
+
+ using std::getenv;
+ using std::system;
+
+ // [c.malloc], C library memory allocation
+ using std::aligned_alloc;
+ using std::calloc;
+ using std::free;
+ using std::malloc;
+ using std::realloc;
+
+ using std::atof;
+ using std::atoi;
+ using std::atol;
+ using std::atoll;
+ using std::strtod;
+ using std::strtof;
+ using std::strtol;
+ using std::strtold;
+ using std::strtoll;
+ using std::strtoul;
+ using std::strtoull;
+
+ // [c.mb.wcs], multibyte / wide string and character conversion functions
+ using std::mblen;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::mbstowcs;
+ using std::mbtowc;
+ using std::wcstombs;
+ using std::wctomb;
+#endif
+ // [alg.c.library], C standard library algorithms
+ using std::bsearch;
+ using std::qsort;
+
+ // [c.math.rand], low-quality random number generation
+ using std::rand;
+ using std::srand;
+
+ // [c.math.abs], absolute values
+ using std::abs;
+
+ using std::labs;
+ using std::llabs;
+
+ using std::div;
+ using std::ldiv;
+ using std::lldiv;
+} // namespace std
+
+// cstring.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::size_t;
+
+ using std::memchr;
+ using std::memcmp;
+ using std::memcpy;
+ using std::memmove;
+ using std::memset;
+ using std::strcat;
+ using std::strchr;
+ using std::strcmp;
+ using std::strcoll;
+ using std::strcpy;
+ using std::strcspn;
+ using std::strerror;
+ using std::strlen;
+ using std::strncat;
+ using std::strncmp;
+ using std::strncpy;
+ using std::strpbrk;
+ using std::strrchr;
+ using std::strspn;
+ using std::strstr;
+ using std::strtok;
+ using std::strxfrm;
+} // namespace std
+
+// ctime.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::clock_t;
+ using std::size_t;
+ using std::time_t;
+
+ using std::timespec;
+ using std::tm;
+
+ using std::asctime;
+ using std::clock;
+ using std::ctime;
+ using std::difftime;
+ using std::gmtime;
+ using std::localtime;
+ using std::mktime;
+ using std::strftime;
+ using std::time;
+ using std::timespec_get;
+} // namespace std
+
+// cuchar.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // Note the Standard does not mark these symbols optional, but libc++'s header
+  // does. So, strictly speaking, this seems to be non-conforming.
+
+ // mbstate_t is conditionally here, but always present in cwchar.cppm. To avoid
+  // conflicting declarations omit the using here.
+
+ // size_t is conditionally here, but always present in cstddef.cppm. To avoid
+  // conflicting declarations omit the using here.
+
+#if !defined(_LIBCPP_HAS_NO_C8RTOMB_MBRTOC8)
+ using std::mbrtoc8 _LIBCPP_USING_IF_EXISTS;
+ using std::c8rtomb _LIBCPP_USING_IF_EXISTS;
+#endif
+ using std::mbrtoc16 _LIBCPP_USING_IF_EXISTS;
+ using std::c16rtomb _LIBCPP_USING_IF_EXISTS;
+ using std::mbrtoc32 _LIBCPP_USING_IF_EXISTS;
+ using std::c32rtomb _LIBCPP_USING_IF_EXISTS;
+} // namespace std
+
+// cwchar.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::mbstate_t;
+ using std::size_t;
+ using std::wint_t;
+
+ using std::tm;
+
+ using std::btowc;
+ using std::fgetwc;
+ using std::fgetws;
+ using std::fputwc;
+ using std::fputws;
+ using std::fwide;
+ using std::fwprintf;
+ using std::fwscanf;
+ using std::getwc;
+ using std::getwchar;
+ using std::putwc;
+ using std::putwchar;
+ using std::swprintf;
+ using std::swscanf;
+ using std::ungetwc;
+ using std::vfwprintf;
+ using std::vfwscanf;
+ using std::vswprintf;
+ using std::vswscanf;
+ using std::vwprintf;
+ using std::vwscanf;
+ using std::wcscat;
+ using std::wcschr;
+ using std::wcscmp;
+ using std::wcscoll;
+ using std::wcscpy;
+ using std::wcscspn;
+ using std::wcsftime;
+ using std::wcslen;
+ using std::wcsncat;
+ using std::wcsncmp;
+ using std::wcsncpy;
+ using std::wcspbrk;
+ using std::wcsrchr;
+ using std::wcsspn;
+ using std::wcsstr;
+ using std::wcstod;
+ using std::wcstof;
+ using std::wcstok;
+ using std::wcstol;
+ using std::wcstold;
+ using std::wcstoll;
+ using std::wcstoul;
+ using std::wcstoull;
+ using std::wcsxfrm;
+ using std::wctob;
+ using std::wmemchr;
+ using std::wmemcmp;
+ using std::wmemcpy;
+ using std::wmemmove;
+ using std::wmemset;
+ using std::wprintf;
+ using std::wscanf;
+
+ // [c.mb.wcs], multibyte / wide string and character conversion functions
+ using std::mbrlen;
+ using std::mbrtowc;
+ using std::mbsinit;
+ using std::mbsrtowcs;
+ using std::wcrtomb;
+ using std::wcsrtombs;
+#endif // _LIBCPP_HAS_NO_WIDE_CHARACTERS
+} // namespace std
+
+// cwctype.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wctrans_t;
+ using std::wctype_t;
+ using std::wint_t;
+
+ using std::iswalnum;
+ using std::iswalpha;
+ using std::iswblank;
+ using std::iswcntrl;
+ using std::iswctype;
+ using std::iswdigit;
+ using std::iswgraph;
+ using std::iswlower;
+ using std::iswprint;
+ using std::iswpunct;
+ using std::iswspace;
+ using std::iswupper;
+ using std::iswxdigit;
+ using std::towctrans;
+ using std::towlower;
+ using std::towupper;
+ using std::wctrans;
+ using std::wctype;
+#endif // _LIBCPP_HAS_NO_WIDE_CHARACTERS
+} // namespace std
+
+// deque.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [deque], class template deque
+ using std::deque;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [deque.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::deque;
+ }
+} // namespace std
+
+// exception.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::bad_exception;
+ using std::current_exception;
+ using std::exception;
+ using std::exception_ptr;
+ using std::get_terminate;
+ using std::make_exception_ptr;
+ using std::nested_exception;
+ using std::rethrow_exception;
+ using std::rethrow_if_nested;
+ using std::set_terminate;
+ using std::terminate;
+ using std::terminate_handler;
+ using std::throw_with_nested;
+ using std::uncaught_exception;
+ using std::uncaught_exceptions;
+} // namespace std
+
+// execution.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+export namespace std {
+ // [execpol.type], execution policy type trait
+ using std::is_execution_policy;
+ using std::is_execution_policy_v;
+} // namespace std
+
+export namespace std::execution {
+ // [execpol.seq], sequenced execution policy
+ using std::execution::sequenced_policy;
+
+ // [execpol.par], parallel execution policy
+ using std::execution::parallel_policy;
+
+ // [execpol.parunseq], parallel and unsequenced execution policy
+ using std::execution::parallel_unsequenced_policy;
+
+ // [execpol.unseq], unsequenced execution policy
+ using std::execution::unsequenced_policy;
+
+ // [execpol.objects], execution policy objects
+ using std::execution::par;
+ using std::execution::par_unseq;
+ using std::execution::seq;
+ using std::execution::unseq;
+} // namespace std::execution
+#endif // _LIBCPP_ENABLE_EXPERIMENTAL
+
+// expected.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if _LIBCPP_STD_VER >= 23
+ // [expected.unexpected], class template unexpected
+ using std::unexpected;
+
+ // [expected.bad], class template bad_expected_access
+ using std::bad_expected_access;
+
+ // in-place construction of unexpected values
+ using std::unexpect;
+ using std::unexpect_t;
+
+ // [expected.expected], class template expected
+ using std::expected;
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
+
+// filesystem.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std::filesystem {
+ // [fs.class.path], paths
+ using std::filesystem::path;
+
+ // [fs.path.nonmember], path non-member functions
+ using std::filesystem::hash_value;
+ using std::filesystem::swap;
+
+ // [fs.class.filesystem.error], filesystem errors
+ using std::filesystem::filesystem_error;
+
+#ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ // [fs.class.directory.entry], directory entries
+ using std::filesystem::directory_entry;
+
+ // [fs.class.directory.iterator], directory iterators
+ using std::filesystem::directory_iterator;
+
+ // [fs.dir.itr.nonmembers], range access for directory iterators
+ using std::filesystem::begin;
+ using std::filesystem::end;
+
+ // [fs.class.rec.dir.itr], recursive directory iterators
+ using std::filesystem::recursive_directory_iterator;
+#endif // _LIBCPP_HAS_NO_FILESYSTEM
+
+ // [fs.rec.dir.itr.nonmembers], range access for recursive directory iterators
+
+ // [fs.class.file.status], file status
+ using std::filesystem::file_status;
+ using std::filesystem::space_info;
+
+ // [fs.enum], enumerations
+ using std::filesystem::copy_options;
+ using std::filesystem::directory_options;
+ using std::filesystem::file_type;
+ using std::filesystem::perm_options;
+ using std::filesystem::perms;
+
+ using std::filesystem::file_time_type;
+
+ // several of these enums are a bitmask type.
+ // [bitmask.types] specified operators
+ using std::filesystem::operator&;
+ using std::filesystem::operator&=;
+ using std::filesystem::operator^;
+ using std::filesystem::operator^=;
+ using std::filesystem::operator|;
+ using std::filesystem::operator|=;
+ using std::filesystem::operator~;
+
+#ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ // [fs.op.funcs], filesystem operations
+ using std::filesystem::absolute;
+ using std::filesystem::canonical;
+ using std::filesystem::copy;
+ using std::filesystem::copy_file;
+ using std::filesystem::copy_symlink;
+ using std::filesystem::create_directories;
+ using std::filesystem::create_directory;
+ using std::filesystem::create_directory_symlink;
+ using std::filesystem::create_hard_link;
+ using std::filesystem::create_symlink;
+ using std::filesystem::current_path;
+ using std::filesystem::equivalent;
+ using std::filesystem::exists;
+ using std::filesystem::file_size;
+ using std::filesystem::hard_link_count;
+
+ using std::filesystem::is_block_file;
+ using std::filesystem::is_character_file;
+ using std::filesystem::is_directory;
+ using std::filesystem::is_empty;
+ using std::filesystem::is_fifo;
+ using std::filesystem::is_other;
+ using std::filesystem::is_regular_file;
+ using std::filesystem::is_socket;
+ using std::filesystem::is_symlink;
+
+ using std::filesystem::last_write_time;
+ using std::filesystem::permissions;
+ using std::filesystem::proximate;
+ using std::filesystem::read_symlink;
+ using std::filesystem::relative;
+ using std::filesystem::remove;
+
+ using std::filesystem::remove_all;
+ using std::filesystem::rename;
+ using std::filesystem::resize_file;
+ using std::filesystem::space;
+ using std::filesystem::status;
+ using std::filesystem::status_known;
+ using std::filesystem::symlink_status;
+ using std::filesystem::temp_directory_path;
+ using std::filesystem::weakly_canonical;
+#endif // _LIBCPP_HAS_NO_FILESYSTEM
+
+ // [depr.fs.path.factory]
+ using std::filesystem::u8path;
+} // namespace std::filesystem
+
+// [fs.path.hash], hash support
+export namespace std {
+ using std::hash;
+}
+
+export namespace std::ranges {
+#ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ using std::ranges::enable_borrowed_range;
+ using std::ranges::enable_view;
+#endif // _LIBCPP_HAS_NO_FILESYSTEM
+} // namespace std::ranges
+
+// flat_map.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+ // [flat.map], class template flat_­map
+ using std::flat_map;
+
+ using std::sorted_unique;
+ using std::sorted_unique_t;
+
+ using std::uses_allocator;
+
+ // [flat.map.erasure], erasure for flat_­map
+ using std::erase_if;
+
+ // [flat.multimap], class template flat_­multimap
+ using std::flat_multimap;
+
+ using std::sorted_equivalent;
+ using std::sorted_equivalent_t;
+
+ // [flat.multimap.erasure], erasure for flat_­multimap
+#endif
+} // namespace std
+
+// flat_set.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+ // [flat.set], class template flat_­set
+ using std::flat_set;
+
+ using std::sorted_unique;
+ using std::sorted_unique_t;
+
+ using std::uses_allocator;
+
+ // [flat.set.erasure], erasure for flat_­set
+ using std::erase_if;
+
+ // [flat.multiset], class template flat_­multiset
+ using std::flat_multiset;
+
+ using std::sorted_equivalent;
+ using std::sorted_equivalent_t;
+#endif
+} // namespace std
+
+// format.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [format.context], class template basic_format_context
+ using std::basic_format_context;
+ using std::format_context;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_context;
+#endif
+
+ // [format.args], class template basic_format_args
+ using std::basic_format_args;
+ using std::format_args;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_args;
+#endif
+
+ // [format.fmt.string], class template basic_format_string
+ using std::basic_format_string;
+ using std::format_string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_string;
+#endif
+
+ // [format.functions], formatting functions
+ using std::format;
+ using std::format_to;
+ using std::vformat;
+ using std::vformat_to;
+
+ using std::format_to_n;
+ using std::format_to_n_result;
+ using std::formatted_size;
+
+ // [format.formatter], formatter
+ using std::formatter;
+
+#if _LIBCPP_STD_VER >= 23
+ // [format.formattable], concept formattable
+ using std::formattable;
+#endif
+
+ // [format.parse.ctx], class template basic_format_parse_context
+ using std::basic_format_parse_context;
+ using std::format_parse_context;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wformat_parse_context;
+#endif
+
+#if _LIBCPP_STD_VER >= 23
+ // [format.range], formatting of ranges
+ // [format.range.fmtkind], variable template format_kind
+ using std::format_kind;
+ using std::range_format;
+
+ // [format.range.formatter], class template range_formatter
+ using std::range_formatter;
+#endif // _LIBCPP_STD_VER >= 23
+
+ // [format.arg], class template basic_format_arg
+ using std::basic_format_arg;
+ using std::visit_format_arg;
+
+ // [format.arg.store], class template format-arg-store
+ using std::make_format_args;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::make_wformat_args;
+#endif
+
+ // [format.error], class format_error
+ using std::format_error;
+} // namespace std
+
+// forward_list.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [forward.list], class template forward_list
+ using std::forward_list;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [forward.list.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::forward_list;
+ }
+} // namespace std
+
+// fstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_filebuf;
+
+# ifndef _LIBCPP_HAS_NO_FILESYSTEM
+ using std::swap;
+# endif
+
+ using std::filebuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wfilebuf;
+# endif
+
+ using std::basic_ifstream;
+
+ using std::ifstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wifstream;
+# endif
+
+ using std::basic_ofstream;
+
+ using std::ofstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wofstream;
+# endif
+
+ using std::basic_fstream;
+
+ using std::fstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wfstream;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// functional.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [func.invoke], invoke
+ using std::invoke;
+#if _LIBCPP_STD_VER >= 23
+ using std::invoke_r;
+#endif
+
+ // [refwrap], reference_wrapper
+ using std::reference_wrapper;
+
+ using std::cref;
+ using std::ref;
+
+ // [arithmetic.operations], arithmetic operations
+ using std::divides;
+ using std::minus;
+ using std::modulus;
+ using std::multiplies;
+ using std::negate;
+ using std::plus;
+ // [comparisons], comparisons
+ using std::equal_to;
+ using std::greater;
+ using std::greater_equal;
+ using std::less;
+ using std::less_equal;
+ using std::not_equal_to;
+
+ // [comparisons.three.way], class compare_three_way
+ using std::compare_three_way;
+
+ // [logical.operations], logical operations
+ using std::logical_and;
+ using std::logical_not;
+ using std::logical_or;
+
+ // [bitwise.operations], bitwise operations
+ using std::bit_and;
+ using std::bit_not;
+ using std::bit_or;
+ using std::bit_xor;
+
+ // [func.identity], identity
+ using std::identity;
+
+ // [func.not.fn], function template not_fn
+ using std::not_fn;
+
+ // [func.bind.partial], function templates bind_front and bind_back
+ // using std::bind_back;
+ using std::bind_front;
+
+ // [func.bind], bind
+ using std::is_bind_expression;
+ using std::is_bind_expression_v;
+ using std::is_placeholder;
+ using std::is_placeholder_v;
+
+ using std::bind;
+
+ namespace placeholders {
+ // M is the implementation-defined number of placeholders
+ using std::placeholders::_1;
+ using std::placeholders::_10;
+ using std::placeholders::_2;
+ using std::placeholders::_3;
+ using std::placeholders::_4;
+ using std::placeholders::_5;
+ using std::placeholders::_6;
+ using std::placeholders::_7;
+ using std::placeholders::_8;
+ using std::placeholders::_9;
+ } // namespace placeholders
+
+ // [func.memfn], member function adaptors
+ using std::mem_fn;
+
+ // [func.wrap], polymorphic function wrappers
+ using std::bad_function_call;
+
+ using std::function;
+
+ using std::swap;
+
+ using std::operator==;
+
+ // [func.wrap.move], move only wrapper
+ // using std::move_only_function;
+
+ // [func.search], searchers
+ using std::default_searcher;
+
+ using std::boyer_moore_searcher;
+
+ using std::boyer_moore_horspool_searcher;
+
+ // [unord.hash], class template hash
+ using std::hash;
+
+ namespace ranges {
+ // [range.cmp], concept-constrained comparisons
+ using std::ranges::equal_to;
+ using std::ranges::greater;
+ using std::ranges::greater_equal;
+ using std::ranges::less;
+ using std::ranges::less_equal;
+ using std::ranges::not_equal_to;
+ } // namespace ranges
+} // namespace std
+
+// future.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ using std::future_errc;
+ using std::future_status;
+ using std::launch;
+
+ // launch is a bitmask type.
+ // [bitmask.types] specified operators
+ using std::operator&;
+ using std::operator&=;
+ using std::operator^;
+ using std::operator^=;
+ using std::operator|;
+ using std::operator|=;
+ using std::operator~;
+
+ // [futures.errors], error handling
+ using std::is_error_code_enum;
+ using std::make_error_code;
+ using std::make_error_condition;
+
+ using std::future_category;
+
+ // [futures.future.error], class future_error
+ using std::future_error;
+
+ // [futures.promise], class template promise
+ using std::promise;
+
+ using std::swap;
+
+ using std::uses_allocator;
+
+ // [futures.unique.future], class template future
+ using std::future;
+
+ // [futures.shared.future], class template shared_future
+ using std::shared_future;
+
+ // [futures.task], class template packaged_task
+ using std::packaged_task;
+
+ // [futures.async], function template async
+ using std::async;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// generator.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+ using std::generator;
+#endif
+} // namespace std
+
+// hazard_pointer.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+# if _LIBCPP_STD_VER >= 23
+ // 4.1.3, class template hazard_pointer_obj_base
+ using std::hazard_pointer_obj_base;
+ // 4.1.4, class hazard_pointer
+ using std::hazard_pointer;
+ // 4.1.5, Construct non-empty hazard_pointer
+ using std::make_hazard_pointer;
+ // 4.1.6, Hazard pointer swap
+ using std::swap;
+# endif // _LIBCPP_STD_VER >= 23
+#endif
+} // namespace std
+
+// initializer_list.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::initializer_list;
+
+ // [support.initlist.range], initializer list range access
+ using std::begin;
+ using std::end;
+} // namespace std
+
+// iomanip.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::get_money;
+ using std::get_time;
+ using std::put_money;
+ using std::put_time;
+ using std::resetiosflags;
+ using std::setbase;
+ using std::setfill;
+ using std::setiosflags;
+ using std::setprecision;
+ using std::setw;
+
+ using std::quoted;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// ios.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::fpos;
+ // based on [tab:fpos.operations]
+ using std::operator!=; // Note not affected by P1614, seems like a bug.
+ using std::operator-;
+ using std::operator==;
+
+ using std::streamoff;
+ using std::streamsize;
+
+ using std::basic_ios;
+ using std::ios_base;
+
+ // [std.ios.manip], manipulators
+ using std::boolalpha;
+ using std::noboolalpha;
+
+ using std::noshowbase;
+ using std::showbase;
+
+ using std::noshowpoint;
+ using std::showpoint;
+
+ using std::noshowpos;
+ using std::showpos;
+
+ using std::noskipws;
+ using std::skipws;
+
+ using std::nouppercase;
+ using std::uppercase;
+
+ using std::nounitbuf;
+ using std::unitbuf;
+
+ // [adjustfield.manip], adjustfield
+ using std::internal;
+ using std::left;
+ using std::right;
+
+ // [basefield.manip], basefield
+ using std::dec;
+ using std::hex;
+ using std::oct;
+
+ // [floatfield.manip], floatfield
+ using std::defaultfloat;
+ using std::fixed;
+ using std::hexfloat;
+ using std::scientific;
+
+ // [error.reporting], error reporting
+ using std::io_errc;
+
+ using std::iostream_category;
+ using std::is_error_code_enum;
+ using std::make_error_code;
+ using std::make_error_condition;
+
+ // [iosfwd.syn]
+ using std::ios;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wios;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// iosfwd.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::streampos;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstreampos;
+#endif
+ using std::u16streampos;
+ using std::u32streampos;
+ using std::u8streampos;
+
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+ using std::basic_osyncstream;
+ using std::basic_syncbuf;
+#endif
+
+ using std::istreambuf_iterator;
+ using std::ostreambuf_iterator;
+
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+ using std::osyncstream;
+ using std::syncbuf;
+#endif
+
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+ using std::wosyncstream;
+ using std::wsyncbuf;
+#endif
+#endif
+
+ using std::fpos;
+} // namespace std
+
+// iostream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::cerr;
+ using std::cin;
+ using std::clog;
+ using std::cout;
+
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcerr;
+ using std::wcin;
+ using std::wclog;
+ using std::wcout;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// istream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_istream;
+
+ using std::istream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wistream;
+# endif
+
+ using std::basic_iostream;
+
+ using std::iostream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wiostream;
+# endif
+
+ using std::ws;
+
+ using std::operator>>;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// iterator.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [iterator.assoc.types], associated types
+ // [incrementable.traits], incrementable traits
+ using std::incrementable_traits;
+ using std::iter_difference_t;
+
+ using std::indirectly_readable_traits;
+ using std::iter_value_t;
+
+ // [iterator.traits], iterator traits
+ using std::iterator_traits;
+
+ using std::iter_reference_t;
+
+ namespace ranges {
+ // [iterator.cust], customization point objects
+ inline namespace __cpo {
+ // [iterator.cust.move], ranges::iter_move
+ using std::ranges::__cpo::iter_move;
+
+ // [iterator.cust.swap], ranges::iter_swap
+ using std::ranges::__cpo::iter_swap;
+ } // namespace __cpo
+ } // namespace ranges
+
+ using std::iter_rvalue_reference_t;
+
+ // [iterator.concepts], iterator concepts
+ // [iterator.concept.readable], concept indirectly_readable
+ using std::indirectly_readable;
+
+ using std::iter_common_reference_t;
+
+ // [iterator.concept.writable], concept indirectly_writable
+ using std::indirectly_writable;
+
+ // [iterator.concept.winc], concept weakly_incrementable
+ using std::weakly_incrementable;
+
+ // [iterator.concept.inc], concept incrementable
+ using std::incrementable;
+
+ // [iterator.concept.iterator], concept input_or_output_iterator
+ using std::input_or_output_iterator;
+
+ // [iterator.concept.sentinel], concept sentinel_for
+ using std::sentinel_for;
+
+ // [iterator.concept.sizedsentinel], concept sized_sentinel_for
+ using std::disable_sized_sentinel_for;
+
+ using std::sized_sentinel_for;
+
+ // [iterator.concept.input], concept input_iterator
+ using std::input_iterator;
+
+ // [iterator.concept.output], concept output_iterator
+ using std::output_iterator;
+
+ // [iterator.concept.forward], concept forward_iterator
+ using std::forward_iterator;
+
+ // [iterator.concept.bidir], concept bidirectional_iterator
+ using std::bidirectional_iterator;
+
+ // [iterator.concept.random.access], concept random_access_iterator
+ using std::random_access_iterator;
+
+ // [iterator.concept.contiguous], concept contiguous_iterator
+ using std::contiguous_iterator;
+
+ // [indirectcallable], indirect callable requirements
+ // [indirectcallable.indirectinvocable], indirect callables
+ using std::indirectly_unary_invocable;
+
+ using std::indirectly_regular_unary_invocable;
+
+ using std::indirect_unary_predicate;
+
+ using std::indirect_binary_predicate;
+
+ using std::indirect_equivalence_relation;
+
+ using std::indirect_strict_weak_order;
+
+ using std::indirect_result_t;
+
+ // [projected], projected
+ using std::projected;
+
+ // [alg.req], common algorithm requirements
+ // [alg.req.ind.move], concept indirectly_movable
+ using std::indirectly_movable;
+
+ using std::indirectly_movable_storable;
+
+ // [alg.req.ind.copy], concept indirectly_copyable
+ using std::indirectly_copyable;
+
+ using std::indirectly_copyable_storable;
+
+ // [alg.req.ind.swap], concept indirectly_swappable
+ using std::indirectly_swappable;
+
+ // [alg.req.ind.cmp], concept indirectly_comparable
+ using std::indirectly_comparable;
+
+ // [alg.req.permutable], concept permutable
+ using std::permutable;
+
+ // [alg.req.mergeable], concept mergeable
+ using std::mergeable;
+
+ // [alg.req.sortable], concept sortable
+ using std::sortable;
+
+ // [iterator.primitives], primitives
+ // [std.iterator.tags], iterator tags
+ using std::bidirectional_iterator_tag;
+ using std::contiguous_iterator_tag;
+ using std::forward_iterator_tag;
+ using std::input_iterator_tag;
+ using std::output_iterator_tag;
+ using std::random_access_iterator_tag;
+
+ // [iterator.operations], iterator operations
+ using std::advance;
+ using std::distance;
+ using std::next;
+ using std::prev;
+
+ // [range.iter.ops], range iterator operations
+ namespace ranges {
+ // [range.iter.op.advance], ranges​::​advance
+ using std::ranges::advance;
+
+ // [range.iter.op.distance], ranges​::​distance
+ using std::ranges::distance;
+
+ // [range.iter.op.next], ranges​::​next
+ using std::ranges::next;
+
+ // [range.iter.op.prev], ranges​::​prev
+ using std::ranges::prev;
+ } // namespace ranges
+
+ // [predef.iterators], predefined iterators and sentinels
+ // [reverse.iterators], reverse iterators
+ using std::reverse_iterator;
+
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::operator-;
+ using std::operator+;
+
+ using std::make_reverse_iterator;
+
+ // using std::disable_sized_sentinel_for;
+
+ // [insert.iterators], insert iterators
+ using std::back_insert_iterator;
+ using std::back_inserter;
+
+ using std::front_insert_iterator;
+ using std::front_inserter;
+
+ using std::insert_iterator;
+ using std::inserter;
+
+ // [const.iterators], constant iterators and sentinels
+ // [const.iterators.alias], alias templates
+ // using std::const_iterator;
+ // using std::const_sentinel;
+ // using std::iter_const_reference_t;
+
+ // [const.iterators.iterator], class template basic_const_iterator
+ // using std::basic_const_iterator;
+
+ // using std::common_type;
+
+ // using std::make_const_iterator;
+
+ // [move.iterators], move iterators and sentinels
+ using std::move_iterator;
+
+ using std::make_move_iterator;
+
+ using std::move_sentinel;
+
+ using std::common_iterator;
+
+ // [default.sentinel], default sentinel
+ using std::default_sentinel;
+ using std::default_sentinel_t;
+
+ // [iterators.counted], counted iterators
+ using std::counted_iterator;
+
+ // [unreachable.sentinel], unreachable sentinel
+ using std::unreachable_sentinel;
+ using std::unreachable_sentinel_t;
+
+ // [stream.iterators], stream iterators
+ using std::istream_iterator;
+
+ using std::ostream_iterator;
+
+ using std::istreambuf_iterator;
+ using std::ostreambuf_iterator;
+
+ // [iterator.range], range access
+ using std::begin;
+ using std::cbegin;
+ using std::cend;
+ using std::crbegin;
+ using std::crend;
+ using std::end;
+ using std::rbegin;
+ using std::rend;
+
+ using std::empty;
+ using std::size;
+ using std::ssize;
+
+ using std::data;
+
+ // [depr.iterator]
+ using std::iterator;
+} // namespace std
+
+// latch.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ using std::latch;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// limits.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [fp.style], floating-point type properties
+ using std::float_denorm_style;
+ using std::float_round_style;
+
+ // [numeric.limits], class template numeric_­limits
+ using std::numeric_limits;
+} // namespace std
+
+// list.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [list], class template list
+ using std::list;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [list.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::list;
+ }
+} // namespace std
+
+// locale.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ // [locale], locale
+ using std::has_facet;
+ using std::locale;
+ using std::use_facet;
+
+ // [locale.convenience], convenience interfaces
+ using std::isalnum;
+ using std::isalpha;
+ using std::isblank;
+ using std::iscntrl;
+ using std::isdigit;
+ using std::isgraph;
+ using std::islower;
+ using std::isprint;
+ using std::ispunct;
+ using std::isspace;
+ using std::isupper;
+ using std::isxdigit;
+ using std::tolower;
+ using std::toupper;
+
+ // [category.ctype], ctype
+ using std::codecvt;
+ using std::codecvt_base;
+ using std::codecvt_byname;
+ using std::ctype;
+ using std::ctype_base;
+ using std::ctype_byname;
+
+ // [category.numeric], numeric
+ using std::num_get;
+ using std::num_put;
+ using std::numpunct;
+ using std::numpunct_byname;
+
+ // [category.collate], collation
+ using std::collate;
+ using std::collate_byname;
+
+ // [category.time], date and time
+ using std::time_base;
+ using std::time_get;
+ using std::time_get_byname;
+ using std::time_put;
+ using std::time_put_byname;
+
+ // [category.monetary], money
+ using std::money_base;
+ using std::money_get;
+ using std::money_put;
+ using std::moneypunct;
+ using std::moneypunct_byname;
+
+ // [category.messages], message retrieval
+ using std::messages;
+ using std::messages_base;
+ using std::messages_byname;
+
+ // [depr.conversions.buffer]
+ using std::wbuffer_convert;
+
+ // [depr.conversions.string]
+ using std::wstring_convert;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// map.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [map], class template map
+ using std::map;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [map.erasure], erasure for map
+ using std::erase_if;
+
+ // [multimap], class template multimap
+ using std::multimap;
+
+ namespace pmr {
+ using std::pmr::map;
+ using std::pmr::multimap;
+ } // namespace pmr
+} // namespace std
+
+// mdspan.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if _LIBCPP_STD_VER >= 23
+ // [mdspan.extents], class template extents
+ using std::extents;
+
+ // [mdspan.extents.dextents], alias template dextents
+ using std::dextents;
+
+ // [mdspan.layout], layout mapping
+ using std::layout_left;
+ using std::layout_right;
+#if _LIBCPP_VERSION >= 180000
+ using std::layout_stride;
+#endif
+
+ // [mdspan.accessor.default], class template default_accessor
+ using std::default_accessor;
+
+ // [mdspan.mdspan], class template mdspan
+ using std::mdspan;
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
+
+// memory.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [pointer.traits], pointer traits
+ using std::pointer_traits;
+
+ // [pointer.conversion], pointer conversion
+ using std::to_address;
+
+ // [ptr.align], pointer alignment
+ using std::align;
+ using std::assume_aligned;
+
+ // [obj.lifetime], explicit lifetime management
+ // using std::start_lifetime_as;
+ // using std::start_lifetime_as_array;
+
+ // [allocator.tag], allocator argument tag
+ using std::allocator_arg;
+ using std::allocator_arg_t;
+
+ // [allocator.uses], uses_allocator
+ using std::uses_allocator;
+
+ // [allocator.uses.trait], uses_allocator
+ using std::uses_allocator_v;
+
+ // [allocator.uses.construction], uses-allocator construction
+ using std::uses_allocator_construction_args;
+
+ using std::make_obj_using_allocator;
+ using std::uninitialized_construct_using_allocator;
+
+ // [allocator.traits], allocator traits
+ using std::allocator_traits;
+
+#if _LIBCPP_STD_VER >= 23
+ using std::allocation_result;
+
+ using std::allocate_at_least;
+#endif
+
+ // [default.allocator], the default allocator
+ using std::allocator;
+ using std::operator==;
+
+ // [specialized.addressof], addressof
+ using std::addressof;
+
+ // [specialized.algorithms], specialized algorithms
+ // [special.mem.concepts], special memory concepts
+
+ using std::uninitialized_default_construct;
+ using std::uninitialized_default_construct_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_default_construct;
+ using std::ranges::uninitialized_default_construct_n;
+ } // namespace ranges
+
+ using std::uninitialized_value_construct;
+ using std::uninitialized_value_construct_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_value_construct;
+ using std::ranges::uninitialized_value_construct_n;
+ } // namespace ranges
+
+ using std::uninitialized_copy;
+ using std::uninitialized_copy_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_copy;
+ using std::ranges::uninitialized_copy_result;
+
+ using std::ranges::uninitialized_copy_n;
+ using std::ranges::uninitialized_copy_n_result;
+ } // namespace ranges
+
+ using std::uninitialized_move;
+ using std::uninitialized_move_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_move;
+ using std::ranges::uninitialized_move_result;
+
+ using std::ranges::uninitialized_move_n;
+ using std::ranges::uninitialized_move_n_result;
+ } // namespace ranges
+
+ using std::uninitialized_fill;
+ using std::uninitialized_fill_n;
+
+ namespace ranges {
+ using std::ranges::uninitialized_fill;
+ using std::ranges::uninitialized_fill_n;
+ } // namespace ranges
+
+ // [specialized.construct], construct_at
+ using std::construct_at;
+
+ namespace ranges {
+ using std::ranges::construct_at;
+ }
+ // [specialized.destroy], destroy
+ using std::destroy;
+ using std::destroy_at;
+ using std::destroy_n;
+
+ namespace ranges {
+ using std::ranges::destroy;
+ using std::ranges::destroy_at;
+ using std::ranges::destroy_n;
+ } // namespace ranges
+
+ // [unique.ptr], class template unique_ptr
+ using std::default_delete;
+ using std::unique_ptr;
+
+ using std::make_unique;
+ using std::make_unique_for_overwrite;
+
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::operator<<;
+
+ // [util.smartptr.weak.bad], class bad_weak_ptr
+ using std::bad_weak_ptr;
+
+ // [util.smartptr.shared], class template shared_ptr
+ using std::shared_ptr;
+
+ // [util.smartptr.shared.create], shared_ptr creation
+ using std::allocate_shared;
+ using std::allocate_shared_for_overwrite;
+ using std::make_shared;
+ using std::make_shared_for_overwrite;
+
+ // [util.smartptr.shared.spec], shared_ptr specialized algorithms
+ using std::swap;
+
+ // [util.smartptr.shared.cast], shared_ptr casts
+ using std::const_pointer_cast;
+ using std::dynamic_pointer_cast;
+ using std::reinterpret_pointer_cast;
+ using std::static_pointer_cast;
+
+ using std::get_deleter;
+
+ // [util.smartptr.shared.io], shared_ptr I/O
+
+ // [util.smartptr.weak], class template weak_ptr
+ using std::weak_ptr;
+
+ // [util.smartptr.weak.spec], weak_ptr specialized algorithms
+
+ // [util.smartptr.ownerless], class template owner_less
+ using std::owner_less;
+
+ // [util.smartptr.enab], class template enable_shared_from_this
+ using std::enable_shared_from_this;
+
+ // [util.smartptr.hash], hash support
+ using std::hash;
+
+ // [util.smartptr.atomic], atomic smart pointers
+ // using std::atomic;
+
+ // [out.ptr.t], class template out_ptr_t
+ // using std::out_ptr_t;
+
+ // [out.ptr], function template out_ptr
+ // using std::out_ptr;
+
+ // [inout.ptr.t], class template inout_ptr_t
+ // using std::inout_ptr_t;
+
+ // [inout.ptr], function template inout_ptr
+ // using std::inout_ptr;
+
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [depr.util.smartptr.shared.atomic]
+ using std::atomic_is_lock_free;
+
+ using std::atomic_load;
+ using std::atomic_load_explicit;
+
+ using std::atomic_store;
+ using std::atomic_store_explicit;
+
+ using std::atomic_exchange;
+ using std::atomic_exchange_explicit;
+
+ using std::atomic_compare_exchange_strong;
+ using std::atomic_compare_exchange_strong_explicit;
+ using std::atomic_compare_exchange_weak;
+ using std::atomic_compare_exchange_weak_explicit;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// memory_resource.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std::pmr {
+ // [mem.res.class], class memory_resource
+ using std::pmr::memory_resource;
+
+ using std::pmr::operator==;
+
+ // [mem.poly.allocator.class], class template polymorphic_allocator
+ using std::pmr::polymorphic_allocator;
+
+ // [mem.res.global], global memory resources
+ using std::pmr::get_default_resource;
+ using std::pmr::new_delete_resource;
+ using std::pmr::null_memory_resource;
+ using std::pmr::set_default_resource;
+
+ // [mem.res.pool], pool resource classes
+ using std::pmr::monotonic_buffer_resource;
+ using std::pmr::pool_options;
+ using std::pmr::synchronized_pool_resource;
+ using std::pmr::unsynchronized_pool_resource;
+} // namespace std::pmr
+
+// mutex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.mutex.class], class mutex
+ using std::mutex;
+ // [thread.mutex.recursive], class recursive_mutex
+ using std::recursive_mutex;
+ // [thread.timedmutex.class] class timed_mutex
+ using std::timed_mutex;
+ // [thread.timedmutex.recursive], class recursive_timed_mutex
+ using std::recursive_timed_mutex;
+
+ using std::adopt_lock_t;
+ using std::defer_lock_t;
+ using std::try_to_lock_t;
+
+ using std::adopt_lock;
+ using std::defer_lock;
+ using std::try_to_lock;
+
+ // [thread.lock], locks
+ using std::lock_guard;
+ using std::scoped_lock;
+ using std::unique_lock;
+
+ using std::swap;
+
+ // [thread.lock.algorithm], generic locking algorithms
+ using std::lock;
+ using std::try_lock;
+#endif // _LIBCPP_HAS_NO_THREADS
+
+ using std::once_flag;
+
+ using std::call_once;
+} // namespace std
+
+// new.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [alloc.errors], storage allocation errors
+ using std::bad_alloc;
+ using std::bad_array_new_length;
+
+ using std::destroying_delete;
+ using std::destroying_delete_t;
+
+ // global operator new control
+ using std::align_val_t;
+
+ using std::nothrow;
+ using std::nothrow_t;
+
+ using std::get_new_handler;
+ using std::new_handler;
+ using std::set_new_handler;
+
+ // [ptr.launder], pointer optimization barrier
+ using std::launder;
+#if 0
+ // [hardware.interference], hardware interference size
+ using std::hardware_constructive_interference_size;
+ using std::hardware_destructive_interference_size;
+#endif
+} // namespace std
+
+export {
+ using ::operator new;
+ using ::operator delete;
+ using ::operator new[];
+ using ::operator delete[];
+} // export
+
+// numbers.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std::numbers {
+ using std::numbers::e_v;
+ using std::numbers::egamma_v;
+ using std::numbers::inv_pi_v;
+ using std::numbers::inv_sqrt3_v;
+ using std::numbers::inv_sqrtpi_v;
+ using std::numbers::ln10_v;
+ using std::numbers::ln2_v;
+ using std::numbers::log10e_v;
+ using std::numbers::log2e_v;
+ using std::numbers::phi_v;
+ using std::numbers::pi_v;
+ using std::numbers::sqrt2_v;
+ using std::numbers::sqrt3_v;
+
+ using std::numbers::e;
+ using std::numbers::egamma;
+ using std::numbers::inv_pi;
+ using std::numbers::inv_sqrt3;
+ using std::numbers::inv_sqrtpi;
+ using std::numbers::ln10;
+ using std::numbers::ln2;
+ using std::numbers::log10e;
+ using std::numbers::log2e;
+ using std::numbers::phi;
+ using std::numbers::pi;
+ using std::numbers::sqrt2;
+ using std::numbers::sqrt3;
+} // namespace std::numbers
+
+// numeric.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [accumulate], accumulate
+ using std::accumulate;
+
+ // [reduce], reduce
+ using std::reduce;
+
+ // [inner.product], inner product
+ using std::inner_product;
+
+ // [transform.reduce], transform reduce
+ using std::transform_reduce;
+
+ // [partial.sum], partial sum
+ using std::partial_sum;
+
+ // [exclusive.scan], exclusive scan
+ using std::exclusive_scan;
+
+ // [inclusive.scan], inclusive scan
+ using std::inclusive_scan;
+
+ // [transform.exclusive.scan], transform exclusive scan
+ using std::transform_exclusive_scan;
+
+ // [transform.inclusive.scan], transform inclusive scan
+ using std::transform_inclusive_scan;
+
+ // [adjacent.difference], adjacent difference
+ using std::adjacent_difference;
+
+ // [numeric.iota], iota
+ using std::iota;
+
+ namespace ranges {
+ // using std::ranges::iota_result;
+ // using std::ranges::iota;
+ } // namespace ranges
+
+ // [numeric.ops.gcd], greatest common divisor
+ using std::gcd;
+
+ // [numeric.ops.lcm], least common multiple
+ using std::lcm;
+
+ // [numeric.ops.midpoint], midpoint
+ using std::midpoint;
+} // namespace std
+
+// optional.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [optional.optional], class template optional
+ using std::optional;
+
+ // [optional.nullopt], no-value state indicator
+ using std::nullopt;
+ using std::nullopt_t;
+
+ // [optional.bad.access], class bad_optional_access
+ using std::bad_optional_access;
+
+ // [optional.relops], relational operators
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ // [optional.specalg], specialized algorithms
+ using std::swap;
+
+ using std::make_optional;
+
+ // [optional.hash], hash support
+ using std::hash;
+} // namespace std
+
+// ostream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_ostream;
+
+ using std::ostream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wostream;
+# endif
+
+ using std::endl;
+ using std::ends;
+ using std::flush;
+
+# if 0
+ using std::emit_on_flush;
+ using std::flush_emit;
+ using std::noemit_on_flush;
+# endif
+ using std::operator<<;
+
+# if 0
+ // [ostream.formatted.print], print functions
+ using std::print;
+ using std::println;
+
+ using std::vprint_nonunicode;
+ using std::vprint_unicode;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// print.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if _LIBCPP_STD_VER >= 23
+ // [print.fun], print functions
+ using std::print;
+ using std::println;
+
+ using std::vprint_nonunicode;
+# ifndef _LIBCPP_HAS_NO_UNICODE
+ using std::vprint_unicode;
+# endif // _LIBCPP_HAS_NO_UNICODE
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
+
+// queue.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [queue], class template queue
+ using std::queue;
+
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::swap;
+ using std::uses_allocator;
+
+ // [priority.queue], class template priority_queue
+ using std::priority_queue;
+} // namespace std
+
+// random.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [rand.req.urng], uniform random bit generator requirements
+ using std::uniform_random_bit_generator;
+
+ // [rand.eng.lcong], class template linear_congruential_engine
+ using std::linear_congruential_engine;
+
+ // [rand.eng.mers], class template mersenne_twister_engine
+ using std::mersenne_twister_engine;
+
+ // [rand.eng.sub], class template subtract_with_carry_engine
+ using std::subtract_with_carry_engine;
+
+ // [rand.adapt.disc], class template discard_block_engine
+ using std::discard_block_engine;
+
+ // [rand.adapt.ibits], class template independent_bits_engine
+ using std::independent_bits_engine;
+
+ // [rand.adapt.shuf], class template shuffle_order_engine
+ using std::shuffle_order_engine;
+
+ // [rand.predef], engines and engine adaptors with predefined parameters
+ using std::knuth_b;
+ using std::minstd_rand;
+ using std::minstd_rand0;
+ using std::mt19937;
+ using std::mt19937_64;
+ using std::ranlux24;
+ using std::ranlux24_base;
+ using std::ranlux48;
+ using std::ranlux48_base;
+
+ using std::default_random_engine;
+
+#ifndef _LIBCPP_HAS_NO_RANDOM_DEVICE
+ // [rand.device], class random_device
+ using std::random_device;
+#endif
+
+ // [rand.util.seedseq], class seed_seq
+ using std::seed_seq;
+
+ // [rand.util.canonical], function template generate_canonical
+ using std::generate_canonical;
+
+ // [rand.dist.uni.int], class template uniform_int_distribution
+ using std::uniform_int_distribution;
+
+ // [rand.dist.uni.real], class template uniform_real_distribution
+ using std::uniform_real_distribution;
+
+ // [rand.dist.bern.bernoulli], class bernoulli_distribution
+ using std::bernoulli_distribution;
+
+ // [rand.dist.bern.bin], class template binomial_distribution
+ using std::binomial_distribution;
+
+ // [rand.dist.bern.geo], class template geometric_distribution
+ using std::geometric_distribution;
+
+ // [rand.dist.bern.negbin], class template negative_binomial_distribution
+ using std::negative_binomial_distribution;
+
+ // [rand.dist.pois.poisson], class template poisson_distribution
+ using std::poisson_distribution;
+
+ // [rand.dist.pois.exp], class template exponential_distribution
+ using std::exponential_distribution;
+
+ // [rand.dist.pois.gamma], class template gamma_distribution
+ using std::gamma_distribution;
+
+ // [rand.dist.pois.weibull], class template weibull_distribution
+ using std::weibull_distribution;
+
+ // [rand.dist.pois.extreme], class template extreme_value_distribution
+ using std::extreme_value_distribution;
+
+ // [rand.dist.norm.normal], class template normal_distribution
+ using std::normal_distribution;
+
+ // [rand.dist.norm.lognormal], class template lognormal_distribution
+ using std::lognormal_distribution;
+
+ // [rand.dist.norm.chisq], class template chi_squared_distribution
+ using std::chi_squared_distribution;
+
+ // [rand.dist.norm.cauchy], class template cauchy_distribution
+ using std::cauchy_distribution;
+
+ // [rand.dist.norm.f], class template fisher_f_distribution
+ using std::fisher_f_distribution;
+
+ // [rand.dist.norm.t], class template student_t_distribution
+ using std::student_t_distribution;
+
+ // [rand.dist.samp.discrete], class template discrete_distribution
+ using std::discrete_distribution;
+
+ // [rand.dist.samp.pconst], class template piecewise_constant_distribution
+ using std::piecewise_constant_distribution;
+
+ // [rand.dist.samp.plinear], class template piecewise_linear_distribution
+ using std::piecewise_linear_distribution;
+} // namespace std
+
+// ranges.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ namespace ranges {
+ inline namespace __cpo {
+ // [range.access], range access
+ using std::ranges::__cpo::begin;
+ using std::ranges::__cpo::cbegin;
+ using std::ranges::__cpo::cend;
+ using std::ranges::__cpo::crbegin;
+ using std::ranges::__cpo::crend;
+ using std::ranges::__cpo::end;
+ using std::ranges::__cpo::rbegin;
+ using std::ranges::__cpo::rend;
+
+ using std::ranges::__cpo::cdata;
+ using std::ranges::__cpo::data;
+ using std::ranges::__cpo::empty;
+ using std::ranges::__cpo::size;
+ using std::ranges::__cpo::ssize;
+ } // namespace __cpo
+
+ // [range.range], ranges
+ using std::ranges::range;
+
+ using std::ranges::enable_borrowed_range;
+
+ using std::ranges::borrowed_range;
+
+ // using std::ranges::const_iterator_t;
+ // using std::ranges::const_sentinel_t;
+ using std::ranges::iterator_t;
+ // using std::ranges::range_const_reference_t;
+ using std::ranges::range_common_reference_t;
+ using std::ranges::range_difference_t;
+ using std::ranges::range_reference_t;
+ using std::ranges::range_rvalue_reference_t;
+ using std::ranges::range_size_t;
+ using std::ranges::range_value_t;
+ using std::ranges::sentinel_t;
+
+ // [range.sized], sized ranges
+ using std::ranges::disable_sized_range;
+ using std::ranges::sized_range;
+
+ // [range.view], views
+ using std::ranges::enable_view;
+ using std::ranges::view;
+ using std::ranges::view_base;
+
+ // [range.refinements], other range refinements
+ using std::ranges::bidirectional_range;
+ using std::ranges::common_range;
+ // using std::ranges::constant_range;
+ using std::ranges::contiguous_range;
+ using std::ranges::forward_range;
+ using std::ranges::input_range;
+ using std::ranges::output_range;
+ using std::ranges::random_access_range;
+ using std::ranges::viewable_range;
+
+    // [view.interface], class template view_interface
+ using std::ranges::view_interface;
+
+ // [range.subrange], sub-ranges
+ using std::ranges::subrange;
+ using std::ranges::subrange_kind;
+
+ using std::ranges::get;
+ } // namespace ranges
+
+ using std::ranges::get;
+
+ namespace ranges {
+
+ // [range.dangling], dangling iterator handling
+ using std::ranges::dangling;
+
+    // [range.elementsof], class template elements_of
+ // using std::ranges::elements_of;
+
+ using std::ranges::borrowed_iterator_t;
+
+ using std::ranges::borrowed_subrange_t;
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.utility.conv], range conversions
+ using std::ranges::to;
+#endif
+
+ // [range.empty], empty view
+ using std::ranges::empty_view;
+
+ namespace views {
+ using std::ranges::views::empty;
+ }
+
+ // [range.single], single view
+ using std::ranges::single_view;
+
+ namespace views {
+ using std::ranges::views::single;
+ } // namespace views
+
+ // [range.iota], iota view
+ using std::ranges::iota_view;
+
+ namespace views {
+ using std::ranges::views::iota;
+ } // namespace views
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.repeat], repeat view
+ using std::ranges::repeat_view;
+
+ namespace views {
+ using std::ranges::views::repeat;
+ } // namespace views
+#endif // _LIBCPP_STD_VER >= 23
+
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ // [range.istream], istream view
+ using std::ranges::basic_istream_view;
+ using std::ranges::istream_view;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::ranges::wistream_view;
+# endif
+
+ namespace views {
+ using std::ranges::views::istream;
+ }
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+ // [range.adaptor.object], range adaptor objects
+ // using std::ranges::range_adaptor_closure;
+
+ // [range.all], all view
+ namespace views {
+ using std::ranges::views::all;
+ using std::ranges::views::all_t;
+ } // namespace views
+
+ // [range.ref.view], ref view
+ using std::ranges::ref_view;
+
+ // [range.owning.view], owning view
+ using std::ranges::owning_view;
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.as.rvalue], as rvalue view
+ using std::ranges::as_rvalue_view;
+
+ namespace views {
+ using std::ranges::views::as_rvalue;
+ } // namespace views
+#endif // _LIBCPP_STD_VER >= 23
+
+ // [range.filter], filter view
+ using std::ranges::filter_view;
+
+ namespace views {
+ using std::ranges::views::filter;
+ } // namespace views
+
+ // [range.transform], transform view
+ using std::ranges::transform_view;
+
+ namespace views {
+ using std::ranges::views::transform;
+ } // namespace views
+
+ // [range.take], take view
+ using std::ranges::take_view;
+
+ namespace views {
+ using std::ranges::views::take;
+ } // namespace views
+
+ // [range.take.while], take while view
+ using std::ranges::take_while_view;
+
+ namespace views {
+ using std::ranges::views::take_while;
+ } // namespace views
+
+ // [range.drop], drop view
+ using std::ranges::drop_view;
+
+ namespace views {
+ using std::ranges::views::drop;
+ } // namespace views
+
+ // [range.drop.while], drop while view
+ using std::ranges::drop_while_view;
+
+ namespace views {
+ using std::ranges::views::drop_while;
+ } // namespace views
+
+#ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+ using std::ranges::join_view;
+
+ namespace views {
+ using std::ranges::views::join;
+ } // namespace views
+#endif // _LIBCPP_ENABLE_EXPERIMENTAL
+#if 0
+ using std::ranges::join_with_view;
+
+ namespace views {
+ using std::ranges::views::join_with;
+ } // namespace views
+#endif
+ using std::ranges::lazy_split_view;
+
+ // [range.split], split view
+ using std::ranges::split_view;
+
+ namespace views {
+ using std::ranges::views::lazy_split;
+ using std::ranges::views::split;
+ } // namespace views
+
+ // [range.counted], counted view
+ namespace views {
+ using std::ranges::views::counted;
+ } // namespace views
+
+ // [range.common], common view
+ using std::ranges::common_view;
+
+ namespace views {
+ using std::ranges::views::common;
+ } // namespace views
+
+ // [range.reverse], reverse view
+ using std::ranges::reverse_view;
+
+ namespace views {
+ using std::ranges::views::reverse;
+ } // namespace views
+
+ // [range.as.const], as const view
+#if 0
+ using std::ranges::as_const_view;
+
+ namespace views {
+ using std::ranges::views::as_const;
+ } // namespace views
+#endif
+ // [range.elements], elements view
+ using std::ranges::elements_view;
+
+ using std::ranges::keys_view;
+ using std::ranges::values_view;
+
+ namespace views {
+ using std::ranges::views::elements;
+ using std::ranges::views::keys;
+ using std::ranges::views::values;
+ } // namespace views
+
+#if _LIBCPP_STD_VER >= 23
+ // [range.zip], zip view
+ using std::ranges::zip_view;
+
+ namespace views {
+ using std::ranges::views::zip;
+ } // namespace views
+#endif // _LIBCPP_STD_VER >= 23
+
+#if 0
+ // [range.zip.transform], zip transform view
+ using std::ranges::zip_transform_view;
+
+ namespace views {
+ using std::ranges::views::zip_transform;
+ }
+
+ using std::ranges::adjacent_view;
+
+ namespace views {
+ using std::ranges::views::adjacent;
+ using std::ranges::views::pairwise;
+ } // namespace views
+
+ using std::ranges::adjacent_transform_view;
+
+ namespace views {
+ using std::ranges::views::adjacent_transform;
+ using std::ranges::views::pairwise_transform;
+ } // namespace views
+
+ using std::ranges::chunk_view;
+
+  // (stray duplicate of the line above; a using-declaration cannot name a template-id)
+
+ namespace views {
+ using std::ranges::views::chunk;
+ }
+
+ using std::ranges::slide_view;
+
+ namespace views {
+ using std::ranges::views::slide;
+ }
+#endif
+
+#if _LIBCPP_STD_VER >= 23
+#if _LIBCPP_VERSION >= 180000
+ // [range.chunk.by], chunk by view
+ using std::ranges::chunk_by_view;
+
+ namespace views {
+ using std::ranges::views::chunk_by;
+ }
+#endif
+#endif // _LIBCPP_STD_VER >= 23
+
+#if 0
+ // [range.stride], stride view
+ using std::ranges::stride_view;
+
+ namespace views {
+ using std::ranges::views::stride;
+ }
+
+ using std::ranges::cartesian_product_view;
+
+ namespace views {
+ using std::ranges::views::cartesian_product;
+ }
+#endif
+ } // namespace ranges
+
+ namespace views = ranges::views;
+
+ using std::tuple_element;
+ using std::tuple_size;
+
+#if _LIBCPP_STD_VER >= 23
+ using std::from_range;
+ using std::from_range_t;
+#endif // _LIBCPP_STD_VER >= 23
+} // namespace std
+
+// ratio.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [ratio.ratio], class template ratio
+ using std::ratio;
+
+ // [ratio.arithmetic], ratio arithmetic
+ using std::ratio_add;
+ using std::ratio_divide;
+ using std::ratio_multiply;
+ using std::ratio_subtract;
+
+ // [ratio.comparison], ratio comparison
+ using std::ratio_equal;
+ using std::ratio_greater;
+ using std::ratio_greater_equal;
+ using std::ratio_less;
+ using std::ratio_less_equal;
+ using std::ratio_not_equal;
+
+ using std::ratio_equal_v;
+ using std::ratio_greater_equal_v;
+ using std::ratio_greater_v;
+ using std::ratio_less_equal_v;
+ using std::ratio_less_v;
+ using std::ratio_not_equal_v;
+
+ // [ratio.si], convenience SI typedefs
+ using std::atto;
+ using std::centi;
+ using std::deca;
+ using std::deci;
+ using std::exa;
+ using std::femto;
+ using std::giga;
+ using std::hecto;
+ using std::kilo;
+ using std::mega;
+ using std::micro;
+ using std::milli;
+ using std::nano;
+ using std::peta;
+ using std::pico;
+ using std::tera;
+
+ // These are not supported by libc++, due to the range of intmax_t
+ // using std::yocto;
+ // using std::yotta;
+ // using std::zepto;
+  // using std::zetta;
+} // namespace std
+
+// rcu.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+# if _LIBCPP_STD_VER >= 23
+  // 2.2.3, class template rcu_obj_base: using std::rcu_obj_base;
+ // 2.2.4, class rcu_domain
+ using std::rcu_domain;
+  using std::rcu_default_domain;
+ using std::rcu_barrier;
+ using std::rcu_retire;
+ using std::rcu_synchronize;
+# endif // _LIBCPP_STD_VER >= 23
+#endif
+} // namespace std
+
+// regex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ // [re.const], regex constants
+ namespace regex_constants {
+ using std::regex_constants::error_type;
+ using std::regex_constants::match_flag_type;
+ using std::regex_constants::syntax_option_type;
+
+ // regex_constants is a bitmask type.
+ // [bitmask.types] specified operators
+ using std::regex_constants::operator&;
+ using std::regex_constants::operator&=;
+ using std::regex_constants::operator^;
+ using std::regex_constants::operator^=;
+ using std::regex_constants::operator|;
+ using std::regex_constants::operator|=;
+ using std::regex_constants::operator~;
+
+ } // namespace regex_constants
+
+ // [re.badexp], class regex_error
+ using std::regex_error;
+
+ // [re.traits], class template regex_traits
+ using std::regex_traits;
+
+ // [re.regex], class template basic_regex
+ using std::basic_regex;
+
+ using std::regex;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wregex;
+# endif
+
+ // [re.regex.swap], basic_regex swap
+ using std::swap;
+
+ // [re.submatch], class template sub_match
+ using std::sub_match;
+
+ using std::csub_match;
+ using std::ssub_match;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcsub_match;
+ using std::wssub_match;
+# endif
+
+ // [re.submatch.op], sub_match non-member operators
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::operator<<;
+
+ // [re.results], class template match_results
+ using std::match_results;
+
+ using std::cmatch;
+ using std::smatch;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcmatch;
+ using std::wsmatch;
+# endif
+
+ // match_results comparisons
+
+ // [re.results.swap], match_results swap
+
+ // [re.alg.match], function template regex_match
+ using std::regex_match;
+
+ // [re.alg.search], function template regex_search
+ using std::regex_search;
+
+ // [re.alg.replace], function template regex_replace
+ using std::regex_replace;
+
+ // [re.regiter], class template regex_iterator
+ using std::regex_iterator;
+
+ using std::cregex_iterator;
+ using std::sregex_iterator;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcregex_iterator;
+ using std::wsregex_iterator;
+# endif
+
+ // [re.tokiter], class template regex_token_iterator
+ using std::regex_token_iterator;
+
+ using std::cregex_token_iterator;
+ using std::sregex_token_iterator;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wcregex_token_iterator;
+ using std::wsregex_token_iterator;
+# endif
+
+ namespace pmr {
+ using std::pmr::match_results;
+
+ using std::pmr::cmatch;
+ using std::pmr::smatch;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::pmr::wcmatch;
+ using std::pmr::wsmatch;
+# endif
+ } // namespace pmr
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// scoped_allocator.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // class template scoped_allocator_adaptor
+ using std::scoped_allocator_adaptor;
+
+ // [scoped.adaptor.operators], scoped allocator operators
+ using std::operator==;
+
+} // namespace std
+
+// semaphore.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.sema.cnt], class template counting_semaphore
+ using std::counting_semaphore;
+
+ using std::binary_semaphore;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// set.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [set], class template set
+ using std::set;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [set.erasure], erasure for set
+ using std::erase_if;
+
+ // [multiset], class template multiset
+ using std::multiset;
+
+ namespace pmr {
+ using std::pmr::multiset;
+ using std::pmr::set;
+ } // namespace pmr
+} // namespace std
+
+// shared_mutex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+  // [thread.sharedmutex.class], class shared_mutex
+  using std::shared_mutex;
+  // [thread.sharedtimedmutex.class], class shared_timed_mutex
+  using std::shared_timed_mutex;
+  // [thread.lock.shared], class template shared_lock
+ using std::shared_lock;
+ using std::swap;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// source_location.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::source_location;
+} // namespace std
+
+// span.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // constants
+ using std::dynamic_extent;
+
+ // [views.span], class template span
+ using std::span;
+
+ namespace ranges {
+ using std::ranges::enable_borrowed_range;
+ using std::ranges::enable_view;
+ } // namespace ranges
+
+ // [span.objectrep], views of object representation
+ using std::as_bytes;
+
+ using std::as_writable_bytes;
+} // namespace std
+
+// spanstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+ using std::basic_spanbuf;
+
+ using std::swap;
+
+ using std::spanbuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wspanbuf;
+# endif
+
+ using std::basic_ispanstream;
+
+ using std::ispanstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wispanstream;
+# endif
+
+ using std::basic_ospanstream;
+
+ using std::ospanstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wospanstream;
+# endif
+
+ using std::basic_spanstream;
+
+ using std::spanstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wspanstream;
+# endif
+#endif
+} // namespace std
+
+// sstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_stringbuf;
+
+ using std::swap;
+
+ using std::stringbuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstringbuf;
+# endif
+
+ using std::basic_istringstream;
+
+ using std::istringstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wistringstream;
+# endif
+
+ using std::basic_ostringstream;
+
+ using std::ostringstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wostringstream;
+# endif
+
+ using std::basic_stringstream;
+
+ using std::stringstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstringstream;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// stack.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [stack], class template stack
+ using std::stack;
+
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ using std::swap;
+ using std::uses_allocator;
+} // namespace std
+
+// stacktrace.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+  // [stacktrace.entry], class stacktrace_entry
+  using std::stacktrace_entry;
+
+  // [stacktrace.basic], class template basic_stacktrace
+  using std::basic_stacktrace;
+
+  // basic_stacktrace typedef-names
+  using std::stacktrace;
+
+ // [stacktrace.basic.nonmem], non-member functions
+ using std::swap;
+
+ using std::to_string;
+
+ using std::operator<<;
+
+ namespace pmr {
+ using std::pmr::stacktrace;
+ }
+
+ // [stacktrace.basic.hash], hash support
+ using std::hash;
+#endif
+} // namespace std
+
+// stdexcept.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::domain_error;
+ using std::invalid_argument;
+ using std::length_error;
+ using std::logic_error;
+ using std::out_of_range;
+ using std::overflow_error;
+ using std::range_error;
+ using std::runtime_error;
+ using std::underflow_error;
+} // namespace std
+
+// stdfloat.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if defined(__STDCPP_FLOAT16_T__)
+ using std::float16_t;
+#endif
+#if defined(__STDCPP_FLOAT32_T__)
+ using std::float32_t;
+#endif
+#if defined(__STDCPP_FLOAT64_T__)
+ using std::float64_t;
+#endif
+#if defined(__STDCPP_FLOAT128_T__)
+ using std::float128_t;
+#endif
+#if defined(__STDCPP_BFLOAT16_T__)
+ using std::bfloat16_t;
+#endif
+} // namespace std
+
+// stop_token.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+# ifdef _LIBCPP_ENABLE_EXPERIMENTAL
+  // [stoptoken], class stop_token
+  using std::stop_token;
+
+  // [stopsource], class stop_source
+  using std::stop_source;
+
+  // no-shared-stop-state indicator
+  using std::nostopstate;
+  using std::nostopstate_t;
+
+  // [stopcallback], class template stop_callback
+  using std::stop_callback;
+# endif // _LIBCPP_ENABLE_EXPERIMENTAL
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// streambuf.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::basic_streambuf;
+ using std::streambuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstreambuf;
+# endif
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// string.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [char.traits], character traits
+ using std::char_traits;
+
+ // [basic.string], basic_string
+ using std::basic_string;
+
+ using std::operator+;
+ using std::operator==;
+ using std::operator<=>;
+
+ // [string.special], swap
+ using std::swap;
+
+ // [string.io], inserters and extractors
+ using std::operator>>;
+ using std::operator<<;
+ using std::getline;
+
+ // [string.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ // basic_string typedef-names
+ using std::string;
+ using std::u16string;
+ using std::u32string;
+ using std::u8string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstring;
+#endif
+
+ // [string.conversions], numeric conversions
+ using std::stod;
+ using std::stof;
+ using std::stoi;
+ using std::stol;
+ using std::stold;
+ using std::stoll;
+ using std::stoul;
+ using std::stoull;
+ using std::to_string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::to_wstring;
+#endif
+
+ namespace pmr {
+ using std::pmr::basic_string;
+ using std::pmr::string;
+ using std::pmr::u16string;
+ using std::pmr::u32string;
+ using std::pmr::u8string;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::pmr::wstring;
+#endif
+ } // namespace pmr
+
+ // [basic.string.hash], hash support
+ using std::hash;
+
+ // TODO MODULES is this a bug?
+#if _LIBCPP_STD_VER >= 23
+ using std::operator""s;
+#else
+ inline namespace literals {
+ inline namespace string_literals {
+ // [basic.string.literals], suffix for basic_string literals
+ using std::literals::string_literals::operator""s;
+ } // namespace string_literals
+ } // namespace literals
+#endif
+} // namespace std
+
+// string_view.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [string.view.template], class template basic_string_view
+ using std::basic_string_view;
+
+ namespace ranges {
+ using std::ranges::enable_borrowed_range;
+ using std::ranges::enable_view;
+ } // namespace ranges
+
+ // [string.view.comparison], non-member comparison functions
+ using std::operator==;
+ using std::operator<=>;
+
+ // [string.view.io], inserters and extractors
+ using std::operator<<;
+
+ // basic_string_view typedef-names
+ using std::string_view;
+ using std::u16string_view;
+ using std::u32string_view;
+ using std::u8string_view;
+#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wstring_view;
+#endif
+
+ // [string.view.hash], hash support
+ using std::hash;
+
+ inline namespace literals {
+ inline namespace string_view_literals {
+ // [string.view.literals], suffix for basic_string_view literals
+ using std::literals::string_view_literals::operator""sv;
+ } // namespace string_view_literals
+ } // namespace literals
+} // namespace std
+
+// strstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::istrstream;
+ using std::ostrstream;
+ using std::strstream;
+ using std::strstreambuf;
+#endif // _LIBCPP_HAS_NO_LOCALIZATION
+} // namespace std
+
+// syncstream.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef _LIBCPP_HAS_YES_SYNCSTREAM
+
+export namespace std {
+#if !defined(_LIBCPP_HAS_NO_LOCALIZATION) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM)
+ using std::basic_syncbuf;
+
+ // [syncstream.syncbuf.special], specialized algorithms
+ using std::swap;
+
+ using std::syncbuf;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wsyncbuf;
+# endif
+ using std::basic_osyncstream;
+
+ using std::osyncstream;
+# ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
+ using std::wosyncstream;
+# endif
+#endif // !defined(_LIBCPP_HAS_NO_LOCALIZATION) && !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM)
+} // namespace std
+
+#endif
+
+// system_error.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::error_category;
+ using std::generic_category;
+ using std::system_category;
+
+ using std::error_code;
+ using std::error_condition;
+ using std::system_error;
+
+ using std::is_error_code_enum;
+ using std::is_error_condition_enum;
+
+ using std::errc;
+
+ // [syserr.errcode.nonmembers], non-member functions
+ using std::make_error_code;
+
+ using std::operator<<;
+
+ // [syserr.errcondition.nonmembers], non-member functions
+ using std::make_error_condition;
+
+ // [syserr.compare], comparison operator functions
+ using std::operator==;
+ using std::operator<=>;
+
+ // [syserr.hash], hash support
+ using std::hash;
+
+ // [syserr], system error support
+ using std::is_error_code_enum_v;
+ using std::is_error_condition_enum_v;
+} // namespace std
+
+// text_encoding.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#if 0
+# if _LIBCPP_STD_VER >= 23
+ using std::text_encoding;
+
+ // hash support
+ using std::hash;
+# endif // _LIBCPP_STD_VER >= 23
+#endif
+} // namespace std
+
+// thread.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+#ifndef _LIBCPP_HAS_NO_THREADS
+ // [thread.thread.class], class thread
+ using std::thread;
+
+ using std::swap;
+
+ // [thread.jthread.class], class jthread
+# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN)
+ using std::jthread;
+# endif
+
+ // [thread.thread.this], namespace this_thread
+ namespace this_thread {
+ using std::this_thread::get_id;
+
+ using std::this_thread::sleep_for;
+ using std::this_thread::sleep_until;
+ using std::this_thread::yield;
+ } // namespace this_thread
+
+ // [thread.thread.id]
+ using std::operator==;
+ using std::operator<=>;
+# ifndef _LIBCPP_HAS_NO_LOCALIZATION
+ using std::operator<<;
+# endif // _LIBCPP_HAS_NO_LOCALIZATION
+
+# if _LIBCPP_STD_VER >= 23
+ using std::formatter;
+# endif
+
+ using std::hash;
+#endif // _LIBCPP_HAS_NO_THREADS
+} // namespace std
+
+// tuple.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [tuple.tuple], class template tuple
+ using std::tuple;
+
+ // [tuple.like], concept tuple-like
+
+#if _LIBCPP_STD_VER >= 23
+ // [tuple.common.ref], common_reference related specializations
+ using std::basic_common_reference;
+ using std::common_type;
+#endif
+
+ // [tuple.creation], tuple creation functions
+ using std::ignore;
+
+ using std::forward_as_tuple;
+ using std::make_tuple;
+ using std::tie;
+ using std::tuple_cat;
+
+ // [tuple.apply], calling a function with a tuple of arguments
+ using std::apply;
+
+ using std::make_from_tuple;
+
+ // [tuple.helper], tuple helper classes
+ using std::tuple_element;
+ using std::tuple_size;
+
+ // [tuple.elem], element access
+ using std::get;
+ using std::tuple_element_t;
+
+ // [tuple.rel], relational operators
+ using std::operator==;
+ using std::operator<=>;
+
+ // [tuple.traits], allocator-related traits
+ using std::uses_allocator;
+
+ // [tuple.special], specialized algorithms
+ using std::swap;
+
+ // [tuple.helper], tuple helper classes
+ using std::tuple_size_v;
+} // namespace std
+
+// type_traits.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [meta.help], helper class
+ using std::integral_constant;
+
+ using std::bool_constant;
+ using std::false_type;
+ using std::true_type;
+
+ // [meta.unary.cat], primary type categories
+ using std::is_array;
+ using std::is_class;
+ using std::is_enum;
+ using std::is_floating_point;
+ using std::is_function;
+ using std::is_integral;
+ using std::is_lvalue_reference;
+ using std::is_member_function_pointer;
+ using std::is_member_object_pointer;
+ using std::is_null_pointer;
+ using std::is_pointer;
+ using std::is_rvalue_reference;
+ using std::is_union;
+ using std::is_void;
+
+ // [meta.unary.comp], composite type categories
+ using std::is_arithmetic;
+ using std::is_compound;
+ using std::is_fundamental;
+ using std::is_member_pointer;
+ using std::is_object;
+ using std::is_reference;
+ using std::is_scalar;
+
+ // [meta.unary.prop], type properties
+ using std::is_abstract;
+ using std::is_aggregate;
+ using std::is_const;
+ using std::is_empty;
+ using std::is_final;
+ using std::is_polymorphic;
+ using std::is_standard_layout;
+ using std::is_trivial;
+ using std::is_trivially_copyable;
+ using std::is_volatile;
+
+ using std::is_bounded_array;
+#if _LIBCPP_STD_VER >= 23
+ using std::is_scoped_enum;
+#endif
+ using std::is_signed;
+ using std::is_unbounded_array;
+ using std::is_unsigned;
+
+ using std::is_constructible;
+ using std::is_copy_constructible;
+ using std::is_default_constructible;
+ using std::is_move_constructible;
+
+ using std::is_assignable;
+ using std::is_copy_assignable;
+ using std::is_move_assignable;
+
+ using std::is_swappable;
+ using std::is_swappable_with;
+
+ using std::is_destructible;
+
+ using std::is_trivially_constructible;
+ using std::is_trivially_copy_constructible;
+ using std::is_trivially_default_constructible;
+ using std::is_trivially_move_constructible;
+
+ using std::is_trivially_assignable;
+ using std::is_trivially_copy_assignable;
+ using std::is_trivially_destructible;
+ using std::is_trivially_move_assignable;
+
+ using std::is_nothrow_constructible;
+ using std::is_nothrow_copy_constructible;
+ using std::is_nothrow_default_constructible;
+ using std::is_nothrow_move_constructible;
+
+ using std::is_nothrow_assignable;
+ using std::is_nothrow_copy_assignable;
+ using std::is_nothrow_move_assignable;
+
+ using std::is_nothrow_swappable;
+ using std::is_nothrow_swappable_with;
+
+ using std::is_nothrow_destructible;
+
+ // using std::is_implicit_lifetime;
+
+ using std::has_virtual_destructor;
+
+ using std::has_unique_object_representations;
+
+ // using std::reference_constructs_from_temporary;
+ // using std::reference_converts_from_temporary;
+
+ // [meta.unary.prop.query], type property queries
+ using std::alignment_of;
+ using std::extent;
+ using std::rank;
+
+ // [meta.rel], type relations
+ using std::is_base_of;
+ using std::is_convertible;
+ // using std::is_layout_compatible;
+ using std::is_nothrow_convertible;
+ // using std::is_pointer_interconvertible_base_of;
+ using std::is_same;
+
+ using std::is_invocable;
+ using std::is_invocable_r;
+
+ using std::is_nothrow_invocable;
+ using std::is_nothrow_invocable_r;
+
+ // [meta.trans.cv], const-volatile modifications
+ using std::add_const;
+ using std::add_cv;
+ using std::add_volatile;
+ using std::remove_const;
+ using std::remove_cv;
+ using std::remove_volatile;
+
+ using std::add_const_t;
+ using std::add_cv_t;
+ using std::add_volatile_t;
+ using std::remove_const_t;
+ using std::remove_cv_t;
+ using std::remove_volatile_t;
+
+ // [meta.trans.ref], reference modifications
+ using std::add_lvalue_reference;
+ using std::add_rvalue_reference;
+ using std::remove_reference;
+
+ using std::add_lvalue_reference_t;
+ using std::add_rvalue_reference_t;
+ using std::remove_reference_t;
+
+ // [meta.trans.sign], sign modifications
+ using std::make_signed;
+ using std::make_unsigned;
+
+ using std::make_signed_t;
+ using std::make_unsigned_t;
+
+ // [meta.trans.arr], array modifications
+ using std::remove_all_extents;
+ using std::remove_extent;
+
+ using std::remove_all_extents_t;
+ using std::remove_extent_t;
+
+ // [meta.trans.ptr], pointer modifications
+ using std::add_pointer;
+ using std::remove_pointer;
+
+ using std::add_pointer_t;
+ using std::remove_pointer_t;
+
+ // [meta.trans.other], other transformations
+ using std::basic_common_reference;
+ using std::common_reference;
+ using std::common_type;
+ using std::conditional;
+ using std::decay;
+ using std::enable_if;
+ using std::invoke_result;
+ using std::remove_cvref;
+ using std::type_identity;
+ using std::underlying_type;
+ using std::unwrap_ref_decay;
+ using std::unwrap_reference;
+
+ using std::common_reference_t;
+ using std::common_type_t;
+ using std::conditional_t;
+ using std::decay_t;
+ using std::enable_if_t;
+ using std::invoke_result_t;
+ using std::remove_cvref_t;
+ using std::type_identity_t;
+ using std::underlying_type_t;
+ using std::unwrap_ref_decay_t;
+ using std::unwrap_reference_t;
+ using std::void_t;
+
+ // [meta.logical], logical operator traits
+ using std::conjunction;
+ using std::disjunction;
+ using std::negation;
+
+ // [meta.unary.cat], primary type categories
+ using std::is_array_v;
+ using std::is_class_v;
+ using std::is_enum_v;
+ using std::is_floating_point_v;
+ using std::is_function_v;
+ using std::is_integral_v;
+ using std::is_lvalue_reference_v;
+ using std::is_member_function_pointer_v;
+ using std::is_member_object_pointer_v;
+ using std::is_null_pointer_v;
+ using std::is_pointer_v;
+ using std::is_rvalue_reference_v;
+ using std::is_union_v;
+ using std::is_void_v;
+
+ // [meta.unary.comp], composite type categories
+ using std::is_arithmetic_v;
+ using std::is_compound_v;
+ using std::is_fundamental_v;
+ using std::is_member_pointer_v;
+ using std::is_object_v;
+ using std::is_reference_v;
+ using std::is_scalar_v;
+
+ // [meta.unary.prop], type properties
+ using std::has_unique_object_representations_v;
+ using std::has_virtual_destructor_v;
+ using std::is_abstract_v;
+ using std::is_aggregate_v;
+ using std::is_assignable_v;
+ using std::is_bounded_array_v;
+ using std::is_const_v;
+ using std::is_constructible_v;
+ using std::is_copy_assignable_v;
+ using std::is_copy_constructible_v;
+ using std::is_default_constructible_v;
+ using std::is_destructible_v;
+ using std::is_empty_v;
+ using std::is_final_v;
+ // using std::is_implicit_lifetime_v;
+ using std::is_move_assignable_v;
+ using std::is_move_constructible_v;
+ using std::is_nothrow_assignable_v;
+ using std::is_nothrow_constructible_v;
+ using std::is_nothrow_copy_assignable_v;
+ using std::is_nothrow_copy_constructible_v;
+ using std::is_nothrow_default_constructible_v;
+ using std::is_nothrow_destructible_v;
+ using std::is_nothrow_move_assignable_v;
+ using std::is_nothrow_move_constructible_v;
+ using std::is_nothrow_swappable_v;
+ using std::is_nothrow_swappable_with_v;
+ using std::is_polymorphic_v;
+#if _LIBCPP_STD_VER >= 23
+ using std::is_scoped_enum_v;
+#endif
+ using std::is_signed_v;
+ using std::is_standard_layout_v;
+ using std::is_swappable_v;
+ using std::is_swappable_with_v;
+ using std::is_trivial_v;
+ using std::is_trivially_assignable_v;
+ using std::is_trivially_constructible_v;
+ using std::is_trivially_copy_assignable_v;
+ using std::is_trivially_copy_constructible_v;
+ using std::is_trivially_copyable_v;
+ using std::is_trivially_default_constructible_v;
+ using std::is_trivially_destructible_v;
+ using std::is_trivially_move_assignable_v;
+ using std::is_trivially_move_constructible_v;
+ using std::is_unbounded_array_v;
+ using std::is_unsigned_v;
+ using std::is_volatile_v;
+ // using std::reference_constructs_from_temporary_v;
+ // using std::reference_converts_from_temporary_v;
+
+ // [meta.unary.prop.query], type property queries
+ using std::alignment_of_v;
+ using std::extent_v;
+ using std::rank_v;
+
+ // [meta.rel], type relations
+ using std::is_base_of_v;
+ using std::is_convertible_v;
+ using std::is_invocable_r_v;
+ using std::is_invocable_v;
+ // using std::is_layout_compatible_v;
+ using std::is_nothrow_convertible_v;
+ using std::is_nothrow_invocable_r_v;
+ using std::is_nothrow_invocable_v;
+ // using std::is_pointer_interconvertible_base_of_v;
+ using std::is_same_v;
+
+ // [meta.logical], logical operator traits
+ using std::conjunction_v;
+ using std::disjunction_v;
+ using std::negation_v;
+
+ // [meta.member], member relationships
+ // using std::is_corresponding_member;
+ // using std::is_pointer_interconvertible_with_class;
+
+ // [meta.const.eval], constant evaluation context
+ using std::is_constant_evaluated;
+
+ // [depr.meta.types]
+ using std::aligned_storage;
+ using std::aligned_storage_t;
+ using std::aligned_union;
+ using std::aligned_union_t;
+ using std::is_pod;
+ using std::is_pod_v;
+} // namespace std
+
+// typeindex.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::hash;
+ using std::type_index;
+} // namespace std
+
+// typeinfo.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::bad_cast;
+ using std::bad_typeid;
+ using std::type_info;
+} // namespace std
+
+// unordered_map.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [unord.map], class template unordered_­map
+ using std::unordered_map;
+
+ // [unord.multimap], class template unordered_­multimap
+ using std::unordered_multimap;
+
+ using std::operator==;
+
+ using std::swap;
+
+ // [unord.map.erasure], erasure for unordered_­map
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::unordered_map;
+ using std::pmr::unordered_multimap;
+ } // namespace pmr
+} // namespace std
+
+// unordered_set.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [unord.set], class template unordered_­set
+ using std::unordered_set;
+
+ // [unord.multiset], class template unordered_­multiset
+ using std::unordered_multiset;
+
+ using std::operator==;
+
+ using std::swap;
+
+ // [unord.set.erasure], erasure for unordered_­set
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::unordered_multiset;
+ using std::pmr::unordered_set;
+ } // namespace pmr
+} // namespace std
+
+// utility.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [utility.swap], swap
+ using std::swap;
+
+ // [utility.exchange], exchange
+ using std::exchange;
+
+ // [forward], forward/move
+ using std::forward;
+#if _LIBCPP_STD_VER >= 23
+ using std::forward_like;
+#endif
+ using std::move;
+ using std::move_if_noexcept;
+
+ // [utility.as.const], as_const
+ using std::as_const;
+
+ // [declval], declval
+ using std::declval;
+
+ // [utility.intcmp], integer comparison functions
+ using std::cmp_equal;
+ using std::cmp_not_equal;
+
+ using std::cmp_greater;
+ using std::cmp_greater_equal;
+ using std::cmp_less;
+ using std::cmp_less_equal;
+
+ using std::in_range;
+
+#if _LIBCPP_STD_VER >= 23
+ // [utility.underlying], to_underlying
+ using std::to_underlying;
+
+ // [utility.unreachable], unreachable
+ using std::unreachable;
+#endif // _LIBCPP_STD_VER >= 23
+
+ // [intseq], compile-time integer sequences
+ using std::index_sequence;
+ using std::integer_sequence;
+
+ using std::make_index_sequence;
+ using std::make_integer_sequence;
+
+ using std::index_sequence_for;
+
+ // [pairs], class template pair
+ using std::pair;
+
+#if _LIBCPP_STD_VER >= 23
+ using std::basic_common_reference;
+ using std::common_type;
+#endif
+ // [pairs.spec], pair specialized algorithms
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::make_pair;
+
+ // [pair.astuple], tuple-like access to pair
+ using std::tuple_element;
+ using std::tuple_size;
+
+ using std::get;
+
+ // [pair.piecewise], pair piecewise construction
+ using std::piecewise_construct;
+ using std::piecewise_construct_t;
+
+ // in-place construction
+ using std::in_place;
+ using std::in_place_t;
+
+ using std::in_place_type;
+ using std::in_place_type_t;
+
+ using std::in_place_index;
+ using std::in_place_index_t;
+
+ // [depr.relops]
+ namespace rel_ops {
+ using rel_ops::operator!=;
+ using rel_ops::operator>;
+ using rel_ops::operator<=;
+ using rel_ops::operator>=;
+ } // namespace rel_ops
+} // namespace std
+
+// valarray.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ using std::gslice;
+ using std::gslice_array;
+ using std::indirect_array;
+ using std::mask_array;
+ using std::slice;
+ using std::slice_array;
+ using std::valarray;
+
+ using std::swap;
+
+ using std::operator*;
+ using std::operator/;
+ using std::operator%;
+ using std::operator+;
+ using std::operator-;
+
+ using std::operator^;
+ using std::operator&;
+ using std::operator|;
+
+ using std::operator<<;
+ using std::operator>>;
+
+ using std::operator&&;
+ using std::operator||;
+
+ using std::operator==;
+ using std::operator!=;
+
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+
+ using std::abs;
+ using std::acos;
+ using std::asin;
+ using std::atan;
+
+ using std::atan2;
+
+ using std::cos;
+ using std::cosh;
+ using std::exp;
+ using std::log;
+ using std::log10;
+
+ using std::pow;
+
+ using std::sin;
+ using std::sinh;
+ using std::sqrt;
+ using std::tan;
+ using std::tanh;
+
+ using std::begin;
+ using std::end;
+} // namespace std
+
+// variant.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [variant.variant], class template variant
+ using std::variant;
+
+ // [variant.helper], variant helper classes
+ using std::variant_alternative;
+ using std::variant_npos;
+ using std::variant_size;
+ using std::variant_size_v;
+
+ // [variant.get], value access
+ using std::get;
+ using std::get_if;
+ using std::holds_alternative;
+ using std::variant_alternative_t;
+
+ // [variant.relops], relational operators
+ using std::operator==;
+ using std::operator!=;
+ using std::operator<;
+ using std::operator>;
+ using std::operator<=;
+ using std::operator>=;
+ using std::operator<=>;
+
+ // [variant.visit], visitation
+ using std::visit;
+
+ // [variant.monostate], class monostate
+ using std::monostate;
+
+ // [variant.specalg], specialized algorithms
+ using std::swap;
+
+ // [variant.bad.access], class bad_variant_access
+ using std::bad_variant_access;
+
+ // [variant.hash], hash support
+ using std::hash;
+} // namespace std
+
+// vector.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // [vector], class template vector
+ using std::vector;
+
+ using std::operator==;
+ using std::operator<=>;
+
+ using std::swap;
+
+ // [vector.erasure], erasure
+ using std::erase;
+ using std::erase_if;
+
+ namespace pmr {
+ using std::pmr::vector;
+ }
+
+ // hash support
+ using std::hash;
+
+#if _LIBCPP_STD_VER >= 23
+ // [vector.bool.fmt], formatter specialization for vector<bool>
+ using std::formatter;
+#endif
+} // namespace std
+
+// version.inc
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+export namespace std {
+ // This module exports nothing.
+} // namespace std
diff --git a/libbuild2/cc/target.cxx b/libbuild2/cc/target.cxx
index 3f71eb1..6a518dd 100644
--- a/libbuild2/cc/target.cxx
+++ b/libbuild2/cc/target.cxx
@@ -25,7 +25,6 @@ namespace build2
};
extern const char h_ext_def[] = "h";
-
const target_type h::static_type
{
"h",
@@ -40,7 +39,6 @@ namespace build2
};
extern const char c_ext_def[] = "c";
-
const target_type c::static_type
{
"c",
@@ -54,8 +52,48 @@ namespace build2
target_type::flag::none
};
- extern const char pc_ext[] = "pc"; // VC14 rejects constexpr.
+ extern const char m_ext_def[] = "m";
+ const target_type m::static_type
+ {
+ "m",
+ &cc::static_type,
+ &target_factory<m>,
+ nullptr, /* fixed_extension */
+ &target_extension_var<m_ext_def>,
+ &target_pattern_var<m_ext_def>,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+
+ extern const char S_ext_def[] = "S";
+ const target_type S::static_type
+ {
+ "S",
+ &cc::static_type,
+ &target_factory<S>,
+ nullptr, /* fixed_extension */
+ &target_extension_var<S_ext_def>,
+ &target_pattern_var<S_ext_def>,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+
+ const target_type c_inc::static_type
+ {
+ "c_inc",
+ &cc::static_type,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ &target_search,
+ target_type::flag::none
+ };
+ extern const char pc_ext[] = "pc"; // VC14 rejects constexpr.
const target_type pc::static_type
{
"pc",
@@ -70,7 +108,6 @@ namespace build2
};
extern const char pca_ext[] = "static.pc"; // VC14 rejects constexpr.
-
const target_type pca::static_type
{
"pca",
@@ -85,7 +122,6 @@ namespace build2
};
extern const char pcs_ext[] = "shared.pc"; // VC14 rejects constexpr.
-
const target_type pcs::static_type
{
"pcs",
diff --git a/libbuild2/cc/target.hxx b/libbuild2/cc/target.hxx
index fbac790..01f2d6e 100644
--- a/libbuild2/cc/target.hxx
+++ b/libbuild2/cc/target.hxx
@@ -68,6 +68,57 @@ namespace build2
static const target_type static_type;
};
+ // Objective-C source file (the same rationale for having it here as for
+ // c{} above).
+ //
+ class LIBBUILD2_CC_SYMEXPORT m: public cc
+ {
+ public:
+ m (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // Assembler with C preprocessor source file (the same rationale for
+ // having it here as for c{} above).
+ //
+ class LIBBUILD2_CC_SYMEXPORT S: public cc
+ {
+ public:
+ S (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // This is an abstract base target for deriving additional targets that
+ // can be #include'd in C translation units (the same rationale for having
+ // it here as for c{} above). In particular, only such targets will be
+ // considered to reverse-lookup extensions to target types (see
+ // dyndep_rule::map_extension() for background).
+ //
+ class LIBBUILD2_CC_SYMEXPORT c_inc: public cc
+ {
+ public:
+ c_inc (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
// pkg-config file targets.
//
class LIBBUILD2_CC_SYMEXPORT pc: public file // .pc (common)
diff --git a/libbuild2/cc/types.cxx b/libbuild2/cc/types.cxx
index 8ee4fa9..c6cfae9 100644
--- a/libbuild2/cc/types.cxx
+++ b/libbuild2/cc/types.cxx
@@ -6,6 +6,7 @@
#include <libbuild2/cc/utility.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -123,6 +124,8 @@ namespace build2
size_t importable_headers::
insert_angle_pattern (const dir_paths& sys_hdr_dirs, const string& pat)
{
+ tracer trace ("importable_headers::insert_angle_pattern");
+
assert (pat.front () == '<' && pat.back () == '>' && path_pattern (pat));
// First see if it has already been inserted.
@@ -172,7 +175,17 @@ namespace build2
try
{
- path_search (f, process, dir);
+ path_search (
+ f,
+ process,
+ dir,
+ path_match_flags::follow_symlinks,
+ [&trace] (const dir_entry& de)
+ {
+ l5 ([&]{trace << "skipping inaccessible/dangling entry "
+ << de.base () / de.path ();});
+ return true;
+ });
}
catch (const system_error& e)
{
diff --git a/libbuild2/cc/windows-rpath.cxx b/libbuild2/cc/windows-rpath.cxx
index a8bf104..eb62ad1 100644
--- a/libbuild2/cc/windows-rpath.cxx
+++ b/libbuild2/cc/windows-rpath.cxx
@@ -45,6 +45,8 @@ namespace build2
// Return the greatest (newest) timestamp of all the DLLs that we will be
// adding to the assembly or timestamp_nonexistent if there aren't any.
//
+ // Note: called during the execute phase.
+ //
timestamp link_rule::
windows_rpath_timestamp (const file& t,
const scope& bs,
@@ -88,7 +90,18 @@ namespace build2
//
if (l->is_a<libs> () && !l->path ().empty ()) // Also covers binless.
{
- timestamp t (l->load_mtime ());
+ // Handle the case where the library is a member of a group (for
+ // example, people are trying to hack something up with pre-built
+ // libraries; see GH issue #366).
+ //
+ timestamp t;
+ if (l->group_state (action () /* inner */))
+ {
+ t = l->group->is_a<mtime_target> ()->mtime ();
+ assert (t != timestamp_unknown);
+ }
+ else
+ t = l->load_mtime ();
if (t > r)
r = t;
@@ -128,7 +141,9 @@ namespace build2
library_cache lib_cache;
for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
- if (pt == nullptr || pt.adhoc ())
+ // Note: during execute so check for ad hoc first to avoid data races.
+ //
+ if (pt.adhoc () || pt == nullptr)
continue;
bool la;
@@ -139,7 +154,9 @@ namespace build2
( f = pt->is_a<libs> ()))
process_libraries (a, bs, li, sys_lib_dirs,
*f, la, pt.data,
- imp, lib, nullptr, true /* self */,
+ imp, lib, nullptr,
+ true /* self */,
+ false /* proc_opt_group */,
&lib_cache);
}
@@ -253,7 +270,9 @@ namespace build2
library_cache lib_cache;
for (const prerequisite_target& pt: t.prerequisite_targets[a])
{
- if (pt == nullptr || pt.adhoc ())
+ // Note: during execute so check for ad hoc first to avoid data races.
+ //
+ if (pt.adhoc () || pt == nullptr)
continue;
bool la;
@@ -264,7 +283,9 @@ namespace build2
( f = pt->is_a<libs> ()))
process_libraries (a, bs, li, sys_lib_dirs,
*f, la, pt.data,
- imp, lib, nullptr, true /* self */,
+ imp, lib, nullptr,
+ true /* self */,
+ false /* proc_opt_group */,
&lib_cache);
}
diff --git a/libbuild2/cli/buildfile b/libbuild2/cli/buildfile
new file mode 100644
index 0000000..9b6e4eb
--- /dev/null
+++ b/libbuild2/cli/buildfile
@@ -0,0 +1,71 @@
+# file : libbuild2/cli/buildfile
+# license : MIT; see accompanying LICENSE file
+
+# NOTE: shared imports should go into root.build.
+#
+include ../
+impl_libs = ../lib{build2} # Implied interface dependency.
+
+include ../cxx/
+intf_libs = ../cxx/lib{build2-cxx}
+
+./: lib{build2-cli}: libul{build2-cli}: {hxx ixx txx cxx}{** -**.test...} \
+ $intf_libs $impl_libs
+
+# Unit tests.
+#
+exe{*.test}:
+{
+ test = true
+ install = false
+}
+
+for t: cxx{**.test...}
+{
+ d = $directory($t)
+ n = $name($t)...
+
+ ./: $d/exe{$n}: $t $d/{hxx ixx txx}{+$n} $d/testscript{+$n}
+ $d/exe{$n}: libul{build2-cli}: bin.whole = false
+}
+
+# Build options.
+#
+obja{*}: cxx.poptions += -DLIBBUILD2_CLI_STATIC_BUILD
+objs{*}: cxx.poptions += -DLIBBUILD2_CLI_SHARED_BUILD
+
+# Export options.
+#
+lib{build2-cli}:
+{
+ cxx.export.poptions = "-I$out_root" "-I$src_root"
+ cxx.export.libs = $intf_libs
+}
+
+liba{build2-cli}: cxx.export.poptions += -DLIBBUILD2_CLI_STATIC
+libs{build2-cli}: cxx.export.poptions += -DLIBBUILD2_CLI_SHARED
+
+# For pre-releases use the complete version to make sure they cannot be used
+# in place of another pre-release or the final version. See the version module
+# for details on the version.* variable values.
+#
+# And because this is a build system module, we also embed the same value as
+# the interface version (note that we cannot use build.version.interface for
+# bundled modules because we could be built with a different version of the
+# build system).
+#
+ver = ($version.pre_release \
+ ? "$version.project_id" \
+ : "$version.major.$version.minor")
+
+lib{build2-cli}: bin.lib.version = @"-$ver"
+libs{build2-cli}: bin.lib.load_suffix = "-$ver"
+
+# Install into the libbuild2/cli/ subdirectory of, say, /usr/include/
+# recreating subdirectories.
+#
+{hxx ixx txx}{*}:
+{
+ install = include/libbuild2/cli/
+ install.subdirs = true
+}
diff --git a/libbuild2/cli/export.hxx b/libbuild2/cli/export.hxx
new file mode 100644
index 0000000..67c1eb9
--- /dev/null
+++ b/libbuild2/cli/export.hxx
@@ -0,0 +1,37 @@
+// file : libbuild2/cli/export.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#pragma once
+
+// Normally we don't export class templates (but do complete specializations),
+// inline functions, and classes with only inline member functions. Exporting
+// classes that inherit from non-exported/imported bases (e.g., std::string)
+// will end up badly. The only known workarounds are to not inherit or to not
+// export. Also, MinGW GCC doesn't like seeing non-exported functions being
+// used before their inline definition. The workaround is to reorder code. In
+// the end it's all trial and error.
+
+#if defined(LIBBUILD2_CLI_STATIC) // Using static.
+# define LIBBUILD2_CLI_SYMEXPORT
+#elif defined(LIBBUILD2_CLI_STATIC_BUILD) // Building static.
+# define LIBBUILD2_CLI_SYMEXPORT
+#elif defined(LIBBUILD2_CLI_SHARED) // Using shared.
+# ifdef _WIN32
+# define LIBBUILD2_CLI_SYMEXPORT __declspec(dllimport)
+# else
+# define LIBBUILD2_CLI_SYMEXPORT
+# endif
+#elif defined(LIBBUILD2_CLI_SHARED_BUILD) // Building shared.
+# ifdef _WIN32
+# define LIBBUILD2_CLI_SYMEXPORT __declspec(dllexport)
+# else
+# define LIBBUILD2_CLI_SYMEXPORT
+# endif
+#else
+// If none of the above macros are defined, then we assume we are being used
+// by some third-party build system that cannot/doesn't signal the library
+// type. Note that this fallback works for both static and shared but in case
+// of shared will be sub-optimal compared to having dllimport.
+//
+# define LIBBUILD2_CLI_SYMEXPORT // Using static or shared.
+#endif
diff --git a/build2/cli/init.cxx b/libbuild2/cli/init.cxx
index a00fd7f..581fdaf 100644
--- a/build2/cli/init.cxx
+++ b/libbuild2/cli/init.cxx
@@ -1,7 +1,7 @@
-// file : build2/cli/init.cxx -*- C++ -*-
+// file : libbuild2/cli/init.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <build2/cli/init.hxx>
+#include <libbuild2/cli/init.hxx>
#include <libbuild2/file.hxx>
#include <libbuild2/scope.hxx>
@@ -13,9 +13,9 @@
#include <libbuild2/cxx/target.hxx>
-#include <build2/cli/rule.hxx>
-#include <build2/cli/module.hxx>
-#include <build2/cli/target.hxx>
+#include <libbuild2/cli/rule.hxx>
+#include <libbuild2/cli/module.hxx>
+#include <libbuild2/cli/target.hxx>
namespace build2
{
@@ -72,7 +72,9 @@ namespace build2
// Enter metadata variables.
//
- auto& vp (rs.var_pool ());
+ // They are all qualified so go straight for the public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
auto& v_ver (vp.insert<string> ("cli.version"));
auto& v_sum (vp.insert<string> ("cli.checksum"));
diff --git a/build2/cli/init.hxx b/libbuild2/cli/init.hxx
index 1c54316..6d23795 100644
--- a/build2/cli/init.hxx
+++ b/libbuild2/cli/init.hxx
@@ -1,14 +1,16 @@
-// file : build2/cli/init.hxx -*- C++ -*-
+// file : libbuild2/cli/init.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CLI_INIT_HXX
-#define BUILD2_CLI_INIT_HXX
+#ifndef LIBBUILD2_CLI_INIT_HXX
+#define LIBBUILD2_CLI_INIT_HXX
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/module.hxx>
+#include <libbuild2/cli/export.hxx>
+
namespace build2
{
namespace cli
@@ -21,9 +23,9 @@ namespace build2
// `cli.config` -- load `cli.guess` and set the rest of the variables.
// `cli` -- load `cli.config` and register targets and rules.
//
- extern "C" const module_functions*
+ extern "C" LIBBUILD2_CLI_SYMEXPORT const module_functions*
build2_cli_load ();
}
}
-#endif // BUILD2_CLI_INIT_HXX
+#endif // LIBBUILD2_CLI_INIT_HXX
diff --git a/build2/cli/module.hxx b/libbuild2/cli/module.hxx
index 70f6ba8..ba10540 100644
--- a/build2/cli/module.hxx
+++ b/libbuild2/cli/module.hxx
@@ -1,15 +1,15 @@
-// file : build2/cli/module.hxx -*- C++ -*-
+// file : libbuild2/cli/module.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CLI_MODULE_HXX
-#define BUILD2_CLI_MODULE_HXX
+#ifndef LIBBUILD2_CLI_MODULE_HXX
+#define LIBBUILD2_CLI_MODULE_HXX
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/module.hxx>
-#include <build2/cli/rule.hxx>
+#include <libbuild2/cli/rule.hxx>
namespace build2
{
@@ -27,4 +27,4 @@ namespace build2
}
}
-#endif // BUILD2_CLI_MODULE_HXX
+#endif // LIBBUILD2_CLI_MODULE_HXX
diff --git a/build2/cli/rule.cxx b/libbuild2/cli/rule.cxx
index 364c90b..996ca51 100644
--- a/build2/cli/rule.cxx
+++ b/libbuild2/cli/rule.cxx
@@ -1,7 +1,7 @@
-// file : build2/cli/rule.cxx -*- C++ -*-
+// file : libbuild2/cli/rule.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <build2/cli/rule.hxx>
+#include <libbuild2/cli/rule.hxx>
#include <libbuild2/depdb.hxx>
#include <libbuild2/scope.hxx>
@@ -11,7 +11,7 @@
#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
-#include <build2/cli/target.hxx>
+#include <libbuild2/cli/target.hxx>
namespace build2
{
@@ -222,6 +222,8 @@ namespace build2
const cli_cxx& t (xt.as<cli_cxx> ());
const path& tp (t.h->path ());
+ context& ctx (t.ctx);
+
// Update prerequisites and determine if any relevant ones render us
// out-of-date. Note that currently we treat all the prerequisites as
// potentially affecting the result (think prologues/epilogues, CLI
@@ -323,11 +325,11 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "cli " << s;
+ print_diag ("cli", s, t);
- if (!t.ctx.dry_run)
+ if (!ctx.dry_run)
{
- run (pp, args);
+ run (ctx, pp, args, 1 /* finish_verbosity */);
dd.check_mtime (tp);
}
diff --git a/build2/cli/rule.hxx b/libbuild2/cli/rule.hxx
index 0538c57..0132b44 100644
--- a/build2/cli/rule.hxx
+++ b/libbuild2/cli/rule.hxx
@@ -1,14 +1,16 @@
-// file : build2/cli/rule.hxx -*- C++ -*-
+// file : libbuild2/cli/rule.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CLI_RULE_HXX
-#define BUILD2_CLI_RULE_HXX
+#ifndef LIBBUILD2_CLI_RULE_HXX
+#define LIBBUILD2_CLI_RULE_HXX
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/rule.hxx>
+#include <libbuild2/cli/export.hxx>
+
namespace build2
{
namespace cli
@@ -23,7 +25,8 @@ namespace build2
// @@ Redo as two separate rules?
//
- class compile_rule: public simple_rule, virtual data
+ class LIBBUILD2_CLI_SYMEXPORT compile_rule: public simple_rule,
+ private virtual data
{
public:
compile_rule (data&& d): data (move (d)) {}
@@ -40,4 +43,4 @@ namespace build2
}
}
-#endif // BUILD2_CLI_RULE_HXX
+#endif // LIBBUILD2_CLI_RULE_HXX
diff --git a/build2/cli/target.cxx b/libbuild2/cli/target.cxx
index 37eee97..22ae75c 100644
--- a/build2/cli/target.cxx
+++ b/libbuild2/cli/target.cxx
@@ -1,7 +1,7 @@
-// file : build2/cli/target.cxx -*- C++ -*-
+// file : libbuild2/cli/target.cxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <build2/cli/target.hxx>
+#include <libbuild2/cli/target.hxx>
#include <libbuild2/context.hxx>
diff --git a/build2/cli/target.hxx b/libbuild2/cli/target.hxx
index f27ee89..8efb837 100644
--- a/build2/cli/target.hxx
+++ b/libbuild2/cli/target.hxx
@@ -1,8 +1,8 @@
-// file : build2/cli/target.hxx -*- C++ -*-
+// file : libbuild2/cli/target.hxx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#ifndef BUILD2_CLI_TARGET_HXX
-#define BUILD2_CLI_TARGET_HXX
+#ifndef LIBBUILD2_CLI_TARGET_HXX
+#define LIBBUILD2_CLI_TARGET_HXX
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -11,11 +11,13 @@
#include <libbuild2/cxx/target.hxx>
+#include <libbuild2/cli/export.hxx>
+
namespace build2
{
namespace cli
{
- class cli: public file
+ class LIBBUILD2_CLI_SYMEXPORT cli: public file
{
public:
cli (context& c, dir_path d, dir_path o, string n)
@@ -37,7 +39,8 @@ namespace build2
const cxx::ixx* i = nullptr;
};
- class cli_cxx: public mtime_target, public cli_cxx_members
+ class LIBBUILD2_CLI_SYMEXPORT cli_cxx: public mtime_target,
+ public cli_cxx_members
{
public:
cli_cxx (context& c, dir_path d, dir_path o, string n)
@@ -55,4 +58,4 @@ namespace build2
}
}
-#endif // BUILD2_CLI_TARGET_HXX
+#endif // LIBBUILD2_CLI_TARGET_HXX
diff --git a/libbuild2/common-options.cxx b/libbuild2/common-options.cxx
index 5c58a62..03e7e60 100644
--- a/libbuild2/common-options.cxx
+++ b/libbuild2/common-options.cxx
@@ -30,7 +30,7 @@ namespace build2
// unknown_option
//
unknown_option::
- ~unknown_option () throw ()
+ ~unknown_option () noexcept
{
}
@@ -41,7 +41,7 @@ namespace build2
}
const char* unknown_option::
- what () const throw ()
+ what () const noexcept
{
return "unknown option";
}
@@ -49,7 +49,7 @@ namespace build2
// unknown_argument
//
unknown_argument::
- ~unknown_argument () throw ()
+ ~unknown_argument () noexcept
{
}
@@ -60,7 +60,7 @@ namespace build2
}
const char* unknown_argument::
- what () const throw ()
+ what () const noexcept
{
return "unknown argument";
}
@@ -68,7 +68,7 @@ namespace build2
// missing_value
//
missing_value::
- ~missing_value () throw ()
+ ~missing_value () noexcept
{
}
@@ -79,7 +79,7 @@ namespace build2
}
const char* missing_value::
- what () const throw ()
+ what () const noexcept
{
return "missing option value";
}
@@ -87,7 +87,7 @@ namespace build2
// invalid_value
//
invalid_value::
- ~invalid_value () throw ()
+ ~invalid_value () noexcept
{
}
@@ -102,7 +102,7 @@ namespace build2
}
const char* invalid_value::
- what () const throw ()
+ what () const noexcept
{
return "invalid option value";
}
@@ -116,7 +116,7 @@ namespace build2
}
const char* eos_reached::
- what () const throw ()
+ what () const noexcept
{
return "end of argument stream reached";
}
@@ -124,7 +124,7 @@ namespace build2
// file_io_failure
//
file_io_failure::
- ~file_io_failure () throw ()
+ ~file_io_failure () noexcept
{
}
@@ -135,7 +135,7 @@ namespace build2
}
const char* file_io_failure::
- what () const throw ()
+ what () const noexcept
{
return "unable to open file or read failure";
}
@@ -143,7 +143,7 @@ namespace build2
// unmatched_quote
//
unmatched_quote::
- ~unmatched_quote () throw ()
+ ~unmatched_quote () noexcept
{
}
@@ -154,7 +154,7 @@ namespace build2
}
const char* unmatched_quote::
- what () const throw ()
+ what () const noexcept
{
return "unmatched quote";
}
@@ -587,10 +587,31 @@ namespace build2
struct parser<bool>
{
static void
- parse (bool& x, scanner& s)
+ parse (bool& x, bool& xs, scanner& s)
{
- s.next ();
- x = true;
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
}
};
@@ -700,6 +721,56 @@ namespace build2
}
};
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
template <typename X, typename T, T X::*M>
void
thunk (X& x, scanner& s)
@@ -707,6 +778,14 @@ namespace build2
parser<T>::parse (x.*M, s);
}
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
template <typename X, typename T, T X::*M, bool X::*S>
void
thunk (X& x, scanner& s)
@@ -718,7 +797,6 @@ namespace build2
}
#include <map>
-#include <cstring>
namespace build2
{
diff --git a/libbuild2/common-options.hxx b/libbuild2/common-options.hxx
index f52fc3c..f90f563 100644
--- a/libbuild2/common-options.hxx
+++ b/libbuild2/common-options.hxx
@@ -94,7 +94,7 @@ namespace build2
{
public:
virtual
- ~unknown_option () throw ();
+ ~unknown_option () noexcept;
unknown_option (const std::string& option);
@@ -105,7 +105,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string option_;
@@ -115,7 +115,7 @@ namespace build2
{
public:
virtual
- ~unknown_argument () throw ();
+ ~unknown_argument () noexcept;
unknown_argument (const std::string& argument);
@@ -126,7 +126,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string argument_;
@@ -136,7 +136,7 @@ namespace build2
{
public:
virtual
- ~missing_value () throw ();
+ ~missing_value () noexcept;
missing_value (const std::string& option);
@@ -147,7 +147,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string option_;
@@ -157,7 +157,7 @@ namespace build2
{
public:
virtual
- ~invalid_value () throw ();
+ ~invalid_value () noexcept;
invalid_value (const std::string& option,
const std::string& value,
@@ -176,7 +176,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string option_;
@@ -191,14 +191,14 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
};
class LIBBUILD2_SYMEXPORT file_io_failure: public exception
{
public:
virtual
- ~file_io_failure () throw ();
+ ~file_io_failure () noexcept;
file_io_failure (const std::string& file);
@@ -209,7 +209,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string file_;
@@ -219,7 +219,7 @@ namespace build2
{
public:
virtual
- ~unmatched_quote () throw ();
+ ~unmatched_quote () noexcept;
unmatched_quote (const std::string& argument);
@@ -230,7 +230,7 @@ namespace build2
print (::std::ostream&) const;
virtual const char*
- what () const throw ();
+ what () const noexcept;
private:
std::string argument_;
diff --git a/libbuild2/config/functions.cxx b/libbuild2/config/functions.cxx
index 3e1b8a3..b1a61a2 100644
--- a/libbuild2/config/functions.cxx
+++ b/libbuild2/config/functions.cxx
@@ -23,8 +23,8 @@ namespace build2
// $config.origin()
//
- // Return the origin of the specified configuration variable value.
- // Possible result values and their semantics are as follows:
+ // Return the origin of the value of the specified configuration
+ // variable. Possible result values and their semantics are as follows:
//
// undefined
// The variable is undefined.
@@ -60,33 +60,15 @@ namespace build2
if (s == nullptr)
fail << "config.origin() called out of project" << endf;
- string n (convert<string> (move (name)));
+ switch (origin (*s, convert<string> (move (name))).first)
+ {
+ case variable_origin::undefined: return "undefined";
+ case variable_origin::default_: return "default";
+ case variable_origin::buildfile: return "buildfile";
+ case variable_origin::override_: return "override";
+ }
- // Make sure this is a config.* variable. This could matter since we
- // reply on the semantics of value::extra. We could also detect
- // special variables like config.booted, some config.config.*, etc.,
- // (see config_save() for details) but that seems harmless.
- //
- if (n.compare (0, 7, "config.") != 0)
- fail << "non-config.* variable passed to config.origin()" << endf;
-
- const variable* var (s->ctx.var_pool.find (n));
-
- if (var == nullptr)
- return "undefined";
-
- pair<lookup, size_t> org (s->lookup_original (*var));
- pair<lookup, size_t> ovr (var->overrides == nullptr
- ? org
- : s->lookup_override (*var, org));
-
- if (!ovr.first.defined ())
- return "undefined";
-
- if (org.first != ovr.first)
- return "override";
-
- return org.first->extra ? "default" : "buildfile";
+ return ""; // Should not reach.
};
// $config.save()
@@ -113,7 +95,7 @@ namespace build2
// See save_config() for details.
//
assert (s->ctx.phase == run_phase::load);
- module* mod (s->rw ().find_module<module> (module::name));
+ const module* mod (s->find_module<module> (module::name));
if (mod == nullptr)
fail << "config.save() called without config module";
diff --git a/libbuild2/config/init.cxx b/libbuild2/config/init.cxx
index 73c9d37..38590ae 100644
--- a/libbuild2/config/init.cxx
+++ b/libbuild2/config/init.cxx
@@ -26,6 +26,8 @@ namespace build2
{
namespace config
{
+ static const file_rule file_rule_ (true /* check_type */);
+
void
functions (function_map&); // functions.cxx
@@ -39,7 +41,7 @@ namespace build2
save_environment (const value& d, const value* b, names& storage)
{
if (b == nullptr)
- return make_pair (reverse (d, storage), "=");
+ return make_pair (reverse (d, storage, true /* reduce */), "=");
// The plan is to iterator over environment variables adding those that
// are not in base to storage. There is, however, a complication: we may
@@ -100,7 +102,10 @@ namespace build2
// reserved to not be valid module names (`build`). We also currently
// treat `import` as special.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// NOTE: all config.** variables are by default made (via a pattern) to
// be overridable with global visibility. So we must override this if a
@@ -234,7 +239,7 @@ namespace build2
? &extra.module_as<module> ()
: nullptr);
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */));
// Note: config.* is pattern-typed to global visibility.
//
@@ -261,6 +266,21 @@ namespace build2
// implementation undefined them after loading config.build). See also
// config.config.unload.
//
+ // Besides names, variables can also be specified as patterns in the
+ // config.<prefix>.(*|**)[<suffix>] form where `*` matches single
+ // component names (i.e., `foo` but not `foo.bar`), and `**` matches
+ // single and multi-component names. Currently only single wildcard (`*`
+ // or `**`) is supported. Additionally, a pattern in the
+ // config.<prefix>(*|**) form (i.e., without `.` after <prefix>) matches
+ // config.<prefix>.(*|**) plus config.<prefix> itself (but not
+ // config.<prefix>foo).
+ //
+ // For example, to disfigure all the project configuration variables
+ // (while preserving all the module configuration variables; note
+ // quoting to prevent pattern expansion):
+ //
+ // b config.config.disfigure="'config.hello**'"
+ //
// Note that this variable is not saved in config.build and is expected
// to always be specified as a command line override.
//
@@ -411,8 +431,15 @@ namespace build2
auto load_config_file = [&load_config] (const path& f, const location& l)
{
path_name fn (f);
- ifdstream ifs;
- load_config (open_file_or_stdin (fn, ifs), fn, l);
+ try
+ {
+ ifdstream ifs;
+ load_config (open_file_or_stdin (fn, ifs), fn, l);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read buildfile " << fn << ": " << e;
+ }
};
// Load config.build unless requested not to.
@@ -477,16 +504,112 @@ namespace build2
// Undefine variables specified with config.config.disfigure.
//
- if (const strings* vs = cast_null<strings> (rs[c_d]))
+ if (const strings* ns = cast_null<strings> (rs[c_d]))
{
- for (const string& v: *vs)
+ auto p (rs.vars.lookup_namespace ("config"));
+
+ for (auto i (p.first); i != p.second; )
{
- // An unknown variable can't possibly be defined.
+ const variable& var (i->first);
+
+ // This can be one of the overrides (__override, __prefix, etc),
+ // which we skip.
//
- if (const variable* var = vp.find (v))
+ if (!var.override ())
{
- rs.vars.erase (*var); // Undefine.
+ bool m (false);
+
+ for (const string& n: *ns)
+ {
+ if (n.compare (0, 7, "config.") != 0)
+ fail << "config.* variable expected in "
+ << "config.config.disfigure instead of '" << n << "'";
+
+ size_t p (n.find ('*'));
+
+ if (p == string::npos)
+ {
+ if ((m = var.name == n))
+ break;
+ }
+ else
+ {
+ // Pattern in one of these forms:
+ //
+ // config.<prefix>.(*|**)[<suffix>]
+ // config.<prefix>(*|**)
+ //
+ // BTW, an alternative way to handle this would be to
+ // translate it to a path and use our path_match() machinery,
+ // similar to how we do it for build config include/exclude.
+ // Perhaps one day when/if we decide to support multiple
+ // wildcards.
+ //
+ if (p == 7)
+ fail << "config.<prefix>* pattern expected in "
+ << "config.config.disfigure instead of '" << n << "'";
+
+ bool r (n[p + 1] == '*'); // Recursive.
+
+ size_t pe; // Prefix end/size.
+ if (n[p - 1] != '.')
+ {
+ // Second form should have no suffix.
+ //
+ if (p + (r ? 2 : 1) != n.size ())
+ fail << "config.<prefix>(*|**) pattern expected in "
+ << "config.config.disfigure instead of '" << n << "'";
+
+ // Match just <prefix>.
+ //
+ if ((m = n.compare (0, p, var.name) == 0))
+ break;
+
+ pe = p;
+ }
+ else
+ pe = p - 1;
+
+ // Match <prefix> followed by `.`.
+ //
+ if (n.compare (0, pe, var.name, 0, pe) != 0 ||
+ var.name[pe] != '.')
+ continue;
+
+ // Match suffix.
+ //
+ size_t sb (p + (r ? 2 : 1)); // Suffix begin.
+ size_t sn (n.size () - sb); // Suffix size.
+
+ size_t te; // Stem end.
+ if (sn == 0) // No suffix.
+ te = var.name.size ();
+ else
+ {
+ if (var.name.size () < pe + 1 + sn) // Too short.
+ continue;
+
+ te = var.name.size () - sn;
+
+ if (n.compare (sb, sn, var.name, te, sn) != 0)
+ continue;
+ }
+
+ // Match stem.
+ //
+ if ((m = r || var.name.find ('.', pe + 1) >= te))
+ break;
+ }
+ }
+
+ if (m)
+ {
+ i = rs.vars.erase (i); // Undefine.
+ continue;
+ }
}
+
+ ++i;
}
}
@@ -591,19 +714,23 @@ namespace build2
// Register alias and fallback rule for the configure meta-operation.
//
- // We need this rule for out-of-any-project dependencies (e.g.,
- // libraries imported from /usr/lib). We are registring it on the
- // global scope similar to builtin rules.
- //
- rs.global_scope ().insert_rule<mtime_target> (
- configure_id, 0, "config.file", file_rule::instance);
-
rs.insert_rule<alias> (configure_id, 0, "config.alias", alias_rule::instance);
// This allows a custom configure rule while doing nothing by default.
//
rs.insert_rule<target> (configure_id, 0, "config.noop", noop_rule::instance);
- rs.insert_rule<file> (configure_id, 0, "config.noop", noop_rule::instance);
+
+ // We need this rule for out-of-any-project dependencies (for example,
+ // libraries imported from /usr/lib). We are registering it on the
+ // global scope similar to builtin rules.
+ //
+ // Note: use target instead of anything more specific (such as
+ // mtime_target) in order not to take precedence over the rules above.
+ //
+ // See a similar rule in the dist module.
+ //
+ rs.global_scope ().insert_rule<target> (
+ configure_id, 0, "config.file", file_rule_);
return true;
}
diff --git a/libbuild2/config/module.hxx b/libbuild2/config/module.hxx
index 82b79be..8d3ff67 100644
--- a/libbuild2/config/module.hxx
+++ b/libbuild2/config/module.hxx
@@ -160,7 +160,7 @@ namespace build2
save_module (scope&, const char*, int);
const saved_variable*
- find_variable (const variable& var)
+ find_variable (const variable& var) const
{
auto i (saved_modules.find_sup (var.name));
if (i != saved_modules.end ())
diff --git a/libbuild2/config/operation.cxx b/libbuild2/config/operation.cxx
index ed98f90..5983e4b 100644
--- a/libbuild2/config/operation.cxx
+++ b/libbuild2/config/operation.cxx
@@ -61,8 +61,10 @@ namespace build2
path f (src_root / rs.root_extra->out_root_file);
- if (verb)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ if (verb >= 2)
+ text << "cat >" << f;
+ else if (verb)
+ print_diag ("save", f);
try
{
@@ -132,7 +134,8 @@ namespace build2
bool r;
if (c.compare (p, 4 , "save") == 0) r = true;
else if (c.compare (p, 4 , "drop") == 0) r = false;
- else fail << "invalid config.config.persist action '" << c << "'";
+ else fail << "invalid config.config.persist action '" << c << "'"
+ << endf;
bool w (false);
if ((p += 4) != c.size ())
@@ -161,11 +164,18 @@ namespace build2
// and this function can be called from a buildfile (probably only
// during serial execution but still).
//
+ // We could also be configuring multiple projects (including from
+ // pkg_configure() in bpkg) but feels like we should be ok since we
+ // only modify this project's root scope data which should not affect
+ // any other project.
+ //
+ // See also save_environment() for a similar issue.
+ //
void
save_config (const scope& rs,
ostream& os, const path_name& on,
bool inherit,
- module& mod,
+ const module& mod,
const project_set& projects)
{
context& ctx (rs.ctx);
@@ -179,7 +189,7 @@ namespace build2
if (v)
{
storage.clear ();
- dr << "'" << reverse (v, storage) << "'";
+ dr << "'" << reverse (v, storage, true /* reduce */) << "'";
}
else
dr << "[null]";
@@ -207,6 +217,8 @@ namespace build2
// saved according to config.config.persist potentially warning if the
// variable would otherwise be dropped.
//
+ // Note: go straight for the public variable pool.
+ //
auto& vp (ctx.var_pool);
for (auto p (rs.vars.lookup_namespace ("config"));
@@ -247,6 +259,24 @@ namespace build2
continue;
}
+ // A common reason behind an unused config.import.* value is an
+ // unused dependency. That is, there is depends in manifest but no
+ // import in buildfile (or import could be conditional in which case
+ // depends should also be conditional). So let's suggest this
+ // possibility. Note that the project name may have been sanitized
+ // to a variable name. Oh, well, better than nothing.
+ //
+ auto info_import = [] (diag_record& dr, const string& var)
+ {
+ if (var.compare (0, 14, "config.import.") == 0)
+ {
+ size_t p (var.find ('.', 14));
+
+ dr << info << "potentially unused dependency on "
+ << string (var, 14, p == string::npos ? p : p - 14);
+ }
+ };
+
const value& v (p.first->second);
pair<bool, bool> r (save_config_variable (*var,
@@ -255,7 +285,7 @@ namespace build2
true /* unused */));
if (r.first) // save
{
- mod.save_variable (*var, 0);
+ const_cast<module&> (mod).save_variable (*var, 0);
if (r.second) // warn
{
@@ -274,6 +304,7 @@ namespace build2
diag_record dr;
dr << warn (on) << "saving no longer used variable " << *var;
+ info_import (dr, var->name);
if (verb >= 2)
info_value (dr, v);
}
@@ -284,6 +315,7 @@ namespace build2
{
diag_record dr;
dr << warn (on) << "dropping no longer used variable " << *var;
+ info_import (dr, var->name);
info_value (dr, v);
}
}
@@ -509,8 +541,8 @@ namespace build2
// Handle the save_default_commented flag.
//
- if ((org.first.defined () && org.first->extra) && // Default value.
- org.first == ovr.first && // Not overriden.
+ if (org.first.defined () && org.first->extra == 1 && // Default.
+ org.first == ovr.first && // No override.
(flags & save_default_commented) != 0)
{
os << first () << '#' << n << " =" << endl;
@@ -527,7 +559,7 @@ namespace build2
pair<names_view, const char*> p (
sv.save != nullptr
? sv.save (v, base, storage)
- : make_pair (reverse (v, storage), "="));
+ : make_pair (reverse (v, storage, true /* reduce */), "="));
// Might becomes empty after a custom save function had at it.
//
@@ -556,7 +588,7 @@ namespace build2
save_config (const scope& rs,
const path& f,
bool inherit,
- module& mod,
+ const module& mod,
const project_set& projects)
{
path_name fn (f);
@@ -564,8 +596,10 @@ namespace build2
if (f.string () == "-")
fn.name = "<stdout>";
- if (verb)
- text << (verb >= 2 ? "cat >" : "save ") << fn;
+ if (verb >= 2)
+ text << "cat >" << fn;
+ else if (verb)
+ print_diag ("save", fn);
try
{
@@ -582,6 +616,9 @@ namespace build2
// Update config.config.environment value for a hermetic configuration.
//
+ // @@ We are modifying the module. See also save_config() for a similar
+ // issue.
+ //
static void
save_environment (scope& rs, module& mod)
{
@@ -636,6 +673,8 @@ namespace build2
}
}
+ // Note: go straight for the public variable pool.
+ //
value& v (rs.assign (*rs.ctx.var_pool.find ("config.config.environment")));
// Note that setting new config.config.environment value invalidates the
@@ -652,9 +691,9 @@ namespace build2
static void
configure_project (action a,
- scope& rs,
+ const scope& rs,
const variable* c_s, // config.config.save
- module& mod,
+ const module& mod,
project_set& projects)
{
tracer trace ("configure_project");
@@ -674,7 +713,7 @@ namespace build2
//
if (out_root != src_root)
{
- mkdir_p (out_root / rs.root_extra->build_dir);
+ mkdir_p (out_root / rs.root_extra->build_dir, 1);
mkdir (out_root / rs.root_extra->bootstrap_dir, 2);
}
@@ -688,7 +727,7 @@ namespace build2
// for the other half of this logic).
//
if (cast_false<bool> (rs["config.config.hermetic"]))
- save_environment (rs, mod);
+ save_environment (const_cast<scope&> (rs), const_cast<module&> (mod));
// Save src-root.build unless out_root is the same as src.
//
@@ -751,14 +790,14 @@ namespace build2
{
const dir_path& pd (p.second);
dir_path out_nroot (out_root / pd);
- scope& nrs (ctx.scopes.find_out (out_nroot).rw ());
+ const scope& nrs (ctx.scopes.find_out (out_nroot));
// Skip this subproject if it is not loaded or doesn't use the
// config module.
//
if (nrs.out_path () == out_nroot)
{
- if (module* m = nrs.find_module<module> (module::name))
+ if (const module* m = nrs.find_module<module> (module::name))
{
configure_project (a, nrs, c_s, *m, projects);
}
@@ -811,6 +850,8 @@ namespace build2
// Don't translate default to update. In our case unspecified
// means configure everything.
//
+ // Note: see pkg_configure() in bpkg if changing anything here.
+ //
return o;
}
@@ -847,6 +888,8 @@ namespace build2
static void
configure_pre (context&, const values& params, const location& l)
{
+ // Note: see pkg_configure() in bpkg if changing anything here.
+ //
forward (params, "configure", l); // Validate.
}
@@ -870,7 +913,9 @@ namespace build2
fail (l) << "forwarding to source directory " << rs.src_path ();
}
else
- load (params, rs, buildfile, out_base, src_base, l); // Normal load.
+ // Normal load.
+ //
+ perform_load (params, rs, buildfile, out_base, src_base, l);
}
static void
@@ -890,7 +935,7 @@ namespace build2
ts.push_back (&rs);
}
else
- search (params, rs, bs, bf, tk, l, ts); // Normal search.
+ perform_search (params, rs, bs, bf, tk, l, ts); // Normal search.
}
static void
@@ -910,6 +955,8 @@ namespace build2
context& ctx (fwd ? ts[0].as<scope> ().ctx : ts[0].as<target> ().ctx);
+ // Note: go straight for the public variable pool.
+ //
const variable* c_s (ctx.var_pool.find ("config.config.save"));
if (c_s->overrides == nullptr)
@@ -964,20 +1011,28 @@ namespace build2
ctx.current_operation (*oif);
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, true /* inner */, location ());
+
phase_lock pl (ctx, run_phase::match);
match_sync (action (configure_id, id), t);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, true /* inner */);
}
}
configure_project (a,
- rs->rw (),
+ *rs,
c_s,
- *rs->rw ().find_module<module> (module::name),
+ *rs->find_module<module> (module::name),
projects);
}
}
}
+ // NOTE: see pkg_configure() in bpkg if changing anything here.
+ //
const meta_operation_info mo_configure {
configure_id,
"configure",
@@ -1276,6 +1331,8 @@ namespace build2
// Add the default config.config.persist value unless there is a custom
// one (specified as a command line override).
//
+ // Note: go straight for the public variable pool.
+ //
const variable& var (*ctx.var_pool.find ("config.config.persist"));
if (!rs[var].defined ())
@@ -1392,7 +1449,8 @@ namespace build2
string ("config"), /* config_module */
nullopt, /* config_file */
true, /* buildfile */
- "the create meta-operation");
+ "the create meta-operation",
+ 1 /* verbosity */);
save_config (ctx, d);
}
diff --git a/libbuild2/config/operation.hxx b/libbuild2/config/operation.hxx
index 9e2a91e..1662941 100644
--- a/libbuild2/config/operation.hxx
+++ b/libbuild2/config/operation.hxx
@@ -15,8 +15,8 @@ namespace build2
{
class module;
- extern const meta_operation_info mo_configure;
- extern const meta_operation_info mo_disfigure;
+ LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_configure;
+ LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_disfigure;
const string&
preprocess_create (context&,
@@ -37,7 +37,7 @@ namespace build2
save_config (const scope& rs,
ostream&, const path_name&,
bool inherit,
- module&,
+ const module&,
const project_set&);
// See config.config.hermetic.environment.
diff --git a/libbuild2/config/types.hxx b/libbuild2/config/types.hxx
new file mode 100644
index 0000000..3cdc5e3
--- /dev/null
+++ b/libbuild2/config/types.hxx
@@ -0,0 +1,25 @@
+// file : libbuild2/config/types.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_CONFIG_TYPES_HXX
+#define LIBBUILD2_CONFIG_TYPES_HXX
+
+#include <libbuild2/types.hxx>
+
+namespace build2
+{
+ namespace config
+ {
+ // The origin of the value of a configuration variable.
+ //
+ enum class variable_origin
+ {
+ undefined, // Undefined.
+ default_, // Default value from the config directive.
+ buildfile, // Value from a buildfile, normally config.build.
+ override_ // Value from a command line override.
+ };
+ }
+}
+
+#endif // LIBBUILD2_CONFIG_TYPES_HXX
diff --git a/libbuild2/config/utility.cxx b/libbuild2/config/utility.cxx
index a78b263..6574367 100644
--- a/libbuild2/config/utility.cxx
+++ b/libbuild2/config/utility.cxx
@@ -32,7 +32,7 @@ namespace build2
// Treat an inherited value that was set to default as new.
//
- if (l.defined () && l->extra)
+ if (l.defined () && l->extra == 1)
n = true;
if (var.overrides != nullptr)
@@ -81,7 +81,9 @@ namespace build2
const string& n,
initializer_list<const char*> ig)
{
- auto& vp (rs.var_pool ());
+ // Note: go straight for the public variable pool.
+ //
+ auto& vp (rs.ctx.var_pool);
// Search all outer scopes for any value in this namespace.
//
@@ -128,7 +130,7 @@ namespace build2
// Pattern-typed as bool.
//
const variable& var (
- rs.var_pool ().insert ("config." + n + ".configured"));
+ rs.var_pool (true).insert ("config." + n + ".configured"));
save_variable (rs, var);
@@ -142,7 +144,7 @@ namespace build2
// Pattern-typed as bool.
//
const variable& var (
- rs.var_pool ().insert ("config." + n + ".configured"));
+ rs.var_pool (true).insert ("config." + n + ".configured"));
save_variable (rs, var);
@@ -156,5 +158,56 @@ namespace build2
else
return false;
}
+
+ pair<variable_origin, lookup>
+ origin (const scope& rs, const string& n)
+ {
+ // Note: go straight for the public variable pool.
+ //
+ const variable* var (rs.ctx.var_pool.find (n));
+
+ if (var == nullptr)
+ {
+ if (n.compare (0, 7, "config.") != 0)
+ throw invalid_argument ("config.* variable expected");
+
+ return make_pair (variable_origin::undefined, lookup ());
+ }
+
+ return origin (rs, *var);
+ }
+
+ pair<variable_origin, lookup>
+ origin (const scope& rs, const variable& var)
+ {
+ // Make sure this is a config.* variable. This could matter since we
+ // rely on the semantics of value::extra. We could also detect
+ // special variables like config.booted, some config.config.*, etc.,
+ // (see config_save() for details) but that seems harmless.
+ //
+ if (var.name.compare (0, 7, "config.") != 0)
+ throw invalid_argument ("config.* variable expected");
+
+ return origin (rs, var, rs.lookup_original (var));
+ }
+
+ pair<variable_origin, lookup>
+ origin (const scope& rs, const variable& var, pair<lookup, size_t> org)
+ {
+ pair<lookup, size_t> ovr (var.overrides == nullptr
+ ? org
+ : rs.lookup_override (var, org));
+
+ if (!ovr.first.defined ())
+ return make_pair (variable_origin::undefined, lookup ());
+
+ if (org.first != ovr.first)
+ return make_pair (variable_origin::override_, ovr.first);
+
+ return make_pair (org.first->extra == 1
+ ? variable_origin::default_
+ : variable_origin::buildfile,
+ org.first);
+ }
}
}
diff --git a/libbuild2/config/utility.hxx b/libbuild2/config/utility.hxx
index cb82ea6..1e2ff53 100644
--- a/libbuild2/config/utility.hxx
+++ b/libbuild2/config/utility.hxx
@@ -11,6 +11,8 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/variable.hxx>
+#include <libbuild2/config/types.hxx>
+
#include <libbuild2/export.hxx>
namespace build2
@@ -62,6 +64,11 @@ namespace build2
// omitted value(s) is the default (see a note in lookup_config()
// documentation for details).
//
+ // The below lookup_*() functions mark the default value by setting
+ // value::extra to 1. Note that it's exactly 1 and not "not 0" since other
+ // values could have other meaning (see, for example, package skeleton
+ // in bpkg).
+ //
const uint64_t save_default_commented = 0x01; // Based on value::extra.
const uint64_t save_null_omitted = 0x02; // Treat NULL as undefined.
const uint64_t save_empty_omitted = 0x04; // Treat empty as undefined.
@@ -264,6 +271,8 @@ namespace build2
const string& var,
uint64_t save_flags = 0)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (rs, rs.ctx.var_pool[var], save_flags);
}
@@ -273,6 +282,8 @@ namespace build2
const string& var,
uint64_t save_flags = 0)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (new_value, rs, rs.ctx.var_pool[var], save_flags);
}
@@ -360,6 +371,8 @@ namespace build2
uint64_t save_flags = 0,
bool override = false)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (rs,
rs.ctx.var_pool[var],
std::forward<T> (default_value), // VC14
@@ -376,6 +389,8 @@ namespace build2
uint64_t save_flags = 0,
bool override = false)
{
+ // Note: go straight for the public variable pool.
+ //
return lookup_config (new_value,
rs,
rs.ctx.var_pool[var],
@@ -420,7 +435,7 @@ namespace build2
const V* cv (
cast_null<V> (
lookup_config (rs,
- rs.var_pool ().insert<V> ("config." + var),
+ rs.var_pool (true).insert<V> ("config." + var),
std::forward<T> (default_value)))); // VC14
value& v (bs.assign<V> (move (var)));
@@ -438,7 +453,7 @@ namespace build2
const V* cv (
cast_null<V> (
lookup_config (rs,
- rs.var_pool ().insert<V> ("config." + var),
+ rs.var_pool (true).insert<V> ("config." + var),
std::forward<T> (default_value)))); // VC14
value& v (bs.append<V> (move (var)));
@@ -501,6 +516,25 @@ namespace build2
//
LIBBUILD2_SYMEXPORT bool
unconfigured (scope& rs, const string& var, bool value);
+
+ // Return the origin of the value of the specified configuration variable
+ // plus the value itself. See $config.origin() for details.
+ //
+ // Throws invalid_argument if the passed variable is not config.*.
+ //
+ LIBBUILD2_SYMEXPORT pair<variable_origin, lookup>
+ origin (const scope& rs, const string& name);
+
+ LIBBUILD2_SYMEXPORT pair<variable_origin, lookup>
+ origin (const scope& rs, const variable&);
+
+ // As above but using the result of scope::lookup_original() or
+ // semantically equivalent (e.g., lookup_namespace()).
+ //
+ // Note that this version does not check that the variable is config.*.
+ //
+ LIBBUILD2_SYMEXPORT pair<variable_origin, lookup>
+ origin (const scope& rs, const variable&, pair<lookup, size_t> original);
}
}
diff --git a/libbuild2/config/utility.txx b/libbuild2/config/utility.txx
index b88f76c..71e41fd 100644
--- a/libbuild2/config/utility.txx
+++ b/libbuild2/config/utility.txx
@@ -58,7 +58,7 @@ namespace build2
if (!l.defined () || (def_ovr && !l.belongs (rs)))
{
value& v (rs.assign (var) = std::forward<T> (def_val)); // VC14
- v.extra = true; // Default value flag.
+ v.extra = 1; // Default value flag.
n = (sflags & save_default_commented) == 0; // Absence means default.
l = lookup (v, var, rs);
@@ -66,7 +66,7 @@ namespace build2
}
// Treat an inherited value that was set to default as new.
//
- else if (l->extra)
+ else if (l->extra == 1)
n = (sflags & save_default_commented) == 0; // Absence means default.
if (var.overrides != nullptr)
diff --git a/libbuild2/context.cxx b/libbuild2/context.cxx
index 7b465b4..6e4fd6f 100644
--- a/libbuild2/context.cxx
+++ b/libbuild2/context.cxx
@@ -45,6 +45,7 @@ namespace build2
scope_map scopes;
target_set targets;
variable_pool var_pool;
+ variable_patterns var_patterns;
variable_overrides var_overrides;
function_map functions;
@@ -52,32 +53,267 @@ namespace build2
variable_override_cache global_override_cache;
strings global_var_overrides;
- data (context& c): scopes (c), targets (c), var_pool (&c /* global */) {}
+ data (context& c)
+ : scopes (c),
+ targets (c),
+ var_pool (&c /* shared */, nullptr /* outer */, &var_patterns),
+ var_patterns (&c /* shared */, &var_pool) {}
};
+ void context::
+ reserve (reserves res)
+ {
+ assert (phase == run_phase::load);
+
+ if (res.targets != 0)
+ data_->targets.map_.reserve (res.targets);
+
+ if (res.variables != 0)
+ data_->var_pool.map_.reserve (res.variables);
+ }
+
+ pair<char, variable_override> context::
+ parse_variable_override (const string& s, size_t i, bool buildspec)
+ {
+ istringstream is (s);
+ is.exceptions (istringstream::failbit | istringstream::badbit);
+
+ // Similar to buildspec we do "effective escaping" of the special `'"\$(`
+ // characters (basically what's escapable inside a double-quoted literal
+ // plus the single quote; note, however, that we exclude line
+ // continuations and `)` since they would make directory paths on Windows
+ // unusable).
+ //
+ path_name in ("<cmdline>");
+ lexer l (is, in, 1 /* line */, "\'\"\\$(");
+
+ // At the buildfile level the scope-specific variable should be separated
+ // from the directory with a whitespace, for example:
+ //
+ // ./ foo=$bar
+ //
+ // However, requiring this for command line variables would be too
+ // inconvenient so we support both.
+ //
+ // We also have the optional visibility modifier as a first character of
+ // the variable name:
+ //
+ // ! - global
+ // % - project
+ // / - scope
+ //
+ // The last one clashes a bit with the directory prefix:
+ //
+ // ./ /foo=bar
+ // .//foo=bar
+ //
+ // But that's probably ok (the need for a scope-qualified override with
+ // scope visibility should be pretty rare). Note also that to set the
+ // value on the global scope we use !.
+ //
+ // And so the first token should be a word which can be either a variable
+ // name (potentially with the directory qualification) or just the
+ // directory, in which case it should be followed by another word
+ // (unqualified variable name). To avoid treating any of the visibility
+ // modifiers as special we use the cmdvar mode.
+ //
+ l.mode (lexer_mode::cmdvar);
+ token t (l.next ());
+
+ optional<dir_path> dir;
+ if (t.type == token_type::word)
+ {
+ string& v (t.value);
+ size_t p (path::traits_type::rfind_separator (v));
+
+ if (p != string::npos && p != 0) // If first then visibility.
+ {
+ if (p == v.size () - 1)
+ {
+ // Separate directory.
+ //
+ dir = dir_path (move (v));
+ t = l.next ();
+
+ // Target-specific overrides are not yet supported (and probably
+ // never will be; the beast is already complex enough).
+ //
+ if (t.type == token_type::colon)
+ {
+ diag_record dr (fail);
+
+ dr << "'" << s << "' is a target-specific override";
+
+ if (buildspec)
+ dr << info << "use double '--' to treat this argument as "
+ << "buildspec";
+ }
+ }
+ else
+ {
+ // Combined directory.
+ //
+ // If double separator (visibility marker), then keep the first in
+ // name.
+ //
+ if (p != 0 && path::traits_type::is_separator (v[p - 1]))
+ --p;
+
+ dir = dir_path (t.value, 0, p + 1); // Include the separator.
+ t.value.erase (0, p + 1); // Erase the separator.
+ }
+
+ if (dir->relative ())
+ {
+ // Handle the special relative to base scope case (.../).
+ //
+ auto i (dir->begin ());
+
+ if (*i == "...")
+ dir = dir_path (++i, dir->end ()); // Note: can become empty.
+ else
+ dir->complete (); // Relative to CWD.
+ }
+
+ if (dir->absolute ())
+ dir->normalize ();
+ }
+ }
+
+ token_type tt (l.next ().type);
+
+ // The token should be the variable name followed by =, +=, or =+.
+ //
+ if (t.type != token_type::word || t.value.empty () ||
+ (tt != token_type::assign &&
+ tt != token_type::prepend &&
+ tt != token_type::append))
+ {
+ diag_record dr (fail);
+
+ dr << "expected variable assignment instead of '" << s << "'";
+
+ if (buildspec)
+ dr << info << "use double '--' to treat this argument as buildspec";
+ }
+
+ // Take care of the visibility. Note that here we rely on the fact that
+ // none of these characters are lexer's name separators.
+ //
+ char c (t.value[0]);
+
+ if (path::traits_type::is_separator (c))
+ c = '/'; // Normalize.
+
+ string n (t.value, c == '!' || c == '%' || c == '/' ? 1 : 0);
+
+ // Make sure it is qualified.
+ //
+ // We can support overridable public unqualified variables (which must
+ // all be pre-entered by the end of this constructor) but we will need
+ // to detect their names here in an ad hoc manner (we cannot enter them
+ // before this logic because of the "untyped override" requirement).
+ //
+ // Note: issue the same diagnostics as in variable_pool::update().
+ //
+ if (n.find ('.') == string::npos)
+ fail << "variable " << n << " cannot be overridden";
+
+ if (c == '!' && dir)
+ fail << "scope-qualified global override of variable " << n;
+
+ // Pre-enter the main variable. Note that we rely on all the overridable
+ // variables with global visibility to be known (either entered or
+ // handled via a pattern) at this stage.
+ //
+ variable_pool& vp (data_->var_pool);
+ variable& var (
+ const_cast<variable&> (vp.insert (n, true /* overridable */)));
+
+ const variable* o;
+ {
+ variable_visibility v (c == '/' ? variable_visibility::scope :
+ c == '%' ? variable_visibility::project :
+ variable_visibility::global);
+
+ const char* k (tt == token_type::assign ? "__override" :
+ tt == token_type::append ? "__suffix" : "__prefix");
+
+ unique_ptr<variable> p (
+ new variable {
+ n + '.' + to_string (i + 1) + '.' + k,
+ &vp /* owner */,
+ nullptr /* aliases */,
+ nullptr /* type */,
+ nullptr /* overrides */,
+ v});
+
+ // Back link.
+ //
+ p->aliases = p.get ();
+ if (var.overrides != nullptr)
+ swap (p->aliases,
+ const_cast<variable*> (var.overrides.get ())->aliases);
+
+ // Forward link.
+ //
+ p->overrides = move (var.overrides);
+ var.overrides = move (p);
+
+ o = var.overrides.get ();
+ }
+
+ // Currently we expand project overrides in the global scope to keep
+ // things simple. Pass original variable for diagnostics. Use current
+ // working directory as pattern base.
+ //
+ scope& gs (global_scope.rw ());
+
+ parser p (*this);
+ pair<value, token> r (p.parse_variable_value (l, gs, &work, var));
+
+ if (r.second.type != token_type::eos)
+ fail << "unexpected " << r.second << " in variable assignment "
+ << "'" << s << "'";
+
+ // Make sure the value is not typed.
+ //
+ if (r.first.type != nullptr)
+ fail << "typed override of variable " << n;
+
+ return make_pair (
+ c,
+ variable_override {var, *o, move (dir), move (r.first)});
+ }
+
context::
context (scheduler& s,
global_mutexes& ms,
file_cache& fc,
- bool mo,
+ optional<match_only_level> mo,
bool nem,
bool dr,
+ bool ndb,
bool kg,
const strings& cmd_vars,
+ reserves res,
optional<context*> mc,
- const loaded_modules_lock* ml)
+ const module_libraries_lock* ml,
+ const function<var_override_function>& var_ovr_func)
: data_ (new data (*this)),
- sched (s),
- mutexes (ms),
- fcache (fc),
+ sched (&s),
+ mutexes (&ms),
+ fcache (&fc),
match_only (mo),
no_external_modules (nem),
dry_run_option (dr),
+ no_diag_buffer (ndb),
keep_going (kg),
phase_mutex (*this),
scopes (data_->scopes),
targets (data_->targets),
var_pool (data_->var_pool),
+ var_patterns (data_->var_patterns),
var_overrides (data_->var_overrides),
functions (data_->functions),
global_scope (create_global_scope (data_->scopes)),
@@ -90,12 +326,17 @@ namespace build2
? optional<unique_ptr<context>> (nullptr)
: nullopt)
{
+ // NOTE: see also the bare minimum version below if adding anything here.
+
tracer trace ("context");
l6 ([&]{trace << "initializing build state";});
+ reserve (res);
+
scope_map& sm (data_->scopes);
variable_pool& vp (data_->var_pool);
+ variable_patterns& vpats (data_->var_patterns);
insert_builtin_functions (functions);
@@ -104,7 +345,7 @@ namespace build2
//
meta_operation_table.insert ("noop");
meta_operation_table.insert ("perform");
- meta_operation_table.insert ("configure");
+ meta_operation_table.insert ("configure"); // bpkg assumes no process.
meta_operation_table.insert ("disfigure");
if (config_preprocess_create != nullptr)
@@ -134,11 +375,12 @@ namespace build2
// Any variable assigned on the global scope should natually have the
// global visibility.
//
- auto set = [&gs, &vp] (const char* var, auto val)
+ auto set = [&gs, &vp] (const char* var, auto val) -> const value&
{
using T = decltype (val);
value& v (gs.assign (vp.insert<T> (var, variable_visibility::global)));
v = move (val);
+ return v;
};
// Build system mode.
@@ -179,10 +421,10 @@ namespace build2
//
set ("build.verbosity", uint64_t (verb));
- // Build system progress diagnostics.
+ // Build system diagnostics progress and color.
//
- // Note that it can be true, false, or NULL if progress was neither
- // requested nor suppressed.
+ // Note that these can be true, false, or NULL if neither requested nor
+ // suppressed explicitly.
//
{
value& v (gs.assign (vp.insert<bool> ("build.progress", v_g)));
@@ -190,6 +432,18 @@ namespace build2
v = *diag_progress_option;
}
+ {
+ value& v (gs.assign (vp.insert<bool> ("build.diag_color", v_g)));
+ if (diag_color_option)
+ v = *diag_color_option;
+ }
+
+ // These are the "effective" values that incorporate a suitable default
+ // if neither requested nor suppressed explicitly.
+ //
+ set ("build.show_progress", show_progress (verb_never));
+ set ("build.show_diag_color", show_diag_color ());
+
// Build system version (similar to what we do in the version module
// except here we don't include package epoch/revision).
//
@@ -245,7 +499,8 @@ namespace build2
// Did the user ask us to use config.guess?
//
string orig (config_guess
- ? run<string> (3,
+ ? run<string> (*this,
+ 3,
*config_guess,
[](string& l, bool) {return move (l);})
: BUILD2_HOST_TRIPLET);
@@ -268,7 +523,7 @@ namespace build2
set ("build.host.version", t.version);
set ("build.host.class", t.class_);
- set ("build.host", move (t));
+ build_host = &set ("build.host", move (t)).as<target_triplet> ();
}
catch (const invalid_argument& e)
{
@@ -292,6 +547,7 @@ namespace build2
t.insert<path_target> ();
t.insert<file> ();
+ t.insert<group> ();
t.insert<alias> ();
t.insert<dir> ();
t.insert<fsdir> ();
@@ -326,215 +582,51 @@ namespace build2
// Note that some config.config.* variables have project visibility thus
// the match argument is false.
//
- vp.insert_pattern ("config.**", nullopt, true, v_g, true, false);
+ vpats.insert ("config.**", nullopt, true, v_g, true, false);
// Parse and enter the command line variables. We do it before entering
// any other variables so that all the variables that are overriden are
// marked as such first. Then, as we enter variables, we can verify that
// the override is alowed.
//
- for (size_t i (0); i != cmd_vars.size (); ++i)
{
- const string& s (cmd_vars[i]);
+ size_t i (0);
+ for (; i != cmd_vars.size (); ++i)
+ {
+ const string& s (cmd_vars[i]);
- istringstream is (s);
- is.exceptions (istringstream::failbit | istringstream::badbit);
+ pair<char, variable_override> p (
+ parse_variable_override (s, i, true /* buildspec */));
- // Similar to buildspec we do "effective escaping" and only for ['"\$(]
- // (basically what's necessary inside a double-quoted literal plus the
- // single quote).
- //
- path_name in ("<cmdline>");
- lexer l (is, in, 1 /* line */, "\'\"\\$(");
+ char c (p.first);
+ variable_override& vo (p.second);
- // At the buildfile level the scope-specific variable should be
- // separated from the directory with a whitespace, for example:
- //
- // ./ foo=$bar
- //
- // However, requiring this for command line variables would be too
- // inconvinient so we support both.
- //
- // We also have the optional visibility modifier as a first character of
- // the variable name:
- //
- // ! - global
- // % - project
- // / - scope
- //
- // The last one clashes a bit with the directory prefix:
- //
- // ./ /foo=bar
- // .//foo=bar
- //
- // But that's probably ok (the need for a scope-qualified override with
- // scope visibility should be pretty rare). Note also that to set the
- // value on the global scope we use !.
- //
- // And so the first token should be a word which can be either a
- // variable name (potentially with the directory qualification) or just
- // the directory, in which case it should be followed by another word
- // (unqualified variable name). To avoid treating any of the visibility
- // modifiers as special we use the cmdvar mode.
- //
- l.mode (lexer_mode::cmdvar);
- token t (l.next ());
-
- optional<dir_path> dir;
- if (t.type == token_type::word)
- {
- string& v (t.value);
- size_t p (path::traits_type::rfind_separator (v));
-
- if (p != string::npos && p != 0) // If first then visibility.
+ // Global and absolute scope overrides we can enter directly. Project
+ // and relative scope ones will be entered later for each project.
+ //
+ if (c == '!' || (vo.dir && vo.dir->absolute ()))
{
- if (p == v.size () - 1)
- {
- // Separate directory.
- //
- dir = dir_path (move (v));
- t = l.next ();
-
- // Target-specific overrides are not yet supported (and probably
- // never will be; the beast is already complex enough).
- //
- if (t.type == token_type::colon)
- fail << "'" << s << "' is a target-specific override" <<
- info << "use double '--' to treat this argument as buildspec";
- }
- else
- {
- // Combined directory.
- //
- // If double separator (visibility marker), then keep the first in
- // name.
- //
- if (p != 0 && path::traits_type::is_separator (v[p - 1]))
- --p;
-
- dir = dir_path (t.value, 0, p + 1); // Include the separator.
- t.value.erase (0, p + 1); // Erase the separator.
- }
+ scope& s (c == '!' ? gs : *sm.insert_out (*vo.dir)->second.front ());
- if (dir->relative ())
- {
- // Handle the special relative to base scope case (.../).
- //
- auto i (dir->begin ());
-
- if (*i == "...")
- dir = dir_path (++i, dir->end ()); // Note: can become empty.
- else
- dir->complete (); // Relative to CWD.
- }
+ auto p (s.vars.insert (vo.ovr));
+ assert (p.second); // Variable name is unique.
- if (dir->absolute ())
- dir->normalize ();
+ value& v (p.first);
+ v = move (vo.val);
}
- }
-
- token_type tt (l.next ().type);
-
- // The token should be the variable name followed by =, +=, or =+.
- //
- if (t.type != token_type::word || t.value.empty () ||
- (tt != token_type::assign &&
- tt != token_type::prepend &&
- tt != token_type::append))
- {
- fail << "expected variable assignment instead of '" << s << "'" <<
- info << "use double '--' to treat this argument as buildspec";
- }
-
- // Take care of the visibility. Note that here we rely on the fact that
- // none of these characters are lexer's name separators.
- //
- char c (t.value[0]);
-
- if (path::traits_type::is_separator (c))
- c = '/'; // Normalize.
-
- string n (t.value, c == '!' || c == '%' || c == '/' ? 1 : 0);
-
- if (c == '!' && dir)
- fail << "scope-qualified global override of variable " << n;
-
- // Pre-enter the main variable. Note that we rely on all the overridable
- // variables with global visibility to be known (either entered or
- // handled via a pettern) at this stage.
- //
- variable& var (
- const_cast<variable&> (vp.insert (n, true /* overridable */)));
+ else
+ data_->var_overrides.push_back (move (vo));
- const variable* o;
- {
- variable_visibility v (c == '/' ? variable_visibility::scope :
- c == '%' ? variable_visibility::project :
- variable_visibility::global);
-
- const char* k (tt == token_type::assign ? "__override" :
- tt == token_type::append ? "__suffix" : "__prefix");
-
- unique_ptr<variable> p (
- new variable {
- n + '.' + to_string (i + 1) + '.' + k,
- nullptr /* aliases */,
- nullptr /* type */,
- nullptr /* overrides */,
- v});
-
- // Back link.
- //
- p->aliases = p.get ();
- if (var.overrides != nullptr)
- swap (p->aliases,
- const_cast<variable*> (var.overrides.get ())->aliases);
-
- // Forward link.
+ // Save global overrides for nested contexts.
//
- p->overrides = move (var.overrides);
- var.overrides = move (p);
-
- o = var.overrides.get ();
- }
-
- // Currently we expand project overrides in the global scope to keep
- // things simple. Pass original variable for diagnostics. Use current
- // working directory as pattern base.
- //
- parser p (*this);
- pair<value, token> r (p.parse_variable_value (l, gs, &work, var));
-
- if (r.second.type != token_type::eos)
- fail << "unexpected " << r.second << " in variable assignment "
- << "'" << s << "'";
-
- // Make sure the value is not typed.
- //
- if (r.first.type != nullptr)
- fail << "typed override of variable " << n;
-
- // Global and absolute scope overrides we can enter directly. Project
- // and relative scope ones will be entered later for each project.
- //
- if (c == '!' || (dir && dir->absolute ()))
- {
- scope& s (c == '!' ? gs : *sm.insert_out (*dir)->second.front ());
-
- auto p (s.vars.insert (*o));
- assert (p.second); // Variable name is unique.
-
- value& v (p.first);
- v = move (r.first);
+ if (c == '!')
+ data_->global_var_overrides.push_back (s);
}
- else
- data_->var_overrides.push_back (
- variable_override {var, *o, move (dir), move (r.first)});
- // Save global overrides for nested contexts.
+ // Parse any ad hoc project-wide overrides.
//
- if (c == '!')
- data_->global_var_overrides.push_back (s);
+ if (var_ovr_func != nullptr)
+ var_ovr_func (*this, i);
}
// Enter remaining variable patterns and builtin variables.
@@ -543,24 +635,26 @@ namespace build2
const auto v_t (variable_visibility::target);
const auto v_q (variable_visibility::prereq);
- vp.insert_pattern<bool> ("config.**.configured", false, v_p);
+ vpats.insert<bool> ("config.**.configured", false, v_p);
- // file.cxx:import() (note: order is important; see insert_pattern()).
+ // file.cxx:import()
+ //
+ // Note: the order is important (see variable_patterns::insert()).
//
// Note that if any are overriden, they are "pre-typed" by the config.**
// pattern above and we just "add" the types.
//
- vp.insert_pattern<abs_dir_path> ("config.import.*", true, v_g, true);
- vp.insert_pattern<path> ("config.import.**", true, v_g, true);
+ vpats.insert<abs_dir_path> ("config.import.*", true, v_g, true);
+ vpats.insert<path> ("config.import.**", true, v_g, true);
// module.cxx:boot/init_module().
//
// Note that we also have the config.<module>.configured variable (see
// above).
//
- vp.insert_pattern<bool> ("**.booted", false /* overridable */, v_p);
- vp.insert_pattern<bool> ("**.loaded", false, v_p);
- vp.insert_pattern<bool> ("**.configured", false, v_p);
+ vpats.insert<bool> ("**.booted", false /* overridable */, v_p);
+ vpats.insert<bool> ("**.loaded", false, v_p);
+ vpats.insert<bool> ("**.configured", false, v_p);
var_src_root = &vp.insert<dir_path> ("src_root");
var_out_root = &vp.insert<dir_path> ("out_root");
@@ -588,13 +682,15 @@ namespace build2
var_extension = &vp.insert<string> ("extension", v_t);
var_update = &vp.insert<string> ("update", v_q);
var_clean = &vp.insert<bool> ("clean", v_t);
- var_backlink = &vp.insert<string> ("backlink", v_t);
+ var_backlink = &vp.insert ("backlink", v_t); // Untyped.
var_include = &vp.insert<string> ("include", v_q);
// Backlink executables and (generated) documentation by default.
//
- gs.target_vars[exe::static_type]["*"].assign (var_backlink) = "true";
- gs.target_vars[doc::static_type]["*"].assign (var_backlink) = "true";
+ gs.target_vars[exe::static_type]["*"].assign (var_backlink) =
+ names {name ("true")};
+ gs.target_vars[doc::static_type]["*"].assign (var_backlink) =
+ names {name ("true")};
// Register builtin rules.
//
@@ -609,6 +705,46 @@ namespace build2
r.insert<mtime_target> (perform_update_id, "build.file", file_rule::instance);
r.insert<mtime_target> (perform_clean_id, "build.file", file_rule::instance);
}
+
+ // End of initialization.
+ //
+ load_generation = 1;
+ }
+
+ context::
+ context ()
+ : data_ (new data (*this)),
+ sched (nullptr),
+ mutexes (nullptr),
+ fcache (nullptr),
+ match_only (nullopt),
+ no_external_modules (true),
+ dry_run_option (false),
+ no_diag_buffer (false),
+ keep_going (false),
+ phase_mutex (*this),
+ scopes (data_->scopes),
+ targets (data_->targets),
+ var_pool (data_->var_pool),
+ var_patterns (data_->var_patterns),
+ var_overrides (data_->var_overrides),
+ functions (data_->functions),
+ global_scope (create_global_scope (data_->scopes)),
+ global_target_types (data_->global_target_types),
+ global_override_cache (data_->global_override_cache),
+ global_var_overrides (data_->global_var_overrides),
+ modules_lock (nullptr),
+ module_context (nullptr)
+ {
+ variable_pool& vp (data_->var_pool);
+
+ var_src_root = &vp.insert<dir_path> ("src_root");
+ var_out_root = &vp.insert<dir_path> ("out_root");
+
+ var_project = &vp.insert<project_name> ("project");
+ var_amalgamation = &vp.insert<dir_path> ("amalgamation");
+
+ load_generation = 1;
}
context::
@@ -620,7 +756,8 @@ namespace build2
void context::
enter_project_overrides (scope& rs,
const dir_path& out_base,
- const variable_overrides& ovrs)
+ const variable_overrides& ovrs,
+ scope* as)
{
// The mildly tricky part here is to distinguish the situation where we
// are bootstrapping the same project multiple times. The first override
@@ -645,7 +782,7 @@ namespace build2
scope& s (
o.dir
? *sm.insert_out ((out_base / *o.dir).normalize ())->second.front ()
- : *rs.weak_scope ());
+ : *(as != nullptr ? as : (as = rs.weak_scope ())));
auto p (s.vars.insert (o.ovr));
@@ -688,6 +825,7 @@ namespace build2
}
current_mif = &mif;
+ current_mdata = current_data_ptr (nullptr, null_current_data_deleter);
current_on = 0; // Reset.
}
@@ -701,38 +839,22 @@ namespace build2
current_oname = oif.name;
current_inner_oif = &inner_oif;
current_outer_oif = outer_oif;
+ current_inner_odata = current_data_ptr (nullptr, null_current_data_deleter);
+ current_outer_odata = current_data_ptr (nullptr, null_current_data_deleter);
current_on++;
current_mode = inner_oif.mode;
current_diag_noise = diag_noise;
- auto find_ovar = [this] (const char* n)
- {
- const variable* v (var_pool.find (n));
-
- // The operation variable should have prerequisite or target visibility.
- //
- assert (v != nullptr &&
- (v->visibility == variable_visibility::prereq ||
- v->visibility == variable_visibility::target));
-
- return v;
- };
-
- current_inner_ovar =
- inner_oif.var_name != nullptr
- ? find_ovar (inner_oif.var_name)
- : nullptr;
-
- current_outer_ovar =
- outer_oif != nullptr && outer_oif->var_name != nullptr
- ? find_ovar (outer_oif->var_name)
- : nullptr;
-
// Reset counters (serial execution).
//
dependency_count.store (0, memory_order_relaxed);
target_count.store (0, memory_order_relaxed);
skip_count.store (0, memory_order_relaxed);
+ resolve_count.store (0, memory_order_relaxed);
+
+ // Clear accumulated targets with post hoc prerequisites.
+ //
+ current_posthoc_targets.clear ();
}
bool run_phase_mutex::
@@ -767,11 +889,11 @@ namespace build2
{
++contention; // Protected by m_.
- ctx_.sched.deactivate (false /* external */);
+ ctx_.sched->deactivate (false /* external */);
for (; ctx_.phase != n; v->wait (l)) ;
r = !fail_;
l.unlock (); // Important: activate() can block.
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
}
else
r = !fail_;
@@ -783,9 +905,9 @@ namespace build2
{
if (!lm_.try_lock ())
{
- ctx_.sched.deactivate (false /* external */);
+ ctx_.sched->deactivate (false /* external */);
lm_.lock ();
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
++contention_load; // Protected by lm_.
}
@@ -835,9 +957,9 @@ namespace build2
// relock().
//
if (o == run_phase::match && n == run_phase::execute)
- ctx_.sched.push_phase ();
+ ctx_.sched->push_phase ();
else if (o == run_phase::execute && n == run_phase::match)
- ctx_.sched.pop_phase ();
+ ctx_.sched->pop_phase ();
if (v != nullptr)
{
@@ -892,9 +1014,9 @@ namespace build2
// unlock().
//
if (o == run_phase::match && n == run_phase::execute)
- ctx_.sched.push_phase ();
+ ctx_.sched->push_phase ();
else if (o == run_phase::execute && n == run_phase::match)
- ctx_.sched.pop_phase ();
+ ctx_.sched->pop_phase ();
// Notify others that could be waiting for this phase.
//
@@ -908,11 +1030,11 @@ namespace build2
{
++contention; // Protected by m_.
- ctx_.sched.deactivate (false /* external */);
+ ctx_.sched->deactivate (false /* external */);
for (; ctx_.phase != n; v->wait (l)) ;
r = !fail_;
l.unlock (); // Important: activate() can block.
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
}
}
@@ -927,9 +1049,9 @@ namespace build2
//
s = false;
- ctx_.sched.deactivate (false /* external */);
+ ctx_.sched->deactivate (false /* external */);
lm_.lock ();
- ctx_.sched.activate (false /* external */);
+ ctx_.sched->activate (false /* external */);
++contention_load; // Protected by lm_.
}
@@ -1002,35 +1124,35 @@ namespace build2
// phase_unlock
//
phase_unlock::
- phase_unlock (context& c, bool u, bool d)
- : ctx (u ? &c : nullptr), lock (nullptr)
+ phase_unlock (context* c, bool d)
+ : ctx (c), lock_ (nullptr)
{
- if (u && !d)
+ if (ctx != nullptr && !d)
unlock ();
}
void phase_unlock::
unlock ()
{
- if (ctx != nullptr && lock == nullptr)
+ if (ctx != nullptr && lock_ == nullptr)
{
- lock = phase_lock_instance;
- assert (&lock->ctx == ctx);
+ lock_ = phase_lock_instance;
+ assert (&lock_->ctx == ctx);
phase_lock_instance = nullptr; // Note: not lock->prev.
- ctx->phase_mutex.unlock (lock->phase);
+ ctx->phase_mutex.unlock (lock_->phase);
- //text << this_thread::get_id () << " phase unlock " << lock->phase;
+ //text << this_thread::get_id () << " phase unlock " << lock_->phase;
}
}
- phase_unlock::
- ~phase_unlock () noexcept (false)
+ void phase_unlock::
+ lock ()
{
- if (lock != nullptr)
+ if (lock_ != nullptr)
{
- bool r (ctx->phase_mutex.lock (lock->phase));
- phase_lock_instance = lock;
+ bool r (ctx->phase_mutex.lock (lock_->phase));
+ phase_lock_instance = lock_;
// Fail unless we are already failing. Note that we keep the phase
// locked since there will be phase_lock down the stack to unlock it.
@@ -1038,10 +1160,16 @@ namespace build2
if (!r && !uncaught_exception ())
throw failed ();
- //text << this_thread::get_id () << " phase lock " << lock->phase;
+ //text << this_thread::get_id () << " phase lock " << lock_->phase;
}
}
+ phase_unlock::
+ ~phase_unlock () noexcept (false)
+ {
+ lock ();
+ }
+
// phase_switch
//
phase_switch::
diff --git a/libbuild2/context.hxx b/libbuild2/context.hxx
index 6eb85f7..33fc892 100644
--- a/libbuild2/context.hxx
+++ b/libbuild2/context.hxx
@@ -21,7 +21,7 @@
namespace build2
{
class file_cache;
- class loaded_modules_lock;
+ class module_libraries_lock;
class LIBBUILD2_SYMEXPORT run_phase_mutex
{
@@ -120,6 +120,16 @@ namespace build2
}
};
+ // Match-only level.
+ //
+ // See the --match-only and --load-only options for background.
+ //
+ enum class match_only_level
+ {
+ alias, // Match only alias{} targets.
+ all // Match all targets.
+ };
+
// A build context encapsulates the state of a build. It is possible to have
// multiple build contexts provided they are non-overlapping, that is, they
// don't try to build the same projects (note that this is currently not
@@ -142,9 +152,9 @@ namespace build2
// instead go the multiple communicating schedulers route, a la the job
// server).
//
- // The loaded_modules state (module.hxx) is shared among all the contexts
+ // The module_libraries state (module.hxx) is shared among all the contexts
// (there is no way to have multiple shared library loading "contexts") and
- // is protected by loaded_modules_lock. A nested context should normally
+ // is protected by module_libraries_lock. A nested context should normally
// inherit this lock value from its outer context.
//
// Note also that any given thread should not participate in multiple
@@ -160,17 +170,66 @@ namespace build2
//
class LIBBUILD2_SYMEXPORT context
{
+ public:
+ // In order to perform each operation the build system goes through the
+ // following phases:
+ //
+ // load - load the buildfiles
+ // match - search prerequisites and match rules
+ // execute - execute the matched rule
+ //
+ // The build system starts with a "serial load" phase and then continues
+ // with parallel match and execute. Match, however, can be interrupted
+ // both with load and execute.
+ //
+ // Match can be interrupted with "exclusive load" in order to load
+ // additional buildfiles. Similarly, it can be interrupted with (parallel)
+ // execute in order to build targetd required to complete the match (for
+ // example, generated source code or source code generators themselves).
+ //
+ // Such interruptions are performed by phase change that is protected by
+ // phase_mutex (which is also used to synchronize the state changes
+ // between phases).
+ //
+ // Serial load can perform arbitrary changes to the build state. Exclusive
+ // load, however, can only perform "island appends". That is, it can
+ // create new "nodes" (variables, scopes, etc) but not (semantically)
+ // change already existing nodes or invalidate any references to such (the
+ // idea here is that one should be able to load additional buildfiles as
+ // long as they don't interfere with the existing build state). The
+ // "islands" are identified by the load_generation number (1 for the
+ // initial/serial load). It is incremented in case of a phase switch and
+ // can be stored in various "nodes" to verify modifications are only done
+ // "within the islands". Another example of invalidation would be
+ // insertion of a new scope "under" an existing target thus changing its
+ // scope hierarchy (and potentially even its base scope). This would be
+ // bad because we may have made decisions based on the original hierarchy,
+ // for example, we may have queried a variable which in the new hierarchy
+ // would "see" a new value from the newly inserted scope.
+ //
+ // The special load_generation value 0 indicates initialization before
+ // anything has been loaded. Currently, it is changed to 1 at the end
+ // of the context constructor.
+ //
+ // Note: must come (and thus be initialized) before the data_ member.
+ //
+ run_phase phase = run_phase::load;
+ size_t load_generation = 0;
+
+ private:
struct data;
unique_ptr<data> data_;
public:
- scheduler& sched;
- global_mutexes& mutexes;
- file_cache& fcache;
+ // These are only NULL for the "bare minimum" context (see below).
+ //
+ scheduler* sched;
+ global_mutexes* mutexes;
+ file_cache* fcache;
- // Match only flag (see --match-only but also dist).
+ // Match only flag/level (see --{load,match}-only but also dist).
//
- bool match_only;
+ optional<match_only_level> match_only;
// Skip booting external modules flag (see --no-external-modules).
//
@@ -211,6 +270,10 @@ namespace build2
bool dry_run = false;
bool dry_run_option;
+ // Diagnostics buffering flag (--no-diag-buffer).
+ //
+ bool no_diag_buffer;
+
// Keep going flag.
//
// Note that setting it to false is not of much help unless we are running
@@ -219,44 +282,13 @@ namespace build2
//
bool keep_going;
- // In order to perform each operation the build system goes through the
- // following phases:
+ // Targets to trace (see the --trace-* options).
//
- // load - load the buildfiles
- // match - search prerequisites and match rules
- // execute - execute the matched rule
- //
- // The build system starts with a "serial load" phase and then continues
- // with parallel match and execute. Match, however, can be interrupted
- // both with load and execute.
+ // Note that these must be set after construction and must remain valid
+ // for the lifetime of the context instance.
//
- // Match can be interrupted with "exclusive load" in order to load
- // additional buildfiles. Similarly, it can be interrupted with (parallel)
- // execute in order to build targetd required to complete the match (for
- // example, generated source code or source code generators themselves).
- //
- // Such interruptions are performed by phase change that is protected by
- // phase_mutex (which is also used to synchronize the state changes
- // between phases).
- //
- // Serial load can perform arbitrary changes to the build state. Exclusive
- // load, however, can only perform "island appends". That is, it can
- // create new "nodes" (variables, scopes, etc) but not (semantically)
- // change already existing nodes or invalidate any references to such (the
- // idea here is that one should be able to load additional buildfiles as
- // long as they don't interfere with the existing build state). The
- // "islands" are identified by the load_generation number (0 for the
- // initial/serial load). It is incremented in case of a phase switch and
- // can be stored in various "nodes" to verify modifications are only done
- // "within the islands". Another example of invalidation would be
- // insertion of a new scope "under" an existing target thus changing its
- // scope hierarchy (and potentially even its base scope). This would be
- // bad because we may have made decisions based on the original hierarchy,
- // for example, we may have queried a variable which in the new hierarchy
- // would "see" a new value from the newly inserted scope.
- //
- run_phase phase = run_phase::load;
- size_t load_generation = 0;
+ const vector<name>* trace_match = nullptr;
+ const vector<name>* trace_execute = nullptr;
// A "tri-mutex" that keeps all the threads in one of the three phases.
// When a thread wants to switch a phase, it has to wait for all the other
@@ -301,11 +333,6 @@ namespace build2
const operation_info* current_inner_oif;
const operation_info* current_outer_oif;
- // Current operation-specific variables.
- //
- const variable* current_inner_ovar;
- const variable* current_outer_ovar;
-
action
current_action () const
{
@@ -324,6 +351,22 @@ namespace build2
(current_mname.empty () && current_oname == mo));
};
+ // Meta/operation-specific context-global auxiliary data storage.
+ //
+ // Note: cleared by current_[meta_]operation() below. Normally set by
+ // meta/operation-specific callbacks from [meta_]operation_info.
+ //
+ // Note also: watch out for MT-safety in the data itself.
+ //
+ static void
+ null_current_data_deleter (void* p) { assert (p == nullptr); }
+
+ using current_data_ptr = unique_ptr<void, void (*) (void*)>;
+
+ current_data_ptr current_mdata = {nullptr, null_current_data_deleter};
+ current_data_ptr current_inner_odata = {nullptr, null_current_data_deleter};
+ current_data_ptr current_outer_odata = {nullptr, null_current_data_deleter};
+
// Current operation number (1-based) in the meta-operation batch.
//
size_t current_on;
@@ -362,26 +405,46 @@ namespace build2
// decremented after such recipe has been executed. If such a recipe has
// skipped executing the operation, then it should increment the skip
// count. These two counters are used for progress monitoring and
- // diagnostics.
+ // diagnostics. The resolve count keeps track of the number of targets
+ // matched but not executed as a result of the resolve_members() calls
+ // (see also target::resolve_counted).
//
atomic_count dependency_count;
atomic_count target_count;
atomic_count skip_count;
+ atomic_count resolve_count;
// Build state (scopes, targets, variables, etc).
//
const scope_map& scopes;
target_set& targets;
- const variable_pool& var_pool;
+ const variable_pool& var_pool; // Public variables pool.
+ const variable_patterns& var_patterns; // Public variables patterns.
const variable_overrides& var_overrides; // Project and relative scope.
function_map& functions;
- // Enter project-wide (as opposed to global) variable overrides.
+ // Current targets with post hoc prerequisites.
//
- void
- enter_project_overrides (scope& rs,
- const dir_path& out_base,
- const variable_overrides&);
+ // Note that we don't expect many of these so a simple mutex should be
+ // sufficient. Note also that we may end up adding more entries as we
+ // match existing so use list for node and iterator stability. See
+ // match_posthoc() for details.
+ //
+ struct posthoc_target
+ {
+ struct prerequisite_target
+ {
+ const build2::target* target;
+ uint64_t match_options;
+ };
+
+ build2::action action;
+ reference_wrapper<const build2::target> target;
+ vector<prerequisite_target> prerequisite_targets;
+ };
+
+ list<posthoc_target> current_posthoc_targets;
+ mutex current_posthoc_targets_mutex;
// Global scope.
//
@@ -390,6 +453,10 @@ namespace build2
variable_override_cache& global_override_cache;
const strings& global_var_overrides;
+ // Cached values (from global scope).
+ //
+ const target_triplet* build_host; // build.host
+
// Cached variables.
//
@@ -419,8 +486,8 @@ namespace build2
const variable* var_import_build2;
const variable* var_import_target;
- // The import.metadata variable and the --build2-metadata option are used
- // to pass the metadata compatibility version.
+ // The import.metadata export stub variable and the --build2-metadata
+ // executable option are used to pass the metadata compatibility version.
//
// This serves both as an indication that the metadata is required (can be
// useful, for example, in cases where it is expensive to calculate) as
@@ -432,7 +499,8 @@ namespace build2
// The export.metadata value should start with the version followed by the
// metadata variable prefix (for example, cli in cli.version).
//
- // The following metadata variable names have pre-defined meaning:
+ // The following metadata variable names have pre-defined meaning for
+ // executable targets (exe{}; see also process_path_ex):
//
// <var-prefix>.name = [string] # Stable name for diagnostics.
// <var-prefix>.version = [string] # Version for diagnostics.
@@ -442,7 +510,8 @@ namespace build2
// If the <var-prefix>.name variable is missing, it is set to the target
// name as imported.
//
- // See also process_path_ex.
+ // Note that the same mechanism is used for library user metadata (see
+ // cc::pkgconfig_{load,save}() for details).
//
const variable* var_import_metadata;
const variable* var_export_metadata;
@@ -473,18 +542,51 @@ namespace build2
//
const variable* var_clean;
- // Forwarded configuration backlink mode. Valid values are:
+ // Forwarded configuration backlink mode. The value has two components
+ // in the form:
+ //
+ // <mode> [<print>]
+ //
+ // Valid <mode> values are:
//
// false - no link.
// true - make a link using appropriate mechanism.
// symbolic - make a symbolic link.
// hard - make a hard link.
// copy - make a copy.
- // overwrite - copy over but don't remove on clean (committed gen code).
+ // overwrite - copy over but don't remove on clean.
+ // group - inherit the group mode (only valid for group members).
//
- // Note that it can be set by a matching rule as a rule-specific variable.
+ // The <print> component should be either true or false and can be used
+ // to suppress printing of specific ad hoc group members at verbosity
+ // level 1. Note that it cannot be false for the primary member.
//
- // [string] target visibility
+ // Note that this value can be set by a matching rule as a rule-specific
+ // variable.
+ //
+ // Note also that the overwrite mode was originally meant for handling
+ // pregenerated source code. But in the end this did not pan out for
+ // the following reasons:
+ //
+ // 1. This would mean that the pregenerated and regenerated files end up
+ // in the same place (e.g., depending on the develop mode) and it's
+ // hard to make this work without resorting to a conditional graph.
+ //
+ // This could potentially be addressed by allowing backlink to specify
+ // a different location (similar to dist).
+ //
+ // 2. This support for pregenerated source code would be tied to forwarded
+ // configurations.
+ //
+ // Nevertheless, there may be a kernel of an idea here in that we may be
+ // able to provide a built-in "post-copy" mechanism which would allow one
+ // to have a pregenerated setup even when using non-ad hoc recipes
+ // (currently we just manually diff/copy stuff at the end of a recipe).
+ // (Or maybe we should stick to ad hoc recipes with post-diff/copy and
+ // just expose a mechanism to delegate to a different rule, which we
+ // already have).
+ //
+ // [names] target visibility
//
const variable* var_backlink;
@@ -512,7 +614,7 @@ namespace build2
// operations. The initial idea was to extend this value to allow
// specifying the operation (e.g., clean@false). However, later we
// realized that we could reuse the "operation-specific variables"
- // (update, clean, install, test; see context::current_ovar) with a more
+ // (update, clean, install, test; see project_operation_info) with a more
// natural-looking and composable result. Plus, this allows for
// operation-specific "modifiers", for example, "unmatch" and "update
// during match" logic for update (see var_update for details) or
@@ -565,9 +667,9 @@ namespace build2
dir_path old_src_root;
dir_path new_src_root;
- // NULL if this context hasn't already locked the loaded_modules state.
+ // NULL if this context hasn't already locked the module_libraries state.
//
- const loaded_modules_lock* modules_lock;
+ const module_libraries_lock* modules_lock;
// Nested context for updating build system modules and ad hoc recipes.
//
@@ -584,17 +686,76 @@ namespace build2
// properly setup context (including, normally, a self-reference in
// modules_context).
//
- explicit
+ // The var_override_function callback can be used to parse ad hoc project-
+ // wide variable overrides (see parse_variable_override()). This has to
+ // happen at a specific point during context construction (see the
+ // implementation for details).
+ //
+ // Note: see also the trace_* data members that, if needed, must be set
+ // separately, after construction.
+ //
+ struct reserves
+ {
+ size_t targets;
+ size_t variables;
+
+ reserves (): targets (0), variables (0) {}
+ reserves (size_t t, size_t v): targets (t), variables (v) {}
+ };
+
+ using var_override_function = void (context&, size_t&);
+
context (scheduler&,
global_mutexes&,
file_cache&,
- bool match_only = false,
+ optional<match_only_level> match_only = nullopt,
bool no_external_modules = false,
bool dry_run = false,
+ bool no_diag_buffer = false,
bool keep_going = true,
const strings& cmd_vars = {},
+ reserves = {0, 160},
optional<context*> module_context = nullptr,
- const loaded_modules_lock* inherited_mudules_lock = nullptr);
+ const module_libraries_lock* inherited_modules_lock = nullptr,
+ const function<var_override_function>& = nullptr);
+
+ // Special context with bare minimum of initializations. It is only
+ // guaranteed to be sufficiently initialized to call extract_variable().
+ //
+ // Note that for this purpose you may omit calls to init_diag() and
+ // init().
+ //
+ context ();
+
+ // Reserve elements in containers to avoid re-allocation/re-hashing. Zero
+ // values are ignored (that is, the corresponding container reserve()
+ // function is not called). Can only be called in the load phase.
+ //
+ void
+ reserve (reserves);
+
+ // Parse a variable override returning its type in the first half of the
+ // pair. Index is the variable index (used to derive unique name) and if
+ // buildspec is true then assume `--` is used as a separator between
+ // variables and buildspec and issue appropriate diagnostics.
+ //
+ // Note: should only be called from the var_override_function constructor
+ // callback.
+ //
+ pair<char, variable_override>
+ parse_variable_override (const string& var, size_t index, bool buildspec);
+
+ // Enter project-wide (as opposed to global) variable overrides.
+ //
+ // If the amalgamation scope is specified, then use it instead of
+ // rs.weak_scope() to set overrides with global visibility (make sure you
+ // understand the implications before doing this).
+ //
+ void
+ enter_project_overrides (scope& rs,
+ const dir_path& out_base,
+ const variable_overrides&,
+ scope* amalgamation = nullptr);
// Set current meta-operation and operation.
//
@@ -688,14 +849,20 @@ namespace build2
//
struct LIBBUILD2_SYMEXPORT phase_unlock
{
- phase_unlock (context&, bool unlock = true, bool delay = false);
+ explicit phase_unlock (context*, bool delay = false);
+ explicit phase_unlock (context& ctx, bool delay = false)
+ : phase_unlock (&ctx, delay) {}
+
~phase_unlock () noexcept (false);
void
unlock ();
+ void
+ lock ();
+
context* ctx;
- phase_lock* lock;
+ phase_lock* lock_;
};
// Assuming we have a lock on the current phase, temporarily switch to a
@@ -741,8 +908,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- wait_guard (wait_guard&&);
- wait_guard& operator= (wait_guard&&);
+ wait_guard (wait_guard&&) noexcept;
+ wait_guard& operator= (wait_guard&&) noexcept;
wait_guard (const wait_guard&) = delete;
wait_guard& operator= (const wait_guard&) = delete;
diff --git a/libbuild2/context.ixx b/libbuild2/context.ixx
index 4f86c28..6c8c428 100644
--- a/libbuild2/context.ixx
+++ b/libbuild2/context.ixx
@@ -31,7 +31,7 @@ namespace build2
}
inline wait_guard::
- wait_guard (wait_guard&& x)
+ wait_guard (wait_guard&& x) noexcept
: ctx (x.ctx),
start_count (x.start_count),
task_count (x.task_count),
@@ -41,7 +41,7 @@ namespace build2
}
inline wait_guard& wait_guard::
- operator= (wait_guard&& x)
+ operator= (wait_guard&& x) noexcept
{
if (&x != this)
{
@@ -56,8 +56,8 @@ namespace build2
inline void wait_guard::
wait ()
{
- phase_unlock u (*ctx, phase, true /* delay */);
- ctx->sched.wait (start_count, *task_count, u);
+ phase_unlock u (phase ? ctx : nullptr, true /* delay */);
+ ctx->sched->wait (start_count, *task_count, u);
task_count = nullptr;
}
}
diff --git a/libbuild2/cxx/init.cxx b/libbuild2/cxx/init.cxx
index 3a934db..8159d18 100644
--- a/libbuild2/cxx/init.cxx
+++ b/libbuild2/cxx/init.cxx
@@ -7,10 +7,12 @@
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/config/utility.hxx>
+#include <libbuild2/install/utility.hxx>
#include <libbuild2/cc/guess.hxx>
#include <libbuild2/cc/module.hxx>
+#include <libbuild2/cc/target.hxx> // pc*
#include <libbuild2/cxx/target.hxx>
#ifndef BUILD2_DEFAULT_CXX
@@ -62,8 +64,8 @@ namespace build2
uint64_t mi (ci.version.minor);
uint64_t p (ci.version.patch);
- // Besides various `c++NN` we have two special values: `latest` and
- // `experimental`.
+ // Besides various `NN` we have two special values: `latest` and
+ // `experimental`. It can also be `gnu++NN`.
//
// The semantics of the `latest` value is the latest available standard
// that is not necessarily complete or final but is practically usable.
@@ -91,9 +93,27 @@ namespace build2
bool latest (v != nullptr && *v == "latest");
bool experimental (v != nullptr && *v == "experimental");
+ // This helper helps recognize both NN and [cC]++NN to avoid an endless
+ // stream of user questions. It can also be used to recognize Nx in
+ // addition to NN (e.g., "14" and "1y").
+ //
+ auto stdcmp = [v] (const char* nn, const char* nx = nullptr)
+ {
+ if (v != nullptr)
+ {
+ const char* s (v->c_str ());
+ if ((s[0] == 'c' || s[0] == 'C') && s[1] == '+' && s[2] == '+')
+ s += 3;
+
+ return strcmp (s, nn) == 0 || (nx != nullptr && strcmp (s, nx) == 0);
+ }
+
+ return false;
+ };
+
// Feature flags.
//
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */)); // All qualified.
// Similar to config.cxx.std, config.cxx.features.* overrides
// cxx.features.*.
@@ -157,6 +177,10 @@ namespace build2
i = mode.insert (i, move (o)) + 1;
};
+ // Derive approximate __cplusplus value from the standard if possible.
+ //
+ optional<uint32_t> cplusplus;
+
switch (cl)
{
case compiler_class::msvc:
@@ -186,6 +210,26 @@ namespace build2
{
if (v14_3)
o = "/std:c++latest";
+
+ // According to the documentation:
+ //
+ // "The value of __cplusplus with the /std:c++latest option
+ // depends on the version of Visual Studio. It's always at least
+ // one higher than the highest supported __cplusplus standard
+ // value supported by your version of Visual Studio."
+ //
+ if (v16_11)
+ cplusplus = 202002 + 1;
+ else if (v16_0)
+ cplusplus = 201703 + 1;
+ else if (v14_3)
+ cplusplus = 201402 + 1;
+ else if (mj >= 19)
+ cplusplus = 201402;
+ else if (mj >= 16)
+ cplusplus = 201103;
+ else
+ cplusplus = 199711;
}
else if (latest)
{
@@ -194,55 +238,78 @@ namespace build2
// for this mode. So starting from 16 we only enable it in
// `experimental`.
//
+ // Note: no /std:c++23 yet as of MSVC 17.6.
+ //
if (v16_11)
o = "/std:c++20";
else if (v16_0)
o = "/std:c++17";
else if (v14_3)
o = "/std:c++latest";
+
+ if (v16_11)
+ cplusplus = 202002;
+ else if (v16_0)
+ cplusplus = 201703;
+ else if (v14_3)
+ cplusplus = 201402 + 1;
+ else if (mj >= 19)
+ cplusplus = 201402;
+ else if (mj >= 16)
+ cplusplus = 201103;
+ else
+ cplusplus = 199711;
}
else if (v == nullptr)
- ;
- else if (*v != "98" && *v != "03")
+ {
+ // @@ TODO: map defaults to cplusplus for each version.
+ }
+ else if (!stdcmp ("98") && !stdcmp ("03"))
{
bool sup (false);
- if (*v == "11") // C++11 since VS2010/10.0.
+ if (stdcmp ("11", "0x")) // C++11 since VS2010/10.0.
{
sup = mj >= 16;
+ cplusplus = 201103;
}
- else if (*v == "14") // C++14 since VS2015/14.0.
+ else if (stdcmp ("14", "1y")) // C++14 since VS2015/14.0.
{
sup = mj >= 19;
+ cplusplus = 201402;
}
- else if (*v == "17") // C++17 since VS2015/14.0u2.
+ else if (stdcmp ("17", "1z")) // C++17 since VS2015/14.0u2.
{
// Note: the VC15 compiler version is 19.10.
//
sup = (mj > 19 ||
(mj == 19 && (mi > 0 || (mi == 0 && p >= 23918))));
+ cplusplus = 201703;
}
- else if (*v == "20") // C++20 since VS2019/16.11.
+ else if (stdcmp ("20", "2a")) // C++20 since VS2019/16.11.
{
sup = v16_11;
+ cplusplus = 202002;
}
if (!sup)
- fail << "C++" << *v << " is not supported by " << ci.signature <<
+ fail << "C++ " << *v << " is not supported by " << ci.signature <<
info << "required by " << project (rs) << '@' << rs;
if (v15_3)
{
- if (*v == "20") o = "/std:c++20";
- else if (*v == "17") o = "/std:c++17";
- else if (*v == "14") o = "/std:c++14";
+ if (stdcmp ("20", "2a")) o = "/std:c++20";
+ else if (stdcmp ("17", "1z")) o = "/std:c++17";
+ else if (stdcmp ("14", "1y")) o = "/std:c++14";
}
else if (v14_3)
{
- if (*v == "14") o = "/std:c++14";
- else if (*v == "17") o = "/std:c++latest";
+ if (stdcmp ("14", "1y")) o = "/std:c++14";
+ else if (stdcmp ("17", "1z")) o = "/std:c++latest";
}
}
+ else
+ cplusplus = 199711;
if (!o.empty ())
prepend (move (o));
@@ -268,11 +335,33 @@ namespace build2
{
case compiler_type::gcc:
{
- if (mj >= 11) o = "-std=c++23"; // 23
- else if (mj >= 8) o = "-std=c++2a"; // 20
- else if (mj >= 5) o = "-std=c++1z"; // 17
- else if (mj == 4 && mi >= 8) o = "-std=c++1y"; // 14
- else if (mj == 4 && mi >= 4) o = "-std=c++0x"; // 11
+ if (mj >= 11)
+ {
+ o = "-std=c++23";
+ cplusplus = 202302;
+ }
+ else if (mj >= 8)
+ {
+ o = "-std=c++2a";
+ cplusplus = 202002;
+ }
+ else if (mj >= 5)
+ {
+ o = "-std=c++1z";
+ cplusplus = 201703;
+ }
+ else if (mj == 4 && mi >= 8)
+ {
+ o = "-std=c++1y";
+ cplusplus = 201402;
+ }
+ else if (mj == 4 && mi >= 4)
+ {
+ o = "-std=c++0x";
+ cplusplus = 201103;
+ }
+ else
+ cplusplus = 199711;
break;
}
@@ -290,21 +379,56 @@ namespace build2
// MSVC.
//
- if (mj >= 13) o = "-std=c++2b";
- else if (mj == 10 &&
- latest && tt.system == "win32-msvc") o = "-std=c++17";
- else if (mj >= 5) o = "-std=c++2a";
- else if (mj > 3 || (mj == 3 && mi >= 5)) o = "-std=c++1z";
- else if (mj == 3 && mi >= 4) o = "-std=c++1y";
- else /* ??? */ o = "-std=c++0x";
+ if (mj >= 13)
+ {
+ o = "-std=c++2b";
+ cplusplus = 202302;
+ }
+ else if (mj == 10 && latest && tt.system == "win32-msvc")
+ {
+ o = "-std=c++17";
+ cplusplus = 201703;
+ }
+ else if (mj >= 5)
+ {
+ o = "-std=c++2a";
+ cplusplus = 202002;
+ }
+ else if (mj > 3 || (mj == 3 && mi >= 5))
+ {
+ o = "-std=c++1z";
+ cplusplus = 201703;
+ }
+ else if (mj == 3 && mi >= 4)
+ {
+ o = "-std=c++1y";
+ cplusplus = 201402;
+ }
+ else /* ??? */
+ {
+ o = "-std=c++0x";
+ cplusplus = 201103;
+ }
break;
}
case compiler_type::icc:
{
- if (mj >= 17) o = "-std=c++1z";
- else if (mj > 15 || (mj == 15 && p >= 3)) o = "-std=c++1y";
- else /* ??? */ o = "-std=c++0x";
+ if (mj >= 17)
+ {
+ o = "-std=c++1z";
+ cplusplus = 201703;
+ }
+ else if (mj > 15 || (mj == 15 && p >= 3))
+ {
+ o = "-std=c++1y";
+ cplusplus = 201402;
+ }
+ else /* ??? */
+ {
+ o = "-std=c++0x";
+ cplusplus = 201103;
+ }
break;
}
@@ -313,22 +437,33 @@ namespace build2
}
}
else if (v == nullptr)
- ;
+ {
+ // @@ TODO: map defaults to cplusplus for each version.
+ }
else
{
- // Translate 11 to 0x, 14 to 1y, 17 to 1z, 20 to 2a, and 23 to 2b
- // for compatibility with older versions of the compilers.
+ // Translate 11 to 0x, 14 to 1y, 17 to 1z, 20 to 2a, 23 to 2b, and
+ // 26 to 2c for compatibility with older versions of the
+ // compilers.
+ //
+ // @@ TMP: update C++26 __cplusplus value once known.
//
o = "-std=";
- if (*v == "23") o += "c++2b";
- else if (*v == "20") o += "c++2a";
- else if (*v == "17") o += "c++1z";
- else if (*v == "14") o += "c++1y";
- else if (*v == "11") o += "c++0x";
- else if (*v == "03") o += "c++03";
- else if (*v == "98") o += "c++98";
- else o += *v; // In case the user specifies `gnu++NN` or some such.
+ if (stdcmp ("26", "2c")) {o += "c++2c"; cplusplus = 202400;}
+ else if (stdcmp ("23", "2b")) {o += "c++2b"; cplusplus = 202302;}
+ else if (stdcmp ("20", "2a")) {o += "c++2a"; cplusplus = 202002;}
+ else if (stdcmp ("17", "1z")) {o += "c++1z"; cplusplus = 201703;}
+ else if (stdcmp ("14", "1y")) {o += "c++1y"; cplusplus = 201402;}
+ else if (stdcmp ("11", "0x")) {o += "c++0x"; cplusplus = 201103;}
+ else if (stdcmp ("03") ) {o += "c++03"; cplusplus = 199711;}
+ else if (stdcmp ("98") ) {o += "c++98"; cplusplus = 199711;}
+ else
+ {
+ o += *v; // In case the user specifies `gnu++NN` or some such.
+
+ // @@ TODO: can we still try to derive cplusplus value?
+ }
}
if (!o.empty ())
@@ -338,6 +473,8 @@ namespace build2
}
}
+ // Additional experimental options.
+ //
if (experimental)
{
switch (ct)
@@ -357,85 +494,124 @@ namespace build2
default:
break;
}
+ }
- // Unless disabled by the user, try to enable C++ modules.
- //
- if (!modules.value || *modules.value)
+ // Unless disabled by the user, try to enable C++ modules.
+ //
+ // NOTE: see also diagnostics about modules support required in compile
+ // rule.
+ //
+ if (!modules.value || *modules.value)
+ {
+ switch (ct)
{
- switch (ct)
+ case compiler_type::msvc:
{
- case compiler_type::msvc:
+ // Modules are enabled by default in /std:c++20 and
+ // /std:c++latest with both defining __cpp_modules to 201907
+ // (final C++20 module), at least as of 17.6 (LTS).
+ //
+ // @@ Should we enable modules by default? There are still some
+ // serious bugs, like inability to both `import std;` and
+ // `#include <string>` in the same translation unit (see Visual
+ // Studio issue #10541166).
+ //
+ if (modules.value)
{
- // While modules are supported in VC 15.0 (19.10), there is a
- // bug in the separate interface/implementation unit support
- // which makes them pretty much unusable. This has been fixed in
- // 15.3 (19.11). And 15.5 (19.12) supports the `export module
- // M;` syntax. And 16.4 (19.24) supports the global module
- // fragment. And in 16.8 all the modules-related options have
- // been changed. Seeing that the whole thing is unusable anyway,
- // we disable it for 16.8 or later for now.
- //
- if ((mj > 19 || (mj == 19 && mi >= (modules.value ? 10 : 12))) &&
- (mj < 19 || (mj == 19 && mi < 28) || modules.value))
+ if (cplusplus && *cplusplus < 202002)
{
- prepend (
- mj > 19 || mi >= 24 ?
- "/D__cpp_modules=201810" : // p1103 (merged modules)
- mj == 19 || mi >= 12 ?
- "/D__cpp_modules=201704" : // p0629r0 (export module M;)
- "/D__cpp_modules=201703"); // n4647 ( module M;)
-
- prepend ("/experimental:module");
- modules = true;
+ fail << "support for C++ modules requires C++20 or later" <<
+ info << "standard in use is " << *cplusplus <<
+ info << "required by " << project (rs) << '@' << rs;
}
- break;
- }
- case compiler_type::gcc:
- {
- // We use the module mapper support which is only available
- // since GCC 11. And since we are not yet capable of supporting
- // generated headers via the mapper, we require the user to
- // explicitly request modules.
- //
- if (mj >= 11 && modules.value)
+
+ if (mj < 19 || (mj == 19 && mi < 36))
{
- // Defines __cpp_modules:
- //
- // 11 -- 201810
- //
- prepend ("-fmodules-ts");
- modules = true;
+ fail << "support for C++ modules requires MSVC 17.6 or later" <<
+ info << "C++ compiler is " << ci.signature <<
+ info << "required by " << project (rs) << '@' << rs;
}
- break;
+ modules = true;
}
- case compiler_type::clang:
+
+ break;
+ }
+ case compiler_type::gcc:
+ {
+ // We use the module mapper support which is only available since
+ // GCC 11. And since we are not yet capable of supporting
+ // generated headers via the mapper, we require the user to
+ // explicitly request modules.
+ //
+ // @@ Actually, now that we pre-generate headers by default, this
+ // is probably no longer the reason. But GCC modules being
+ // unusable due to bugs is still a reason.
+ //
+ if (modules.value)
{
- // At the time of this writing, support for C++20 modules in
- // Clang is incomplete. And starting with Clang 9 (Apple Clang
- // 11.0.3), they are enabled by default in the C++2a mode which
- // breaks the way we set things up for partial preprocessing;
- // see this post for details:
- //
- // http://lists.llvm.org/pipermail/cfe-dev/2019-October/063637.html
- //
- // As a result, for now, we only enable modules if forced with
- // explicit cxx.features.modules=true.
+ if (cplusplus && *cplusplus < 202002)
+ {
+ fail << "support for C++ modules requires C++20 or later" <<
+ info << "standard in use is " << *cplusplus <<
+ info << "required by " << project (rs) << '@' << rs;
+ }
+
+ if (mj < 11)
+ {
+ fail << "support for C++ modules requires GCC 11 or later" <<
+ info << "C++ compiler is " << ci.signature <<
+ info << "required by " << project (rs) << '@' << rs;
+ }
+
+ // Defines __cpp_modules:
//
- // Also see Clang modules support hack in cc::compile.
+ // 11 -- 201810
//
- if (modules.value)
+ prepend ("-fmodules-ts");
+ modules = true;
+ }
+
+ break;
+ }
+ case compiler_type::clang:
+ {
+ // Things (command line options, semantics) changed quite a bit
+ // around Clang 16 so we don't support anything earlier than
+ // that (it's not practically usable anyway).
+ //
+ // Clang enables modules by default in c++20 or later but they
+ // don't yet (as of Clang 18) define __cpp_modules. When they
+ // do, we can consider enabling modules by default on our side.
+ // For now, we only enable modules if forced with explicit
+ // cxx.features.modules=true.
+ //
+ if (modules.value)
+ {
+ if (cplusplus && *cplusplus < 202002)
+ {
+ fail << "support for C++ modules requires C++20 or later" <<
+ info << "standard in use is " << *cplusplus <<
+ info << "required by " << project (rs) << '@' << rs;
+ }
+
+ if (mj < 16)
{
- prepend ("-D__cpp_modules=201704"); // p0629r0
- mode.push_back ("-fmodules-ts"); // For the hack to work.
- modules = true;
+ fail << "support for C++ modules requires Clang 16 or later" <<
+ info << "C++ compiler is " << ci.signature <<
+ info << "required by " << project (rs) << '@' << rs;
}
- break;
+ // See https://github.com/llvm/llvm-project/issues/71364
+ //
+ prepend ("-D__cpp_modules=201907L");
+ modules = true;
}
- case compiler_type::icc:
- break; // No modules support yet.
+
+ break;
}
+ case compiler_type::icc:
+ break; // No modules support yet.
}
}
@@ -443,6 +619,95 @@ namespace build2
//set_feature (concepts);
}
+ // See cc::data::x_{hdr,inc} for background.
+ //
+ static const target_type* const hdr[] =
+ {
+ &hxx::static_type,
+ &ixx::static_type,
+ &txx::static_type,
+ &mxx::static_type,
+ nullptr
+ };
+
+ // Note that we don't include S{} here because none of the files we
+ // compile can plausibly want to include .S. (Maybe in inline assembler
+ // instructions?)
+ //
+ static const target_type* const inc[] =
+ {
+ &hxx::static_type,
+ &h::static_type,
+ &ixx::static_type,
+ &txx::static_type,
+ &mxx::static_type,
+ &cxx::static_type,
+ &c::static_type,
+ &mm::static_type,
+ &m::static_type,
+ &cxx_inc::static_type,
+ &cc::c_inc::static_type,
+ nullptr
+ };
+
+ bool
+ types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.types module must be loaded in project root";
+
+ // Register target types and configure their "installability".
+ //
+ using namespace install;
+
+ bool install_loaded (cast_false<bool> (rs["install.loaded"]));
+
+ // Note: not registering mm{} (it is registered separately by the
+ // respective optional .types submodule).
+ //
+ // Note: mxx{} is in hdr. @@ But maybe it shouldn't be...
+ //
+ rs.insert_target_type<cxx> ();
+
+ auto insert_hdr = [&rs, install_loaded] (const target_type& tt)
+ {
+ rs.insert_target_type (tt);
+
+ // Install headers into install.include.
+ //
+ if (install_loaded)
+ install_path (rs, tt, dir_path ("include"));
+ };
+
+ for (const target_type* const* ht (hdr); *ht != nullptr; ++ht)
+ insert_hdr (**ht);
+
+ // Also register the C header for C-derived languages.
+ //
+ insert_hdr (h::static_type);
+
+ // @@ PERF: maybe factor this to cc.types?
+ //
+ rs.insert_target_type<cc::pc> ();
+ rs.insert_target_type<cc::pca> ();
+ rs.insert_target_type<cc::pcs> ();
+
+ if (install_loaded)
+ install_path<cc::pc> (rs, dir_path ("pkgconfig"));
+
+ return true;
+ }
+
static const char* const hinters[] = {"c", nullptr};
// See cc::module for details on guess_init vs config_init.
@@ -469,15 +734,20 @@ namespace build2
// Enter all the variables and initialize the module data.
//
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
cc::config_data d {
cc::lang::cxx,
"cxx",
"c++",
+ "obj-c++",
BUILD2_DEFAULT_CXX,
".ii",
+ ".mii",
hinters,
@@ -668,8 +938,8 @@ namespace build2
vp["cc.export.libs"],
vp["cc.export.impl_libs"],
- vp["cc.pkconfig.include"],
- vp["cc.pkconfig.lib"],
+ vp["cc.pkgconfig.include"],
+ vp["cc.pkgconfig.lib"],
vp.insert<string> ("cxx.stdlib"),
@@ -681,6 +951,7 @@ namespace build2
vp["cc.module_name"],
vp["cc.importable"],
vp["cc.reprocess"],
+ vp["cc.serialize"],
// Ability to signal that source is already (partially) preprocessed.
// Valid values are 'none' (not preprocessed), 'includes' (no #include
@@ -769,27 +1040,6 @@ namespace build2
return true;
}
- static const target_type* const hdr[] =
- {
- &hxx::static_type,
- &ixx::static_type,
- &txx::static_type,
- &mxx::static_type,
- nullptr
- };
-
- static const target_type* const inc[] =
- {
- &hxx::static_type,
- &h::static_type,
- &ixx::static_type,
- &txx::static_type,
- &mxx::static_type,
- &cxx::static_type,
- &c::static_type,
- nullptr
- };
-
bool
init (scope& rs,
scope& bs,
@@ -811,7 +1061,7 @@ namespace build2
auto& cm (
load_module<config_module> (rs, rs, "cxx.config", loc, extra.hints));
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */)); // All qualified.
bool modules (cast<bool> (rs["cxx.features.modules"]));
@@ -830,8 +1080,7 @@ namespace build2
"cxx.link",
"cxx.install",
- cm.x_info->id.type,
- cm.x_info->id.variant,
+ cm.x_info->id,
cm.x_info->class_,
cm.x_info->version.major,
cm.x_info->version.minor,
@@ -864,6 +1113,7 @@ namespace build2
cxx::static_type,
modules ? &mxx::static_type : nullptr,
+ cxx_inc::static_type,
hdr,
inc
};
@@ -874,15 +1124,120 @@ namespace build2
return true;
}
+ bool
+ objcxx_types_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::objcxx_types_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.objcxx.types module must be loaded in project root";
+
+ // Register the mm{} target type.
+ //
+ rs.insert_target_type<mm> ();
+
+ return true;
+ }
+
+ bool
+ objcxx_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::objcxx_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.objcxx module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("cxx"));
+
+ if (mod == nullptr)
+ fail (loc) << "cxx.objcxx module must be loaded after cxx module";
+
+ // Register the target type and "enable" it in the module.
+ //
+ // Note that we must register the target type regardless of whether the
+ // C++ compiler is capable of compiling Objective-C++. But we enable
+ // only if it is.
+ //
+ // Note: see similar code in the c module.
+ //
+ load_module (rs, rs, "cxx.objcxx.types", loc);
+
+ // Note that while Objective-C++ is supported by MinGW GCC, it's
+ // unlikely Clang supports it when targeting MSVC or Emscripten. But
+ // let's keep the check simple for now.
+ //
+ if (mod->ctype == compiler_type::gcc ||
+ mod->ctype == compiler_type::clang)
+ mod->x_obj = &mm::static_type;
+
+ return true;
+ }
+
+ bool
+ predefs_init (scope& rs,
+ scope& bs,
+ const location& loc,
+ bool,
+ bool,
+ module_init_extra&)
+ {
+ tracer trace ("cxx::predefs_init");
+ l5 ([&]{trace << "for " << bs;});
+
+ // We only support root loading (which means there can only be one).
+ //
+ if (rs != bs)
+ fail (loc) << "cxx.predefs module must be loaded in project root";
+
+ module* mod (rs.find_module<module> ("cxx"));
+
+ if (mod == nullptr)
+ fail (loc) << "cxx.predefs module must be loaded after cxx module";
+
+ // Register the cxx.predefs rule.
+ //
+ // Why invent a separate module instead of just always registering it in
+ // the cxx module? The reason is performance: this rule will be called
+ // for every C++ header.
+ //
+ cc::predefs_rule& r (*mod);
+
+ rs.insert_rule<hxx> (perform_update_id, r.rule_name, r);
+ rs.insert_rule<hxx> (perform_clean_id, r.rule_name, r);
+ rs.insert_rule<hxx> (configure_update_id, r.rule_name, r);
+
+ return true;
+ }
+
static const module_functions mod_functions[] =
{
// NOTE: don't forget to also update the documentation in init.hxx if
// changing anything here.
- {"cxx.guess", nullptr, guess_init},
- {"cxx.config", nullptr, config_init},
- {"cxx", nullptr, init},
- {nullptr, nullptr, nullptr}
+ {"cxx.types", nullptr, types_init},
+ {"cxx.guess", nullptr, guess_init},
+ {"cxx.config", nullptr, config_init},
+ {"cxx.objcxx.types", nullptr, objcxx_types_init},
+ {"cxx.objcxx", nullptr, objcxx_init},
+ {"cxx.predefs", nullptr, predefs_init},
+ {"cxx", nullptr, init},
+ {nullptr, nullptr, nullptr}
};
const module_functions*
diff --git a/libbuild2/cxx/init.hxx b/libbuild2/cxx/init.hxx
index 094fea4..a193e74 100644
--- a/libbuild2/cxx/init.hxx
+++ b/libbuild2/cxx/init.hxx
@@ -19,9 +19,19 @@ namespace build2
//
// Submodules:
//
- // `cxx.guess` -- registers and sets some variables.
- // `cxx.config` -- loads cxx.guess and sets more variables.
- // `cxx` -- loads cxx.config and registers target types and rules.
+ // `cxx.types` -- registers target types.
+ // `cxx.guess` -- registers and sets some variables.
+ // `cxx.config` -- loads cxx.guess and sets more variables.
+ // `cxx` -- loads cxx.{types,config} and registers rules
+ // and functions.
+ //
+ // `cxx.objcxx.types` -- registers mm{} target type.
+ // `cxx.objcxx` -- loads cxx.objcxx and enables Objective-C++
+ // compilation.
+ //
+ // `cxx.predefs` -- registers rule for generating a C++ header with
+ // predefined compiler macros. Must be loaded after
+ // cxx.
//
extern "C" LIBBUILD2_CXX_SYMEXPORT const module_functions*
build2_cxx_load ();
diff --git a/libbuild2/cxx/target.cxx b/libbuild2/cxx/target.cxx
index fc50f67..37096c3 100644
--- a/libbuild2/cxx/target.cxx
+++ b/libbuild2/cxx/target.cxx
@@ -3,10 +3,6 @@
#include <libbuild2/cxx/target.hxx>
-#include <libbuild2/context.hxx>
-
-using namespace std;
-
namespace build2
{
namespace cxx
@@ -80,5 +76,32 @@ namespace build2
&file_search,
target_type::flag::none
};
+
+ extern const char mm_ext_def[] = "mm";
+ const target_type mm::static_type
+ {
+ "mm",
+ &cc::static_type,
+ &target_factory<mm>,
+ nullptr, /* fixed_extension */
+ &target_extension_var<mm_ext_def>,
+ &target_pattern_var<mm_ext_def>,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+
+ const target_type cxx_inc::static_type
+ {
+ "cxx_inc",
+ &cc::static_type,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ &target_search,
+ target_type::flag::none
+ };
}
}
diff --git a/libbuild2/cxx/target.hxx b/libbuild2/cxx/target.hxx
index b20bf00..06e8a67 100644
--- a/libbuild2/cxx/target.hxx
+++ b/libbuild2/cxx/target.hxx
@@ -7,7 +7,6 @@
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
-#include <libbuild2/target.hxx>
#include <libbuild2/cc/target.hxx>
#include <libbuild2/cxx/export.hxx>
@@ -18,6 +17,7 @@ namespace build2
{
using cc::h;
using cc::c;
+ using cc::m;
class LIBBUILD2_CXX_SYMEXPORT hxx: public cc::cc
{
@@ -88,6 +88,40 @@ namespace build2
public:
static const target_type static_type;
};
+
+ // Objective-C++ source file.
+ //
+ class LIBBUILD2_CXX_SYMEXPORT mm: public cc::cc
+ {
+ public:
+ mm (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
+ // This is an abstract base target for deriving additional targets (for
+ // example, Qt moc{}) that can be #include'd in C++ translation units. In
+ // particular, only such targets will be considered to reverse-lookup
+ // extensions to target types (see dyndep_rule::map_extension() for
+ // background).
+ //
+ class LIBBUILD2_CXX_SYMEXPORT cxx_inc: public cc::cc
+ {
+ public:
+ cxx_inc (context& c, dir_path d, dir_path o, string n)
+ : cc (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
}
}
diff --git a/libbuild2/diagnostics.cxx b/libbuild2/diagnostics.cxx
index a2a8444..4a46756 100644
--- a/libbuild2/diagnostics.cxx
+++ b/libbuild2/diagnostics.cxx
@@ -3,8 +3,10 @@
#include <libbuild2/diagnostics.hxx>
-#include <cstring> // strchr()
+#include <cstring> // strchr(), memcpy()
+#include <cstdlib> // getenv()
+#include <libbutl/fdstream.hxx> // fdterm_color()
#include <libbutl/process-io.hxx>
#include <libbuild2/scope.hxx>
@@ -13,39 +15,524 @@
#include <libbuild2/context.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
- // Diagnostics state (verbosity level, progress, etc). Keep disabled until
- // set from options.
+ // Diagnostics state (verbosity level, progress, etc). Keep default/disabled
+ // until set from options.
//
- uint16_t verb = 0;
- bool silent = true;
+ uint16_t verb = 1;
+ bool silent = false;
optional<bool> diag_progress_option;
+ optional<bool> diag_color_option;
bool diag_no_line = false;
bool diag_no_column = false;
bool stderr_term = false;
+ bool stderr_term_color = false;
void
- init_diag (uint16_t v, bool s, optional<bool> p, bool nl, bool nc, bool st)
+ init_diag (uint16_t v,
+ bool s,
+ optional<bool> p,
+ optional<bool> c,
+ bool nl,
+ bool nc,
+ bool st)
{
assert (!s || v == 0);
verb = v;
silent = s;
diag_progress_option = p;
+ diag_color_option = c;
diag_no_line = nl;
diag_no_column = nc;
stderr_term = st;
+
+ if (st)
+ {
+ // @@ TMP: eventually we want to enable on Windows by default.
+ //
+#ifdef _WIN32
+ if (c && *c)
+ {
+#endif
+ stderr_term_color = fdterm_color (stderr_fd (), !c || *c /* enable */);
+
+ // If the user specified --diag-color on POSIX we will trust the color
+ // is supported (e.g., wrong TERM value, etc).
+ //
+ if (!stderr_term_color && c && *c)
+ {
+#ifdef _WIN32
+ fail << "unable to enable diagnostics color support for stderr";
+#else
+ stderr_term_color = true;
+#endif
+ }
+
+#ifdef _WIN32
+ }
+ else
+ stderr_term_color = false;
+#endif
+ }
+ else
+ stderr_term_color = false;
}
// Stream verbosity.
//
const int stream_verb_index = ostream::xalloc ();
+ // print_diag()
+ //
+ void
+ print_diag_impl (const char* p, target_key* l, target_key&& r, const char* c)
+ {
+ // @@ Print directly to diag_stream (and below)? Won't we be holding
+ // the lock longer?
+
+ diag_record dr (text);
+
+ dr << p << ' ';
+
+ if (l != nullptr)
+ {
+ // Omit the @.../ qualification in either lhs or rhs if it's implied by
+ // the other.
+ //
+ // @@ Shouldn't we, strictly speaking, also check that they belong to
+ // the same project? Though it would be far-fetched to use another
+ // project's target from src. Or maybe not.
+ //
+ if (!l->out->empty ())
+ {
+ if (r.out->empty ())
+ l->out = &empty_dir_path;
+ }
+ else if (!r.out->empty ())
+ r.out = &empty_dir_path;
+
+ dr << *l << ' ' << (c == nullptr ? "->" : c) << ' ';
+ }
+
+ dr << r;
+ }
+
+
+ static inline bool
+ print_diag_cmp (const pair<optional<string>, const target_key*>& x,
+ const pair<optional<string>, const target_key*>& y)
+ {
+ return (x.second->dir->compare (*y.second->dir) == 0 &&
+ x.first->compare (*y.first) == 0);
+ }
+
+ // Return true if we have multiple partitions (see below for details).
+ //
+ static bool
+ print_diag_collect (const vector<target_key>& tks,
+ ostringstream& os,
+ stream_verbosity sv,
+ vector<pair<optional<string>, const target_key*>>& ns)
+ {
+ ns.reserve (tks.size ());
+
+ for (const target_key& k: tks)
+ {
+ bool r;
+ if (auto p = k.type->print)
+ r = p (os, k, true /* name_only */);
+ else
+ r = to_stream (os, k, sv, true /* name_only */);
+
+ ns.push_back (make_pair (r ? optional<string> (os.str ()) : nullopt, &k));
+
+ os.clear ();
+ os.str (string ()); // Note: just seekp(0) is not enough.
+ }
+
+ // Partition.
+ //
+ // While at it also determine whether we have multiple partitions.
+ //
+ bool ml (false);
+ for (auto b (ns.begin ()), e (ns.end ()); b != e; )
+ {
+ const pair<optional<string>, const target_key*>& x (*b++);
+
+ // Move all the elements that are equal to x to the front, preserving
+ // order.
+ //
+ b = stable_partition (
+ b, e,
+ [&x] (const pair<optional<string>, const target_key*>& y)
+ {
+ return (x.first && y.first && print_diag_cmp (x, y));
+ });
+
+ if (!ml && b != e)
+ ml = true;
+ }
+
+ return ml;
+ }
+
+ static void
+ print_diag_print (const vector<pair<optional<string>, const target_key*>>& ns,
+ ostringstream& os,
+ stream_verbosity sv,
+ const optional<string>& ml)
+ {
+ for (auto b (ns.begin ()), i (b), e (ns.end ()); i != e; )
+ {
+ if (i != b)
+ os << '\n' << *ml;
+
+ const pair<optional<string>, const target_key*>& p (*i);
+
+ if (!p.first) // Irregular.
+ {
+ os << *p.second;
+ ++i;
+ continue;
+ }
+
+ // Calculate the number of members in this partition.
+ //
+ size_t n (1);
+ for (auto j (i + 1); j != e && j->first && print_diag_cmp (*i, *j); ++j)
+ ++n;
+
+ // Similar code to to_stream(target_key).
+ //
+
+ // Print the directory.
+ //
+ {
+ const target_key& k (*p.second);
+
+ uint16_t dv (sv.path);
+
+ // Note: relative() returns empty for './'.
+ //
+ const dir_path& rd (dv < 1 ? relative (*k.dir) : *k.dir);
+
+ if (!rd.empty ())
+ {
+ if (dv < 1)
+ os << diag_relative (rd);
+ else
+ to_stream (os, rd, true /* representation */);
+ }
+ }
+
+ // Print target types.
+ //
+ {
+ if (n != 1)
+ os << '{';
+
+ for (auto j (i), e (i + n); j != e; ++j)
+ os << (j != i ? " " : "") << j->second->type->name;
+
+ if (n != 1)
+ os << '}';
+ }
+
+ // Print the target name (the same for all members of this partition).
+ //
+ os << '{' << *i->first << '}';
+
+ i += n;
+ }
+ }
+
+ template <typename L> // L can be target_key, path, or string.
+ static void
+ print_diag_impl (const char* p,
+ const L* l, bool lempty,
+ vector<target_key>&& rs,
+ const char* c)
+ {
+ assert (rs.size () > 1);
+
+ // The overall plan is as follows:
+ //
+ // 1. Collect the printed names for all the group members.
+ //
+ // Note if the printed representation is irregular (see
+ // to_stream(target_key) for details). We will print such members each
+ // on a separate line.
+ //
+ // 2. Move the names around so that we end up with contiguous partitions
+ // of targets with the same name.
+ //
+ // 3. Print the partitions, one per line.
+ //
+ // The steps 1-2 are performed by print_diag_impl_common() above.
+ //
+ vector<pair<optional<string>, const target_key*>> ns;
+
+ // Use the diag_record's ostringstream so that we get the appropriate
+ // stream verbosity, etc.
+ //
+ diag_record dr (text);
+ ostringstream& os (dr.os);
+ stream_verbosity sv (stream_verb (os));
+
+ optional<string> ml;
+ if (print_diag_collect (rs, os, sv, ns))
+ ml = string ();
+
+ // Print.
+ //
+ os << p << ' ';
+
+ if (l != nullptr)
+ os << *l << (lempty ? "" : " ") << (c == nullptr ? "->" : c) << ' ';
+
+ if (ml)
+ ml = string (os.str ().size (), ' '); // Indentation.
+
+ print_diag_print (ns, os, sv, ml);
+ }
+
+ template <typename R> // R can be target_key, path, or string.
+ static void
+ print_diag_impl (const char* p,
+ vector<target_key>&& ls, const R& r,
+ const char* c)
+ {
+ assert (ls.size () > 1);
+
+ // As above but for the group on the LHS.
+ //
+ vector<pair<optional<string>, const target_key*>> ns;
+
+ diag_record dr (text);
+ ostringstream& os (dr.os);
+ stream_verbosity sv (stream_verb (os));
+
+ optional<string> ml;
+ if (print_diag_collect (ls, os, sv, ns))
+ ml = string ();
+
+ // Print.
+ //
+ os << p << ' ';
+
+ if (ml)
+ ml = string (os.str ().size (), ' '); // Indentation.
+
+ print_diag_print (ns, os, sv, ml);
+
+ // @@ TODO: make sure `->` is aligned with longest line printed by
+ // print_diag_print(). Currently it can look like this:
+ //
+ // ln /tmp/hello-gcc/hello/hello/{hxx cxx}{hello-types}
+ // /tmp/hello-gcc/hello/hello/{hxx cxx}{hello-stubs}
+ // /tmp/hello-gcc/hello/hello/cxx{hello-ext} -> ./
+ //
+ os << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag_impl (const char* p,
+ target_key* l, vector<target_key>&& rs,
+ const char* c)
+ {
+ // Note: keep this implementation separate from the above for performance.
+ //
+ assert (!rs.empty ());
+
+ if (rs.size () == 1)
+ {
+ print_diag_impl (p, l, move (rs.front ()), c);
+ return;
+ }
+
+ // At the outset handle out-qualification as above. Here we assume that
+ // all the targets in the group have the same out.
+ //
+ if (l != nullptr)
+ {
+ if (!l->out->empty ())
+ {
+ if (rs.front ().out->empty ())
+ l->out = &empty_dir_path;
+ }
+ else if (!rs.front ().out->empty ())
+ {
+ for (target_key& r: rs)
+ r.out = &empty_dir_path;
+ }
+ }
+
+ print_diag_impl<target_key> (p, l, false /* empty */, move (rs), c);
+ }
+
+ // Note: these can't be inline since need the target class definition.
+ //
+ void
+ print_diag (const char* p, const target& l, const target& r, const char* c)
+ {
+ target_key lk (l.key ());
+ print_diag_impl (p, &lk, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, target_key&& l, const target& r, const char* c)
+ {
+ print_diag_impl (p, &l, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, const target& l, target_key&& r, const char* c)
+ {
+ target_key lk (l.key ());
+ print_diag_impl (p, &lk, move (r), c);
+ }
+
+ void
+ print_diag (const char* p, const path& l, const target& r, const char* c)
+ {
+ return print_diag (p, l, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, const path& l, target_key&& r, const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const path& l, vector<target_key>&& rs,
+ const char* c)
+ {
+ assert (!rs.empty ());
+
+ if (rs.size () == 1)
+ print_diag (p, l, move (rs.front ()), c);
+ else
+ print_diag_impl<path> (p, &l, false /* empty */, move (rs), c);
+ }
+
+ void
+ print_diag (const char* p, const string& l, const target& r, const char* c)
+ {
+ return print_diag (p, l, r.key (), c);
+ }
+
+ void
+ print_diag (const char* p, const string& l, target_key&& r, const char* c)
+ {
+ text << p << ' '
+ << l << (l.empty () ? "" : " ")
+ << (c == nullptr ? "->" : c) << ' '
+ << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const string& l, vector<target_key>&& rs,
+ const char* c)
+ {
+ assert (!rs.empty ());
+
+ if (rs.size () == 1)
+ print_diag (p, l, move (rs.front ()), c);
+ else
+ print_diag_impl<string> (p, &l, l.empty (), move (rs), c);
+ }
+
+ void
+ print_diag (const char* p, const target& r)
+ {
+ print_diag_impl (p, nullptr, r.key (), nullptr);
+ }
+
+ void
+ print_diag (const char* p, const dir_path& r)
+ {
+ text << p << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p, const path_name_view& r)
+ {
+ text << p << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const target& l, const path_name_view& r,
+ const char* c)
+ {
+ // @@ TODO: out qualification stripping: only do if p.out is subdir of t
+ // (also below)?
+
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p, const target& l, const dir_path& r, const char* c)
+ {
+ print_diag (p, l.key (), r, c);
+ }
+
+ void
+ print_diag (const char* p, target_key&& l, const dir_path& r, const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ vector<target_key>&& ls, const dir_path& r,
+ const char* c)
+ {
+ assert (!ls.empty ());
+
+ if (ls.size () == 1)
+ print_diag (p, move (ls.front ()), r, c);
+ else
+ print_diag_impl<dir_path> (p, move (ls), r, c);
+ }
+
+ void
+ print_diag (const char* p, const path& l, const dir_path& r, const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const path& l, const path_name_view& r,
+ const char* c)
+ {
+ text << p << ' ' << l << ' ' << (c == nullptr ? "->" : c) << ' ' << r;
+ }
+
+ void
+ print_diag (const char* p,
+ const string& l, const path_name_view& r,
+ const char* c)
+ {
+ text << p << ' '
+ << l << (l.empty () ? "" : " ")
+ << (c == nullptr ? "->" : c) << ' '
+ << r;
+ }
+
+ // print_process()
+ //
void
print_process (const char* const* args, size_t n)
{
@@ -138,6 +625,305 @@ namespace build2
const fail_mark fail ("error");
const fail_end endf;
+ // diag_buffer
+ //
+
+ int diag_buffer::
+ pipe (context& ctx, bool force)
+ {
+ return (ctx.sched->serial () || ctx.no_diag_buffer) && !force ? 2 : -1;
+ }
+
+ void diag_buffer::
+ open (const char* args0, auto_fd&& fd, fdstream_mode m)
+ {
+ assert (state_ == state::closed && args0 != nullptr);
+
+ serial = ctx_.sched->serial ();
+ nobuf = !serial && ctx_.no_diag_buffer;
+
+ if (fd != nullfd)
+ {
+ try
+ {
+ is.open (move (fd), m | fdstream_mode::text);
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << args0 << " stderr: " << e;
+ }
+ }
+
+ this->args0 = args0;
+ state_ = state::opened;
+ }
+
+ void diag_buffer::
+ open_eof (const char* args0)
+ {
+ assert (state_ == state::closed && args0 != nullptr);
+
+ serial = ctx_.sched->serial ();
+ nobuf = !serial && ctx_.no_diag_buffer;
+ this->args0 = args0;
+ state_ = state::eof;
+ }
+
+ bool diag_buffer::
+ read (bool force)
+ {
+ assert (state_ == state::opened);
+
+ bool r;
+ if (is.is_open ())
+ {
+ try
+ {
+ // Copy buffers directly.
+ //
+ auto copy = [this] (fdstreambuf& sb)
+ {
+ const char* p (sb.gptr ());
+ size_t n (sb.egptr () - p);
+
+ // Allocate at least fdstreambuf::buffer_size to reduce
+ // reallocations and memory fragmentation.
+ //
+ size_t i (buf.size ());
+ if (i == 0 && n < fdstreambuf::buffer_size)
+ buf.reserve (fdstreambuf::buffer_size);
+
+ buf.resize (i + n);
+ memcpy (buf.data () + i, p, n);
+
+ sb.gbump (static_cast<int> (n));
+ };
+
+ if (is.blocking ())
+ {
+ if ((serial || nobuf) && !force)
+ {
+ // This is the case where we are called after custom processing.
+ //
+ assert (buf.empty ());
+
+ // Note that the eof check is important: if the stream is at eof,
+ // this and all subsequent writes to the diagnostics stream will
+ // fail (and you won't see a thing).
+ //
+ if (is.peek () != ifdstream::traits_type::eof ())
+ {
+ if (serial)
+ {
+ // Holding the diag lock while waiting for diagnostics from
+ // the child process would be a bad idea in the parallel
+ // build. But it should be harmless in serial.
+ //
+ // @@ TODO: do direct buffer copy.
+ //
+ diag_stream_lock dl;
+ *diag_stream << is.rdbuf ();
+ }
+ else
+ {
+ // Read/write one line at a time not to hold the lock for too
+ // long.
+ //
+ for (string l; !eof (std::getline (is, l)); )
+ {
+ diag_stream_lock dl;
+ *diag_stream << l << '\n';
+ }
+ }
+ }
+ }
+ else
+ {
+ fdstreambuf& sb (*static_cast<fdstreambuf*> (is.rdbuf ()));
+
+ while (is.peek () != istream::traits_type::eof ())
+ copy (sb);
+ }
+
+ r = false;
+ }
+ else
+ {
+ // We do not support finishing off after the custom processing in
+ // the non-blocking mode unless forced to buffer (but could probably
+ // do if necessary).
+ //
+ assert (!(serial || nobuf) || force);
+
+ fdstreambuf& sb (*static_cast<fdstreambuf*> (is.rdbuf ()));
+
+ // Try not to allocate the buffer if there is no diagnostics (the
+ // common case).
+ //
+ // Note that we must read until blocked (0) or EOF (-1).
+ //
+ streamsize n;
+ while ((n = sb.in_avail ()) > 0)
+ copy (sb);
+
+ r = (n != -1);
+ }
+
+ if (!r)
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ // For now we assume (here and pretty much everywhere else) that the
+ // output can't fail.
+ //
+ fail << "unable to read from " << args0 << " stderr: " << e;
+ }
+ }
+ else
+ r = false;
+
+ if (!r)
+ state_ = state::eof;
+
+ return r;
+ }
+
+ void diag_buffer::
+ write (const string& s, bool nl, bool force)
+ {
+ assert (state_ != state::closed);
+
+ // Similar logic to read() above.
+ //
+ if ((serial || nobuf) && !force)
+ {
+ assert (buf.empty ());
+
+ diag_stream_lock dl;
+ *diag_stream << s;
+ if (nl)
+ *diag_stream << '\n';
+ }
+ else
+ {
+ size_t n (s.size () + (nl ? 1 : 0));
+
+ size_t i (buf.size ());
+ if (i == 0 && n < fdstreambuf::buffer_size)
+ buf.reserve (fdstreambuf::buffer_size);
+
+ buf.resize (i + n);
+ memcpy (buf.data () + i, s.c_str (), s.size ());
+
+ if (nl)
+ buf.back () = '\n';
+ }
+ }
+
+ void diag_buffer::
+ close (const char* const* args,
+ const process_exit& pe,
+ uint16_t v,
+ bool omit_normal,
+ const location& loc)
+ {
+ tracer trace ("diag_buffer::close");
+
+ assert (state_ != state::closed);
+
+ // We need to make sure the command line we print on the unsuccessful exit
+ // is inseparable from any buffered diagnostics. So we prepare the record
+ // first and then write both while holding the diagnostics stream lock.
+ //
+ diag_record dr;
+ if (!pe)
+ {
+ // Note: see similar code in run_finish_impl().
+ //
+ if (omit_normal && pe.normal ())
+ {
+ l4 ([&]{trace << "process " << args[0] << " " << pe;});
+ }
+ else
+ {
+ dr << error (loc) << "process " << args[0] << " " << pe;
+
+ if (verb >= 1 && verb <= v)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+ }
+ }
+
+ close (move (dr));
+ }
+
+ void diag_buffer::
+ close (diag_record&& dr)
+ {
+ assert (state_ != state::closed);
+
+ // We may still be in the open state in case of custom processing.
+ //
+ if (state_ == state::opened)
+ {
+ if (is.is_open ())
+ {
+ try
+ {
+ if (is.good ())
+ {
+ if (is.blocking ())
+ {
+ assert (is.peek () == ifdstream::traits_type::eof ());
+ }
+ else
+ {
+ assert (is.rdbuf ()->in_avail () == -1);
+ }
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << args0 << " stderr: " << e;
+ }
+ }
+
+ state_ = state::eof;
+ }
+
+ // Note: flushing of the diag record may throw.
+ //
+ args0 = nullptr;
+ state_ = state::closed;
+
+ if (!buf.empty () || !dr.empty ())
+ {
+ diag_stream_lock l;
+
+ if (!buf.empty ())
+ {
+ diag_stream->write (buf.data (), static_cast<streamsize> (buf.size ()));
+ buf.clear ();
+ }
+
+ if (!dr.empty ())
+ dr.flush ([] (const butl::diag_record& r)
+ {
+ // Similar to default_writer().
+ //
+ *diag_stream << r.os.str () << '\n';
+ diag_stream->flush ();
+ });
+ else
+ diag_stream->flush ();
+ }
+ }
+
// diag_do(), etc.
//
string
diff --git a/libbuild2/diagnostics.hxx b/libbuild2/diagnostics.hxx
index 179b5c9..ef41f22 100644
--- a/libbuild2/diagnostics.hxx
+++ b/libbuild2/diagnostics.hxx
@@ -21,10 +21,232 @@ namespace build2
//
class failed: public std::exception {};
- // Print process commmand line. If the number of elements is specified
- // (or the second version is used), then it will print the piped multi-
- // process command line, if present. In this case, the expected format
- // is as follows:
+ // Print low-verbosity recipe diagnostics in the forms:
+ //
+ // <prog> <l-target> <comb> <r-target>
+ // <prog> <r-target>
+ //
+ // Where <prog> is an abbreviated/generalized program name, such as c++
+ // (rather than g++ or clang++) or yacc (rather than bison or byacc),
+ // <l-target> is typically the "main" prerequisite target, such as the C++
+ // source file to compile, <r-target> is typically the target being
+ // produced, and <comb> is the combiner, typically "->".
+ //
+ // The second form (without <l-target> and <comb>) should be used when there
+ // is no natural "main" prerequisite, for example, for linking as well as
+ // for programs that act upon the target, such as mkdir, rm, test, etc.
+ //
+ // Note also that these functions omit the @.../ qualification in either
+ // <l-target> or <r-target> if it's implied by the other.
+ //
+ // For example:
+ //
+ // mkdir fsdir{details/}
+ // c++ cxx{hello} -> obje{hello}
+ // ld exe{hello}
+ //
+ // test exe{hello} + testscript
+ //
+ // install exe{hello} -> /usr/bin/
+ // uninstall exe{hello} <- /usr/bin/
+ //
+ // rm exe{hello}
+ // rm obje{hello}
+ // rmdir fsdir{details/}
+ //
+ // Examples of target groups:
+ //
+ // cli cli{foo} -> {hxx cxx}{foo}
+ //
+ // thrift thrift{foo} -> {hxx cxx}{foo-types}
+ // {hxx cxx}{foo-stubs}
+ //
+ // Potentially we could also support target groups for <l-target>:
+ //
+ // tool {hxx cxx}{foo} -> {hxx cxx}{foo-types}
+ //
+ // tool {hxx cxx}{foo-types}
+ // {hxx cxx}{foo-stubs} -> {hxx cxx}{foo-insts}
+ // {hxx cxx}{foo-impls}
+ //
+ // Currently we only support this for the `group -> dir_path` form (used
+ // by the backlink machinery).
+ //
+ // See also the `diag` Buildscript pseudo-builtin which is reduced to one of
+ // the print_diag() calls (adhoc_buildscript_rule::print_custom_diag()). In
+ // particular, if you are adding a new overload, also consider if/how it
+ // should be handled there.
+ //
+ // Note: see GH issue #40 for additional background and rationale.
+ //
+ // If <comb> is not specified, then "->" is used by default.
+
+ // prog target -> target
+ // prog target -> group
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ target_key&& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, target_key&& r,
+ const char* comb = nullptr);
+
+ void
+ print_diag (const char* prog,
+ target_key&& l, target_key&& r,
+ const char* comb = nullptr);
+
+ // Note: using small_vector would require target_key definition.
+ //
+ void
+ print_diag (const char* prog,
+ target_key&& l, vector<target_key>&& r,
+ const char* comb = nullptr);
+
+ // prog path -> target
+ // prog path -> group
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, target_key&& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, vector<target_key>&& r,
+ const char* comb = nullptr);
+
+ // prog string -> target
+ // prog string -> group
+ //
+ // Use these versions if, for example, input information is passed as an
+ // argument.
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, const target& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, target_key&& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, vector<target_key>&& r,
+ const char* comb = nullptr);
+
+ // prog target
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog, const target&);
+
+ void
+ print_diag (const char* prog, target_key&&);
+
+ // prog group
+ //
+ void
+ print_diag (const char* prog, vector<target_key>&&);
+
+ // prog path
+ //
+ // Special versions for cases like mkdir/rmdir, save, etc.
+ //
+ // Note: use path_name("-") if the result is written to stdout.
+ //
+ void
+ print_diag (const char* prog, const path&);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog, const dir_path&);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog, const path_name_view&);
+
+ // Special versions for ln, cp, rm, install/uninstall, dist, etc.
+ //
+ // Note: use path_name ("-") if the result is written to stdout.
+
+ // prog target -> path
+ //
+ void
+ print_diag (const char* prog,
+ const target& l, const path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const target& l, const path_name_view& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ target_key&& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ // prog group -> dir_path
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ vector<target_key>&& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ // prog path -> path
+ //
+ void
+ print_diag (const char* prog,
+ const path& l, const path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, const dir_path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const path& l, const path_name_view& r,
+ const char* comb = nullptr);
+
+ // prog string -> path
+ //
+ // Use this version if, for example, input information is passed as an
+ // argument.
+ //
+ void
+ print_diag (const char* prog,
+ const string& l, const path& r,
+ const char* comb = nullptr);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag (const char* prog,
+ const string& l, const path_name_view& r,
+ const char* comb = nullptr);
+
+ // Print process command line. If the number of elements is specified (or
+ // the const cstrings& version is used), then it will print the piped multi-
+ // process command line, if present. In this case, the expected format is as
+ // follows:
//
// name1 arg arg ... nullptr
// name2 arg arg ... nullptr
@@ -179,15 +401,25 @@ namespace build2
using butl::diag_progress_lock;
// Return true if progress is to be shown. The max_verb argument is the
- // maximum verbosity level that this type of progress should be shown by
- // default.
+ // maximum verbosity level that this type of progress should be shown at by
+ // default. If it is verb_never, then both min and max verbosity checks are
+ // omitted, assuming the caller takes care of that themselves.
//
inline bool
show_progress (uint16_t max_verb)
{
return diag_progress_option
? *diag_progress_option
- : stderr_term && verb >= 1 && verb <= max_verb;
+ : stderr_term && (max_verb == verb_never ||
+ (verb >= 1 && verb <= max_verb));
+ }
+
+ // Diagnostics color.
+ //
+ inline bool
+ show_diag_color ()
+ {
+ return diag_color_option ? *diag_color_option : stderr_term_color;
}
// Diagnostic facility.
@@ -285,6 +517,8 @@ namespace build2
}
};
+ // Note: diag frames are not applied to text/trace diagnostics.
+ //
template <typename F>
struct diag_frame_impl: diag_frame
{
@@ -483,10 +717,10 @@ namespace build2
const void* data = nullptr)
: basic_mark_base (type,
data,
- [](const butl::diag_record& r)
+ [](const butl::diag_record& r, butl::diag_writer* w)
{
diag_frame::apply (r);
- r.flush ();
+ r.flush (w);
throw failed ();
},
&stream_verb_map,
@@ -512,6 +746,284 @@ namespace build2
LIBBUILD2_SYMEXPORT extern const fail_mark fail;
LIBBUILD2_SYMEXPORT extern const fail_end endf;
+ // Diagnostics buffer.
+ //
+ // The purpose of this class is to handle diagnostics from child processes,
+ // where handle can mean:
+ //
+ // - Buffer it (to avoid interleaving in parallel builds).
+ //
+ // - Stream it (if the input can be split into diagnostic records).
+ //
+ // - Do nothing (in serial builds or if requested not to buffer).
+ //
+ // In the future this class may also be responsible for converting the
+ // diagnostics into the structured form (which means it may need to buffer
+ // even in serial builds).
+ //
+ // The typical usage is as follows:
+ //
+ // process pr (..., diag_buffer::pipe (ctx));
+ // diag_buffer dbuf (ctx, args[0], pr); // Skip.
+ // ifdstream is (move (pr.in_ofd)); // No skip.
+ // ofdstream os (move (pr.out_fd));
+ //
+ // The reason for this somewhat roundabout setup is to make sure the
+ // diag_buffer instance is destroyed before the process instance. This is
+ // important in case an exception is thrown where we want to make sure all
+ // our pipe ends are closed before we wait for the process exit (which
+ // happens in the process destructor).
+ //
+ // And speaking of the destruction order, another thing to keep in mind is
+ // that only one stream can use the skip mode (fdstream_mode::skip; because
+ // skipping is performed in the blocking mode) and the stream that skips
+ // should come first so that all other streams are destroyed/closed before
+ // it (failing that, we may end up in a deadlock). For example:
+ //
+ // process pr (..., diag_buffer::pipe (ctx));
+ // ifdstream is (move (pr.in_ofd), fdstream_mode::skip); // Skip.
+ // diag_buffer dbuf (ctx, args[0], pr, fdstream_mode::none); // No skip.
+ // ofdstream os (move (pr.out_fd));
+ //
+ class LIBBUILD2_SYMEXPORT diag_buffer
+ {
+ public:
+ // If buffering is necessary or force is true, return an "instruction"
+ // (-1) to the process class constructor to open a pipe and redirect
+ // stderr to it. Otherwise, return an "instruction" to inherit stderr (2).
+ //
+ // The force flag is normally used if custom diagnostics processing is
+ // required (filter, split, etc; see read() below).
+ //
+ // Note that the diagnostics buffer must be opened (see below) regardless
+ // of the pipe() result.
+ //
+ static int
+ pipe (context&, bool force = false);
+
+ // Open the diagnostics buffer given the parent end of the pipe (normally
+ // process:in_efd). If it is nullfd, then assume no buffering is
+ // necessary. If mode is non_blocking, then make reading from the parent
+ // end of the pipe non-blocking.
+ //
+ // The args0 argument is the child process program name for diagnostics.
+ // It is expected to remain valid until the call to close() and should
+ // normally be the same as args[0] passed to close().
+ //
+ // Note that the same buffer can go through multiple open-read-close
+ // sequences, for example, to execute multiple commands.
+ //
+ // All the below functions handle io errors, issue suitable diagnostics,
+ // and throw failed. If an exception is thrown from any of them, then the
+ // instance should not be used any further.
+ //
+ // Note that when reading from multiple streams in the non-blocking mode,
+ // only the last stream to be destroyed can normally have the skip mode
+ // since in case of an exception, skipping will be blocking.
+ //
+ diag_buffer (context&,
+ const char* args0,
+ auto_fd&&,
+ fdstream_mode = fdstream_mode::skip);
+
+ // As above, but the parent end of the pipe (process:in_efd) is passed
+ // via a process instance.
+ //
+ diag_buffer (context&,
+ const char* args0,
+ process&,
+ fdstream_mode = fdstream_mode::skip);
+
+ // As above but with support for the underlying buffer reuse.
+ //
+ // Note that in most cases reusing the buffer is probably not worth the
+ // trouble because we normally don't expect any diagnostics in the common
+ // case. However, if needed, it can be arranged, for example:
+ //
+ // vector<char> buf;
+ //
+ // {
+ // process pr (...);
+ // diag_buffer dbuf (ctx, move (buf), args[0], pr);
+ // dbuf.read ();
+ // dbuf.close ();
+ // buf = move (dbuf.buf);
+ // }
+ //
+ // {
+ // ...
+ // }
+ //
+ // Note also that while there is no guarantee the underlying buffer is
+ // moved when, say, the vector is empty, all the main implementations
+ // always steal the buffer.
+ //
+ diag_buffer (context&,
+ vector<char>&& buf,
+ const char* args0,
+ auto_fd&&,
+ fdstream_mode = fdstream_mode::skip);
+
+ diag_buffer (context&,
+ vector<char>&& buf,
+ const char* args0,
+ process&,
+ fdstream_mode = fdstream_mode::skip);
+
+ // Separate construction and opening.
+ //
+ // Note: be careful with the destruction order (see above for details).
+ //
+ explicit
+ diag_buffer (context&);
+
+ diag_buffer (context&, vector<char>&& buf);
+
+ void
+ open (const char* args0,
+ auto_fd&&,
+ fdstream_mode = fdstream_mode::skip);
+
+ // Open the buffer in the state as if after read() returned false, that
+ // is, the stream corresponding to the parent's end of the pipe reached
+ // EOF and has been closed. This is primarily useful when the diagnostics
+ // is being read in a custom way (for example, it has been merged to
+ // stdout) and all we want is to be able to call write() and close().
+ //
+ void
+ open_eof (const char* args0);
+
+ // Check whether the buffer has been opened with the open() call and
+ // hasn't yet been closed.
+ //
+ // Note that this function returning true does not mean that the pipe was
+ // opened (to check that, call is_open() on the stream member; see below).
+ //
+ bool
+ is_open () const
+ {
+ return state_ != state::closed;
+ }
+
+ // Read the diagnostics from the parent's end of the pipe if one was
+ // opened and buffer/stream it as necessary or forced. Return true if
+ // there could be more diagnostics to read (only possible in the non-
+ // blocking mode) and false otherwise, in which case also close the
+ // stream.
+ //
+ // Note that the force argument here (as well as in write() below) and
+ // in open() above are independent. Specifically, force in open() forces
+ // the opening of the pipe while force in read() and write() forces
+ // the buffering of the diagnostics.
+ //
+ // Instead of calling this function you can perform custom reading and, if
+ // necessary, buffering of the diagnostics by accessing the input stream
+ // (is) and underlying buffer (buf) directly. This can be used to filter,
+ // split the diagnostics into records according to a certain format, etc.
+ // Note that such custom processing implementation should maintain the
+ // overall semantics of diagnostics buffering in that it may only omit
+ // buffering in the serial case or if the diagnostics can be streamed in
+ // atomic records. See also write() below.
+ //
+ // The input stream is opened in the text mode and has the badbit but not
+ // failbit exception mask. The custom processing should also be compatible
+ // with the stream mode (blocking or non). If buffering is performed, then
+ // depending on the expected diagnostics the custom processing may want to
+ // reserve an appropriate initial buffer size to avoid unnecessary
+ // reallocation. As a convenience, in the blocking mode only, if the
+ // stream still contains some diagnostics, then it can be handled by
+ // calling read(). This is useful when needing to process only the initial
+ // part of the diagnostics. The custom processing may also close the
+ // stream manually before calling close().
+ //
+ bool
+ read (bool force = false);
+
+ // Close the parent end of the pipe if one was opened and write out any
+ // buffered diagnostics.
+ //
+ // If the child process exited abnormally or normally with non-0 code,
+ // then print the error diagnostics to this effect. Additionally, if the
+ // verbosity level is between 1 and the specified value, then print the
+ // command line as info after the error. If omit_normal is true, then
+ // don't print either for the normal exit (usually used for custom
+ // diagnostics or when process failure can be tolerated).
+ //
+ // Normally the specified verbosity will be 1 and the command line args
+ // represent the verbosity level 2 (logical) command line. Note that args
+ // should only represent a single command in a pipe (see print_process()
+ // below for details).
+ //
+ // If the diag_buffer instance is destroyed before calling close(), then
+ // any buffered diagnostics is discarded.
+ //
+ // Note: see also run_finish(diag_buffer&).
+ //
+ // @@ TODO: need overload with process_env (see print_process). Also in
+ // run_finish_impl().
+ //
+ void
+ close (const cstrings& args,
+ const process_exit&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = {});
+
+ void
+ close (const char* const* args,
+ const process_exit&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = {});
+
+ // As above but with a custom diag record for the child exit diagnostics,
+ // if any. Note that if the diag record has the fail epilogue, then this
+ // function will throw.
+ //
+ void
+ close (diag_record&& = {});
+
+ // Direct access to the underlying stream and buffer for custom processing
+ // (see read() above for details).
+ //
+ // If serial is true, then we are running serially. If nobuf is true,
+ // then we are running in parallel but diagnostics buffering has been
+ // disabled (--no-diag-buffer). Note that there is a difference: during
+ // the serial execution we are free to hold the diag_stream_lock for as
+ // long as convenient, for example, for the whole duration of child
+ // process execution. Doing the same during parallel execution is very
+ // a bad idea and we should read/write the diagnostics in chunks, normally
+ // one line at a time.
+ //
+ public:
+ ifdstream is;
+ vector<char> buf;
+ const char* args0;
+ bool serial;
+ bool nobuf;
+
+ // Buffer or stream a fragment of diagnostics as necessary or forced. If
+ // newline is true, also add a trailing newline.
+ //
+ // This function is normally called from a custom diagnostics processing
+ // implementation (see read() above for details). If nobuf is true, then
+ // the fragment should end on the line boundary to avoid interleaving.
+ //
+ void
+ write (const string&, bool newline, bool force = false);
+
+ private:
+ // Note that we don't seem to need a custom destructor to achieve the
+ // desired semantics: we can assume the process has exited before we are
+ // destroyed (because we supply stderr to its constructor) which means
+ // closing fdstream without reading any further should be ok.
+ //
+ enum class state {closed, opened, eof};
+
+ context& ctx_;
+ state state_ = state::closed;
+ };
+
// Action phrases, e.g., "configure update exe{foo}", "updating exe{foo}",
// and "updating exe{foo} is configured". Use like this:
//
@@ -577,4 +1089,6 @@ namespace build2
}
}
+#include <libbuild2/diagnostics.ixx>
+
#endif // LIBBUILD2_DIAGNOSTICS_HXX
diff --git a/libbuild2/diagnostics.ixx b/libbuild2/diagnostics.ixx
new file mode 100644
index 0000000..273dfad
--- /dev/null
+++ b/libbuild2/diagnostics.ixx
@@ -0,0 +1,126 @@
+// file : libbuild2/diagnostics.ixx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+namespace build2
+{
+ // print_diag()
+ //
+ LIBBUILD2_SYMEXPORT void
+ print_diag_impl (const char*, target_key*, target_key&&, const char*);
+
+ LIBBUILD2_SYMEXPORT void
+ print_diag_impl (const char*,
+ target_key*, vector<target_key>&& r,
+ const char*);
+
+ inline void
+ print_diag (const char* p, target_key&& l, target_key&& r, const char* c)
+ {
+ print_diag_impl (p, &l, move (r), c);
+ }
+
+ inline void
+ print_diag (const char* p,
+ target_key&& l, vector<target_key>&& r,
+ const char* c)
+ {
+ print_diag_impl (p, &l, move (r), c);
+ }
+
+ inline void
+ print_diag (const char* p, target_key& r)
+ {
+ print_diag_impl (p, nullptr, move (r), nullptr);
+ }
+
+ inline void
+ print_diag (const char* p, vector<target_key>&& r)
+ {
+ print_diag_impl (p, nullptr, move (r), nullptr);
+ }
+
+ inline void
+ print_diag (const char* p, const path& r)
+ {
+ print_diag (p, path_name (&r));
+ }
+
+ inline void
+ print_diag (const char* p, const target& l, const path& r, const char* c)
+ {
+ print_diag (p, l, path_name (&r), c);
+ }
+
+ inline void
+ print_diag (const char* p, const path& l, const path& r, const char* c)
+ {
+ print_diag (p, l, path_name (&r), c);
+ }
+
+ inline void
+ print_diag (const char* p, const string& l, const path& r, const char* c)
+ {
+ print_diag (p, l, path_name (&r), c);
+ }
+
+ // diag_buffer
+ //
+ inline diag_buffer::
+ diag_buffer (context& ctx)
+ : is (ifdstream::badbit), ctx_ (ctx)
+ {
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx, vector<char>&& b)
+ : is (ifdstream::badbit), buf (move (b)), ctx_ (ctx)
+ {
+ buf.clear ();
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx, const char* args0, auto_fd&& fd, fdstream_mode m)
+ : diag_buffer (ctx)
+ {
+ open (args0, move (fd), m);
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx, const char* args0, process& pr, fdstream_mode m)
+ : diag_buffer (ctx)
+ {
+ open (args0, move (pr.in_efd), m);
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx,
+ vector<char>&& b,
+ const char* args0,
+ auto_fd&& fd,
+ fdstream_mode m)
+ : diag_buffer (ctx, move (b))
+ {
+ open (args0, move (fd), m);
+ }
+
+ inline diag_buffer::
+ diag_buffer (context& ctx,
+ vector<char>&& b,
+ const char* args0,
+ process& pr,
+ fdstream_mode m)
+ : diag_buffer (ctx, move (b))
+ {
+ open (args0, move (pr.in_efd), m);
+ }
+
+ inline void diag_buffer::
+ close (const cstrings& args,
+ const process_exit& pe,
+ uint16_t verbosity,
+ bool omit_normal,
+ const location& loc)
+ {
+ close (args.data (), pe, verbosity, omit_normal, loc);
+ }
+}
diff --git a/libbuild2/dist/init.cxx b/libbuild2/dist/init.cxx
index 2b2aaa4..48a3e15 100644
--- a/libbuild2/dist/init.cxx
+++ b/libbuild2/dist/init.cxx
@@ -3,8 +3,9 @@
#include <libbuild2/dist/init.hxx>
-#include <libbuild2/scope.hxx>
#include <libbuild2/file.hxx>
+#include <libbuild2/rule.hxx>
+#include <libbuild2/scope.hxx>
#include <libbuild2/diagnostics.hxx>
#include <libbuild2/config/utility.hxx>
@@ -21,6 +22,7 @@ namespace build2
namespace dist
{
static const rule rule_;
+ static const file_rule file_rule_ (true /* check_type */);
void
boot (scope& rs, const location&, module_boot_extra& extra)
@@ -32,7 +34,34 @@ namespace build2
// Enter module variables. Do it during boot in case they get assigned
// in bootstrap.build (which is customary for, e.g., dist.package).
//
- auto& vp (rs.var_pool ());
+
+ // The dist flag or path. Normally it is a flag (true or false) but can
+ // also be used to remap the distribution location.
+ //
+ // In the latter case it specifies the "imaginary" source location which
+ // is used to derive the corresponding distribution location. This location
+ // can be specified as either a directory path (to remap with the same
+ // file name) or a file path (to remap with a different name). And the
+ // way we distinguish between the two is via the presence/absence of the
+ // trailing directory separator. If the path is relative, then it's
+ // treated relative to the target directory. Note that to make things
+ // less error prone, simple paths without any directory separators are
+ // not allowed (use ./<name> instead).
+ //
+ // Note that if multiple targets end up with the same source location,
+ // the behavior is undefined and no diagnostics is issued.
+ //
+ // Note also that such remapping has no effect in the bootstrap
+ // distribution mode.
+ //
+ // Note: project-private.
+ //
+ rs.var_pool ().insert<path> ("dist", variable_visibility::target);
+
+ // The rest of the variables we enter are qualified so go straight for
+ // the public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// config.dist.archives is a list of archive extensions (e.g., zip,
// tar.gz) that can be optionally prefixed with a directory. If it is
@@ -71,8 +100,6 @@ namespace build2
vp.insert<paths> ("dist.archives");
vp.insert<paths> ("dist.checksums");
- vp.insert<bool> ("dist", variable_visibility::target); // Flag.
-
// Project's package name. Note: if set, must be in bootstrap.build.
//
auto& v_d_p (vp.insert<string> ("dist.package"));
@@ -107,7 +134,7 @@ namespace build2
//
bool s (specified_config (rs, "dist", {"bootstrap"}));
- // dist.root
+ // config.dist.root
//
{
value& v (rs.assign ("dist.root"));
@@ -119,22 +146,24 @@ namespace build2
}
}
- // dist.cmd
+ // config.dist.cmd
+ //
+ // By default we use in-process code for creating directories and
+ // copying files (for performance, especially on Windows). But an
+ // external program (normally install) can be used if configured.
//
{
- value& v (rs.assign<process_path> ("dist.cmd"));
+ value& v (rs.assign<process_path> ("dist.cmd")); // NULL
if (s)
{
- if (lookup l = lookup_config (rs,
- "config.dist.cmd",
- path ("install")))
+ if (lookup l = lookup_config (rs, "config.dist.cmd", nullptr))
v = run_search (cast<path> (l), true);
}
}
- // dist.archives
- // dist.checksums
+ // config.dist.archives
+ // config.dist.checksums
//
{
value& a (rs.assign ("dist.archives"));
@@ -157,7 +186,7 @@ namespace build2
}
}
- // dist.uncommitted
+ // config.dist.uncommitted
//
// Omit it from the configuration unless specified.
//
@@ -182,7 +211,7 @@ namespace build2
l5 ([&]{trace << "for " << rs;});
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true /* public */)); // All qualified.
// Register our wildcard rule. Do it explicitly for the alias to prevent
// something like insert<target>(dist_id, test_id) taking precedence.
@@ -190,6 +219,19 @@ namespace build2
rs.insert_rule<target> (dist_id, 0, "dist", rule_);
rs.insert_rule<alias> (dist_id, 0, "dist.alias", rule_);
+ // We need this rule for out-of-any-project dependencies (for example,
+ // executables imported from /usr/bin, etc). We are registering it on
+ // the global scope similar to builtin rules.
+ //
+ // Note: use target instead of anything more specific (such as
+ // mtime_target) in order not to take precedence over the "dist" rule
+ // above.
+ //
+ // See a similar rule in the config module.
+ //
+ rs.global_scope ().insert_rule<target> (
+ dist_id, 0, "dist.file", file_rule_);
+
// Configuration.
//
// Adjust module priority so that the config.dist.* values are saved at
diff --git a/libbuild2/dist/module.hxx b/libbuild2/dist/module.hxx
index 314dc96..da97939 100644
--- a/libbuild2/dist/module.hxx
+++ b/libbuild2/dist/module.hxx
@@ -10,14 +10,17 @@
#include <libbuild2/module.hxx>
#include <libbuild2/variable.hxx>
+#include <libbuild2/dist/types.hxx>
+
#include <libbuild2/export.hxx>
namespace build2
{
namespace dist
{
- struct LIBBUILD2_SYMEXPORT module: build2::module
+ class LIBBUILD2_SYMEXPORT module: public build2::module
{
+ public:
static const string name;
const variable& var_dist_package;
@@ -38,6 +41,10 @@ namespace build2
adhoc.push_back (move (f));
}
+ // List of postponed prerequisites (see rule for details).
+ //
+ mutable postponed_prerequisites postponed;
+
// Distribution post-processing callbacks.
//
// Only the last component in the pattern may contain wildcards. If the
@@ -69,10 +76,11 @@ namespace build2
// Implementation details.
//
- module (const variable& v_d_p)
- : var_dist_package (v_d_p) {}
+ public:
+ module (const variable& v_d_p): var_dist_package (v_d_p) {}
public:
+ bool distributed = false; // True if this project is being distributed.
vector<path> adhoc;
struct callback
diff --git a/libbuild2/dist/operation.cxx b/libbuild2/dist/operation.cxx
index 01ac7eb..cd88eac 100644
--- a/libbuild2/dist/operation.cxx
+++ b/libbuild2/dist/operation.cxx
@@ -6,6 +6,8 @@
#include <libbutl/sha1.hxx>
#include <libbutl/sha256.hxx>
+#include <libbutl/filesystem.hxx> // try_mkdir_p(), cpfile()
+
#include <libbuild2/file.hxx>
#include <libbuild2/dump.hxx>
#include <libbuild2/scope.hxx>
@@ -15,6 +17,8 @@
#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/dist/types.hxx>
+#include <libbuild2/dist/rule.hxx>
#include <libbuild2/dist/module.hxx>
using namespace std;
@@ -27,14 +31,14 @@ namespace build2
// install -d <dir>
//
static void
- install (const process_path& cmd, const dir_path&);
+ install (const process_path*, context&, const dir_path&);
- // install <file> <dir>
+ // install <file> <dir>[/<name>]
//
// Return the destination file path.
//
static path
- install (const process_path& cmd, const file&, const dir_path&);
+ install (const process_path*, const file&, const dir_path&, const path&);
// tar|zip ... <dir>/<pkg>.<ext> <pkg>
//
@@ -64,6 +68,30 @@ namespace build2
return o;
}
+ static void
+ dist_load_load (const values& vs,
+ scope& rs,
+ const path& bf,
+ const dir_path& out_base,
+ const dir_path& src_base,
+ const location& l)
+ {
+ // @@ TMP: redo after release (do it here and not in execute, also add
+ // custom search and do the other half there).
+ //
+#if 0
+ if (rs.out_path () != out_base || rs.src_path () != src_base)
+ fail (l) << "dist meta-operation target must be project root directory";
+#endif
+
+ // Mark this project as being distributed.
+ //
+ if (auto* m = rs.find_module<module> (module::name))
+ m->distributed = true;
+
+ perform_load (vs, rs, bf, out_base, src_base, l);
+ }
+
// Enter the specified source file as a target of type T. The path is
// expected to be normalized and relative to src_root. If the third
// argument is false, then first check if the file exists. If the fourth
@@ -82,9 +110,7 @@ namespace build2
// Figure out if we need out.
//
- dir_path out (rs.src_path () != rs.out_path ()
- ? out_src (d, rs)
- : dir_path ());
+ dir_path out (!rs.out_eq_src () ? out_src (d, rs) : dir_path ());
const T& t (rs.ctx.targets.insert<T> (
move (d),
@@ -126,7 +152,7 @@ namespace build2
try
{
- for (const dir_entry& e: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::no_follow))
{
const path& n (e.path ());
@@ -212,8 +238,13 @@ namespace build2
fail << "unknown distribution package name" <<
info << "did you forget to set dist.package?";
+ const module& mod (*rs.find_module<module> (module::name));
+
const string& dist_package (cast<string> (l));
- const process_path& dist_cmd (cast<process_path> (rs.vars["dist.cmd"]));
+ const process_path* dist_cmd (
+ cast_null<process_path> (rs.vars["dist.cmd"]));
+
+ dir_path td (dist_root / dir_path (dist_package));
// We used to print 'dist <target>' at verbosity level 1 but that has
// proven to be just noise. Though we still want to print something
@@ -224,79 +255,143 @@ namespace build2
// (e.g., output directory creation) in all the operations below.
//
if (verb == 1)
- text << "dist " << dist_package;
+ print_diag ("dist", src_root, td);
// Get the list of files to distribute.
//
action_targets files;
+ const variable* dist_var (nullptr);
if (tgt != nullptr)
{
l5 ([&]{trace << "load dist " << rs;});
+ dist_var = rs.var_pool ().find ("dist");
+
// Match a rule for every operation supported by this project. Skip
// default_id.
//
// Note that we are not calling operation_pre/post() callbacks here
// since the meta operation is dist and we know what we are doing.
//
- values params;
path_name pn ("<dist>");
const location loc (pn); // Dummy location.
+ action_targets ts {tgt};
+
+ auto process_postponed = [&ctx, &mod] ()
{
- action_targets ts {tgt};
+ if (!mod.postponed.list.empty ())
+ {
+ // Re-grab the phase lock similar to perform_match().
+ //
+ phase_lock l (ctx, run_phase::match);
- auto mog = make_guard ([&ctx] () {ctx.match_only = false;});
- ctx.match_only = true;
+ // Note that we don't need to bother with the mutex since we do
+ // all of this serially. But we can end up with new elements at
+ // the end.
+ //
+ // Strictly speaking, to handle this correctly we would need to do
+ // multiple passes over this list and only give up when we cannot
+ // make any progress since earlier entries that we cannot resolve
+ // could be "fixed" by later entries. But this feels far-fetched
+ // and so let's wait for a real example before complicating this.
+ //
+ for (auto i (mod.postponed.list.begin ());
+ i != mod.postponed.list.end ();
+ ++i)
+ rule::match_postponed (*i);
+ }
+ };
+
+ auto mog = make_guard ([&ctx] () {ctx.match_only = nullopt;});
+ ctx.match_only = match_only_level::all;
- const operations& ops (rs.root_extra->operations);
- for (operations::size_type id (default_id + 1); // Skip default_id.
- id < ops.size ();
- ++id)
+ const operations& ops (rs.root_extra->operations);
+ for (operations::size_type id (default_id + 1); // Skip default_id.
+ id < ops.size ();
+ ++id)
+ {
+ if (const operation_info* oif = ops[id])
{
- if (const operation_info* oif = ops[id])
- {
- // Skip aliases (e.g., update-for-install). In fact, one can
- // argue the default update should be sufficient since it is
- // assumed to update all prerequisites and we no longer support
- // ad hoc stuff like test.input. Though here we are using the
- // dist meta-operation, not perform.
- //
- if (oif->id != id)
- continue;
+ // Skip aliases (e.g., update-for-install). In fact, one can argue
+ // the default update should be sufficient since it is assumed to
+ // update all prerequisites and we no longer support ad hoc stuff
+ // like test.input. Though here we are using the dist
+ // meta-operation, not perform.
+ //
+ if (oif->id != id)
+ continue;
- // Use standard (perform) match.
- //
- if (auto pp = oif->pre_operation)
+ // Use standard (perform) match.
+ //
+ if (auto pp = oif->pre_operation)
+ {
+ if (operation_id pid = pp (ctx, {}, dist_id, loc))
{
- if (operation_id pid = pp (ctx, params, dist_id, loc))
- {
- const operation_info* poif (ops[pid]);
- ctx.current_operation (*poif, oif, false /* diag_noise */);
- action a (dist_id, poif->id, oif->id);
- match (params, a, ts,
- 1 /* diag (failures only) */,
- false /* progress */);
- }
+ const operation_info* poif (ops[pid]);
+ ctx.current_operation (*poif, oif, false /* diag_noise */);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, false /* inner */, loc);
+
+ if (poif->operation_pre != nullptr)
+ poif->operation_pre (ctx, {}, true /* inner */, loc);
+
+ action a (dist_id, poif->id, oif->id);
+ mod.postponed.list.clear ();
+ perform_match ({}, a, ts,
+ 1 /* diag (failures only) */,
+ false /* progress */);
+ process_postponed ();
+
+ if (poif->operation_post != nullptr)
+ poif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, false /* inner */);
}
+ }
+
+ ctx.current_operation (*oif, nullptr, false /* diag_noise */);
- ctx.current_operation (*oif, nullptr, false /* diag_noise */);
- action a (dist_id, oif->id);
- match (params, a, ts,
- 1 /* diag (failures only) */,
- false /* progress */);
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, true /* inner */, loc);
- if (auto po = oif->post_operation)
+ action a (dist_id, oif->id);
+ mod.postponed.list.clear ();
+ perform_match ({}, a, ts,
+ 1 /* diag (failures only) */,
+ false /* progress */);
+ process_postponed ();
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, true /* inner */);
+
+ if (auto po = oif->post_operation)
+ {
+ if (operation_id pid = po (ctx, {}, dist_id))
{
- if (operation_id pid = po (ctx, params, dist_id))
- {
- const operation_info* poif (ops[pid]);
- ctx.current_operation (*poif, oif, false /* diag_noise */);
- action a (dist_id, poif->id, oif->id);
- match (params, a, ts,
- 1 /* diag (failures only) */,
- false /* progress */);
- }
+ const operation_info* poif (ops[pid]);
+ ctx.current_operation (*poif, oif, false /* diag_noise */);
+
+ if (oif->operation_pre != nullptr)
+ oif->operation_pre (ctx, {}, false /* inner */, loc);
+
+ if (poif->operation_pre != nullptr)
+ poif->operation_pre (ctx, {}, true /* inner */, loc);
+
+ action a (dist_id, poif->id, oif->id);
+ mod.postponed.list.clear ();
+ perform_match ({}, a, ts,
+ 1 /* diag (failures only) */,
+ false /* progress */);
+ process_postponed ();
+
+ if (poif->operation_post != nullptr)
+ poif->operation_post (ctx, {}, true /* inner */);
+
+ if (oif->operation_post != nullptr)
+ oif->operation_post (ctx, {}, false /* inner */);
}
}
}
@@ -305,7 +400,7 @@ namespace build2
// Add ad hoc files and buildfiles that are not normally loaded as
// part of the project, for example, the export stub. They will still
// be ignored on the next step if the user explicitly marked them
- // dist=false.
+ // with dist=false.
//
auto add_adhoc = [] (const scope& rs)
{
@@ -352,7 +447,7 @@ namespace build2
dir_path out_nroot (out_root / pd);
const scope& nrs (ctx.scopes.find_out (out_nroot));
- if (nrs.out_path () != out_nroot) // This subproject not loaded.
+ if (nrs.out_path () != out_nroot) // This subproject is not loaded.
continue;
if (!nrs.src_path ().sub (src_root)) // Not a strong amalgamation.
@@ -368,50 +463,96 @@ namespace build2
// Note that we are not showing progress here (e.g., "N targets to
// distribute") since it will be useless (too fast).
//
- const variable& dist_var (ctx.var_pool["dist"]);
-
- for (const auto& pt: ctx.targets)
+ auto see_through = [] (const target& t)
{
- file* ft (pt->is_a<file> ());
-
- if (ft == nullptr) // Not a file.
- continue;
+ return ((t.type ().flags & target_type::flag::see_through) ==
+ target_type::flag::see_through);
+ };
- if (ft->dir.sub (src_root))
+ auto collect = [&trace, &dist_var,
+ &src_root, &out_root] (const file& ft)
+ {
+ if (ft.dir.sub (src_root))
{
// Include unless explicitly excluded.
//
- auto l ((*ft)[dist_var]);
-
- if (l && !cast<bool> (l))
- l5 ([&]{trace << "excluding " << *ft;});
- else
- files.push_back (ft);
+ if (const path* v = cast_null<path> (ft[dist_var]))
+ {
+ if (v->string () == "false")
+ {
+ l5 ([&]{trace << "excluding " << ft;});
+ return false;
+ }
+ }
- continue;
+ return true;
}
-
- if (ft->dir.sub (out_root))
+ else if (ft.dir.sub (out_root))
{
// Exclude unless explicitly included.
//
- auto l ((*ft)[dist_var]);
+ if (const path* v = cast_null<path> (ft[dist_var]))
+ {
+ if (v->string () != "false")
+ {
+ l5 ([&]{trace << "including " << ft;});
+ return true;
+ }
+ }
+
+ return false;
+ }
+ else
+ return false; // Out of project.
+ };
- if (l && cast<bool> (l))
+ for (const auto& pt: ctx.targets)
+ {
+ // Collect see-through groups if they are marked with dist=true.
+ //
+ // Note that while it's possible that only their certain members are
+ // marked as such (e.g., via a pattern), we will still require
+ // dist=true on the group itself (and potentially dist=false on some
+ // of its members) for such cases because we don't want to update
+ // every see-through group only to discover that most of them don't
+ // have anything to distribute.
+ //
+ if (see_through (*pt))
+ {
+ if (const path* v = cast_null<path> ((*pt)[dist_var]))
{
- l5 ([&]{trace << "including " << *ft;});
- files.push_back (ft);
+ if (v->string () != "false")
+ {
+ l5 ([&]{trace << "including group " << *pt;});
+ files.push_back (pt.get ());
+ }
}
continue;
}
+
+ file* ft (pt->is_a<file> ());
+
+ if (ft == nullptr) // Not a file.
+ continue;
+
+ // Skip member of see-through groups since after dist_* their list
+ // can be incomplete (or even bogus, e.g., the "representative
+ // sample"). Instead, we will collect them during perform_update
+ // below.
+ //
+ if (ft->group != nullptr && see_through (*ft->group))
+ continue;
+
+ if (collect (*ft))
+ files.push_back (ft);
}
// Make sure what we need to distribute is up to date.
//
{
if (mo_perform.meta_operation_pre != nullptr)
- mo_perform.meta_operation_pre (ctx, params, loc);
+ mo_perform.meta_operation_pre (ctx, {}, loc);
// This is a hack since according to the rules we need to completely
// reset the state. We could have done that (i.e., saved target
@@ -427,25 +568,75 @@ namespace build2
ctx.current_on = on + 1;
if (mo_perform.operation_pre != nullptr)
- mo_perform.operation_pre (ctx, params, update_id);
+ mo_perform.operation_pre (ctx, {}, update_id);
ctx.current_operation (op_update, nullptr, false /* diag_noise */);
+ if (op_update.operation_pre != nullptr)
+ op_update.operation_pre (ctx, {}, true /* inner */, loc);
+
action a (perform_update_id);
- mo_perform.match (params, a, files,
+ mo_perform.match ({}, a, files,
1 /* diag (failures only) */,
prog /* progress */);
- mo_perform.execute (params, a, files,
+ mo_perform.execute ({}, a, files,
1 /* diag (failures only) */,
prog /* progress */);
+ // Replace see-through groups (which now should have their members
+ // resolved) with members.
+ //
+ for (auto i (files.begin ()); i != files.end (); )
+ {
+ const target& t (i->as<target> ());
+ if (see_through (t))
+ {
+ group_view gv (t.group_members (a)); // Go directly.
+
+ if (gv.members == nullptr)
+ fail << "unable to resolve see-through group " << t
+ << " members";
+
+ i = files.erase (i); // Drop the group itself.
+
+ for (size_t j (0); j != gv.count; ++j)
+ {
+ if (const target* m = gv.members[j])
+ {
+ if (const file* ft = m->is_a<file> ())
+ {
+ // Note that a rule may only link-up its members to groups
+ // if/when matched (for example, the cli.cxx{} group). It
+ // feels harmless for us to do the linking here.
+ //
+ if (ft->group == nullptr)
+ const_cast<file*> (ft)->group = &t;
+ else
+ assert (ft->group == &t); // Sanity check.
+
+ if (collect (*ft))
+ {
+ i = files.insert (i, ft); // Insert instead of the group.
+ i++; // Stay after the group.
+ }
+ }
+ }
+ }
+ }
+ else
+ ++i;
+ }
+
+ if (op_update.operation_post != nullptr)
+ op_update.operation_post (ctx, {}, true /* inner */);
+
if (mo_perform.operation_post != nullptr)
- mo_perform.operation_post (ctx, params, update_id);
+ mo_perform.operation_post (ctx, {}, update_id);
if (mo_perform.meta_operation_post != nullptr)
- mo_perform.meta_operation_post (ctx, params);
+ mo_perform.meta_operation_post (ctx, {});
}
}
else
@@ -471,37 +662,80 @@ namespace build2
//
auto_project_env penv (rs);
- dir_path td (dist_root / dir_path (dist_package));
-
// Clean up the target directory.
//
if (rmdir_r (ctx, td, true, 2) == rmdir_status::not_empty)
fail << "unable to clean target directory " << td;
auto_rmdir rm_td (td); // Clean it up if things go bad.
- install (dist_cmd, td);
+ install (dist_cmd, ctx, td);
// Copy over all the files. Apply post-processing callbacks.
//
- const module& mod (*rs.find_module<module> (module::name));
-
prog = prog && show_progress (1 /* max_verb */);
size_t prog_percent (0);
for (size_t i (0), n (files.size ()); i != n; ++i)
{
- const file& t (*files[i].as<target> ().is_a<file> ());
+ const file& t (files[i].as<target> ().as<file> ()); // Only files.
// Figure out where this file is inside the target directory.
//
- bool src (t.dir.sub (src_root));
- dir_path dl (src ? t.dir.leaf (src_root) : t.dir.leaf (out_root));
+ // First see if the path has been remapped (unless bootstrap).
+ //
+ const path* rp (nullptr);
+ if (tgt != nullptr)
+ {
+ if ((rp = cast_null<path> (t[dist_var])) != nullptr)
+ {
+ if (rp->string () == "true") // Wouldn't be here if false.
+ rp = nullptr;
+ }
+ }
+
+ bool src;
+ path rn;
+ dir_path dl;
+ if (rp == nullptr)
+ {
+ src = t.dir.sub (src_root);
+ dl = src ? t.dir.leaf (src_root) : t.dir.leaf (out_root);
+ }
+ else
+ {
+ // Sort the remapped path into name (if any) and directory,
+ // completing the latter if relative.
+ //
+ bool n (!rp->to_directory ());
+
+ if (n)
+ {
+ if (rp->simple ())
+ {
+ fail << "expected true, false, of path in the dist variable "
+ << "value of target " << t <<
+ info << "specify ./" << *rp << " to remap the name";
+ }
+
+ rn = rp->leaf ();
+ }
+
+ dir_path rd (n ? rp->directory () : path_cast<dir_path> (*rp));
+
+ if (rd.relative ())
+ rd = t.dir / rd;
+
+ rd.normalize ();
+
+ src = rd.sub (src_root);
+ dl = src ? rd.leaf (src_root) : rd.leaf (out_root);
+ }
dir_path d (td / dl);
if (!exists (d))
- install (dist_cmd, d);
+ install (dist_cmd, ctx, d);
- path r (install (dist_cmd, t, d));
+ path r (install (dist_cmd, t, d, rn));
// See if this file is in a subproject.
//
@@ -655,60 +889,131 @@ namespace build2
// install -d <dir>
//
static void
- install (const process_path& cmd, const dir_path& d)
+ install (const process_path* cmd, context& ctx, const dir_path& d)
{
- path reld (relative (d));
+ path reld;
+ cstrings args;
- cstrings args {cmd.recall_string (), "-d"};
+ if (cmd != nullptr || verb >= 2)
+ {
+ reld = relative (d);
- args.push_back ("-m");
- args.push_back ("755");
- args.push_back (reld.string ().c_str ());
- args.push_back (nullptr);
+ args.push_back (cmd != nullptr ? cmd->recall_string () : "install");
+ args.push_back ("-d");
+ args.push_back ("-m");
+ args.push_back ("755");
+ args.push_back (reld.string ().c_str ());
+ args.push_back (nullptr);
- if (verb >= 2)
- print_process (args);
+ if (verb >= 2)
+ print_process (args);
+ }
- run (cmd, args);
+ if (cmd != nullptr)
+ run (ctx, *cmd, args, 1 /* finish_verbosity */);
+ else
+ {
+ try
+ {
+ // Note that mode has no effect on Windows, which is probably for
+ // the best.
+ //
+ try_mkdir_p (d, 0755);
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to create directory " << d << ": " << e;
+ }
+ }
}
- // install <file> <dir>
+ // install <file> <dir>[/<name>]
//
static path
- install (const process_path& cmd, const file& t, const dir_path& d)
+ install (const process_path* cmd,
+ const file& t,
+ const dir_path& d,
+ const path& n)
{
- dir_path reld (relative (d));
- path relf (relative (t.path ()));
-
- cstrings args {cmd.recall_string ()};
+ const path& f (t.path ());
+ path r (d / (n.empty () ? f.leaf () : n));
- // Preserve timestamps. This could becomes important if, for
- // example, we have pre-generated sources. Note that the
- // install-sh script doesn't support this option, while both
- // Linux and BSD install's do.
+ // Assume the file is executable if the owner has execute permission,
+ // in which case we make it executable for everyone.
//
- args.push_back ("-p");
+ bool exe ((path_perms (f) & permissions::xu) == permissions::xu);
- // Assume the file is executable if the owner has execute
- // permission, in which case we make it executable for
- // everyone.
- //
- args.push_back ("-m");
- args.push_back (
- (path_perms (t.path ()) & permissions::xu) == permissions::xu
- ? "755"
- : "644");
+ path relf, reld;
+ cstrings args;
- args.push_back (relf.string ().c_str ());
- args.push_back (reld.string ().c_str ());
- args.push_back (nullptr);
+ if (cmd != nullptr || verb >= 2)
+ {
+ relf = relative (f);
+ reld = relative (d);
- if (verb >= 2)
- print_process (args);
+ if (!n.empty ()) // Leave as just directory if no custom name.
+ reld /= n;
+
+ args.push_back (cmd != nullptr ? cmd->recall_string () : "install");
+
+ // Preserve timestamps. This could become important if, for example,
+ // we have pre-generated sources. Note that the install-sh script
+ // doesn't support this option, while both Linux and BSD install's do.
+ //
+ args.push_back ("-p");
+
+ // Assume the file is executable if the owner has execute permission,
+ // in which case we make it executable for everyone.
+ //
+ args.push_back ("-m");
+ args.push_back (exe ? "755" : "644");
+ args.push_back (relf.string ().c_str ());
+ args.push_back (reld.string ().c_str ());
+ args.push_back (nullptr);
+
+ if (verb >= 2)
+ print_process (args);
+ }
- run (cmd, args);
+ if (cmd != nullptr)
+ run (t.ctx, *cmd, args, 1 /* finish_verbosity */);
+ else
+ {
+ permissions perm (permissions::ru | permissions::wu |
+ permissions::rg |
+ permissions::ro); // 644
+ if (exe)
+ perm |= permissions::xu | permissions::xg | permissions::xo; // 755
+
+ try
+ {
+ // Note that we don't pass cpflags::overwrite_content which means
+ // this will fail if the file already exists. Since we clean up the
+ // destination directory, this will detect cases where we have
+ // multiple source files with the same distribution destination.
+ //
+ cpfile (f,
+ r,
+ cpflags::overwrite_permissions | cpflags::copy_timestamps,
+ perm);
+ }
+ catch (const system_error& e)
+ {
+ if (e.code ().category () == generic_category () &&
+ e.code ().value () == EEXIST)
+ {
+ // @@ TMP (added in 0.16.0).
+ //
+ warn << "multiple files are distributed as " << r <<
+ info << "second file is " << f <<
+ info << "this warning will become error in the future";
+ }
+ else
+ fail << "unable to copy " << f << " to " << r << ": " << e;
+ }
+ }
- return d / relf.leaf ();
+ return r;
}
static path
@@ -718,13 +1023,15 @@ namespace build2
const dir_path& dir,
const string& e)
{
+ // NOTE: similar code in bpkg (system-package-manager-archive.cxx).
+
path an (pkg + '.' + e);
// Delete old archive for good measure.
//
path ap (dir / an);
if (exists (ap, false))
- rmfile (ctx, ap);
+ rmfile (ctx, ap, 3 /* verbosity */);
// Use zip for .zip archives. Also recognize and handle a few well-known
// tar.xx cases (in case tar doesn't support -a or has other issues like
@@ -740,7 +1047,7 @@ namespace build2
if (e == "zip")
{
- // On Windows we use libarchive's bsdtar (zip is an MSYS executabales).
+ // On Windows we use libarchive's bsdtar (zip is an MSYS executable).
//
// While not explicitly stated, the compression-level option works
// for zip archives.
@@ -765,15 +1072,28 @@ namespace build2
// On Windows we use libarchive's bsdtar with auto-compression (tar
// itself and quite a few compressors are MSYS executables).
//
+ // OpenBSD tar does not support --format but it appears ustar is the
+ // default (while this is not said explicitly in tar(1), it is said in
+ // pax(1) and confirmed on the mailing list). Nor does it support -a,
+ // at least as of 7.1 but we will let this play out naturally, in case
+ // this support gets added.
+ //
+ // Note also that our long-term plan is to switch to libarchive in
+ // order to generate reproducible archives.
+ //
const char* l (nullptr); // Compression level (option).
#ifdef _WIN32
- const char* tar = "bsdtar";
+ args = {"bsdtar", "--format", "ustar"};
if (e == "tar.gz")
l = "--options=compression-level=9";
#else
- const char* tar = "tar";
+ args = {"tar"
+#ifndef __OpenBSD__
+ , "--format", "ustar"
+#endif
+ };
// For gzip it's a good idea to use -9 by default. For bzip2, -9 is
// the default. And for xz, -9 is not recommended as the default due
@@ -791,13 +1111,10 @@ namespace build2
if (c != nullptr)
{
- args = {tar,
- "--format", "ustar",
- "-cf", "-",
- pkg.c_str (),
- nullptr};
-
- i = args.size ();
+ args.push_back ("-cf");
+ args.push_back ("-");
+ args.push_back (pkg.c_str ());
+ args.push_back (nullptr); i = args.size ();
args.push_back (c);
if (l != nullptr)
args.push_back (l);
@@ -818,20 +1135,13 @@ namespace build2
}
else
#endif
- if (e == "tar")
- args = {tar,
- "--format", "ustar",
- "-cf", ap.string ().c_str (),
- pkg.c_str (),
- nullptr};
- else
{
- args = {tar,
- "--format", "ustar",
- "-a"};
-
- if (l != nullptr)
- args.push_back (l);
+ if (e != "tar")
+ {
+ args.push_back ("-a");
+ if (l != nullptr)
+ args.push_back (l);
+ }
args.push_back ("-cf");
args.push_back (ap.string ().c_str ());
@@ -851,19 +1161,20 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << args[0] << ' ' << ap;
+ print_diag (args[0], dir / dir_path (pkg), ap);
process apr;
process cpr;
- // Change the archiver's working directory to dist_root.
+ // Change the archiver's working directory to root.
+ //
+ // Note: this function is called during serial execution and so no
+ // diagnostics buffering is needed (here and below).
//
- apr = run_start (app,
+ apr = run_start (process_env (app, root),
args,
0 /* stdin */,
- (i != 0 ? -1 : 1) /* stdout */,
- true /* error */,
- root);
+ (i != 0 ? -1 : 1) /* stdout */);
// Start the compressor if required.
//
@@ -875,10 +1186,17 @@ namespace build2
out_fd.get () /* stdout */);
cpr.in_ofd.reset (); // Close the archiver's stdout on our side.
- run_finish (args.data () + i, cpr);
}
- run_finish (args.data (), apr);
+ // Delay throwing until we diagnose both ends of the pipe.
+ //
+ if (!run_finish_code (args.data (),
+ apr,
+ 1 /* verbosity */,
+ false /* omit_normal */) ||
+ !(i == 0 || run_finish_code (args.data () + i, cpr, 1, false)))
+ throw failed ();
+
out_rm.cancel ();
return ap;
@@ -897,7 +1215,7 @@ namespace build2
//
path cp (dir / cn);
if (exists (cp, false))
- rmfile (ctx, cp);
+ rmfile (ctx, cp, 3 /* verbosity */);
auto_rmfile c_rm; // Note: must come first.
auto_fd c_fd;
@@ -936,18 +1254,20 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << args[0] << ' ' << cp;
+ print_diag (args[0], ap, cp);
// Note that to only get the archive name (without the directory) in
// the output we have to run from the archive's directory.
//
- process pr (run_start (pp,
+ // Note: this function is called during serial execution and so no
+ // diagnostics buffering is needed.
+ //
+ process pr (run_start (process_env (pp, ad /* cwd */),
args,
- 0 /* stdin */,
- c_fd.get () /* stdout */,
- true /* error */,
- ad /* cwd */));
- run_finish (args, pr);
+ 0 /* stdin */,
+ c_fd.get () /* stdout */));
+
+ run_finish (args, pr, 1 /* verbosity */);
}
else
{
@@ -967,7 +1287,7 @@ namespace build2
if (verb >= 2)
text << "cat >" << cp;
else if (verb)
- text << e << "sum " << cp;
+ print_diag ((e + "sum").c_str (), ap, cp);
string c;
try
@@ -1011,6 +1331,8 @@ namespace build2
// given the prescribed semantics of adhoc (match/execute but otherwise
// ignore) is followed.
//
+ // Note that we don't need to do anything for posthoc.
+ //
if (i == include_type::excluded)
{
l5 ([&]{trace << "overriding exclusion of " << p;});
@@ -1034,12 +1356,12 @@ namespace build2
true, // bootstrap_outer
nullptr, // meta-operation pre
&dist_operation_pre,
- &load, // normal load
- &search, // normal search
- nullptr, // no match (see dist_execute()).
+ &dist_load_load,
+ &perform_search, // normal search
+ nullptr, // no match (see dist_execute()).
&dist_load_execute,
- nullptr, // operation post
- nullptr, // meta-operation post
+ nullptr, // operation post
+ nullptr, // meta-operation post
&dist_include
};
@@ -1070,7 +1392,7 @@ namespace build2
init_config (rs);
}
- void
+ static void
dist_bootstrap_search (const values&,
const scope& rs,
const scope&,
diff --git a/libbuild2/dist/rule.cxx b/libbuild2/dist/rule.cxx
index ac3d440..c63f7f3 100644
--- a/libbuild2/dist/rule.cxx
+++ b/libbuild2/dist/rule.cxx
@@ -8,6 +8,9 @@
#include <libbuild2/algorithm.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/dist/types.hxx>
+#include <libbuild2/dist/module.hxx>
+
using namespace std;
namespace build2
@@ -27,17 +30,34 @@ namespace build2
const dir_path& src_root (rs.src_path ());
const dir_path& out_root (rs.out_path ());
- // If we can, go inside see-through groups.
+ // Note that we don't go inside see-through groups since the members for
+ // dist_* may be incomplete (or even bogus, e.g., the "representative
+ // sample"). Instead, for see-through groups our plan is as follows:
+ //
+ // 1. Here we match them as groups (so that we still match all their
+ // prerequisites).
+ //
+ // 2. In dist_project() we collect them along with files after dist_*
+ // but before perform_update. Here we also skip files that are
+ // members of see-through groups (which we may still get).
//
- for (prerequisite_member pm:
- group_prerequisite_members (a, t, members_mode::maybe))
+ // 3. During perform_update we collect all the see-through group
+ // members, similar to files on step (2).
+ //
+ for (const prerequisite& p: group_prerequisites (t))
{
// Note: no exclusion tests, we want all of them (and see also the
- // dist_include() override).
+ // dist_include() override). But if we don't ignore post hoc ones
+ // here, we will end up with a cycle (they will still be handled
+ // by the post-pass).
+ //
+ lookup l; // Ignore any operation-specific values.
+ if (include (a, t, p, &l) == include_type::posthoc)
+ continue;
// Skip prerequisites imported from other projects.
//
- if (pm.proj ())
+ if (p.proj)
continue;
// We used to always search and match but that resulted in the
@@ -56,18 +76,18 @@ namespace build2
// @@ Note that this is still an issue in a custom dist rule.
//
const target* pt (nullptr);
- if (pm.is_a<file> ())
+ if (p.is_a<file> ())
{
- pt = pm.load ();
+ pt = p.target.load ();
if (pt == nullptr)
{
- const prerequisite& p (pm.prerequisite);
-
// Search for an existing target or existing file in src.
//
+ // Note: see also similar code in match_postponed() below.
+ //
const prerequisite_key& k (p.key ());
- pt = k.tk.type->search (t, k);
+ pt = k.tk.type->search (t.ctx, &t, k);
if (pt == nullptr)
{
@@ -79,15 +99,23 @@ namespace build2
!p.dir.sub (out_root))
continue;
- fail << "prerequisite " << k << " is not existing source file "
- << "nor known output target" << endf;
+ // This can be order-dependent: for example libs{} prerequisite
+ // may be unknown because we haven't matched the lib{} group
+ // yet. So we postpone this for later (see match_postponed()).
+ //
+ const module& mod (*rs.find_module<module> (module::name));
+
+ mlock l (mod.postponed.mutex);
+ mod.postponed.list.push_back (
+ postponed_prerequisite {a, t, p, t.state[a].rule->first});
+ continue;
}
search_custom (p, *pt); // Cache.
}
}
else
- pt = &pm.search (t);
+ pt = &search (t, p);
// Don't match targets that are outside of our project.
//
@@ -97,5 +125,39 @@ namespace build2
return noop_recipe; // We will never be executed.
}
+
+ void rule::
+ match_postponed (const postponed_prerequisite& pp)
+ {
+ action a (pp.action);
+ const target& t (pp.target);
+ const prerequisite& p (pp.prereq);
+
+ const prerequisite_key& k (p.key ());
+ const target* pt (k.tk.type->search (t.ctx, &t, k));
+
+ if (pt == nullptr)
+ {
+ // Note that we do lose the diag frame that we normally get when
+ // failing during match. So let's mention the target/rule manually.
+ //
+ fail << "prerequisite " << k << " is not existing source file nor "
+ << "known output target" <<
+ info << "while applying rule " << pp.rule << " to " << diag_do (a, t);
+ }
+
+ search_custom (p, *pt); // Cache.
+
+ // It's theoretically possible that the target gets entered but nobody
+ // else depends on it but us. So we need to make sure it's matched
+ // (since it, in turn, can pull in other targets). Note that this could
+ // potentially add new postponed prerequisites to the list.
+ //
+ if (!pt->matched (a))
+ {
+ if (pt->dir.sub (t.root_scope ().out_path ()))
+ match_direct_sync (a, *pt);
+ }
+ }
}
}
diff --git a/libbuild2/dist/rule.hxx b/libbuild2/dist/rule.hxx
index a864015..69ab3d9 100644
--- a/libbuild2/dist/rule.hxx
+++ b/libbuild2/dist/rule.hxx
@@ -11,6 +11,10 @@
#include <libbuild2/action.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/dist/types.hxx>
+
+#include <libbuild2/export.hxx>
+
namespace build2
{
namespace dist
@@ -19,20 +23,28 @@ namespace build2
//
// A custom rule (usually the same as perform_update) may be necessary to
// establish group links (so that we see the dist variable set on a group)
- // or to see through non-see-through groups (like lib{}; see the
- // bin::lib_rule for an example). Note that in the latter case the rule
- // should "see" all its members for the dist case.
+ // or to see through non-see-through groups (like lib{}, obj{}; see rule
+ // in the bin module for an example). Note that in the latter case the
+ // rule should "see" all its members for the dist case.
//
- class rule: public simple_rule
+ class LIBBUILD2_SYMEXPORT rule: public simple_rule
{
public:
rule () {}
+ // Always matches (returns true).
+ //
virtual bool
match (action, target&) const override;
+ // Matches all the prerequisites (including from group) and returns
+ // noop_recipe (which will never be executed).
+ //
virtual recipe
apply (action, target&) const override;
+
+ static void
+ match_postponed (const postponed_prerequisite&);
};
}
}
diff --git a/libbuild2/dist/types.hxx b/libbuild2/dist/types.hxx
new file mode 100644
index 0000000..b833951
--- /dev/null
+++ b/libbuild2/dist/types.hxx
@@ -0,0 +1,41 @@
+// file : libbuild2/dist/types.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_DIST_TYPES_HXX
+#define LIBBUILD2_DIST_TYPES_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/forward.hxx>
+
+#include <libbuild2/prerequisite-key.hxx>
+
+namespace build2
+{
+ namespace dist
+ {
+ // List of prerequisites that could not be searched to a target and were
+ // postponed for later re-search. This can happen, for example, because a
+ // prerequisite would resolve to a member of a group that hasn't been
+ // matched yet (for example, libs{} of lib{}). See rule::apply() for
+ // details.
+ //
+ // Note that we are using list instead of vector because new elements can
+ // be added at the end while we are iterating over the list.
+ //
+ struct postponed_prerequisite
+ {
+ build2::action action;
+ reference_wrapper<const build2::target> target;
+ reference_wrapper<const prerequisite> prereq;
+ string rule;
+ };
+
+ struct postponed_prerequisites
+ {
+ build2::mutex mutex;
+ build2::list<postponed_prerequisite> list;
+ };
+ }
+}
+
+#endif // LIBBUILD2_DIST_TYPES_HXX
diff --git a/libbuild2/dump.cxx b/libbuild2/dump.cxx
index 14078da..9b7f5b1 100644
--- a/libbuild2/dump.cxx
+++ b/libbuild2/dump.cxx
@@ -3,6 +3,11 @@
#include <libbuild2/dump.hxx>
+#ifndef BUILD2_BOOTSTRAP
+# include <iostream> // cout
+# include <unordered_map>
+#endif
+
#include <libbuild2/rule.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
@@ -11,6 +16,7 @@
#include <libbuild2/diagnostics.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -49,10 +55,321 @@ namespace build2
if (v)
{
names storage;
- os << (a ? " " : "") << reverse (v, storage);
+ os << (a ? " " : "") << reverse (v, storage, true /* reduce */);
+ }
+ }
+
+#ifndef BUILD2_BOOTSTRAP
+
+ static string
+ quoted_target_name (const names_view& ns, bool rel)
+ {
+ ostringstream os;
+ stream_verb (os, stream_verbosity (rel ? 0 : 1, 0));
+ to_stream (os, ns, quote_mode::effective, '@');
+ return os.str ();
+ }
+
+ static void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const names_view& ns,
+ bool rel)
+ {
+ j.value (quoted_target_name (ns, rel));
+ }
+
+ static string
+ quoted_target_name (const target& t, bool rel)
+ {
+ names ns (t.as_name ()); // Note: potentially adds an extension.
+
+ // Don't print target names relative if the target is in src and out!=src.
+ // Failing that, we will end up with pointless ../../../... paths.
+ //
+ // It may also seem that we can omit @-qualification in this case, since
+ // it is implied by the containing scope. However, keep in mind that the
+ // target may not be directly in this scope. We could make it relative,
+ // though.
+ //
+ if (rel && !t.out.empty ())
+ {
+ // Make the out relative ourselves and then disable relative for src.
+ //
+ dir_path& o (ns.back ().dir);
+ o = relative (o); // Note: may return empty path.
+ if (o.empty ())
+ o = dir_path (".");
+
+ rel = false;
+ }
+
+ return quoted_target_name (ns, rel);
+ }
+
+ void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const target& t,
+ bool rel)
+ {
+ j.value (quoted_target_name (t, rel));
+ }
+
+ using target_name_cache = unordered_map<const target*, string>;
+
+ static void
+ dump_quoted_target_name (json::stream_serializer& j,
+ const target& t,
+ target_name_cache& tc)
+ {
+ auto i (tc.find (&t));
+ if (i == tc.end ())
+ i = tc.emplace (&t, quoted_target_name (t, false /* relative */)).first;
+
+ j.value (i->second);
+ }
+
+ void
+ dump_display_target_name (json::stream_serializer& j,
+ const target& t,
+ bool rel)
+ {
+ // Note: see the quoted version above for details.
+
+ target_key tk (t.key ());
+
+ dir_path o;
+ if (rel && !tk.out->empty ())
+ {
+ o = relative (*tk.out);
+ if (o.empty ())
+ o = dir_path (".");
+ tk.out = &o;
+
+ rel = false;
}
+
+ // Change the stream verbosity to print relative if requested and omit
+ // extension.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (rel ? 0 : 1, 0));
+ os << tk;
+ j.value (os.str ());
}
+ static void
+ dump_value (json::stream_serializer& j, const value& v)
+ {
+ // Hints.
+ //
+ // Note that the pair hint should only be used for simple names.
+ //
+ optional<bool> h_array;
+ optional<bool> h_pair; // true/false - second/first is optional.
+
+ if (v.null)
+ {
+ j.value (nullptr);
+ return;
+ }
+ else if (v.type != nullptr)
+ {
+ const value_type& t (*v.type);
+
+ auto s_array = [&j] (const auto& vs)
+ {
+ j.begin_array ();
+ for (const auto& v: vs) j.value (v);
+ j.end_array ();
+ };
+
+ auto s_array_string = [&j] (const auto& vs)
+ {
+ j.begin_array ();
+ for (const auto& v: vs) j.value (v.string ());
+ j.end_array ();
+ };
+
+ // Note: check in the derived-first order.
+ //
+ if (t.is_a<bool> ()) j.value (v.as<bool> ());
+ else if (t.is_a<int64_t> ()) j.value (v.as<int64_t> ());
+ else if (t.is_a<uint64_t> ()) j.value (v.as<uint64_t> ());
+ else if (t.is_a<string> ()) j.value (v.as<string> ());
+ else if (t.is_a<path> ()) j.value (v.as<path> ().string ());
+ else if (t.is_a<dir_path> ()) j.value (v.as<dir_path> ().string ());
+ else if (t.is_a<target_triplet> ()) j.value (v.as<target_triplet> ().string ());
+ else if (t.is_a<project_name> ()) j.value (v.as<project_name> ().string ());
+ else if (t.is_a<int64s> ()) s_array (v.as<int64s> ());
+ else if (t.is_a<uint64s> ()) s_array (v.as<uint64s> ());
+ else if (t.is_a<strings> ()) s_array (v.as<strings> ());
+ else if (t.is_a<paths> ()) s_array_string (v.as<paths> ());
+ else if (t.is_a<dir_paths> ()) s_array_string (v.as<dir_paths> ());
+ else
+ {
+ // Note: check in the derived-first order.
+ //
+ if (t.is_a<name> ()) h_array = false;
+ else if (t.is_a<name_pair> ())
+ {
+ h_array = false;
+ h_pair = true;
+ }
+ else if (t.is_a<process_path_ex> ())
+ {
+ // Decide on array dynamically.
+ h_pair = true;
+ }
+ else if (t.is_a<process_path> ())
+ {
+ h_array = false;
+ h_pair = true;
+ }
+ else if (t.is_a<cmdline> () ||
+ t.is_a<vector<name>> ())
+ {
+ h_array = true;
+ }
+ else if (t.is_a<vector<pair<string, string>>> () ||
+ t.is_a<vector<pair<string, optional<string>>>> () ||
+ t.is_a<vector<pair<string, optional<bool>>>> () ||
+ t.is_a<map<string, string>> () ||
+ t.is_a<map<string, optional<string>>> () ||
+ t.is_a<map<string, optional<bool>>> () ||
+ t.is_a<map<project_name, dir_path>> ())
+ {
+ h_array = true;
+ h_pair = true;
+ }
+ else if (t.is_a<map<optional<string>, string>> () ||
+ t.is_a<vector<pair<optional<string>, string>>> ())
+ {
+ h_array = true;
+ h_pair = false;
+ }
+
+ goto fall_through;
+ }
+
+ return;
+
+ fall_through:
+ ;
+ }
+
+ names storage;
+ names_view ns (reverse (v, storage, true /* reduce */));
+
+ if (ns.empty ())
+ {
+ // When it comes to representing an empty value, our options are: empty
+ // array ([]), empty object ({}), or an absent member. The latter feels
+ // closer to null than empty, so that's out. After some experimentation,
+ // it feels the best choice is to use array unless we know for sure it
+ // is not, in which case we use an object if it's a pair and empty
+ // string otherwise (the empty string makes sense because we serialize
+ // complex names as target names; see below).
+ //
+ if (!h_array || *h_array)
+ {
+ j.begin_array ();
+ j.end_array ();
+ }
+ else
+ {
+ if (h_pair)
+ {
+ j.begin_object ();
+ j.end_object ();
+ }
+ else
+ j.value ("");
+ }
+ }
+ else
+ {
+ if (!h_array)
+ h_array = ns.size () > 2 || (ns.size () == 2 && !ns.front ().pair);
+
+ if (*h_array)
+ j.begin_array ();
+
+ // While it may be tempting to try to provide a heterogeneous array
+ // (i.e., all strings, all objects, all pairs), in case of pairs we
+ // actually don't know whether a non-pair element is first or second
+ // (it's up to interpretation; though we do hint which one is optional
+ // for typed values above). So we serialize each name in its most
+ // appropriate form.
+ //
+ auto simple = [] (const name& n)
+ {
+ return n.simple () || n.directory () || n.file ();
+ };
+
+ auto s_simple = [&j] (const name& n)
+ {
+ if (n.simple ())
+ j.value (n.value);
+ else if (n.directory ())
+ j.value (n.dir.string ());
+ else if (n.file ())
+ {
+ // Note: both must be present due to earlier checks.
+ //
+ j.value ((n.dir / n.value).string ());
+ }
+ else
+ return false;
+
+ return true;
+ };
+
+ for (auto i (ns.begin ()), e (ns.end ()); i != e; )
+ {
+ const name& l (*i++);
+ const name* r (l.pair ? &*i++ : nullptr);
+
+ optional<bool> hp (h_pair);
+
+ if (!hp && r != nullptr && simple (l) && simple (*r))
+ hp = true;
+
+ if (hp)
+ {
+ // Pair of simple names.
+ //
+ j.begin_object ();
+
+ if (r != nullptr)
+ {
+ j.member_name ("first"); s_simple (l);
+ j.member_name ("second"); s_simple (*r);
+ }
+ else
+ {
+ j.member_name (*hp ? "first" : "second"); s_simple (l);
+ }
+
+ j.end_object ();
+ }
+ else if (r == nullptr && s_simple (l))
+ ;
+ else
+ {
+ // If complex name (or pair thereof), then assume a target name.
+ //
+ dump_quoted_target_name (j,
+ names_view (&l, r != nullptr ? 2 : 1),
+ false /* relative */);
+ }
+ }
+
+ if (*h_array)
+ j.end_array ();
+ }
+ }
+#endif
+
enum class variable_kind {scope, tt_pat, target, rule, prerequisite};
static void
@@ -83,6 +400,10 @@ namespace build2
const variable& var (p.first);
const value& v (p.second);
+ // On one hand it might be helpful to print the visibility. On the
+ // other, it is always specified which means there will be a lot of
+ // noise. So probably not.
+ //
if (var.type != nullptr)
os << '[' << var.type->name << "] ";
@@ -123,6 +444,68 @@ namespace build2
}
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_variable (json::stream_serializer& j,
+ const variable_map& vm,
+ const variable_map::const_iterator& vi,
+ const scope& s,
+ variable_kind k)
+ {
+ // Note: see the buildfile version above for comments.
+
+ assert (k != variable_kind::tt_pat); // TODO
+
+ const auto& p (*vi);
+ const variable& var (p.first);
+ const value& v (p.second);
+
+ lookup l (v, var, vm);
+ if (k != variable_kind::prerequisite)
+ {
+ if (var.override ())
+ return; // Ignore.
+
+ if (var.overrides != nullptr)
+ {
+ l = s.lookup_override (
+ var,
+ make_pair (l, 1),
+ k == variable_kind::target || k == variable_kind::rule,
+ k == variable_kind::rule).first;
+
+ assert (l.defined ()); // We at least have the original.
+ }
+ }
+
+ // Note that we do not distinguish between variable/value type.
+ //
+ // An empty value of a non-array type is represented as an empty object
+ // ({}).
+ //
+#if 0
+ struct variable
+ {
+ string name;
+ optional<string> type;
+ json_value value; // string|number|boolean|null|object|array
+ };
+#endif
+
+ j.begin_object ();
+
+ j.member ("name", var.name);
+
+ if (l->type != nullptr)
+ j.member ("type", l->type->name);
+
+ j.member_name ("value");
+ dump_value (j, *l);
+
+ j.end_object ();
+ }
+#endif
+
static void
dump_variables (ostream& os,
string& ind,
@@ -139,6 +522,20 @@ namespace build2
}
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_variables (json::stream_serializer& j,
+ const variable_map& vars,
+ const scope& s,
+ variable_kind k)
+ {
+ for (auto i (vars.begin ()), e (vars.end ()); i != e; ++i)
+ {
+ dump_variable (j, vars, i, s, k);
+ }
+ }
+#endif
+
// Dump target type/pattern-specific variables.
//
static void
@@ -208,7 +605,7 @@ namespace build2
for (action a: r.actions)
os << ' ' << re.meta_operations[a.meta_operation ()]->name <<
- '(' << re.operations[a.operation ()]->name << ')';
+ '(' << re.operations[a.operation ()].info->name << ')';
os << endl;
r.dump_text (os, ind);
@@ -244,10 +641,27 @@ namespace build2
}
}
+ // Similar to target::matched() but for the load phase.
+ //
+ static inline bool
+ matched (const target& t, action a)
+ {
+ // Note: running serial and task_count is 0 before any operation has
+ // started.
+ //
+ if (size_t c = t[a].task_count.load (memory_order_relaxed))
+ {
+ if (c == t.ctx.count_applied () || c == t.ctx.count_executed ())
+ return true;
+ }
+
+ return false;
+ }
+
static void
- dump_target (optional<action> a,
- ostream& os,
+ dump_target (ostream& os,
string& ind,
+ optional<action> a,
const target& t,
const scope& s,
bool rel)
@@ -256,6 +670,9 @@ namespace build2
// scope. To achieve this we are going to temporarily lower the stream
// path verbosity to level 0.
//
+ // @@ Not if in src and out != src? Otherwise end up with ../../../...
+ // See JSON version for the state of the art.
+ //
stream_verbosity osv, nsv;
if (rel)
{
@@ -289,7 +706,7 @@ namespace build2
os << "rule_hint=";
if (v.operation != default_id)
- os << s.root_scope ()->root_extra->operations[v.operation]->name
+ os << s.root_scope ()->root_extra->operations[v.operation].info->name
<< '@';
os << v.hint;
@@ -317,32 +734,26 @@ namespace build2
// If the target has been matched to a rule, we also print resolved
// prerequisite targets.
//
- // Note: running serial and task_count is 0 before any operation has
- // started.
- //
const prerequisite_targets* pts (nullptr);
{
action inner; // @@ Only for the inner part of the action currently.
- if (size_t c = t[inner].task_count.load (memory_order_relaxed))
+ if (matched (t, inner))
{
- if (c == t.ctx.count_applied () || c == t.ctx.count_executed ())
- {
- pts = &t.prerequisite_targets[inner];
+ pts = &t.prerequisite_targets[inner];
- bool f (false);
- for (const target* pt: *pts)
+ bool f (false);
+ for (const target* pt: *pts)
+ {
+ if (pt != nullptr)
{
- if (pt != nullptr)
- {
- f = true;
- break;
- }
+ f = true;
+ break;
}
-
- if (!f)
- pts = nullptr;
}
+
+ if (!f)
+ pts = nullptr;
}
}
@@ -506,10 +917,318 @@ namespace build2
stream_verb (os, osv);
}
+#ifndef BUILD2_BOOTSTRAP
static void
- dump_scope (optional<action> a,
- ostream& os,
+ dump_target (json::stream_serializer& j,
+ optional<action> a,
+ const target& t,
+ const scope& s,
+ bool rel,
+ target_name_cache& tcache)
+ {
+ // Note: see the buildfile version above for comments.
+
+ // Note that the target name (and display_name) are relative to the
+ // containing scope (if any).
+ //
+#if 0
+ struct prerequisite
+ {
+ string name; // Quoted/qualified name.
+ string type;
+ vector<variable> variables; // Prerequisite variables.
+ };
+
+ struct loaded_target
+ {
+ string name; // Quoted/qualified name.
+ string display_name;
+ string type; // Target type.
+ //string declaration;
+ optional<string> group; // Quoted/qualified group target name.
+
+ vector<variable> variables; // Target variables.
+
+ vector<prerequisite> prerequisites;
+ };
+
+ // @@ TODO: target attributes (rule_hint)
+
+ struct prerequisite_target
+ {
+ string name; // Target name (always absolute).
+ string type;
+ bool adhoc;
+ };
+
+ struct operation_state
+ {
+ string rule; // null if direct recipe match
+
+ optional<string> state; // unchanged|changed|group
+
+ vector<variable> variables; // Rule variables.
+
+ vector<prerequisite_target> prerequisite_targets;
+ };
+
+ struct matched_target
+ {
+ string name;
+ string display_name;
+ string type;
+ //string declaration;
+ optional<string> group;
+
+ optional<path> path; // Absent if not path-based target, not assigned.
+
+ vector<variable> variables;
+
+ optional<operation_state> outer_operation; // null if not matched.
+ operation_state inner_operation; // null if not matched.
+ };
+#endif
+
+ j.begin_object ();
+
+ j.member_name ("name");
+ dump_quoted_target_name (j, t, rel /* relative */);
+
+ j.member_name ("display_name");
+ dump_display_target_name (j, t, rel /* relative */);
+
+ j.member ("type", t.type ().name);
+
+ // @@ This value currently doesn't make much sense:
+ //
+ // - why are all the system headers prereq-new?
+ //
+ // - why is synthesized obje{} prereq-new?
+ //
+#if 0
+ {
+ const char* v (nullptr);
+ switch (t.decl)
+ {
+ case target_decl::prereq_new: v = "prerequisite-new"; break;
+ case target_decl::prereq_file: v = "prerequisite-file"; break;
+ case target_decl::implied: v = "implied"; break;
+ case target_decl::real: v = "real"; break;
+ }
+ j.member ("declaration", v);
+ }
+#endif
+
+ if (t.group != nullptr)
+ {
+ j.member_name ("group");
+ dump_quoted_target_name (j, *t.group, tcache);
+ }
+
+ if (a)
+ {
+ const string* v (nullptr);
+
+ if (t.is_a<dir> () || t.is_a<fsdir> ())
+ {
+ v = &t.dir.string ();
+ }
+ else if (const auto* pt = t.is_a<path_target> ())
+ {
+ const path& p (pt->path ());
+
+ if (!p.empty ())
+ v = &p.string ();
+ }
+
+ if (v != nullptr)
+ j.member ("path", *v);
+ }
+
+ // Target variables.
+ //
+ if (!t.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, t.vars, s, variable_kind::target);
+ j.end_array ();
+ }
+
+ // Prerequisites.
+ //
+ if (!a)
+ {
+ const prerequisites& ps (t.prerequisites ());
+
+ if (!ps.empty ())
+ {
+ j.member_begin_array ("prerequisites");
+
+ for (const prerequisite& p: ps)
+ {
+ j.begin_object ();
+
+ {
+ // Cobble together an equivalent of dump_quoted_target_name().
+ //
+ prerequisite_key pk (p.key ());
+ target_key& tk (pk.tk);
+
+ // It's possible that the containing scope differs from
+ // prerequisite's. This, for example, happens when we copy the
+ // prerequisite for a synthesized obj{} dependency that happens to
+ // be in a subdirectory, as in exe{foo}:src/cxx{foo}. In this
+ // case, we need to rebase relative paths to the containing scope.
+ //
+ dir_path d, o;
+ if (p.scope != s)
+ {
+ if (tk.out->empty ())
+ {
+ if (tk.dir->relative ())
+ {
+ d = (p.scope.out_path () / *tk.dir).relative (s.out_path ());
+ tk.dir = &d;
+ }
+ }
+ else
+ {
+ if (tk.dir->relative ())
+ {
+ d = (p.scope.src_path () / *tk.dir).relative (s.src_path ());
+ tk.dir = &d;
+ }
+
+ if (tk.out->relative ())
+ {
+ o = (p.scope.out_path () / *tk.out).relative (s.out_path ());
+ if (o.empty ())
+ o = dir_path (".");
+ tk.out = &o;
+ }
+ }
+ }
+
+ // If prerequisite paths are absolute, keep them absolute.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (1, 0));
+
+ if (pk.proj)
+ os << *pk.proj << '%';
+
+ to_stream (os, pk.tk.as_name (), quote_mode::effective, '@');
+
+ j.member ("name", os.str ());
+ }
+
+ j.member ("type", p.type.name);
+
+ if (!p.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, p.vars, s, variable_kind::prerequisite);
+ j.end_array ();
+ }
+
+ j.end_object ();
+ }
+
+ j.end_array ();
+ }
+ }
+ else
+ {
+ // Matched rules and their state (prerequisite_targets, vars, etc).
+ //
+ auto dump_opstate = [&tcache, &j, &s, &t] (action a)
+ {
+ const target::opstate& o (t[a]);
+
+ j.begin_object ();
+
+ j.member ("rule", o.rule != nullptr ? o.rule->first.c_str () : nullptr);
+
+ // It feels natural to omit the unknown state, as if it corresponded
+ // to absent in optional<target_state>.
+ //
+ if (o.state != target_state::unknown)
+ {
+ assert (o.state == target_state::unchanged ||
+ o.state == target_state::changed ||
+ o.state == target_state::group);
+
+ j.member ("state", to_string (o.state));
+ }
+
+ if (!o.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, o.vars, s, variable_kind::rule);
+ j.end_array ();
+ }
+
+ {
+ bool first (true);
+ for (const prerequisite_target& pt: t.prerequisite_targets[a])
+ {
+ if (pt.target == nullptr)
+ continue;
+
+ if (first)
+ {
+ j.member_begin_array ("prerequisite_targets");
+ first = false;
+ }
+
+ j.begin_object ();
+
+ j.member_name ("name");
+ dump_quoted_target_name (j, *pt.target, tcache);
+
+ j.member ("type", pt.target->type ().name);
+
+ if (pt.adhoc ())
+ j.member ("adhoc", true);
+
+ j.end_object ();
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ j.end_object ();
+ };
+
+ if (a->outer ())
+ {
+ j.member_name ("outer_operation");
+ if (matched (t, *a))
+ dump_opstate (*a);
+ else
+ j.value (nullptr);
+ }
+
+ {
+ action ia (a->inner_action ());
+
+ j.member_name ("inner_operation");
+ if (matched (t, ia))
+ dump_opstate (ia);
+ else
+ j.value (nullptr);
+ }
+ }
+
+ j.end_object ();
+ }
+#endif
+
+ static void
+ dump_scope (ostream& os,
string& ind,
+ optional<action> a,
scope_map::const_iterator& i,
bool rel)
{
@@ -584,21 +1303,25 @@ namespace build2
// disabled amalgamation will be printed directly inside the global
// scope).
//
- for (auto e (p.ctx.scopes.end ());
- (i != e &&
- i->second.front () != nullptr &&
- i->second.front ()->parent_scope () == &p); )
+ for (auto e (p.ctx.scopes.end ()); i != e; )
{
- if (vb || rb || sb)
+ if (i->second.front () == nullptr)
+ ++i; // Skip over src paths.
+ else if (i->second.front ()->parent_scope () != &p)
+ break; // Moved past our parent.
+ else
{
- os << endl;
- vb = rb = false;
- }
+ if (vb || rb || sb)
+ {
+ os << endl;
+ vb = rb = false;
+ }
- os << endl; // Extra newline between scope blocks.
+ os << endl; // Extra newline between scope blocks.
- dump_scope (a, os, ind, i, true /* relative */);
- sb = true;
+ dump_scope (os, ind, a, i, true /* relative */);
+ sb = true;
+ }
}
// Targets.
@@ -620,7 +1343,7 @@ namespace build2
}
os << endl; // Extra newline between targets.
- dump_target (a, os, ind, t, p, true /* relative */);
+ dump_target (os, ind, a, t, p, true /* relative */);
tb = true;
}
@@ -631,45 +1354,245 @@ namespace build2
<< ind << '}';
}
+#ifndef BUILD2_BOOTSTRAP
+ static void
+ dump_scope (json::stream_serializer& j,
+ optional<action> a,
+ scope_map::const_iterator& i,
+ bool rel,
+ target_name_cache& tcache)
+ {
+ // Note: see the buildfile version above for additional comments.
+
+ const scope& p (*i->second.front ());
+ const dir_path& d (i->first);
+ ++i;
+
+#if 0
+ struct scope
+ {
+ // The out_path member is relative to the parent scope. It is empty for
+ // the special global scope. The src_path member is absent if the same
+ // as out_path (in-source build or scope outside of project).
+ //
+ string out_path;
+ optional<string> src_path;
+
+ vector<variable> variables; // Non-type/pattern scope variables.
+
+ vector<scope> scopes; // Immediate children.
+
+ vector<loaded_target|matched_target> targets;
+ };
+#endif
+
+ j.begin_object ();
+
+ if (d.empty ())
+ j.member ("out_path", ""); // Global scope.
+ else
+ {
+ const dir_path& rd (rel ? relative (d) : d);
+ j.member ("out_path", rd.empty () ? string (".") : rd.string ());
+
+ if (!p.out_eq_src ())
+ j.member ("src_path", p.src_path ().string ());
+ }
+
+ const dir_path* orb (relative_base);
+ relative_base = &d;
+
+ // Scope variables.
+ //
+ if (!p.vars.empty ())
+ {
+ j.member_begin_array ("variables");
+ dump_variables (j, p.vars, p, variable_kind::scope);
+ j.end_array ();
+ }
+
+ // Nested scopes of which we are an immediate parent.
+ //
+ {
+ bool first (true);
+ for (auto e (p.ctx.scopes.end ()); i != e; )
+ {
+ if (i->second.front () == nullptr)
+ ++i;
+ else if (i->second.front ()->parent_scope () != &p)
+ break;
+ else
+ {
+ if (first)
+ {
+ j.member_begin_array ("scopes");
+ first = false;
+ }
+
+ dump_scope (j, a, i, true /* relative */, tcache);
+ }
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ // Targets.
+ //
+ {
+ bool first (true);
+ for (const auto& pt: p.ctx.targets)
+ {
+ const target& t (*pt);
+
+ if (&p != &t.base_scope ()) // @@ PERF
+ continue;
+
+ // Skip targets that haven't been matched for this action.
+ //
+ if (a)
+ {
+ if (!(matched (t, a->inner_action ()) ||
+ (a->outer () && matched (t, *a))))
+ continue;
+ }
+
+ if (first)
+ {
+ j.member_begin_array ("targets");
+ first = false;
+ }
+
+ dump_target (j, a, t, p, true /* relative */, tcache);
+ }
+
+ if (!first)
+ j.end_array ();
+ }
+
+ relative_base = orb;
+ j.end_object ();
+ }
+#endif
+
void
- dump (const context& c, optional<action> a)
+ dump (const context& c, optional<action> a, dump_format fmt)
{
auto i (c.scopes.begin ());
assert (i->second.front () == &c.global_scope);
- // We don't lock diag_stream here as dump() is supposed to be called from
- // the main thread prior/after to any other threads being spawned.
- //
- string ind;
- ostream& os (*diag_stream);
- dump_scope (a, os, ind, i, false /* relative */);
- os << endl;
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ // We don't lock diag_stream here as dump() is supposed to be called
+ // from the main thread prior/after to any other threads being
+ // spawned.
+ //
+ string ind;
+ ostream& os (*diag_stream);
+ dump_scope (os, ind, a, i, false /* relative */);
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+ dump_scope (j, a, i, false /* relative */, tc);
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
void
- dump (const scope& s, const char* cind)
+ dump (const scope* s, optional<action> a, dump_format fmt, const char* cind)
{
- const scope_map& m (s.ctx.scopes);
- auto i (m.find_exact (s.out_path ()));
- assert (i != m.end () && i->second.front () == &s);
+ scope_map::const_iterator i;
+ if (s != nullptr)
+ {
+ const scope_map& m (s->ctx.scopes);
+ i = m.find_exact (s->out_path ());
+ assert (i != m.end () && i->second.front () == s);
+ }
- string ind (cind);
- ostream& os (*diag_stream);
- dump_scope (nullopt /* action */, os, ind, i, false /* relative */);
- os << endl;
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ string ind (cind);
+ ostream& os (*diag_stream);
+
+ if (s != nullptr)
+ dump_scope (os, ind, a, i, false /* relative */);
+ else
+ os << ind << "<no known scope to dump>";
+
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+
+ if (s != nullptr)
+ dump_scope (j, a, i, false /* relative */, tc);
+ else
+ j.value (nullptr);
+
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
void
- dump (const target& t, const char* cind)
+ dump (const target* t, optional<action> a, dump_format fmt, const char* cind)
{
- string ind (cind);
- ostream& os (*diag_stream);
- dump_target (nullopt /* action */,
- os,
- ind,
- t,
- t.base_scope (),
- false /* relative */);
- os << endl;
+ const scope* bs (t != nullptr ? &t->base_scope () : nullptr);
+
+ switch (fmt)
+ {
+ case dump_format::buildfile:
+ {
+ string ind (cind);
+ ostream& os (*diag_stream);
+
+ if (t != nullptr)
+ dump_target (os, ind, a, *t, *bs, false /* relative */);
+ else
+ os << ind << "<no known target to dump>";
+
+ os << endl;
+ break;
+ }
+ case dump_format::json:
+ {
+#ifndef BUILD2_BOOTSTRAP
+ target_name_cache tc;
+ json::stream_serializer j (cout, 0 /* indent */);
+
+ if (t != nullptr)
+ dump_target (j, a, *t, *bs, false /* relative */, tc);
+ else
+ j.value (nullptr);
+
+ cout << endl;
+#else
+ assert (false);
+#endif
+ break;
+ }
+ }
}
}
diff --git a/libbuild2/dump.hxx b/libbuild2/dump.hxx
index 6ec6944..1a1a080 100644
--- a/libbuild2/dump.hxx
+++ b/libbuild2/dump.hxx
@@ -4,6 +4,10 @@
#ifndef LIBBUILD2_DUMP_HXX
#define LIBBUILD2_DUMP_HXX
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
@@ -14,18 +18,40 @@
namespace build2
{
+ enum class dump_format {buildfile, json};
+
// Dump the build state to diag_stream. If action is specified, then assume
// rules have been matched for this action and dump action-specific
// information (like rule-specific variables).
//
+ // If scope or target is NULL, then assume not found and write a format-
+ // appropriate indication.
+ //
+ LIBBUILD2_SYMEXPORT void
+ dump (const context&, optional<action>, dump_format);
+
LIBBUILD2_SYMEXPORT void
- dump (const context&, optional<action> = nullopt);
+ dump (const scope*, optional<action>, dump_format, const char* ind = "");
LIBBUILD2_SYMEXPORT void
- dump (const scope&, const char* ind = "");
+ dump (const target*, optional<action>, dump_format, const char* ind = "");
+#ifndef BUILD2_BOOTSTRAP
+ // Dump (effectively) quoted target name, optionally relative (to the out
+ // tree).
+ //
+ LIBBUILD2_SYMEXPORT void
+ dump_quoted_target_name (butl::json::stream_serializer&,
+ const target&,
+ bool relative = false);
+
+ // Dump display target name, optionally relative (to the out tree).
+ //
LIBBUILD2_SYMEXPORT void
- dump (const target&, const char* ind = "");
+ dump_display_target_name (butl::json::stream_serializer&,
+ const target&,
+ bool relative = false);
+#endif
}
#endif // LIBBUILD2_DUMP_HXX
diff --git a/libbuild2/dyndep.cxx b/libbuild2/dyndep.cxx
index 61fa8cb..68260fb 100644
--- a/libbuild2/dyndep.cxx
+++ b/libbuild2/dyndep.cxx
@@ -5,6 +5,7 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/search.hxx>
#include <libbuild2/context.hxx>
#include <libbuild2/algorithm.hxx>
#include <libbuild2/filesystem.hxx>
@@ -56,9 +57,45 @@ namespace build2
return r;
}
+ // Check if the specified prerequisite is updated during match by any other
+ // prerequisites of the specified target, recursively.
+ //
+ static bool
+ updated_during_match (action a, const target& t, size_t pts_n,
+ const target& pt)
+ {
+ const auto& pts (t.prerequisite_targets[a]);
+
+ for (size_t i (0); i != pts_n; ++i)
+ {
+ const prerequisite_target& p (pts[i]);
+
+ // If include_target flag is specified, then p.data contains the
+ // target pointer.
+ //
+ if (const target* xt =
+ (p.target != nullptr ? p.target :
+ ((p.include & prerequisite_target::include_target) != 0
+ ? reinterpret_cast<target*> (p.data)
+ : nullptr)))
+ {
+ if (xt == &pt && (p.include & prerequisite_target::include_udm) != 0)
+ return true;
+
+ if (size_t n = xt->prerequisite_targets[a].size ())
+ {
+ if (updated_during_match (a, *xt, n, pt))
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
optional<bool> dyndep_rule::
inject_existing_file (tracer& trace, const char* what,
- action a, target& t,
+ action a, target& t, size_t pts_n,
const file& pt,
timestamp mt,
bool f,
@@ -81,8 +118,11 @@ namespace build2
recipe_function* const* rf (pt[a].recipe.target<recipe_function*> ());
if (rf == nullptr || *rf != &noop_action)
{
- fail << what << ' ' << pt << " has non-noop recipe" <<
- info << "consider listing it as static prerequisite of " << t;
+ if (pts_n == 0 || !updated_during_match (a, t, pts_n, pt))
+ {
+ fail << what << ' ' << pt << " has non-noop recipe" <<
+ info << "consider listing it as static prerequisite of " << t;
+ }
}
bool r (update (trace, a, pt, mt));
@@ -96,21 +136,27 @@ namespace build2
void dyndep_rule::
verify_existing_file (tracer&, const char* what,
- action a, const target& t,
+ action a, const target& t, size_t pts_n,
const file& pt)
{
diag_record dr;
- if (pt.matched (a))
+ if (pt.matched (a, memory_order_acquire))
{
recipe_function* const* rf (pt[a].recipe.target<recipe_function*> ());
if (rf == nullptr || *rf != &noop_action)
{
- dr << fail << what << ' ' << pt << " has non-noop recipe";
+ if (pts_n == 0 || !updated_during_match (a, t, pts_n, pt))
+ {
+ dr << fail << what << ' ' << pt << " has non-noop recipe";
+ }
}
}
else if (pt.decl == target_decl::real)
{
+ // Note that this target could not possibly be updated during match
+ // since it's not matched.
+ //
dr << fail << what << ' ' << pt << " is explicitly declared as "
<< "target and may have non-noop recipe";
}
@@ -119,12 +165,6 @@ namespace build2
dr << info << "consider listing it as static prerequisite of " << t;
}
- // Reverse-lookup target type(s) from file name/extension.
- //
- // If the list of base target types is specified, then only these types and
- // those derived from them are considered. Otherwise, any file-based type is
- // considered but not the file type itself.
- //
small_vector<const target_type*, 2> dyndep_rule::
map_extension (const scope& bs,
const string& n, const string& e,
@@ -384,6 +424,7 @@ namespace build2
action a, const scope& bs, const target& t,
path& fp, bool cache, bool norm,
bool insert,
+ bool dynamic,
const function<dyndep_rule::map_extension_func>& map_extension,
const target_type& fallback,
const function<dyndep_rule::prefix_map_func>& get_pfx_map,
@@ -392,14 +433,26 @@ namespace build2
// NOTE: see enter_header() caching logic if changing anyting here with
// regards to the target and base scope usage.
- // Find or maybe insert the target. The directory is only moved from if
- // insert is true. Note that it must be normalized.
+ assert (!insert || t.ctx.phase == run_phase::match);
+
+ // Find or maybe insert the target.
+ //
+ // If insert is false, then don't consider dynamically-created targets
+ // (i.e., those that are not real or implied) unless dynamic is true, in
+ // which case return the target that would have been inserted.
//
- auto find = [&trace, what, &t,
- &map_extension, &fallback] (dir_path&& d,
- path&& f,
- bool insert) -> const file*
+ // The directory is only moved from if insert is true. Note that it must
+ // be normalized.
+ //
+ auto find = [&trace, what, &bs, &t,
+ &map_extension,
+ &fallback] (dir_path&& d,
+ path&& f,
+ bool insert,
+ bool dynamic = false) -> const file*
{
+ context& ctx (t.ctx);
+
// Split the file into its name part and extension. Here we can assume
// the name part is a valid filesystem name.
//
@@ -422,7 +475,7 @@ namespace build2
dir_path out;
// It's possible the extension-to-target type mapping is ambiguous (for
- // example, because both C and X-language headers use the same .h
+ // example, because both C and C++-language headers use the same .h
// extension). In this case we will first try to find one that matches
// an explicit target (similar logic to when insert is false).
//
@@ -435,15 +488,40 @@ namespace build2
// pick the first one (it's highly unlikely the source file extension
// mapping will differ based on the configuration).
//
+ // Note that we also need to remember the base scope for search() below
+ // (failed that, search_existing_file() will refuse to look).
+ //
+ const scope* s (nullptr);
{
- const scope& bs (**t.ctx.scopes.find (d).first);
- if (const scope* rs = bs.root_scope ())
+ // While we cannot accurately associate in the general case, we can do
+ // so if the path belongs to this project.
+ //
+ const scope& rs (*bs.root_scope ());
+ bool src (false);
+ if (d.sub (rs.out_path ()) ||
+ (src = (!rs.out_eq_src () && d.sub (rs.src_path ()))))
{
if (map_extension != nullptr)
tts = map_extension (bs, n, e);
- if (!bs.out_eq_src () && d.sub (bs.src_path ()))
- out = out_src (d, *rs);
+ if (src)
+ out = out_src (d, rs);
+
+ s = &bs;
+ }
+ else
+ {
+ const scope& bs (**ctx.scopes.find (d).first);
+ if (const scope* rs = bs.root_scope ())
+ {
+ if (map_extension != nullptr)
+ tts = map_extension (bs, n, e);
+
+ if (!rs->out_eq_src () && d.sub (rs->src_path ()))
+ out = out_src (d, *rs);
+
+ s = &bs;
+ }
}
}
@@ -453,9 +531,9 @@ namespace build2
if (tts.empty ())
{
// If the project doesn't "know" this extension then we can't possibly
- // find an explicit target of this type.
+ // find a real or implied target of this type.
//
- if (!insert)
+ if (!insert && !dynamic)
{
l6 ([&]{trace << "unknown " << what << ' ' << n << " extension '"
<< e << "'";});
@@ -487,7 +565,7 @@ namespace build2
{
const target_type& tt (*tts[i]);
- if (const target* x = t.ctx.targets.find (tt, d, out, n, e, trace))
+ if (const target* x = ctx.targets.find (tt, d, out, n, e, trace))
{
// What would be the harm in reusing a dynamically-inserted target
// if there is no buildfile-mentioned one? Probably none (since it
@@ -499,8 +577,7 @@ namespace build2
// implied ones because pre-entered members of a target group
// (e.g., cli.cxx) are implied.
//
- if (x->decl == target_decl::real ||
- x->decl == target_decl::implied)
+ if (operator>= (x->decl, target_decl::implied)) // @@ VC14
{
r = x;
break;
@@ -510,7 +587,7 @@ namespace build2
// Cache the dynamic target corresponding to tts[0] since that's
// what we will be inserting (see below).
//
- if (insert && i == 0)
+ if ((insert || dynamic) && i == 0)
f = x;
l6 ([&]{trace << "dynamic target with target type " << tt.name;});
@@ -549,10 +626,25 @@ namespace build2
r = f;
}
- // @@ OPT: move d, out, n
- //
if (r == nullptr && insert)
- r = &search (t, *tts[0], d, out, n, &e, nullptr);
+ {
+ // Like search(t, pk) but don't fail if the target is in src.
+ //
+ // While it may seem like there is not much difference, the caller may
+ // actually do more than just issue more specific diagnostics. For
+ // example, if may defer the failure to the tool diagnostics.
+ //
+#if 0
+ r = &search (t, *tts[0], d, out, n, &e, s);
+#else
+ prerequisite_key pk {nullopt, {tts[0], &d, &out, &n, move (e)}, s};
+
+ r = pk.tk.type->search (ctx, &t, pk);
+
+ if (r == nullptr && pk.tk.out->empty ())
+ r = &create_new_target (ctx, pk);
+#endif
+ }
return static_cast<const file*> (r);
};
@@ -653,7 +745,11 @@ namespace build2
// Maybe for diagnostics (i.e., we will actually try to build
// something there instead of just saying no mapping).
//
- pt = find (pd / d, fp.leaf (), insert && !i->first.empty ());
+ if (i->first.empty ())
+ pt = find (pd / d, fp.leaf (), false);
+ else
+ pt = find (pd / d, fp.leaf (), insert, dynamic);
+
if (pt != nullptr)
{
fp = pd / fp;
@@ -713,7 +809,7 @@ namespace build2
if (pt == nullptr)
{
l6 ([&]{trace << (insert ? "entering " : "finding ") << fp;});
- pt = find (fp.directory (), fp.leaf (), insert);
+ pt = find (fp.directory (), fp.leaf (), insert, dynamic);
}
}
@@ -732,7 +828,7 @@ namespace build2
return enter_file_impl (trace, what,
a, bs, t,
fp, cache, norm,
- true /* insert */,
+ true /* insert */, false,
map_ext, fallback, pfx_map, so_map);
}
@@ -740,6 +836,7 @@ namespace build2
find_file (tracer& trace, const char* what,
action a, const scope& bs, const target& t,
path& fp, bool cache, bool norm,
+ bool dynamic,
const function<map_extension_func>& map_ext,
const target_type& fallback,
const function<prefix_map_func>& pfx_map,
@@ -748,52 +845,264 @@ namespace build2
return enter_file_impl (trace, what,
a, bs, t,
fp, cache, norm,
- false /* insert */,
+ false /* insert */, dynamic,
map_ext, fallback, pfx_map, so_map);
}
- const file& dyndep_rule::
- inject_group_member (action a, const scope& bs, mtime_target& g,
- path p, const target_type& tt)
+ static pair<const file&, bool>
+ inject_group_member_impl (action a, const scope& bs, mtime_target& g,
+ path f, string n, string e,
+ const target_type& tt,
+ const function<dyndep_rule::group_filter_func>& fl)
{
- path n (p.leaf ());
- string e (n.extension ());
+ // NOTE: see adhoc_rule_regex_pattern::apply_group_members() for a variant
+ // of the same code.
- // Assume nobody else can insert these members (seems reasonable seeing
- // that their names are dynamically discovered).
+ // Note that we used to directly match such a member with group_recipe.
+ // But that messes up our dependency counts since we don't really know
+ // whether someone will execute such a member.
+ //
+ // So instead we now just link the member up to the group and rely on the
+ // special semantics in match_rule_impl() for groups with the dyn_members
+ // flag.
+ //
+ assert ((g.type ().flags & target_type::flag::dyn_members) ==
+ target_type::flag::dyn_members);
+
+ // We expect that nobody else can insert these members (seems reasonable
+ // seeing that their names are dynamically discovered).
//
auto l (search_new_locked (
bs.ctx,
tt,
- p.directory (),
+ f.directory (),
dir_path (), // Always in out.
- move (n.make_base ()).string (),
+ move (n),
&e,
&bs));
const file& t (l.first.as<file> ()); // Note: non-const only if have lock.
+ // We don't need to match the group recipe directy from ad hoc
+ // recipes/rules due to the special semantics for explicit group members
+ // in match_rule_impl(). This is what skip_match is for.
+ //
if (l.second)
{
l.first.group = &g;
l.second.unlock ();
- t.path (move (p)); // Only do this once.
+ t.path (move (f));
+ return pair<const file&, bool> (t, true);
}
else
- // Must have been already done (e.g., on previous operation in a
- // batch).
+ {
+ if (fl != nullptr && !fl (g, t))
+ return pair<const file&, bool> (t, false);
+ }
+
+ // Check if we already belong to this group. Note that this not a mere
+ // optimization since we may be in the member->group->member chain and
+ // trying to lock the member the second time would deadlock (this can be
+ // triggered, for example, by dist, which sort of depends on such members
+ // directly... which was not quite correct and is now fixed).
+ //
+ if (t.group == &g) // Note: atomic.
+ t.path (move (f));
+ else
+ {
+ // This shouldn't normally fail since we are the only ones that should
+ // know about this target (otherwise why is it dynamicaly discovered).
+ // However, nothing prevents the user from depending on such a target,
+ // however misguided.
//
- assert (t.group == &g);
+ target_lock tl (lock (a, t));
+
+ if (!tl)
+ fail << "group " << g << " member " << t << " is already matched" <<
+ info << "dynamically extracted group members cannot be used as "
+ << "prerequisites directly, only via group";
+
+ if (t.group == nullptr)
+ tl.target->group = &g;
+ else if (t.group != &g)
+ fail << "group " << g << " member " << t
+ << " is already member of group " << *t.group;
+
+ t.path (move (f));
+ }
+
+ return pair<const file&, bool> (t, true);
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_group_member (action a, const scope& bs, mtime_target& g,
+ path f,
+ const target_type& tt,
+ const function<group_filter_func>& filter)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ return inject_group_member_impl (a, bs, g,
+ move (f), move (n).string (), move (e),
+ tt,
+ filter);
+ }
+
+ static const target_type&
+ map_target_type (const char* what,
+ const scope& bs,
+ const path& f, const string& n, const string& e,
+ const function<dyndep_rule::map_extension_func>& map_ext,
+ const target_type& fallback)
+ {
+ // Map extension to the target type, falling back to the fallback type.
+ //
+ small_vector<const target_type*, 2> tts;
+ if (map_ext != nullptr)
+ tts = map_ext (bs, n, e);
+
+ // Not sure what else we can do in this case.
+ //
+ if (tts.size () > 1)
+ {
+ diag_record dr (fail);
+
+ dr << "mapping of " << what << " target path " << f
+ << " to target type is ambiguous";
+
+ for (const target_type* tt: tts)
+ dr << info << "can be " << tt->name << "{}";
+ }
+
+ const target_type& tt (tts.empty () ? fallback : *tts.front ());
+
+ if (!tt.is_a<file> ())
+ {
+ fail << what << " target path " << f << " mapped to non-file-based "
+ << "target type " << tt.name << "{}";
+ }
+
+ return tt;
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_group_member (const char* what,
+ action a, const scope& bs, mtime_target& g,
+ path f,
+ const function<map_extension_func>& map_ext,
+ const target_type& fallback,
+ const function<group_filter_func>& filter)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ // Map extension to the target type, falling back to the fallback type.
+ //
+ const target_type& tt (
+ map_target_type (what, bs, f, n.string (), e, map_ext, fallback));
+
+ return inject_group_member_impl (a, bs, g,
+ move (f), move (n).string (), move (e),
+ tt,
+ filter);
+ }
+
+ pair<const file&, bool>
+ inject_adhoc_group_member_impl (action, const scope& bs, target& t,
+ path f, string n, string e,
+ const target_type& tt)
+ {
+ // Assume nobody else can insert these members (seems reasonable seeing
+ // that their names are dynamically discovered).
+ //
+ auto l (search_new_locked (
+ bs.ctx,
+ tt,
+ f.directory (),
+ dir_path (), // Always in out.
+ move (n),
+ &e,
+ &bs));
+
+ file* ft (&l.first.as<file> ()); // Note: non-const only if locked.
+
+ // Skip if this is one of the static targets (or a duplicate of the
+ // dynamic target).
+ //
+ // In particular, we expect to skip all the targets that we could not lock
+ // (e.g., in case all of this has already been done for the previous
+ // operation in a batch; make sure to test `update update update` and
+ // `update clean update ...` batches if changing anything here).
+ //
+ // While at it also find the ad hoc members list tail.
+ //
+ const_ptr<target>* tail (&t.adhoc_member);
+ for (target* m (&t); m != nullptr; m = m->adhoc_member)
+ {
+ if (ft == m)
+ {
+ tail = nullptr;
+ break;
+ }
+
+ tail = &m->adhoc_member;
+ }
+
+ if (tail == nullptr)
+ return pair<const file&, bool> (*ft, false);
+
+ if (!l.second)
+ fail << "dynamic target " << *ft << " already exists and cannot be "
+ << "made ad hoc member of group " << t;
+
+ ft->group = &t;
+ l.second.unlock ();
+
+ // We need to be able to distinguish static targets from dynamic (see the
+ // static set hashing in adhoc_buildscript_rule::apply() for details).
+ //
+ assert (ft->decl != target_decl::real);
+
+ *tail = ft;
+ ft->path (move (f));
+
+ return pair<const file&, bool> (*ft, true);
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_adhoc_group_member (action a, const scope& bs, target& t,
+ path f,
+ const target_type& tt)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
+
+ return inject_adhoc_group_member_impl (
+ a, bs, t, move (f), move (n).string (), move (e), tt);
+ }
+
+ pair<const file&, bool> dyndep_rule::
+ inject_adhoc_group_member (const char* what,
+ action a, const scope& bs, target& t,
+ path f,
+ const function<map_extension_func>& map_ext,
+ const target_type& fallback)
+ {
+ path n (f.leaf ());
+ string e (n.extension ());
+ n.make_base ();
- // This shouldn't fail since we are the only ones that should be matching
- // this target.
+ // Map extension to the target type, falling back to the fallback type.
//
- target_lock tl (lock (a, t));
- assert (tl);
+ const target_type& tt (
+ map_target_type (what, bs, f, n.string (), e, map_ext, fallback));
- match_inc_dependents (a, g);
- match_recipe (tl, group_recipe);
- return t;
+ return inject_adhoc_group_member_impl (
+ a, bs, t, move (f), move (n).string (), move (e), tt);
}
}
diff --git a/libbuild2/dyndep.hxx b/libbuild2/dyndep.hxx
index 6632eb6..a0949c4 100644
--- a/libbuild2/dyndep.hxx
+++ b/libbuild2/dyndep.hxx
@@ -36,7 +36,7 @@ namespace build2
// target.
//
// Return the indication of whether it has changed or, if the passed
- // timestamp is not timestamp_unknown, is older than this timestamp. If
+ // timestamp is not timestamp_unknown, is newer than this timestamp. If
// the prerequisite target does not exists nor can be generated (no rule),
// then issue diagnostics and fail if the fail argument is true and return
// nullopt otherwise.
@@ -57,32 +57,39 @@ namespace build2
bool adhoc = false,
uintptr_t data = 0);
- // As above but verify the file is matched with noop_recipe and issue
- // diagnostics and fail otherwise (regardless of the fail flag).
+ // As above but verify the file is matched with noop_recipe or was updated
+ // during match and issue diagnostics and fail otherwise (regardless of
+ // the fail flag). Pass 0 for pts_n if don't want the "was updated during
+ // match" part.
//
// This version (together with verify_existing_file() below) is primarily
// useful for handling dynamic dependencies that are produced as a
// byproduct of recipe execution (and thus must have all the generated
// prerequisites specified statically).
//
+ // Note that this function expects all the static prerequisites of the
+ // target to already be matched and their number passed in pts_n.
+ //
static optional<bool>
inject_existing_file (tracer&, const char* what,
- action, target&,
+ action, target&, size_t pts_n,
const file& prerequiste,
timestamp,
bool fail,
bool adhoc = false,
uintptr_t data = 0);
- // Verify the file is matched with noop_recipe and issue diagnostics and
- // fail otherwise. If the file is not matched, then fail if the target is
- // not implied (that is, declared in a buildfile).
+ // Verify the file is matched with noop_recipe or was updated during match
+ // and issue diagnostics and fail otherwise. If the file is not matched,
+ // then fail if the target is not implied (that is, declared in a
+ // buildfile). Pass 0 for pts_n if don't want the "was updated during
+ // match" part.
//
// Note: can only be called in the execute phase.
//
static void
verify_existing_file (tracer&, const char* what,
- action, const target&,
+ action, const target&, size_t pts_n,
const file& prerequiste);
// Reverse-lookup target type(s) from file name/extension.
@@ -91,6 +98,10 @@ namespace build2
// and those derived from them are considered. Otherwise, any file-based
// type is considered but not the file type itself.
//
+ // It's possible the extension-to-target type mapping is ambiguous (for
+ // example, because both C and C++-language headers use the same .h
+ // extension). So this function can return multiple target types.
+ //
static small_vector<const target_type*, 2>
map_extension (const scope& base,
const string& name, const string& ext,
@@ -207,39 +218,86 @@ namespace build2
const srcout_map& = {});
// As above but do not insert the target if it doesn't already exist. This
- // function also returns NULL if the target exists but is implied (that
- // is, not declared in a buildfile).
+ // function also returns NULL if the target exists but is dynamic (that
+ // is, not real or implied), unless the dynamic argument is true.
//
static pair<const file*, bool>
find_file (tracer&, const char* what,
action, const scope& base, const target&,
path& prerequisite, bool cache, bool normalized,
+ bool dynamic,
const function<map_extension_func>&,
const target_type& fallback,
const function<prefix_map_func>& = nullptr,
const srcout_map& = {});
// Find or insert a target file path as a target of the specified type,
- // make it a member of the specified (non-ad hoc) mtime target group,
- // set its path, and match it with group_recipe.
+ // make it a member of the specified (non-ad hoc) mtime target group and
+ // set its path. Return the target and an indication of whether it was
+ // made a member (can only be false if a filter is provided; see below).
//
// The file path must be absolute and normalized. Note that this function
- // assumes that this member can only be matched via this group.
+ // assumes that this member can only be matched via this group. The group
+ // type must have the target_type::flag::dyn_members flag.
//
- // Note: we can split this function into {enter,match}_group_member()
- // if necessary.
+ // If specified, the group_filter function is called on the target before
+ // making it a group member, skipping it if this function returns false.
+ // Note that the filter is skipped if the target is newly inserted (the
+ // filter is meant to be used to skip duplicates).
//
- static const file&
+ using group_filter_func = bool (mtime_target& g, const file&);
+
+ static pair<const file&, bool>
inject_group_member (action, const scope& base, mtime_target&,
- path, const target_type&);
+ path,
+ const target_type&,
+ const function<group_filter_func>& = nullptr);
template <typename T>
- static const T&
- inject_group_member (action a, const scope& bs, mtime_target& g, path p)
+ static pair<const T&, bool>
+ inject_group_member (action a, const scope& bs, mtime_target& g,
+ path f,
+ const function<group_filter_func>& filter = nullptr)
{
- return inject_group_member (
- a, bs, g, move (p), T::static_type).template as<T> ();
+ auto p (inject_group_member (a, bs, g, move (f), T::static_type, filter));
+ return pair<const T&, bool> (p.first.template as<T> (), p.second);
}
+
+ // As above but the target type is determined using the map_extension
+ // function if specified, falling back to the fallback type if unable to
+ // (the what argument is used for diagnostics during this process).
+ //
+ static pair<const file&, bool>
+ inject_group_member (const char* what,
+ action, const scope& base, mtime_target& g,
+ path,
+ const function<map_extension_func>&,
+ const target_type& fallback,
+ const function<group_filter_func>& = nullptr);
+
+
+ // Find or insert a target file path as a target, make it a member of the
+ // specified ad hoc group unless it already is, and set its path. Return
+ // the target and an indication of whether it was added as a member.
+ //
+ // The file path must be absolute and normalized. Note that this function
+ // assumes that this target can only be known as a member of this group.
+ //
+ static pair<const file&, bool>
+ inject_adhoc_group_member (action, const scope& base, target& g,
+ path,
+ const target_type&);
+
+ // As above but the target type is determined using the map_extension
+ // function if specified, falling back to the fallback type if unable to
+ // (the what argument is used for diagnostics during this process).
+ //
+ static pair<const file&, bool>
+ inject_adhoc_group_member (const char* what,
+ action, const scope& base, target& g,
+ path,
+ const function<map_extension_func>&,
+ const target_type& fallback);
};
}
diff --git a/libbuild2/file-cache.cxx b/libbuild2/file-cache.cxx
index 1c1424f..caaf40c 100644
--- a/libbuild2/file-cache.cxx
+++ b/libbuild2/file-cache.cxx
@@ -28,6 +28,8 @@ namespace build2
if (!comp_path_.empty ())
try_rmfile_ignore_error (comp_path_);
+ // Note: state remains uninit until write::close().
+
pin ();
return write (*this);
}
diff --git a/libbuild2/file-cache.hxx b/libbuild2/file-cache.hxx
index d210685..98c2b67 100644
--- a/libbuild2/file-cache.hxx
+++ b/libbuild2/file-cache.hxx
@@ -119,9 +119,9 @@ namespace build2
// Move-to-NULL-only type.
//
- write (write&&);
+ write (write&&) noexcept;
write (const write&) = delete;
- write& operator= (write&&);
+ write& operator= (write&&) noexcept;
write& operator= (const write&) = delete;
~write ();
@@ -145,9 +145,9 @@ namespace build2
// Move-to-NULL-only type.
//
- read (read&&);
+ read (read&&) noexcept;
read (const read&) = delete;
- read& operator= (read&&);
+ read& operator= (read&&) noexcept;
read& operator= (const read&) = delete;
~read ();
@@ -208,9 +208,9 @@ namespace build2
// Move-to-NULL-only type.
//
- entry (entry&&);
+ entry (entry&&) noexcept;
entry (const entry&) = delete;
- entry& operator= (entry&&);
+ entry& operator= (entry&&) noexcept;
entry& operator= (const entry&) = delete;
~entry ();
diff --git a/libbuild2/file-cache.ixx b/libbuild2/file-cache.ixx
index b4773e7..99be5ad 100644
--- a/libbuild2/file-cache.ixx
+++ b/libbuild2/file-cache.ixx
@@ -65,26 +65,30 @@ namespace build2
}
inline file_cache::entry::
- entry (entry&& e)
+ entry (entry&& e) noexcept
: temporary (e.temporary),
state_ (e.state_),
path_ (move (e.path_)),
comp_path_ (move (e.comp_path_)),
pin_ (e.pin_)
{
+ e.state_ = null;
}
inline file_cache::entry& file_cache::entry::
- operator= (entry&& e)
+ operator= (entry&& e) noexcept
{
if (this != &e)
{
assert (state_ == null);
+
temporary = e.temporary;
state_ = e.state_;
path_ = move (e.path_);
comp_path_ = move (e.comp_path_);
pin_ = e.pin_;
+
+ e.state_ = null;
}
return *this;
}
@@ -105,14 +109,14 @@ namespace build2
}
inline file_cache::write::
- write (write&& e)
+ write (write&& e) noexcept
: entry_ (e.entry_)
{
e.entry_ = nullptr;
}
inline file_cache::write& file_cache::write::
- operator= (write&& e)
+ operator= (write&& e) noexcept
{
if (this != &e)
{
@@ -132,14 +136,14 @@ namespace build2
}
inline file_cache::read::
- read (read&& e)
+ read (read&& e) noexcept
: entry_ (e.entry_)
{
e.entry_ = nullptr;
}
inline file_cache::read& file_cache::read::
- operator= (read&& e)
+ operator= (read&& e) noexcept
{
if (this != &e)
{
diff --git a/libbuild2/file.cxx b/libbuild2/file.cxx
index d3de787..c0957ad 100644
--- a/libbuild2/file.cxx
+++ b/libbuild2/file.cxx
@@ -4,9 +4,11 @@
#include <libbuild2/file.hxx>
#include <cerrno>
+#include <cstring> // strlen()
#include <iomanip> // left, setw()
#include <sstream>
+#include <libbuild2/rule.hxx>
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
#include <libbuild2/context.hxx>
@@ -28,6 +30,8 @@ namespace build2
{
// Standard and alternative build file/directory naming schemes.
//
+ extern const dir_path std_export_dir;
+ extern const dir_path alt_export_dir;
// build:
@@ -35,6 +39,7 @@ namespace build2
const dir_path std_root_dir (dir_path (std_build_dir) /= "root");
const dir_path std_bootstrap_dir (dir_path (std_build_dir) /= "bootstrap");
const dir_path std_build_build_dir (dir_path (std_build_dir) /= "build");
+ const dir_path std_export_dir (dir_path (std_build_dir) /= "export");
const path std_root_file (std_build_dir / "root.build");
const path std_bootstrap_file (std_build_dir / "bootstrap.build");
@@ -52,6 +57,7 @@ namespace build2
const dir_path alt_root_dir (dir_path (alt_build_dir) /= "root");
const dir_path alt_bootstrap_dir (dir_path (alt_build_dir) /= "bootstrap");
const dir_path alt_build_build_dir (dir_path (alt_build_dir) /= "build");
+ const dir_path alt_export_dir (dir_path (alt_build_dir) /= "export");
const path alt_root_file (alt_build_dir / "root.build2");
const path alt_bootstrap_file (alt_build_dir / "bootstrap.build2");
@@ -218,7 +224,7 @@ namespace build2
// Checking for plausability feels expensive since we have to recursively
// traverse the directory tree. Note, however, that if the answer is
// positive, then shortly after we will be traversing this tree anyway and
- // presumably this time getting the data from the cash (we don't really
+ // presumably this time getting the data from the cache (we don't really
// care about the negative answer since this is a degenerate case).
//
optional<path> bf;
@@ -306,7 +312,7 @@ namespace build2
{
tracer trace ("source_once");
- if (!once.buildfiles.insert (bf).second)
+ if (!once.root_extra->insert_buildfile (bf))
{
l5 ([&]{trace << "skipping already sourced " << bf;});
return false;
@@ -357,7 +363,7 @@ namespace build2
//
try
{
- for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow))
{
// If this is a link, then type() will try to stat() it. And if the
// link is dangling or points to something inaccessible, it will fail.
@@ -522,10 +528,14 @@ namespace build2
pair<scope&, scope*>
switch_scope (scope& root, const dir_path& out_base, bool proj)
{
+ context& ctx (root.ctx);
+
+ assert (ctx.phase == run_phase::load);
+
// First, enter the scope into the map and see if it is in any project. If
// it is not, then there is nothing else to do.
//
- auto i (root.ctx.scopes.rw (root).insert_out (out_base));
+ auto i (ctx.scopes.rw (root).insert_out (out_base));
scope& base (*i->second.front ());
scope* rs (nullptr);
@@ -581,38 +591,37 @@ namespace build2
fail << "variable out_root expected as first line in " << f << endf;
}
+ scope::root_extra_type::
+ root_extra_type (scope& root, bool a)
+ : altn (a),
+ loaded (false),
+
+ build_ext (a ? alt_build_ext : std_build_ext),
+ build_dir (a ? alt_build_dir : std_build_dir),
+ buildfile_file (a ? alt_buildfile_file : std_buildfile_file),
+ buildignore_file (a ? alt_buildignore_file : std_buildignore_file),
+ root_dir (a ? alt_root_dir : std_root_dir),
+ bootstrap_dir (a ? alt_bootstrap_dir : std_bootstrap_dir),
+ build_build_dir (a ? alt_build_build_dir : std_build_build_dir),
+ bootstrap_file (a ? alt_bootstrap_file : std_bootstrap_file),
+ root_file (a ? alt_root_file : std_root_file),
+ export_file (a ? alt_export_file : std_export_file),
+ src_root_file (a ? alt_src_root_file : std_src_root_file),
+ out_root_file (a ? alt_out_root_file : std_out_root_file),
+
+ var_pool (&root.ctx, &root.ctx.var_pool.rw (root), nullptr)
+ {
+ root.var_pool_ = &var_pool;
+ }
+
static void
setup_root_extra (scope& root, optional<bool>& altn)
{
assert (altn && root.root_extra == nullptr);
- bool a (*altn);
-
- root.root_extra.reset (
- new scope::root_extra_type {
- nullopt /* project */,
- nullopt /* amalgamation */,
- nullopt /* subprojects */,
- a,
- false /* loaded */,
- a ? alt_build_ext : std_build_ext,
- a ? alt_build_dir : std_build_dir,
- a ? alt_buildfile_file : std_buildfile_file,
- a ? alt_buildignore_file : std_buildignore_file,
- a ? alt_root_dir : std_root_dir,
- a ? alt_bootstrap_dir : std_bootstrap_dir,
- a ? alt_build_build_dir : std_build_build_dir,
- a ? alt_bootstrap_file : std_bootstrap_file,
- a ? alt_root_file : std_root_file,
- a ? alt_export_file : std_export_file,
- a ? alt_src_root_file : std_src_root_file,
- a ? alt_out_root_file : std_out_root_file,
- {}, /* meta_operations */
- {}, /* operations */
- {}, /* modules */
- {}, /* override_cache */
- {}, /* target_types */
- {}, /* environment */
- ""} /* environment_checksum */);
+
+ context& ctx (root.ctx);
+
+ root.root_extra.reset (new scope::root_extra_type (root, *altn));
// Enter built-in meta-operation and operation names. Loading of
// modules (via the src bootstrap; see below) can result in
@@ -622,9 +631,9 @@ namespace build2
root.insert_meta_operation (perform_id, mo_perform);
root.insert_meta_operation (info_id, mo_info);
- root.insert_operation (default_id, op_default);
- root.insert_operation (update_id, op_update);
- root.insert_operation (clean_id, op_clean);
+ root.insert_operation (default_id, op_default, nullptr);
+ root.insert_operation (update_id, op_update, ctx.var_update);
+ root.insert_operation (clean_id, op_clean, ctx.var_clean);
}
value&
@@ -843,10 +852,26 @@ namespace build2
try
{
- for (const dir_entry& de: dir_iterator (d, true /* ignore_dangling */))
+      // A subproject could plausibly be a symlink whose target is, for
+      // example, inside a git submodule. With that in mind, it makes sense
+      // to warn about dangling symlinks.
+ //
+ for (const dir_entry& de:
+ dir_iterator (d, dir_iterator::detect_dangling))
{
if (de.type () != entry_type::directory)
+ {
+ if (de.type () == entry_type::unknown)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / de.path ();
+ }
+
continue;
+ }
dir_path sd (d / path_cast<dir_path> (de.path ()));
@@ -947,14 +972,23 @@ namespace build2
rs.root_extra->amalgamation = nullptr;
rs.root_extra->subprojects = nullptr;
+ // See GH issue #322.
+ //
+#if 0
assert (!aovr || aovr->empty ());
+#else
+ if (!(!aovr || aovr->empty ()))
+ fail << "amalgamation directory " << *aovr << " specified for simple "
+ << "project " << src_root <<
+ info << "see https://github.com/build2/build2/issues/322 for details";
+#endif
}
// We assume that bootstrap out cannot load this file explicitly. It
// feels wrong to allow this since that makes the whole bootstrap
// process hard to reason about. But we may try to bootstrap the same
// root scope multiple time.
//
- else if (rs.buildfiles.insert (bf).second)
+ else if (rs.root_extra->insert_buildfile (bf))
{
// Extract the project name and amalgamation variable value so that
// we can make them available while loading bootstrap.build.
@@ -1317,9 +1351,9 @@ namespace build2
// Call module's post-boot functions.
//
- for (size_t i (0); i != root.root_extra->modules.size (); ++i)
+ for (size_t i (0); i != root.root_extra->loaded_modules.size (); ++i)
{
- module_state& s (root.root_extra->modules[i]);
+ module_state& s (root.root_extra->loaded_modules[i]);
if (s.boot_post != nullptr)
boot_post_module (root, s);
@@ -1360,7 +1394,7 @@ namespace build2
}
void
- create_bootstrap_outer (scope& root)
+ create_bootstrap_outer (scope& root, bool subp)
{
context& ctx (root.ctx);
@@ -1408,7 +1442,7 @@ namespace build2
setup_root (rs, forwarded (root, out_root, v.as<dir_path> (), altn));
bootstrap_pre (rs, altn);
- bootstrap_src (rs, altn);
+ bootstrap_src (rs, altn, nullopt, subp);
// bootstrap_post() delayed until after create_bootstrap_outer().
}
else
@@ -1419,7 +1453,7 @@ namespace build2
rs.assign (ctx.var_forwarded) = true; // Only upgrade (see main()).
}
- create_bootstrap_outer (rs);
+ create_bootstrap_outer (rs, subp);
if (!bstrapped)
bootstrap_post (rs);
@@ -1536,11 +1570,11 @@ namespace build2
// Note that init() can load additional modules invalidating iterators.
//
auto init_modules =
- [&root, n = root.root_extra->modules.size ()] (module_boot_init v)
+ [&root, n = root.root_extra->loaded_modules.size ()] (module_boot_init v)
{
for (size_t i (0); i != n; ++i)
{
- module_state& s (root.root_extra->modules[i]);
+ module_state& s (root.root_extra->loaded_modules[i]);
if (s.boot_init && *s.boot_init == v)
init_module (root, root, s.name, s.loc);
@@ -1603,12 +1637,19 @@ namespace build2
init_modules (module_boot_init::after);
}
- // Print the project configuration report, similar to how we do it in
+ // Print the project configuration report(s), similar to how we do it in
// build system modules.
//
- if (!p.config_report.empty () && verb >= (p.config_report_new ? 2 : 3))
+ using config_report = parser::config_report;
+
+ const project_name* proj (nullptr); // Resolve lazily.
+ for (const config_report& cr: p.config_reports)
{
- const project_name& proj (named_project (root)); // Can be empty.
+ if (verb < (cr.new_value ? 2 : 3))
+ continue;
+
+ if (proj == nullptr)
+ proj = &named_project (root); // Can be empty.
// @@ TODO/MAYBE:
//
@@ -1626,46 +1667,74 @@ namespace build2
// config @/tmp/tests
// libhello.tests.remote true
//
- string stem (!proj.empty () ? '.' + proj.variable () + '.' : string ());
+ // If the module name is not empty then it means the config variables
+ // are from the imported project and so we use that for <project>.
+ //
+ string stem (!cr.module.empty ()
+ ? '.' + cr.module.variable () + '.'
+ : (!proj->empty ()
+ ? '.' + proj->variable () + '.'
+ : string ()));
- // Calculate max name length.
+ // Return the variable name for printing.
//
- size_t pad (10);
- for (const pair<lookup, string>& lf: p.config_report)
+ auto name = [&stem] (const config_report::value& cv) -> const char*
{
- lookup l (lf.first);
+ lookup l (cv.val);
- size_t n;
if (l.value == nullptr)
{
- n = l.var->name.size ();
+ if (cv.org.empty ())
+ return l.var->name.c_str ();
+
+ // This case may or may not have the prefix.
+ //
+ size_t p, n (
+ !stem.empty ()
+ ? (p = cv.org.find (stem)) != string::npos ? p + stem.size () : 0
+ : cv.org.compare (0, 7, "config.") == 0 ? 7 : 0);
+
+ return cv.org.c_str () + n;
}
else
{
+ assert (cv.org.empty ()); // Sanity check.
+
size_t p (!stem.empty ()
? l.var->name.find (stem) + stem.size ()
: 7); // "config."
- n = l.var->name.size () - p;
+
+ return l.var->name.c_str () + p;
}
+ };
+
+ // Calculate max name length.
+ //
+ size_t pad (10);
+ for (const config_report::value& cv: cr.values)
+ {
+ size_t n (strlen (name (cv)));
if (n > pad)
pad = n;
}
// Use the special `config` module name (which doesn't have its own
- // report) for project configuration.
+ // report) for project's own configuration.
//
diag_record dr (text);
- dr << "config " << proj << '@' << root;
+ dr << (cr.module.empty () ? "config" : cr.module.string ().c_str ())
+ << ' ' << *proj << '@' << root;
names storage;
- for (const pair<lookup, string>& lf: p.config_report)
+ for (const config_report::value& cv: cr.values)
{
- lookup l (lf.first);
- const string& f (lf.second);
+ lookup l (cv.val);
+ const string& f (cv.fmt);
// If the report variable has been overriden, now is the time to
- // lookup its value.
+ // lookup its value. Note: see also the name() lambda above if
+ // changing anything here.
//
string n;
if (l.value == nullptr)
@@ -1681,24 +1750,26 @@ namespace build2
n = string (l.var->name, p);
}
+ const char* pn (name (cv)); // Print name.
+
dr << "\n ";
- if (const value& v = *l)
+ if (l)
{
storage.clear ();
- auto ns (reverse (v, storage));
+ auto ns (reverse (*l, storage, true /* reduce */));
if (f == "multiline")
{
- dr << n;
+ dr << pn;
for (auto& n: ns)
dr << "\n " << n;
}
else
- dr << left << setw (static_cast<int> (pad)) << n << ' ' << ns;
+ dr << left << setw (static_cast<int> (pad)) << pn << ' ' << ns;
}
else
- dr << left << setw (static_cast<int> (pad)) << n << " [null]";
+ dr << left << setw (static_cast<int> (pad)) << pn << " [null]";
}
}
@@ -1862,7 +1933,9 @@ namespace build2
try
{
// Note: not using run_*() functions since need to be able to suppress
- // all errors, including inability to exec.
+ // all errors, including abnormal, inability to exec, etc., in case of
+      // optional import. Also, no need to buffer diagnostics since we are
+      // in the serial load phase.
//
if (verb >= 3)
print_process (args);
@@ -1922,10 +1995,19 @@ namespace build2
return r;
if (!opt)
- error (loc) << "invalid metadata signature in " << args[0]
- << " output" <<
+ {
+ diag_record dr;
+ dr << error (loc) << "invalid metadata signature in " << args[0]
+ << " output" <<
info << "expected '" << s << "'";
+ if (verb >= 1 && verb <= 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+ }
+
goto fail;
}
@@ -1941,16 +2023,27 @@ namespace build2
if (pr.wait ())
{
if (!opt)
- error (loc) << "unable to read metadata from " << args[0];
+ error (loc) << "io error reading metadata from " << args[0];
}
else
{
// The child process presumably issued diagnostics but if it didn't,
- // the result will be very confusing. So let's issue something
- // generic for good measure.
+ // the result will be very confusing. So let's issue something generic
+ // for good measure. But also make it consistent with diagnostics
+ // issued by run_finish().
//
if (!opt)
- error (loc) << "unable to extract metadata from " << args[0];
+ {
+ diag_record dr;
+ dr << error (loc) << "unable to extract metadata from " << args[0] <<
+ info << "process " << args[0] << " " << *pr.exit;
+
+ if (verb >= 1 && verb <= 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
+ }
}
goto fail;
@@ -1966,8 +2059,7 @@ namespace build2
goto fail;
}
- fail:
-
+ fail:
if (opt)
{
metadata_cache.insert (pp.effect_string (), true);
@@ -1996,15 +2088,13 @@ namespace build2
&t);
}
- // Suggest appropriate ways to import the specified target (as type and
- // name) from the specified project.
- //
- static void
+ void
import_suggest (const diag_record& dr,
const project_name& pn,
- const target_type& tt,
+ const target_type* tt,
const string& tn,
- const char* qual = nullptr)
+ bool rule_hint,
+ const char* qual)
{
string pv (pn.variable ());
@@ -2016,15 +2106,19 @@ namespace build2
// Suggest ad hoc import but only if it's a path-based target (doing it
// for lib{} is very confusing).
//
- if (tt.is_a<path_target> ())
+ if (tt != nullptr && tt->is_a<path_target> ())
{
- string v (tt.is_a<exe> () && (pv == tn || pn == tn)
+ string v (tt->is_a<exe> () && (pv == tn || pn == tn)
? "config." + pv
- : "config.import." + pv + '.' + tn + '.' + tt.name);
+ : "config.import." + pv + '.' + tn + '.' + tt->name);
dr << info << "or use " << v << " configuration variable to specify "
<< "its " << (qual != nullptr ? qual : "") << "path";
}
+
+ if (rule_hint)
+ dr << info << "or use rule_hint attribute to specify a rule that can "
+ << "find this target";
}
// Return the processed target name as well as the project directory, if
@@ -2039,6 +2133,9 @@ namespace build2
// Return empty name if an ad hoc import resulted in a NULL target (only
// allowed if optional is true).
//
+ // Note that this function has a side effect of potentially marking some
+ // config.import.* variables as used.
+ //
pair<name, optional<dir_path>>
import_search (bool& new_value,
scope& ibase,
@@ -2070,6 +2167,9 @@ namespace build2
//
// 4. Normal import.
//
+ // @@ PERF: in quite a few places (local, subproject) we could have
+ // returned the scope and save on bootstrap in import_load().
+ //
if (tgt.unqualified ())
{
if (tgt.directory () && tgt.relative ())
@@ -2077,6 +2177,8 @@ namespace build2
if (tgt.absolute ())
{
+ // Ad hoc import.
+ //
// Actualize the directory to be analogous to the config.import.<proj>
// case (which is of abs_dir_path type).
//
@@ -2093,7 +2195,7 @@ namespace build2
fail (loc) << "project-local importation of target " << tgt
<< " from an unnamed project";
- tgt.proj = pn;
+ tgt.proj = pn; // Reduce to normal import.
return make_pair (move (tgt), optional<dir_path> (iroot.out_path ()));
}
@@ -2125,7 +2227,9 @@ namespace build2
// over anything that we may discover. In particular, we will prefer it
// over any bundled subprojects.
//
- auto& vp (iroot.var_pool ());
+ // Note: go straight for the public variable pool.
+ //
+ auto& vp (iroot.var_pool (true /* public */));
using config::lookup_config;
@@ -2295,7 +2399,8 @@ namespace build2
auto df = make_diag_frame (
[&proj, tt, &on] (const diag_record& dr)
{
- import_suggest (dr, proj, *tt, on, "alternative ");
+ import_suggest (
+ dr, proj, tt, on, false, "alternative ");
});
md = extract_metadata (e->process_path (),
@@ -2618,6 +2723,68 @@ namespace build2
fail (loc) << out_root << " is not out_root for " << *proj;
}
+ // Buildfile importation is quite different so handle it separately.
+ //
+ // Note that we don't need to load the project in this case.
+ //
+ // @@ For now we don't out-qualify the resulting target to be able to
+ // re-import it ad hoc (there is currently no support for out-qualified
+ // ad hoc import). Feels like this should be harmless since it's just a
+ // glorified path to a static file that nobody is actually going to use
+ // as a target (e.g., to depend upon).
+ //
+ if (tgt.type == "buildfile")
+ {
+ auto add_ext = [&altn] (string& n)
+ {
+ if (path_traits::find_extension (n) == string::npos)
+ {
+ if (n != (*altn ? alt_buildfile_file : std_buildfile_file).string ())
+ {
+ n += ".";
+ n += *altn ? alt_build_ext : std_build_ext;
+ }
+ }
+ };
+
+ if (proj)
+ {
+ name n;
+
+ if (src_root.empty ())
+ src_root = root->src_path ();
+
+ n.dir = move (src_root);
+ n.dir /= *altn ? alt_export_dir : std_export_dir;
+ if (!tgt.dir.empty ())
+ {
+ n.dir /= tgt.dir;
+ n.dir.normalize ();
+ }
+
+ n.type = tgt.type;
+ n.value = tgt.value;
+ add_ext (n.value);
+
+ pair<names, const scope&> r (names {move (n)}, *root);
+
+ // Cache.
+ //
+ if (cache_out_root.empty ())
+ cache_out_root = move (out_root);
+
+ ctx.import_cache.emplace (
+ import_key {move (cache_out_root), move (tgt), metav}, r);
+
+ return r;
+ }
+ else
+ {
+ add_ext (tgt.value);
+ return pair<names, const scope&> (names {move (tgt)}, *root);
+ }
+ }
+
// Load the imported root scope.
//
if (!root->root_extra->loaded)
@@ -2639,6 +2806,9 @@ namespace build2
if (cache_out_root.empty ())
cache_out_root = out_root;
+ if (src_root.empty ())
+ src_root = root->src_path ();
+
ts.assign (ctx.var_out_root) = move (out_root);
ts.assign (ctx.var_src_root) = move (src_root);
@@ -2669,7 +2839,8 @@ namespace build2
l5 ([&]{trace << "importing " << es;});
// @@ Should we verify these are all unqualified names? Or maybe there
- // is a use-case for the export stub to return a qualified name?
+ // is a use-case for the export stub to return a qualified name? E.g.,
+ // re-export?
//
names v;
{
@@ -2680,7 +2851,7 @@ namespace build2
});
parser p (ctx);
- v = p.parse_export_stub (ifs, path_name (es), gs, ts);
+ v = p.parse_export_stub (ifs, path_name (es), *root, gs, ts);
}
// If there were no export directive executed in an export stub,
@@ -2754,10 +2925,39 @@ namespace build2
}
}
- pair<names, import_kind>
+ const target_type&
+ import_target_type (scope& root,
+ const scope& iroot, const string& n,
+ const location& l)
+ {
+ // NOTE: see similar code in parser::parse_define().
+
+ const target_type* tt (iroot.find_target_type (n));
+ if (tt == nullptr)
+ fail (l) << "unknown imported target type " << n << " in project "
+ << iroot;
+
+ auto p (root.root_extra->target_types.insert (*tt));
+
+ if (!p.second && &p.first.get () != tt)
+ fail (l) << "imported target type " << n << " already defined in project "
+ << root;
+
+ return *tt;
+ }
+
+ static names
+ import2_buildfile (context&, names&&, bool, const location&);
+
+ static const target*
+ import2 (context&, const scope&, names&,
+ const string&, bool, const optional<string>&, bool,
+ const location&);
+
+ import_result<scope>
import (scope& base,
name tgt,
- bool ph2,
+ const optional<string>& ph2,
bool opt,
bool metadata,
const location& loc)
@@ -2784,7 +2984,10 @@ namespace build2
import_result<target> r (
import_direct (base, move (tgt), ph2, opt, metadata, loc));
- return make_pair (move (r.name), r.kind);
+ return import_result<scope> {
+ r.target != nullptr ? r.target->base_scope ().root_scope () : nullptr,
+ move (r.name),
+ r.kind};
}
pair<name, optional<dir_path>> r (
@@ -2800,6 +3003,7 @@ namespace build2
if (!r.second || r.second->empty ())
{
names ns;
+ const target* t (nullptr);
if (r.first.empty ())
{
@@ -2815,17 +3019,25 @@ namespace build2
//
if (ns.back ().qualified ())
{
- if (ph2)
+ if (ns.back ().type == "buildfile")
+ {
+ assert (ph2);
+ ns = import2_buildfile (ctx, move (ns), opt && !r.second, loc);
+ }
+ else if (ph2)
{
// This is tricky: we only want the optional semantics for the
// fallback case.
//
- if (const target* t = import (ctx,
- base.find_prerequisite_key (ns, loc),
- opt && !r.second /* optional */,
- nullopt /* metadata */,
- false /* existing */,
- loc))
+ t = import2 (ctx,
+ base, ns,
+ *ph2,
+ opt && !r.second /* optional */,
+ nullopt /* metadata */,
+ false /* existing */,
+ loc);
+
+ if (t != nullptr)
{
// Note that here r.first was still project-qualified and we
// have no choice but to call as_name(). This shouldn't cause
@@ -2841,40 +3053,99 @@ namespace build2
}
}
- return make_pair (
+ return import_result<scope> {
+ t != nullptr ? t->base_scope ().root_scope () : nullptr,
move (ns),
- r.second.has_value () ? import_kind::adhoc : import_kind::fallback);
+ r.second.has_value () ? import_kind::adhoc : import_kind::fallback};
}
import_kind k (r.first.absolute ()
? import_kind::adhoc
: import_kind::normal);
- return make_pair (
- import_load (base.ctx, move (r), false /* metadata */, loc).first,
- k);
+ pair<names, const scope&> p (
+ import_load (base.ctx, move (r), false /* metadata */, loc));
+
+ return import_result<scope> {&p.second, move (p.first), k};
}
const target*
- import (context& ctx,
- const prerequisite_key& pk,
- bool opt,
- const optional<string>& meta,
- bool exist,
- const location& loc)
+ import2 (context& ctx,
+ const prerequisite_key& pk,
+ const string& hint,
+ bool opt,
+ const optional<string>& meta,
+ bool exist,
+ const location& loc)
{
- tracer trace ("import");
+ tracer trace ("import2");
- assert (!meta || !exist);
+ // Neither hint nor metadata can be requested for existing.
+ //
+ assert (!exist || (!meta && hint.empty ()));
assert (pk.proj);
const project_name& proj (*pk.proj);
- // Target type-specific search.
- //
// Note that if this function returns a target, it should have the
// extension assigned (like the find/insert_target() functions) so that
// as_name() returns a stable name.
+
+ // Rule-specific resolution.
+ //
+ if (!hint.empty ())
+ {
+ assert (pk.scope != nullptr);
+
+ // Note: similar to/inspired by match_rule_impl().
+ //
+ // Search scopes outwards, stopping at the project root.
+ //
+ for (const scope* s (pk.scope);
+ s != nullptr;
+ s = s->root () ? nullptr : s->parent_scope ())
+ {
+ // We only look for rules that are registered for perform(update).
+ //
+ if (const operation_rule_map* om = s->rules[perform_id])
+ {
+ if (const target_type_rule_map* ttm = (*om)[update_id])
+ {
+ // Ignore the target type the rules are registered for (this is
+ // about prerequisite types, not target).
+ //
+ // @@ Note that the same rule could be registered for several
+ // types which means we will keep calling it repeatedly.
+ //
+ for (const auto& p: *ttm)
+ {
+ const name_rule_map& nm (p.second);
+
+ // Filter against the hint.
+ //
+ for (auto p (nm.find_sub (hint)); p.first != p.second; ++p.first)
+ {
+ const string& n (p.first->first);
+ const rule& r (p.first->second);
+
+ auto df = make_diag_frame (
+ [&pk, &n](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while importing " << pk << " using rule "
+ << n;
+ });
+
+ if (const target* t = r.import (pk, meta, loc))
+ return t;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Builtin resolution for certain target types.
//
const target_key& tk (pk.tk);
const target_type& tt (*tk.type);
@@ -2927,8 +3198,7 @@ namespace build2
auto df = make_diag_frame (
[&proj, &tt, &tk] (const diag_record& dr)
{
- import_suggest (
- dr, proj, tt, *tk.name, "alternative ");
+ import_suggest (dr, proj, &tt, *tk.name, false, "alternative ");
});
if (!(md = extract_metadata (pp, *meta, opt, loc)))
@@ -2961,6 +3231,8 @@ namespace build2
return *t;
}
+ // NOTE: see similar code in import2() below if changing anything here.
+
if (opt || exist)
return nullptr;
@@ -2971,7 +3243,137 @@ namespace build2
dr << info << "consider adding its installation location" <<
info << "or explicitly specify its project name";
else
- import_suggest (dr, proj, tt, *tk.name);
+ // Use metadata as proxy for immediate import.
+ //
+ import_suggest (dr, proj, &tt, *tk.name, meta && hint.empty ());
+
+ dr << endf;
+ }
+
+ // As above but with scope/ns instead of pk. This version deals with the
+ // unknown target type case.
+ //
+ static const target*
+ import2 (context& ctx,
+ const scope& base, names& ns,
+ const string& hint,
+ bool opt,
+ const optional<string>& meta,
+ bool exist,
+ const location& loc)
+ {
+ // If we have a rule hint, then it's natural to expect this target type is
+ // known to the importing project. Ditto for project-less import.
+ //
+ const target_type* tt (nullptr);
+ if (hint.empty ())
+ {
+ size_t n;
+ if ((n = ns.size ()) != 0 && n == (ns[0].pair ? 2 : 1))
+ {
+ const name& n (ns.front ());
+
+ if (n.typed () && !n.proj->empty ())
+ {
+ tt = base.find_target_type (n.type);
+
+ if (tt == nullptr)
+ {
+ // A subset of code in the above version of import2().
+ //
+ if (opt || exist)
+ return nullptr;
+
+ diag_record dr;
+ dr << fail (loc) << "unable to import target " << ns;
+ import_suggest (dr, *n.proj, nullptr, string (), meta.has_value ());
+ }
+ }
+ }
+ }
+
+ return import2 (ctx,
+ base.find_prerequisite_key (ns, loc, tt),
+ hint,
+ opt,
+ meta,
+ exist,
+ loc);
+ }
+
+ static names
+ import2_buildfile (context&, names&& ns, bool opt, const location& loc)
+ {
+ tracer trace ("import2_buildfile");
+
+ assert (ns.size () == 1);
+ name n (move (ns.front ()));
+
+ // Our approach doesn't work for targets without a project so let's fail
+ // hard, even if optional.
+ //
+ if (!n.proj || n.proj->empty ())
+ fail (loc) << "unable to import target " << n << " without project name";
+
+ while (!build_install_buildfile.empty ()) // Breakout loop.
+ {
+ path f (build_install_buildfile /
+ dir_path (n.proj->string ()) /
+ n.dir /
+ n.value);
+
+ // See if we need to try with extensions.
+ //
+ bool ext (path_traits::find_extension (n.value) == string::npos &&
+ n.value != std_buildfile_file.string () &&
+ n.value != alt_buildfile_file.string ());
+
+ if (ext)
+ {
+ f += '.';
+ f += std_build_ext;
+ }
+
+ if (!exists (f))
+ {
+ l6 ([&]{trace << "tried " << f;});
+
+ if (ext)
+ {
+ f.make_base ();
+ f += '.';
+ f += alt_build_ext;
+
+ if (!exists (f))
+ {
+ l6 ([&]{trace << "tried " << f;});
+ break;
+ }
+ }
+ else
+ break;
+ }
+
+ // Split the path into the target.
+ //
+ ns = {name (f.directory (), move (n.type), f.leaf ().string ())};
+ return move (ns);
+ }
+
+ if (opt)
+ return names {};
+
+ diag_record dr;
+ dr << fail (loc) << "unable to import target " << n;
+
+ import_suggest (dr, *n.proj, nullptr /* tt */, n.value, false);
+
+ if (build_install_buildfile.empty ())
+ dr << info << "no exported buildfile installation location is "
+ << "configured in build2";
+ else
+ dr << info << "exported buildfile installation location is "
+ << build_install_buildfile;
dr << endf;
}
@@ -2980,7 +3382,7 @@ namespace build2
import_direct (bool& new_value,
scope& base,
name tgt,
- bool ph2,
+ const optional<string>& ph2,
bool opt,
bool metadata,
const location& loc,
@@ -2993,11 +3395,13 @@ namespace build2
l5 ([&]{trace << tgt << " from " << base << " for " << what;});
- assert ((!opt || ph2) && (!metadata || ph2));
+ assert ((!opt || ph2) && (!metadata || ph2) && tgt.type != "buildfile");
context& ctx (base.ctx);
assert (ctx.phase == run_phase::load);
+ scope& root (*base.root_scope ());
+
// Use the original target name as metadata key.
//
auto meta (metadata ? optional<string> (tgt.value) : nullopt);
@@ -3005,6 +3409,12 @@ namespace build2
names ns, rns;
import_kind k;
const target* pt (nullptr);
+ const scope* iroot (nullptr); // Imported root scope.
+
+ // Original project/name as imported for diagnostics.
+ //
+ string oname (meta ? tgt.value : string ());
+ project_name oproj (meta && tgt.proj ? *tgt.proj : project_name ());
pair<name, optional<dir_path>> r (
import_search (new_value,
@@ -3036,12 +3446,13 @@ namespace build2
// This is tricky: we only want the optional semantics for the
// fallback case.
//
- pt = import (ctx,
- base.find_prerequisite_key (ns, loc),
- opt && !r.second,
- meta,
- false /* existing */,
- loc);
+ pt = import2 (ctx,
+ base, ns,
+ *ph2,
+ opt && !r.second,
+ meta,
+ false /* existing */,
+ loc);
}
if (pt == nullptr)
@@ -3058,6 +3469,8 @@ namespace build2
// It's a bit fuzzy in which cases we end up here. So for now we keep
// the original if it's absolute and call as_name() otherwise.
//
+ // @@ TODO: resolve iroot or assume target type should be known?
+ //
if (r.first.absolute ())
rns.push_back (r.first);
@@ -3067,14 +3480,30 @@ namespace build2
else
{
k = r.first.absolute () ? import_kind::adhoc : import_kind::normal;
- rns = ns = import_load (base.ctx, move (r), metadata, loc).first;
+
+ pair<names, const scope&> p (
+ import_load (base.ctx, move (r), metadata, loc));
+
+ rns = ns = move (p.first);
+ iroot = &p.second;
}
if (pt == nullptr)
{
+ // Import (more precisely, alias) the target type into this project
+ // if not known.
+ //
+ const target_type* tt (nullptr);
+ if (iroot != nullptr && !ns.empty ())
+ {
+ const name& n (ns.front ());
+ if (n.typed ())
+ tt = &import_target_type (root, *iroot, n.type, loc);
+ }
+
// Similar logic to perform's search(). Note: modifies ns.
//
- target_key tk (base.find_target_key (ns, loc));
+ target_key tk (base.find_target_key (ns, loc, tt));
pt = ctx.targets.find (tk, trace);
if (pt == nullptr)
fail (loc) << "unknown imported target " << tk;
@@ -3091,10 +3520,20 @@ namespace build2
//
if (meta)
{
+ auto df = make_diag_frame (
+ [&oproj, &oname, &t] (const diag_record& dr)
+ {
+ if (!oproj.empty ())
+ import_suggest (dr, oproj, &t.type (), oname, false, "alternative ");
+ });
+
// The export.metadata value should start with the version followed by
// the metadata variable prefix.
//
- lookup l (t.vars[ctx.var_export_metadata]);
+ // Note: lookup on target, not target::vars since it could come from
+ // the group (think lib{} metadata).
+ //
+ lookup l (t[ctx.var_export_metadata]);
if (l && !l->empty ())
{
const names& ns (cast<names> (l));
@@ -3112,7 +3551,7 @@ namespace build2
catch (const invalid_argument& e)
{
fail (loc) << "invalid metadata version in imported target " << t
- << ": " << e;
+ << ": " << e << endf;
}
if (ver != 1)
@@ -3127,13 +3566,15 @@ namespace build2
const string& pfx (ns[1].value);
- auto& vp (ctx.var_pool.rw ()); // Load phase.
-
// See if we have the stable program name in the <var-prefix>.name
// variable. If its missing, set it to the metadata key (i.e., target
// name as imported) by default.
//
{
+ // Note: go straight for the public variable pool.
+ //
+ auto& vp (ctx.var_pool.rw ()); // Load phase.
+
value& nv (t.assign (vp.insert (pfx + ".name")));
if (!nv)
nv = *meta;
@@ -3144,10 +3585,8 @@ namespace build2
//
if (const auto* e = cast_null<strings> (t.vars[pfx + ".environment"]))
{
- scope& rs (*base.root_scope ());
-
for (const string& v: *e)
- config::save_environment (rs, v);
+ config::save_environment (root, v);
}
}
else
@@ -3157,6 +3596,31 @@ namespace build2
return import_result<target> {pt, move (rns), k};
}
+ path
+ import_buildfile (scope& bs, name n, bool opt, const location& loc)
+ {
+ names r (import (bs,
+ move (n),
+ string () /* phase2 */,
+ opt,
+ false /* metadata */,
+ loc).name);
+
+ path p;
+ if (!r.empty ()) // Optional not found.
+ {
+ // Note: see also parse_import().
+ //
+ assert (r.size () == 1); // See import_load() for details.
+ name& n (r.front ());
+ p = n.dir / n.value; // Should already include extension.
+ }
+ else
+ assert (opt);
+
+ return p;
+ }
+
ostream&
operator<< (ostream& o, const import_result<exe>& r)
{
@@ -3205,13 +3669,23 @@ namespace build2
//
mkdir (d / std_build_dir, verbosity);
+ auto diag = [verbosity] (const path& f)
+ {
+ if (verb >= verbosity)
+ {
+ if (verb >= 2)
+ text << "cat >" << f;
+ else if (verb)
+ print_diag ("save", f);
+ }
+ };
+
// Write build/bootstrap.build.
//
{
path f (d / std_bootstrap_file);
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
@@ -3257,8 +3731,7 @@ namespace build2
{
path f (d / std_root_file);
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
@@ -3306,8 +3779,7 @@ namespace build2
{
path f (d / std_build_dir / "config.build"); // std_config_file
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
@@ -3332,8 +3804,7 @@ namespace build2
{
path f (d / std_buildfile_file);
- if (verb >= verbosity)
- text << (verb >= 2 ? "cat >" : "save ") << f;
+ diag (f);
try
{
diff --git a/libbuild2/file.hxx b/libbuild2/file.hxx
index 0582a8c..36e4c00 100644
--- a/libbuild2/file.hxx
+++ b/libbuild2/file.hxx
@@ -19,6 +19,28 @@ namespace build2
class lexer;
class parser;
+ // The following filesystem entries in the build/ subdirectory are reserved
+ // by the build2 core:
+ //
+ // build/ -- build2 core-internal build state (e.g., recipes)
+ // bootstrap/ -- bootstrap state and hooks
+ // bootstrap.build -- bootstrap buildfile
+ // root/ -- root load hooks
+ // root.build -- root buildfile
+ // export.build -- export stub
+ // export/ -- exported buildfiles
+ //
+ // The build/, bootstrap/, root/, and config.build entries are in .gitignore
+ // as generated by bdep-new.
+ //
+ // The rest of the filesystem entries are shared between the project and the
+ // modules that it loads. In particular, if a project loads module named
+ // <mod>, then the <mod>.build, <mod>/, *.<mod> entries (spelled in any
+ // case) are reserved to this module and should not be used by the project
+ // unless explicitly allowed by the module. By convention, <mod>/build/ is
+ // for module-internal build state (e.g., C++ modules side-build) and is
+ // .gitignore'ed.
+ //
LIBBUILD2_SYMEXPORT extern const dir_path std_build_dir; // build/
// build/root.build
@@ -116,10 +138,11 @@ namespace build2
bool
source_once (scope& root, scope& base, const path&);
- // As above but checks against the specified scope rather than base.
+ // As above but checks against the specified root scope rather than this
+ // root scope.
//
LIBBUILD2_SYMEXPORT bool
- source_once (scope& root, scope& base, const path&, scope& once);
+ source_once (scope& root, scope& base, const path&, scope& once_root);
// Create project's root scope. Only set the src_root variable if the passed
// src_root value is not empty.
@@ -213,10 +236,11 @@ namespace build2
bootstrap_post (scope& root);
// Create and bootstrap outer root scopes, if any. Loading is done by
- // load_root().
+ // load_root(). If subprojects is false, then do not discover or extract
+ // subprojects.
//
LIBBUILD2_SYMEXPORT void
- create_bootstrap_outer (scope& root);
+ create_bootstrap_outer (scope& root, bool subprojects = true);
// Create and bootstrap inner root scopes, if any, recursively.
//
@@ -322,10 +346,14 @@ namespace build2
// original; see the config.import.<proj>.<name>[.<type>] logic for details)
// in which case it should still be passed to import phase 2.
//
- // If phase2 is true then the phase 2 is performed right away (we call it
- // immediate import). Note that if optional is true, phase2 must be true as
- // well (and thus there is no rule-specific logic for optional imports). In
- // case of optional, empty names value is retuned if nothing was found.
+ // If phase2 is present then the phase 2 is performed right away (we call it
+ // immediate import). Note that if optional is true, phase2 must be present
+ // as well (and thus there is no rule-specific logic for optional imports).
+ // In case of optional, empty names value is returned if nothing was found.
+ // The value in phase2 is the optional rule hint that, if not empty, will be
+ // used to lookup a rule that will be asked to resolve the qualified target
+ // (see rule::import()). If it is empty, then built-in resolution logic will
+ // be used for some target types (currently only exe{}).
//
// If metadata is true, then load the target metadata. In this case phase2
// must be true as well.
@@ -333,7 +361,9 @@ namespace build2
// Note also that we return names rather than a single name: while normally
// it will be a single target name, it can be an out-qualified pair (if
// someone wants to return a source target) but it can also be a non-target
- // since we don't restrict what users can import/export.
+ // since we don't restrict what users can import/export. If name has
+ // buildfile type, then the result is an absolute buildfile target to be
+ // included (once) at the point of importation.
//
// Finally, note that import is (and should be kept) idempotent or, more
// precisely, "accumulatively idempotent" in that additional steps may be
@@ -341,10 +371,21 @@ namespace build2
//
enum class import_kind {adhoc, normal, fallback};
- LIBBUILD2_SYMEXPORT pair<names, import_kind>
+ template <typename T>
+ struct import_result
+ {
+ const T* target; // Note: T can be imported target or imported scope.
+ names name;
+ import_kind kind;
+ };
+
+ // Note that import_result<scope>::target may be NULL even if name is not
+ // empty (e.g., out of project target imported via phase 2).
+ //
+ LIBBUILD2_SYMEXPORT import_result<scope>
import (scope& base,
name,
- bool phase2,
+ const optional<string>& phase2,
bool optional,
bool metadata,
const location&);
@@ -352,23 +393,23 @@ namespace build2
// Import phase 2.
//
const target&
- import (context&, const prerequisite_key&);
+ import2 (context&, const prerequisite_key&);
// As above but import the target "here and now" without waiting for phase 2
// (and thus omitting any rule-specific logic). This version of import is,
// for example, used by build system modules to perform an implicit import
// of the corresponding tool.
//
- // If phase2 is false, then the second phase's fallback/default logic is
+ // If phase2 is absent, then the second phase's fallback/default logic is
// only invoked if the import was ad hoc (i.e., a relative path was
// specified via config.import.<proj>.<name>[.<type>]) with NULL returned
// otherwise.
//
- // If phase2 is true and optional is true, then NULL is returned instead of
- // failing if phase 2 could not find anything.
+ // If phase2 is present and optional is true, then NULL is returned instead
+ // of failing if phase 2 could not find anything.
//
// If metadata is true, then load the target metadata. In this case phase2
- // must be true as well.
+ // must be present as well.
//
// The what argument specifies what triggered the import (for example,
// "module load") and is used in diagnostics.
@@ -377,18 +418,20 @@ namespace build2
// target::as_name() for details) as well as the kind of import that was
// performed.
//
- template <typename T>
- struct import_result
- {
- const T* target;
- names name;
- import_kind kind;
- };
+ // Note: cannot be used to import buildfile targets (use import_buildfile()
+ // instead).
+
+ // Print import_direct<exe>() result either as a target for a normal import
+ // or as a process path for ad hoc and fallback imports. Normally used in
+ // build system modules to print the configuration report.
+ //
+ LIBBUILD2_SYMEXPORT ostream&
+ operator<< (ostream&, const import_result<exe>&);
import_result<target>
import_direct (scope& base,
name,
- bool phase2,
+ const optional<string>& phase2,
bool optional,
bool metadata,
const location&,
@@ -403,13 +446,16 @@ namespace build2
import_direct (bool& new_value,
scope& base,
name,
- bool phase2,
+ const optional<string>& phase2,
bool optional,
bool metadata,
const location&,
const char* what = "import");
+ // As above but also cast the target and pass phase2 as bool (primarily
+ // for use in build system modules).
+ //
template <typename T>
import_result<T>
import_direct (scope&,
@@ -424,12 +470,16 @@ namespace build2
bool, bool, bool,
const location&, const char* = "import");
- // Print import_direct<exe>() result either as a target for a normal import
- // or as a process path for ad hoc and fallback imports. Normally used in
- // build system modules to print the configuration report.
+ // The import_direct() equivalent for importing buildfile targets. Return
+ // empty name if optional and not found. Note that the returned file path is
+ // not necessarily checked for existence so sourcing it may still fail.
//
- LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const import_result<exe>&);
+ // Note also that this function can be used for an ad hoc import by passing
+ // an absolute target name as would be returned by the normal import (can be
+ // useful for importing own buildfiles).
+ //
+ LIBBUILD2_SYMEXPORT path
+ import_buildfile (scope& base, name, bool optional, const location&);
// As import phase 2 but only imports as an already existing target. But
// unlike it, this function can be called during the load and execute
@@ -471,6 +521,27 @@ namespace build2
bool metadata,
const location&);
+ // Import (more precisely, alias as if using the `define =` syntax) the
+ // target type from imported project (iroot) into this project (root). If
+ // the target type with this name is already defined in this project, then
+ // make sure it is the same as in the imported project.
+ //
+ LIBBUILD2_SYMEXPORT const target_type&
+ import_target_type (scope& root,
+ const scope& iroot, const string&,
+ const location&);
+
+ // Suggest appropriate ways to import the specified target (as type and
+ // name) from the specified project.
+ //
+ void
+ import_suggest (const diag_record&,
+ const project_name&,
+ const target_type*,
+ const string& name,
+ bool rule_hint,
+ const char* qual = nullptr);
+
// Create a build system project in the specified directory.
//
LIBBUILD2_SYMEXPORT void
@@ -485,7 +556,7 @@ namespace build2
const optional<string>& config_file, // Ad hoc config.build contents.
bool buildfile, // Create root buildfile.
const char* who, // Who is creating it.
- uint16_t verbosity = 1); // Diagnostic verbosity.
+ uint16_t verbosity); // Diagnostic verbosity.
}
#include <libbuild2/file.ixx>
diff --git a/libbuild2/file.ixx b/libbuild2/file.ixx
index dbd892d..dc39bcb 100644
--- a/libbuild2/file.ixx
+++ b/libbuild2/file.ixx
@@ -22,15 +22,16 @@ namespace build2
}
LIBBUILD2_SYMEXPORT const target*
- import (context&,
- const prerequisite_key&,
- bool optional_,
- const optional<string>& metadata, // False or metadata key.
- bool existing,
- const location&);
+ import2 (context&,
+ const prerequisite_key&,
+ const string& hint,
+ bool optional_,
+ const optional<string>& metadata, // False or metadata key.
+ bool existing,
+ const location&);
inline const target&
- import (context& ctx, const prerequisite_key& pk)
+ import2 (context& ctx, const prerequisite_key& pk)
{
assert (ctx.phase == run_phase::match);
@@ -39,13 +40,13 @@ namespace build2
// Looks like the only way to do this is to keep location in name and
// then in prerequisite. Perhaps one day...
//
- return *import (ctx, pk, false, nullopt, false, location ());
+ return *import2 (ctx, pk, string (), false, nullopt, false, location ());
}
inline import_result<target>
import_direct (scope& base,
name tgt,
- bool ph2, bool opt, bool md,
+ const optional<string>& ph2, bool opt, bool md,
const location& loc, const char* w)
{
bool dummy (false);
@@ -59,7 +60,13 @@ namespace build2
bool ph2, bool opt, bool md,
const location& loc, const char* w)
{
- auto r (import_direct (base, move (tgt), ph2, opt, md, loc, w));
+ auto r (import_direct (base,
+ move (tgt),
+ ph2 ? optional<string> (string ()) : nullopt,
+ opt,
+ md,
+ loc,
+ w));
return import_result<T> {
r.target != nullptr ? &r.target->as<const T> () : nullptr,
move (r.name),
@@ -74,7 +81,14 @@ namespace build2
bool ph2, bool opt, bool md,
const location& loc, const char* w)
{
- auto r (import_direct (nv, base, move (tgt), ph2, opt, md, loc, w));
+ auto r (import_direct (nv,
+ base,
+ move (tgt),
+ ph2 ? optional<string> (string ()) : nullopt,
+ opt,
+ md,
+ loc,
+ w));
return import_result<T> {
r.target != nullptr ? &r.target->as<const T> () : nullptr,
move (r.name),
@@ -84,6 +98,6 @@ namespace build2
inline const target*
import_existing (context& ctx, const prerequisite_key& pk)
{
- return import (ctx, pk, false, nullopt, true, location ());
+ return import2 (ctx, pk, string (), false, nullopt, true, location ());
}
}
diff --git a/libbuild2/filesystem.cxx b/libbuild2/filesystem.cxx
index 2e3309d..f340dd7 100644
--- a/libbuild2/filesystem.cxx
+++ b/libbuild2/filesystem.cxx
@@ -15,7 +15,12 @@ namespace build2
touch (context& ctx, const path& p, bool create, uint16_t v)
{
if (verb >= v)
- text << "touch " << p;
+ {
+ if (verb >= 2)
+ text << "touch " << p;
+ else if (verb)
+ print_diag ("touch", p);
+ }
if (ctx.dry_run)
return;
@@ -50,25 +55,30 @@ namespace build2
// We don't want to print the command if the directory already exists.
// This makes the below code a bit ugly.
//
- mkdir_status ms;
+ auto print = [v, &d] (bool ovr)
+ {
+ if (verb >= v || ovr)
+ {
+ if (verb >= 2)
+ text << "mkdir " << d;
+ else if (verb)
+ print_diag ("mkdir", d);
+ }
+ };
+ mkdir_status ms;
try
{
ms = try_mkdir (d);
}
catch (const system_error& e)
{
- if (verb >= v)
- text << "mkdir " << d;
-
+ print (true);
fail << "unable to create directory " << d << ": " << e << endf;
}
if (ms == mkdir_status::success)
- {
- if (verb >= v)
- text << "mkdir " << d;
- }
+ print (false);
return ms;
}
@@ -79,25 +89,30 @@ namespace build2
// We don't want to print the command if the directory already exists.
// This makes the below code a bit ugly.
//
- mkdir_status ms;
+ auto print = [v, &d] (bool ovr)
+ {
+ if (verb >= v || ovr)
+ {
+ if (verb >= 2)
+ text << "mkdir -p " << d;
+ else if (verb)
+ print_diag ("mkdir -p", d);
+ }
+ };
+ mkdir_status ms;
try
{
ms = try_mkdir_p (d);
}
catch (const system_error& e)
{
- if (verb >= v)
- text << "mkdir -p " << d;
-
+ print (true);
fail << "unable to create directory " << d << ": " << e << endf;
}
if (ms == mkdir_status::success)
- {
- if (verb >= v)
- text << "mkdir -p " << d;
- }
+ print (false);
return ms;
}
@@ -106,7 +121,12 @@ namespace build2
mvfile (const path& f, const path& t, uint16_t v)
{
if (verb >= v)
- text << "mv " << f << ' ' << t;
+ {
+ if (verb >= 2)
+ text << "mv " << f << ' ' << t;
+ else if (verb)
+ print_diag ("mv", f, t);
+ }
try
{
@@ -126,10 +146,18 @@ namespace build2
fs_status<rmfile_status>
rmsymlink (context& ctx, const path& p, bool d, uint16_t v)
{
- auto print = [&p, v] ()
+ auto print = [&p, v] (bool ovr)
{
- if (verb >= v)
- text << "rm " << p.string ();
+ if (verb >= v || ovr)
+ {
+ // Note: strip trailing directory separator (but keep as path for
+ // relative).
+ //
+ if (verb >= 2)
+ text << "rm " << p.string ();
+ else if (verb)
+ print_diag ("rm", p.to_directory () ? path (p.string ()) : p);
+ }
};
rmfile_status rs;
@@ -144,12 +172,12 @@ namespace build2
}
catch (const system_error& e)
{
- print ();
+ print (true);
fail << "unable to remove symlink " << p.string () << ": " << e << endf;
}
if (rs == rmfile_status::success)
- print ();
+ print (false);
return rs;
}
@@ -166,7 +194,12 @@ namespace build2
return rmdir_status::not_exist;
if (verb >= v)
- text << "rmdir -r " << d;
+ {
+ if (verb >= 2)
+ text << "rmdir -r " << d;
+ else if (verb)
+ print_diag ("rmdir -r", d);
+ }
if (!ctx.dry_run)
{
@@ -258,7 +291,7 @@ namespace build2
{
try
{
- for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow))
{
// The .buildignore filesystem entry should be of the regular file
// type.
diff --git a/libbuild2/filesystem.hxx b/libbuild2/filesystem.hxx
index 565e832..7b45a08 100644
--- a/libbuild2/filesystem.hxx
+++ b/libbuild2/filesystem.hxx
@@ -22,6 +22,8 @@
//
namespace build2
{
+ using butl::entry_type;
+
using butl::auto_rmfile;
using butl::auto_rmdir;
@@ -73,10 +75,10 @@ namespace build2
using mkdir_status = butl::mkdir_status;
LIBBUILD2_SYMEXPORT fs_status<mkdir_status>
- mkdir (const dir_path&, uint16_t verbosity = 1);
+ mkdir (const dir_path&, uint16_t verbosity);
LIBBUILD2_SYMEXPORT fs_status<mkdir_status>
- mkdir_p (const dir_path&, uint16_t verbosity = 1);
+ mkdir_p (const dir_path&, uint16_t verbosity);
// Rename a file (or file symlink) overwriting the destination if exists.
//
@@ -166,7 +168,7 @@ namespace build2
//
LIBBUILD2_SYMEXPORT fs_status<mkdir_status>
mkdir_buildignore (context&,
- const dir_path&, const path&, uint16_t verbosity = 1);
+ const dir_path&, const path&, uint16_t verbosity);
// Return true if the directory is empty or only contains the .buildignore
// file. Fail if the directory doesn't exist.
diff --git a/libbuild2/filesystem.txx b/libbuild2/filesystem.txx
index 7404532..afdb48d 100644
--- a/libbuild2/filesystem.txx
+++ b/libbuild2/filesystem.txx
@@ -1,8 +1,6 @@
// file : libbuild2/filesystem.txx -*- C++ -*-
// license : MIT; see accompanying LICENSE file
-#include <type_traits> // is_base_of
-
#include <libbuild2/diagnostics.hxx>
namespace build2
@@ -15,16 +13,17 @@ namespace build2
// We don't want to print the command if we couldn't remove the file
// because it does not exist (just like we don't print the update command
- // if the file is up to date). This makes the below code a bit ugly.
+ // if the file is up to date). But we always want to print some command
+ // before we issue diagnostics. This makes the below code a bit ugly.
//
- auto print = [&f, &t, v] ()
+ auto print = [&f, &t, v] (bool ovr)
{
- if (verb >= v)
+ if (verb >= v || ovr)
{
if (verb >= 2)
text << "rm " << f;
else if (verb)
- text << "rm " << t;
+ print_diag ("rm", t); // T can be target or path.
}
};
@@ -38,12 +37,12 @@ namespace build2
}
catch (const system_error& e)
{
- print ();
+ print (true);
fail << "unable to remove file " << f << ": " << e << endf;
}
if (rs == rmfile_status::success)
- print ();
+ print (false);
return rs;
}
@@ -56,16 +55,17 @@ namespace build2
// We don't want to print the command if we couldn't remove the directory
// because it does not exist (just like we don't print mkdir if it already
- // exists) or if it is not empty. This makes the below code a bit ugly.
+ // exists) or if it is not empty. But we always want to print some command
+ // before we issue diagnostics. This makes the below code a bit ugly.
//
- auto print = [&d, &t, v] ()
+ auto print = [&d, &t, v] (bool ovr)
{
- if (verb >= v)
+ if (verb >= v || ovr)
{
if (verb >= 2)
text << "rmdir " << d;
else if (verb)
- text << (std::is_base_of<dir_path, T>::value ? "rmdir " : "rm ") << t;
+ print_diag ("rmdir", t); // T can be target or dir_path.
}
};
@@ -79,7 +79,7 @@ namespace build2
}
catch (const system_error& e)
{
- print ();
+ print (true);
fail << "unable to remove directory " << d << ": " << e << endf;
}
@@ -87,14 +87,14 @@ namespace build2
{
case rmdir_status::success:
{
- print ();
+ print (false);
break;
}
case rmdir_status::not_empty:
{
if (verb >= v && verb >= 2)
{
- text << d << " is "
+ info << d << " is "
<< (w ? "current working directory" : "not empty")
<< ", not removing";
}
diff --git a/libbuild2/forward.hxx b/libbuild2/forward.hxx
index d2b8989..057ab24 100644
--- a/libbuild2/forward.hxx
+++ b/libbuild2/forward.hxx
@@ -26,6 +26,7 @@ namespace build2
struct variable;
class variable_pool;
+ class variable_patterns;
class variable_map;
struct variable_override;
using variable_overrides = vector<variable_override>;
diff --git a/libbuild2/function.cxx b/libbuild2/function.cxx
index eaf3f9e..3110547 100644
--- a/libbuild2/function.cxx
+++ b/libbuild2/function.cxx
@@ -213,7 +213,7 @@ namespace build2
if (f->arg_types[i] &&
*f->arg_types[i] == nullptr &&
args[i].type != nullptr)
- untypify (args[i]);
+ untypify (args[i], true /* reduce */);
}
}
@@ -348,29 +348,40 @@ namespace build2
// Static-initialize the function map and populate with builtin functions.
//
+ // NOTE: remember to also arrange for automatic documentation extraction in
+ // doc/buildfile!
+ void bool_functions (function_map&); // functions-bool.cxx
void builtin_functions (function_map&); // functions-builtin.cxx
void filesystem_functions (function_map&); // functions-filesystem.cxx
+ void integer_functions (function_map&); // functions-integer.cxx
+ void json_functions (function_map&); // functions-json.cxx
void name_functions (function_map&); // functions-name.cxx
void path_functions (function_map&); // functions-path.cxx
void process_functions (function_map&); // functions-process.cxx
void process_path_functions (function_map&); // functions-process-path.cxx
void regex_functions (function_map&); // functions-regex.cxx
void string_functions (function_map&); // functions-string.cxx
+ void target_functions (function_map&); // functions-target.cxx
void target_triplet_functions (function_map&); // functions-target-triplet.cxx
void project_name_functions (function_map&); // functions-target-triplet.cxx
+
void
insert_builtin_functions (function_map& m)
{
+ bool_functions (m);
builtin_functions (m);
filesystem_functions (m);
+ integer_functions (m);
+ json_functions (m);
name_functions (m);
path_functions (m);
process_functions (m);
process_path_functions (m);
regex_functions (m);
string_functions (m);
+ target_functions (m);
target_triplet_functions (m);
project_name_functions (m);
}
diff --git a/libbuild2/function.hxx b/libbuild2/function.hxx
index 81ece89..cda856a 100644
--- a/libbuild2/function.hxx
+++ b/libbuild2/function.hxx
@@ -4,8 +4,9 @@
#ifndef LIBBUILD2_FUNCTION_HXX
#define LIBBUILD2_FUNCTION_HXX
-#include <utility> // index_sequence
-#include <type_traits> // aligned_storage
+#include <cstddef> // max_align_t
+#include <utility> // index_sequence
+#include <type_traits> // is_*
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
@@ -133,8 +134,8 @@ namespace build2
// Auxiliary data storage. Note that it is expected to be trivially
// copyable and destructible.
//
- std::aligned_storage<sizeof (void*) * 3>::type data;
- static const size_t data_size = sizeof (decltype (data));
+ static const size_t data_size = sizeof (void*) * 3;
+ alignas (std::max_align_t) unsigned char data[data_size];
function_overload (const char* an,
size_t mi, size_t ma, types ts,
@@ -952,7 +953,8 @@ namespace build2
// Low-level interface that can be used to pass additional data.
//
- // Note that the call to this function sidesteps the thunk.
+ // Note that the call to this function sidesteps the thunk. One notable
+ // consequence of this is that the values are not checked for NULL.
//
template <typename D, typename... A>
void
diff --git a/libbuild2/function.test.cxx b/libbuild2/function.test.cxx
index f711d2f..37ed5ff 100644
--- a/libbuild2/function.test.cxx
+++ b/libbuild2/function.test.cxx
@@ -115,7 +115,7 @@ namespace build2
else if (!a.empty ())
{
names storage;
- cout << reverse (a, storage);
+ cout << reverse (a, storage, true /* reduce */);
}
cout << endl;
}
@@ -124,7 +124,9 @@ namespace build2
try
{
- scope& s (ctx.global_scope.rw ());
+ // Use temp scope for the private variable pool.
+ //
+ temp_scope s (ctx.global_scope.rw ());
parser p (ctx);
p.parse_buildfile (cin, path_name ("buildfile"), &s, s);
diff --git a/libbuild2/functions-bool.cxx b/libbuild2/functions-bool.cxx
new file mode 100644
index 0000000..bb2fd3f
--- /dev/null
+++ b/libbuild2/functions-bool.cxx
@@ -0,0 +1,26 @@
+// file : libbuild2/functions-bool.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+using namespace std;
+
+namespace build2
+{
+ void
+ bool_functions (function_map& m)
+ {
+ function_family f (m, "bool");
+
+ // $string(<bool>)
+ //
+ // Convert a boolean value to a string literal `true` or `false`.
+ //
+
+ // Note that we don't handle NULL values for this type since it has no
+ // empty representation.
+ //
+ f["string"] += [](bool b) {return b ? "true" : "false";};
+ }
+}
diff --git a/libbuild2/functions-builtin.cxx b/libbuild2/functions-builtin.cxx
index 93f2d0f..e24ff8e 100644
--- a/libbuild2/functions-builtin.cxx
+++ b/libbuild2/functions-builtin.cxx
@@ -26,7 +26,7 @@ namespace build2
if (s == "dedup")
r = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
return r;
@@ -37,11 +37,17 @@ namespace build2
{
function_family f (m, "builtin");
- // Note that we may want to extend the scope argument to a more general
- // notion of "lookup context" (scope, target, prerequisite).
+ // $defined(<variable>)
+ //
+ // Return true if the specified variable is defined in the calling scope or
+ // any outer scopes.
//
// Note that this function is not pure.
//
+
+ // Note that we may want to extend the scope argument to a more general
+ // notion of "lookup context" (scope, target, prerequisite).
+ //
f.insert ("defined", false) += [](const scope* s, names name)
{
if (s == nullptr)
@@ -50,7 +56,17 @@ namespace build2
return (*s)[convert<string> (move (name))].defined ();
};
- // Return variable visibility if it has been entered and NULL otherwise.
+ // $visibility(<variable>)
+ //
+ // Return variable visibility if it is known and `null` otherwise.
+ //
+ // Possible visibility values are:
+ //
+ // global -- all outer scopes
+ // project -- this project (no outer projects)
+ // scope -- this scope (no outer scopes)
+ // target -- target and target type/pattern-specific
+ // prereq -- prerequisite-specific
//
// Note that this function is not pure.
//
@@ -60,36 +76,108 @@ namespace build2
fail << "visibility() called out of scope" << endf;
const variable* var (
- s->ctx.var_pool.find (convert<string> (move (name))));
+ s->var_pool ().find (convert<string> (move (name))));
return (var != nullptr
? optional<string> (to_string (var->visibility))
: nullopt);
};
+ // $type(<value>)
+ //
+ // Return the type name of the value or empty string if untyped.
+ //
f["type"] += [](value* v) {return v->type != nullptr ? v->type->name : "";};
+
+ // $null(<value>)
+ //
+ // Return true if the value is `null`.
+ //
f["null"] += [](value* v) {return v->null;};
+
+ // $empty(<value>)
+ //
+ // Return true if the value is empty.
+ //
f["empty"] += [](value* v) {return v->null || v->empty ();};
- f["identity"] += [](value* v) {return move (*v);};
- // string
+ // $first(<value>[, <not_pair>])
+ // $second(<value>[, <not_pair>])
+ //
+ // Return the first or the second half of a pair, respectively. If a value
+ // is not a pair, then return `null` unless the <not_pair> argument is
+ // `true`, in which case return the non-pair value.
+ //
+ // If multiple pairs are specified, then return the list of first/second
+ // halves. If an element is not a pair, then omit it from the resulting
+ // list unless the <not_pair> argument is `true`, in which case add the
+ // non-pair element to the list.
+ //
+ f["first"] += [] (names ns, optional<value> not_pair)
+ {
+ // @@ TODO: would be nice to return typed half if passed typed value.
+
+ bool np (not_pair && convert<bool> (move (*not_pair)));
+
+ names r;
+ for (auto i (ns.begin ()), e (ns.end ()); i != e; )
+ {
+ name& f (*i++);
+ name* s (f.pair ? &*i++ : nullptr);
+
+ if (s != nullptr || np)
+ {
+ f.pair = '\0';
+ r.push_back (move (f));
+ }
+ else if (ns.size () == 1)
+ return value (nullptr); // Single non-pair.
+ }
+
+ return value (move (r));
+ };
+
+ f["second"] += [] (names ns, optional<value> not_pair)
+ {
+ bool np (not_pair && convert<bool> (move (*not_pair)));
+
+ names r;
+ for (auto i (ns.begin ()), e (ns.end ()); i != e; )
+ {
+ name& f (*i++);
+ name* s (f.pair ? &*i++ : nullptr);
+
+ if (s != nullptr)
+ r.push_back (move (*s));
+ else if (np)
+ r.push_back (move (f));
+ else if (ns.size () == 1)
+ return value (nullptr); // Single non-pair.
+ }
+
+ return value (move (r));
+ };
+
+ // Leave this one undocumented for now since it's unclear why would anyone
+ // want to use it currently (we don't yet have any function composition
+ // facilities).
//
- f["string"] += [](bool b) {return b ? "true" : "false";};
- f["string"] += [](int64_t i) {return to_string (i);};
- f["string"] += [](uint64_t i) {return to_string (i);};
+ f["identity"] += [](value* v) {return move (*v);};
- // Quote a value returning its string representation. If escape is true,
- // then also escape (with a backslash) the quote characters being added
- // (this is useful if the result will be re-parsed, for example as a
- // Testscript command line).
+ // $quote(<value>[, <escape>])
+ //
+ // Quote the value returning its string representation. If <escape> is
+ // `true`, then also escape (with a backslash) the quote characters being
+ // added (this is useful if the result will be re-parsed, for example as a
+ // script command line).
//
f["quote"] += [](value* v, optional<value> escape)
{
if (v->null)
return string ();
- untypify (*v); // Reverse to names.
+ untypify (*v, true /* reduce */); // Reverse to names.
ostringstream os;
to_stream (os,
@@ -100,48 +188,13 @@ namespace build2
return os.str ();
};
- // $size(<ints>)
- //
- // Return the number of elements in the sequence.
- //
- f["size"] += [] (int64s v) {return v.size ();};
- f["size"] += [] (uint64s v) {return v.size ();};
-
- // $sort(<ints> [, <flags>])
- //
- // Sort integers in ascending order.
- //
- // The following flags are supported:
- //
- // dedup - in addition to sorting also remove duplicates
- //
- f["sort"] += [](int64s v, optional<names> fs)
- {
- sort (v.begin (), v.end ());
-
- if (functions_sort_flags (move (fs)))
- v.erase (unique (v.begin(), v.end()), v.end ());
-
- return v;
- };
-
- f["sort"] += [](uint64s v, optional<names> fs)
- {
- sort (v.begin (), v.end ());
-
- if (functions_sort_flags (move (fs)))
- v.erase (unique (v.begin(), v.end()), v.end ());
-
- return v;
- };
-
- // getenv
+ // $getenv(<name>)
//
- // Return NULL if the environment variable is not set, untyped value
- // otherwise.
+ // Get the value of the environment variable. Return `null` if the
+ // environment variable is not set.
//
// Note that if the build result can be affected by the variable being
- // queried, then it should be reported with the config.environment
+ // queried, then it should be reported with the `config.environment`
// directive.
//
// Note that this function is not pure.
diff --git a/libbuild2/functions-filesystem.cxx b/libbuild2/functions-filesystem.cxx
index ef7bfc5..665a0f3 100644
--- a/libbuild2/functions-filesystem.cxx
+++ b/libbuild2/functions-filesystem.cxx
@@ -7,6 +7,7 @@
#include <libbuild2/variable.hxx>
using namespace std;
+using namespace butl;
namespace build2
{
@@ -29,12 +30,27 @@ namespace build2
return true;
};
+ auto dangling = [] (const dir_entry& de)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << de.base () / de.path ();
+
+ return true;
+ };
+
// Print paths "as is" in the diagnostics.
//
try
{
if (pattern.absolute ())
- path_search (pattern, add);
+ path_search (pattern,
+ add,
+ dir_path () /* start */,
+ path_match_flags::follow_symlinks,
+ dangling);
else
{
// An absolute start directory must be specified for the relative
@@ -54,7 +70,11 @@ namespace build2
<< "' is relative";
}
- path_search (pattern, add, *start);
+ path_search (pattern,
+ add,
+ *start,
+ path_match_flags::follow_symlinks,
+ dangling);
}
}
catch (const system_error& e)
@@ -83,14 +103,19 @@ namespace build2
function_family f (m, "filesystem");
- // path_search
+ // $path_search(<pattern>[, <start-dir>])
//
- // Return filesystem paths that match the pattern. If the pattern is an
- // absolute path, then the start directory is ignored (if present).
- // Otherwise, the start directory must be specified and be absolute.
+ // Return filesystem paths that match the shell-like wildcard pattern. If
+ // the pattern is an absolute path, then the start directory is ignored
+ // (if present). Otherwise, the start directory must be specified and be
+ // absolute.
//
// Note that this function is not pure.
//
+
+ // @@ In the future we may want to add a flag that controls the
+ // dangling/inaccessible treatment.
+ //
{
auto e (f.insert ("path_search", false));
@@ -115,6 +140,5 @@ namespace build2
convert<dir_path> (move (start)));
};
}
-
}
}
diff --git a/libbuild2/functions-integer.cxx b/libbuild2/functions-integer.cxx
new file mode 100644
index 0000000..8f9e2cf
--- /dev/null
+++ b/libbuild2/functions-integer.cxx
@@ -0,0 +1,156 @@
+// file : libbuild2/functions-integer.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+using namespace std;
+
+namespace build2
+{
+ extern bool
+ functions_sort_flags (optional<names>); // functions-builtin.cxx
+
+ static string
+ to_string (uint64_t i, optional<value> base, optional<value> width)
+ {
+ int b (base ?
+ static_cast<int> (convert<uint64_t> (move (*base)))
+ : 10);
+
+ size_t w (width
+ ? static_cast<size_t> (convert<uint64_t> (move (*width)))
+ : 0);
+
+ return (to_string (i, b, w));
+ }
+
+ void
+ integer_functions (function_map& m)
+ {
+ function_family f (m, "integer");
+
+ // $string(<int64>)
+ // $string(<uint64>[, <base>[, <width>]])
+ //
+ // Convert an integer to a string. For unsigned integers we can specify
+ // the desired base and width. For example:
+ //
+ // x = [uint64] 0x0000ffff
+ //
+ // c.poptions += "-DOFFSET=$x" # -DOFFSET=65535
+ // c.poptions += "-DOFFSET=$string($x, 16)" # -DOFFSET=0xffff
+ // c.poptions += "-DOFFSET=$string($x, 16, 8)" # -DOFFSET=0x0000ffff
+ //
+
+ // Note that we don't handle NULL values for these types since they have no
+ // empty representation.
+ //
+ f["string"] += [](int64_t i) {return to_string (i);};
+
+ f["string"] += [](uint64_t i, optional<value> base, optional<value> width)
+ {
+ return to_string (i, move (base), move (width));
+ };
+
+ // $integer_sequence(<begin>, <end>[, <step>])
+ //
+ // Return the list of uint64 integers starting from <begin> (including) to
+ // <end> (excluding) with the specified <step> or `1` if unspecified. If
+ // <begin> is greater than <end>, an empty list is returned.
+ //
+
+ // Note that currently negative numbers are not supported but this could
+ // be handled if required (e.g., by returning int64s in this case).
+ //
+ // Note also that we could improve this by adding a shortcut to get the
+ // indexes of a list (for example, $indexes(<list>) plus potentially a
+ // similar $keys() function for maps).
+ //
+ f["integer_sequence"] += [](value begin, value end, optional<value> step)
+ {
+ uint64_t b (convert<uint64_t> (move (begin)));
+ uint64_t e (convert<uint64_t> (move (end)));
+ uint64_t s (step ? convert<uint64_t> (move (*step)) : 1);
+
+ uint64s r;
+ if (b < e)
+ {
+ r.reserve (static_cast<size_t> ((e - b) / s + 1));
+
+ for (; b < e; b += s)
+ r.push_back (static_cast<size_t> (b));
+ }
+
+ return r;
+ };
+
+ // $size(<ints>)
+ //
+ // Return the number of elements in the sequence.
+ //
+ f["size"] += [] (int64s v) {return v.size ();};
+ f["size"] += [] (uint64s v) {return v.size ();};
+
+ // $sort(<ints> [, <flags>])
+ //
+ // Sort integers in ascending order.
+ //
+ // The following flags are supported:
+ //
+ // dedup - in addition to sorting also remove duplicates
+ //
+ f["sort"] += [](int64s v, optional<names> fs)
+ {
+ sort (v.begin (), v.end ());
+
+ if (functions_sort_flags (move (fs)))
+ v.erase (unique (v.begin(), v.end()), v.end ());
+
+ return v;
+ };
+
+ f["sort"] += [](uint64s v, optional<names> fs)
+ {
+ sort (v.begin (), v.end ());
+
+ if (functions_sort_flags (move (fs)))
+ v.erase (unique (v.begin(), v.end()), v.end ());
+
+ return v;
+ };
+
+ // $find(<ints>, <int>)
+ //
+ // Return true if the integer sequence contains the specified integer.
+ //
+ f["find"] += [](int64s vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<int64_t> (move (v))) != vs.end ();
+ };
+
+ f["find"] += [](uint64s vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<uint64_t> (move (v))) != vs.end ();
+ };
+
+ // $find_index(<ints>, <int>)
+ //
+ // Return the index of the first element in the integer sequence that is
+ // equal to the specified integer or `$size(ints)` if none is found.
+ //
+ f["find_index"] += [](int64s vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<int64_t> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
+ f["find_index"] += [](uint64s vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<uint64_t> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+ }
+}
diff --git a/libbuild2/functions-json.cxx b/libbuild2/functions-json.cxx
new file mode 100644
index 0000000..e06d9a5
--- /dev/null
+++ b/libbuild2/functions-json.cxx
@@ -0,0 +1,335 @@
+// file : libbuild2/functions-json.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/parser.hxx>
+# include <libbutl/json/serializer.hxx>
+#endif
+
+using namespace std;
+
+namespace build2
+{
+ static size_t
+ array_find_index (const json_value& a, value v)
+ {
+ if (a.type != json_type::array)
+ fail << "expected json array instead of " << to_string (a.type)
+ << " as first argument";
+
+ auto b (a.array.begin ()), e (a.array.end ());
+ auto i (find (b, e, convert<json_value> (move (v))));
+ return i != e ? i - b : a.array.size ();
+ };
+
+ void
+ json_functions (function_map& m)
+ {
+ function_family f (m, "json");
+
+ // $value_type(<json>[, <distinguish_numbers>])
+ //
+ // Return the type of a JSON value: `null`, `boolean`, `number`, `string`,
+ // `array`, or `object`. If the <distinguish_numbers> argument is `true`,
+ // then instead of `number` return `signed number`, `unsigned number`, or
+ // `hexadecimal number`.
+ //
+ f["value_type"] += [] (json_value v, optional<value> distinguish_numbers)
+ {
+ bool dn (distinguish_numbers &&
+ convert<bool> (move (*distinguish_numbers)));
+
+ return to_string (v.type, dn);
+ };
+
+ // $value_size(<json>)
+ //
+ // Return the size of a JSON value.
+ //
+ // The size of a `null` value is `0`. The sizes of simple values
+ // (`boolean`, `number`, and `string`) are `1`. The size of `array` and
+ // `object` values is the number of elements and members, respectively.
+ //
+ // Note that the size of a `string` JSON value is not the length of the
+ // string. To get the length call `$string.size()` instead by casting the
+ // JSON value to the `string` value type.
+ //
+ f["value_size"] += [] (json_value v) -> size_t
+ {
+ // Note: should be consistent with value_traits<json_value>::empty(),
+ // json_subscript().
+ //
+ switch (v.type)
+ {
+ case json_type::null: return 0;
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ case json_type::string: break;
+ case json_type::array: return v.array.size ();
+ case json_type::object: return v.object.size ();
+ }
+
+ return 1;
+ };
+
+ // $member_name(<json-member>)
+ //
+ // Return the name of a JSON object member.
+ //
+ f["member_name"] += [] (json_value v)
+ {
+ // A member becomes an object with a single member (see json_reverse()
+ // for details).
+ //
+ if (v.type == json_type::object && v.object.size () == 1)
+ return move (v.object.front ().name);
+
+ fail << "json object member expected instead of " << v.type << endf;
+ };
+
+ // $member_value(<json-member>)
+ //
+ // Return the value of a JSON object member.
+ //
+ f["member_value"] += [] (json_value v)
+ {
+ // A member becomes an object with a single member (see json_reverse()
+ // for details).
+ //
+ if (v.type == json_type::object && v.object.size () == 1)
+ {
+ // Reverse simple JSON values to the corresponding fundamental type
+ // values for consistency with subscript/iteration (see
+ // json_subscript_impl() for background).
+ //
+ json_value& jr (v.object.front ().value);
+
+ switch (jr.type)
+ {
+#if 0
+ case json_type::null: return value (names {});
+#else
+ case json_type::null: return value ();
+#endif
+ case json_type::boolean: return value (jr.boolean);
+ case json_type::signed_number: return value (jr.signed_number);
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: return value (jr.unsigned_number);
+ case json_type::string: return value (move (jr.string));
+ case json_type::array:
+ case json_type::object: return value (move (jr));
+ }
+ }
+
+ fail << "json object member expected instead of " << v.type << endf;
+ };
+
+ // $object_names(<json-object>)
+ //
+ // Return the list of names in the JSON object. If the JSON `null` is
+ // passed instead, assume it is a missing object and return an empty list.
+ //
+ f["object_names"] += [] (json_value o)
+ {
+ names ns;
+
+ if (o.type == json_type::null)
+ ;
+ else if (o.type == json_type::object)
+ {
+ ns.reserve (o.object.size ());
+
+ for (json_member& m: o.object)
+ ns.push_back (name (move (m.name)));
+ }
+ else
+ fail << "expected json object instead of " << to_string (o.type);
+
+ return ns;
+ };
+
+ // $array_size(<json-array>)
+ //
+ // Return the number of elements in the JSON array. If the JSON `null`
+ // value is passed instead, assume it is a missing array and return `0`.
+ //
+ f["array_size"] += [] (json_value a) -> size_t
+ {
+ if (a.type == json_type::null)
+ return 0;
+
+ if (a.type == json_type::array)
+ return a.array.size ();
+
+ fail << "expected json array instead of " << to_string (a.type) << endf;
+ };
+
+ // $array_find(<json-array>, <json>)
+ //
+ // Return true if the JSON array contains the specified JSON value. If the
+ // JSON `null` value is passed instead, assume it is a missing array and
+ // return `false`.
+ //
+ f["array_find"] += [] (json_value a, value v)
+ {
+ if (a.type == json_type::null)
+ return false;
+
+ size_t i (array_find_index (a, move (v)));
+ return i != a.array.size (); // We now know it's an array.
+ };
+
+ // $array_find_index(<json-array>, <json>)
+ //
+ // Return the index of the first element in the JSON array that is equal
+ // to the specified JSON value or `$array_size(<json-array>)` if none is
+ // found. If the JSON `null` value is passed instead, assume it is a
+ // missing array and return `0`.
+ //
+ f["array_find_index"] += [](json_value a, value v) -> size_t
+ {
+ if (a.type == json_type::null)
+ return 0;
+
+ return array_find_index (a, move (v));
+ };
+
+#ifndef BUILD2_BOOTSTRAP
+
+ // @@ Flag to support multi-value (returning it as JSON array)? Then
+ // probably also in $serialize().
+ //
+ // @@ Flag to override duplicates instead of failing?
+
+ // $json.load(<path>)
+ //
+ // Parse the contents of the specified file as JSON input text and return
+ // the result as a value of the `json` type.
+ //
+ // See also `$json.parse()`.
+ //
+ // Note that this function is not pure.
+ //
+ f.insert (".load", false) += [] (names xf)
+ {
+ path f (convert<path> (move (xf)));
+
+ try
+ {
+ ifdstream is (f);
+ json_parser p (is, f.string ());
+ return json_value (p);
+ }
+ catch (const invalid_json_input& e)
+ {
+ fail (location (f, e.line, e.column)) << "invalid json input: " << e <<
+ info << "byte offset " << e.position << endf;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read from " << f << ": " << e << endf;
+ }
+ };
+
+ // $json.parse(<text>)
+ //
+ // Parse the specified JSON input text and return the result as a value of
+ // the `json` type.
+ //
+ // See also `$json.load()` and `$json.serialize()`.
+ //
+ f[".parse"] += [] (names text)
+ {
+ string t (convert<string> (move (text)));
+
+ try
+ {
+ json_parser p (t, nullptr /* name */);
+ return json_value (p);
+ }
+ catch (const invalid_json_input& e)
+ {
+ fail << "invalid json input: " << e <<
+ info << "line " << e.line
+ << ", column " << e.column
+ << ", byte offset " << e.position << endf;
+ }
+ };
+
+ // $serialize(<json>[, <indentation>])
+ //
+ // Serialize the specified JSON value and return the resulting JSON output
+ // text.
+ //
+ // The optional <indentation> argument specifies the number of indentation
+ // spaces that should be used for pretty-printing. If `0` is passed, then
+ // no pretty-printing is performed. The default is `2` spaces.
+ //
+ // See also `$json.parse()`.
+ //
+ f["serialize"] += [] (json_value v, optional<value> indentation)
+ {
+ uint64_t i (indentation ? convert<uint64_t> (*indentation) : 2);
+
+ try
+ {
+ // For the diagnostics test.
+ //
+#if 0
+ if (v.type == json_type::string && v.string == "deadbeef")
+ {
+ v.string[4] = 0xe0;
+ v.string[5] = 0xe0;
+ }
+#endif
+
+ string o;
+ json_buffer_serializer s (o, i);
+ v.serialize (s);
+ return o;
+ }
+ catch (const invalid_json_output& e)
+ {
+ diag_record dr;
+ dr << fail << "invalid json value: " << e;
+
+ if (e.event)
+ dr << info << "while serializing " << to_string (*e.event);
+
+ if (e.offset != string::npos)
+ dr << info << "offending byte offset " << e.offset;
+
+ dr << endf;
+ }
+ };
+#endif
+
+ // $size(<json-set>)
+ // $size(<json-map>)
+ //
+ // Return the number of elements in the sequence.
+ //
+ f["size"] += [] (set<json_value> v) {return v.size ();};
+ f["size"] += [] (map<json_value, json_value> v) {return v.size ();};
+
+ // $keys(<json-map>)
+ //
+ // Return the list of keys in a json map as a json array.
+ //
+ // Note that the result is sorted in ascending order.
+ //
+ f["keys"] += [](map<json_value, json_value> v)
+ {
+ json_value r (json_type::array);
+ r.array.reserve (v.size ());
+ for (pair<const json_value, json_value>& p: v)
+ r.array.push_back (p.first); // @@ PERF: use C++17 map::extract() to steal.
+ return r;
+ };
+ }
+}
diff --git a/libbuild2/functions-name.cxx b/libbuild2/functions-name.cxx
index 72ac2bd..456f85b 100644
--- a/libbuild2/functions-name.cxx
+++ b/libbuild2/functions-name.cxx
@@ -20,36 +20,59 @@ namespace build2
// out of scope). See scope::find_target_type() for details. Allow out-
// qualified names (out is discarded).
//
- static pair<name, optional<string>>
- to_target_name (const scope* s, name&& n, const name& o = name ())
+ static pair<const target_type*, optional<string>>
+ to_target_type (const scope* s, name& n, const name& o = name ())
{
if (n.pair && !o.directory ())
fail << "name pair in names";
- optional<string> e;
+ return s != nullptr
+ ? s->find_target_type (n, location ())
+ : pair<const target_type*, optional<string>> {nullptr, nullopt};
+ }
- if (s != nullptr)
- {
- auto rp (s->find_target_type (n, location ()));
+ static pair<name, optional<string>>
+ to_target_name (const scope* s, name&& n, const name& o = name ())
+ {
+ auto rp (to_target_type (s, n, o));
- if (rp.first != nullptr)
- n.type = rp.first->name;
+ if (rp.first != nullptr)
+ n.type = rp.first->name;
- e = move (rp.second);
+ if (n.value.empty () && (n.type == "dir" || n.type == "fsdir"))
+ {
+ n.value = n.dir.leaf ().string ();
+ n.dir.make_directory ();
}
- return make_pair (move (n), move (e));
+ return make_pair (move (n), move (rp.second));
}
const target&
to_target (const scope& s, name&& n, name&& o)
{
+ // Note: help the user out and search in both out and src like a
+ // prerequisite.
+ //
if (const target* r = search_existing (n, s, o.dir))
return *r;
- fail << "target "
- << (n.pair ? names {move (n), move (o)} : names {move (n)})
- << " not found" << endf;
+ // Inside recipes we don't treat `{}` as special so a literal target name
+ // will have no type and won't be found, which is confusing as hell.
+ //
+ bool typed (n.typed ());
+
+ diag_record dr (fail);
+
+ dr << "target "
+ << (n.pair ? names {move (n), move (o)} : names {move (n)})
+ << " not found";
+
+ if (!typed)
+ dr << info << "wrap it in ([names] ...) if this is literal target name "
+ << "specified inside recipe";
+
+ dr << endf;
}
const target&
@@ -61,6 +84,103 @@ namespace build2
return to_target (s, move (ns[0]), move (ns[0].pair ? ns[1] : o));
}
+ static bool
+ is_a (const scope* s, name&& n, const name& o, names&& t)
+ {
+ if (s == nullptr)
+ fail << "name.is_a() called out of scope";
+
+ string tts (convert<string> (move (t)));
+ const target_type* tt (s->find_target_type (tts));
+ if (tt == nullptr)
+ fail << "unknown target type " << tts;
+
+ const target_type* ntt (to_target_type (s, n, o).first);
+ if (ntt == nullptr)
+ {
+ // If this is an imported target and the target type is unknown, then
+ // it cannot possibly match one of the known types. We handle it like
+ // this instead of failing because the later failure (e.g., as a
+ // result of this target listed as prerequisite) will have more
+ // accurate diagnostics. See also filter() below.
+ //
+ if (n.proj)
+ return false;
+
+ fail << "unknown target type " << n.type << " in " << n;
+ }
+
+ return ntt->is_a (*tt);
+ }
+
+ static names
+ filter (const scope* s, names ns, names ts, bool out)
+ {
+ if (s == nullptr)
+ fail << "name." << (out ? "filter_out" : "filter")
+ << "() called out of scope";
+
+ small_vector<const target_type*, 1> tts;
+ for (const name& n: ts)
+ {
+ if (!n.simple ())
+ fail << "invalid target type name " << n;
+
+ if (n.pair)
+ fail << "pair in target type name " << n;
+
+ const target_type* tt (s->find_target_type (n.value));
+ if (tt == nullptr)
+ fail << "unknown target type " << n.value;
+
+ tts.push_back (tt);
+ }
+
+ names r;
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i);
+ bool p (n.pair);
+
+ // to_target_type() splits the name into the target name and extension.
+ // While we could try to reconstitute it with combine_name(), there are
+ // murky corner cases (see the default_extension argument) which won't
+ // be easy to handle. So let's just make a copy. Looking at the
+ // implementation of scope::find_target_type(), we can optimize for the
+ // (common) typed case by only copying the type.
+ //
+ name c (n.typed () ? name (n.type, "") : n);
+
+ const target_type* ntt (to_target_type (s, c, p ? *++i : name ()).first);
+ if (ntt == nullptr)
+ {
+ // If this is an imported target and the target type is unknown, then
+ // it cannot possibly match one of the known types. We handle it like
+ // this instead of failing because the later failure (e.g., as a
+ // result of this target listed as prerequisite) will have more
+ // accurate diagnostics. See also is_a() above.
+ //
+ if (!n.proj)
+ fail << "unknown target type " << n.type << " in " << n;
+ }
+
+ if (ntt != nullptr
+ ? (find_if (tts.begin (), tts.end (),
+ [ntt] (const target_type* tt)
+ {
+ return ntt->is_a (*tt);
+ }) != tts.end ()) != out
+ : out)
+ {
+ r.push_back (move (n));
+ if (p)
+ r.push_back (move (*i));
+ }
+ }
+
+ return r;
+ }
+
void
name_functions (function_map& m)
{
@@ -71,17 +191,30 @@ namespace build2
// on prerequisite names. They also won't always return the same result as
// if we were interrogating an actual target (e.g., the directory may be
// relative). Plus we now have functions that can only be called on
- // targets (see below).
+ // targets (see functions-target.cxx).
//
- function_family fn (m, "name");
+ function_family f (m, "name");
- fn["string"] += [](name n) {return to_string (n);};
+ // Note: let's leave this undocumented for now since it's not often needed
+ // and is a can of worms.
+ //
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](name* n)
+ {
+ return n != nullptr ? to_string (move (*n)) : string ();
+ };
- fn["name"] += [](const scope* s, name n)
+ // $name(<names>)
+ //
+ // Return the name of a target (or a list of names for a list of targets).
+ //
+ f["name"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.value;
};
- fn["name"] += [](const scope* s, names ns)
+ f["name"] += [](const scope* s, names ns)
{
small_vector<string, 1> r;
@@ -99,14 +232,18 @@ namespace build2
make_move_iterator (r.end ())));
};
- // Note: returns NULL if extension is unspecified (default) and empty if
- // specified as no extension.
+ // $extension(<name>)
//
- fn["extension"] += [](const scope* s, name n)
+ // Return the extension of a target.
+ //
+ // Note that this function returns `null` if the extension is unspecified
+ // (default) and an empty string if it's specified as no extension.
+ //
+ f["extension"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).second;
};
- fn["extension"] += [](const scope* s, names ns)
+ f["extension"] += [](const scope* s, names ns)
{
// Note: can't do multiple due to NULL semantics.
//
@@ -121,11 +258,16 @@ namespace build2
return to_target_name (s, move (n), o).second;
};
- fn["directory"] += [](const scope* s, name n)
+ // $directory(<names>)
+ //
+ // Return the directory of a target (or a list of directories for a list
+ // of targets).
+ //
+ f["directory"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.dir;
};
- fn["directory"] += [](const scope* s, names ns)
+ f["directory"] += [](const scope* s, names ns)
{
small_vector<dir_path, 1> r;
@@ -143,11 +285,16 @@ namespace build2
make_move_iterator (r.end ())));
};
- fn["target_type"] += [](const scope* s, name n)
+ // $target_type(<names>)
+ //
+ // Return the target type name of a target (or a list of target type names
+ // for a list of targets).
+ //
+ f["target_type"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.type;
};
- fn["target_type"] += [](const scope* s, names ns)
+ f["target_type"] += [](const scope* s, names ns)
{
small_vector<string, 1> r;
@@ -165,13 +312,15 @@ namespace build2
make_move_iterator (r.end ())));
};
- // Note: returns NULL if no project specified.
+ // $project(<name>)
+ //
+ // Return the project of a target or `null` if not project-qualified.
//
- fn["project"] += [](const scope* s, name n)
+ f["project"] += [](const scope* s, name n)
{
return to_target_name (s, move (n)).first.proj;
};
- fn["project"] += [](const scope* s, names ns)
+ f["project"] += [](const scope* s, names ns)
{
// Note: can't do multiple due to NULL semantics.
//
@@ -186,11 +335,50 @@ namespace build2
return to_target_name (s, move (n), o).first.proj;
};
+ // $is_a(<name>, <target-type>)
+ //
+ // Return true if the <name>'s target type is-a <target-type>. Note that
+ // this is a dynamic type check that takes into account target type
+ // inheritance.
+ //
+ f["is_a"] += [](const scope* s, name n, names t)
+ {
+ return is_a (s, move (n), name (), move (t));
+ };
+ f["is_a"] += [](const scope* s, names ns, names t)
+ {
+ auto i (ns.begin ());
+
+ name& n (*i);
+ const name& o (n.pair ? *++i : name ());
+
+ if (++i != ns.end ())
+ fail << "invalid name value: multiple names"; // Like in convert().
+
+ return is_a (s, move (n), o, move (t));
+ };
+
+ // $filter(<names>, <target-types>)
+ // $filter_out(<names>, <target-types>)
+ //
+ // Return names with target types which are-a (`filter`) or not are-a
+ // (`filter_out`) one of <target-types>. See `$is_a()` for background.
+ //
+ f["filter"] += [](const scope* s, names ns, names ts)
+ {
+ return filter (s, move (ns), move (ts), false /* out */);
+ };
+
+ f["filter_out"] += [](const scope* s, names ns, names ts)
+ {
+ return filter (s, move (ns), move (ts), true /* out */);
+ };
+
// $size(<names>)
//
// Return the number of elements in the sequence.
//
- fn["size"] += [] (names ns)
+ f["size"] += [] (names ns)
{
size_t n (0);
@@ -204,109 +392,60 @@ namespace build2
return n;
};
- // $sort(<names> [, <flags>])
+ // $sort(<names>[, <flags>])
//
// Sort names in ascending order.
//
// The following flags are supported:
//
- // dedup - in addition to sorting also remove duplicates
+ // dedup - in addition to sorting also remove duplicates
//
- fn["sort"] += [] (names ns, optional<names> fs)
+ f["sort"] += [] (names ns, optional<names> fs)
{
//@@ TODO: shouldn't we do this in a pair-aware manner?
sort (ns.begin (), ns.end ());
if (functions_sort_flags (move (fs)))
- ns.erase (unique (ns.begin(), ns.end()), ns.end ());
+ ns.erase (unique (ns.begin (), ns.end ()), ns.end ());
return ns;
};
- // Functions that can be called only on real targets.
+ // $find(<names>, <name>)
//
- function_family ft (m, "target");
-
- // Note that while this function is not technically pure, we don't mark it
- // as such since it can only be called (normally form a recipe) after the
- // target has been matched, meaning that this target is a prerequisite and
- // therefore this impurity has been accounted for.
+ // Return true if the name sequence contains the specified name.
//
- ft["path"] += [](const scope* s, names ns)
+ f["find"] += [](names vs, names v)
{
- if (s == nullptr)
- fail << "target.path() called out of scope";
-
- // Most of the time we will have a single target so optimize for that.
- //
- small_vector<path, 1> r;
-
- for (auto i (ns.begin ()); i != ns.end (); ++i)
- {
- name& n (*i), o;
- const target& t (to_target (*s, move (n), move (n.pair ? *++i : o)));
-
- if (const auto* pt = t.is_a<path_target> ())
- {
- const path& p (pt->path ());
-
- if (&p != &empty_path)
- r.push_back (p);
- else
- fail << "target " << t << " path is not assigned";
- }
- else
- fail << "target " << t << " is not path-based";
- }
-
- // We want the result to be path if we were given a single target and
- // paths if multiple (or zero). The problem is, we cannot distinguish it
- // based on the argument type (e.g., name vs names) since passing an
- // out-qualified single target requires two names.
- //
- if (r.size () == 1)
- return value (move (r[0]));
+ //@@ TODO: shouldn't we do this in a pair-aware manner?
- return value (paths (make_move_iterator (r.begin ()),
- make_move_iterator (r.end ())));
+ return find (vs.begin (), vs.end (),
+ convert<name> (move (v))) != vs.end ();
};
- // This one can only be called on a single target since we don't support
- // containers of process_path's (though we probably could).
+ // $find_index(<names>, <name>)
//
- // Note that while this function is not technically pure, we don't mark it
- // as such for the same reasons as $path() above.
+ // Return the index of the first element in the name sequence that is
+ // equal to the specified name or `$size(names)` if none is found.
//
- fn["process_path"] += [](const scope* s, names ns)
+ f["find_index"] += [](names vs, names v)
{
- if (s == nullptr)
- fail << "target.process_path() called out of scope";
-
- if (ns.empty () || ns.size () != (ns[0].pair ? 2 : 1))
- fail << "target.process_path() expects single target";
-
- name o;
- const target& t (
- to_target (*s, move (ns[0]), move (ns[0].pair ? ns[1] : o)));
-
- if (const auto* et = t.is_a<exe> ())
- {
- process_path r (et->process_path ());
-
- if (r.empty ())
- fail << "target " << t << " path is not assigned";
+ //@@ TODO: shouldn't we do this in a pair-aware manner?
- return r;
- }
- else
- fail << "target " << t << " is not process_path-based" << endf;
+ auto i (find (vs.begin (), vs.end (), convert<name> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
};
// Name-specific overloads from builtins.
//
function_family fb (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result will unlikely be what the user expected. So for now
+ // we keep it a bit tighter.
+ //
fb[".concat"] += [](dir_path d, name n)
{
d /= n.dir;
diff --git a/libbuild2/functions-path.cxx b/libbuild2/functions-path.cxx
index 8362d2e..4b114f5 100644
--- a/libbuild2/functions-path.cxx
+++ b/libbuild2/functions-path.cxx
@@ -154,14 +154,66 @@ namespace build2
return path_match (entry, pattern, *start);
}
+ // Don't fail for absolute paths on Windows and, for example, just return
+ // c:/foo for c:\foo.
+ //
+ template <typename P>
+ static inline string
+ posix_string (P&& p)
+ {
+#ifndef _WIN32
+ return move (p).posix_string ();
+#else
+ if (p.relative ())
+ return move (p).posix_string ();
+
+ // Note: also handles root directories.
+ //
+ dir_path d (p.root_directory ());
+ return d.string () + '/' + p.leaf (d).posix_string ();
+#endif
+ }
+
+ // Similar to the above don't fail for absolute paths on Windows.
+ //
+ template <typename P>
+ static inline string
+ posix_representation (P&& p)
+ {
+#ifndef _WIN32
+ return move (p).posix_representation ();
+#else
+ if (p.relative ())
+ return move (p).posix_representation ();
+
+ // Note: also handles root directories.
+ //
+ dir_path d (p.root_directory ());
+ return d.string () + '/' + p.leaf (d).posix_representation ();
+#endif
+ }
+
void
path_functions (function_map& m)
{
function_family f (m, "path", &path_thunk);
- // string
+ // $string(<paths>)
+ //
+ // Return the traditional string representation of a path (or a list of
+ // string representations for a list of paths). In particular, for
+ // directory paths, the traditional representation does not include the
+ // trailing directory separator (except for the POSIX root directory). See
+ // `$representation()` below for the precise string representation.
+ //
+
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
//
- f["string"] += [](path p) {return move (p).string ();};
+ f["string"] += [](path* p)
+ {
+ return p != nullptr ? move (*p).string () : string ();
+ };
f["string"] += [](paths v)
{
@@ -179,7 +231,53 @@ namespace build2
return r;
};
- // representation
+ // $posix_string(<paths>)
+ // $path.posix_string(<untyped>)
+ //
+ // Return the traditional string representation of a path (or a list of
+ // string representations for a list of paths) using the POSIX directory
+ // separators (forward slashes).
+ //
+ f["posix_string"] += [](path p) {return posix_string (move (p));};
+ f["posix_string"] += [](dir_path p) {return posix_string (move (p));};
+
+ f["posix_string"] += [](paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_string (move (p)));
+ return r;
+ };
+
+ f["posix_string"] += [](dir_paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_string (move (p)));
+ return r;
+ };
+
+ f[".posix_string"] += [](names ns)
+ {
+ // For each path decide based on the presence of a trailing slash
+ // whether it is a directory. Return as untyped list of strings.
+ //
+ for (name& n: ns)
+ {
+ n = n.directory ()
+ ? posix_string (move (n.dir))
+ : posix_string (convert<path> (move (n)));
+ }
+ return ns;
+ };
+
+ // $representation(<paths>)
+ //
+ // Return the precise string representation of a path (or a list of string
+ // representations for a list of paths). In particular, for directory
+ // paths, the precise representation includes the trailing directory
+ // separator. See `$string()` above for the traditional string
+ // representation.
//
f["representation"] += [](path p) {return move (p).representation ();};
@@ -199,8 +297,61 @@ namespace build2
return r;
};
- // canonicalize
+ // $posix_representation(<paths>)
+ // $path.posix_representation(<untyped>)
+ //
+ // Return the precise string representation of a path (or a list of string
+ // representations for a list of paths) using the POSIX directory
+ // separators (forward slashes).
//
+ f["posix_representation"] += [](path p)
+ {
+ return posix_representation (move (p));
+ };
+
+ f["posix_representation"] += [](dir_path p)
+ {
+ return posix_representation (move (p));
+ };
+
+ f["posix_representation"] += [](paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_representation (move (p)));
+ return r;
+ };
+
+ f["posix_representation"] += [](dir_paths v)
+ {
+ strings r;
+ for (auto& p: v)
+ r.push_back (posix_representation (move (p)));
+ return r;
+ };
+
+ f[".posix_representation"] += [](names ns)
+ {
+ // For each path decide based on the presence of a trailing slash
+ // whether it is a directory. Return as untyped list of strings.
+ //
+ for (name& n: ns)
+ {
+ n = n.directory ()
+ ? posix_representation (move (n.dir))
+ : posix_representation (convert<path> (move (n)));
+ }
+ return ns;
+ };
+
+ // $canonicalize(<paths>)
+ // $path.canonicalize(<untyped>)
+ //
+ // Canonicalize the path (or list of paths) by converting all the
+ // directory separators to the canonical form for the host platform. Note
+ // that multiple directory separators are not collapsed.
+ //
+
// @@ TODO: add ability to specify alternative separator.
//
f["canonicalize"] += [](path p) {p.canonicalize (); return p;};
@@ -236,7 +387,13 @@ namespace build2
return ns;
};
- // normalize
+ // $normalize(<paths>)
+ // $path.normalize(<untyped>)
+ //
+ // Normalize the path (or list of paths) by collapsing the `.` and `..`
+ // components if possible, collapsing multiple directory separators, and
+ // converting all the directory separators to the canonical form for the
+ // host platform.
//
f["normalize"] += [](path p) {p.normalize (); return p;};
f["normalize"] += [](dir_path p) {p.normalize (); return p;};
@@ -271,7 +428,16 @@ namespace build2
return ns;
};
- // actualize
+ // $actualize(<paths>)
+ // $path.actualize(<untyped>)
+ //
+ // Actualize the path (or list of paths) by first normalizing it and then
+ // for host platforms with case-insensitive filesystems obtaining the
+ // actual spelling of the path.
+ //
+ // Note that only an absolute path can be actualized. If a path component
+ // does not exist, then its (and all subsequent) spelling is
+ // unchanged. This is a potentially expensive operation.
//
// Note that this function is not pure.
//
@@ -312,11 +478,12 @@ namespace build2
return ns;
};
- // $directory(<path>)
// $directory(<paths>)
+ // $path.directory(<untyped>)
//
- // Return the directory part of the path or empty path if there is no
- // directory. Directory of a root directory is an empty path.
+ // Return the directory part of a path (or a list of directory parts for a
+ // list of paths) or an empty path if there is no directory. A directory of
+ // a root directory is an empty path.
//
f["directory"] += &path::directory;
@@ -350,11 +517,12 @@ namespace build2
return ns;
};
- // $root_directory(<path>)
// $root_directory(<paths>)
+ // $path.root_directory(<untyped>)
//
- // Return the root directory of the path or empty path if the directory is
- // not absolute.
+ // Return the root directory of a path (or a list of root directories for
+ // a list of paths) or an empty path if the specified path is not
+ // absolute.
//
f["root_directory"] += &path::root_directory;
@@ -388,17 +556,22 @@ namespace build2
return ns;
};
- // $leaf(<path>)
- //
- f["leaf"] += &path::leaf;
-
- // $leaf(<path>, <dir-path>)
+ // $leaf(<paths>)
+ // $path.leaf(<untyped>)
// $leaf(<paths>, <dir-path>)
+ // $path.leaf(<untyped>, <dir-path>)
//
- // Return the path without the specified directory part. Return empty path
- // if the paths are the same. Issue diagnostics and fail if the directory
- // is not a prefix of the path. Note: expects both paths to be normalized.
+ // First form (one argument): return the last component of a path (or a
+ // list of last components for a list of paths).
//
+ // Second form (two arguments): return a path without the specified
+ // directory part (or a list of paths without the directory part for a
+ // list of paths). Return an empty path if the paths are the same. Issue
+ // diagnostics and fail if the directory is not a prefix of the
+ // path. Note: expects both paths to be normalized.
+ //
+ f["leaf"] += &path::leaf;
+
f["leaf"] += [](path p, dir_path d)
{
return leaf (p, move (d));
@@ -434,13 +607,13 @@ namespace build2
return ns;
};
- // $relative(<path>, <dir-path>)
// $relative(<paths>, <dir-path>)
+ // $path.relative(<untyped>, <dir-path>)
//
- // Return a path relative to the specified directory that is equivalent to
- // the specified path. Issue diagnostics and fail if a relative path
- // cannot be derived (for example, paths are on different drives on
- // Windows).
+ // Return the path relative to the specified directory that is equivalent
+ // to the specified path (or a list of relative paths for a list of
+ // specified paths). Issue diagnostics and fail if a relative path cannot
+ // be derived (for example, paths are on different drives on Windows).
//
f["relative"] += [](path p, dir_path d)
{
@@ -477,7 +650,11 @@ namespace build2
return ns;
};
- // base
+ // $base(<paths>)
+ // $path.base(<untyped>)
+ //
+ // Return the base part (without the extension) of a path (or a list of
+ // base parts for a list of paths).
//
f["base"] += &path::base;
@@ -511,7 +688,11 @@ namespace build2
return ns;
};
- // extension
+ // $extension(<path>)
+ // $path.extension(<untyped>)
+ //
+ // Return the extension part (without the dot) of a path or empty string
+ // if there is no extension.
//
f["extension"] += &extension;
@@ -521,32 +702,29 @@ namespace build2
};
// $size(<paths>)
- // $size(<dir_paths>)
+ // $size(<path>)
+ //
+ // First form: return the number of elements in the paths sequence.
+ //
+ // Second form: return the number of characters (bytes) in the path. Note
+ // that for `dir_path` the result does not include the trailing directory
+ // separator (except for the POSIX root directory).
//
- // Return the number of elements in the sequence.
//
f["size"] += [] (paths v) {return v.size ();};
f["size"] += [] (dir_paths v) {return v.size ();};
- // $size(<path>)
- // $size(<dir_path>)
- //
- // Return the number of characters (bytes) in the path. Note that for
- // dir_path the result does not include the trailing directory separator
- // (except for the POSIX root directory).
- //
f["size"] += [] (path v) {return v.size ();};
f["size"] += [] (dir_path v) {return v.size ();};
- // $sort(<paths> [, <flags>])
- // $sort(<dir_paths> [, <flags>])
+ // $sort(<paths>[, <flags>])
//
- // Sort paths in ascending order. Note that on case-insensitive filesystem
- // the order is case-insensitive.
+ // Sort paths in ascending order. Note that on host platforms with a
+ // case-insensitive filesystem the order is case-insensitive.
//
// The following flags are supported:
//
- // dedup - in addition to sorting also remove duplicates
+ // dedup - in addition to sorting also remove duplicates
//
f["sort"] += [](paths v, optional<names> fs)
{
@@ -568,34 +746,73 @@ namespace build2
return v;
};
- // $path.match(<val>, <pat> [, <start>])
+ // $find(<paths>, <path>)
//
- // Match a filesystem entry name against a name pattern (both are strings),
- // or a filesystem entry path against a path pattern. For the latter case
- // the start directory may also be required (see below). The semantics of
- // the pattern and name/entry arguments is determined according to the
+ // Return true if the paths sequence contains the specified path. Note
+ // that on host platforms with a case-insensitive filesystem the
+ // comparison is case-insensitive.
+ //
+ f["find"] += [](paths vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<path> (move (v))) != vs.end ();
+ };
+
+ f["find"] += [](dir_paths vs, value v)
+ {
+ return find (vs.begin (), vs.end (),
+ convert<dir_path> (move (v))) != vs.end ();
+ };
+
+ // $find_index(<paths>, <path>)
+ //
+ // Return the index of the first element in the paths sequence that is
+ // equal to the specified path or `$size(paths)` if none is found. Note
+ // that on host platforms with a case-insensitive filesystem the
+ // comparison is case-insensitive.
+ //
+ f["find_index"] += [](paths vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<path> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
+ f["find_index"] += [](dir_paths vs, value v)
+ {
+ auto i (find (vs.begin (), vs.end (), convert<dir_path> (move (v))));
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
+ // $path.match(<entry>, <pattern>[, <start-dir>])
+ //
+ // Match a filesystem entry name against a name pattern (both are
+ // strings), or a filesystem entry path against a path pattern. For the
+ // latter case the start directory may also be required (see below). The
+ // pattern is a shell-like wildcard pattern. The semantics of the
+ // <pattern> and <entry> arguments is determined according to the
// following rules:
//
- // - The arguments must be of the string or path types, or be untyped.
+ // 1. The arguments must be of the string or path types, or be untyped.
//
- // - If one of the arguments is typed, then the other one must be of the
- // same type or be untyped. In the later case, an untyped argument is
- // converted to the type of the other argument.
+ // 2. If one of the arguments is typed, then the other one must be of the
+  //      same type or be untyped. In the latter case, an untyped argument is
+ // converted to the type of the other argument.
//
- // - If both arguments are untyped and the start directory is specified,
- // then the arguments are converted to the path type.
+ // 3. If both arguments are untyped and the start directory is specified,
+ // then the arguments are converted to the path type.
//
- // - If both arguments are untyped and the start directory is not
- // specified, then, if one of the arguments is syntactically a path (the
- // value contains a directory separator), convert them to the path type,
- // otherwise to the string type (match as names).
+ // 4. If both arguments are untyped and the start directory is not
+ // specified, then, if one of the arguments is syntactically a path (the
+ // value contains a directory separator), then they are converted to the
+ // path type, otherwise -- to the string type (match as names).
//
- // If pattern and entry paths are both either absolute or relative and
- // non-empty, and the first pattern component is not a self-matching
- // wildcard (doesn't contain ***), then the start directory is not
- // required, and is ignored if specified. Otherwise, the start directory
- // must be specified and be an absolute path.
+ // If pattern and entry paths are both either absolute or relative and not
+ // empty, and the first pattern component is not a self-matching wildcard
+ // (doesn't contain `***`), then the start directory is not required, and
+ // is ignored if specified. Otherwise, the start directory must be
+ // specified and be an absolute path.
//
+
// Name matching.
//
f[".match"] += [](string name, string pattern)
@@ -655,6 +872,11 @@ namespace build2
//
function_family b (m, "builtin", &path_thunk);
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result will unlikely be what the user expected, especially
+ // if the NULL value is on the LHS. So for now we keep it a bit tighter.
+ //
b[".concat"] += &concat_path_string;
b[".concat"] += &concat_dir_path_string;
@@ -667,5 +889,15 @@ namespace build2
{
return concat_dir_path_string (move (l), convert<string> (move (ur)));
};
+
+ b[".concat"] += [](dir_path l, dir_path r)
+ {
+ return value (move (l /= r));
+ };
+
+ b[".concat"] += [](dir_path l, path r)
+ {
+ return value (path_cast<path> (move (l)) /= r);
+ };
}
}
diff --git a/libbuild2/functions-process-path.cxx b/libbuild2/functions-process-path.cxx
index 486a806..6746623 100644
--- a/libbuild2/functions-process-path.cxx
+++ b/libbuild2/functions-process-path.cxx
@@ -11,24 +11,47 @@ namespace build2
void
process_path_functions (function_map& m)
{
- {
- function_family f (m, "process_path");
+ function_family f (m, "process_path");
+
+ // $recall(<process-path>)
+ //
+ // Return the recall path of an executable, that is, a path that is not
+ // necessarily absolute but which nevertheless can be used to re-run the
+ // executable in the current environment. This path, for example, could be
+ // used in diagnostics when printing the failing command line.
+ //
+
+ // As discussed in value_traits<process_path>, we always have recall.
+ //
+ f["recall"] += &process_path::recall;
- // As discussed in value_traits<process_path>, we always have recall.
- //
- f["recall"] += &process_path::recall;
- f["effect"] += [](process_path p)
- {
- return move (p.effect.empty () ? p.recall : p.effect);
- };
- }
+ // $effect(<process-path>)
+ //
+ // Return the effective path of an executable, that is, the absolute path
+ // to the executable that will also include any omitted extensions, etc.
+ //
+ f["effect"] += [] (process_path p)
{
- function_family f (m, "process_path_ex");
+ return move (p.effect.empty () ? p.recall : p.effect);
+ };
+
+ // $name(<process-path-ex>)
+ //
+ // Return the stable process name for diagnostics.
+ //
+ f["name"] += &process_path_ex::name;
+
+ // $checksum(<process-path-ex>)
+ //
+ // Return the executable checksum for change tracking.
+ //
+ f["checksum"] += &process_path_ex::checksum;
- f["name"] += &process_path_ex::name;
- f["checksum"] += &process_path_ex::checksum;
- f["env_checksum"] += &process_path_ex::env_checksum;
- }
+ // $env_checksum(<process-path-ex>)
+ //
+ // Return the environment checksum for change tracking.
+ //
+ f["env_checksum"] += &process_path_ex::env_checksum;
}
}
diff --git a/libbuild2/functions-process.cxx b/libbuild2/functions-process.cxx
index c4e5c24..6faa798 100644
--- a/libbuild2/functions-process.cxx
+++ b/libbuild2/functions-process.cxx
@@ -4,6 +4,8 @@
#include <libbutl/regex.hxx>
#include <libbutl/builtin.hxx>
+#include <libbuild2/scope.hxx>
+#include <libbuild2/context.hxx>
#include <libbuild2/function.hxx>
#include <libbuild2/variable.hxx>
@@ -141,6 +143,9 @@ namespace build2
builtin_callbacks cb;
fdpipe ofd (open_pipe ());
+ if (verb >= 3)
+ print_process (process_args (bn, args));
+
uint8_t rs; // Storage.
butl::builtin b (bf (rs,
args,
@@ -172,7 +177,16 @@ namespace build2
// While assuming that the builtin has issued the diagnostics on failure
// we still print the error message (see process_finish() for details).
//
- fail << bn << " builtin " << process_exit (rs) << endf;
+ diag_record dr;
+ dr << fail << "builtin " << bn << " " << process_exit (rs);
+
+ if (verb >= 1 && verb <= 2)
+ {
+ dr << info << "command line: ";
+ print_process (dr, process_args (bn, args));
+ }
+
+ dr << endf;
}
catch (const system_error& e)
{
@@ -181,18 +195,32 @@ namespace build2
}
static inline value
- run_builtin (builtin_function* bf, const strings& args, const string& bn)
+ run_builtin (const scope* s,
+ builtin_function* bf,
+ const strings& args,
+ const string& bn)
{
+ // See below.
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run() called during " << s->ctx.phase << " phase";
+
return run_builtin_impl (bf, args, bn, read);
}
static inline value
- run_builtin_regex (builtin_function* bf,
+ run_builtin_regex (const scope* s,
+ builtin_function* bf,
const strings& args,
const string& bn,
const string& pat,
const optional<string>& fmt)
{
+ // See below.
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run_regex() called during " << s->ctx.phase << " phase";
+
// Note that we rely on the "small function object" optimization here.
//
return run_builtin_impl (bf, args, bn,
@@ -293,6 +321,9 @@ namespace build2
[] (const string& s) {return s.c_str ();});
cargs.push_back (nullptr);
+ // Note that for now these functions can only be called during the load
+ // phase (see below) and so no diagnostics buffering is needed.
+ //
return run_start (3 /* verbosity */,
pp,
cargs,
@@ -309,15 +340,7 @@ namespace build2
void
process_finish (const scope*, const cstrings& args, process& pr)
{
- try
- {
- if (!pr.wait ())
- fail << "process " << args[0] << " " << *pr.exit;
- }
- catch (const process_error& e)
- {
- fail << "unable to execute " << args[0] << ": " << e;
- }
+ run_finish (args, pr, 2 /* verbosity */);
}
// Run a process.
@@ -352,6 +375,15 @@ namespace build2
static inline value
run_process (const scope* s, const process_path& pp, const strings& args)
{
+ // The only plausible place where these functions can be called outside
+ // the load phase are scripts and there it doesn't make much sense to use
+ // them (the same can be achieved with commands in a uniform manner). Note
+ // that if there is no scope, then this is most likely (certainly?) the
+ // load phase (for example, command line).
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run() called during " << s->ctx.phase << " phase";
+
return run_process_impl (s, pp, args, read);
}
@@ -362,6 +394,11 @@ namespace build2
const string& pat,
const optional<string>& fmt)
{
+ // See above.
+ //
+ if (s != nullptr && s->ctx.phase != run_phase::load)
+ fail << "process.run_regex() called during " << s->ctx.phase << " phase";
+
// Note that we rely on the "small function object" optimization here.
//
return run_process_impl (s, pp, args,
@@ -377,7 +414,7 @@ namespace build2
if (builtin_function* bf = builtin (args))
{
pair<string, strings> ba (builtin_args (bf, move (args), "run"));
- return run_builtin (bf, ba.second, ba.first);
+ return run_builtin (s, bf, ba.second, ba.first);
}
else
{
@@ -395,7 +432,7 @@ namespace build2
if (builtin_function* bf = builtin (args))
{
pair<string, strings> ba (builtin_args (bf, move (args), "run_regex"));
- return run_builtin_regex (bf, ba.second, ba.first, pat, fmt);
+ return run_builtin_regex (s, bf, ba.second, ba.first, pat, fmt);
}
else
{
@@ -413,14 +450,15 @@ namespace build2
// $process.run(<prog>[ <args>...])
//
- // Run builtin or external program and return trimmed stdout.
+ // Run builtin or external program and return trimmed `stdout` output.
//
// Note that if the result of executing the program can be affected by
// environment variables and this result can in turn affect the build
// result, then such variables should be reported with the
- // config.environment directive.
+ // `config.environment` directive.
//
- // Note that this function is not pure.
+ // Note that this function is not pure and can only be called during the
+ // load phase.
//
f.insert (".run", false) += [](const scope* s, names args)
{
@@ -432,21 +470,23 @@ namespace build2
return run_process (s, pp, strings ());
};
- // $process.run_regex(<prog>[ <args>...], <pat> [, <fmt>])
+ // $process.run_regex(<prog>[ <args>...], <pat>[, <fmt>])
//
- // Run builtin or external program and return stdout lines matched and
- // optionally processed with regex.
+ // Run builtin or external program and return `stdout` output lines
+ // matched and optionally processed with a regular expression.
//
// Each line of stdout (including the customary trailing blank) is matched
// (as a whole) against <pat> and, if successful, returned, optionally
- // processed with <fmt>, as an element of a list.
+ // processed with <fmt>, as an element of a list. See the `$regex.*()`
+ // function family for details on regular expressions and format strings.
//
// Note that if the result of executing the program can be affected by
// environment variables and this result can in turn affect the build
// result, then such variables should be reported with the
- // config.environment directive.
+ // `config.environment` directive.
//
- // Note that this function is not pure.
+ // Note that this function is not pure and can only be called during the
+ // load phase.
//
{
auto e (f.insert (".run_regex", false));
diff --git a/libbuild2/functions-project-name.cxx b/libbuild2/functions-project-name.cxx
index 145e62c..23523f0 100644
--- a/libbuild2/functions-project-name.cxx
+++ b/libbuild2/functions-project-name.cxx
@@ -13,8 +13,28 @@ namespace build2
{
function_family f (m, "project_name");
- f["string"] += [](project_name p) {return move (p).string ();};
+ // $string(<project-name>)
+ //
+ // Return the string representation of a project name. See also the
+ // `$variable()` function below.
+ //
+
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](project_name* p)
+ {
+ return p != nullptr ? move (*p).string () : string ();
+ };
+ // $base(<project-name>[, <extension>])
+ //
+ // Return the base part (without the extension) of a project name.
+ //
+ // If <extension> is specified, then only remove that extension. Note that
+ // <extension> should not include the dot and the comparison is always
+ // case-insensitive.
+ //
f["base"] += [](project_name p, optional<string> ext)
{
return ext ? p.base (ext->c_str ()) : p.base ();
@@ -25,13 +45,30 @@ namespace build2
return p.base (convert<string> (move (ext)).c_str ());
};
+ // $extension(<project-name>)
+ //
+ // Return the extension part (without the dot) of a project name or empty
+ // string if there is no extension.
+ //
f["extension"] += &project_name::extension;
+
+ // $variable(<project-name>)
+ //
+ // Return the string representation of a project name that is sanitized to
+ // be usable as a variable name. Specifically, `.`, `-`, and `+` are
+ // replaced with `_`.
+ //
f["variable"] += &project_name::variable;
// Project name-specific overloads from builtins.
//
function_family b (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result will unlikely be what the user expected. So for now
+ // we keep it a bit tighter.
+ //
b[".concat"] += [](project_name n, string s)
{
string r (move (n).string ());
diff --git a/libbuild2/functions-regex.cxx b/libbuild2/functions-regex.cxx
index 2f0d122..a7fcf55 100644
--- a/libbuild2/functions-regex.cxx
+++ b/libbuild2/functions-regex.cxx
@@ -21,7 +21,7 @@ namespace build2
// Optimize for the string value type.
//
if (v.type != &value_traits<string>::value_type)
- untypify (v);
+ untypify (v, true /* reduce */);
return convert<string> (move (v));
}
@@ -69,7 +69,7 @@ namespace build2
else if (s == "return_subs")
subs = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -92,10 +92,7 @@ namespace build2
names r;
for (size_t i (1); i != m.size (); ++i)
- {
- if (m[i].matched)
- r.emplace_back (m.str (i));
- }
+ r.emplace_back (m[i].matched ? m.str (i) : string ());
return value (move (r));
}
@@ -129,7 +126,7 @@ namespace build2
else if (s == "return_subs")
subs = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -161,10 +158,7 @@ namespace build2
if (subs)
{
for (size_t i (1); i != m.size (); ++i)
- {
- if (m[i].matched)
- r.emplace_back (m.str (i));
- }
+ r.emplace_back (m[i].matched ? m.str (i) : string ());
}
return value (move (r));
@@ -174,7 +168,9 @@ namespace build2
}
static pair<regex::flag_type, regex_constants::match_flag_type>
- parse_replacement_flags (optional<names>&& flags, bool first_only = true)
+ parse_replacement_flags (optional<names>&& flags,
+ bool first_only = true,
+ bool* copy_empty = nullptr)
{
regex::flag_type rf (regex::ECMAScript);
regex_constants::match_flag_type mf (regex_constants::match_default);
@@ -191,8 +187,10 @@ namespace build2
mf |= regex_constants::format_first_only;
else if (s == "format_no_copy")
mf |= regex_constants::format_no_copy;
+ else if (copy_empty != nullptr && s == "format_copy_empty")
+ *copy_empty = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -334,7 +332,10 @@ namespace build2
const string& fmt,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags), false));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ false /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
names r;
@@ -342,10 +343,10 @@ namespace build2
try
{
regex_replace_search (to_string (move (v)), rge, fmt,
- [&r] (string::const_iterator b,
- string::const_iterator e)
+ [copy_empty, &r] (string::const_iterator b,
+ string::const_iterator e)
{
- if (b != e)
+ if (copy_empty || b != e)
r.emplace_back (string (b, e));
},
fl.second);
@@ -364,26 +365,29 @@ namespace build2
// apply() overloads (below) for details.
//
static names
- apply (names&& s,
+ apply (names&& ns,
const string& re,
const string& fmt,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags)));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ true /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
names r;
try
{
- for (auto& v: s)
+ for (auto& n: ns)
{
- string s (regex_replace_search (convert<string> (move (v)),
+ string s (regex_replace_search (convert<string> (move (n)),
rge,
fmt,
fl.second).first);
- if (!s.empty ())
+ if (copy_empty || !s.empty ())
r.emplace_back (move (s));
}
}
@@ -411,7 +415,7 @@ namespace build2
if (s == "icase")
r |= regex::icase;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -422,67 +426,141 @@ namespace build2
// See find_match() overloads (below) for details.
//
static bool
- find_match (names&& s, const string& re, optional<names>&& flags)
+ find_match (names&& ns, const string& re, optional<names>&& flags)
{
regex::flag_type fl (parse_find_flags (move (flags)));
regex rge (parse_regex (re, fl));
- for (auto& v: s)
+ for (auto& n: ns)
{
- if (regex_match (convert<string> (move (v)), rge))
+ if (regex_match (convert<string> (move (n)), rge))
return true;
}
return false;
}
+ // Return a list of elements that match (matching is true) or don't match
+ // (matching is false) the regular expression. See filter_match() and
+ // filter_out_match() overloads (below) for details.
+ //
+ static names
+ filter_match (names&& ns,
+ const string& re,
+ optional<names>&& flags,
+ bool matching)
+ {
+ regex::flag_type fl (parse_find_flags (move (flags)));
+ regex rge (parse_regex (re, fl));
+
+ names r;
+
+ for (name& n: ns)
+ {
+ // Note that we need to preserve the element while converting it to
+ // string since we may add it to the resulting list. But let's optimize
+ // this for the simple value case by round-tripping it through the
+ // string.
+ //
+ bool s (n.simple ());
+ string v (convert<string> (s ? move (n) : name (n)));
+
+ if (regex_match (v, rge) == matching)
+ r.emplace_back (s ? name (move (v)) : move (n));
+ }
+
+ return r;
+ }
+
// Return true if a part of any of the list elements matches the regular
// expression. See find_search() overloads (below) for details.
//
static bool
- find_search (names&& s, const string& re, optional<names>&& flags)
+ find_search (names&& ns, const string& re, optional<names>&& flags)
{
regex::flag_type fl (parse_find_flags (move (flags)));
regex rge (parse_regex (re, fl));
- for (auto& v: s)
+ for (auto& n: ns)
{
- if (regex_search (convert<string> (move (v)), rge))
+ if (regex_search (convert<string> (move (n)), rge))
return true;
}
return false;
}
+ // Return those elements of a list which have a match (matching is true) or
+ // have no match (matching is false) between the regular expression and
+ // some/any part of the element. See filter_search() and filter_out_search()
+ // overloads (below) for details.
+ //
+ static names
+ filter_search (names&& ns,
+ const string& re,
+ optional<names>&& flags,
+ bool matching)
+ {
+ regex::flag_type fl (parse_find_flags (move (flags)));
+ regex rge (parse_regex (re, fl));
+
+ names r;
+
+ for (auto& n: ns)
+ {
+ // Note that we need to preserve the element while converting it to
+ // string since we may add it to the resulting list. But let's optimize
+ // this for the simple value case by round-tripping it through the
+ // string.
+ //
+ bool s (n.simple ());
+ string v (convert<string> (s ? move (n) : name (n)));
+
+ if (regex_search (v, rge) == matching)
+ r.emplace_back (s ? name (move (v)) : move (n));
+ }
+
+ return r;
+ }
+
// Replace matched parts of list elements using the format string and
// concatenate the transformed elements. See merge() overloads (below) for
// details.
//
static names
- merge (names&& s,
+ merge (names&& ns,
const string& re,
const string& fmt,
optional<string>&& delim,
optional<names>&& flags)
{
- auto fl (parse_replacement_flags (move (flags)));
+ bool copy_empty (false);
+ auto fl (parse_replacement_flags (move (flags),
+ true /* first_only */,
+ &copy_empty));
regex rge (parse_regex (re, fl.first));
string rs;
try
{
- for (auto& v: s)
+ bool first (true);
+ for (auto& n: ns)
{
- string s (regex_replace_search (convert<string> (move (v)),
+ string s (regex_replace_search (convert<string> (move (n)),
rge,
fmt,
fl.second).first);
- if (!s.empty ())
+ if (copy_empty || !s.empty ())
{
- if (!rs.empty () && delim)
- rs.append (*delim);
+ if (delim)
+ {
+ if (first)
+ first = false;
+ else
+ rs.append (*delim);
+ }
rs.append (s);
}
@@ -510,129 +588,201 @@ namespace build2
//
// Match a value of an arbitrary type against the regular expression.
// Convert the value to string prior to matching. Return the boolean value
- // unless return_subs flag is specified (see below), in which case return
- // names (NULL if no match).
+ // unless `return_subs` flag is specified (see below), in which case
+ // return names (or `null` if no match).
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- // return_subs - return names (rather than boolean), that contain
- // sub-strings that match the marked sub-expressions and
- // NULL if no match
+ // return_subs - return names (rather than boolean), that contain
+ // sub-strings that match the marked sub-expressions
+ // and null if no match
//
- f[".match"] += [](value s, string re, optional<names> flags)
+ f[".match"] += [](value v, string re, optional<names> flags)
{
- return match (move (s), re, move (flags));
+ return match (move (v), re, move (flags));
};
- f[".match"] += [](value s, names re, optional<names> flags)
+ f[".match"] += [](value v, names re, optional<names> flags)
{
- return match (move (s), convert<string> (move (re)), move (flags));
+ return match (move (v), convert<string> (move (re)), move (flags));
};
// $regex.find_match(<vals>, <pat> [, <flags>])
//
// Match list elements against the regular expression and return true if
- // the match is found. Convert the elements to string prior to matching.
+ // the match is found. Convert the elements to strings prior to matching.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- f[".find_match"] += [](names s, string re, optional<names> flags)
+ f[".find_match"] += [](names ns, string re, optional<names> flags)
{
- return find_match (move (s), re, move (flags));
+ return find_match (move (ns), re, move (flags));
};
- f[".find_match"] += [](names s, names re, optional<names> flags)
+ f[".find_match"] += [](names ns, names re, optional<names> flags)
{
- return find_match (move (s), convert<string> (move (re)), move (flags));
+ return find_match (move (ns), convert<string> (move (re)), move (flags));
+ };
+
+ // $regex.filter_match(<vals>, <pat> [, <flags>])
+ // $regex.filter_out_match(<vals>, <pat> [, <flags>])
+ //
+ // Return elements of a list that match (`filter`) or do not match
+ // (`filter_out`) the regular expression. Convert the elements to strings
+ // prior to matching.
+ //
+ // The following flags are supported:
+ //
+ // icase - match ignoring case
+ //
+ f[".filter_match"] += [](names ns, string re, optional<names> flags)
+ {
+ return filter_match (move (ns), re, move (flags), true /* matching */);
+ };
+
+ f[".filter_match"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_match (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ true /* matching */);
+ };
+
+ f[".filter_out_match"] += [](names s, string re, optional<names> flags)
+ {
+ return filter_match (move (s), re, move (flags), false /* matching */);
+ };
+
+ f[".filter_out_match"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_match (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ false /* matching */);
};
// $regex.search(<val>, <pat> [, <flags>])
//
// Determine if there is a match between the regular expression and some
// part of a value of an arbitrary type. Convert the value to string prior
- // to searching. Return the boolean value unless return_match or
- // return_subs flag is specified (see below) in which case return names
- // (NULL if no match).
+ // to searching. Return the boolean value unless `return_match` or
+ // `return_subs` flag is specified (see below) in which case return names
+ // (`null` if no match).
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- // return_match - return names (rather than boolean), that contain a
- // sub-string that matches the whole regular expression and
- // NULL if no match
+ // return_match - return names (rather than boolean), that contain a
+ // sub-string that matches the whole regular expression
+ // and null if no match
//
- // return_subs - return names (rather than boolean), that contain
- // sub-strings that match the marked sub-expressions and
- // NULL if no match
+ // return_subs - return names (rather than boolean), that contain
+ // sub-strings that match the marked sub-expressions
+ // and null if no match
//
- // If both return_match and return_subs flags are specified then the
+ // If both `return_match` and `return_subs` flags are specified then the
// sub-string that matches the whole regular expression comes first.
//
- f[".search"] += [](value s, string re, optional<names> flags)
+ f[".search"] += [](value v, string re, optional<names> flags)
{
- return search (move (s), re, move (flags));
+ return search (move (v), re, move (flags));
};
- f[".search"] += [](value s, names re, optional<names> flags)
+ f[".search"] += [](value v, names re, optional<names> flags)
{
- return search (move (s), convert<string> (move (re)), move (flags));
+ return search (move (v), convert<string> (move (re)), move (flags));
};
// $regex.find_search(<vals>, <pat> [, <flags>])
//
// Determine if there is a match between the regular expression and some
- // part of any of the list elements. Convert the elements to string prior
+ // part of any of the list elements. Convert the elements to strings prior
// to matching.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- f[".find_search"] += [](names s, string re, optional<names> flags)
+ f[".find_search"] += [](names ns, string re, optional<names> flags)
{
- return find_search (move (s), re, move (flags));
+ return find_search (move (ns), re, move (flags));
};
- f[".find_search"] += [](names s, names re, optional<names> flags)
+ f[".find_search"] += [](names ns, names re, optional<names> flags)
{
- return find_search (move (s),
+ return find_search (move (ns),
convert<string> (move (re)),
move (flags));
};
+ // $regex.filter_search(<vals>, <pat> [, <flags>])
+ // $regex.filter_out_search(<vals>, <pat> [, <flags>])
+ //
+ // Return elements of a list for which there is a match (`filter`) or no
+ // match (`filter_out`) between the regular expression and some part of
+ // the element. Convert the elements to strings prior to matching.
+ //
+ // The following flags are supported:
+ //
+ // icase - match ignoring case
+ //
+ f[".filter_search"] += [](names ns, string re, optional<names> flags)
+ {
+ return filter_search (move (ns), re, move (flags), true /* matching */);
+ };
+
+ f[".filter_search"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_search (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ true /* matching */);
+ };
+
+ f[".filter_out_search"] += [](names ns, string re, optional<names> flags)
+ {
+ return filter_search (move (ns), re, move (flags), false /* matching */);
+ };
+
+ f[".filter_out_search"] += [](names ns, names re, optional<names> flags)
+ {
+ return filter_search (move (ns),
+ convert<string> (move (re)),
+ move (flags),
+ false /* matching */);
+ };
+
// $regex.replace(<val>, <pat>, <fmt> [, <flags>])
//
// Replace matched parts in a value of an arbitrary type, using the format
// string. Convert the value to string prior to matching. The result value
// is always untyped, regardless of the argument type.
//
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
- //
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
//
- // format_first_only - only replace the first match
+ // format_first_only - only replace the first match
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // If both format_first_only and format_no_copy flags are specified then
- // the result will only contain the replacement of the first match.
+ // If both `format_first_only` and `format_no_copy` flags are specified
+ // then the result will only contain the replacement of the first match.
//
- f[".replace"] += [](value s, string re, string fmt, optional<names> flags)
+ f[".replace"] += [](value v, string re, string fmt, optional<names> flags)
{
- return replace (move (s), re, fmt, move (flags));
+ return replace (move (v), re, fmt, move (flags));
};
- f[".replace"] += [](value s, names re, names fmt, optional<names> flags)
+ f[".replace"] += [](value v, names re, names fmt, optional<names> flags)
{
- return replace (move (s),
+ return replace (move (v),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
@@ -641,38 +791,38 @@ namespace build2
// $regex.replace_lines(<val>, <pat>, <fmt> [, <flags>])
//
// Convert the value to string, parse it into lines and for each line
- // apply the $regex.replace() function with the specified pattern, format,
- // and flags. If the format argument is NULL, omit the "all-NULL"
- // replacements for the matched lines from the result. Return unmatched
- // lines and line replacements as a name list unless return_lines flag is
- // specified (see below), in which case return a single multi-line simple
- // name value.
+ // apply the `$regex.replace()` function with the specified pattern,
+ // format, and flags. If the format argument is `null`, omit the
+ // "all-`null`" replacements for the matched lines from the result. Return
+ // unmatched lines and line replacements as a `name` list unless
+ // `return_lines` flag is specified (see below), in which case return a
+ // single multi-line simple `name` value.
//
- // The following flags are supported in addition to the $regex.replace()
- // function flags:
+ // The following flags are supported in addition to the `$regex.replace()`
+ // function's flags:
//
- // return_lines - return the simple name (rather than a name list)
- // containing the unmatched lines and line replacements
- // separated with newlines.
+ // return_lines - return the simple name (rather than a name list)
+ // containing the unmatched lines and line replacements
+ // separated with newlines.
//
- // Note that if format_no_copy is specified, unmatched lines are not
+ // Note that if `format_no_copy` is specified, unmatched lines are not
// copied either.
//
- f[".replace_lines"] += [](value s,
- string re,
- string fmt,
- optional<names> flags)
+ f[".replace_lines"] += [](value v,
+ string re,
+ string fmt,
+ optional<names> flags)
{
- return replace_lines (move (s), re, move (fmt), move (flags));
+ return replace_lines (move (v), re, move (fmt), move (flags));
};
- f[".replace_lines"] += [](value s,
- names re,
- names* fmt,
- optional<names> flags)
+ f[".replace_lines"] += [](value v,
+ names re,
+ names* fmt,
+ optional<names> flags)
{
return replace_lines (
- move (s),
+ move (v),
convert<string> (move (re)),
(fmt != nullptr
? optional<string> (convert<string> (move (*fmt)))
@@ -683,26 +833,27 @@ namespace build2
// $regex.split(<val>, <pat>, <fmt> [, <flags>])
//
// Split a value of an arbitrary type into a list of unmatched value parts
- // and replacements of the matched parts, omitting empty ones. Convert the
- // value to string prior to matching.
- //
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
+ // and replacements of the matched parts, omitting empty ones (unless the
+ // `format_copy_empty` flag is specified). Convert the value to string
+ // prior to matching.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
+ //
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_copy_empty - copy empty elements into the result
//
- f[".split"] += [](value s, string re, string fmt, optional<names> flags)
+ f[".split"] += [](value v, string re, string fmt, optional<names> flags)
{
- return split (move (s), re, fmt, move (flags));
+ return split (move (v), re, fmt, move (flags));
};
- f[".split"] += [](value s, names re, names fmt, optional<names> flags)
+ f[".split"] += [](value v, names re, names fmt, optional<names> flags)
{
- return split (move (s),
+ return split (move (v),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
@@ -711,45 +862,52 @@ namespace build2
// $regex.merge(<vals>, <pat>, <fmt> [, <delim> [, <flags>]])
//
// Replace matched parts in a list of elements using the regex format
- // string. Convert the elements to string prior to matching. The result
+ // string. Convert the elements to strings prior to matching. The result
// value is untyped and contains concatenation of transformed non-empty
- // elements optionally separated with a delimiter.
- //
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
+ // elements (unless the `format_copy_empty` flag is specified) optionally
+ // separated with a delimiter.
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
+ //
+ // format_first_only - only replace the first match
//
- // format_first_only - only replace the first match
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_copy_empty - copy empty elements into the result
//
- // If both format_first_only and format_no_copy flags are specified then
- // the result will be a concatenation of only the first match
+ // If both `format_first_only` and `format_no_copy` flags are specified
+ // then the result will be a concatenation of only the first match
// replacements.
//
- f[".merge"] += [](names s,
- string re,
- string fmt,
- optional<string> delim,
- optional<names> flags)
- {
- return merge (move (s), re, fmt, move (delim), move (flags));
+ f[".merge"] += [](names ns,
+ string re,
+ string fmt,
+ optional<string*> delim,
+ optional<names> flags)
+ {
+ return merge (move (ns),
+ re,
+ fmt,
+ delim && *delim != nullptr
+ ? move (**delim)
+ : optional<string> (),
+ move (flags));
};
- f[".merge"] += [](names s,
- names re,
- names fmt,
- optional<names> delim,
- optional<names> flags)
+ f[".merge"] += [](names ns,
+ names re,
+ names fmt,
+ optional<names*> delim,
+ optional<names> flags)
{
- return merge (move (s),
+ return merge (move (ns),
convert<string> (move (re)),
convert<string> (move (fmt)),
- delim
- ? convert<string> (move (*delim))
+ delim && *delim != nullptr
+ ? convert<string> (move (**delim))
: optional<string> (),
move (flags));
};
@@ -757,32 +915,33 @@ namespace build2
// $regex.apply(<vals>, <pat>, <fmt> [, <flags>])
//
// Replace matched parts of each element in a list using the regex format
- // string. Convert the elements to string prior to matching. Return a list
- // of transformed elements, omitting the empty ones.
- //
- // Substitution escape sequences are extended with a subset of Perl
- // sequences (see libbutl/regex.hxx for details).
+ // string. Convert the elements to strings prior to matching. Return a
+ // list of transformed elements, omitting the empty ones (unless the
+ // `format_copy_empty` flag is specified).
//
// The following flags are supported:
//
- // icase - match ignoring case
+ // icase - match ignoring case
+ //
+ // format_first_only - only replace the first match
//
- // format_first_only - only replace the first match
+ // format_no_copy - do not copy unmatched value parts into the
+ // result
//
- // format_no_copy - do not copy unmatched value parts into the result
+ // format_copy_empty - copy empty elements into the result
//
- // If both format_first_only and format_no_copy flags are specified then
- // the result elements will only contain the replacement of the first
+ // If both `format_first_only` and `format_no_copy` flags are specified
+ // then the result elements will only contain the replacement of the first
// match.
//
- f[".apply"] += [](names s, string re, string fmt, optional<names> flags)
+ f[".apply"] += [](names ns, string re, string fmt, optional<names> flags)
{
- return apply (move (s), re, fmt, move (flags));
+ return apply (move (ns), re, fmt, move (flags));
};
- f[".apply"] += [](names s, names re, names fmt, optional<names> flags)
+ f[".apply"] += [](names ns, names re, names fmt, optional<names> flags)
{
- return apply (move (s),
+ return apply (move (ns),
convert<string> (move (re)),
convert<string> (move (fmt)),
move (flags));
diff --git a/libbuild2/functions-string.cxx b/libbuild2/functions-string.cxx
index 4a03a5e..367923f 100644
--- a/libbuild2/functions-string.cxx
+++ b/libbuild2/functions-string.cxx
@@ -8,18 +8,51 @@ using namespace std;
namespace build2
{
+ static size_t
+ find_index (const strings& vs, value&& v, optional<names>&& fs)
+ {
+ bool ic (false);
+ if (fs)
+ {
+ for (name& f: *fs)
+ {
+ string s (convert<string> (move (f)));
+
+ if (s == "icase")
+ ic = true;
+ else
+ throw invalid_argument ("invalid flag '" + s + '\'');
+ }
+ }
+
+ auto i (find_if (vs.begin (), vs.end (),
+ [ic, y = convert<string> (move (v))] (const string& x)
+ {
+ return (ic ? icasecmp (x, y) : x.compare (y)) == 0;
+ }));
+
+ return i != vs.end () ? i - vs.begin () : vs.size ();
+ };
+
void
string_functions (function_map& m)
{
function_family f (m, "string");
- f["string"] += [](string s) {return s;};
-
- // @@ Shouldn't it concatenate elements into the single string?
- // @@ Doesn't seem to be used so far. Can consider removing.
+ // Note: leave undocumented since there is no good reason for the user to
+ // call this function (which would be converting string to string).
//
- // f["string"] += [](strings v) {return v;};
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](string* s)
+ {
+ return s != nullptr ? move (*s) : string ();
+ };
+ // $string.icasecmp(<untyped>, <untyped>)
+ // $icasecmp(<string>, <string>)
+ //
// Compare ASCII strings ignoring case and returning the boolean value.
//
f["icasecmp"] += [](string x, string y)
@@ -43,7 +76,10 @@ namespace build2
convert<string> (move (y))) == 0;
};
- // Trim.
+ // $string.trim(<untyped>)
+ // $trim(<string>)
+ //
+ // Trim leading and trailing whitespaces in a string.
//
f["trim"] += [](string s)
{
@@ -55,7 +91,12 @@ namespace build2
return names {name (trim (convert<string> (move (s))))};
};
- // Convert ASCII strings into lower/upper case.
+ // $string.lcase(<untyped>)
+ // $string.ucase(<untyped>)
+ // $lcase(<string>)
+ // $ucase(<string>)
+ //
+ // Convert ASCII string into lower/upper case.
//
f["lcase"] += [](string s)
{
@@ -78,16 +119,18 @@ namespace build2
};
// $size(<strings>)
- //
- // Return the number of elements in the sequence.
- //
- f["size"] += [] (strings v) {return v.size ();};
-
+ // $size(<string-set>)
+ // $size(<string-map>)
// $size(<string>)
//
- // Return the number of characters (bytes) in the string.
+ // First three forms: return the number of elements in the sequence.
//
- f["size"] += [] (string v) {return v.size ();};
+ // Fourth form: return the number of characters (bytes) in the string.
+ //
+ f["size"] += [] (strings v) {return v.size ();};
+ f["size"] += [] (set<string> v) {return v.size ();};
+ f["size"] += [] (map<string, string> v) {return v.size ();};
+ f["size"] += [] (string v) {return v.size ();};
// $sort(<strings> [, <flags>])
//
@@ -95,9 +138,9 @@ namespace build2
//
// The following flags are supported:
//
- // icase - sort ignoring case
+ // icase - sort ignoring case
//
- // dedup - in addition to sorting also remove duplicates
+ // dedup - in addition to sorting also remove duplicates
//
f["sort"] += [](strings v, optional<names> fs)
{
@@ -114,7 +157,7 @@ namespace build2
else if (s == "dedup")
dd = true;
else
- throw invalid_argument ("invalid flag '" + s + "'");
+ throw invalid_argument ("invalid flag '" + s + '\'');
}
}
@@ -135,23 +178,75 @@ namespace build2
return v;
};
+ // $find(<strings>, <string>[, <flags>])
+ //
+ // Return true if the string sequence contains the specified string.
+ //
+ // The following flags are supported:
+ //
+ // icase - compare ignoring case
+ //
+ // See also `$regex.find_match()` and `$regex.find_search()`.
+ //
+ f["find"] += [](strings vs, value v, optional<names> fs)
+ {
+ return find_index (vs, move (v), move (fs)) != vs.size ();
+ };
+
+ // $find_index(<strings>, <string>[, <flags>])
+ //
+ // Return the index of the first element in the string sequence that
+ // is equal to the specified string or `$size(strings)` if none is
+ // found.
+ //
+ // The following flags are supported:
+ //
+ // icase - compare ignoring case
+ //
+ f["find_index"] += [](strings vs, value v, optional<names> fs)
+ {
+ return find_index (vs, move (v), move (fs));
+ };
+
+ // $keys(<string-map>)
+ //
+ // Return the list of keys in a string map.
+ //
+ // Note that the result is sorted in ascending order.
+ //
+ f["keys"] += [](map<string, string> v)
+ {
+ strings r;
+ r.reserve (v.size ());
+ for (pair<const string, string>& p: v)
+ r.push_back (p.first); // @@ PERF: use C++17 map::extract() to steal.
+ return r;
+ };
+
// String-specific overloads from builtins.
//
function_family b (m, "builtin");
- b[".concat"] += [](string l, string r) {l += r; return l;};
+ // Note that we must handle NULL values (relied upon by the parser to
+ // provide concatenation semantics consistent with untyped values).
+ //
+ b[".concat"] += [](string* l, string* r)
+ {
+ return l != nullptr
+ ? r != nullptr ? move (*l += *r) : move (*l)
+ : r != nullptr ? move (*r) : string ();
+ };
- b[".concat"] += [](string l, names ur)
+ b[".concat"] += [](string* l, names* ur)
{
- l += convert<string> (move (ur));
- return l;
+ string r (ur != nullptr ? convert<string> (move (*ur)) : string ());
+ return l != nullptr ? move (*l += r) : move (r);
};
- b[".concat"] += [](names ul, string r)
+ b[".concat"] += [](names* ul, string* r)
{
- string l (convert<string> (move (ul)));
- l += r;
- return l;
+ string l (ul != nullptr ? convert<string> (move (*ul)) : string ());
+ return r != nullptr ? move (l += *r) : move (l);
};
}
}
diff --git a/libbuild2/functions-target-triplet.cxx b/libbuild2/functions-target-triplet.cxx
index 4b0ec02..6e12c97 100644
--- a/libbuild2/functions-target-triplet.cxx
+++ b/libbuild2/functions-target-triplet.cxx
@@ -13,13 +13,39 @@ namespace build2
{
function_family f (m, "target_triplet");
- f["string"] += [](target_triplet t) {return t.string ();};
- f["representation"] += [](target_triplet t) {return t.representation ();};
+ // $string(<target-triplet>)
+ //
+ // Return the canonical (that is, without the `unknown` vendor component)
+ // target triplet string.
+ //
+
+ // Note that we must handle NULL values (relied upon by the parser
+ // to provide conversion semantics consistent with untyped values).
+ //
+ f["string"] += [](target_triplet* t)
+ {
+ return t != nullptr ? t->string () : string ();
+ };
+
+ // $representation(<target-triplet>)
+ //
+ // Return the complete target triplet string that always contains the
+ // vendor component.
+ //
+ f["representation"] += [](target_triplet t)
+ {
+ return t.representation ();
+ };
// Target triplet-specific overloads from builtins.
//
function_family b (m, "builtin");
+ // Note that while we should normally handle NULL values (relied upon by
+ // the parser to provide concatenation semantics consistent with untyped
+ // values), the result will unlikely be what the user expected. So for now
+ // we keep it a bit tighter.
+ //
b[".concat"] += [](target_triplet l, string sr) {return l.string () + sr;};
b[".concat"] += [](string sl, target_triplet r) {return sl + r.string ();};
diff --git a/libbuild2/functions-target.cxx b/libbuild2/functions-target.cxx
new file mode 100644
index 0000000..d564aa2
--- /dev/null
+++ b/libbuild2/functions-target.cxx
@@ -0,0 +1,108 @@
+// file : libbuild2/functions-target.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/functions-name.hxx> // to_target()
+
+#include <libbuild2/scope.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/function.hxx>
+#include <libbuild2/variable.hxx>
+
+using namespace std;
+
+namespace build2
+{
+ void
+ target_functions (function_map& m)
+ {
+ // Functions that can be called only on real targets.
+ //
+ function_family f (m, "target");
+
+ // $path(<names>)
+ //
+ // Return the path of a target (or a list of paths for a list of
+ // targets). The path must be assigned, which normally happens during
+ // match. As a result, this function is normally called from a recipe.
+ //
+ // Note that while this function is technically not pure, we don't mark it
+ // as such since it can only be called (normally from a recipe) after the
+ // target has been matched, meaning that this target is a prerequisite and
+ // therefore this impurity has been accounted for.
+ //
+ f["path"] += [](const scope* s, names ns)
+ {
+ if (s == nullptr)
+ fail << "target.path() called out of scope";
+
+ // Most of the time we will have a single target so optimize for that.
+ //
+ small_vector<path, 1> r;
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i), o;
+ const target& t (to_target (*s, move (n), move (n.pair ? *++i : o)));
+
+ if (const auto* pt = t.is_a<path_target> ())
+ {
+ const path& p (pt->path ());
+
+ if (&p != &empty_path)
+ r.push_back (p);
+ else
+ fail << "target " << t << " path is not assigned";
+ }
+ else
+ fail << "target " << t << " is not path-based";
+ }
+
+ // We want the result to be path if we were given a single target and
+ // paths if multiple (or zero). The problem is, we cannot distinguish it
+ // based on the argument type (e.g., name vs names) since passing an
+ // out-qualified single target requires two names.
+ //
+ if (r.size () == 1)
+ return value (move (r[0]));
+
+ return value (paths (make_move_iterator (r.begin ()),
+ make_move_iterator (r.end ())));
+ };
+
+ // $process_path(<name>)
+ //
+ // Return the process path of an executable target.
+ //
+ // Note that while this function is not technically pure, we don't mark it
+ // as such for the same reasons as for `$path()` above.
+ //
+
+ // This one can only be called on a single target since we don't support
+ // containers of process_path's (though we probably could).
+ //
+ f["process_path"] += [](const scope* s, names ns)
+ {
+ if (s == nullptr)
+ fail << "target.process_path() called out of scope";
+
+ if (ns.empty () || ns.size () != (ns[0].pair ? 2 : 1))
+ fail << "target.process_path() expects single target";
+
+ name o;
+ const target& t (
+ to_target (*s, move (ns[0]), move (ns[0].pair ? ns[1] : o)));
+
+ if (const auto* et = t.is_a<exe> ())
+ {
+ process_path r (et->process_path ());
+
+ if (r.empty ())
+ fail << "target " << t << " path is not assigned";
+
+ return r;
+ }
+ else
+ fail << "target " << t << " is not executable-based" << endf;
+ };
+ }
+}
diff --git a/libbuild2/in/init.cxx b/libbuild2/in/init.cxx
index 18071f8..2fb73e1 100644
--- a/libbuild2/in/init.cxx
+++ b/libbuild2/in/init.cxx
@@ -34,7 +34,10 @@ namespace build2
// Enter variables.
//
{
- auto& vp (rs.var_pool ());
+ // All the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// Alternative variable substitution symbol with '$' being the
// default.
@@ -58,7 +61,27 @@ namespace build2
// is still stricter than the autoconf's semantics which also leaves
// unknown substitutions as is.
//
- vp.insert<string> ("in.substitution");
+ const variable& im (vp.insert<string> ("in.mode"));
+
+ // Original name of this variable for backwards compatibility.
+ //
+ vp.insert_alias (im, "in.substitution");
+
+ // Substitution map. Substitutions can be specified as key-value pairs
+ // rather than buildfile variables. This map is checked before the
+ // variables. An absent value in key-value has the NULL semantics.
+ //
+ // This mechanism has two primary uses: Firstly, it allows us to have
+ // substitution names that cannot be specified as buildfile variables.
+ // For example, a name may start with an underscore and thus be
+ // reserved or it may refer to one of the predefined variables such as
+ // `include` or `extension` that may have a wrong visibility and/or
+ // type.
+ //
+ // Secondly, this mechanism allows us to encapsulate a group of
+ // substitutions and pass this group around as a single value.
+ //
+ vp.insert<map<string, optional<string>>> ("in.substitutions");
// Fallback value to use for NULL value substitutions. If unspecified,
// NULL substitutions are an error.
diff --git a/libbuild2/in/rule.cxx b/libbuild2/in/rule.cxx
index 5a6db30..31a9d94 100644
--- a/libbuild2/in/rule.cxx
+++ b/libbuild2/in/rule.cxx
@@ -47,6 +47,13 @@ namespace build2
if (!fi)
l5 ([&]{trace << "no in file prerequisite for target " << t;});
+ // If we match, derive the file name here instead of in apply() to make
+ // it available early for the in{} prerequisite search (see
+ // install::file_rule::apply_impl() for background).
+ //
+ if (fi)
+ t.derive_path ();
+
return fi;
}
@@ -55,9 +62,9 @@ namespace build2
{
file& t (xt.as<file> ());
- // Derive the file name.
+ // Make sure derived rules assign the path in match().
//
- t.derive_path ();
+ assert (!t.path ().empty ());
// Inject dependency on the output directory.
//
@@ -108,7 +115,7 @@ namespace build2
// Substitution mode.
//
bool strict (strict_);
- if (const string* s = cast_null<string> (t["in.substitution"]))
+ if (const string* s = cast_null<string> (t["in.mode"]))
{
if (*s == "lax")
strict = false;
@@ -116,6 +123,11 @@ namespace build2
fail << "invalid substitution mode '" << *s << "'";
}
+ // Substitution map.
+ //
+ const substitution_map* smap (
+ cast_null<map<string, optional<string>>> (t["in.substitutions"]));
+
// NULL substitutions.
//
optional<string> null;
@@ -251,7 +263,7 @@ namespace build2
substitute (location (ip, ln),
a, t,
name, flags,
- strict, null));
+ strict, smap, null));
assert (v); // Rule semantics change without version increment?
@@ -294,7 +306,35 @@ namespace build2
if (verb >= 2)
text << program_ << ' ' << ip << " >" << tp;
else if (verb)
- text << program_ << ' ' << ip;
+ {
+ // If we straight print the target, in most cases we will end up with
+ // something ugly like in{version...h.in} (due to the in{} target
+ // type search semantics). There is the `...h` part but also the
+ // `.in` part that is redundant given in{}. So let's tidy this up
+ // a bit if the extension could have been derived by in_search().
+ //
+ target_key ik (i.key ());
+
+ if (ik.ext)
+ {
+ string& ie (*ik.ext);
+ const string* te (t.ext ());
+
+ size_t in (ie.size ());
+ size_t tn (te != nullptr ? te->size () : 0);
+
+ if (in == tn + (tn != 0 ? 1 : 0) + 2) // [<te>.]in
+ {
+ if (ie.compare (in - 2, 2, "in") == 0)
+ {
+ if (tn == 0 || (ie.compare (0, tn, *te) == 0 && ie[tn] == '.'))
+ ie.clear ();
+ }
+ }
+ }
+
+ print_diag (program_.c_str (), move (ik), t);
+ }
// Read and process the file, one line at a time, while updating depdb.
//
@@ -379,7 +419,7 @@ namespace build2
a, t,
dd, dd_skip,
s, 0,
- nl, sym, strict, null);
+ nl, sym, strict, smap, null);
ofs << s;
}
@@ -445,6 +485,7 @@ namespace build2
const char* nl,
char sym,
bool strict,
+ const substitution_map* smap,
const optional<string>& null) const
{
// Scan the line looking for substiutions in the $<name>$ form. In the
@@ -500,8 +541,7 @@ namespace build2
dd, dd_skip,
string (s, b + 1, e - b -1),
nullopt /* flags */,
- strict,
- null))
+ strict, smap, null))
{
replace_newlines (*val, nl);
@@ -522,9 +562,10 @@ namespace build2
const string& n,
optional<uint64_t> flags,
bool strict,
+ const substitution_map* smap,
const optional<string>& null) const
{
- optional<string> val (substitute (l, a, t, n, flags, strict, null));
+ optional<string> val (substitute (l, a, t, n, flags, strict, smap, null));
if (val)
{
@@ -561,6 +602,7 @@ namespace build2
const string& n,
optional<uint64_t> flags,
bool strict,
+ const substitution_map* smap,
const optional<string>& null) const
{
// In the lax mode scan the fragment to make sure it is a variable name
@@ -585,7 +627,7 @@ namespace build2
}
}
- return lookup (l, a, t, n, flags, null);
+ return lookup (l, a, t, n, flags, smap, null);
}
string rule::
@@ -593,10 +635,32 @@ namespace build2
action, const target& t,
const string& n,
optional<uint64_t> flags,
+ const substitution_map* smap,
const optional<string>& null) const
{
assert (!flags);
+ // First look in the substitution map.
+ //
+ if (smap != nullptr)
+ {
+ auto i (smap->find (n));
+
+ if (i != smap->end ())
+ {
+ if (i->second)
+ return *i->second;
+
+ if (null)
+ return *null;
+
+ fail (loc) << "null value in substitution map entry '" << n << "'" <<
+ info << "use in.null to specify null value substiution string";
+ }
+ }
+
+ // Next look for the buildfile variable.
+ //
auto l (t[n]);
if (l.defined ())
@@ -607,9 +671,9 @@ namespace build2
{
if (null)
return *null;
- else
- fail (loc) << "null value in variable '" << n << "'" <<
- info << "use in.null to specify null value substiution string";
+
+ fail (loc) << "null value in variable '" << n << "'" <<
+ info << "use in.null to specify null value substiution string";
}
// For typed values call string() for conversion.
diff --git a/libbuild2/in/rule.hxx b/libbuild2/in/rule.hxx
index bd0d15a..67c2509 100644
--- a/libbuild2/in/rule.hxx
+++ b/libbuild2/in/rule.hxx
@@ -22,6 +22,11 @@ namespace build2
// cache data (e.g., in match() or apply()) to be used in substitute() and
// lookup() calls.
//
+ // A derived rule is also required to derive the target file name in
+ // match() instead of apply() to make it available early for the in{}
+ // prerequisite search (see install::file_rule::apply_impl() for
+ // background).
+ //
// Note also that currently this rule ignores the dry-run mode (see
// perform_update() for the rationale).
//
@@ -54,6 +59,7 @@ namespace build2
// Customization hooks and helpers.
//
+ using substitution_map = map<string, optional<string>>;
// Perform prerequisite search.
//
@@ -89,6 +95,7 @@ namespace build2
action, const target&,
const string& name,
optional<uint64_t> flags,
+ const substitution_map*,
const optional<string>& null) const;
// Perform variable substitution. Return nullopt if it should be
@@ -100,6 +107,7 @@ namespace build2
const string& name,
optional<uint64_t> flags,
bool strict,
+ const substitution_map*,
const optional<string>& null) const;
// Call the above version and do any necessary depdb saving.
@@ -111,6 +119,7 @@ namespace build2
const string& name,
optional<uint64_t> flags,
bool strict,
+ const substitution_map*,
const optional<string>& null) const;
// Process a line of input from the specified position performing any
@@ -124,6 +133,7 @@ namespace build2
const char* newline,
char sym,
bool strict,
+ const substitution_map*,
const optional<string>& null) const;
// Replace newlines in a multi-line value with the given newline
diff --git a/libbuild2/in/target.cxx b/libbuild2/in/target.cxx
index d548453..d664e3a 100644
--- a/libbuild2/in/target.cxx
+++ b/libbuild2/in/target.cxx
@@ -10,7 +10,7 @@ namespace build2
namespace in
{
static const target*
- in_search (const target& xt, const prerequisite_key& cpk)
+ in_search (context& ctx, const target* xt, const prerequisite_key& cpk)
{
// If we have no extension then derive it from our target. Then delegate
// to file_search().
@@ -18,18 +18,26 @@ namespace build2
prerequisite_key pk (cpk);
optional<string>& e (pk.tk.ext);
- if (!e)
+ if (!e && xt != nullptr)
{
- if (const file* t = xt.is_a<file> ())
+ // Why is the extension, say, .h.in and not .in (with .h being in the
+ // name)? While this is mostly academic (in this case things will work
+ // the same either way), conceptually, it is a header template rather
+ // than some file template. In other words, we are adding the second
+ // level classification.
+ //
+ // See also the low verbosity tidying up code in the rule.
+ //
+ if (const file* t = xt->is_a<file> ())
{
const string& te (t->derive_extension ());
e = te + (te.empty () ? "" : ".") + "in";
}
else
- fail << "prerequisite " << pk << " for a non-file target " << xt;
+ fail << "prerequisite " << pk << " for a non-file target " << *xt;
}
- return file_search (xt, pk);
+ return file_search (ctx, xt, pk);
}
static bool
@@ -51,7 +59,7 @@ namespace build2
&target_extension_none,
nullptr, /* default_extension */ // Taken care of by search.
&in_pattern,
- &target_print_1_ext_verb, // Same as file.
+ &target_print_1_ext_verb, // Same as file (but see rule).
&in_search,
target_type::flag::none
};
diff --git a/libbuild2/install/functions.cxx b/libbuild2/install/functions.cxx
index 5668efe..1de4d3e 100644
--- a/libbuild2/install/functions.cxx
+++ b/libbuild2/install/functions.cxx
@@ -15,17 +15,125 @@ namespace build2
{
function_family f (m, "install");
- // Resolve potentially relative install.* value to an absolute directory
- // based on (other) install.* values visible from the calling scope.
+ // $install.resolve(<dir>[, <rel_base>])
+ //
+ // @@ TODO: add overload to call resolve_file().
+ //
+ // Resolve potentially relative install.* value to an absolute and
+ // normalized directory based on (other) install.* values visible from
+ // the calling scope.
+ //
+ // If rel_base is specified and is not empty, then make the resulting
+ // directory relative to it. If rel_base itself is relative, first
+ // resolve it to an absolute and normalized directory based on install.*
+ // values. Note that this argument is mandatory if this function is
+ // called during relocatable installation (install.relocatable is true).
+ // While you can pass empty directory to suppress this functionality,
+ // make sure this does not render the result non-relocatable.
+ //
+ // As an example, consider an executable that supports loading plugins
+ // and requires the plugin installation directory to be embedded into
+ // the executable during the build. The common way to support
+ // relocatable installations for such cases is to embed a path relative
+ // to the executable and complete it at runtime. If you would like to
+ // always use the relative path, regardless of whether the installation
+ // is relocatable or not, then you can simply always pass rel_base, for
+ // example:
+ //
+ // plugin_dir = $install.resolve($install.lib, $install.bin)
+ //
+ // Alternatively, if you would like to continue using absolute paths for
+ // non-relocatable installations, then you can use something like this:
+ //
+ // plugin_dir = $install.resolve($install.lib, ($install.relocatable ? $install.bin : [dir_path] ))
+ //
+ // Finally, if you are unable to support relocatable installations, the
+ // correct way to handle this is NOT to always pass an empty path for
+ // rel_base but rather assert in root.build that your project does not
+ // support relocatable installations, for example:
+ //
+ // assert (!$install.relocatable) 'relocatable installation not supported'
//
// Note that this function is not pure.
//
- f.insert (".resolve", false) += [] (const scope* s, dir_path d)
+ f.insert (".resolve", false) += [] (const scope* s,
+ dir_path dir,
+ optional<dir_path> rel_base)
{
if (s == nullptr)
fail << "install.resolve() called out of scope" << endf;
- return resolve_dir (*s, move (d));
+ if (!rel_base)
+ {
+ const scope& rs (*s->root_scope ());
+
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ fail << "relocatable installation requires relative base "
+ << "directory" <<
+ info << "pass empty relative base directory if this call does "
+ << "not affect installation relocatability" <<
+ info << "or add `assert (!$install.relocatable) 'relocatable "
+ << "installation not supported'` before the call";
+ }
+ }
+
+ return resolve_dir (*s,
+ move (dir),
+ rel_base ? move (*rel_base) : dir_path ());
+ };
+
+ // @@ TODO: add $install.chroot().
+
+ // $install.filter(<path>[, <type>])
+ //
+ // Apply filters from config.install.filter and return true if the
+ // specified filesystem entry should be installed/uninstalled. Note that
+ // the entry is specified as an absolute and normalized installation
+ // path (so not $path($>) but $install.resolve($>)).
+ //
+ // The type argument can be one of `regular`, `directory`, or `symlink`.
+ // If unspecified, either `directory` or `regular` is assumed, based on
+ // whether path is syntactically a directory (ends with a directory
+ // separator).
+ //
+ // Note that this function is not pure.
+ //
+ f.insert (".filter", false) += [] (const scope* s,
+ path p,
+ optional<names> ot)
+ {
+ if (s == nullptr)
+ fail << "install.filter() called out of scope" << endf;
+
+ entry_type t;
+ if (ot)
+ {
+ string v (convert<string> (move (*ot)));
+
+ if (v == "regular") t = entry_type::regular;
+ else if (v == "directory") t = entry_type::directory;
+ else if (v == "symlink") t = entry_type::symlink;
+ else throw invalid_argument ("unknown type '" + v + '\'');
+ }
+ else
+ t = p.to_directory () ? entry_type::directory : entry_type::regular;
+
+ // Split into directory and leaf.
+ //
+ dir_path d;
+ if (t == entry_type::directory)
+ {
+ d = path_cast<dir_path> (move (p));
+ p = path (); // No leaf.
+ }
+ else
+ {
+ d = p.directory ();
+ p.make_leaf ();
+ }
+
+ return filter_entry (*s->root_scope (), d, p, t);
};
}
}
diff --git a/libbuild2/install/init.cxx b/libbuild2/install/init.cxx
index ef9de05..3df912f 100644
--- a/libbuild2/install/init.cxx
+++ b/libbuild2/install/init.cxx
@@ -166,6 +166,8 @@ namespace build2
bool global (*name == '\0');
+ auto& vp (rs.var_pool (true /* default */)); // All qualified.
+
if (spec)
{
vn = "config.install";
@@ -175,7 +177,7 @@ namespace build2
vn += name;
}
vn += var;
- const variable& vr (rs.var_pool ().insert<CT> (move (vn)));
+ const variable& vr (vp.insert<CT> (move (vn)));
using config::lookup_config;
@@ -192,7 +194,7 @@ namespace build2
vn = "install.";
vn += name;
vn += var;
- const variable& vr (rs.var_pool ().insert<T> (move (vn)));
+ const variable& vr (vp.insert<T> (move (vn)));
value& v (rs.assign (vr));
@@ -236,7 +238,7 @@ namespace build2
// This one doesn't have config.* value (only set in a buildfile).
//
if (!global)
- rs.var_pool ().insert<bool> (string ("install.") + n + ".subdirs");
+ rs.var_pool (true).insert<bool> (string ("install.") + n + ".subdirs");
}
void
@@ -250,6 +252,20 @@ namespace build2
context& ctx (rs.ctx);
+ // Enter module variables (note that init() below enters some more).
+ //
+ // The install variable is a path, not dir_path, since it can be used
+ // to both specify the target directory (to install with the same file
+ // name) or target file (to install with a different name). And the
+ // way we distinguish between the two is via the presence/absence of
+ // the trailing directory separator.
+ //
+ // Plus it can have the special true/false values when acting as an
+ // operation variable.
+ //
+ auto& ovar (rs.var_pool ().insert<path> ("install",
+ variable_visibility::target));
+
// Register the install function family if this is the first instance of
// the install modules.
//
@@ -258,9 +274,9 @@ namespace build2
// Register our operations.
//
- rs.insert_operation (install_id, op_install);
- rs.insert_operation (uninstall_id, op_uninstall);
- rs.insert_operation (update_for_install_id, op_update_for_install);
+ rs.insert_operation (install_id, op_install, &ovar);
+ rs.insert_operation (uninstall_id, op_uninstall, &ovar);
+ rs.insert_operation (update_for_install_id, op_update_for_install, &ovar);
}
static const path cmd ("install");
@@ -269,24 +285,26 @@ namespace build2
//
#define DIR(N, V) static const dir_path dir_##N (V)
- DIR (data_root, dir_path ("root"));
- DIR (exec_root, dir_path ("root"));
+ DIR (data_root, dir_path ("root"));
+ DIR (exec_root, dir_path ("root"));
- DIR (sbin, dir_path ("exec_root") /= "sbin");
- DIR (bin, dir_path ("exec_root") /= "bin");
- DIR (lib, (dir_path ("exec_root") /= "lib") /= "<private>");
- DIR (libexec, ((dir_path ("exec_root") /= "libexec") /= "<private>") /= "<project>");
- DIR (pkgconfig, dir_path ("lib") /= "pkgconfig");
+ DIR (sbin, dir_path ("exec_root") /= "sbin");
+ DIR (bin, dir_path ("exec_root") /= "bin");
+ DIR (lib, (dir_path ("exec_root") /= "lib") /= "<private>");
+ DIR (libexec, ((dir_path ("exec_root") /= "libexec") /= "<private>") /= "<project>");
+ DIR (pkgconfig, dir_path ("lib") /= "pkgconfig");
- DIR (etc, dir_path ("data_root") /= "etc");
- DIR (include, (dir_path ("data_root") /= "include") /= "<private>");
- DIR (share, dir_path ("data_root") /= "share");
- DIR (data, (dir_path ("share") /= "<private>") /= "<project>");
+ DIR (etc, dir_path ("data_root") /= "etc");
+ DIR (include, (dir_path ("data_root") /= "include") /= "<private>");
+ DIR (include_arch, dir_path ("include"));
+ DIR (share, dir_path ("data_root") /= "share");
+ DIR (data, (dir_path ("share") /= "<private>") /= "<project>");
+ DIR (buildfile, ((dir_path ("share") /= "build2") /= "export") /= "<project>");
- DIR (doc, ((dir_path ("share") /= "doc") /= "<private>") /= "<project>");
- DIR (legal, dir_path ("doc"));
- DIR (man, dir_path ("share") /= "man");
- DIR (man1, dir_path ("man") /= "man1");
+ DIR (doc, ((dir_path ("share") /= "doc") /= "<private>") /= "<project>");
+ DIR (legal, dir_path ("doc"));
+ DIR (man, dir_path ("share") /= "man");
+ DIR (man1, dir_path ("man") /= "man1");
#undef DIR
@@ -312,22 +330,17 @@ namespace build2
// Enter module variables.
//
- auto& vp (rs.var_pool ());
+ rs.var_pool ().insert<bool> ("for_install", variable_visibility::prereq);
+
+ // The rest of the variables we enter are qualified so go straight
+ // for the public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
// Note that the set_dir() calls below enter some more.
//
- {
- // The install variable is a path, not dir_path, since it can be used
- // to both specify the target directory (to install with the same file
- // name) or target file (to install with a different name). And the
- // way we distinguish between the two is via the presence/absence of
- // the trailing directory separator.
- //
- vp.insert<path> ("install", variable_visibility::target);
- vp.insert<bool> ("for_install", variable_visibility::prereq);
- vp.insert<string> ("install.mode");
- vp.insert<bool> ("install.subdirs");
- }
+ vp.insert<string> ("install.mode");
+ vp.insert<bool> ("install.subdirs");
// Environment.
//
@@ -383,6 +396,9 @@ namespace build2
// Note: use mtime_target (instead of target) to take precedence over
// the fallback file rules below.
//
+ // @@ We could fix this by checking the target type in file_rule,
+ // similar to build2::file_rule.
+ //
bs.insert_rule<mtime_target> (perform_install_id, "install.group", gr);
bs.insert_rule<mtime_target> (perform_uninstall_id, "install.group", gr);
@@ -390,7 +406,7 @@ namespace build2
// operation, similar to update.
//
// @@ Hm, it's a bit fuzzy why we would be updating-for-install
- // something outside of any project..?
+ // something outside of any project?
//
scope& gs (rs.global_scope ());
@@ -409,9 +425,9 @@ namespace build2
using config::lookup_config;
using config::specified_config;
- // Note: ignore config.install.scope (see below).
+ // Note: ignore config.install.{scope,manifest} (see below).
//
- bool s (specified_config (rs, "install", {"scope"}));
+ bool s (specified_config (rs, "install", {"scope", "manifest"}));
// Adjust module priority so that the (numerous) config.install.*
// values are saved at the end of config.build.
@@ -448,6 +464,123 @@ namespace build2
config::unsave_variable (rs, v);
}
+ // config.install.manifest
+ //
+ // Installation manifest. Valid values are a file path or `-` to dump
+ // the manifest to stdout.
+ //
+ // If specified during the install operation, then write the
+ // information about all the filesystem entries being installed into
+ // the manifest. If specified during uninstall, then remove the
+ // filesystem entries according to the manifest as opposed to the
+ // current build state. In particular, this functionality can be used
+ // to avoid surprising (and potentially lengthy) updates during
+ // uninstall that may happen because of changes to system-installed
+ // dependencies (for example, the compiler or standard library).
+ //
+ // @@ TODO: manifest uninstall is still TODO.
+ //
+ // Note: there is a single manifest per operation and thus this
+ // variable can only be specified as a global override. (While it
+ // could be handy to save this variable in config.build in some
+ // situations, supporting this will complicate the global override
+ // case). Note that as a result the manifest file path may not be
+ // specified in terms of the config.install.* values.
+ //
+ // Note also that the manifest is produced even in the dry-run mode.
+ // However, in this case no directory creation is tracked.
+ //
+ // The format of the installation manifest is "JSON lines", that is,
+ // each line is a JSON text (this makes it possible to reverse the
+ // order of lines without loading the entire file into memory). For
+ // example (indented lines indicate line continuations):
+ //
+ // {"type":"directory","path":"/tmp/install","mode":"755"}
+ // {"type":"target","name":"/tmp/libhello/libs{hello}",
+ // "entries":[
+ // {"type":"file","path":"/tmp/install/lib/libhello-1.0.so","mode":"755"},
+ // {"type":"symlink","path":"/tmp/install/lib/libhello.so","target":"libhello-1.0.so"}]}
+ //
+ // Each line is a serialization of one of the following non-abstract
+ // C++ structs:
+ //
+ // struct entry // abstract
+ // {
+ // enum {directory, file, symlink, target} type;
+ // };
+ //
+ // struct filesystem_entry: entry // abstract
+ // {
+ // path path;
+ // };
+ //
+ // struct directory_entry: filesystem_entry
+ // {
+ // string mode;
+ // };
+ //
+ // struct file_entry: filesystem_entry
+ // {
+ // string mode;
+ // };
+ //
+ // struct symlink_entry: filesystem_entry
+ // {
+ // path target;
+ // };
+ //
+ // struct target_entry: entry
+ // {
+ // string name;
+ // vector<filesystem_entry*> entries;
+ // };
+ //
+ // New entry types may be added later. Additional entry members may be
+ // added later to existing entries after the existing members.
+ //
+ // If installation is relocatable (see config.install.relocatable) and
+ // the installation manifest file path is inside config.install.root
+ // (including chroot), then absolute filesystem_entry::path's are
+ // saved as relative to the manifest file's directory (note that
+ // symlink_entry::target cannot be absolute in relocatable
+ // installation).
+ //
+ {
+ auto& v (vp.insert<path> ("config.install.manifest"));
+
+ // If specified, verify it is a global override.
+ //
+ if (lookup l = rs[v])
+ {
+ if (!l.belongs (rs.global_scope ()))
+ fail << "config.install.manifest must be a global override" <<
+ info << "specify !config.install.manifest=...";
+ }
+
+ config::unsave_variable (rs, v);
+ }
+
+ // Support for relocatable install.
+ //
+ // Note that it is false by default since supporting relocatable
+ // installation may require extra effort and not all projects may
+ // support it. A project that is known not to support it should assert
+ // this fact in its root.build, for example:
+ //
+ // assert (!$install.relocatable) 'relocatable installation not supported'
+ //
+ {
+ auto& var (vp.insert<bool> ( "install.relocatable"));
+ auto& cvar (vp.insert<bool> ("config.install.relocatable"));
+
+ value& v (rs.assign (var));
+
+ // Note: unlike other variables, for ease of assertion set it to
+ // false if no config.install.* is specified.
+ //
+ v = s && cast_false<bool> (lookup_config (rs, cvar, false));
+ }
+
// Support for private install (aka poor man's Flatpack).
//
const dir_path* p;
@@ -485,35 +618,109 @@ namespace build2
}
}
- // Global config.install.* values.
+ // config.install.filter
//
- set_dir (s, p, rs, "", abs_dir_path (), false, "644", "755", cmd);
-
- set_dir (s, p, rs, "root", abs_dir_path ());
-
- set_dir (s, p, rs, "data_root", dir_data_root);
- set_dir (s, p, rs, "exec_root", dir_exec_root, false, "755");
+ // Installation filtering. The value of this variable is a list of
+ // key-value pairs that specify the filesystem entries to include or
+ // exclude from the installation. For example, the following filters
+ // will omit installing headers and static libraries (notice the
+ // quoting of the wildcard).
+ //
+ // config.install.filter='include/@false "*.a"@false'
+ //
+ // The key in each pair is a file or directory path or a path wildcard
+ // pattern. If a key is relative and contains a directory component or
+ // is a directory, then it is treated relative to the corresponding
+ // config.install.* location. Otherwise (simple path, normally a
+ // pattern), it is matched against the leaf of any path. Note that if
+ // an absolute path is specified, it should be without the
+ // config.install.chroot prefix.
+ //
+ // The value in each pair is either true (include) or false (exclude).
+ // The filters are evaluated in the order specified and the first
+ // match that is found determines the outcome. If no match is found,
+ // the default is to include. For a directory, while false means
+ // exclude all the sub-paths inside this directory, true does not mean
+ // that all the sub-paths will be included wholesale. Rather, the
+ // matched component of the sub-path is treated as included with the
+ // rest of the components matched against the following
+ // sub-filters. For example:
+ //
+ // config.install.filter='
+ // include/x86_64-linux-gnu/@true
+ // include/x86_64-linux-gnu/details/@false
+ // include/@false'
+ //
+ // The true or false value may be followed by comma and the `symlink`
+ // modifier to only apply to symlink filesystem entries. For example:
+ //
+ // config.install.filter='"*.so"@false,symlink'
+ //
+ // Note that this mechanism only affects what gets physically copied
+ // to the installation directory without affecting what gets built for
+ // install or the view of what gets installed at the buildfile level.
+ // For example, given the `include/@false *.a@false` filters, static
+ // libraries will still be built (unless arranged not to with
+ // config.bin.lib) and the pkg-config files will still end up with -I
+ // options pointing to the header installation directory. Note also
+ // that this mechanism applies to both install and uninstall
+ // operations.
+ //
+ // If you are familiar with the Debian or Fedora packaging, this
+ // mechanism is somewhat similar to (and can be used for a similar
+ // purpose as) the Debian's .install files and Fedora's %files spec
+ // file sections that are used to split the installation into multiple
+ // binary packages.
+ //
+ {
+ auto& var (vp.insert<filters> ( "install.filter"));
+ auto& cvar (vp.insert<filters> ("config.install.filter"));
- set_dir (s, p, rs, "sbin", dir_sbin);
- set_dir (s, p, rs, "bin", dir_bin);
- set_dir (s, p, rs, "lib", dir_lib);
- set_dir (s, p, rs, "libexec", dir_libexec);
- set_dir (s, p, rs, "pkgconfig", dir_pkgconfig, false, "644");
+ value& v (rs.assign (var));
- set_dir (s, p, rs, "etc", dir_etc);
- set_dir (s, p, rs, "include", dir_include);
- set_dir (s, p, rs, "share", dir_share);
- set_dir (s, p, rs, "data", dir_data);
+ if (s)
+ {
+ if (lookup l = lookup_config (rs, cvar, nullptr))
+ v = cast<filters> (l);
+ }
+ }
- set_dir (s, p, rs, "doc", dir_doc);
- set_dir (s, p, rs, "legal", dir_legal);
- set_dir (s, p, rs, "man", dir_man);
- set_dir (s, p, rs, "man1", dir_man1);
+ // Global config.install.* values.
+ //
+ set_dir (s, p, rs, "", abs_dir_path (), false, "644", "755", cmd);
+
+ set_dir (s, p, rs, "root", abs_dir_path ());
+
+ set_dir (s, p, rs, "data_root", dir_data_root);
+ set_dir (s, p, rs, "exec_root", dir_exec_root, false, "755");
+
+ set_dir (s, p, rs, "sbin", dir_sbin);
+ set_dir (s, p, rs, "bin", dir_bin);
+ set_dir (s, p, rs, "lib", dir_lib);
+ set_dir (s, p, rs, "libexec", dir_libexec);
+ set_dir (s, p, rs, "pkgconfig", dir_pkgconfig, false, "644");
+
+ set_dir (s, p, rs, "etc", dir_etc);
+ set_dir (s, p, rs, "include", dir_include);
+ set_dir (s, p, rs, "include_arch", dir_include_arch);
+ set_dir (s, p, rs, "share", dir_share);
+ set_dir (s, p, rs, "data", dir_data);
+ set_dir (s, p, rs, "buildfile", dir_buildfile);
+
+ set_dir (s, p, rs, "doc", dir_doc);
+ set_dir (s, p, rs, "legal", dir_legal);
+ set_dir (s, p, rs, "man", dir_man);
+ set_dir (s, p, rs, "man1", dir_man1);
}
// Configure "installability" for built-in target types.
//
+ // Note that for exe{} we also set explicit 755 mode in case it gets
+ // installed somewhere else where the default is not 755 (for example to
+ // libexec/, which on Debian has the 644 mode).
+ //
install_path<exe> (bs, dir_path ("bin"));
+ install_mode<exe> (bs, "755");
install_path<doc> (bs, dir_path ("doc"));
install_path<legal> (bs, dir_path ("legal"));
install_path<man> (bs, dir_path ("man"));
diff --git a/libbuild2/install/operation.cxx b/libbuild2/install/operation.cxx
index 8c59ac1..ce5d24a 100644
--- a/libbuild2/install/operation.cxx
+++ b/libbuild2/install/operation.cxx
@@ -3,8 +3,15 @@
#include <libbuild2/install/operation.hxx>
+#include <sstream>
+
+#include <libbuild2/scope.hxx>
+#include <libbuild2/target.hxx>
+#include <libbuild2/context.hxx>
#include <libbuild2/variable.hxx>
+#include <libbuild2/install/utility.hxx>
+
using namespace std;
using namespace butl;
@@ -12,39 +19,371 @@ namespace build2
{
namespace install
{
+#ifndef BUILD2_BOOTSTRAP
+ context_data::
+ context_data (const path* mf)
+ : manifest_name (mf),
+ manifest_os (mf != nullptr
+ ? open_file_or_stdout (manifest_name, manifest_ofs)
+ : manifest_ofs),
+ manifest_autorm (manifest_ofs.is_open () ? *mf : path ()),
+ manifest_json (manifest_os, 0 /* indentation */)
+ {
+ if (manifest_ofs.is_open ())
+ {
+ manifest_file = *mf;
+ manifest_file.complete ();
+ manifest_file.normalize ();
+ }
+ }
+
+ static path
+ relocatable_path (context_data& d, const target& t, path p)
+ {
+ // This is both inefficient (re-detecting relocatable manifest for every
+ // path) and a bit dirty (if multiple projects are being installed with
+ // different install.{relocatable,root} values, we may end up producing
+ // some paths relative and some absolute). But doing either of these
+ // properly is probably not worth the extra complexity.
+ //
+ if (!d.manifest_file.empty ()) // Not stdout.
+ {
+ const scope& rs (t.root_scope ());
+
+ if (cast_false<bool> (rs["install.relocatable"]))
+ {
+ // Note: install.root is abs_dir_path so absolute and normalized.
+ //
+ const dir_path* root (cast_null<dir_path> (rs["install.root"]));
+ if (root == nullptr)
+ fail << "unknown installation root directory in " << rs <<
+ info << "did you forget to specify config.install.root?";
+
+ // The manifest path would include chroot so if used, we need to add
+ // it to root and the file path (we could also strip it, but then
+ // making it absolute gets tricky on Windows).
+ //
+ dir_path md (d.manifest_file.directory ());
+
+ if (md.sub (chroot_path (rs, *root))) // Inside installation root.
+ {
+ p = chroot_path (rs, p);
+ try
+ {
+ p = p.relative (md);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make filesystem entry path " << p
+ << " relative to " << md <<
+ info << "required for relocatable installation manifest";
+ }
+ }
+ }
+ }
+
+ return p;
+ }
+
+ // Serialize current target and, if tgt is not NULL, start the new target.
+ //
+ // Note that we always serialize directories as top-level entries. And
+ // theoretically we can end up "splitting" a target with a directory
+ // creation. For example, if some files that belong to the target are
+ // installed into subdirectories that have not yet been created. So we
+ // have to cache the information for the current target in memory and only
+ // flush it once we see the next target (or the end).
+ //
+ // You may be wondering why not just serialize directories as target
+ // entries. While we could do that, it's not quite correct conceptually,
+ // since this would be the first of potentially many targets that caused
+ // the directory's creation. To put it another way, while files and
+ // symlinks belong to targets, directories do not.
+ //
+ static void
+ manifest_flush_target (context_data& d, const target* tgt)
+ {
+ if (d.manifest_target != nullptr)
+ {
+ assert (!d.manifest_target_entries.empty ());
+
+ // Target name format is the same as in the structured result output.
+ //
+ ostringstream os;
+ stream_verb (os, stream_verbosity (1, 0));
+ os << *d.manifest_target;
+
+ try
+ {
+ auto& s (d.manifest_json);
+
+ s.begin_object ();
+ s.member ("type", "target");
+ s.member ("name", os.str ());
+ s.member_name ("entries");
+ s.begin_array ();
+
+ for (const auto& e: d.manifest_target_entries)
+ {
+ path p (relocatable_path (d, *d.manifest_target, move (e.path)));
+
+ s.begin_object ();
+
+ if (e.target.empty ())
+ {
+ s.member ("type", "file");
+ s.member ("path", p.string ());
+ s.member ("mode", e.mode);
+ }
+ else
+ {
+ s.member ("type", "symlink");
+ s.member ("path", p.string ());
+ s.member ("target", e.target.string ());
+ }
+
+ s.end_object ();
+ }
+
+ s.end_array (); // entries member
+ s.end_object (); // target object
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+
+ d.manifest_target_entries.clear ();
+ }
+
+ d.manifest_target = tgt;
+ }
+
+ void context_data::
+ manifest_install_d (context& ctx,
+ const target& tgt,
+ const dir_path& dir,
+ const string& mode)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ try
+ {
+ auto& s (d.manifest_json);
+
+ // If we moved to the next target, flush the current one.
+ //
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, nullptr);
+
+ s.begin_object ();
+ s.member ("type", "directory");
+ s.member ("path", relocatable_path (d, tgt, dir).string ());
+ s.member ("mode", mode);
+ s.end_object ();
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+ }
+ }
+
+ void context_data::
+ manifest_install_f (context& ctx,
+ const target& tgt,
+ const dir_path& dir,
+ const path& name,
+ const string& mode)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, &tgt);
+
+ d.manifest_target_entries.push_back (
+ manifest_target_entry {dir / name, mode, path ()});
+ }
+ }
+
+ void context_data::
+ manifest_install_l (context& ctx,
+ const target& tgt,
+ const path& link_target,
+ const dir_path& dir,
+ const path& link)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ if (d.manifest_target != &tgt)
+ manifest_flush_target (d, &tgt);
+
+ d.manifest_target_entries.push_back (
+ manifest_target_entry {dir / link, "", link_target});
+ }
+ }
+
+ static void
+ manifest_close (context& ctx)
+ {
+ auto& d (*static_cast<context_data*> (ctx.current_inner_odata.get ()));
+
+ if (d.manifest_name.path != nullptr)
+ {
+ try
+ {
+ manifest_flush_target (d, nullptr);
+
+ d.manifest_os << '\n'; // Final newline.
+
+ if (d.manifest_ofs.is_open ())
+ {
+ d.manifest_ofs.close ();
+ d.manifest_autorm.cancel ();
+ }
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ fail << "invalid " << d.manifest_name << " json output: " << e;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to write to " << d.manifest_name << ": " << e;
+ }
+ }
+ }
+#else
+ context_data::
+ context_data (const path*)
+ {
+ }
+
+ void context_data::
+ manifest_install_d (context&,
+ const target&,
+ const dir_path&,
+ const string&)
+ {
+ }
+
+ void context_data::
+ manifest_install_f (context&,
+ const target&,
+ const dir_path&,
+ const path&,
+ const string&)
+ {
+ }
+
+ void context_data::
+ manifest_install_l (context&,
+ const target&,
+ const path&,
+ const dir_path&,
+ const path&)
+ {
+ }
+
+ static void
+ manifest_close (context&)
+ {
+ }
+#endif
+
static operation_id
- install_pre (context&,
- const values& params,
+ pre_install (context&,
+ const values&,
meta_operation_id mo,
- const location& l)
+ const location&)
{
- if (!params.empty ())
- fail (l) << "unexpected parameters for operation install";
+ // Run update as a pre-operation, unless we are disfiguring.
+ //
+ return mo != disfigure_id ? update_id : 0;
+ }
+ static operation_id
+ pre_uninstall (context&,
+ const values&,
+ meta_operation_id mo,
+ const location&)
+ {
// Run update as a pre-operation, unless we are disfiguring.
//
return mo != disfigure_id ? update_id : 0;
}
+ static void
+ install_pre (context& ctx,
+ const values& params,
+ bool inner,
+ const location& l)
+ {
+ if (!params.empty ())
+ fail (l) << "unexpected parameters for operation install";
+
+ if (inner)
+ {
+ // See if we need to write the installation manifest.
+ //
+ // Note: go straight for the public variable pool.
+ //
+ const path* mf (
+ cast_null<path> (
+ ctx.global_scope[*ctx.var_pool.find ("config.install.manifest")]));
+
+ // Note that we cannot calculate whether the manifest should use
+ // relocatable (relative) paths once here since we don't know the
+ // value of config.install.root.
+
+ ctx.current_inner_odata = context::current_data_ptr (
+ new context_data (mf),
+ [] (void* p) {delete static_cast<context_data*> (p);});
+ }
+ }
+
+ static void
+ install_post (context& ctx, const values&, bool inner)
+ {
+ if (inner)
+ manifest_close (ctx);
+ }
+
// Note that we run both install and uninstall serially. The reason for
// this is all the fuzzy things we are trying to do like removing empty
// outer directories if they are empty. If we do this in parallel, then
// those things get racy. Also, since all we do here is creating/removing
// files, there is not going to be much speedup from doing it in parallel.
+ // There is also now the installation manifest, which relies on us
+ // installing all the filesystem entries of a target serially.
const operation_info op_install {
install_id,
0,
"install",
"install",
- "install",
"installing",
"installed",
"has nothing to install", // We cannot "be installed".
execution_mode::first,
0 /* concurrency */, // Run serially.
- &install_pre,
+ &pre_install,
nullptr,
+ &install_pre,
+ &install_post,
nullptr,
nullptr
};
@@ -62,14 +401,15 @@ namespace build2
uninstall_id,
0,
"uninstall",
- "install",
"uninstall",
"uninstalling",
"uninstalled",
"is not installed",
execution_mode::last,
0 /* concurrency */, // Run serially
- &install_pre,
+ &pre_uninstall,
+ nullptr,
+ nullptr,
nullptr,
nullptr,
nullptr
@@ -81,7 +421,6 @@ namespace build2
update_id, // Note: not update_for_install_id.
install_id,
op_update.name,
- op_update.var_name,
op_update.name_do,
op_update.name_doing,
op_update.name_did,
@@ -90,6 +429,8 @@ namespace build2
op_update.concurrency,
op_update.pre_operation,
op_update.post_operation,
+ op_update.operation_pre,
+ op_update.operation_post,
op_update.adhoc_match,
op_update.adhoc_apply
};
diff --git a/libbuild2/install/operation.hxx b/libbuild2/install/operation.hxx
index c1f5416..bd818b4 100644
--- a/libbuild2/install/operation.hxx
+++ b/libbuild2/install/operation.hxx
@@ -4,10 +4,15 @@
#ifndef LIBBUILD2_INSTALL_OPERATION_HXX
#define LIBBUILD2_INSTALL_OPERATION_HXX
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
#include <libbuild2/operation.hxx>
+#include <libbuild2/filesystem.hxx> // auto_rmfile
namespace build2
{
@@ -16,6 +21,65 @@ namespace build2
extern const operation_info op_install;
extern const operation_info op_uninstall;
extern const operation_info op_update_for_install;
+
+ // Set as context::current_inner_odata during the install/uninstall inner
+ // operations.
+ //
+ struct context_data
+ {
+ // Manifest.
+ //
+#ifndef BUILD2_BOOTSTRAP
+ path manifest_file; // Absolute and normalized, empty if `-`.
+ path_name manifest_name; // Original path/name.
+ ofdstream manifest_ofs;
+ ostream& manifest_os;
+ auto_rmfile manifest_autorm;
+ butl::json::stream_serializer manifest_json;
+ const target* manifest_target = nullptr; // Target being installed.
+ struct manifest_target_entry
+ {
+ build2::path path;
+ string mode;
+ build2::path target;
+ };
+ vector<manifest_target_entry> manifest_target_entries;
+#endif
+
+ // The following manifest_install_[dfl]() functions correspond to (and
+ // are called from) file_rule::install_[dfl]().
+
+ // install -d -m <mode> <dir>
+ //
+ static void
+ manifest_install_d (context&,
+ const target&,
+ const dir_path& dir,
+ const string& mode);
+
+ // install -m <mode> <file> <dir>/<name>
+ //
+ static void
+ manifest_install_f (context&,
+ const target& file,
+ const dir_path& dir,
+ const path& name,
+ const string& mode);
+
+ // install -l <link_target> <dir>/<link>
+ //
+ static void
+ manifest_install_l (context&,
+ const target&,
+ const path& link_target,
+ const dir_path& dir,
+ const path& link);
+
+ // Constructor.
+ //
+ explicit
+ context_data (const path* manifest);
+ };
}
}
diff --git a/libbuild2/install/rule.cxx b/libbuild2/install/rule.cxx
index 1411143..873b2e9 100644
--- a/libbuild2/install/rule.cxx
+++ b/libbuild2/install/rule.cxx
@@ -13,6 +13,8 @@
#include <libbuild2/filesystem.hxx>
#include <libbuild2/diagnostics.hxx>
+#include <libbuild2/install/operation.hxx>
+
using namespace std;
using namespace butl;
@@ -40,12 +42,17 @@ namespace build2
// Note that the below rules are called for both install and
// update-for-install.
//
+ // @@ TODO: we clearly need a module class.
+ //
static inline const variable&
- var_install (context& ctx)
+ var_install (const scope& rs)
{
- return ctx.current_outer_ovar != nullptr
- ? *ctx.current_outer_ovar
- : *ctx.current_inner_ovar;
+ context& ctx (rs.ctx);
+
+ return *rs.root_extra->operations[
+ (ctx.current_outer_oif != nullptr
+ ? ctx.current_outer_oif
+ : ctx.current_inner_oif)->id].ovar;
}
// alias_rule
@@ -64,28 +71,44 @@ namespace build2
return true;
}
- const target* alias_rule::
+ pair<const target*, uint64_t> alias_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
assert (i->member == nullptr);
- return filter (is, a, t, i->prerequisite);
+ return filter (is, a, t, i->prerequisite, me);
}
- const target* alias_rule::
+ pair<const target*, uint64_t> alias_rule::
filter (const scope* is,
- action, const target& t, const prerequisite& p) const
+ action, const target& t, const prerequisite& p,
+ match_extra&) const
{
const target& pt (search (t, p));
- return is == nullptr || pt.in (*is) ? &pt : nullptr;
+ const uint64_t options (match_extra::all_options); // No definition.
+ return make_pair (is == nullptr || pt.in (*is) ? &pt : nullptr, options);
}
recipe alias_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
+ {
+ return apply_impl (a, t, me);
+ }
+
+ recipe alias_rule::
+ apply (action, target&) const
+ {
+ assert (false); // Never called.
+ return nullptr;
+ }
+
+ recipe alias_rule::
+ apply_impl (action a, target& t, match_extra& me, bool reapply) const
{
tracer trace ("install::alias_rule::apply");
- context& ctx (t.ctx);
+ assert (!reapply || a.operation () != update_id);
// Pass-through to our installable prerequisites.
//
@@ -97,6 +120,8 @@ namespace build2
auto pms (group_prerequisite_members (a, t, members_mode::never));
for (auto i (pms.begin ()), e (pms.end ()); i != e; ++i)
{
+ // NOTE: see essentially the same logic in reapply_impl() below.
+ //
const prerequisite& p (i->prerequisite);
// Ignore excluded.
@@ -120,13 +145,17 @@ namespace build2
if (!is)
is = a.operation () != update_id ? install_scope (t) : nullptr;
- const target* pt (filter (*is, a, t, i));
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
+
if (pt == nullptr)
{
l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
- continue;
}
-
// Check if this prerequisite is explicitly "not installable", that
// is, there is the 'install' variable and its value is false.
//
@@ -138,64 +167,108 @@ namespace build2
//
// Note: not the same as lookup_install() above.
//
- auto l ((*pt)[var_install (ctx)]);
- if (l && cast<path> (l).string () == "false")
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
{
l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
- continue;
+ pt = nullptr;
}
-
// If this is not a file-based target (e.g., a target group such as
// libu{}) then ignore it if there is no rule to install.
//
- if (pt->is_a<file> ())
- match_sync (a, *pt);
- else if (!try_match_sync (a, *pt).first)
+ else if (pt->is_a<file> ())
+ {
+ match_sync (a, *pt, options);
+ }
+ else if (!try_match_sync (a, *pt, options).first)
{
l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
pt = nullptr;
}
- if (pt != nullptr)
- pts.push_back (prerequisite_target (pt, pi));
+ if (pt != nullptr || reapply)
+ {
+ // Use auxiliary data for a NULL entry to distinguish between
+ // filtered out (1) and ignored for other reasons (0).
+ //
+ pts.push_back (
+ prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0));
+ }
}
return default_recipe;
}
- // fsdir_rule
- //
- const fsdir_rule fsdir_rule::instance;
-
- bool fsdir_rule::
- match (action, target&) const
+ void alias_rule::
+ reapply_impl (action a, target& t, match_extra& me) const
{
- // We always match.
- //
- // Note that we are called both as the outer part during the update-for-
- // un/install pre-operation and as the inner part during the un/install
- // operation itself.
- //
- return true;
- }
+ tracer trace ("install::alias_rule::reapply");
- recipe fsdir_rule::
- apply (action a, target& t) const
- {
- // If this is outer part of the update-for-un/install, delegate to the
- // default fsdir rule. Otherwise, this is a noop (we don't install
- // fsdir{}).
- //
- // For now we also assume we don't need to do anything for prerequisites
- // (the only sensible prerequisite of fsdir{} is another fsdir{}).
+ assert (a.operation () != update_id);
+
+ optional<const scope*> is;
+
+ // Iterate over prerequisites and prerequisite targets in parallel.
//
- if (a.operation () == update_id)
+ auto& pts (t.prerequisite_targets[a]);
+ size_t j (0), n (pts.size ()), en (0);
+
+ auto pms (group_prerequisite_members (a, t, members_mode::never));
+ for (auto i (pms.begin ()), e (pms.end ());
+ i != e && j != n;
+ ++i, ++j, ++en)
{
- match_inner (a, t);
- return &execute_inner;
+ // The same logic as in apply() above except that we skip
+ // prerequisites that were not filtered out.
+ //
+ const prerequisite& p (i->prerequisite);
+
+ include_type pi (include (a, t, p));
+ if (!pi)
+ continue;
+
+ if (p.proj)
+ continue;
+
+ prerequisite_target& pto (pts[j]);
+
+ if (pto.target != nullptr || pto.data == 0)
+ continue;
+
+ if (!is)
+ is = a.operation () != update_id ? install_scope (t) : nullptr;
+
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
+
+ if (pt == nullptr)
+ {
+ l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
+ }
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
+ pt = nullptr;
+ }
+ else if (pt->is_a<file> ())
+ {
+ match_sync (a, *pt, options);
+ }
+ else if (!try_match_sync (a, *pt, options).first)
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
+ pt = nullptr;
+ }
+
+ pto = prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0);
}
- else
- return noop_recipe;
+
+ assert (en == n); // Did not call apply() with true for reapply?
}
// group_rule
@@ -209,41 +282,48 @@ namespace build2
alias_rule::match (a, t);
}
- const target* group_rule::
- filter (action, const target&, const target& m) const
+ bool group_rule::
+ filter (action, const target&, const target&) const
{
- return &m;
+ return true;
}
- const target* group_rule::
+ pair<const target*, uint64_t> group_rule::
filter (const scope* is,
- action, const target& t, const prerequisite& p) const
+ action, const target& t, const prerequisite& p,
+ match_extra&) const
{
+ const uint64_t options (match_extra::all_options); // No definition.
+ pair<const target*, uint64_t> r (nullptr, options);
+
// The same logic as in file_rule::filter() below.
//
if (p.is_a<exe> ())
{
+ const scope& rs (*p.scope.root_scope ());
+
if (p.vars.empty () ||
- cast_empty<path> (p.vars[var_install (t.ctx)]).string () != "true")
- return nullptr;
+ cast_empty<path> (p.vars[var_install (rs)]).string () != "true")
+ return r;
}
const target& pt (search (t, p));
- return is == nullptr || pt.in (*is) ? &pt : nullptr;
+ if (is == nullptr || pt.in (*is))
+ r.first = &pt;
+
+ return r;
}
recipe group_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
{
tracer trace ("install::group_rule::apply");
- context& ctx (t.ctx);
-
// Resolve group members.
//
// Remember that we are called twice: first during update for install
// (pre-operation) and then during install. During the former, we rely
- // on the normall update rule to resolve the group members. During the
+ // on the normal update rule to resolve the group members. During the
// latter, there will be no rule to do this but the group will already
// have been resolved by the pre-operation.
//
@@ -253,22 +333,23 @@ namespace build2
? resolve_members (a, t)
: t.group_members (a));
- if (gv.members != nullptr)
+ if (gv.members != nullptr && gv.count != 0)
{
+ const scope& rs (t.root_scope ());
+
auto& pts (t.prerequisite_targets[a]);
for (size_t i (0); i != gv.count; ++i)
{
- const target* m (gv.members[i]);
+ const target* mt (gv.members[i]);
- if (m == nullptr)
+ if (mt == nullptr)
continue;
// Let a customized rule have its say.
//
- const target* mt (filter (a, t, *m));
- if (mt == nullptr)
+ if (!filter (a, t, *mt))
{
- l5 ([&]{trace << "ignoring " << *m << " (filtered out)";});
+ l5 ([&]{trace << "ignoring " << *mt << " (filtered out)";});
continue;
}
@@ -277,7 +358,7 @@ namespace build2
//
// Note: not the same as lookup_install() above.
//
- auto l ((*mt)[var_install (ctx)]);
+ auto l ((*mt)[var_install (rs)]);
if (l && cast<path> (l).string () == "false")
{
l5 ([&]{trace << "ignoring " << *mt << " (not installable)";});
@@ -291,7 +372,7 @@ namespace build2
// Delegate to the base rule.
//
- return alias_rule::apply (a, t);
+ return alias_rule::apply (a, t, me);
}
@@ -308,49 +389,72 @@ namespace build2
return true;
}
- const target* file_rule::
+ bool file_rule::
+ filter (action, const target&, const target&) const
+ {
+ return true;
+ }
+
+ pair<const target*, uint64_t> file_rule::
filter (const scope* is,
- action a, const target& t, prerequisite_iterator& i) const
+ action a, const target& t, prerequisite_iterator& i,
+ match_extra& me) const
{
assert (i->member == nullptr);
- return filter (is, a, t, i->prerequisite);
+ return filter (is, a, t, i->prerequisite, me);
}
- const target* file_rule::
+ pair<const target*, uint64_t> file_rule::
filter (const scope* is,
- action, const target& t, const prerequisite& p) const
+ action, const target& t, const prerequisite& p,
+ match_extra&) const
{
+ const uint64_t options (match_extra::all_options); // No definition.
+ pair<const target*, uint64_t> r (nullptr, options);
+
// See also group_rule::filter() with identical semantics.
//
if (p.is_a<exe> ())
{
+ const scope& rs (*p.scope.root_scope ());
+
// Note that while include() checks for install=false, here we need to
// check for explicit install=true. We could have re-used the lookup
// performed by include(), but then we would have had to drag it
// through and also diagnose any invalid values.
//
if (p.vars.empty () ||
- cast_empty<path> (p.vars[var_install (t.ctx)]).string () != "true")
- return nullptr;
+ cast_empty<path> (p.vars[var_install (rs)]).string () != "true")
+ return r;
}
const target& pt (search (t, p));
- return is == nullptr || pt.in (*is) ? &pt : nullptr;
+ if (is == nullptr || pt.in (*is))
+ r.first = &pt;
+
+ return r;
}
recipe file_rule::
- apply (action a, target& t) const
+ apply (action a, target& t, match_extra& me) const
{
- recipe r (apply_impl (a, t));
+ recipe r (apply_impl (a, t, me));
return r != nullptr ? move (r) : noop_recipe;
}
recipe file_rule::
- apply_impl (action a, target& t) const
+ apply (action, target&) const
+ {
+ assert (false); // Never called.
+ return nullptr;
+ }
+
+ recipe file_rule::
+ apply_impl (action a, target& t, match_extra& me, bool reapply) const
{
tracer trace ("install::file_rule::apply");
- context& ctx (t.ctx);
+ assert (!reapply || a.operation () != update_id);
// Note that we are called both as the outer part during the update-for-
// un/install pre-operation and as the inner part during the un/install
@@ -371,11 +475,36 @@ namespace build2
// (actual update). We used to do this after matching the prerequisites
// but the inner rule may provide some rule-specific information (like
// the target extension for exe{}) that may be required during the
- // prerequisite search (like the base name for in{}).
+ // prerequisite search (like the base name for in{}; this no longer
+ // reproduces, likely due to the changes to exe{} extension derivation
+ // but a contrived arrangement can still be made to trigger this).
+ //
+ // But then we discovered that doing this before the prerequisites messes
+ // up with the for-install signaling. Specifically, matching the
+ // prerequisites may signal that they are being updated for install,
+ // for example, for a library via a metadata library used in a moc
+ // recipe. While matching the inner rule may trigger updating during
+ // match of such prerequisites, for example, a source file generated by
+ // that moc recipe that depends on this metadata library. If we match
+ // prerequisites before, then the library that is pulled by the metadata
+ // library will be updated before we had a chance to signal that it
+ // should be updated for install.
+ //
+ // To try to accommodate both cases (as best as we can) we now split the
+ // inner rule match into two steps: we do the match before and apply
+ // after. This allows rules that deal with tricky prerequisites like
+ // in{} to assign the target path in match() instead of apply() (see
+ // in::rule, for example).
//
+#if 0
optional<bool> unchanged;
if (a.operation () == update_id)
unchanged = match_inner (a, t, unmatch::unchanged).first;
+#else
+ action ia (a.inner_action ());
+ if (a.operation () == update_id)
+ match_only_sync (ia, t);
+#endif
optional<const scope*> is; // Installation scope (resolve lazily).
@@ -383,6 +512,8 @@ namespace build2
auto pms (group_prerequisite_members (a, t, members_mode::never));
for (auto i (pms.begin ()), e (pms.end ()); i != e; ++i)
{
+ // NOTE: see essentially the same logic in reapply_impl() below.
+ //
const prerequisite& p (i->prerequisite);
// Ignore excluded.
@@ -406,27 +537,30 @@ namespace build2
if (!is)
is = a.operation () != update_id ? install_scope (t) : nullptr;
- const target* pt (filter (*is, a, t, i));
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
if (pt == nullptr)
{
l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
- continue;
}
-
+ //
// See if we were explicitly instructed not to touch this target (the
// same semantics as in alias_rule).
//
// Note: not the same as lookup_install() above.
//
- auto l ((*pt)[var_install (ctx)]);
- if (l && cast<path> (l).string () == "false")
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
{
l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
- continue;
+ pt = nullptr;
}
-
- if (pt->is_a<file> ())
+ else if (pt->is_a<file> ())
{
// If the matched rule returned noop_recipe, then the target state
// is set to unchanged as an optimization. Use this knowledge to
@@ -434,19 +568,36 @@ namespace build2
// when updating static installable content (headers, documentation,
// etc).
//
- if (match_sync (a, *pt, unmatch::unchanged).first)
+ // Regarding options, the expectation here is that they are not used
+ // for the update operation. And for install/uninstall, if they are
+ // used, then they don't affect whether the target is unchanged. All
+ // feels reasonable.
+ //
+ if (match_sync (a, *pt, unmatch::unchanged, options).first)
pt = nullptr;
}
- else if (!try_match_sync (a, *pt).first)
+ else if (!try_match_sync (a, *pt, options).first)
{
l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
pt = nullptr;
}
- if (pt != nullptr)
- pts.push_back (prerequisite_target (pt, pi));
+ if (pt != nullptr || reapply)
+ {
+ // Use auxiliary data for a NULL entry to distinguish between
+ // filtered out (1) and ignored for other reasons (0).
+ //
+ pts.push_back (
+ prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0));
+ }
}
+#if 1
+ optional<bool> unchanged;
+ if (a.operation () == update_id)
+ unchanged = match_sync (ia, t, unmatch::unchanged).first;
+#endif
+
if (a.operation () == update_id)
{
return *unchanged
@@ -464,6 +615,79 @@ namespace build2
}
}
+ void file_rule::
+ reapply_impl (action a, target& t, match_extra& me) const
+ {
+ tracer trace ("install::file_rule::reapply");
+
+ assert (a.operation () != update_id);
+
+ optional<const scope*> is;
+
+ // Iterate over prerequisites and prerequisite targets in parallel.
+ //
+ auto& pts (t.prerequisite_targets[a]);
+ size_t j (0), n (pts.size ()), en (0);
+
+ auto pms (group_prerequisite_members (a, t, members_mode::never));
+ for (auto i (pms.begin ()), e (pms.end ());
+ i != e && j != n;
+ ++i, ++j, ++en)
+ {
+ // The same logic as in apply() above except that we skip
+ // prerequisites that were not filtered out.
+ //
+ const prerequisite& p (i->prerequisite);
+
+ include_type pi (include (a, t, p));
+ if (!pi)
+ continue;
+
+ if (p.proj)
+ continue;
+
+ prerequisite_target& pto (pts[j]);
+
+ if (pto.target != nullptr || pto.data == 0)
+ continue;
+
+ if (!is)
+ is = a.operation () != update_id ? install_scope (t) : nullptr;
+
+ pair<const target*, uint64_t> fr (filter (*is, a, t, i, me));
+
+ const target* pt (fr.first);
+ uint64_t options (fr.second);
+
+ lookup l;
+
+ if (pt == nullptr)
+ {
+ l5 ([&]{trace << "ignoring " << p << " (filtered out)";});
+ }
+ else if ((l = (*pt)[var_install (*p.scope.root_scope ())]) &&
+ cast<path> (l).string () == "false")
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (not installable)";});
+ pt = nullptr;
+ }
+ else if (pt->is_a<file> ())
+ {
+ if (match_sync (a, *pt, unmatch::unchanged, options).first)
+ pt = nullptr;
+ }
+ else if (!try_match_sync (a, *pt, options).first)
+ {
+ l5 ([&]{trace << "ignoring " << *pt << " (no rule)";});
+ pt = nullptr;
+ }
+
+ pto = prerequisite_target (pt, pi, fr.first == nullptr ? 1 : 0);
+ }
+
+ assert (en == n); // Did not call apply() with true for reapply?
+ }
+
target_state file_rule::
perform_update (action a, const target& t)
{
@@ -548,7 +772,8 @@ namespace build2
const dir_path& d (t.out_dir ().leaf (p->out_path ()));
// Add it as another leading directory rather than modifying
- // the last one directly; somehow, it feels right.
+ // the last one directly; somehow, it feels right. Note: the
+ // result is normalized.
//
if (!d.empty ())
rs.emplace_back (rs.back ().dir / d, rs.back ());
@@ -559,8 +784,9 @@ namespace build2
return rs.back ();
}
- // Resolve installation directory name to absolute directory path. Return
- // all the super-directories leading up to the destination (last).
+ // Resolve installation directory name to absolute and normalized
+ // directory path. Return all the super-directories leading up to the
+ // destination (last).
//
// If target is not NULL, then also handle the subdirs logic.
//
@@ -659,24 +885,52 @@ namespace build2
return rs;
}
- static inline install_dirs
- resolve (const target& t, dir_path d, bool fail_unknown = true)
+ static dir_path
+ resolve_dir (const scope& s, const target* t,
+ dir_path d, dir_path rb,
+ bool fail_unknown)
{
- return resolve (t.base_scope (), &t, move (d), fail_unknown);
+ install_dirs rs (resolve (s, t, move (d), fail_unknown));
+
+ if (rs.empty ())
+ return dir_path ();
+
+ dir_path r (move (rs.back ().dir));
+
+ if (!rb.empty ())
+ {
+ dir_path b (resolve (s, t, move (rb), false).back ().dir);
+
+ try
+ {
+ r = r.relative (b);
+ }
+ catch (const invalid_path&)
+ {
+ fail << "unable to make installation directory " << r
+ << " relative to " << b;
+ }
+ }
+
+ return r;
}
dir_path
- resolve_dir (const target& t, dir_path d, bool fail_unknown)
+ resolve_dir (const target& t, dir_path d, dir_path rb, bool fail_unknown)
{
- install_dirs r (resolve (t, move (d), fail_unknown));
- return r.empty () ? dir_path () : move (r.back ().dir);
+ return resolve_dir (t.base_scope (), &t, move (d), move (rb), fail_unknown);
}
dir_path
- resolve_dir (const scope& s, dir_path d, bool fail_unknown)
+ resolve_dir (const scope& s, dir_path d, dir_path rb, bool fail_unknown)
{
- install_dirs r (resolve (s, nullptr, move (d), fail_unknown));
- return r.empty () ? dir_path () : move (r.back ().dir);
+ return resolve_dir (s, nullptr, move (d), move (rb), fail_unknown);
+ }
+
+ static inline install_dirs
+ resolve (const target& t, dir_path d, bool fail_unknown = true)
+ {
+ return resolve (t.base_scope (), &t, move (d), fail_unknown);
}
path
@@ -692,6 +946,10 @@ namespace build2
bool n (!p->to_directory ());
dir_path d (n ? p->directory () : path_cast<dir_path> (*p));
+ if (n && d.empty ())
+ fail << "relative installation file path '" << p
+ << "' has no directory component";
+
install_dirs ids (resolve (f, d));
if (!n)
@@ -742,30 +1000,15 @@ namespace build2
return s;
}
- // Given an abolute path return its chroot'ed version, if any, accoring to
- // install.chroot.
- //
- template <typename P>
- static inline P
- chroot_path (const scope& rs, const P& p)
- {
- if (const dir_path* d = cast_null<dir_path> (rs["install.chroot"]))
- {
- dir_path r (p.root_directory ());
- assert (!r.empty ()); // Must be absolute.
-
- return *d / p.leaf (r);
- }
-
- return p;
- }
-
void file_rule::
install_d (const scope& rs,
const install_dir& base,
const dir_path& d,
+ const file& t,
uint16_t verbosity)
{
+ assert (d.absolute ());
+
context& ctx (rs.ctx);
// Here is the problem: if this is a dry-run, then we will keep showing
@@ -778,7 +1021,10 @@ namespace build2
// with uninstall since the directories won't be empty (because we don't
// actually uninstall any files).
//
- if (ctx.dry_run)
+ // Note that this also means we won't have the directory entries in the
+ // manifest created with dry-run. Probably not a big deal.
+ //
+ if (ctx.dry_run || !filter_entry (rs, d, path (), entry_type::directory))
return;
dir_path chd (chroot_path (rs, d));
@@ -805,13 +1051,13 @@ namespace build2
dir_path pd (d.directory ());
if (pd != base.dir)
- install_d (rs, base, pd, verbosity);
+ install_d (rs, base, pd, t, verbosity);
}
cstrings args;
string reld (
- cast<string> (ctx.global_scope["build.host.class"]) == "windows"
+ ctx.build_host->class_ == "windows"
? msys_path (chd)
: relative (chd).string ());
@@ -836,10 +1082,14 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "install " << chd;
+ print_diag ("install -d", chd); // See also `install -l` below.
}
- run (pp, args);
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
+
+ context_data::manifest_install_d (ctx, t, d, *base.dir_mode);
}
void file_rule::
@@ -850,14 +1100,21 @@ namespace build2
const path& f,
uint16_t verbosity)
{
+ assert (name.empty () || name.simple ());
+
context& ctx (rs.ctx);
+ const path& leaf (name.empty () ? f.leaf () : name);
+
+ if (!filter_entry (rs, base.dir, leaf, entry_type::regular))
+ return;
+
path relf (relative (f));
dir_path chd (chroot_path (rs, base.dir));
string reld (
- cast<string> (ctx.global_scope["build.host.class"]) == "windows"
+ ctx.build_host->class_ == "windows"
? msys_path (chd)
: relative (chd).string ());
@@ -890,23 +1147,47 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "install " << t;
+ {
+ if (name.empty ())
+ print_diag ("install", t, chd);
+ else
+ print_diag ("install", t, chd / name);
+ }
}
if (!ctx.dry_run)
- run (pp, args);
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
+
+ context_data::manifest_install_f (ctx, t, base.dir, leaf, *base.mode);
}
void file_rule::
install_l (const scope& rs,
const install_dir& base,
- const path& target,
const path& link,
+ const file& target,
+ const path& link_target,
uint16_t verbosity)
{
+ assert (link.simple () && !link.empty ());
+
context& ctx (rs.ctx);
- path rell (relative (chroot_path (rs, base.dir)));
+ if (!filter_entry (rs, base.dir, link, entry_type::symlink))
+ return;
+
+ if (link_target.absolute () &&
+ cast_false<bool> (rs["install.relocatable"]))
+ {
+ fail << "absolute symlink target " << link_target.string ()
+ << " in relocatable installation";
+ }
+
+ dir_path chd (chroot_path (rs, base.dir));
+
+ path rell (relative (chd));
rell /= link;
// We can create a symlink directly without calling ln. This, however,
@@ -920,7 +1201,7 @@ namespace build2
base.sudo != nullptr ? base.sudo->c_str () : nullptr,
"ln",
"-sf",
- target.string ().c_str (),
+ link_target.string ().c_str (),
rell.string ().c_str (),
nullptr};
@@ -933,11 +1214,19 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "install " << rell << " -> " << target;
+ {
+ // Without a flag it's unclear (unlike with ln) that we are creating
+ // a link. FreeBSD install(1) has the -l flag with the appropriate
+ // semantics. For consistency, we also pass -d above.
+ //
+ print_diag ("install -l", link_target, chd / link);
+ }
}
if (!ctx.dry_run)
- run (pp, args);
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
#else
// The -f part.
//
@@ -949,15 +1238,15 @@ namespace build2
if (verb >= verbosity)
{
if (verb >= 2)
- text << "ln -sf " << target.string () << ' ' << rell.string ();
+ text << "ln -sf " << link_target.string () << ' ' << rell.string ();
else if (verb)
- text << "install " << rell << " -> " << target;
+ print_diag ("install -l", link_target, chd / link);
}
if (!ctx.dry_run)
try
{
- mkanylink (target, rell, true /* copy */);
+ mkanylink (link_target, rell, true /* copy */);
}
catch (const pair<entry_type, system_error>& e)
{
@@ -969,13 +1258,17 @@ namespace build2
fail << "unable to make " << w << ' ' << rell << ": " << e.second;
}
#endif
+
+ context_data::manifest_install_l (ctx,
+ target,
+ link_target,
+ base.dir,
+ link);
}
target_state file_rule::
perform_install (action a, const target& xt) const
{
- context& ctx (xt.ctx);
-
const file& t (xt.as<file> ());
const path& tp (t.path ());
@@ -994,6 +1287,10 @@ namespace build2
bool n (!p.to_directory ());
dir_path d (n ? p.directory () : path_cast<dir_path> (p));
+ if (n && d.empty ())
+ fail << "relative installation file path '" << p
+ << "' has no directory component";
+
// Resolve target directory.
//
install_dirs ids (resolve (t, d));
@@ -1015,7 +1312,7 @@ namespace build2
// sudo, etc).
//
for (auto i (ids.begin ()), j (i); i != ids.end (); j = i++)
- install_d (rs, *j, i->dir, verbosity); // install -d
+ install_d (rs, *j, i->dir, t, verbosity); // install -d
install_dir& id (ids.back ());
@@ -1049,6 +1346,8 @@ namespace build2
//
target_state r (straight_execute_prerequisites (a, t));
+ bool fr (filter (a, t, t));
+
// Then installable ad hoc group members, if any.
//
for (const target* m (t.adhoc_member);
@@ -1059,10 +1358,13 @@ namespace build2
{
if (!mf->path ().empty () && mf->mtime () != timestamp_nonexistent)
{
- if (const path* p = lookup_install<path> (*mf, "install"))
+ if (filter (a, t, *mf))
{
- install_target (*mf, *p, tp.empty () ? 1 : 2);
- r |= target_state::changed;
+ if (const path* p = lookup_install<path> (*mf, "install"))
+ {
+ install_target (*mf, *p, !fr || tp.empty () ? 1 : 2);
+ r |= target_state::changed;
+ }
}
}
}
@@ -1071,9 +1373,9 @@ namespace build2
// Finally install the target itself (since we got here we know the
// install variable is there).
//
- if (!tp.empty ())
+ if (fr && !tp.empty ())
{
- install_target (t, cast<path> (t[var_install (ctx)]), 1);
+ install_target (t, cast<path> (t[var_install (rs)]), 1);
r |= target_state::changed;
}
@@ -1086,9 +1388,13 @@ namespace build2
const dir_path& d,
uint16_t verbosity)
{
+ assert (d.absolute ());
+
+ context& ctx (rs.ctx);
+
// See install_d() for the rationale.
//
- if (rs.ctx.dry_run)
+ if (ctx.dry_run || !filter_entry (rs, d, path (), entry_type::directory))
return false;
dir_path chd (chroot_path (rs, d));
@@ -1135,7 +1441,7 @@ namespace build2
if (verb >= 2)
text << "rmdir " << reld;
else if (verb)
- text << "uninstall " << reld;
+ print_diag ("uninstall -d", chd);
}
try
@@ -1165,11 +1471,19 @@ namespace build2
if (verb >= 2)
print_process (args);
else if (verb)
- text << "uninstall " << reld;
+ print_diag ("uninstall -d", chd);
}
- process pr (run_start (pp, args));
- r = run_finish_code (args, pr);
+ process pr (run_start (pp, args,
+ 0 /* stdin */,
+ 1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+ diag_buffer dbuf (ctx, args[0], pr);
+ dbuf.read ();
+ r = run_finish_code (
+ dbuf,
+ args, pr,
+ verb >= verbosity ? 1 : verb_never /* verbosity */);
}
if (!r)
@@ -1193,42 +1507,16 @@ namespace build2
return r;
}
- bool file_rule::
- uninstall_f (const scope& rs,
- const install_dir& base,
- const file* t,
- const path& name,
- uint16_t verbosity)
+ static void
+ uninstall_f_impl (const scope& rs,
+ const install_dir& base,
+ const path& f,
+ uint16_t verbosity)
{
context& ctx (rs.ctx);
- assert (t != nullptr || !name.empty ());
- path f (chroot_path (rs, base.dir) /
- (name.empty () ? t->path ().leaf () : name));
-
- try
- {
- // Note: don't follow symlinks so if the target is a dangling symlinks
- // we will proceed to removing it.
- //
- if (!file_exists (f, false)) // May throw (e.g., EACCES).
- return false;
- }
- catch (const system_error& e)
- {
- fail << "invalid installation path " << f << ": " << e;
- }
-
path relf (relative (f));
- if (verb >= verbosity && verb == 1)
- {
- if (t != nullptr)
- text << "uninstall " << *t;
- else
- text << "uninstall " << relf;
- }
-
// The same story as with uninstall -d (on Windows rm is also from
// MSYS2/Cygwin).
//
@@ -1264,21 +1552,113 @@ namespace build2
process_path pp (run_search (args[0]));
- if (verb >= verbosity && verb >= 2)
- print_process (args);
+ if (verb >= verbosity)
+ {
+ if (verb >= 2)
+ print_process (args);
+ }
if (!ctx.dry_run)
- run (pp, args);
+ run (ctx,
+ pp, args,
+ verb >= verbosity ? 1 : verb_never /* finish_verbosity */);
+ }
+ }
+
+ bool file_rule::
+ uninstall_f (const scope& rs,
+ const install_dir& base,
+ const file* t,
+ const path& name,
+ uint16_t verbosity)
+ {
+ assert (name.empty () ? t != nullptr : name.simple ());
+
+ const path& leaf (name.empty () ? t->path ().leaf () : name);
+
+ if (!filter_entry (rs, base.dir, leaf, entry_type::regular))
+ return false;
+
+ dir_path chd (chroot_path (rs, base.dir));
+ path f (chd / leaf);
+
+ try
+ {
+ // Note: don't follow symlinks so if the target is a dangling symlink
+ // we will proceed to removing it.
+ //
+ if (!file_exists (f, false)) // May throw (e.g., EACCES).
+ return false;
+ }
+ catch (const system_error& e)
+ {
+ fail << "invalid installation path " << f << ": " << e;
+ }
+
+ if (verb >= verbosity && verb == 1)
+ {
+ if (t != nullptr)
+ {
+ if (name.empty ())
+ print_diag ("uninstall", *t, chd, "<-");
+ else
+ print_diag ("uninstall", *t, f, "<-");
+ }
+ else
+ print_diag ("uninstall", f);
+ }
+
+ uninstall_f_impl (rs, base, f, verbosity);
+ return true;
+ }
+
+ bool file_rule::
+ uninstall_l (const scope& rs,
+ const install_dir& base,
+ const path& link,
+ const path& /*link_target*/,
+ uint16_t verbosity)
+ {
+ assert (link.simple () && !link.empty ());
+
+ if (!filter_entry (rs, base.dir, link, entry_type::symlink))
+ return false;
+
+ dir_path chd (chroot_path (rs, base.dir));
+ path f (chd / link);
+
+ try
+ {
+ // Note: don't follow symlinks so if the target is a dangling symlink
+ // we will proceed to removing it.
+ //
+ if (!file_exists (f, false)) // May throw (e.g., EACCES).
+ return false;
+ }
+ catch (const system_error& e)
+ {
+ fail << "invalid installation path " << f << ": " << e;
+ }
+
+ if (verb >= verbosity && verb == 1)
+ {
+ // It's dubious that showing the link target path adds anything
+ // useful here.
+ //
+#if 0
+ print_diag ("uninstall -l", target, f, "<-");
+#else
+ print_diag ("uninstall -l", f);
+#endif
}
+ uninstall_f_impl (rs, base, f, verbosity);
return true;
}
target_state file_rule::
perform_uninstall (action a, const target& xt) const
{
- context& ctx (xt.ctx);
-
const file& t (xt.as<file> ());
const path& tp (t.path ());
@@ -1295,6 +1675,10 @@ namespace build2
bool n (!p.to_directory ());
dir_path d (n ? p.directory () : path_cast<dir_path> (p));
+ if (n && d.empty ())
+ fail << "relative installation file path '" << p
+ << "' has no directory component";
+
// Resolve target directory.
//
install_dirs ids (resolve (t, d));
@@ -1341,8 +1725,10 @@ namespace build2
//
target_state r (target_state::unchanged);
- if (!tp.empty ())
- r |= uninstall_target (t, cast<path> (t[var_install (ctx)]), 1);
+ bool fr (filter (a, t, t));
+
+ if (fr && !tp.empty ())
+ r |= uninstall_target (t, cast<path> (t[var_install (rs)]), 1);
// Then installable ad hoc group members, if any. To be anally precise,
// we would have to do it in reverse, but that's not easy (it's a
@@ -1356,12 +1742,15 @@ namespace build2
{
if (!mf->path ().empty () && mf->mtime () != timestamp_nonexistent)
{
- if (const path* p = lookup_install<path> (*m, "install"))
+ if (filter (a, t, *mf))
{
- r |= uninstall_target (
- *mf,
- *p,
- tp.empty () || r != target_state::changed ? 1 : 2);
+ if (const path* p = lookup_install<path> (*m, "install"))
+ {
+ r |= uninstall_target (
+ *mf,
+ *p,
+ !fr || tp.empty () || r != target_state::changed ? 1 : 2);
+ }
}
}
}
@@ -1373,5 +1762,40 @@ namespace build2
return r;
}
+
+ // fsdir_rule
+ //
+ const fsdir_rule fsdir_rule::instance;
+
+ bool fsdir_rule::
+ match (action, target&) const
+ {
+ // We always match.
+ //
+ // Note that we are called both as the outer part during the update-for-
+ // un/install pre-operation and as the inner part during the un/install
+ // operation itself.
+ //
+ return true;
+ }
+
+ recipe fsdir_rule::
+ apply (action a, target& t) const
+ {
+ // If this is outer part of the update-for-un/install, delegate to the
+ // default fsdir rule. Otherwise, this is a noop (we don't install
+ // fsdir{}).
+ //
+ // For now we also assume we don't need to do anything for prerequisites
+ // (the only sensible prerequisite of fsdir{} is another fsdir{}).
+ //
+ if (a.operation () == update_id)
+ {
+ match_inner (a, t);
+ return inner_recipe;
+ }
+ else
+ return noop_recipe;
+ }
}
}
diff --git a/libbuild2/install/rule.hxx b/libbuild2/install/rule.hxx
index ad9d6e7..b023af5 100644
--- a/libbuild2/install/rule.hxx
+++ b/libbuild2/install/rule.hxx
@@ -25,42 +25,60 @@ namespace build2
match (action, target&) const override;
// Return NULL if this prerequisite should be ignored and pointer to its
- // target otherwise.
+ // target otherwise. In the latter case, return the match options that
+ // should be used for this prerequisite (use match_extra::all_options
+ // and not 0 if no match options are needed).
//
// The default implementation ignores prerequsites that are outside of
// the installation scope (see install_scope() for details).
//
+ // The default implementation always returns match_extra::all_options.
+ // The match_extra argument is not used by the default implementation.
+ //
// The prerequisite is passed as an iterator allowing the filter to
// "see" inside groups.
//
using prerequisite_iterator =
prerequisite_members_range<group_prerequisites>::iterator;
- virtual const target*
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const;
- virtual const target*
- filter (const scope*, action, const target&, const prerequisite&) const;
+ virtual pair<const target*, uint64_t>
+ filter (const scope*,
+ action, const target&, const prerequisite&,
+ match_extra&) const;
+ // Note: rule::apply() override (with match_extra).
+ //
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
+
+ // Implementation of apply().
+ //
+ // If the implementation may call reapply_impl(), then the reapply
+ // argument to apply_impl() must be true. Note that in this case, the
+ // *_impl() functions use the prerequisite_target::data member for own
+ // housekeeping.
+ //
+ recipe
+ apply_impl (action, target&, match_extra&, bool reapply = false) const;
+
+ // Implementation of reapply() that re-tries prerequisites that have
+ // been filtered out during the reapply() call. Note that currently not
+ // supported for update, only for install/uninstall.
+ //
+ void
+ reapply_impl (action, target&, match_extra&) const;
alias_rule () {}
static const alias_rule instance;
- };
-
- class fsdir_rule: public simple_rule
- {
- public:
- virtual bool
- match (action, target&) const override;
+ private:
virtual recipe
- apply (action, target&) const override;
-
- fsdir_rule () {}
- static const fsdir_rule instance;
+ apply (action, target&) const override; // Dummy simple_rule override.
};
// In addition to the alias rule's semantics, this rule sees through to
@@ -80,28 +98,26 @@ namespace build2
virtual bool
match (action, target&) const override;
- // Return NULL if this group member should be ignored and pointer to its
- // target otherwise.
+ // Return false if this group member should be ignored and true
+ // otherwise. Note that this filter is called during apply().
//
// The default implementation accepts all members.
//
- virtual const target*
+ virtual bool
filter (action, const target&, const target& group_member) const;
// Return NULL if this prerequisite should be ignored and pointer to its
- // target otherwise.
- //
- // The same semantics as in file_rule below.
+ // target otherwise. The same semantics as in file_rule below.
//
- using alias_rule::filter; // "Unhide" to make Clang happy.
-
- virtual const target*
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&,
- const prerequisite&) const override;
+ action, const target&, const prerequisite&,
+ match_extra&) const override;
+
+ using alias_rule::filter; // "Unhide" to make Clang happy.
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
group_rule (bool sto): see_through_only (sto) {}
static const group_rule instance;
@@ -117,8 +133,21 @@ namespace build2
virtual bool
match (action, target&) const override;
+ // Return false if this ad hoc group member should be ignored and true
+ // otherwise. Note that this filter is called during execute and only
+ // for install/uninstall (and not update). For generality, it is also
+ // (first) called on the target itself (can be detected by comparing
+ // the second and third arguments).
+ //
+ // The default implementation accepts all members.
+ //
+ virtual bool
+ filter (action, const target&, const target& adhoc_group_member) const;
+
// Return NULL if this prerequisite should be ignored and pointer to its
- // target otherwise.
+ // target otherwise. In the latter case, return the match options that
+ // should be used for this prerequisite (use match_extra::all_options
+ // and not 0 if no match options are needed).
//
// The default implementation ignores prerequsites that are outside of
// the installation scope (see install_scope() for details). It also
@@ -130,27 +159,47 @@ namespace build2
//
// exe{foo}: exe{bar}: install = true # foo runs bar
//
+ // The default implementation always returns match_extra::all_options.
+ // The match_extra argument is not used by the default implementation.
+ //
// The prerequisite is passed as an iterator allowing the filter to
// "see" inside groups.
//
using prerequisite_iterator =
prerequisite_members_range<group_prerequisites>::iterator;
- virtual const target*
+ virtual pair<const target*, uint64_t>
filter (const scope*,
- action, const target&, prerequisite_iterator&) const;
+ action, const target&, prerequisite_iterator&,
+ match_extra&) const;
- virtual const target*
- filter (const scope*, action, const target&, const prerequisite&) const;
+ virtual pair<const target*, uint64_t>
+ filter (const scope*,
+ action, const target&, const prerequisite&,
+ match_extra&) const;
+ // Note: rule::apply() override (with match_extra).
+ //
virtual recipe
- apply (action, target&) const override;
+ apply (action, target&, match_extra&) const override;
- // Implementation of apply() that returns empty_recipe if the target is
- // not installable.
+ // Implementation of apply() that returns empty_recipe (i.e., NULL) if
+ // the target is not installable.
+ //
+ // If the implementation may call reapply_impl(), then the reapply
+ // argument to apply_impl() must be true. Note that in this case, the
+ // *_impl() functions use the prerequisite_target::data member for own
+ // housekeeping.
//
recipe
- apply_impl (action, target&) const;
+ apply_impl (action, target&, match_extra&, bool reapply = false) const;
+
+ // Implementation of reapply() that re-tries prerequisites that have
+ // been filtered out during the reapply() call. Note that currently not
+ // supported for update, only for install/uninstall.
+ //
+ void
+ reapply_impl (action, target&, match_extra&) const;
static target_state
perform_update (action, const target&);
@@ -188,10 +237,16 @@ namespace build2
//
// install -d <dir>
//
+ // Note: <dir> is expected to be absolute.
+ //
+ // Note that the target argument only specifies which target caused
+ // this directory to be created.
+ //
static void
install_d (const scope& rs,
const install_dir& base,
const dir_path& dir,
+ const file& target,
uint16_t verbosity = 1);
// Install a file:
@@ -199,6 +254,8 @@ namespace build2
// install <file> <base>/ # if <name> is empty
// install <file> <base>/<name> # if <name> is not empty
//
+ // Note that <name> should be a simple path.
+ //
static void
install_f (const scope& rs,
const install_dir& base,
@@ -209,13 +266,25 @@ namespace build2
// Install (make) a symlink:
//
- // ln -s <target> <base>/<link>
+ // install -l <link_target> <base>/<link>
+ //
+ // Which is essentially:
+ //
+ // ln -s <link_target> <base>/<link>
+ //
+ // Note that <link> should be a simple path. Note that <link_target>
+ // must not be absolute if relocatable installation is requested
+ // (config.install.relocatable).
+ //
+ // Note that the target argument only specifies which target this
+ // symlink "belongs" to.
//
static void
install_l (const scope& rs,
const install_dir& base,
- const path& target,
const path& link,
+ const file& target,
+ const path& link_target,
uint16_t verbosity = 1);
// Uninstall (remove) a file or symlink:
@@ -233,13 +302,26 @@ namespace build2
const path& name,
uint16_t verbosity = 1);
+ // Uninstall (remove) a symlink.
+ //
+ // This is essentially uninstall_f() but with better low-verbosity
+ // diagnostics.
+ //
+ static bool
+ uninstall_l (const scope& rs,
+ const install_dir& base,
+ const path& link,
+ const path& link_target,
+ uint16_t verbosity = 1);
+
+
// Uninstall (remove) an empty directory.
//
// uninstall -d <dir>
//
- // We try to remove all the directories between base and dir but not base
- // itself unless base == dir. Return false if nothing has been removed
- // (i.e., the directories do not exist or are not empty).
+ // We try to remove all the directories between base and dir but not
+ // base itself unless base == dir. Return false if nothing has been
+ // removed (i.e., the directories do not exist or are not empty).
//
static bool
uninstall_d (const scope& rs,
@@ -255,6 +337,23 @@ namespace build2
static const file_rule instance;
file_rule () {}
+
+ private:
+ virtual recipe
+ apply (action, target&) const override; // Dummy simple_rule override.
+ };
+
+ class fsdir_rule: public simple_rule
+ {
+ public:
+ virtual bool
+ match (action, target&) const override;
+
+ virtual recipe
+ apply (action, target&) const override;
+
+ fsdir_rule () {}
+ static const fsdir_rule instance;
};
}
}
diff --git a/libbuild2/install/utility.cxx b/libbuild2/install/utility.cxx
index 17b1365..43d97fb 100644
--- a/libbuild2/install/utility.cxx
+++ b/libbuild2/install/utility.cxx
@@ -3,6 +3,9 @@
#include <libbuild2/install/utility.hxx>
+#include <libbuild2/variable.hxx>
+#include <libbuild2/diagnostics.hxx>
+
namespace build2
{
namespace install
@@ -12,6 +15,8 @@ namespace build2
{
context& ctx (t.ctx);
+ // Note: go straight for the public variable pool.
+ //
const variable& var (*ctx.var_pool.find ("config.install.scope"));
if (const string* s = cast_null<string> (ctx.global_scope[var]))
@@ -30,5 +35,261 @@ namespace build2
return nullptr;
}
+
+ bool
+ filter_entry (const scope& rs,
+ const dir_path& base,
+ const path& leaf,
+ entry_type type)
+ {
+ assert (type != entry_type::unknown &&
+ (type == entry_type::directory) == leaf.empty ());
+
+ const filters* fs (cast_null<filters> (rs["install.filter"]));
+
+ if (fs == nullptr || fs->empty ())
+ return true;
+
+ tracer trace ("install::filter");
+
+ // Parse, resolve, and apply each filter in order.
+ //
+ // If redoing all this work for every entry proves too slow, we can
+ // consider some form of caching (e.g., on the per-project basis).
+ //
+ auto i (fs->begin ());
+
+ bool negate (false);
+ if (i->first == "!")
+ {
+ negate = true;
+ ++i;
+ }
+
+ size_t limit (0); // See below.
+
+ for (auto e (fs->end ()); i != e; ++i)
+ {
+ const pair<string, optional<string>>& kv (*i);
+
+ path k;
+ try
+ {
+ k = path (kv.first);
+
+ if (k.absolute ())
+ k.normalize ();
+ }
+ catch (const invalid_path&)
+ {
+ fail << "invalid path '" << kv.first << "' in config.install.filter "
+ << "value";
+ }
+
+ bool v;
+ {
+ const string& s (kv.second ? *kv.second : string ());
+
+ size_t p (s.find (','));
+
+ if (s.compare (0, p, "true") == 0)
+ v = true;
+ else if (s.compare (0, p, "false") == 0)
+ v = false;
+ else
+ fail << "expected true or false instead of '" << string (s, 0, p)
+ << "' in config.install.filter value" << endf;
+
+ if (p != string::npos)
+ {
+ if (s.compare (p + 1, string::npos, "symlink") == 0)
+ {
+ if (type != entry_type::symlink)
+ continue;
+ }
+ else
+ fail << "unknown modifier '" << string (s, p + 1) << "' in "
+ << "config.install.filter value";
+ }
+ }
+
+ // @@ TODO (see below for all the corner cases). Note that in a sense
+ // we already have the match file in any subdirectory support via
+ // simple patterns so perhaps this is not worth the trouble. Or we
+ // could support some limited form (e.g., `**` should be in the
+ // last component). But it may still be tricky to determine if
+ // it is a sub-filter.
+ //
+ if (path_pattern_recursive (k))
+ fail << "recursive wildcard pattern '" << kv.first << "' in "
+ << "config.install.filter value";
+
+ if (k.simple () && !k.to_directory ())
+ {
+ // Simple name/pattern matched against the leaf.
+ //
+ // @@ What if it is `**`?
+ //
+ if (path_pattern (k))
+ {
+ if (!path_match (leaf, k))
+ continue;
+ }
+ else
+ {
+ if (k != leaf)
+ continue;
+ }
+ }
+ else
+ {
+ // Split into directory and leaf.
+ //
+ // @@ What if leaf is `**`?
+ //
+ dir_path d;
+ if (k.to_directory ())
+ {
+ d = path_cast<dir_path> (move (k));
+ k = path (); // No leaf.
+ }
+ else
+ {
+ d = k.directory ();
+ k.make_leaf ();
+ }
+
+ // Resolve relative directory.
+ //
+ // Note that this resolution is potentially project-specific (that
+ // is, different projects may have different install.* locations).
+ //
+ // Note that if the first component is/contains a wildcard (e.g.,
+ // `*/`), then the resulution will fail, which feels correct (what
+ // does */ mean?).
+ //
+ if (d.relative ())
+ {
+ // @@ Strictly speaking, this should be base, not root scope.
+ //
+ d = resolve_dir (rs, move (d));
+ }
+
+ // Return the number of path components in the path.
+ //
+ auto path_comp = [] (const path& p)
+ {
+ size_t n (0);
+ for (auto i (p.begin ()); i != p.end (); ++i)
+ ++n;
+ return n;
+ };
+
+ // We need the sub() semantics but which uses pattern match instead
+ // of equality for the prefix. Looks like chopping off the path and
+ // calling path_match() on that is the best we can do.
+ //
+ // @@ Assumes no `**` components.
+ //
+ auto path_sub = [&path_comp] (const dir_path& ent,
+ const dir_path& pat,
+ size_t n = 0)
+ {
+ if (n == 0)
+ n = path_comp (pat);
+
+ dir_path p;
+ for (auto i (ent.begin ()); n != 0 && i != ent.end (); --n, ++i)
+ p.combine (*i, i.separator ());
+
+ return path_match (p, pat);
+ };
+
+ // The following checks should continue on no match and fall through
+ // to return.
+ //
+ if (k.empty ()) // Directory.
+ {
+ // Directories have special semantics.
+ //
+ // Consider this sequence of filters:
+ //
+ // include/x86_64-linux-gnu/@true
+ // include/x86_64-linux-gnu/details/@false
+ // include/@false
+ //
+ // It seems the semantics we want is that only subcomponent
+ // filters should apply. Maybe remember the latest matched
+ // directory as a current limit? But perhaps we don't need to
+ // remember the directory itself but the number of path
+ // components?
+ //
+ // I guess for patterns we will use the actual matched directory,
+ // not the pattern, to calculate the limit? @@ Because we
+ // currently don't support `**`, we for now can count components
+ // in the pattern.
+
+ // Check if this is a sub-filter.
+ //
+ size_t n (path_comp (d));
+ if (n <= limit)
+ continue;
+
+ if (path_pattern (d))
+ {
+ if (!path_sub (base, d, n))
+ continue;
+ }
+ else
+ {
+ if (!base.sub (d))
+ continue;
+ }
+
+ if (v)
+ {
+ limit = n;
+ continue; // Continue looking for sub-filters.
+ }
+ }
+ else
+ {
+ if (path_pattern (d))
+ {
+ if (!path_sub (base, d))
+ continue;
+ }
+ else
+ {
+ if (!base.sub (d))
+ continue;
+ }
+
+ if (path_pattern (k))
+ {
+ // @@ Does not handle `**`.
+ //
+ if (!path_match (leaf, k))
+ continue;
+ }
+ else
+ {
+ if (k != leaf)
+ continue;
+ }
+ }
+ }
+
+ if (negate)
+ v = !v;
+
+ l4 ([&]{trace << (base / leaf)
+ << (v ? " included by " : " excluded by ")
+ << kv.first << '@' << *kv.second;});
+ return v;
+ }
+
+ return !negate;
+ }
}
}
diff --git a/libbuild2/install/utility.hxx b/libbuild2/install/utility.hxx
index 52b9a54..fc40ebe 100644
--- a/libbuild2/install/utility.hxx
+++ b/libbuild2/install/utility.hxx
@@ -9,6 +9,7 @@
#include <libbuild2/scope.hxx>
#include <libbuild2/target.hxx>
+#include <libbuild2/filesystem.hxx> // entry_type
#include <libbuild2/export.hxx>
@@ -43,7 +44,7 @@ namespace build2
{
auto r (
s.target_vars[tt]["*"].insert (
- *s.var_pool ().find ("install.mode")));
+ *s.ctx.var_pool.find ("install.mode")));
if (r.second) // Already set by the user?
r.first = move (m);
@@ -69,22 +70,56 @@ namespace build2
install_scope (const target&);
// Resolve relative installation directory path (e.g., include/libfoo) to
- // its absolute directory path (e.g., /usr/include/libfoo). If the
- // resolution encountered an unknown directory, issue diagnostics and fail
- // unless fail_unknown is false, in which case return empty directory.
+ // its absolute and normalized directory path (e.g., /usr/include/libfoo).
+ // If the resolution encountered an unknown directory, issue diagnostics
+ // and fail unless fail_unknown is false, in which case return empty
+ // directory.
+ //
+ // For rel_base semantics, see the $install.resolve() documentation. Note
+ // that fail_unknown does not apply to the rel_base resolution.
//
// Note: implemented in rule.cxx.
//
LIBBUILD2_SYMEXPORT dir_path
- resolve_dir (const target&, dir_path, bool fail_unknown = true);
+ resolve_dir (const target&,
+ dir_path,
+ dir_path rel_base = {},
+ bool fail_unknown = true);
LIBBUILD2_SYMEXPORT dir_path
- resolve_dir (const scope&, dir_path, bool fail_unknown = true);
+ resolve_dir (const scope&,
+ dir_path,
+ dir_path rel_base = {},
+ bool fail_unknown = true);
// Resolve file installation path returning empty path if not installable.
//
LIBBUILD2_SYMEXPORT path
resolve_file (const file&); // rule.cxx
+
+ // Given an absolute path return its chroot'ed version, if any, according to
+ // install.chroot.
+ //
+ template <typename P>
+ inline P
+ chroot_path (const scope& rs, const P& p)
+ {
+ assert (p.absolute ());
+ const dir_path* d (cast_null<dir_path> (rs["install.chroot"]));
+ return d != nullptr ? *d / p.leaf (p.root_directory ()) : p;
+ }
+
+ // Installation filtering (config.install.filter).
+ //
+ // If entry type is a directory, then leaf must be empty.
+ //
+ using filters = vector<pair<string, optional<string>>>;
+
+ LIBBUILD2_SYMEXPORT bool
+ filter_entry (const scope& rs,
+ const dir_path& base,
+ const path& leaf,
+ entry_type);
}
}
diff --git a/libbuild2/json.cxx b/libbuild2/json.cxx
new file mode 100644
index 0000000..4ed1631
--- /dev/null
+++ b/libbuild2/json.cxx
@@ -0,0 +1,904 @@
+// file : libbuild2/json.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <libbuild2/json.hxx>
+
+#include <limits>
+
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/parser.hxx>
+# include <libbutl/json/serializer.hxx>
+#endif
+
+namespace build2
+{
+ // json_event
+ //
+#ifndef BUILD2_BOOTSTRAP
+ const char*
+ to_string (json_event e)
+ {
+ switch (e)
+ {
+ case json_event::begin_object: return "beginning of object";
+ case json_event::end_object: return "end of object";
+ case json_event::begin_array: return "beginning of array";
+ case json_event::end_array: return "end of array";
+ case json_event::name: return "member name";
+ case json_event::string: return "string value";
+ case json_event::number: return "numeric value";
+ case json_event::boolean: return "boolean value";
+ case json_event::null: return "null value";
+ }
+
+ return "";
+ }
+#endif
+
+ // json_type
+ //
+ const char*
+ to_string (json_type t, bool dn) noexcept
+ {
+ using type = json_type;
+
+ switch (t)
+ {
+ case type::null: return "null";
+ case type::boolean: return "boolean";
+ case type::signed_number: return dn ? "signed number" : "number";
+ case type::unsigned_number: return dn ? "unsigned number" : "number";
+ case type::hexadecimal_number: return dn ? "hexadecimal number" : "number";
+ case type::string: return "string";
+ case type::array: return "array";
+ case type::object: return "object";
+ }
+ return "";
+ }
+
+ // json_value
+ //
+ const json_value null_json_value (json_type::null);
+
+ [[noreturn]] void
+ json_as_throw (json_type t, json_type e)
+ {
+ string m;
+ m = "expected ";
+ m += to_string (e, true);
+ m += " instead of ";
+ m += to_string (t, true);
+ throw invalid_argument (move (m));
+ }
+
+ [[noreturn]] static void
+ at_throw (json_type t, json_type e, bool index)
+ {
+ string m;
+
+ if (t != e && t != json_type::null)
+ {
+ m = "expected ";
+ m += to_string (e, true);
+ m += " instead of ";
+ m += to_string (t, true);
+ throw invalid_argument (move (m));
+ }
+ else
+ {
+ m = index ? "index" : "name";
+ m += " out of range in ";
+ m += to_string (e, true);
+ throw std::out_of_range (move (m));
+ }
+ }
+
+ const json_value& json_value::
+ at (size_t index) const
+ {
+ if (type == json_type::array)
+ {
+ if (index < array.size ())
+ return array[index];
+ }
+
+ at_throw (type, json_type::array, true);
+ }
+
+ json_value& json_value::
+ at (size_t index)
+ {
+ if (type == json_type::array)
+ {
+ if (index < array.size ())
+ return array[index];
+ }
+
+ at_throw (type, json_type::array, true);
+ }
+
+#if 0
+ const json_value& json_value::
+ operator[] (size_t index) const
+ {
+ if (type == json_type::null)
+ return null_json_value;
+
+ if (type == json_type::array)
+ return index < array.size () ? array[index] : null_json_value;
+
+ at_throw (type, json_type::array, true);
+ }
+
+ json_value& json_value::
+ operator[] (size_t index)
+ {
+ if (type == json_type::null)
+ {
+ new (&array) array_type ();
+ type = json_type::array;
+ }
+
+ if (type == json_type::array)
+ {
+ size_t n (array.size ());
+
+ if (index < n)
+ return array[index];
+
+ // If there are missing elements in between, fill them with nulls.
+ //
+ if (index != n)
+ array.resize (index, json_value ());
+
+ array.push_back (json_value ());
+ return array.back ();
+ }
+
+ at_throw (type, json_type::array, true);
+ }
+#endif
+
+ const json_value& json_value::
+ at (const char* name) const
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ if (i != object.end ())
+ return i->value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ json_value& json_value::
+ at (const char* name)
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ if (i != object.end ())
+ return i->value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ const json_value* json_value::
+ find (const char* name) const
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+ return i != object.end () ? &i->value : nullptr;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ json_value* json_value::
+ find (const char* name)
+ {
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ return i != object.end () ? &i->value : nullptr;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+#if 0
+ const json_value& json_value::
+ operator[] (const char* name) const
+ {
+ if (type == json_type::null)
+ return null_json_value;
+
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+
+ return i != object.end () ? i->value : null_json_value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+
+ json_value& json_value::
+ operator[] (const char* name)
+ {
+ if (type == json_type::null)
+ {
+ new (&object) object_type ();
+ type = json_type::object;
+ }
+
+ if (type == json_type::object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [name] (const json_member& m)
+ {
+ return m.name == name;
+ }));
+
+ if (i != object.end ())
+ return i->value;
+
+ object.push_back (json_member {name, json_value ()});
+ return object.back ().value;
+ }
+
+ at_throw (type, json_type::object, false);
+ }
+#endif
+
+ int json_value::
+ compare (const json_value& v) const noexcept
+ {
+ int r (0);
+ {
+ // Note: we need to treat unsigned and hexadecimal the same.
+ //
+ json_type t (type == json_type::hexadecimal_number
+ ? json_type::unsigned_number
+ : type);
+
+ json_type vt (v.type == json_type::hexadecimal_number
+ ? json_type::unsigned_number
+ : v.type);
+
+ if (t != vt)
+ {
+ // Handle the special signed/unsigned number case here.
+ //
+ if (t == json_type::signed_number &&
+ vt == json_type::unsigned_number)
+ {
+ if (signed_number < 0)
+ r = -1;
+ else
+ {
+ uint64_t u (static_cast<uint64_t> (signed_number));
+ r = u < v.unsigned_number ? -1 : (u > v.unsigned_number ? 1 : 0);
+ }
+ }
+ else if (t == json_type::unsigned_number &&
+ vt == json_type::signed_number)
+ {
+ if (v.signed_number < 0)
+ r = 1;
+ else
+ {
+ uint64_t u (static_cast<uint64_t> (v.signed_number));
+ r = unsigned_number < u ? -1 : (unsigned_number > u ? 1 : 0);
+ }
+ }
+ else
+ r = (static_cast<uint8_t> (t) < static_cast<uint8_t> (vt) ? -1 : 1);
+ }
+ }
+
+ if (r == 0)
+ {
+ switch (type)
+ {
+ case json_type::null:
+ {
+ r = 0;
+ break;
+ }
+ case json_type::boolean:
+ {
+ r = boolean == v.boolean ? 0 : boolean ? 1 : -1;
+ break;
+ }
+ case json_type::signed_number:
+ {
+ r = (signed_number < v.signed_number
+ ? -1
+ : (signed_number > v.signed_number ? 1 : 0));
+ break;
+ }
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ r = (unsigned_number < v.unsigned_number
+ ? -1
+ : (unsigned_number > v.unsigned_number ? 1 : 0));
+ break;
+ }
+ case json_type::string:
+ {
+ r = string.compare (v.string);
+ break;
+ }
+ case json_type::array:
+ {
+ auto i (array.begin ()), ie (array.end ());
+ auto j (v.array.begin ()), je (v.array.end ());
+
+ for (; i != ie && j != je; ++i, ++j)
+ {
+ if ((r = i->compare (*j)) != 0)
+ break;
+ }
+
+ if (r == 0)
+ r = i == ie ? (j == je ? 0 : -1) : 1; // More elements than other?
+
+ break;
+ }
+ case json_type::object:
+ {
+ // We don't expect there to be a large number of members so it makes
+ // sense to iterate in the lexicographical order without making any
+ // copies.
+ //
+ auto next = [] (object_type::const_iterator p, // == e for first
+ object_type::const_iterator b,
+ object_type::const_iterator e)
+ {
+ // We need to find an element with the "smallest" name that is
+ // greater than the previous entry.
+ //
+ auto n (e);
+
+ for (auto i (b); i != e; ++i)
+ {
+ if (p == e || i->name > p->name)
+ {
+ int r;
+ if (n == e || (r = n->name.compare (i->name)) > 0)
+ n = i;
+ else
+ assert (r != 0); // No duplicates.
+ }
+ }
+
+ return n;
+ };
+
+ auto ib (object.begin ()), ie (object.end ()), i (ie);
+ auto jb (v.object.begin ()), je (v.object.end ()), j (je);
+
+ for (;;)
+ {
+ // Note: we must call next() on both.
+ //
+ i = next (i, ib, ie);
+ j = next (j, jb, je);
+
+ if (i == ie || j == je)
+ break;
+
+ // Determine if both have this name and if not, which name comes
+ // first.
+ //
+ int n (i->name.compare (j->name));
+
+ r = (n < 0 // If i's first, then i is greater.
+ ? -1
+ : (n > 0 // If j's first, then j is greater.
+ ? 1
+ : i->value.compare (j->value))); // Both have this name.
+
+ if (r != 0)
+ break;
+ }
+
+ if (r == 0)
+ r = i == ie ? (j == je ? 0 : -1) : 1; // More members than other?
+
+ break;
+ }
+ }
+ }
+
+ return r;
+ }
+
+ static void
+ append_numbers (json_value& l, const json_value& r) noexcept
+ {
+ auto append = [&l] (uint64_t u, int64_t s, bool hex = false)
+ {
+ if (s < 0)
+ {
+          // The absolute value of a minimum signed integer is not
+ // representable in the 2s complement integers. So handle this
+ // specially for completeness.
+ //
+ uint64_t a (
+ s != std::numeric_limits<int64_t>::min ()
+ ? static_cast<uint64_t> (-s)
+ : static_cast<uint64_t> (std::numeric_limits<int64_t>::max ()) + 1);
+
+ if (u >= a)
+ {
+ l.unsigned_number = u - a;
+ l.type = (hex
+ ? json_type::hexadecimal_number
+ : json_type::unsigned_number);
+ }
+ else
+ {
+ l.signed_number = -static_cast<int64_t> (a - u);
+ l.type = json_type::signed_number;
+ }
+ }
+ else
+ {
+ l.unsigned_number = u + static_cast<uint64_t> (s);
+ l.type = (hex
+ ? json_type::hexadecimal_number
+ : json_type::unsigned_number);
+ }
+ };
+
+ // We try to keep LHS hex if possible.
+ //
+ if (l.type == json_type::signed_number)
+ {
+ if (r.type == json_type::signed_number)
+ {
+ // Deal with non-negative signed numbers for completeness.
+ //
+ if (l.signed_number >= 0)
+ append (static_cast <uint64_t> (l.signed_number), r.signed_number);
+ else if (r.signed_number >= 0)
+ append (static_cast <uint64_t> (r.signed_number), l.signed_number);
+ else
+ l.signed_number += r.signed_number;
+ }
+ else
+ append (r.unsigned_number, l.signed_number);
+ }
+ else
+ {
+ if (r.type == json_type::signed_number)
+ append (l.unsigned_number,
+ r.signed_number,
+ l.type == json_type::hexadecimal_number);
+ else
+ l.unsigned_number += r.unsigned_number;
+ }
+ }
+
+ void json_value::
+ append (json_value&& v, bool override)
+ {
+ if (type == json_type::null)
+ {
+ *this = move (v);
+ return;
+ }
+ else if (type == json_type::array)
+ {
+ if (v.type == json_type::array)
+ {
+ if (array.empty ())
+ array = move (v.array);
+ else
+ array.insert (array.end (),
+ make_move_iterator (v.array.begin ()),
+ make_move_iterator (v.array.end ()));
+ }
+ else
+ array.push_back (move (v));
+
+ return;
+ }
+ else
+ {
+ switch (v.type)
+ {
+ case json_type::null: return;
+ case json_type::boolean:
+ {
+ if (type != json_type::boolean)
+ break;
+
+ boolean = boolean || v.boolean;
+ return;
+ }
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ if (type != json_type::signed_number &&
+ type != json_type::unsigned_number &&
+ type != json_type::hexadecimal_number)
+ break;
+
+ append_numbers (*this, v);
+ return;
+ }
+ case json_type::string:
+ {
+ if (type != json_type::string)
+ break;
+
+ string += v.string;
+ return;
+ }
+ case json_type::array: break;
+ case json_type::object:
+ {
+ if (type != json_type::object)
+ break;
+
+ if (object.empty ())
+ object = move (v.object);
+ else
+ {
+ for (json_member& m: v.object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [&m] (const json_member& o)
+ {
+ return m.name == o.name;
+ }));
+ if (i == object.end ())
+ object.push_back (move (m));
+ else if (override)
+ i->value = move (m.value);
+ }
+ }
+
+ return;
+ }
+ }
+ }
+
+ throw invalid_argument (
+ string_type ("unable to append ") + to_string (v.type) + " to " +
+ to_string (type));
+ }
+
+ void json_value::
+ prepend (json_value&& v, bool override)
+ {
+ if (type == json_type::null)
+ {
+ *this = move (v);
+ return;
+ }
+ else if (type == json_type::array)
+ {
+ if (v.type == json_type::array)
+ {
+ if (array.empty ())
+ array = move (v.array);
+ else
+ array.insert (array.begin (),
+ make_move_iterator (v.array.begin ()),
+ make_move_iterator (v.array.end ()));
+ }
+ else
+ array.insert (array.begin (), move (v));
+
+ return;
+ }
+ else
+ {
+ switch (v.type)
+ {
+ case json_type::null: return;
+ case json_type::boolean:
+ {
+ if (type != json_type::boolean)
+ break;
+
+ boolean = boolean || v.boolean;
+ return;
+ }
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ if (type != json_type::signed_number &&
+ type != json_type::unsigned_number &&
+ type != json_type::hexadecimal_number)
+ break;
+
+ append_numbers (*this, v);
+ return;
+ }
+ case json_type::string:
+ {
+ if (type != json_type::string)
+ break;
+
+ string.insert (0, v.string);
+ return;
+ }
+ case json_type::array: break;
+ case json_type::object:
+ {
+ if (type != json_type::object)
+ break;
+
+ if (object.empty ())
+ object = move (v.object);
+ else
+ {
+ for (json_member& m: v.object)
+ {
+ auto i (find_if (object.begin (), object.end (),
+ [&m] (const json_member& o)
+ {
+ return m.name == o.name;
+ }));
+ if (i == object.end ())
+ object.insert (object.begin (), move (m));
+ else if (override)
+ i->value = move (m.value);
+ }
+ }
+
+ return;
+ }
+ }
+ }
+
+ throw invalid_argument (
+ string_type ("unable to prepend ") + to_string (v.type) + " to " +
+ to_string (type));
+ }
+
+#ifndef BUILD2_BOOTSTRAP
+ json_value::
+ json_value (json_parser& p, optional<json_type> et)
+ {
+ using namespace butl::json;
+
+ // A JSON input text cannot be empty.
+ //
+ // Once we have JSON5 support we will be able to distinguish hexadecimal
+ // numbers.
+ //
+ json_type t (json_type::null);
+ switch (*p.next ())
+ {
+ case event::begin_object: t = json_type::object; break;
+ case event::begin_array: t = json_type::array; break;
+ case event::string: t = json_type::string; break;
+ case event::number: t = (p.value ()[0] == '-'
+ ? json_type::signed_number
+ : json_type::unsigned_number); break;
+ case event::boolean: t = json_type::boolean; break;
+ case event::null: t = json_type::null; break;
+ case event::name:
+ case event::end_array:
+ case event::end_object:
+ {
+ assert (false);
+ type = json_type::null;
+ return;
+ }
+ }
+
+ if (et && *et != t)
+ {
+ throw invalid_json_input (
+ p.input_name != nullptr ? p.input_name : "",
+ p.line (),
+ p.column (),
+ p.position (),
+ string_type ("expected ") + to_string (*et, true) + " instead of " +
+ to_string (t, true));
+ }
+
+ switch (t)
+ {
+ case json_type::object:
+ {
+ object_type o; // For exception safety.
+ while (*p.next () != event::end_object)
+ {
+ string_type n (p.name ());
+
+ // Check for duplicates. For now we fail but in the future we may
+ // provide a mode (via a flag) to override instead.
+ //
+ if (find_if (o.begin (), o.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }) != o.end ())
+ {
+ throw invalid_json_input (
+ p.input_name != nullptr ? p.input_name : "",
+ p.line (),
+ p.column (),
+ p.position (),
+ "duplicate object member '" + n + '\'');
+ }
+
+ o.push_back (json_member {move (n), json_value (p)});
+ }
+
+ new (&object) object_type (move (o));
+ type = t;
+ break;
+ }
+ case json_type::array:
+ {
+ array_type c; // For exception safety.
+ while (*p.peek () != event::end_array)
+ c.push_back (json_value (p));
+ p.next (); // Consume end_array.
+
+ new (&array) array_type (move (c));
+ type = t;
+ break;
+ }
+ case json_type::string:
+ {
+ string_type& s (p.value ());
+
+ // Don't move if small string optimized.
+ //
+ if (s.size () > 15)
+ new (&string) string_type (move (s));
+ else
+ new (&string) string_type (s);
+
+ type = t;
+ break;
+ }
+ case json_type::signed_number:
+ {
+ signed_number = p.value<int64_t> ();
+ type = t;
+ break;
+ }
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ unsigned_number = p.value<uint64_t> ();
+ type = t;
+ break;
+ }
+ case json_type::boolean:
+ {
+ boolean = p.value<bool> ();
+ type = t;
+ break;
+ }
+ case json_type::null:
+ {
+ type = t;
+ break;
+ }
+ }
+ }
+
+ void json_value::
+ serialize (json_buffer_serializer& s, optional<json_type> et) const
+ {
+ using namespace butl::json;
+
+ if (et && *et != type)
+ {
+ throw invalid_json_output (
+ nullopt,
+ invalid_json_output::error_code::invalid_value,
+ string_type ("expected ") + to_string (*et, true) + " instead of " +
+ to_string (type, true));
+ }
+
+ switch (type)
+ {
+ case json_type::null:
+ {
+ s.value (nullptr);
+ break;
+ }
+ case json_type::boolean:
+ {
+ s.value (boolean);
+ break;
+ }
+ case json_type::signed_number:
+ {
+ s.value (signed_number);
+ break;
+ }
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ {
+ // When we have JSON5 support, we will be able to serialize
+ // hexadecimal properly.
+ //
+ s.value (unsigned_number);
+ break;
+ }
+ case json_type::string:
+ {
+ s.value (string);
+ break;
+ }
+ case json_type::array:
+ {
+ s.begin_array ();
+ for (const json_value& e: array)
+ e.serialize (s);
+ s.end_array ();
+ break;
+ }
+ case json_type::object:
+ {
+ s.begin_object ();
+ for (const json_member& m: object)
+ {
+ s.member_name (m.name);
+ m.value.serialize (s);
+ }
+ s.end_object ();
+ break;
+ }
+ }
+ }
+
+#else
+ json_value::
+ json_value (json_parser&, optional<json_type>)
+ {
+ assert (false);
+ type = json_type::null;
+ }
+
+ void json_value::
+ serialize (json_buffer_serializer&, optional<json_type>) const
+ {
+ assert (false);
+ }
+#endif
+}
diff --git a/libbuild2/json.hxx b/libbuild2/json.hxx
new file mode 100644
index 0000000..96596e3
--- /dev/null
+++ b/libbuild2/json.hxx
@@ -0,0 +1,369 @@
+// file : libbuild2/json.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef LIBBUILD2_JSON_HXX
+#define LIBBUILD2_JSON_HXX
+
+#include <libbuild2/types.hxx>
+#include <libbuild2/utility.hxx>
+
+#include <libbuild2/export.hxx>
+
+namespace butl
+{
+ namespace json
+ {
+ enum class event: uint8_t;
+ class parser;
+ class buffer_serializer;
+ class stream_serializer;
+ class invalid_json_input;
+ class invalid_json_output;
+ }
+}
+
+namespace build2
+{
+ using json_event = butl::json::event;
+ using json_parser = butl::json::parser;
+ using json_buffer_serializer = butl::json::buffer_serializer;
+ using json_stream_serializer = butl::json::stream_serializer;
+ using butl::json::invalid_json_input;
+ using butl::json::invalid_json_output;
+
+#ifndef BUILD2_BOOTSTRAP
+ LIBBUILD2_SYMEXPORT const char*
+ to_string (json_event);
+#endif
+
+ // @@ TODO:
+ //
+ // - provide swap().
+ // - provide operator=(uint64_t), etc.
+ // - provide std::hash specialization
+  //   - tighten at()/[] interface in json_array and json_object
+  //   - tighten noexcept where possible
+ // - operator bool() - in a sense null is like nullopt.
+ //
+
+  // This JSON representation has one extension compared to the standard JSON
+ // model: it distinguishes between signed, unsigned, and hexadecimal
+ // numbers.
+ //
+ // Note also that we don't assume that object members are in a sorted order
+ // (but do assume there are no duplicates). However, we could add an
+ // argument to signal that this is the case to speed up some functions, for
+ // example, compare().
+ //
+ enum class json_type: uint8_t
+ {
+ null, // Note: keep first for comparison.
+ boolean,
+ signed_number,
+ unsigned_number,
+ hexadecimal_number,
+ string,
+ array,
+ object,
+ };
+
+ // Return the JSON type as string. If distinguish_numbers is true, then
+  // distinguish between the signed, unsigned, and hexadecimal types.
+ //
+ LIBBUILD2_SYMEXPORT const char*
+ to_string (json_type, bool distinguish_numbers = false) noexcept;
+
+ inline ostream&
+ operator<< (ostream& os, json_type t) {return os << to_string (t);}
+
+ struct json_member;
+
+ class LIBBUILD2_SYMEXPORT json_value
+ {
+ public:
+ using string_type = build2::string;
+ using array_type = vector<json_value>;
+ using object_type = vector<json_member>;
+
+ json_type type;
+
+ // Unchecked value access.
+ //
+ union
+ {
+ bool boolean;
+ int64_t signed_number;
+ uint64_t unsigned_number; // Also used for hexadecimal_number.
+ string_type string;
+ array_type array;
+ object_type object;
+ };
+
+ // Checked value access.
+ //
+ // If the type matches, return the corresponding member of the union.
+ // Otherwise throw std::invalid_argument.
+ //
+ bool as_bool () const;
+ bool& as_bool ();
+
+ int64_t as_int64 () const;
+ int64_t& as_int64 ();
+
+ uint64_t as_uint64 () const;
+ uint64_t& as_uint64 ();
+
+ const string_type& as_string () const;
+ string_type& as_string ();
+
+ const array_type& as_array () const;
+ array_type& as_array ();
+
+ const object_type& as_object () const;
+ object_type& as_object ();
+
+
+ // Construction.
+ //
+ explicit
+ json_value (json_type = json_type::null) noexcept;
+
+ explicit
+ json_value (std::nullptr_t) noexcept;
+
+ explicit
+ json_value (bool) noexcept;
+
+ explicit
+ json_value (int64_t) noexcept;
+
+ explicit
+ json_value (uint64_t, bool hexadecimal = false) noexcept;
+
+ explicit
+ json_value (string_type);
+
+    // If the expected type is specified, then fail if it does not match
+ // parsed. Throws invalid_json_input.
+ //
+ explicit
+ json_value (json_parser&, optional<json_type> expected = {});
+
+    // If the expected type is specified, then fail if it does not match the
+ // value's. Throws invalid_json_output.
+ //
+ void
+ serialize (json_buffer_serializer&,
+ optional<json_type> expected = {}) const;
+
+ // Note that values of different types are never equal, except for
+ // signed/unsigned/hexadecimal numbers. Null is equal to null and is less
+ // than any other value. Arrays are compared lexicographically. Object
+ // members are considered in the lexicographically-compared name-ascending
+ // order (see RFC8785). An absent member is less than a present member
+ // (even if it's null).
+ //
+ int
+ compare (const json_value&) const noexcept;
+
+ // Append/prepend one JSON value to another. Throw invalid_argument if the
+ // values are incompatible. Note that for numbers this can also lead to
+ // the change of the value type.
+ //
+ // Append/prepend an array to an array splices in the array elements
+ // rather than adding an element of the array type.
+ //
+ // By default, append to an object overrides existing members while
+ // prepend does not. In a sense, whatever appears last is kept, which is
+ // consistent with what we expect to happen when specifying the same name
+ // repeatedly (provided it's not considered invalid) in a text
+ // representation (e.g., {"a":1,"a":2}). Position-wise, both append and
+ // prepend retain the positions of existing members with append inserting
+ // new ones at the end while prepend -- at the beginning.
+ //
+ void
+ append (json_value&&, bool override = true);
+
+ void
+ prepend (json_value&&, bool override = false);
+
+ // Array element access.
+ //
+ // If the index is out of array bounds, the at() functions throw
+ // std::out_of_range, the const operator[] returns null_json_value, and
+ // the non-const operator[] inserts a new null value at the specified
+ // position (filling any missing elements in between with nulls) and
+ // returns that. All three functions throw std::invalid_argument if the
+ // value is not an array or null with null treated as (missing) array
+ // rather than wrong value type (and with at() functions throwing
+ // out_of_range in this case).
+ //
+ // Note that non-const operator[] will not only insert a new element but
+ // will also turn the value it is called upon into array if it is null.
+ // This semantics allows you to string several subscripts to build up a
+ // chain of values.
+ //
+ // Note also that while the operator[] interface is convenient for
+ // accessing and modifying (or building up) values deep in the tree, it
+ // can lead to inefficiencies or even undesirable semantics during
+ // otherwise read-only access of a non-const object due to the potential
+ // insertion of null values for missing array elements. As a result, it's
+ // recommended to always use a const reference for read-only access (or
+ // use the at() interface if this is deemed too easy to forget).
+ //
+ const json_value&
+ at (size_t) const;
+
+ json_value&
+ at (size_t);
+
+#if 0
+ const json_value&
+ operator[] (size_t) const;
+
+ json_value&
+ operator[] (size_t);
+#endif
+
+
+ // Object member access.
+ //
+ // If a member with the specified name is not found in the object, the
+ // at() functions throw std::out_of_range, the find() function returns
+ // NULL, the const operator[] returns null_json_value, and the non-const
+ // operator[] adds a new member with the specified name and null value and
+ // returns that value. All three functions throw std::invalid_argument if
+ // the value is not an object or null with null treated as (missing)
+ // object rather than wrong value type (and with at() functions throwing
+ // out_of_range in this case).
+ //
+ // Note that non-const operator[] will not only insert a new member but
+ // will also turn the value it is called upon into object if it is null.
+ // This semantics allows you to string several subscripts to build up a
+ // chain of values.
+ //
+ // Note also that while the operator[] interface is convenient for
+ // accessing and modifying (or building up) values deep in the tree, it
+ // can lead to inefficiencies or even undesirable semantics during
+ // otherwise read-only access of a non-const object due to the potential
+ // insertion of null values for missing object members. As a result, it's
+ // recommended to always use a const reference for read-only access (or
+ // use the at() interface if this is deemed too easy to forget).
+ //
+ const json_value&
+ at (const char*) const;
+
+ json_value&
+ at (const char*);
+
+ const json_value*
+ find (const char*) const;
+
+ json_value*
+ find (const char*);
+
+#if 0
+ const json_value&
+ operator[] (const char*) const;
+
+ json_value&
+ operator[] (const char*);
+#endif
+
+ const json_value&
+ at (const string_type&) const;
+
+ json_value&
+ at (const string_type&);
+
+ const json_value*
+ find (const string_type&) const;
+
+ json_value*
+ find (const string_type&);
+
+#if 0
+ const json_value&
+ operator[] (const string_type&) const;
+
+ json_value&
+ operator[] (const string_type&);
+#endif
+
+ // Note that the moved-from value becomes JSON null value.
+ //
+ json_value (json_value&&) noexcept;
+ json_value (const json_value&);
+
+ json_value& operator= (json_value&&) noexcept;
+ json_value& operator= (const json_value&);
+
+ ~json_value () noexcept;
+ };
+
+ LIBBUILD2_SYMEXPORT extern const json_value null_json_value;
+
+ inline bool
+ operator== (const json_value& x, const json_value& y) {return x.compare (y) == 0;}
+
+ inline bool
+ operator!= (const json_value& x, const json_value& y) {return !(x == y);}
+
+ inline bool
+ operator< (const json_value& x, const json_value& y) {return x.compare (y) < 0;}
+
+ inline bool
+ operator<= (const json_value& x, const json_value& y) {return x.compare (y) <= 0;}
+
+ inline bool
+ operator> (const json_value& x, const json_value& y) {return !(x <= y);}
+
+ inline bool
+ operator>= (const json_value& x, const json_value& y) {return !(x < y);}
+
+ // A JSON object member.
+ //
+ struct json_member
+ {
+ // @@ TODO: add some convenience constructors?
+
+ string name;
+ json_value value;
+ };
+
+ // A JSON value that can only be an array.
+ //
+ class /*LIBBUILD2_SYMEXPORT*/ json_array: public json_value
+ {
+ public:
+ // Create empty array.
+ //
+ json_array () noexcept;
+
+ explicit
+ json_array (json_parser&);
+
+ void
+ serialize (json_buffer_serializer& s) const;
+ };
+
+ // A JSON value that can only be an object.
+ //
+ class /*LIBBUILD2_SYMEXPORT*/ json_object: public json_value
+ {
+ public:
+ // Create empty object.
+ //
+ json_object () noexcept;
+
+ explicit
+ json_object (json_parser&);
+
+ void
+ serialize (json_buffer_serializer& s) const;
+ };
+}
+
+#include <libbuild2/json.ixx>
+
+#endif // LIBBUILD2_JSON_HXX
diff --git a/libbuild2/json.ixx b/libbuild2/json.ixx
new file mode 100644
index 0000000..76cd00a
--- /dev/null
+++ b/libbuild2/json.ixx
@@ -0,0 +1,349 @@
+// file : libbuild2/json.ixx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+namespace build2
+{
+ [[noreturn]] LIBBUILD2_SYMEXPORT void
+ json_as_throw (json_type actual, json_type expected);
+
+ inline bool json_value::
+ as_bool () const
+ {
+ if (type == json_type::boolean)
+ return boolean;
+
+ json_as_throw (type, json_type::boolean);
+ }
+
+ inline bool& json_value::
+ as_bool ()
+ {
+ if (type == json_type::boolean)
+ return boolean;
+
+ json_as_throw (type, json_type::boolean);
+ }
+
+ inline int64_t json_value::
+ as_int64 () const
+ {
+ if (type == json_type::signed_number)
+ return signed_number;
+
+ json_as_throw (type, json_type::signed_number);
+ }
+
+ inline int64_t& json_value::
+ as_int64 ()
+ {
+ if (type == json_type::signed_number)
+ return signed_number;
+
+ json_as_throw (type, json_type::signed_number);
+ }
+
+ inline uint64_t json_value::
+ as_uint64 () const
+ {
+ if (type == json_type::unsigned_number ||
+ type == json_type::hexadecimal_number)
+ return unsigned_number;
+
+ json_as_throw (type, json_type::unsigned_number);
+ }
+
+ inline uint64_t& json_value::
+ as_uint64 ()
+ {
+ if (type == json_type::unsigned_number ||
+ type == json_type::hexadecimal_number)
+ return unsigned_number;
+
+ json_as_throw (type, json_type::unsigned_number);
+ }
+
+ inline const string& json_value::
+ as_string () const
+ {
+ if (type == json_type::string)
+ return string;
+
+ json_as_throw (type, json_type::string);
+ }
+
+ inline string& json_value::
+ as_string ()
+ {
+ if (type == json_type::string)
+ return string;
+
+ json_as_throw (type, json_type::string);
+ }
+
+ inline const json_value::array_type& json_value::
+ as_array () const
+ {
+ if (type == json_type::array)
+ return array;
+
+ json_as_throw (type, json_type::array);
+ }
+
+ inline json_value::array_type& json_value::
+ as_array ()
+ {
+ if (type == json_type::array)
+ return array;
+
+ json_as_throw (type, json_type::array);
+ }
+
+ inline const json_value::object_type& json_value::
+ as_object () const
+ {
+ if (type == json_type::object)
+ return object;
+
+ json_as_throw (type, json_type::object);
+ }
+
+ inline json_value::object_type& json_value::
+ as_object ()
+ {
+ if (type == json_type::object)
+ return object;
+
+ json_as_throw (type, json_type::object);
+ }
+
+ inline json_value::
+ ~json_value () noexcept
+ {
+ switch (type)
+ {
+ case json_type::null:
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: break;
+ case json_type::string: string.~string_type (); break;
+ case json_type::array: array.~array_type (); break;
+ case json_type::object: object.~object_type (); break;
+ }
+ }
+
+ inline json_value::
+ json_value (json_type t) noexcept
+ : type (t)
+ {
+ switch (type)
+ {
+ case json_type::null: break;
+ case json_type::boolean: boolean = false; break;
+ case json_type::signed_number: signed_number = 0; break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: unsigned_number = 0; break;
+ case json_type::string: new (&string) string_type (); break;
+ case json_type::array: new (&array) array_type (); break;
+ case json_type::object: new (&object) object_type (); break;
+ }
+ }
+
+ inline json_value::
+ json_value (std::nullptr_t) noexcept
+ : type (json_type::null)
+ {
+ }
+
+ inline json_value::
+ json_value (bool v) noexcept
+ : type (json_type::boolean), boolean (v)
+ {
+ }
+
+ inline json_value::
+ json_value (int64_t v) noexcept
+ : type (json_type::signed_number), signed_number (v)
+ {
+ }
+
+ inline json_value::
+ json_value (uint64_t v, bool hex) noexcept
+ : type (hex
+ ? json_type::hexadecimal_number
+ : json_type::unsigned_number),
+ unsigned_number (v)
+ {
+ }
+
+ inline json_value::
+ json_value (string_type v)
+ : type (json_type::string), string (move (v))
+ {
+ }
+
+ inline const json_value& json_value::
+ at (const string_type& n) const
+ {
+ return at (n.c_str ());
+ }
+
+ inline json_value& json_value::
+ at (const string_type& n)
+ {
+ return at (n.c_str ());
+ }
+
+ inline const json_value* json_value::
+ find (const string_type& n) const
+ {
+ return find (n.c_str ());
+ }
+
+ inline json_value* json_value::
+ find (const string_type& n)
+ {
+ return find (n.c_str ());
+ }
+
+#if 0
+ inline const json_value& json_value::
+ operator[] (const string_type& n) const
+ {
+ return operator[] (n.c_str ());
+ }
+
+ inline json_value& json_value::
+ operator[] (const string_type& n)
+ {
+ return operator[] (n.c_str ());
+ }
+#endif
+
+ inline json_value::
+ json_value (json_value&& v) noexcept
+ : type (v.type)
+ {
+ switch (type)
+ {
+ case json_type::null:
+ break;
+ case json_type::boolean:
+ boolean = v.boolean;
+ break;
+ case json_type::signed_number:
+ signed_number = v.signed_number;
+ break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ unsigned_number = v.unsigned_number;
+ break;
+ case json_type::string:
+ new (&string) string_type (move (v.string));
+ v.string.~string_type ();
+ break;
+ case json_type::array:
+ new (&array) array_type (move (v.array));
+ v.array.~array_type ();
+ break;
+ case json_type::object:
+ new (&object) object_type (move (v.object));
+ v.object.~object_type ();
+ break;
+ }
+
+ v.type = json_type::null;
+ }
+
+ inline json_value::
+ json_value (const json_value& v)
+ : type (v.type)
+ {
+ switch (type)
+ {
+ case json_type::null:
+ break;
+ case json_type::boolean:
+ boolean = v.boolean;
+ break;
+ case json_type::signed_number:
+ signed_number = v.signed_number;
+ break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ unsigned_number = v.unsigned_number;
+ break;
+ case json_type::string:
+ new (&string) string_type (v.string);
+ break;
+ case json_type::array:
+ new (&array) array_type (v.array);
+ break;
+ case json_type::object:
+ new (&object) object_type (v.object);
+ break;
+ }
+ }
+
+ inline json_value& json_value::
+ operator= (json_value&& v) noexcept
+ {
+ if (this != &v)
+ {
+ this->~json_value ();
+ new (this) json_value (move (v));
+ }
+ return *this;
+ }
+
+ inline json_value& json_value::
+ operator= (const json_value& v)
+ {
+ if (this != &v)
+ {
+ this->~json_value ();
+ new (this) json_value (v);
+ }
+ return *this;
+ }
+
+ // json_array
+ //
+ inline json_array::
+ json_array () noexcept
+ : json_value (json_type::array)
+ {
+ }
+
+ inline json_array::
+ json_array (json_parser& p)
+ : json_value (p, json_type::array)
+ {
+ }
+
+ inline void json_array::
+ serialize (json_buffer_serializer& s) const
+ {
+ json_value::serialize (s, json_type::array);
+ }
+
+ // json_object
+ //
+ inline json_object::
+ json_object () noexcept
+ : json_value (json_type::object)
+ {
+ }
+
+ inline json_object::
+ json_object (json_parser& p)
+ : json_value (p, json_type::object)
+ {
+ }
+
+ inline void json_object::
+ serialize (json_buffer_serializer& s) const
+ {
+ json_value::serialize (s, json_type::object);
+ }
+}
diff --git a/libbuild2/lexer.cxx b/libbuild2/lexer.cxx
index 992e5d1..04c15be 100644
--- a/libbuild2/lexer.cxx
+++ b/libbuild2/lexer.cxx
@@ -42,6 +42,22 @@ namespace build2
return make_pair (make_pair (r[0], r[1]), sep_);
}
+ pair<char, bool> lexer::
+ peek_char ()
+ {
+ auto p (skip_spaces ());
+ assert (!p.second);
+ sep_ = p.first;
+
+ char r ('\0');
+
+ xchar c (peek ());
+ if (!eos (c))
+ r = c;
+
+ return make_pair (r, sep_);
+ }
+
void lexer::
mode (lexer_mode m, char ps, optional<const char*> esc, uintptr_t data)
{
@@ -144,13 +160,15 @@ namespace build2
break;
}
case lexer_mode::foreign:
- assert (data > 1);
- // Fall through.
+ {
+ assert (ps == '\0' && data > 1);
+ s = false;
+ break;
+ }
case lexer_mode::single_quoted:
case lexer_mode::double_quoted:
{
- assert (ps == '\0');
- s = false;
+ assert (false); // Can only be set manually in word().
break;
}
case lexer_mode::variable:
@@ -162,8 +180,49 @@ namespace build2
default: assert (false); // Unhandled custom mode.
}
- state_.push (
- state {m, data, nullopt, lsb, false, ps, s, n, q, *esc, s1, s2});
+ mode_impl (state {m, data, nullopt, lsb, false, ps, s, n, q, *esc, s1, s2});
+ }
+
+ void lexer::
+ mode_impl (state&& s)
+ {
+ // If we are in the double-quoted mode then, unless the new mode is eval
+ // or variable, delay the state switch until the current mode is expired.
+ // Note that we delay by injecting the new state beneath the current
+ // state.
+ //
+ if (!state_.empty () &&
+ state_.top ().mode == lexer_mode::double_quoted &&
+ s.mode != lexer_mode::eval &&
+ s.mode != lexer_mode::variable)
+ {
+ state qs (move (state_.top ())); // Save quoted state.
+ state_.top () = move (s); // Overwrite quoted state with new state.
+ state_.push (move (qs)); // Restore quoted state.
+ }
+ else
+ state_.push (move (s));
+ }
+
+ void lexer::
+ expire_mode ()
+ {
+ // If we are in the double-quoted mode, then delay the state expiration
+ // until the current mode is expired. Note that we delay by overwriting
+ // the being expired state with the current state.
+ //
+ assert (!state_.empty () &&
+ (state_.top ().mode != lexer_mode::double_quoted ||
+ state_.size () > 1));
+
+ if (state_.top ().mode == lexer_mode::double_quoted)
+ {
+ state qs (move (state_.top ())); // Save quoted state.
+ state_.pop (); // Pop quoted state.
+ state_.top () = move (qs); // Expire state, restoring quoted state.
+ }
+ else
+ state_.pop ();
}
token lexer::
@@ -654,9 +713,9 @@ namespace build2
}
token lexer::
- word (state st, bool sep)
+ word (const state& rst, bool sep)
{
- lexer_mode m (st.mode);
+ lexer_mode m (rst.mode);
xchar c (peek ());
assert (!eos (c));
@@ -687,22 +746,66 @@ namespace build2
lexeme += c;
};
- for (; !eos (c); c = peek ())
+ const state* st (&rst);
+ for (bool first (true); !eos (c); first = false, c = peek ())
{
// First handle escape sequences.
//
if (c == '\\')
{
- // In the variable mode we treat the beginning of the escape sequence
- // as a separator (think \"$foo\").
+ // In the variable mode we treat immediate `\` as the escape sequence
+ // literal and any following as a separator (think \"$foo\").
//
if (m == lexer_mode::variable)
- break;
+ {
+ if (!first)
+ break;
+
+ get ();
+ c = get ();
+
+ if (eos (c))
+ fail (c) << "unterminated escape sequence";
+
+ // For now we only support all the simple C/C++ escape sequences
+ // plus \0 (which in C/C++ is an octal escape sequence).
+ //
+ // In the future we may decide to support more elaborate sequences
+ // such as \xNN, \uNNNN, etc.
+ //
+ // Note: we return it in the literal form instead of translating for
+ // easier printing.
+ //
+ switch (c)
+ {
+ case '\'':
+ case '"':
+ case '?':
+ case '\\':
+ case '0':
+ case 'a':
+ case 'b':
+ case 'f':
+ case 'n':
+ case 'r':
+ case 't':
+ case 'v': lexeme = c; break;
+ default:
+ fail (c) << "unknown escape sequence \\" << c;
+ }
+
+ state_.pop ();
+ return token (type::escape,
+ move (lexeme),
+ sep,
+ qtype, qcomp, qfirst,
+ ln, cn);
+ }
get ();
xchar p (peek ());
- const char* esc (st.escapes);
+ const char* esc (st->escapes);
if (esc == nullptr ||
(*esc != '\0' && !eos (p) && strchr (esc, p) != nullptr))
@@ -718,7 +821,7 @@ namespace build2
continue;
}
else
- unget (c); // Treat as a normal character.
+ unget (c); // Fall through to treat as a normal character.
}
bool done (false);
@@ -747,8 +850,8 @@ namespace build2
get ();
state_.pop ();
- st = state_.top ();
- m = st.mode;
+ st = &state_.top ();
+ m = st->mode;
continue;
}
}
@@ -757,19 +860,17 @@ namespace build2
//
else if (m == lexer_mode::variable)
{
- bool first (lexeme.empty ());
-
// Handle special variable names, if any.
//
- if (first &&
- st.data != 0 &&
- strchr (reinterpret_cast<const char*> (st.data), c) != nullptr)
+ if (first &&
+ st->data != 0 &&
+ strchr (reinterpret_cast<const char*> (st->data), c) != nullptr)
{
get ();
lexeme += c;
done = true;
}
- else if (c != '_' && !(first ? alpha (c) : alnum (c)))
+ else if (c != '_' && !(lexeme.empty () ? alpha (c) : alnum (c)))
{
if (c != '.')
done = true;
@@ -789,17 +890,17 @@ namespace build2
{
// First check if it's a pair separator.
//
- if (c == st.sep_pair)
+ if (c == st->sep_pair)
done = true;
else
{
// Then see if this character or character sequence is a separator.
//
- for (const char* p (strchr (st.sep_first, c));
+ for (const char* p (strchr (st->sep_first, c));
p != nullptr;
p = done ? nullptr : strchr (p + 1, c))
{
- char s (st.sep_second[p - st.sep_first]);
+ char s (st->sep_second[p - st->sep_first]);
// See if it has a second.
//
@@ -817,8 +918,21 @@ namespace build2
// Handle single and double quotes if enabled for this mode and unless
// they were considered separators.
//
- if (st.quotes && !done)
+ if (st->quotes && !done)
{
+ auto quoted_mode = [this] (lexer_mode m)
+ {
+ // In the double-quoted mode we only do effective escaping of the
+ // special `$("\` characters, line continuations, plus `)` for
+ // symmetry. Nothing can be escaped in single-quoted.
+ //
+ const char* esc (m == lexer_mode::double_quoted ? "$()\"\\\n" : "");
+
+ state_.push (state {
+ m, 0, nullopt, false, false, '\0', false, true, true,
+ esc, nullptr, nullptr});
+ };
+
switch (c)
{
case '\'':
@@ -826,7 +940,7 @@ namespace build2
// Enter the single-quoted mode in case the derived lexer needs
// to notice this.
//
- mode (lexer_mode::single_quoted);
+ quoted_mode (lexer_mode::single_quoted);
switch (qtype)
{
@@ -865,9 +979,10 @@ namespace build2
{
get ();
- mode (lexer_mode::double_quoted);
- st = state_.top ();
- m = st.mode;
+ quoted_mode (lexer_mode::double_quoted);
+
+ st = &state_.top ();
+ m = st->mode;
switch (qtype)
{
@@ -1023,6 +1138,8 @@ namespace build2
}
case '\\':
{
+ // See if this is line continuation.
+ //
get ();
if (peek () == '\n')
diff --git a/libbuild2/lexer.hxx b/libbuild2/lexer.hxx
index 148666e..e913829 100644
--- a/libbuild2/lexer.hxx
+++ b/libbuild2/lexer.hxx
@@ -26,14 +26,15 @@ namespace build2
// mode we don't treat certain characters (e.g., `+`, `=`) as special so
// that we can use them in the variable values, e.g., `foo = g++`. In
// contrast, in the variable mode, we restrict certain character (e.g., `/`)
- // from appearing in the name. The values mode is like value but recogizes
- // `,` as special (used in contexts where we need to list multiple
- // values). The attributes/attribute_value modes are like values where each
- // value is potentially a variable assignment; they don't treat `{` and `}`
- // as special (so we cannot have name groups in attributes) as well as
- // recognizes `=` and `]`. The subscript mode is like value but doesn't
- // treat `{` and `}` as special and recognizes `]`. The eval mode is used in
- // the evaluation context.
+ // from appearing in the name. Additionally, in the variable mode we
+ // recognize leading `\` as the beginning of the escape sequence ($\n). The
+ // values mode is like value but recognizes `,` as special (used in contexts
+ // where we need to list multiple values). The attributes/attribute_value
+ // modes are like values where each value is potentially a variable
+ // assignment; they don't treat `{` and `}` as special (so we cannot have
+ // name groups in attributes) as well as recognizes `=` and `]`. The
+ // subscript mode is like value but doesn't treat `{` and `}` as special and
+ // recognizes `]`. The eval mode is used in the evaluation context.
//
// A number of modes are "derived" from the value/values mode by recognizing
// a few extra characters:
@@ -133,10 +134,23 @@ namespace build2
const path_name&
name () const {return name_;}
- // Note: sets mode for the next token. The second argument can be used to
- // specify the pair separator character (if the mode supports pairs). If
- // escapes is not specified, then inherit the current mode's (though a
- // mode can also override it).
+ // Set the lexer mode for the next token or delay this until the end of a
+ // double-quoted token sequence is encountered. The second argument can be
+ // used to specify the pair separator character (if the mode supports
+ // pairs). If escapes is not specified, then inherit the current mode's
+ // (though a mode can also override it).
+ //
+ // Note that there is a common parsing pattern of sensing the language
+ // construct kind we are about to parse by reading its first token,
+ // switching to an appropriate lexing mode, and then parsing the rest. The
+ // problem here is that the first token may start the double-quoted token
+ // sequence, turning the lexer into the double-quoted mode. In this case
+ // switching the lexer mode right away would not be a good idea. Thus,
+ // this function delays the mode switch until the end of the double-quoted
+ // sequence is encountered. Note, however, that such a delay only works
+ // properly if the function is called right after the first quoted token
+ // is read (because any subsequent tokens may end up being parsed in a
+ // nested mode such as variable or eval; see mode_impl() for details).
//
virtual void
mode (lexer_mode,
@@ -153,10 +167,12 @@ namespace build2
state_.top ().lsbrace_unsep = unsep;
}
- // Expire the current mode early.
+ // Expire the current mode early or delay this until the end of a
+ // double-quoted token sequence is encountered (see mode() for details on
+ // the delay condition and reasoning).
//
void
- expire_mode () {state_.pop ();}
+ expire_mode ();
lexer_mode
mode () const {return state_.top ().mode;}
@@ -175,7 +191,7 @@ namespace build2
virtual token
next ();
- // Peek at the first two characters of the next token(s). Return the
+ // Peek at the first one/two characters of the next token(s). Return the
// characters or '\0' if either would be eos. Also return an indicator of
// whether the next token would be separated. Note: cannot be used to peek
// at the first character of a line.
@@ -184,6 +200,9 @@ namespace build2
// mode in which these characters will actually be parsed use the same
// whitespace separation (the sep_space and sep_newline values).
//
+ pair<char, bool>
+ peek_char ();
+
pair<pair<char, char>, bool>
peek_chars ();
@@ -244,7 +263,7 @@ namespace build2
// been "expired" from the top).
//
virtual token
- word (state current, bool separated);
+ word (const state& current, bool separated);
// Return true in first if we have seen any spaces. Skipped empty lines
// don't count. In other words, we are only interested in spaces that are
@@ -255,6 +274,20 @@ namespace build2
pair<bool, bool>
skip_spaces ();
+ // Set state for the next token or delay until the end of a double-quoted
+ // token sequence is encountered (see mode() for details on the delay
+ // condition and reasoning).
+ //
+ void
+ mode_impl (state&&);
+
+ state&
+ current_state ()
+ {
+ assert (!state_.empty ());
+ return state_.top ();
+ }
+
// Diagnostics.
//
protected:
@@ -283,11 +316,14 @@ namespace build2
}
const path_name& name_;
- std::stack<state> state_;
bool sep_; // True if we skipped spaces in peek().
private:
+ // Use current_state(), mode_impl(), and expire_mode().
+ //
+ std::stack<state> state_;
+
using base = char_scanner<butl::utf8_validator, 2>;
// Buffer for a get()/peek() potential error.
diff --git a/libbuild2/module.cxx b/libbuild2/module.cxx
index b7b9bbb..1aaa38d 100644
--- a/libbuild2/module.cxx
+++ b/libbuild2/module.cxx
@@ -30,26 +30,26 @@ using namespace butl;
namespace build2
{
- mutex loaded_modules_lock::mutex_;
+ mutex module_libraries_lock::mutex_;
- loaded_module_map loaded_modules;
+ module_libraries_map module_libraries;
void
load_builtin_module (module_load_function* lf)
{
for (const module_functions* i (lf ()); i->name != nullptr; ++i)
- loaded_modules[i->name] = i;
+ module_libraries.emplace (i->name, module_library {*i, dir_path ()});
}
// Sorted array of bundled modules (excluding core modules bundled with
// libbuild2; see below).
//
-#if !defined(BUILD2_BOOTSTRAP) && !defined(LIBBUILD2_STATIC_BUILD)
static const char* bundled_modules[] = {
"bash",
"bin",
"c",
"cc",
+ "cli",
"cxx",
"in",
"version"
@@ -63,7 +63,6 @@ namespace build2
bundled_modules + sizeof (bundled_modules) / sizeof (*bundled_modules),
mod);
}
-#endif
// Note: also used by ad hoc recipes thus not static.
//
@@ -77,15 +76,23 @@ namespace build2
// same global mutexes. Also disable nested module context for good
// measure.
//
+ // The reserve values were picked experimentally by building libbuild2 and
+ // adding a reasonable margin for future growth.
+ //
ctx.module_context_storage->reset (
- new context (ctx.sched,
- ctx.mutexes,
- ctx.fcache,
- false, /* match_only */
+ new context (*ctx.sched,
+ *ctx.mutexes,
+ *ctx.fcache,
+ nullopt, /* match_only */
false, /* no_external_modules */
false, /* dry_run */
+ ctx.no_diag_buffer,
ctx.keep_going,
ctx.global_var_overrides, /* cmd_vars */
+ context::reserves {
+ 2500, /* targets */
+ 900 /* variables */
+ },
nullopt)); /* module_context */
// We use the same context for building any nested modules that might be
@@ -120,6 +127,9 @@ namespace build2
{
// New update operation.
//
+ assert (op_update.operation_pre == nullptr &&
+ op_update.operation_post == nullptr);
+
ctx.module_context->current_operation (op_update);
// Un-tune the scheduler.
@@ -133,8 +143,8 @@ namespace build2
// keep it in case things change. Actually, we may need it, if the
// scheduler was started up in a tuned state, like in bpkg).
//
- auto sched_tune (ctx.sched.serial ()
- ? scheduler::tune_guard (ctx.sched, 0)
+ auto sched_tune (ctx.sched->serial ()
+ ? scheduler::tune_guard (*ctx.sched, 0)
: scheduler::tune_guard ());
// Remap verbosity level 0 to 1 unless we were requested to be silent.
@@ -232,11 +242,20 @@ namespace build2
}
#endif
- static module_load_function*
+ // Return the module functions as well as the module project directory or
+ // empty if not imported from project. Return {nullptr, nullopt} if not
+ // found.
+ //
+ // The dry-run mode only calls import_search() and always returns NULL for
+ // module functions (see below for background).
+ //
+ static pair<module_load_function*, optional<dir_path>>
import_module (
#if defined(BUILD2_BOOTSTRAP) || defined(LIBBUILD2_STATIC_BUILD)
+ bool,
scope&,
#else
+ bool dry_run,
scope& bs,
#endif
const string& mod,
@@ -250,15 +269,21 @@ namespace build2
{
tracer trace ("import_module");
+ pair<module_load_function*, optional<dir_path>> r (nullptr, nullopt);
+
// Take care of core modules that are bundled with libbuild2 in case they
// are not pre-loaded by the driver.
//
- if (mod == "config") return &config::build2_config_load;
- else if (mod == "dist") return &dist::build2_dist_load;
- else if (mod == "install") return &install::build2_install_load;
- else if (mod == "test") return &test::build2_test_load;
+ if (mod == "config") r.first = &config::build2_config_load;
+ else if (mod == "dist") r.first = &dist::build2_dist_load;
+ else if (mod == "install") r.first = &install::build2_install_load;
+ else if (mod == "test") r.first = &test::build2_test_load;
- module_load_function* r (nullptr);
+ if (r.first != nullptr)
+ {
+ r.second = dir_path ();
+ return r;
+ }
// No dynamic loading of build system modules during bootstrap or if
// statically-linked..
@@ -327,7 +352,7 @@ namespace build2
// and undefined if the module was not mentioned.
//
if (boot && !bundled && ctx.no_external_modules)
- return nullptr;
+ return r; // NULL
// See if we can import a target for this module.
//
@@ -382,7 +407,7 @@ namespace build2
if (ir.first.empty ())
{
assert (opt);
- return nullptr;
+ return r; // NULL
}
if (ir.second)
@@ -390,6 +415,8 @@ namespace build2
// What if a module is specified with config.import.<mod>.<lib>.libs?
// Note that this could still be a project-qualified target.
//
+ // Note: we now return an empty directory to mean something else.
+ //
if (ir.second->empty ())
fail (loc) << "direct module target importation not yet supported";
@@ -397,6 +424,17 @@ namespace build2
// the target (which will also give us the shared library path).
//
l5 ([&]{trace << "found " << ir.first << " in " << *ir.second;});
+ }
+
+ if (dry_run)
+ {
+ r.second = ir.second ? move (*ir.second) : dir_path ();
+ return r;
+ }
+
+ if (ir.second)
+ {
+ r.second = *ir.second;
// Create the build context if necessary.
//
@@ -409,7 +447,7 @@ namespace build2
create_module_context (ctx, loc);
}
- // Inherit loaded_modules lock from the outer context.
+ // Inherit module_libraries lock from the outer context.
//
ctx.module_context->modules_lock = ctx.modules_lock;
@@ -418,7 +456,7 @@ namespace build2
//
auto_thread_env penv (nullptr);
context& ctx (*bs.ctx.module_context);
- scheduler::phase_guard pg (ctx.sched);
+ scheduler::phase_guard pg (*ctx.sched);
// Load the imported project in the module context.
//
@@ -469,6 +507,8 @@ namespace build2
}
else
{
+ r.second = dir_path ();
+
// No module project found. Form the shared library name (incorporating
// build system core version) and try using system-default search
// (installed, rpath, etc).
@@ -511,7 +551,7 @@ namespace build2
fail (loc) << "unable to lookup " << sym << " in build system module "
<< mod << " (" << lib << "): " << err;
- r = function_cast<module_load_function*> (hs.second);
+ r.first = function_cast<module_load_function*> (hs.second);
}
else if (!opt)
{
@@ -523,7 +563,10 @@ namespace build2
<< "line variable to specify its project out_root";
}
else
+ {
+ r.second = nullopt;
l5 ([&]{trace << "unable to load " << lib << ": " << err;});
+ }
#endif // BUILD2_BOOTSTRAP || LIBBUILD2_STATIC_BUILD
@@ -539,89 +582,200 @@ namespace build2
{
tracer trace ("find_module");
- // Note that we hold the lock for the entire time it takes to build a
- // module.
+ // If this is a submodule, get the main module name.
+ //
+ string mmod (smod, 0, smod.find ('.'));
+
+ // We have a somewhat strange two-level caching in imported_modules
+ // and module_libraries in order to achieve the following:
//
- loaded_modules_lock lock (bs.ctx);
+ // 1. Correctly handle cases where a module can be imported from one
+ // project but not the other.
+ //
+ // 2. Make sure that for each project that imports the module we actually
+ // call import_search() in order to mark any config.import.* as used.
+ //
+ // 3. Make sure that all the projects import the same module.
+ //
+ scope& rs (*bs.root_scope ());
- // Optional modules and submodules sure make this logic convoluted. So we
- // divide it into two parts: (1) find or insert an entry (for submodule
- // or, failed that, for the main module, the latter potentially NULL) and
- // (2) analyze the entry and issue diagnostics.
+ const string* mod;
+ const module_functions* fun;
+
+ // First check the project's imported_modules in case this (main) module
+ // is known to be not found.
//
- auto i (loaded_modules.find (smod)), e (loaded_modules.end ());
+ auto j (rs.root_extra->imported_modules.find (mmod));
+ auto je (rs.root_extra->imported_modules.end ());
- if (i == e)
+ if (j != je && !j->found)
{
- // If this is a submodule, get the main module name.
+ mod = &mmod;
+ fun = nullptr;
+ }
+ else
+ {
+ // Note that we hold the lock for the entire time it takes to build a
+ // module.
//
- string mmod (smod, 0, smod.find ('.'));
+ module_libraries_lock lock (bs.ctx);
- if (mmod != smod)
- i = loaded_modules.find (mmod);
+ // Optional modules and submodules sure make this logic convoluted. So
+ // we divide it into two parts: (1) find or insert an entry (for
+ // submodule or, failed that, for the main module) and (2) analyze the
+ // entry and issue diagnostics.
+ //
+ auto i (module_libraries.find (smod));
+ auto ie (module_libraries.end ());
- if (i == e)
+ bool imported (false);
+ if (i == ie)
{
- module_load_function* f (import_module (bs, mmod, loc, boot, opt));
+ if (mmod != smod)
+ i = module_libraries.find (mmod);
- if (f != nullptr)
+ if (i == ie)
{
- // Enter all the entries noticing which one is our submodule. If
- // none are, then we notice the main module.
- //
- for (const module_functions* j (f ()); j->name != nullptr; ++j)
+ pair<module_load_function*, optional<dir_path>> ir (
+ import_module (false /* dry_run */, bs, mmod, loc, boot, opt));
+
+ if (module_load_function* f = ir.first)
{
- const string& n (j->name);
+ // Enter all the entries noticing which one is our submodule. If
+ // none are, then we notice the main module.
+ //
+ for (const module_functions* j (f ()); j->name != nullptr; ++j)
+ {
+ const string& n (j->name);
+
+ l5 ([&]{trace << "registering " << n;});
- l5 ([&]{trace << "registering " << n;});
+ bool main (n == mmod);
- auto p (loaded_modules.emplace (n, j));
+ auto p (module_libraries.emplace (
+ n,
+ module_library {
+ *j,
+ main ? move (*ir.second) : dir_path ()}));
- if (!p.second)
- fail (loc) << "build system submodule name " << n << " of main "
- << "module " << mmod << " is already in use";
+ if (!p.second)
+ fail (loc) << "build system submodule name " << n << " of main "
+ << "module " << mmod << " is already in use";
- if (n == smod || (i == e && n == mmod))
- i = p.first;
+ // Note: this assumes the main module is last.
+ //
+ if (n == smod || (main && i == ie))
+ i = p.first;
+ }
+
+ // We should at least have the main module.
+ //
+ if (i == ie)
+ fail (loc) << "invalid function list in build system module "
+ << mmod;
}
- // We should at least have the main module.
- //
- if (i == e)
- fail (loc) << "invalid function list in build system module "
- << mmod;
+ imported = true;
}
- else
- i = loaded_modules.emplace (move (mmod), nullptr).first;
+ }
+
+ // Now the iterator points to a submodule or to the main module, or to
+ // end if neither is found.
+ //
+ assert (j == je || i != ie); // Cache state consistency sanity check.
+
+ if (i != ie)
+ {
+ // Note: these should remain stable after we release the lock.
+ //
+ mod = &i->first;
+ fun = &i->second.functions.get ();
+
+ // If this project hasn't imported this main module and we found the
+ // entry in the cache, then we have to perform the import_search()
+ // part of import_module() in order to cover items (2) and (3) above.
+ //
+ // There is one nuance: omit this for bundled modules since it's
+ // possible to first import them ad hoc and then, if we call
+ // import_search() again, to find them differently (e.g., as a
+ // subproject).
+ //
+ if (j == je && !imported && !bundled_module (mmod))
+ {
+ pair<module_load_function*, optional<dir_path>> ir (
+ import_module (true /* dry_run */, bs, mmod, loc, boot, opt));
+
+ if (ir.second)
+ {
+ if (i->first != mmod)
+ {
+ i = module_libraries.find (mmod);
+ assert (i != ie); // Has to be there.
+ }
+
+ const dir_path& cd (*ir.second);
+ const dir_path& pd (i->second.import_path);
+
+ if (cd != pd)
+ {
+ fail (loc) << "inconsistent build system module " << mmod
+ << " importation" <<
+ info << rs << " imports it as "
+ << (cd.empty () ? "ad hoc" : cd.representation ().c_str ()) <<
+ info << "previously imported as "
+ << (pd.empty () ? "ad hoc" : pd.representation ().c_str ());
+ }
+ }
+ else
+ {
+ // This module is not found from this project.
+ //
+ mod = &mmod;
+ fun = nullptr;
+ }
+ }
+ }
+ else
+ {
+ mod = &mmod;
+ fun = nullptr;
}
}
+ // Cache the result in imported_modules if necessary.
+ //
+ if (j == je)
+ rs.root_extra->imported_modules.push_back (
+ module_import {mmod, fun != nullptr});
+
// Reduce skipped external module to optional.
//
- if (boot && i->second == nullptr)
+ if (boot && fun == nullptr)
opt = true;
- // Now the iterator points to a submodule or to the main module, the
- // latter potentially NULL.
+ // Handle optional.
//
- if (!opt)
+ if (fun == nullptr)
{
- if (i->second == nullptr)
- {
- fail (loc) << "unable to load build system module " << i->first;
- }
- else if (i->first != smod)
- {
- fail (loc) << "build system module " << i->first << " has no "
+ if (!opt)
+ fail (loc) << "unable to load build system module " << *mod;
+ }
+ else if (*mod != smod)
+ {
+ if (!opt)
+ fail (loc) << "build system module " << *mod << " has no "
<< "submodule " << smod;
+ else
+ {
+ // Note that if the main module exists but has no such submodule, we
+ // return NULL rather than fail (think of an older version of a module
+ // that doesn't implement some extra functionality).
+ //
+ fun = nullptr;
}
}
- // Note that if the main module exists but has no such submodule, we
- // return NULL rather than fail (think of an older version of a module
- // that doesn't implement some extra functionality).
- //
- return i->second;
+ return fun;
}
void
@@ -629,7 +783,7 @@ namespace build2
{
// First see if this modules has already been booted for this project.
//
- module_map& lm (rs.root_extra->modules);
+ module_state_map& lm (rs.root_extra->loaded_modules);
auto i (lm.find (mod));
if (i != lm.end ())
@@ -674,7 +828,7 @@ namespace build2
i->boot_init = e.init;
}
- rs.assign (rs.var_pool ().insert (mod + ".booted")) = (mf != nullptr);
+ rs.assign (rs.var_pool (true).insert (mod + ".booted")) = (mf != nullptr);
}
void
@@ -705,7 +859,7 @@ namespace build2
{
// First see if this modules has already been inited for this project.
//
- module_map& lm (rs.root_extra->modules);
+ module_state_map& lm (rs.root_extra->loaded_modules);
auto i (lm.find (mod));
bool f (i == lm.end ());
@@ -743,7 +897,7 @@ namespace build2
// buildfile-visible (where we use the term "load a module"; see the note
// on terminology above)
//
- auto& vp (rs.var_pool ());
+ auto& vp (rs.var_pool (true));
value& lv (bs.assign (vp.insert (mod + ".loaded")));
value& cv (bs.assign (vp.insert (mod + ".configured")));
@@ -825,7 +979,7 @@ namespace build2
if (cast_false<bool> (bs[name + ".loaded"]))
{
if (cast_false<bool> (bs[name + ".configured"]))
- return rs.root_extra->modules.find (name)->module;
+ return rs.root_extra->loaded_modules.find (name)->module;
}
else
{
@@ -847,7 +1001,7 @@ namespace build2
// attempt to load it was optional?
return cast_false<bool> (bs[name + ".loaded"])
- ? rs.root_extra->modules.find (name)->module
+ ? rs.root_extra->loaded_modules.find (name)->module
: init_module (rs, bs, name, loc, false /* optional */, hints)->module;
}
}
diff --git a/libbuild2/module.hxx b/libbuild2/module.hxx
index 8223bae..6cdd040 100644
--- a/libbuild2/module.hxx
+++ b/libbuild2/module.hxx
@@ -21,6 +21,12 @@ namespace build2
// implementation's perspectives, the module library is "loaded" and the
// module is optionally "bootstrapped" (or "booted" for short) and then
// "initialized" (or "inited").
+ //
+ // Note also that a module name (or component thereof, for submodules) is
+ // not a project name (in particular, it can be less than 3 characters long)
+ // and we usually use `-` instead of `_` as a word separator within
+ // components, for example `c.as-cpp` (since the top-level component ends up
+ // in the library name; but this is not a hard rule).
// Base class for module instance.
//
@@ -145,9 +151,9 @@ namespace build2
//
// The <name> part in the function name is the main module name without
// submodule components (for example, `c` in `c.config`) and the load
- // function is expected to return boot/init functions for all its submodules
- // (if any) as well as for the module itself as an array of module_functions
- // terminated with an all-NULL entry.
+ // function is expected to return boot/init functions as an array of
+ // module_functions: entries for all its submodules (if any) first, followed
+ // by the module itself, and terminated with an all-NULL entry.
//
// Note that the load function is guaranteed to be called during serial
// execution (either from main() or during the load phase).
@@ -155,7 +161,31 @@ namespace build2
extern "C"
using module_load_function = const module_functions* ();
- // Module state.
+ // Imported module state.
+ //
+ // The module name is the main module (corresponding to the library). If
+ // found is false then this module could not be imported from this project.
+ //
+ struct module_import
+ {
+ const string name;
+ bool found;
+ };
+
+ struct module_import_map: vector<module_import>
+ {
+ iterator
+ find (const string& name)
+ {
+ return find_if (
+ begin (), end (),
+ [&name] (const module_import& i) {return i.name == name;});
+ }
+ };
+
+ // Loaded module state.
+ //
+ // Note that unlike module_import, the module name here could be a submodule.
//
struct module_state
{
@@ -167,7 +197,7 @@ namespace build2
optional<module_boot_init> boot_init;
};
- struct module_map: vector<module_state>
+ struct module_state_map: vector<module_state>
{
iterator
find (const string& name)
@@ -268,23 +298,28 @@ namespace build2
return static_cast<T&> (*load_module (root, base, name, l, config_hints));
}
- // Loaded modules (as in libraries).
+ // Loaded module libraries.
//
- // A NULL entry for the main module indicates that a module library was not
- // found.
+ // Note that this map contains entries for all the submodules.
//
- using loaded_module_map = map<string, const module_functions*>;
+ struct module_library
+ {
+ reference_wrapper<const module_functions> functions;
+ dir_path import_path; // Only for main module.
+ };
+
+ using module_libraries_map = map<string, module_library>;
- // The loaded_modules map is locked per top-level (as opposed to nested)
+ // The module_libraries map is locked per top-level (as opposed to nested)
// context (see context.hxx for details).
//
// Note: should only be constructed during contexts-wide serial execution.
//
- class LIBBUILD2_SYMEXPORT loaded_modules_lock
+ class LIBBUILD2_SYMEXPORT module_libraries_lock
{
public:
explicit
- loaded_modules_lock (context& c)
+ module_libraries_lock (context& c)
: ctx_ (c), lock_ (mutex_, defer_lock)
{
if (ctx_.modules_lock == nullptr)
@@ -294,7 +329,7 @@ namespace build2
}
}
- ~loaded_modules_lock ()
+ ~module_libraries_lock ()
{
if (ctx_.modules_lock == this)
ctx_.modules_lock = nullptr;
@@ -306,7 +341,7 @@ namespace build2
mlock lock_;
};
- LIBBUILD2_SYMEXPORT extern loaded_module_map loaded_modules;
+ LIBBUILD2_SYMEXPORT extern module_libraries_map module_libraries;
// Load a builtin module (i.e., a module linked as a static/shared library
// or that is part of the build system driver).
diff --git a/libbuild2/name.hxx b/libbuild2/name.hxx
index 7d179aa..f5cb2c5 100644
--- a/libbuild2/name.hxx
+++ b/libbuild2/name.hxx
@@ -178,7 +178,8 @@ namespace build2
// trailing directory separator then it is stored as a directory, otherwise
// as a simple name. Note that the returned name is never a pattern.
//
- // NOTE: this function does not parse the full name syntax.
+ // NOTE: this function does not parse the full name syntax. See context-less
+ // parser::parse_names() for a heavy-weight way to achieve this.
//
name
to_name (string);
diff --git a/libbuild2/operation.cxx b/libbuild2/operation.cxx
index 9fbf870..6f88e38 100644
--- a/libbuild2/operation.cxx
+++ b/libbuild2/operation.cxx
@@ -18,6 +18,10 @@
#include <libbuild2/algorithm.hxx>
#include <libbuild2/diagnostics.hxx>
+#if 0
+#include <libbuild2/adhoc-rule-buildscript.hxx> // @@ For a hack below.
+#endif
+
using namespace std;
using namespace butl;
@@ -60,7 +64,7 @@ namespace build2
true, // bootstrap_outer
nullptr, // meta-operation pre
nullptr, // operation pre
- &load,
+ &perform_load,
nullptr, // search
nullptr, // match
nullptr, // execute
@@ -72,16 +76,17 @@ namespace build2
// perform
//
void
- load (const values&,
- scope& root,
- const path& bf,
- const dir_path& out_base,
- const dir_path& src_base,
- const location&)
+ perform_load (const values&,
+ scope& root,
+ const path& bf,
+ const dir_path& out_base,
+ const dir_path& src_base,
+ const location&)
{
// Load project's root.build.
//
- load_root (root);
+ if (!root.root_extra->loaded)
+ load_root (root);
// Create the base scope. Note that its existence doesn't mean it was
// already setup as a base scope; it can be the same as root.
@@ -96,15 +101,15 @@ namespace build2
}
void
- search (const values&,
- const scope&,
- const scope& bs,
- const path& bf,
- const target_key& tk,
- const location& l,
- action_targets& ts)
+ perform_search (const values&,
+ const scope&,
+ const scope& bs,
+ const path& bf,
+ const target_key& tk,
+ const location& l,
+ action_targets& ts)
{
- tracer trace ("search");
+ tracer trace ("perform_search");
context& ctx (bs.ctx);
phase_lock pl (ctx, run_phase::match);
@@ -159,8 +164,9 @@ namespace build2
//
map.reserve (ctx.targets.size () / 2);
- bool e (false);
+ size_t count_matched (ctx.count_matched ());
+ bool e (false);
for (size_t pass (1); pass != 3; ++pass)
{
for (const auto& pt: ctx.targets)
@@ -175,8 +181,7 @@ namespace build2
//
const target::opstate& s (t->state[a]);
- if (s.task_count.load (memory_order_relaxed) - ctx.count_base () <
- target::offset_matched)
+ if (s.task_count.load (memory_order_relaxed) < count_matched)
continue;
// Skip if for some reason the path is not assigned.
@@ -226,9 +231,19 @@ namespace build2
}
else if (t->decl != target_decl::real)
{
- dr << info << "target " << *t << " is not explicitly declared "
- << "in any buildfile" <<
- info << "perhaps it is a dynamic dependency?";
+ if (t->decl == target_decl::implied)
+ {
+ dr << info << "target " << *t << " is implied by a buildfile";
+ }
+ else
+ {
+ dr << info << "target " << *t << " is not declared in a buildfile";
+
+ if (t->decl == target_decl::prereq_file)
+ dr << " but has corresponding existing file";
+
+ dr << info << "perhaps it is a dynamic dependency?";
+ }
}
}
}
@@ -238,9 +253,10 @@ namespace build2
}
void
- match (const values&, action a, action_targets& ts, uint16_t diag, bool prog)
+ perform_match (const values&, action a, action_targets& ts,
+ uint16_t diag, bool prog)
{
- tracer trace ("match");
+ tracer trace ("perform_match");
if (ts.empty ())
return;
@@ -266,10 +282,12 @@ namespace build2
// the up-to-date check on some projects (e.g., Boost). So we jump
// through a few hoops to make sure we don't overindulge.
//
- md.incr = stderr_term ? 5 : 100; // Scale depending on output type.
+ md.incr = stderr_term // Scale depending on output type.
+ ? (ctx.sched->serial () ? 1 : 5)
+ : 100;
md.what = " targets to " + diag_do (ctx, a);
- mg = ctx.sched.monitor (
+ mg = ctx.sched->monitor (
ctx.target_count,
md.incr,
[&md] (size_t c) -> size_t
@@ -301,6 +319,7 @@ namespace build2
// many we have started. Wait with unlocked phase to allow phase
// switching.
//
+ bool fail (false);
size_t i (0), n (ts.size ());
{
atomic_count task_count (0);
@@ -311,21 +330,85 @@ namespace build2
const target& t (ts[i].as<target> ());
l5 ([&]{trace << diag_doing (a, t);});
- target_state s (match_async (a, t, 0, task_count, false));
+ target_state s (match_async (a, t,
+ 0, task_count,
+ match_extra::all_options,
+ false /* fail */));
// Bail out if the target has failed and we weren't instructed to
// keep going.
//
- if (s == target_state::failed && !ctx.keep_going)
+ if (s == target_state::failed)
{
- ++i;
- break;
+ fail = true;
+
+ if (!ctx.keep_going)
+ {
+ ++i;
+ break;
+ }
}
}
wg.wait ();
}
+ // If we have any targets with post hoc prerequisites, match those.
+ //
+ // See match_posthoc() for the overall approach description.
+ //
+ bool posthoc_fail (false);
+ if (!ctx.current_posthoc_targets.empty () && (!fail || ctx.keep_going))
+ {
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
+ // Note that on each iteration we may end up with new entries at the
+ // back. Since we start and end each iteration in serial execution, we
+ // don't need to mess with the mutex.
+ //
+ for (const posthoc_target& p: ctx.current_posthoc_targets)
+ {
+ action a (p.action); // May not be the same as argument action.
+ const target& t (p.target);
+
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while matching to " << diag_do (t.ctx, a)
+ << " post hoc prerequisites of " << t;
+ });
+
+ // Cannot use normal match because incrementing dependency counts in
+ // the face of cycles does not work well (we will deadlock for the
+ // reverse execution mode).
+ //
+ // @@ PERF: match in parallel (need match_direct_async(), etc).
+ //
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ target_state s (match_direct_sync (a, *pt.target,
+ pt.match_options,
+ false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+ }
+
+ if (posthoc_fail && !ctx.keep_going)
+ break;
+ }
+ }
+
// Clear the progress if present.
//
if (mg)
@@ -336,15 +419,25 @@ namespace build2
// We are now running serially. Re-examine targets that we have matched.
//
- bool fail (false);
for (size_t j (0); j != n; ++j)
{
action_target& at (ts[j]);
const target& t (at.as<target> ());
- target_state s (j < i
- ? match_complete (a, t, false)
- : target_state::postponed);
+ // We cannot attribute post hoc failures to specific targets so it
+ // seems the best we can do is just fail them all.
+ //
+ target_state s;
+ if (j < i)
+ {
+ s = match_complete (a, t, match_extra::all_options, false /* fail */);
+
+ if (posthoc_fail)
+ s = /*t.state[a].state =*/ target_state::failed;
+ }
+ else
+ s = target_state::postponed;
+
switch (s)
{
case target_state::postponed:
@@ -395,16 +488,103 @@ namespace build2
}
void
- execute (const values&, action a, action_targets& ts,
- uint16_t diag, bool prog)
+ perform_execute (const values&, action a, action_targets& ts,
+ uint16_t diag, bool prog)
{
- tracer trace ("execute");
+ tracer trace ("perform_execute");
if (ts.empty ())
return;
context& ctx (ts[0].as<target> ().ctx);
+ bool posthoc_fail (false);
+ auto execute_posthoc = [&ctx, &posthoc_fail] ()
+ {
+ using posthoc_target = context::posthoc_target;
+ using posthoc_prerequisite_target = posthoc_target::prerequisite_target;
+
+ for (const posthoc_target& p: ctx.current_posthoc_targets)
+ {
+ action a (p.action); // May not be the same as argument action.
+ const target& t (p.target);
+
+ auto df = make_diag_frame (
+ [a, &t](const diag_record& dr)
+ {
+ if (verb != 0)
+ dr << info << "while " << diag_doing (t.ctx, a)
+ << " post hoc prerequisites of " << t;
+ });
+
+#if 0
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ target_state s (
+ execute_direct_sync (a, *pt.target, false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+ }
+#else
+ // Note: similar logic/reasoning to below except we use direct
+ // execution.
+ //
+ atomic_count tc (0);
+ wait_guard wg (ctx, tc);
+
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ target_state s (
+ execute_direct_async (a, *pt.target, 0, tc, false /*fail*/));
+
+ if (s == target_state::failed)
+ {
+ posthoc_fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
+ }
+ }
+
+ wg.wait ();
+
+ // Process the result.
+ //
+ for (const posthoc_prerequisite_target& pt: p.prerequisite_targets)
+ {
+ if (pt.target != nullptr)
+ {
+ // Similar to below, no need to wait.
+ //
+ target_state s (pt.target->executed_state (a, false /* fail */));
+
+ if (s == target_state::failed)
+ {
+ // Note: no need to keep going.
+ //
+ posthoc_fail = true;
+ break;
+ }
+ }
+ }
+#endif
+ if (posthoc_fail && !ctx.keep_going)
+ break;
+ }
+ };
+
// Reverse the order of targets if the execution mode is 'last'.
//
if (ctx.current_mode == execution_mode::last)
@@ -412,6 +592,7 @@ namespace build2
phase_lock pl (ctx, run_phase::execute); // Never switched.
+ bool fail (false);
{
// Tune the scheduler.
//
@@ -420,7 +601,7 @@ namespace build2
switch (ctx.current_inner_oif->concurrency)
{
- case 0: sched_tune = tune_guard (ctx.sched, 1); break; // Run serially.
+ case 0: sched_tune = tune_guard (*ctx.sched, 1); break; // Run serially.
case 1: break; // Run as is.
default: assert (false); // Not supported.
}
@@ -443,7 +624,7 @@ namespace build2
{
what = "% of targets " + diag_did (ctx, a);
- mg = ctx.sched.monitor (
+ mg = ctx.sched->monitor (
ctx.target_count,
init - incr,
[init, incr, &what, &ctx] (size_t c) -> size_t
@@ -468,9 +649,18 @@ namespace build2
}
}
+ // In the 'last' execution mode run post hoc first.
+ //
+ if (ctx.current_mode == execution_mode::last)
+ {
+ if (!ctx.current_posthoc_targets.empty ())
+ execute_posthoc ();
+ }
+
// Similar logic to execute_members(): first start asynchronous
// execution of all the top-level targets.
//
+ if (!posthoc_fail || ctx.keep_going)
{
atomic_count task_count (0);
wait_guard wg (ctx, task_count);
@@ -486,13 +676,24 @@ namespace build2
// Bail out if the target has failed and we weren't instructed to
// keep going.
//
- if (s == target_state::failed && !ctx.keep_going)
- break;
+ if (s == target_state::failed)
+ {
+ fail = true;
+
+ if (!ctx.keep_going)
+ break;
+ }
}
wg.wait ();
}
+ if (ctx.current_mode == execution_mode::first)
+ {
+ if (!ctx.current_posthoc_targets.empty () && (!fail || ctx.keep_going))
+ execute_posthoc ();
+ }
+
// We are now running serially.
//
@@ -528,15 +729,24 @@ namespace build2
// Re-examine all the targets and print diagnostics.
//
- bool fail (false);
for (action_target& at: ts)
{
const target& t (at.as<target> ());
- // Note that here we call executed_state() directly instead of
- // execute_complete() since we know there is no need to wait.
+ // Similar to match we cannot attribute post hoc failures to specific
+ // targets so it seems the best we can do is just fail them all.
//
- switch ((at.state = t.executed_state (a, false)))
+ if (!posthoc_fail)
+ {
+ // Note that here we call executed_state() directly instead of
+ // execute_complete() since we know there is no need to wait.
+ //
+ at.state = t.executed_state (a, false /* fail */);
+ }
+ else
+ at.state = /*t.state[a].state =*/ target_state::failed;
+
+ switch (at.state)
{
case target_state::unknown:
{
@@ -581,26 +791,191 @@ namespace build2
if (fail)
throw failed ();
- // We should have executed every target that we matched, provided we
+#ifndef NDEBUG
+ size_t base (ctx.count_base ());
+
+ // For now we disable these checks if we've performed any group member
+ // resolutions that required a match (with apply()) but not execute.
+ //
+ if (ctx.target_count.load (memory_order_relaxed) != 0 &&
+ ctx.resolve_count.load (memory_order_relaxed) != 0)
+ {
+ // These counts are only tracked for the inner operation.
+ //
+ action ia (a.outer () ? a.inner_action () : a);
+
+ // While it may seem that just decrementing the counters for every
+ // target with the resolve_counted flag set should be enough, this will
+ // miss any prerequisites that this target has matched but did not
+ // execute, which may affect both task_count and dependency_count. Note
+ // that this applies recursively and we effectively need to pretend to
+ // execute this target and all its prerequisites, recursively without
+ // actually executing any of their recipes.
+ //
+ // That last bit means we must be able to interpret the populated
+ // prerequisite_targets generically, which is a requirement we place on
+ // rules that resolve groups in apply (see target::group_members() for
+ // details). It so happens that our own adhoc_buildscript_rule doesn't
+ // follow this rule (see execute_update_prerequisites()) so we detect
+ // and handle this with a hack.
+ //
+ // @@ Hm, but there is no guarantee that this holds recursively since
+ // prerequisites may not be see-through groups. For this to work we
+ // would have to impose this restriction globally. Which we could
+ // probably do, just need to audit things carefully (especially
+ // cc::link_rule). But we already sort of rely on that for dump! Maybe
+ // should just require it everywhere and fix adhoc_buildscript_rule.
+ //
+ // @@ There are special recipes that don't populate prerequisite_targets
+ // like group_recipe! Are we banning any user-defined such recipes?
+ // Need to actually look if we have anything else like this. There
+ // is also inner_recipe, though doesn't apply here (only for outer).
+ //
+ // @@ TMP: do and enable after the 0.16.0 release.
+ //
+ // Note: recursive lambda.
+ //
+#if 0
+ auto pretend_execute = [base, ia] (target& t,
+ const auto& pretend_execute) -> void
+ {
+ context& ctx (t.ctx);
+
+ // Note: tries to emulate the execute_impl() functions semantics.
+ //
+ auto execute_impl = [base, ia, &ctx, &pretend_execute] (target& t)
+ {
+ target::opstate& s (t.state[ia]);
+
+ size_t gd (ctx.dependency_count.fetch_sub (1, memory_order_relaxed));
+ size_t td (s.dependents.fetch_sub (1, memory_order_release));
+ assert (td != 0 && gd != 0);
+
+ // Execute unless already executed.
+ //
+ if (s.task_count.load (memory_order_relaxed) !=
+ base + target::offset_executed)
+ pretend_execute (t, pretend_execute);
+ };
+
+ target::opstate& s (t.state[ia]);
+
+ if (s.state != target_state::unchanged) // Noop recipe.
+ {
+ if (s.recipe_group_action)
+ {
+ execute_impl (const_cast<target&> (*t.group));
+ }
+ else
+ {
+ // @@ Special hack for adhoc_buildscript_rule (remember to drop
+ // include above if getting rid of).
+ //
+ bool adhoc (
+ ia == perform_update_id &&
+ s.rule != nullptr &&
+ dynamic_cast<const adhoc_buildscript_rule*> (
+ &s.rule->second.get ()) != nullptr);
+
+ for (const prerequisite_target& p: t.prerequisite_targets[ia])
+ {
+ const target* pt;
+
+ if (adhoc)
+ pt = (p.target != nullptr ? p.target :
+ p.adhoc () ? reinterpret_cast<target*> (p.data) :
+ nullptr);
+ else
+ pt = p.target;
+
+ if (pt != nullptr)
+ execute_impl (const_cast<target&> (*pt));
+ }
+
+ ctx.target_count.fetch_sub (1, memory_order_relaxed);
+ if (s.resolve_counted)
+ {
+ s.resolve_counted = false;
+ ctx.resolve_count.fetch_sub (1, memory_order_relaxed);
+ }
+ }
+
+ s.state = target_state::changed;
+ }
+
+ s.task_count.store (base + target::offset_executed,
+ memory_order_relaxed);
+ };
+#endif
+
+ for (const auto& pt: ctx.targets)
+ {
+ target& t (*pt);
+ target::opstate& s (t.state[ia]);
+
+ // We are only interested in the targets that have been matched for
+ // this operation and are in the applied state.
+ //
+ if (s.task_count.load (memory_order_relaxed) !=
+ base + target::offset_applied)
+ continue;
+
+ if (s.resolve_counted)
+ {
+#if 0
+ pretend_execute (t, pretend_execute);
+
+ if (ctx.resolve_count.load (memory_order_relaxed) == 0)
+ break;
+#else
+ return; // Skip all the below checks.
+#endif
+ }
+ }
+ }
+
+ // We should have executed every target that we have matched, provided we
// haven't failed (in which case we could have bailed out early).
//
assert (ctx.target_count.load (memory_order_relaxed) == 0);
+ assert (ctx.resolve_count.load (memory_order_relaxed) == 0); // Sanity check.
-#ifndef NDEBUG
if (ctx.dependency_count.load (memory_order_relaxed) != 0)
{
+ auto dependents = [base] (action a, const target& t)
+ {
+ const target::opstate& s (t.state[a]);
+
+ // Only consider targets that have been matched for this operation
+ // (since matching is what causes the dependents count reset).
+ //
+ size_t c (s.task_count.load (memory_order_relaxed));
+
+ return (c >= base + target::offset_applied
+ ? s.dependents.load (memory_order_relaxed)
+ : 0);
+ };
+
diag_record dr;
dr << info << "detected unexecuted matched targets:";
for (const auto& pt: ctx.targets)
{
const target& t (*pt);
- if (size_t n = t[a].dependents.load (memory_order_relaxed))
+
+ if (size_t n = dependents (a, t))
dr << text << t << ' ' << n;
+
+ if (a.outer ())
+ {
+ if (size_t n = dependents (a.inner_action (), t))
+ dr << text << t << ' ' << n;
+ }
}
}
-#endif
+
assert (ctx.dependency_count.load (memory_order_relaxed) == 0);
+#endif
}
const meta_operation_info mo_perform {
@@ -613,10 +988,10 @@ namespace build2
true, // bootstrap_outer
nullptr, // meta-operation pre
nullptr, // operation pre
- &load,
- &search,
- &match,
- &execute,
+ &perform_load,
+ &perform_search,
+ &perform_match,
+ &perform_execute,
nullptr, // operation post
nullptr, // meta-operation post
nullptr // include
@@ -627,31 +1002,66 @@ namespace build2
// Note: similar approach to forward() in configure.
//
- static bool
- info_json (const values& params,
- const char* mo = nullptr,
- const location& l = location ())
+ struct info_params
{
+ bool json = false;
+ bool subprojects = true;
+ };
+
+ // Note: should not fail if mo is NULL (see info_subprojects() below).
+ //
+ static info_params
+ info_parse_params (const values& params,
+ const char* mo = nullptr,
+ const location& l = location ())
+ {
+ info_params r;
+
if (params.size () == 1)
{
- const names& ns (cast<names> (params[0]));
+ for (const name& n: cast<names> (params[0]))
+ {
+ if (n.simple ())
+ {
+ if (n.value == "json")
+ {
+ r.json = true;
+ continue;
+ }
+
+ if (n.value == "no_subprojects")
+ {
+ r.subprojects = false;
+ continue;
+ }
+
+ // Fall through.
+ }
- if (ns.size () == 1 && ns[0].simple () && ns[0].value == "json")
- return true;
- else if (!ns.empty ())
- fail (l) << "unexpected parameter '" << ns << "' for "
- << "meta-operation " << mo;
+ if (mo != nullptr)
+ fail (l) << "unexpected parameter '" << n << "' for "
+ << "meta-operation " << mo;
+ }
}
else if (!params.empty ())
+ {
+ if (mo != nullptr)
fail (l) << "unexpected parameters for meta-operation " << mo;
+ }
- return false;
+ return r;
+ }
+
+ bool
+ info_subprojects (const values& params)
+ {
+ return info_parse_params (params).subprojects;
}
static void
info_pre (context&, const values& params, const location& l)
{
- info_json (params, "info", l); // Validate.
+ info_parse_params (params, "info", l); // Validate.
}
static operation_id
@@ -704,7 +1114,7 @@ namespace build2
}
static void
- info_execute_lines (action_targets& ts)
+ info_execute_lines (action_targets& ts, bool subp)
{
for (size_t i (0); i != ts.size (); ++i)
{
@@ -737,7 +1147,7 @@ namespace build2
//
auto print_mods = [&rs] ()
{
- for (const module_state& ms: rs.root_extra->modules)
+ for (const module_state& ms: rs.root_extra->loaded_modules)
cout << ' ' << ms.name;
};
@@ -778,8 +1188,13 @@ namespace build2
<< "url:" ; print_empty (cast_empty<string> (rs[ctx.var_project_url])); cout << endl
<< "src_root:" ; print_dir (cast<dir_path> (rs[ctx.var_src_root])); cout << endl
<< "out_root:" ; print_dir (cast<dir_path> (rs[ctx.var_out_root])); cout << endl
- << "amalgamation:" ; print_pdir (*rs.root_extra->amalgamation); cout << endl
- << "subprojects:" ; print_null (*rs.root_extra->subprojects); cout << endl
+ << "amalgamation:" ; print_pdir (*rs.root_extra->amalgamation); cout << endl;
+ if (subp)
+ {
+ cout
+ << "subprojects:" ; print_null (*rs.root_extra->subprojects); cout << endl;
+ }
+ cout
<< "operations:" ; print_ops (rs.root_extra->operations, ctx.operation_table); cout << endl
<< "meta-operations:"; print_ops (rs.root_extra->meta_operations, ctx.meta_operation_table); cout << endl
<< "modules:" ; print_mods (); cout << endl;
@@ -788,7 +1203,7 @@ namespace build2
#ifndef BUILD2_BOOTSTRAP
static void
- info_execute_json (action_targets& ts)
+ info_execute_json (action_targets& ts, bool subp)
{
json::stream_serializer s (cout);
s.begin_array ();
@@ -859,6 +1274,7 @@ namespace build2
// Print subprojects.
//
+ if (subp)
{
const subprojects* sps (*rs.root_extra->subprojects);
@@ -902,12 +1318,12 @@ namespace build2
// Print modules.
//
- if (!rs.root_extra->modules.empty ())
+ if (!rs.root_extra->loaded_modules.empty ())
{
s.member_name ("modules", false /* check */);
s.begin_array ();
- for (const module_state& ms: rs.root_extra->modules)
+ for (const module_state& ms: rs.root_extra->loaded_modules)
s.value (ms.name, false /* check */);
s.end_array ();
@@ -921,7 +1337,7 @@ namespace build2
}
#else
static void
- info_execute_json (action_targets&)
+ info_execute_json (action_targets&, bool)
{
}
#endif //BUILD2_BOOTSTRAP
@@ -933,14 +1349,16 @@ namespace build2
uint16_t,
bool)
{
+ info_params ip (info_parse_params (params));
+
// Note that both outputs will not be "ideal" if the user does something
// like `b info(foo/) info(bar/)` instead of `b info(foo/ bar/)`. Oh,
// well.
//
- if (info_json (params))
- info_execute_json (ts);
+ if (ip.json)
+ info_execute_json (ts, ip.subprojects);
else
- info_execute_lines (ts);
+ info_execute_lines (ts, ip.subprojects);
}
const meta_operation_info mo_info {
@@ -968,7 +1386,6 @@ namespace build2
default_id,
0,
"<default>",
- nullptr,
"",
"",
"",
@@ -978,6 +1395,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
@@ -996,7 +1415,6 @@ namespace build2
0,
"update",
"update",
- "update",
"updating",
"updated",
"is up to date",
@@ -1005,6 +1423,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
@@ -1013,7 +1433,6 @@ namespace build2
0,
"clean",
"clean",
- "clean",
"cleaning",
"cleaned",
"is clean",
@@ -1022,6 +1441,8 @@ namespace build2
nullptr,
nullptr,
nullptr,
+ nullptr,
+ nullptr,
nullptr
};
}
diff --git a/libbuild2/operation.hxx b/libbuild2/operation.hxx
index ce3cd79..e8ff38a 100644
--- a/libbuild2/operation.hxx
+++ b/libbuild2/operation.hxx
@@ -121,6 +121,8 @@ namespace build2
// End of operation and meta-operation batches.
//
+ // Note: not called in case any of the earlier callbacks failed.
+ //
void (*operation_post) (context&, const values&, operation_id);
void (*meta_operation_post) (context&, const values&);
@@ -147,41 +149,46 @@ namespace build2
// scope.
//
LIBBUILD2_SYMEXPORT void
- load (const values&,
- scope&,
- const path&,
- const dir_path&,
- const dir_path&,
- const location&);
+ perform_load (const values&,
+ scope&,
+ const path&,
+ const dir_path&,
+ const dir_path&,
+ const location&);
// Search and match the target. This is the default implementation
// that does just that and adds a pointer to the target to the list.
//
LIBBUILD2_SYMEXPORT void
- search (const values&,
- const scope&,
- const scope&,
- const path&,
- const target_key&,
- const location&,
- action_targets&);
+ perform_search (const values&,
+ const scope&,
+ const scope&,
+ const path&,
+ const target_key&,
+ const location&,
+ action_targets&);
LIBBUILD2_SYMEXPORT void
- match (const values&, action, action_targets&,
- uint16_t diag, bool prog);
+ perform_match (const values&, action, action_targets&,
+ uint16_t diag, bool prog);
// Execute the action on the list of targets. This is the default
// implementation that does just that while issuing appropriate
// diagnostics (unless quiet).
//
LIBBUILD2_SYMEXPORT void
- execute (const values&, action, const action_targets&,
- uint16_t diag, bool prog);
+ perform_execute (const values&, action, const action_targets&,
+ uint16_t diag, bool prog);
LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_noop;
LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_perform;
LIBBUILD2_SYMEXPORT extern const meta_operation_info mo_info;
+ // Return true if params does not contain no_subprojects.
+ //
+ LIBBUILD2_SYMEXPORT bool
+ info_subprojects (const values& params);
+
// Operation info.
//
// NOTE: keep POD-like to ensure can be constant-initialized in order to
@@ -196,7 +203,6 @@ namespace build2
const operation_id id;
const operation_id outer_id;
const char* name;
- const char* var_name; // Operation variable or NULL if not used.
// Name derivatives for diagnostics. Note that unlike meta-operations,
// these can only be empty for the default operation (id 1), And
@@ -219,14 +225,22 @@ namespace build2
//
const size_t concurrency;
- // The first argument in all the callbacks is the operation parameters.
+ // The values argument in the callbacks is the operation parameters. If
+ // the operation expects parameters, then it should have a non-NULL
+ // operation_pre() callback. Failed that, any parameters will be diagnosed
+ // as unexpected.
//
- // If the operation expects parameters, then it should have a non-NULL
- // pre(). Failed that, any parameters will be diagnosed as unexpected.
+ // Note also that if the specified operation has outer (for example,
+ // update-for-install), then parameters belong to outer (for example,
+ // install; this is done in order to be consistent with the case when
+ // update is performed as a pre-operation of install).
- // If the returned operation_id's are not 0, then they are injected
- // as pre/post operations for this operation. Can be NULL if unused.
- // The returned operation_id shall not be default_id.
+ // Pre/post operations for this operation. Note that these callbacks are
+ // called before this operation becomes current.
+ //
+ // If the returned by pre/post_*() operation_id's are not 0, then they are
+ // injected as pre/post operations for this operation. Can be NULL if
+ // unused. The returned operation_id shall not be default_id.
//
operation_id (*pre_operation) (
context&, const values&, meta_operation_id, const location&);
@@ -234,6 +248,16 @@ namespace build2
operation_id (*post_operation) (
context&, const values&, meta_operation_id);
+ // Called immediately after/before this operation becomes/ceases to be
+ // current operation for the specified context. Can be used to
+ // initialize/finalize operation-specific data (context::current_*_odata).
+ // Can be NULL if unused.
+ //
+ void (*operation_pre) (
+ context&, const values&, bool inner, const location&);
+ void (*operation_post) (
+ context&, const values&, bool inner);
+
// Operation-specific ad hoc rule callbacks. Essentially, if not NULL,
// then every ad hoc rule match and apply call for this operation is
// proxied through these functions.
@@ -308,35 +332,36 @@ namespace build2
using operation_table = butl::string_table<operation_id>;
- // These are "sparse" in the sense that we may have "holes" that
- // are represented as NULL pointers. Also, lookup out of bounds
- // is treated as a hole.
+ // This is a "sparse" vector in the sense that we may have "holes" that are
+ // represented as default-initialized empty instances (for example, NULL if
+ // T is a pointer). Also, lookup out of bounds is treated as a hole.
//
template <typename T, size_t N>
struct sparse_vector
{
- using base_type = small_vector<T*, N>;
+ using base_type = small_vector<T, N>;
using size_type = typename base_type::size_type;
void
- insert (size_type i, T& x)
+ insert (size_type i, T x)
{
size_type n (v_.size ());
if (i < n)
- v_[i] = &x;
+ v_[i] = x;
else
{
if (n != i)
- v_.resize (i, nullptr); // Add holes.
- v_.push_back (&x);
+ v_.resize (i, T ()); // Add holes.
+
+ v_.push_back (move (x));
}
}
- T*
+ T
operator[] (size_type i) const
{
- return i < v_.size () ? v_[i] : nullptr;
+ return i < v_.size () ? v_[i] : T ();
}
bool
@@ -351,8 +376,28 @@ namespace build2
base_type v_;
};
- using meta_operations = sparse_vector<const meta_operation_info, 8>;
- using operations = sparse_vector<const operation_info, 10>;
+ // For operations we keep both the pointer to its description as well
+ // as to its operation variable (see var_include) which may belong to
+ // the project-private variable pool.
+ //
+ struct project_operation_info
+ {
+ const operation_info* info = nullptr;
+ const variable* ovar = nullptr; // Operation variable.
+
+ // Allow treating it as pointer to operation_info in most contexts.
+ //
+ operator const operation_info*() const {return info;}
+ bool operator== (nullptr_t) {return info == nullptr;}
+ bool operator!= (nullptr_t) {return info != nullptr;}
+
+ project_operation_info (const operation_info* i = nullptr, // VC14
+ const variable* v = nullptr)
+ : info (i), ovar (v) {}
+ };
+
+ using meta_operations = sparse_vector<const meta_operation_info*, 8>;
+ using operations = sparse_vector<project_operation_info, 10>;
}
namespace butl
diff --git a/libbuild2/parser.cxx b/libbuild2/parser.cxx
index 99e67a7..5321cd5 100644
--- a/libbuild2/parser.cxx
+++ b/libbuild2/parser.cxx
@@ -24,6 +24,8 @@
#include <libbuild2/adhoc-rule-regex-pattern.hxx>
+#include <libbuild2/dist/module.hxx> // module
+
#include <libbuild2/config/utility.hxx> // lookup_config
using namespace std;
@@ -42,7 +44,10 @@ namespace build2
{
o << '=';
names storage;
- to_stream (o, reverse (a.value, storage), quote_mode::normal, '@');
+ to_stream (o,
+ reverse (a.value, storage, true /* reduce */),
+ quote_mode::normal,
+ '@');
}
return o;
@@ -57,27 +62,7 @@ namespace build2
enter_scope (parser& p, dir_path&& d)
: p_ (&p), r_ (p.root_), s_ (p.scope_), b_ (p.pbase_)
{
- // Try hard not to call normalize(). Most of the time we will go just
- // one level deeper.
- //
- bool n (true);
-
- if (d.relative ())
- {
- // Relative scopes are opened relative to out, not src.
- //
- if (d.simple () && !d.current () && !d.parent ())
- {
- d = dir_path (p.scope_->out_path ()) /= d.string ();
- n = false;
- }
- else
- d = p.scope_->out_path () / d;
- }
-
- if (n)
- d.normalize ();
-
+ complete_normalize (*p.scope_, d);
e_ = p.switch_scope (d);
}
@@ -103,8 +88,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_scope (enter_scope&& x) {*this = move (x);}
- enter_scope& operator= (enter_scope&& x)
+ enter_scope (enter_scope&& x) noexcept {*this = move (x);}
+ enter_scope& operator= (enter_scope&& x) noexcept
{
if (this != &x)
{
@@ -121,6 +106,31 @@ namespace build2
enter_scope (const enter_scope&) = delete;
enter_scope& operator= (const enter_scope&) = delete;
+ static void
+ complete_normalize (scope& s, dir_path& d)
+ {
+ // Try hard not to call normalize(). Most of the time we will go just
+ // one level deeper.
+ //
+ bool n (true);
+
+ if (d.relative ())
+ {
+ // Relative scopes are opened relative to out, not src.
+ //
+ if (d.simple () && !d.current () && !d.parent ())
+ {
+ d = dir_path (s.out_path ()) /= d.string ();
+ n = false;
+ }
+ else
+ d = s.out_path () / d;
+ }
+
+ if (n)
+ d.normalize ();
+ }
+
private:
parser* p_;
scope* r_;
@@ -162,7 +172,11 @@ namespace build2
tracer& tr)
{
auto r (p.scope_->find_target_type (n, o, loc));
- return p.ctx.targets.insert (
+
+ if (r.first.factory == nullptr)
+ p.fail (loc) << "abstract target type " << r.first.name << "{}";
+
+ return p.ctx->targets.insert (
r.first, // target type
move (n.dir),
move (o.dir),
@@ -182,12 +196,16 @@ namespace build2
tracer& tr)
{
auto r (p.scope_->find_target_type (n, o, loc));
- return p.ctx.targets.find (r.first, // target type
- n.dir,
- o.dir,
- n.value,
- r.second, // extension
- tr);
+
+ if (r.first.factory == nullptr)
+ p.fail (loc) << "abstract target type " << r.first.name << "{}";
+
+ return p.ctx->targets.find (r.first, // target type
+ n.dir,
+ o.dir,
+ n.value,
+ r.second, // extension
+ tr);
}
~enter_target ()
@@ -198,8 +216,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_target (enter_target&& x) {*this = move (x);}
- enter_target& operator= (enter_target&& x) {
+ enter_target (enter_target&& x) noexcept {*this = move (x);}
+ enter_target& operator= (enter_target&& x) noexcept {
p_ = x.p_; t_ = x.t_; x.p_ = nullptr; return *this;}
enter_target (const enter_target&) = delete;
@@ -230,8 +248,8 @@ namespace build2
// Note: move-assignable to empty only.
//
- enter_prerequisite (enter_prerequisite&& x) {*this = move (x);}
- enter_prerequisite& operator= (enter_prerequisite&& x) {
+ enter_prerequisite (enter_prerequisite&& x) noexcept {*this = move (x);}
+ enter_prerequisite& operator= (enter_prerequisite&& x) noexcept {
p_ = x.p_; r_ = x.r_; x.p_ = nullptr; return *this;}
enter_prerequisite (const enter_prerequisite&) = delete;
@@ -247,6 +265,7 @@ namespace build2
{
pre_parse_ = false;
attributes_.clear ();
+ condition_ = nullopt;
default_target_ = nullptr;
peeked_ = false;
replay_ = replay::stop;
@@ -259,10 +278,11 @@ namespace build2
scope* root,
scope& base,
target* tgt,
- prerequisite* prq)
+ prerequisite* prq,
+ bool enter)
{
lexer l (is, in);
- parse_buildfile (l, root, base, tgt, prq);
+ parse_buildfile (l, root, base, tgt, prq, enter);
}
void parser::
@@ -270,7 +290,8 @@ namespace build2
scope* root,
scope& base,
target* tgt,
- prerequisite* prq)
+ prerequisite* prq,
+ bool enter)
{
path_ = &l.name ();
lexer_ = &l;
@@ -289,9 +310,9 @@ namespace build2
? auto_project_env (*root_)
: auto_project_env ());
- if (path_->path != nullptr)
- enter_buildfile (*path_->path); // Note: needs scope_.
-
+ const buildfile* bf (enter && path_->path != nullptr
+ ? &enter_buildfile<buildfile> (*path_->path)
+ : nullptr);
token t;
type tt;
next (t, tt);
@@ -303,13 +324,34 @@ namespace build2
else
{
parse_clause (t, tt);
- process_default_target (t);
+
+ if (stage_ != stage::boot && stage_ != stage::root)
+ process_default_target (t, bf);
}
if (tt != type::eos)
fail (t) << "unexpected " << t;
}
+ names parser::
+ parse_export_stub (istream& is, const path_name& name,
+ const scope& rs, scope& gs, scope& ts)
+ {
+ // Enter the export stub manually with correct out.
+ //
+ if (name.path != nullptr)
+ {
+ dir_path out (!rs.out_eq_src ()
+ ? out_src (name.path->directory (), rs)
+ : dir_path ());
+
+ enter_buildfile<buildfile> (*name.path, move (out));
+ }
+
+ parse_buildfile (is, name, &gs, ts, nullptr, nullptr, false /* enter */);
+ return move (export_value);
+ }
+
token parser::
parse_variable (lexer& l, scope& s, const variable& var, type kind)
{
@@ -355,6 +397,37 @@ namespace build2
return make_pair (move (lhs), move (t));
}
+ names parser::
+ parse_names (lexer& l,
+ const dir_path* b,
+ pattern_mode pmode,
+ const char* what,
+ const string* separators)
+ {
+ path_ = &l.name ();
+ lexer_ = &l;
+
+ root_ = nullptr;
+ scope_ = nullptr;
+ target_ = nullptr;
+ prerequisite_ = nullptr;
+
+ pbase_ = b;
+
+ token t;
+ type tt;
+
+ mode (lexer_mode::value, '@');
+ next (t, tt);
+
+ names r (parse_names (t, tt, pmode, what, separators));
+
+ if (tt != type::eos)
+ fail (t) << "unexpected " << t;
+
+ return r;
+ }
+
value parser::
parse_eval (lexer& l, scope& rs, scope& bs, pattern_mode pmode)
{
@@ -544,6 +617,12 @@ namespace build2
{
f = &parser::parse_config_environment;
}
+ else if (n == "recipe")
+ {
+ // Valid only after recipe header (%).
+ //
+ fail (t) << n << " directive without % recipe header";
+ }
if (f != nullptr)
{
@@ -560,9 +639,39 @@ namespace build2
location nloc (get_location (t));
names ns;
- if (tt != type::labrace)
+ // We have to parse names in chunks to detect invalid cases of the
+ // group{foo}<...> syntax.
+ //
+ // Consider (1):
+ //
+ // x =
+ // group{foo} $x<...>:
+ //
+ // And (2):
+ //
+ // x = group{foo} group{bar}
+ // $x<...>:
+ //
+ // As well as (3):
+ //
+ // <...><...>:
+ //
+ struct chunk
{
- ns = parse_names (t, tt, pattern_mode::preserve);
+ size_t pos; // Index in ns of the beginning of the last chunk.
+ location loc; // Position of the beginning of the last chunk.
+ };
+ optional<chunk> ns_last;
+
+ bool labrace_first (tt == type::labrace);
+ if (!labrace_first)
+ {
+ do
+ {
+ ns_last = chunk {ns.size (), get_location (t)};
+ parse_names (t, tt, ns, pattern_mode::preserve, true /* chunk */);
+ }
+ while (start_names (tt));
// Allow things like function calls that don't result in anything.
//
@@ -578,44 +687,87 @@ namespace build2
}
}
- // Handle ad hoc target group specification (<...>).
+ // Handle target group specification (<...>).
//
// We keep an "optional" (empty) vector of names parallel to ns that
- // contains the ad hoc group members.
+ // contains the group members. Note that when we "catch" gns up to ns,
+ // we populate it with ad hoc (as opposed to explicit) groups with no
+ // members.
//
- adhoc_names ans;
+ group_names gns;
if (tt == type::labrace)
{
- while (tt == type::labrace)
+ for (; tt == type::labrace; labrace_first = false)
{
- // Parse target names inside < >.
+ // Detect explicit group (group{foo}<...>).
+ //
+ // Note that `<` first thing on the line is not separated thus the
+ // labrace_first complication.
+ //
+ bool expl (!t.separated && !labrace_first);
+ if (expl)
+ {
+ // Note: (N) refers to the example in the above comment.
+ //
+ if (!ns_last /* (3) */ || ns_last->pos == ns.size () /* (1) */)
+ {
+ fail (t) << "group name or whitespace expected before '<'";
+ }
+ else
+ {
+ size_t n (ns.size () - ns_last->pos);
+
+ // Note: could be a pair.
+ //
+ if ((n > 2 || (n == 2 && !ns[ns_last->pos].pair)) /* (2) */)
+ {
+ fail (t) << "single group name or whitespace expected before "
+ << "'<' instead of '"
+ << names_view (ns.data () + ns_last->pos, n) << "'";
+ }
+ }
+ }
+
+ // Parse target names inside <>.
//
// We "reserve" the right to have attributes inside <> though what
// exactly that would mean is unclear. One potentially useful
- // semantics would be the ability to specify attributes for ad hoc
- // members though the fact that the primary target is listed first
- // would make it rather unintuitive. Maybe attributes that change
- // the group semantics itself?
+ // semantics would be the ability to specify attributes for group
+ // members though the fact that the primary target for ad hoc groups
+ // is listed first would make it rather unintuitive. Maybe
+ // attributes that change the group semantics itself?
//
next_with_attributes (t, tt);
auto at (attributes_push (t, tt));
if (at.first)
- fail (at.second) << "attributes before ad hoc target";
+ fail (at.second) << "attributes before group member";
else
attributes_pop ();
- // Allow empty case (<>).
+ // For explicit groups, the group target is already in ns and all
+ // the members should go straight to gns.
//
- if (tt != type::rabrace)
+ // For ad hoc groups, the first name (or a pair) is the primary
+ // target which we need to keep in ns. The rest, if any, are ad
+ // hoc members that we should move to gns.
+ //
+ if (expl)
+ {
+ gns.resize (ns.size ()); // Catch up with the names vector.
+ group_names_loc& g (gns.back ());
+ g.expl = true;
+ g.group_loc = move (ns_last->loc);
+ g.member_loc = get_location (t); // Start of members.
+
+ if (tt != type::rabrace) // Handle empty case (<>)
+ parse_names (t, tt, g.ns, pattern_mode::preserve);
+ }
+ else if (tt != type::rabrace) // Allow and ignore empty case (<>).
{
- location aloc (get_location (t));
+ location mloc (get_location (t)); // Start of members.
- // The first name (or a pair) is the primary target which we need
- // to keep in ns. The rest, if any, are ad hoc members that we
- // should move to ans.
- //
size_t m (ns.size ());
parse_names (t, tt, ns, pattern_mode::preserve);
size_t n (ns.size ());
@@ -632,11 +784,10 @@ namespace build2
{
n -= m; // Number of names in ns we should end up with.
- ans.resize (n); // Catch up with the names vector.
- adhoc_names_loc& a (ans.back ());
-
- a.loc = move (aloc);
- a.ns.insert (a.ns.end (),
+ gns.resize (n); // Catch up with the names vector.
+ group_names_loc& g (gns.back ());
+ g.group_loc = g.member_loc = move (mloc);
+ g.ns.insert (g.ns.end (),
make_move_iterator (ns.begin () + n),
make_move_iterator (ns.end ()));
ns.resize (n);
@@ -650,12 +801,16 @@ namespace build2
// Parse the next chunk of target names after >, if any.
//
next (t, tt);
- if (start_names (tt))
- parse_names (t, tt, ns, pattern_mode::preserve);
+ ns_last = nullopt; // To detect <...><...>.
+ while (start_names (tt))
+ {
+ ns_last = chunk {ns.size (), get_location (t)};
+ parse_names (t, tt, ns, pattern_mode::preserve, true /* chunk */);
+ }
}
- if (!ans.empty ())
- ans.resize (ns.size ()); // Catch up with the final chunk.
+ if (!gns.empty ())
+ gns.resize (ns.size ()); // Catch up with the final chunk.
if (tt != type::colon)
fail (t) << "expected ':' instead of " << t;
@@ -683,10 +838,11 @@ namespace build2
// evaluated. The function signature is:
//
// void (token& t, type& tt,
+ // optional<bool> member, // true -- explicit, false -- ad hoc
// optional<pattern_type>, const target_type* pat_tt, string pat,
// const location& pat_loc)
//
- // Note that the target and its ad hoc members are inserted implied
+ // Note that the target and its group members are inserted implied
// but this flag can be cleared and default_target logic applied if
// appropriate.
//
@@ -776,23 +932,31 @@ namespace build2
// Resolve target type. If none is specified, then it's file{}.
//
+ // Note: abstract target type is ok here.
+ //
const target_type* ttype (n.untyped ()
? &file::static_type
: scope_->find_target_type (n.type));
if (ttype == nullptr)
- fail (nloc) << "unknown target type " << n.type;
+ fail (nloc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
- f (t, tt, n.pattern, ttype, move (n.value), nloc);
+ f (t, tt, nullopt, n.pattern, ttype, move (n.value), nloc);
};
auto for_each = [this, &trace, &for_one_pat,
- &t, &tt, &as, &ns, &nloc, &ans] (auto&& f)
+ &t, &tt, &as, &ns, &nloc, &gns] (auto&& f)
{
+ // We need replay if we have multiple targets or group members.
+ //
// Note: watch out for an out-qualified single target (two names).
//
replay_guard rg (*this,
- ns.size () > 2 || (ns.size () == 2 && !ns[0].pair));
+ ns.size () > 2 ||
+ (ns.size () == 2 && !ns[0].pair) ||
+ !gns.empty ());
for (size_t i (0), e (ns.size ()); i != e; )
{
@@ -812,8 +976,9 @@ namespace build2
if (n.pair)
fail (nloc) << "out-qualified target type/pattern";
- if (!ans.empty () && !ans[i].ns.empty ())
- fail (ans[i].loc) << "ad hoc member in target type/pattern";
+ if (!gns.empty () && !gns[i].ns.empty ())
+ fail (gns[i].member_loc)
+ << "group member in target type/pattern";
if (*n.pattern == pattern_type::regex_substitution)
fail (nloc) << "regex substitution " << n << " without "
@@ -823,27 +988,47 @@ namespace build2
}
else
{
- name o (n.pair ? move (ns[++i]) : name ());
- enter_target tg (*this,
- move (n),
- move (o),
- true /* implied */,
- nloc,
- trace);
-
- if (!as.empty ())
- apply_target_attributes (*target_, as);
-
- // Enter ad hoc members.
- //
- if (!ans.empty ())
+ bool expl;
+ vector<reference_wrapper<target>> gms;
{
- // Note: index after the pair increment.
+ name o (n.pair ? move (ns[++i]) : name ());
+ enter_target tg (*this,
+ move (n),
+ move (o),
+ true /* implied */,
+ nloc,
+ trace);
+
+ if (!as.empty ())
+ apply_target_attributes (*target_, as);
+
+ // Enter group members.
//
- enter_adhoc_members (move (ans[i]), true /* implied */);
+ if (!gns.empty ())
+ {
+ // Note: index after the pair increment.
+ //
+ group_names_loc& g (gns[i]);
+ expl = g.expl;
+
+ if (expl && !target_->is_a<group> ())
+ fail (g.group_loc) << *target_ << " is not group target";
+
+ gms = expl
+ ? enter_explicit_members (move (g), true /* implied */)
+ : enter_adhoc_members (move (g), true /* implied */);
+ }
+
+ f (t, tt, nullopt, nullopt, nullptr, string (), location ());
}
- f (t, tt, nullopt, nullptr, string (), location ());
+ for (target& gm: gms)
+ {
+ rg.play (); // Replay.
+
+ enter_target tg (*this, gm);
+ f (t, tt, expl, nullopt, nullptr, string (), location ());
+ }
}
if (++i != e)
@@ -897,12 +1082,15 @@ namespace build2
ploc = get_location (t);
pns = parse_names (t, tt, pattern_mode::preserve);
- // Target-specific variable assignment.
+ // Target type/pattern-specific variable assignment.
//
if (tt == type::assign || tt == type::prepend || tt == type::append)
{
- if (!ans.empty ())
- fail (ans[0].loc) << "ad hoc member in target type/pattern";
+ // Note: ns contains single target name.
+ //
+ if (!gns.empty ())
+ fail (gns[0].member_loc)
+ << "group member in target type/pattern";
// Note: see the same code below if changing anything here.
//
@@ -921,6 +1109,7 @@ namespace build2
for_one_pat (
[this, &var, akind, &aloc] (
token& t, type& tt,
+ optional<bool>,
optional<pattern_type> pt, const target_type* ptt,
string pat, const location& ploc)
{
@@ -966,6 +1155,7 @@ namespace build2
for_one_pat (
[this] (
token& t, type& tt,
+ optional<bool>,
optional<pattern_type> pt, const target_type* ptt,
string pat, const location& ploc)
{
@@ -985,8 +1175,11 @@ namespace build2
if (pns.empty () &&
tt != type::percent && tt != type::multi_lcbrace)
{
- if (!ans.empty ())
- fail (ans[0].loc) << "ad hoc member in target type/pattern";
+ // Note: ns contains single target name.
+ //
+ if (!gns.empty ())
+ fail (gns[0].member_loc)
+ << "group member in target type/pattern";
if (!as.empty ())
fail (as.loc) << "attributes before target type/pattern";
@@ -1090,22 +1283,33 @@ namespace build2
check_pattern (n, nloc);
- // Verify all the ad hoc members are patterns or substitutions and
- // of the correct type.
+ // If we have group members, verify all the members are patterns or
+ // substitutions (ad hoc) or substitutions (explicit) and of the
+ // correct pattern type. A rule for an explicit group that wishes to
+ // match based on some of its members feels far fetched.
+ //
+ // For explicit groups the use-case is to inject static members
+ // which could otherwise be tedious to specify for each group.
//
- names ns (ans.empty () ? names () : move (ans[0].ns));
- const location& aloc (ans.empty () ? location () : ans[0].loc);
+ const location& mloc (gns.empty () ? location () : gns[0].member_loc);
+ names ns (gns.empty () ? names () : move (gns[0].ns));
+ bool expl (gns.empty () ? false : gns[0].expl);
for (name& n: ns)
{
if (!n.pattern || !(*n.pattern == pt || (st && *n.pattern == *st)))
{
- fail (aloc) << "expected " << pn << " pattern or substitution "
+ fail (mloc) << "expected " << pn << " pattern or substitution "
<< "instead of " << n;
}
if (*n.pattern != pattern_type::regex_substitution)
- check_pattern (n, aloc);
+ {
+ if (expl)
+ fail (mloc) << "explicit group member pattern " << n;
+
+ check_pattern (n, mloc);
+ }
}
// The same for prerequisites except here we can have non-patterns.
@@ -1165,7 +1369,15 @@ namespace build2
: scope_->find_target_type (n.type);
if (ttype == nullptr)
- fail (nloc) << "unknown target type " << n.type;
+ fail (nloc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
+
+ if (!gns.empty ())
+ {
+ if (ttype->is_a<group> () != expl)
+ fail (nloc) << "group type and target type mismatch";
+ }
unique_ptr<adhoc_rule_pattern> rp;
switch (pt)
@@ -1178,7 +1390,7 @@ namespace build2
rp.reset (new adhoc_rule_regex_pattern (
*scope_, rn, *ttype,
move (n), nloc,
- move (ns), aloc,
+ move (ns), mloc,
move (pns), ploc));
break;
case pattern_type::regex_substitution:
@@ -1201,8 +1413,13 @@ namespace build2
for (shared_ptr<adhoc_rule>& pr: recipes)
{
- pr->pattern = &rp; // Connect recipe to pattern.
- rp.rules.push_back (move (pr));
+ // Can be NULL if the recipe is disabled with a condition.
+ //
+ if (pr != nullptr)
+ {
+ pr->pattern = &rp; // Connect recipe to pattern.
+ rp.rules.push_back (move (pr));
+ }
}
// Register this adhoc rule for all its actions.
@@ -1250,7 +1467,14 @@ namespace build2
// recipe for dist.
//
// And the same for the configure meta-operation to, for
- // example, make sure a hinted ad hoc rule matches.
+ // example, make sure a hinted ad hoc rule matches. @@ Hm,
+ // maybe we fixed this with action-specific hints? But the
+ // injection part above may still apply. BTW, this is also
+ // required for see-through groups in order to resolve their
+ // member.
+ //
+ // Note also that the equivalent semantics for ad hoc recipes
+ // is provided by match_adhoc_recipe().
//
if (a.meta_operation () == perform_id)
{
@@ -1272,7 +1496,8 @@ namespace build2
// see-through target group, then we may also need to
// register update for other meta-operations (see, for
// example, wildcard update registration in the cli
- // module).
+ // module). BTW, we can now detect such a target via
+ // its target type flags.
}
}
}
@@ -1316,6 +1541,7 @@ namespace build2
st = token (t), // Save start token (will be gone on replay).
recipes = small_vector<shared_ptr<adhoc_rule>, 1> ()]
(token& t, type& tt,
+ optional<bool> gm, // true -- explicit, false -- ad hoc
optional<pattern_type> pt, const target_type* ptt, string pat,
const location& ploc) mutable
{
@@ -1329,7 +1555,14 @@ namespace build2
//
next (t, tt); // Newline.
next (t, tt); // First token inside the variable block.
- parse_variable_block (t, tt, pt, ptt, move (pat), ploc);
+
+ // For explicit groups we only assign variables on the group
+ // omitting the members.
+ //
+ if (!gm || !*gm)
+ parse_variable_block (t, tt, pt, ptt, move (pat), ploc);
+ else
+ skip_block (t, tt);
if (tt != type::rcbrace)
fail (t) << "expected '}' instead of " << t;
@@ -1345,6 +1578,16 @@ namespace build2
else
rt = st;
+ // If this is a group member then we know we are replaying and
+ // can skip the recipe.
+ //
+ if (gm)
+ {
+ replay_skip ();
+ next (t, tt);
+ return;
+ }
+
if (pt)
fail (rt) << "unexpected recipe after target type/pattern" <<
info << "ad hoc pattern rule may not be combined with other "
@@ -1365,7 +1608,7 @@ namespace build2
// Note also that we treat this as an explicit dependency
// declaration (i.e., not implied).
//
- enter_targets (move (ns), nloc, move (ans), 0, as);
+ enter_targets (move (ns), nloc, move (gns), 0, as);
}
continue;
@@ -1380,7 +1623,8 @@ namespace build2
if (!start_names (tt))
fail (t) << "unexpected " << t;
- // @@ PAT: currently we pattern-expand target-specific var names.
+ // @@ PAT: currently we pattern-expand target-specific var names (see
+ // also parse_import()).
//
const location ploc (get_location (t));
names pns (parse_names (t, tt, pattern_mode::expand));
@@ -1415,6 +1659,7 @@ namespace build2
for_each (
[this, &var, akind, &aloc] (
token& t, type& tt,
+ optional<bool> gm,
optional<pattern_type> pt, const target_type* ptt, string pat,
const location& ploc)
{
@@ -1423,7 +1668,18 @@ namespace build2
*pt, *ptt, move (pat), ploc,
var, akind, aloc);
else
- parse_variable (t, tt, var, akind);
+ {
+ // Skip explicit group members (see the block case above for
+ // background).
+ //
+ if (!gm || !*gm)
+ parse_variable (t, tt, var, akind);
+ else
+ {
+ next (t, tt);
+ skip_line (t, tt);
+ }
+ }
});
next_after_newline (t, tt);
@@ -1441,7 +1697,7 @@ namespace build2
parse_dependency (t, tt,
move (ns), nloc,
- move (ans),
+ move (gns),
move (pns), ploc,
as);
}
@@ -1667,7 +1923,7 @@ namespace build2
// Parse a recipe chain.
//
// % [<attrs>] [<buildspec>]
- // [if|switch ...]
+ // [if|if!|switch|recipe ...]
// {{ [<lang> ...]
// ...
// }}
@@ -1686,10 +1942,27 @@ namespace build2
//
if (target_ != nullptr)
{
+ // @@ What if some members are added later?
+ //
+ // @@ Also, what happens if redeclared as real dependency, do we
+ // upgrade the members?
+ //
if (target_->decl != target_decl::real)
{
- for (target* m (target_); m != nullptr; m = m->adhoc_member)
- m->decl = target_decl::real;
+ target_->decl = target_decl::real;
+
+ if (group* g = target_->is_a<group> ())
+ {
+ for (const target& m: g->static_members)
+ const_cast<target&> (m).decl = target_decl::real; // During load.
+ }
+ else
+ {
+ for (target* m (target_->adhoc_member);
+ m != nullptr;
+ m = m->adhoc_member)
+ m->decl = target_decl::real;
+ }
if (default_target_ == nullptr)
default_target_ = target_;
@@ -1702,7 +1975,15 @@ namespace build2
t = start; tt = t.type;
for (size_t i (0); tt == type::percent || tt == type::multi_lcbrace; ++i)
{
- recipes.push_back (nullptr); // For missing else/default (see below).
+ // For missing else/default (see below).
+ //
+ // Note that it may remain NULL if we have, say, an if-condition that
+ // evaluates to false and no else. While it may be tempting to get rid
+ // of such "holes", it's not easy due to the replay semantics (see the
+ // target_ != nullptr block below). So we expect the caller to be
+ // prepared to handle this.
+ //
+ recipes.push_back (nullptr);
attributes as;
buildspec bs;
@@ -1719,7 +2000,131 @@ namespace build2
attributes& as;
buildspec& bs;
const location& bsloc;
- } d {ttype, name, recipes, first, clean, i, as, bs, bsloc};
+ function<void (string&&)> parse_trailer;
+ } d {ttype, name, recipes, first, clean, i, as, bs, bsloc, {}};
+
+ d.parse_trailer = [this, &d] (string&& text)
+ {
+ if (d.first)
+ {
+ adhoc_rule& ar (*d.recipes.back ());
+
+ // Translate each buildspec entry into action and add it to the
+ // recipe entry.
+ //
+ const location& l (d.bsloc);
+
+ for (metaopspec& m: d.bs)
+ {
+ meta_operation_id mi (ctx->meta_operation_table.find (m.name));
+
+ if (mi == 0)
+ fail (l) << "unknown meta-operation " << m.name;
+
+ const meta_operation_info* mf (
+ root_->root_extra->meta_operations[mi]);
+
+ if (mf == nullptr)
+ fail (l) << "project " << *root_ << " does not support meta-"
+ << "operation " << ctx->meta_operation_table[mi].name;
+
+ for (opspec& o: m)
+ {
+ operation_id oi;
+ if (o.name.empty ())
+ {
+ if (mf->operation_pre == nullptr)
+ oi = update_id;
+ else
+ // Calling operation_pre() to translate doesn't feel
+ // appropriate here.
+ //
+ fail (l) << "default operation in recipe action" << endf;
+ }
+ else
+ oi = ctx->operation_table.find (o.name);
+
+ if (oi == 0)
+ fail (l) << "unknown operation " << o.name;
+
+ const operation_info* of (root_->root_extra->operations[oi]);
+
+ if (of == nullptr)
+ fail (l) << "project " << *root_ << " does not support "
+ << "operation " << ctx->operation_table[oi];
+
+ // Note: for now always inner (see match_rule_impl() for
+ // details).
+ //
+ action a (mi, oi);
+
+ // Check for duplicates (local).
+ //
+ if (find_if (
+ d.recipes.begin (), d.recipes.end (),
+ [a] (const shared_ptr<adhoc_rule>& r)
+ {
+ auto& as (r->actions);
+ return find (as.begin (), as.end (), a) != as.end ();
+ }) != d.recipes.end ())
+ {
+ fail (l) << "duplicate " << mf->name << '(' << of->name
+ << ") recipe";
+ }
+
+ ar.actions.push_back (a);
+ }
+ }
+
+ // Set the recipe text.
+ //
+ if (ar.recipe_text (
+ *scope_,
+ d.ttype != nullptr ? *d.ttype : target_->type (),
+ move (text),
+ d.as))
+ d.clean = true;
+
+ // Verify we have no unhandled attributes.
+ //
+ for (attribute& a: d.as)
+ fail (d.as.loc) << "unknown recipe attribute " << a << endf;
+ }
+
+ // Copy the recipe over to the target verifying there are no
+ // duplicates (global).
+ //
+ if (target_ != nullptr)
+ {
+ const shared_ptr<adhoc_rule>& r (d.recipes[d.i]);
+
+ for (const shared_ptr<adhoc_rule>& er: target_->adhoc_recipes)
+ {
+ auto& as (er->actions);
+
+ for (action a: r->actions)
+ {
+ if (find (as.begin (), as.end (), a) != as.end ())
+ {
+ const meta_operation_info* mf (
+ root_->root_extra->meta_operations[a.meta_operation ()]);
+
+ const operation_info* of (
+ root_->root_extra->operations[a.operation ()]);
+
+ fail (d.bsloc)
+ << "duplicate " << mf->name << '(' << of->name
+ << ") recipe for target " << *target_;
+ }
+ }
+ }
+
+ target_->adhoc_recipes.push_back (r);
+
+ // Note that "registration" of configure_* and dist_* actions
+ // (similar to ad hoc rules) is provided by match_adhoc_recipe().
+ }
+ };
// Note that this function must be called at most once per iteration.
//
@@ -1762,7 +2167,7 @@ namespace build2
// to rule_name.
shared_ptr<adhoc_rule> ar;
- if (!lang)
+ if (!lang || icasecmp (*lang, "buildscript") == 0)
{
// Buildscript
//
@@ -1858,129 +2263,200 @@ namespace build2
}
if (!skip)
- {
- if (d.first)
- {
- adhoc_rule& ar (*d.recipes.back ());
-
- // Translate each buildspec entry into action and add it to the
- // recipe entry.
- //
- const location& l (d.bsloc);
+ d.parse_trailer (move (t.value));
- for (metaopspec& m: d.bs)
- {
- meta_operation_id mi (ctx.meta_operation_table.find (m.name));
-
- if (mi == 0)
- fail (l) << "unknown meta-operation " << m.name;
+ next (t, tt);
+ assert (tt == type::multi_rcbrace);
- const meta_operation_info* mf (
- root_->root_extra->meta_operations[mi]);
+ next (t, tt); // Newline.
+ next_after_newline (t, tt, token (t)); // Should be on its own line.
+ };
- if (mf == nullptr)
- fail (l) << "project " << *root_ << " does not support meta-"
- << "operation " << ctx.meta_operation_table[mi].name;
+ auto parse_recipe_directive = [this, &d] (token& t, type& tt,
+ const string&)
+ {
+ // Parse recipe directive:
+ //
+ // recipe <lang> <file>
+ //
+ // Note that here <lang> is not optional.
+ //
+ // @@ We could guess <lang> from the extension.
- for (opspec& o: m)
- {
- operation_id oi;
- if (o.name.empty ())
- {
- if (mf->operation_pre == nullptr)
- oi = update_id;
- else
- // Calling operation_pre() to translate doesn't feel
- // appropriate here.
- //
- fail (l) << "default operation in recipe action" << endf;
- }
- else
- oi = ctx.operation_table.find (o.name);
+ // Use value mode to minimize the number of special characters.
+ //
+ mode (lexer_mode::value, '@');
- if (oi == 0)
- fail (l) << "unknown operation " << o.name;
+ // Parse <lang>.
+ //
+ if (next (t, tt) != type::word)
+ fail (t) << "expected recipe language instead of " << t;
- const operation_info* of (root_->root_extra->operations[oi]);
+ location lloc (get_location (t));
+ string lang (t.value);
+ next (t, tt);
- if (of == nullptr)
- fail (l) << "project " << *root_ << " does not support "
- << "operation " << ctx.operation_table[oi];
+ // Parse <file> as names to get variable expansion, etc.
+ //
+ location nloc (get_location (t));
+ names ns (parse_names (t, tt, pattern_mode::ignore, "file name"));
- // Note: for now always inner (see match_rule() for details).
- //
- action a (mi, oi);
+ path file;
+ try
+ {
+ file = convert<path> (move (ns));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (nloc) << "invalid recipe file path: " << e;
+ }
- // Check for duplicates (local).
- //
- if (find_if (
- d.recipes.begin (), d.recipes.end (),
- [a] (const shared_ptr<adhoc_rule>& r)
- {
- auto& as (r->actions);
- return find (as.begin (), as.end (), a) != as.end ();
- }) != d.recipes.end ())
- {
- fail (l) << "duplicate " << mf->name << '(' << of->name
- << ") recipe";
- }
+ string text;
+ if (d.first)
+ {
+ // Source relative to the buildfile rather than src scope. In
+ // particular, this makes sourcing from exported buildfiles work.
+ //
+ if (file.relative () && path_->path != nullptr)
+ {
+ // Note: all sourced/included/imported paths are absolute and
+ // normalized.
+ //
+ file = path_->path->directory () / file;
+ }
- ar.actions.push_back (a);
- }
- }
+ file.normalize ();
- // Set the recipe text.
- //
- if (ar.recipe_text (
- *scope_,
- d.ttype != nullptr ? *d.ttype : target_->type (),
- move (t.value),
- d.as))
- d.clean = true;
-
- // Verify we have no unhandled attributes.
- //
- for (attribute& a: d.as)
- fail (d.as.loc) << "unknown recipe attribute " << a << endf;
+ try
+ {
+ ifdstream ifs (file);
+ text = ifs.read_text ();
+ }
+ catch (const io_error& e)
+ {
+ fail (nloc) << "unable to read recipe file " << file << ": " << e;
}
- // Copy the recipe over to the target verifying there are no
- // duplicates (global).
- //
- if (target_ != nullptr)
+ shared_ptr<adhoc_rule> ar;
{
- const shared_ptr<adhoc_rule>& r (d.recipes[d.i]);
+ // This is expected to be the location of the opening multi-curly
+ // with the recipe body starting from the following line. So we
+ // need to fudge the line number a bit.
+ //
+ location loc (file, 0, 1);
+
+ if (icasecmp (lang, "buildscript") == 0)
+ {
+ // Buildscript
+ //
+ ar.reset (
+ new adhoc_buildscript_rule (
+ d.name.empty () ? "<ad hoc buildscript recipe>" : d.name,
+ loc,
+ 2)); // Use `{{` and `}}` for dump.
- for (const shared_ptr<adhoc_rule>& er: target_->adhoc_recipes)
+ // Enter as buildfile-like so that it gets automatically
+ // distributed. Note: must be consistent with build/export/
+ // handling in process_default_target().
+ //
+ enter_buildfile<buildscript> (file);
+ }
+ else if (icasecmp (lang, "c++") == 0)
{
- auto& as (er->actions);
+ // C++
+ //
+ // We expect to find a C++ comment line with version and
+ // optional fragment separator before the first non-comment,
+ // non-blank line:
+ //
+ // // c++ <ver> [<sep>]
+ //
+ string s;
+ location sloc (file, 1, 1);
+ {
+ // Note: observe blank lines for accurate line count.
+ //
+ size_t b (0), e (0);
+ for (size_t m (0), n (text.size ());
+ next_word (text, n, b, e, m, '\n', '\r'), b != n;
+ sloc.line++)
+ {
+ s.assign (text, b, e - b);
- for (action a: r->actions)
+ if (!trim (s).empty ())
+ {
+ if (icasecmp (s, "// c++ ", 7) == 0)
+ break;
+
+ if (s[0] != '/' || s[1] != '/')
+ {
+ b = e;
+ break;
+ }
+ }
+ }
+
+ if (b == e)
+ fail (sloc) << "no '// c++ <version> [<separator>]' line";
+ }
+
+ uint64_t ver;
+ optional<string> sep;
{
- if (find (as.begin (), as.end (), a) != as.end ())
+ size_t b (7), e (7);
+ if (next_word (s, b, e, ' ', '\t') == 0)
+ fail (sloc) << "missing c++ recipe version" << endf;
+
+ try
{
- const meta_operation_info* mf (
- root_->root_extra->meta_operations[a.meta_operation ()]);
+ ver = convert<uint64_t> (build2::name (string (s, b, e - b)));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid c++ recipe version: " << e << endf;
+ }
- const operation_info* of (
- root_->root_extra->operations[a.operation ()]);
+ if (next_word (s, b, e, ' ', '\t') != 0)
+ {
+ sep = string (s, b, e - b);
- fail (d.bsloc)
- << "duplicate " << mf->name << '(' << of->name
- << ") recipe for target " << *target_;
+ if (next_word (s, b, e, ' ', '\t') != 0)
+ fail (sloc) << "junk after fragment separator";
}
}
- }
- target_->adhoc_recipes.push_back (r);
+ ar.reset (
+ new adhoc_cxx_rule (
+ d.name.empty () ? "<ad hoc c++ recipe>" : d.name,
+ loc,
+ 2, // Use `{{` and `}}` for dump.
+ ver,
+ move (sep)));
+
+ // Enter as buildfile-like so that it gets automatically
+ // distributed. Note: must be consistent with build/export/
+ // handling in process_default_target().
+ //
+ // While ideally we would want to use the cxx{} target type,
+ // it's defined in a separate build system module (which may not
+ // even be loaded by this project, so even runtime lookup won't
+ // work). So we use file{} instead.
+ //
+ enter_buildfile<build2::file> (file);
+ }
+ else
+ fail (lloc) << "unknown recipe language '" << lang << "'";
}
+
+ assert (d.recipes[d.i] == nullptr);
+ d.recipes[d.i] = move (ar);
}
+ else
+ assert (d.recipes[d.i] != nullptr);
- next (t, tt);
- assert (tt == type::multi_rcbrace);
+ d.parse_trailer (move (text));
- next (t, tt); // Newline.
- next_after_newline (t, tt, token (t)); // Should be on its own line.
+ next_after_newline (t, tt);
};
bsloc = get_location (t); // Fallback location.
@@ -2040,7 +2516,7 @@ namespace build2
expire_mode ();
next_after_newline (t, tt, "recipe action");
- // See if this is if-else or switch.
+ // See if this is if-else/switch or `recipe`.
//
// We want the keyword test similar to parse_clause() but we cannot do
// it if replaying. So we skip it with understanding that if it's not
@@ -2056,14 +2532,21 @@ namespace build2
// handy if we want to provide a custom recipe but only on certain
// platforms or some such).
- if (n == "if")
+ if (n == "if" || n == "if!")
{
- parse_if_else (t, tt, true /* multi */, parse_block);
+ parse_if_else (t, tt, true /* multi */,
+ parse_block, parse_recipe_directive);
continue;
}
else if (n == "switch")
{
- parse_switch (t, tt, true /* multi */, parse_block);
+ parse_switch (t, tt, true /* multi */,
+ parse_block, parse_recipe_directive);
+ continue;
+ }
+ else if (n == "recipe")
+ {
+ parse_recipe_directive (t, tt, "" /* kind */);
continue;
}
@@ -2071,7 +2554,7 @@ namespace build2
}
if (tt != type::multi_lcbrace)
- fail (t) << "expected recipe block instead of " << t;
+ fail (t) << "expected recipe block or 'recipe' instead of " << t;
// Fall through.
}
@@ -2116,13 +2599,97 @@ namespace build2
}
}
- void parser::
- enter_adhoc_members (adhoc_names_loc&& ans, bool implied)
+ vector<reference_wrapper<target>> parser::
+ enter_explicit_members (group_names_loc&& gns, bool implied)
+ {
+ tracer trace ("parser::enter_explicit_members", &path_);
+
+ names& ns (gns.ns);
+ const location& loc (gns.member_loc);
+
+ vector<reference_wrapper<target>> r;
+ r.reserve (ns.size ());
+
+ group& g (target_->as<group> ());
+ auto& ms (g.static_members);
+
+ for (size_t i (0); i != ns.size (); ++i)
+ {
+ name&& n (move (ns[i]));
+ name&& o (n.pair ? move (ns[++i]) : name ());
+
+ if (n.qualified ())
+ fail (loc) << "project name in target " << n;
+
+ // We derive the path unless the target name ends with the '...' escape
+ // which here we treat as the "let the rule derive the path" indicator
+ // (see target::split_name() for details). This will only be useful for
+ // referring to group members that are managed by the group's matching
+ // rule. Note also that omitting '...' for such a member could be used
+ // to override the file name, provided the rule checks if the path has
+ // already been derived before doing it itself.
+ //
+ // @@ What can the ad hoc recipe/rule do differently here? Maybe get
+ // path from dynamic targets? Maybe we will have custom path
+ // derivation support in buildscript in the future?
+ //
+ bool escaped;
+ {
+ const string& v (n.value);
+ size_t p (v.size ());
+
+ escaped = (p > 3 &&
+ v[--p] == '.' && v[--p] == '.' && v[--p] == '.' &&
+ v[--p] != '.');
+ }
+
+ target& m (enter_target::insert_target (*this,
+ move (n), move (o),
+ implied,
+ loc, trace));
+
+ if (g == m)
+ fail (loc) << "explicit group member " << m << " is group itself";
+
+ // Add as static member skipping duplicates.
+ //
+ if (find (ms.begin (), ms.end (), m) == ms.end ())
+ {
+ if (m.group == nullptr)
+ m.group = &g;
+ else if (m.group != &g)
+ fail (loc) << g << " group member " << m << " already belongs to "
+ << "group " << *m.group;
+
+ ms.push_back (m);
+ }
+
+ if (!escaped)
+ {
+ if (file* ft = m.is_a<file> ())
+ ft->derive_path ();
+ }
+
+ r.push_back (m);
+ }
+
+ return r;
+ }
+
+ vector<reference_wrapper<target>> parser::
+ enter_adhoc_members (group_names_loc&& gns, bool implied)
{
tracer trace ("parser::enter_adhoc_members", &path_);
- names& ns (ans.ns);
- const location& loc (ans.loc);
+ names& ns (gns.ns);
+ const location& loc (gns.member_loc);
+
+ if (target_->is_a<group> ())
+ fail (loc) << "ad hoc group primary member " << *target_
+ << " is explicit group";
+
+ vector<reference_wrapper<target>> r;
+ r.reserve (ns.size ());
for (size_t i (0); i != ns.size (); ++i)
{
@@ -2150,14 +2717,16 @@ namespace build2
v[--p] != '.');
}
- target& at (
- enter_target::insert_target (*this,
- move (n), move (o),
- implied,
- loc, trace));
+ target& m (enter_target::insert_target (*this,
+ move (n), move (o),
+ implied,
+ loc, trace));
+
+ if (target_ == &m)
+ fail (loc) << "ad hoc group member " << m << " is primary target";
- if (target_ == &at)
- fail (loc) << "ad hoc group member " << at << " is primary target";
+ if (m.is_a<group> ())
+ fail (loc) << "ad hoc group member " << m << " is explicit group";
// Add as an ad hoc member at the end of the chain skipping duplicates.
//
@@ -2165,7 +2734,7 @@ namespace build2
const_ptr<target>* mp (&target_->adhoc_member);
for (; *mp != nullptr; mp = &(*mp)->adhoc_member)
{
- if (*mp == &at)
+ if (*mp == &m)
{
mp = nullptr;
break;
@@ -2174,31 +2743,41 @@ namespace build2
if (mp != nullptr)
{
- *mp = &at;
- at.group = target_;
+ if (m.group == nullptr)
+ m.group = target_;
+ else if (m.group != target_)
+ fail (loc) << *target_ << " ad hoc group member " << m
+ << " already belongs to group " << *m.group;
+ *mp = &m;
}
}
if (!escaped)
{
- if (file* ft = at.is_a<file> ())
+ if (file* ft = m.is_a<file> ())
ft->derive_path ();
}
+
+ r.push_back (m);
}
+
+ return r;
}
- small_vector<reference_wrapper<target>, 1> parser::
+ small_vector<pair<reference_wrapper<target>,
+ vector<reference_wrapper<target>>>, 1> parser::
enter_targets (names&& tns, const location& tloc, // Target names.
- adhoc_names&& ans, // Ad hoc target names.
+ group_names&& gns, // Group member names.
size_t prereq_size,
const attributes& tas) // Target attributes.
{
- // Enter all the targets (normally we will have just one) and their ad hoc
- // groups.
+ // Enter all the targets (normally we will have just one) and their group
+ // members.
//
tracer trace ("parser::enter_targets", &path_);
- small_vector<reference_wrapper<target>, 1> tgs;
+ small_vector<pair<reference_wrapper<target>,
+ vector<reference_wrapper<target>>>, 1> tgs;
for (size_t i (0); i != tns.size (); ++i)
{
@@ -2223,13 +2802,21 @@ namespace build2
if (!tas.empty ())
apply_target_attributes (*target_, tas);
- // Enter ad hoc members.
+ // Enter group members.
//
- if (!ans.empty ())
+ vector<reference_wrapper<target>> gms;
+ if (!gns.empty ())
{
// Note: index after the pair increment.
//
- enter_adhoc_members (move (ans[i]), false /* implied */);
+ group_names_loc& g (gns[i]);
+
+ if (g.expl && !target_->is_a<group> ())
+ fail (g.group_loc) << *target_ << " is not group target";
+
+ gms = g.expl
+ ? enter_explicit_members (move (g), false /* implied */)
+ : enter_adhoc_members (move (g), false /* implied */);
}
if (default_target_ == nullptr)
@@ -2237,7 +2824,7 @@ namespace build2
target_->prerequisites_state_.store (2, memory_order_relaxed);
target_->prerequisites_.reserve (prereq_size);
- tgs.push_back (*target_);
+ tgs.emplace_back (*target_, move (gms));
}
return tgs;
@@ -2295,7 +2882,7 @@ namespace build2
if (!v.empty ())
{
- oi = ctx.operation_table.find (v);
+ oi = ctx->operation_table.find (v);
if (oi == 0)
fail (l) << "unknown operation " << v << " in rule_hint "
@@ -2303,7 +2890,7 @@ namespace build2
if (root_->root_extra->operations[oi] == nullptr)
fail (l) << "project " << *root_ << " does not support "
- << "operation " << ctx.operation_table[oi]
+ << "operation " << ctx->operation_table[oi]
<< " specified in rule_hint attribute";
}
}
@@ -2325,7 +2912,7 @@ namespace build2
void parser::
parse_dependency (token& t, token_type& tt,
names&& tns, const location& tloc, // Target names.
- adhoc_names&& ans, // Ad hoc target names.
+ group_names&& gns, // Group member names.
names&& pns, const location& ploc, // Prereq names.
const attributes& tas) // Target attributes.
{
@@ -2337,33 +2924,92 @@ namespace build2
//
tracer trace ("parser::parse_dependency", &path_);
+ // Diagnose conditional prerequisites. Note that we want to diagnose this
+ // even if pns is empty (think empty variable expansion; the literal "no
+ // prerequisites" case is handled elsewhere).
+ //
+ // @@ TMP For now we only do it during the dist meta-operation. In the
+ // future we should tighten this to any meta-operation provided
+ // the dist module is loaded.
+ //
+ // @@ TMP For now it's a warning because we have dependencies like
+ // cli.cxx{foo}: cli{foo} which are not currently possible to
+ // rewrite (cli.cxx{} is not always registered).
+ //
+ if (condition_ &&
+ ctx->current_mif != nullptr &&
+ ctx->current_mif->id == dist_id)
+ {
+ // Only issue the warning for the projects being distributed. In
+ // particular, this makes sure we don't complain about imported
+ // projects. Note: use amalgamation to cover bundled subprojects.
+ //
+ auto* dm (root_->bundle_scope ()->find_module<dist::module> (
+ dist::module::name));
+
+ if (dm != nullptr && dm->distributed)
+ {
+ warn (tloc) << "conditional dependency declaration may result in "
+ << "incomplete distribution" <<
+ info (ploc) << "prerequisite declared here" <<
+ info (*condition_) << "conditional buildfile fragment starts here" <<
+ info << "instead use 'include' prerequisite-specific variable to "
+ << "conditionally include prerequisites" <<
+ info << "for example: "
+ << "<target>: <prerequisite>: include = (<condition>)" <<
+ info << "for details, see https://github.com/build2/HOWTO/blob/"
+ << "master/entries/keep-build-graph-config-independent.md";
+ }
+ }
+
// First enter all the targets.
//
- small_vector<reference_wrapper<target>, 1> tgs (
- enter_targets (move (tns), tloc, move (ans), pns.size (), tas));
+ small_vector<pair<reference_wrapper<target>,
+ vector<reference_wrapper<target>>>, 1>
+ tgs (enter_targets (move (tns), tloc, move (gns), pns.size (), tas));
// Now enter each prerequisite into each target.
//
- for (name& pn: pns)
+ for (auto i (pns.begin ()); i != pns.end (); ++i)
{
// We cannot reuse the names if we (potentially) may need to pass them
// as targets in case of a chain (see below).
//
- name n (tt != type::colon ? move (pn) : pn);
+ name n (tt != type::colon ? move (*i) : *i);
// See also scope::find_prerequisite_key().
//
auto rp (scope_->find_target_type (n, ploc));
- const target_type* tt (rp.first);
+ const target_type* t (rp.first);
optional<string>& e (rp.second);
- if (tt == nullptr)
- fail (ploc) << "unknown target type " << n.type;
+ if (t == nullptr)
+ {
+ if (n.proj)
+ {
+ // If the target type is unknown then no phase 2 import (like
+ // rule-specific search) can possibly succeed so we can fail now and
+ // with a more accurate reason. See import2(names) for background.
+ //
+ diag_record dr;
+ dr << fail (ploc) << "unable to import target " << n;
+ import_suggest (dr, *n.proj, nullptr, string (), false);
+ }
+ else
+ {
+ fail (ploc) << "unknown target type " << n.type <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
+ }
+ }
+
+ if (t->factory == nullptr)
+ fail (ploc) << "abstract target type " << t->name << "{}";
// Current dir collapses to an empty one.
//
if (!n.dir.empty ())
- n.dir.normalize (false, true);
+ n.dir.normalize (false /* actual */, true);
// @@ OUT: for now we assume the prerequisite's out is undetermined. The
// only way to specify an src prerequisite will be with the explicit
@@ -2374,10 +3020,47 @@ namespace build2
// a special indicator. Also, one can easily and natually suppress any
// searches by specifying the absolute path.
//
+ name o;
+ if (n.pair)
+ {
+ assert (n.pair == '@');
+
+ ++i;
+ o = tt != type::colon ? move (*i) : *i;
+
+ if (!o.directory ())
+ fail (ploc) << "expected directory after '@'";
+
+ o.dir.normalize (); // Note: don't collapse current to empty.
+
+ // Make sure out and src are parallel unless both were specified as
+ // absolute. We make an exception for this case because out may be
+ // used to "tag" imported targets (see cc::search_library()). So it's
+ // sort of the "I know what I am doing" escape hatch (it would have
+ // been even better to verify such a target is outside any project
+ // but that won't be cheap).
+ //
+ // For now we require that both are either relative or absolute.
+ //
+ // See similar code for targets in scope::find_target_type().
+ //
+ if (n.dir.absolute () && o.dir.absolute ())
+ ;
+ else if (n.dir.empty () && o.dir.current ())
+ ;
+ else if (o.dir.relative () &&
+ n.dir.relative () &&
+ o.dir == n.dir)
+ ;
+ else
+ fail (ploc) << "prerequisite output directory " << o.dir
+ << " must be parallel to source directory " << n.dir;
+ }
+
prerequisite p (move (n.proj),
- *tt,
+ *t,
move (n.dir),
- dir_path (),
+ move (o.dir),
move (n.value),
move (e),
*scope_);
@@ -2386,7 +3069,7 @@ namespace build2
{
// Move last prerequisite (which will normally be the only one).
//
- target& t (*i);
+ target& t (i->first);
t.prerequisites_.push_back (++i == e
? move (p)
: prerequisite (p, memory_order_relaxed));
@@ -2399,20 +3082,42 @@ namespace build2
//
// We handle multiple targets and/or prerequisites by replaying the tokens
// (see the target-specific case comments for details). The function
- // signature is:
+ // signature for for_each_t (see for_each on the gm argument semantics):
+ //
+ // void (token& t, type& tt, optional<bool> gm)
+ //
+ // And for for_each_p:
//
// void (token& t, type& tt)
//
auto for_each_t = [this, &t, &tt, &tgs] (auto&& f)
{
- replay_guard rg (*this, tgs.size () > 1);
+ // We need replay if we have multiple targets or group members.
+ //
+ replay_guard rg (*this, tgs.size () > 1 || !tgs[0].second.empty ());
for (auto ti (tgs.begin ()), te (tgs.end ()); ti != te; )
{
- target& tg (*ti);
- enter_target tgg (*this, tg);
+ target& tg (ti->first);
+ const vector<reference_wrapper<target>>& gms (ti->second);
- f (t, tt);
+ {
+ enter_target g (*this, tg);
+ f (t, tt, nullopt);
+ }
+
+ if (!gms.empty ())
+ {
+ bool expl (tg.is_a<group> ());
+
+ for (target& gm: gms)
+ {
+ rg.play (); // Replay.
+
+ enter_target g (*this, gm);
+ f (t, tt, expl);
+ }
+ }
if (++ti != te)
rg.play (); // Replay.
@@ -2425,8 +3130,8 @@ namespace build2
for (auto ti (tgs.begin ()), te (tgs.end ()); ti != te; )
{
- target& tg (*ti);
- enter_target tgg (*this, tg);
+ target& tg (ti->first);
+ enter_target g (*this, tg);
for (size_t pn (tg.prerequisites_.size ()), pi (pn - pns.size ());
pi != pn; )
@@ -2469,7 +3174,7 @@ namespace build2
this,
st = token (t), // Save start token (will be gone on replay).
recipes = small_vector<shared_ptr<adhoc_rule>, 1> ()]
- (token& t, type& tt) mutable
+ (token& t, type& tt, optional<bool> gm) mutable
{
token rt; // Recipe start token.
@@ -2479,7 +3184,14 @@ namespace build2
{
next (t, tt); // Newline.
next (t, tt); // First token inside the variable block.
- parse_variable_block (t, tt);
+
+ // Skip explicit group members (see the block case above for
+ // background).
+ //
+ if (!gm || !*gm)
+ parse_variable_block (t, tt);
+ else
+ skip_block (t, tt);
if (tt != type::rcbrace)
fail (t) << "expected '}' instead of " << t;
@@ -2495,6 +3207,16 @@ namespace build2
else
rt = st;
+ // If this is a group member then we know we are replaying and can
+ // skip the recipe.
+ //
+ if (gm)
+ {
+ replay_skip ();
+ next (t, tt);
+ return;
+ }
+
parse_recipe (t, tt, rt, recipes);
};
@@ -2504,21 +3226,6 @@ namespace build2
return;
}
- // What should we do if there are no prerequisites (for example, because
- // of an empty wildcard result)? We can fail or we can ignore. In most
- // cases, however, this is probably an error (for example, forgetting to
- // checkout a git submodule) so let's not confuse the user and fail (one
- // can always handle the optional prerequisites case with a variable and
- // an if).
- //
- if (pns.empty ())
- fail (ploc) << "no prerequisites in dependency chain or prerequisite-"
- << "specific variable assignment";
-
- next_with_attributes (t, tt); // Recognize attributes after `:`.
-
- auto at (attributes_push (t, tt));
-
// If we are here, then this can be one of three things:
//
// 1. A prerequisite-specific variable bloc:
@@ -2532,10 +3239,37 @@ namespace build2
//
// foo: bar: x = y
//
- // 3. A further dependency chain :
+ // 3. A further dependency chain:
//
// foo: bar: baz ...
//
+ // What should we do if there are no prerequisites, for example, because
+ // of an empty wildcard result or empty variable expansion? We can fail or
+ // we can ignore. In most cases, however, this is probably an error (for
+ // example, forgetting to checkout a git submodule) so let's not confuse
+ // the user and fail (one can always handle the optional prerequisites
+ // case with a variable and an if).
+ //
+ // On the other hand, we allow just empty prerequisites (which is also the
+ // more common case by far) and so it's strange that we don't allow the
+ // same with, say, `include = false`:
+ //
+ // exe{foo}: cxx{$empty} # Ok.
+ // exe{foo}: cxx{$empty}: include = false # Not Ok?
+ //
+ // So let's ignore in the first two cases (variable block and assignment)
+ // for consistency. The dependency chain is iffy both conceptually and
+ // implementation-wise (it could be followed by a variable block). So
+ // let's keep it an error for now.
+ //
+ // Note that the syntactically-empty prerequisite list is still an error:
+ //
+ // exe{foo}: : include = false # Error.
+ //
+ next_with_attributes (t, tt); // Recognize attributes after `:`.
+
+ auto at (attributes_push (t, tt));
+
if (tt == type::newline || tt == type::eos)
{
attributes_pop (); // Must be none since can't be standalone.
@@ -2550,15 +3284,22 @@ namespace build2
// Parse the block for each prerequisites of each target.
//
- for_each_p ([this] (token& t, token_type& tt)
- {
- next (t, tt); // First token inside the block.
+ if (!pns.empty ())
+ for_each_p ([this] (token& t, token_type& tt)
+ {
+ next (t, tt); // First token inside the block.
- parse_variable_block (t, tt);
+ parse_variable_block (t, tt);
- if (tt != type::rcbrace)
- fail (t) << "expected '}' instead of " << t;
- });
+ if (tt != type::rcbrace)
+ fail (t) << "expected '}' instead of " << t;
+ });
+ else
+ {
+ skip_block (t, tt);
+ if (tt != type::rcbrace)
+ fail (t) << "expected '}' instead of " << t;
+ }
next (t, tt); // Presumably newline after '}'.
next_after_newline (t, tt, '}'); // Should be on its own line.
@@ -2581,10 +3322,13 @@ namespace build2
// Parse the assignment for each prerequisites of each target.
//
- for_each_p ([this, &var, at] (token& t, token_type& tt)
- {
- parse_variable (t, tt, var, at);
- });
+ if (!pns.empty ())
+ for_each_p ([this, &var, at] (token& t, token_type& tt)
+ {
+ parse_variable (t, tt, var, at);
+ });
+ else
+ skip_line (t, tt);
next_after_newline (t, tt);
@@ -2603,6 +3347,9 @@ namespace build2
//
else
{
+ if (pns.empty ())
+ fail (ploc) << "no prerequisites in dependency chain";
+
// @@ This is actually ambiguous: prerequisite or target attributes
// (or both or neither)? Perhaps this should be prerequisites for
// the same reason as below (these are prerequsites first).
@@ -2618,16 +3365,16 @@ namespace build2
// we just say that the dependency chain is equivalent to specifying
// each dependency separately.
//
- // Also note that supporting ad hoc target group specification in
- // chains will be complicated. For example, what if prerequisites that
- // have ad hoc targets don't end up being chained? Do we just silently
- // drop them? Also, these are prerequsites first that happened to be
- // reused as target names so perhaps it is the right thing not to
- // support, conceptually.
+ // Also note that supporting target group specification in chains will
+ // be complicated. For example, what if prerequisites that have group
+ // members don't end up being chained? Do we just silently drop them?
+ // Also, these are prerequsites first that happened to be reused as
+ // target names so perhaps it is the right thing not to support,
+ // conceptually.
//
parse_dependency (t, tt,
move (pns), ploc,
- {} /* ad hoc target name */,
+ {} /* group names */,
move (ns), loc,
attributes () /* target attributes */);
}
@@ -2635,14 +3382,18 @@ namespace build2
}
void parser::
- source (istream& is, const path_name& in, const location& loc, bool deft)
+ source_buildfile (istream& is,
+ const path_name& in,
+ const location& loc,
+ optional<bool> deft)
{
- tracer trace ("parser::source", &path_);
+ tracer trace ("parser::source_buildfile", &path_);
l5 ([&]{trace (loc) << "entering " << in;});
- if (in.path != nullptr)
- enter_buildfile (*in.path);
+ const buildfile* bf (in.path != nullptr
+ ? &enter_buildfile<buildfile> (*in.path)
+ : nullptr);
const path_name* op (path_);
path_ = &in;
@@ -2652,11 +3403,11 @@ namespace build2
lexer_ = &l;
target* odt;
- if (deft)
- {
+ if (!deft || *deft)
odt = default_target_;
+
+ if (deft && *deft)
default_target_ = nullptr;
- }
token t;
type tt;
@@ -2666,12 +3417,15 @@ namespace build2
if (tt != type::eos)
fail (t) << "unexpected " << t;
- if (deft)
+ if (deft && *deft)
{
- process_default_target (t);
- default_target_ = odt;
+ if (stage_ != stage::boot && stage_ != stage::root)
+ process_default_target (t, bf);
}
+ if (!deft || *deft)
+ default_target_ = odt;
+
lexer_ = ol;
path_ = op;
@@ -2681,11 +3435,35 @@ namespace build2
void parser::
parse_source (token& t, type& tt)
{
+ // source [<attrs>] <path>+
+ //
+
// The rest should be a list of buildfiles. Parse them as names in the
- // value mode to get variable expansion and directory prefixes.
+ // value mode to get variable expansion and directory prefixes. Also
+ // handle optional attributes.
//
mode (lexer_mode::value, '@');
- next (t, tt);
+ next_with_attributes (t, tt);
+ attributes_push (t, tt);
+
+ bool nodt (false); // Source buildfile without default target semantics.
+ {
+ attributes as (attributes_pop ());
+ const location& l (as.loc);
+
+ for (const attribute& a: as)
+ {
+ const string& n (a.name);
+
+ if (n == "no_default_target")
+ {
+ nodt = true;
+ }
+ else
+ fail (l) << "unknown source directive attribute " << a;
+ }
+ }
+
const location l (get_location (t));
names ns (tt != type::newline && tt != type::eos
? parse_names (t, tt, pattern_mode::expand, "path", nullptr)
@@ -2712,10 +3490,10 @@ namespace build2
try
{
ifdstream ifs (p);
- source (ifs,
- path_name (p),
- get_location (t),
- false /* default_target */);
+ source_buildfile (ifs,
+ path_name (p),
+ get_location (t),
+ nodt ? optional<bool> {} : false);
}
catch (const io_error& e)
{
@@ -2729,6 +3507,9 @@ namespace build2
void parser::
parse_include (token& t, type& tt)
{
+ // include <path>+
+ //
+
tracer trace ("parser::parse_include", &path_);
if (stage_ == stage::boot)
@@ -2837,19 +3618,35 @@ namespace build2
l6 ([&]{trace (l) << "absolute path " << p;});
- if (!root_->buildfiles.insert (p).second) // Note: may be "new" root.
+ // Note: may be "new" root.
+ //
+ if (!root_->root_extra->insert_buildfile (p))
{
l5 ([&]{trace (l) << "skipping already included " << p;});
continue;
}
+ // Note: see a variant of this in parse_import().
+ //
+ // Clear/restore if/switch location.
+ //
+ // We do it here but not in parse_source since the included buildfile is
+ // in a sense expected to be a standalone entity (think a file included
+ // from an export stub).
+ //
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = nullopt;
+
try
{
ifdstream ifs (p);
- source (ifs,
- path_name (p),
- get_location (t),
- true /* default_target */);
+ source_buildfile (ifs,
+ path_name (p),
+ get_location (t),
+ true /* default_target */);
}
catch (const io_error& e)
{
@@ -2901,13 +3698,16 @@ namespace build2
[] (const string& s) {return s.c_str ();});
cargs.push_back (nullptr);
+ // Note: we are in the serial load phase and so no diagnostics buffering
+ // is needed.
+ //
process pr (run_start (3 /* verbosity */,
cargs,
0 /* stdin */,
-1 /* stdout */,
- true /* error */,
- dir_path () /* cwd */,
+ 2 /* stderr */,
nullptr /* env */,
+ dir_path () /* cwd */,
l));
try
{
@@ -2929,10 +3729,10 @@ namespace build2
dr << info (l) << "while parsing " << args[0] << " output";
});
- source (is,
- path_name ("<stdout>"),
- l,
- false /* default_target */);
+ source_buildfile (is,
+ path_name ("<stdout>"),
+ l,
+ false /* default_target */);
}
is.close (); // Detect errors.
@@ -2946,7 +3746,7 @@ namespace build2
// caused by that and let run_finish() deal with it.
}
- run_finish (cargs, pr, l);
+ run_finish (cargs, pr, 2 /* verbosity */, false /* omit_normal */, l);
next_after_newline (t, tt);
}
@@ -2992,14 +3792,16 @@ namespace build2
// which case it will be duplicating them in its root.build file). So
// for now we allow this trusting the user knows what they are doing.
//
- string proj;
- {
- const project_name& n (named_project (*root_));
-
- if (!n.empty ())
- proj = n.variable ();
- }
-
+ // There is another special case: a buildfile imported from another
+ // project. In this case we also allow <project> to be the imported
+ // project name in addition to importing. The thinking here is that an
+ // imported buildfile is in a sense like a module (may provide rules which
+ // may require configuration, etc) and should be able to use its own
+ // project name (which is often the corresponding tool name) in the
+ // configuration variables, just like modules. In this case we use the
+ // imported project name as the reporting module name (but which can
+ // be overridden with config.report.module attribute).
+ //
const location loc (get_location (t));
// We are now in the normal lexing mode and we let the lexer handle `?=`.
@@ -3017,6 +3819,11 @@ namespace build2
optional<string> report;
string report_var;
+ // Reporting module name. Empty means the config module reporting
+ // project's own configuration.
+ //
+ project_name report_module;
+
for (auto i (as.begin ()); i != as.end (); )
{
if (i->name == "null")
@@ -3035,7 +3842,7 @@ namespace build2
report = move (v);
else
throw invalid_argument (
- "expected 'false' or format name instead of '" + v + "'");
+ "expected 'false' or format name instead of '" + v + '\'');
}
catch (const invalid_argument& e)
{
@@ -3047,6 +3854,23 @@ namespace build2
try
{
report_var = convert<string> (move (i->value));
+
+ if (!report)
+ report = string ("true");
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (as.loc) << "invalid " << i->name << " attribute value: " << e;
+ }
+ }
+ else if (i->name == "config.report.module")
+ {
+ try
+ {
+ report_module = convert<project_name> (move (i->value));
+
+ if (!report)
+ report = string ("true");
}
catch (const invalid_argument& e)
{
@@ -3070,9 +3894,13 @@ namespace build2
// As a way to print custom (discovered, computed, etc) configuration
// information we allow specifying a non config.* variable provided it is
- // explicitly marked with the config.report attribute.
+ // explicitly marked with the config.report attribute (or another
+ // attribute that implies it).
//
bool new_val (false);
+ string org_var; // Original variable if config.report.variable specified.
+
+ const variable* var (nullptr); // config.* variable.
lookup l;
if (report && *report != "false" && !config)
@@ -3091,7 +3919,14 @@ namespace build2
// philosophical question. In either case it doesn't seem useful for it
// to unconditionally force reporting at level 2.
//
- report_var = move (name);
+ if (!report_var.empty ())
+ {
+ // For example, config [config.report.variable=multi] multi_database
+ //
+ org_var = move (name);
+ }
+ else
+ report_var = move (name);
next (t, tt); // We shouldn't have the default value part.
}
@@ -3104,41 +3939,133 @@ namespace build2
// config prefix and the project substring.
//
{
- diag_record dr;
+ string proj;
+ {
+ const project_name& n (named_project (*root_));
- if (!config)
- dr << fail (t) << "configuration variable '" << name
- << "' does not start with 'config.'";
+ if (!n.empty ())
+ proj = n.variable ();
+ }
- if (!proj.empty ())
+ diag_record dr;
+ do // Breakout loop.
{
- size_t p (name.find ('.' + proj));
+ if (!config)
+ {
+ dr << fail (t) << "configuration variable '" << name
+ << "' does not start with 'config.'";
+ break;
+ }
- if (p == string::npos ||
- ((p += proj.size () + 1) != name.size () && // config.<proj>
- name[p] != '.')) // config.<proj>.
+ auto match = [&name] (const string& proj)
{
+ size_t p (name.find ('.' + proj));
+ return (p != string::npos &&
+ ((p += proj.size () + 1) == name.size () || // config.<proj>
+ name[p] == '.')); // config.<proj>.
+ };
+
+ if (!proj.empty () && match (proj))
+ break;
+
+ // See if this buildfile belongs to a different project. If so, use
+ // the project name as the reporting module name.
+ //
+ if (path_->path != nullptr)
+ {
+ // Note: all sourced/included/imported paths are absolute and
+ // normalized.
+ //
+ const path& f (*path_->path);
+ dir_path d (f.directory ());
+
+ auto p (ctx->scopes.find (d)); // Note: never empty.
+ if (*p.first != &ctx->global_scope)
+ {
+ // The buildfile will most likely be in src which means we may
+ // end up with multiple scopes (see scope_map for background).
+ // First check if one of them is us. If not, then we can extract
+ // the project name from any one of them.
+ //
+ const scope& bs (**p.first); // Save.
+
+ for (; p.first != p.second; ++p.first)
+ {
+ if (root_ == (*p.first)->root_scope ())
+ break;
+ }
+
+ if (p.first == p.second)
+ {
+ // Note: we expect the project itself to be named.
+ //
+ const project_name& n (project (*bs.root_scope ()));
+
+ if (!n.empty ())
+ {
+ // If the buildfile comes from a different project, then
+ // it's more likely to use the imported project's config
+ // variables. So replace proj with that for diagnostics
+ // below.
+ //
+ proj = n.variable ();
+
+ if (*report != "false" && verb >= 2)
+ report_module = n;
+ }
+ }
+ }
+ else
+ {
+ // If the buildfile is not in any project, then it could be
+ // installed.
+ //
+ // Per import2_buildfile(), exported buildfiles are installed
+ // into $install.buildfile/<proj>/....
+ //
+ const dir_path& id (build_install_buildfile);
+
+ if (!id.empty () && d.sub (id))
+ {
+ dir_path l (d.leaf (id));
+ if (!l.empty ())
+ {
+ project_name n (*l.begin ());
+ proj = n.variable ();
+
+ if (*report != "false" && verb >= 2)
+ report_module = move (n);
+ }
+ }
+ }
+ }
+
+ if (!proj.empty () && match (proj))
+ break;
+
+ // Note: only if proj not empty (see above).
+ //
+ if (!proj.empty ())
dr << fail (t) << "configuration variable '" << name
<< "' does not include project name";
- }
}
+ while (false);
if (!dr.empty ())
dr << info << "expected variable name in the 'config[.**]."
<< (proj.empty () ? "<project>" : proj.c_str ()) << ".**' form";
}
- const variable& var (
- parse_variable_name (move (name), get_location (t)));
- apply_variable_attributes (var);
+ var = &parse_variable_name (move (name), get_location (t));
+ apply_variable_attributes (*var);
// Note that even though we are relying on the config.** variable
// pattern to set global visibility, let's make sure as a sanity check.
//
- if (var.visibility != variable_visibility::global)
+ if (var->visibility != variable_visibility::global)
{
- fail (t) << "configuration variable " << var << " has "
- << var.visibility << " visibility";
+ fail (t) << "configuration variable " << *var << " has "
+ << var->visibility << " visibility";
}
// See if we have the default value part.
@@ -3160,15 +4087,15 @@ namespace build2
//
bool dev;
{
- size_t p (var.name.rfind ('.'));
- dev = p != 6 && var.name.compare (p + 1, string::npos, "develop") == 0;
+ size_t p (var->name.rfind ('.'));
+ dev = p != 6 && var->name.compare (p + 1, string::npos, "develop") == 0;
}
uint64_t sflags (0);
if (dev)
{
- if (var.type != &value_traits<bool>::value_type)
- fail (loc) << var << " variable must be of type bool";
+ if (var->type != &value_traits<bool>::value_type)
+ fail (loc) << *var << " variable must be of type bool";
// This is quite messy: below we don't always parse the value (plus it
// may be computed) so here we just peek at the next token. But we
@@ -3177,10 +4104,10 @@ namespace build2
if (!def_val ||
peek (lexer_mode::value, '@') != type::word ||
peeked ().value != "false")
- fail (loc) << var << " variable default value must be literal false";
+ fail (loc) << *var << " variable default value must be literal false";
if (nullable)
- fail (loc) << var << " variable must not be nullable";
+ fail (loc) << *var << " variable must not be nullable";
sflags |= config::save_false_omitted;
}
@@ -3189,7 +4116,7 @@ namespace build2
// in order to mark it as saved. We also have to do this to get the new
// value status.
//
- l = config::lookup_config (new_val, *root_, var, sflags);
+ l = config::lookup_config (new_val, *root_, *var, sflags);
// Handle the default value.
//
@@ -3225,12 +4152,12 @@ namespace build2
else
{
value lhs, rhs (parse_variable_value (t, tt, !dev /* mode */));
- apply_value_attributes (&var, lhs, move (rhs), type::assign);
+ apply_value_attributes (var, lhs, move (rhs), type::assign);
if (!nullable)
nullable = lhs.null;
- l = config::lookup_config (new_val, *root_, var, move (lhs), sflags);
+ l = config::lookup_config (new_val, *root_, *var, move (lhs), sflags);
}
}
@@ -3241,51 +4168,86 @@ namespace build2
// then the user is expected to handle the undefined case).
//
if (!nullable && l.defined () && l->null)
- fail (loc) << "null value in non-nullable variable " << var;
+ fail (loc) << "null value in non-nullable variable " << *var;
}
// We will be printing the report at either level 2 (-v) or 3 (-V)
- // depending on the final value of config_report_new.
+ // depending on the final value of config_report::new_value.
//
- // Note that for the config_report_new calculation we only incorporate
- // variables that we are actually reporting.
+ // Note that for the config_report::new_value calculation we only
+ // incorporate variables that we are actually reporting.
//
if (*report != "false" && verb >= 2)
{
+ // Find existing or insert new config_report entry for this module.
+ //
+ auto i (find_if (config_reports.begin (),
+ config_reports.end (),
+ [&report_module] (const config_report& r)
+ {
+ return r.module == report_module;
+ }));
+
+ if (i == config_reports.end ())
+ {
+ config_reports.push_back (
+ config_report {move (report_module), {}, false});
+ i = config_reports.end () - 1;
+ }
+
+ auto& report_values (i->values);
+ bool& report_new_value (i->new_value);
+
// We don't want to lookup the report variable value here since it's
// most likely not set yet.
//
if (!report_var.empty ())
{
+ if (org_var.empty () && var != nullptr)
+ org_var = var->name;
+
// In a somewhat hackish way we pass the variable in an undefined
// lookup.
//
+ // Note: consistent with parse_variable_name() wrt overridability.
+ //
l = lookup ();
l.var = &root_->var_pool ().insert (
- move (report_var), true /* overridable */);
+ move (report_var),
+ report_var.find ('.') != string::npos /* overridable */);
}
if (l.var != nullptr)
{
- auto r (make_pair (l, move (*report)));
-
// If we have a duplicate, update it (it could be useful to have
// multiple config directives to "probe" the value before calculating
// the default; see lookup_config() for details).
//
- auto i (find_if (config_report.begin (),
- config_report.end (),
- [&l] (const pair<lookup, string>& p)
+ // Since the original variable is what the user will see in the
+ // report, we prefer that as a key.
+ //
+ auto i (find_if (report_values.begin (),
+ report_values.end (),
+ [&org_var, &l] (const config_report::value& v)
{
- return p.first.var == l.var;
+ return (v.org.empty () && org_var.empty ()
+ ? v.val.var == l.var
+ : (v.org.empty ()
+ ? v.val.var->name == org_var
+ : v.org == l.var->name));
}));
- if (i == config_report.end ())
- config_report.push_back (move (r));
+ if (i == report_values.end ())
+ report_values.push_back (
+ config_report::value {l, move (*report), move (org_var)});
else
- *i = move (r);
+ {
+ i->val = l;
+ i->fmt = move (*report);
+ if (i->org.empty ()) i->org = move (org_var);
+ }
- config_report_new = config_report_new || new_val;
+ report_new_value = report_new_value || new_val;
}
}
@@ -3342,118 +4304,306 @@ namespace build2
if (stage_ == stage::boot)
fail (t) << "import during bootstrap";
- // General import format:
+ // General import form:
//
// import[?!] [<attrs>] <var> = [<attrs>] (<target>|<project>%<target>])+
//
+ // Special form for importing buildfiles:
+ //
+ // import[?!] [<attrs>] (<target>|<project>%<target>])+
+ //
bool opt (t.value.back () == '?');
- bool ph2 (opt || t.value.back () == '!');
+ optional<string> ph2 (opt || t.value.back () == '!'
+ ? optional<string> (string ())
+ : nullopt);
// We are now in the normal lexing mode and we let the lexer handle `=`.
//
next_with_attributes (t, tt);
- // Get variable attributes, if any, and deal with the special metadata
- // attribute. Since currently it can only appear in the import directive,
- // we handle it in an ad hoc manner.
+ // Get variable (or value, in the second form) attributes, if any, and
+ // deal with the special metadata and rule_hint attributes. Since
+ // currently they can only appear in the import directive, we handle them
+ // in an ad hoc manner.
//
attributes_push (t, tt);
- attributes& as (attributes_top ());
- bool meta (false);
- for (auto i (as.begin ()); i != as.end (); )
+ bool meta (false); // Import with metadata.
+ bool once (false); // Import buildfile once.
+ bool nodt (false); // Import buildfile without default target semantics.
{
- if (i->name == "metadata")
- {
- if (!ph2)
- fail (as.loc) << "loading metadata requires immediate import" <<
- info << "consider using the import! directive instead";
+ attributes& as (attributes_top ());
+ const location& l (as.loc);
- meta = true;
- }
- else
+ for (auto i (as.begin ()); i != as.end (); )
{
- ++i;
- continue;
- }
+ const string& n (i->name);
+ value& v (i->value);
- i = as.erase (i);
- }
+ if (n == "metadata")
+ {
+ if (!ph2)
+ fail (l) << "loading metadata requires immediate import" <<
+ info << "consider using the import! directive instead";
- if (tt != type::word)
- fail (t) << "expected variable name instead of " << t;
+ meta = true;
+ }
+ else if (n == "no_default_target")
+ {
+ nodt = true;
+ }
+ else if (n == "once")
+ {
+ once = true;
+ }
+ else if (n == "rule_hint")
+ {
+ if (!ph2)
+ fail (l) << "rule hint can only be used with immediate import" <<
+ info << "consider using the import! directive instead";
- const variable& var (
- parse_variable_name (move (t.value), get_location (t)));
- apply_variable_attributes (var);
+ // Here we only allow a single name.
+ //
+ try
+ {
+ ph2 = convert<string> (move (v));
- if (var.visibility > variable_visibility::scope)
- {
- fail (t) << "variable " << var << " has " << var.visibility
- << " visibility but is assigned in import";
+ if (ph2->empty ())
+ throw invalid_argument ("empty name");
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid " << n << " attribute value: " << e;
+ }
+ }
+ else
+ {
+ ++i;
+ continue;
+ }
+
+ i = as.erase (i);
+ }
}
- // Next should come the assignment operator. Note that we don't support
+ // Note that before supporting the second form (without <var>) we used to
+ // parse the value after assignment in the value mode. However, we don't
+ // really need to since what we should have is a bunch of target names.
+ // In other words, whatever the value mode does not treat as special
+ // compared to the normal mode (like `:`) would be illegal here.
+ //
+ // Note that we expant patterns for the ad hoc import case:
+ //
+ // import sub = */
+ //
+ // @@ PAT: the only issue here is that we currently pattern-expand var
+ // name (same assue as with target-specific var names).
+ //
+ if (!start_names (tt))
+ fail (t) << "expected variable name or buildfile target instead of " << t;
+
+ location loc (get_location (t));
+ names ns (parse_names (t, tt, pattern_mode::expand));
+
+ // Next could come the assignment operator. Note that we don't support
// default assignment (?=) yet (could make sense when attempting to import
// alternatives or some such).
//
- next (t, tt);
+ type atype;
+ const variable* var (nullptr);
+ if (tt == type::assign || tt == type::append || tt == type::prepend)
+ {
+ var = &parse_variable_name (move (ns), loc);
+ apply_variable_attributes (*var);
+
+ if (var->visibility > variable_visibility::scope)
+ {
+ fail (loc) << "variable " << *var << " has " << var->visibility
+ << " visibility but is assigned in import";
+ }
- if (tt != type::assign && tt != type::append && tt != type::prepend)
- fail (t) << "expected variable assignment instead of " << t;
+ atype = tt;
+ next_with_attributes (t, tt);
+ attributes_push (t, tt, true /* standalone */);
- type atype (tt);
- value& val (atype == type::assign
- ? scope_->assign (var)
- : scope_->append (var));
+ if (!start_names (tt))
+ fail (t) << "expected target to import instead of " << t;
- // The rest should be a list of targets. Parse them similar to a value on
- // the RHS of an assignment (attributes, etc).
- //
- // Note that we expant patterns for the ad hoc import case:
- //
- // import sub = */
+ loc = get_location (t);
+ ns = parse_names (t, tt, pattern_mode::expand);
+ }
+ else if (tt == type::default_assign)
+ fail (t) << "default assignment not yet supported";
+
+
+ // If there are any value attributes, roundtrip the names through the
+ // value applying the attributes.
//
- mode (lexer_mode::value, '@');
- next_with_attributes (t, tt);
+ if (!attributes_top ().empty ())
+ {
+ value lhs, rhs (move (ns));
+ apply_value_attributes (nullptr, lhs, move (rhs), type::assign);
- if (tt == type::newline || tt == type::eos)
- fail (t) << "expected target to import instead of " << t;
+ if (!lhs)
+ fail (loc) << "expected target to import instead of null value";
- const location loc (get_location (t));
+ untypify (lhs, true /* reduce */);
+ ns = move (lhs.as<names> ());
+ }
+ else
+ attributes_pop ();
- if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
+ value* val (var != nullptr ?
+ &(atype == type::assign
+ ? scope_->assign (*var)
+ : scope_->append (*var))
+ : nullptr);
+
+ for (name& n: ns)
{
- names storage;
- for (name& n: reverse (v, storage))
+ // @@ Could this be an out-qualified ad hoc import? Yes, see comment
+ // about buildfile import in import_load().
+ //
+ if (n.pair)
+ fail (loc) << "unexpected pair in import";
+
+ // See if we are importing a buildfile target. Such an import is always
+ // immediate.
+ //
+ bool bf (n.type == "buildfile");
+ if (bf)
{
- // @@ Could this be an out-qualified ad hoc import?
- //
- if (n.pair)
- fail (loc) << "unexpected pair in import";
+ if (meta)
+ fail (loc) << "metadata requested for buildfile target " << n;
- // import() will check the name, if required.
- //
- names r (import (*scope_, move (n), ph2, opt, meta, loc).first);
+ if (var != nullptr)
+ {
+ if (once)
+ fail (loc) << "once importation requested with variable assignment";
+
+ if (nodt)
+ fail (loc) << "no_default_target importation requested with "
+ << "variable assignment";
+ }
+
+ if (ph2 && !ph2->empty ())
+ fail (loc) << "rule hint specified for buildfile target " << n;
+ }
+ else
+ {
+ if (once)
+ fail (loc) << "once importation requested for target " << n;
+
+ if (nodt)
+ fail (loc) << "no_default_target importation requested for target "
+ << n;
+ if (var == nullptr)
+ fail (loc) << "variable assignment required to import target " << n;
+ }
+
+ // import() will check the name, if required.
+ //
+ import_result<scope> ir (
+ import (*scope_,
+ move (n),
+ ph2 ? ph2 : bf ? optional<string> (string ()) : nullopt,
+ opt,
+ meta,
+ loc));
+
+ names& r (ir.name);
+
+ if (val != nullptr)
+ {
if (r.empty ()) // Optional not found.
{
if (atype == type::assign)
- val = nullptr;
+ *val = nullptr;
}
else
{
- if (atype == type::assign)
- val.assign (move (r), &var);
- else if (atype == type::prepend)
- val.prepend (move (r), &var);
- else
- val.append (move (r), &var);
+ // Import (more precisely, alias) the target type into this project
+ // if not known.
+ //
+ // Note that if the result is ignored (val is NULL), then it's fair
+ // to assume this is not necessary.
+ //
+ if (const scope* iroot = ir.target)
+ {
+ const name& n (r.front ());
+ if (n.typed ())
+ import_target_type (*root_, *iroot, n.type, loc);
+ }
+
+ if (atype == type::assign) val->assign (move (r), var);
+ else if (atype == type::prepend) val->prepend (move (r), var);
+ else val->append (move (r), var);
}
if (atype == type::assign)
atype = type::append; // Append subsequent values.
}
+ else
+ {
+ assert (bf);
+
+ if (r.empty ()) // Optional not found.
+ {
+ assert (opt);
+ continue;
+ }
+
+ // Note: see also import_buildfile().
+ //
+ assert (r.size () == 1); // See import_load() for details.
+ name& n (r.front ());
+ path p (n.dir / n.value); // Should already include extension.
+
+ // Note: similar to parse_include().
+ //
+ // Nuance: we insert this buildfile even with once=false in case it
+ // gets imported with once=true from another place.
+ //
+ if (!root_->root_extra->insert_buildfile (p) && once)
+ {
+ l5 ([&]{trace (loc) << "skipping already imported " << p;});
+ continue;
+ }
+
+ // Clear/restore if/switch location.
+ //
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = nullopt;
+
+ try
+ {
+ ifdstream ifs (p);
+
+ auto df = make_diag_frame (
+ [this, &p, &loc] (const diag_record& dr)
+ {
+ dr << info (loc) << p << " imported from here";
+ });
+
+ // @@ Do we want to enter this buildfile? What's the harm (one
+ // benefit is that it will be in dump). But, we currently don't
+ // out-qualify them, though feels like there is nothing fatal
+ // in that, just inaccurate.
+ //
+ source_buildfile (ifs,
+ path_name (p),
+ loc,
+ nodt ? optional<bool> {} : false);
+ }
+ catch (const io_error& e)
+ {
+ fail (loc) << "unable to read imported buildfile " << p << ": " << e;
+ }
+ }
}
next_after_newline (t, tt);
@@ -3495,7 +4645,12 @@ namespace build2
fail (l) << "null value in export";
if (val.type != nullptr)
- untypify (val);
+ {
+ // While feels far-fetched, let's preserve empty typed values in the
+ // result.
+ //
+ untypify (val, false /* reduce */);
+ }
export_value = move (val).as<names> ();
@@ -3582,41 +4737,160 @@ namespace build2
void parser::
parse_define (token& t, type& tt)
{
- // define <derived>: <base>
+ // define [<attrs>] <derived>: <base>
+ // define <alias> = <scope>/<type>
//
// See tests/define.
//
- if (next (t, tt) != type::word)
- fail (t) << "expected name instead of " << t << " in target type "
- << "definition";
+ next_with_attributes (t, tt);
- string dn (move (t.value));
- const location dnl (get_location (t));
+ attributes_push (t, tt);
+ attributes as (attributes_pop ());
- if (next (t, tt) != type::colon)
- fail (t) << "expected ':' instead of " << t << " in target type "
+ if (tt != type::word)
+ fail (t) << "expected name instead of " << t << " in target type "
<< "definition";
+ string n (move (t.value));
+ const location nl (get_location (t));
+
next (t, tt);
- if (tt == type::word)
+ if (tt == type::colon)
{
+ // Handle attributes.
+ //
+ target_type::flag fs (target_type::flag::none);
+ {
+ const location& l (as.loc);
+
+ for (attribute& a: as)
+ {
+ const string& n (a.name);
+ value& v (a.value);
+
+ if (n == "see_through") fs |= target_type::flag::see_through;
+ else if (n == "member_hint") fs |= target_type::flag::member_hint;
+ else
+ fail (l) << "unknown target type definition attribute " << n;
+
+ if (!v.null)
+ fail (l) << "unexpected value in attribute " << n;
+ }
+ }
+
+ if (next (t, tt) != type::word)
+ fail (t) << "expected name instead of " << t << " in target type "
+ << "definition";
+
// Target.
//
const string& bn (t.value);
const target_type* bt (scope_->find_target_type (bn));
if (bt == nullptr)
- fail (t) << "unknown target type " << bn;
+ fail (t) << "unknown target type " << bn <<
+ info << "perhaps the module that defines this target type is "
+ << "not loaded by project " << *scope_->root_scope ();
- if (!root_->derive_target_type (move (dn), *bt).second)
- fail (dnl) << "target type " << dn << " already defined in this "
- << "project";
+ // The derive_target_type() call below does not produce a non-abstract
+ // type if passed an abstract base. So we ban this for now (it's unclear
+ // why would someone want to do this).
+ //
+ if (bt->factory == nullptr)
+ fail (t) << "abstract base target type " << bt->name << "{}";
+
+ // Note that the group{foo}<...> syntax is only recognized for group-
+ // based targets and ad hoc buildscript recipes/rules only match group.
+ // (We may want to relax this for member_hint in the future since its
+ // currently also used on non-mtime-based targets, though what exactly
+ // we will do in ad hoc recipes/rules in this case is fuzzy).
+ //
+ if ((fs & target_type::flag::group) == target_type::flag::group &&
+ !bt->is_a<group> ())
+ fail (t) << "base target type " << bn << " must be group for "
+ << "group-related attribute";
+
+ if (!root_->derive_target_type (move (n), *bt, fs).second)
+ fail (nl) << "target type " << n << " already defined in project "
+ << *root_;
next (t, tt); // Get newline.
}
+ else if (tt == type::assign)
+ {
+ if (!as.empty ())
+ fail (as.loc) << "unexpected target type alias attribute";
+
+ // The rest should be a path-like target type. Parse it as names in
+ // the value mode to get variable expansion, etc.
+ //
+ mode (lexer_mode::value, '@');
+ next (t, tt);
+ const location tl (get_location (t));
+ names ns (
+ parse_names (t, tt, pattern_mode::ignore, "target type", nullptr));
+
+ name* tn (nullptr);
+ if (ns.size () == 1)
+ {
+ tn = &ns.front ();
+
+ if (tn->file ())
+ {
+ try
+ {
+ tn->canonicalize ();
+
+ if (tn->dir.absolute ())
+ tn->dir.normalize ();
+ else
+ tn = nullptr;
+ }
+ catch (const invalid_path&) {tn = nullptr;}
+ catch (const invalid_argument&) {tn = nullptr;}
+ }
+ else
+ tn = nullptr;
+ }
+
+ if (tn == nullptr)
+ fail (tl) << "expected scope-qualified target type instead of " << ns;
+
+ // If we got here, then tn->dir is the scope and tn->value is the target
+ // type.
+ //
+ // NOTE: see similar code in import_target_type().
+ //
+ const target_type* tt (nullptr);
+ if (const scope* rs = ctx->scopes.find_out (tn->dir).root_scope ())
+ {
+ tt = rs->find_target_type (tn->value);
+
+ if (tt == nullptr)
+ fail (tl) << "unknown target type " << tn->value << " in scope "
+ << *rs;
+ }
+ else
+ fail (tl) << "unknown project scope " << tn->dir << " in scope"
+ << "-qualified target type" <<
+ info << "did you forget to import the corresponding project?";
+
+ if (n != tn->value)
+ fail (nl) << "alias target type name " << n << " does not match "
+ << tn->value;
+
+ // Note that this is potentially a shallow reference to a user-derived
+ // target type. Seeing that we only ever destory the entire graph, this
+ // should be ok.
+ //
+ auto p (root_->root_extra->target_types.insert (*tt));
+
+ if (!p.second && &p.first.get () != tt)
+ fail (nl) << "target type " << n << " already defined in this project";
+ }
else
- fail (t) << "expected name instead of " << t << " in target type "
+ fail (t) << "expected ':' or '=' instead of " << t << " in target type "
<< "definition";
next_after_newline (t, tt);
@@ -3625,19 +4899,28 @@ namespace build2
void parser::
parse_if_else (token& t, type& tt)
{
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = get_location (t);
+
parse_if_else (t, tt,
false /* multi */,
[this] (token& t, type& tt, bool s, const string& k)
{
return parse_clause_block (t, tt, s, k);
- });
+ },
+ {});
}
void parser::
parse_if_else (token& t, type& tt,
bool multi,
const function<void (
- token&, type&, bool, const string&)>& parse_block)
+ token&, type&, bool, const string&)>& parse_block,
+ const function<void (
+ token&, token_type&, const string&)>& parse_recipe_directive)
{
// Handle the whole if-else chain. See tests/if-else.
//
@@ -3662,7 +4945,7 @@ namespace build2
// is not an option. So let's skip it.
//
if (taken)
- skip_line (t, tt);
+ skip_line (t, tt); // Skip expression.
else
{
if (tt == type::newline || tt == type::eos)
@@ -3722,31 +5005,65 @@ namespace build2
parse_block (t, tt, !take, k);
taken = taken || take;
}
- else if (!multi) // No lines in multi-curly if-else.
+ else
{
- if (take)
+ // The only valid line in multi-curly if-else is `recipe`.
+ //
+ if (multi)
{
- if (!parse_clause (t, tt, true))
- fail (t) << "expected " << k << "-line instead of " << t;
+ // Note that we cannot do the keyword test if we are replaying. So
+ // we skip it with the understanding that if it's not a keywords,
+ // then we wouldn't have gotten here on the replay.
+ //
+ if (tt == type::word &&
+ (replay_ == replay::play || keyword (t)) &&
+ t.value == "recipe")
+ {
+ if (take)
+ {
+ parse_recipe_directive (t, tt, k);
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
- taken = true;
+ if (tt == type::newline)
+ next (t, tt);
+ }
+ }
+ else
+ fail (t) << "expected " << k << "-block or 'recipe' instead of "
+ << t;
}
else
{
- skip_line (t, tt);
+ if (tt == type::multi_lcbrace)
+ fail (t) << "expected " << k << "-line instead of " << t <<
+ info << "did you forget to specify % recipe header?";
- if (tt == type::newline)
- next (t, tt);
+ if (take)
+ {
+ if (!parse_clause (t, tt, true))
+ fail (t) << "expected " << k << "-line instead of " << t;
+
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
+
+ if (tt == type::newline)
+ next (t, tt);
+ }
}
}
- else
- fail (t) << "expected " << k << "-block instead of " << t;
// See if we have another el* keyword.
//
// Note that we cannot do the keyword test if we are replaying. So we
// skip it with the understanding that if it's not a keywords, then we
- // wouldn't have gotten here on the reply (see parse_recipe() for
+ // wouldn't have gotten here on the replay (see parse_recipe() for
// details).
//
if (k != "else" &&
@@ -3766,19 +5083,28 @@ namespace build2
void parser::
parse_switch (token& t, type& tt)
{
+ auto g = make_guard ([this, old = condition_] () mutable
+ {
+ condition_ = old;
+ });
+ condition_ = get_location (t);
+
parse_switch (t, tt,
false /* multi */,
[this] (token& t, type& tt, bool s, const string& k)
{
return parse_clause_block (t, tt, s, k);
- });
+ },
+ {});
}
void parser::
parse_switch (token& t, type& tt,
bool multi,
const function<void (
- token&, type&, bool, const string&)>& parse_block)
+ token&, type&, bool, const string&)>& parse_block,
+ const function<void (
+ token&, token_type&, const string&)>& parse_recipe_directive)
{
// switch <value> [: <func> [<arg>]] [, <value>...]
// {
@@ -3873,7 +5199,7 @@ namespace build2
{
// Note that we cannot do the keyword test if we are replaying. So we
// skip it with the understanding that if it's not a keywords, then we
- // wouldn't have gotten here on the reply (see parse_recipe() for
+ // wouldn't have gotten here on the replay (see parse_recipe() for
// details). Note that this appears to mean that replay cannot be used
// if we allow lines, only blocks. Consider:
//
@@ -3976,7 +5302,7 @@ namespace build2
if (!e.arg.empty ())
args.push_back (value (e.arg));
- value r (ctx.functions.call (scope_, *e.func, args, l));
+ value r (ctx->functions.call (scope_, *e.func, args, l));
// We support two types of functions: matchers and extractors:
// a matcher returns a statically-typed bool value while an
@@ -4079,25 +5405,49 @@ namespace build2
parse_block (t, tt, !take, k);
taken = taken || take;
}
- else if (!multi) // No lines in multi-curly if-else.
+ else
{
- if (take)
+ if (multi)
{
- if (!parse_clause (t, tt, true))
- fail (t) << "expected " << k << "-line instead of " << t;
+ if (tt == type::word &&
+ (replay_ == replay::play || keyword (t)) &&
+ t.value == "recipe")
+ {
+ if (take)
+ {
+ parse_recipe_directive (t, tt, k);
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
- taken = true;
+ if (tt == type::newline)
+ next (t, tt);
+ }
+ }
+ else
+ fail (t) << "expected " << k << "-block or 'recipe' instead of "
+ << t;
}
else
{
- skip_line (t, tt);
+ if (take)
+ {
+ if (!parse_clause (t, tt, true))
+ fail (t) << "expected " << k << "-line instead of " << t;
- if (tt == type::newline)
- next (t, tt);
+ taken = true;
+ }
+ else
+ {
+ skip_line (t, tt);
+
+ if (tt == type::newline)
+ next (t, tt);
+ }
}
}
- else
- fail (t) << "expected " << k << "-block instead of " << t;
}
if (tt != type::rcbrace)
@@ -4110,10 +5460,10 @@ namespace build2
void parser::
parse_for (token& t, type& tt)
{
- // for <varname>: <value>
+ // for [<var-attrs>] <varname> [<elem-attrs>]: [<val-attrs>] <value>
// <line>
//
- // for <varname>: <value>
+ // for [<var-attrs>] <varname> [<elem-attrs>]: [<val-attrs>] <value>
// {
// <block>
// }
@@ -4124,13 +5474,12 @@ namespace build2
next_with_attributes (t, tt);
attributes_push (t, tt);
- // @@ PAT: currently we pattern-expand for var.
+ // Enable list element attributes.
//
- const location vloc (get_location (t));
- names vns (parse_names (t, tt, pattern_mode::expand));
+ enable_attributes ();
- if (tt != type::colon)
- fail (t) << "expected ':' instead of " << t << " after variable name";
+ const location vloc (get_location (t));
+ names vns (parse_names (t, tt, pattern_mode::preserve));
const variable& var (parse_variable_name (move (vns), vloc));
apply_variable_attributes (var);
@@ -4141,6 +5490,17 @@ namespace build2
<< " visibility but is assigned in for-loop";
}
+ // Parse the list element attributes, if present.
+ //
+ attributes_push (t, tt);
+
+ if (tt != type::colon)
+ fail (t) << "expected ':' instead of " << t << " after variable name";
+
+ // Save element attributes so that we can inject them on each iteration.
+ //
+ attributes val_attrs (attributes_pop ());
+
// Now the value (list of names) to iterate over. Parse it similar to a
// value on the RHS of an assignment (expansion, attributes).
//
@@ -4149,15 +5509,24 @@ namespace build2
value val (parse_value_with_attributes (t, tt, pattern_mode::expand));
- // If this value is a vector, then save its element type so that we
+ // If the value type provides custom iterate function, then use that (see
+ // value_type::iterate for details).
+ //
+ auto iterate (val.type != nullptr ? val.type->iterate : nullptr);
+
+ // If this value is a container, then save its element type so that we
// can typify each element below.
//
const value_type* etype (nullptr);
- if (val && val.type != nullptr)
+ if (!iterate && val && val.type != nullptr)
{
etype = val.type->element_type;
- untypify (val);
+
+ // Note that here we don't want to be reducing empty simple values to
+ // empty lists.
+ //
+ untypify (val, false /* reduce */);
}
if (tt != type::newline)
@@ -4205,32 +5574,50 @@ namespace build2
// Iterate.
//
- value& v (scope_->assign (var)); // Assign even if no iterations.
+ value& lhs (scope_->assign (var)); // Assign even if no iterations.
if (!val)
return;
- names& ns (val.as<names> ());
-
- if (ns.empty ())
- return;
+ names* ns (nullptr);
+ if (!iterate)
+ {
+ ns = &val.as<names> ();
+ if (ns->empty ())
+ return;
+ }
istringstream is (move (body));
- for (auto i (ns.begin ()), e (ns.end ());; )
+ struct data
{
- // Set the variable value.
+ const variable& var;
+ const attributes& val_attrs;
+ uint64_t line;
+ bool block;
+ value& lhs;
+ istringstream& is;
+
+ } d {var, val_attrs, line, block, lhs, is};
+
+ function<void (value&&, bool first)> iteration =
+ [this, &d] (value&& v, bool first)
+ {
+ // Rewind the stream.
+ //
+ if (!first)
+ {
+ d.is.clear ();
+ d.is.seekg (0);
+ }
+
+ // Inject element attributes.
//
- bool pair (i->pair);
- names n;
- n.push_back (move (*i));
- if (pair) n.push_back (move (*++i));
- v = value (move (n));
+ attributes_.push_back (d.val_attrs);
- if (etype != nullptr)
- typify (v, *etype, &var);
+ apply_value_attributes (&d.var, d.lhs, move (v), type::assign);
- lexer l (is, *path_, line);
+ lexer l (d.is, *path_, d.line);
lexer* ol (lexer_);
lexer_ = &l;
@@ -4238,7 +5625,7 @@ namespace build2
type tt;
next (t, tt);
- if (block)
+ if (d.block)
{
next (t, tt); // {
next (t, tt); // <newline>
@@ -4246,20 +5633,33 @@ namespace build2
parse_clause (t, tt);
- if (tt != (block ? type::rcbrace : type::eos))
- fail (t) << "expected name " << (block ? "or '}' " : "")
+ if (tt != (d.block ? type::rcbrace : type::eos))
+ fail (t) << "expected name " << (d.block ? "or '}' " : "")
<< "instead of " << t;
lexer_ = ol;
+ };
- if (++i == e)
- break;
+ if (!iterate)
+ {
+ for (auto b (ns->begin ()), i (b), e (ns->end ()); i != e; ++i)
+ {
+ // Set the variable value.
+ //
+ bool pair (i->pair);
+ names n;
+ n.push_back (move (*i));
+ if (pair) n.push_back (move (*++i));
+ value v (move (n));
- // Rewind the stream.
- //
- is.clear ();
- is.seekg (0);
+ if (etype != nullptr)
+ typify (v, *etype, &var);
+
+ iteration (move (v), i == b);
+ }
}
+ else
+ iterate (val, iteration);
}
void parser::
@@ -4332,7 +5732,7 @@ namespace build2
if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
{
names storage;
- cout << reverse (v, storage) << endl;
+ cout << reverse (v, storage, true /* reduce */) << endl;
}
else
cout << "[null]" << endl;
@@ -4365,7 +5765,7 @@ namespace build2
if (value v = parse_value_with_attributes (t, tt, pattern_mode::expand))
{
names storage;
- dr << reverse (v, storage);
+ dr << reverse (v, storage, true /* reduce */);
}
if (tt != type::eos)
@@ -4395,8 +5795,10 @@ namespace build2
if (ns.empty ())
{
+ // Indent two spaces.
+ //
if (scope_ != nullptr)
- dump (*scope_, " "); // Indent two spaces.
+ dump (scope_, nullopt /* action */, dump_format::buildfile, " ");
else
os << " <no current scope>" << endl;
}
@@ -4414,8 +5816,10 @@ namespace build2
const target* t (enter_target::find_target (*this, n, o, l, trace));
+ // Indent two spaces.
+ //
if (t != nullptr)
- dump (*t, " "); // Indent two spaces.
+ dump (t, nullopt /* action */, dump_format::buildfile, " ");
else
{
os << " <no target " << n;
@@ -4437,10 +5841,12 @@ namespace build2
{
// Enter a variable name for assignment (as opposed to lookup).
+ // If the variable is qualified (and thus public), make it overridable.
+ //
// Note that the overridability can still be restricted (e.g., by a module
// that enters this variable or by a pattern).
//
- bool ovr (true);
+ bool ovr (on.find ('.') != string::npos);
auto r (scope_->var_pool ().insert (move (on), nullptr, nullptr, &ovr));
if (!r.second)
@@ -4474,9 +5880,13 @@ namespace build2
{
// Parse and enter a variable name for assignment (as opposed to lookup).
- // The list should contain a single, simple name.
+ // The list should contain a single, simple name. Go an extra mile to
+ // issue less confusing diagnostics.
//
- if (ns.size () != 1 || ns[0].pattern || !ns[0].simple () || ns[0].empty ())
+ size_t n (ns.size ());
+ if (n == 0 || (n == 1 && ns[0].empty ()))
+ fail (l) << "empty variable name";
+ else if (n != 1 || ns[0].pattern || !ns[0].simple ())
fail (l) << "expected variable name instead of " << ns;
return parse_variable_name (move (ns[0].value), l);
@@ -4532,7 +5942,7 @@ namespace build2
// Note that the pattern is preserved if insert fails with regex_error.
//
p = scope_->target_vars[ptt].insert (pt, move (pat)).insert (
- var, kind == type::assign);
+ var, kind == type::assign, false /* reset_extra */);
}
catch (const regex_error& e)
{
@@ -4546,7 +5956,12 @@ namespace build2
// We store prepend/append values untyped (similar to overrides).
//
if (rhs.type != nullptr && kind != type::assign)
- untypify (rhs);
+ {
+ // Our heuristics for prepend/append of a typed value is to preserve
+ // empty (see apply_value_attributes() for details) so do not reduce.
+ //
+ untypify (rhs, false /* reduce */);
+ }
if (p.second)
{
@@ -4633,32 +6048,119 @@ namespace build2
: value (names ());
}
- static const value_type*
- map_type (const string& n)
+ const value_type* parser::
+ find_value_type (const scope*, const string& n)
{
- auto ptr = [] (const value_type& vt) {return &vt;};
-
- return
- n == "bool" ? ptr (value_traits<bool>::value_type) :
- n == "int64" ? ptr (value_traits<int64_t>::value_type) :
- n == "uint64" ? ptr (value_traits<uint64_t>::value_type) :
- n == "string" ? ptr (value_traits<string>::value_type) :
- n == "path" ? ptr (value_traits<path>::value_type) :
- n == "dir_path" ? ptr (value_traits<dir_path>::value_type) :
- n == "abs_dir_path" ? ptr (value_traits<abs_dir_path>::value_type) :
- n == "name" ? ptr (value_traits<name>::value_type) :
- n == "name_pair" ? ptr (value_traits<name_pair>::value_type) :
- n == "target_triplet" ? ptr (value_traits<target_triplet>::value_type) :
- n == "project_name" ? ptr (value_traits<project_name>::value_type) :
-
- n == "int64s" ? ptr (value_traits<int64s>::value_type) :
- n == "uint64s" ? ptr (value_traits<uint64s>::value_type) :
- n == "strings" ? ptr (value_traits<strings>::value_type) :
- n == "paths" ? ptr (value_traits<paths>::value_type) :
- n == "dir_paths" ? ptr (value_traits<dir_paths>::value_type) :
- n == "names" ? ptr (value_traits<vector<name>>::value_type) :
-
- nullptr;
+ switch (n[0])
+ {
+ case 'a':
+ {
+ if (n == "abs_dir_path") return &value_traits<abs_dir_path>::value_type;
+ break;
+ }
+ case 'b':
+ {
+ if (n == "bool") return &value_traits<bool>::value_type;
+ break;
+ }
+ case 'c':
+ {
+ if (n == "cmdline") return &value_traits<cmdline>::value_type;
+ break;
+ }
+ case 'd':
+ {
+ if (n.compare (0, 8, "dir_path") == 0)
+ {
+ if (n[8] == '\0') return &value_traits<dir_path>::value_type;
+ if (n[8] == 's' &&
+ n[9] == '\0') return &value_traits<dir_paths>::value_type;
+ }
+ break;
+ }
+ case 'i':
+ {
+ if (n.compare (0, 5, "int64") == 0)
+ {
+ if (n[5] == '\0') return &value_traits<int64_t>::value_type;
+ if (n[5] == 's' &&
+ n[6] == '\0') return &value_traits<int64s>::value_type;
+ }
+ break;
+ }
+ case 'j':
+ {
+ if (n.compare (0, 4, "json") == 0)
+ {
+ if (n[4] == '\0') return &value_traits<json_value>::value_type;
+ if (n == "json_array") return &value_traits<json_array>::value_type;
+ if (n == "json_object")
+ return &value_traits<json_object>::value_type;
+ if (n == "json_set")
+ return &value_traits<set<json_value>>::value_type;
+ if (n == "json_map")
+ return &value_traits<map<json_value, json_value>>::value_type;
+ }
+ break;
+ }
+ case 'n':
+ {
+ if (n.compare (0, 4, "name") == 0)
+ {
+ if (n[4] == '\0') return &value_traits<name>::value_type;
+ if (n[4] == 's' &&
+ n[5] == '\0') return &value_traits<vector<name>>::value_type;
+ if (n == "name_pair") return &value_traits<name_pair>::value_type;
+ }
+ break;
+ }
+
+ case 'p':
+ {
+ if (n.compare (0, 4, "path") == 0)
+ {
+ if (n[4] == '\0') return &value_traits<path>::value_type;
+ if (n[4] == 's' &&
+ n[5] == '\0') return &value_traits<paths>::value_type;
+ }
+ else if (n == "project_name")
+ return &value_traits<project_name>::value_type;
+ break;
+ }
+ case 's':
+ {
+ if (n.compare (0, 6, "string") == 0)
+ {
+ if (n[6] == '\0') return &value_traits<string>::value_type;
+ if (n[6] == 's' &&
+ n[7] == '\0') return &value_traits<strings>::value_type;
+ if (n == "string_set") return &value_traits<set<string>>::value_type;
+ if (n == "string_map")
+ return &value_traits<map<string,string>>::value_type;
+ }
+ break;
+ }
+ case 't':
+ {
+ if (n == "target_triplet")
+ return &value_traits<target_triplet>::value_type;
+ break;
+ }
+ case 'u':
+ {
+ if (n.compare (0, 6, "uint64") == 0)
+ {
+ if (n[6] == '\0') return &value_traits<uint64_t>::value_type;
+ if (n[6] == 's' &&
+ n[7] == '\0') return &value_traits<uint64s>::value_type;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return nullptr;
}
void parser::
@@ -4680,19 +6182,62 @@ namespace build2
string& n (a.name);
value& v (a.value);
- if (const value_type* t = map_type (n))
+ if (n == "visibility")
+ {
+ try
+ {
+ string s (convert<string> (move (v)));
+
+ variable_visibility r;
+ if (s == "global") r = variable_visibility::global;
+ else if (s == "project") r = variable_visibility::project;
+ else if (s == "scope") r = variable_visibility::scope;
+ else if (s == "target") r = variable_visibility::target;
+ else if (s == "prerequisite") r = variable_visibility::prereq;
+ else throw invalid_argument ("unknown visibility name");
+
+ if (vis && r != *vis)
+ fail (l) << "conflicting variable visibilities: " << s << ", "
+ << *vis;
+
+ vis = r;
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid " << n << " attribute value: " << e;
+ }
+ }
+ else if (n == "overridable")
+ {
+ try
+ {
+ // Treat absent value (represented as NULL) as true.
+ //
+ bool r (v.null || convert<bool> (move (v)));
+
+ if (ovr && r != *ovr)
+ fail (l) << "conflicting variable overridabilities";
+
+ ovr = r;
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid " << n << " attribute value: " << e;
+ }
+ }
+ else if (const value_type* t = find_value_type (root_, n))
{
+ if (!v.null)
+ fail (l) << "unexpected value in attribute " << a;
+
if (type != nullptr && t != type)
- fail (l) << "multiple variable types: " << n << ", " << type->name;
+ fail (l) << "conflicting variable types: " << n << ", "
+ << type->name;
type = t;
- // Fall through.
}
else
fail (l) << "unknown variable attribute " << a;
-
- if (!v.null)
- fail (l) << "unexpected value in attribute " << a;
}
if (type != nullptr && var.type != nullptr)
@@ -4704,15 +6249,33 @@ namespace build2
<< var.type->name << " to " << type->name;
}
- //@@ TODO: the same checks for vis and ovr (when we have the corresponding
- // attributes).
+ if (vis)
+ {
+ // Note that this logic naturally makes sure that a project-private
+ // variable doesn't have global visibility (since it would have been
+ // entered with the project visibility).
+ //
+ if (var.visibility == *vis)
+ vis = nullopt;
+ else if (var.visibility > *vis) // See variable_pool::update().
+ fail (l) << "changing variable " << var << " visibility from "
+ << var.visibility << " to " << *vis;
+ }
+
+ if (ovr)
+ {
+ // Note that the overridability incompatibilities are diagnosed by
+ // update(). So we just need to diagnose the project-private case.
+ //
+ if (*ovr && var.owner != &ctx->var_pool)
+ fail (l) << "private variable " << var << " cannot be overridable";
+ }
if (type || vis || ovr)
- ctx.var_pool.update (const_cast<variable&> (var),
- type,
- vis ? &*vis : nullptr,
- ovr ? &*ovr : nullptr);
-
+ var.owner->update (const_cast<variable&> (var),
+ type,
+ vis ? &*vis : nullptr,
+ ovr ? &*ovr : nullptr);
}
void parser::
@@ -4722,7 +6285,7 @@ namespace build2
type kind)
{
attributes as (attributes_pop ());
- const location& l (as.loc);
+ const location& l (as.loc); // This points to value if no attributes.
// Essentially this is an attribute-augmented assign/append/prepend.
//
@@ -4736,16 +6299,18 @@ namespace build2
if (n == "null")
{
+ // @@ Looks like here we assume representationally empty?
+ //
if (rhs && !rhs.empty ()) // Note: null means we had an expansion.
fail (l) << "value with null attribute";
null = true;
// Fall through.
}
- else if (const value_type* t = map_type (n))
+ else if (const value_type* t = find_value_type (root_, n))
{
if (type != nullptr && t != type)
- fail (l) << "multiple value types: " << n << ", " << type->name;
+ fail (l) << "conflicting value types: " << n << ", " << type->name;
type = t;
// Fall through.
@@ -4793,6 +6358,13 @@ namespace build2
bool rhs_type (false);
if (rhs.type != nullptr)
{
+ // Our heuristics is to not reduce typed RHS empty simple values for
+ // prepend/append and additionally for assign provided LHS is a
+ // container.
+ //
+ bool reduce (kind == type::assign &&
+ (type == nullptr || !type->container));
+
// Only consider RHS type if there is no explicit or variable type.
//
if (type == nullptr)
@@ -4803,7 +6375,7 @@ namespace build2
// Reduce this to the untyped value case for simplicity.
//
- untypify (rhs);
+ untypify (rhs, reduce);
}
if (kind == type::assign)
@@ -4832,6 +6404,17 @@ namespace build2
}
else
{
+ auto df = make_diag_frame (
+ [this, var, &l](const diag_record& dr)
+ {
+ if (!l.empty ())
+ {
+ dr << info (l);
+ if (var != nullptr) dr << "variable " << var->name << ' ';
+ dr << "value is assigned here";
+ }
+ });
+
if (kind == type::assign)
{
if (rhs)
@@ -5186,17 +6769,38 @@ namespace build2
if (pre_parse_)
return v; // Empty.
- if (v.type != nullptr || !v || v.as<names> ().size () != 1)
- fail (l) << "expected target before ':'";
-
+ // We used to return this as a <target>:<name> pair but that meant we
+ // could not handle an out-qualified target (which is represented as
+ // <target>@<out> pair). As a somewhat of a hack, we deal with this by
+ // changing the order of the name and target to be <name>:<target> with
+ // the qualified case becoming a "tripple pair" <name>:<target>@<out>.
+ //
+ // @@ This is actually not great since it's possible to observe such a
+ // tripple pair, for example with `print (file{x}@./:y)`.
+ //
if (n.type != nullptr || !n || n.as<names> ().size () != 1 ||
n.as<names> ()[0].pattern)
fail (nl) << "expected variable name after ':'";
- names& ns (v.as<names> ());
+ names& ns (n.as<names> ());
ns.back ().pair = ':';
- ns.push_back (move (n.as<names> ().back ()));
- return v;
+
+ if (v.type == nullptr && v)
+ {
+ names& ts (v.as<names> ());
+
+ size_t s (ts.size ());
+ if (s == 1 || (s == 2 && ts.front ().pair == '@'))
+ {
+ ns.push_back (move (ts.front ()));
+ if (s == 2)
+ ns.push_back (move (ts.back ()));
+
+ return n;
+ }
+ }
+
+ fail (l) << "expected target before ':'" << endf;
}
else
{
@@ -5265,8 +6869,13 @@ namespace build2
}
pair<bool, location> parser::
- attributes_push (token& t, type& tt, bool standalone)
+ attributes_push (token& t, type& tt, bool standalone, bool next_token)
{
+ // To make sure that the attributes are not standalone we need to read the
+ // token which follows ']'.
+ //
+ assert (standalone || next_token);
+
location l (get_location (t));
bool has (tt == type::lsbrace);
@@ -5340,28 +6949,27 @@ namespace build2
if (tt != type::rsbrace)
fail (t) << "expected ']' instead of " << t;
- next (t, tt);
-
- if (tt == type::newline || tt == type::eos)
+ if (next_token)
{
- if (!standalone)
- fail (t) << "standalone attributes";
+ next (t, tt);
+
+ if (tt == type::newline || tt == type::eos)
+ {
+ if (!standalone)
+ fail (t) << "standalone attributes";
+ }
+ //
+ // Verify that the attributes are separated from the following word or
+ // "word-producing" token.
+ //
+ else if (!t.separated && (tt == type::word ||
+ tt == type::dollar ||
+ tt == type::lparen ||
+ tt == type::lcbrace))
+ fail (t) << "whitespace required after attributes" <<
+ info (l) << "use the '\\[' escape sequence if this is a wildcard "
+ << "pattern";
}
- //
- // We require attributes to be separated from the following word or
- // "word-producing" tokens (`$` for variable expansions/function calls,
- // `(` for eval contexts, and `{` for name generation) to reduce the
- // possibility of confusing them with wildcard patterns. Consider:
- //
- // ./: [abc]-foo.txt
- //
- else if (!t.separated && (tt == type::word ||
- tt == type::dollar ||
- tt == type::lparen ||
- tt == type::lcbrace))
- fail (t) << "whitespace required after attributes" <<
- info (l) << "use the '\\[' escape sequence if this is a wildcard "
- << "pattern";
return make_pair (has, l);
}
@@ -5596,9 +7204,11 @@ namespace build2
// May throw invalid_path.
//
auto include_pattern =
- [&r, &append, &include_match, sp, &l, this] (string&& p,
- optional<string>&& e,
- bool a)
+ [this,
+ &append, &include_match,
+ &r, sp, &l, &dir] (string&& p,
+ optional<string>&& e,
+ bool a)
{
// If we don't already have any matches and our pattern doesn't contain
// multiple recursive wildcards, then the result will be unique and we
@@ -5645,14 +7255,62 @@ namespace build2
// multiple entries for each pattern.
//
if (!interm)
- d.appf (move (m).representation (), optional<string> (d.e));
+ {
+ // If the extension is empty (meaning there should be no extension,
+ // for example hxx{Q*.}), skip entries with extensions.
+ //
+ if (!d.e || !d.e->empty () || m.extension_cstring () == nullptr)
+ d.appf (move (m).representation (), optional<string> (d.e));
+ }
return true;
};
+ const function<bool (const dir_entry&)> dangling (
+ [&dir] (const dir_entry& de)
+ {
+ bool sl (de.ltype () == entry_type::symlink);
+
+ const path& n (de.path ());
+
+ // One case where this turned out to be not worth it practically
+ // (too much noise) is the backlinks to executables (and the
+ // associated DLL assemblies for Windows). So we now have this
+ // heuristics that if this looks like an executable (or DLL for
+ // Windows), then we omit the warning. On POSIX, where executables
+ // don't have extensions, we will consider it an executable only if
+ // we are not looking for directories (which also normally don't
+ // have extension).
+ //
+ // @@ PEDANTIC: re-enable if --pedantic.
+ //
+ if (sl)
+ {
+ string e (n.extension ());
+
+ if ((e.empty () && !dir) ||
+ path_traits::compare (e, "exe") == 0 ||
+ path_traits::compare (e, "dll") == 0 ||
+ path_traits::compare (e, "pdb") == 0 || // .{exe,dll}.pdb
+ (path_traits::compare (e, "dlls") == 0 && // .exe.dlls assembly
+ path_traits::compare (n.base ().extension (), "exe") == 0))
+ return true;
+ }
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry")
+ << ' ' << de.base () / n;
+
+ return true;
+ });
+
try
{
- path_search (path (move (p)), process, *sp);
+ path_search (path (move (p)),
+ process,
+ *sp,
+ path_match_flags::follow_symlinks,
+ dangling);
}
catch (const system_error& e)
{
@@ -5820,6 +7478,7 @@ namespace build2
if ((n.pair & 0x02) != 0)
{
e = move (n.type);
+ n.type.clear ();
// Remove non-empty extension from the name (it got to be there, see
// above).
@@ -6097,9 +7756,35 @@ namespace build2
bool concat_quoted_first (false);
name concat_data;
- auto concat_typed = [this, &vnull, &vtype,
- &concat, &concat_data] (value&& rhs,
- const location& loc)
+ auto concat_diag_multiple = [this] (const location& loc,
+ const char* what_expansion)
+ {
+ diag_record dr (fail (loc));
+
+ dr << "concatenating " << what_expansion << " contains multiple values";
+
+ // See if this looks like a subscript without an evaluation context and
+ // help the user out.
+ //
+ if (mode () != lexer_mode::eval)
+ {
+ const token& t (peeked ()); // Should be peeked at.
+
+ if (t.type == type::word &&
+ t.qtype == quote_type::unquoted &&
+ t.value[0] == '[')
+ {
+ dr << info << "wrap it in (...) evaluation context if this "
+ << "is value subscript";
+ }
+ }
+ };
+
+ auto concat_typed = [this, what, &vnull, &vtype,
+ &concat, &concat_data,
+ &concat_diag_multiple] (value&& rhs,
+ const location& loc,
+ const char* what_expansion)
{
// If we have no LHS yet, then simply copy value/type.
//
@@ -6116,6 +7801,10 @@ namespace build2
// RHS.
//
+ // Note that if RHS contains multiple values then we expect the result
+ // to be a single value somehow or, more likely, there to be no
+ // suitable $builtin.concat() overload.
+ //
a.push_back (move (rhs));
const char* l ((a[0].type != nullptr ? a[0].type->name : "<untyped>"));
@@ -6132,7 +7821,10 @@ namespace build2
dr << info << "use quoting to force untyped concatenation";
});
- p = ctx.functions.try_call (
+ if (ctx == nullptr)
+ fail << "literal " << what << " expected";
+
+ p = ctx->functions.try_call (
scope_, "builtin.concat", vector_view<value> (a), loc);
}
@@ -6154,18 +7846,22 @@ namespace build2
if (!vnull)
{
if (vtype != nullptr)
- untypify (rhs);
+ untypify (rhs, true /* reduce */);
names& d (rhs.as<names> ());
- // If the value is empty, then untypify() will (typically; no pun
- // intended) represent it as an empty sequence of names rather than
- // a sequence of one empty name. This is usually what we need (see
- // simple_reverse() for details) but not in this case.
+ // If the value is empty, then we asked untypify() to reduce it to
+ // an empty sequence of names rather than a sequence of one empty
+ // name.
//
- if (!d.empty ())
+ if (size_t n = d.size ())
{
- assert (d.size () == 1); // Must be a single value.
+ if (n != 1)
+ {
+ assert (what_expansion != nullptr);
+ concat_diag_multiple (loc, what_expansion);
+ }
+
concat_data = move (d[0]);
}
}
@@ -6272,6 +7968,8 @@ namespace build2
// continue accumulating or inject. We inject if the next token is not a
// word, var expansion, or eval context or if it is separated.
//
+ optional<pair<const value_type*, name>> path_concat; // Backup.
+
if (concat && last_concat ())
{
// Concatenation does not affect the tokens we get, only what we do
@@ -6311,6 +8009,13 @@ namespace build2
// dir/{$str}
// file{$str}
//
+ // And yet another exception: if the type is path or dir_path and the
+ // pattern mode is not ignore, then we will inject to try our luck in
+ // interpreting the concatenation result as a path pattern. This makes
+ // sure patterns like `$src_base/*.txt` work, naturally. Failed that,
+ // we will handle this concatenation as we do for other types (via the
+ // path_concat backup).
+ //
// A concatenation cannot produce value/NULL.
//
@@ -6322,12 +8027,14 @@ namespace build2
bool e1 (tt == type::lcbrace && !peeked ().separated);
bool e2 (pp || dp != nullptr || tp != nullptr);
+ const value_type* pt (&value_traits<path>::value_type);
+ const value_type* dt (&value_traits<dir_path>::value_type);
+
if (e1 || e2)
{
- if (vtype == &value_traits<path>::value_type ||
- vtype == &value_traits<string>::value_type)
+ if (vtype == pt || vtype == &value_traits<string>::value_type)
; // Representation is already in concat_data.value.
- else if (vtype == &value_traits<dir_path>::value_type)
+ else if (vtype == dt)
concat_data.value = move (concat_data.dir).representation ();
else
{
@@ -6342,6 +8049,20 @@ namespace build2
vtype = nullptr;
// Fall through to injection.
}
+ else if (pmode != pattern_mode::ignore &&
+ (vtype == pt || vtype == dt))
+ {
+ path_concat = make_pair (vtype, concat_data);
+
+ // Note: for path the representation is already in
+ // concat_data.value.
+ //
+ if (vtype == dt)
+ concat_data.value = move (concat_data.dir).representation ();
+
+ vtype = nullptr;
+ // Fall through to injection.
+ }
else
{
// This is either a simple name (untyped concatenation; in which
@@ -6435,7 +8156,7 @@ namespace build2
//
names ns;
ns.push_back (name (move (val)));
- concat_typed (value (move (ns)), get_location (t));
+ concat_typed (value (move (ns)), get_location (t), nullptr);
}
else
{
@@ -6537,6 +8258,8 @@ namespace build2
if (ttp == nullptr)
ppat = pinc = false;
+ else if (ttp->factory == nullptr)
+ fail (loc) << "abstract target type " << ttp->name << "{}";
}
}
@@ -6602,7 +8325,7 @@ namespace build2
// See if this is a pattern, path or regex.
//
// A path pattern either contains an unquoted wildcard character or,
- // in the curly context, start with unquoted/unescaped `+`.
+ // in the curly context, starts with unquoted/unescaped `+`.
//
// A regex pattern starts with unquoted/unescaped `~` followed by a
// non-alphanumeric delimiter and has the following form:
@@ -6680,7 +8403,7 @@ namespace build2
// Note that we have to check for regex patterns first since
// they may also be detected as path patterns.
//
- if (!quoted_first && regex_pattern ())
+ if (!quoted_first && !path_concat && regex_pattern ())
{
// Note: we may decide to support regex-based name generation
// some day (though a substitution won't make sense here).
@@ -6698,6 +8421,9 @@ namespace build2
? scope_->find_target_type (*tp)
: nullptr);
+ if (ttp != nullptr && ttp->factory == nullptr)
+ fail (loc) << "abstract target type " << ttp->name << "{}";
+
if (tp == nullptr || ttp != nullptr)
{
if (pmode == pattern_mode::detect)
@@ -6748,7 +8474,7 @@ namespace build2
// there isn't any good reason to; see also to_stream(name) for
// the corresponding serialization logic).
//
- if (!quoted_first && regex_pattern ())
+ if (!quoted_first && !path_concat && regex_pattern ())
{
const char* w;
if (val[0] == '~')
@@ -6806,6 +8532,24 @@ namespace build2
}
}
+ // If this is a concatenation of the path or dir_path type and it is
+ // not a pattern, then handle it in the same way as concatenations of
+ // other types (see above).
+ //
+ if (path_concat && !pat)
+ {
+ ns.push_back (move (path_concat->second));
+
+ // Restore the type information if that's the only name.
+ //
+ if (start == ns.size () && last_token ())
+ vtype = path_concat->first;
+
+ // Restart the loop.
+ //
+ continue;
+ }
+
// If we are a second half of a pair, add another first half
// unless this is the first instance.
//
@@ -6860,6 +8604,9 @@ namespace build2
//
if (tt == type::dollar || tt == type::lparen)
{
+ if (ctx == nullptr)
+ fail << "literal " << what << " expected";
+
// These cases are pretty similar in that in both we quickly end up
// with a list of names that we need to splice into the result.
//
@@ -6881,11 +8628,15 @@ namespace build2
// token is a paren or a word, we turn it on and switch to the eval
// mode if what we get next is a paren.
//
- // Also sniff out the special variables string from mode data for
- // the ad hoc $() handling below.
- //
mode (lexer_mode::variable);
+ // Sniff out the special variables string from mode data and use
+ // that to recognize special variables in the ad hoc $() handling
+ // below.
+ //
+ // Note: must be done before calling next() which may expire the
+ // mode.
+ //
auto special = [s = reinterpret_cast<const char*> (mode_data ())]
(const token& t) -> char
{
@@ -6924,156 +8675,202 @@ namespace build2
next (t, tt);
loc = get_location (t);
- name qual;
- string name;
-
- if (t.separated)
- ; // Leave the name empty to fail below.
- else if (tt == type::word)
+ if (tt == type::escape)
{
- name = move (t.value);
+ // For now we only support all the simple C/C++ escape sequences
+ // plus \0 (which in C/C++ is an octal escape sequence). See the
+ // lexer part for details.
+ //
+ // Note: cannot be subscripted.
+ //
+ if (!pre_parse_)
+ {
+ string s;
+ switch (char c = t.value[0])
+ {
+ case '\'':
+ case '"':
+ case '?':
+ case '\\': s = c; break;
+ case '0': s = '\0'; break;
+ case 'a': s = '\a'; break;
+ case 'b': s = '\b'; break;
+ case 'f': s = '\f'; break;
+ case 'n': s = '\n'; break;
+ case 'r': s = '\r'; break;
+ case 't': s = '\t'; break;
+ case 'v': s = '\v'; break;
+ default:
+ assert (false);
+ }
+
+ result_data = name (move (s));
+ what = "escape sequence expansion";
+ }
+
+ tt = peek ();
}
- else if (tt == type::lparen)
+ else
{
- expire_mode ();
- mode (lexer_mode::eval, '@');
- next_with_attributes (t, tt);
+ names qual;
+ string name;
- // Handle the $(x) case ad hoc. We do it this way in order to get
- // the variable name even during pre-parse. It should also be
- // faster.
- //
- char c;
- if ((tt == type::word
- ? path_traits::rfind_separator (t.value) == string::npos
- : (c = special (t))) &&
- peek () == type::rparen)
+ if (t.separated)
+ ; // Leave the name empty to fail below.
+ else if (tt == type::word)
{
- name = (tt == type::word ? move (t.value) : string (1, c));
- next (t, tt); // Get `)`.
+ name = move (t.value);
}
- else
+ else if (tt == type::lparen)
{
- using name_type = build2::name;
+ expire_mode ();
+ mode (lexer_mode::eval, '@');
+ next_with_attributes (t, tt);
- //@@ OUT will parse @-pair and do well?
+ // Handle the $(x) case ad hoc. We do it this way in order to
+ // get the variable name even during pre-parse. It should also
+ // be faster.
//
- values vs (parse_eval (t, tt, pmode));
-
- if (!pre_parse_)
+ char c ('\0');
+ if ((tt == type::word
+ ? path_traits::rfind_separator (t.value) == string::npos
+ : (c = special (t))) &&
+ peek () == type::rparen)
+ {
+ name = (tt == type::word ? move (t.value) : string (1, c));
+ next (t, tt); // Get `)`.
+ }
+ else
{
- if (vs.size () != 1)
- fail (loc) << "expected single variable/function name";
+ using name_type = build2::name;
- value& v (vs[0]);
+ values vs (parse_eval (t, tt, pmode));
- if (!v)
- fail (loc) << "null variable/function name";
+ if (!pre_parse_)
+ {
+ if (vs.size () != 1)
+ fail (loc) << "expected single variable/function name";
- names storage;
- vector_view<name_type> ns (reverse (v, storage)); // Movable.
- size_t n (ns.size ());
+ value& v (vs[0]);
- // We cannot handle scope-qualification in the eval context as
- // we do for target-qualification (see eval-qual) since then
- // we would be treating all paths as qualified variables. So
- // we have to do it here.
- //
- if (n == 2 && ns[0].pair == ':') // $(foo: x)
- {
- qual = move (ns[0]);
+ if (!v)
+ fail (loc) << "null variable/function name";
- if (qual.empty ())
- fail (loc) << "empty variable/function qualification";
- }
- else if (n == 2 && ns[0].directory ()) // $(foo/ x)
- {
- qual = move (ns[0]);
- qual.pair = '/';
- }
- else if (n > 1)
- fail (loc) << "expected variable/function name instead of '"
- << ns << "'";
+ names storage;
+ vector_view<name_type> ns (
+ reverse (v, storage, true /* reduce */)); // Movable.
+ size_t n (ns.size ());
- // Note: checked for empty below.
- //
- if (!ns[n - 1].simple ())
- fail (loc) << "expected variable/function name instead of '"
- << ns[n - 1] << "'";
+ // We cannot handle scope-qualification in the eval context
+ // as we do for target-qualification (see eval-qual) since
+ // then we would be treating all paths as qualified
+ // variables. So we have to do it here.
+ //
+ if (n >= 2 && ns[0].pair == ':') // $(foo: x)
+ {
+ // Note: name is first (see eval for details).
+ //
+ qual.push_back (move (ns[1]));
- size_t p;
- if (n == 1 && // $(foo/x)
- (p = path_traits::rfind_separator (ns[0].value)) !=
- string::npos)
- {
- // Note that p cannot point to the last character since then
- // it would have been a directory, not a simple name.
+ if (qual.back ().empty ())
+ fail (loc) << "empty variable/function qualification";
+
+ if (n > 2)
+ qual.push_back (move (ns[2]));
+
+ // Move name to the last position (see below).
+ //
+ swap (ns[0], ns[n - 1]);
+ }
+ else if (n == 2 && ns[0].directory ()) // $(foo/ x)
+ {
+ qual.push_back (move (ns[0]));
+ qual.back ().pair = '/';
+ }
+ else if (n > 1)
+ fail (loc) << "expected variable/function name instead of '"
+ << ns << "'";
+
+ // Note: checked for empty below.
//
- string& s (ns[0].value);
+ if (!ns[n - 1].simple ())
+ fail (loc) << "expected variable/function name instead of '"
+ << ns[n - 1] << "'";
+
+ size_t p;
+ if (n == 1 && // $(foo/x)
+ (p = path_traits::rfind_separator (ns[0].value)) !=
+ string::npos)
+ {
+ // Note that p cannot point to the last character since
+ // then it would have been a directory, not a simple name.
+ //
+ string& s (ns[0].value);
- name = string (s, p + 1);
- s.resize (p + 1);
- qual = name_type (dir_path (move (s)));
- qual.pair = '/';
+ name = string (s, p + 1);
+ s.resize (p + 1);
+ qual.push_back (name_type (dir_path (move (s))));
+ qual.back ().pair = '/';
+ }
+ else
+ name = move (ns[n - 1].value);
}
- else
- name = move (ns[n - 1].value);
}
}
- }
- else
- fail (t) << "expected variable/function name instead of " << t;
-
- if (!pre_parse_ && name.empty ())
- fail (loc) << "empty variable/function name";
-
- // Figure out whether this is a variable expansion with potential
- // subscript or a function call.
- //
- if (sub) enable_subscript ();
- tt = peek ();
+ else
+ fail (t) << "expected variable/function name instead of " << t;
- // Note that we require function call opening paren to be
- // unseparated; consider: $x ($x == 'foo' ? 'FOO' : 'BAR').
- //
- if (tt == type::lparen && !peeked ().separated)
- {
- // Function call.
- //
- next (t, tt); // Get '('.
- mode (lexer_mode::eval, '@');
- next_with_attributes (t, tt);
+ if (!pre_parse_ && name.empty ())
+ fail (loc) << "empty variable/function name";
- // @@ Should we use (target/scope) qualification (of name) as the
- // context in which to call the function? Hm, interesting...
+ // Figure out whether this is a variable expansion with potential
+ // subscript or a function call.
//
- values args (parse_eval (t, tt, pmode));
-
if (sub) enable_subscript ();
tt = peek ();
- // Note that we "move" args to call().
+ // Note that we require function call opening paren to be
+ // unseparated; consider: $x ($x == 'foo' ? 'FOO' : 'BAR').
//
- if (!pre_parse_)
+ if (tt == type::lparen && !peeked ().separated)
{
- result_data = ctx.functions.call (scope_, name, args, loc);
- what = "function call";
+ // Function call.
+ //
+ next (t, tt); // Get '('.
+ mode (lexer_mode::eval, '@');
+ next_with_attributes (t, tt);
+
+ // @@ Should we use (target/scope) qualification (of name) as
+ // the context in which to call the function? Hm, interesting...
+ //
+ values args (parse_eval (t, tt, pmode));
+
+ if (sub) enable_subscript ();
+ tt = peek ();
+
+ // Note that we "move" args to call().
+ //
+ if (!pre_parse_)
+ {
+ result_data = ctx->functions.call (scope_, name, args, loc);
+ what = "function call";
+ }
+ else
+ lookup_function (move (name), loc);
}
else
- lookup_function (move (name), loc);
- }
- else
- {
- // Variable expansion.
- //
- lookup l (lookup_variable (move (qual), move (name), loc));
-
- if (!pre_parse_)
{
- if (l.defined ())
- result = l.value; // Otherwise leave as NULL result_data.
+ // Variable expansion.
+ //
+ lookup l (lookup_variable (move (qual), move (name), loc));
+
+ if (!pre_parse_)
+ {
+ if (l.defined ())
+ result = l.value; // Otherwise leave as NULL result_data.
- what = "variable expansion";
+ what = "variable expansion";
+ }
}
}
}
@@ -7105,85 +8902,132 @@ namespace build2
// Handle value subscript.
//
- if (tt == type::lsbrace)
+ if (mode () == lexer_mode::eval) // Note: not if(sub)!
{
- location bl (get_location (t));
- next (t, tt); // `[`
- mode (lexer_mode::subscript, '\0' /* pair */);
- next (t, tt);
-
- location l (get_location (t));
- value v (
- tt != type::rsbrace
- ? parse_value (t, tt, pattern_mode::ignore, "value subscript")
- : value (names ()));
-
- if (tt != type::rsbrace)
+ while (tt == type::lsbrace)
{
- // Note: wildcard pattern should have `]` as well so no escaping
- // suggestion.
- //
- fail (t) << "expected ']' instead of " << t;
- }
+ location bl (get_location (t));
+ next (t, tt); // `[`
+ mode (lexer_mode::subscript, '\0' /* pair */);
+ next (t, tt);
- if (!pre_parse_)
- {
- uint64_t j;
- try
- {
- j = convert<uint64_t> (move (v));
- }
- catch (const invalid_argument& e)
+ location l (get_location (t));
+ value v (
+ tt != type::rsbrace
+ ? parse_value (t, tt, pattern_mode::ignore, "value subscript")
+ : value (names ()));
+
+ if (tt != type::rsbrace)
{
- fail (l) << "invalid value subscript: " << e <<
- info (bl) << "use the '\\[' escape sequence if this is a "
- << "wildcard pattern" << endf;
+ // Note: wildcard pattern should have `]` as well so no escaping
+ // suggestion.
+ //
+ fail (t) << "expected ']' instead of " << t;
}
- // Similar to expanding an undefined variable, we return NULL if
- // the index is out of bounds.
- //
- // Note that result may or may not point to result_data.
- //
- if (result->null)
- result_data = value ();
- else if (result->type == nullptr)
+ if (!pre_parse_)
{
- const names& ns (result->as<names> ());
-
- // Pair-aware subscript.
+ // For type-specific subscript implementations we pass the
+ // subscript value as is.
//
- names r;
- for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
+ if (auto f = (result->type != nullptr
+ ? result->type->subscript
+ : nullptr))
{
- if (j == 0)
+ result_data = f (*result, &result_data, move (v), l, bl);
+ }
+ else
+ {
+ uint64_t j;
+ try
{
- r.push_back (*i);
- if (i->pair)
- r.push_back (*++i);
- break;
+ j = convert<uint64_t> (move (v));
}
+ catch (const invalid_argument& e)
+ {
+ fail (l) << "invalid value subscript: " << e <<
+ info (bl) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern" << endf;
+ }
+
+ // Similar to expanding an undefined variable, we return NULL
+ // if the index is out of bounds.
+ //
+ // Note that result may or may not point to result_data.
+ //
+ if (result->null)
+ result_data = value ();
+ else if (result->type == nullptr)
+ {
+ const names& ns (result->as<names> ());
+
+ // Pair-aware subscript.
+ //
+ names r;
+ for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
+ {
+ if (j == 0)
+ {
+ r.push_back (*i);
+ if (i->pair)
+ r.push_back (*++i);
+ break;
+ }
- if (i->pair)
- ++i;
+ if (i->pair)
+ ++i;
+ }
+
+ result_data = r.empty () ? value () : value (move (r));
+ }
+ else
+ {
+ // Similar logic to parse_for().
+ //
+ const value_type* etype (result->type->element_type);
+
+ value val (result == &result_data
+ ? value (move (result_data))
+ : value (*result));
+
+ untypify (val, false /* reduce */);
+
+ names& ns (val.as<names> ());
+
+ // Pair-aware subscript.
+ //
+ names r;
+ for (auto i (ns.begin ()); i != ns.end (); ++i, --j)
+ {
+ bool p (i->pair);
+
+ if (j == 0)
+ {
+ r.push_back (move (*i));
+ if (p)
+ r.push_back (move (*++i));
+ break;
+ }
+
+ if (p)
+ ++i;
+ }
+
+ result_data = r.empty () ? value () : value (move (r));
+
+ if (etype != nullptr)
+ typify (result_data, *etype, nullptr /* var */);
+ }
}
- result_data = r.empty () ? value () : value (move (r));
- }
- else
- {
- // @@ TODO: we would want to return a value with element type.
- //
- //result_data = ...
- fail (l) << "typed value subscript not yet supported" <<
- info (bl) << "use the '\\[' escape sequence if this is a "
- << "wildcard pattern";
+ result = &result_data;
}
- result = &result_data;
+ // See if we have chained subscript.
+ //
+ enable_subscript ();
+ tt = peek ();
}
-
- tt = peek ();
}
if (pre_parse_)
@@ -7227,7 +9071,8 @@ namespace build2
// then it should not be overloaded for a type). In a quoted
// context we use $string() which returns a "canonical
// representation" (e.g., a directory path without a trailing
- // slash).
+ // slash). Note: looks like we use typed $concat() now in the
+ // unquoted context.
//
if (result->type != nullptr && quoted)
{
@@ -7250,7 +9095,10 @@ namespace build2
dr << info (loc) << "while converting " << t << " to string";
});
- p = ctx.functions.try_call (
+ if (ctx == nullptr)
+ fail << "literal " << what << " expected";
+
+ p = ctx->functions.try_call (
scope_, "string", vector_view<value> (&result_data, 1), loc);
}
@@ -7258,7 +9106,11 @@ namespace build2
fail (loc) << "no string conversion for " << t;
result_data = move (p.first);
- untypify (result_data); // Convert to untyped simple name.
+
+ // Convert to untyped simple name reducing empty string to empty
+ // names as an optimization.
+ //
+ untypify (result_data, true /* reduce */);
}
if ((concat && vtype != nullptr) || // LHS typed.
@@ -7267,13 +9119,13 @@ namespace build2
if (result != &result_data) // Same reason as above.
result = &(result_data = *result);
- concat_typed (move (result_data), loc);
+ concat_typed (move (result_data), loc, what);
}
//
// Untyped concatenation. Note that if RHS is NULL/empty, we still
// set the concat flag.
//
- else if (!result->null && !result->empty ())
+ else if (!result->null)
{
// This can only be an untyped value.
//
@@ -7281,34 +9133,36 @@ namespace build2
//
const names& lv (cast<names> (*result));
- // This should be a simple value or a simple directory.
- //
- if (lv.size () > 1)
- fail (loc) << "concatenating " << what << " contains multiple "
- << "values";
+ if (size_t s = lv.size ())
+ {
+ // This should be a simple value or a simple directory.
+ //
+ if (s > 1)
+ concat_diag_multiple (loc, what);
- const name& n (lv[0]);
+ const name& n (lv[0]);
- if (n.qualified ())
- fail (loc) << "concatenating " << what << " contains project "
- << "name";
+ if (n.qualified ())
+ fail (loc) << "concatenating " << what << " contains project "
+ << "name";
- if (n.typed ())
- fail (loc) << "concatenating " << what << " contains type";
+ if (n.typed ())
+ fail (loc) << "concatenating " << what << " contains target type";
- if (!n.dir.empty ())
- {
- if (!n.value.empty ())
- fail (loc) << "concatenating " << what << " contains "
- << "directory";
+ if (!n.dir.empty ())
+ {
+ if (!n.value.empty ())
+ fail (loc) << "concatenating " << what << " contains "
+ << "directory";
- // Note that here we cannot assume what's in dir is really a
- // path (think s/foo/bar/) so we have to reverse it exactly.
- //
- concat_data.value += n.dir.representation ();
+ // Note that here we cannot assume what's in dir is really a
+ // path (think s/foo/bar/) so we have to reverse it exactly.
+ //
+ concat_data.value += n.dir.representation ();
+ }
+ else
+ concat_data.value += n.value;
}
- else
- concat_data.value += n.value;
}
// The same little hack as in the word case ($empty+foo).
@@ -7334,16 +9188,27 @@ namespace build2
// Nothing else to do here if the result is NULL or empty.
//
- if (result->null || result->empty ())
- continue;
-
- // @@ Could move if nv is result_data; see untypify().
+ // Note that we cannot use value::empty() here since we are
+ // interested in representationally empty.
//
- names nv_storage;
- names_view nv (reverse (*result, nv_storage));
+ if (!result->null)
+ {
+ // @@ Could move if nv is result_data; see untypify().
+ //
+ // Nuance: we should only be reducing empty simple value to empty
+ // list if we are not a second half of a pair.
+ //
+ bool pair (!ns.empty () && ns.back ().pair);
+
+ names nv_storage;
+ names_view nv (reverse (*result, nv_storage, !pair /* reduce */));
- count = splice_names (
- loc, nv, move (nv_storage), ns, what, pairn, pp, dp, tp);
+ if (!nv.empty ())
+ {
+ count = splice_names (
+ loc, nv, move (nv_storage), ns, what, pairn, pp, dp, tp);
+ }
+ }
}
continue;
@@ -7578,14 +9443,16 @@ namespace build2
buildspec parser::
parse_buildspec (istream& is, const path_name& in)
{
- // We do "effective escaping" and only for ['"\$(] (basically what's
- // necessary inside a double-quoted literal plus the single quote).
+ // We do "effective escaping" of the special `'"\$(` characters (basically
+ // what's escapable inside a double-quoted literal plus the single quote;
+ // note, however, that we exclude line continuations and `)` since they
+ // would make directory paths on Windows unusable).
//
path_ = &in;
lexer l (is, *path_, 1 /* line */, "\'\"\\$(");
lexer_ = &l;
- root_ = &ctx.global_scope.rw ();
+ root_ = &ctx->global_scope.rw ();
scope_ = root_;
target_ = nullptr;
prerequisite_ = nullptr;
@@ -7820,8 +9687,11 @@ namespace build2
}
lookup parser::
- lookup_variable (name&& qual, string&& name, const location& loc)
+ lookup_variable (names&& qual, string&& name, const location& loc)
{
+ // Note that this function can be called during execute (for example, from
+ // scripts). In particular, this means we cannot use enter_{scope,target}.
+
if (pre_parse_)
return lookup ();
@@ -7833,9 +9703,6 @@ namespace build2
// If we are qualified, it can be a scope or a target.
//
- enter_scope sg;
- enter_target tg;
-
if (qual.empty ())
{
s = scope_;
@@ -7844,36 +9711,70 @@ namespace build2
}
else
{
- switch (qual.pair)
+ // What should we do if we cannot find the qualification (scope or
+ // target)? We can "fall through" to an outer scope (there is always the
+ // global scope backstop), we can return NULL straight away, or we can
+ // fail. It feels like in most cases unknown scope or target is a
+ // mistake and doing anything other than failing is just making things
+ // harder to debug.
+ //
+ switch (qual.front ().pair)
{
case '/':
{
- assert (qual.directory ());
- sg = enter_scope (*this, move (qual.dir));
- s = scope_;
+ assert (qual.front ().directory ());
+
+ dir_path& d (qual.front ().dir);
+ enter_scope::complete_normalize (*scope_, d);
+
+ s = &ctx->scopes.find_out (d);
+
+ if (s->out_path () != d)
+ fail (loc) << "unknown scope " << d << " in scope-qualified "
+ << "variable " << name << " expansion" <<
+ info << "did you forget to include the corresponding buildfile?";
+
break;
}
- case ':':
+ default:
{
- qual.pair = '\0';
+ build2::name n (move (qual.front ())), o;
+
+ if (n.pair)
+ o = move (qual.back ());
- // @@ OUT TODO
+ t = enter_target::find_target (*this, n, o, loc, trace);
+
+ if (t == nullptr || !operator>= (t->decl, target_decl::implied)) // VC14
+ {
+ diag_record dr (fail (loc));
+
+ dr << "unknown target " << n;
+
+ if (n.pair && !o.dir.empty ())
+ dr << '@' << o.dir;
+
+ dr << " in target-qualified variable " << name << " expansion";
+ }
+
+ // Use the target's var_pool for good measure.
//
- tg = enter_target (
- *this, move (qual), build2::name (), true, loc, trace);
- t = target_;
+ s = &t->base_scope ();
+
break;
}
- default: assert (false);
}
}
// Lookup.
//
- if (const variable* pvar = scope_->var_pool ().find (name))
+ if (const variable* pvar =
+ (s != nullptr ? s : scope_)->var_pool ().find (name))
{
auto& var (*pvar);
+ // Note: the order of the following blocks is important.
+
if (p != nullptr)
{
// The lookup depth is a bit of a hack but should be harmless since
@@ -7960,62 +9861,213 @@ namespace build2
return r;
}
+ // file.cxx
+ //
+ extern const dir_path std_export_dir;
+ extern const dir_path alt_export_dir;
+
void parser::
- process_default_target (token& t)
+ process_default_target (token& t, const buildfile* bf)
{
tracer trace ("parser::process_default_target", &path_);
// The logic is as follows: if we have an explicit current directory
- // target, then that's the default target. Otherwise, we take the
- // first target and use it as a prerequisite to create an implicit
- // current directory target, effectively making it the default
- // target via an alias. If there are no targets in this buildfile,
- // then we don't do anything.
+ // target, then that's the default target. Otherwise, we take the first
+ // target and use it as a prerequisite to create an implicit current
+ // directory target, effectively making it the default target via an
+ // alias. If this is a project root buildfile, then also add exported
+ // buildfiles. And if there are no targets in this buildfile, then we
+ // don't do anything (reasonably assuming it's not root).
//
if (default_target_ == nullptr) // No targets in this buildfile.
return;
- target& dt (*default_target_);
-
target* ct (
- const_cast<target*> ( // Ok (serial execution).
- ctx.targets.find (dir::static_type, // Explicit current dir target.
- scope_->out_path (),
- dir_path (), // Out tree target.
- string (),
- nullopt,
- trace)));
-
- if (ct == nullptr)
- {
- l5 ([&]{trace (t) << "creating current directory alias for " << dt;});
-
- // While this target is not explicitly mentioned in the buildfile, we
- // say that we behave as if it were. Thus not implied.
- //
- ct = &ctx.targets.insert (dir::static_type,
- scope_->out_path (),
- dir_path (),
- string (),
- nullopt,
- target_decl::real,
- trace).first;
- // Fall through.
- }
- else if (ct->decl != target_decl::real)
+ const_cast<target*> ( // Ok (serial execution).
+ ctx->targets.find (dir::static_type, // Explicit current dir target.
+ scope_->out_path (),
+ dir_path (), // Out tree target.
+ string (),
+ nullopt,
+ trace)));
+
+ if (ct != nullptr && ct->decl == target_decl::real)
+ ; // Existing and not implied.
+ else
{
- ct->decl = target_decl::real;
- // Fall through.
+ target& dt (*default_target_);
+
+ if (ct == nullptr)
+ {
+ l5 ([&]{trace (t) << "creating current directory alias for " << dt;});
+
+ // While this target is not explicitly mentioned in the buildfile, we
+ // say that we behave as if it were. Thus not implied.
+ //
+ ct = &ctx->targets.insert (dir::static_type,
+ scope_->out_path (),
+ dir_path (),
+ string (),
+ nullopt,
+ target_decl::real,
+ trace).first;
+ }
+ else
+ ct->decl = target_decl::real;
+
+ ct->prerequisites_state_.store (2, memory_order_relaxed);
+ ct->prerequisites_.push_back (prerequisite (dt));
}
- else
- return; // Existing and not implied.
- ct->prerequisites_state_.store (2, memory_order_relaxed);
- ct->prerequisites_.emplace_back (prerequisite (dt));
+ // See if this is a root buildfile and not in a simple project.
+ //
+ if (bf != nullptr &&
+ root_ != nullptr &&
+ root_->root_extra != nullptr &&
+ root_->root_extra->loaded &&
+ *root_->root_extra->project != nullptr &&
+ bf->dir == root_->src_path () &&
+ bf->name == root_->root_extra->buildfile_file.string ())
+ {
+ // See if we have any exported buildfiles.
+ //
+ const dir_path& export_dir (
+ root_->root_extra->altn ? alt_export_dir : std_export_dir);
+
+ dir_path d (root_->src_path () / export_dir);
+ if (exists (d))
+ {
+ // Make sure prerequisites are set.
+ //
+ ct->prerequisites_state_.store (2, memory_order_relaxed);
+
+ const string& build_ext (root_->root_extra->build_ext);
+
+ // Return true if entered any exported buildfiles.
+ //
+ // Note: recursive lambda.
+ //
+ auto iterate = [this, &trace,
+ ct, &build_ext] (const dir_path& d,
+ const auto& iterate) -> bool
+ {
+ bool r (false);
+
+ try
+ {
+ for (const dir_entry& e:
+ dir_iterator (d, dir_iterator::detect_dangling))
+ {
+ switch (e.type ())
+ {
+ case entry_type::directory:
+ {
+ r = iterate (d / path_cast<dir_path> (e.path ()), iterate) || r;
+ break;
+ }
+ case entry_type::regular:
+ {
+ const path& n (e.path ());
+
+ // Besides the buildfile also export buildscript and C++ files
+ // that are used to provide recipe implementations (see
+ // parse_recipe() for details).
+ //
+ string e (n.extension ());
+ if (const target_type* tt = (
+ e == build_ext ? &buildfile::static_type :
+ e == "buildscript" ? &buildscript::static_type :
+ e == "cxx" ||
+ e == "cpp" ||
+ e == "cc" ? &file::static_type : nullptr))
+ {
+ // Enter as if found by search_existing_file(). Note that
+ // entering it as real would cause file_rule not to match
+ // for clean.
+ //
+ // Note that these targets may already be entered (for
+ // example, if already imported).
+ //
+ const target& bf (
+ ctx->targets.insert (*tt,
+ d,
+ (root_->out_eq_src ()
+ ? dir_path ()
+ : out_src (d, *root_)),
+ n.base ().string (),
+ move (e),
+ target_decl::prereq_file,
+ trace).first);
+
+ ct->prerequisites_.push_back (prerequisite (bf));
+ r = true;
+ }
+
+ break;
+ }
+ case entry_type::unknown:
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ fail << (sl ? "dangling symlink" : "inaccessible entry")
+ << ' ' << d / e.path ();
+
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+ catch (const system_error& e)
+ {
+ fail << "unable to iterate over " << d << ": " << e;
+ }
+
+ return r;
+ };
+
+ if (iterate (d, iterate))
+ {
+ // Arrange for the exported buildfiles to be installed, recreating
+ // subdirectories inside export/. Essentially, we are arranging for
+ // this:
+ //
+ // build/export/file{*}:
+ // {
+ // install = buildfile/
+ // install.subdirs = true
+ // }
+ //
+ if (cast_false<bool> (root_->vars["install.loaded"]))
+ {
+ enter_scope es (*this, dir_path (export_dir));
+ auto& vars (scope_->target_vars[file::static_type]["*"]);
+
+ // @@ TODO: get cached variables from the module once we have one.
+ //
+ {
+ auto r (vars.insert (*root_->var_pool ().find ("install")));
+
+ if (r.second) // Already set by the user?
+ r.first = path_cast<path> (dir_path ("buildfile"));
+ }
+
+ {
+ auto r (vars.insert (
+ *root_->var_pool (true).find ("install.subdirs")));
+ if (r.second)
+ r.first = true;
+ }
+ }
+ }
+ }
+ }
}
- void parser::
- enter_buildfile (const path& p)
+ template <typename T>
+ const T& parser::
+ enter_buildfile (const path& p, optional<dir_path> out)
{
tracer trace ("parser::enter_buildfile", &path_);
@@ -8023,17 +10075,20 @@ namespace build2
// Figure out if we need out.
//
- dir_path out;
- if (scope_->src_path_ != nullptr &&
- scope_->src_path () != scope_->out_path () &&
- d.sub (scope_->src_path ()))
+ dir_path o;
+ if (out)
+ o = move (*out);
+ else if (root_ != nullptr &&
+ root_->src_path_ != nullptr &&
+ !root_->out_eq_src () &&
+ d.sub (*root_->src_path_))
{
- out = out_src (d, *root_);
+ o = out_src (d, *root_);
}
- ctx.targets.insert<buildfile> (
+ return ctx->targets.insert<T> (
move (d),
- move (out),
+ move (o),
p.leaf ().base ().string (),
p.extension (), // Always specified.
trace);
diff --git a/libbuild2/parser.hxx b/libbuild2/parser.hxx
index a6060d5..3e1d0a0 100644
--- a/libbuild2/parser.hxx
+++ b/libbuild2/parser.hxx
@@ -48,7 +48,7 @@ namespace build2
explicit
parser (context& c, stage s = stage::rest)
: fail ("error", &path_), info ("info", &path_),
- ctx (c),
+ ctx (&c),
stage_ (s) {}
// Pattern expansion mode.
@@ -69,14 +69,20 @@ namespace build2
scope* root,
scope& base,
target* = nullptr,
- prerequisite* = nullptr);
+ prerequisite* = nullptr,
+ bool enter_buildfile = true);
void
parse_buildfile (lexer&,
scope* root,
scope& base,
target* = nullptr,
- prerequisite* = nullptr);
+ prerequisite* = nullptr,
+ bool enter_buildfile = true);
+
+ names
+ parse_export_stub (istream& is, const path_name& name,
+ const scope& rs, scope& gs, scope& ts);
buildspec
parse_buildspec (istream&, const path_name&);
@@ -87,14 +93,6 @@ namespace build2
pair<value, token>
parse_variable_value (lexer&, scope&, const dir_path*, const variable&);
- names
- parse_export_stub (istream& is, const path_name& name,
- scope& rs, scope& bs)
- {
- parse_buildfile (is, name, &rs, bs);
- return move (export_value);
- }
-
// Parse an evaluation context (`(...)`).
//
value
@@ -106,6 +104,25 @@ namespace build2
void
reset ();
+ // Special, context-less mode that can only be used to parse literal
+ // names.
+ //
+ public:
+ static const string name_separators;
+
+ explicit
+ parser (context* c)
+ : fail ("error", &path_), info ("info", &path_),
+ ctx (c),
+ stage_ (stage::rest) {}
+
+ names
+ parse_names (lexer&,
+ const dir_path* base,
+ pattern_mode pmode,
+ const char* what = "name",
+ const string* separators = &name_separators);
+
// Ad hoc parsing results for some cases.
//
// Note that these are not touched by reset().
@@ -117,8 +134,29 @@ namespace build2
// config directive result.
//
- vector<pair<lookup, string>> config_report; // Config value and format.
- bool config_report_new = false; // One of values is new.
+ struct config_report
+ {
+ struct value
+ {
+ lookup val; // Value.
+ string fmt; // Format.
+ string org; // Original variable if config.report.variable.
+ };
+
+ project_name module; // Reporting module name.
+ vector<value> values;
+ bool new_value; // One of values is new.
+ };
+ small_vector<config_report, 1> config_reports;
+
+ // Misc utilities.
+ //
+ public:
+ // Return the value type corresponding to the type name or NULL if the
+ // type name is unknown. Pass project's root scope if known.
+ //
+ static const value_type*
+ find_value_type (const scope* rs, const string& name);
// Recursive descent parser.
//
@@ -152,22 +190,29 @@ namespace build2
const target_type* = nullptr,
const string& = {});
- // Ad hoc target names inside < ... >.
+ // Group target names inside < ... >.
//
- struct adhoc_names_loc
+ struct group_names_loc
{
+ bool expl = false; // True -- explicit group, false -- ad hoc.
+ location group_loc; // Group/primary target location.
+ location member_loc; // Members location.
names ns;
- location loc;
};
- using adhoc_names = small_vector<adhoc_names_loc, 1>;
+ using group_names = small_vector<group_names_loc, 1>;
- void
- enter_adhoc_members (adhoc_names_loc&&, bool);
+ vector<reference_wrapper<target>>
+ enter_explicit_members (group_names_loc&&, bool);
+
+ vector<reference_wrapper<target>>
+ enter_adhoc_members (group_names_loc&&, bool);
- small_vector<reference_wrapper<target>, 1>
+ small_vector<pair<reference_wrapper<target>, // Target.
+ vector<reference_wrapper<target>>>, // Ad hoc members.
+ 1>
enter_targets (names&&, const location&,
- adhoc_names&&,
+ group_names&&,
size_t,
const attributes&);
@@ -177,7 +222,7 @@ namespace build2
void
parse_dependency (token&, token_type&,
names&&, const location&,
- adhoc_names&&,
+ group_names&&,
names&&, const location&,
const attributes&);
@@ -227,7 +272,9 @@ namespace build2
parse_if_else (token&, token_type&,
bool,
const function<void (
- token&, token_type&, bool, const string&)>&);
+ token&, token_type&, bool, const string&)>&,
+ const function<void (
+ token&, token_type&, const string&)>&);
void
parse_switch (token&, token_type&);
@@ -236,7 +283,9 @@ namespace build2
parse_switch (token&, token_type&,
bool,
const function<void (
- token&, token_type&, bool, const string&)>&);
+ token&, token_type&, bool, const string&)>&,
+ const function<void (
+ token&, token_type&, const string&)>&);
void
parse_for (token&, token_type&);
@@ -314,15 +363,25 @@ namespace build2
// Push a new entry into the attributes_ stack. If the next token is `[`
// then parse the attribute sequence until ']' storing the result in the
- // new stack entry. Then get the next token and, if standalone is false,
- // verify it is not newline/eos (i.e., there is something after it).
- // Return the indication of whether we have seen any attributes (note
- // that the `[]` empty list does not count) and the location of `[`.
+ // new stack entry. Then, if next_token is true, get the next token and,
+ // if standalone is false, verify it is not newline/eos (i.e., there is
+ // something after it). If the next token is read and it is a word or a
+ // "word-producing" token (`$` for variable expansions/function calls, `(`
+ // for eval contexts, and `{` for name generation), then verify that it is
+ // separated to reduce the possibility of confusing it with a wildcard
+ // pattern. Consider:
+ //
+ // ./: [abc]-foo.txt
+ //
+ // Return the indication of whether we have seen any attributes (note that
+ // the `[]` empty list does not count) and the location of `[`.
//
// Note that during pre-parsing nothing is pushed into the stack.
//
pair<bool, location>
- attributes_push (token&, token_type&, bool standalone = false);
+ attributes_push (token&, token_type&,
+ bool standalone = false,
+ bool next_token = true);
attributes
attributes_pop ()
@@ -336,15 +395,21 @@ namespace build2
attributes&
attributes_top () {return attributes_.back ();}
- // Source a stream optionnaly performing the default target processing.
- // If the specified path name has a real path, then also enter it as a
- // buildfile.
+ // Source a buildfile as a stream optionally performing the default target
+ // processing. If the specified path name has a real path, then also enter
+ // it as a buildfile.
+ //
+ // If default_target is nullopt, then disable the default target semantics
+ // as when loading bootstrap.build or root.build. If it is false, then
+ // continue with the existing default_target value. If it is true, then
+ // start with a new default_target and call process_default_target() at
+ // the end.
//
void
- source (istream&,
- const path_name&,
- const location&,
- bool default_target);
+ source_buildfile (istream&,
+ const path_name&,
+ const location&,
+ optional<bool> default_target);
// The what argument is used in diagnostics (e.g., "expected <what>
// instead of ...".
@@ -354,9 +419,6 @@ namespace build2
// project separator. Note that even if it is NULL, the result may still
// contain non-simple names due to variable expansions.
//
-
- static const string name_separators;
-
names
parse_names (token& t, token_type& tt,
pattern_mode pmode,
@@ -379,14 +441,7 @@ namespace build2
const string* separators = &name_separators)
{
names ns;
- parse_names (t, tt,
- ns,
- pmode,
- chunk,
- what,
- separators,
- 0,
- nullopt, nullptr, nullptr);
+ parse_names (t, tt, ns, pmode, chunk, what, separators);
return ns;
}
@@ -408,14 +463,7 @@ namespace build2
bool chunk = false)
{
names ns;
- auto r (parse_names (t, tt,
- ns,
- pmode,
- chunk,
- what,
- separators,
- 0,
- nullopt, nullptr, nullptr));
+ auto r (parse_names (t, tt, ns, pmode, chunk, what, separators));
value v (r.type); // Potentially typed NULL value.
@@ -535,8 +583,12 @@ namespace build2
// Customization hooks.
//
protected:
- // If qual is not empty, then its pair member should indicate the kind
- // of qualification: ':' -- target, '/' -- scope.
+ // If qual is not empty, then first element's pair member indicates the
+ // kind of qualification:
+ //
+ // '\0' -- target
+ // '@' -- out-qualified target
+ // '/' -- scope
//
// Note that this function is called even during pre-parse with the result
// unused. In this case a valid name will only be provided for variables
@@ -544,8 +596,12 @@ namespace build2
// example, $($x ? X : Y)) it will be empty (along with qual, which can
// only be non-empty for a computed variable).
//
+ // Note also that this function is (currently) not called by some lookup-
+ // like functions ($defined(), $config.origin()). But we should be careful
+ // if/when extending this and audit all the existing use-cases.
+ //
virtual lookup
- lookup_variable (name&& qual, string&& name, const location&);
+ lookup_variable (names&& qual, string&& name, const location&);
// This function is only called during pre-parse and is the continuation
// of the similar logic in lookup_variable() above (including the fact
@@ -570,12 +626,15 @@ namespace build2
switch_scope (const dir_path& out_base);
void
- process_default_target (token&);
+ process_default_target (token&, const buildfile*);
- // Enter buildfile as a target.
+ private:
+ // Enter buildfile or buildfile-file like file (e.g., a recipe file) as a
+ // target.
//
- void
- enter_buildfile (const path&);
+ template <typename T>
+ const T&
+ enter_buildfile (const path&, optional<dir_path> out = nullopt);
// Lexer.
//
@@ -661,15 +720,24 @@ namespace build2
replay_data_[replay_i_].mode == m);
}
+ // In the replay mode return the lexing mode of the token returned by the
+ // subsequent next() or peek() call.
+ //
lexer_mode
mode () const
{
if (replay_ != replay::play)
+ {
return lexer_->mode ();
+ }
else
{
- assert (replay_i_ != replay_data_.size ());
- return replay_data_[replay_i_].mode;
+ assert (!peeked_ || replay_i_ != 0);
+
+ size_t i (!peeked_ ? replay_i_ : replay_i_ - 1);
+ assert (i != replay_data_.size ());
+
+ return replay_data_[i].mode;
}
}
@@ -724,6 +792,16 @@ namespace build2
}
void
+ replay_pop ()
+ {
+ assert (replay_ == replay::save);
+
+ assert (!peeked_ && !replay_data_.empty ());
+
+ replay_data_.pop_back ();
+ }
+
+ void
replay_play ()
{
assert ((replay_ == replay::save && !replay_data_.empty ()) ||
@@ -739,6 +817,16 @@ namespace build2
}
void
+ replay_skip ()
+ {
+ assert (replay_ == replay::play);
+
+ assert (!peeked_);
+
+ replay_i_ = replay_data_.size () - 1;
+ }
+
+ void
replay_stop (bool verify = true)
{
if (verify)
@@ -856,7 +944,7 @@ namespace build2
// NOTE: remember to update reset() if adding anything here.
//
protected:
- context& ctx;
+ context* ctx;
stage stage_;
bool pre_parse_ = false;
@@ -873,6 +961,13 @@ namespace build2
small_vector<attributes, 2> attributes_;
+ // Innermost if/switch (but excluding recipes).
+ //
+ // Note also that this is cleared/restored when crossing the include
+ // (but not source) boundary.
+ //
+ optional<location> condition_;
+
target* default_target_ = nullptr;
replay_token peek_;
diff --git a/libbuild2/prerequisite.cxx b/libbuild2/prerequisite.cxx
index cc41708..bb77c9e 100644
--- a/libbuild2/prerequisite.cxx
+++ b/libbuild2/prerequisite.cxx
@@ -54,16 +54,16 @@ namespace build2
}
prerequisite::
- prerequisite (const target_type& t)
+ prerequisite (const target_type& t, bool locked)
: proj (nullopt),
type (t.type ()),
dir (t.dir),
out (t.out), // @@ If it's empty, then we treat as undetermined?
name (t.name),
- ext (to_ext (t.ext ())),
+ ext (to_ext (locked ? t.ext_locked () : t.ext ())),
scope (t.base_scope ()),
target (&t),
- vars (t.ctx, false /* global */)
+ vars (*this, false /* shared */)
{
}
diff --git a/libbuild2/prerequisite.hxx b/libbuild2/prerequisite.hxx
index 476ed9d..9b9cccf 100644
--- a/libbuild2/prerequisite.hxx
+++ b/libbuild2/prerequisite.hxx
@@ -29,7 +29,9 @@ namespace build2
using target_type_type = build2::target_type;
// Note that unlike targets, for prerequisites an empty out directory
- // means undetermined rather than being definitely in the out tree.
+ // means undetermined rather than being definitely in the out tree (but
+ // maybe we should make this explicit via optional<>; see the from-target
+ // constructor).
//
// It might seem natural to keep the reference to the owner target instead
// of to the scope. But that's not the semantics that we have, consider:
@@ -61,6 +63,8 @@ namespace build2
// Note that the lookup is often ad hoc (see bin.whole as an example).
// But see also parser::lookup_variable() if adding something here.
//
+ // @@ PERF: redo as vector so can make move constructor noexcept.
+ //
public:
variable_map vars;
@@ -91,12 +95,28 @@ namespace build2
name (move (n)),
ext (move (e)),
scope (s),
- vars (s.ctx, false /* global */) {}
+ vars (*this, false /* shared */) {}
- // Make a prerequisite from a target.
+ prerequisite (const target_type_type& t,
+ dir_path d,
+ dir_path o,
+ string n,
+ optional<string> e,
+ const scope_type& s)
+ : type (t),
+ dir (move (d)),
+ out (move (o)),
+ name (move (n)),
+ ext (move (e)),
+ scope (s),
+ vars (*this, false /* shared */) {}
+
+ // Make a prerequisite from a target. If the second argument is true,
+ // assume the targets mutex is locked (see ext_locked()/key_locked()
+ // for background).
//
explicit
- prerequisite (const target_type&);
+ prerequisite (const target_type&, bool locked = false);
// Note that the returned key "tracks" the prerequisite; that is, any
// updates to the prerequisite's members will be reflected in the key.
@@ -136,7 +156,10 @@ namespace build2
is_a (const target_type_type& tt) const {return type.is_a (tt);}
public:
- prerequisite (prerequisite&& x)
+ // Note that we have the noexcept specification even though vars
+ // (std::map) could potentially throw.
+ //
+ prerequisite (prerequisite&& x) noexcept
: proj (move (x.proj)),
type (x.type),
dir (move (x.dir)),
@@ -145,7 +168,8 @@ namespace build2
ext (move (x.ext)),
scope (x.scope),
target (x.target.load (memory_order_relaxed)),
- vars (move (x.vars)) {}
+ vars (move (x.vars), *this, false /* shared */)
+ {}
prerequisite (const prerequisite& x, memory_order o = memory_order_consume)
: proj (x.proj),
@@ -156,7 +180,7 @@ namespace build2
ext (x.ext),
scope (x.scope),
target (x.target.load (o)),
- vars (x.vars) {}
+ vars (x.vars, *this, false /* shared */) {}
};
inline ostream&
diff --git a/libbuild2/recipe.cxx b/libbuild2/recipe.cxx
index eeafe87..87d37e7 100644
--- a/libbuild2/recipe.cxx
+++ b/libbuild2/recipe.cxx
@@ -11,4 +11,5 @@ namespace build2
recipe_function* const noop_recipe = &noop_action;
recipe_function* const default_recipe = &default_action;
recipe_function* const group_recipe = &group_action;
+ recipe_function* const inner_recipe = &execute_inner;
}
diff --git a/libbuild2/recipe.hxx b/libbuild2/recipe.hxx
index dfe36ec..97261f5 100644
--- a/libbuild2/recipe.hxx
+++ b/libbuild2/recipe.hxx
@@ -30,7 +30,7 @@ namespace build2
// Note that max size for the "small size optimization" in std::function
// (which is what move_only_function_ex is based on) ranges (in pointer
// sizes) from 0 (GCC libstdc++ prior to 5) to 2 (GCC 5 and later) to 3
- // (Clang libc++) to 6 (VC 14.2). With the size ranging (in bytes for 64-bit
+ // (Clang libc++) to 6 (VC 14.3). With the size ranging (in bytes for 64-bit
// target) from 32 (GCC) to 64 (VC).
//
using recipe_function = target_state (action, const target&);
@@ -49,6 +49,7 @@ namespace build2
LIBBUILD2_SYMEXPORT extern recipe_function* const noop_recipe;
LIBBUILD2_SYMEXPORT extern recipe_function* const default_recipe;
LIBBUILD2_SYMEXPORT extern recipe_function* const group_recipe;
+ LIBBUILD2_SYMEXPORT extern recipe_function* const inner_recipe;
}
#endif // LIBBUILD2_RECIPE_HXX
diff --git a/libbuild2/rule.cxx b/libbuild2/rule.cxx
index acb46e8..dc1c96c 100644
--- a/libbuild2/rule.cxx
+++ b/libbuild2/rule.cxx
@@ -15,17 +15,54 @@ using namespace butl;
namespace build2
{
- // rule (vtable)
+ // rule
//
rule::
~rule ()
{
}
+ void rule::
+ apply_posthoc (action, target&, match_extra&) const
+ {
+ }
+
+ void rule::
+ reapply (action, target&, match_extra&) const
+ {
+ // Unless the rule overrode cur_options, this function should never get
+ // called. And if it did, then it should override this function.
+ //
+ assert (false);
+ }
+
+ const target* rule::
+ import (const prerequisite_key&,
+ const optional<string>&,
+ const location&) const
+ {
+ return nullptr;
+ }
+
+ const rule_match*
+ match_adhoc_recipe (action, target&, match_extra&); // algorithm.cxx
+
bool rule::
sub_match (const string& n, operation_id o,
action a, target& t, match_extra& me) const
{
+ // First check for an ad hoc recipe (see match_rule_impl() for details).
+ //
+ if (!t.adhoc_recipes.empty ())
+ {
+ // Use scratch match_extra since if there is no recipe, then we don't
+ // want to keep any changes and if there is, then we want it discarded.
+ //
+ match_extra s (true /* locked */); // Not called from adhoc_rule::match().
+ if (match_adhoc_recipe (action (a.meta_operation (), o), t, s) != nullptr)
+ return false;
+ }
+
const string& h (t.find_hint (o));
return name_rule_map::sub (h, n) && match (a, t, h, me);
}
@@ -48,6 +85,13 @@ namespace build2
sub_match (const string& n, operation_id o,
action a, target& t) const
{
+ if (!t.adhoc_recipes.empty ())
+ {
+ match_extra s (true /* locked */); // Not called from adhoc_rule::match().
+ if (match_adhoc_recipe (action (a.meta_operation (), o), t, s) != nullptr)
+ return false;
+ }
+
return name_rule_map::sub (t.find_hint (o), n) && match (a, t);
}
@@ -65,6 +109,9 @@ namespace build2
{
tracer trace ("file_rule::match");
+ if (match_type_ && !t.is_a<mtime_target> ())
+ return false;
+
// While strictly speaking we should check for the file's existence
// for every action (because that's the condition for us matching),
// for some actions this is clearly a waste. Say, perform_clean: we
@@ -171,7 +218,7 @@ namespace build2
}
const file_rule file_rule::instance;
- const rule_match file_rule::rule_match ("file", file_rule::instance);
+ const rule_match file_rule::rule_match ("build.file", file_rule::instance);
// alias_rule
//
@@ -187,9 +234,25 @@ namespace build2
// Inject dependency on our directory (note: not parent) so that it is
// automatically created on update and removed on clean.
//
- inject_fsdir (a, t, false);
+ inject_fsdir (a, t, true, true, false);
- match_prerequisites (a, t);
+ // Handle the alias match-only level.
+ //
+ match_search ms;
+ if (t.ctx.match_only && *t.ctx.match_only == match_only_level::alias)
+ {
+ ms = [] (action,
+ const target& t,
+ const prerequisite& p,
+ include_type i)
+ {
+ return prerequisite_target (
+ p.is_a<alias> () ? &search (t, p) : nullptr,
+ i);
+ };
+ }
+
+ match_prerequisites (a, t, ms);
return default_recipe;
}
@@ -232,7 +295,7 @@ namespace build2
if (verb >= 2)
text << "mkdir " << d;
else if (verb && t.ctx.current_diag_noise)
- text << "mkdir " << t;
+ print_diag ("mkdir", t);
};
// Note: ignoring the dry_run flag.
@@ -289,16 +352,19 @@ namespace build2
}
void fsdir_rule::
- perform_update_direct (action a, const target& t)
+ perform_update_direct (action a, const fsdir& t)
{
+ assert (t.ctx.phase == run_phase::match);
+
// First create the parent directory. If present, it is always first.
//
- const target* p (t.prerequisite_targets[a].empty ()
- ? nullptr
- : t.prerequisite_targets[a][0]);
-
- if (p != nullptr && p->is_a<fsdir> ())
- perform_update_direct (a, *p);
+ if (const target* p = (t.prerequisite_targets[a].empty ()
+ ? nullptr
+ : t.prerequisite_targets[a][0]))
+ {
+ if (const fsdir* fp = p->is_a<fsdir> ())
+ perform_update_direct (a, *fp);
+ }
// The same code as in perform_update() above.
//
@@ -317,6 +383,8 @@ namespace build2
// Don't fail if we couldn't remove the directory because it is not empty
// (or is current working directory). In this case rmdir() will issue a
// warning when appropriate.
+
+ // The same code as in perform_clean_direct() below.
//
target_state ts (rmdir (t.dir, t, t.ctx.current_diag_noise ? 1 : 2)
? target_state::changed
@@ -328,6 +396,35 @@ namespace build2
return ts;
}
+ void fsdir_rule::
+ perform_clean_direct (action a, const fsdir& t)
+ {
+ assert (t.ctx.phase == run_phase::match);
+
+ // The same code as in perform_clean() above.
+ //
+ // Except that if there are other dependents of this fsdir{} then this will
+ // likely be a noop (because the directory won't be empty) and it makes
+ // sense to just defer cleaning to such other dependents. See
+ // clean_during_match() for background. This is logic similar to
+ // unmatch::safe.
+ //
+ if (t[a].dependents.load (memory_order_relaxed) == 0)
+ {
+ rmdir (t.dir, t, t.ctx.current_diag_noise ? 1 : 2);
+
+ // Then clean the parent directory. If present, it is always first.
+ //
+ if (const target* p = (t.prerequisite_targets[a].empty ()
+ ? nullptr
+ : t.prerequisite_targets[a][0]))
+ {
+ if (const fsdir* fp = p->is_a<fsdir> ())
+ perform_clean_direct (a, *fp);
+ }
+ }
+ }
+
const fsdir_rule fsdir_rule::instance;
// noop_rule
@@ -357,8 +454,9 @@ namespace build2
}
bool adhoc_rule::
- match (action a, target& t, const string& h, match_extra& me) const
+ match (action a, target& xt, const string& h, match_extra& me) const
{
+ const target& t (xt);
return pattern == nullptr || pattern->match (a, t, h, me);
}
diff --git a/libbuild2/rule.hxx b/libbuild2/rule.hxx
index 77c2d2c..eceb6ad 100644
--- a/libbuild2/rule.hxx
+++ b/libbuild2/rule.hxx
@@ -22,7 +22,8 @@ namespace build2
// you need to modify some state (e.g., counters or some such), then make
// sure things are MT-safe.
//
- // Note: match() is only called once but may not be followed by apply().
+ // Note: match() could be called multiple times (so should be idempotent)
+ // and it may not be followed by apply().
//
// The hint argument is the rule hint, if any, that was used to select this
// rule. While normally not factored into the match decision, a rule may
@@ -33,6 +34,13 @@ namespace build2
// implementations. It is also a way for us to later pass more information
// without breaking source compatibility.
//
+ // A rule may adjust post hoc prerequisites by overriding apply_posthoc().
+ // See match_extra::posthoc_prerequisite_targets for background and details.
+ //
+ // A rule may support match options and if such a rule is rematched with
+ // different options, then reapply() is called. See
+ // match_extra::{cur,new}_options for background and details.
+ //
struct match_extra;
class LIBBUILD2_SYMEXPORT rule
@@ -44,6 +52,12 @@ namespace build2
virtual recipe
apply (action, target&, match_extra&) const = 0;
+ virtual void
+ apply_posthoc (action, target&, match_extra&) const;
+
+ virtual void
+ reapply (action, target&, match_extra&) const;
+
rule () = default;
virtual
@@ -52,15 +66,31 @@ namespace build2
rule (const rule&) = delete;
rule& operator= (const rule&) = delete;
+ // Resolve a project-qualified target in a rule-specific manner.
+ //
+ // This is optional functionality that may be provided by some rules to
+ // facilitate immediate importation of certain target types. See the
+ // import machinery for details. The default implementation always returns
+ // NULL.
+ //
+ // Note that if this function returns a target, it should have the
+ // extension assigned so that as_name() returns a stable name.
+ //
+ virtual const target*
+ import (const prerequisite_key&,
+ const optional<string>& metadata,
+ const location&) const;
+
// Sometimes we want to match only if another rule of ours would match
// another operation. For example, we would want our install rule to match
// only if our update rule also matches.
//
// Arranging this, however, is not a simple matter of calling the other
- // rule's match(): we also have to take into account the rule hints for
- // that operation. This helper performs all the necessary steps. Note:
- // should only be called from match() (see target::find_hint() for
- // details).
+ // rule's match(): we also have to take into account ad hoc recipes and
+ // rule hints for that operation. This helper performs all the necessary
+ // checks. Note: should only be called from match() (see
+ // target::find_hint() for details). Note also that ad hoc recipes are
+ // checked for hint_op, not action's operation.
//
bool
sub_match (const string& rule_name, operation_id hint_op,
@@ -109,10 +139,19 @@ namespace build2
virtual recipe
apply (action, target&, match_extra&) const override;
- file_rule () {}
+ // While this rule expects an mtime_target-based target, sometimes it's
+ // necessary to register it for something less specific (normally target)
+ // in order to achieve the desired rule matching priority (see the dist
+ // and config modules for an example). For such cases this rule can be
+ // instructed to check the type and only match if it's mtime_target-based.
+ //
+ file_rule (bool match_type = false): match_type_ (match_type) {}
- static const file_rule instance;
+ static const file_rule instance; // Note: does not match the target type.
static const build2::rule_match rule_match;
+
+ private:
+ bool match_type_;
};
class LIBBUILD2_SYMEXPORT alias_rule: public simple_rule
@@ -150,7 +189,10 @@ namespace build2
// of fsdir{} without the overhead of switching to the execute phase.
//
static void
- perform_update_direct (action, const target&);
+ perform_update_direct (action, const fsdir&);
+
+ static void
+ perform_clean_direct (action, const fsdir&);
fsdir_rule () {}
static const fsdir_rule instance;
@@ -235,6 +277,16 @@ namespace build2
// The default implementation forwards to the pattern's match() if there
// is a pattern and returns true otherwise.
//
+ // Note also that in case of a member of a group-based target, match() is
+ // called on the group while apply() on the member (see match_rule_impl()
+ // in algorithm.cxx for details). This means that match() may be called
+ // without having the target locked and as a result match() should (unless
+ // known to only match a non-group) treat the target as const and only
+ // rely on immutable information (type, name, etc) since the group could
+ // be matched concurrently. This case can be detected by examining
+ // match_extra::locked (see adhoc_rule_regex_pattern::match() for a
+ // use-case).
+ //
virtual bool
match (action, target&, const string&, match_extra&) const override;
@@ -301,18 +353,24 @@ namespace build2
~adhoc_rule_pattern ();
public:
+ // Note: the adhoc_rule::match() restrictions apply here as well.
+ //
virtual bool
- match (action, target&, const string&, match_extra&) const = 0;
+ match (action, const target&, const string&, match_extra&) const = 0;
+ // Append additional group members. Note that this function should handle
+ // both ad hoc and explicit groups.
+ //
virtual void
- apply_adhoc_members (action, target&,
+ apply_group_members (action, target&,
const scope& base,
match_extra&) const = 0;
// The implementation should append pattern prerequisites to
// t.prerequisite_targets[a] but not match. It should set bit 2 in
// prerequisite_target::include to indicate update=match and bit 3
- // to indicate update=unmatch.
+ // to indicate update=unmatch. It should also avoid adding duplicate
+ // fsdir{} similar to the search_prerequisite*() functions.
//
virtual void
apply_prerequisites (action, target&,
diff --git a/libbuild2/scheduler.cxx b/libbuild2/scheduler.cxx
index 5027f90..e3fbcc1 100644
--- a/libbuild2/scheduler.cxx
+++ b/libbuild2/scheduler.cxx
@@ -93,12 +93,11 @@ namespace build2
}
void scheduler::
- deactivate (bool external)
+ deactivate_impl (bool external, lock&& rl)
{
- if (max_active_ == 1) // Serial execution.
- return;
+ // Note: assume non-serial execution.
- lock l (mutex_);
+ lock l (move (rl)); // Make sure unlocked on exception.
active_--;
waiting_++;
@@ -131,11 +130,10 @@ namespace build2
}
}
- void scheduler::
- activate (bool external, bool collision)
+ scheduler::lock scheduler::
+ activate_impl (bool external, bool collision)
{
- if (max_active_ == 1) // Serial execution.
- return;
+ // Note: assume non-serial execution.
lock l (mutex_);
@@ -160,6 +158,8 @@ namespace build2
if (shutdown_)
throw_generic_error (ECANCELED);
+
+ return l;
}
void scheduler::
@@ -207,7 +207,10 @@ namespace build2
deallocate (size_t n)
{
if (max_active_ == 1) // Serial execution.
+ {
+ assert (n == 0);
return;
+ }
lock l (mutex_);
active_ -= n;
@@ -216,13 +219,15 @@ namespace build2
size_t scheduler::
suspend (size_t start_count, const atomic_count& task_count)
{
+ assert (max_active_ != 1); // Suspend during serial execution?
+
wait_slot& s (
wait_queue_[
hash<const atomic_count*> () (&task_count) % wait_queue_size_]);
// This thread is no longer active.
//
- deactivate (false /* external */);
+ deactivate_impl (false /* external */, lock (mutex_));
// Note that the task count is checked while holding the lock. We also
// have to notify while holding the lock (see resume()). The aim here
@@ -259,7 +264,7 @@ namespace build2
// This thread is no longer waiting.
//
- activate (false /* external */, collision);
+ activate_impl (false /* external */, collision);
return tc;
}
diff --git a/libbuild2/scheduler.hxx b/libbuild2/scheduler.hxx
index dc18859..3cc206e 100644
--- a/libbuild2/scheduler.hxx
+++ b/libbuild2/scheduler.hxx
@@ -7,7 +7,8 @@
#include <list>
#include <tuple>
#include <atomic>
-#include <type_traits> // aligned_storage, etc
+#include <cstddef> // max_align_t
+#include <type_traits> // decay, etc
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -191,13 +192,15 @@ namespace build2
//
// The external flag indicates whether the wait is for an event external
// to the scheduler, that is, triggered by something other than one of the
- // threads managed by the scheduler.
+ // threads managed by the scheduler. This is used to suspend deadlock
+ // detection (which is progress-based and which cannot be measured for
+ // external events).
//
void
deactivate (bool external);
void
- activate (bool external, bool = false);
+ activate (bool external);
// Sleep for the specified duration, deactivating the thread before going
// to sleep and re-activating it after waking up (which means this
@@ -216,7 +219,7 @@ namespace build2
// Allocate additional active thread count to the current active thread,
// for example, to be "passed" to an external program:
//
- // scheduler::alloc_guard ag (ctx.sched, ctx.sched.max_active () / 2);
+ // scheduler::alloc_guard ag (*ctx.sched, ctx.sched->max_active () / 2);
// args.push_back ("-flto=" + to_string (1 + ag.n));
// run (args);
// ag.deallocate ();
@@ -241,14 +244,38 @@ namespace build2
void
deallocate (size_t);
+ // Similar to allocate() but reserve all the available threads blocking
+ // until this becomes possible. Call unlock() on the specified lock before
+ // deactivating and lock() after activating (can be used to unlock the
+ // phase). Typical usage:
+ //
+ // scheduler::alloc_guard ag (*ctx.sched,
+ // phase_unlock (ctx, true /* delay */));
+ //
+ // Or, without unlocking the phase:
+ //
+ // scheduler::alloc_guard ag (*ctx.sched, phase_unlock (nullptr));
+ //
+ template <typename L>
+ size_t
+ serialize (L& lock);
+
struct alloc_guard
{
size_t n;
alloc_guard (): n (0), s_ (nullptr) {}
alloc_guard (scheduler& s, size_t m): n (s.allocate (m)), s_ (&s) {}
- alloc_guard (alloc_guard&& x): n (x.n), s_ (x.s_) {x.s_ = nullptr;}
- alloc_guard& operator= (alloc_guard&& x)
+
+ template <typename L,
+ typename std::enable_if<!std::is_integral<L>::value, int>::type = 0>
+ alloc_guard (scheduler& s, L&& l): n (s.serialize (l)), s_ (&s) {}
+
+ alloc_guard (alloc_guard&& x) noexcept
+ : n (x.n), s_ (x.s_) {x.s_ = nullptr;}
+
+ alloc_guard&
+ operator= (alloc_guard&& x) noexcept
{
if (&x != this)
{
@@ -353,12 +380,19 @@ namespace build2
size_t
tune (size_t max_active);
+ bool
+ tuned () const {return max_active_ != orig_max_active_;}
+
struct tune_guard
{
tune_guard (): s_ (nullptr), o_ (0) {}
tune_guard (scheduler& s, size_t ma): s_ (&s), o_ (s_->tune (ma)) {}
- tune_guard (tune_guard&& x): s_ (x.s_), o_ (x.o_) {x.s_ = nullptr;}
- tune_guard& operator= (tune_guard&& x)
+
+ tune_guard (tune_guard&& x) noexcept
+ : s_ (x.s_), o_ (x.o_) {x.s_ = nullptr;}
+
+ tune_guard&
+ operator= (tune_guard&& x) noexcept
{
if (&x != this)
{
@@ -426,8 +460,8 @@ namespace build2
{
explicit
monitor_guard (scheduler* s = nullptr): s_ (s) {}
- monitor_guard (monitor_guard&& x): s_ (x.s_) {x.s_ = nullptr;}
- monitor_guard& operator= (monitor_guard&& x)
+ monitor_guard (monitor_guard&& x) noexcept: s_ (x.s_) {x.s_ = nullptr;}
+ monitor_guard& operator= (monitor_guard&& x) noexcept
{
if (&x != this)
{
@@ -543,8 +577,8 @@ namespace build2
atomic_count* task_count;
size_t start_count;
- func_type func;
args_type args;
+ func_type func;
template <size_t... i>
void
@@ -673,7 +707,11 @@ namespace build2
//
struct task_data
{
- std::aligned_storage<sizeof (void*) * 8>::type data;
+ static const size_t data_size = (sizeof (void*) == 4
+ ? sizeof (void*) * 16
+ : sizeof (void*) * 8);
+
+ alignas (std::max_align_t) unsigned char data[data_size];
void (*thunk) (scheduler&, lock&, void*);
};
@@ -923,6 +961,12 @@ namespace build2
private:
optional<size_t>
wait_impl (size_t, const atomic_count&, work_queue);
+
+ void
+ deactivate_impl (bool, lock&&);
+
+ lock
+ activate_impl (bool, bool);
};
}
diff --git a/libbuild2/scheduler.ixx b/libbuild2/scheduler.ixx
index 96eaee1..f46d035 100644
--- a/libbuild2/scheduler.ixx
+++ b/libbuild2/scheduler.ixx
@@ -44,6 +44,20 @@ namespace build2
return suspend (start_count, task_count);
}
+ inline void scheduler::
+ deactivate (bool external)
+ {
+ if (max_active_ != 1) // Serial execution.
+ deactivate_impl (external, lock (mutex_));
+ }
+
+ inline void scheduler::
+ activate (bool external)
+ {
+ if (max_active_ != 1) // Serial execution.
+ activate_impl (external, false /* collision */);
+ }
+
inline scheduler::queue_mark::
queue_mark (scheduler& s)
: tq_ (s.queue ())
diff --git a/libbuild2/scheduler.txx b/libbuild2/scheduler.txx
index 5c6b339..87c9384 100644
--- a/libbuild2/scheduler.txx
+++ b/libbuild2/scheduler.txx
@@ -64,8 +64,8 @@ namespace build2
new (&td->data) task {
&task_count,
start_count,
- decay_copy (forward<F> (f)),
- typename task::args_type (decay_copy (forward<A> (a))...)};
+ typename task::args_type (decay_copy (forward<A> (a))...),
+ decay_copy (forward<F> (f))};
td->thunk = &task_thunk<F, A...>;
@@ -137,4 +137,42 @@ namespace build2
if (tc.fetch_sub (1, memory_order_release) - 1 <= t.start_count)
s.resume (tc); // Resume waiters, if any.
}
+
+ template <typename L>
+ size_t scheduler::
+ serialize (L& el)
+ {
+ if (max_active_ == 1) // Serial execution.
+ return 0;
+
+ lock l (mutex_);
+
+ if (active_ == 1)
+ active_ = max_active_;
+ else
+ {
+ // Wait until we are the only active thread.
+ //
+ el.unlock ();
+
+ while (active_ != 1)
+ {
+ // While it would have been more efficient to implement this via the
+ // condition variable notifications, that logic is already twisted
+ // enough (and took a considerable time to debug). So for now we keep
+ // it simple and do sleep and re-check. Make the sleep external not to
+ // trip up the deadlock detection.
+ //
+ deactivate_impl (true /* external */, move (l));
+ active_sleep (std::chrono::milliseconds (10));
+ l = activate_impl (true /* external */, false /* collision */);
+ }
+
+ active_ = max_active_;
+ l.unlock (); // Important: unlock before attempting to relock external!
+ el.lock ();
+ }
+
+ return max_active_ - 1;
+ }
}
diff --git a/libbuild2/scope.cxx b/libbuild2/scope.cxx
index 814d876..6ed7bab 100644
--- a/libbuild2/scope.cxx
+++ b/libbuild2/scope.cxx
@@ -32,8 +32,8 @@ namespace build2
// scope
//
scope::
- scope (context& c, bool global)
- : ctx (c), vars (c, global), target_vars (c, global)
+ scope (context& c, bool shared)
+ : ctx (c), vars (*this, shared), target_vars (c, shared)
{
}
@@ -685,6 +685,8 @@ namespace build2
pair<const target_type*, optional<string>> scope::
find_target_type (name& n, const location& loc, const target_type* tt) const
{
+ // NOTE: see also functions-name.cxx:filter() if changing anything here.
+
optional<string> ext;
string& v (n.value);
@@ -790,9 +792,11 @@ namespace build2
}
pair<const target_type&, optional<string>> scope::
- find_target_type (name& n, name& o, const location& loc) const
+ find_target_type (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto r (find_target_type (n, loc));
+ auto r (find_target_type (n, loc, tt));
if (r.first == nullptr)
fail (loc) << "unknown target type " << n.type << " in " << n;
@@ -806,26 +810,68 @@ namespace build2
fail (loc) << "expected directory after '@'";
}
- dir_path& d (n.dir);
+ dir_path& dir (n.dir);
const dir_path& sd (src_path ());
const dir_path& od (out_path ());
- if (d.empty ())
- d = src ? sd : od; // Already dormalized.
+ bool nabs (false);
+
+ if (dir.empty ())
+ dir = src ? sd : od; // Already normalized.
else
{
- if (d.relative ())
- d = (src ? sd : od) / d;
+ if (dir.relative ())
+ dir = (src ? sd : od) / dir;
+ else if (src)
+ nabs = true;
- d.normalize ();
+ dir.normalize ();
}
dir_path out;
- if (src && sd != od) // If in source build, then out must be empty.
+ if (src)
{
- out = o.dir.relative () ? od / o.dir : move (o.dir);
+ bool oabs (o.dir.absolute ());
+
+ out = oabs ? move (o.dir) : od / o.dir;
out.normalize ();
+
+ // Make sure out and src are parallel unless both were specified as
+ // absolute. We make an exception for this case because out may be used
+ // to "tag" imported targets (see cc::search_library()). So it's sort of
+ // the "I know what I am doing" escape hatch (it would have been even
+ // better to verify such a target is outside any project but that won't
+ // be cheap).
+ //
+ // See similar code for prerequisites in parser::parse_dependency().
+ //
+ if (nabs && oabs)
+ ;
+ else if (root_->out_eq_src ()
+ ? out == dir
+ //
+ // @@ PERF: could just compare leafs in place.
+ //
+ : (out.sub (root_->out_path ()) &&
+ dir.sub (root_->src_path ()) &&
+ out.leaf (root_->out_path ()) == dir.leaf (root_->src_path ())))
+ ;
+ else
+ // @@ TMP change warn to fail after 0.16.0 release.
+ //
+ warn (loc) << "target output directory " << out
+ << " must be parallel to source directory " << dir;
+
+ // If this target is in this project, then out must be empty if this is
+ // in source build. We assume that if either src or out are relative,
+ // then it belongs to this project.
+ //
+ if (root_->out_eq_src ())
+ {
+ if (!nabs || !oabs || out.sub (root_->out_path ()))
+ out.clear ();
+ }
}
o.dir = move (out); // Result.
@@ -834,14 +880,16 @@ namespace build2
}
target_key scope::
- find_target_key (names& ns, const location& loc) const
+ find_target_key (names& ns,
+ const location& loc,
+ const target_type* tt) const
{
if (size_t n = ns.size ())
{
if (n == (ns[0].pair ? 2 : 1))
{
name dummy;
- return find_target_key (ns[0], n == 1 ? dummy : ns[1], loc);
+ return find_target_key (ns[0], n == 1 ? dummy : ns[1], loc, tt);
}
}
@@ -849,9 +897,11 @@ namespace build2
}
pair<const target_type&, optional<string>> scope::
- find_prerequisite_type (name& n, name& o, const location& loc) const
+ find_prerequisite_type (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto r (find_target_type (n, loc));
+ auto r (find_target_type (n, loc, tt));
if (r.first == nullptr)
fail (loc) << "unknown target type " << n.type << " in " << n;
@@ -875,14 +925,16 @@ namespace build2
}
prerequisite_key scope::
- find_prerequisite_key (names& ns, const location& loc) const
+ find_prerequisite_key (names& ns,
+ const location& loc,
+ const target_type* tt) const
{
if (size_t n = ns.size ())
{
if (n == (ns[0].pair ? 2 : 1))
{
name dummy;
- return find_prerequisite_key (ns[0], n == 1 ? dummy : ns[1], loc);
+ return find_prerequisite_key (ns[0], n == 1 ? dummy : ns[1], loc, tt);
}
}
@@ -910,7 +962,9 @@ namespace build2
}
pair<reference_wrapper<const target_type>, bool> scope::
- derive_target_type (const string& name, const target_type& base)
+ derive_target_type (const string& name,
+ const target_type& base,
+ target_type::flag flags)
{
assert (root_scope () == this);
@@ -928,10 +982,20 @@ namespace build2
//
// Currently, if we define myfile{}: file{}, then myfile{foo} and
// myfile{foo.x} are the same target.
+
+ // Note: copies flags.
//
- unique_ptr<target_type> dt (new target_type (base));
- dt->base = &base;
- dt->factory = &derived_tt_factory;
+ unique_ptr<target_type> dt (
+ new target_type {
+ nullptr, // Will be patched in by insert() below.
+ &base,
+ &derived_tt_factory,
+ base.fixed_extension,
+ base.default_extension,
+ base.pattern,
+ base.print,
+ base.search,
+ base.flags | flags});
#if 0
// @@ We should probably inherit the fixed extension unless overriden with
@@ -1010,8 +1074,17 @@ namespace build2
derive_target_type (const target_type& et)
{
assert (root_scope () == this);
- unique_ptr<target_type> dt (new target_type (et));
- dt->factory = &derived_tt_factory;
+ unique_ptr<target_type> dt (
+ new target_type {
+ nullptr, // Will be patched in by insert() below.
+ et.base,
+ &derived_tt_factory,
+ et.fixed_extension,
+ et.default_extension,
+ et.pattern,
+ et.print,
+ et.search,
+ et.flags});
return root_extra->target_types.insert (et.name, move (dt)).first;
}
@@ -1028,7 +1101,7 @@ namespace build2
if (er.first->second.front () == nullptr)
{
- er.first->second.front () = new scope (ctx, true /* global */);
+ er.first->second.front () = new scope (ctx, true /* shared */);
er.second = true;
}
diff --git a/libbuild2/scope.hxx b/libbuild2/scope.hxx
index 948fc7d..968727b 100644
--- a/libbuild2/scope.hxx
+++ b/libbuild2/scope.hxx
@@ -4,8 +4,6 @@
#ifndef LIBBUILD2_SCOPE_HXX
#define LIBBUILD2_SCOPE_HXX
-#include <unordered_set>
-
#include <libbuild2/types.hxx>
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
@@ -103,8 +101,8 @@ namespace build2
scope& global_scope () {return const_cast<scope&> (ctx.global_scope);}
const scope& global_scope () const {return ctx.global_scope;}
- // Return true if the specified root scope is a sub-scope of this root
- // scope. Note that both scopes must be root.
+ // Return true if the specified root scope is a sub-scope of (but not the
+ // same as) this root scope. Note that both scopes must be root.
//
bool
sub_root (const scope&) const;
@@ -136,7 +134,7 @@ namespace build2
lookup_type
operator[] (const string& name) const
{
- const variable* var (ctx.var_pool.find (name));
+ const variable* var (var_pool ().find (name));
return var != nullptr ? operator[] (*var) : lookup_type ();
}
@@ -302,13 +300,6 @@ namespace build2
//
variable_type_map target_vars;
- // Set of buildfiles already loaded for this scope. The included
- // buildfiles are checked against the project's root scope while
- // imported -- against the global scope (global_scope).
- //
- public:
- std::unordered_set<path> buildfiles;
-
// Target types.
//
// Note that target types are project-wide (even if the module that
@@ -329,7 +320,7 @@ namespace build2
const target_type&
insert_target_type (const target_type& tt)
{
- return root_extra->target_types.insert (tt);
+ return root_extra->target_types.insert (tt).first;
}
template <typename T>
@@ -367,40 +358,56 @@ namespace build2
// the out directory.
//
pair<const target_type&, optional<string>>
- find_target_type (name&, name&, const location&) const;
+ find_target_type (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
// As above, but return the result as a target key (with its members
// shallow-pointing to processed parts in the two names).
//
target_key
- find_target_key (name&, name&, const location&) const;
+ find_target_key (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
// As above, but the names are passed as a vector. Issue appropriate
// diagnostics if the wrong number of names is passed.
//
target_key
- find_target_key (names&, const location&) const;
+ find_target_key (names&,
+ const location&,
+ const target_type* = nullptr) const;
// Similar to the find_target_type() but does not complete relative
// directories.
//
pair<const target_type&, optional<string>>
- find_prerequisite_type (name&, name&, const location&) const;
+ find_prerequisite_type (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
// As above, but return a prerequisite key.
//
prerequisite_key
- find_prerequisite_key (name&, name&, const location&) const;
+ find_prerequisite_key (name&, name&,
+ const location&,
+ const target_type* = nullptr) const;
prerequisite_key
- find_prerequisite_key (names&, const location&) const;
+ find_prerequisite_key (names&,
+ const location&,
+ const target_type* = nullptr) const;
// Dynamically derive a new target type from an existing one. Return the
// reference to the target type and an indicator of whether it was
// actually created.
//
+ // Note: the flags are OR'ed to the base's flags.
+ //
pair<reference_wrapper<const target_type>, bool>
- derive_target_type (const string& name, const target_type& base);
+ derive_target_type (const string& name,
+ const target_type& base,
+ target_type::flag flags = target_type::flag::none);
template <typename T>
pair<reference_wrapper<const target_type>, bool>
@@ -486,7 +493,7 @@ namespace build2
// is not yet determined (happens at the end of bootstrap_src()). NULL
// means there are no subprojects.
//
- optional<const build2::subprojects*> subprojects;
+ optional<build2::subprojects*> subprojects;
bool altn; // True if using alternative build file/directory naming.
bool loaded; // True if already loaded (load_root()).
@@ -508,14 +515,40 @@ namespace build2
const path& src_root_file; // build[2]/bootstrap/src-root.build[2]
const path& out_root_file; // build[2]/bootstrap/src-root.build[2]
+ // Project-private variable pool.
+ //
+ // Note: see scope::var_pool_ and use scope::var_pool().
+ //
+ variable_pool var_pool;
+
// Meta/operations supported by this project.
//
build2::meta_operations meta_operations;
build2::operations operations;
- // Modules loaded by this project.
+ // Modules imported/loaded by this project.
//
- module_map modules;
+ module_import_map imported_modules;
+ module_state_map loaded_modules;
+
+ // Buildfiles already loaded for this project.
+ //
+ // We don't expect too many of them per project so let's use vector
+ // with linear search.
+ //
+ paths buildfiles;
+
+ bool
+ insert_buildfile (const path& f)
+ {
+ bool r (find (buildfiles.begin (),
+ buildfiles.end (),
+ f) == buildfiles.end ());
+ if (r)
+ buildfiles.push_back (f);
+
+ return r;
+ }
// Variable override cache.
//
@@ -543,40 +576,53 @@ namespace build2
// when, for example, caching environment-sensitive information.
//
string environment_checksum;
+
+ root_extra_type (scope&, bool altn); // file.cxx
};
unique_ptr<root_extra_type> root_extra;
+ // The last argument is the operation variable (see var_include) or NULL
+ // if not used.
+ //
void
- insert_operation (operation_id id, const operation_info& in)
+ insert_operation (operation_id id,
+ const operation_info& in,
+ const variable* ovar)
{
- root_extra->operations.insert (id, in);
+ // The operation variable should have prerequisite or target visibility.
+ //
+ assert (ovar == nullptr ||
+ (ovar->visibility == variable_visibility::prereq ||
+ ovar->visibility == variable_visibility::target));
+
+ root_extra->operations.insert (id, project_operation_info {&in, ovar});
}
void
insert_meta_operation (meta_operation_id id, const meta_operation_info& in)
{
- root_extra->meta_operations.insert (id, in);
+ root_extra->meta_operations.insert (id, &in);
}
bool
find_module (const string& name) const
{
- return root_extra->modules.find_module<module> (name) != nullptr;
+ return root_extra->loaded_modules.find_module<module> (name) != nullptr;
}
template <typename T>
T*
find_module (const string& name)
{
- return root_extra->modules.find_module<T> (name);
+ return root_extra->loaded_modules.find_module<T> (name);
}
template <typename T>
const T*
find_module (const string& name) const
{
- return root_extra->modules.find_module<T> (name);
+ return root_extra->loaded_modules.find_module<T> (name);
}
public:
@@ -589,10 +635,29 @@ namespace build2
return const_cast<scope&> (*this);
}
+ // Return the project-private variable pool (which is chained to the
+ // public pool) unless pub is true, in which case return the public pool.
+ //
+ // You would normally go for the public pool directly as an optimization
+ // (for example, in the module's init()) if you know all your variables
+ // are qualified and thus public.
+ //
variable_pool&
- var_pool ()
+ var_pool (bool pub = false)
{
- return ctx.var_pool.rw (*this);
+ return (pub ? ctx.var_pool :
+ var_pool_ != nullptr ? *var_pool_ :
+ root_ != nullptr ? *root_->var_pool_ :
+ ctx.var_pool).rw (*this);
+ }
+
+ const variable_pool&
+ var_pool (bool pub = false) const
+ {
+ return (pub ? ctx.var_pool :
+ var_pool_ != nullptr ? *var_pool_ :
+ root_ != nullptr ? *root_->var_pool_ :
+ ctx.var_pool);
}
private:
@@ -600,13 +665,13 @@ namespace build2
friend class scope_map;
friend class temp_scope;
- // These two from <libbuild2/file.hxx> set strong_.
+ // These from <libbuild2/file.hxx> set strong_.
//
- friend LIBBUILD2_SYMEXPORT void create_bootstrap_outer (scope&);
+ friend LIBBUILD2_SYMEXPORT void create_bootstrap_outer (scope&, bool);
friend LIBBUILD2_SYMEXPORT scope& create_bootstrap_inner (scope&,
const dir_path&);
- scope (context&, bool global);
+ scope (context&, bool shared);
~scope ();
// Return true if this root scope can be amalgamated.
@@ -621,6 +686,8 @@ namespace build2
scope* root_;
scope* strong_ = nullptr; // Only set on root scopes.
// NULL means no strong amalgamtion.
+
+ variable_pool* var_pool_ = nullptr; // For temp_scope override.
};
inline bool
@@ -688,24 +755,28 @@ namespace build2
// Temporary scope. The idea is to be able to create a temporary scope in
// order not to change the variables in the current scope. Such a scope is
- // not entered in to the scope map. As a result it can only be used as a
- // temporary set of variables. In particular, defining targets directly in
- // such a scope will surely end up badly. Defining any nested scopes will be
- // as if defining such a scope in the parent (since path() returns parent's
- // path).
+ // not entered into the scope map and its parent is the global scope. As a
+ // result it can only be used as a temporary set of variables. In
+ // particular, defining targets directly in such a scope will surely end up
+ // badly.
//
class temp_scope: public scope
{
public:
- temp_scope (scope& p)
- : scope (p.ctx, false /* global */)
+ temp_scope (scope& gs)
+ : scope (gs.ctx, false /* shared */),
+ var_pool_ (nullptr /* shared */, &gs.ctx.var_pool.rw (gs), nullptr)
{
- out_path_ = p.out_path_;
- src_path_ = p.src_path_;
- parent_ = &p;
- root_ = p.root_;
- // No need to copy strong_ since we are never root scope.
+ // Note that making this scope its own root is a bad idea.
+ //
+ root_ = nullptr;
+ parent_ = &gs;
+ out_path_ = gs.out_path_;
+ scope::var_pool_ = &var_pool_;
}
+
+ private:
+ variable_pool var_pool_;
};
// Scope map. Protected by the phase mutex.
diff --git a/libbuild2/scope.ixx b/libbuild2/scope.ixx
index 5d76a7f..5975c76 100644
--- a/libbuild2/scope.ixx
+++ b/libbuild2/scope.ixx
@@ -146,9 +146,11 @@ namespace build2
}
inline target_key scope::
- find_target_key (name& n, name& o, const location& loc) const
+ find_target_key (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto p (find_target_type (n, o, loc));
+ auto p (find_target_type (n, o, loc, tt));
return target_key {
&p.first,
&n.dir,
@@ -158,9 +160,11 @@ namespace build2
}
inline prerequisite_key scope::
- find_prerequisite_key (name& n, name& o, const location& loc) const
+ find_prerequisite_key (name& n, name& o,
+ const location& loc,
+ const target_type* tt) const
{
- auto p (find_prerequisite_type (n, o, loc));
+ auto p (find_prerequisite_type (n, o, loc, tt));
return prerequisite_key {
n.proj,
{
diff --git a/libbuild2/script/builtin-options.cxx b/libbuild2/script/builtin-options.cxx
index 7589faf..b71b9d3 100644
--- a/libbuild2/script/builtin-options.cxx
+++ b/libbuild2/script/builtin-options.cxx
@@ -18,6 +18,7 @@
#include <utility>
#include <ostream>
#include <sstream>
+#include <cstring>
namespace build2
{
@@ -52,10 +53,31 @@ namespace build2
struct parser<bool>
{
static void
- parse (bool& x, scanner& s)
+ parse (bool& x, bool& xs, scanner& s)
{
- s.next ();
- x = true;
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ const char* v (s.next ());
+
+ if (std::strcmp (v, "1") == 0 ||
+ std::strcmp (v, "true") == 0 ||
+ std::strcmp (v, "TRUE") == 0 ||
+ std::strcmp (v, "True") == 0)
+ x = true;
+ else if (std::strcmp (v, "0") == 0 ||
+ std::strcmp (v, "false") == 0 ||
+ std::strcmp (v, "FALSE") == 0 ||
+ std::strcmp (v, "False") == 0)
+ x = false;
+ else
+ throw invalid_value (o, v);
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
}
};
@@ -165,6 +187,56 @@ namespace build2
}
};
+ template <typename K, typename V, typename C>
+ struct parser<std::multimap<K, V, C> >
+ {
+ static void
+ parse (std::multimap<K, V, C>& m, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (s.more ())
+ {
+ std::size_t pos (s.position ());
+ std::string ov (s.next ());
+ std::string::size_type p = ov.find ('=');
+
+ K k = K ();
+ V v = V ();
+ std::string kstr (ov, 0, p);
+ std::string vstr (ov, (p != std::string::npos ? p + 1 : ov.size ()));
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (o),
+ 0
+ };
+
+ bool dummy;
+ if (!kstr.empty ())
+ {
+ av[1] = const_cast<char*> (kstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<K>::parse (k, dummy, s);
+ }
+
+ if (!vstr.empty ())
+ {
+ av[1] = const_cast<char*> (vstr.c_str ());
+ argv_scanner s (0, ac, av, false, pos);
+ parser<V>::parse (v, dummy, s);
+ }
+
+ m.insert (typename std::multimap<K, V, C>::value_type (k, v));
+ }
+ else
+ throw missing_value (o);
+
+ xs = true;
+ }
+ };
+
template <typename X, typename T, T X::*M>
void
thunk (X& x, scanner& s)
@@ -172,6 +244,14 @@ namespace build2
parser<T>::parse (x.*M, s);
}
+ template <typename X, bool X::*M>
+ void
+ thunk (X& x, scanner& s)
+ {
+ s.next ();
+ x.*M = true;
+ }
+
template <typename X, typename T, T X::*M, bool X::*S>
void
thunk (X& x, scanner& s)
@@ -183,7 +263,6 @@ namespace build2
}
#include <map>
-#include <cstring>
namespace build2
{
@@ -284,17 +363,17 @@ namespace build2
_cli_set_options_map_init ()
{
_cli_set_options_map_["--exact"] =
- &::build2::build::cli::thunk< set_options, bool, &set_options::exact_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::exact_ >;
_cli_set_options_map_["-e"] =
- &::build2::build::cli::thunk< set_options, bool, &set_options::exact_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::exact_ >;
_cli_set_options_map_["--newline"] =
- &::build2::build::cli::thunk< set_options, bool, &set_options::newline_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::newline_ >;
_cli_set_options_map_["-n"] =
- &::build2::build::cli::thunk< set_options, bool, &set_options::newline_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::newline_ >;
_cli_set_options_map_["--whitespace"] =
- &::build2::build::cli::thunk< set_options, bool, &set_options::whitespace_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::whitespace_ >;
_cli_set_options_map_["-w"] =
- &::build2::build::cli::thunk< set_options, bool, &set_options::whitespace_ >;
+ &::build2::build::cli::thunk< set_options, &set_options::whitespace_ >;
}
};
@@ -563,9 +642,9 @@ namespace build2
_cli_timeout_options_map_init ()
{
_cli_timeout_options_map_["--success"] =
- &::build2::build::cli::thunk< timeout_options, bool, &timeout_options::success_ >;
+ &::build2::build::cli::thunk< timeout_options, &timeout_options::success_ >;
_cli_timeout_options_map_["-s"] =
- &::build2::build::cli::thunk< timeout_options, bool, &timeout_options::success_ >;
+ &::build2::build::cli::thunk< timeout_options, &timeout_options::success_ >;
}
};
@@ -1047,6 +1126,297 @@ namespace build2
return r;
}
+
+ // for_options
+ //
+
+ for_options::
+ for_options ()
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ }
+
+ for_options::
+ for_options (int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ _parse (s, opt, arg);
+ }
+
+ for_options::
+ for_options (int start,
+ int& argc,
+ char** argv,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ _parse (s, opt, arg);
+ }
+
+ for_options::
+ for_options (int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (argc, argv, erase);
+ _parse (s, opt, arg);
+ end = s.end ();
+ }
+
+ for_options::
+ for_options (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ ::build2::build::cli::argv_scanner s (start, argc, argv, erase);
+ _parse (s, opt, arg);
+ end = s.end ();
+ }
+
+ for_options::
+ for_options (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt,
+ ::build2::build::cli::unknown_mode arg)
+ : exact_ (),
+ newline_ (),
+ whitespace_ ()
+ {
+ _parse (s, opt, arg);
+ }
+
+ typedef
+ std::map<std::string, void (*) (for_options&, ::build2::build::cli::scanner&)>
+ _cli_for_options_map;
+
+ static _cli_for_options_map _cli_for_options_map_;
+
+ struct _cli_for_options_map_init
+ {
+ _cli_for_options_map_init ()
+ {
+ _cli_for_options_map_["--exact"] =
+ &::build2::build::cli::thunk< for_options, &for_options::exact_ >;
+ _cli_for_options_map_["-e"] =
+ &::build2::build::cli::thunk< for_options, &for_options::exact_ >;
+ _cli_for_options_map_["--newline"] =
+ &::build2::build::cli::thunk< for_options, &for_options::newline_ >;
+ _cli_for_options_map_["-n"] =
+ &::build2::build::cli::thunk< for_options, &for_options::newline_ >;
+ _cli_for_options_map_["--whitespace"] =
+ &::build2::build::cli::thunk< for_options, &for_options::whitespace_ >;
+ _cli_for_options_map_["-w"] =
+ &::build2::build::cli::thunk< for_options, &for_options::whitespace_ >;
+ }
+ };
+
+ static _cli_for_options_map_init _cli_for_options_map_init_;
+
+ bool for_options::
+ _parse (const char* o, ::build2::build::cli::scanner& s)
+ {
+ _cli_for_options_map::const_iterator i (_cli_for_options_map_.find (o));
+
+ if (i != _cli_for_options_map_.end ())
+ {
+ (*(i->second)) (*this, s);
+ return true;
+ }
+
+ return false;
+ }
+
+ bool for_options::
+ _parse (::build2::build::cli::scanner& s,
+ ::build2::build::cli::unknown_mode opt_mode,
+ ::build2::build::cli::unknown_mode arg_mode)
+ {
+ // Can't skip combined flags (--no-combined-flags).
+ //
+ assert (opt_mode != ::build2::build::cli::unknown_mode::skip);
+
+ bool r = false;
+ bool opt = true;
+
+ while (s.more ())
+ {
+ const char* o = s.peek ();
+
+ if (std::strcmp (o, "--") == 0)
+ {
+ opt = false;
+ s.skip ();
+ r = true;
+ continue;
+ }
+
+ if (opt)
+ {
+ if (_parse (o, s))
+ {
+ r = true;
+ continue;
+ }
+
+ if (std::strncmp (o, "-", 1) == 0 && o[1] != '\0')
+ {
+ // Handle combined option values.
+ //
+ std::string co;
+ if (const char* v = std::strchr (o, '='))
+ {
+ co.assign (o, 0, v - o);
+ ++v;
+
+ int ac (2);
+ char* av[] =
+ {
+ const_cast<char*> (co.c_str ()),
+ const_cast<char*> (v)
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (_parse (co.c_str (), ns))
+ {
+ // Parsed the option but not its value?
+ //
+ if (ns.end () != 2)
+ throw ::build2::build::cli::invalid_value (co, v);
+
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = co.c_str ();
+ }
+ }
+
+ // Handle combined flags.
+ //
+ char cf[3];
+ {
+ const char* p = o + 1;
+ for (; *p != '\0'; ++p)
+ {
+ if (!((*p >= 'a' && *p <= 'z') ||
+ (*p >= 'A' && *p <= 'Z') ||
+ (*p >= '0' && *p <= '9')))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ for (p = o + 1; *p != '\0'; ++p)
+ {
+ std::strcpy (cf, "-");
+ cf[1] = *p;
+ cf[2] = '\0';
+
+ int ac (1);
+ char* av[] =
+ {
+ cf
+ };
+
+ ::build2::build::cli::argv_scanner ns (0, ac, av);
+
+ if (!_parse (cf, ns))
+ break;
+ }
+
+ if (*p == '\0')
+ {
+ // All handled.
+ //
+ s.next ();
+ r = true;
+ continue;
+ }
+ else
+ {
+ // Set the unknown option and fall through.
+ //
+ o = cf;
+ }
+ }
+ }
+
+ switch (opt_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_option (o);
+ }
+ }
+
+ break;
+ }
+ }
+
+ switch (arg_mode)
+ {
+ case ::build2::build::cli::unknown_mode::skip:
+ {
+ s.skip ();
+ r = true;
+ continue;
+ }
+ case ::build2::build::cli::unknown_mode::stop:
+ {
+ break;
+ }
+ case ::build2::build::cli::unknown_mode::fail:
+ {
+ throw ::build2::build::cli::unknown_argument (o);
+ }
+ }
+
+ break;
+ }
+
+ return r;
+ }
}
}
diff --git a/libbuild2/script/builtin-options.hxx b/libbuild2/script/builtin-options.hxx
index c7cebbc..9361d18 100644
--- a/libbuild2/script/builtin-options.hxx
+++ b/libbuild2/script/builtin-options.hxx
@@ -253,6 +253,90 @@ namespace build2
vector<string> clear_;
bool clear_specified_;
};
+
+ class for_options
+ {
+ public:
+ for_options ();
+
+ for_options (int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (int start,
+ int& argc,
+ char** argv,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (int start,
+ int& argc,
+ char** argv,
+ int& end,
+ bool erase = false,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ for_options (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option = ::build2::build::cli::unknown_mode::fail,
+ ::build2::build::cli::unknown_mode argument = ::build2::build::cli::unknown_mode::stop);
+
+ // Option accessors and modifiers.
+ //
+ const bool&
+ exact () const;
+
+ bool&
+ exact ();
+
+ void
+ exact (const bool&);
+
+ const bool&
+ newline () const;
+
+ bool&
+ newline ();
+
+ void
+ newline (const bool&);
+
+ const bool&
+ whitespace () const;
+
+ bool&
+ whitespace ();
+
+ void
+ whitespace (const bool&);
+
+ // Implementation details.
+ //
+ protected:
+ bool
+ _parse (const char*, ::build2::build::cli::scanner&);
+
+ private:
+ bool
+ _parse (::build2::build::cli::scanner&,
+ ::build2::build::cli::unknown_mode option,
+ ::build2::build::cli::unknown_mode argument);
+
+ public:
+ bool exact_;
+ bool newline_;
+ bool whitespace_;
+ };
}
}
diff --git a/libbuild2/script/builtin-options.ixx b/libbuild2/script/builtin-options.ixx
index 8f84177..575eb95 100644
--- a/libbuild2/script/builtin-options.ixx
+++ b/libbuild2/script/builtin-options.ixx
@@ -153,6 +153,63 @@ namespace build2
{
this->clear_specified_ = x;
}
+
+ // for_options
+ //
+
+ inline const bool& for_options::
+ exact () const
+ {
+ return this->exact_;
+ }
+
+ inline bool& for_options::
+ exact ()
+ {
+ return this->exact_;
+ }
+
+ inline void for_options::
+ exact (const bool& x)
+ {
+ this->exact_ = x;
+ }
+
+ inline const bool& for_options::
+ newline () const
+ {
+ return this->newline_;
+ }
+
+ inline bool& for_options::
+ newline ()
+ {
+ return this->newline_;
+ }
+
+ inline void for_options::
+ newline (const bool& x)
+ {
+ this->newline_ = x;
+ }
+
+ inline const bool& for_options::
+ whitespace () const
+ {
+ return this->whitespace_;
+ }
+
+ inline bool& for_options::
+ whitespace ()
+ {
+ return this->whitespace_;
+ }
+
+ inline void for_options::
+ whitespace (const bool& x)
+ {
+ this->whitespace_ = x;
+ }
}
}
diff --git a/libbuild2/script/builtin.cli b/libbuild2/script/builtin.cli
index 50dd3a0..c993983 100644
--- a/libbuild2/script/builtin.cli
+++ b/libbuild2/script/builtin.cli
@@ -30,5 +30,12 @@ namespace build2
vector<string> --unset|-u;
vector<string> --clear|-c;
};
+
+ class for_options
+ {
+ bool --exact|-e;
+ bool --newline|-n;
+ bool --whitespace|-w;
+ };
}
}
diff --git a/libbuild2/script/lexer.cxx b/libbuild2/script/lexer.cxx
index 7577149..e13bbdb 100644
--- a/libbuild2/script/lexer.cxx
+++ b/libbuild2/script/lexer.cxx
@@ -24,10 +24,7 @@ namespace build2
bool q (true); // quotes
if (!esc)
- {
- assert (!state_.empty ());
- esc = state_.top ().escapes;
- }
+ esc = current_state ().escapes;
switch (m)
{
@@ -84,7 +81,7 @@ namespace build2
}
assert (ps == '\0');
- state_.push (
+ mode_impl (
state {m, data, nullopt, false, false, ps, s, n, q, *esc, s1, s2});
}
@@ -93,7 +90,7 @@ namespace build2
{
token r;
- switch (state_.top ().mode)
+ switch (mode ())
{
case lexer_mode::command_expansion:
case lexer_mode::here_line_single:
@@ -119,7 +116,7 @@ namespace build2
xchar c (get ());
uint64_t ln (c.line), cn (c.column);
- const state& st (state_.top ());
+ const state& st (current_state ());
lexer_mode m (st.mode);
auto make_token = [&sep, &m, ln, cn] (type t)
diff --git a/libbuild2/script/lexer.hxx b/libbuild2/script/lexer.hxx
index dbfdfcc..3cbcc03 100644
--- a/libbuild2/script/lexer.hxx
+++ b/libbuild2/script/lexer.hxx
@@ -112,6 +112,8 @@ namespace build2
const redirect_aliases_type& redirect_aliases;
protected:
+ using build2::lexer::mode; // Getter.
+
lexer (istream& is, const path_name& name, uint64_t line,
const char* escapes,
bool set_mode,
diff --git a/libbuild2/script/parser.cxx b/libbuild2/script/parser.cxx
index 82eb9c8..ae6da76 100644
--- a/libbuild2/script/parser.cxx
+++ b/libbuild2/script/parser.cxx
@@ -3,9 +3,14 @@
#include <libbuild2/script/parser.hxx>
+#include <cstring> // strchr()
+#include <sstream>
+
#include <libbuild2/variable.hxx>
-#include <libbuild2/script/run.hxx> // exit
+
+#include <libbuild2/script/run.hxx> // exit, stream_reader
#include <libbuild2/script/lexer.hxx>
+#include <libbuild2/script/builtin-options.hxx>
using namespace std;
@@ -15,6 +20,33 @@ namespace build2
{
using type = token_type;
+ bool parser::
+ need_cmdline_relex (const string& s)
+ {
+ for (auto i (s.begin ()), e (s.end ()); i != e; ++i)
+ {
+ char c (*i);
+
+ if (c == '\\')
+ {
+ if (++i == e)
+ return false;
+
+ c = *i;
+
+ if (c == '\\' || c == '\'' || c == '\"')
+ return true;
+
+ // Fall through.
+ }
+
+ if (strchr ("|<>&\"'", c) != nullptr)
+ return true;
+ }
+
+ return false;
+ }
+
value parser::
parse_variable_line (token& t, type& tt)
{
@@ -111,18 +143,20 @@ namespace build2
return nullopt;
}
- pair<command_expr, parser::here_docs> parser::
+ parser::parse_command_expr_result parser::
parse_command_expr (token& t, type& tt,
- const redirect_aliases& ra)
+ const redirect_aliases& ra,
+ optional<token>&& program)
{
- // enter: first token of the command line
+ // enter: first (or second, if program) token of the command line
// leave: <newline> or unknown token
command_expr expr;
// OR-ed to an implied false for the first term.
//
- expr.push_back ({expr_operator::log_or, command_pipe ()});
+ if (!pre_parse_)
+ expr.push_back ({expr_operator::log_or, command_pipe ()});
command c; // Command being assembled.
@@ -189,8 +223,8 @@ namespace build2
// Add the next word to either one of the pending positions or to
// program arguments by default.
//
- auto add_word = [&c, &p, &mod, &check_regex_mod, this] (
- string&& w, const location& l)
+ auto add_word = [&c, &p, &mod, &check_regex_mod, this]
+ (string&& w, const location& l)
{
auto add_merge = [&l, this] (optional<redirect>& r,
const string& w,
@@ -668,11 +702,30 @@ namespace build2
const location ll (get_location (t)); // Line location.
// Keep parsing chunks of the command line until we see one of the
- // "terminators" (newline, exit status comparison, etc).
+ // "terminators" (newline or unknown/unexpected token).
//
location l (ll);
names ns; // Reuse to reduce allocations.
+ bool for_loop (false);
+
+ if (program)
+ {
+ assert (program->type == type::word);
+
+ // Note that here we skip all the parse_program() business since the
+ // program can only be one of the specially-recognized names.
+ //
+ if (program->value == "for")
+ for_loop = true;
+ else
+ assert (false); // Must be specially-recognized program.
+
+ // Save the program name and continue parsing as a command.
+ //
+ add_word (move (program->value), get_location (*program));
+ }
+
for (bool done (false); !done; l = get_location (t))
{
tt = ra.resolve (tt);
@@ -688,6 +741,9 @@ namespace build2
case type::equal:
case type::not_equal:
{
+ if (for_loop)
+ fail (l) << "for-loop exit code cannot be checked";
+
if (!pre_parse_)
check_pending (l);
@@ -718,30 +774,39 @@ namespace build2
}
case type::pipe:
+ if (for_loop)
+ fail (l) << "for-loop must be last command in a pipe";
+ // Fall through.
+
case type::log_or:
case type::log_and:
+ if (for_loop)
+ fail (l) << "command expression involving for-loop";
+ // Fall through.
- case type::in_pass:
- case type::out_pass:
+ case type::clean:
+ if (for_loop)
+ fail (l) << "cleanup in for-loop";
+ // Fall through.
- case type::in_null:
+ case type::out_pass:
case type::out_null:
-
case type::out_trace:
-
case type::out_merge:
-
- case type::in_str:
- case type::in_doc:
case type::out_str:
case type::out_doc:
-
- case type::in_file:
case type::out_file_cmp:
case type::out_file_ovr:
case type::out_file_app:
+ if (for_loop)
+ fail (l) << "output redirect in for-loop";
+ // Fall through.
- case type::clean:
+ case type::in_pass:
+ case type::in_null:
+ case type::in_str:
+ case type::in_doc:
+ case type::in_file:
{
if (pre_parse_)
{
@@ -939,6 +1004,42 @@ namespace build2
next (t, tt);
break;
}
+ case type::lsbrace:
+ {
+ // Recompose the attributes into a single command argument.
+ //
+ assert (!pre_parse_);
+
+ attributes_push (t, tt, true /* standalone */);
+
+ attributes as (attributes_pop ());
+ assert (!as.empty ());
+
+ ostringstream os;
+ names storage;
+ char c ('[');
+ for (const attribute& a: as)
+ {
+ os << c << a.name;
+
+ if (!a.value.null)
+ {
+ os << '=';
+
+ storage.clear ();
+ to_stream (os,
+ reverse (a.value, storage, true /* reduce */),
+ quote_mode::normal,
+ '@');
+ }
+
+ c = ',';
+ }
+ os << ']';
+
+ add_word (os.str (), l);
+ break;
+ }
default:
{
// Bail out if this is one of the unknown tokens.
@@ -1007,11 +1108,12 @@ namespace build2
hd.push_back (
here_doc {
{rd},
- move (end),
- (t.qtype == quote_type::unquoted ||
- t.qtype == quote_type::single),
- move (mod),
- r.intro, move (r.flags)});
+ move (end),
+ (t.qtype == quote_type::unquoted ||
+ t.qtype == quote_type::single),
+ move (mod),
+ r.intro,
+ move (r.flags)});
p = pending::none;
mod.clear ();
@@ -1024,16 +1126,33 @@ namespace build2
bool prog (p == pending::program_first ||
p == pending::program_next);
- // Check if this is the env pseudo-builtin.
+ // Check if this is the env pseudo-builtin or the for-loop.
//
bool env (false);
- if (prog && tt == type::word && t.value == "env")
+ if (prog && tt == type::word)
{
- parsed_env r (parse_env_builtin (t, tt));
- c.cwd = move (r.cwd);
- c.variables = move (r.variables);
- c.timeout = r.timeout;
- env = true;
+ if (t.value == "env")
+ {
+ parsed_env r (parse_env_builtin (t, tt));
+ c.cwd = move (r.cwd);
+ c.variables = move (r.variables);
+ c.timeout = r.timeout;
+ env = true;
+ }
+ else if (t.value == "for")
+ {
+ if (expr.size () > 1)
+ fail (l) << "command expression involving for-loop";
+
+ for_loop = true;
+
+ // Save 'for' as a program name and continue parsing as a
+ // command.
+ //
+ add_word (move (t.value), l);
+ next (t, tt);
+ continue;
+ }
}
// Parse the next chunk as names to get expansion, etc. Note that
@@ -1092,16 +1211,17 @@ namespace build2
// Process what we got.
//
- // First see if this is a value that should not be re-lexed. The
- // long term plan is to only re-lex values of a special type
- // representing a canned command line.
+ // First see if this is a value that should not be re-lexed. We
+ // only re-lex values of the special `cmdline` type that
+ // represents a canned command line.
//
// Otherwise, determine whether anything inside was quoted (note
// that the current token is "next" and is not part of this).
//
- bool q (
- (pr.value && !relex_) ||
- (quoted () - (t.qtype != quote_type::unquoted ? 1 : 0)) != 0);
+ bool lex (
+ pr.value
+ ? pr.type != nullptr && pr.type->is_a<cmdline> ()
+ : (quoted () - (t.qtype != quote_type::unquoted ? 1 : 0)) == 0);
for (name& n: ns)
{
@@ -1123,10 +1243,7 @@ namespace build2
// interesting characters (operators plus quotes/escapes),
// then no need to re-lex.
//
- // NOTE: update quoting (script.cxx:to_stream_q()) if adding
- // any new characters.
- //
- if (q || s.find_first_of ("|&<>\'\"\\") == string::npos)
+ if (!lex || !need_cmdline_relex (s))
add_word (move (s), l);
else
{
@@ -1216,9 +1333,16 @@ namespace build2
switch (tt)
{
case type::pipe:
+ if (for_loop)
+ fail (l) << "for-loop must be last command in a pipe";
+ // Fall through.
+
case type::log_or:
case type::log_and:
{
+ if (for_loop)
+ fail (l) << "command expression involving for-loop";
+
// Check that the previous command makes sense.
//
check_command (l, tt != type::pipe);
@@ -1238,30 +1362,11 @@ namespace build2
break;
}
- case type::in_pass:
- case type::out_pass:
-
- case type::in_null:
- case type::out_null:
-
- case type::out_trace:
-
- case type::out_merge:
-
- case type::in_str:
- case type::out_str:
-
- case type::in_file:
- case type::out_file_cmp:
- case type::out_file_ovr:
- case type::out_file_app:
- {
- parse_redirect (move (t), tt, l);
- break;
- }
-
case type::clean:
{
+ if (for_loop)
+ fail (l) << "cleanup in for-loop";
+
parse_clean (t);
break;
}
@@ -1272,6 +1377,27 @@ namespace build2
fail (l) << "here-document redirect in expansion";
break;
}
+
+ case type::out_pass:
+ case type::out_null:
+ case type::out_trace:
+ case type::out_merge:
+ case type::out_str:
+ case type::out_file_cmp:
+ case type::out_file_ovr:
+ case type::out_file_app:
+ if (for_loop)
+ fail (l) << "output redirect in for-loop";
+ // Fall through.
+
+ case type::in_pass:
+ case type::in_null:
+ case type::in_str:
+ case type::in_file:
+ {
+ parse_redirect (move (t), tt, l);
+ break;
+ }
}
}
@@ -1299,7 +1425,7 @@ namespace build2
expr.back ().pipe.push_back (move (c));
}
- return make_pair (move (expr), move (hd));
+ return parse_command_expr_result {move (expr), move (hd), for_loop};
}
parser::parsed_env parser::
@@ -1548,7 +1674,7 @@ namespace build2
void parser::
parse_here_documents (token& t, type& tt,
- pair<command_expr, here_docs>& p)
+ parse_command_expr_result& pr)
{
// enter: newline
// leave: newline
@@ -1556,7 +1682,7 @@ namespace build2
// Parse here-document fragments in the order they were mentioned on
// the command line.
//
- for (here_doc& h: p.second)
+ for (here_doc& h: pr.docs)
{
// Switch to the here-line mode which is like single/double-quoted
// string but recognized the newline as a separator.
@@ -1576,7 +1702,7 @@ namespace build2
{
auto i (h.redirects.cbegin ());
- command& c (p.first[i->expr].pipe[i->pipe]);
+ command& c (pr.expr[i->expr].pipe[i->pipe]);
optional<redirect>& r (i->fd == 0 ? c.in :
i->fd == 1 ? c.out :
@@ -1608,7 +1734,7 @@ namespace build2
//
for (++i; i != h.redirects.cend (); ++i)
{
- command& c (p.first[i->expr].pipe[i->pipe]);
+ command& c (pr.expr[i->expr].pipe[i->pipe]);
optional<redirect>& ir (i->fd == 0 ? c.in :
i->fd == 1 ? c.out :
@@ -2034,6 +2160,8 @@ namespace build2
else if (n == "elif") r = line_type::cmd_elif;
else if (n == "elif!") r = line_type::cmd_elifn;
else if (n == "else") r = line_type::cmd_else;
+ else if (n == "while") r = line_type::cmd_while;
+ else if (n == "for") r = line_type::cmd_for_stream;
else if (n == "end") r = line_type::cmd_end;
else
{
@@ -2064,8 +2192,9 @@ namespace build2
exec_lines (lines::const_iterator i, lines::const_iterator e,
const function<exec_set_function>& exec_set,
const function<exec_cmd_function>& exec_cmd,
- const function<exec_if_function>& exec_if,
- size_t& li,
+ const function<exec_cond_function>& exec_cond,
+ const function<exec_for_function>& exec_for,
+ const iteration_index* ii, size_t& li,
variable_pool* var_pool)
{
try
@@ -2089,6 +2218,73 @@ namespace build2
next (t, tt);
const location ll (get_location (t));
+ // If end is true, then find the flow control construct's end ('end'
+ // line). Otherwise, find the flow control construct's block end
+ // ('end', 'else', etc). If skip is true then increment the command
+ // line index.
+ //
+ auto fcend = [e, &li] (lines::const_iterator j,
+ bool end,
+ bool skip) -> lines::const_iterator
+ {
+ // We need to be aware of nested flow control constructs.
+ //
+ size_t n (0);
+
+ for (++j; j != e; ++j)
+ {
+ line_type lt (j->type);
+
+ if (lt == line_type::cmd_if ||
+ lt == line_type::cmd_ifn ||
+ lt == line_type::cmd_while ||
+ lt == line_type::cmd_for_stream ||
+ lt == line_type::cmd_for_args)
+ ++n;
+
+ // If we are nested then we just wait until we get back
+ // to the surface.
+ //
+ if (n == 0)
+ {
+ switch (lt)
+ {
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_else:
+ if (end) break;
+ // Fall through.
+ case line_type::cmd_end: return j;
+ default: break;
+ }
+ }
+
+ if (lt == line_type::cmd_end)
+ --n;
+
+ if (skip)
+ {
+ // Note that we don't count else, end, and 'for x: ...' as
+ // commands.
+ //
+ switch (lt)
+ {
+ case line_type::cmd:
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_while: ++li; break;
+ default: break;
+ }
+ }
+ }
+
+ assert (false); // Missing end.
+ return e;
+ };
+
switch (lt)
{
case line_type::var:
@@ -2124,7 +2320,10 @@ namespace build2
single = true;
}
- exec_cmd (t, tt, li++, single, ll);
+ exec_cmd (t, tt,
+ ii, li++, single,
+ nullptr /* command_function */,
+ ll);
replay_stop ();
break;
@@ -2140,7 +2339,7 @@ namespace build2
bool take;
if (lt != line_type::cmd_else)
{
- take = exec_if (t, tt, li++, ll);
+ take = exec_cond (t, tt, ii, li++, ll);
if (lt == line_type::cmd_ifn || lt == line_type::cmd_elifn)
take = !take;
@@ -2153,97 +2352,383 @@ namespace build2
replay_stop ();
- // If end is true, then find the 'end' line. Otherwise, find
- // the next if-else line. If skip is true then increment the
- // command line index.
+ // If we are taking this branch then we need to parse all the
+ // lines until the next if-else line and then skip all the lines
+ // until the end (unless we are already at the end).
+ //
+ // Otherwise, we need to skip all the lines until the next
+ // if-else line and then continue parsing.
//
- auto next = [e, &li] (lines::const_iterator j,
- bool end,
- bool skip) -> lines::const_iterator
+ if (take)
+ {
+ // Find block end.
+ //
+ lines::const_iterator j (fcend (i, false, false));
+
+ if (!exec_lines (i + 1, j,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ ii, li,
+ var_pool))
+ return false;
+
+ // Find construct end.
+ //
+ i = j->type == line_type::cmd_end ? j : fcend (j, true, true);
+ }
+ else
+ {
+ // Find block end.
+ //
+ i = fcend (i, false, true);
+
+ if (i->type != line_type::cmd_end)
+ --i; // Continue with this line (e.g., elif or else).
+ }
+
+ break;
+ }
+ case line_type::cmd_while:
+ {
+ // The while-loop construct end. Set on the first iteration.
+ //
+ lines::const_iterator we (e);
+
+ size_t wli (li);
+
+ for (iteration_index wi {1, ii};; wi.index++)
+ {
+ next (t, tt); // Skip to start of command.
+
+ bool exec (exec_cond (t, tt, &wi, li++, ll));
+
+ replay_stop ();
+
+ // If the condition evaluates to true, then we need to parse
+ // all the lines until the end line, prepare for the condition
+ // reevaluation, and re-iterate.
+ //
+ // Otherwise, we need to skip all the lines until the end
+ // line, bail out from the loop, and continue parsing.
+ //
+ if (exec)
{
- // We need to be aware of nested if-else chains.
+ // Find the construct end, if it is not found yet.
//
- size_t n (0);
+ if (we == e)
+ we = fcend (i, true, false);
+
+ if (!exec_lines (i + 1, we,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ &wi, li,
+ var_pool))
+ return false;
+
+ // Prepare for the condition reevaluation.
+ //
+ replay_data (replay_tokens (ln.tokens));
+ next (t, tt);
+ li = wli;
+ }
+ else
+ {
+ // Position to the construct end, always incrementing the
+ // line index (skip is true).
+ //
+ i = fcend (i, true, true);
+ break; // Bail out from the while-loop.
+ }
+ }
+
+ break;
+ }
+ case line_type::cmd_for_stream:
+ {
+ // The for-loop construct end. Set on the first iteration.
+ //
+ lines::const_iterator fe (e);
- for (++j; j != e; ++j)
+ // Let's "wrap up" all the required data into the single object
+ // to rely on the "small function object" optimization.
+ //
+ struct loop_data
+ {
+ lines::const_iterator i;
+ lines::const_iterator e;
+ const function<exec_set_function>& exec_set;
+ const function<exec_cmd_function>& exec_cmd;
+ const function<exec_cond_function>& exec_cond;
+ const function<exec_for_function>& exec_for;
+ const iteration_index* ii;
+ size_t& li;
+ variable_pool* var_pool;
+ decltype (fcend)& fce;
+ lines::const_iterator& fe;
+ } ld {i, e,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ ii, li,
+ var_pool,
+ fcend,
+ fe};
+
+ function<command_function> cf (
+ [&ld, this]
+ (environment& env,
+ const strings& args,
+ auto_fd in,
+ pipe_command* pipe,
+ const optional<deadline>& dl,
+ const location& ll)
+ {
+ namespace cli = build2::build::cli;
+
+ try
{
- line_type lt (j->type);
+ // Parse arguments.
+ //
+ cli::vector_scanner scan (args);
+ for_options ops (scan);
+
+ // Note: diagnostics consistent with the set builtin.
+ //
+ if (ops.whitespace () && ops.newline ())
+ fail (ll) << "for: both -n|--newline and "
+ << "-w|--whitespace specified";
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
- ++n;
+ if (!scan.more ())
+ fail (ll) << "for: missing variable name";
- // If we are nested then we just wait until we get back
- // to the surface.
+ string vname (scan.next ());
+ if (vname.empty ())
+ fail (ll) << "for: empty variable name";
+
+ // Detect patterns analogous to parse_variable_name() (so
+ // we diagnose `for x[string]`).
+ //
+ if (vname.find_first_of ("[*?") != string::npos)
+ fail (ll) << "for: expected variable name instead of "
+ << vname;
+
+ // Let's also diagnose the `... | for x:...` misuse which
+ // can probably be quite common.
//
- if (n == 0)
+ if (vname.find (':') != string::npos)
+ fail (ll) << "for: ':' after variable name";
+
+ string attrs;
+ if (scan.more ())
{
- switch (lt)
- {
- case line_type::cmd_elif:
- case line_type::cmd_elifn:
- case line_type::cmd_else:
- if (end) break;
- // Fall through.
- case line_type::cmd_end: return j;
- default: break;
- }
+ attrs = scan.next ();
+
+ if (attrs.empty ())
+ fail (ll) << "for: empty variable attributes";
+
+ if (scan.more ())
+ fail (ll) << "for: unexpected argument '"
+ << scan.next () << "'";
}
- if (lt == line_type::cmd_end)
- --n;
+ // Since the command pipe is parsed, we can stop
+ // replaying. Note that we should do this before calling
+ // exec_lines() for the loop body. Also note that we
+ // should increment the line index before that.
+ //
+ replay_stop ();
+
+ size_t fli (++ld.li);
+ iteration_index fi {1, ld.ii};
- if (skip)
+ // Let's "wrap up" all the required data into the single
+ // object to rely on the "small function object"
+ // optimization.
+ //
+ struct
{
- // Note that we don't count else and end as commands.
- //
- switch (lt)
+ loop_data& ld;
+ environment& env;
+ const string& vname;
+ const string& attrs;
+ const location& ll;
+ size_t fli;
+ iteration_index& fi;
+
+ } d {ld, env, vname, attrs, ll, fli, fi};
+
+ function<void (string&&)> f (
+ [&d, this] (string&& s)
{
- case line_type::cmd:
- case line_type::cmd_if:
- case line_type::cmd_ifn:
- case line_type::cmd_elif:
- case line_type::cmd_elifn: ++li; break;
- default: break;
- }
- }
+ loop_data& ld (d.ld);
+
+ ld.li = d.fli;
+
+ // Don't move from the variable name since it is used
+ // on each iteration.
+ //
+ d.env.set_variable (d.vname,
+ names {name (move (s))},
+ d.attrs,
+ d.ll);
+
+ // Find the construct end, if it is not found yet.
+ //
+ if (ld.fe == ld.e)
+ ld.fe = ld.fce (ld.i, true, false);
+
+ if (!exec_lines (ld.i + 1, ld.fe,
+ ld.exec_set,
+ ld.exec_cmd,
+ ld.exec_cond,
+ ld.exec_for,
+ &d.fi, ld.li,
+ ld.var_pool))
+ {
+ throw exit (true);
+ }
+
+ d.fi.index++;
+ });
+
+ read (move (in),
+ !ops.newline (), ops.newline (), ops.exact (),
+ f,
+ pipe,
+ dl,
+ ll,
+ "for");
+ }
+ catch (const cli::exception& e)
+ {
+ fail (ll) << "for: " << e;
}
+ });
- assert (false); // Missing end.
- return e;
- };
+ exec_cmd (t, tt, ii, li, false /* single */, cf, ll);
- // If we are taking this branch then we need to parse all the
- // lines until the next if-else line and then skip all the
- // lines until the end (unless next is already end).
+ // Position to construct end.
//
- // Otherwise, we need to skip all the lines until the next
- // if-else line and then continue parsing.
+ i = (fe != e ? fe : fcend (i, true, true));
+
+ break;
+ }
+ case line_type::cmd_for_args:
+ {
+ // Parse the variable name.
//
- if (take)
+ next (t, tt);
+
+ assert (tt == type::word && t.qtype == quote_type::unquoted);
+
+ string vn (move (t.value));
+
+ // Enter the variable into the pool if this is not done during
+ // the script parsing (see the var line type handling for
+ // details).
+ //
+ const variable* var (ln.var);
+
+ if (var == nullptr)
{
- // Next if-else.
- //
- lines::const_iterator j (next (i, false, false));
- if (!exec_lines (i + 1, j,
- exec_set, exec_cmd, exec_if,
- li,
- var_pool))
- return false;
+ assert (var_pool != nullptr);
- i = j->type == line_type::cmd_end ? j : next (j, true, true);
+ var = &var_pool->insert (move (vn));
}
- else
+
+ // Parse the potential element attributes and skip the colon.
+ //
+ next_with_attributes (t, tt);
+ attributes_push (t, tt);
+
+ assert (tt == type::colon);
+
+ // Save element attributes so that we can inject them on each
+ // iteration.
+ //
+ attributes val_attrs (attributes_pop ());
+
+ // Parse the value with the potential attributes.
+ //
+ // Note that we don't really need to change the mode since we
+ // are replaying the tokens.
+ //
+ value val;
+ apply_value_attributes (nullptr /* variable */,
+ val,
+ parse_variable_line (t, tt),
+ type::assign);
+
+ replay_stop ();
+
+ // If the value is not NULL then iterate over its elements,
+ // assigning them to the for-loop variable, and parsing all the
+ // construct lines afterwards. Then position to the end line of
+ // the construct and continue parsing.
+
+ // The for-loop construct end. Set on the first iteration.
+ //
+ lines::const_iterator fe (e);
+
+ if (val)
{
- i = next (i, false, true);
- if (i->type != line_type::cmd_end)
- --i; // Continue with this line (e.g., elif or else).
+ // If this value is a vector, then save its element type so
+ // that we can typify each element below.
+ //
+ const value_type* etype (nullptr);
+
+ if (val.type != nullptr)
+ {
+ etype = val.type->element_type;
+
+ // Note that here we don't want to be reducing empty simple
+ // values to empty lists.
+ //
+ untypify (val, false /* reduce */);
+ }
+
+ size_t fli (li);
+ iteration_index fi {1, ii};
+ names& ns (val.as<names> ());
+
+ for (auto ni (ns.begin ()), ne (ns.end ()); ni != ne; ++ni)
+ {
+ li = fli;
+
+ // Set the variable value.
+ //
+ bool pair (ni->pair);
+ names n;
+ n.push_back (move (*ni));
+ if (pair) n.push_back (move (*++ni));
+ value v (move (n)); // Untyped.
+
+ if (etype != nullptr)
+ typify (v, *etype, var);
+
+ exec_for (*var, move (v), val_attrs, ll);
+
+ // Find the construct end, if it is not found yet.
+ //
+ if (fe == e)
+ fe = fcend (i, true, false);
+
+ if (!exec_lines (i + 1, fe,
+ exec_set, exec_cmd, exec_cond, exec_for,
+ &fi, li,
+ var_pool))
+ return false;
+
+ fi.index++;
+ }
}
+ // Position to construct end.
+ //
+ i = (fe != e ? fe : fcend (i, true, true));
+
break;
}
case line_type::cmd_end:
{
assert (false);
+ break;
}
}
}
@@ -2278,7 +2763,7 @@ namespace build2
}
parser::parsed_doc::
- parsed_doc (parsed_doc&& d)
+ parsed_doc (parsed_doc&& d) noexcept
: re (d.re), end_line (d.end_line), end_column (d.end_column)
{
if (re)
diff --git a/libbuild2/script/parser.hxx b/libbuild2/script/parser.hxx
index 6e24d37..cadf909 100644
--- a/libbuild2/script/parser.hxx
+++ b/libbuild2/script/parser.hxx
@@ -25,7 +25,7 @@ namespace build2
class parser: protected build2::parser
{
public:
- parser (context& c, bool relex): build2::parser (c), relex_ (relex) {}
+ parser (context& c): build2::parser (c) {}
// Helpers.
//
@@ -42,6 +42,15 @@ namespace build2
using build2::parser::apply_value_attributes;
+ // Return true if a command line element needs to be re-lexed.
+ //
+ // Specifically, it needs to be re-lexed if it contains any of the
+ // special characters (|<>&), quotes ("') or effective escape sequences
+ // (\", \', \\).
+ //
+ static bool
+ need_cmdline_relex (const string&);
+
// Commonly used parsing functions. Issue diagnostics and throw failed
// in case of an error.
//
@@ -88,15 +97,34 @@ namespace build2
};
using here_docs = vector<here_doc>;
- pair<command_expr, here_docs>
- parse_command_expr (token&, token_type&, const redirect_aliases&);
+ struct parse_command_expr_result
+ {
+ command_expr expr; // Single pipe for the for-loop.
+ here_docs docs;
+ bool for_loop = false;
+
+ parse_command_expr_result () = default;
+
+ parse_command_expr_result (command_expr&& e,
+ here_docs&& h,
+ bool f)
+ : expr (move (e)), docs (move (h)), for_loop (f) {}
+ };
+
+ // Pass the first special command program name (token_type::word) if it
+ // is already pre-parsed.
+ //
+ parse_command_expr_result
+ parse_command_expr (token&, token_type&,
+ const redirect_aliases&,
+ optional<token>&& program = nullopt);
command_exit
parse_command_exit (token&, token_type&);
void
parse_here_documents (token&, token_type&,
- pair<command_expr, here_docs>&);
+ parse_command_expr_result&);
struct parsed_doc
{
@@ -112,7 +140,7 @@ namespace build2
parsed_doc (string, uint64_t line, uint64_t column);
parsed_doc (regex_lines&&, uint64_t line, uint64_t column);
- parsed_doc (parsed_doc&&); // Note: move constuctible-only type.
+ parsed_doc (parsed_doc&&) noexcept; // Note: move constructible-only type.
~parsed_doc ();
};
@@ -126,6 +154,11 @@ namespace build2
// the first two tokens. Use the specified lexer mode to peek the second
// token.
//
+ // Always return the cmd_for_stream line type for the for-loop. Note
+ // that the for-loop form cannot be detected easily, based on the first
+ // two tokens. Also note that the detection can be specific for the
+ // script implementation (custom lexing mode, special variables, etc).
+ //
line_type
pre_parse_line_start (token&, token_type&, lexer_mode);
@@ -150,19 +183,26 @@ namespace build2
protected:
// Return false if the execution of the script should be terminated with
// the success status (e.g., as a result of encountering the exit
- // builtin). For unsuccessful termination the failed exception is thrown.
+ // builtin). For unsuccessful termination the failed exception is
+ // thrown.
//
using exec_set_function = void (const variable&,
token&, token_type&,
const location&);
using exec_cmd_function = void (token&, token_type&,
- size_t li,
+ const iteration_index*, size_t li,
bool single,
+ const function<command_function>&,
const location&);
- using exec_if_function = bool (token&, token_type&,
- size_t li,
+ using exec_cond_function = bool (token&, token_type&,
+ const iteration_index*, size_t li,
+ const location&);
+
+ using exec_for_function = void (const variable&,
+ value&&,
+ const attributes& value_attrs,
const location&);
// If a parser implementation doesn't pre-enter variables into a pool
@@ -174,8 +214,9 @@ namespace build2
exec_lines (lines::const_iterator b, lines::const_iterator e,
const function<exec_set_function>&,
const function<exec_cmd_function>&,
- const function<exec_if_function>&,
- size_t& li,
+ const function<exec_cond_function>&,
+ const function<exec_for_function>&,
+ const iteration_index*, size_t& li,
variable_pool* = nullptr);
// Customization hooks.
@@ -200,6 +241,13 @@ namespace build2
// something that requires re-lexing, for example `foo|bar`, which won't
// be easy to translate but which are handled by the parser.
//
+ // Note that the chunk could be of the special cmdline type in which
+ // case the names may need to be "preprocessed" (at least unquoted or
+ // potentially fully re-lexed) before being analyzed/consumed. Note also
+ // that in this case any names left unconsumed must remain of the
+ // cmdline type.
+ //
+ //
// During the pre-parsing phase the returned process path and names
// (that must still be parsed) are discarded. The main purpose of the
// call is to allow implementations to perform static script analysis,
@@ -229,7 +277,6 @@ namespace build2
size_t replay_quoted_;
protected:
- bool relex_;
lexer* lexer_ = nullptr;
};
}
diff --git a/libbuild2/script/regex.cxx b/libbuild2/script/regex.cxx
index 3f796b6..11ff8a1 100644
--- a/libbuild2/script/regex.cxx
+++ b/libbuild2/script/regex.cxx
@@ -75,15 +75,29 @@ namespace build2
string::traits_type::find (ex, 4, c) != nullptr)));
}
+ template <typename S>
+ static inline const char_string*
+ find_or_insert (line_pool& p, S&& s)
+ {
+ auto i (find (p.strings.begin (), p.strings.end (), s));
+ if (i == p.strings.end ())
+ {
+ p.strings.push_front (forward<S> (s));
+ i = p.strings.begin ();
+ }
+
+ return &*i;
+ }
+
line_char::
line_char (const char_string& s, line_pool& p)
- : line_char (&(*p.strings.emplace (s).first))
+ : line_char (find_or_insert (p, s))
{
}
line_char::
line_char (char_string&& s, line_pool& p)
- : line_char (&(*p.strings.emplace (move (s)).first))
+ : line_char (find_or_insert (p, move (s)))
{
}
diff --git a/libbuild2/script/regex.hxx b/libbuild2/script/regex.hxx
index e043c99..3c49b31 100644
--- a/libbuild2/script/regex.hxx
+++ b/libbuild2/script/regex.hxx
@@ -9,7 +9,6 @@
#include <locale>
#include <string> // basic_string
#include <type_traits> // make_unsigned, enable_if, is_*
-#include <unordered_set>
#include <libbuild2/types.hxx>
#include <libbuild2/utility.hxx>
@@ -59,7 +58,12 @@ namespace build2
// Note that we assume the pool can be moved without invalidating
// pointers to any already pooled entities.
//
- std::unordered_set<char_string> strings;
+ // Note that we used to use unordered_set for strings but (1) there is
+ // no general expectation that we will have many identical strings and
+ // (2) the number of strings is not expected to be large. So that felt
+ // like an overkill and we now use a list with linear search.
+ //
+ std::list<char_string> strings;
std::list<char_regex> regexes;
};
@@ -267,8 +271,8 @@ namespace build2
template <typename T>
struct line_char_cmp
: public std::enable_if<std::is_integral<T>::value ||
- (std::is_enum<T>::value &&
- !std::is_same<T, char_flags>::value)> {};
+ (std::is_enum<T>::value &&
+ !std::is_same<T, char_flags>::value)> {};
template <typename T, typename = typename line_char_cmp<T>::type>
bool
@@ -466,10 +470,10 @@ namespace std
is (mask m, char_type c) const
{
return m ==
- (c.type () == line_type::special && c.special () >= 0 &&
- build2::digit (static_cast<char> (c.special ()))
- ? digit
- : 0);
+ (c.type () == line_type::special && c.special () >= 0 &&
+ build2::digit (static_cast<char> (c.special ()))
+ ? digit
+ : 0);
}
const char_type*
diff --git a/libbuild2/script/run.cxx b/libbuild2/script/run.cxx
index 8b609f1..6d73a7e 100644
--- a/libbuild2/script/run.cxx
+++ b/libbuild2/script/run.cxx
@@ -9,7 +9,8 @@
# include <libbutl/win32-utility.hxx> // DBG_TERMINATE_PROCESS
#endif
-#include <ios> // streamsize
+#include <ios> // streamsize
+#include <cstring> // strchr()
#include <libbutl/regex.hxx>
#include <libbutl/builtin.hxx>
@@ -759,6 +760,31 @@ namespace build2
output_info (d, op);
}
+ // Note that a here-document regex without ':' modifier can never
+ // match an empty output since it always contains the trailing empty
+ // line-char. This can be confusing, as for example while testing a
+ // program which can print some line or nothing with the following
+ // test:
+ //
+ // $* >>~%EOO%
+ // %(
+ // Hello, World!
+ // %)?
+ // EOO
+ //
+ // Note that the above line-regex contains 4 line-chars and will never
+ // match empty output.
+ //
+ // Thus, let's complete an empty output with an empty line-char for
+ // such a regex, so it may potentially match.
+ //
+ if (ls.empty () &&
+ rd.type == redirect_type::here_doc_regex &&
+ rd.modifiers ().find (':') == string::npos)
+ {
+ ls += line_char (string (), regex.pool);
+ }
+
// Match the output with the regex.
//
// Note that we don't distinguish between the line_regex and
@@ -784,7 +810,7 @@ namespace build2
// regex to file for troubleshooting regardless of whether we print
// the diagnostics or not. We, however, register it for cleanup in the
// later case (the expression may still succeed, we can be evaluating
- // the if condition, etc).
+ // the flow control construct condition, etc).
//
optional<path> rp;
if (env.temp_dir_keep)
@@ -946,96 +972,660 @@ namespace build2
: path (c.program.recall_string ());
}
- // Read out the stream content into a string. Throw io_error on the
- // underlying OS error.
+ // Read the stream content into a string, optionally splitting the input
+ // data at whitespaces or newlines in which case return one, potentially
+ // incomplete, substring at a time (see the set builtin options for the
+ // splitting semantics). Throw io_error on the underlying OS error.
//
- // If the execution deadline is specified, then turn the stream into the
- // non-blocking mode reading its content in chunks and with a single
- // operation otherwise. If the specified deadline is reached while
- // reading the stream, then bail out for the successful deadline and
- // fail otherwise. Note that in the former case the result will be
- // incomplete, but we leave it to the caller to handle that.
+ // On POSIX expects the stream to be non-blocking and its exception mask
+ // to have at least badbit. On Windows can also handle a blocking stream.
//
// Note that on Windows we can only turn pipe file descriptors into the
- // non-blocking mode. Thus, we have no choice but to read from
- // descriptors of other types synchronously there. That implies that we
- // can potentially block indefinitely reading a file and missing the
- // deadline on Windows. Note though, that the user can normally rewrite
- // the command, for example, `set foo <<<file` with `cat file | set foo`
- // to avoid this problem.
+ // non-blocking mode. Thus, we have no choice but to read from descriptors
+ // of other types synchronously there. That implies that we can
+ // potentially block indefinitely reading a file and missing a deadline on
+ // Windows. Note though, that the user can normally rewrite the command,
+ // for example, `set foo <<<file` with `cat file | set foo` to avoid this
+ // problem.
//
- static string
- read (auto_fd in,
+ class stream_reader
+ {
+ public:
+ stream_reader (ifdstream&, bool whitespace, bool newline, bool exact);
+
+ // Read next substring. Return true if the substring has been read or
+ // false if it should be called again once the stream has more data to
+ // read. Also return true on eof (in which case no substring is read).
+ // The string must be empty on the first call. Throw ios::failure on the
+ // underlying OS error.
+ //
+ // Note that there could still be data to read in the stream's buffer
+ // (as opposed to file descriptor) after this function returns true and
+ // you should be careful not to block on fdselect() in this case. The
+ // recommended usage pattern is similar to that of
+ // butl::getline_non_blocking(). The only difference is that
+ // ifdstream::eof() needs to be used instead of butl::eof() since this
+ // function doesn't set failbit and only sets eofbit after the last
+ // substring is returned.
+ //
+ bool
+ next (string&);
+
+ private:
+ ifdstream& is_;
+ bool whitespace_;
+ bool newline_;
+ bool exact_;
+
+ bool empty_ = true; // Set to false after the first character is read.
+ };
+
+ stream_reader::
+ stream_reader (ifdstream& is, bool ws, bool nl, bool ex)
+ : is_ (is),
+ whitespace_ (ws),
+ newline_ (nl),
+ exact_ (ex)
+ {
+ }
+
+ bool stream_reader::
+ next (string& ss)
+ {
#ifndef _WIN32
- bool,
+ assert ((is_.exceptions () & ifdstream::badbit) != 0 && !is_.blocking ());
#else
- bool pipe,
+ assert ((is_.exceptions () & ifdstream::badbit) != 0);
#endif
+
+ fdstreambuf& sb (*static_cast<fdstreambuf*> (is_.rdbuf ()));
+
+ // Return the number of characters available in the stream buffer's get
+ // area, which can be:
+ //
+ // -1 -- EOF.
+ // 0 -- no data since blocked before encountering more data/EOF.
+ // >0 -- there is some data.
+ //
+ // Note that on Windows if the stream is blocking, then the lambda calls
+ // underflow() instead of returning 0.
+ //
+ // @@ Probably we can call underflow() only once per the next() call,
+ // emulating the 'no data' case. This will allow the caller to
+ // perform some housekeeping (reading other streams, checking for the
+ // deadline, etc). But let's keep it simple for now.
+ //
+ auto avail = [&sb] () -> streamsize
+ {
+ // Note that here we reasonably assume that any failure in in_avail()
+ // will lead to badbit and thus an exception (see showmanyc()).
+ //
+ streamsize r (sb.in_avail ());
+
+#ifdef _WIN32
+ if (r == 0 && sb.blocking ())
+ {
+ if (sb.underflow () == ifdstream::traits_type::eof ())
+ return -1;
+
+ r = sb.in_avail ();
+
+ assert (r != 0); // We wouldn't be here otherwise.
+ }
+#endif
+
+ return r;
+ };
+
+ // Read until blocked (0), EOF (-1) or encounter the delimiter.
+ //
+ streamsize s;
+ while ((s = avail ()) > 0)
+ {
+ if (empty_)
+ empty_ = false;
+
+ const char* p (sb.gptr ());
+ size_t n (sb.egptr () - p);
+
+ // We move p and bump by the number of consumed characters.
+ //
+ auto bump = [&sb, &p] () {sb.gbump (static_cast<int> (p - sb.gptr ()));};
+
+ if (whitespace_) // The whitespace mode.
+ {
+ const char* sep (" \n\r\t");
+
+ // Skip the whitespaces.
+ //
+ for (; n != 0 && strchr (sep, *p) != nullptr; ++p, --n) ;
+
+ // If there are any non-whitespace characters in the get area, then
+ // append them to the resulting substring until a whitespace
+ // character is encountered.
+ //
+ if (n != 0)
+ {
+ // Append the non-whitespace characters.
+ //
+ for (char c; n != 0 && strchr (sep, c = *p) == nullptr; ++p, --n)
+ ss += c;
+
+ // If a separator is encountered, then consume it, bump, and
+ // return the substring.
+ //
+ if (n != 0)
+ {
+ ++p; --n; // Consume the separator character.
+
+ bump ();
+ return true;
+ }
+
+ // Fall through.
+ }
+
+ bump (); // Bump and continue reading.
+ }
+ else // The newline or no-split mode.
+ {
+ // Note that we don't collapse multiple consecutive newlines.
+ //
+ // Note also that we always sanitize CRs, so in the no-split mode we
+ // need to loop rather than consume the whole get area at once.
+ //
+ while (n != 0)
+ {
+ // Append the characters until the newline character or the end of
+ // the get area is encountered.
+ //
+ char c;
+ for (; n != 0 && (c = *p) != '\n'; ++p, --n)
+ ss += c;
+
+ // If the newline character is encountered, then sanitize CRs and
+ // return the substring in the newline mode and continue
+ // parsing/reading otherwise.
+ //
+ if (n != 0)
+ {
+ // Strip the trailing CRs that can appear while, for example,
+ // cross-testing Windows target or as a part of msvcrt junk
+ // production (see above).
+ //
+ while (!ss.empty () && ss.back () == '\r')
+ ss.pop_back ();
+
+ assert (c == '\n');
+
+ ++p; --n; // Consume the newline character.
+
+ if (newline_)
+ {
+ bump ();
+ return true;
+ }
+
+ ss += c; // Append newline to the resulting string.
+
+ // Fall through.
+ }
+
+ bump (); // Bump and continue parsing/reading.
+ }
+ }
+ }
+
+ // Here s can be:
+ //
+ // -1 -- EOF.
+ // 0 -- blocked before encountering delimiter/EOF.
+ //
+ // Note: >0 (encountered the delimiter) case is handled in-place.
+ //
+ assert (s == -1 || s == 0);
+
+ if (s == -1)
+ {
+ // Return the last substring if it is not empty or it is the trailing
+ // "blank" in the exact mode. Otherwise, set eofbit for the stream
+ // indicating that we are done.
+ //
+ if (!ss.empty () || (exact_ && !empty_))
+ {
+ // Also, strip the trailing newline character, if present, in the
+ // no-split no-exact mode.
+ //
+ if (!ss.empty () && ss.back () == '\n' && // Trailing newline.
+ !newline_ && !whitespace_ && !exact_) // No-split no-exact mode.
+ {
+ ss.pop_back ();
+ }
+
+ exact_ = false; // Make sure we will set eofbit on the next call.
+ }
+ else
+ is_.setstate (ifdstream::eofbit);
+ }
+
+ return s == -1;
+ }
+
+ // Stack-allocated linked list of information about the running pipeline
+ // processes and builtins.
+ //
+ // Note: constructed incrementally.
+ //
+ struct pipe_command
+ {
+ // Initially NULL. Set to the address of the process or builtin object
+ // when it is created. Reset back to NULL when the respective
+ // process/builtin is executed and its exit status is collected (see
+ // complete_pipe() for details).
+ //
+ // We could probably use a union here, but let's keep it simple for now
+ // (at least one is NULL).
+ //
+ process* proc = nullptr;
+ builtin* bltn = nullptr;
+
+ const command& cmd;
+ const cstrings* args = nullptr;
+ const optional<deadline>& dl;
+
+ diag_buffer dbuf;
+
+ bool terminated = false; // True if this command has been terminated.
+
+ // True if this command has been terminated but we failed to read out
+ // its stdout and/or stderr streams in the reasonable timeframe (2
+ // seconds) after the termination.
+ //
+ // Note that this may happen if there is a still running child process
+ // of the terminated command which has inherited the parent's stdout and
+ // stderr file descriptors.
+ //
+ bool unread_stdout = false;
+ bool unread_stderr = false;
+
+ // Only for diagnostics.
+ //
+ const location& loc;
+ const path* isp = nullptr; // stdin cache.
+ const path* osp = nullptr; // stdout cache.
+ const path* esp = nullptr; // stderr cache.
+
+ pipe_command* prev; // NULL for the left-most command.
+ pipe_command* next; // Left-most command for the right-most command.
+
+ pipe_command (context& x,
+ const command& c,
+ const optional<deadline>& d,
+ const location& l,
+ pipe_command* p,
+ pipe_command* f)
+ : cmd (c), dl (d), dbuf (x), loc (l), prev (p), next (f) {}
+ };
+
+ // Wait for a process/builtin to complete until the deadline is reached
+ // and return the underlying wait function result (optional<something>).
+ //
+ template<typename P>
+ static auto
+ timed_wait (P& p, const timestamp& deadline) -> decltype(p.try_wait ())
+ {
+ timestamp now (system_clock::now ());
+ return deadline > now ? p.timed_wait (deadline - now) : p.try_wait ();
+ }
+
+ // Terminate the pipeline processes starting from the specified one and up
+ // to the leftmost one and then kill those which didn't terminate after 2
+ // seconds.
+ //
+ // After that wait for the pipeline builtins completion. Since their
+ // standard streams should no longer be written to or read from by any
+ // process, that shouldn't take long. If, however, they won't be able to
+ // complete in 2 seconds, then some of them have probably stuck while
+ // communicating with a slow filesystem device or similar, and since we
+ // currently have no way to terminate asynchronous builtins, we have no
+ // choice but to abort.
+ //
+ // Issue diagnostics and fail if something goes wrong, but still try to
+ // terminate/kill all the pipe processes.
+ //
+ static void
+ term_pipe (pipe_command* pc, tracer& trace)
+ {
+ auto prog = [] (pipe_command* c) {return cmd_path (c->cmd);};
+
+ // Terminate processes gracefully and set the terminate flag for the
+ // pipe commands.
+ //
+ diag_record dr;
+ for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ {
+ if (process* p = c->proc)
+ try
+ {
+ l5 ([&]{trace (c->loc) << "terminating: " << c->cmd;});
+
+ p->term ();
+ }
+ catch (const process_error& e)
+ {
+ // If unable to terminate the process for any reason (the process is
+ // exiting on Windows, etc) then just ignore this, postponing the
+ // potential failure till the kill() call.
+ //
+ l5 ([&]{trace (c->loc) << "unable to terminate " << prog (c)
+ << ": " << e;});
+ }
+
+ c->terminated = true;
+ }
+
+ // Wait a bit for the processes to terminate and kill the remaining
+ // ones.
+ //
+ timestamp dl (system_clock::now () + chrono::seconds (2));
+
+ for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ {
+ if (process* p = c->proc)
+ try
+ {
+ l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
+
+ if (!timed_wait (*p, dl))
+ {
+ l5 ([&]{trace (c->loc) << "killing: " << c->cmd;});
+
+ p->kill ();
+ p->wait ();
+ }
+ }
+ catch (const process_error& e)
+ {
+ dr << fail (c->loc) << "unable to wait/kill " << prog (c) << ": "
+ << e;
+ }
+ }
+
+ // Wait a bit for the builtins to complete and abort if any remain
+ // running.
+ //
+ dl = system_clock::now () + chrono::seconds (2);
+
+ for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ {
+ if (builtin* b = c->bltn)
+ try
+ {
+ l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
+
+ if (!timed_wait (*b, dl))
+ {
+ error (c->loc) << prog (c) << " builtin hanged, aborting";
+ terminate (false /* trace */);
+ }
+ }
+ catch (const system_error& e)
+ {
+ dr << fail (c->loc) << "unable to wait for " << prog (c) << ": "
+ << e;
+ }
+ }
+ }
+
+ void
+ read (auto_fd&& in,
+ bool whitespace, bool newline, bool exact,
+ const function<void (string&&)>& cf,
+ pipe_command* pipeline,
const optional<deadline>& dl,
- const command& deadline_cmd,
- const location& ll)
+ const location& ll,
+ const char* what)
{
- string r;
- ifdstream cin;
+ tracer trace ("script::stream_read");
+ // Note: stays blocking on Windows if the descriptor is not of the pipe
+ // type.
+ //
#ifndef _WIN32
- if (dl)
+ fdstream_mode m (fdstream_mode::non_blocking);
#else
- if (dl && pipe)
+ fdstream_mode m (pipeline != nullptr
+ ? fdstream_mode::non_blocking
+ : fdstream_mode::blocking);
#endif
+
+ ifdstream is (move (in), m, ifdstream::badbit);
+ stream_reader sr (is, whitespace, newline, exact);
+
+ fdselect_set fds;
+ for (pipe_command* c (pipeline); c != nullptr; c = c->prev)
+ {
+ diag_buffer& b (c->dbuf);
+
+ if (b.is.is_open ())
+ fds.emplace_back (b.is.fd (), c);
+ }
+
+ fds.emplace_back (is.fd ());
+ fdselect_state& ist (fds.back ());
+ size_t unread (fds.size ());
+
+ optional<timestamp> dlt (dl ? dl->value : optional<timestamp> ());
+
+ // If there are some left-hand side processes/builtins running, then
+ // terminate them and, if there are unread stdout/stderr file
+ // descriptors, then increase the deadline by another 2 seconds and
+ // return true. In this case the term() should be called again upon
+ // reaching the timeout. Otherwise return false. If there are no
+ // left-hand side processes/builtins running, then fail straight away.
+ //
+ // Note that in the former case the further reading will be performed
+ // with the adjusted timeout. We assume that this timeout is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_command for the possible reasons), then term() needs to be
+ // called for the second time and the reading should be interrupted
+ // afterwards.
+ //
+ auto term = [&dlt, pipeline, &fds, &ist, &is, &unread,
+ &trace, &ll, what, terminated = false] () mutable -> bool
{
- fdselect_set fds {in.get ()};
- cin.open (move (in), fdstream_mode::non_blocking);
+ // Can only be called if the deadline is specified.
+ //
+ assert (dlt);
- const timestamp& dlt (dl->value);
+ if (pipeline == nullptr)
+ fail (ll) << what << " terminated: execution timeout expired";
- for (char buf[4096];; )
+ if (!terminated)
{
- timestamp now (system_clock::now ());
+ // Terminate the pipeline and adjust the deadline.
+ //
- if (dlt <= now || ifdselect (fds, dlt - now) == 0)
+ // Note that if we are still reading the stream and it's a builtin
+ // stdout, then we need to close it before terminating the pipeline.
+ // Not doing so can result in blocking this builtin on the write
+ // operation and thus aborting the build2 process (see term_pipe()
+ // for details).
+ //
+ // Should we do the same for all the pipeline builtins' stderr
+ // streams? No we don't, since the builtin diagnostics is assumed to
+ // always fit the pipe buffer (see libbutl/builtin.cxx for details).
+ // Thus, we will leave them open to fully read out the diagnostics.
+ //
+ if (ist.fd != nullfd && pipeline->bltn != nullptr)
{
- if (!dl->success)
- fail (ll) << cmd_path (deadline_cmd)
- << " terminated: execution timeout expired";
- else
- break;
+ try
+ {
+ is.close ();
+ }
+ catch (const io_error&)
+ {
+ // Not much we can do here.
+ }
+
+ ist.fd = nullfd;
+ --unread;
}
- streamsize n (cin.readsome (buf, sizeof (buf)));
+ term_pipe (pipeline, trace);
+ terminated = true;
- // Bail out if eos is reached.
+ if (unread != 0)
+ dlt = system_clock::now () + chrono::seconds (2);
+
+ return unread != 0;
+ }
+ else
+ {
+ // Set the unread_{stderr,stdout} flags to true for the commands
+ // whose streams are not fully read yet.
//
- if (n == 0)
- break;
- r.append (buf, n);
+ // Can only be called after the first call of term() which would
+ // throw failed if pipeline is NULL.
+ //
+ assert (pipeline != nullptr);
+
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ if (s.data != nullptr) // stderr.
+ {
+ pipe_command* c (static_cast<pipe_command*> (s.data));
+
+ c->unread_stderr = true;
+
+ // Let's also close the stderr stream not to confuse
+ // diag_buffer::close() with a not fully read stream (eof is
+ // not reached, etc).
+ //
+ try
+ {
+ c->dbuf.is.close ();
+ }
+ catch (const io_error&)
+ {
+ // Not much we can do here. Anyway the diagnostics will be
+ // issued by complete_pipe().
+ }
+ }
+ else // stdout.
+ pipeline->unread_stdout = true;
+ }
+ }
+
+ return false;
}
- }
- else
+ };
+
+ // Note that on Windows if the file descriptor is not a pipe, then
+ // ifdstream assumes the blocking mode for which ifdselect() would throw
+ // invalid_argument. Such a descriptor can, however, only appear for the
+ // first command in the pipeline and so fds will only contain the input
+ // stream's descriptor. That all means that this descriptor will be read
+ // out by a series of the stream_reader::next() calls which can only
+ // return true and thus no ifdselect() calls will ever be made.
+ //
+ string s;
+ while (unread != 0)
{
- cin.open (move (in));
- r = cin.read_text ();
- }
+ // Read any pending data from the input stream.
+ //
+ if (ist.fd != nullfd)
+ {
+ // Prior to reading let's check that the deadline, if specified, is
+ // not reached. This way we handle the (hypothetical) case when we
+ // are continuously fed with the data without delays and thus can
+ // never get to ifdselect() which watches for the deadline. Also
+ // this check is the only way to bail out early on Windows for a
+ // blocking file descriptor.
+ //
+ if (dlt && *dlt <= system_clock::now ())
+ {
+ if (!term ())
+ break;
+ }
- cin.close ();
+ if (sr.next (s))
+ {
+ if (!is.eof ())
+ {
+ // Consume the substring.
+ //
+ cf (move (s));
+ s.clear ();
+ }
+ else
+ {
+ ist.fd = nullfd;
+ --unread;
+ }
- return r;
+ continue;
+ }
+ }
+
+ try
+ {
+ // Wait until the data appear in any of the streams. If a deadline
+ // is specified, then pass the timeout to fdselect().
+ //
+ if (dlt)
+ {
+ timestamp now (system_clock::now ());
+
+ if (*dlt <= now || ifdselect (fds, *dlt - now) == 0)
+ {
+ if (term ())
+ continue;
+ else
+ break;
+ }
+ }
+ else
+ ifdselect (fds);
+
+ // Read out the pending data from the stderr streams.
+ //
+ for (fdselect_state& s: fds)
+ {
+ if (s.ready &&
+ s.data != nullptr &&
+ !static_cast<pipe_command*> (s.data)->dbuf.read ())
+ {
+ s.fd = nullfd;
+ --unread;
+ }
+ }
+ }
+ catch (const io_error& e)
+ {
+ fail (ll) << "io error reading pipeline streams: " << e;
+ }
+ }
}
// The set pseudo-builtin: set variable from the stdin input.
//
- // set [-e|--exact] [(-n|--newline)|(-w|--whitespace)] [<attr>] <var>
+ // set [-e|--exact] [(-n|--newline)|(-w|--whitespace)] <var> [<attr>]
//
static void
set_builtin (environment& env,
const strings& args,
auto_fd in,
- bool pipe,
+ pipe_command* pipeline,
const optional<deadline>& dl,
- const command& deadline_cmd,
const location& ll)
{
+ tracer trace ("script::set_builtin");
+
try
{
// Parse arguments.
@@ -1049,105 +1639,41 @@ namespace build2
if (!scan.more ())
fail (ll) << "set: missing variable name";
- string a (scan.next ()); // Either attributes or variable name.
- const string* ats (!scan.more () ? nullptr : &a);
- string vname (!scan.more () ? move (a) : scan.next ());
-
- if (scan.more ())
- fail (ll) << "set: unexpected argument '" << scan.next () << "'";
-
- if (ats != nullptr && ats->empty ())
- fail (ll) << "set: empty variable attributes";
-
+ string vname (scan.next ());
if (vname.empty ())
fail (ll) << "set: empty variable name";
- // Read out the stream content into a string while keeping an eye on
- // the deadline.
+ // Detect patterns analogous to parser::parse_variable_name() (so we
+ // diagnose `set x[string]`).
//
- string s (read (move (in), pipe, dl, deadline_cmd, ll));
+ if (vname.find_first_of ("[*?") != string::npos)
+ fail (ll) << "set: expected variable name instead of " << vname;
- // Parse the stream content into the variable value.
- //
- names ns;
-
- if (!s.empty ())
+ string attrs;
+ if (scan.more ())
{
- if (ops.whitespace ()) // The whitespace mode.
- {
- // Note that we collapse multiple consecutive whitespaces.
- //
- for (size_t p (0); p != string::npos; )
- {
- // Skip the whitespaces.
- //
- const char* sep (" \n\r\t");
- size_t b (s.find_first_not_of (sep, p));
+ attrs = scan.next ();
- if (b != string::npos) // Word beginning.
- {
- size_t e (s.find_first_of (sep, b)); // Find the word end.
- ns.emplace_back (string (s, b, e != string::npos ? e - b : e));
-
- p = e;
- }
- else // Trailings whitespaces.
- {
- // Append the trailing "blank" after the trailing whitespaces
- // in the exact mode.
- //
- if (ops.exact ())
- ns.emplace_back (empty_string);
-
- // Bail out since the end of the string is reached.
- //
- break;
- }
- }
- }
- else // The newline or no-split mode.
- {
- // Note that we don't collapse multiple consecutive newlines.
- //
- // Note also that we always sanitize CRs so this loop is always
- // needed.
- //
- for (size_t p (0); p != string::npos; )
- {
- size_t e (s.find ('\n', p));
- string l (s, p, e != string::npos ? e - p : e);
+ if (attrs.empty ())
+ fail (ll) << "set: empty variable attributes";
- // Strip the trailing CRs that can appear while, for example,
- // cross-testing Windows target or as a part of msvcrt junk
- // production (see above).
- //
- while (!l.empty () && l.back () == '\r')
- l.pop_back ();
+ if (scan.more ())
+ fail (ll) << "set: unexpected argument '" << scan.next () << "'";
+ }
- // Append the line.
- //
- if (!l.empty () || // Non-empty.
- e != string::npos || // Empty, non-trailing.
- ops.exact ()) // Empty, trailing, in the exact mode.
- {
- if (ops.newline () || ns.empty ())
- ns.emplace_back (move (l));
- else
- {
- ns[0].value += '\n';
- ns[0].value += l;
- }
- }
+ // Parse the stream content into the variable value.
+ //
+ names ns;
- p = e != string::npos ? e + 1 : e;
- }
- }
- }
+ read (move (in),
+ ops.whitespace (), ops.newline (), ops.exact (),
+ [&ns] (string&& s) {ns.emplace_back (move (s));},
+ pipeline,
+ dl,
+ ll,
+ "set");
- env.set_variable (move (vname),
- move (ns),
- ats != nullptr ? *ats : empty_string,
- ll);
+ env.set_variable (move (vname), move (ns), attrs, ll);
}
catch (const io_error& e)
{
@@ -1174,51 +1700,16 @@ namespace build2
name);
}
- // Stack-allocated linked list of information about the running pipeline
- // processes and builtins.
- //
- struct pipe_command
- {
- // We could probably use a union here, but let's keep it simple for now
- // (one is NULL).
- //
- process* proc;
- builtin* bltn;
-
- // True if this command has been terminated.
- //
- bool terminated = false;
-
- // Only for diagnostics.
- //
- const command& cmd;
- const location& loc;
-
- pipe_command* prev; // NULL for the left-most command.
-
- pipe_command (process& p,
- const command& c,
- const location& l,
- pipe_command* v)
- : proc (&p), bltn (nullptr), cmd (c), loc (l), prev (v) {}
-
- pipe_command (builtin& b,
- const command& c,
- const location& l,
- pipe_command* v)
- : proc (nullptr), bltn (&b), cmd (c), loc (l), prev (v) {}
- };
-
static bool
run_pipe (environment& env,
command_pipe::const_iterator bc,
command_pipe::const_iterator ec,
auto_fd ifd,
- size_t ci, size_t li, const location& ll,
+ const iteration_index* ii, size_t li, size_t ci,
+ const location& ll,
bool diag,
- string* output,
+ const function<command_function>& cf, bool last_cmd,
optional<deadline> dl = nullopt,
- const command* dl_cmd = nullptr, // env -t <cmd>
pipe_command* prev_cmd = nullptr)
{
tracer trace ("script::run_pipe");
@@ -1227,8 +1718,10 @@ namespace build2
//
if (bc == ec)
{
- if (output != nullptr)
+ if (cf != nullptr)
{
+ assert (!last_cmd); // Otherwise we wouldn't be here.
+
// The pipeline can't be empty.
//
assert (ifd != nullfd && prev_cmd != nullptr);
@@ -1237,15 +1730,14 @@ namespace build2
try
{
- *output = read (move (ifd),
- true /* pipe */,
- dl,
- dl_cmd != nullptr ? *dl_cmd : c,
- ll);
+ cf (env, strings () /* arguments */,
+ move (ifd), prev_cmd,
+ dl,
+ ll);
}
catch (const io_error& e)
{
- fail (ll) << "io error reading " << cmd_path (c) << " output: "
+ fail (ll) << "unable to read from " << cmd_path (c) << " stdout: "
<< e;
}
}
@@ -1303,9 +1795,10 @@ namespace build2
command_pipe::const_iterator nc (bc + 1);
bool last (nc == ec);
- // Make sure that stdout is not redirected if meant to be read.
+ // Make sure that stdout is not redirected if meant to be read (last_cmd
+ // is false) or cannot be produced (last_cmd is true).
//
- if (last && output != nullptr && c.out)
+ if (last && c.out && cf != nullptr)
fail (ll) << "stdout cannot be redirected";
// True if the process path is not pre-searched and the program path
@@ -1319,7 +1812,7 @@ namespace build2
const redirect& in ((c.in ? *c.in : env.in).effective ());
- const redirect* out (!last || output != nullptr
+ const redirect* out (!last || (cf != nullptr && !last_cmd)
? nullptr // stdout is piped.
: &(c.out ? *c.out : env.out).effective ());
@@ -1327,13 +1820,7 @@ namespace build2
auto process_args = [&c] () -> cstrings
{
- cstrings args {c.program.recall_string ()};
-
- for (const auto& a: c.arguments)
- args.push_back (a.c_str ());
-
- args.push_back (nullptr);
- return args;
+ return build2::process_args (c.program.recall_string (), c.arguments);
};
// Prior to opening file descriptors for command input/output redirects
@@ -1356,14 +1843,29 @@ namespace build2
// content), to make sure that the command doesn't print any unwanted
// diagnostics about IO operation failure.
//
- // Note though, that doing so would be a bad idea if the deadline is
- // specified, since we can block on read and miss the deadline.
- //
- if (!dl)
+ if (ifd != nullfd)
{
- // Note that dtor will ignore any errors (which is what we want).
+ // Note that we can't use ifdstream dtor in the skip mode here since
+ // it turns the stream into the blocking mode and we won't be able
+ // to read out the potentially buffered stderr for the
+ // pipeline. Using read() is also not ideal since it performs
+ // parsing and allocations needlessly. This, however, is probably ok
+ // for such an uncommon case.
+ //
+ //ifdstream (move (ifd), fdstream_mode::skip);
+
+ // Let's try to minimize the allocation size splitting the input
+ // data at whitespaces.
//
- ifdstream (move (ifd), fdstream_mode::skip);
+ read (move (ifd),
+ true /* whitespace */,
+ false /* newline */,
+ false /* exact */,
+ [] (string&&) {}, // Just drop the string.
+ prev_cmd,
+ dl,
+ ll,
+ program.c_str ());
}
if (!first || !last)
@@ -1387,7 +1889,7 @@ namespace build2
if (c.out)
fail (ll) << program << " builtin stdout cannot be redirected";
- if (output != nullptr)
+ if (cf != nullptr && !last_cmd)
fail (ll) << program << " builtin stdout cannot be read";
if (c.err)
@@ -1419,19 +1921,28 @@ namespace build2
// Create a unique path for a command standard stream cache file.
//
- auto std_path = [&env, &ci, &li, &ll] (const char* n) -> path
+ auto std_path = [&env, ii, &li, &ci, &ll] (const char* nm) -> path
{
using std::to_string;
- path p (n);
+ string s (nm);
+ size_t n (s.size ());
+
+ if (ii != nullptr)
+ {
+ // Note: reverse order (outermost to innermost).
+ //
+ for (const iteration_index* i (ii); i != nullptr; i = i->prev)
+ s.insert (n, "-i" + to_string (i->index));
+ }
// 0 if belongs to a single-line script, otherwise is the command line
// number (start from one) in the script.
//
- if (li > 0)
+ if (li != 0)
{
- p += '-';
- p += to_string (li);
+ s += "-n";
+ s += to_string (li);
}
// 0 if belongs to a single-command expression, otherwise is the
@@ -1441,13 +1952,13 @@ namespace build2
// single-line script or to N-th single-command line of multi-line
// script. These cases are mutually exclusive and so are unambiguous.
//
- if (ci > 0)
+ if (ci != 0)
{
- p += '-';
- p += to_string (ci);
+ s += "-c";
+ s += to_string (ci);
}
- return normalize (move (p), temp_dir (env), ll);
+ return normalize (path (move (s)), temp_dir (env), ll);
};
// If this is the first pipeline command, then open stdin descriptor
@@ -1552,8 +2063,7 @@ namespace build2
// Calculate the process/builtin execution deadline. Note that we should
// also consider the left-hand side processes deadlines, not to keep
// them waiting for us and allow them to terminate not later than their
- // deadlines. Thus, let's also track which command has introduced the
- // deadline, so we can report it if the deadline is missed.
+ // deadlines.
//
dl = earlier (dl, env.effective_deadline ());
@@ -1561,10 +2071,7 @@ namespace build2
{
deadline d (system_clock::now () + *c.timeout, false /* success */);
if (!dl || d < *dl)
- {
dl = d;
- dl_cmd = &c;
- }
}
// Prior to opening file descriptors for command outputs redirects
@@ -1585,7 +2092,7 @@ namespace build2
if (c.out)
fail (ll) << "set builtin stdout cannot be redirected";
- if (output != nullptr)
+ if (cf != nullptr && !last_cmd)
fail (ll) << "set builtin stdout cannot be read";
if (c.err)
@@ -1597,14 +2104,54 @@ namespace build2
if (verb >= 2)
print_process (process_args ());
- set_builtin (env, c.arguments,
- move (ifd), !first,
- dl, dl_cmd != nullptr ? *dl_cmd : c,
- ll);
+ set_builtin (env, c.arguments, move (ifd), prev_cmd, dl, ll);
+ return true;
+ }
+
+ // If this is the last command in the pipe and the command function is
+ // specified for it, then call it.
+ //
+ if (last && cf != nullptr && last_cmd)
+ {
+ // Must be enforced by the caller.
+ //
+ assert (!c.out && !c.err && !c.exit);
+
+ try
+ {
+ cf (env, c.arguments, move (ifd), prev_cmd, dl, ll);
+ }
+ catch (const io_error& e)
+ {
+ diag_record dr (fail (ll));
+
+ dr << cmd_path (c) << ": unable to read from ";
+
+ if (prev_cmd != nullptr)
+ dr << cmd_path (prev_cmd->cmd) << " output";
+ else
+ dr << "stdin";
+
+ dr << ": " << e;
+ }
return true;
}
+ // Propagate the pointer to the left-most command.
+ //
+ pipe_command pc (env.context,
+ c,
+ dl,
+ ll,
+ prev_cmd,
+ prev_cmd != nullptr ? prev_cmd->next : nullptr);
+
+ if (prev_cmd != nullptr)
+ prev_cmd->next = &pc;
+ else
+ pc.next = &pc; // Points to itself.
+
// Open a file for command output redirect if requested explicitly
// (file overwrite/append redirects) or for the purpose of the output
// validation (none, here_*, file comparison redirects), register the
@@ -1614,9 +2161,9 @@ namespace build2
// or null-device descriptor for merge, pass or null redirects
// respectively (not opening any file).
//
- auto open = [&env, &wdir, &ll, &std_path] (const redirect& r,
- int dfd,
- path& p) -> auto_fd
+ auto open = [&env, &wdir, &ll, &std_path, &c, &pc] (const redirect& r,
+ int dfd,
+ path& p) -> auto_fd
{
assert (dfd == 1 || dfd == 2);
const char* what (dfd == 1 ? "stdout" : "stderr");
@@ -1634,11 +2181,34 @@ namespace build2
{
try
{
+ if (dfd == 2) // stderr?
+ {
+ fdpipe p;
+ if (diag_buffer::pipe (env.context) == -1) // Are we buffering?
+ p = fdopen_pipe ();
+
+ // Deduce the args0 argument similar to cmd_path().
+ //
+ // Note that we must open the diag buffer regardless of the
+ // diag_buffer::pipe() result.
+ //
+ pc.dbuf.open ((c.program.initial == nullptr
+ ? c.program.recall.string ().c_str ()
+ : c.program.recall_string ()),
+ move (p.in),
+ fdstream_mode::non_blocking);
+
+ if (p.out != nullfd)
+ return move (p.out);
+
+ // Fall through.
+ }
+
return fddup (dfd);
}
catch (const io_error& e)
{
- fail (ll) << "unable to duplicate " << what << ": " << e;
+ fail (ll) << "unable to redirect " << what << ": " << e;
}
}
@@ -1767,111 +2337,386 @@ namespace build2
//
assert (ofd.out != nullfd && efd != nullfd);
- // Wait for a process/builtin to complete until the deadline is reached
- // and return the underlying wait function result (optional<something>).
- //
- auto timed_wait = [] (auto& p, const timestamp& deadline)
- {
- timestamp now (system_clock::now ());
- return deadline > now ? p.timed_wait (deadline - now) : p.try_wait ();
- };
+ pc.isp = &isp;
+ pc.osp = &osp;
+ pc.esp = &esp;
- // Terminate the pipeline processes starting from the specified one and
- // up to the leftmost one and then kill those which didn't terminate
- // after 2 seconds.
+ // Read out all the pipeline's buffered stderr streams watching for the
+ // deadline, if specified. If the deadline is reached, then terminate
+ // the whole pipeline, move the deadline by another 2 seconds, and
+ // continue reading.
//
- // After that wait for the pipeline builtins completion. Since their
- // standard streams should no longer be written to or read from by any
- // process, that shouldn't take long. If, however, they won't be able to
- // complete in 2 seconds, then some of them have probably stuck while
- // communicating with a slow filesystem device or similar, and since we
- // currently have no way to terminate asynchronous builtins, we have no
- // choice but to abort.
+ // Note that we assume that this timeout increment is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_command for the possible reasons), then we just set
+ // unread_stderr flag to true for such commands and bail out.
//
- // Issue diagnostics and fail if something goes wrong, but still try to
- // terminate/kill all the pipe processes.
+ // Also note that this is a reduced version of the above read() function.
//
- auto term_pipe = [&timed_wait, &trace] (pipe_command* pc)
+ auto read_pipe = [&pc, &ll, &trace] ()
{
- diag_record dr;
+ fdselect_set fds;
+ for (pipe_command* c (&pc); c != nullptr; c = c->prev)
+ {
+ diag_buffer& b (c->dbuf);
- auto prog = [] (pipe_command* c) {return cmd_path (c->cmd);};
+ if (b.is.is_open ())
+ fds.emplace_back (b.is.fd (), c);
+ }
- // Terminate processes gracefully and set the terminate flag for the
- // pipe commands.
+ // Note that the current command deadline is the earliest (see above).
//
- for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ optional<timestamp> dlt (pc.dl ? pc.dl->value : optional<timestamp> ());
+
+ bool terminated (false);
+
+ for (size_t unread (fds.size ()); unread != 0;)
{
- if (process* p = c->proc)
try
{
- l5 ([&]{trace (c->loc) << "terminating: " << c->cmd;});
+ // If a deadline is specified, then pass the timeout to fdselect().
+ //
+ if (dlt)
+ {
+ timestamp now (system_clock::now ());
+
+ if (*dlt <= now || ifdselect (fds, *dlt - now) == 0)
+ {
+ if (!terminated)
+ {
+ term_pipe (&pc, trace);
+ terminated = true;
- p->term ();
+ dlt = system_clock::now () + chrono::seconds (2);
+ continue;
+ }
+ else
+ {
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ pipe_command* c (static_cast<pipe_command*> (s.data));
+
+ c->unread_stderr = true;
+
+ // Let's also close the stderr stream not to confuse
+ // diag_buffer::close() (see read() for details).
+ //
+ try
+ {
+ c->dbuf.is.close ();
+ }
+ catch (const io_error&) {}
+ }
+ }
+
+ break;
+ }
+ }
+ }
+ else
+ ifdselect (fds);
+
+ for (fdselect_state& s: fds)
+ {
+ if (s.ready &&
+ !static_cast<pipe_command*> (s.data)->dbuf.read ())
+ {
+ s.fd = nullfd;
+ --unread;
+ }
+ }
}
- catch (const process_error& e)
+ catch (const io_error& e)
{
- // If unable to terminate the process for any reason (the process
- // is exiting on Windows, etc) then just ignore this, postponing
- // the potential failure till the kill() call.
- //
- l5 ([&]{trace (c->loc) << "unable to terminate " << prog (c)
- << ": " << e;});
+ fail (ll) << "io error reading pipeline streams: " << e;
}
-
- c->terminated = true;
}
+ };
- // Wait a bit for the processes to terminate and kill the remaining
- // ones.
- //
- timestamp dl (system_clock::now () + chrono::seconds (2));
-
- for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ // Wait for the pipeline processes and builtins to complete, watching
+ // for their deadlines if present. If a deadline is reached for any of
+ // them, then terminate the whole pipeline.
+ //
+ // Note: must be called after read_pipe().
+ //
+ auto wait_pipe = [&pc, &dl, &trace] ()
+ {
+ for (pipe_command* c (&pc); c != nullptr; c = c->prev)
{
- if (process* p = c->proc)
try
{
- l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
-
- if (!timed_wait (*p, dl))
+ if (process* p = c->proc)
{
- l5 ([&]{trace (c->loc) << "killing: " << c->cmd;});
+ if (!dl)
+ p->wait ();
+ else if (!timed_wait (*p, dl->value))
+ term_pipe (c, trace);
+ }
+ else
+ {
+ builtin* b (c->bltn);
- p->kill ();
- p->wait ();
+ if (!dl)
+ b->wait ();
+ else if (!timed_wait (*b, dl->value))
+ term_pipe (c, trace);
}
}
catch (const process_error& e)
{
- dr << fail (c->loc) << "unable to wait/kill " << prog (c) << ": "
- << e;
+ fail (c->loc) << "unable to wait " << cmd_path (c->cmd) << ": "
+ << e;
}
}
+ };
- // Wait a bit for the builtins to complete and abort if any remain
- // running.
- //
- dl = system_clock::now () + chrono::seconds (2);
+ // Iterate over the pipeline processes and builtins left to right,
+ // printing their stderr if buffered and issuing the diagnostics if the
+ // exit code is not available (terminated abnormally or due to a
+ // deadline), is unexpected, or stdout and/or stderr was not fully
+ // read. Throw failed at the end if the exit code for any of them is not
+ // available or stdout and/or stderr was not fully read. Return false if
+ // exit code for any of them is unexpected (the return is used, for
+ // example, in the if-conditions).
+ //
+ // Note: must be called after wait_pipe() and only once.
+ //
+ auto complete_pipe = [&pc, &env, diag] ()
+ {
+ bool r (true);
+ bool fail (false);
- for (pipe_command* c (pc); c != nullptr; c = c->prev)
+ pipe_command* c (pc.next); // Left-most command.
+ assert (c != nullptr); // Since the lambda must be called once.
+
+ for (pc.next = nullptr; c != nullptr; c = c->next)
{
- if (builtin* b = c->bltn)
- try
+ // Collect the exit status, if present.
+ //
+ // Absent if the process/builtin misses the "unsuccessful" deadline.
+ //
+ optional<process_exit> exit;
+
+ const char* w (c->bltn != nullptr ? "builtin" : "process");
+
+ if (c->bltn != nullptr)
{
- l5 ([&]{trace (c->loc) << "waiting: " << c->cmd;});
+ // Note that this also handles ad hoc termination (without the
+ // call to term_pipe()) by the sleep builtin.
+ //
+ if (c->terminated)
+ {
+ if (c->dl && c->dl->success)
+ exit = process_exit (0);
+ }
+ else
+ exit = process_exit (c->bltn->wait ());
- if (!timed_wait (*b, dl))
+ c->bltn = nullptr;
+ }
+ else if (c->proc != nullptr)
+ {
+ const process& pr (*c->proc);
+
+#ifndef _WIN32
+ if (c->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->signal () == SIGTERM)
+#else
+ if (c->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->status == DBG_TERMINATE_PROCESS)
+#endif
{
- error (c->loc) << prog (c) << " builtin hanged, aborting";
- terminate (false /* trace */);
+ if (c->dl && c->dl->success)
+ exit = process_exit (0);
}
+ else
+ exit = pr.exit;
+
+ c->proc = nullptr;
}
- catch (const system_error& e)
+ else
+ assert (false); // The lambda can only be called once.
+
+ const command& cmd (c->cmd);
+ const location& ll (c->loc);
+
+ // Verify the exit status and issue the diagnostics on failure.
+ //
+ diag_record dr;
+
+ path pr (cmd_path (cmd));
+
+ // Print the diagnostics if the command stdout and/or stderr are not
+ // fully read.
+ //
+ auto unread_output_diag = [&dr, c, w, &pr] (bool main_error)
+ {
+ if (main_error)
+ dr << error (c->loc) << w << ' ' << pr << ' ';
+ else
+ dr << error;
+
+ if (c->unread_stdout)
+ {
+ dr << "stdout ";
+
+ if (c->unread_stderr)
+ dr << "and ";
+ }
+
+ if (c->unread_stderr)
+ dr << "stderr ";
+
+ dr << "not closed after exit";
+ };
+
+ // Fail if the process is terminated due to reaching the deadline.
+ //
+ if (!exit)
+ {
+ dr << error (ll) << w << ' ' << pr
+ << " terminated: execution timeout expired";
+
+ if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (false /* main_error */);
+
+ if (verb == 1)
+ {
+ dr << info << "command line: ";
+ print_process (dr, *c->args);
+ }
+
+ fail = true;
+ }
+ else
+ {
+ // If there is no valid exit code available by whatever reason
+ // then we print the proper diagnostics, dump stderr (if cached
+ // and not too large) and fail the whole script. Otherwise if the
+ // exit code is not correct then we print diagnostics if requested
+ // and fail the pipeline.
+ //
+ bool valid (exit->normal ());
+
+ // On Windows the exit code can be out of the valid codes range
+ // being defined as uint16_t.
+ //
+#ifdef _WIN32
+ if (valid)
+ valid = exit->code () < 256;
+#endif
+
+ // In the presence of a valid exit code and given stdout and
+ // stderr are fully read out we print the diagnostics and return
+ // false rather than throw.
+ //
+ // Note that there can be a race, so that the process we have
+ // terminated due to reaching the deadline has in fact exited
+ // normally. Thus, the 'unread stderr' situation can also happen
+ // to a successfully terminated process. If that's the case, we
+ // report this problem as the main error and the secondary error
+ // otherwise.
+ //
+ if (!valid || c->unread_stdout || c->unread_stderr)
+ fail = true;
+
+ exit_comparison cmp (cmd.exit
+ ? cmd.exit->comparison
+ : exit_comparison::eq);
+
+ uint16_t exc (cmd.exit ? cmd.exit->code : 0);
+
+ bool success (valid &&
+ (cmp == exit_comparison::eq) ==
+ (exc == exit->code ()));
+
+ if (!success)
+ r = false;
+
+ if (!valid || (!success && diag))
+ {
+ dr << error (ll) << w << ' ' << pr << ' ';
+
+ if (!exit->normal ())
+ dr << *exit;
+ else
+ {
+ uint16_t ec (exit->code ()); // Make sure printed as integer.
+
+ if (!valid)
+ {
+ dr << "exit code " << ec << " out of 0-255 range";
+ }
+ else
+ {
+ if (cmd.exit)
+ dr << "exit code " << ec
+ << (cmp == exit_comparison::eq ? " != " : " == ")
+ << exc;
+ else
+ dr << "exited with code " << ec;
+ }
+ }
+
+ if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (false /* main_error */);
+
+ if (verb == 1)
+ {
+ dr << info << "command line: ";
+ print_process (dr, *c->args);
+ }
+
+ if (non_empty (*c->esp, ll) && avail_on_failure (*c->esp, env))
+ dr << info << "stderr: " << *c->esp;
+
+ if (non_empty (*c->osp, ll) && avail_on_failure (*c->osp, env))
+ dr << info << "stdout: " << *c->osp;
+
+ if (non_empty (*c->isp, ll) && avail_on_failure (*c->isp, env))
+ dr << info << "stdin: " << *c->isp;
+
+ // Print cached stderr.
+ //
+ print_file (dr, *c->esp, ll);
+ }
+ else if (c->unread_stdout || c->unread_stderr)
+ unread_output_diag (true /* main_error */);
+ }
+
+ // Now print the buffered stderr, if present, and/or flush the
+ // diagnostics, if issued.
+ //
+ if (c->dbuf.is_open ())
+ c->dbuf.close (move (dr));
+ }
+
+ // Fail if required.
+ //
+ if (fail)
+ throw failed ();
+
+ return r;
+ };
+
+ // Close all buffered pipeline stderr streams ignoring io_error
+ // exceptions.
+ //
+ auto close_pipe = [&pc] ()
+ {
+ for (pipe_command* c (&pc); c != nullptr; c = c->prev)
+ {
+ if (c->dbuf.is.is_open ())
+ try
{
- dr << fail (c->loc) << "unable to wait for " << prog (c) << ": "
- << e;
+ c->dbuf.is.close();
}
+ catch (const io_error&) {}
}
};
@@ -1897,9 +2742,8 @@ namespace build2
fail (ll) << "specified working directory " << cwd
<< " does not exist";
- // Absent if the process/builtin misses the "unsuccessful" deadline.
- //
- optional<process_exit> exit;
+ cstrings args (process_args ());
+ pc.args = &args;
const builtin_info* bi (resolve ? builtins.find (program) : nullptr);
@@ -1909,8 +2753,11 @@ namespace build2
{
// Execute the builtin.
//
- if (verb >= 2)
- print_process (process_args ());
+ // Don't print the true and false builtins, since they are normally
+ // used for the commands execution flow control.
+ //
+ if (verb >= 2 && program != "true" && program != "false")
+ print_process (args);
// Some of the script builtins (cp, mkdir, etc) extend libbutl
// builtins (via callbacks) registering/moving cleanups for the
@@ -1951,18 +2798,6 @@ namespace build2
// We also extend the sleep builtin, deactivating the thread before
// going to sleep and waking up before the deadline is reached.
//
- // Let's "wrap up" the sleep-related values into the single object to
- // rely on "small function object" optimization.
- //
- struct sleep
- {
- optional<timestamp> deadline;
- bool terminated = false;
-
- sleep (const optional<timestamp>& d): deadline (d) {}
- };
- sleep slp (dl ? dl->value : optional<timestamp> ());
-
builtin_callbacks bcs {
// create
@@ -2124,16 +2959,19 @@ namespace build2
// sleep
//
- [&env, &slp] (const duration& d)
+ [&env, &pc] (const duration& d)
{
duration t (d);
- const optional<timestamp>& dl (slp.deadline);
+ const optional<timestamp>& dl (pc.dl
+ ? pc.dl->value
+ : optional<timestamp> ());
if (dl)
{
timestamp now (system_clock::now ());
- slp.terminated = now + t > *dl;
+ if (now + t > *dl)
+ pc.terminated = true;
if (*dl <= now)
return;
@@ -2146,7 +2984,7 @@ namespace build2
// If/when required we could probably support the precise sleep
// mode (e.g., via an option).
//
- env.context.sched.sleep (t);
+ env.context.sched->sleep (t);
}
};
@@ -2158,19 +2996,19 @@ namespace build2
move (ifd), move (ofd.out), move (efd),
cwd,
bcs));
+ pc.bltn = &b;
- pipe_command pc (b, c, ll, prev_cmd);
-
- // If the deadline is specified, then make sure we don't miss it
- // waiting indefinitely in the builtin destructor on the right-hand
- // side of the pipe failure.
+ // If the right-hand part of the pipe fails, then make sure we don't
+ // wait indefinitely in the process destructor if the deadlines are
+ // specified or just because a process is blocked on stderr.
//
- auto g (make_exception_guard ([&dl, &pc, &term_pipe] ()
+ auto g (make_exception_guard ([&pc, &close_pipe, &trace] ()
{
- if (dl)
+ if (pc.bltn != nullptr)
try
{
- term_pipe (&pc);
+ close_pipe ();
+ term_pipe (&pc, trace);
}
catch (const failed&)
{
@@ -2181,28 +3019,21 @@ namespace build2
success = run_pipe (env,
nc, ec,
move (ofd.in),
- ci + 1, li, ll, diag,
- output,
- dl, dl_cmd,
+ ii, li, ci + 1, ll, diag,
+ cf, last_cmd,
+ dl,
&pc);
- if (!dl)
- b.wait ();
- else if (!timed_wait (b, dl->value))
- term_pipe (&pc);
-
- // Note that this also handles ad hoc termination (without the call
- // to term_pipe()) by the sleep builtin (see above).
+ // Complete the pipeline execution, if not done yet.
//
- if (pc.terminated || slp.terminated)
+ if (pc.bltn != nullptr)
{
- assert (dl);
+ read_pipe ();
+ wait_pipe ();
- if (dl->success)
- exit = process_exit (0);
+ if (!complete_pipe ())
+ success = false;
}
- else
- exit = process_exit (r);
}
catch (const system_error& e)
{
@@ -2214,8 +3045,6 @@ namespace build2
{
// Execute the process.
//
- cstrings args (process_args ());
-
// If the process path is not pre-searched then resolve the relative
// non-simple program path against the script's working directory. The
// simple one will be left for the process path search machinery. Also
@@ -2273,10 +3102,16 @@ namespace build2
if (verb >= 2)
print_process (pe, args);
+ // Note that stderr can only be a pipe if we are buffering the
+ // diagnostics. In this case also pass the reading end so it can be
+ // "probed" on Windows (see butl::process::pipe for details).
+ //
process pr (
*pe.path,
args.data (),
- {ifd.get (), -1}, process::pipe (ofd), {-1, efd.get ()},
+ {ifd.get (), -1},
+ process::pipe (ofd),
+ {pc.dbuf.is.fd (), efd.get ()},
cwd.string ().c_str (),
pe.vars);
@@ -2286,18 +3121,19 @@ namespace build2
ofd.out.reset ();
efd.reset ();
- pipe_command pc (pr, c, ll, prev_cmd);
+ pc.proc = &pr;
- // If the deadline is specified, then make sure we don't miss it
- // waiting indefinitely in the process destructor on the right-hand
- // part of the pipe failure.
+ // If the right-hand part of the pipe fails, then make sure we don't
+ // wait indefinitely in the process destructor (see above for
+ // details).
//
- auto g (make_exception_guard ([&dl, &pc, &term_pipe] ()
+ auto g (make_exception_guard ([&pc, &close_pipe, &trace] ()
{
- if (dl)
+ if (pc.proc != nullptr)
try
{
- term_pipe (&pc);
+ close_pipe ();
+ term_pipe (&pc, trace);
}
catch (const failed&)
{
@@ -2308,33 +3144,21 @@ namespace build2
success = run_pipe (env,
nc, ec,
move (ofd.in),
- ci + 1, li, ll, diag,
- output,
- dl, dl_cmd,
+ ii, li, ci + 1, ll, diag,
+ cf, last_cmd,
+ dl,
&pc);
- if (!dl)
- pr.wait ();
- else if (!timed_wait (pr, dl->value))
- term_pipe (&pc);
-
-#ifndef _WIN32
- if (pc.terminated &&
- !pr.exit->normal () &&
- pr.exit->signal () == SIGTERM)
-#else
- if (pc.terminated &&
- !pr.exit->normal () &&
- pr.exit->status == DBG_TERMINATE_PROCESS)
-#endif
+ // Complete the pipeline execution, if not done yet.
+ //
+ if (pc.proc != nullptr)
{
- assert (dl);
+ read_pipe ();
+ wait_pipe ();
- if (dl->success)
- exit = process_exit (0);
+ if (!complete_pipe ())
+ success = false;
}
- else
- exit = pr.exit;
}
catch (const process_error& e)
{
@@ -2347,98 +3171,23 @@ namespace build2
}
}
- // If the righ-hand side pipeline failed than the whole pipeline fails,
- // and no further checks are required.
- //
- if (!success)
- return false;
-
- // Fail if the process is terminated due to reaching the deadline.
- //
- if (!exit)
- fail (ll) << cmd_path (dl_cmd != nullptr ? *dl_cmd : c)
- << " terminated: execution timeout expired";
-
- path pr (cmd_path (c));
-
- // If there is no valid exit code available by whatever reason then we
- // print the proper diagnostics, dump stderr (if cached and not too
- // large) and fail the whole script. Otherwise if the exit code is not
- // correct then we print diagnostics if requested and fail the pipeline.
- //
- bool valid (exit->normal ());
-
- // On Windows the exit code can be out of the valid codes range being
- // defined as uint16_t.
- //
-#ifdef _WIN32
- if (valid)
- valid = exit->code () < 256;
-#endif
-
- exit_comparison cmp (c.exit ? c.exit->comparison : exit_comparison::eq);
- uint16_t exc (c.exit ? c.exit->code : 0);
-
- success = valid &&
- (cmp == exit_comparison::eq) == (exc == exit->code ());
-
- if (!valid || (!success && diag))
- {
- // In the presense of a valid exit code we print the diagnostics and
- // return false rather than throw.
- //
- diag_record d (valid ? error (ll) : fail (ll));
-
- if (!exit->normal ())
- d << pr << " " << *exit;
- else
- {
- uint16_t ec (exit->code ()); // Make sure is printed as integer.
-
- if (!valid)
- d << pr << " exit code " << ec << " out of 0-255 range";
- else if (!success)
- {
- if (diag)
- {
- if (c.exit)
- d << pr << " exit code " << ec
- << (cmp == exit_comparison::eq ? " != " : " == ") << exc;
- else
- d << pr << " exited with code " << ec;
- }
- }
- else
- assert (false);
- }
-
- if (non_empty (esp, ll) && avail_on_failure (esp, env))
- d << info << "stderr: " << esp;
-
- if (non_empty (osp, ll) && avail_on_failure (osp, env))
- d << info << "stdout: " << osp;
-
- if (non_empty (isp, ll) && avail_on_failure (isp, env))
- d << info << "stdin: " << isp;
-
- // Print cached stderr.
- //
- print_file (d, esp, ll);
- }
-
- // If exit code is correct then check if the standard outputs match the
- // expectations. Note that stdout is only redirected to file for the
- // last command in the pipeline.
+ // If the pipeline or the right-hand side outputs check failed, then no
+ // further checks are required. Otherwise, check if the standard outputs
+ // match the expectations. Note that stdout can only be redirected to
+ // file for the last command in the pipeline.
//
// The thinking behind matching stderr first is that if it mismatches,
// then the program probably misbehaves (executes wrong functionality,
// etc) in which case its stdout doesn't really matter.
//
if (success)
- success =
- check_output (pr, esp, isp, err, ll, env, diag, "stderr") &&
- (out == nullptr ||
- check_output (pr, osp, isp, *out, ll, env, diag, "stdout"));
+ {
+ path pr (cmd_path (c));
+
+ success = check_output (pr, esp, isp, err, ll, env, diag, "stderr") &&
+ (out == nullptr ||
+ check_output (pr, osp, isp, *out, ll, env, diag, "stdout"));
+ }
return success;
}
@@ -2446,9 +3195,10 @@ namespace build2
static bool
run_expr (environment& env,
const command_expr& expr,
- size_t li, const location& ll,
+ const iteration_index* ii, size_t li,
+ const location& ll,
bool diag,
- string* output)
+ const function<command_function>& cf, bool last_cmd)
{
// Commands are numbered sequentially throughout the expression
// starting with 1. Number 0 means the command is a single one.
@@ -2492,8 +3242,8 @@ namespace build2
r = run_pipe (env,
p.begin (), p.end (),
auto_fd (),
- ci, li, ll, print,
- output);
+ ii, li, ci, ll, print,
+ cf, last_cmd);
}
ci += p.size ();
@@ -2505,26 +3255,37 @@ namespace build2
void
run (environment& env,
const command_expr& expr,
- size_t li, const location& ll,
- string* output)
+ const iteration_index* ii, size_t li,
+ const location& ll,
+ const function<command_function>& cf,
+ bool last_cmd)
{
// Note that we don't print the expression at any verbosity level
// assuming that the caller does this, potentially providing some
// additional information (command type, etc).
//
- if (!run_expr (env, expr, li, ll, true /* diag */, output))
+ if (!run_expr (env,
+ expr,
+ ii, li, ll,
+ true /* diag */,
+ cf, last_cmd))
throw failed (); // Assume diagnostics is already printed.
}
bool
- run_if (environment& env,
- const command_expr& expr,
- size_t li, const location& ll,
- string* output)
+ run_cond (environment& env,
+ const command_expr& expr,
+ const iteration_index* ii, size_t li,
+ const location& ll,
+ const function<command_function>& cf, bool last_cmd)
{
// Note that we don't print the expression here (see above).
//
- return run_expr (env, expr, li, ll, false /* diag */, output);
+ return run_expr (env,
+ expr,
+ ii, li, ll,
+ false /* diag */,
+ cf, last_cmd);
}
void
@@ -2773,8 +3534,7 @@ namespace build2
try
{
size_t n (0);
- for (const dir_entry& de: dir_iterator (p,
- false /* ignore_dangling */))
+ for (const dir_entry& de: dir_iterator (p, dir_iterator::no_follow))
{
if (n++ < 10)
dr << '\n' << (de.ltype () == entry_type::directory
diff --git a/libbuild2/script/run.hxx b/libbuild2/script/run.hxx
index 8bc246c..c4c2aa2 100644
--- a/libbuild2/script/run.hxx
+++ b/libbuild2/script/run.hxx
@@ -38,22 +38,24 @@ namespace build2
// Location is the start position of this command line in the script. It
// can be used in diagnostics.
//
- // Optionally, save the command output into the referenced variable. In
- // this case assume that the expression contains a single pipline.
+ // Optionally, execute the specified function at the end of the pipe,
+ // either after the last command or instead of it.
//
void
run (environment&,
const command_expr&,
- size_t index,
+ const iteration_index*, size_t index,
const location&,
- string* output = nullptr);
+ const function<command_function>& = nullptr,
+ bool last_cmd = true);
bool
- run_if (environment&,
- const command_expr&,
- size_t index,
- const location&,
- string* output = nullptr);
+ run_cond (environment&,
+ const command_expr&,
+ const iteration_index*, size_t index,
+ const location&,
+ const function<command_function>& = nullptr,
+ bool last_cmd = true);
// Perform the registered special file cleanups in the direct order and
// then the regular cleanups in the reverse order.
@@ -80,6 +82,40 @@ namespace build2
//
string
diag_path (const dir_name_view&);
+
+ // Read the stream content, optionally splitting the input data at
+ // whitespaces or newlines and calling the specified callback function for
+ // each substring (see the set builtin options for the splitting
+ // semantics). Throw failed on io_error.
+ //
+ // If the stream is a pipeline's output, then the pipeline argument must
+ // also be specified. Normally called from a custom command function (see
+ // command_function for details) which is provided with the pipeline
+ // information.
+ //
+ // Turn the stream into the non-blocking mode and, if the pipeline is
+ // specified, read out its buffered stderr streams while waiting for the
+ // input stream data. If a deadline is specified and is reached, then
+ // terminate the whole pipeline, if specified, and bail out. Otherwise
+ // issue diagnostics and fail. The thinking here is that in the former
+ // case the caller first needs to dump the buffered stderr streams, issue
+ // the appropriate diagnostics for the pipeline processes/builtins, and
+ // only throw failed afterwards.
+ //
+ // Note that on Windows we can only turn file descriptors of the pipe type
+ // into the non-blocking mode. Thus, a non-pipe descriptor is read in the
+ // blocking manner (and the deadline is checked less accurately). This is
+ // fine since there are no pipeline stderr streams to read out in this
+ // case.
+ //
+ void
+ read (auto_fd&&,
+ bool whitespace, bool newline, bool exact,
+ const function<void (string&&)>&,
+ pipe_command* pipeline,
+ const optional<deadline>&,
+ const location&,
+ const char* what);
}
}
diff --git a/libbuild2/script/script.cxx b/libbuild2/script/script.cxx
index 9e6eeed..4a6ca33 100644
--- a/libbuild2/script/script.cxx
+++ b/libbuild2/script/script.cxx
@@ -20,14 +20,17 @@ namespace build2
switch (lt)
{
- case line_type::var: s = "variable"; break;
- case line_type::cmd: s = "command"; break;
- case line_type::cmd_if: s = "'if'"; break;
- case line_type::cmd_ifn: s = "'if!'"; break;
- case line_type::cmd_elif: s = "'elif'"; break;
- case line_type::cmd_elifn: s = "'elif!'"; break;
- case line_type::cmd_else: s = "'else'"; break;
- case line_type::cmd_end: s = "'end'"; break;
+ case line_type::var: s = "variable"; break;
+ case line_type::cmd: s = "command"; break;
+ case line_type::cmd_if: s = "'if'"; break;
+ case line_type::cmd_ifn: s = "'if!'"; break;
+ case line_type::cmd_elif: s = "'elif'"; break;
+ case line_type::cmd_elifn: s = "'elif!'"; break;
+ case line_type::cmd_else: s = "'else'"; break;
+ case line_type::cmd_while: s = "'while'"; break;
+ case line_type::cmd_for_args: s = "'for'"; break;
+ case line_type::cmd_for_stream: s = "'for'"; break;
+ case line_type::cmd_end: s = "'end'"; break;
}
return o << s;
@@ -186,14 +189,14 @@ namespace build2
void
dump (ostream& os, const string& ind, const lines& ls)
{
- // Additionally indent the if-branch lines.
+ // Additionally indent the flow control construct block lines.
//
- string if_ind;
+ string fc_ind;
for (const line& l: ls)
{
- // Before printing indentation, decrease it if the else or end line is
- // reached.
+ // Before printing indentation, decrease it if the else, end, etc line
+ // is reached.
//
switch (l.type)
{
@@ -202,9 +205,9 @@ namespace build2
case line_type::cmd_else:
case line_type::cmd_end:
{
- size_t n (if_ind.size ());
+ size_t n (fc_ind.size ());
assert (n >= 2);
- if_ind.resize (n - 2);
+ fc_ind.resize (n - 2);
break;
}
default: break;
@@ -212,9 +215,10 @@ namespace build2
// Print indentations.
//
- os << ind << if_ind;
+ os << ind << fc_ind;
- // After printing indentation, increase it for if/else branch.
+ // After printing indentation, increase it for the flow control
+ // construct block lines.
//
switch (l.type)
{
@@ -222,7 +226,10 @@ namespace build2
case line_type::cmd_ifn:
case line_type::cmd_elif:
case line_type::cmd_elifn:
- case line_type::cmd_else: if_ind += " "; break;
+ case line_type::cmd_else:
+ case line_type::cmd_while:
+ case line_type::cmd_for_args:
+ case line_type::cmd_for_stream: fc_ind += " "; break;
default: break;
}
@@ -761,7 +768,9 @@ namespace build2
{
using script::cleanup;
- assert (!implicit || c.type == cleanup_type::always);
+ // Implicit never-cleanup doesn't make sense.
+ //
+ assert (!implicit || c.type != cleanup_type::never);
const path& p (c.path);
diff --git a/libbuild2/script/script.hxx b/libbuild2/script/script.hxx
index 5a39659..cccad98 100644
--- a/libbuild2/script/script.hxx
+++ b/libbuild2/script/script.hxx
@@ -27,6 +27,9 @@ namespace build2
cmd_elif,
cmd_elifn,
cmd_else,
+ cmd_while,
+ cmd_for_args, // `for x: ...`
+ cmd_for_stream, // `... | for x` and `for x <...`
cmd_end
};
@@ -40,7 +43,7 @@ namespace build2
union
{
- const variable* var; // Pre-entered for line_type::var.
+ const variable* var; // Pre-entered for line_type::{var,cmd_for_*}.
};
};
@@ -262,7 +265,7 @@ namespace build2
cleanup_type type;
build2::path path;
};
- using cleanups = vector<cleanup>;
+ using cleanups = small_vector<cleanup, 1>;
// command_exit
//
@@ -315,6 +318,10 @@ namespace build2
add (string);
};
+ // @@ For better diagnostics we may want to store an individual location
+ // of each command in the pipeline (maybe we can share the file part
+ // somehow since a pipeline cannot span multiple files).
+ //
struct command
{
// We use NULL initial as an indication that the path stored in recall
@@ -354,6 +361,10 @@ namespace build2
// command_pipe
//
+ // Note that we cannot use small_vector here, since moving from objects of
+ // the command_pipe type would invalidate the command redirects of the
+ // reference type in this case.
+ //
using command_pipe = vector<command>;
void
@@ -372,7 +383,7 @@ namespace build2
command_pipe pipe;
};
- using command_expr = vector<expr_term>;
+ using command_expr = small_vector<expr_term, 1>;
void
to_stream (ostream&, const command_expr&, command_to_stream);
@@ -380,6 +391,15 @@ namespace build2
ostream&
operator<< (ostream&, const command_expr&);
+ // Stack-allocated linked list of iteration indexes of the nested loops.
+ //
+ struct iteration_index
+ {
+ size_t index; // 1-based.
+
+ const iteration_index* prev; // NULL for the top-most loop.
+ };
+
struct timeout
{
duration value;
@@ -536,7 +556,7 @@ namespace build2
// Set variable value with optional (non-empty) attributes.
//
virtual void
- set_variable (string&& name,
+ set_variable (string name,
names&&,
const string& attrs,
const location&) = 0;
@@ -569,6 +589,20 @@ namespace build2
~environment () = default;
};
+ // Custom command function that can be executed at the end of the
+ // pipeline. Should throw io_error on the underlying OS error.
+ //
+ // Note: the pipeline can be NULL (think of `for x <<<='foo'`).
+ //
+ struct pipe_command;
+
+ using command_function = void (environment&,
+ const strings& args,
+ auto_fd in,
+ pipe_command* pipeline,
+ const optional<deadline>&,
+ const location&);
+
// Helpers.
//
// Issue diagnostics with the specified prefix and fail if the string
diff --git a/libbuild2/search.cxx b/libbuild2/search.cxx
index 19385b0..dee2ae8 100644
--- a/libbuild2/search.cxx
+++ b/libbuild2/search.cxx
@@ -15,7 +15,9 @@ using namespace butl;
namespace build2
{
const target*
- search_existing_target (context& ctx, const prerequisite_key& pk)
+ search_existing_target (context& ctx,
+ const prerequisite_key& pk,
+ bool out_only)
{
tracer trace ("search_existing_target");
@@ -39,9 +41,10 @@ namespace build2
// Prerequisite's out directory can be one of the following:
//
- // empty This means out is undetermined and we simply search for a
- // target that is in the out tree which happens to be indicated
- // by an empty value, so we can just pass this as is.
+ // empty This means out is undetermined and we search for a target
+ // first in the out tree (which happens to be indicated by an
+ // empty value, so we can just pass this as is) and if not
+ // found, then in the src tree (unless suppressed).
//
// absolute This is the "final" value that doesn't require any processing
// and we simply use it as is.
@@ -58,8 +61,11 @@ namespace build2
else
{
o = pk.scope->out_path ();
- o /= *tk.out;
- o.normalize ();
+ if (!tk.out->current ())
+ {
+ o /= *tk.out;
+ o.normalize ();
+ }
}
// Drop out if it is the same as src (in-src build).
@@ -71,6 +77,27 @@ namespace build2
const target* t (
ctx.targets.find (*tk.type, d, o, *tk.name, tk.ext, trace));
+ // Try in the src tree.
+ //
+ if (t == nullptr &&
+ !out_only &&
+ tk.out->empty () &&
+ tk.dir->relative () &&
+ !pk.scope->out_eq_src ())
+ {
+ o = move (d);
+
+ d = pk.scope->src_path ();
+
+ if (!tk.dir->empty ())
+ {
+ d /= *tk.dir;
+ d.normalize ();
+ }
+
+ t = ctx.targets.find (*tk.type, d, o, *tk.name, tk.ext, trace);
+ }
+
if (t != nullptr)
l5 ([&]{trace << "existing target " << *t
<< " for prerequisite " << pk;});
@@ -169,11 +196,7 @@ namespace build2
// will be from the src tree.
//
// In the other two cases we use the prerequisite's out (in case it is
- // relative, we need to complete it, which is @@ OUT TODO). Note that we
- // blindly trust the user's value which can be used for some interesting
- // tricks, for example:
- //
- // ../cxx{foo}@./
+ // relative, we need to complete it).
//
dir_path out;
@@ -183,7 +206,24 @@ namespace build2
out = out_src (d, *s->root_scope ());
}
else
- out = *tk.out;
+ {
+ if (tk.out->absolute ())
+ out = *tk.out; // Already normalized.
+ else
+ {
+ out = pk.scope->out_path ();
+ if (!tk.out->current ())
+ {
+ out /= *tk.out;
+ out.normalize ();
+ }
+ }
+
+ // Drop out if it is the same as src (in-src build).
+ //
+ if (out == d)
+ out.clear ();
+ }
// Find or insert. Note that we are using our updated extension.
//
@@ -215,6 +255,12 @@ namespace build2
const target_key& tk (pk.tk);
+ // If out is present, then it means the target is in src and we shouldn't
+ // be creating new targets in src, should we? Feels like this should not
+ // even be called if out is not empty.
+ //
+ assert (tk.out->empty ());
+
// We default to the target in this directory scope.
//
dir_path d;
@@ -235,8 +281,6 @@ namespace build2
//
// More often insert than find, so skip find in insert().
//
- // @@ OUT: same story as in search_existing_target() re out.
- //
auto r (ctx.targets.insert (*tk.type,
move (d),
*tk.out,
@@ -259,6 +303,12 @@ namespace build2
const target_key& tk (pk.tk);
+ // If out is present, then it means the target is in src and we shouldn't
+ // be creating new targets in src, should we? Feels like this should not
+ // even be called if out is not empty.
+ //
+ assert (tk.out->empty ());
+
// We default to the target in this directory scope.
//
dir_path d;
@@ -279,8 +329,6 @@ namespace build2
//
// More often insert than find, so skip find in insert_locked().
//
- // @@ OUT: same story as in search_existing_target() re out.
- //
auto r (ctx.targets.insert_locked (*tk.type,
move (d),
*tk.out,
diff --git a/libbuild2/search.hxx b/libbuild2/search.hxx
index aa30648..e3b1442 100644
--- a/libbuild2/search.hxx
+++ b/libbuild2/search.hxx
@@ -15,8 +15,13 @@ namespace build2
// Search for an existing target in this prerequisite's scope. Scope can be
// NULL if directories are absolute.
//
+ // If dir is relative and out is not specified, then first search in the out
+ // tree and, if not found, then in the src tree, unless out_only is true.
+ // If dir is absolute, then out is expected to be specified as well, if
+ // necessary.
+ //
LIBBUILD2_SYMEXPORT const target*
- search_existing_target (context&, const prerequisite_key&);
+ search_existing_target (context&, const prerequisite_key&, bool out_only);
// Search for an existing file. If the prerequisite directory is relative,
// then look in the scope's src directory. Otherwise, if the absolute
diff --git a/libbuild2/target-key.hxx b/libbuild2/target-key.hxx
index c5690a9..9ac87dc 100644
--- a/libbuild2/target-key.hxx
+++ b/libbuild2/target-key.hxx
@@ -94,8 +94,21 @@ namespace build2
LIBBUILD2_SYMEXPORT ostream&
operator<< (ostream&, const target_key&);
- LIBBUILD2_SYMEXPORT ostream&
- to_stream (ostream&, const target_key&, optional<stream_verbosity> = nullopt);
+ // If name_only is true, then only print the target name (and extension, if
+ // necessary), without the directory or type.
+ //
+ // Return true if the result is regular, that is, in the
+ // <dir>/<type>{<name>}@<out>/ form with the individual components
+ // corresponding directly to the target_key members (that is, without moving
+ // parts around as would be the case for directories). This information is
+ // used when trying to print several targets in a combined form (for
+ // example, {hxx cxx}{foo}) in print_diag().
+ //
+ LIBBUILD2_SYMEXPORT bool
+ to_stream (ostream&,
+ const target_key&,
+ optional<stream_verbosity> = nullopt,
+ bool name_only = false);
}
namespace std
diff --git a/libbuild2/target-type.hxx b/libbuild2/target-type.hxx
index 09bc316..93c5744 100644
--- a/libbuild2/target-type.hxx
+++ b/libbuild2/target-type.hxx
@@ -89,9 +89,18 @@ namespace build2
const location&,
bool reverse);
- void (*print) (ostream&, const target_key&);
+ // See to_stream(ostream,target_key) for details.
+ //
+ bool (*print) (ostream&, const target_key&, bool name_only);
- const target* (*search) (const target&, const prerequisite_key&);
+ // Target type-specific prerequisite to target search.
+ //
+ // If passed target is NULL, then only search for an existing target (and
+ // which can be performed during execute, not only match).
+ //
+ const target* (*search) (context&,
+ const target*,
+ const prerequisite_key&);
// Target type flags.
//
@@ -100,12 +109,15 @@ namespace build2
// group link-up only happens during match, then the hint would be looked
// up before the group is known.
//
+ // Note: consider exposing as an attribute in define if adding a new flag.
+ //
enum class flag: uint64_t
{
none = 0,
group = 0x01, // A (non-adhoc) group.
see_through = group | 0x02, // A group with "see through" semantics.
- member_hint = group | 0x04 // Untyped rule hint applies to members.
+ member_hint = group | 0x04, // Untyped rule hint applies to members.
+ dyn_members = group | 0x08 // A group with dynamic members.
};
flag flags;
@@ -129,6 +141,9 @@ namespace build2
bool
is_a (const char*) const; // Defined in target.cxx
+
+ target_type& operator= (target_type&&) = delete;
+ target_type& operator= (const target_type&) = delete;
};
inline bool
@@ -189,18 +204,18 @@ namespace build2
return type_map_.empty ();
}
- const target_type&
+ pair<reference_wrapper<const target_type>, bool>
insert (const target_type& tt)
{
- type_map_.emplace (tt.name, target_type_ref (tt));
- return tt;
+ auto r (type_map_.emplace (tt.name, target_type_ref (tt)));
+ return {r.second ? tt : r.first->second.get (), r.second};
}
template <typename T>
const target_type&
insert ()
{
- return insert (T::static_type);
+ return insert (T::static_type).first;
}
pair<reference_wrapper<const target_type>, bool>
@@ -246,7 +261,7 @@ namespace build2
target_type_ref (unique_ptr<target_type>&& p)
: p_ (p.release ()), d_ (true) {}
- target_type_ref (target_type_ref&& r)
+ target_type_ref (target_type_ref&& r) noexcept
: p_ (r.p_), d_ (r.d_) {r.p_ = nullptr;}
~target_type_ref () {if (p_ != nullptr && d_) delete p_;}
diff --git a/libbuild2/target.cxx b/libbuild2/target.cxx
index 78bc5ac..2a134a4 100644
--- a/libbuild2/target.cxx
+++ b/libbuild2/target.cxx
@@ -38,7 +38,9 @@ namespace build2
if (!name->empty ())
{
v = *name;
- target::combine_name (v, ext, false /* @@ TODO: what to do? */);
+ // @@ TMP: see also other calls to combine_name() -- need to fix.
+ //
+ target::combine_name (v, ext, false /* @@ TMP: what to do? */);
}
else
assert (!ext || ext->empty ()); // Unspecified or none.
@@ -109,7 +111,8 @@ namespace build2
group_view target::
group_members (action) const
{
- assert (false); // Not a group or doesn't expose its members.
+ // Not a group or doesn't expose its members.
+ //
return group_view {nullptr, 0};
}
@@ -140,7 +143,8 @@ namespace build2
pair<lookup, size_t> target::
lookup_original (const variable& var,
bool target_only,
- const scope* bs) const
+ const scope* bs,
+ bool locked) const
{
pair<lookup_type, size_t> r (lookup_type (), 0);
@@ -158,6 +162,11 @@ namespace build2
{
++r.second;
+ // While we went back to not treating the first member as a group for
+ // variable lookup, let's keep this logic in case one day we end up with
+ // a separate ad hoc group target.
+ //
+#if 0
// In case of an ad hoc group, we may have to look in two groups.
//
if ((g1 = group) != nullptr)
@@ -175,6 +184,19 @@ namespace build2
}
}
}
+#else
+ // Skip looking up in the ad hoc group, which is semantically the
+ // first/primary member.
+ //
+ if ((g1 = group == nullptr
+ ? nullptr
+ : group->adhoc_group () ? group->group : group))
+ {
+ auto p (g1->vars.lookup (var));
+ if (p.first != nullptr)
+ r.first = lookup_type (*p.first, p.second, g1->vars);
+ }
+#endif
}
// Delegate to scope's lookup_original().
@@ -183,9 +205,14 @@ namespace build2
{
if (!target_only)
{
- target_key tk (key ());
- target_key g1k (g1 != nullptr ? g1->key () : target_key {});
- target_key g2k (g2 != nullptr ? g2->key () : target_key {});
+ auto key = [locked] (const target* t)
+ {
+ return locked ? t->key_locked () : t->key ();
+ };
+
+ target_key tk (key (this));
+ target_key g1k (g1 != nullptr ? key (g1) : target_key {});
+ target_key g2k (g2 != nullptr ? key (g2) : target_key {});
if (bs == nullptr)
bs = &base_scope ();
@@ -206,14 +233,30 @@ namespace build2
}
value& target::
- append (const variable& var)
+ append (const variable& var, const scope* bs)
{
// Note: see also prerequisite::append() if changing anything here.
// Note that here we want the original value without any overrides
// applied.
//
- auto l (lookup_original (var).first);
+ auto l (lookup_original (var, false, bs).first);
+
+ if (l.defined () && l.belongs (*this)) // Existing var in this target.
+ return vars.modify (l); // Ok since this is original.
+
+ value& r (assign (var)); // NULL.
+
+ if (l.defined ())
+ r = *l; // Copy value (and type) from the outer scope.
+
+ return r;
+ }
+
+ value& target::
+ append_locked (const variable& var, const scope* bs)
+ {
+ auto l (lookup_original (var, false, bs, true /* locked */).first);
if (l.defined () && l.belongs (*this)) // Existing var in this target.
return vars.modify (l); // Ok since this is original.
@@ -545,15 +588,33 @@ namespace build2
context& ctx (t.ctx);
include_type r (include_type::normal);
-
- if (const string* v = cast_null<string> (p.vars[ctx.var_include]))
{
- if (*v == "false") r = include_type::excluded;
- else if (*v == "adhoc") r = include_type::adhoc;
- else if (*v == "true") r = include_type::normal;
- else
- fail << "invalid " << *ctx.var_include << " variable value "
- << "'" << *v << "' specified for prerequisite " << p;
+ lookup l (p.vars[ctx.var_include]);
+
+ if (l.defined ())
+ {
+ if (l->null)
+ {
+ // @@ TMP (added in 0.16.0).
+ //
+ warn << "null " << *ctx.var_include << " variable value specified "
+ << "for prerequisite " << p <<
+ info << "treated as undefined for backwards compatibility" <<
+ info << "this warning will become error in the future";
+ }
+ else
+ {
+ const string& v (cast<string> (*l));
+
+ if (v == "false") r = include_type::excluded;
+ else if (v == "true") r = include_type::normal;
+ else if (v == "adhoc") r = include_type::adhoc;
+ else if (v == "posthoc") r = include_type::posthoc;
+ else
+ fail << "invalid " << *ctx.var_include << " variable value '"
+ << v << "' specified for prerequisite " << p;
+ }
+ }
}
// Handle operation-specific override (see var_include documentation
@@ -564,39 +625,55 @@ namespace build2
names storage;
names_view ns;
- const variable* current_ovar (nullptr);
+ const variable* ovar (nullptr);
if (r != include_type::excluded)
{
- current_ovar = a.outer ()
- ? ctx.current_outer_ovar
- : ctx.current_inner_ovar;
+ // Instead of going via potentially expensive target::base_scope(), use
+ // the prerequisite's scope; while it may not be the same as the
+ // target's base scope, they must have the same root scope.
+ //
+ const scope& rs (*p.scope.root_scope ());
+
+ ovar = rs.root_extra->operations[
+ (a.outer ()
+ ? ctx.current_outer_oif
+ : ctx.current_inner_oif)->id].ovar;
- if (current_ovar != nullptr && (l = p.vars[*current_ovar]))
+ if (ovar != nullptr)
{
- // Maybe we should optimize this for the common cases (bool, path,
- // name)? But then again we don't expect many such overrides. Plus
- // will complicate the diagnostics below.
- //
- ns = reverse (*l, storage);
+ l = p.vars[*ovar];
- if (ns.size () == 1)
+ if (l.defined ())
{
- const name& n (ns[0]);
+ if (l->null)
+ fail << "null " << *ovar << " variable value specified for "
+ << "prerequisite " << p;
+
+ // Maybe we should optimize this for the common cases (bool, path,
+ // name)? But then again we don't expect many such overrides. Plus
+ // will complicate the diagnostics below.
+ //
+ ns = reverse (*l, storage, true /* reduce */);
- if (n.simple ())
+ if (ns.size () == 1)
{
- const string& v (n.value);
+ const name& n (ns[0]);
+
+ if (n.simple ())
+ {
+ const string& v (n.value);
- if (v == "false")
- r1 = false;
- else if (v == "true")
- r1 = true;
+ if (v == "false")
+ r1 = false;
+ else if (v == "true")
+ r1 = true;
+ }
}
- }
- if (r1 && !*r1)
- r = include_type::excluded;
+ if (r1 && !*r1)
+ r = include_type::excluded;
+ }
}
}
@@ -617,8 +694,8 @@ namespace build2
// Note: we have to delay this until the meta-operation callback above
// had a chance to override it.
//
- fail << "unrecognized " << *current_ovar << " variable value "
- << "'" << ns << "' specified for prerequisite " << p;
+ fail << "unrecognized " << *ovar << " variable value '" << ns
+ << "' specified for prerequisite " << p;
}
}
@@ -733,10 +810,24 @@ namespace build2
if (p.second)
{
+#if 0
+ {
+ size_t n (map_.bucket_count ());
+ if (n > buckets_)
+ {
+ text << "target_set buckets: " << buckets_ << " -> " << n
+ << " (" << map_.size () << ")";
+ buckets_ = n;
+ }
+ }
+#endif
+
t->ext_ = &i->first.ext;
t->decl = decl;
t->state.inner.target_ = t;
t->state.outer.target_ = t;
+ t->state.inner.vars.target_ = t;
+ t->state.outer.vars.target_ = t;
if (ctx.phase != run_phase::load && !need_lock)
ul.unlock ();
@@ -792,9 +883,14 @@ namespace build2
static const optional<string> unknown_ext ("?");
- ostream&
- to_stream (ostream& os, const target_key& k, optional<stream_verbosity> osv)
+ bool
+ to_stream (ostream& os,
+ const target_key& k,
+ optional<stream_verbosity> osv,
+ bool name_only)
{
+ // Note: similar code in print_diag_impl(vector<target_key>).
+
stream_verbosity sv (osv ? *osv : stream_verb (os));
uint16_t dv (sv.path);
uint16_t ev (sv.extension);
@@ -804,22 +900,29 @@ namespace build2
//
bool n (!k.name->empty ());
- // Note: relative() returns empty for './'.
- //
- const dir_path& rd (dv < 1 ? relative (*k.dir) : *k.dir); // Relative.
- const dir_path& pd (n ? rd : rd.directory ()); // Parent.
+ const target_type& tt (*k.type);
- if (!pd.empty ())
+ dir_path rds; // Storage.
+ if (!name_only)
{
+ // Note: relative() returns empty for './'.
+ //
if (dv < 1)
- os << diag_relative (pd);
- else
- to_stream (os, pd, true /* representation */);
- }
+ rds = relative (*k.dir);
- const target_type& tt (*k.type);
+ const dir_path& rd (dv < 1 ? rds : *k.dir); // Relative.
+ const dir_path& pd (n ? rd : rd.directory ()); // Parent.
+
+ if (!pd.empty ())
+ {
+ if (dv < 1)
+ os << diag_relative (pd);
+ else
+ to_stream (os, pd, true /* representation */);
+ }
- os << tt.name << '{';
+ os << tt.name << '{';
+ }
if (n)
{
@@ -862,37 +965,47 @@ namespace build2
}
}
else
+ {
+ if (name_only && dv < 1) // Already done if !name_only.
+ rds = relative (*k.dir);
+
+ const dir_path& rd (dv < 1 ? rds : *k.dir);
+
to_stream (os,
rd.empty () ? dir_path (".") : rd.leaf (),
true /* representation */);
+ }
- os << '}';
-
- // If this target is from src, print its out.
- //
- if (!k.out->empty ())
+ if (!name_only)
{
- if (dv < 1)
+ os << '}';
+
+ // If this target is from src, print its out.
+ //
+ if (!k.out->empty ())
{
- // Don't print '@./'.
- //
- const string& o (diag_relative (*k.out, false));
+ if (dv < 1)
+ {
+ // Don't print '@./'.
+ //
+ const string& o (diag_relative (*k.out, false));
- if (!o.empty ())
- os << '@' << o;
+ if (!o.empty ())
+ os << '@' << o;
+ }
+ else
+ os << '@' << *k.out;
}
- else
- os << '@' << *k.out;
}
- return os;
+ return n; // Regular if we had the name.
}
ostream&
operator<< (ostream& os, const target_key& k)
{
if (auto p = k.type->print)
- p (os, k);
+ p (os, k, false /* name_only */);
else
to_stream (os, k, stream_verb (os));
@@ -913,14 +1026,19 @@ namespace build2
case run_phase::load: break;
case run_phase::match:
{
- // Similar logic to matched_state_impl().
+ // Similar logic to target::matched().
//
const opstate& s (state[action () /* inner */]);
- // Note: already synchronized.
- size_t o (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
+ // Note: use acquire for group_state().
+ //
+ size_t c (s.task_count.load (memory_order_acquire));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
- if (o != offset_applied && o != offset_executed)
+ if (!(c == (b + offset_applied) ||
+ c == (b + offset_executed) ||
+ (c >= (b + offset_busy) &&
+ s.match_extra.cur_options_.load (memory_order_relaxed) != 0)))
break;
}
// Fall through.
@@ -1043,25 +1161,27 @@ namespace build2
//
const target*
- target_search (const target& t, const prerequisite_key& pk)
+ target_search (context& ctx, const target*, const prerequisite_key& pk)
{
// The default behavior is to look for an existing target in the
// prerequisite's directory scope.
//
- return search_existing_target (t.ctx, pk);
+ return search_existing_target (ctx, pk, true /* out_only */);
}
const target*
- file_search (const target& t, const prerequisite_key& pk)
+ file_search (context& ctx, const target* t, const prerequisite_key& pk)
{
- // First see if there is an existing target.
+ // First see if there is an existing target in the out or src tree.
//
- if (const target* e = search_existing_target (t.ctx, pk))
+ if (const target* e = search_existing_target (ctx,
+ pk,
+ false /* out_only */))
return e;
// Then look for an existing file in the src tree.
//
- return search_existing_file (t.ctx, pk);
+ return t != nullptr ? search_existing_file (ctx, pk) : nullptr;
}
extern const char target_extension_none_[] = "";
@@ -1081,20 +1201,20 @@ namespace build2
return tk.ext->c_str ();
}
- void
- target_print_0_ext_verb (ostream& os, const target_key& k)
+ bool
+ target_print_0_ext_verb (ostream& os, const target_key& k, bool no)
{
stream_verbosity sv (stream_verb (os));
if (sv.extension == 1) sv.extension = 0; // Remap 1 to 0.
- to_stream (os, k, sv);
+ return to_stream (os, k, sv, no);
}
- void
- target_print_1_ext_verb (ostream& os, const target_key& k)
+ bool
+ target_print_1_ext_verb (ostream& os, const target_key& k, bool no)
{
stream_verbosity sv (stream_verb (os));
if (sv.extension == 0) sv.extension = 1; // Remap 0 to 1.
- to_stream (os, k, sv);
+ return to_stream (os, k, sv, no);
}
// type info
@@ -1152,15 +1272,79 @@ namespace build2
target_type::flag::none
};
+ // group
+ //
+ group_view group::
+ group_members (action a) const
+ {
+ if (members_on == 0) // Not yet discovered.
+ return group_view {nullptr, 0};
+
+ // Members discovered during anything other than perform_update are only
+ // good for that operation. For example, we only return the static members
+ // ("representative sample") for perform_configure.
+ //
+ // We also re-discover the members on each update and clean not to
+ // overcomplicate the already twisted adhoc_buildscript_rule::apply()
+ // logic.
+ //
+ if (members_on != ctx.current_on)
+ {
+ if (members_action != perform_update_id ||
+ a == perform_update_id ||
+ a == perform_clean_id)
+ return group_view {nullptr, 0};
+ }
+
+ // Note that we may have no members (e.g., perform_configure and there are
+ // no static members). However, whether std::vector returns a non-NULL
+ // pointer in this case is undefined.
+ //
+ size_t n (members.size ());
+ return group_view {
+ n != 0
+ ? members.data ()
+ : reinterpret_cast<const target* const*> (this),
+ n};
+ }
+
+ const target_type group::static_type
+ {
+ "group",
+ &mtime_target::static_type,
+ &target_factory<group>,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ &target_search,
+ //
+ // Note that the dyn_members semantics is used not only to handle
+ // depdb-dyndep --dyn-target, but also pattern rule-static members.
+ //
+ target_type::flag::group | target_type::flag::dyn_members
+ };
+
+ // alias
+ //
static const target*
- alias_search (const target& t, const prerequisite_key& pk)
+ alias_search (context& ctx, const target* t, const prerequisite_key& pk)
{
// For an alias we don't want to silently create a target since it will do
- // nothing and it most likely not what the user intended.
+ // nothing and it most likely not what the user intended (but omit this
+ // check when searching for an existing target since presumably a new one
+ // won't be created in this case).
+ //
+ // But, allowing implied aliases seems harmless since all the alias does
+ // is pull its prerequisites. And they are handy to use as metadata
+ // carriers.
//
- const target* e (search_existing_target (t.ctx, pk));
+ // Doesn't feel like an alias in the src tree makes much sense.
+ //
+ const target* e (search_existing_target (ctx, pk, true /* out_only */));
- if (e == nullptr || e->decl != target_decl::real)
+ if ((e == nullptr ||
+ !(operator>= (e->decl, target_decl::implied))) && t != nullptr)
fail << "no explicit target for " << pk;
return e;
@@ -1186,7 +1370,7 @@ namespace build2
{
try
{
- for (const dir_entry& e: dir_iterator (d, true /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::detect_dangling))
{
switch (e.type ())
{
@@ -1204,6 +1388,16 @@ namespace build2
break;
}
+ case entry_type::unknown:
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / e.path ();
+
+ break;
+ }
default:
break;
}
@@ -1225,17 +1419,26 @@ namespace build2
try
{
- for (const dir_entry& e: dir_iterator (d, true /* ignore_dangling */))
+ for (const dir_entry& e: dir_iterator (d, dir_iterator::detect_dangling))
{
if (e.type () == entry_type::directory)
+ {
r.push_back (
- prerequisite (nullopt,
- dir::static_type,
+ prerequisite (dir::static_type,
dir_path (e.path ().representation ()), // Relative.
dir_path (), // In the out tree.
string (),
nullopt,
bs));
+ }
+ else if (e.type () == entry_type::unknown)
+ {
+ bool sl (e.ltype () == entry_type::symlink);
+
+ warn << "skipping "
+ << (sl ? "dangling symlink" : "inaccessible entry") << ' '
+ << d / e.path ();
+ }
}
}
catch (const system_error& e)
@@ -1247,17 +1450,27 @@ namespace build2
}
static const target*
- dir_search (const target& t, const prerequisite_key& pk)
+ dir_search (context& ctx, const target* t, const prerequisite_key& pk)
{
tracer trace ("dir_search");
- // The first step is like in search_alias(): looks for an existing target.
+ // The first step is like in alias_search(): looks for an existing target
+ // (but unlike alias, no implied, think `test/: install=false`).
+ //
+ // Likewise, dir{} in the src tree doesn't make much sense.
//
- const target* e (search_existing_target (t.ctx, pk));
+ const target* e (search_existing_target (ctx, pk, true /* out_only */));
if (e != nullptr && e->decl == target_decl::real)
return e;
+ // The search for an existing target can also be done during execute so
+ // none of the below code applied. Note: return implied instead of NULL
+ // (to be consistent with search_new(), for example).
+ //
+ if (t == nullptr)
+ return e;
+
// If not found (or is implied), then try to load the corresponding
// buildfile (which would normally define this target). Failed that, see
// if we can assume an implied buildfile which would be equivalent to:
@@ -1291,18 +1504,18 @@ namespace build2
//
bool retest (false);
- assert (t.ctx.phase == run_phase::match);
+ assert (ctx.phase == run_phase::match);
{
// Switch the phase to load.
//
- phase_switch ps (t.ctx, run_phase::load);
+ phase_switch ps (ctx, run_phase::load);
// This is subtle: while we were fussing around another thread may have
// loaded the buildfile. So re-test now that we are in an exclusive
// phase.
//
if (e == nullptr)
- e = search_existing_target (t.ctx, pk);
+ e = search_existing_target (ctx, pk, true);
if (e != nullptr && e->decl == target_decl::real)
retest = true;
@@ -1340,14 +1553,14 @@ namespace build2
}
}
- assert (t.ctx.phase == run_phase::match);
+ assert (ctx.phase == run_phase::match);
// If we loaded/implied the buildfile, examine the target again.
//
if (retest)
{
if (e == nullptr)
- e = search_existing_target (t.ctx, pk);
+ e = search_existing_target (ctx, pk, true);
if (e != nullptr && e->decl == target_decl::real)
return e;
@@ -1472,7 +1685,7 @@ namespace build2
nullptr,
#endif
nullptr,
- &file_search,
+ &file_search, // Note: can also be a script in src.
target_type::flag::none
};
@@ -1562,6 +1775,55 @@ namespace build2
target_type::flag::none
};
+ static const char*
+ buildscript_target_extension (const target_key& tk, const scope*)
+ {
+ // If the name is special 'buildscript', then there is no extension,
+ // otherwise it is .buildscript.
+ //
+ return *tk.name == "buildscript" ? "" : "buildscript";
+ }
+
+ static bool
+ buildscript_target_pattern (const target_type&,
+ const scope&,
+ string& v,
+ optional<string>& e,
+ const location& l,
+ bool r)
+ {
+ if (r)
+ {
+ assert (e);
+ e = nullopt;
+ }
+ else
+ {
+ e = target::split_name (v, l);
+
+ if (!e && v != "buildscript")
+ {
+ e = "buildscript";
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ const target_type buildscript::static_type
+ {
+ "buildscript",
+ &file::static_type,
+ &target_factory<buildscript>,
+ &buildscript_target_extension,
+ nullptr, /* default_extension */
+ &buildscript_target_pattern,
+ nullptr,
+ &file_search,
+ target_type::flag::none
+ };
+
const target_type doc::static_type
{
"doc",
diff --git a/libbuild2/target.hxx b/libbuild2/target.hxx
index bf8c4fe..20cd32d 100644
--- a/libbuild2/target.hxx
+++ b/libbuild2/target.hxx
@@ -4,8 +4,9 @@
#ifndef LIBBUILD2_TARGET_HXX
#define LIBBUILD2_TARGET_HXX
+#include <cstddef> // max_align_t
#include <iterator> // tags, etc.
-#include <type_traits> // aligned_storage
+#include <type_traits> // is_*
#include <unordered_map>
#include <libbutl/multi-index.hxx> // map_iterator_adapter
@@ -38,16 +39,19 @@ namespace build2
// Prerequisite inclusion/exclusion (see include() function below).
//
+ // Note that posthoc is handled internally and should normally be treated by
+ // the rules the same as excluded.
+ //
class include_type
{
public:
- enum value {excluded, adhoc, normal};
+ enum value {excluded, posthoc, adhoc, normal};
include_type (value v): v_ (v) {}
include_type (bool v): v_ (v ? normal : excluded) {}
operator value () const {return v_;}
- explicit operator bool () const {return v_ != excluded;}
+ explicit operator bool () const {return v_ == normal || v_ == adhoc;}
private:
value v_;
@@ -75,27 +79,61 @@ namespace build2
//
// The include member normally just indicates (in the first bit) whether
// this prerequisite is ad hoc. But it can also carry additional information
- // (for example, from operation-specific override) in other bits.
+ // (for example, from operation-specific override) in other bits (see below
+ // for details).
//
struct prerequisite_target
{
using target_type = build2::target;
prerequisite_target (const target_type* t, bool a = false, uintptr_t d = 0)
- : target (t), include (a ? 1 : 0), data (d) {}
+ : target (t), include (a ? include_adhoc : 0), data (d) {}
+
+ prerequisite_target (const target_type& t, bool a = false, uintptr_t d = 0)
+ : prerequisite_target (&t, a, d) {}
prerequisite_target (const target_type* t, include_type a, uintptr_t d = 0)
: prerequisite_target (t, a == include_type::adhoc, d) {}
+ prerequisite_target (const target_type& t, include_type a, uintptr_t d = 0)
+ : prerequisite_target (&t, a, d) {}
+
+ const target_type* target;
+
operator const target_type*& () {return target;}
operator const target_type* () const {return target;}
const target_type* operator-> () const {return target;}
- bool adhoc () const {return (include & 1) != 0;}
+ // The first 8 bits are reserved with the first two having the following
+ // semantics:
+ //
+ // adhoc
+ //
+ // This prerequisite is ad hoc.
+ //
+ // udm
+ //
+ // This prerequisite is updated during match. Note that only static
+ // prerequisites that are updated during match should have this bit set
+ // (see dyndep_rule::*_existing_file() for details).
+ //
+ // target
+ //
+ // The data member contains the target pointer that has been "blanked
+ // out" for some reason (updated during match, unmatched, etc). See
+ // dyndep_rule::updated_during_match() for details.
+ //
+ static const uintptr_t include_adhoc = 0x01;
+ static const uintptr_t include_udm = 0x02;
+ static const uintptr_t include_target = 0x80;
+
+ uintptr_t include;
- const target_type* target;
- uintptr_t include; // First bit is 1 if include=adhoc.
- uintptr_t data;
+ bool adhoc () const {return (include & include_adhoc) != 0;}
+
+ // Auxiliary data.
+ //
+ uintptr_t data;
};
using prerequisite_targets = vector<prerequisite_target>;
@@ -145,7 +183,112 @@ namespace build2
//
struct match_extra
{
- bool fallback; // True if matching a fallback rule (see match_rule()).
+ bool locked; // Normally true (see adhoc_rule::match() for background).
+ bool fallback; // True if matching a fallback rule (see match_rule_impl()).
+
+ // When matching a rule, the caller may wish to request a subset of the
+ // full functionality of performing the operation on the target. This is
+ // achieved with match options.
+ //
+ // Since the match caller normally has no control over which rule will be
+ // matched, the options are not specific to a particular rule. Rather,
+ // options are defined for performing a specific operation on a specific
+ // target type and would normally be part of the target type semantics.
+ // To put it another way, when a rule matches a target of certain type for
+ // certain operation, there is an expectation of certain semantics, some
+ // parts of which could be made optional.
+ //
+ // As a concrete example, consider installing libs{}, which traditionally
+ // has two parts: runtime (normally just the versioned shared library) and
+ // build-time (non-versioned symlinks, pkg-config files, headers, etc).
+ // The option to install only the runtime files is part of the bin::libs{}
+ // semantics, not of, say, cc::install_rule.
+ //
+ // The match options are specified as a uint64_t mask, which means there
+ // can be a maximum of 64 options per operation/target type. Options are
+ // opt-out rather than opt-in. That is, by default, all the options are
+ // enabled unless the match caller explicitly opted out of some
+ // functionality. Even if the caller opted out, there is no guarantee that
+ // the matching rule will honor this request (for example, because it is a
+ // user-provided ad hoc recipe). To put it another way, support for
+ // options is a quality of implementation matter.
+ //
+ // From the rule implementation's point view, match options are handled as
+ // follows: On initial match()/apply(), cur_options is initialized to ~0
+ // (all options enabled) and the matching rule is expected to override it
+ // with new_options in apply() (note that match() should no base any
+ // decisions on new_options since they may change between match() and
+ // apply()). This way a rule that does not support any match options does
+ // not need to do anything. Subsequent match calls may add new options
+ // which causes a rematch that manifests in the rule's reapply() call. In
+ // reapply(), cur_options are the currently enabled options and
+ // new_options are the newly requested options. Here the rule is expected
+ // to factor new_options to cur_options as appropriate. Note also that on
+ // rematch, if current options already include new options, then no call
+ // to reapply() is made. This, in particular, means that a rule that does
+ // not adjust cur_options in match() will never get a reapply() call
+ // (because all the options are enabled from the start). Note that
+ // cur_options should only be modfied in apply() or reapply().
+ //
+ // If a rematch is triggered after the rule has already been executed, an
+ // error is issued. This means that match options are not usable for
+ // operation/target types that could plausibly be executed during
+ // match. In particular, using match options for update and clean
+ // operations is a bad idea (update of pretty much any target can happen
+ // during match as a result of a tool update while clean might have to be
+ // performed during match to provide the mirror semantics).
+ //
+ // Note also that with rematches the assumption that in the match phase
+ // after matching the target we can MT-safely examine its state (such as
+ // its prerequisite_targets) no longer holds since such state could be
+ // modified during a rematch. As a result, if the target type specifies
+ // options for a certain operation, then you should not rely on this
+ // assumption for targets of this type during this operation.
+ //
+ // A rule that supports match options must also be prepared to handle the
+ // apply() call with new_options set to 0, for example, by using a
+ // minimally supported set of options instead. While 0 usually won't be
+ // passed by the match caller, this value is passed in the following
+ // circumstances:
+ //
+ // - match to resolve group (resolve_group())
+ // - match to resolve members (resolve_members())
+ // - match of ad hoc group via one of its ad hoc members
+ //
+ // Note that the 0 cur_options value is illegal.
+ //
+ // When it comes to match options specified for group members, the
+ // semantics differs between explicit and ad hoc groups. For explicit
+ // groups, the standard semantics described above applies and the group's
+ // reapply() function will be called both for the group itself as well as
+ // for its members and its the responsibility of the rule to decide what
+ // to do with the two sets of options (e.g., factor member's options into
+ // group's options, etc). For ad hoc groups, members are not matched to a
+ // rule but to the group_recipe directly (so there cannot be a call to
+ // reapply()). Currently, ad hoc group members cannot have options (more
+ // precisely, their options should always be ~0). An alternative semantics
+ // where the group rule is called to translate member options to group
+ // options may be implemented in the future (see match_impl_impl() for
+ // details).
+ //
+ // Note: match options are currently not exposed in Buildscript ad hoc
+ // recipes/rules (but are in C++).
+ //
+ static constexpr uint64_t all_options = ~uint64_t (0);
+
+ uint64_t cur_options;
+ uint64_t new_options;
+
+ atomic<uint64_t> cur_options_; // Implementation detail (see lock_impl()).
+
+ // The list of post hoc prerequisite targets for this target. Only not
+ // NULL in rule::apply_posthoc() and rule::reapply() functions and only if
+ // there are post hoc prerequisites. Primarily useful for adjusting match
+ // options for post hoc prerequisites (but can also be used to blank some
+ // of them out).
+ //
+ vector<context::posthoc_target::prerequisite_target>*
+ posthoc_prerequisite_targets;
// Auxiliary data storage.
//
@@ -165,7 +308,7 @@ namespace build2
? sizeof (string)
: sizeof (void*) * 4);
- std::aligned_storage<data_size>::type data_;
+ alignas (std::max_align_t) unsigned char data_[data_size];
void (*data_dtor_) (void*) = nullptr;
template <typename R,
@@ -212,14 +355,27 @@ namespace build2
// Implementation details.
//
+ // NOTE: see match_rule_impl() in algorithms.cxx if changing anything here.
+ //
public:
+ explicit
+ match_extra (bool l = true, bool f = false)
+ : locked (l), fallback (f),
+ cur_options (all_options), new_options (0),
+ posthoc_prerequisite_targets (nullptr) {}
+
void
- init (bool fallback);
+ reinit (bool fallback);
// Force freeing of the dynamically-allocated memory.
//
void
free ();
+
+ ~match_extra ()
+ {
+ clear_data ();
+ }
};
// Target.
@@ -232,6 +388,9 @@ namespace build2
// Note that the order of the enumerators is arranged so that their
// integral values indicate whether one "overrides" the other.
//
+ // We refer to the targets other than real and implied as
+ // dynamically-created or just dynamic.
+ //
// @@ We have cases (like pkg-config extraction) where it should probably be
// prereq_file rather than implied (also audit targets.insert<> calls).
//
@@ -239,14 +398,24 @@ namespace build2
// fuzzy: they feel more `real` than `implied`. Maybe introduce
// `synthesized` in-between?
//
+ // @@ There are also now dynamically-discovered targets (ad hoc group
+ // members; see depdb-dyndep --dyn-target) which currently end up
+ // with prereq_new.
+ //
enum class target_decl: uint8_t
{
- prereq_new, // Created from prerequisite (create_new_target()).
- prereq_file, // Created from prerequisite/file (search_existing_file ()).
- implied, // Target-spec variable assignment, implicitly-entered, etc.
- real // Real dependency declaration.
+ prereq_new = 1, // Created from prerequisite (create_new_target()).
+ prereq_file, // Created from prerequisite/file (search_existing_file()).
+ implied, // Target-spec variable assignment, implicitly-entered, etc.
+ real // Real dependency declaration.
};
+ inline bool
+ operator>= (target_decl l, target_decl r)
+ {
+ return static_cast<uint8_t> (l) >= static_cast<uint8_t> (r);
+ }
+
class LIBBUILD2_SYMEXPORT target
{
public:
@@ -331,9 +500,12 @@ namespace build2
//
// Note that the group-member link-up can happen anywhere between the
// member creation and rule matching so reading the group before the
- // member has been matched can be racy.
+ // member has been matched can be racy. However, once the member is linked
+ // up to the group, this relationship is immutable. As a result, one can
+ // atomically query the current value to see if already linked up (can be
+ // used as an optimization, to avoid deadlocks, etc).
//
- const target* group = nullptr;
+ relaxed_atomic<const target*> group = nullptr;
// What has been described above is an "explicit" group. That is, there is
// a dedicated target type that explicitly serves as a group and there is
@@ -366,7 +538,7 @@ namespace build2
// usually needed is to derive its path.
//
// - Unless declared, members are discovered lazily, they are only known
- // after the group's rule's apply() call.
+ // after the matching rule's apply() call.
//
// - Only declared members can be used as prerequisites but all can be
// used as targets (e.g., to set variables, etc).
@@ -376,6 +548,10 @@ namespace build2
// - Ad hoc group cannot have sub-groups (of any kind) though an ad hoc
// group can be a sub-group of an explicit group.
//
+ // - Member variable lookup skips the ad hoc group (since the group is the
+ // first member, this is normally what we want). But special semantics
+ // could be arranged; see var_backlink, for example.
+ //
// Note that ad hoc groups can be part of explicit groups. In a sense, we
// have a two-level grouping: an explicit group with its members each of
// which can be an ad hoc group. For example, lib{} contains libs{} which
@@ -384,6 +560,20 @@ namespace build2
// Use add_adhoc_member(), find_adhoc_member() from algorithms to manage
// ad hoc members.
//
+ // One conceptual issue we have with our ad hoc group implementation is
+ // that the behavior could be sensitive to the order in which the members
+ // are specified (due to the primary member logic). For example, unless we
+ // specify the header in the header/source group first, it will not be
+ // installed. Perhaps the solution is to synthesize a separate group
+ // target for the ad hoc members (with a special target type that rules
+ // like install could recognize). See also the variable lookup semantics.
+ // We could also probably support see_through via an attribute or some
+ // such. Or perhaps such cases should be handled through explicit groups
+ // and the ad hoc semantics is left to the non-see_through "primary
+ // targets with a bunch of subordinates" cases. In other words, if the
+ // members are "equal/symmetrical", then perhaps an explicit group is the
+ // correct approach.
+ //
const_ptr<target> adhoc_member = nullptr;
// Return true if this target is an ad hoc group (that is, its primary
@@ -409,7 +599,8 @@ namespace build2
public:
// Normally you should not call this function directly and rather use
- // resolve_members() from <libbuild2/algorithm.hxx>.
+ // resolve_members() from <libbuild2/algorithm.hxx>. Note that action
+ // is always inner.
//
virtual group_view
group_members (action) const;
@@ -511,7 +702,8 @@ namespace build2
prerequisites () const;
// Swap-in a list of prerequisites. Return false if unsuccessful (i.e.,
- // someone beat us to it). Note that it can be called on const target.
+ // someone beat us to it), in which case the passed prerequisites are
+ // not moved. Note that it can be called on const target.
//
bool
prerequisites (prerequisites_type&&) const;
@@ -570,8 +762,9 @@ namespace build2
lookup_type
operator[] (const string& name) const
{
- const variable* var (ctx.var_pool.find (name));
- return var != nullptr ? operator[] (*var) : lookup_type ();
+ const scope& bs (base_scope ());
+ const variable* var (bs.var_pool ().find (name));
+ return var != nullptr ? lookup (*var, &bs).first : lookup_type ();
}
// As above but also return the depth at which the value is found. The
@@ -583,22 +776,26 @@ namespace build2
// earlier. If no value is found, then the depth is set to ~0.
//
pair<lookup_type, size_t>
- lookup (const variable& var) const
+ lookup (const variable& var, const scope* bs = nullptr) const
{
- auto p (lookup_original (var));
+ auto p (lookup_original (var, false, bs));
return var.overrides == nullptr
? p
- : base_scope ().lookup_override (var, move (p), true);
+ : (bs != nullptr
+ ? *bs
+ : base_scope ()).lookup_override (var, move (p), true);
}
// If target_only is true, then only look in target and its target group
// without continuing in scopes. As an optimization, the caller can also
- // pass the base scope of the target, if already known.
+ // pass the base scope of the target, if already known. If locked is true,
+ // assume the targets mutex is locked.
//
pair<lookup_type, size_t>
lookup_original (const variable&,
bool target_only = false,
- const scope* bs = nullptr) const;
+ const scope* bs = nullptr,
+ bool locked = false) const;
// Return a value suitable for assignment. See scope for details.
//
@@ -608,11 +805,41 @@ namespace build2
value&
assign (const variable* var) {return vars.assign (var);} // For cached.
+ // Note: variable must already be entered.
+ //
+ value&
+ assign (const string& name)
+ {
+ return vars.assign (base_scope ().var_pool ().find (name));
+ }
+
// Return a value suitable for appending. See scope for details.
//
value&
- append (const variable&);
+ append (const variable&, const scope* bs = nullptr);
+ // Note: variable must already be entered.
+ //
+ value&
+ append (const string& name)
+ {
+ const scope& bs (base_scope ());
+ return append (*bs.var_pool ().find (name), &bs);
+ }
+
+ // As above but assume the targets mutex is locked.
+ //
+ value&
+ append_locked (const variable&, const scope* bs = nullptr);
+
+ // Note: variable must already be entered.
+ //
+ value&
+ append_locked (const string& name)
+ {
+ const scope& bs (base_scope ());
+ return append_locked (*bs.var_pool ().find (name), &bs);
+ }
// Rule hints.
//
@@ -661,6 +888,12 @@ namespace build2
static const size_t offset_executed = 5; // Recipe has been executed.
static const size_t offset_busy = 6; // Match/execute in progress.
+ // @@ PERF There is a lot of data below that is only needed for "output"
+ // as opposed to "source" targets (data pads, prerequisite_targets,
+ // etc). Maybe we should move this stuff to an optional extra (like we
+ // have for the root scope). Maybe we could even allocate it as part of
+ // the target's memory block or some such?
+
// Inner/outer operation state. See <libbuild2/action.hxx> for details.
//
class LIBBUILD2_SYMEXPORT opstate
@@ -676,7 +909,11 @@ namespace build2
//
mutable atomic_count dependents {0};
- // Match state storage between the match() and apply() calls.
+ // Match state storage between the match() and apply() calls with only
+ // the *_options members extended to reapply().
+ //
+ // Note: in reality, cur_options are used beyong (re)apply() as an
+ // implementation detail.
//
build2::match_extra match_extra;
@@ -700,6 +937,12 @@ namespace build2
//
target_state state;
+ // Set to true (only for the inner action) if this target has been
+ // matched but not executed as a result of the resolve_members() call.
+ // See also context::resolve_count.
+ //
+ bool resolve_counted;
+
// Rule-specific variables.
//
// The rule (for this action) has to be matched before these variables
@@ -732,13 +975,6 @@ namespace build2
return operator[] (*var);
}
- lookup_type
- operator[] (const string& name) const
- {
- const variable* var (target_->ctx.var_pool.find (name));
- return var != nullptr ? operator[] (*var) : lookup_type ();
- }
-
// As above but also return the depth at which the value is found. The
// depth is calculated by adding 1 for each test performed. So a value
// that is from the rule will have depth 1. That from the target - 2,
@@ -767,14 +1003,18 @@ namespace build2
value&
assign (const variable* var) {return vars.assign (var);} // For cached.
+ // Implementation details.
+ //
public:
explicit
- opstate (context& c): vars (c, false /* global */) {}
+ opstate (context& c): vars (variable_map::owner::target, &c) {}
private:
friend class target_set;
- const target* target_ = nullptr; // Back-pointer, set by target_set.
+ // Back-pointer, set by target_set along with vars.target_.
+ //
+ const target* target_ = nullptr;
};
action_state<opstate> state;
@@ -785,8 +1025,11 @@ namespace build2
// Return true if the target has been matched for the specified action.
// This function can only be called during the match or execute phases.
//
+ // If you need to observe something in the matched target (e.g., the
+ // matched rule or recipe), use memory_order_acquire.
+ //
bool
- matched (action) const;
+ matched (action, memory_order mo = memory_order_relaxed) const;
// This function can only be called during match if we have observed
// (synchronization-wise) that this target has been matched (i.e., the
@@ -815,6 +1058,12 @@ namespace build2
target_state
executed_state (action, bool fail = true) const;
+ // Return true if the state comes from the group. Target must be at least
+ // matched except for ad hoc group members during the execute phase.
+ //
+ bool
+ group_state (action) const;
+
protected:
// Version that should be used during match after the target has been
// matched for this action.
@@ -831,24 +1080,28 @@ namespace build2
target_state
executed_state_impl (action) const;
- // Return true if the state comes from the group. Target must be at least
- // matched.
- //
- bool
- group_state (action) const;
-
public:
// Targets to which prerequisites resolve for this action. Note that
// unlike prerequisite::target, these can be resolved to group members.
// NULL means the target should be skipped (or the rule may simply not add
// such a target to the list).
//
- // Note also that it is possible the target can vary from action to
- // action, just like recipes. We don't need to keep track of the action
- // here since the targets will be updated if the recipe is updated,
- // normally as part of rule::apply().
- //
- // Note that the recipe may modify this list.
+ // A rule should make sure that the target's prerequisite_targets are in
+ // the "canonical" form (that is, all the prerequisites that need to be
+ // executed are present with prerequisite_target::target pointing to the
+ // corresponding target). This is relied upon in a number of places,
+ // including in dump and to be able to pretend-execute the operation on
+ // this target without actually calling the recipe (see perform_execute(),
+ // resolve_members_impl() for background). Note that a rule should not
+ // store targets that are semantically prerequisites in an ad hoc manner
+ // (e.g., in match data) with a few well-known execeptions (see
+ // group_recipe and inner_recipe).
+ //
+ // Note that the recipe may modify this list during execute. Normally this
+ // would be just blanking out of ad hoc prerequisites, in which case check
+ // for ad hoc first and for not NULL second if accessing prerequisites of
+ // targets that you did not execute (see the library metadata protocol in
+ // cc for an example).
//
mutable action_state<build2::prerequisite_targets> prerequisite_targets;
@@ -966,13 +1219,28 @@ namespace build2
}
template <typename T>
- typename std::enable_if<!data_invocable<T>::value, T&>::type&
+ typename std::enable_if<!data_invocable<T>::value, T&>::type
data (action a) const
{
using V = typename std::remove_cv<T>::type;
return state[a].recipe.target<data_wrapper<V>> ()->d;
}
+ // Return NULL if there is no data or the data is of a different type.
+ //
+ template <typename T>
+ typename std::enable_if<!data_invocable<T>::value, T*>::type
+ try_data (action a) const
+ {
+ using V = typename std::remove_cv<T>::type;
+
+ if (auto& r = state[a].recipe)
+ if (auto* t = r.target<data_wrapper<V>> ())
+ return &t->d;
+
+ return nullptr;
+ }
+
// Note that in this case we don't strip const (the expectation is that we
// move the recipe in/out of data).
//
@@ -997,18 +1265,18 @@ namespace build2
}
template <typename T>
- typename std::enable_if<data_invocable<T>::value, T&>::type&
+ typename std::enable_if<data_invocable<T>::value, T&>::type
data (action a) const
{
return *state[a].recipe.target<T> ();
}
- void
- clear_data (action a) const
+ template <typename T>
+ typename std::enable_if<data_invocable<T>::value, T*>::type
+ try_data (action a) const
{
- const opstate& s (state[a]);
- s.recipe = nullptr;
- s.recipe_keep = false;
+ auto& r = state[a].recipe;
+ return r ? r.target<T> () : nullptr;
}
// Target type info and casting.
@@ -1114,7 +1382,7 @@ namespace build2
target (context& c, dir_path d, dir_path o, string n)
: ctx (c),
dir (move (d)), out (move (o)), name (move (n)),
- vars (c, false /* global */),
+ vars (*this, false /* shared */),
state (c)
{
dynamic_type = &static_type;
@@ -1388,9 +1656,7 @@ namespace build2
}
include_type
- include (action, const target&,
- const prerequisite_member&,
- lookup* = nullptr);
+ include (action, const target&, const prerequisite_member&, lookup* = nullptr);
// A "range" that presents a sequence of prerequisites (e.g., from
// group_prerequisites()) as a sequence of prerequisite_member's. For each
@@ -1566,8 +1832,7 @@ namespace build2
group_view g_;
size_t j_; // 1-based index, to support enter_group().
const target* k_; // Current member of ad hoc group or NULL.
- mutable typename std::aligned_storage<sizeof (value_type),
- alignof (value_type)>::type m_;
+ alignas (value_type) mutable unsigned char m_[sizeof (value_type)];
};
iterator
@@ -1821,6 +2086,10 @@ namespace build2
mutable shared_mutex mutex_;
map_type map_;
+
+#if 0
+ size_t buckets_ = 0;
+#endif
};
// Modification time-based target.
@@ -1834,7 +2103,7 @@ namespace build2
dynamic_type = &static_type;
}
- // Modification time is an "atomic cash". That is, it can be set at any
+ // Modification time is an "atomic cache". That is, it can be set at any
// time (including on a const instance) and we assume everything will be
// ok regardless of the order in which racing updates happen because we do
// not modify the external state (which is the source of timestemps) while
@@ -1867,8 +2136,7 @@ namespace build2
// If the mtime is unknown, then load it from the filesystem also caching
// the result.
//
- // Note: can only be called during executing and must not be used if the
- // target state is group.
+ // Note: must not be used if the target state is group.
//
timestamp
load_mtime (const path&) const;
@@ -1929,7 +2197,7 @@ namespace build2
// Target path. Must be absolute and normalized.
//
- // Target path is an "atomic consistent cash". That is, it can be set at
+ // Target path is an "atomic consistent cache". That is, it can be set at
// any time (including on a const instance) but any subsequent updates
// must set the same path. Or, in other words, once the path is set, it
// never changes.
@@ -2082,6 +2350,54 @@ namespace build2
static const target_type static_type;
};
+ // Mtime-based group target.
+ //
+ // Used to support explicit groups in buildfiles: can be derived from,
+ // populated with static members using the group{foo}<...> syntax, and
+ // matched with an ad hoc recipe/rule, including dynamic member extraction.
+ // Note that it is not see-through but a derived group can be made see-
+ // through via the [see_through] attribute.
+ //
+ // Note also that you shouldn't use it as a base for a custom group defined
+ // in C++, instead deriving from mtime_target directly and using a custom
+ // members layout more appropriate for the group's semantics. To put it
+ // another way, a group-based target should only be matched by an ad hoc
+ // recipe/rule (see match_rule_impl() in algorithm.cxx for details).
+ //
+ class LIBBUILD2_SYMEXPORT group: public mtime_target
+ {
+ public:
+ vector<reference_wrapper<const target>> static_members;
+
+ // Note: we expect no NULL entries in members.
+ //
+ vector<const target*> members; // Layout compatible with group_view.
+ action members_action; // Action on which members were resolved.
+ size_t members_on = 0; // Operation number on which members were resolved.
+ size_t members_static; // Number of static ones in members (always first).
+
+ void
+ reset_members (action a)
+ {
+ members.clear ();
+ members_action = a;
+ members_on = ctx.current_on;
+ members_static = 0;
+ }
+
+ virtual group_view
+ group_members (action) const override;
+
+ group (context& c, dir_path d, dir_path o, string n)
+ : mtime_target (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
// Alias target. It represents a list of targets (its prerequisites)
// as a single "name".
//
@@ -2208,6 +2524,22 @@ namespace build2
static const target_type static_type;
};
+ // This target type is primarily used for files mentioned in the `recipe`
+ // directive.
+ //
+ class LIBBUILD2_SYMEXPORT buildscript: public file
+ {
+ public:
+ buildscript (context& c, dir_path d, dir_path o, string n)
+ : file (c, move (d), move (o), move (n))
+ {
+ dynamic_type = &static_type;
+ }
+
+ public:
+ static const target_type static_type;
+ };
+
// Common documentation file target.
//
class LIBBUILD2_SYMEXPORT doc: public file
@@ -2270,7 +2602,7 @@ namespace build2
// in the generic install rule. @@ This is still a TODO.
//
// Note that handling subsections with man1..9{} is easy, we
- // simply specify the extension explicitly, e.g., man{foo.1p}.
+ // simply specify the extension explicitly, e.g., man1{foo.1p}.
//
class LIBBUILD2_SYMEXPORT man: public doc
{
@@ -2358,32 +2690,39 @@ namespace build2
string&, optional<string>&, const location&,
bool);
- // Target print functions.
+ // Target print functions (target_type::print).
//
// Target type uses the extension but it is fixed and there is no use
// printing it (e.g., man1{}).
//
- LIBBUILD2_SYMEXPORT void
- target_print_0_ext_verb (ostream&, const target_key&);
+ LIBBUILD2_SYMEXPORT bool
+ target_print_0_ext_verb (ostream&, const target_key&, bool);
// Target type uses the extension and there is normally no default so it
// should be printed (e.g., file{}).
//
- LIBBUILD2_SYMEXPORT void
- target_print_1_ext_verb (ostream&, const target_key&);
+ LIBBUILD2_SYMEXPORT bool
+ target_print_1_ext_verb (ostream&, const target_key&, bool);
+
+ // Target search functions (target_type::search).
+ //
// The default behavior, that is, look for an existing target in the
// prerequisite's directory scope.
//
+ // Note that this implementation assumes a target can only be found in the
+ // out tree (targets that can be in the src tree would normally use
+ // file_search() below).
+ //
LIBBUILD2_SYMEXPORT const target*
- target_search (const target&, const prerequisite_key&);
+ target_search (context&, const target*, const prerequisite_key&);
- // First look for an existing target as above. If not found, then look
- // for an existing file in the target-type-specific list of paths.
+ // First look for an existing target both in out and src. If not found, then
+ // look for an existing file in src.
//
LIBBUILD2_SYMEXPORT const target*
- file_search (const target&, const prerequisite_key&);
+ file_search (context&, const target*, const prerequisite_key&);
}
#include <libbuild2/target.ixx>
diff --git a/libbuild2/target.ixx b/libbuild2/target.ixx
index 5432f7c..39b81e7 100644
--- a/libbuild2/target.ixx
+++ b/libbuild2/target.ixx
@@ -136,10 +136,13 @@ namespace build2
// match_extra
//
inline void match_extra::
- init (bool f)
+ reinit (bool f)
{
clear_data ();
fallback = f;
+ cur_options = all_options;
+ new_options = 0;
+ posthoc_prerequisite_targets = nullptr;
}
inline void match_extra::
@@ -235,17 +238,30 @@ namespace build2
}
inline bool target::
- matched (action a) const
+ matched (action a, memory_order mo) const
{
assert (ctx.phase == run_phase::match ||
ctx.phase == run_phase::execute);
const opstate& s (state[a]);
- size_t c (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
+ size_t c (s.task_count.load (mo));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
if (ctx.phase == run_phase::match)
{
- return c == offset_applied;
+ // While it will normally be applied, it could also be already executed
+ // or being relocked to reapply match options (see lock_impl() for
+ // background).
+ //
+ // Note that we can't just do >= offset_applied since offset_busy can
+ // also mean it is being matched.
+ //
+ // See also matched_state_impl(), mtime() for similar logic.
+ //
+ return (c == (b + offset_applied) ||
+ c == (b + offset_executed) ||
+ (c >= (b + offset_busy) &&
+ s.match_extra.cur_options_.load (memory_order_relaxed) != 0));
}
else
{
@@ -253,13 +269,10 @@ namespace build2
// least offset_matched since it must have been "achieved" before the
// phase switch.
//
- return c >= offset_matched;
+ return c >= (b + offset_matched);
}
}
- LIBBUILD2_SYMEXPORT target_state
- group_action (action, const target&); // <libbuild2/algorithm.hxx>
-
inline bool target::
group_state (action a) const
{
@@ -273,6 +286,19 @@ namespace build2
// @@ Hm, I wonder why not just return s.recipe_group_action now that we
// cache it.
//
+
+ // This special hack allows us to do things like query an ad hoc member's
+ // state or mtime without matching/executing the member, only the group.
+ // Requiring matching/executing the member would be too burdensome and
+ // this feels harmless (ad hoc membership cannot be changed during the
+ // execute phase).
+ //
+ // Note: this test must come first since the member may not be matched and
+ // thus its state uninitialized.
+ //
+ if (ctx.phase == run_phase::execute && adhoc_group_member ())
+ return true;
+
const opstate& s (state[a]);
if (s.state == target_state::group)
@@ -293,17 +319,22 @@ namespace build2
// Note: already synchronized.
//
- size_t o (s.task_count.load (memory_order_relaxed) - ctx.count_base ());
+ size_t c (s.task_count.load (memory_order_relaxed));
+ size_t b (ctx.count_base ()); // Note: cannot do (c - b)!
- if (o == offset_tried)
+ if (c == (b + offset_tried))
return make_pair (false, target_state::unknown);
else
{
- // Normally applied but can also be already executed. Note that in the
- // latter case we are guaranteed to be synchronized since we are in the
- // match phase.
+ // The same semantics as in target::matched(). Note that in the executed
+ // case we are guaranteed to be synchronized since we are in the match
+ // phase.
//
- assert (o == offset_applied || o == offset_executed);
+ assert (c == (b + offset_applied) ||
+ c == (b + offset_executed) ||
+ (c >= (b + offset_busy) &&
+ s.match_extra.cur_options_.load (memory_order_relaxed) != 0));
+
return make_pair (true, (group_state (a) ? group->state[a] : s).state);
}
}
@@ -715,8 +746,13 @@ namespace build2
inline timestamp mtime_target::
load_mtime (const path& p) const
{
- assert (ctx.phase == run_phase::execute &&
- !group_state (action () /* inner */));
+ // We can only enforce "not group state" during the execute phase. During
+ // match (e.g., the target is being matched), we will just have to pay
+ // attention.
+ //
+ assert (ctx.phase == run_phase::match ||
+ (ctx.phase == run_phase::execute &&
+ !group_state (action () /* inner */)));
duration::rep r (mtime_.load (memory_order_consume));
if (r == timestamp_unknown_rep)
@@ -733,6 +769,8 @@ namespace build2
inline bool mtime_target::
newer (timestamp mt, target_state s) const
{
+ assert (s != target_state::unknown); // Should be executed.
+
timestamp mp (mtime ());
// What do we do if timestamps are equal? This can happen, for example,
diff --git a/libbuild2/test/common.cxx b/libbuild2/test/common.cxx
index 7fdb347..89f3dd6 100644
--- a/libbuild2/test/common.cxx
+++ b/libbuild2/test/common.cxx
@@ -150,8 +150,7 @@ namespace build2
t.name == n->value && // Name matches.
tt.name == n->type && // Target type matches.
d == n->dir && // Directory matches.
- (search_existing (*n, *root_) == &t ||
- search_existing (*n, *root_, d) == &t);
+ search_existing (*n, *root_) == &t;
if (r)
break;
@@ -198,8 +197,7 @@ namespace build2
t.name == n->value &&
tt.name == n->type &&
d == n->dir &&
- (search_existing (*n, *root_) == &t ||
- search_existing (*n, *root_, d) == &t);
+ search_existing (*n, *root_) == &t;
if (!r)
continue; // Not our target.
diff --git a/libbuild2/test/init.cxx b/libbuild2/test/init.cxx
index 539cdec..32548f4 100644
--- a/libbuild2/test/init.cxx
+++ b/libbuild2/test/init.cxx
@@ -23,6 +23,8 @@ namespace build2
{
namespace test
{
+ static const file_rule file_rule_ (true /* check_type */);
+
void
boot (scope& rs, const location&, module_boot_extra& extra)
{
@@ -30,15 +32,14 @@ namespace build2
l5 ([&]{trace << "for " << rs;});
- // Register our operations.
- //
- rs.insert_operation (test_id, op_test);
- rs.insert_operation (update_for_test_id, op_update_for_test);
-
// Enter module variables. Do it during boot in case they get assigned
// in bootstrap.build.
//
- auto& vp (rs.var_pool ());
+ // Most of the variables we enter are qualified so go straight for the
+ // public variable pool.
+ //
+ auto& vp (rs.var_pool (true /* public */));
+ auto& pvp (rs.var_pool ()); // For `test` and `for_test`.
common_data d {
@@ -69,7 +70,7 @@ namespace build2
// The test variable is a name which can be a path (with the
// true/false special values) or a target name.
//
- vp.insert<name> ("test", variable_visibility::target),
+ pvp.insert<name> ("test", variable_visibility::target),
vp.insert<strings> ("test.options"),
vp.insert<strings> ("test.arguments"),
@@ -111,12 +112,12 @@ namespace build2
// This one is used by other modules/rules.
//
- vp.insert<bool> ("for_test", variable_visibility::prereq);
+ pvp.insert<bool> ("for_test", variable_visibility::prereq);
// These are only used in testscript.
//
- vp.insert<strings> ("test.redirects");
- vp.insert<strings> ("test.cleanups");
+ vp.insert<cmdline> ("test.redirects");
+ vp.insert<cmdline> ("test.cleanups");
// Unless already set, default test.target to build.host. Note that it
// can still be overriden by the user, e.g., in root.build.
@@ -125,9 +126,14 @@ namespace build2
value& v (rs.assign (d.test_target));
if (!v || v.empty ())
- v = cast<target_triplet> (rs.ctx.global_scope["build.host"]);
+ v = *rs.ctx.build_host;
}
+ // Register our operations.
+ //
+ rs.insert_operation (test_id, op_test, &d.var_test);
+ rs.insert_operation (update_for_test_id, op_update_for_test, &d.var_test);
+
extra.set_module (new module (move (d)));
}
@@ -296,18 +302,18 @@ namespace build2
{
default_rule& dr (m);
- // Note: register for mtime_target to take priority over the fallback
- // rule below.
- //
- rs.insert_rule<target> (perform_test_id, "test", dr);
- rs.insert_rule<mtime_target> (perform_test_id, "test", dr);
- rs.insert_rule<alias> (perform_test_id, "test", dr);
+ rs.insert_rule<target> (perform_test_id, "test", dr);
+ rs.insert_rule<alias> (perform_test_id, "test", dr);
// Register the fallback file rule for the update-for-test operation,
// similar to update.
//
- rs.global_scope ().insert_rule<mtime_target> (
- perform_test_id, "test.file", file_rule::instance);
+ // Note: use target instead of anything more specific (such as
+ // mtime_target) in order not to take precedence over the "test" rule
+ // above.
+ //
+ rs.global_scope ().insert_rule<target> (
+ perform_test_id, "test.file", file_rule_);
}
return true;
diff --git a/libbuild2/test/operation.cxx b/libbuild2/test/operation.cxx
index b7ec357..2535adb 100644
--- a/libbuild2/test/operation.cxx
+++ b/libbuild2/test/operation.cxx
@@ -17,14 +17,8 @@ namespace build2
namespace test
{
static operation_id
- test_pre (context&,
- const values& params,
- meta_operation_id mo,
- const location& l)
+ pre_test (context&, const values&, meta_operation_id mo, const location&)
{
- if (!params.empty ())
- fail (l) << "unexpected parameters for operation test";
-
// Run update as a pre-operation, unless we are disfiguring.
//
return mo != disfigure_id ? update_id : 0;
@@ -65,13 +59,14 @@ namespace build2
0,
"test",
"test",
- "test",
"testing",
"tested",
"has nothing to test", // We cannot "be tested".
execution_mode::first,
1 /* concurrency */,
- &test_pre,
+ &pre_test,
+ nullptr,
+ nullptr,
nullptr,
nullptr,
&adhoc_apply
@@ -83,7 +78,6 @@ namespace build2
update_id, // Note: not update_for_test_id.
test_id,
op_update.name,
- op_update.var_name,
op_update.name_do,
op_update.name_doing,
op_update.name_did,
@@ -92,6 +86,8 @@ namespace build2
op_update.concurrency,
op_update.pre_operation,
op_update.post_operation,
+ op_update.operation_pre,
+ op_update.operation_post,
op_update.adhoc_match,
op_update.adhoc_apply
};
diff --git a/libbuild2/test/rule.cxx b/libbuild2/test/rule.cxx
index 142bded..28eb35b 100644
--- a/libbuild2/test/rule.cxx
+++ b/libbuild2/test/rule.cxx
@@ -70,7 +70,7 @@ namespace build2
{
// Remember that we are called twice: first during update for test
// (pre-operation) and then during test. During the former, we rely on
- // the normall update rule to resolve the group members. During the
+ // the normal update rule to resolve the group members. During the
// latter, there will be no rule to do this but the group will already
// have been resolved by the pre-operation.
//
@@ -540,11 +540,19 @@ namespace build2
if (verb)
{
- diag_record dr (text);
- dr << "test " << ts;
-
- if (!t.is_a<alias> ())
- dr << ' ' << t;
+ // If the target is an alias, then testscript itself is the
+ // target.
+ //
+ if (t.is_a<alias> ())
+ print_diag ("test", ts);
+ else
+ {
+ // In this case the test is really a combination of the target
+ // and testscript and using "->" feels off. Also, let's list the
+ // testscript after the target even though it's a source.
+ //
+ print_diag ("test", t, ts, "+");
+ }
}
res.push_back (ctx.dry_run
@@ -555,22 +563,22 @@ namespace build2
{
scope_state& r (res.back ());
- if (!ctx.sched.async (ctx.count_busy (),
- t[a].task_count,
- [this] (const diag_frame* ds,
- scope_state& r,
- const target& t,
- const testscript& ts,
- const dir_path& wd)
- {
- diag_frame::stack_guard dsg (ds);
- r = perform_script_impl (t, ts, wd, *this);
- },
- diag_frame::stack (),
- ref (r),
- cref (t),
- cref (ts),
- cref (wd)))
+ if (!ctx.sched->async (ctx.count_busy (),
+ t[a].task_count,
+ [this] (const diag_frame* ds,
+ scope_state& r,
+ const target& t,
+ const testscript& ts,
+ const dir_path& wd)
+ {
+ diag_frame::stack_guard dsg (ds);
+ r = perform_script_impl (t, ts, wd, *this);
+ },
+ diag_frame::stack (),
+ ref (r),
+ cref (t),
+ cref (ts),
+ cref (wd)))
{
// Executed synchronously. If failed and we were not asked to
// keep going, bail out.
@@ -641,25 +649,50 @@ namespace build2
// Stack-allocated linked list of information about the running pipeline
// processes.
//
+ // Note: constructed incrementally.
+ //
struct pipe_process
{
- process& proc;
- const char* prog; // Only for diagnostics.
+ // Initially NULL. Set to the address of the process object when it is
+ // created. Reset back to NULL when the process is executed and its exit
+ // status is collected (see complete_pipe() for details).
+ //
+ process* proc = nullptr;
+
+ char const** args; // Only for diagnostics.
+
+ diag_buffer dbuf;
+ bool force_dbuf;
// True if this process has been terminated.
//
bool terminated = false;
- pipe_process* prev; // NULL for the left-most program.
+ // True if this process has been terminated but we failed to read out
+ // its stderr stream in the reasonable timeframe (2 seconds) after the
+ // termination.
+ //
+ // Note that this may happen if there is a still running child process
+ // of the terminated process which has inherited the parent's stderr
+ // file descriptor.
+ //
+ bool unread_stderr = false;
- pipe_process (process& p, const char* g, pipe_process* r)
- : proc (p), prog (g), prev (r) {}
+ pipe_process* prev; // NULL for the left-most program.
+ pipe_process* next; // Left-most program for the right-most program.
+
+ pipe_process (context& x,
+ char const** as,
+ bool fb,
+ pipe_process* p,
+ pipe_process* f)
+ : args (as), dbuf (x), force_dbuf (fb), prev (p), next (f) {}
};
- static bool
+ static void
run_test (const target& t,
- diag_record& dr,
char const** args,
+ int ofd,
const optional<timestamp>& deadline,
pipe_process* prev = nullptr)
{
@@ -669,14 +702,28 @@ namespace build2
for (next++; *next != nullptr; next++) ;
next++;
+ bool last (*next == nullptr);
+
// Redirect stdout to a pipe unless we are last.
//
- int out (*next != nullptr ? -1 : 1);
- bool pr;
+ int out (last ? ofd : -1);
- // Absent if the process misses the deadline.
+ // Propagate the pointer to the left-most program.
//
- optional<process_exit> pe;
+ // Also force diag buffering for the trailing diff process, so its
+ // stderr is never printed if the test program fails (see
+ // complete_pipe() for details).
+ //
+ pipe_process pp (t.ctx,
+ args,
+ last && ofd == 2,
+ prev,
+ prev != nullptr ? prev->next : nullptr);
+
+ if (prev != nullptr)
+ prev->next = &pp;
+ else
+ pp.next = &pp; // Points to itself.
try
{
@@ -707,11 +754,11 @@ namespace build2
{
try
{
- p->proc.term ();
+ p->proc->term ();
}
catch (const process_error& e)
{
- dr << fail << "unable to terminate " << p->prog << ": " << e;
+ dr << fail << "unable to terminate " << p->args[0] << ": " << e;
}
p->terminated = true;
@@ -724,7 +771,7 @@ namespace build2
for (pipe_process* p (pp); p != nullptr; p = p->prev)
{
- process& pr (p->proc);
+ process& pr (*p->proc);
try
{
@@ -736,26 +783,310 @@ namespace build2
}
catch (const process_error& e)
{
- dr << fail << "unable to wait/kill " << p->prog << ": " << e;
+ dr << fail << "unable to wait/kill " << p->args[0] << ": " << e;
+ }
+ }
+ };
+
+ // Read out all the pipeline's buffered stderr streams watching for
+ // the deadline, if specified. If the deadline is reached, then
+ // terminate the whole pipeline, move the deadline by another 2
+ // seconds, and continue reading.
+ //
+ // Note that we assume that this timeout increment is normally
+ // sufficient to read out the buffered data written by the already
+ // terminated processes. If, however, that's not the case (see
+ // pipe_process for the possible reasons), then we just set
+ // unread_stderr flag to true for such processes and bail out.
+ //
+ // Also note that this implementation is inspired by the
+ // script::run_pipe::read_pipe() lambda.
+ //
+ auto read_pipe = [&pp, &deadline, &term_pipe] ()
+ {
+ fdselect_set fds;
+ for (pipe_process* p (&pp); p != nullptr; p = p->prev)
+ {
+ diag_buffer& b (p->dbuf);
+
+ if (b.is.is_open ())
+ fds.emplace_back (b.is.fd (), p);
+ }
+
+ optional<timestamp> dl (deadline);
+ bool terminated (false);
+
+ for (size_t unread (fds.size ()); unread != 0;)
+ {
+ try
+ {
+ // If a deadline is specified, then pass the timeout to
+ // fdselect().
+ //
+ if (dl)
+ {
+ timestamp now (system_clock::now ());
+
+ if (*dl <= now || ifdselect (fds, *dl - now) == 0)
+ {
+ if (!terminated)
+ {
+ term_pipe (&pp);
+ terminated = true;
+
+ dl = system_clock::now () + chrono::seconds (2);
+ continue;
+ }
+ else
+ {
+ for (fdselect_state& s: fds)
+ {
+ if (s.fd != nullfd)
+ {
+ pipe_process* p (static_cast<pipe_process*> (s.data));
+
+ p->unread_stderr = true;
+
+ // Let's also close the stderr stream not to confuse
+ // diag_buffer::close() (see script::read() for
+ // details).
+ //
+ try
+ {
+ p->dbuf.is.close ();
+ }
+ catch (const io_error&) {}
+ }
+ }
+
+ break;
+ }
+ }
+ }
+ else
+ ifdselect (fds);
+
+ for (fdselect_state& s: fds)
+ {
+ if (s.ready)
+ {
+ pipe_process* p (static_cast<pipe_process*> (s.data));
+
+ if (!p->dbuf.read (p->force_dbuf))
+ {
+ s.fd = nullfd;
+ --unread;
+ }
+ }
+ }
+ }
+ catch (const io_error& e)
+ {
+ fail << "io error reading pipeline streams: " << e;
+ }
+ }
+ };
+
+ // Wait for the pipeline processes to complete, watching for the
+ // deadline, if specified. If the deadline is reached, then terminate
+ // the whole pipeline.
+ //
+ // Note: must be called after read_pipe().
+ //
+ auto wait_pipe = [&pp, &deadline, &timed_wait, &term_pipe] ()
+ {
+ for (pipe_process* p (&pp); p != nullptr; p = p->prev)
+ {
+ try
+ {
+ if (!deadline)
+ p->proc->wait ();
+ else if (!timed_wait (*p->proc, *deadline))
+ term_pipe (p);
+ }
+ catch (const process_error& e)
+ {
+ fail << "unable to wait " << p->args[0] << ": " << e;
+ }
+ }
+ };
+
+ // Iterate over the pipeline processes left to right, printing their
+ // stderr if buffered and issuing the diagnostics if the exit code is
+ // not available (terminated abnormally or due to a deadline), is
+ // non-zero, or stderr was not fully read. Afterwards, fail if any
+ // such faulty processes were encountered.
+ //
+ // Note that we only issue diagnostics for the first failure.
+ //
+ // Note: must be called after wait_pipe() and only once.
+ //
+ auto complete_pipe = [&pp, &t] ()
+ {
+ pipe_process* b (pp.next); // Left-most program.
+ assert (b != nullptr); // The lambda can only be called once.
+ pp.next = nullptr;
+
+ bool fail (false);
+ for (pipe_process* p (b); p != nullptr; p = p->next)
+ {
+ assert (p->proc != nullptr); // The lambda can only be called once.
+
+ // Collect the exit status, if present.
+ //
+ // Absent if the process misses the deadline.
+ //
+ optional<process_exit> pe;
+
+ const process& pr (*p->proc);
+
+#ifndef _WIN32
+ if (!(p->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->signal () == SIGTERM))
+#else
+ if (!(p->terminated &&
+ !pr.exit->normal () &&
+ pr.exit->status == DBG_TERMINATE_PROCESS))
+#endif
+ pe = pr.exit;
+
+ p->proc = nullptr;
+
+ // Verify the exit status and issue the diagnostics on failure.
+ //
+ // Note that we only issue diagnostics for the first failure but
+ // continue iterating to reset process pointers to NULL. Also note
+ // that if the test program fails, then the potential diff's
+ // diagnostics is suppressed since it is always buffered.
+ //
+ if (!fail)
+ {
+ diag_record dr;
+
+ // Note that there can be a race, so that the process we have
+ // terminated due to reaching the deadline has in fact exited
+ // normally. Thus, the 'unread stderr' situation can also happen
+ // to a successfully terminated process. If that's the case, we
+ // report this problem as the main error and the secondary error
+ // otherwise.
+ //
+ if (!pe ||
+ !pe->normal () ||
+ pe->code () != 0 ||
+ p->unread_stderr)
+ {
+ fail = true;
+
+ dr << error << "test " << t << " failed" // Multi test: test 1.
+ << error << "process " << p->args[0] << ' ';
+
+ if (!pe)
+ {
+ dr << "terminated: execution timeout expired";
+
+ if (p->unread_stderr)
+ dr << error << "stderr not closed after exit";
+ }
+ else if (!pe->normal () || pe->code () != 0)
+ {
+ dr << *pe;
+
+ if (p->unread_stderr)
+ dr << error << "stderr not closed after exit";
+ }
+ else
+ {
+ assert (p->unread_stderr);
+
+ dr << "stderr not closed after exit";
+ }
+
+ if (verb == 1)
+ {
+ dr << info << "test command line: ";
+
+ for (pipe_process* p (b); p != nullptr; p = p->next)
+ {
+ if (p != b)
+ dr << " | ";
+
+ print_process (dr, p->args);
+ }
+ }
+ }
+
+ // Now print the buffered stderr, if present, and/or flush the
+ // diagnostics, if issued.
+ //
+ if (p->dbuf.is_open ())
+ p->dbuf.close (move (dr));
}
}
+
+ if (fail)
+ throw failed ();
};
- process p (prev == nullptr
- ? process (args, 0, out) // First process.
- : process (args, prev->proc, out)); // Next process.
+ process p;
+ {
+ process::pipe ep;
+ {
+ fdpipe p;
+ if (diag_buffer::pipe (t.ctx, pp.force_dbuf) == -1) // Buffering?
+ {
+ try
+ {
+ p = fdopen_pipe ();
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to redirect stderr: " << e;
+ }
+
+ // Note that we must return non-owning fd to our end of the pipe
+ // (see the process class for details).
+ //
+ ep = process::pipe (p.in.get (), move (p.out));
+ }
+ else
+ ep = process::pipe (-1, 2);
+
+ // Note that we must open the diag buffer regardless of the
+ // diag_buffer::pipe() result.
+ //
+ pp.dbuf.open (args[0], move (p.in), fdstream_mode::non_blocking);
+ }
+
+ p = (prev == nullptr
+ ? process (args, 0, out, move (ep)) // First process.
+ : process (args, *prev->proc, out, move (ep))); // Next process.
+ }
- pipe_process pp (p, args[0], prev);
+ pp.proc = &p;
- // If the deadline is specified, then make sure we don't miss it
- // waiting indefinitely in the process destructor on the right-hand
- // part of the pipe failure.
+ // If the right-hand part of the pipe fails, then make sure we don't
+ // wait indefinitely in the process destructor if the deadline is
+ // specified or just because a process is blocked on stderr.
//
- auto g (make_exception_guard ([&deadline, &pp, &term_pipe] ()
+ auto g (make_exception_guard ([&pp, &term_pipe] ()
{
- if (deadline)
+ if (pp.proc != nullptr)
try
{
+ // Close all buffered pipeline stderr streams ignoring io_error
+ // exceptions.
+ //
+ for (pipe_process* p (&pp); p != nullptr; p = p->prev)
+ {
+ if (p->dbuf.is.is_open ())
+ try
+ {
+ p->dbuf.is.close();
+ }
+ catch (const io_error&) {}
+ }
+
term_pipe (&pp);
}
catch (const failed&)
@@ -764,25 +1095,17 @@ namespace build2
}
}));
- pr = *next == nullptr || run_test (t, dr, next, deadline, &pp);
-
- if (!deadline)
- p.wait ();
- else if (!timed_wait (p, *deadline))
- term_pipe (&pp);
+ if (!last)
+ run_test (t, next, ofd, deadline, &pp);
- assert (p.exit);
-
-#ifndef _WIN32
- if (!(pp.terminated &&
- !p.exit->normal () &&
- p.exit->signal () == SIGTERM))
-#else
- if (!(pp.terminated &&
- !p.exit->normal () &&
- p.exit->status == DBG_TERMINATE_PROCESS))
-#endif
- pe = *p.exit;
+ // Complete the pipeline execution, if not done yet.
+ //
+ if (pp.proc != nullptr)
+ {
+ read_pipe ();
+ wait_pipe ();
+ complete_pipe ();
+ }
}
catch (const process_error& e)
{
@@ -793,24 +1116,6 @@ namespace build2
throw failed ();
}
-
- bool wr (pe && pe->normal () && pe->code () == 0);
-
- if (!wr)
- {
- if (pr) // First failure?
- dr << fail << "test " << t << " failed"; // Multi test: test 1.
-
- dr << error;
- print_process (dr, args);
-
- if (pe)
- dr << " " << *pe;
- else
- dr << " terminated: execution timeout expired";
- }
-
- return pr && wr;
}
target_state rule::
@@ -856,7 +1161,7 @@ namespace build2
fail << "invalid test executable override: '" << *n << "'";
else
{
- // Must be a target name.
+ // Must be a target name. Could be from src (e.g., a script).
//
// @@ OUT: what if this is a @-qualified pair of names?
//
@@ -986,10 +1291,19 @@ namespace build2
// Do we have stdout?
//
+ // If we do, then match it using diff. Also redirect the diff's stdout
+ // to stderr, similar to how we do that for the script (see
+ // script::check_output() for the reasoning). That will also prevent the
+ // diff's output from interleaving with any other output.
+ //
path dp ("diff");
process_path dpp;
+ int ofd (1);
+
if (pass_n != pts_n && pts[pass_n + 1] != nullptr)
{
+ ofd = 2;
+
const file& ot (pts[pass_n + 1]->as<file> ());
const path& op (ot.path ());
assert (!op.empty ()); // Should have been assigned by update.
@@ -1035,25 +1349,29 @@ namespace build2
args.push_back (nullptr); // Second.
if (verb >= 2)
- print_process (args);
+ print_process (args); // Note: prints the whole pipeline.
else if (verb)
- text << "test " << tt;
+ print_diag ("test", tt);
if (!ctx.dry_run)
{
- diag_record dr;
- pipe_process pp (cat, "cat", nullptr);
-
- if (!run_test (tt,
- dr,
- args.data () + (sin ? 3 : 0), // Skip cat.
- test_deadline (tt),
- sin ? &pp : nullptr))
+ pipe_process pp (tt.ctx,
+ args.data (), // Note: only cat's args are considered.
+ false /* force_dbuf */,
+ nullptr /* prev */,
+ nullptr /* next */);
+
+ if (sin)
{
- dr << info << "test command line: ";
- print_process (dr, args);
- dr << endf; // return
+ pp.next = &pp; // Points to itself.
+ pp.proc = &cat;
}
+
+ run_test (tt,
+ args.data () + (sin ? 3 : 0), // Skip cat.
+ ofd,
+ test_deadline (tt),
+ sin ? &pp : nullptr);
}
return target_state::changed;
diff --git a/libbuild2/test/script/lexer+for-loop.test.testscript b/libbuild2/test/script/lexer+for-loop.test.testscript
new file mode 100644
index 0000000..fcd12f7
--- /dev/null
+++ b/libbuild2/test/script/lexer+for-loop.test.testscript
@@ -0,0 +1,231 @@
+# file : libbuild2/test/script/lexer+for-loop.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+test.arguments = for-loop
+
+: semi
+{
+ : immediate
+ :
+ $* <"cmd;" >>EOO
+ 'cmd'
+ ;
+ <newline>
+ EOO
+
+ : separated
+ :
+ $* <"cmd ;" >>EOO
+ 'cmd'
+ ;
+ <newline>
+ EOO
+
+ : only
+ :
+ $* <";" >>EOO
+ ;
+ <newline>
+ EOO
+}
+
+: colon
+:
+{
+ : immediate
+ :
+ $* <"cmd: dsc" >>EOO
+ 'cmd'
+ :
+ 'dsc'
+ <newline>
+ EOO
+
+ : separated
+ :
+ $* <"cmd :dsc" >>EOO
+ 'cmd'
+ :
+ 'dsc'
+ <newline>
+ EOO
+
+ : only
+ :
+ $* <":" >>EOO
+ :
+ <newline>
+ EOO
+}
+
+: redirect
+:
+{
+ : pass
+ :
+ $* <"cmd <| 1>|" >>EOO
+ 'cmd'
+ <|
+ '1'
+ >|
+ <newline>
+ EOO
+
+ : null
+ :
+ $* <"cmd <- 1>-" >>EOO
+ 'cmd'
+ <-
+ '1'
+ >-
+ <newline>
+ EOO
+
+ : trace
+ :
+ $* <"cmd 1>!" >>EOO
+ 'cmd'
+ '1'
+ >!
+ <newline>
+ EOO
+
+ : merge
+ :
+ $* <"cmd 1>&2" >>EOO
+ 'cmd'
+ '1'
+ >&
+ '2'
+ <newline>
+ EOO
+
+ : str
+ :
+ $* <"cmd <a 1>b" >>EOO
+ 'cmd'
+ <
+ 'a'
+ '1'
+ >
+ 'b'
+ <newline>
+ EOO
+
+ : str-nn
+ :
+ $* <"cmd <:a 1>:b" >>EOO
+ 'cmd'
+ <:
+ 'a'
+ '1'
+ >:
+ 'b'
+ <newline>
+ EOO
+
+ : doc
+ :
+ $* <"cmd <<EOI 1>>EOO" >>EOO
+ 'cmd'
+ <<
+ 'EOI'
+ '1'
+ >>
+ 'EOO'
+ <newline>
+ EOO
+
+ : doc-nn
+ :
+ $* <"cmd <<:EOI 1>>:EOO" >>EOO
+ 'cmd'
+ <<:
+ 'EOI'
+ '1'
+ >>:
+ 'EOO'
+ <newline>
+ EOO
+
+ : file-cmp
+ :
+ $* <"cmd <<<in >>>out 2>>>err" >>EOO
+ 'cmd'
+ <<<
+ 'in'
+ >>>
+ 'out'
+ '2'
+ >>>
+ 'err'
+ <newline>
+ EOO
+
+ : file-write
+ :
+ $* <"cmd >=out 2>+err" >>EOO
+ 'cmd'
+ >=
+ 'out'
+ '2'
+ >+
+ 'err'
+ <newline>
+ EOO
+}
+
+: cleanup
+:
+{
+ : always
+ :
+ $* <"cmd &file" >>EOO
+ 'cmd'
+ &
+ 'file'
+ <newline>
+ EOO
+
+ : maybe
+ :
+ $* <"cmd &?file" >>EOO
+ 'cmd'
+ &?
+ 'file'
+ <newline>
+ EOO
+
+ : never
+ :
+ $* <"cmd &!file" >>EOO
+ 'cmd'
+ &!
+ 'file'
+ <newline>
+ EOO
+}
+
+: for
+:
+{
+ : form-1
+ :
+ $* <"for x: a" >>EOO
+ 'for'
+ 'x'
+ :
+ 'a'
+ <newline>
+ EOO
+
+ : form-3
+ :
+ $* <"for <<<a x" >>EOO
+ 'for'
+ <<<
+ 'a'
+ 'x'
+ <newline>
+ EOO
+}
diff --git a/libbuild2/test/script/lexer.cxx b/libbuild2/test/script/lexer.cxx
index f9c8ac6..aec91fc 100644
--- a/libbuild2/test/script/lexer.cxx
+++ b/libbuild2/test/script/lexer.cxx
@@ -34,13 +34,16 @@ namespace build2
bool q (true); // quotes
if (!esc)
- {
- assert (!state_.empty ());
- esc = state_.top ().escapes;
- }
+ esc = current_state ().escapes;
switch (m)
{
+ case lexer_mode::for_loop:
+ {
+ // Leading tokens of the for-loop. Like command_line but also
+ // recognizes lsbrace like value.
+ }
+ // Fall through.
case lexer_mode::command_line:
{
s1 = ":;=!|&<> $(#\t\n";
@@ -107,7 +110,7 @@ namespace build2
}
assert (ps == '\0');
- state_.push (
+ mode_impl (
state {m, data, nullopt, false, false, ps, s, n, q, *esc, s1, s2});
}
@@ -116,12 +119,13 @@ namespace build2
{
token r;
- switch (state_.top ().mode)
+ switch (mode ())
{
case lexer_mode::command_line:
case lexer_mode::first_token:
case lexer_mode::second_token:
case lexer_mode::variable_line:
+ case lexer_mode::for_loop:
r = next_line ();
break;
case lexer_mode::description_line:
@@ -144,7 +148,7 @@ namespace build2
xchar c (get ());
uint64_t ln (c.line), cn (c.column);
- state st (state_.top ()); // Make copy (see first/second_token).
+ state st (current_state ()); // Make copy (see first/second_token).
lexer_mode m (st.mode);
auto make_token = [&sep, ln, cn] (type t)
@@ -157,9 +161,10 @@ namespace build2
//
if (st.lsbrace)
{
- assert (m == lexer_mode::variable_line);
+ assert (m == lexer_mode::variable_line ||
+ m == lexer_mode::for_loop);
- state_.top ().lsbrace = false; // Note: st is a copy.
+ current_state ().lsbrace = false; // Note: st is a copy.
if (c == '[' && (!st.lsbrace_unsep || !sep))
return make_token (type::lsbrace);
@@ -172,7 +177,7 @@ namespace build2
// we push any new mode (e.g., double quote).
//
if (m == lexer_mode::first_token || m == lexer_mode::second_token)
- state_.pop ();
+ expire_mode ();
// NOTE: remember to update mode() if adding new special characters.
@@ -183,7 +188,7 @@ namespace build2
// Expire variable value mode at the end of the line.
//
if (m == lexer_mode::variable_line)
- state_.pop ();
+ expire_mode ();
sep = true; // Treat newline as always separated.
return make_token (type::newline);
@@ -197,10 +202,11 @@ namespace build2
// Line separators.
//
- if (m == lexer_mode::command_line ||
- m == lexer_mode::first_token ||
- m == lexer_mode::second_token ||
- m == lexer_mode::variable_line)
+ if (m == lexer_mode::command_line ||
+ m == lexer_mode::first_token ||
+ m == lexer_mode::second_token ||
+ m == lexer_mode::variable_line ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -210,7 +216,8 @@ namespace build2
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -222,7 +229,8 @@ namespace build2
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
switch (c)
{
@@ -244,7 +252,8 @@ namespace build2
//
if (m == lexer_mode::command_line ||
m == lexer_mode::first_token ||
- m == lexer_mode::second_token)
+ m == lexer_mode::second_token ||
+ m == lexer_mode::for_loop)
{
if (optional<token> t = next_cmd_op (c, sep))
return move (*t);
@@ -310,7 +319,7 @@ namespace build2
if (c == '\n')
{
get ();
- state_.pop (); // Expire the description mode.
+ expire_mode (); // Expire the description mode.
return token (type::newline, true, ln, cn, token_printer);
}
@@ -330,15 +339,17 @@ namespace build2
}
token lexer::
- word (state st, bool sep)
+ word (const state& st, bool sep)
{
- lexer_mode m (st.mode);
+ lexer_mode m (st.mode); // Save.
token r (base_lexer::word (st, sep));
if (m == lexer_mode::variable)
{
- if (r.value.size () == 1 && digit (r.value[0])) // $N
+ if (r.type == type::word &&
+ r.value.size () == 1 &&
+ digit (r.value[0])) // $N
{
xchar c (peek ());
diff --git a/libbuild2/test/script/lexer.hxx b/libbuild2/test/script/lexer.hxx
index 452e794..39b950a 100644
--- a/libbuild2/test/script/lexer.hxx
+++ b/libbuild2/test/script/lexer.hxx
@@ -24,10 +24,11 @@ namespace build2
enum
{
command_line = base_type::value_next,
- first_token, // Expires at the end of the token.
- second_token, // Expires at the end of the token.
- variable_line, // Expires at the end of the line.
- description_line // Expires at the end of the line.
+ first_token, // Expires at the end of the token.
+ second_token, // Expires at the end of the token.
+ variable_line, // Expires at the end of the line.
+ description_line, // Expires at the end of the line.
+ for_loop // Used for sensing the for-loop leading tokens.
};
lexer_mode () = default;
@@ -67,6 +68,8 @@ namespace build2
static redirect_aliases_type redirect_aliases;
private:
+ using build2::script::lexer::mode; // Getter.
+
token
next_line ();
@@ -74,7 +77,7 @@ namespace build2
next_description ();
virtual token
- word (state, bool) override;
+ word (const state&, bool) override;
};
}
}
diff --git a/libbuild2/test/script/lexer.test.cxx b/libbuild2/test/script/lexer.test.cxx
index 76f102d..ef3ce4d 100644
--- a/libbuild2/test/script/lexer.test.cxx
+++ b/libbuild2/test/script/lexer.test.cxx
@@ -36,6 +36,7 @@ namespace build2
else if (s == "variable-line") m = lexer_mode::variable_line;
else if (s == "description-line") m = lexer_mode::description_line;
else if (s == "variable") m = lexer_mode::variable;
+ else if (s == "for-loop") m = lexer_mode::for_loop;
else assert (false);
}
diff --git a/libbuild2/test/script/parser+command-if.test.testscript b/libbuild2/test/script/parser+command-if.test.testscript
index 0b72b4a..9e223dd 100644
--- a/libbuild2/test/script/parser+command-if.test.testscript
+++ b/libbuild2/test/script/parser+command-if.test.testscript
@@ -315,6 +315,7 @@
}
: end
+:
{
: without-if
:
@@ -322,7 +323,7 @@
cmd
end
EOI
- testscript:2:1: error: 'end' without preceding 'if'
+ testscript:2:1: error: 'end' without preceding 'if', 'for', or 'while'
EOE
: without-if-semi
@@ -331,10 +332,11 @@
cmd;
end
EOI
- testscript:2:1: error: 'end' without preceding 'if'
+ testscript:2:1: error: 'end' without preceding 'if', 'for', or 'while'
EOE
: before
+ :
{
: semi
:
diff --git a/libbuild2/test/script/parser+command-re-parse.test.testscript b/libbuild2/test/script/parser+command-re-parse.test.testscript
index 84465b3..5a082eb 100644
--- a/libbuild2/test/script/parser+command-re-parse.test.testscript
+++ b/libbuild2/test/script/parser+command-re-parse.test.testscript
@@ -4,7 +4,7 @@
: double-quote
:
$* <<EOI >>EOO
-x = cmd \">-\" "'<-'"
+x = [cmdline] cmd \">-\" "'<-'"
$x
EOI
cmd '>-' '<-'
diff --git a/libbuild2/test/script/parser+description.test.testscript b/libbuild2/test/script/parser+description.test.testscript
index cee540f..f656b7d 100644
--- a/libbuild2/test/script/parser+description.test.testscript
+++ b/libbuild2/test/script/parser+description.test.testscript
@@ -313,7 +313,7 @@
x = y
end
EOI
- testscript:2:1: error: description before/after setup/teardown variable-if
+ testscript:2:1: error: description before/after setup/teardown variable-only 'if'
EOE
: var-if-after
@@ -323,7 +323,7 @@
x = y
end : foo
EOI
- testscript:1:1: error: description before/after setup/teardown variable-if
+ testscript:1:1: error: description before/after setup/teardown variable-only 'if'
EOE
: test
diff --git a/libbuild2/test/script/parser+expansion.test.testscript b/libbuild2/test/script/parser+expansion.test.testscript
index 77a7d6d..c31b0ad 100644
--- a/libbuild2/test/script/parser+expansion.test.testscript
+++ b/libbuild2/test/script/parser+expansion.test.testscript
@@ -27,7 +27,7 @@ EOE
: invalid-redirect
:
$* <<EOI 2>>EOE != 0
-x = "1>&a"
+x = [cmdline] "1>&a"
cmd $x
EOI
<string>:1:4: error: stdout merge redirect file descriptor must be 2
diff --git a/libbuild2/test/script/parser+for.test.testscript b/libbuild2/test/script/parser+for.test.testscript
new file mode 100644
index 0000000..985f9c9
--- /dev/null
+++ b/libbuild2/test/script/parser+for.test.testscript
@@ -0,0 +1,1029 @@
+# file : libbuild2/test/script/parser+for.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: form-1
+:
+: for x: ...
+:
+{
+ : for
+ :
+ {
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for
+ cmd
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for x: a b
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : null
+ :
+ $* <<EOI >:''
+ for x: [null]
+ cmd $x
+ end
+ EOI
+
+ : empty
+ :
+ $* <<EOI >:''
+ for x:
+ cmd $x
+ end
+ EOI
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x: $vs
+ cmd $x
+ end
+ EOI
+ cmd a
+ cmd b
+ EOO
+
+ : typed-values
+ :
+ $* <<EOI >>~%EOO%
+ for x: [dir_paths] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : typed-elem-value
+ :
+ $* <<EOI >>~%EOO%
+ for x [dir_path]: [strings] a b
+ cmd $x
+ end
+ EOI
+ %cmd (a/|'a\\')%
+ %cmd (b/|'b\\')%
+ EOO
+
+ : scope-var
+ :
+ $* <<EOI >>EOO
+ x = x
+
+ for x: a b
+ cmd $x
+ end
+
+ -cmd $x
+ EOI
+ cmd a
+ cmd b
+ -cmd x
+ EOO
+ }
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ for x: a b
+ cmd2 $x
+ end
+ EOI
+ {
+ {
+ cmd1
+ cmd2 a
+ cmd2 b
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +for x: a b
+ cmd $x
+ end
+ EOI
+ {
+ +cmd a
+ +cmd b
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -for x: a b
+ cmd $x
+ end
+ EOI
+ {
+ -cmd a
+ -cmd b
+ }
+ EOO
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x: a b
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for x: a b
+ cmd1 $x # 1
+ if ($x == "a") # 2
+ cmd2 # 3
+ for y: x y
+ cmd3 # 4
+ end
+ else
+ cmd4 # 5
+ end
+ cmd5 # 6
+ end;
+ cmd6 # 7
+ EOI
+ cmd1 a # 1 i1
+ ? true # 2 i1
+ cmd2 # 3 i1
+ cmd3 # 4 i1 i1
+ cmd3 # 4 i1 i2
+ cmd5 # 6 i1
+ cmd1 b # 1 i2
+ ? false # 2 i2
+ cmd4 # 5 i2
+ cmd5 # 6 i2
+ cmd6 # 7
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'for'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'for'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ for x:
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'for'
+ EOE
+ }
+
+ : var
+ :
+ $* <<EOI >>EOO
+ for x: a b
+ cmd1 $x
+ end;
+ cmd2 $x
+ EOI
+ cmd1 a
+ cmd1 b
+ cmd2 b
+ EOO
+
+ : leading-and-trailing-description
+ :
+ $* <<EOI 2>>EOE != 0
+ : foo
+ for x: a b
+ cmd
+ end : bar
+ EOI
+ testscript:4:1: error: both leading and trailing descriptions
+ EOE
+}
+
+: form-2
+:
+: ... | for x
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x != 0
+ cmd
+ end
+ EOI
+ testscript:1:20: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x | echo x
+ cmd
+ end
+ EOI
+ testscript:1:20: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x|echo x
+ cmd
+ end
+ EOI
+ testscript:1:19: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x && echo x
+ cmd
+ end
+ EOI
+ testscript:1:20: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&&echo x
+ cmd
+ end
+ EOI
+ testscript:1:19: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x | for x
+ cmd
+ end
+ EOI
+ testscript:1:24: error: command expression involving for-loop
+ EOE
+
+ : expression-before-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && echo x|for x
+ cmd
+ end
+ EOI
+ testscript:1:22: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x &f
+ cmd
+ end
+ EOI
+ testscript:1:20: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x&f
+ cmd
+ end
+ EOI
+ testscript:1:19: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x >a
+ cmd
+ end
+ EOI
+ testscript:1:20: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x>a
+ cmd
+ end
+ EOI
+ testscript:1:19: error: output redirect in for-loop
+ EOE
+
+ : stdin-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x <a
+ cmd
+ end
+ EOI
+ testscript:1:20: error: stdin is both piped and redirected
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for
+ cmd
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ EOE
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ echo $vs | for x
+ cmd $x
+ end
+ EOI
+ echo a b | for x
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ echo 'a b' | for -w x [dir_path]
+ cmd $x
+ end
+ EOI
+ echo 'a b' | for -w x [dir_path]
+ EOO
+ }
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ echo 'a b' | for x
+ cmd2 $x
+ end
+ EOI
+ {
+ {
+ cmd1
+ echo 'a b' | for x
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +echo 'a b' | for x
+ cmd $x
+ end
+ EOI
+ {
+ +echo 'a b' | for x
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -echo 'a b' | for x
+ cmd $x
+ end
+ EOI
+ {
+ -echo 'a b' | for x
+ }
+ EOO
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ echo 'a b' | for x # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ echo x y | for y # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end;
+ cmd6 # 9
+ EOI
+ echo 'a b' | for x # 1
+ cmd6 # 9
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'for'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'for'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' | for x
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'for'
+ EOE
+ }
+
+ : leading-and-trailing-description
+ :
+ $* <<EOI 2>>EOE != 0
+ : foo
+ echo 'a b' | for x
+ cmd
+ end : bar
+ EOI
+ testscript:4:1: error: both leading and trailing descriptions
+ EOE
+}
+
+: form-3
+:
+: for x <...
+:
+{
+ : for
+ :
+ {
+ : status
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a != 0
+ cmd
+ end
+ EOI
+ testscript:1:10: error: for-loop exit code cannot be checked
+ EOE
+
+ : not-last
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a | echo x
+ cmd
+ end
+ EOI
+ testscript:1:10: error: for-loop must be last command in a pipe
+ EOE
+
+ : not-last-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x|echo x
+ cmd
+ end
+ EOI
+ testscript:1:9: error: for-loop must be last command in a pipe
+ EOE
+
+ : expression-after
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a && echo x
+ cmd
+ end
+ EOI
+ testscript:1:10: error: command expression involving for-loop
+ EOE
+
+ : expression-after-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&&echo x
+ cmd
+ end
+ EOI
+ testscript:1:9: error: command expression involving for-loop
+ EOE
+
+ : expression-before
+ :
+ $* <<EOI 2>>EOE != 0
+ echo 'a b' && for x <a
+ cmd
+ end
+ EOI
+ testscript:1:15: error: command expression involving for-loop
+ EOE
+
+ : cleanup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <a &f
+ cmd
+ end
+ EOI
+ testscript:1:10: error: cleanup in for-loop
+ EOE
+
+ : cleanup-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for &f x <a
+ cmd
+ end
+ EOI
+ testscript:1:5: error: cleanup in for-loop
+ EOE
+
+ : cleanup-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a x&f
+ cmd
+ end
+ EOI
+ testscript:1:9: error: cleanup in for-loop
+ EOE
+
+ : stdout-redirect
+ :
+ $* <<EOI 2>>EOE != 0
+ for x >a
+ cmd
+ end
+ EOI
+ testscript:1:7: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-before-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for >a x
+ cmd
+ end
+ EOI
+ testscript:1:5: error: output redirect in for-loop
+ EOE
+
+ : stdout-redirect-relex
+ :
+ $* <<EOI 2>>EOE != 0
+ for x>a
+ cmd
+ end
+ EOI
+ testscript:1:6: error: output redirect in for-loop
+ EOE
+
+ : no-var
+ :
+ $* <<EOI 2>>EOE != 0
+ for <a
+ cmd
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ EOE
+
+ : quoted-opt
+ :
+ $* <<EOI >>EOO
+ o = -w
+ for "$o" x <'a b'
+ cmd $x
+ end;
+ for "($o)" x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ for -w x <'a b'
+ EOO
+
+ : untyped
+ :
+ $* <<EOI >>EOO
+ for -w x <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x <'a b'
+ EOO
+
+ : expansion
+ :
+ $* <<EOI >>EOO
+ vs = a b
+ for x <$vs
+ cmd $x
+ end
+ EOI
+ for x b <a
+ EOO
+
+ : typed-elem
+ :
+ $* <<EOI >>EOO
+ for -w x [dir_path] <'a b'
+ cmd $x
+ end
+ EOI
+ for -w x [dir_path] <'a b'
+ EOO
+ }
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ for x <'a b'
+ cmd2 $x
+ end
+ EOI
+ {
+ {
+ cmd1
+ for x <'a b'
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +for x <'a b'
+ cmd $x
+ end
+ EOI
+ {
+ +for x <'a b'
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -for x <'a b'
+ cmd $x
+ end
+ EOI
+ {
+ -for x <'a b'
+ }
+ EOO
+
+ : end
+ :
+ {
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+ }
+
+ : elif
+ :
+ {
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+ }
+
+ : nested
+ :
+ {
+ $* -l -r <<EOI >>EOO
+ for -w x <'a b' # 1
+ cmd1 $x # 2
+ if ($x == "a") # 3
+ cmd2 # 4
+ for -w y <'x y' # 5
+ cmd3 # 6
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ end;
+ cmd6 # 9
+ EOI
+ for -w x <'a b' # 1
+ cmd6 # 9
+ EOO
+ }
+
+ : contained
+ :
+ {
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'for'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'for'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'for'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ for x <'a b'
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'for'
+ EOE
+ }
+
+ : leading-and-trailing-description
+ :
+ $* <<EOI 2>>EOE != 0
+ : foo
+ for x <'a b'
+ cmd
+ end : bar
+ EOI
+ testscript:4:1: error: both leading and trailing descriptions
+ EOE
+}
diff --git a/libbuild2/test/script/parser+while.test.testscript b/libbuild2/test/script/parser+while.test.testscript
new file mode 100644
index 0000000..b1a2b44
--- /dev/null
+++ b/libbuild2/test/script/parser+while.test.testscript
@@ -0,0 +1,265 @@
+# file : libbuild2/test/script/parser+while.test.testscript
+# license : MIT; see accompanying LICENSE file
+
+: while
+:
+{
+ : true
+ :
+ $* <<EOI >>EOO
+ while ($v != "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? true
+ cmd ''
+ ? true
+ cmd a
+ ? false
+ EOO
+
+ : false
+ :
+ $* <<EOI >>EOO
+ while ($v == "aa")
+ cmd "$v"
+ v = "$(v)a"
+ end
+ EOI
+ ? false
+ EOO
+
+ : without-command
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd
+ end
+ EOI
+ testscript:1:6: error: missing program
+ EOE
+
+ : after-semi
+ :
+ $* -s <<EOI >>EOO
+ cmd1;
+ while ($v != "aa")
+ cmd2 "$v"
+ v = "$(v)a"
+ end
+ EOI
+ {
+ {
+ cmd1
+ ? true
+ cmd2 ''
+ ? true
+ cmd2 a
+ ? false
+ }
+ }
+ EOO
+
+ : setup
+ :
+ $* -s <<EOI >>EOO
+ +while ($v != "aa")
+ cmd2 "$v"
+ v = "$(v)a"
+ end
+ EOI
+ {
+ ? true
+ +cmd2 ''
+ ? true
+ +cmd2 a
+ ? false
+ }
+ EOO
+
+ : tdown
+ :
+ $* -s <<EOI >>EOO
+ -while ($v != "aa")
+ cmd2 "$v"
+ v = "$(v)a"
+ end
+ EOI
+ {
+ ? true
+ -cmd2 ''
+ ? true
+ -cmd2 a
+ ? false
+ }
+ EOO
+}
+
+: end
+:
+{
+ : without-end
+ :
+ $* <<EOI 2>>EOE != 0
+ while true
+ cmd
+ EOI
+ testscript:3:1: error: expected closing 'end'
+ EOE
+}
+
+: elif
+:
+{
+ : without-if
+ :
+ $* <<EOI 2>>EOE != 0
+ while false
+ elif true
+ cmd
+ end
+ end
+ EOI
+ testscript:2:3: error: 'elif' without preceding 'if'
+ EOE
+}
+
+: nested
+:
+{
+ $* -l -r <<EOI >>EOO
+ while ($v != "aa") # 1
+ cmd1 "$v" # 2
+ if ($v == "a") # 3
+ cmd2 # 4
+ while ($v2 != "$v") # 5
+ cmd3 # 6
+ v2=$v
+ end
+ else
+ cmd4 # 7
+ end
+ cmd5 # 8
+ v = "$(v)a"
+ end;
+ cmd6
+ EOI
+ ? true # 1 i1
+ cmd1 '' # 2 i1
+ ? false # 3 i1
+ cmd4 # 7 i1
+ cmd5 # 8 i1
+ ? true # 1 i2
+ cmd1 a # 2 i2
+ ? true # 3 i2
+ cmd2 # 4 i2
+ ? true # 5 i2 i1
+ cmd3 # 6 i2 i1
+ ? false # 5 i2 i2
+ cmd5 # 8 i2
+ ? false # 1 i3
+ cmd6 # 9
+ EOO
+}
+
+: contained
+:
+{
+ : semi
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd;
+ cmd
+ end
+ EOI
+ testscript:2:3: error: ';' inside 'while'
+ EOE
+
+ : colon-leading
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ : foo
+ cmd
+ end
+ EOI
+ testscript:2:3: error: description inside 'while'
+ EOE
+
+ : colon-trailing
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd : foo
+ end
+ EOI
+ testscript:2:3: error: description inside 'while'
+ EOE
+
+ : eos
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ EOI
+ testscript:2:1: error: expected closing 'end'
+ EOE
+
+ : scope
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ cmd
+ {
+ }
+ end
+ EOI
+ testscript:3:3: error: expected closing 'end'
+ EOE
+
+ : setup
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ +cmd
+ end
+ EOI
+ testscript:2:3: error: setup command inside 'while'
+ EOE
+
+ : tdown
+ :
+ $* <<EOI 2>>EOE != 0
+ while
+ -cmd
+ end
+ EOI
+ testscript:2:3: error: teardown command inside 'while'
+ EOE
+}
+
+: var
+:
+$* <<EOI >>EOO
+while ($v1 != "a")
+ v1 = "$(v1)a"
+ v2 = "$v1"
+end
+cmd $v1
+EOI
+? true
+? false
+cmd a
+EOO
+
+: leading-and-trailing-description
+:
+$* <<EOI 2>>EOE != 0
+: foo
+while false
+ cmd
+end : bar
+EOI
+testscript:4:1: error: both leading and trailing descriptions
+EOE
diff --git a/libbuild2/test/script/parser.cxx b/libbuild2/test/script/parser.cxx
index bf04a30..b712c21 100644
--- a/libbuild2/test/script/parser.cxx
+++ b/libbuild2/test/script/parser.cxx
@@ -293,22 +293,30 @@ namespace build2
}
// Parse a logical line (as well as scope-if since the only way to
- // recognize it is to parse the if line).
+ // recognize it is to parse the if line), handling the flow control
+ // constructs recursively.
//
// If one is true then only parse one line returning an indication of
- // whether the line ended with a semicolon. If if_line is true then this
- // line can be an if-else construct flow control line (else, end, etc).
+ // whether the line ended with a semicolon. If the flow control
+ // construct type is specified, then this line is assumed to belong to
+ // such construct.
//
bool parser::
pre_parse_line (token& t, type& tt,
optional<description>& d,
lines* ls,
bool one,
- bool if_line)
+ optional<line_type> fct)
{
// enter: next token is peeked at (type in tt)
// leave: newline
+ assert (!fct ||
+ *fct == line_type::cmd_if ||
+ *fct == line_type::cmd_while ||
+ *fct == line_type::cmd_for_stream ||
+ *fct == line_type::cmd_for_args);
+
// Note: token is only peeked at.
//
const location ll (get_location (peeked ()));
@@ -317,6 +325,52 @@ namespace build2
//
line_type lt;
type st (type::eos); // Later, can only be set to plus or minus.
+ bool semi (false);
+
+ // Parse the command line tail, starting from the newline or the
+ // potential colon/semicolon token.
+ //
+ // Note that colon and semicolon are only valid in test command lines
+ // and after 'end' in flow control constructs. Note that we always
+ // recognize them lexically, even when they are not valid tokens per
+ // the grammar.
+ //
+ auto parse_command_tail = [&t, &tt, &st, &lt, &d, &semi, &ll, this] ()
+ {
+ if (tt != type::newline)
+ {
+ if (lt != line_type::cmd && lt != line_type::cmd_end)
+ fail (t) << "expected newline instead of " << t;
+
+ switch (st)
+ {
+ case type::plus: fail (t) << t << " after setup command" << endf;
+ case type::minus: fail (t) << t << " after teardown command" << endf;
+ }
+ }
+
+ switch (tt)
+ {
+ case type::colon:
+ {
+ if (d)
+ fail (ll) << "both leading and trailing descriptions";
+
+ d = parse_trailing_description (t, tt);
+ break;
+ }
+ case type::semi:
+ {
+ semi = true;
+ replay_pop (); // See above for the reasoning.
+ next (t, tt); // Get newline.
+ break;
+ }
+ }
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t;
+ };
switch (tt)
{
@@ -364,8 +418,12 @@ namespace build2
{
const string& n (t.value);
- if (n == "if") lt = line_type::cmd_if;
- else if (n == "if!") lt = line_type::cmd_ifn;
+ // Handle the for-loop consistently with pre_parse_line_start().
+ //
+ if (n == "if") lt = line_type::cmd_if;
+ else if (n == "if!") lt = line_type::cmd_ifn;
+ else if (n == "while") lt = line_type::cmd_while;
+ else if (n == "for") lt = line_type::cmd_for_stream;
}
break;
@@ -379,8 +437,6 @@ namespace build2
// Pre-parse the line keeping track of whether it ends with a semi.
//
- bool semi (false);
-
line ln;
switch (lt)
{
@@ -407,76 +463,147 @@ namespace build2
mode (lexer_mode::variable_line);
parse_variable_line (t, tt);
+ // Note that the semicolon token is only required during
+ // pre-parsing to decide which line list the current line should
+ // go to and provides no additional semantics during the
+ // execution. Moreover, build2::script::parser::exec_lines()
+ // doesn't expect this token to be present. Thus, we just drop
+ // this token from the saved tokens.
+ //
semi = (tt == type::semi);
- if (tt == type::semi)
+ if (semi)
+ {
+ replay_pop ();
next (t, tt);
+ }
if (tt != type::newline)
fail (t) << "expected newline instead of " << t;
break;
}
+ //
+ // See pre_parse_line_start() for details.
+ //
+ case line_type::cmd_for_args: assert (false); break;
+ case line_type::cmd_for_stream:
+ {
+ // First we need to sense the next few tokens and detect which
+ // form of the for-loop that actually is (see
+ // libbuild2/build/script/parser.cxx for details).
+ //
+ token pt (t);
+ assert (pt.type == type::word && pt.value == "for");
+
+ mode (lexer_mode::for_loop);
+ next (t, tt);
+
+ string& n (t.value);
+
+ if (tt == type::word && t.qtype == quote_type::unquoted &&
+ (n[0] == '_' || alpha (n[0]) || // Variable.
+ n == "*" || n == "~" || n == "@")) // Special variable.
+ {
+ // Detect patterns analogous to parse_variable_name() (so we
+ // diagnose `for x[string]: ...`).
+ //
+ if (n.find_first_of ("[*?") != string::npos)
+ fail (t) << "expected variable name instead of " << n;
+
+ if (special_variable (n))
+ fail (t) << "attempt to set '" << n << "' variable directly";
+
+ if (lexer_->peek_char ().first == '[')
+ {
+ token vt (move (t));
+ next_with_attributes (t, tt);
+
+ attributes_push (t, tt,
+ true /* standalone */,
+ false /* next_token */);
+
+ t = move (vt);
+ tt = t.type;
+ }
+
+ if (lexer_->peek_char ().first == ':')
+ lt = line_type::cmd_for_args;
+ }
+
+ if (lt == line_type::cmd_for_stream) // for x <...
+ {
+ ln.var = nullptr;
+
+ expire_mode ();
+
+ parse_command_expr_result r (
+ parse_command_expr (t, tt,
+ lexer::redirect_aliases,
+ move (pt)));
+
+ assert (r.for_loop);
+
+ parse_command_tail ();
+ parse_here_documents (t, tt, r);
+ }
+ else // for x: ...
+ {
+ ln.var = &script_->var_pool.insert (move (n));
+
+ next (t, tt);
+
+ assert (tt == type::colon);
+
+ expire_mode ();
+
+ // Parse the value similar to the var line type (see above),
+ // except for the fact that we don't expect a trailing semicolon.
+ //
+ mode (lexer_mode::variable_line);
+ parse_variable_line (t, tt);
+
+ if (tt != type::newline)
+ fail (t) << "expected newline instead of " << t << " after for";
+ }
+
+ break;
+ }
case line_type::cmd_elif:
case line_type::cmd_elifn:
case line_type::cmd_else:
- case line_type::cmd_end:
{
- if (!if_line)
- {
+ if (!fct || *fct != line_type::cmd_if)
fail (t) << lt << " without preceding 'if'";
- }
+ }
+ // Fall through.
+ case line_type::cmd_end:
+ {
+ if (!fct)
+ fail (t) << lt << " without preceding 'if', 'for', or 'while'";
}
// Fall through.
case line_type::cmd_if:
case line_type::cmd_ifn:
+ case line_type::cmd_while:
next (t, tt); // Skip to start of command.
// Fall through.
case line_type::cmd:
{
- pair<command_expr, here_docs> p;
+ parse_command_expr_result r;
if (lt != line_type::cmd_else && lt != line_type::cmd_end)
- p = parse_command_expr (t, tt, lexer::redirect_aliases);
+ r = parse_command_expr (t, tt, lexer::redirect_aliases);
- // Colon and semicolon are only valid in test command lines and
- // after 'end' in if-else. Note that we still recognize them
- // lexically, they are just not valid tokens per the grammar.
- //
- if (tt != type::newline)
+ if (r.for_loop)
{
- if (lt != line_type::cmd && lt != line_type::cmd_end)
- fail (t) << "expected newline instead of " << t;
-
- switch (st)
- {
- case type::plus: fail (t) << t << " after setup command" << endf;
- case type::minus: fail (t) << t << " after teardown command" << endf;
- }
+ lt = line_type::cmd_for_stream;
+ ln.var = nullptr;
}
- switch (tt)
- {
- case type::colon:
- {
- if (d)
- fail (ll) << "both leading and trailing descriptions";
-
- d = parse_trailing_description (t, tt);
- break;
- }
- case type::semi:
- {
- semi = true;
- next (t, tt); // Get newline.
- break;
- }
- }
-
- if (tt != type::newline)
- fail (t) << "expected newline instead of " << t;
+ parse_command_tail ();
+ parse_here_documents (t, tt, r);
- parse_here_documents (t, tt, p);
break;
}
}
@@ -494,24 +621,39 @@ namespace build2
ln.tokens = replay_data ();
ls->push_back (move (ln));
- if (lt == line_type::cmd_if || lt == line_type::cmd_ifn)
+ switch (lt)
{
- semi = pre_parse_if_else (t, tt, d, *ls);
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ {
+ semi = pre_parse_if_else (t, tt, d, *ls);
- // If this turned out to be scope-if, then ls is empty, semi is
- // false, and none of the below logic applies.
- //
- if (ls->empty ())
- return semi;
+ // If this turned out to be scope-if, then ls is empty, semi is
+ // false, and none of the below logic applies.
+ //
+ if (ls->empty ())
+ return semi;
+
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ semi = pre_parse_loop (t, tt, lt, d, *ls);
+ break;
+ }
+ default: break;
}
// Unless we were told where to put it, decide where it actually goes.
//
if (ls == &ls_data)
{
- // First pre-check variable and variable-if: by themselves (i.e.,
- // without a trailing semicolon) they are treated as either setup or
- // teardown without plus/minus. Also handle illegal line types.
+ // First pre-check variables and variable-only flow control
+ // constructs: by themselves (i.e., without a trailing semicolon)
+ // they are treated as either setup or teardown without
+ // plus/minus. Also handle illegal line types.
//
switch (lt)
{
@@ -524,8 +666,11 @@ namespace build2
}
case line_type::cmd_if:
case line_type::cmd_ifn:
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
{
- // See if this is a variable-only command-if.
+ // See if this is a variable-only flow control construct.
//
if (find_if (ls_data.begin (), ls_data.end (),
[] (const line& l) {
@@ -549,7 +694,7 @@ namespace build2
fail (ll) << "description before setup/teardown variable";
else
fail (ll) << "description before/after setup/teardown "
- << "variable-if";
+ << "variable-only " << lt;
}
// If we don't have any nested scopes or teardown commands,
@@ -793,7 +938,7 @@ namespace build2
td,
&ls,
true /* one */,
- true /* if_line */));
+ line_type::cmd_if));
assert (ls.size () == 1 && ls.back ().type == lt);
assert (tt == type::newline);
@@ -831,6 +976,99 @@ namespace build2
return false; // We never end with a semi.
}
+ // Pre-parse the flow control construct block line. Fail if the line is
+ // unexpectedly followed with a semicolon or test description.
+ //
+ bool parser::
+ pre_parse_block_line (token& t, type& tt,
+ line_type bt,
+ optional<description>& d,
+ lines& ls)
+ {
+ // enter: peeked first token of the line (type in tt)
+ // leave: newline
+
+ const location ll (get_location (peeked ()));
+
+ switch (tt)
+ {
+ case type::colon:
+ fail (ll) << "description inside " << bt << endf;
+ case type::eos:
+ case type::rcbrace:
+ case type::lcbrace:
+ fail (ll) << "expected closing 'end'" << endf;
+ case type::plus:
+ fail (ll) << "setup command inside " << bt << endf;
+ case type::minus:
+ fail (ll) << "teardown command inside " << bt << endf;
+ }
+
+ // Parse one line. Note that this one line can still be multiple lines
+ // in case of a flow control construct. In this case we want to view
+ // it as, for example, cmd_if, not cmd_end. Thus remember the start
+ // position of the next logical line.
+ //
+ size_t i (ls.size ());
+
+ line_type fct; // Flow control construct type the block type relates to.
+
+ switch (bt)
+ {
+ case line_type::cmd_if:
+ case line_type::cmd_ifn:
+ case line_type::cmd_elif:
+ case line_type::cmd_elifn:
+ case line_type::cmd_else:
+ {
+ fct = line_type::cmd_if;
+ break;
+ }
+ case line_type::cmd_while:
+ case line_type::cmd_for_stream:
+ case line_type::cmd_for_args:
+ {
+ fct = bt;
+ break;
+ }
+ default: assert(false);
+ }
+
+ optional<description> td;
+ bool semi (pre_parse_line (t, tt, td, &ls, true /* one */, fct));
+
+ assert (tt == type::newline);
+
+ line_type lt (ls[i].type);
+
+ // First take care of 'end'.
+ //
+ if (lt == line_type::cmd_end)
+ {
+ if (td)
+ {
+ if (d)
+ fail (ll) << "both leading and trailing descriptions";
+
+ d = move (td);
+ }
+
+ return semi;
+ }
+
+ // For any other line trailing semi or description is illegal.
+ //
+ // @@ Not the exact location of semi/colon.
+ //
+ if (semi)
+ fail (ll) << "';' inside " << bt;
+
+ if (td)
+ fail (ll) << "description inside " << bt;
+
+ return false;
+ }
+
bool parser::
pre_parse_if_else_command (token& t, type& tt,
optional<description>& d,
@@ -839,70 +1077,23 @@ namespace build2
// enter: peeked first token of next line (type in tt)
// leave: newline
- // Parse lines until we see closing 'end'. Nested if-else blocks are
- // handled recursively.
+ // Parse lines until we see closing 'end'.
//
for (line_type bt (line_type::cmd_if); // Current block.
;
tt = peek (lexer_mode::first_token))
{
const location ll (get_location (peeked ()));
-
- switch (tt)
- {
- case type::colon:
- fail (ll) << "description inside " << bt << endf;
- case type::eos:
- case type::rcbrace:
- case type::lcbrace:
- fail (ll) << "expected closing 'end'" << endf;
- case type::plus:
- fail (ll) << "setup command inside " << bt << endf;
- case type::minus:
- fail (ll) << "teardown command inside " << bt << endf;
- }
-
- // Parse one line. Note that this one line can still be multiple
- // lines in case of if-else. In this case we want to view it as
- // cmd_if, not cmd_end. Thus remember the start position of the
- // next logical line.
- //
size_t i (ls.size ());
- optional<description> td;
- bool semi (pre_parse_line (t, tt,
- td,
- &ls,
- true /* one */,
- true /* if_line */));
- assert (tt == type::newline);
+ bool semi (pre_parse_block_line (t, tt, bt, d, ls));
line_type lt (ls[i].type);
// First take care of 'end'.
//
if (lt == line_type::cmd_end)
- {
- if (td)
- {
- if (d)
- fail (ll) << "both leading and trailing descriptions";
-
- d = move (td);
- }
-
return semi;
- }
-
- // For any other line trailing semi or description is illegal.
- //
- // @@ Not the exact location of semi/colon.
- //
- if (semi)
- fail (ll) << "';' inside " << bt;
-
- if (td)
- fail (ll) << "description inside " << bt;
// Check if-else block sequencing.
//
@@ -924,6 +1115,40 @@ namespace build2
default: break;
}
}
+
+ assert (false); // Can't be here.
+ return false;
+ }
+
+ bool parser::
+ pre_parse_loop (token& t, type& tt,
+ line_type lt,
+ optional<description>& d,
+ lines& ls)
+ {
+ // enter: <newline> (previous line)
+ // leave: <newline>
+
+ assert (lt == line_type::cmd_while ||
+ lt == line_type::cmd_for_stream ||
+ lt == line_type::cmd_for_args);
+
+ tt = peek (lexer_mode::first_token);
+
+ // Parse lines until we see closing 'end'.
+ //
+ for (;; tt = peek (lexer_mode::first_token))
+ {
+ size_t i (ls.size ());
+
+ bool semi (pre_parse_block_line (t, tt, lt, d, ls));
+
+ if (ls[i].type == line_type::cmd_end)
+ return semi;
+ }
+
+ assert (false); // Can't be here.
+ return false;
}
void parser::
@@ -1266,21 +1491,18 @@ namespace build2
// Note: this one is only used during execution.
- pair<command_expr, here_docs> p (
+ parse_command_expr_result pr (
parse_command_expr (t, tt, lexer::redirect_aliases));
- switch (tt)
- {
- case type::colon: parse_trailing_description (t, tt); break;
- case type::semi: next (t, tt); break; // Get newline.
- }
+ if (tt == type::colon)
+ parse_trailing_description (t, tt);
assert (tt == type::newline);
- parse_here_documents (t, tt, p);
+ parse_here_documents (t, tt, pr);
assert (tt == type::newline);
- command_expr r (move (p.first));
+ command_expr r (move (pr.expr));
// If the test program runner is specified, then adjust the
// expressions to run test programs via this runner.
@@ -1402,9 +1624,6 @@ namespace build2
mode (lexer_mode::variable_line);
value rhs (parse_variable_line (t, tt));
- if (tt == type::semi)
- next (t, tt);
-
assert (tt == type::newline);
// Assign.
@@ -1424,8 +1643,9 @@ namespace build2
command_type ct;
auto exec_cmd = [&ct, this] (token& t, build2::script::token_type& tt,
- size_t li,
+ const iteration_index* ii, size_t li,
bool single,
+ const function<command_function>& cf,
const location& ll)
{
// We use the 0 index to signal that this is the only command.
@@ -1437,19 +1657,35 @@ namespace build2
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- runner_->run (*scope_, ce, ct, li, ll);
+ runner_->run (*scope_, ce, ct, ii, li, cf, ll);
};
- auto exec_if = [this] (token& t, build2::script::token_type& tt,
- size_t li,
- const location& ll)
+ auto exec_cond = [this] (token& t, build2::script::token_type& tt,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
command_expr ce (
parse_command_line (t, static_cast<token_type&> (tt)));
- // Assume if-else always involves multiple commands.
+ // Assume a flow control construct always involves multiple
+ // commands.
//
- return runner_->run_if (*scope_, ce, li, ll);
+ return runner_->run_cond (*scope_, ce, ii, li, ll);
+ };
+
+ auto exec_for = [this] (const variable& var,
+ value&& val,
+ const attributes& val_attrs,
+ const location&)
+ {
+ value& lhs (scope_->assign (var));
+
+ attributes_.push_back (val_attrs);
+
+ apply_value_attributes (&var, lhs, move (val), type::assign);
+
+ if (script_->test_command_var (var.name))
+ scope_->reset_special ();
};
size_t li (1);
@@ -1459,16 +1695,17 @@ namespace build2
ct = command_type::test;
exec_lines (t->tests_.begin (), t->tests_.end (),
- exec_set, exec_cmd, exec_if,
- li);
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */, li);
}
else if (group* g = dynamic_cast<group*> (scope_))
{
ct = command_type::setup;
- bool exec_scope (exec_lines (g->setup_.begin (), g->setup_.end (),
- exec_set, exec_cmd, exec_if,
- li));
+ bool exec_scope (
+ exec_lines (g->setup_.begin (), g->setup_.end (),
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */, li));
if (exec_scope)
{
@@ -1526,7 +1763,8 @@ namespace build2
try
{
- take = runner_->run_if (*scope_, ce, li++, ll);
+ take = runner_->run_cond (
+ *scope_, ce, nullptr /* iteration_index */, li++, ll);
}
catch (const exit_scope& e)
{
@@ -1593,24 +1831,24 @@ namespace build2
// UBSan workaround.
//
const diag_frame* df (diag_frame::stack ());
- if (!ctx.sched.async (task_count,
- [] (const diag_frame* ds,
- scope& s,
- script& scr,
- runner& r)
- {
- diag_frame::stack_guard dsg (ds);
- execute_impl (s, scr, r);
- },
- df,
- ref (*chain),
- ref (*script_),
- ref (*runner_)))
+ if (!ctx->sched->async (task_count,
+ [] (const diag_frame* ds,
+ scope& s,
+ script& scr,
+ runner& r)
+ {
+ diag_frame::stack_guard dsg (ds);
+ execute_impl (s, scr, r);
+ },
+ df,
+ ref (*chain),
+ ref (*script_),
+ ref (*runner_)))
{
// Bail out if the scope has failed and we weren't instructed
// to keep going.
//
- if (chain->state == scope_state::failed && !ctx.keep_going)
+ if (chain->state == scope_state::failed && !ctx->keep_going)
throw failed ();
}
}
@@ -1637,8 +1875,8 @@ namespace build2
ct = command_type::teardown;
exec_lines (g->tdown_.begin (), g->tdown_.end (),
- exec_set, exec_cmd, exec_if,
- li);
+ exec_set, exec_cmd, exec_cond, exec_for,
+ nullptr /* iteration_index */, li);
}
else
assert (false);
@@ -1652,7 +1890,8 @@ namespace build2
// The rest.
//
- // When add a special variable don't forget to update lexer::word().
+ // When add a special variable don't forget to update lexer::word() and
+ // for-loop parsing in pre_parse_line().
//
bool parser::
special_variable (const string& n) noexcept
@@ -1661,7 +1900,7 @@ namespace build2
}
lookup parser::
- lookup_variable (name&& qual, string&& name, const location& loc)
+ lookup_variable (names&& qual, string&& name, const location& loc)
{
if (pre_parse_)
return lookup ();
diff --git a/libbuild2/test/script/parser.hxx b/libbuild2/test/script/parser.hxx
index c63bce6..6fe46e2 100644
--- a/libbuild2/test/script/parser.hxx
+++ b/libbuild2/test/script/parser.hxx
@@ -30,7 +30,7 @@ namespace build2
// Pre-parse. Issue diagnostics and throw failed in case of an error.
//
public:
- parser (context& c): build2::script::parser (c, true /* relex */) {}
+ parser (context& c): build2::script::parser (c) {}
void
pre_parse (script&);
@@ -62,7 +62,13 @@ namespace build2
optional<description>&,
lines* = nullptr,
bool one = false,
- bool if_line = false);
+ optional<line_type> flow_control_type = nullopt);
+
+ bool
+ pre_parse_block_line (token&, token_type&,
+ line_type block_type,
+ optional<description>&,
+ lines&);
bool
pre_parse_if_else (token&, token_type&,
@@ -79,6 +85,12 @@ namespace build2
optional<description>&,
lines&);
+ bool
+ pre_parse_loop (token&, token_type&,
+ line_type,
+ optional<description>&,
+ lines&);
+
void
pre_parse_directive (token&, token_type&);
@@ -117,7 +129,7 @@ namespace build2
//
protected:
virtual lookup
- lookup_variable (name&&, string&&, const location&) override;
+ lookup_variable (names&&, string&&, const location&) override;
// Insert id into the id map checking for duplicates.
//
diff --git a/libbuild2/test/script/parser.test.cxx b/libbuild2/test/script/parser.test.cxx
index e0dd3d2..6838e47 100644
--- a/libbuild2/test/script/parser.test.cxx
+++ b/libbuild2/test/script/parser.test.cxx
@@ -33,8 +33,11 @@ namespace build2
class print_runner: public runner
{
public:
- print_runner (bool scope, bool id, bool line)
- : scope_ (scope), id_ (id), line_ (line) {}
+ print_runner (bool scope, bool id, bool line, bool iterations)
+ : scope_ (scope),
+ id_ (id),
+ line_ (line),
+ iterations_ (iterations) {}
virtual bool
test (scope&) const override
@@ -97,11 +100,32 @@ namespace build2
}
virtual void
- run (scope&,
+ run (scope& env,
const command_expr& e, command_type t,
- size_t i,
- const location&) override
+ const iteration_index* ii, size_t i,
+ const function<command_function>& cf,
+ const location& ll) override
{
+ // If the functions is specified, then just execute it with an empty
+ // stdin so it can perform the housekeeping (stop replaying tokens,
+ // increment line index, etc).
+ //
+ if (cf != nullptr)
+ {
+ assert (e.size () == 1 && !e[0].pipe.empty ());
+
+ const command& c (e[0].pipe.back ());
+
+ // Must be enforced by the caller.
+ //
+ assert (!c.out && !c.err && !c.exit);
+
+ cf (env, c.arguments,
+ fdopen_null (), nullptr /* pipe */,
+ nullopt /* deadline */,
+ ll);
+ }
+
const char* s (nullptr);
switch (t)
@@ -113,22 +137,22 @@ namespace build2
cout << ind_ << s << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
}
virtual bool
- run_if (scope&,
- const command_expr& e,
- size_t i,
- const location&) override
+ run_cond (scope&,
+ const command_expr& e,
+ const iteration_index* ii, size_t i,
+ const location&) override
{
cout << ind_ << "? " << e;
- if (line_)
- cout << " # " << i;
+ if (line_ || iterations_)
+ print_line_info (ii, i);
cout << endl;
@@ -146,13 +170,33 @@ namespace build2
}
private:
+ void
+ print_line_info (const iteration_index* ii, size_t i) const
+ {
+ cout << " #";
+
+ if (line_)
+ cout << ' ' << i;
+
+ if (iterations_ && ii != nullptr)
+ {
+ string s;
+ for (const iteration_index* i (ii); i != nullptr; i = i->prev)
+ s.insert (0, " i" + to_string (i->index));
+
+ cout << s;
+ }
+ }
+
+ private:
bool scope_;
bool id_;
bool line_;
+ bool iterations_;
string ind_;
};
- // Usage: argv[0] [-s] [-i] [-l] [<testscript-name>]
+ // Usage: argv[0] [-s] [-i] [-l] [-r] [<testscript-name>]
//
int
main (int argc, char* argv[])
@@ -174,6 +218,7 @@ namespace build2
bool scope (false);
bool id (false);
bool line (false);
+ bool iterations (false);
path name;
for (int i (1); i != argc; ++i)
@@ -186,6 +231,8 @@ namespace build2
id = true;
else if (a == "-l")
line = true;
+ else if (a == "-r")
+ iterations = true;
else
{
name = path (move (a));
@@ -218,7 +265,7 @@ namespace build2
tt.assign (
ctx.var_pool.rw ().insert<target_triplet> ("test.target")));
- v = cast<target_triplet> (ctx.global_scope["build.host"]);
+ v = *ctx.build_host;
testscript& st (
ctx.targets.insert<testscript> (work,
@@ -236,7 +283,7 @@ namespace build2
script s (tt, st, dir_path (work) /= "test-driver");
p.pre_parse (cin, s);
- print_runner r (scope, id, line);
+ print_runner r (scope, id, line, iterations);
p.execute (s, r);
}
catch (const failed&)
diff --git a/libbuild2/test/script/runner.cxx b/libbuild2/test/script/runner.cxx
index 8054c61..98d6868 100644
--- a/libbuild2/test/script/runner.cxx
+++ b/libbuild2/test/script/runner.cxx
@@ -142,7 +142,9 @@ namespace build2
void default_runner::
run (scope& sp,
const command_expr& expr, command_type ct,
- size_t li, const location& ll)
+ const iteration_index* ii, size_t li,
+ const function<command_function>& cf,
+ const location& ll)
{
// Noop for teardown commands if keeping tests output is requested.
//
@@ -164,40 +166,55 @@ namespace build2
text << ": " << c << expr;
}
- // Print test id once per test expression.
+ // Print test id once per test expression and only for the topmost
+ // one.
//
auto df = make_diag_frame (
- [&sp](const diag_record& dr)
+ [&sp, print = (sp.exec_level == 0)](const diag_record& dr)
{
- // Let's not depend on how the path representation can be improved
- // for readability on printing.
- //
- dr << info << "test id: " << sp.id_path.posix_string ();
+ if (print)
+ {
+ // Let's not depend on how the path representation can be
+ // improved for readability on printing.
+ //
+ dr << info << "test id: " << sp.id_path.posix_string ();
+ }
});
- build2::script::run (sp, expr, li, ll);
+ ++sp.exec_level;
+ build2::script::run (sp, expr, ii, li, ll, cf);
+ --sp.exec_level;
}
bool default_runner::
- run_if (scope& sp,
- const command_expr& expr,
- size_t li, const location& ll)
+ run_cond (scope& sp,
+ const command_expr& expr,
+ const iteration_index* ii, size_t li,
+ const location& ll)
{
if (verb >= 3)
text << ": ?" << expr;
- // Print test id once per test expression.
+ // Print test id once per test expression and only for the topmost
+ // one.
//
auto df = make_diag_frame (
- [&sp](const diag_record& dr)
+ [&sp, print = (sp.exec_level == 0)](const diag_record& dr)
{
- // Let's not depend on how the path representation can be improved
- // for readability on printing.
- //
- dr << info << "test id: " << sp.id_path.posix_string ();
+ if (print)
+ {
+ // Let's not depend on how the path representation can be
+ // improved for readability on printing.
+ //
+ dr << info << "test id: " << sp.id_path.posix_string ();
+ }
});
- return build2::script::run_if (sp, expr, li, ll);
+ ++sp.exec_level;
+ bool r (build2::script::run_cond (sp, expr, ii, li, ll));
+ --sp.exec_level;
+
+ return r;
}
}
}
diff --git a/libbuild2/test/script/runner.hxx b/libbuild2/test/script/runner.hxx
index b6a038d..687d991 100644
--- a/libbuild2/test/script/runner.hxx
+++ b/libbuild2/test/script/runner.hxx
@@ -48,14 +48,21 @@ namespace build2
// Location is the start position of this command line in the
// testscript. It can be used in diagnostics.
//
+ // Optionally, execute the specified function instead of the last
+ // pipe command.
+ //
virtual void
run (scope&,
const command_expr&, command_type,
- size_t index,
+ const iteration_index*, size_t index,
+ const function<command_function>&,
const location&) = 0;
virtual bool
- run_if (scope&, const command_expr&, size_t, const location&) = 0;
+ run_cond (scope&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) = 0;
// Location is the scope end location (for diagnostics, etc).
//
@@ -84,11 +91,15 @@ namespace build2
virtual void
run (scope&,
const command_expr&, command_type,
- size_t,
+ const iteration_index*, size_t,
+ const function<command_function>&,
const location&) override;
virtual bool
- run_if (scope&, const command_expr&, size_t, const location&) override;
+ run_cond (scope&,
+ const command_expr&,
+ const iteration_index*, size_t,
+ const location&) override;
virtual void
leave (scope&, const location&) override;
diff --git a/libbuild2/test/script/script.cxx b/libbuild2/test/script/script.cxx
index 3a8ceac..f7827f6 100644
--- a/libbuild2/test/script/script.cxx
+++ b/libbuild2/test/script/script.cxx
@@ -30,7 +30,7 @@ namespace build2
scope_base::
scope_base (script& s)
: root (s),
- vars (s.test_target.ctx, false /* global */)
+ vars (s.test_target.ctx, false /* shared */) // Note: managed.
{
vars.assign (root.wd_var) = dir_path ();
}
@@ -115,7 +115,7 @@ namespace build2
}
void scope::
- set_variable (string&& nm,
+ set_variable (string nm,
names&& val,
const string& attrs,
const location& ll)
@@ -197,12 +197,12 @@ namespace build2
test_var (var_pool.insert<path> ("test")),
options_var (var_pool.insert<strings> ("test.options")),
arguments_var (var_pool.insert<strings> ("test.arguments")),
- redirects_var (var_pool.insert<strings> ("test.redirects")),
- cleanups_var (var_pool.insert<strings> ("test.cleanups")),
+ redirects_var (var_pool.insert<cmdline> ("test.redirects")),
+ cleanups_var (var_pool.insert<cmdline> ("test.cleanups")),
wd_var (var_pool.insert<dir_path> ("~")),
id_var (var_pool.insert<path> ("@")),
- cmd_var (var_pool.insert<strings> ("*")),
+ cmd_var (var_pool.insert<cmdline> ("*")),
cmdN_var {
&var_pool.insert<path> ("0"),
&var_pool.insert<string> ("1"),
@@ -268,7 +268,7 @@ namespace build2
v = path (n->dir);
else
{
- // Must be a target name.
+ // Must be a target name. Could be from src (e.g., a script).
//
// @@ OUT: what if this is a @-qualified pair of names?
//
@@ -355,7 +355,7 @@ namespace build2
// in parallel). Plus, if there is no such variable, then we cannot
// possibly find any value.
//
- const variable* pvar (context.var_pool.find (n));
+ const variable* pvar (root.target_scope.var_pool ().find (n));
if (pvar == nullptr)
return lookup_type ();
@@ -410,11 +410,12 @@ namespace build2
// First assemble the $* value and save the test variable value into
// the test program set.
//
- strings s;
+ cmdline s;
- auto append = [&s] (const strings& v)
+ auto append = [&s] (const strings& vs)
{
- s.insert (s.end (), v.begin (), v.end ());
+ for (const string& v: vs)
+ s.push_back (name (v)); // Simple name.
};
// If the test variable can't be looked up for any reason (is NULL,
@@ -423,7 +424,7 @@ namespace build2
if (auto l = lookup (root.test_var))
{
const path& p (cast<path> (l));
- s.push_back (p.representation ());
+ s.push_back (name (p.representation ()));
test_programs[0] = &p;
@@ -441,10 +442,16 @@ namespace build2
size_t n (s.size ());
if (auto l = lookup (root.redirects_var))
- append (cast<strings> (l));
+ {
+ const auto& v (cast<cmdline> (l));
+ s.insert (s.end (), v.begin (), v.end ());
+ }
if (auto l = lookup (root.cleanups_var))
- append (cast<strings> (l));
+ {
+ const auto& v (cast<cmdline> (l));
+ s.insert (s.end (), v.begin (), v.end ());
+ }
// Set the $N values if present.
//
@@ -455,9 +462,9 @@ namespace build2
if (i < n)
{
if (i == 0)
- v = path (s[i]);
+ v = path (s[i].value);
else
- v = s[i];
+ v = s[i].value;
}
else
v = nullptr; // Clear any old values.
@@ -465,6 +472,88 @@ namespace build2
// Set $*.
//
+ // We need to effective-quote the $test $test.options, $test.arguments
+ // part of it since they will be re-lexed. See the Testscript manual
+ // for details on quoting semantics. In particular, we cannot escape
+ // the special character (|<>&) so we have to rely on quoting. We can
+ // use single-quoting for everything except if the value contains a
+ // single quote. In which case we should probably just do separately-
+ // quoted regions (similar to shell), for example:
+ //
+ // <''>
+ //
+ // Can be quoted as:
+ //
+ // '<'"''"'>'
+ //
+ for (size_t i (0); i != n; ++i)
+ {
+ string& v (s[i].value);
+
+ // Check if the quoting is required for this value.
+ //
+ if (!parser::need_cmdline_relex (v))
+ continue;
+
+ // If the value doesn't contain the single-quote character, then
+ // single-quote it.
+ //
+ size_t p (v.find ('\''));
+
+ if (p == string::npos)
+ {
+ v = '\'' + v + '\'';
+ continue;
+ }
+
+ // Otherwise quote the regions.
+ //
+ // Note that we double-quote the single-quote character sequences
+ // and single-quote all the other regions.
+ //
+ string r;
+ char q (p == 0 ? '"' : '\''); // Current region quoting mode.
+
+ r += q; // Open the first region.
+
+ for (char c: v)
+ {
+ // If we are in the double-quoting mode, then switch to the
+ // single-quoting mode if a non-single-quote character is
+ // encountered.
+ //
+ if (q == '"')
+ {
+ if (c != '\'')
+ {
+ r += q; // Close the double-quoted region.
+ q = '\''; // Set the single-quoting mode.
+ r += q; // Open the single-quoted region.
+ }
+ }
+ //
+ // If we are in the single-quoting mode, then switch to the
+ // double-quoting mode if the single-quote character is
+ // encountered.
+ //
+ else
+ {
+ if (c == '\'')
+ {
+ r += q; // Close the single-quoted region.
+ q = '"'; // Set the double-quoting mode.
+ r += q; // Open the double-quoted region.
+ }
+ }
+
+ r += c;
+ }
+
+ r += q; // Close the last region.
+
+ v = move (r);
+ }
+
assign (root.cmd_var) = move (s);
}
diff --git a/libbuild2/test/script/script.hxx b/libbuild2/test/script/script.hxx
index 22f6725..9409b01 100644
--- a/libbuild2/test/script/script.hxx
+++ b/libbuild2/test/script/script.hxx
@@ -21,16 +21,19 @@ namespace build2
namespace script
{
using build2::script::line;
+ using build2::script::line_type;
using build2::script::lines;
using build2::script::redirect;
using build2::script::redirect_type;
- using build2::script::line_type;
- using build2::script::command_expr;
- using build2::script::expr_term;
using build2::script::command;
+ using build2::script::expr_term;
+ using build2::script::command_expr;
+ using build2::script::iteration_index;
using build2::script::environment_vars;
using build2::script::deadline;
using build2::script::timeout;
+ using build2::script::pipe_command;
+ using build2::script::command_function;
class parser; // Required by VC for 'friend class parser' declaration.
@@ -94,6 +97,22 @@ namespace build2
scope_state state = scope_state::unknown;
+ // The command expression execution nesting level. Can be maintained
+ // by the runner to, for example, only perform some housekeeping on
+ // the topmost level (add the test id to the diagnostics, etc).
+ //
+ // Note that the command expression execution can be nested, so that
+ // the outer expression execution is not completed before all the
+ // inner expressions are executed. As for example in:
+ //
+ // echo 'a b' | for x
+ // echo 'c d' | for y
+ // test $x $y
+ // end
+ // end
+ //
+ size_t exec_level = 0;
+
// Test program paths.
//
// Currently always contains a single element (see test_program() for
@@ -103,8 +122,8 @@ namespace build2
//
small_vector<const path*, 1> test_programs;
- void
- set_variable (string&& name,
+ virtual void
+ set_variable (string name,
names&&,
const string& attrs,
const location&) override;
diff --git a/libbuild2/token.cxx b/libbuild2/token.cxx
index ab14388..cc102cc 100644
--- a/libbuild2/token.cxx
+++ b/libbuild2/token.cxx
@@ -29,21 +29,30 @@ namespace build2
os << (r ? "\n" : "<newline>");
break;
}
- case token_type::pair_separator:
+ case token_type::word:
{
if (r)
- os << t.value[0];
+ os << t.value;
else
- os << "<pair separator " << t.value[0] << ">";
+ os << '\'' << t.value << '\'';
break;
}
- case token_type::word:
+ case token_type::escape:
{
if (r)
- os << t.value;
+ os << '\\' << t.value;
else
- os << '\'' << t.value << '\'';
+ os << "<escape sequence \\" << t.value << ">";
+
+ break;
+ }
+ case token_type::pair_separator:
+ {
+ if (r)
+ os << t.value[0];
+ else
+ os << "<pair separator " << t.value[0] << ">";
break;
}
diff --git a/libbuild2/token.hxx b/libbuild2/token.hxx
index fca888c..f9ede65 100644
--- a/libbuild2/token.hxx
+++ b/libbuild2/token.hxx
@@ -30,6 +30,7 @@ namespace build2
eos,
newline,
word,
+ escape, // token::value is <...> in $\<...>
pair_separator, // token::value[0] is the pair separator char.
colon, // :
@@ -159,16 +160,13 @@ namespace build2
token (string v, bool s,
quote_type qt, bool qc, bool qf,
uint64_t l, uint64_t c)
- : token (token_type::word, move (v), s,
- qt, qc, qf,
- l, c,
- &token_printer) {}
+ : token (token_type::word, move (v), s, qt, qc, qf, l, c) {}
token (token_type t,
string v, bool s,
quote_type qt, bool qc, bool qf,
uint64_t l, uint64_t c,
- printer_type* p)
+ printer_type* p = &token_printer)
: type (t), separated (s),
qtype (qt), qcomp (qc), qfirst (qf),
value (move (v)),
diff --git a/libbuild2/types-parsers.cxx b/libbuild2/types-parsers.cxx
index 7b4a65d..9c3dc52 100644
--- a/libbuild2/types-parsers.cxx
+++ b/libbuild2/types-parsers.cxx
@@ -3,6 +3,11 @@
#include <libbuild2/types-parsers.hxx>
+#include <sstream>
+
+#include <libbuild2/lexer.hxx>
+#include <libbuild2/parser.hxx>
+
namespace build2
{
namespace build
@@ -47,6 +52,85 @@ namespace build2
parse_path (x, s);
}
+ static names
+ parse_names (const char* o, const char* v)
+ {
+ using build2::parser;
+ using std::istringstream;
+
+ istringstream is (v);
+ is.exceptions (istringstream::failbit | istringstream::badbit);
+
+ // @@ TODO: currently this issues diagnostics to diag_stream.
+ // Perhaps we should redirect it? Also below.
+ //
+ path_name in (o);
+ lexer l (is, in, 1 /* line */, "\'\"\\$("); // Effective.
+ parser p (nullptr);
+ return p.parse_names (l, nullptr, parser::pattern_mode::preserve);
+ }
+
+ void parser<name>::
+ parse (name& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ names r (parse_names (o, v));
+
+ if (r.size () != 1)
+ throw invalid_value (o, v);
+
+ x = move (r.front ());
+ xs = true;
+ }
+ catch (const failed&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
+ void parser<pair<name, optional<name>>>::
+ parse (pair<name, optional<name>>& x, bool& xs, scanner& s)
+ {
+ const char* o (s.next ());
+
+ if (!s.more ())
+ throw missing_value (o);
+
+ const char* v (s.next ());
+
+ try
+ {
+ names r (parse_names (o, v));
+
+ if (r.size () == 1)
+ {
+ x.first = move (r.front ());
+ x.second = nullopt;
+ }
+ else if (r.size () == 2 && r.front ().pair == '@')
+ {
+ x.first = move (r.front ());
+ x.second = move (r.back ());
+ }
+ else
+ throw invalid_value (o, v);
+
+ xs = true;
+ }
+ catch (const failed&)
+ {
+ throw invalid_value (o, v);
+ }
+ }
+
void parser<structured_result_format>::
parse (structured_result_format& x, bool& xs, scanner& s)
{
diff --git a/libbuild2/types-parsers.hxx b/libbuild2/types-parsers.hxx
index aef00ca..42fc60d 100644
--- a/libbuild2/types-parsers.hxx
+++ b/libbuild2/types-parsers.hxx
@@ -44,6 +44,27 @@ namespace build2
};
template <>
+ struct parser<name>
+ {
+ static void
+ parse (name&, bool&, scanner&);
+
+ static void
+ merge (name& b, const name& a) {b = a;}
+ };
+
+ template <>
+ struct parser<pair<name, optional<name>>>
+ {
+ static void
+ parse (pair<name, optional<name>>&, bool&, scanner&);
+
+ static void
+ merge (pair<name, optional<name>>& b,
+ const pair<name, optional<name>>& a) {b = a;}
+ };
+
+ template <>
struct parser<structured_result_format>
{
static void
diff --git a/libbuild2/types.hxx b/libbuild2/types.hxx
index c260aeb..ea84701 100644
--- a/libbuild2/types.hxx
+++ b/libbuild2/types.hxx
@@ -15,6 +15,7 @@
#include <map>
#include <set>
+#include <list>
#include <array>
#include <tuple>
#include <regex>
@@ -114,6 +115,7 @@ namespace build2
using std::multiset;
using std::array;
using std::vector;
+ using std::list;
using butl::vector_view; // <libbutl/vector-view.hxx>
using butl::small_vector; // <libbutl/small-vector.hxx>
@@ -349,10 +351,16 @@ namespace build2
// Path printing potentially relative with trailing slash for directories.
//
LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::path&); // utility.cxx
+ operator<< (ostream&, const path&); // utility.cxx
+
+ inline ostream&
+ operator<< (ostream& os, const dir_path& d) // For overload resolution.
+ {
+ return build2::operator<< (os, static_cast<const path&> (d));
+ }
LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::path_name_view&); // utility.cxx
+ operator<< (ostream&, const path_name_view&); // utility.cxx
// <libbutl/timestamp.hxx>
//
@@ -371,8 +379,10 @@ namespace build2
using butl::sha256;
// <libbutl/process.hxx>
+ //
using butl::process;
using butl::process_env;
+ using butl::process_exit;
using butl::process_path;
using butl::process_error;
@@ -421,10 +431,11 @@ namespace build2
// Print as recall[@effect].
//
LIBBUILD2_SYMEXPORT ostream&
- operator<< (ostream&, const ::butl::process_path&); // utility.cxx
+ operator<< (ostream&, const process_path&); // utility.cxx
// <libbutl/fdstream.hxx>
//
+ using butl::nullfd;
using butl::auto_fd;
using butl::fdpipe;
using butl::ifdstream;
@@ -506,9 +517,9 @@ namespace build2
location_value (const location&);
- location_value (location_value&&);
+ location_value (location_value&&) noexcept;
location_value (const location_value&);
- location_value& operator= (location_value&&);
+ location_value& operator= (location_value&&) noexcept;
location_value& operator= (const location_value&);
};
diff --git a/libbuild2/types.ixx b/libbuild2/types.ixx
index 750c8c7..ee2a605 100644
--- a/libbuild2/types.ixx
+++ b/libbuild2/types.ixx
@@ -10,7 +10,7 @@ namespace build2
{
if (!l.empty ())
{
- o << l.file;
+ build2::operator<< (o, l.file); // Disambiguate.
if (l.line != 0)
{
@@ -43,7 +43,7 @@ namespace build2
}
inline location_value::
- location_value (location_value&& l)
+ location_value (location_value&& l) noexcept
: location (l.line, l.column),
file (std::move (l.file))
{
@@ -58,7 +58,7 @@ namespace build2
}
inline location_value& location_value::
- operator= (location_value&& l)
+ operator= (location_value&& l) noexcept
{
if (this != &l)
{
diff --git a/libbuild2/utility-installed.cxx b/libbuild2/utility-installed.cxx
index 441e31b..e23add1 100644
--- a/libbuild2/utility-installed.cxx
+++ b/libbuild2/utility-installed.cxx
@@ -14,6 +14,14 @@ namespace build2
#ifdef BUILD2_INSTALL_LIB
const dir_path build_install_lib (BUILD2_INSTALL_LIB);
#endif
+
+#ifdef BUILD2_INSTALL_BUILDFILE
+ const dir_path build_install_buildfile (BUILD2_INSTALL_BUILDFILE);
+#endif
+
+#ifdef BUILD2_INSTALL_DATA
+ const dir_path build_install_data (BUILD2_INSTALL_DATA);
+#endif
}
#endif
diff --git a/libbuild2/utility-uninstalled.cxx b/libbuild2/utility-uninstalled.cxx
index a6bad55..f836de6 100644
--- a/libbuild2/utility-uninstalled.cxx
+++ b/libbuild2/utility-uninstalled.cxx
@@ -7,4 +7,16 @@ namespace build2
{
const bool build_installed = false;
const dir_path build_install_lib; // Empty.
+
+#ifdef BUILD2_INSTALL_BUILDFILE
+ const dir_path build_install_buildfile (BUILD2_INSTALL_BUILDFILE);
+#else
+ const dir_path build_install_buildfile; // Empty.
+#endif
+
+#ifdef BUILD2_INSTALL_DATA
+ const dir_path build_install_data (BUILD2_INSTALL_DATA);
+#else
+ const dir_path build_install_data; // Empty (during bootstrap).
+#endif
}
diff --git a/libbuild2/utility.cxx b/libbuild2/utility.cxx
index 3eeeeaa..1135851 100644
--- a/libbuild2/utility.cxx
+++ b/libbuild2/utility.cxx
@@ -42,7 +42,7 @@ namespace build2
}
ostream&
- operator<< (ostream& os, const ::butl::path& p)
+ operator<< (ostream& os, const path& p)
{
using namespace build2;
@@ -53,7 +53,7 @@ namespace build2
}
ostream&
- operator<< (ostream& os, const ::butl::path_name_view& v)
+ operator<< (ostream& os, const path_name_view& v)
{
assert (!v.empty ());
@@ -61,7 +61,7 @@ namespace build2
}
ostream&
- operator<< (ostream& os, const ::butl::process_path& p)
+ operator<< (ostream& os, const process_path& p)
{
using namespace build2;
@@ -86,6 +86,51 @@ namespace build2
//
namespace build2
{
+ static const char hex_digits[] = "0123456789abcdef";
+
+ string
+ to_string (uint64_t i, int b, size_t w)
+ {
+ // One day we can switch to C++17 std::to_chars().
+ //
+ string r;
+ switch (b)
+ {
+ case 10:
+ {
+ r = to_string (i);
+ if (w > r.size ())
+ r.insert (0, w - r.size (), '0');
+ break;
+ }
+ case 16:
+ {
+ r.reserve (18);
+ r += "0x";
+
+ for (size_t j (64); j != 0; )
+ {
+ j -= 4;
+ size_t d ((i >> j) & 0x0f);
+
+ // Omit leading zeros but watch out for the i==0 corner case.
+ //
+ if (d != 0 || r.size () != 2 || j == 0)
+ r += hex_digits[d];
+ }
+
+ if (w > r.size () - 2)
+ r.insert (2, w - (r.size () - 2), '0');
+
+ break;
+ }
+ default:
+ throw invalid_argument ("unsupported base");
+ }
+
+ return r;
+ }
+
void (*terminate) (bool);
process_path argv0;
@@ -215,11 +260,10 @@ namespace build2
process
run_start (uint16_t verbosity,
const process_env& pe,
- const char* args[],
+ const char* const* args,
int in,
int out,
- bool err,
- const dir_path& cwd,
+ int err,
const location& l)
try
{
@@ -233,17 +277,15 @@ namespace build2
args,
in,
out,
- (err ? 2 : 1),
- (!cwd.empty ()
- ? cwd.string ().c_str ()
- : pe.cwd != nullptr ? pe.cwd->string ().c_str () : nullptr),
+ err,
+ pe.cwd != nullptr ? pe.cwd->string ().c_str () : nullptr,
pe.vars);
}
catch (const process_error& e)
{
if (e.child)
{
- // Note: run_finish() expects this exact message.
+ // Note: run_finish_impl() below expects this exact message.
//
cerr << "unable to execute " << args[0] << ": " << e << endl;
@@ -258,7 +300,7 @@ namespace build2
}
bool
- run_wait (const char* args[], process& pr, const location& loc)
+ run_wait (const char* const* args, process& pr, const location& loc)
try
{
return pr.wait ();
@@ -269,55 +311,330 @@ namespace build2
}
bool
- run_finish_impl (const char* args[],
+ run_finish_impl (const char* const* args,
process& pr,
- bool err,
+ bool f,
const string& l,
+ uint16_t v,
+ bool omit_normal,
const location& loc)
- try
{
tracer trace ("run_finish");
- if (pr.wait ())
- return true;
-
- const process_exit& e (*pr.exit);
-
- if (!e.normal ())
- fail (loc) << "process " << args[0] << " " << e;
+ try
+ {
+ if (pr.wait ())
+ return true;
+ }
+ catch (const process_error& e)
+ {
+ fail (loc) << "unable to execute " << args[0] << ": " << e << endf;
+ }
- // Normall but non-zero exit status.
+ // Note: see similar code in diag_buffer::close().
//
- if (err)
+ const process_exit& pe (*pr.exit);
+ bool ne (pe.normal ());
+
+ // Even if the user redirected the diagnostics, one error that we want to
+ // let through is the inability to execute the program itself. We cannot
+ // reserve a special exit status to signal this so we will just have to
+ // compare the output. In a sense, we treat this as a special case of
+ // abnormal termination. This particular situation will result in a single
+ // error line printed by run_start() above.
+ //
+ if (ne && l.compare (0, 18, "unable to execute ") == 0)
+ fail (loc) << l;
+
+ if (omit_normal && ne)
+ {
+ // While we assume diagnostics has already been issued (to stderr), if
+ // that's not the case, it's a real pain to debug. So trace it. (And
+ // if you think that doesn't happen in sensible programs, check GCC
+ // bug #107448).
+ //
+ l4 ([&]{trace << "process " << args[0] << " " << pe;});
+ }
+ else
{
- // While we assuming diagnostics has already been issued (to STDERR), if
- // that's not the case, it's a real pain to debug. So trace it.
+ // It's unclear whether we should print this only if printing the
+ // command line (we could also do things differently for normal/abnormal
+ // exit). Let's print this always and see how it wears. Note that we now
+ // rely on this in, for example, process_finish(), extract_metadata().
+ //
+ // Note: make sure keep the above trace if decide not to print.
//
- l4 ([&]{trace << "process " << args[0] << " " << e;});
+ diag_record dr;
+ dr << error (loc) << "process " << args[0] << " " << pe;
- throw failed ();
+ if (verb >= 1 && verb <= v)
+ {
+ dr << info << "command line: ";
+ print_process (dr, args);
+ }
}
- // Even if the user asked to suppress diagnostiscs, one error that we
- // want to let through is the inability to execute the program itself.
- // We cannot reserve a special exit status to signal this so we will
- // just have to compare the output. This particular situation will
- // result in a single error line printed by run_start() above.
- //
- if (l.compare (0, 18, "unable to execute ") == 0)
- fail (loc) << l;
+ if (f || !ne)
+ throw failed ();
return false;
}
- catch (const process_error& e)
+
+ bool
+ run_finish_impl (diag_buffer& dbuf,
+ const char* const* args,
+ process& pr,
+ bool f,
+ uint16_t v,
+ bool on,
+ const location& loc)
{
- fail (loc) << "unable to execute " << args[0] << ": " << e << endf;
+ try
+ {
+ pr.wait ();
+ }
+ catch (const process_error& e)
+ {
+ fail (loc) << "unable to execute " << args[0] << ": " << e << endf;
+ }
+
+ const process_exit& pe (*pr.exit);
+
+ dbuf.close (args, pe, v, on, loc);
+
+ if (pe)
+ return true;
+
+ if (f || !pe.normal ())
+ throw failed ();
+
+ return false;
}
void
- run_io_error (const char* args[], const io_error& e)
+ run (context& ctx,
+ const process_env& pe,
+ const char* const* args,
+ uint16_t v)
{
- fail << "io error reading " << args[0] << " output: " << e << endf;
+ if (ctx.phase == run_phase::load)
+ {
+ process pr (run_start (pe, args));
+ run_finish (args, pr, v);
+ }
+ else
+ {
+ process pr (run_start (pe,
+ args,
+ 0 /* stdin */,
+ 1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+ diag_buffer dbuf (ctx, args[0], pr);
+ dbuf.read ();
+ run_finish (dbuf, args, pr, v);
+ }
+ }
+
+ bool
+ run (context& ctx,
+ uint16_t verbosity,
+ const process_env& pe,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ const function<bool (string&, bool)>& f,
+ bool tr,
+ bool err,
+ bool ignore_exit,
+ sha256* checksum)
+ {
+ assert (!err || !ignore_exit);
+
+ if (!err || ctx.phase == run_phase::load)
+ {
+ process pr (run_start (verbosity,
+ pe,
+ args,
+ 0 /* stdin */,
+ -1 /* stdout */,
+ err ? 2 : 1 /* stderr */));
+
+ string l; // Last line of output.
+ try
+ {
+ ifdstream is (move (pr.in_ofd), fdstream_mode::skip);
+
+ bool empty (true);
+
+ // Make sure we keep the last line.
+ //
+ for (bool last (is.peek () == ifdstream::traits_type::eof ());
+ !last && getline (is, l); )
+ {
+ last = (is.peek () == ifdstream::traits_type::eof ());
+
+ if (tr)
+ trim (l);
+
+ if (checksum != nullptr)
+ checksum->append (l);
+
+ if (empty)
+ {
+ empty = f (l, last);
+
+ if (!empty && checksum == nullptr)
+ break;
+ }
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ if (run_wait (args, pr))
+ fail << "io error reading " << args[0] << " output: " << e << endf;
+
+ // If the child process has failed then assume the io error was
+ // caused by that and let run_finish() deal with it.
+ }
+
+ // Omit normal exit code diagnostics if err is false.
+ //
+ if (!(run_finish_impl (args, pr, err, l, finish_verbosity, !err) ||
+ ignore_exit))
+ return false;
+ }
+ else
+ {
+ // We have to use the non-blocking setup since we have to read from stdout
+ // and stderr simultaneously.
+ //
+ process pr (run_start (verbosity,
+ pe,
+ args,
+ 0 /* stdin */,
+ -1 /* stdout */,
+ diag_buffer::pipe (ctx) /* stderr */));
+
+ // Note that while we read both streams until eof in the normal
+ // circumstances, we cannot use fdstream_mode::skip for the exception
+ // case on both of them: we may end up being blocked trying to read one
+ // stream while the process may be blocked writing to the other. So in
+ // case of an exception we only skip the diagnostics and close stdout
+ // hard. The latter should happen first so the order of the dbuf/is
+ // variables is important.
+ //
+ diag_buffer dbuf (ctx, args[0], pr, (fdstream_mode::non_blocking |
+ fdstream_mode::skip));
+ try
+ {
+ ifdstream is (move (pr.in_ofd),
+ fdstream_mode::non_blocking,
+ ifdstream::badbit);
+
+ bool empty (true);
+
+ // Read until we reach EOF on all streams.
+ //
+ // Note that if dbuf is not opened, then we automatically get an
+ // inactive nullfd entry.
+ //
+ fdselect_set fds {is.fd (), dbuf.is.fd ()};
+ fdselect_state& ist (fds[0]);
+ fdselect_state& dst (fds[1]);
+
+ // To detect the last line we are going keep the previous line and
+ // only call the function once we've read the next.
+ //
+ optional<string> pl;
+
+ for (string l; ist.fd != nullfd || dst.fd != nullfd; )
+ {
+ if (ist.fd != nullfd && getline_non_blocking (is, l))
+ {
+ if (eof (is))
+ {
+ if (pl && empty)
+ f (*pl, true /* last */);
+
+ ist.fd = nullfd;
+ }
+ else
+ {
+ if (checksum != nullptr || empty)
+ {
+ if (tr)
+ trim (l);
+
+ if (checksum != nullptr)
+ checksum->append (l);
+
+ if (empty)
+ {
+ if (pl)
+ {
+ if ((empty = f (*pl, false /* last */)))
+ swap (l, *pl);
+
+ // Note that we cannot bail out like in the other version
+ // since we don't have the skip mode on is. Plus, we might
+ // still have the diagnostics.
+ }
+ else
+ pl = move (l);
+ }
+ }
+
+ l.clear ();
+ }
+
+ continue;
+ }
+
+ ifdselect (fds);
+
+ if (dst.ready)
+ {
+ if (!dbuf.read ())
+ dst.fd = nullfd;
+ }
+ }
+
+ is.close ();
+ }
+ catch (const io_error& e)
+ {
+ if (run_wait (args, pr))
+ {
+ // Note that we will drop the diagnostics in this case since reading
+ // it could have been the cause of this error.
+ //
+ fail << "io error reading " << args[0] << " output: " << e << endf;
+ }
+
+ // If the child process has failed then assume the io error was caused
+ // by that and let run_finish() deal with it.
+ }
+
+ run_finish_impl (dbuf, args, pr, true /* fail */, finish_verbosity);
+ }
+
+ return true;
+ }
+
+ cstrings
+ process_args (const char* program, const strings& args)
+ {
+ cstrings r;
+ r.reserve (args.size () + 2);
+
+ r.push_back (program);
+
+ for (const string& a: args)
+ r.push_back (a.c_str ());
+
+ r.push_back (nullptr);
+ return r;
}
fdpipe
diff --git a/libbuild2/utility.hxx b/libbuild2/utility.hxx
index c82dcc2..b534f41 100644
--- a/libbuild2/utility.hxx
+++ b/libbuild2/utility.hxx
@@ -4,14 +4,15 @@
#ifndef LIBBUILD2_UTILITY_HXX
#define LIBBUILD2_UTILITY_HXX
-#include <tuple> // make_tuple()
-#include <memory> // make_shared()
-#include <string> // to_string()
-#include <utility> // move(), forward(), declval(), make_pair(), swap()
-#include <cassert> // assert()
-#include <iterator> // make_move_iterator()
-#include <algorithm> // *
-#include <functional> // ref(), cref()
+#include <tuple> // make_tuple()
+#include <memory> // make_shared()
+#include <string> // to_string()
+#include <utility> // move(), forward(), declval(), make_pair(), swap()
+#include <cassert> // assert()
+#include <iterator> // make_move_iterator(), back_inserter()
+#include <algorithm> // *
+#include <functional> // ref(), cref()
+#include <type_traits>
#include <libbutl/ft/lang.hxx>
@@ -50,10 +51,17 @@ namespace build2
using std::make_tuple;
using std::make_shared;
using std::make_move_iterator;
- using std::to_string;
+ using std::back_inserter;
using std::stoul;
using std::stoull;
+ using std::to_string;
+
+ // Currently only supports base 10 and 16. Note: adds `0x` if base 16.
+ //
+ LIBBUILD2_SYMEXPORT string
+ to_string (uint64_t, int base, size_t width = 0);
+
// <libbutl/utility.hxx>
//
using butl::reverse_iterate;
@@ -69,6 +77,7 @@ namespace build2
using butl::alpha;
using butl::alnum;
using butl::digit;
+ using butl::wspace;
using butl::trim;
using butl::next_word;
@@ -90,12 +99,14 @@ namespace build2
// <libbutl/fdstream.hxx>
//
+ using butl::fdopen_null;
using butl::open_file_or_stdin;
using butl::open_file_or_stdout;
// <libbutl/path-pattern.hxx>
//
using butl::path_pattern;
+ using butl::path_match;
// Perform process-wide initializations/adjustments/workarounds. Should be
// called once early in main(). In particular, besides other things, this
@@ -126,6 +137,7 @@ namespace build2
init_diag (uint16_t verbosity,
bool silent = false,
optional<bool> progress = nullopt,
+ optional<bool> diag_color = nullopt,
bool no_lines = false,
bool no_columns = false,
bool stderr_term = false);
@@ -135,13 +147,21 @@ namespace build2
LIBBUILD2_SYMEXPORT extern bool silent;
// --[no-]progress
+ // --[no-]diag-color
//
LIBBUILD2_SYMEXPORT extern optional<bool> diag_progress_option;
+ LIBBUILD2_SYMEXPORT extern optional<bool> diag_color_option;
LIBBUILD2_SYMEXPORT extern bool diag_no_line; // --no-line
LIBBUILD2_SYMEXPORT extern bool diag_no_column; // --no-column
- LIBBUILD2_SYMEXPORT extern bool stderr_term; // True if stderr is a terminal.
+ // True if stderr is a terminal.
+ //
+ LIBBUILD2_SYMEXPORT extern bool stderr_term;
+
+ // True if the color can be used on the stderr terminal.
+ //
+ LIBBUILD2_SYMEXPORT extern bool stderr_term_color;
// Global state (verbosity, home/work directories, etc).
@@ -170,11 +190,15 @@ namespace build2
LIBBUILD2_SYMEXPORT extern const standard_version build_version;
LIBBUILD2_SYMEXPORT extern const string build_version_interface;
- // Whether running installed build and, if so, the library installation
- // directory (empty otherwise).
+ // Whether running installed build as well as the library installation
+ // directory (only if installed, empty otherwise), the exported buildfile
+ // installation directory (only if configured, empty otherwise), and data
+ // installation directory (only if installed, src_root otherwise).
//
LIBBUILD2_SYMEXPORT extern const bool build_installed;
LIBBUILD2_SYMEXPORT extern const dir_path build_install_lib; // $install.lib
+ LIBBUILD2_SYMEXPORT extern const dir_path build_install_buildfile; // $install.buildfile
+ LIBBUILD2_SYMEXPORT extern const dir_path build_install_data; // $install.data
// --[no-]mtime-check
//
@@ -225,7 +249,7 @@ namespace build2
// Basic process utilities.
//
- // The run*() functions with process_path assume that you are printing
+ // The run*() functions with process_path/_env assume that you are printing
// the process command line yourself.
// Search for a process executable. Issue diagnostics and throw failed in
@@ -259,126 +283,55 @@ namespace build2
[[noreturn]] LIBBUILD2_SYMEXPORT void
run_search_fail (const path&, const location& = location ());
- // Wait for process termination returning true if the process exited
- // normally with a zero code and false otherwise. The latter case is
- // normally followed up with a call to run_finish().
- //
- LIBBUILD2_SYMEXPORT bool
- run_wait (const char* args[], process&, const location& = location ());
-
- bool
- run_wait (cstrings& args, process&, const location& = location ());
-
- // Wait for process termination. Issue diagnostics and throw failed in case
- // of abnormal termination. If the process has terminated normally but with
- // a non-zero exit status, then assume the diagnostics has already been
- // issued and just throw failed. The last argument is used in cooperation
- // with run_start() in case STDERR is redirected to STDOUT.
- //
- void
- run_finish (const char* args[],
- process&,
- const string& = string (),
- const location& = location ());
-
- void
- run_finish (cstrings& args, process& pr, const location& l = location ());
-
- // As above but if the process has exited normally with a non-zero code,
- // then return false rather than throwing.
- //
- bool
- run_finish_code (const char* args[],
- process&,
- const string& = string (),
- const location& = location ());
-
- // Start a process with the specified arguments. If in is -1, then redirect
- // STDIN to a pipe (can also be -2 to redirect to /dev/null or equivalent).
- // If out is -1, redirect STDOUT to a pipe. If error is false, then
- // redirecting STDERR to STDOUT (this can be used to suppress diagnostics
- // from the child process). Issue diagnostics and throw failed in case of an
- // error.
+ // Start a process with the specified arguments. Issue diagnostics and throw
+ // failed in case of an error. If in is -1, then redirect stdin to a pipe
+ // (can also be -2 to redirect it to /dev/null or equivalent). If out is -1,
+ // then redirect stdout to a pipe. If stderr is redirected to stdout (can
+ // be used to analyze diagnostics from the child process), then, in case of
+ // an error, the last line read from stdout must be passed to run_finish()
+ // below.
//
LIBBUILD2_SYMEXPORT process
run_start (uint16_t verbosity,
const process_env&, // Implicit-constructible from process_path.
- const char* args[],
+ const char* const* args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& = location ());
+ int err = 2,
+ const location& = {});
inline process
run_start (uint16_t verbosity,
const process_env& pe,
- cstrings& args,
+ const cstrings& args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& l = location ())
+ int err = 2,
+ const location& l = {})
{
- return run_start (verbosity, pe, args.data (), in, out, error, cwd, l);
+ return run_start (verbosity, pe, args.data (), in, out, err, l);
}
inline process
run_start (const process_env& pe,
- const char* args[],
+ const char* const* args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& l = location ())
+ int err = 2,
+ const location& l = {})
{
- return run_start (verb_never, pe, args, in, out, error, cwd, l);
+ return run_start (verb_never, pe, args, in, out, err, l);
}
inline process
run_start (const process_env& pe,
- cstrings& args,
+ const cstrings& args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
- const location& l = location ())
- {
- return run_start (pe, args.data (), in, out, error, cwd, l);
- }
-
- inline void
- run (const process_env& pe, // Implicit-constructible from process_path.
- const char* args[])
+ int err = 2,
+ const location& l = {})
{
- process pr (run_start (pe, args));
- run_finish (args, pr);
- }
-
- inline void
- run (const process_env& pe, // Implicit-constructible from process_path.
- cstrings& args)
- {
- run (pe, args.data ());
- }
-
- inline void
- run (const process_path& p,
- const char* args[],
- const dir_path& cwd,
- const char* const* env = nullptr)
- {
- process pr (run_start (process_env (p, env), args, 0, 1, true, cwd));
- run_finish (args, pr);
- }
-
- inline void
- run (const process_path& p,
- cstrings& args,
- const dir_path& cwd,
- const char* const* env = nullptr)
- {
- run (p, args.data (), cwd, env);
+ return run_start (pe, args.data (), in, out, err, l);
}
// As above, but search for the process (including updating args[0]) and
@@ -389,16 +342,16 @@ namespace build2
const char* args[],
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
+ int err = 2,
const char* const* env = nullptr,
- const location& l = location ())
+ const dir_path& cwd = {},
+ const location& l = {})
{
process_path pp (run_search (args[0], l));
return run_start (verbosity,
- process_env (pp, env), args,
- in, out, error,
- cwd, l);
+ process_env (pp, cwd, env), args,
+ in, out, err,
+ l);
}
inline process
@@ -406,55 +359,215 @@ namespace build2
cstrings& args,
int in = 0,
int out = 1,
- bool error = true,
- const dir_path& cwd = dir_path (),
+ int err = 2,
const char* const* env = nullptr,
- const location& l = location ())
+ const dir_path& cwd = {},
+ const location& l = {})
{
- return run_start (verbosity, args.data (), in, out, error, cwd, env, l);
+ return run_start (verbosity, args.data (), in, out, err, env, cwd, l);
}
+ // Wait for process termination returning true if the process exited
+ // normally with a zero code and false otherwise. The latter case is
+ // normally followed up with a call to run_finish().
+ //
+ LIBBUILD2_SYMEXPORT bool
+ run_wait (const char* const* args, process&, const location& = location ());
+
+ bool
+ run_wait (const cstrings& args, process&, const location& = location ());
+
+ // Wait for process termination, issues diagnostics, and throw failed.
+ //
+ // If the child process exited abnormally or normally with non-0 code, then
+ // print the error diagnostics to this effect. Additionally, if the
+ // verbosity level is between 1 and the specified value, then print the
+ // command line as info after the error. If omit_normal is true, then don't
+ // print either for the normal exit (usually used for custom diagnostics or
+ // when process failure can be tolerated).
+ //
+ // Normally the specified verbosity will be 1 and the command line args
+ // represent the verbosity level 2 (logical) command line. Or, to put it
+ // another way, it should be 1 less than what gets passed to run_start().
+ // Note that args should only represent a single command in a pipe (see
+ // print_process() for details).
+ //
+ // See also diag_buffer::close().
+ //
+ // The line argument is used in cooperation with run_start() to diagnose a
+ // failure to exec in case stderr is redirected to stdout (see the
+ // implementation for details).
+ //
+ void
+ run_finish (const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ void
+ run_finish (const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ void
+ run_finish (const char* const* args,
+ process&,
+ const string& line,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ // As above but if the process has exited normally with a non-zero code,
+ // then return false rather than throwing.
+ //
+ // Note that the normal non-0 exit diagnostics is omitted by default
+ // assuming appropriate custom diagnostics will be issued, if required.
+ //
+ bool
+ run_finish_code (const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ bool
+ run_finish_code (const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ bool
+ run_finish_code (const char* const* args,
+ process&,
+ const string&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ // As above but with diagnostics buffering.
+ //
+ // Specifically, this version first waits for the process termination, then
+ // calls diag_buffer::close(verbosity, omit_normal), and finally throws
+ // failed if the process didn't exit with 0 code.
+ //
+ class diag_buffer;
+
+ void
+ run_finish (diag_buffer&,
+ const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ void
+ run_finish (diag_buffer&,
+ const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = false,
+ const location& = location ());
+
+ // As above but if the process has exited normally with a non-zero code,
+ // then return false rather than throwing.
+ //
+ // Note that the normal non-0 exit diagnostics is omitted by default
+ // assuming appropriate custom diagnostics will be issued, if required.
+ //
+ bool
+ run_finish_code (diag_buffer&,
+ const char* const* args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ bool
+ run_finish_code (diag_buffer&,
+ const cstrings& args,
+ process&,
+ uint16_t verbosity,
+ bool omit_normal = true,
+ const location& = location ());
+
+ // Run the process with the specified arguments by calling the above start
+ // and finish functions. Buffer diagnostics unless in the load phase.
+ //
+ LIBBUILD2_SYMEXPORT void
+ run (context&,
+ const process_env& pe, // Implicit-constructible from process_path.
+ const char* const* args,
+ uint16_t finish_verbosity);
+
inline void
- run (uint16_t verbosity,
- const char* args[],
- const dir_path& cwd = dir_path (),
- const char* const* env = nullptr)
+ run (context& ctx,
+ const process_env& pe,
+ const cstrings& args,
+ uint16_t finish_verbosity)
{
- process pr (run_start (verbosity, args, 0, 1, true, cwd, env));
- run_finish (args, pr);
+ run (ctx, pe, args.data (), finish_verbosity);
}
+ // As above but pass cwd/env vars as arguments rather than as part of
+ // process_env.
+ //
inline void
- run (uint16_t verbosity,
- cstrings& args,
- const dir_path& cwd = dir_path (),
- const char* const* env = nullptr)
+ run (context& ctx,
+ const process_path& p,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ const char* const* env,
+ const dir_path& cwd = {})
+ {
+ run (ctx, process_env (p, cwd, env), args, finish_verbosity);
+ }
+
+ inline void
+ run (context& ctx,
+ const process_path& p,
+ const cstrings& args,
+ uint16_t finish_verbosity,
+ const char* const* env,
+ const dir_path& cwd = {})
{
- run (verbosity, args.data (), cwd, env);
+ run (ctx, p, args.data (), finish_verbosity, env, cwd);
}
// Start the process as above and then call the specified function on each
// trimmed line of the output until it returns a non-empty object T (tested
// with T::empty()) which is then returned to the caller.
//
+ // If verbosity is specified, print the process commands line at that level
+ // (with the verbosite-1 value passed run_finish()).
+ //
+ // If error is false, then redirecting stderr to stdout and don't fail if
+ // the process exits normally but with non-0 code (can be used to suppress
+ // and/or analyze diagnostics from the child process). Otherwise, buffer
+ // diagnostics unless in the load phase.
+ //
// The predicate can move the value out of the passed string but, if error
// is false, only in case of a "content match" (so that any diagnostics
// lines are left intact). The function signature should be:
//
// T (string& line, bool last)
//
- // If ignore_exit is true, then the program's exit status is ignored (if it
- // is false and the program exits with the non-zero status, then an empty T
- // instance is returned).
+ // If, in addition to error being false, ignore_exit is true, then the
+ // program's normal exit status is ignored (if it is false and the program
+ // exits with the non-zero status, then an empty T instance is returned).
//
// If checksum is not NULL, then feed it the content of each trimmed line
// (including those that come after the callback returns non-empty object).
//
template <typename T, typename F>
T
- run (uint16_t verbosity,
+ run (context&,
+ uint16_t verbosity,
const process_env&, // Implicit-constructible from process_path.
- const char* args[],
+ const char* const* args,
F&&,
bool error = true,
bool ignore_exit = false,
@@ -462,20 +575,55 @@ namespace build2
template <typename T, typename F>
inline T
- run (const process_env& pe, // Implicit-constructible from process_path.
- const char* args[],
+ run (context& ctx,
+ uint16_t verbosity,
+ const process_env& pe,
+ const cstrings& args,
+ F&& f,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr)
+ {
+ return run<T> (ctx,
+ verbosity,
+ pe, args.data (),
+ forward<F> (f),
+ error, ignore_exit, checksum);
+ }
+
+ template <typename T, typename F>
+ inline T
+ run (context&,
+ const process_env&,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ F&&,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr);
+
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ const process_env& pe,
+ const cstrings& args,
+ uint16_t finish_verbosity,
F&& f,
bool error = true,
bool ignore_exit = false,
sha256* checksum = nullptr)
{
- return run<T> (
- verb_never, pe, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ pe, args.data (),
+ finish_verbosity,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const char* args[],
F&& f,
bool error = true,
@@ -483,15 +631,38 @@ namespace build2
sha256* checksum = nullptr)
{
process_path pp (run_search (args[0]));
- return run<T> (
- verbosity, pp, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ pp, args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ uint16_t verbosity,
+ cstrings& args,
+ F&& f,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr)
+ {
+ return run<T> (ctx,
+ verbosity,
+ args.data (),
+ forward<F> (f),
+ error, ignore_exit, checksum);
+ }
+
+ // As above but run a program without any arguments or with one argument.
+ //
// run <prog>
//
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const path& prog,
F&& f,
bool error = true,
@@ -499,13 +670,20 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {prog.string ().c_str (), nullptr};
- return run<T> (
- verbosity, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
template <typename T, typename F>
- inline T
- run (uint16_t verbosity,
+ inline typename std::enable_if<
+ (!std::is_same<typename std::decay<F>::type, const char**>::value &&
+ !std::is_same<typename std::remove_reference<F>::type, cstrings>::value),
+ T>::type
+ run (context& ctx,
+ uint16_t verbosity,
const process_env& pe, // Implicit-constructible from process_path.
F&& f,
bool error = true,
@@ -513,15 +691,19 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {pe.path->recall_string (), nullptr};
- return run<T> (
- verbosity, pe, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ pe, args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
// run <prog> <arg>
//
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const path& prog,
const char* arg,
F&& f,
@@ -530,13 +712,17 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {prog.string ().c_str (), arg, nullptr};
- return run<T> (
- verbosity, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
}
template <typename T, typename F>
inline T
- run (uint16_t verbosity,
+ run (context& ctx,
+ uint16_t verbosity,
const process_env& pe, // Implicit-constructible from process_path.
const char* arg,
F&& f,
@@ -545,8 +731,47 @@ namespace build2
sha256* checksum = nullptr)
{
const char* args[] = {pe.path->recall_string (), arg, nullptr};
- return run<T> (
- verbosity, pe, args, forward<F> (f), error, ignore_exit, checksum);
+ return run<T> (ctx,
+ verbosity,
+ pe, args,
+ forward<F> (f),
+ error, ignore_exit, checksum);
+ }
+
+ // As above but a lower-level interface that erases T and F and can also be
+ // used to suppress trimming.
+ //
+ // The passed function should return true if it should be called again
+ // (i.e., the object is still empty in the T & F interface) and false
+ // otherwise.
+ //
+  // The first version returns true if the result is usable and false
+ // otherwise, depending on the process exit code and error/ignore_exit
+ // values. (In the latter case, the T & F interface makes the resulting
+ // object empty).
+ //
+ LIBBUILD2_SYMEXPORT bool
+ run (context&,
+ uint16_t verbosity,
+ const process_env&,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ const function<bool (string& line, bool last)>&,
+ bool trim = true,
+ bool error = true,
+ bool ignore_exit = false,
+ sha256* checksum = nullptr);
+
+ // Concatenate the program path and arguments into a shallow NULL-terminated
+ // vector of C-strings.
+ //
+ LIBBUILD2_SYMEXPORT cstrings
+ process_args (const char* program, const strings& args);
+
+ inline cstrings
+ process_args (const string& program, const strings& args)
+ {
+ return process_args (program.c_str (), args);
}
// File descriptor streams.
diff --git a/libbuild2/utility.ixx b/libbuild2/utility.ixx
index aedfc94..58ea8db 100644
--- a/libbuild2/utility.ixx
+++ b/libbuild2/utility.ixx
@@ -6,42 +6,195 @@
namespace build2
{
inline bool
- run_wait (cstrings& args, process& pr, const location& loc)
+ run_wait (const cstrings& args, process& pr, const location& loc)
{
return run_wait (args.data (), pr, loc);
}
- // Note: currently this function is also used in a run() implementations.
+ // Note: these functions are also used in the run() implementations.
//
LIBBUILD2_SYMEXPORT bool
- run_finish_impl (const char*[],
+ run_finish_impl (const char* const*,
process&,
- bool error,
+ bool fail,
const string&,
- const location& = location ());
+ uint16_t,
+ bool = false,
+ const location& = {});
+
+ LIBBUILD2_SYMEXPORT bool
+ run_finish_impl (diag_buffer&,
+ const char* const*,
+ process&,
+ bool fail,
+ uint16_t,
+ bool = false,
+ const location& = {});
inline void
- run_finish (const char* args[],
+ run_finish (const char* const* args,
process& pr,
- const string& l,
+ uint16_t v,
+ bool on,
const location& loc)
{
- run_finish_impl (args, pr, true /* error */, l, loc);
+ run_finish_impl (args, pr, true /* fail */, string (), v, on, loc);
}
inline void
- run_finish (cstrings& args, process& pr, const location& loc)
+ run_finish (const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish (args.data (), pr, v, on, loc);
+ }
+
+ inline void
+ run_finish (const char* const* args,
+ process& pr,
+ const string& l,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish_impl (args, pr, true, l, v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (const char* const* args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ return run_finish_impl (args, pr, false, string (), v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
{
- run_finish (args.data (), pr, string (), loc);
+ return run_finish_code (args.data (), pr, v, on, loc);
}
inline bool
- run_finish_code (const char* args[],
+ run_finish_code (const char* const* args,
process& pr,
const string& l,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ return run_finish_impl (args, pr, false, l, v, on, loc);
+ }
+
+ inline void
+ run_finish (diag_buffer& dbuf,
+ const char* const* args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish_impl (dbuf, args, pr, true /* fail */, v, on, loc);
+ }
+
+ inline void
+ run_finish (diag_buffer& dbuf,
+ const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ run_finish_impl (dbuf, args.data (), pr, true, v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (diag_buffer& dbuf,
+ const char* const* args,
+ process& pr,
+ uint16_t v,
+ bool on,
+ const location& loc)
+ {
+ return run_finish_impl (dbuf, args, pr, false, v, on, loc);
+ }
+
+ inline bool
+ run_finish_code (diag_buffer& dbuf,
+ const cstrings& args,
+ process& pr,
+ uint16_t v,
+ bool on,
const location& loc)
{
- return run_finish_impl (args, pr, false /* error */, l, loc);
+ return run_finish_impl (dbuf, args.data (), pr, false, v, on, loc);
+ }
+
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ uint16_t verbosity,
+ const process_env& pe,
+ const char* const* args,
+ F&& f,
+ bool err,
+ bool ignore_exit,
+ sha256* checksum)
+ {
+ T r;
+ if (!run (ctx,
+ verbosity,
+ pe, args,
+ verbosity - 1,
+            [&r, &f] (string& l, bool last) // Small function optimization.
+ {
+ r = f (l, last);
+ return r.empty ();
+ },
+ true /* trim */,
+ err,
+ ignore_exit,
+ checksum))
+ r = T ();
+
+ return r;
+ }
+
+ template <typename T, typename F>
+ inline T
+ run (context& ctx,
+ const process_env& pe,
+ const char* const* args,
+ uint16_t finish_verbosity,
+ F&& f,
+ bool err,
+ bool ignore_exit,
+ sha256* checksum)
+ {
+ T r;
+ if (!run (ctx,
+ verb_never,
+ pe, args,
+ finish_verbosity,
+ [&r, &f] (string& l, bool last)
+ {
+ r = f (l, last);
+ return r.empty ();
+ },
+ true /* trim */,
+ err,
+ ignore_exit,
+ checksum))
+ r = T ();
+
+ return r;
}
inline void
diff --git a/libbuild2/utility.txx b/libbuild2/utility.txx
index bb25288..d2fc29c 100644
--- a/libbuild2/utility.txx
+++ b/libbuild2/utility.txx
@@ -54,68 +54,4 @@ namespace build2
return p;
}
-
- [[noreturn]] LIBBUILD2_SYMEXPORT void
- run_io_error (const char*[], const io_error&);
-
- template <typename T, typename F>
- T
- run (uint16_t verbosity,
- const process_env& pe,
- const char* args[],
- F&& f,
- bool err,
- bool ignore_exit,
- sha256* checksum)
- {
- process pr (run_start (verbosity,
- pe,
- args,
- 0 /* stdin */,
- -1 /* stdout */,
- err));
- T r;
- string l; // Last line of output.
-
- try
- {
- ifdstream is (move (pr.in_ofd), butl::fdstream_mode::skip);
-
- // Make sure we keep the last line.
- //
- for (bool last (is.peek () == ifdstream::traits_type::eof ());
- !last && getline (is, l); )
- {
- last = (is.peek () == ifdstream::traits_type::eof ());
-
- trim (l);
-
- if (checksum != nullptr)
- checksum->append (l);
-
- if (r.empty ())
- {
- r = f (l, last);
-
- if (!r.empty () && checksum == nullptr)
- break;
- }
- }
-
- is.close ();
- }
- catch (const io_error& e)
- {
- if (run_wait (args, pr))
- run_io_error (args, e);
-
- // If the child process has failed then assume the io error was
- // caused by that and let run_finish() deal with it.
- }
-
- if (!(run_finish_impl (args, pr, err, l) || ignore_exit))
- r = T ();
-
- return r;
- }
}
diff --git a/libbuild2/variable.cxx b/libbuild2/variable.cxx
index 8a063f7..078c13a 100644
--- a/libbuild2/variable.cxx
+++ b/libbuild2/variable.cxx
@@ -3,10 +3,15 @@
#include <libbuild2/variable.hxx>
-#include <cstring> // memcmp()
+#include <cstring> // memcmp(), memcpy()
#include <libbutl/path-pattern.hxx>
+#ifndef BUILD2_BOOTSTRAP
+# include <libbutl/json/parser.hxx>
+# include <libbutl/json/serializer.hxx>
+#endif
+
#include <libbuild2/target.hxx>
#include <libbuild2/diagnostics.hxx>
@@ -47,7 +52,7 @@ namespace build2
}
value::
- value (value&& v)
+ value (value&& v) noexcept
: type (v.type), null (v.null), extra (v.extra)
{
if (!null)
@@ -57,7 +62,7 @@ namespace build2
else if (type->copy_ctor != nullptr)
type->copy_ctor (*this, v, true);
else
- data_ = v.data_; // Copy as POD.
+ memcpy (data_, v.data_, size_); // Copy as POD.
}
}
@@ -72,7 +77,7 @@ namespace build2
else if (type->copy_ctor != nullptr)
type->copy_ctor (*this, v, false);
else
- data_ = v.data_; // Copy as POD.
+ memcpy (data_, v.data_, size_); // Copy as POD.
}
}
@@ -99,12 +104,14 @@ namespace build2
if (null)
new (&data_) names (move (v).as<names> ());
else
+ // Note: can throw (see small_vector for details).
+ //
as<names> () = move (v).as<names> ();
}
else if (auto f = null ? type->copy_ctor : type->copy_assign)
f (*this, v, true);
else
- data_ = v.data_; // Assign as POD.
+ memcpy (data_, v.data_, size_); // Assign as POD.
null = v.null;
}
@@ -143,7 +150,7 @@ namespace build2
else if (auto f = null ? type->copy_ctor : type->copy_assign)
f (*this, v, false);
else
- data_ = v.data_; // Assign as POD.
+ memcpy (data_, v.data_, size_); // Assign as POD.
null = v.null;
}
@@ -367,8 +374,8 @@ namespace build2
// Typification is kind of like caching so we reuse that mutex shard.
//
shared_mutex& m (
- ctx.mutexes.variable_cache[
- hash<value*> () (&v) % ctx.mutexes.variable_cache_size]);
+ ctx.mutexes->variable_cache[
+ hash<value*> () (&v) % ctx.mutexes->variable_cache_size]);
// Note: v.type is rechecked by typify() under lock.
//
@@ -377,7 +384,7 @@ namespace build2
}
void
- untypify (value& v)
+ untypify (value& v, bool reduce)
{
if (v.type == nullptr)
return;
@@ -389,7 +396,7 @@ namespace build2
}
names ns;
- names_view nv (v.type->reverse (v, ns));
+ names_view nv (v.type->reverse (v, ns, reduce));
if (nv.empty () || nv.data () == ns.data ())
{
@@ -451,14 +458,14 @@ namespace build2
m = "invalid " + t + " value ";
if (n.simple ())
- m += "'" + n.value + "'";
+ m += '\'' + n.value + '\'';
else if (n.directory ())
- m += "'" + n.dir.representation () + "'";
+ m += '\'' + n.dir.representation () + '\'';
else
- m += "name '" + to_string (n) + "'";
+ m += "name '" + to_string (n) + '\'';
}
- throw invalid_argument (m);
+ throw invalid_argument (move (m));
}
// names
@@ -493,6 +500,7 @@ namespace build2
type_name,
sizeof (bool),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -503,7 +511,9 @@ namespace build2
&simple_reverse<bool>,
nullptr, // No cast (cast data_ directly).
nullptr, // No compare (compare as POD).
- nullptr // Never empty.
+ nullptr, // Never empty.
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// int64_t value
@@ -515,13 +525,22 @@ namespace build2
{
try
{
- // May throw invalid_argument or out_of_range.
- //
- size_t i;
- int64_t r (stoll (n.value, &i));
+ const string& v (n.value);
+
+ if (!wspace (v[0]))
+ {
+ // Note that unlike uint64, we don't support hex notation for int64.
- if (i == n.value.size ())
- return r;
+ // May throw invalid_argument or out_of_range.
+ //
+ size_t i;
+ int64_t r (stoll (v, &i));
+
+ if (i == v.size ())
+ return r;
+
+ // Fall through.
+ }
// Fall through.
}
@@ -541,6 +560,7 @@ namespace build2
type_name,
sizeof (int64_t),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -551,7 +571,9 @@ namespace build2
&simple_reverse<int64_t>,
nullptr, // No cast (cast data_ directly).
nullptr, // No compare (compare as POD).
- nullptr // Never empty.
+ nullptr, // Never empty.
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// uint64_t value
@@ -563,13 +585,24 @@ namespace build2
{
try
{
- // May throw invalid_argument or out_of_range.
- //
- size_t i;
- uint64_t r (stoull (n.value, &i));
+ const string& v (n.value);
+
+ if (!wspace (v[0]))
+ {
+ // Note: see also similar code in to_json_value().
+ //
+ int b (v[0] == '0' && (v[1] == 'x' || v[1] == 'X') ? 16 : 10);
+
+ // May throw invalid_argument or out_of_range.
+ //
+ size_t i;
+ uint64_t r (stoull (v, &i, b));
- if (i == n.value.size ())
- return r;
+ if (i == v.size ())
+ return r;
+
+ // Fall through.
+ }
// Fall through.
}
@@ -589,6 +622,7 @@ namespace build2
type_name,
sizeof (uint64_t),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
nullptr, // No dtor (POD).
nullptr, // No copy_ctor (POD).
@@ -599,7 +633,9 @@ namespace build2
&simple_reverse<uint64_t>,
nullptr, // No cast (cast data_ directly).
nullptr, // No compare (compare as POD).
- nullptr // Never empty.
+ nullptr, // Never empty.
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// string value
@@ -612,28 +648,31 @@ namespace build2
// the common cases (unqualified, unpaired simple name or directory).
//
- // We can only convert project-qualified simple and directory names.
+ // We can only convert project-qualified untyped names.
//
- if (n.pattern ||
- !(n.simple (true) || n.directory (true)))
+ if (n.pattern || n.typed ())
throw_invalid_argument (n, nullptr, "string");
if (r != nullptr)
{
- if (r->pattern ||
- !(r->simple (true) || r->directory (true)))
+ if (r->pattern || r->typed ())
throw_invalid_argument (*r, nullptr, "string");
}
string s;
- if (n.directory (true))
+ if (n.simple (true))
+ s.swap (n.value);
+ else
+ {
// Note that here we cannot assume what's in dir is really a
// path (think s/foo/bar/) so we have to reverse it exactly.
//
s = move (n.dir).representation (); // Move out of path.
- else
- s.swap (n.value);
+
+ if (!n.value.empty ())
+ s += n.value; // Separator is already there.
+ }
// Convert project qualification to its string representation.
//
@@ -657,10 +696,15 @@ namespace build2
s += '%';
}
- if (r->directory (true))
- s += move (r->dir).representation ();
- else
+ if (r->simple (true))
s += r->value;
+ else
+ {
+ s += move (r->dir).representation ();
+
+ if (!r->value.empty ())
+ s += r->value;
+ }
}
return s;
@@ -675,6 +719,7 @@ namespace build2
type_name,
sizeof (string),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<string>,
&default_copy_ctor<string>,
@@ -685,7 +730,9 @@ namespace build2
&simple_reverse<string>,
nullptr, // No cast (cast data_ directly).
&simple_compare<string>,
- &default_empty<string>
+ &default_empty<string>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// path value
@@ -742,6 +789,7 @@ namespace build2
type_name,
sizeof (path),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<path>,
&default_copy_ctor<path>,
@@ -752,7 +800,9 @@ namespace build2
&simple_reverse<path>,
nullptr, // No cast (cast data_ directly).
&simple_compare<path>,
- &default_empty<path>
+ &default_empty<path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// dir_path value
@@ -809,6 +859,7 @@ namespace build2
sizeof (dir_path),
&value_traits<path>::value_type, // Base (assuming direct cast works for
// both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<dir_path>,
&default_copy_ctor<dir_path>,
@@ -819,7 +870,9 @@ namespace build2
&simple_reverse<dir_path>,
nullptr, // No cast (cast data_ directly).
&simple_compare<dir_path>,
- &default_empty<dir_path>
+ &default_empty<dir_path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// abs_dir_path value
@@ -843,7 +896,14 @@ namespace build2
return abs_dir_path (move (d));
}
- catch (const invalid_path&) {} // Fall through.
+ catch (invalid_path& e)
+ {
+ // We moved from name so reconstruct the path. Let's always make it
+ // simple since we may not be able to construct dir_path. Should be
+ // good enough for diagnostics.
+ //
+ n.value = move (e.path);
+ }
}
throw_invalid_argument (n, r, "abs_dir_path");
@@ -857,6 +917,7 @@ namespace build2
sizeof (abs_dir_path),
&value_traits<dir_path>::value_type, // Base (assuming direct cast works
// for both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<abs_dir_path>,
&default_copy_ctor<abs_dir_path>,
@@ -867,7 +928,9 @@ namespace build2
&simple_reverse<abs_dir_path>,
nullptr, // No cast (cast data_ directly).
&simple_compare<abs_dir_path>,
- &default_empty<abs_dir_path>
+ &default_empty<abs_dir_path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// name value
@@ -882,10 +945,10 @@ namespace build2
}
static names_view
- name_reverse (const value& v, names&)
+ name_reverse (const value& v, names&, bool reduce)
{
const name& n (v.as<name> ());
- return n.empty () ? names_view (nullptr, 0) : names_view (&n, 1);
+ return reduce && n.empty () ? names_view (nullptr, 0) : names_view (&n, 1);
}
const char* const value_traits<name>::type_name = "name";
@@ -895,6 +958,7 @@ namespace build2
type_name,
sizeof (name),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<name>,
&default_copy_ctor<name>,
@@ -905,7 +969,9 @@ namespace build2
&name_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<name>,
- &default_empty<name>
+ &default_empty<name>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// name_pair
@@ -949,13 +1015,13 @@ namespace build2
}
static names_view
- name_pair_reverse (const value& v, names& ns)
+ name_pair_reverse (const value& v, names& ns, bool reduce)
{
const name_pair& p (v.as<name_pair> ());
const name& f (p.first);
const name& s (p.second);
- if (f.empty () && s.empty ())
+ if (reduce && f.empty () && s.empty ())
return names_view (nullptr, 0);
if (f.empty ())
@@ -977,6 +1043,7 @@ namespace build2
type_name,
sizeof (name_pair),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<name_pair>,
&default_copy_ctor<name_pair>,
@@ -987,7 +1054,9 @@ namespace build2
&name_pair_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<name_pair>,
- &default_empty<name_pair>
+ &default_empty<name_pair>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// process_path value
@@ -1107,10 +1176,14 @@ namespace build2
}
static names_view
- process_path_reverse (const value& v, names& s)
+ process_path_reverse (const value& v, names& s, bool)
{
const auto& x (v.as<process_path> ());
+ // Note that strictly speaking process_path doesn't have empty
+ // representation (see convert() above). Thus we always return reduced
+ // representation.
+ //
if (!x.empty ())
{
s.reserve (x.effect.empty () ? 1 : 2);
@@ -1127,6 +1200,7 @@ namespace build2
type_name,
sizeof (process_path),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<process_path>,
&process_path_copy_ctor<process_path>,
@@ -1137,7 +1211,9 @@ namespace build2
&process_path_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<process_path>,
- &default_empty<process_path>
+ &default_empty<process_path>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// process_path_ex value
@@ -1275,10 +1351,13 @@ namespace build2
}
static names_view
- process_path_ex_reverse (const value& v, names& s)
+ process_path_ex_reverse (const value& v, names& s, bool)
{
const auto& x (v.as<process_path_ex> ());
+ // Note that process_path_ex only has reduced empty representation (see
+ // convert() above).
+ //
if (!x.empty ())
{
s.reserve ((x.effect.empty () ? 1 : 2) +
@@ -1322,6 +1401,7 @@ namespace build2
sizeof (process_path_ex),
&value_traits< // Base (assuming direct cast works
process_path>::value_type, // for both).
+ false, // Not container.
nullptr, // No element.
&default_dtor<process_path_ex>,
&process_path_ex_copy_ctor,
@@ -1332,7 +1412,9 @@ namespace build2
&process_path_ex_reverse,
nullptr, // No cast (cast data_ directly).
&simple_compare<process_path>, // For now compare as process_path.
- &default_empty<process_path_ex>
+ &default_empty<process_path_ex>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// target_triplet value
@@ -1363,6 +1445,7 @@ namespace build2
type_name,
sizeof (target_triplet),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<target_triplet>,
&default_copy_ctor<target_triplet>,
@@ -1373,7 +1456,9 @@ namespace build2
&simple_reverse<target_triplet>,
nullptr, // No cast (cast data_ directly).
&simple_compare<target_triplet>,
- &default_empty<target_triplet>
+ &default_empty<target_triplet>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// project_name value
@@ -1407,6 +1492,7 @@ namespace build2
type_name,
sizeof (project_name),
nullptr, // No base.
+ false, // Not container.
nullptr, // No element.
&default_dtor<project_name>,
&default_copy_ctor<project_name>,
@@ -1417,7 +1503,1105 @@ namespace build2
&simple_reverse<project_name>,
nullptr, // No cast (cast data_ directly).
&simple_compare<project_name>,
- &default_empty<project_name>
+ &default_empty<project_name>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
+ };
+
+ // json
+ //
+ static string
+ to_string_value (name& n, const char* what)
+ {
+ if (n.typed () || n.qualified () || n.pattern)
+ throw_invalid_argument (n, nullptr, what);
+
+ string s;
+
+ if (n.simple ())
+ s.swap (n.value);
+ else
+ {
+ // Note that here we cannot assume what's in dir is really a path (think
+ // s/foo/bar/) so we have to reverse it exactly.
+ //
+ s = move (n.dir).representation (); // Move out of path.
+
+ if (!n.value.empty ())
+ s += n.value; // Separator is already there.
+ }
+
+ return s;
+ }
+
+ static json_value
+ to_json_value (name& n, const char* what)
+ {
+ if (n.typed () || n.qualified () || n.pattern)
+ throw_invalid_argument (n, nullptr, what);
+
+ string s;
+
+ if (n.simple ())
+ s.swap (n.value);
+ else
+ {
+ // Note that here we cannot assume what's in dir is really a path (think
+ // s/foo/bar/) so we have to reverse it exactly.
+ //
+ s = move (n.dir).representation (); // Move out of path.
+
+ if (!n.value.empty ())
+ s += n.value; // Separator is already there.
+
+ // A path is always interpreted as a JSON string.
+ //
+ return json_value (move (s));
+ }
+
+ bool f;
+ if (s.empty ())
+ return json_value (string ());
+ if (s == "null")
+ return json_value ();
+ else if ((f = (s == "true")) || s == "false")
+ return json_value (f);
+ else if (s.find_first_not_of (
+ "0123456789", (f = (s[0] == '-')) ? 1 : 0) == string::npos)
+ {
+ name n (move (s));
+ return f
+ ? json_value (value_traits<int64_t>::convert (n, nullptr))
+ : json_value (value_traits<uint64_t>::convert (n, nullptr));
+ }
+ //
+ // Handle the hex notation similar to <uint64_t>::convert() (and JSON5).
+ //
+ else if (s[0] == '0' &&
+ (s[1] == 'x' || s[1] == 'X') &&
+ s.size () > 2 &&
+ s.find_first_not_of ("0123456789aAbBcCdDeEfF", 2) == string::npos)
+ {
+ return json_value (
+ value_traits<uint64_t>::convert (name (move (s)), nullptr),
+ true /* hex */);
+ }
+ else
+ {
+ // If this is not a JSON representation of string, array, or object,
+ // then treat it as a string.
+ //
+ // Note that the special `"`, `{`, and `[` characters could be preceded
+ // with whitespaces. Note: see similar test in json_object below.
+ //
+ size_t p (s.find_first_not_of (" \t\n\r"));
+
+ if (p == string::npos || (s[p] != '"' && s[p] != '{' && s[p] != '['))
+ return json_value (move (s));
+
+ // Parse as valid JSON input text.
+ //
+#ifndef BUILD2_BOOTSTRAP
+ try
+ {
+ json_parser p (s, nullptr /* name */);
+ return json_value (p);
+ }
+ catch (const invalid_json_input& e)
+ {
+ // Turned out printing line/column/offset can be misleading since we
+      // could be parsing a single name from a potential list of names.
+      // Feels like without also printing the value this is not of much use.
+ //
+#if 0
+ string m ("invalid json input at line ");
+ m += to_string (e.line);
+ m += ", column ";
+ m += to_string (e.column);
+ m += ", byte offset ";
+ m += to_string (e.position);
+ m += ": ";
+ m += e.what ();
+#else
+ string m ("invalid json input: ");
+ m += e.what ();
+#endif
+ throw invalid_argument (move (m));
+ }
+#else
+ throw invalid_argument ("json parsing requested during bootstrap");
+#endif
+ }
+ }
+
+ json_value value_traits<json_value>::
+ convert (name&& l, name* r)
+ {
+ // Here we expect either a simple value or a serialized representation.
+ //
+ if (r != nullptr)
+ throw invalid_argument ("pair in json element value");
+
+ return to_json_value (l, "json element");
+ }
+
+ json_value value_traits<json_value>::
+ convert (names&& ns)
+ {
+ size_t n (ns.size ());
+
+ if (n == 0)
+ {
+ // Note: this is the ([json] ) case, not ([json] ""). See also the
+ // relevant note in json_reverse() below.
+ //
+ return json_value (); // null
+ }
+ else if (n == 1)
+ {
+ return to_json_value (ns.front (), "json");
+ }
+ else
+ {
+ if (ns.front ().pair) // object
+ {
+ json_value r (json_type::object);
+ r.object.reserve (n / 2);
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ if (!i->pair)
+ throw invalid_argument (
+ "expected pair in json member value '" + to_string (*i) + '\'');
+
+ // Note that we could support JSON-quoted member names but it's
+ // unclear why would someone want that (and if they do, they can
+ // always specify JSON text instead).
+ //
+ // @@ The empty pair value ([json] one@ ) which is currently empty
+ // string is inconsistent with empty value ([json] ) above which
+ // is null. Maybe we could distinguish the one@ and one@"" cases
+ // via type hints?
+ //
+ string n (to_string_value (*i, "json member name"));
+ json_value v (to_json_value (*++i, "json member"));
+
+ // Check for duplicates. One can use append/prepend to merge.
+ //
+ if (find_if (r.object.begin (), r.object.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }) != r.object.end ())
+ {
+ throw invalid_argument (
+ "duplicate json object member '" + n + '\'');
+ }
+
+ r.object.push_back (json_member {move (n), move (v)});
+ }
+
+ return r;
+ }
+ else // array
+ {
+ json_value r (json_type::array);
+ r.array.reserve (n);
+
+ for (name& n: ns)
+ {
+ if (n.pair)
+ throw invalid_argument (
+ "unexpected pair in json array element value '" +
+ to_string (n) + '\'');
+
+ r.array.push_back (to_json_value (n, "json array element"));
+ }
+
+ return r;
+ }
+ }
+ }
+
+ static void
+ json_assign (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_value>;
+
+ try
+ {
+ traits::assign (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json value";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_append (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_value>;
+
+ try
+ {
+ traits::append (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json value";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_prepend (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_value>;
+
+ try
+ {
+ traits::prepend (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json value";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ name value_traits<json_value>::
+ reverse (const json_value& v)
+ {
+ switch (v.type)
+ {
+ case json_type::null:
+ {
+ // Note that here we cannot return empty (e.g., to be consistent with
+ // other places) because we treat empty name (as opposed to empty
+ // names) as string, not null (see to_json_value() above).
+ //
+ // Thankfully this version of reverse() is only used when json_value
+ // representation is needed as part of a container. Which means in
+ // "consumption" contexts (e.g., result of subscript) null will still
+ // decay to empty.
+ //
+#if 1
+ return name ("null");
+#else
+ return name ();
+#endif
+ }
+ case json_type::boolean:
+ {
+ return name (v.boolean ? "true" : "false");
+ }
+ case json_type::signed_number:
+ {
+ return value_traits<int64_t>::reverse (v.signed_number);
+ }
+ case json_type::unsigned_number:
+ {
+ return value_traits<uint64_t>::reverse (v.unsigned_number);
+ }
+ case json_type::hexadecimal_number:
+ {
+ return name (to_string (v.unsigned_number, 16));
+ }
+ case json_type::string:
+ //
+ // @@ Hm, it would be nice if this somehow got mapped to unquoted
+ // string but still be round-trippable to JSON value. Perhaps via
+    //       the type hint idea? This is pretty bad. See also subscript where
+    //       we hacked around this somewhat.
+ //
+ // Note that it may be tempting to fix this by only quoting strings
+ // that would otherwise be mis-interpreted (null, true, all digits,
+ // etc). But that would be worse: things would seem to work but
+ // fall apart in the perhaps unlikely event of encountering one of
+ // the problematic values. It is better to produce a consistent
+ // result.
+ //
+ case json_type::array:
+ case json_type::object:
+ {
+ // Serialize as JSON output text.
+ //
+ string o;
+
+#ifndef BUILD2_BOOTSTRAP
+ try
+ {
+ // Disable pretty-printing so that the output is all on the same
+ // line. While it's not going to be easy to read for larger JSON
+ // outputs, it will fit better into the existing model where none of
+ // the value representations use formatting newlines. If a pretty-
+ // printed representation is required, then the $json.serialize()
+ // function can be used to obtain it.
+ //
+ json_buffer_serializer s (o, 0 /* indentation */);
+ v.serialize (s);
+ }
+ catch (const invalid_json_output& e)
+ {
+ // Note that while it feels like value_traits::reverse() should
+ // throw invalid_argument, we don't currently handle it anywhere so
+ // for now let's just fail.
+ //
+ // Note: the same diagnostics as in $json.serialize().
+ //
+ diag_record dr;
+ dr << fail << "invalid json value: " << e;
+
+ if (e.event)
+ dr << info << "while serializing " << to_string (*e.event);
+
+ if (e.offset != string::npos)
+ dr << info << "offending byte offset " << e.offset;
+ }
+#else
+ fail << "json serialization requested during bootstrap";
+#endif
+ return name (move (o));
+ }
+ }
+
+ assert (false);
+ return name ();
+ }
+
+ static names_view
+ json_reverse (const value& x, names& ns, bool reduce)
+ {
+ const json_value& v (x.as<json_value> ());
+
+ // @@ Hm, it would be nice if JSON null somehow got mapped to [null]/empty
+ // but still be round-trippable to JSON null. Perhaps via type hint?
+ //
 + // But won't `print ([json] null)` printing nothing be surprising?
 + // Also, it's not clear that mapping JSON null to our [null] is a good
+ // idea since our [null] means "no value" while JSON null means "null
+ // value".
+ //
+ // Maybe the current semantics is the best: we map our [null] and empty
+ // names to JSON null (naturally) but we always reverse JSON null to
+ // the JSON "null" literal. Or maybe we could reverse it to null but
 + // type-hint it that it's a spelling of [null]/empty. Quite fuzzy,
+ // admittedly. In our model null values decay to empty so JSON null
+ // decaying to "null" literal is strange. Let's try and see how it
+ // goes. See also json_subscript_impl() below.
+ //
+ if (v.type != json_type::null || !reduce)
+ ns.push_back (value_traits<json_value>::reverse (v));
+
+ return ns;
+ }
+
+ static int
+ json_compare (const value& l, const value& r)
+ {
+ return l.as<json_value> ().compare (r.as<json_value> ());
+ }
+
+ // Return the value as well as the indication of whether the index/name is
+ // in range.
+ //
+ static pair<value, bool>
+ json_subscript_impl (const value& val, value* val_data,
+ uint64_t i, const string& n, bool index)
+ {
+ const json_value& jv (val.as<json_value> ());
+
+ json_value jr;
+
+ if (index)
+ {
+ if (i >= (jv.type == json_type::array ? jv.array.size () :
+ jv.type == json_type::object ? jv.object.size () :
+ jv.type == json_type::null ? 0 : 1))
+ return make_pair (value (), false);
+
+ switch (jv.type)
+ {
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ case json_type::string:
+ {
+ // Steal the value if possible.
+ //
+ jr = (&val == val_data
+ ? json_value (move (const_cast<json_value&> (jv)))
+ : json_value (jv));
+ break;
+ }
+ case json_type::array:
+ {
+ // Steal the value if possible.
+ //
+ const json_value& r (jv.array[i]);
+ jr = (&val == val_data
+ ? json_value (move (const_cast<json_value&> (r)))
+ : json_value (r));
+ break;
+ }
+ case json_type::object:
+ {
+ // Represent as an object with one member.
+ //
+ new (&jr.object) json_value::object_type ();
+ jr.type = json_type::object;
+
+ // Steal the member if possible.
+ //
+ const json_member& m (jv.object[i]);
+ jr.object.push_back (&val == val_data
+ ? json_member (move (const_cast<json_member&> (m)))
+ : json_member (m));
+ break;
+ }
+ case json_type::null:
+ assert (false);
+ }
+ }
+ else
+ {
+ auto i (find_if (jv.object.begin (),
+ jv.object.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }));
+
+ if (i == jv.object.end ())
+ return make_pair (value (), false);
+
+ // Steal the member value if possible.
+ //
+ jr = (&val == val_data
+ ? json_value (move (const_cast<json_value&> (i->value)))
+ : json_value (i->value));
+ }
+
+ // @@ As a temporary work around for the lack of type hints (see
+ // json_reverse() for background), reverse simple JSON values to the
+ // corresponding fundamental type values. The thinking here is that
+ // subscript (and iteration) is primarily meant for consumption (as
+ // opposed to reverse() where it is used to build up values and thus
+ // needs things to be fully reversible). Once we add type hints, then
+ // this should become unnecessary and we should be able to just always
+ // return json_value. See also $json.member_value() where we do the
+ // same thing.
+ //
 + // @@ TODO: split this function into two (index/name) once we get rid of this.
+ //
+ value r;
+ switch (jr.type)
+ {
+ // Seeing that we are reversing for consumption, it feels natural to
+ // reverse JSON null to our [null] rather than empty. This, in
+ // particular, helps chained subscript.
+ //
+#if 0
+ case json_type::null: r = value (names {}); break;
+#else
+ case json_type::null: r = value (); break;
+#endif
+ case json_type::boolean: r = value (jr.boolean); break;
+ case json_type::signed_number: r = value (jr.signed_number); break;
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number: r = value (jr.unsigned_number); break;
+ case json_type::string: r = value (move (jr.string)); break;
+ case json_type::array:
+ case json_type::object: r = value (move (jr)); break;
+ }
+
+ return make_pair (move (r), true);
+ }
+
+ static value
+ json_subscript (const value& val, value* val_data,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ const json_value* jv (val.null ? nullptr : &val.as<json_value> ());
+
+ // For consistency with other places treat JSON null value as maybe
+ // missing array/object. In particular, we don't want to fail trying to
+ // lookup by-name on a null value which could have been an object.
+ //
+ if (jv != nullptr && jv->type == json_type::null)
+ jv = nullptr;
+
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ bool index;
+ uint64_t i (0);
+ string n;
+
+ // Always interpret uint64-typed subscript as index even for objects.
 + // This can be used, for example, to iterate with an index over object
+ // members.
+ //
+ if (!sub.null && sub.type == &value_traits<uint64_t>::value_type)
+ {
+ i = sub.as<uint64_t> ();
+ index = true;
+ }
+ else
+ {
+ // How we interpret the subscript depends on the JSON value type. For
+ // objects we treat it as a string (member name) and for everything else
+ // as an index.
+ //
+ // What if the value is null and we don't have a JSON type? In this case
 + // we treat it as a string since a valid number is also a valid string.
+ //
+ try
+ {
+ if (jv == nullptr || jv->type == json_type::object)
+ {
+ n = convert<string> (move (sub));
+ index = false;
+ }
+ else
+ {
+ i = convert<uint64_t> (move (sub));
+ index = true;
+ }
+ }
+ catch (const invalid_argument& e)
+ {
+ // We will likely be trying to interpret a member name as an integer
+ // due to the incorrect value type so issue appropriate diagnostics.
+ //
+ diag_record dr;
+ dr << fail (sloc) << "invalid json value subscript: " << e;
+
+ if (jv != nullptr && jv->type != json_type::object)
+ dr << info << "json value type is " << jv->type;
+
+ dr << info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern" << endf;
+ }
+ }
+
+ value r (jv != nullptr
+ ? json_subscript_impl (val, val_data, i, n, index).first
+ : value ());
+
+ // Typify null values so that we get called for chained subscripts.
+ //
+ if (r.null)
+ r.type = &value_traits<json_value>::value_type;
+
+ return r;
+ }
+
+ static void
+ json_iterate (const value& val,
+ const function<void (value&&, bool first)>& f)
+ {
+ // Implement in terms of subscript for consistency (in particular,
+ // iterating over simple values like number, string).
+ //
+ for (uint64_t i (0);; ++i)
+ {
+ pair<value, bool> e (json_subscript_impl (val, nullptr, i, {}, true));
+
+ if (!e.second)
+ break;
+
+ f (move (e.first), i == 0);
+ }
+ }
+
+ const json_value value_traits<json_value>::empty_instance;
+ const char* const value_traits<json_value>::type_name = "json";
+
+ // Note that whether the json value is a container or not depends on its
+ // payload type. However, for our purposes it feels correct to assume it is
+ // a container rather than not with itself as the element type (see
+ // value_traits::{container, element_type} usage for details).
+ //
+ const value_type value_traits<json_value>::value_type
+ {
+ type_name,
+ sizeof (json_value),
+ nullptr, // No base.
+ true, // Container.
+ &value_traits<json_value>::value_type, // Element (itself).
+ &default_dtor<json_value>,
+ &default_copy_ctor<json_value>,
+ &default_copy_assign<json_value>,
+ &json_assign,
+ json_append,
+ json_prepend,
+ &json_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &json_compare,
+ &default_empty<json_value>,
+ &json_subscript,
+ &json_iterate
+ };
+
+ // json_array
+ //
+ json_array value_traits<json_array>::
+ convert (names&& ns)
+ {
+ json_array r;
+
+ size_t n (ns.size ());
+ if (n == 0)
+ ; // Empty.
+ else if (n == 1)
+ {
+ // Tricky: this can still be JSON input text that is an array. And if
+ // it's not, then make it an element of an array.
+ //
+ // @@ Hm, this is confusing: [json_array] a = null ! Maybe not? But then
+ // this won't work: [json_array] a = ([json_array] null). Maybe
+ // distinguish in assign?
+ //
+ json_value v (to_json_value (ns.front (), "json"));
+
+ if (v.type == json_type::array)
+ r.array = move (v.array);
+ else
+ r.array.push_back (move (v));
+ }
+ else
+ {
+ r.array.reserve (n);
+
+ for (name& n: ns)
+ {
+ if (n.pair)
+ throw invalid_argument (
+ "unexpected pair in json array element value '" +
+ to_string (n) + '\'');
+
+ r.array.push_back (to_json_value (n, "json array element"));
+ }
+ }
+
+ return r;
+ }
+
+ static void
+ json_array_assign (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_array>;
+
+ try
+ {
+ traits::assign (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json array";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_array_append (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using arr_traits = value_traits<json_array>;
+
+ try
+ {
+ arr_traits::append (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json array";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_array_prepend (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using arr_traits = value_traits<json_array>;
+
+ try
+ {
+ arr_traits::prepend (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json array";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ const json_array value_traits<json_array>::empty_instance;
+ const char* const value_traits<json_array>::type_name = "json_array";
+
+ const value_type value_traits<json_array>::value_type
+ {
+ type_name,
+ sizeof (json_array),
+ &value_traits<json_value>::value_type, // Base (assuming direct cast works
+ // for both).
+ true, // Container.
+ &value_traits<json_value>::value_type, // Element (json_value).
+ &default_dtor<json_array>,
+ &default_copy_ctor<json_array>,
+ &default_copy_assign<json_array>,
+ &json_array_assign,
+ &json_array_append,
+ &json_array_prepend,
+ &json_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &json_compare,
+ &default_empty<json_array>,
+ &json_subscript,
+ &json_iterate
+ };
+
+ // json_object
+ //
+ json_object value_traits<json_object>::
+ convert (names&& ns)
+ {
+ json_object r;
+
+ size_t n (ns.size ());
+ if (n == 0)
+ ; // Empty.
+ else if (n == 1)
+ {
+ // Tricky: this can still be JSON input text that is an object. So do
+ // a similar check as in to_json_value() above.
+ //
+ name& n (ns.front ());
+
+ if (!n.simple () || n.pattern)
+ throw_invalid_argument (n, nullptr, "json object");
+
+ string& s (n.value);
+ size_t p (s.find_first_not_of (" \t\n\r"));
+
+ if (p == string::npos || s[p] != '{')
+ {
+ // Unlike for array above, we cannot turn any value into a member.
+ //
+ throw invalid_argument ("expected json object instead of '" + s + '\'');
+ }
+
+ json_value v (to_json_value (ns.front (), "json object"));
+ assert (v.type == json_type::object);
+ r.object = move (v.object);
+ }
+ else
+ {
+ r.object.reserve (n / 2);
+
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ if (!i->pair)
+ throw invalid_argument (
+ "expected pair in json member value '" + to_string (*i) + '\'');
+
+ string n (to_string_value (*i, "json member name"));
+ json_value v (to_json_value (*++i, "json member"));
+
+ if (find_if (r.object.begin (), r.object.end (),
+ [&n] (const json_member& m)
+ {
+ return m.name == n;
+ }) != r.object.end ())
+ {
+ throw invalid_argument (
+ "duplicate json object member '" + n + '\'');
+ }
+
+ r.object.push_back (json_member {move (n), move (v)});
+ }
+ }
+
+ return r;
+ }
+
+ static void
+ json_object_assign (value& v, names&& ns, const variable* var)
+ {
+ using traits = value_traits<json_object>;
+
+ try
+ {
+ traits::assign (v, traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json object";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_object_append (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using obj_traits = value_traits<json_object>;
+
+ try
+ {
+ obj_traits::append (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json object";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ static void
+ json_object_prepend (value& v, names&& ns, const variable* var)
+ {
+ using val_traits = value_traits<json_value>;
+ using obj_traits = value_traits<json_object>;
+
+ try
+ {
+ obj_traits::prepend (v, val_traits::convert (move (ns)));
+ }
+ catch (const invalid_argument& e)
+ {
+ // Note: ns is not guaranteed to be valid.
+ //
+ diag_record dr (fail);
+ dr << "invalid json object";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << ": " << e;
+ }
+ }
+
+ const json_object value_traits<json_object>::empty_instance;
+ const char* const value_traits<json_object>::type_name = "json_object";
+
+ const value_type value_traits<json_object>::value_type
+ {
+ type_name,
+ sizeof (json_object),
+ &value_traits<json_value>::value_type, // Base (assuming direct cast works
+ // for both).
+ true, // Container.
+ &value_traits<json_value>::value_type, // Element (json_value).
+ &default_dtor<json_object>,
+ &default_copy_ctor<json_object>,
+ &default_copy_assign<json_object>,
+ &json_object_assign,
+ &json_object_append,
+ &json_object_prepend,
+ &json_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &json_compare,
+ &default_empty<json_object>,
+ &json_subscript,
+ &json_iterate
+ };
+
+ // cmdline
+ //
+ cmdline value_traits<cmdline>::
+ convert (names&& ns)
+ {
+ return cmdline (make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ void value_traits<cmdline>::
+ assign (value& v, cmdline&& x)
+ {
+ if (v)
+ v.as<cmdline> () = move (x);
+ else
+ new (&v.data_) cmdline (move (x));
+ }
+
+ void value_traits<cmdline>::
+ append (value& v, cmdline&& x)
+ {
+ if (v)
+ {
+ cmdline& p (v.as<cmdline> ());
+
+ if (p.empty ())
+ p.swap (x);
+ else
+ p.insert (p.end (),
+ make_move_iterator (x.begin ()),
+ make_move_iterator (x.end ()));
+ }
+ else
+ new (&v.data_) cmdline (move (x));
+ }
+
+ void value_traits<cmdline>::
+ prepend (value& v, cmdline&& x)
+ {
+ if (v)
+ {
+ cmdline& p (v.as<cmdline> ());
+
+ if (!p.empty ())
+ x.insert (x.end (),
+ make_move_iterator (p.begin ()),
+ make_move_iterator (p.end ()));
+
+ p.swap (x);
+ }
+ else
+ new (&v.data_) cmdline (move (x));
+ }
+
+ static void
+ cmdline_assign (value& v, names&& ns, const variable*)
+ {
+ if (!v)
+ {
+ new (&v.data_) cmdline ();
+ v.null = false;
+ }
+
+ v.as<cmdline> ().assign (make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ static void
+ cmdline_append (value& v, names&& ns, const variable*)
+ {
+ if (!v)
+ {
+ new (&v.data_) cmdline ();
+ v.null = false;
+ }
+
+ auto& x (v.as<cmdline> ());
+ x.insert (x.end (),
+ make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ static void
+ cmdline_prepend (value& v, names&& ns, const variable*)
+ {
+ if (!v)
+ {
+ new (&v.data_) cmdline ();
+ v.null = false;
+ }
+
+ auto& x (v.as<cmdline> ());
+ x.insert (x.begin (),
+ make_move_iterator (ns.begin ()),
+ make_move_iterator (ns.end ()));
+ }
+
+ static names_view
+ cmdline_reverse (const value& v, names&, bool)
+ {
+ const auto& x (v.as<cmdline> ());
+ return names_view (x.data (), x.size ());
+ }
+
+ static int
+ cmdline_compare (const value& l, const value& r)
+ {
+ return vector_compare<name> (l, r);
+ }
+
+ const cmdline value_traits<cmdline>::empty_instance;
+
+ const char* const value_traits<cmdline>::type_name = "cmdline";
+
+ const value_type value_traits<cmdline>::value_type
+ {
+ type_name,
+ sizeof (cmdline),
+ nullptr, // No base.
+ true, // Container.
+ &value_traits<string>::value_type, // Element type.
+ &default_dtor<cmdline>,
+ &default_copy_ctor<cmdline>,
+ &default_copy_assign<cmdline>,
+ &cmdline_assign,
+ &cmdline_append,
+ &cmdline_prepend,
+ &cmdline_reverse,
+ nullptr, // No cast (cast data_ directly).
+ &cmdline_compare,
+ &default_empty<cmdline>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
};
// variable_pool
@@ -1428,6 +2612,17 @@ namespace build2
const variable_visibility* v,
const bool* o) const
{
+ assert (var.owner == this);
+
+ if (outer_ != nullptr)
+ {
+ // Project-private variable. Assert visibility/overridability, the same
+ // as in insert().
+ //
+ assert ((o == nullptr || !*o) &&
+ (v == nullptr || *v >= variable_visibility::project));
+ }
+
// Check overridability (all overrides, if any, should already have
// been entered; see context ctor for details).
//
@@ -1509,7 +2704,7 @@ namespace build2
}
static inline void
- merge_pattern (const variable_pool::pattern& p,
+ merge_pattern (const variable_patterns::pattern& p,
const build2::value_type*& t,
const variable_visibility*& v,
const bool*& o)
@@ -1560,20 +2755,68 @@ namespace build2
const bool* o,
bool pat)
{
- assert (!global_ || global_->phase == run_phase::load);
+ if (outer_ != nullptr)
+ {
+ // Project-private pool.
+ //
+ if (n.find ('.') != string::npos) // Qualified.
+ return outer_->insert (move (n), t, v, o, pat);
+
+ // Unqualified.
+ //
+ // The pool chaining semantics for insertion: first check the outer pool
+ // then, if not found, insert in own pool.
+ //
+ if (const variable* var = outer_->find (n))
+ {
+ // Verify type/visibility/overridability.
+ //
+ // Should we assert or fail? Currently the buildfile parser goes
+ // through update() to set these so let's do assert for now. We also
+ // require equality (these are a handful of special variables).
+ //
+ assert ((t == nullptr || t == var->type) &&
+ (v == nullptr || *v == var->visibility) &&
+ (o == nullptr || *o || var->overrides == nullptr));
+
+ return pair<variable&, bool> (const_cast<variable&> (*var), false);
+ }
+
+ // Project-private variable. Assert visibility/overridability and fall
+ // through. Again, we expect the buildfile parser to verify and diagnose
+ // these.
+ //
+ // Note: similar code in update().
+ //
+ assert ((o == nullptr || !*o) &&
+ (v == nullptr || *v >= variable_visibility::project));
+ }
+ else if (shared_)
+ {
+ // Public pool.
+ //
+ // Make sure all the unqualified variables are pre-entered during
+ // initialization.
+ //
+ assert (shared_->load_generation == 0 || n.find ('.') != string::npos);
+ }
+
+ assert (!shared_ || shared_->phase == run_phase::load);
// Apply pattern.
//
+ using pattern = variable_patterns::pattern;
+
const pattern* pa (nullptr);
auto pt (t); auto pv (v); auto po (o);
- if (pat)
+ if (pat && patterns_ != nullptr)
{
if (n.find ('.') != string::npos)
{
// Reverse means from the "largest" (most specific).
//
- for (const pattern& p: reverse_iterate (patterns_))
+ for (const pattern& p: reverse_iterate (patterns_->patterns_))
{
if (match_pattern (n, p.prefix, p.suffix, p.multi))
{
@@ -1590,6 +2833,7 @@ namespace build2
variable {
move (n),
nullptr,
+ nullptr,
pt,
nullptr,
pv != nullptr ? *pv : variable_visibility::project}));
@@ -1597,7 +2841,10 @@ namespace build2
variable& var (r.first->second);
if (r.second)
+ {
+ var.owner = this;
var.aliases = &var;
+ }
else // Note: overridden variable will always exist.
{
// This is tricky: if the pattern does not require a match, then we
@@ -1625,7 +2872,15 @@ namespace build2
const variable& variable_pool::
insert_alias (const variable& var, string n)
{
- assert (var.aliases != nullptr && var.overrides == nullptr);
+ if (outer_ != nullptr)
+ {
+ assert (n.find ('.') != string::npos); // Qualified.
+ return outer_->insert_alias (var, move (n));
+ }
+
+ assert (var.owner == this &&
+ var.aliases != nullptr &&
+ var.overrides == nullptr);
variable& a (insert (move (n),
var.type,
@@ -1646,15 +2901,15 @@ namespace build2
return a;
}
- void variable_pool::
- insert_pattern (const string& p,
- optional<const value_type*> t,
- optional<bool> o,
- optional<variable_visibility> v,
- bool retro,
- bool match)
+ void variable_patterns::
+ insert (const string& p,
+ optional<const value_type*> t,
+ optional<bool> o,
+ optional<variable_visibility> v,
+ bool retro,
+ bool match)
{
- assert (!global_ || global_->phase == run_phase::load);
+ assert (!shared_ || shared_->phase == run_phase::load);
size_t pn (p.size ());
@@ -1688,9 +2943,9 @@ namespace build2
// Apply retrospectively to existing variables.
//
- if (retro)
+ if (retro && pool_ != nullptr)
{
- for (auto& p: map_)
+ for (auto& p: pool_->map_)
{
variable& var (p.second);
@@ -1707,10 +2962,10 @@ namespace build2
}
if (j == e)
- update (var,
- t ? *t : nullptr,
- v ? &*v : nullptr,
- o ? &*o : nullptr); // Not changing the key.
+ pool_->update (var,
+ t ? *t : nullptr,
+ v ? &*v : nullptr,
+ o ? &*o : nullptr); // Not changing the key.
}
}
}
@@ -1718,7 +2973,66 @@ namespace build2
// variable_map
//
- const variable_map empty_variable_map (nullptr /* context */);
+ const variable_map empty_variable_map (variable_map::owner::empty);
+
+ // Need scope/target definition thus not inline.
+ //
+ variable_map::
+ variable_map (const scope& s, bool shared)
+ : shared_ (shared), owner_ (owner::scope), scope_ (&s), ctx (&s.ctx)
+ {
+ }
+
+ variable_map::
+ variable_map (const target& t, bool shared)
+ : shared_ (shared), owner_ (owner::target), target_ (&t), ctx (&t.ctx)
+ {
+ }
+
+ variable_map::
+ variable_map (const prerequisite& p, bool shared)
+ : shared_ (shared),
+ owner_ (owner::prereq), prereq_ (&p),
+ ctx (&p.scope.ctx)
+ {
+ }
+
+ variable_map::
+ variable_map (variable_map&& v, const prerequisite& p, bool shared)
+ : shared_ (shared),
+ owner_ (owner::scope), prereq_ (&p),
+ ctx (&p.scope.ctx),
+ m_ (move (v.m_))
+ {
+ }
+
+ variable_map::
+ variable_map (const variable_map& v, const prerequisite& p, bool shared)
+ : shared_ (shared),
+ owner_ (owner::scope), prereq_ (&p),
+ ctx (&p.scope.ctx),
+ m_ (v.m_)
+ {
+ }
+
+ lookup variable_map::
+ lookup (const string& name) const
+ {
+ lookup_type r;
+
+ const scope* bs (owner_ == owner::scope ? scope_ :
+ owner_ == owner::target ? &target_->base_scope () :
+ owner_ == owner::prereq ? &prereq_->scope :
+ nullptr);
+
+ if (const variable* var = bs->var_pool ().find (name))
+ {
+ auto p (lookup (*var));
+ r = lookup_type (p.first, &p.second, this);
+ }
+
+ return r;
+ }
auto variable_map::
lookup (const variable& var, bool typed, bool aliased) const ->
@@ -1761,24 +3075,43 @@ namespace build2
auto* r (const_cast<value_data*> (p.first));
if (r != nullptr)
+ {
+ r->extra = 0;
r->version++;
+ }
return pair<value_data*, const variable&> (r, p.second);
}
+ value& variable_map::
+ assign (const string& name)
+ {
+ assert (owner_ != owner::context);
+
+ const scope* bs (owner_ == owner::scope ? scope_ :
+ owner_ == owner::target ? &target_->base_scope () :
+ owner_ == owner::prereq ? &prereq_->scope :
+ nullptr);
+
+ return insert (bs->var_pool ()[name]).first;
+ }
+
pair<value&, bool> variable_map::
- insert (const variable& var, bool typed)
+ insert (const variable& var, bool typed, bool reset_extra)
{
- assert (!global_ || ctx->phase == run_phase::load);
+ assert (!shared_ || ctx->phase == run_phase::load);
auto p (m_.emplace (var, value_data (typed ? var.type : nullptr)));
value_data& r (p.first->second);
if (!p.second)
{
+ if (reset_extra)
+ r.extra = 0;
+
// Check if this is the first access after being assigned a type.
//
- // Note: we still need atomic in case this is not a global state.
+ // Note: we still need atomic in case this is not a shared state.
//
if (typed && var.type != nullptr)
typify (r, var);
@@ -1789,21 +3122,47 @@ namespace build2
return pair<value&, bool> (r, p.second);
}
+ auto variable_map::
+ find (const string& name) const -> const_iterator
+ {
+ assert (owner_ != owner::context);
+
+ const scope* bs (owner_ == owner::scope ? scope_ :
+ owner_ == owner::target ? &target_->base_scope () :
+ owner_ == owner::prereq ? &prereq_->scope :
+ nullptr);
+
+
+ const variable* var (bs->var_pool ().find (name));
+ return var != nullptr ? find (*var) : end ();
+ }
+
bool variable_map::
erase (const variable& var)
{
- assert (!global_ || ctx->phase == run_phase::load);
+ assert (!shared_ || ctx->phase == run_phase::load);
return m_.erase (var) != 0;
}
+ variable_map::const_iterator variable_map::
+ erase (const_iterator i)
+ {
+ assert (!shared_ || ctx->phase == run_phase::load);
+
+ return const_iterator (m_.erase (i), *this);
+ }
+
// variable_pattern_map
//
variable_map& variable_pattern_map::
insert (pattern_type type, string&& text)
{
+ // Note that this variable map is special and we use context as its owner
+ // (see variable_map for details).
+ //
auto r (map_.emplace (pattern {type, false, move (text), {}},
- variable_map (ctx, global_)));
+ variable_map (ctx, shared_)));
// Compile the regex.
//
@@ -1963,10 +3322,16 @@ namespace build2
template struct LIBBUILD2_DEFEXPORT
value_traits<vector<pair<string, optional<bool>>>>;
+ template struct LIBBUILD2_DEFEXPORT value_traits<set<string>>;
+ template struct LIBBUILD2_DEFEXPORT value_traits<set<json_value>>;
+
template struct LIBBUILD2_DEFEXPORT
value_traits<map<string, string>>;
template struct LIBBUILD2_DEFEXPORT
+ value_traits<map<json_value, json_value>>;
+
+ template struct LIBBUILD2_DEFEXPORT
value_traits<map<string, optional<string>>>;
template struct LIBBUILD2_DEFEXPORT
diff --git a/libbuild2/variable.hxx b/libbuild2/variable.hxx
index 2bfab05..aed3350 100644
--- a/libbuild2/variable.hxx
+++ b/libbuild2/variable.hxx
@@ -4,7 +4,8 @@
#ifndef LIBBUILD2_VARIABLE_HXX
#define LIBBUILD2_VARIABLE_HXX
-#include <type_traits> // aligned_storage
+#include <cstddef> // max_align_t
+#include <type_traits> // is_*
#include <unordered_map>
#include <libbutl/prefix-map.hxx>
@@ -14,8 +15,11 @@
#include <libbuild2/forward.hxx>
#include <libbuild2/utility.hxx>
+#include <libbuild2/json.hxx>
+
#include <libbuild2/context.hxx>
#include <libbuild2/target-type.hxx>
+#include <libbuild2/diagnostics.hxx>
#include <libbuild2/export.hxx>
@@ -47,7 +51,11 @@ namespace build2
template <typename T> const value_type* is_a () const;
- // Element type, if this is a vector.
+ // True if the type is a container.
+ //
+ bool container;
+
+ // Element type, if this is a container and the element type is named.
//
const value_type* element_type;
@@ -74,9 +82,11 @@ namespace build2
void (*const prepend) (value&, names&&, const variable*);
// Reverse the value back to a vector of names. Storage can be used by the
- // implementation if necessary. Cannot be NULL.
+ // implementation if necessary. If reduce is true, then for an empty
+ // simple value return an empty list rather than a list of one empty name.
+ // Note that the value cannot be NULL.
//
- names_view (*const reverse) (const value&, names& storage);
+ names_view (*const reverse) (const value&, names& storage, bool reduce);
// Cast value::data_ storage to value type so that the result can be
// static_cast to const T*. If it is NULL, then cast data_ directly. Note
@@ -90,7 +100,33 @@ namespace build2
// If NULL, then the value is never empty.
//
+ // Note that this is "semantically empty", not necessarily
+ // "representationally empty". For example, an empty JSON array is
+ // semantically empty but its representation (`[]`) is not.
+ //
bool (*const empty) (const value&);
+
+ // Custom subscript function. If NULL, then the generic implementation is
+ // used.
+ //
+ // Note that val can be NULL. If val_data points to val, then it can be
+ // moved from. The sloc and bloc arguments are the subscript and brace
+ // locations, respectively.
+ //
+ // Note: should normally be consistent with iterate.
+ //
+ value (*/*const*/ subscript) (const value& val,
+ value* val_data,
+ value&& subscript,
+ const location& sloc,
+ const location& bloc);
+
 + // Custom iteration function. It should invoke the specified function for
+ // each element in order. If NULL, then the generic implementation is
+ // used. The passed value is never NULL.
+ //
+ void (*const iterate) (const value&,
+ const function<void (value&&, bool first)>&);
};
// The order of the enumerators is arranged so that their integral values
@@ -106,6 +142,10 @@ namespace build2
scope, // This scope (no outer scopes).
target, // Target and target type/pattern-specific.
prereq // Prerequisite-specific.
+
+ // Note: remember to update the visibility attribute parsing if adding any
+ // new values here. As well as the $builtin.visibility() function
+ // documentation.
};
// VC14 reports ambiguity but seems to work if we don't provide any.
@@ -145,13 +185,27 @@ namespace build2
return o << to_string (v);
}
- // variable
+ // A variable.
+ //
+ // A variable can be public, project-private, or script-private, which
+ // corresponds to the variable pool it belongs to (see variable_pool). The
+ // two variables from the same pool are considered the same if they have the
+ // same name. The variable access (public/private) rules are:
+ //
+ // - Qualified variable are by default public while unqualified -- private.
+ //
+ // - Private must have project or lesser visibility and not be overridable.
+ //
+ // - An unqualified public variable can only be pre-entered during the
+ // context construction (to make sure it is not entered as private).
//
- // The two variables are considered the same if they have the same name.
 + // - There are no scope-private variables in our model due to side-loading,
+ // target type/pattern-specific append, etc.
//
// Variables can be aliases of each other in which case they form a circular
// linked list (the aliases pointer for variable without any aliases points
- // to the variable itself).
+ // to the variable itself). This mechanism should only be used for variables
+ // of the same access (normally public).
//
// If the variable is overridden on the command line, then override is the
// linked list of the special override variables. Their names are derived
@@ -198,6 +252,7 @@ namespace build2
struct variable
{
string name;
+ const variable_pool* owner;
const variable* aliases; // Circular linked list.
const value_type* type; // If NULL, then not (yet) typed.
unique_ptr<const variable> overrides;
@@ -276,7 +331,13 @@ namespace build2
// Extra data that is associated with the value that can be used to store
// flags, etc. It is initialized to 0 and copied (but not assigned) from
// one value to another but is otherwise untouched (not even when the
- // value is reset to NULL).
+ // value is reset to NULL) unless it is part of variable_map::value_data,
+ // in which case it is reset to 0 on each modification (version
+ // increment; however, see reset_extra flag in variable_map::insert()).
+ //
+ // (The reset on each modification semantics is used to implement the
+ // default value distinction as currently done in the config module but
+ // later probably will be done for ?= and $origin()).
//
// Note: if deciding to use for something make sure it is not overlapping
// with an existing usage.
@@ -290,6 +351,10 @@ namespace build2
// Check in a type-independent way if the value is empty. The value must
// not be NULL.
//
+ // Note that this is "semantically empty", not necessarily
+ // "representationally empty". For example, an empty JSON array is
+ // semantically empty but its representation (`[]`) is not.
+ //
bool
empty () const;
@@ -327,9 +392,13 @@ namespace build2
value&
operator= (nullptr_t) {if (!null) reset (); return *this;}
- value (value&&);
+ // Note that we have the noexcept specification even though copy_ctor()
+ // could potentially throw (for example, for std::map).
+ //
+ value (value&&) noexcept;
+
explicit value (const value&);
- value& operator= (value&&);
+ value& operator= (value&&); // Note: can throw for untyped RHS.
value& operator= (const value&);
value& operator= (reference_wrapper<value>);
value& operator= (reference_wrapper<const value>);
@@ -338,8 +407,8 @@ namespace build2
//
public:
// Assign/append/prepend a typed value. For assign, LHS should be either
- // of the same type or untyped. For append, LHS should be either of the
- // same type or untyped and NULL.
+ // of the same type or untyped. For append/prepend, LHS should be either
+ // of the same type or untyped and NULL.
//
template <typename T> value& operator= (T);
template <typename T> value& operator+= (T);
@@ -388,8 +457,8 @@ namespace build2
// specialization below). Types that don't fit will have to be handled
// with an extra dynamic allocation.
//
- static constexpr size_t size_ = sizeof (name_pair);
- std::aligned_storage<size_>::type data_;
+ static constexpr size_t size_ = sizeof (name_pair);
+ alignas (std::max_align_t) unsigned char data_[size_];
// Make sure we have sufficient storage for untyped values.
//
@@ -429,38 +498,37 @@ namespace build2
template <typename T> T& cast (value&);
template <typename T> T&& cast (value&&);
template <typename T> const T& cast (const value&);
- template <typename T> const T& cast (const lookup&);
+ template <typename T> const T& cast (lookup);
// As above but returns NULL if the value is NULL (or not defined, in
// case of lookup).
//
template <typename T> T* cast_null (value&);
template <typename T> const T* cast_null (const value&);
- template <typename T> const T* cast_null (const lookup&);
+ template <typename T> const T* cast_null (lookup);
// As above but returns empty value if the value is NULL (or not defined, in
// case of lookup).
//
template <typename T> const T& cast_empty (const value&);
- template <typename T> const T& cast_empty (const lookup&);
+ template <typename T> const T& cast_empty (lookup);
// As above but returns the specified default if the value is NULL (or not
// defined, in case of lookup). Note that the return is by value, not by
// reference.
//
template <typename T> T cast_default (const value&, const T&);
- template <typename T> T cast_default (const lookup&, const T&);
+ template <typename T> T cast_default (lookup, const T&);
// As above but returns false/true if the value is NULL (or not defined,
// in case of lookup). Note that the template argument is only for
// documentation and should be bool (or semantically compatible).
//
template <typename T> T cast_false (const value&);
- template <typename T> T cast_false (const lookup&);
+ template <typename T> T cast_false (lookup);
template <typename T> T cast_true (const value&);
- template <typename T> T cast_true (const lookup&);
-
+ template <typename T> T cast_true (lookup);
// Assign value type to the value. The variable is optional and is only used
// for diagnostics.
@@ -473,20 +541,22 @@ namespace build2
typify_atomic (context&, value&, const value_type&, const variable*);
// Remove value type from the value reversing it to names. This is similar
- // to reverse() below except that it modifies the value itself.
+ // to reverse() below except that it modifies the value itself. Note that
+ // the reduce semantics applies to empty but not null.
//
- LIBBUILD2_SYMEXPORT void untypify (value&);
+ LIBBUILD2_SYMEXPORT void untypify (value&, bool reduce);
// Reverse the value back to names. The value should not be NULL and storage
- // should be empty.
+ // should be empty. If reduce is true, then for an empty simple value return
+ // an empty list rather than a list of one empty name.
//
vector_view<const name>
- reverse (const value&, names& storage);
+ reverse (const value&, names& storage, bool reduce);
vector_view<name>
- reverse (value&, names& storage);
+ reverse (value&, names& storage, bool reduce);
- // Variable lookup result, AKA, binding of a name to a value.
+ // Variable lookup result, AKA, binding of a variable to a value.
//
// A variable can be undefined, NULL, or contain a (potentially empty)
// value.
@@ -629,7 +699,7 @@ namespace build2
// case (container) if invalid_argument is thrown, the names are not
// guaranteed to be unchanged.
//
- //template <typename T> T convert (names&&); (declaration causes ambiguity)
+ template <typename T> T convert (names&&);
// Convert value to T. If value is already of type T, then simply cast it.
// Otherwise call convert(names) above. If value is NULL, then throw
@@ -907,7 +977,7 @@ namespace build2
// pair of two empties).
//
// @@ Maybe we should redo this with optional<> to signify which half can
- // be missing?
+ // be missing? See also dump_value(json).
//
template <>
struct LIBBUILD2_SYMEXPORT value_traits<name_pair>
@@ -1115,12 +1185,35 @@ namespace build2
static const pair_vector_value_type<K, V> value_type;
};
+ // set<T>
+ //
+ template <typename T>
+ struct set_value_type;
+
+ template <typename T>
+ struct value_traits<set<T>>
+ {
+ static_assert (sizeof (set<T>) <= value::size_, "insufficient space");
+
+ static set<T> convert (names&&);
+ static void assign (value&, set<T>&&);
+ static void append (value&, set<T>&&);
+ static void prepend (value&, set<T>&&);
+ static bool empty (const set<T>& x) {return x.empty ();}
+
+ static const set<T> empty_instance;
+ static const set_value_type<T> value_type;
+ };
+
// map<K, V>
//
// Either K or V can be optional<T> making the key or value optional.
//
- // Note that append/+= is non-overriding (like insert()) while prepend/=+
- // is (like insert_or_assign()).
+ // Note that append/+= is overriding (like insert_or_assign()) while
+ // prepend/=+ is not (like insert()). In a sense, whatever appears last
+ // (from left to right) is kept, which is consistent with what we expect to
+ // happen when specifying the same key repeatedly in a representation (e.g.,
+ // a@0 a@1).
//
template <typename K, typename V>
struct map_value_type;
@@ -1141,12 +1234,116 @@ namespace build2
static const map_value_type<K, V> value_type;
};
+ // json
+ //
+ // Note that we do not expose json_member as a value type instead
+ // representing it as an object with one member. While we could expose
+ // member (and reverse it as a pair since there is no valid JSON
+ // representation for a standalone member), this doesn't seem to buy us much
+ // but will cause complications (for example, in supporting append/prepend).
+ // On the other hand, representing a member as an object only requires a bit
+ // of what looks like harmless looseness in a few contexts (such as the
+ // $json.member_*() functions).
+ //
+ // Note that similar to map, JSON object append/+= is overriding while
+ // prepend/=+ is not. In a sense, whatever appears last (from left to right)
+ // is kept, which is consistent with what we expect to happen when
+ // specifying the same name repeatedly (provided it's not considered
+ // invalid) in a representation (e.g., {"a":1,"a":2}).
+ //
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<json_value>
+ {
+ static_assert (sizeof (json_value) <= value::size_, "insufficient space");
+
+ static json_value convert (names&&);
+ static void assign (value&, json_value&&);
+ static void append (value&, json_value&&);
+ static void prepend (value&, json_value&&);
+ static bool empty (const json_value&); // null or empty array/object
+
+ // These are provided to make it possible to use json_value as a container
+ // element.
+ //
+ static json_value convert (name&&, name*);
+ static name reverse (const json_value&);
+ static int compare (const json_value& x, const json_value& y) {
+ return x.compare (y);}
+
+ static const json_value empty_instance; // null
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<json_array>
+ {
+ static_assert (sizeof (json_array) <= value::size_, "insufficient space");
+
+ static json_array convert (names&&);
+ static void assign (value&, json_array&&);
+ static void append (value&, json_value&&); // Note: value, not array.
+ static void prepend (value&, json_value&&);
+ static bool empty (const json_array& v) {return v.array.empty ();}
+
+ static const json_array empty_instance; // empty array
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<json_object>
+ {
+ static_assert (sizeof (json_object) <= value::size_, "insufficient space");
+
+ static json_object convert (names&&);
+ static void assign (value&, json_object&&);
+ static void append (value&, json_value&&); // Note: value, not object.
+ static void prepend (value&, json_value&&);
+ static bool empty (const json_object& v) {return v.object.empty ();}
+
+ static const json_object empty_instance; // empty object
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
+ // Canned command line to be re-lexed (used in {Build,Test}scripts).
+ //
+ // Note that because the executable can be specific as a target or as
+ // process_path_ex, this is a list of names rather than a list of strings.
+ // Note also that unlike vector<name> this type allows name pairs.
+ //
+ struct cmdline: vector<name>
+ {
+ using vector<name>::vector;
+
+ cmdline () {} // For Clang.
+ };
+
+ template <>
+ struct LIBBUILD2_SYMEXPORT value_traits<cmdline>
+ {
+ static_assert (sizeof (cmdline) <= value::size_, "insufficient space");
+
+ static cmdline convert (names&&);
+ static void assign (value&, cmdline&&);
+ static void append (value&, cmdline&&);
+ static void prepend (value&, cmdline&&);
+ static bool empty (const cmdline& x) {return x.empty ();}
+
+ static const cmdline empty_instance;
+ static const char* const type_name;
+ static const build2::value_type value_type;
+ };
+
// Explicitly pre-instantiate and export value_traits templates for
// vector/map value types used in the build2 project. Note that this is not
// merely an optimization since not doing so we may end up with multiple
// value type objects for the same traits type (and we use their addressed
// as identity; see cast(const value&) for an example).
//
+ // NOTE: REMEMBER TO UPDATE dump_value(json) IF CHANGING ANYTHING HERE!
+ //
extern template struct LIBBUILD2_DECEXPORT value_traits<strings>;
extern template struct LIBBUILD2_DECEXPORT value_traits<vector<name>>;
extern template struct LIBBUILD2_DECEXPORT value_traits<paths>;
@@ -1166,10 +1363,16 @@ namespace build2
extern template struct LIBBUILD2_DECEXPORT
value_traits<vector<pair<string, optional<bool>>>>;
+ extern template struct LIBBUILD2_DECEXPORT value_traits<set<string>>;
+ extern template struct LIBBUILD2_DECEXPORT value_traits<set<json_value>>;
+
extern template struct LIBBUILD2_DECEXPORT
value_traits<map<string, string>>;
extern template struct LIBBUILD2_DECEXPORT
+ value_traits<map<json_value, json_value>>;
+
+ extern template struct LIBBUILD2_DECEXPORT
value_traits<map<string, optional<string>>>;
extern template struct LIBBUILD2_DECEXPORT
@@ -1196,9 +1399,12 @@ namespace build2
// Variable pool.
//
- // The global (as in, context-wide) version is protected by the phase mutex.
+ // The shared versions (as in, context or project-wide) are protected by the
+ // phase mutex and thus can only be modified during the load phase.
//
- class variable_pool
+ class variable_patterns;
+
+ class LIBBUILD2_SYMEXPORT variable_pool
{
public:
// Find existing (assert exists).
@@ -1218,7 +1424,7 @@ namespace build2
//
// Note also that a pattern and later insertions may restrict (but not
// relax) visibility and overridability.
-
+ //
const variable&
insert (string name)
{
@@ -1276,6 +1482,12 @@ namespace build2
}
const variable&
+ insert (string name, const value_type* type)
+ {
+ return insert (move (name), type, nullptr, nullptr).first;
+ }
+
+ const variable&
insert (string name,
const value_type* type,
bool overridable,
@@ -1296,70 +1508,74 @@ namespace build2
// Overridable aliased variables are most likely a bad idea: without a
// significant effort, the overrides will only be applied along the alias
// names (i.e., there would be no cross-alias overriding). So for now we
- // don't allow this (use the common variable mechanism instead).
+ // don't allow this (manually handle multiple names by merging their
+ // values instead).
+ //
+ // Note: currently only public variables can be aliased.
//
- LIBBUILD2_SYMEXPORT const variable&
+ const variable&
insert_alias (const variable& var, string name);
- // Insert a variable pattern. Any variable that matches this pattern will
- // have the specified type, visibility, and overridability. If match is
- // true, then individual insertions of the matching variable must match
- // the specified type/visibility/overridability. Otherwise, individual
- // insertions can provide alternative values and the pattern values are a
- // fallback (if you specify false you better be very clear about what you
- // are trying to achieve).
+ // Iteration.
//
- // The pattern must be in the form [<prefix>.](*|**)[.<suffix>] where '*'
- // matches single component stems (i.e., 'foo' but not 'foo.bar') and '**'
- // matches single and multi-component stems. Note that only multi-
- // component variables are considered for pattern matching (so just '*'
- // won't match anything).
+ public:
+ using key = butl::map_key<string>;
+ using map = std::unordered_map<key, variable>;
+
+ using const_iterator = butl::map_iterator_adapter<map::const_iterator>;
+
+ const_iterator begin () const {return const_iterator (map_.begin ());}
+ const_iterator end () const {return const_iterator (map_.end ());}
+
+ // Construction.
//
- // The patterns are matched in the more-specific-first order where the
- // pattern is considered more specific if it has a greater sum of its
- // prefix and suffix lengths. If the prefix and suffix are equal, then the
- // '*' pattern is considered more specific than '**'. If neither is more
- // specific, then they are matched in the reverse order of insertion.
+ // There are three specific variable pool instances:
//
- // If retro is true then a newly inserted pattern is also applied
- // retrospectively to all the existing variables that match but only
- // if no more specific pattern already exists (which is then assumed
- // to have been applied). So if you use this functionality, watch out
- // for the insertion order (you probably want more specific first).
+ // shared outer
+ // ----------------
+ // true null -- public variable pool in context
+ // true not null -- project-private pool in scope::root_extra
+ // with outer pointing to context::var_pool
+ // false not null -- temporary scope-private pool in temp_scope
+ // with outer pointing to context::var_pool
+ // false null -- script-private pool in script::environment
//
- public:
- LIBBUILD2_SYMEXPORT void
- insert_pattern (const string& pattern,
- optional<const value_type*> type,
- optional<bool> overridable,
- optional<variable_visibility>,
- bool retro = false,
- bool match = true);
+ // Notice that the script-private pool doesn't rely on outer and does
+ // its own pool chaining. So currently we assume that if outer is not
+ // NULL, then this is a project-private pool.
+ //
+ private:
+ friend class context;
+ friend class temp_scope;
- template <typename T>
- void
- insert_pattern (const string& p,
- optional<bool> overridable,
- optional<variable_visibility> v,
- bool retro = false,
- bool match = true)
- {
- insert_pattern (
- p, &value_traits<T>::value_type, overridable, v, retro, match);
- }
+ // Shared pool (public or project-private). The shared argument is
+ // flag/context.
+ //
+ variable_pool (context* shared,
+ variable_pool* outer,
+ const variable_patterns* patterns)
+ : shared_ (shared), outer_ (outer), patterns_ (patterns) {}
public:
- void
- clear () {map_.clear ();}
+ // Script-private pool.
+ //
+ explicit
+ variable_pool (const variable_patterns* patterns = nullptr)
+ : shared_ (nullptr), outer_ (nullptr), patterns_ (patterns) {}
- variable_pool (): variable_pool (nullptr) {}
+ variable_pool (variable_pool&&) = delete;
+ variable_pool& operator= (variable_pool&&) = delete;
- // RW access (only for the global pool).
+ variable_pool (const variable_pool&) = delete;
+ variable_pool& operator= (const variable_pool&) = delete;
+
+ public:
+ // RW access (only for shared pools plus the temp_scope special case).
//
variable_pool&
rw () const
{
- assert (global_->phase == run_phase::load);
+ assert (shared_ == nullptr || shared_->phase == run_phase::load);
return const_cast<variable_pool&> (*this);
}
@@ -1375,14 +1591,16 @@ namespace build2
// Note that in insert() NULL overridable is interpreted as false unless
// overridden by a pattern while in update() NULL overridable is ignored.
//
- LIBBUILD2_SYMEXPORT pair<variable&, bool>
+ pair<variable&, bool>
insert (string name,
const value_type*,
const variable_visibility*,
const bool* overridable,
bool pattern = true);
- LIBBUILD2_SYMEXPORT void
+ // Note: the variable must belong to this pool.
+ //
+ void
update (variable&,
const value_type*,
const variable_visibility*,
@@ -1391,9 +1609,6 @@ namespace build2
// Variable map.
//
private:
- using key = butl::map_key<string>;
- using map = std::unordered_map<key, variable>;
-
pair<map::iterator, bool>
insert (variable&& var)
{
@@ -1402,19 +1617,127 @@ namespace build2
// gets hairy very quickly (there is no std::hash for C-strings). So
// let's rely on small object-optimized std::string for now.
//
- string n (var.name);
+ string n (var.name); // @@ PERF (maybe keep reuse buffer at least?)
auto r (map_.insert (map::value_type (&n, move (var))));
if (r.second)
+ {
+#if 0
+ if (shared_ && outer_ == nullptr) // Global pool in context.
+ {
+ size_t n (map_.bucket_count ());
+ if (n > buckets_)
+ {
+ text << "variable_pool buckets: " << buckets_ << " -> " << n
+ << " (" << map_.size () << ")";
+ buckets_ = n;
+ }
+ }
+#endif
r.first->first.p = &r.first->second.name;
+ }
return r;
}
+ private:
+ friend class variable_patterns;
+
+ context* shared_;
+ variable_pool* outer_;
+ const variable_patterns* patterns_;
map map_;
- // Patterns.
+#if 0
+ size_t buckets_ = 0;
+#endif
+ };
+
+ // Variable patterns.
+ //
+ // This mechanism is used to assign variable types/visibility/overridability
+ // based on the variable name pattern. This mechanism can only be used for
+ // qualified variables and is thus only provided for the public variable
+ // pool.
+ //
+ // Similar to variable_pool, the shared versions are protected by the phase
+ // mutex and thus can only be modified during the load phase.
+ //
+ class LIBBUILD2_SYMEXPORT variable_patterns
+ {
+ public:
+ // Insert a variable pattern. Any variable that matches this pattern will
+ // have the specified type, visibility, and overridability. If match is
+ // true, then individual insertions of the matching variable must match
+ // the specified type/visibility/overridability. Otherwise, individual
+ // insertions can provide alternative values and the pattern values are a
+ // fallback (if you specify false you better be very clear about what you
+ // are trying to achieve).
+ //
+ // The pattern must be in the form [<prefix>.](*|**)[.<suffix>] where '*'
+ // matches single component stems (i.e., 'foo' but not 'foo.bar') and '**'
+ // matches single and multi-component stems. Note that only multi-
+ // component variables are considered for pattern matching (so just '*'
+ // won't match anything).
+ //
+ // The patterns are matched in the more-specific-first order where the
+ // pattern is considered more specific if it has a greater sum of its
+ // prefix and suffix lengths. If the prefix and suffix are equal, then the
+ // '*' pattern is considered more specific than '**'. If neither is more
+ // specific, then they are matched in the reverse order of insertion.
+ //
+ // If retro is true then a newly inserted pattern is also applied
+ // retrospectively to all the existing variables that match but only
+ // if no more specific pattern already exists (which is then assumed
+ // to have been applied). So if you use this functionality, watch out
+ // for the insertion order (you probably want more specific first).
+ //
+ void
+ insert (const string& pattern,
+ optional<const value_type*> type,
+ optional<bool> overridable,
+ optional<variable_visibility>,
+ bool retro = false,
+ bool match = true);
+
+ template <typename T>
+ void
+ insert (const string& p,
+ optional<bool> overridable,
+ optional<variable_visibility> v,
+ bool retro = false,
+ bool match = true)
+ {
+ insert (p, &value_traits<T>::value_type, overridable, v, retro, match);
+ }
+
+ public:
+ // The shared argument is flag/context. The pool argument is for
+ // retrospective pattern application.
//
+ explicit
+ variable_patterns (context* shared, variable_pool* pool)
+ : shared_ (shared), pool_ (pool) {}
+
+ variable_patterns (variable_patterns&&) = delete;
+ variable_patterns& operator= (variable_patterns&&) = delete;
+
+ variable_patterns (const variable_patterns&) = delete;
+ variable_patterns& operator= (const variable_patterns&) = delete;
+
+ public:
+ // RW access (only for shared pools).
+ //
+ variable_patterns&
+ rw () const
+ {
+ assert (shared_->phase == run_phase::load);
+ return const_cast<variable_patterns&> (*this);
+ }
+
+ variable_patterns&
+ rw (scope&) const {return const_cast<variable_patterns&> (*this);}
+
public:
struct pattern
{
@@ -1442,17 +1765,11 @@ namespace build2
};
private:
- multiset<pattern> patterns_;
-
- // Global pool flag/context.
- //
- private:
- friend class context;
-
- explicit
- variable_pool (context* global): global_ (global) {}
+ friend class variable_pool;
- context* global_;
+ context* shared_;
+ variable_pool* pool_;
+ multiset<pattern> patterns_;
};
}
@@ -1493,7 +1810,10 @@ namespace build2
using value::value;
using value::operator=;
- size_t version = 0; // Incremented on each modification (variable_cache).
+ // Incremented on each modification, at which point we also reset
+ // value::extra to 0.
+ //
+ size_t version = 0;
};
// Note that we guarantee ascending iteration order (e.g., for predictable
@@ -1535,8 +1855,13 @@ namespace build2
lookup_type
operator[] (const variable& var) const
{
- auto p (lookup (var));
- return lookup_type (p.first, &p.second, this);
+ lookup_type r;
+ if (!empty ())
+ {
+ auto p (lookup (var));
+ r = lookup_type (p.first, &p.second, this);
+ }
+ return r;
}
lookup_type
@@ -1549,12 +1874,17 @@ namespace build2
lookup_type
operator[] (const string& name) const
{
- const variable* var (ctx != nullptr
- ? ctx->var_pool.find (name)
- : nullptr);
- return var != nullptr ? operator[] (*var) : lookup_type ();
+ assert (owner_ != owner::context);
+
+ lookup_type r;
+ if (!empty ())
+ r = lookup (name);
+ return r;
}
+ lookup_type
+ lookup (const string& name) const;
+
// If typed is false, leave the value untyped even if the variable is. If
// aliased is false, then don't consider aliases (used by the variable
// override machinery where the aliases chain is repurrposed for something
@@ -1581,7 +1911,9 @@ namespace build2
// insert anything.
//
return lookup_namespace (variable {
- move (ns), nullptr, nullptr, nullptr, variable_visibility::project});
+ move (ns),
+ nullptr, nullptr, nullptr, nullptr,
+ variable_visibility::project});
}
// Convert a lookup pointing to a value belonging to this variable map
@@ -1593,6 +1925,7 @@ namespace build2
{
assert (l.vars == this);
value& r (const_cast<value&> (*l.value));
+ r.extra = 0;
static_cast<value_data&> (r).version++;
return r;
}
@@ -1609,24 +1942,37 @@ namespace build2
return assign (*var);
}
- // Note that the variable is expected to have already been registered.
+ // Note that the variable is expected to have already been inserted.
//
value&
- assign (const string& name) {return insert (ctx->var_pool[name]).first;}
+ assign (const string& name);
// As above but also return an indication of whether the new value (which
// will be NULL) was actually inserted. Similar to find(), if typed is
- // false, leave the value untyped even if the variable is.
+ // false, leave the value untyped even if the variable is. If reset_extra
+ // is false, then don't reset the existing value's value::extra.
//
pair<value&, bool>
- insert (const variable&, bool typed = true);
+ insert (const variable&, bool typed = true, bool reset_extra = true);
- // Note: does not deal with aliases.
+ // Note: the following functions do not deal with aliases.
//
+ const_iterator
+ find (const variable& var) const
+ {
+ return const_iterator (m_.find (var), *this);
+ }
+
+ const_iterator
+ find (const string& name) const;
+
bool
erase (const variable&);
const_iterator
+ erase (const_iterator);
+
+ const_iterator
begin () const {return const_iterator (m_.begin (), *this);}
const_iterator
@@ -1639,21 +1985,58 @@ namespace build2
size () const {return m_.size ();}
public:
- // Global should be true if this map is part of the global build state
- // (e.g., scopes, etc).
+ // Shared should be true if this map is part of the shared build state
+ // (e.g., scopes) and thus should only be modified during the load phase.
//
explicit
- variable_map (context& c, bool global = false)
- : ctx (&c), global_ (global) {}
+ variable_map (const scope& owner, bool shared = false);
+
+ explicit
+ variable_map (const target& owner, bool shared = false);
+
+ explicit
+ variable_map (const prerequisite& owner, bool shared = false);
+
+ variable_map (variable_map&&, const prerequisite&, bool shared = false);
+ variable_map (const variable_map&, const prerequisite&, bool shared = false);
+
+ variable_map&
+ operator= (variable_map&& v) noexcept {m_ = move (v.m_); return *this;}
+
+ variable_map&
+ operator= (const variable_map& v) {m_ = v.m_; return *this;}
+
+ // The context owner is for special "managed" variable maps. Note that
+ // such maps cannot lookup/insert variable names specified as strings.
+ //
+ variable_map (context& c, bool shared)
+ : shared_ (shared), owner_ (owner::context), ctx (&c) {}
+
+ // Note: std::map's move constructor can throw.
+ //
+ variable_map (variable_map&& v)
+ : shared_ (v.shared_), owner_ (v.owner_), ctx (v.ctx), m_ (move (v.m_))
+ {
+ assert (owner_ == owner::context);
+ }
+
+ variable_map (const variable_map& v)
+ : shared_ (v.shared_), owner_ (v.owner_), ctx (v.ctx), m_ (v.m_)
+ {
+ assert (v.owner_ == owner::context);
+ }
void
clear () {m_.clear ();}
- // Implementation details (only used for empty_variable_map).
+ // Implementation details.
//
public:
+ enum class owner {empty, context, scope, target, prereq};
+
explicit
- variable_map (context* c): ctx (c) {}
+ variable_map (owner o, context* c = nullptr, bool shared = false)
+ : shared_ (shared), owner_ (o), ctx (c) {}
private:
friend class variable_type_map;
@@ -1662,9 +2045,18 @@ namespace build2
typify (const value_data&, const variable&) const;
private:
+ friend class target_set;
+
+ bool shared_;
+ owner owner_;
+ union
+ {
+ const scope* scope_;
+ const target* target_;
+ const prerequisite* prereq_;
+ };
context* ctx;
map_type m_;
- bool global_;
};
LIBBUILD2_SYMEXPORT extern const variable_map empty_variable_map;
@@ -1797,8 +2189,8 @@ namespace build2
using const_iterator = map_type::const_iterator;
using const_reverse_iterator = map_type::const_reverse_iterator;
- variable_pattern_map (context& c, bool global)
- : ctx (c), global_ (global) {}
+ variable_pattern_map (context& c, bool shared)
+ : ctx (c), shared_ (shared) {}
// Note that here we assume the "outer" pattern format (delimiters, flags,
// etc) is valid.
@@ -1814,7 +2206,7 @@ namespace build2
operator[] (string text)
{
return map_.emplace (pattern {pattern_type::path, false, move (text), {}},
- variable_map (ctx, global_)).first->second;
+ variable_map (ctx, shared_)).first->second;
}
const_iterator begin () const {return map_.begin ();}
@@ -1826,7 +2218,7 @@ namespace build2
private:
context& ctx;
map_type map_;
- bool global_;
+ bool shared_;
};
class LIBBUILD2_SYMEXPORT variable_type_map
@@ -1836,13 +2228,13 @@ namespace build2
variable_pattern_map>;
using const_iterator = map_type::const_iterator;
- variable_type_map (context& c, bool global): ctx (c), global_ (global) {}
+ variable_type_map (context& c, bool shared): ctx (c), shared_ (shared) {}
variable_pattern_map&
operator[] (const target_type& t)
{
return map_.emplace (
- t, variable_pattern_map (ctx, global_)).first->second;
+ t, variable_pattern_map (ctx, shared_)).first->second;
}
const_iterator begin () const {return map_.begin ();}
@@ -1872,7 +2264,7 @@ namespace build2
private:
context& ctx;
map_type map_;
- bool global_;
+ bool shared_;
};
}
diff --git a/libbuild2/variable.ixx b/libbuild2/variable.ixx
index a84c012..ca84a33 100644
--- a/libbuild2/variable.ixx
+++ b/libbuild2/variable.ixx
@@ -224,7 +224,7 @@ namespace build2
template <typename T>
inline const T&
- cast (const lookup& l)
+ cast (lookup l)
{
return cast<T> (*l);
}
@@ -245,7 +245,7 @@ namespace build2
template <typename T>
inline const T*
- cast_null (const lookup& l)
+ cast_null (lookup l)
{
return l ? &cast<T> (*l) : nullptr;
}
@@ -259,7 +259,7 @@ namespace build2
template <typename T>
inline const T&
- cast_empty (const lookup& l)
+ cast_empty (lookup l)
{
return l ? cast<T> (l) : value_traits<T>::empty_instance;
}
@@ -273,7 +273,7 @@ namespace build2
template <typename T>
inline T
- cast_default (const lookup& l, const T& d)
+ cast_default (lookup l, const T& d)
{
return l ? cast<T> (l) : d;
}
@@ -287,7 +287,7 @@ namespace build2
template <typename T>
inline T
- cast_false (const lookup& l)
+ cast_false (lookup l)
{
return l && cast<T> (l);
}
@@ -301,7 +301,7 @@ namespace build2
template <typename T>
inline T
- cast_true (const lookup& l)
+ cast_true (lookup l)
{
return !l || cast<T> (l);
}
@@ -326,18 +326,21 @@ namespace build2
}
inline vector_view<const name>
- reverse (const value& v, names& storage)
+ reverse (const value& v, names& storage, bool reduce)
{
assert (v &&
storage.empty () &&
(v.type == nullptr || v.type->reverse != nullptr));
- return v.type == nullptr ? v.as<names> () : v.type->reverse (v, storage);
+
+ return v.type == nullptr
+ ? v.as<names> ()
+ : v.type->reverse (v, storage, reduce);
}
inline vector_view<name>
- reverse (value& v, names& storage)
+ reverse (value& v, names& storage, bool reduce)
{
- names_view cv (reverse (static_cast<const value&> (v), storage));
+ names_view cv (reverse (static_cast<const value&> (v), storage, reduce));
return vector_view<name> (const_cast<name*> (cv.data ()), cv.size ());
}
@@ -359,13 +362,53 @@ namespace build2
// This one will be SFINAE'd out unless T is a container.
//
+ // If T is both (e.g., json_value), then make this version preferable.
+ //
template <typename T>
inline auto
- convert (names&& ns) -> decltype (value_traits<T>::convert (move (ns)))
+ convert_impl (names&& ns, int)
+ -> decltype (value_traits<T>::convert (move (ns)))
{
return value_traits<T>::convert (move (ns));
}
+ // This one will be SFINAE'd out unless T is a simple value.
+ //
+ // If T is both (e.g., json_value), then make this version less preferable.
+ //
+ template <typename T>
+ auto // NOTE: not inline!
+ convert_impl (names&& ns, ...) ->
+ decltype (value_traits<T>::convert (move (ns[0]), nullptr))
+ {
+ size_t n (ns.size ());
+
+ if (n == 0)
+ {
+ if (value_traits<T>::empty_value)
+ return T ();
+ }
+ else if (n == 1)
+ {
+ return convert<T> (move (ns[0]));
+ }
+ else if (n == 2 && ns[0].pair != '\0')
+ {
+ return convert<T> (move (ns[0]), move (ns[1]));
+ }
+
+ throw invalid_argument (
+ string ("invalid ") + value_traits<T>::type_name +
+ (n == 0 ? " value: empty" : " value: multiple names"));
+ }
+
+ template <typename T>
+ inline T
+ convert (names&& ns)
+ {
+ return convert_impl<T> (move (ns), 0);
+ }
+
// bool value
//
inline void value_traits<bool>::
@@ -850,6 +893,44 @@ namespace build2
new (&v.data_) vector<pair<K, V>> (move (x));
}
+ // set<T> value
+ //
+ template <typename T>
+ inline void value_traits<set<T>>::
+ assign (value& v, set<T>&& x)
+ {
+ if (v)
+ v.as<set<T>> () = move (x);
+ else
+ new (&v.data_) set<T> (move (x));
+ }
+
+ template <typename T>
+ inline void value_traits<set<T>>::
+ append (value& v, set<T>&& x)
+ {
+ if (v)
+ {
+ set<T>& p (v.as<set<T>> ());
+
+ if (p.empty ())
+ p.swap (x);
+ else
+ // Keys (being const) can only be copied.
+ //
+ p.insert (x.begin (), x.end ());
+ }
+ else
+ new (&v.data_) set<T> (move (x));
+ }
+
+ template <typename T>
+ inline void value_traits<set<T>>::
+ prepend (value& v, set<T>&& x)
+ {
+ append (v, move (x));
+ }
+
// map<K, V> value
//
template <typename K, typename V>
@@ -903,21 +984,141 @@ namespace build2
new (&v.data_) map<K, V> (move (x));
}
- // variable_pool
+ // json
//
- inline const variable& variable_pool::
- operator[] (const string& n) const
+ inline bool value_traits<json_value>::
+ empty (const json_value& v)
{
- const variable* r (find (n));
- assert (r != nullptr);
- return *r;
+ // Note: should be consistent with $json.size().
+ //
+ switch (v.type)
+ {
+ case json_type::null: return true;
+ case json_type::boolean:
+ case json_type::signed_number:
+ case json_type::unsigned_number:
+ case json_type::hexadecimal_number:
+ case json_type::string: break;
+ case json_type::array: return v.array.empty ();
+ case json_type::object: return v.object.empty ();
+ }
+
+ return false;
}
+ inline void value_traits<json_value>::
+ assign (value& v, json_value&& x)
+ {
+ if (v)
+ v.as<json_value> () = move (x);
+ else
+ new (&v.data_) json_value (move (x));
+ }
+
+ inline void value_traits<json_value>::
+ append (value& v, json_value&& x)
+ {
+ if (v)
+ v.as<json_value> ().append (move (x));
+ else
+ new (&v.data_) json_value (move (x));
+ }
+
+ inline void value_traits<json_value>::
+ prepend (value& v, json_value&& x)
+ {
+ if (v)
+ v.as<json_value> ().prepend (move (x));
+ else
+ new (&v.data_) json_value (move (x));
+ }
+
+ // json_array
+ //
+ inline void value_traits<json_array>::
+ assign (value& v, json_array&& x)
+ {
+ if (v)
+ v.as<json_array> () = move (x);
+ else
+ new (&v.data_) json_array (move (x));
+ }
+
+ inline void value_traits<json_array>::
+ append (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_array ();
+
+ v.as<json_array> ().append (move (x));
+ }
+
+ inline void value_traits<json_array>::
+ prepend (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_array ();
+
+ v.as<json_array> ().prepend (move (x));
+ }
+
+ // json_object
+ //
+ inline void value_traits<json_object>::
+ assign (value& v, json_object&& x)
+ {
+ if (v)
+ v.as<json_object> () = move (x);
+ else
+ new (&v.data_) json_object (move (x));
+ }
+
+ inline void value_traits<json_object>::
+ append (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_object ();
+
+ v.as<json_object> ().append (move (x));
+ }
+
+ inline void value_traits<json_object>::
+ prepend (value& v, json_value&& x)
+ {
+ if (!v)
+ new (&v.data_) json_object ();
+
+ v.as<json_object> ().prepend (move (x));
+ }
+
+ // variable_pool
+ //
inline const variable* variable_pool::
find (const string& n) const
{
+ // The pool chaining semantics for lookup: first check own pool then, if
+ // not found, check the outer pool.
+ //
auto i (map_.find (&n));
- return i != map_.end () ? &i->second : nullptr;
+ if (i != map_.end ())
+ return &i->second;
+
+ if (outer_ != nullptr)
+ {
+ i = outer_->map_.find (&n);
+ if (i != outer_->map_.end ())
+ return &i->second;
+ }
+
+ return nullptr;
+ }
+
+ inline const variable& variable_pool::
+ operator[] (const string& n) const
+ {
+ const variable* r (find (n));
+ assert (r != nullptr);
+ return *r;
}
// variable_map
diff --git a/libbuild2/variable.txx b/libbuild2/variable.txx
index b1c4112..0b831e9 100644
--- a/libbuild2/variable.txx
+++ b/libbuild2/variable.txx
@@ -27,34 +27,6 @@ namespace build2
return false;
}
- // This one will be SFINAE'd out unless T is a simple value.
- //
- template <typename T>
- auto
- convert (names&& ns) ->
- decltype (value_traits<T>::convert (move (ns[0]), nullptr))
- {
- size_t n (ns.size ());
-
- if (n == 0)
- {
- if (value_traits<T>::empty_value)
- return T ();
- }
- else if (n == 1)
- {
- return convert<T> (move (ns[0]));
- }
- else if (n == 2 && ns[0].pair != '\0')
- {
- return convert<T> (move (ns[0]), move (ns[1]));
- }
-
- throw invalid_argument (
- string ("invalid ") + value_traits<T>::type_name +
- (n == 0 ? " value: empty" : " value: multiple names"));
- }
-
[[noreturn]] LIBBUILD2_SYMEXPORT void
convert_throw (const value_type* from, const value_type& to);
@@ -229,13 +201,13 @@ namespace build2
template <typename T>
names_view
- simple_reverse (const value& v, names& s)
+ simple_reverse (const value& v, names& s, bool reduce)
{
const T& x (v.as<T> ());
- // Represent an empty simple value as empty name sequence rather than
- // a single empty name. This way, for example, during serialization we
- // end up with a much saner looking:
+ // Unless requested otherwise, represent an empty simple value as empty
+ // name sequence rather than a single empty name. This way, for example,
+ // during serialization we end up with a much saner looking:
//
// config.import.foo =
//
@@ -245,6 +217,8 @@ namespace build2
//
if (!value_traits<T>::empty (x))
s.emplace_back (value_traits<T>::reverse (x));
+ else if (!reduce)
+ s.push_back (name ());
return s;
}
@@ -477,6 +451,7 @@ namespace build2
convert (names&& ns)
{
vector<T> v;
+ v.reserve (ns.size ()); // Normally there won't be any pairs.
// Similar to vector_append() below except we throw instead of issuing
// diagnostics.
@@ -492,7 +467,7 @@ namespace build2
if (n.pair != '@')
throw invalid_argument (
- string ("invalid pair character: '") + n.pair + "'");
+ string ("invalid pair character: '") + n.pair + '\'');
}
v.push_back (value_traits<T>::convert (move (n), r));
@@ -509,6 +484,8 @@ namespace build2
? v.as<vector<T>> ()
: *new (&v.data_) vector<T> ());
+ p.reserve (p.size () + ns.size ()); // Normally there won't be any pairs.
+
// Convert each element to T while merging pairs.
//
for (auto i (ns.begin ()); i != ns.end (); ++i)
@@ -589,8 +566,8 @@ namespace build2
}
template <typename T>
- static names_view
- vector_reverse (const value& v, names& s)
+ names_view
+ vector_reverse (const value& v, names& s, bool)
{
auto& vv (v.as<vector<T>> ());
s.reserve (vv.size ());
@@ -602,7 +579,7 @@ namespace build2
}
template <typename T>
- static int
+ int
vector_compare (const value& l, const value& r)
{
auto& lv (l.as<vector<T>> ());
@@ -624,6 +601,68 @@ namespace build2
return 0;
}
+ // Provide subscript for vector<T> for efficiency.
+ //
+ template <typename T>
+ value
+ vector_subscript (const value& val, value* val_data,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ size_t i;
+ try
+ {
+ i = static_cast<size_t> (convert<uint64_t> (move (sub)));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid " << value_traits<vector<T>>::value_type.name
+ << " value subscript: " << e <<
+ info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern";
+ }
+
+ value r;
+ if (!val.null)
+ {
+ const auto& v (val.as<vector<T>> ());
+ if (i < v.size ())
+ {
+ const T& e (v[i]);
+
+ // Steal the value if possible.
+ //
+ r = &val == val_data ? T (move (const_cast<T&> (e))) : T (e);
+ }
+ }
+
+ // Typify null values so that type-specific subscript (e.g., for
+ // json_value) gets called for chained subscripts.
+ //
+ if (r.null)
+ r.type = &value_traits<T>::value_type;
+
+ return r;
+ }
+
+ // Provide iterate for vector<T> for efficiency.
+ //
+ template <typename T>
+ void
+ vector_iterate (const value& val,
+ const function<void (value&&, bool first)>& f)
+ {
+ const auto& v (val.as<vector<T>> ()); // Never NULL.
+
+ for (auto b (v.begin ()), i (b), e (v.end ()); i != e; ++i)
+ {
+ f (value (*i), i == b);
+ }
+ }
+
// Make sure these are static-initialized together. Failed that VC will make
// sure it's done in the wrong order.
//
@@ -635,6 +674,8 @@ namespace build2
vector_value_type (value_type&& v)
: value_type (move (v))
{
+ // Note: vector<T> always has a convenience alias.
+ //
type_name = value_traits<T>::type_name;
type_name += 's';
name = type_name.c_str ();
@@ -651,7 +692,8 @@ namespace build2
nullptr, // Patched above.
sizeof (vector<T>),
nullptr, // No base.
- &value_traits<T>::value_type,
+ true, // Container.
+ &value_traits<T>::value_type, // Element type.
&default_dtor<vector<T>>,
&default_copy_ctor<vector<T>>,
&default_copy_assign<vector<T>>,
@@ -661,7 +703,9 @@ namespace build2
&vector_reverse<T>,
nullptr, // No cast (cast data_ directly).
&vector_compare<T>,
- &default_empty<vector<T>>
+ &default_empty<vector<T>>,
+ &vector_subscript<T>,
+ &vector_iterate<T>
};
// vector<pair<K, V>> value
@@ -701,8 +745,8 @@ namespace build2
}
template <typename K, typename V>
- static names_view
- pair_vector_reverse (const value& v, names& s)
+ names_view
+ pair_vector_reverse (const value& v, names& s, bool)
{
auto& vv (v.as<vector<pair<K, V>>> ());
s.reserve (2 * vv.size ());
@@ -714,7 +758,7 @@ namespace build2
}
template <typename K, typename V>
- static int
+ int
pair_vector_compare (const value& l, const value& r)
{
auto& lv (l.as<vector<pair<K, V>>> ());
@@ -749,10 +793,13 @@ namespace build2
pair_vector_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += '_';
+ // vector<pair<K,V>>
+ //
+ type_name = "vector<pair<";
+ type_name += value_traits<K>::type_name;
+ type_name += ',';
type_name += value_traits<V>::type_name;
- type_name += "_pair_vector";
+ type_name += ">>";
name = type_name.c_str ();
}
};
@@ -768,10 +815,13 @@ namespace build2
pair_vector_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += "_optional_";
+ // vector<pair<K,optional<V>>>
+ //
+ type_name = "vector<pair<";
+ type_name += value_traits<K>::type_name;
+ type_name += ",optional<";
type_name += value_traits<V>::type_name;
- type_name += "_pair_vector";
+ type_name += ">>>";
name = type_name.c_str ();
}
};
@@ -784,11 +834,13 @@ namespace build2
pair_vector_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = "optional_";
+ // vector<pair<optional<K>,V>>
+ //
+ type_name = "vector<pair<optional<";
type_name += value_traits<K>::type_name;
- type_name += '_';
+ type_name += ">,";
type_name += value_traits<V>::type_name;
- type_name += "_pair_vector";
+ type_name += ">>";
name = type_name.c_str ();
}
};
@@ -803,7 +855,8 @@ namespace build2
nullptr, // Patched above.
sizeof (vector<pair<K, V>>),
nullptr, // No base.
- nullptr, // No element.
+ true, // Container.
+ nullptr, // No element (not named).
&default_dtor<vector<pair<K, V>>>,
&default_copy_ctor<vector<pair<K, V>>>,
&default_copy_assign<vector<pair<K, V>>>,
@@ -813,7 +866,244 @@ namespace build2
&pair_vector_reverse<K, V>,
nullptr, // No cast (cast data_ directly).
&pair_vector_compare<K, V>,
- &default_empty<vector<pair<K, V>>>
+ &default_empty<vector<pair<K, V>>>,
+ nullptr, // Subscript.
+ nullptr // Iterate.
+ };
+
+ // set<T> value
+ //
+ template <typename T>
+ set<T> value_traits<set<T>>::
+ convert (names&& ns)
+ {
+ set<T> s;
+
+ // Similar to set_append() below except we throw instead of issuing
+ // diagnostics.
+ //
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i);
+ name* r (nullptr);
+
+ if (n.pair)
+ {
+ r = &*++i;
+
+ if (n.pair != '@')
+ throw invalid_argument (
+ string ("invalid pair character: '") + n.pair + '\'');
+ }
+
+ s.insert (value_traits<T>::convert (move (n), r));
+ }
+
+ return s;
+ }
+
+ template <typename T>
+ void
+ set_append (value& v, names&& ns, const variable* var)
+ {
+ set<T>& s (v ? v.as<set<T>> () : *new (&v.data_) set<T> ());
+
+ // Convert each element to T while merging pairs.
+ //
+ for (auto i (ns.begin ()); i != ns.end (); ++i)
+ {
+ name& n (*i);
+ name* r (nullptr);
+
+ if (n.pair)
+ {
+ r = &*++i;
+
+ if (n.pair != '@')
+ {
+ diag_record dr (fail);
+
+ dr << "unexpected pair style for "
+ << value_traits<T>::value_type.name << " value "
+ << "'" << n << "'" << n.pair << "'" << *r << "'";
+
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+ }
+ }
+
+ try
+ {
+ s.insert (value_traits<T>::convert (move (n), r));
+ }
+ catch (const invalid_argument& e)
+ {
+ diag_record dr (fail);
+
+ dr << e;
+ if (var != nullptr)
+ dr << " in variable " << var->name;
+
+ dr << info << "while converting ";
+ if (n.pair)
+ dr << " element pair '" << n << "'@'" << *r << "'";
+ else
+ dr << " element '" << n << "'";
+ }
+ }
+ }
+
+ template <typename T>
+ void
+ set_assign (value& v, names&& ns, const variable* var)
+ {
+ if (v)
+ v.as<set<T>> ().clear ();
+
+ set_append<T> (v, move (ns), var);
+ }
+
+ template <typename T>
+ names_view
+ set_reverse (const value& v, names& s, bool)
+ {
+ auto& sv (v.as<set<T>> ());
+ s.reserve (sv.size ());
+
+ for (const T& x: sv)
+ s.push_back (value_traits<T>::reverse (x));
+
+ return s;
+ }
+
+ template <typename T>
+ int
+ set_compare (const value& l, const value& r)
+ {
+ auto& ls (l.as<set<T>> ());
+ auto& rs (r.as<set<T>> ());
+
+ auto li (ls.begin ()), le (ls.end ());
+ auto ri (rs.begin ()), re (rs.end ());
+
+ for (; li != le && ri != re; ++li, ++ri)
+ if (int r = value_traits<T>::compare (*li, *ri))
+ return r;
+
+ if (li == le && ri != re) // l shorter than r.
+ return -1;
+
+ if (ri == re && li != le) // r shorter than l.
+ return 1;
+
+ return 0;
+ }
+
+ // Map subscript to set::contains().
+ //
+ template <typename T>
+ value
+ set_subscript (const value& val, value*,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ T k;
+ try
+ {
+ k = convert<T> (move (sub));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid " << value_traits<set<T>>::value_type.name
+ << " value subscript: " << e <<
+ info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern";
+ }
+
+ bool r (false);
+ if (!val.null)
+ {
+ const auto& s (val.as<set<T>> ());
+ r = s.find (k) != s.end ();
+ }
+
+ return value (r);
+ }
+
+ // Provide iterate for set<T> for efficiency.
+ //
+ template <typename T>
+ void
+ set_iterate (const value& val,
+ const function<void (value&&, bool first)>& f)
+ {
+ const auto& v (val.as<set<T>> ()); // Never NULL.
+
+ for (auto b (v.begin ()), i (b), e (v.end ()); i != e; ++i)
+ {
+ f (value (*i), i == b);
+ }
+ }
+
+ // Make sure these are static-initialized together. Failed that VC will make
+ // sure it's done in the wrong order.
+ //
+ template <typename T>
+ struct set_value_type: value_type
+ {
+ string type_name;
+
+ set_value_type (value_type&& v)
+ : value_type (move (v))
+ {
+ // set<T>
+ //
+ type_name = "set<";
+ type_name += value_traits<T>::type_name;
+ type_name += '>';
+ name = type_name.c_str ();
+ }
+ };
+
+ // Convenience aliases for certain set<T> cases.
+ //
+ template <>
+ struct set_value_type<string>: value_type
+ {
+ set_value_type (value_type&& v)
+ : value_type (move (v))
+ {
+ name = "string_set";
+ }
+ };
+
+ template <typename T>
+ const set<T> value_traits<set<T>>::empty_instance;
+
+ template <typename T>
+ const set_value_type<T>
+ value_traits<set<T>>::value_type = build2::value_type // VC14 wants =.
+ {
+ nullptr, // Patched above.
+ sizeof (set<T>),
+ nullptr, // No base.
+ true, // Container.
+ &value_traits<T>::value_type, // Element type.
+ &default_dtor<set<T>>,
+ &default_copy_ctor<set<T>>,
+ &default_copy_assign<set<T>>,
+ &set_assign<T>,
+ &set_append<T>,
+ &set_append<T>, // Prepend the same as append.
+ &set_reverse<T>,
+ nullptr, // No cast (cast data_ directly).
+ &set_compare<T>,
+ &default_empty<set<T>>,
+ &set_subscript<T>,
+ &set_iterate<T>
};
// map<K, V> value
@@ -839,7 +1129,9 @@ namespace build2
"element",
var));
- p.emplace (move (v.first), move (v.second));
+ // Poor man's emplace_or_assign().
+ //
+ p.emplace (move (v.first), V ()).first->second = move (v.second);
}
}
@@ -864,9 +1156,7 @@ namespace build2
"element",
var));
- // Poor man's emplace_or_assign().
- //
- p.emplace (move (v.first), V ()).first->second = move (v.second);
+ p.emplace (move (v.first), move (v.second));
}
}
@@ -881,8 +1171,8 @@ namespace build2
}
template <typename K, typename V>
- static names_view
- map_reverse (const value& v, names& s)
+ names_view
+ map_reverse (const value& v, names& s, bool)
{
auto& vm (v.as<map<K, V>> ());
s.reserve (2 * vm.size ());
@@ -894,7 +1184,7 @@ namespace build2
}
template <typename K, typename V>
- static int
+ int
map_compare (const value& l, const value& r)
{
auto& lm (l.as<map<K, V>> ());
@@ -918,6 +1208,59 @@ namespace build2
return 0;
}
+ // Note that unlike json_value, we don't provide index support for maps.
+ // There are two reasons for this: Firstly, consider map<uint64_t,...>.
+ // Secondly, even something like map<string,...> may contain integers as
+ // keys (in JSON, there is a strong convention for object member names not
+ // to be integers). Instead, we provide the $keys() function which allows
+ // one to implement an index-based access with a bit of overhead, if needed.
+ //
+ template <typename K, typename V>
+ value
+ map_subscript (const value& val, value* val_data,
+ value&& sub,
+ const location& sloc,
+ const location& bloc)
+ {
+ // Process subscript even if the value is null to make sure it is valid.
+ //
+ K k;
+ try
+ {
+ k = convert<K> (move (sub));
+ }
+ catch (const invalid_argument& e)
+ {
+ fail (sloc) << "invalid " << value_traits<map<K, V>>::value_type.name
+ << " value subscript: " << e <<
+ info (bloc) << "use the '\\[' escape sequence if this is a "
+ << "wildcard pattern";
+ }
+
+ value r;
+ if (!val.null)
+ {
+ const auto& m (val.as<map<K, V>> ());
+ auto i (m.find (k));
+ if (i != m.end ())
+ {
+ // Steal the value if possible.
+ //
+ r = (&val == val_data
+ ? V (move (const_cast<V&> (i->second)))
+ : V (i->second));
+ }
+ }
+
+ // Typify null values so that type-specific subscript (e.g., for
+ // json_value) gets called for chained subscripts.
+ //
+ if (r.null)
+ r.type = &value_traits<V>::value_type;
+
+ return r;
+ }
+
// Make sure these are static-initialized together. Failed that VC will make
// sure it's done in the wrong order.
//
@@ -929,11 +1272,15 @@ namespace build2
map_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += '_';
+ // map<K,V>
+ //
+ type_name = "map<";
+ type_name += value_traits<K>::type_name;
+ type_name += ',';
type_name += value_traits<V>::type_name;
- type_name += "_map";
+ type_name += '>';
name = type_name.c_str ();
+ subscript = &map_subscript<K, V>;
}
};
@@ -948,11 +1295,15 @@ namespace build2
map_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = value_traits<K>::type_name;
- type_name += "_optional_";
+ // map<K,optional<V>>
+ //
+ type_name = "map<";
+ type_name += value_traits<K>::type_name;
+ type_name += ",optional<";
type_name += value_traits<V>::type_name;
- type_name += "_map";
+ type_name += ">>";
name = type_name.c_str ();
+ // @@ TODO: subscript
}
};
@@ -964,18 +1315,42 @@ namespace build2
map_value_type (value_type&& v)
: value_type (move (v))
{
- type_name = "optional_";
+ // map<optional<K>,V>
+ //
+ type_name = "map<optional<";
type_name += value_traits<K>::type_name;
- type_name += '_';
+ type_name += ">,";
type_name += value_traits<V>::type_name;
- type_name += "_map";
+ type_name += '>';
name = type_name.c_str ();
+ // @@ TODO: subscript
+ }
+ };
+
+ // Convenience aliases for certain map<T,T> cases.
+ //
+ template <>
+ struct map_value_type<string, string>: value_type
+ {
+ map_value_type (value_type&& v)
+ : value_type (move (v))
+ {
+ name = "string_map";
+ subscript = &map_subscript<string, string>;
}
};
template <typename K, typename V>
const map<K, V> value_traits<map<K, V>>::empty_instance;
+ // Note that custom iteration would be better (more efficient, return typed
+ // value), but we don't yet have pair<> as value type so we let the generic
+ // implementation return an untyped pair.
+ //
+ // BTW, one negative consequence of returning untyped pair is that
+ // $first()/$second() don't return types values either, which is quite
+ // unfortunate for something like json_map.
+ //
template <typename K, typename V>
const map_value_type<K, V>
value_traits<map<K, V>>::value_type = build2::value_type // VC14 wants =
@@ -983,7 +1358,8 @@ namespace build2
nullptr, // Patched above.
sizeof (map<K, V>),
nullptr, // No base.
- nullptr, // No element.
+ true, // Container.
+ nullptr, // No element (pair<> not a value type yet).
&default_dtor<map<K, V>>,
&default_copy_ctor<map<K, V>>,
&default_copy_assign<map<K, V>>,
@@ -993,7 +1369,9 @@ namespace build2
&map_reverse<K, V>,
nullptr, // No cast (cast data_ directly).
&map_compare<K, V>,
- &default_empty<map<K, V>>
+ &default_empty<map<K, V>>,
+ nullptr, // Subscript (patched in by map_value_type above).
+ nullptr // Iterate.
};
// variable_cache
@@ -1014,8 +1392,8 @@ namespace build2
: 0);
shared_mutex& m (
- ctx.mutexes.variable_cache[
- hash<variable_cache*> () (this) % ctx.mutexes.variable_cache_size]);
+ ctx.mutexes->variable_cache[
+ hash<variable_cache*> () (this) % ctx.mutexes->variable_cache_size]);
slock sl (m);
ulock ul (m, defer_lock);
@@ -1070,6 +1448,7 @@ namespace build2
e.stem_version = sver;
+ e.value.extra = 0; // For consistency (we don't really use it).
e.value.version++; // Value changed.
}
else
diff --git a/libbuild2/version/rule.cxx b/libbuild2/version/rule.cxx
index 1799666..65c1117 100644
--- a/libbuild2/version/rule.cxx
+++ b/libbuild2/version/rule.cxx
@@ -93,6 +93,12 @@ namespace build2
if (!fi)
l5 ([&]{trace << "no in file prerequisite for target " << t;});
+ // If we match, derive the file name early as recommended by the in
+ // rule.
+ //
+ if (fm && fi)
+ t.derive_path ();
+
return fm && fi;
}
@@ -115,6 +121,7 @@ namespace build2
const target& t,
const string& n,
optional<uint64_t> flags,
+ const substitution_map* smap,
const optional<string>& null) const
{
assert (!flags);
@@ -138,8 +145,7 @@ namespace build2
a,
t,
p == string::npos ? n : string (n, p + 1),
- nullopt,
- null);
+ nullopt, smap, null);
}
string pn (n, 0, p);
@@ -241,13 +247,13 @@ namespace build2
if (mav->snapshot ())
{
- r += (p ? "(" : "");
+ if (p) r += '(';
r += cmp (vm, " < ", mav->version) + " || (";
r += cmp (vm, " == ", mav->version) + " && ";
- r += cmp (sm, (mao ? " < " : " <= "), mav->snapshot_sn) + ")";
+ r += cmp (sm, (mao ? " < " : " <= "), mav->snapshot_sn) + ')';
- r += (p ? ")" : "");
+ if (p) r += ')';
}
else
r = cmp (vm, (mao ? " < " : " <= "), mav->version);
@@ -261,13 +267,13 @@ namespace build2
if (miv->snapshot ())
{
- r += (p ? "(" : "");
+ if (p) r += '(';
r += cmp (vm, " > ", miv->version) + " || (";
r += cmp (vm, " == ", miv->version) + " && ";
- r += cmp (sm, (mio ? " > " : " >= "), miv->snapshot_sn) + ")";
+ r += cmp (sm, (mio ? " > " : " >= "), miv->snapshot_sn) + ')';
- r += (p ? ")" : "");
+ if (p) r += ')';
}
else
r = cmp (vm, (mio ? " > " : " >= "), miv->version);
diff --git a/libbuild2/version/rule.hxx b/libbuild2/version/rule.hxx
index ba673e5..0bdc090 100644
--- a/libbuild2/version/rule.hxx
+++ b/libbuild2/version/rule.hxx
@@ -20,7 +20,7 @@ namespace build2
class in_rule: public in::rule
{
public:
- in_rule (): rule ("version.in 2", "version.in") {}
+ in_rule (): rule ("version.in 2", "version") {}
virtual bool
match (action, target&) const override;
@@ -34,6 +34,7 @@ namespace build2
const target&,
const string&,
optional<uint64_t>,
+ const substitution_map*,
const optional<string>&) const override;
};
diff --git a/libbuild2/version/snapshot-git.cxx b/libbuild2/version/snapshot-git.cxx
index 2ae3f5b..ab0224a 100644
--- a/libbuild2/version/snapshot-git.cxx
+++ b/libbuild2/version/snapshot-git.cxx
@@ -21,7 +21,7 @@ namespace build2
static global_cache<snapshot, dir_path> cache;
snapshot
- extract_snapshot_git (dir_path rep_root)
+ extract_snapshot_git (context& ctx, dir_path rep_root)
{
if (const snapshot* r = cache.find (rep_root))
return *r;
@@ -82,7 +82,11 @@ namespace build2
args[args_i + 1] = "--porcelain";
args[args_i + 2] = nullptr;
+ // @@ PERF: redo with custom stream reading code (then could also
+ // get rid of context).
+ //
r.committed = run<string> (
+ ctx,
3 /* verbosity */,
pp,
args,
@@ -108,7 +112,8 @@ namespace build2
// (reluctantly) assume that the only reason git cat-file fails is if
// there is no HEAD (that we equal with the "new repository" condition
// which is, strictly speaking, might not be the case either). So we
- // suppress any diagnostics, and handle non-zero exit code.
+ // suppress any diagnostics, and handle non-zero exit code (and so no
+ // diagnostics buffering is needed, plus we are in the load phase).
//
string data;
@@ -117,12 +122,12 @@ namespace build2
args[args_i + 2] = "HEAD";
args[args_i + 3] = nullptr;
- process pr (run_start (3 /* verbosity */,
+ process pr (run_start (3 /* verbosity */,
pp,
args,
- 0 /* stdin */,
- -1 /* stdout */,
- false /* error */));
+ 0 /* stdin */,
+ -1 /* stdout */,
+ 1 /* stderr (to stdout) */));
string l;
try
@@ -201,7 +206,7 @@ namespace build2
// that.
}
- if (run_finish_code (args, pr, l))
+ if (run_finish_code (args, pr, l, 2 /* verbosity */))
{
if (r.sn == 0)
fail << "unable to extract git commit id/date for " << rep_root;
diff --git a/libbuild2/version/snapshot.cxx b/libbuild2/version/snapshot.cxx
index d20e633..000bcba 100644
--- a/libbuild2/version/snapshot.cxx
+++ b/libbuild2/version/snapshot.cxx
@@ -12,7 +12,7 @@ namespace build2
namespace version
{
snapshot
- extract_snapshot_git (dir_path);
+ extract_snapshot_git (context&, dir_path);
static const path git (".git");
@@ -46,7 +46,7 @@ namespace build2
if (butl::entry_exists (d / git,
true /* follow_symlinks */,
true /* ignore_errors */))
- return extract_snapshot_git (move (d));
+ return extract_snapshot_git (rs.ctx, move (d));
}
return snapshot ();
diff --git a/manifest b/manifest
index 26b38ae..4781eef 100644
--- a/manifest
+++ b/manifest
@@ -1,6 +1,6 @@
: 1
name: build2
-version: 0.15.0-a.0.z
+version: 0.17.0-a.0.z
summary: build2 build system
license: MIT
topics: build system, build toolchain
@@ -12,11 +12,11 @@ doc-url: https://build2.org/doc.xhtml
src-url: https://git.build2.org/cgit/build2/tree/
email: users@build2.org
build-warning-email: builds@build2.org
-builds: host
+builds: all : &host
requires: c++14
-depends: * build2 >= 0.14.0-
-depends: * bpkg >= 0.14.0-
+depends: * build2 >= 0.16.0-
+depends: * bpkg >= 0.16.0-
# @@ DEP Should probably become conditional dependency.
#requires: ? cli ; Only required if changing .cli files.
-depends: libbutl [0.15.0-a.0.1 0.15.0-a.1)
-depends: libpkgconf [1.4.2 1.7.0-)
+depends: libbutl [0.17.0-a.0.1 0.17.0-a.1)
+depends: libpkg-config ~0.1.1
diff --git a/old-tests/variable/override/buildfile b/old-tests/variable/override/buildfile
index 2889f69..c0330cb 100644
--- a/old-tests/variable/override/buildfile
+++ b/old-tests/variable/override/buildfile
@@ -1,57 +1,61 @@
-if ($t != [null])
+if ($p.t != [null])
{
- [$t] v = [null]
+ [$p.t] p.v = [null]
}
-print "/ :" $(/: v)
+/:
-if ($a == as)
+print "/ :" $(/: p.v)
+
+if ($p.a == as)
{
- v = x
+ p.v = x
}
-elif ($a == ap)
+elif ($p.a == ap)
{
- v += s
+ p.v += s
}
-elif ($a == pr)
+elif ($p.a == pr)
{
- v =+ p
+ p.v =+ p
}
-print ". :" $v
+print ". :" $p.v
d/
{
- if ($d_a == as)
+ file{t}:
+
+ if ($p.d_a == as)
{
- v = x
+ p.v = x
}
- elif ($d_a == ap)
+ elif ($p.d_a == ap)
{
- v += s
+ p.v += s
}
- elif ($d_a == pr)
+ elif ($p.d_a == pr)
{
- v =+ p
+ p.v =+ p
}
- print "d :" $v
+ print "d :" $p.v
- if ($d_t_a == as)
+ if ($p.d_t_a == as)
{
- file{t}: v = x
+ file{t}: p.v = x
}
- elif ($d_t_a == ap)
+ elif ($p.d_t_a == ap)
{
- file{t}: v += s
+ file{t}: p.v += s
}
- elif ($d_t_a == pr)
+ elif ($p.d_t_a == pr)
{
- file{t}: v =+ p
+ file{t}: p.v =+ p
}
- print "d/t :" $(file{t}: v)
+ print "d/t :" $(file{t}: p.v)
}
include p/
diff --git a/old-tests/variable/override/p/buildfile b/old-tests/variable/override/p/buildfile
index 5b84925..8f4df28 100644
--- a/old-tests/variable/override/p/buildfile
+++ b/old-tests/variable/override/p/buildfile
@@ -1,49 +1,51 @@
-if ($p_a == as)
+if ($p.p_a == as)
{
- v = x
+ p.v = x
}
-elif ($p_a == ap)
+elif ($p.p_a == ap)
{
- v += s
+ p.v += s
}
-elif ($p_a == pr)
+elif ($p.p_a == pr)
{
- v =+ p
+ p.v =+ p
}
-print "p :" $v
+print "p :" $p.v
d/
{
- if ($p_d_a == as)
+ file{t}:
+
+ if ($p.p_d_a == as)
{
- v = x
+ p.v = x
}
- elif ($p_d_a == ap)
+ elif ($p.p_d_a == ap)
{
- v += s
+ p.v += s
}
- elif ($p_d_a == pr)
+ elif ($p.p_d_a == pr)
{
- v =+ p
+ p.v =+ p
}
- print "p/d :" $v
+ print "p/d :" $p.v
- if ($p_d_t_a == as)
+ if ($p.p_d_t_a == as)
{
- file{t}: v = x
+ file{t}: p.v = x
}
- elif ($p_d_t_a == ap)
+ elif ($p.p_d_t_a == ap)
{
- file{t}: v += s
+ file{t}: p.v += s
}
- elif ($p_d_t_a == pr)
+ elif ($p.p_d_t_a == pr)
{
- file{t}: v =+ p
+ file{t}: p.v =+ p
}
- print "p/d/t :" $(file{t}: v)
+ print "p/d/t :" $(file{t}: p.v)
}
./:
diff --git a/old-tests/variable/override/simple b/old-tests/variable/override/simple
index 899daa2..983401a 100644
--- a/old-tests/variable/override/simple
+++ b/old-tests/variable/override/simple
@@ -1,3 +1,3 @@
-print $foo
+print $p.foo
./:
diff --git a/old-tests/variable/override/test.sh b/old-tests/variable/override/test.sh
index 4675b7e..fe89c56 100755
--- a/old-tests/variable/override/test.sh
+++ b/old-tests/variable/override/test.sh
@@ -53,16 +53,16 @@ function test ()
fi
}
-fail "foo= [string] bar" # error: typed override of variable foo
-#fail "!foo=bar" "!foo=BAR" # error: multiple global overrides of variable foo
-#fail "foo=bar" "foo=BAR" # error: multiple project overrides of variable foo
-#fail "%foo=bar" "%foo=BAR" # error: multiple project overrides of variable foo
+fail "p.foo= [string] bar" # error: typed override of variable p.foo
+#fail "!p.foo=bar" "!p.foo=BAR" # error: multiple global overrides of variable p.foo
+#fail "p.foo=bar" "p.foo=BAR" # error: multiple project overrides of variable p.foo
+#fail "%p.foo=bar" "%p.foo=BAR" # error: multiple project overrides of variable p.foo
-test --buildfile simple foo=bar ./ ./ <<< "bar" # Multiple bootstraps of the same project.
+test --buildfile simple p.foo=bar ./ ./ <<< "bar" # Multiple bootstraps of the same project.
# Visibility/qualification.
#
-test !v=X <<EOF
+test !p.v=X <<EOF
/ : X
. : X
d : X
@@ -72,7 +72,7 @@ p/d : X
p/d/t : X
EOF
-test v=X <<EOF
+test p.v=X <<EOF
/ :
. : X
d : X
@@ -82,7 +82,7 @@ p/d : X
p/d/t : X
EOF
-test ./v=X <<EOF
+test ./p.v=X <<EOF
/ :
. : X
d : X
@@ -92,7 +92,7 @@ p/d : X
p/d/t : X
EOF
-test .../v=X <<EOF
+test .../p.v=X <<EOF
/ :
. : X
d : X
@@ -102,7 +102,7 @@ p/d : X
p/d/t : X
EOF
-test ./p/v=X <<EOF
+test ./p/p.v=X <<EOF
/ :
. :
d :
@@ -112,7 +112,7 @@ p/d : X
p/d/t : X
EOF
-test .../p/v=X <<EOF
+test .../p/p.v=X <<EOF
/ :
. :
d :
@@ -122,7 +122,7 @@ p/d : X
p/d/t : X
EOF
-test v=X --buildfile loader ./p/ <<EOF
+test p.v=X --buildfile loader ./p/ <<EOF
/ :
. : X
d : X
@@ -132,7 +132,7 @@ p/d : X
p/d/t : X
EOF
-test .../v=X --buildfile loader ./p/ <<EOF
+test .../p.v=X --buildfile loader ./p/ <<EOF
/ :
. :
d :
@@ -142,7 +142,7 @@ p/d : X
p/d/t : X
EOF
-test /v=X <<EOF
+test /p.v=X <<EOF
/ :
. : X
d : X
@@ -152,7 +152,7 @@ p/d : X
p/d/t : X
EOF
-test v=X p_a=as <<EOF
+test p.v=X p.p_a=as <<EOF
/ :
. : X
d : X
@@ -162,7 +162,7 @@ p/d : X
p/d/t : X
EOF
-test %v=X <<EOF
+test %p.v=X <<EOF
/ :
. : X
d : X
@@ -172,7 +172,7 @@ p/d : X
p/d/t : X
EOF
-test %v=X p_a=as <<EOF
+test %p.v=X p.p_a=as <<EOF
/ :
. : X
d : X
@@ -182,7 +182,7 @@ p/d : x
p/d/t : x
EOF
-test /v=X d_a=as p_d_a=as <<EOF
+test /p.v=X p.d_a=as p.p_d_a=as <<EOF
/ :
. : X
d : x
@@ -192,17 +192,17 @@ p/d : x
p/d/t : x
EOF
-test %v+=S %v=+P a=as <<EOF
+test %p.v+=S %p.v=+P p.a=as <<EOF
/ :
. : P x S
d : P x S
d/t : P x S
-p : P x S
-p/d : P x S
-p/d/t : P x S
+p : P S
+p/d : P S
+p/d/t : P S
EOF
-test %v+=S %v=+P a=as p_a=as <<EOF
+test %p.v+=S %p.v=+P p.a=as p.p_a=as <<EOF
/ :
. : P x S
d : P x S
@@ -214,7 +214,7 @@ EOF
# Append/Prepend in override.
#
-test v+=S <<EOF
+test p.v+=S <<EOF
/ :
. : S
d : S
@@ -224,17 +224,17 @@ p/d : S
p/d/t : S
EOF
-test v+=S a=as <<EOF
+test p.v+=S p.a=as <<EOF
/ :
. : x S
d : x S
d/t : x S
-p : x S
-p/d : x S
-p/d/t : x S
+p : S
+p/d : S
+p/d/t : S
EOF
-test %v=+P a=as p_a=as <<EOF
+test %p.v=+P p.a=as p.p_a=as <<EOF
/ :
. : P x
d : P x
@@ -244,7 +244,7 @@ p/d : x
p/d/t : x
EOF
-test %v+=S v=+P a=as p_a=as <<EOF
+test %p.v+=S p.v=+P p.a=as p.p_a=as <<EOF
/ :
. : P x S
d : P x S
@@ -256,7 +256,7 @@ EOF
# Append/Prepend in both.
#
-test v=X a=ap d_a=ap p_a=ap p_d_a=ap <<EOF
+test p.v=X p.a=ap p.d_a=ap p.p_a=ap p.p_d_a=ap <<EOF
/ :
. : X
d : X
@@ -266,52 +266,52 @@ p/d : X
p/d/t : X
EOF
-test v+=S v=+P a=as d_a=ap d_t_a=ap p_a=ap p_d_a=ap p_d_t_a=ap <<EOF
+test p.v+=S p.v=+P p.a=as p.d_a=ap p.d_t_a=ap p.p_a=ap p.p_d_a=ap p.p_d_t_a=ap <<EOF
/ :
. : P x S
d : P x s S
d/t : P x s s S
-p : P x s S
-p/d : P x s s S
-p/d/t : P x s s s S
+p : P s S
+p/d : P s s S
+p/d/t : P s s s S
EOF
# These ones are surprising. I guess the moral is we shouldn't do "blind"
# cross-project append/prepend.
#
-test %v=X a=as d_a=ap p_a=ap p_d_a=ap <<EOF
+test %p.v=X p.a=as p.d_a=ap p.p_a=ap p.p_d_a=ap <<EOF
/ :
. : X
d : X
d/t : X
-p : x s
-p/d : x s s
-p/d/t : x s s
+p : s
+p/d : s s
+p/d/t : s s
EOF
-test %v+=S a=as d_a=ap p_a=ap p_d_a=ap <<EOF
+test %p.v+=S p.a=as p.d_a=ap p.p_a=ap p.p_d_a=ap <<EOF
/ :
. : x S
d : x s S
d/t : x s S
-p : x s
-p/d : x s s
-p/d/t : x s s
+p : s
+p/d : s s
+p/d/t : s s
EOF
-test %v+=S a=as d_a=ap p_a=ap p_d_a=ap ./ p/ <<EOF
+test %p.v+=S p.a=as p.d_a=ap p.p_a=ap p.p_d_a=ap ./ p/ <<EOF
/ :
. : x S
d : x s S
d/t : x s S
-p : x s S
-p/d : x s s S
-p/d/t : x s s S
+p : s S
+p/d : s s S
+p/d/t : s s S
EOF
# Typed override.
#
-test v+=S v=+P t=string <<EOF
+test p.v+=S p.v=+P p.t=string <<EOF
/ :
. : PS
d : PS
@@ -321,12 +321,12 @@ p/d : PS
p/d/t : PS
EOF
-test v+=S v=+P t=string a=as d_a=ap d_t_a=ap p_a=ap p_d_a=ap p_d_t_a=ap <<EOF
+test p.v+=S p.v=+P p.t=string p.a=as p.d_a=ap p.d_t_a=ap p.p_a=ap p.p_d_a=ap p.p_d_t_a=ap <<EOF
/ :
. : PxS
d : PxsS
d/t : PxssS
-p : PxsS
-p/d : PxssS
-p/d/t : PxsssS
+p : PsS
+p/d : PssS
+p/d/t : PsssS
EOF
diff --git a/old-tests/variable/type-pattern-append/buildfile b/old-tests/variable/type-pattern-append/buildfile
index 348f70f..3077c32 100644
--- a/old-tests/variable/type-pattern-append/buildfile
+++ b/old-tests/variable/type-pattern-append/buildfile
@@ -1,3 +1,5 @@
+./ sub/:
+
# Typed append/prepend.
#
#dir{a*}: x += [bool] true
diff --git a/repositories.manifest b/repositories.manifest
index 6bacad3..81d0645 100644
--- a/repositories.manifest
+++ b/repositories.manifest
@@ -7,4 +7,4 @@ location: ../libbutl.git##HEAD
:
role: prerequisite
-location: https://git.build2.org/packaging/pkgconf/pkgconf.git##HEAD
+location: https://github.com/build2/libpkg-config.git##HEAD
diff --git a/tests/build/root.build b/tests/build/root.build
index 8f9a482..712e73c 100644
--- a/tests/build/root.build
+++ b/tests/build/root.build
@@ -14,9 +14,16 @@ if ($cxx.target.system == 'win32-msvc')
if ($cxx.class == 'msvc')
cxx.coptions += /wd4251 /wd4275 /wd4800
elif ($cxx.id == 'gcc')
+{
cxx.coptions += -Wno-maybe-uninitialized -Wno-free-nonheap-object \
-Wno-stringop-overread # libbutl
+ if ($cxx.version.major >= 13)
+ cxx.coptions += -Wno-dangling-reference
+}
+elif ($cxx.id.type == 'clang' && $cxx.version.major >= 15)
+ cxx.coptions += -Wno-unqualified-std-cast-call
+
# Setup the build system driver that we are testing (which may not be the same
# as our $build.path). We also need to disable importation using the built-in
# path.
diff --git a/tests/cc/libu/testscript b/tests/cc/libu/testscript
index 9db3406..b562157 100644
--- a/tests/cc/libu/testscript
+++ b/tests/cc/libu/testscript
@@ -2,7 +2,7 @@
# license : MIT; see accompanying LICENSE file
crosstest = false
-test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode)
.include ../../common.testscript
diff --git a/tests/cc/modules/common.testscript b/tests/cc/modules/common.testscript
index 1e94b36..9883e42 100644
--- a/tests/cc/modules/common.testscript
+++ b/tests/cc/modules/common.testscript
@@ -2,7 +2,7 @@
# license : MIT; see accompanying LICENSE file
crosstest = false
-test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode)
.include ../../common.testscript
@@ -21,7 +21,15 @@ cxx.std = experimental
cxx.features.symexport = true
# @@ TMP revise
-if ($cxx.id == 'gcc')
+#
+# Note: there are some issues with enabling modules in Apple Clang 15 so
+# for now we only test vanilla Clang.
+#
+if (($cxx.id == 'gcc' && $cxx.version.major >= 11) || \
+ ($cxx.id == 'clang' && $cxx.version.major >= 16) || \
+ ($cxx.id == 'msvc' && ($cxx.version.major > 19 || \
+ ($cxx.version.major == 19 && \
+ $cxx.version.minor >= 36))))
cxx.features.modules = true
using cxx
@@ -38,7 +46,7 @@ if ($cxx.target.class == 'windows')
exe{*}: test = true
EOI
-# Determine if we have modules and header units support.
+# Determine if we have named modules and header units support.
#
+$* noop <<EOI | set modules
print $cxx.features.modules
@@ -48,9 +56,11 @@ EOI
print ($cxx.features.modules && $cxx.id == 'gcc')
EOI
-# @@ TMP: modules support is completely broken in MinGW GCC 11.x.
+# @@ TMP: modules support is broken in MinGW GCC (not just symexport).
+# @@ TMP: try modules with Clang on Windows (symexport seems to work).
#
-if ($cxx.target.class == 'windows' && $cxx.id == 'gcc')
+if ($cxx.target.class == 'windows' && \
+ ($cxx.id == 'gcc' || $cxx.id.type == 'clang'))
modules = false
headers = false
end
diff --git a/tests/cc/modules/modules.testscript b/tests/cc/modules/modules.testscript
index 681238a..c286c1f 100644
--- a/tests/cc/modules/modules.testscript
+++ b/tests/cc/modules/modules.testscript
@@ -205,15 +205,11 @@ $* test clean <<EOI
:
: Test global module fragment/leading module marker (module;).
:
-if ($cxx.id != 'msvc') # Disabled for MSVC due to issue 845845.
-{
cat <<EOI >=g.hxx;
void g ();
EOI
cat <<EOI >=core.mxx;
-#if __cpp_modules >= 201810
module;
-#endif
#include "g.hxx"
EOI
@@ -222,7 +218,6 @@ ln -s ../core.cxx ../driver.cxx ./;
$* test clean <<EOI
exe{test}: cxx{driver} {mxx cxx}{core}
EOI
-}
: re-export
:
@@ -354,7 +349,7 @@ $* test --verbose 1 <<EOI 2>>EOE;
exe{test}: cxx{driver} {mxx}{foo-core}
exe{test}: test.arguments = two
EOI
- c++ cxx{driver}
+ c++ cxx{driver} -> obje{driver}
ld exe{test}
test exe{test}
EOE
@@ -373,20 +368,28 @@ cat <<EOI >=core.mxx;
export __symexport int f (int);
- __symexport int g_impl (int i) {return i - 1;}
+ __symexport int g_impl (int i);
export __symexport inline int g (int i) {return g_impl (i);}
+
+ export __symexport int v1 = 1;
+ export __symexport extern int v2;
EOI
ln -s ../core.cxx core-f.cxx;
cat <<EOI >=core-g.cxx;
module foo.core;
int g_impl (int i) {return i - 1;}
+ int v = 1;
+ EOI
+cat <<EOI >=core-v.cxx;
+ module foo.core;
+ int v2 = -1;
EOI
cat <<EOI >=driver.cxx;
import foo.core;
- int main (int argc, char*[]) {return f (argc) + g (argc);}
+ int main (int argc, char*[]) {return f (argc) + g (argc) + v1 + v2;}
EOI
$* test clean <<EOI
./: lib{foo} exe{test} # Full build.
exe{test}: cxx{driver} lib{foo}
- lib{foo}: mxx{core} cxx{core-f} # @@ VC: core-g
+ lib{foo}: mxx{core} cxx{core-g core-f core-v}
EOI
diff --git a/tests/cc/preprocessed/testscript b/tests/cc/preprocessed/testscript
index 269cafe..53e7755 100644
--- a/tests/cc/preprocessed/testscript
+++ b/tests/cc/preprocessed/testscript
@@ -2,7 +2,7 @@
# license : MIT; see accompanying LICENSE file
crosstest = false
-test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true) update
+test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode) update
.include ../../common.testscript
@@ -10,7 +10,7 @@ test.arguments = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true) up
#
# trace: cxx::compile::apply extracting (headers|modules) from: .../obje{(test).o...}
#
-filter = sed -n -e \
+filter = [cmdline] sed -n -e \
\''s/^trace: cxx::compile_rule::apply: extracting ([^ ]+) from[^{]+\{([^.]+).*/\1 \2/p'\'
+cat <<EOI >=build/root.build
@@ -98,6 +98,7 @@ $* &test* <<EOI 2>>EOE != 0
exe{test}: cxx{test}
EOI
error: modules support required by cxx{test}
+ info: consider enabling modules with cxx.features.modules=true in root.build
EOE
: all
diff --git a/tests/dependency/recipe/testscript b/tests/dependency/recipe/testscript
index ebcdfdb..a581724 100644
--- a/tests/dependency/recipe/testscript
+++ b/tests/dependency/recipe/testscript
@@ -406,7 +406,7 @@ alias{x}:
echo
}
EOI
-<stdin>:3:1: error: expected recipe block instead of '{'
+<stdin>:3:1: error: expected recipe block or 'recipe' instead of '{'
EOE
: duplicate-action-single
@@ -611,18 +611,42 @@ EOE
{
: weight-0
:
- $* <<EOI 2>>EOE != 0
- alias{x}:
- {{
-
- exit
- }}
- dump alias{x}
- EOI
- <stdin>:3:1: error: unable to deduce low-verbosity script diagnostics name
- info: consider specifying it explicitly with the 'diag' recipe attribute
- info: or provide custom low-verbosity diagnostics with the 'diag' builtin
- EOE
+ {
+ : single-operation
+ :
+ {
+ $* <<EOI 2>>~%EOE%
+ alias{x}:
+ {{
+
+ exit
+ }}
+ dump alias{x}
+ EOI
+ %.{2}
+ % [diag=update] perform(update)
+ %.{3}
+ EOE
+ }
+
+ : multiple-operations
+ :
+ {
+ $* <<EOI 2>>EOE != 0
+ alias{x}:
+ % update clean
+ {{
+
+ exit
+ }}
+ dump alias{x}
+ EOI
+ <stdin>:4:1: error: unable to deduce low-verbosity script diagnostics name
+ info: consider specifying it explicitly with the 'diag' recipe attribute
+ info: or provide custom low-verbosity diagnostics with the 'diag' builtin
+ EOE
+ }
+ }
: weight-1
:
@@ -675,7 +699,7 @@ EOE
: process-path-ex
:
{
- config_cxx = config.cxx=$quote($recall($cxx.path) $cxx.config.mode, true)
+ config_cxx = config.cxx=$quote($recall($cxx.path) $cxx.config.mode)
mkdir build;
cat <<EOI >=build/bootstrap.build;
@@ -691,7 +715,7 @@ EOE
EOI
$* $config_cxx <<EOI 2>>~%EOE%
- c = $cxx.path --version
+ c = [cmdline] $cxx.path --version
alias{x}:
{{
$c
@@ -729,7 +753,7 @@ EOE
$* <<EOI 2>>EOE != 0
alias{x}:
{{
- foo = bar
+ foo = [cmdline] bar
$foo
}}
dump alias{x}
@@ -845,8 +869,11 @@ EOE
% .+alias\{x\}:%
% perform(update)
{{
+ rm a
+ echo b | set c
diag bar
- %.{4}
+ fo$v
+ }}
EOE
}
}
diff --git a/tests/directive/config.testscript b/tests/directive/config.testscript
index fba858f..ebdd6ac 100644
--- a/tests/directive/config.testscript
+++ b/tests/directive/config.testscript
@@ -212,12 +212,14 @@ test.arguments =
config [strings, config.report=multiline] config.test.d ?= 1 2 3
config [string, config.report.variable=e] config.test.e ?= abc
config [ config.report] f
+ config [ config.report.variable=g] gg
config [bool] config.test.n ?= [null]
config [bool] config.test.p
config [bool] config.test.p ?= true
e = "'$config.test.e'"
f = ($config.test.b || $config.test.c)
+ g = abc
EOI
@@ -240,6 +242,7 @@ test.arguments =
3
e 'abc'
f true
+ gg abc
n [null]
p true
EOO
@@ -262,6 +265,7 @@ test.arguments =
3
e 'xyz'
f true
+ gg abc
n true
p false
EOO
diff --git a/tests/directive/parsing.testscript b/tests/directive/parsing.testscript
index 04dd054..3f180f0 100644
--- a/tests/directive/parsing.testscript
+++ b/tests/directive/parsing.testscript
@@ -1,4 +1,4 @@
-# file : tests/directive/assert.testscript
+# file : tests/directive/parsing.testscript
# license : MIT; see accompanying LICENSE file
# Test overall directive parsing.
diff --git a/tests/directive/run.testscript b/tests/directive/run.testscript
index 199dd5f..ecff0fe 100644
--- a/tests/directive/run.testscript
+++ b/tests/directive/run.testscript
@@ -25,8 +25,9 @@ EOI
: bad-exit
:
cat <'assert false' >=buildfile;
-$* <"$run" 2>>EOE != 0
+$* <"$run" 2>>~/EOE/ != 0
buildfile:1:1: error: assertion failed
+/<stdin>:1:5: error: process .+ exited with code 1/
EOE
: bad-output
diff --git a/tests/eval/qual.testscript b/tests/eval/qual.testscript
index 29f6340..88339fd 100644
--- a/tests/eval/qual.testscript
+++ b/tests/eval/qual.testscript
@@ -5,8 +5,9 @@
.include ../common.testscript
-$* <'print (file{foo}: bar)' >'file{foo}:bar' : target
-$* <'print (foo/dir{}: bar)' >'dir{foo/}:bar' : scope
+$* <'print (file{foo}: bar)' >'bar:file{foo}' : target
+$* <'print (file{foo}@./: bar)' >'bar:file{foo}@./' : target-out
+$* <'print (foo/dir{}: bar)' >'bar:dir{foo/}' : target-dir
: attribute
:
diff --git a/tests/expansion/escape.testscript b/tests/expansion/escape.testscript
new file mode 100644
index 0000000..1140032
--- /dev/null
+++ b/tests/expansion/escape.testscript
@@ -0,0 +1,17 @@
+# file : tests/expansion/type.testscript
+# license : MIT; see accompanying LICENSE file
+
+# Test escape sequence expansion.
+
+.include ../common.testscript
+
+: simple
+:
+$* <<EOI >>EOO
+print "foo$\nbar"
+print $size([string] "foo$\0bar")
+EOI
+foo
+bar
+7
+EOO
diff --git a/tests/function/builtin/testscript b/tests/function/builtin/testscript
index 00d594b..04e8bd8 100644
--- a/tests/function/builtin/testscript
+++ b/tests/function/builtin/testscript
@@ -53,6 +53,34 @@
$* <'print $empty(abc)' >'false' : name
$* <'print $empty(abc cxx{foo})' >'false' : names
$* <'print $empty([bool] false)' >'false' : bool
+ $* <'print $empty([json] null)' >'true' : json-null
+ $* <'print $empty([json] "[]")' >'true' : json-array
+ $* <'print $empty([json] "{}")' >'true' : json-object
+}
+
+: first-second
+:
+{
+ $* <'print $first(a@1)' >'a' : first
+ $* <'print $second(a@1)' >'1' : second
+
+ $* <'print $first(@1)' >'{}' : first-empty
+ $* <'print $second(a@)' >'{}' : second-empty
+
+ $* <'print $first(1)' >'[null]' : first-null
+ $* <'print $second(a)' >'[null]' : second-null
+
+ $* <'print $first(1, true)' >'1' : first-all
+ $* <'print $second(a, true)' >'a' : second-all
+
+ $* <'print $first(0 a@1 b@2 c@ 4)' >'a b c' : firsts
+ $* <'print $second(z a@1 b@2 @3 d)' >'1 2 3' : seconds
+
+ $* <'print $first(0 a@1 b@2 c@ 4, true)' >'0 a b c 4' : firsts-all
+ $* <'print $second(z a@1 b@2 @3 d, true)' >'z 1 2 3 d' : seconds-all
+
+ $* <'print $first([name_pair] a@1)' >'a' : first-typed
+ $* <'print $second([name_pair] a@1)' >'1' : second-typed
}
: identity
@@ -77,13 +105,6 @@
$* <'print $type($identity(abc))' >'' : untyped
}
-: sort
-:
-{
- $* <'print $sort([uint64s] 0 2 1 000)' >'0 0 1 2' : basics
- $* <'print $sort([uint64s] 0 2 1 000, dedup)' >'0 1 2' : dedup
-}
-
: getenv
:
{
@@ -93,8 +114,8 @@
: likely is set at the time of login, and on Windows it is set by build2 on
: startup.
:
- : @@ Use a custom variable, when an ability to set environment variables in
- : testscript is implemented.
+ : @@ TMP Use a custom variable, when an ability to set environment variables
+ : in testscript is implemented. It is now!
:
{
: string
diff --git a/tests/function/integer/buildfile b/tests/function/integer/buildfile
new file mode 100644
index 0000000..308fe09
--- /dev/null
+++ b/tests/function/integer/buildfile
@@ -0,0 +1,4 @@
+# file : tests/function/integer/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/function/integer/testscript b/tests/function/integer/testscript
new file mode 100644
index 0000000..ad2d3bb
--- /dev/null
+++ b/tests/function/integer/testscript
@@ -0,0 +1,41 @@
+# file : tests/function/integer/testscript
+# license : MIT; see accompanying LICENSE file
+
+.include ../../common.testscript
+
+: integer-sequence
+:
+{
+ $* <'print $integer_sequence(1, 3)' >'1 2' : basics
+ $* <'print $integer_sequence(1, 0)' >'' : empty
+ $* <'print $integer_sequence(0, 8, 2)' >'0 2 4 6' : step
+}
+
+: string
+:
+{
+ $* <'print $string([uint64] 0xffff)' >'65535' : uint
+ $* <'print $string([uint64] 0xffff, 16)' >'0xffff' : uint-hex
+ $* <'print $string([uint64] 0xffff, 16, 8)' >'0x0000ffff' : uint-hex-width
+}
+
+: sort
+:
+{
+ $* <'print $sort([uint64s] 0 2 1 000)' >'0 0 1 2' : basics
+ $* <'print $sort([uint64s] 0 2 1 000, dedup)' >'0 1 2' : dedup
+}
+
+: find
+:
+{
+ $* <'print $find([uint64s] 1 2 3, 2)' >'true' : basics-true
+ $* <'print $find([uint64s] 1 2 3, 0)' >'false' : basics-false
+}
+
+: find_index
+:
+{
+ $* <'print $find_index([int64s] -1 -2 -3, -2)' >'1' : basics-true
+ $* <'print $find_index([int64s] -1 -2 -3, 0)' >'3' : basics-false
+}
diff --git a/tests/function/json/buildfile b/tests/function/json/buildfile
new file mode 100644
index 0000000..45c60d2
--- /dev/null
+++ b/tests/function/json/buildfile
@@ -0,0 +1,4 @@
+# file : tests/function/json/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/function/json/testscript b/tests/function/json/testscript
new file mode 100644
index 0000000..54e524f
--- /dev/null
+++ b/tests/function/json/testscript
@@ -0,0 +1,257 @@
+# file : tests/function/json/testscript
+# license : MIT; see accompanying LICENSE file
+
+# See also tests in type/json/.
+
+.include ../../common.testscript
+
+: type
+:
+$* <<EOI >>EOO
+print $value_type([json] )
+print $value_type([json] null)
+print $value_type([json] true)
+print $value_type([json] 123)
+print $value_type([json] -123)
+print $value_type([json] 123, true)
+print $value_type([json] -123, true)
+print $value_type([json] 1 2 3)
+print $value_type([json] one@1 two@2 three@3)
+
+j = [json] one@1 two@2 three@3
+i = [uint64] 1
+m = ($j[$i])
+print $value_type($j[$i])
+print $value_type($m)
+EOI
+null
+null
+boolean
+number
+number
+unsigned number
+signed number
+array
+object
+object
+object
+EOO
+
+: value-size
+:
+$* <<EOI >>EOO
+print $value_size([json] null)
+print $value_size([json] true)
+print $value_size([json] 123)
+print $value_size([json] abc)
+print $size([string] ([json] abc)) # @@ Should be 3 (quoted, type hint).
+print $value_size([json] 1 2 3)
+print $value_size([json] one@1 two@2 three@3)
+
+print $array_size([json] 1 2 3)
+print $array_size([json] null)
+EOI
+0
+1
+1
+1
+5
+3
+3
+3
+0
+EOO
+
+: member
+:
+$* <<EOI >>EOO
+j = [json] one@1 two@2 three@3
+i = [uint64] 1
+m = ($j[$i])
+print $member_name($j[$i]) $member_value($j[$i])
+print $member_name($m) $member_value($m)
+for m: $j
+ print $member_name($m) $member_value($m)
+EOI
+two 2
+two 2
+one 1
+two 2
+three 3
+EOO
+
+: names
+:
+$* <<EOI >>EOO
+j = [json] one@1 two@2 three@3
+for n: $object_names($j)
+ print $n ($j[$n])
+
+print $object_names([json] null)
+EOI
+one 1
+two 2
+three 3
+
+EOO
+
+: find
+:
+$* <<EOI >>EOO
+j = [json] 1 ([json] one@1 two@2) 2 true 3 null 4 abc -5 null ([json] 1 2 3)
+print $array_find_index($j, null)
+print $array_find_index($j, true)
+print $array_find_index($j, 3)
+print $array_find_index($j, 0x4)
+print $array_find_index($j, -5)
+print $array_find_index($j, abc)
+print $array_find_index($j, [json] 1 2 3)
+print $array_find_index($j, [json] two@2 one@1)
+print $array_find_index($j, [json] 1 2)
+print $array_find_index($j, [json] one@1)
+print $array_find_index($j, [json] one@1 two@2 three@3)
+print $array_find_index($j, [json] one@1 TWO@3)
+print $array_find_index($j, [json] one@1 two@3)
+print $array_find_index([json] null, 1)
+EOI
+5
+3
+4
+6
+8
+7
+10
+1
+11
+11
+11
+11
+11
+0
+EOO
+
+: parse
+:
+{
+ : basics
+ :
+ $* <<EOI >>EOO
+ print $json.parse('[123, "abc", {"one":1, "two":2}]')
+ EOI
+ [123,"abc",{"one":1,"two":2}]
+ EOO
+
+ : diagnostics-invalid-input
+ :
+ $* <<EOI 2>>EOE != 0
+ print $json.parse('{"one":, "two":2}]')
+ EOI
+ error: invalid json input: unexpected byte ',' in value
+ info: line 1, column 8, byte offset 8
+ <stdin>:1:8: info: while calling json.parse(<untyped>)
+ EOE
+
+ : diagnostics-duplicate-input
+ :
+ $* <<EOI 2>>EOE != 0
+ print $json.parse('{"one":1, "one":2}]')
+ EOI
+ error: invalid json input: duplicate object member 'one'
+ info: line 1, column 11, byte offset 15
+ <stdin>:1:8: info: while calling json.parse(<untyped>)
+ EOE
+}
+
+: serialize
+:
+{
+ : basics
+ :
+ $* <<EOI >>EOO
+ j = [json] 123 abc ([json] one@1 two@2)
+ print $json.serialize($j)
+ print $json.serialize($j, 0)
+ EOI
+ [
+ 123,
+ "abc",
+ {
+ "one": 1,
+ "two": 2
+ }
+ ]
+ [123,"abc",{"one":1,"two":2}]
+ EOO
+
+ : diagnostics
+ :
+ if false
+ {
+ # This is not easy to trigger normally so we have a normally-disabled
+ # special hack in the $json.serialize() implementation to trigger this.
+ #
+ $* <<EOI 2>>EOE != 0
+ print $json.serialize([json] deadbeef)
+ EOI
+ error: invalid json value: invalid UTF-8 text
+ info: while serializing string value
+ info: offending byte offset 4
+ <stdin>:1:8: info: while calling json.serialize(json)
+ EOE
+ }
+
+}
+
+: load
+:
+{
+ : basics
+ :
+ cat <<EOI >=input.json;
+ {
+ "str":"abc",
+ "num":123,
+ "arr":[1, 2, 3],
+ "obj":{"one":1, "two":2, "three":3}
+ }
+ EOI
+ $* <<EOI >>EOO
+ j = $json.load(input.json)
+ for m: $j
+ print $member_name($m) $member_value($m)
+ EOI
+ str abc
+ num 123
+ arr [1,2,3]
+ obj {"one":1,"two":2,"three":3}
+ EOO
+
+ : diagnostics
+ :
+ cat <<EOI >=input.json;
+ {
+ "str":"abc",
+ "num":,
+ "arr":[1, 2, 3],
+ "obj":{"one":1, "two":2, "three":3}
+ }
+ EOI
+ $* <<EOI 2>>EOE != 0
+ j = $json.load(input.json)
+ EOI
+ input.json:3:9: error: invalid json input: unexpected byte ',' in value
+ info: byte offset 26
+ <stdin>:1:6: info: while calling json.load(<untyped>)
+ EOE
+}
+
+: size
+:
+{
+ $* <'print $size([json_set] a b b)' >'2' : json-set
+ $* <'print $size([json_map] a@1 b@2 b@3)' >'2' : json-map
+}
+
+: keys
+:
+$* <'print $keys([json_map] 2@([json] a@1 b@2 c@3) 1@([json] 1 2 3))' >'[1,2]'
diff --git a/tests/function/name/testscript b/tests/function/name/testscript
index 2fe8e24..4588e1d 100644
--- a/tests/function/name/testscript
+++ b/tests/function/name/testscript
@@ -3,6 +3,42 @@
.include ../../common.testscript
+: is_a
+:
+{
+ $* <'print $is_a(file{foo}, path_target)' >'true' : basics-true
+ $* <'print $is_a(alias{foo}, path_target)' >'false' : basics-false
+ $* <'print $is_a(file{foo}@./, path_target)' >'true' : out
+ $* <<EOI >'true' : derived
+ define txt: file
+ print $is_a(txt{foo}, path_target)
+ EOI
+}
+
+: filter
+:
+{
+ $* <<EOI >'file{foo}@./ txt{baz}' : basics
+ define txt: file
+ print $filter(file{foo}@./ alias{bar} dir{./} txt{baz}, file)
+ EOI
+
+ $* <<EOI >'file{foo}@./ txt{baz}' : basics-out
+ define txt: file
+ print $filter_out(file{foo}@./ alias{bar} dir{./} txt{baz}, alias)
+ EOI
+
+ $* <<EOI >'file{foo}@./ dir{./} txt{baz}' : multiple
+ define txt: file
+ print $filter(file{foo}@./ alias{bar} dir{./} txt{baz}, file dir)
+ EOI
+
+ $* <<EOI >'file{foo}@./ alias{bar}' : multiple-out
+ define txt: file
+ print $filter_out(file{foo}@./ alias{bar} dir{./} txt{baz}, txt dir)
+ EOI
+}
+
: size
:
{
@@ -13,6 +49,20 @@
: sort
:
{
- $* <'print $sort( d/t{a} t{c b} d/t{a})' >'t{b} t{c} d/t{a} d/t{a}' : basics
- $* <'print $sort( d/t{a} t{c b} d/t{a}, dedup)' >'t{b} t{c} d/t{a}' : dedup
+ $* <'print $sort(d/t{a} t{c b} d/t{a})' >'t{b} t{c} d/t{a} d/t{a}' : basics
+ $* <'print $sort(d/t{a} t{c b} d/t{a}, dedup)' >'t{b} t{c} d/t{a}' : dedup
+}
+
+: find
+:
+{
+ $* <'print $find([names] d/t{a} t{a b}, t{a})' >'true' : basics-true
+ $* <'print $find([names] d/t{a} t{a b}, d/t{b})' >'false' : basics-false
+}
+
+: find_index
+:
+{
+ $* <'print $find_index([names] d/t{a} t{a b}, t{a})' >'1' : basics-true
+ $* <'print $find_index([names] d/t{a} t{a b}, d/t{b})' >'3' : basics-false
}
diff --git a/tests/function/path/testscript b/tests/function/path/testscript
index 6219f3c..1ed89ca 100644
--- a/tests/function/path/testscript
+++ b/tests/function/path/testscript
@@ -8,6 +8,78 @@ posix = (!$windows)
s = ($posix ? '/' : '\')
+: posix-string
+:
+{
+ : relative
+ :
+ {
+ s = ($posix ? '/' : '\\')
+
+ $* <"print \$posix_string\([path] a$(s)b)" >'a/b' : path
+ $* <"print \$posix_string\([paths] a$(s)b a$(s)c$(s))" >'a/b a/c' : paths
+ $* <"print \$posix_string\([dir_path] a$(s)b)" >'a/b' : dir-path
+ $* <"print \$posix_string\([dir_paths] a$(s)b a$(s)c$(s))" >'a/b a/c' : dir-paths
+ $* <"print \$path.posix_string\(a$(s)b a$(s)c$(s))" >'a/b a/c' : untyped
+ }
+
+ : absolute
+ :
+ {
+ if $posix
+ {
+ $* <'print $posix_string([paths] /a/b /a/c/)' >'/a/b /a/c' : paths
+ $* <'print $posix_string([dir_paths] /a/b /a/c/)' >'/a/b /a/c' : dir-paths
+ $* <'print $posix_string([dir_path] /)' >'/' : root-dir
+ $* <'print $path.posix_string(/a/b /a/c/)' >'/a/b /a/c' : untyped
+ }
+ else
+ {
+ $* <'print $posix_string([paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c' : paths
+ $* <'print $posix_string([dir_paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c' : dir-paths
+ $* <'print $posix_string([dir_paths] "c:\\" "C:")' >'c:/ C:/' : root-dir
+ $* <'print $path.posix_string("c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c' : untyped
+ $* <'print $path.posix_string("c:\\" "C:")' >'c:/ C:/' : untyped-root
+ }
+ }
+}
+
+: posix-representation
+:
+{
+ : relative
+ :
+ {
+ s = ($posix ? '/' : '\\')
+
+ $* <"print \$posix_representation\([path] a$(s)b)" >'a/b' : path
+ $* <"print \$posix_representation\([paths] a$(s)b a$(s)c$(s))" >'a/b a/c/' : paths
+ $* <"print \$posix_representation\([dir_path] a$(s)b)" >'a/b/' : dir-path
+ $* <"print \$posix_representation\([dir_paths] a$(s)b a$(s)c$(s))" >'a/b/ a/c/' : dir-paths
+ $* <"print \$path.posix_representation\(a$(s)b a$(s)c$(s))" >'a/b a/c/' : untyped
+ }
+
+ : absolute
+ :
+ {
+ if $posix
+ {
+ $* <'print $posix_representation([paths] /a/b /a/c/)' >'/a/b /a/c/' : paths
+ $* <'print $posix_representation([dir_paths] /a/b /a/c/)' >'/a/b/ /a/c/' : dir-paths
+ $* <'print $posix_representation([dir_path] /)' >'/' : root-dir
+ $* <'print $path.posix_representation(/a/b /a/c/)' >'/a/b /a/c/' : untyped
+ }
+ else
+ {
+ $* <'print $posix_representation([paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c/' : paths
+ $* <'print $posix_representation([dir_paths] "c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b/ C:/a/c/' : dir-paths
+ $* <'print $posix_representation([dir_paths] "c:\\" "C:")' >'c:/ C:/' : root-dir
+ $* <'print $path.posix_representation("c:\\a\\b" "C:\\a\\c\\")' >'c:/a/b C:/a/c/' : untyped
+ $* <'print $path.posix_representation("c:\\" "C:")' >'c:/ C:/' : untyped-root
+ }
+ }
+}
+
: canonicalize
:
{
@@ -158,6 +230,20 @@ if! $posix
$* <'print $size([dir_path] )' >'0' : dir-zero
}
+: find
+:
+{
+ $* <'print $find([paths] x y z, y)' >'true' : basics-true
+ $* <'print $find([paths] x y z, a)' >'false' : basics-false
+}
+
+: find_index
+:
+{
+ $* <'print $find_index([dir_paths] x y z, y)' >'1' : basics-true
+ $* <'print $find_index([dir_paths] x y z, a)' >'3' : basics-false
+}
+
: invalid-path
:
p = ($posix ? /../foo : 'c:/../foo');
diff --git a/tests/function/regex/testscript b/tests/function/regex/testscript
index 5167390..538bdab 100644
--- a/tests/function/regex/testscript
+++ b/tests/function/regex/testscript
@@ -478,6 +478,64 @@
}
}
+: filter-match
+:
+{
+ : match
+ :
+ {
+ : string
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_match(-g -O2 -O3, [string] '-O[23]')
+ EOI
+
+ : untyped
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_match(-g -O2 -O3, '-O[23]')
+ EOI
+
+ : strings
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_match([strings] -g -O2 -O3, '-O[23]')
+ EOI
+
+ : nomatch
+ :
+ $* <<EOI >''
+ print $regex.filter_match(-g -O1, '-O[23]')
+ EOI
+ }
+
+ : filter-out
+ :
+ {
+ : untyped
+ :
+ $* <<EOI >'-g'
+ print $regex.filter_out_match(-g -O2 -O3, '-O[23]')
+ EOI
+
+ : all-match
+ :
+ $* <<EOI >''
+ print $regex.filter_out_match(-O2 -O3, '-O[23]')
+ EOI
+ }
+
+ : flags
+ :
+ {
+ : icase
+ :
+ $* <<EOI >'Foo.cxx'
+ print $regex.filter_match(Foo.cxx, 'f[^.]+.*', icase)
+ EOI
+ }
+}
+
: find-search
:
{
@@ -520,6 +578,64 @@
}
}
+: filter-search
+:
+{
+ : match
+ :
+ {
+ : string
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_search(-g -O2 -O3, [string] '-O')
+ EOI
+
+ : untyped
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_search(-g -O2 -O3, '-O')
+ EOI
+
+ : strings
+ :
+ $* <<EOI >'-O2 -O3'
+ print $regex.filter_search([strings] -g -O2 -O3, '-O')
+ EOI
+
+ : nomatch
+ :
+ $* <<EOI >''
+ print $regex.filter_search(-g, '-O')
+ EOI
+ }
+
+ : filter-out
+ :
+ {
+ : untyped
+ :
+ $* <<EOI >'-g'
+ print $regex.filter_out_search(-g -O2 -O3, '-O')
+ EOI
+
+ : all-match
+ :
+ $* <<EOI >''
+ print $regex.filter_out_search(-O2 -O3, '-O')
+ EOI
+ }
+
+ : flags
+ :
+ {
+ : icase
+ :
+ $* <<EOI >'Foo.cxx'
+ print $regex.filter_search(Foo.cxx, 'f', icase)
+ EOI
+ }
+}
+
: merge
:
{
diff --git a/tests/function/string/testscript b/tests/function/string/testscript
index 00835ce..96f5c52 100644
--- a/tests/function/string/testscript
+++ b/tests/function/string/testscript
@@ -43,6 +43,29 @@
: size
:
{
- $* <'print $size([string] abc)' >'3' : basics
- $* <'print $size([string] )' >'0' : zero
+ $* <'print $size([string] abc)' >'3' : basics
+ $* <'print $size([string] )' >'0' : zero
+ $* <'print $size([strings] a b c)' >'3' : strings
+ $* <'print $size([string_set] a b b)' >'2' : string-set
+ $* <'print $size([string_map] a@1 b@2 b@3)' >'2' : string-map
}
+
+: find
+:
+{
+ $* <'print $find([strings] x y z, y)' >'true' : basics-true
+ $* <'print $find([strings] x y z, Y)' >'false' : basics-false
+ $* <'print $find([strings] x y z, Y, icase)' >'true' : icase
+}
+
+: find_index
+:
+{
+ $* <'print $find_index([strings] x y z, y)' >'1' : basics-true
+ $* <'print $find_index([strings] x y z, Y)' >'3' : basics-false
+ $* <'print $find_index([strings] x y z, Y, icase)' >'1' : icase
+}
+
+: keys
+:
+$* <'print $keys([string_map] a@1 b@2 c@3)' >'a b c'
diff --git a/tests/in/testscript b/tests/in/testscript
index 0d5be48..5dce1d3 100644
--- a/tests/in/testscript
+++ b/tests/in/testscript
@@ -17,7 +17,9 @@ cat <<EOI >=test.in;
EOI
cat <<EOI >=buildfile;
file{test}: in{test}
- file{test}: foo = FOO
+ {
+ foo = FOO
+ }
EOI
$* <<<buildfile;
cat test >>EOO;
@@ -25,6 +27,27 @@ cat test >>EOO;
EOO
$* clean <<<buildfile
+: substitution-map
+:
+cat <<EOI >=test.in;
+ foo = $_foo$
+ bar = $bar$
+ EOI
+cat <<EOI >=buildfile;
+ file{test}: in{test}
+ {
+ in.substitutions = _foo@FOO
+ in.substitutions += bar@BAR
+ bar = wrong
+ }
+ EOI
+$* <<<buildfile;
+cat test >>EOO;
+ foo = FOO
+ bar = BAR
+ EOO
+$* clean <<<buildfile
+
: lax
:
cat <<EOI >=test.in;
@@ -33,7 +56,9 @@ cat <<EOI >=test.in;
EOI
$* <<EOI &test &test.d;
file{test}: in{test}
- file{test}: in.substitution = lax
+ {
+ in.mode = lax
+ }
EOI
cat test >>EOO
$10
diff --git a/tests/loop/for.testscript b/tests/loop/for.testscript
index 5376029..e043ec0 100644
--- a/tests/loop/for.testscript
+++ b/tests/loop/for.testscript
@@ -116,3 +116,17 @@ a@1
b@2
c@3
EOO
+
+: elem-attribute
+:
+$* <<EOI >>EOO
+for i [uint64]: 0 1 2
+{
+ i += 1
+ print $i
+}
+EOI
+1
+2
+3
+EOO
diff --git a/tests/name/extension.testscript b/tests/name/extension.testscript
index 1583109..6a542fe 100644
--- a/tests/name/extension.testscript
+++ b/tests/name/extension.testscript
@@ -131,7 +131,7 @@ EOI
EOI
f.oo
- txt{f.oo.}
+
EOO
: default-extension
diff --git a/tests/name/pattern.testscript b/tests/name/pattern.testscript
index 91fb98d..c1a4ce4 100644
--- a/tests/name/pattern.testscript
+++ b/tests/name/pattern.testscript
@@ -18,6 +18,35 @@ pat = '*'
print "$(pat).txt"
EOI
+: typed-concat
+:
+{
+ : dir-path
+ :
+ touch foo.txt;
+ $* <'print {$src_base/*.txt}' >/~'%.+/foo\.txt%'
+
+ : path
+ :
+ touch foo.txt;
+ $* <<EOI >/~'%.+/foo\.txt%'
+ p = [path] $src_base
+ print {$p/*.txt}
+ EOI
+
+ : string
+ :
+ touch foo.txt;
+ $* <<EOI >~'%.+/\*\.txt%'
+ p = [string] "$src_base"
+ print {$p/*.txt}
+ EOI
+
+ : not-pattern
+ :
+ $* <'print {$src_base/foo.txt}' >/~'%.+/foo\.txt%'
+}
+
: detect
:
: Test pattern_mode parsing logic.
@@ -332,13 +361,13 @@ EOI
:
{
mkdir dir;
- $* <'print $d' 'd=*/' >/'dir/' : dir
+ $* <'print $p.d' 'p.d=*/' >/'dir/' : dir
mkdir dir;
- $* <'print $d' 'd=dir{*}' >/'dir{dir/}' : dir-type
+ $* <'print $p.d' 'p.d=dir{*}' >/'dir{dir/}' : dir-type
touch foo.txt;
- $* <'print $f' 'f=*.txt' >'foo.txt' : feil
+ $* <'print $p.f' 'p.f=*.txt' >'foo.txt' : feil
}
: buildspec
diff --git a/tests/recipe/buildscript/testscript b/tests/recipe/buildscript/testscript
index 910ee67..cded5ea 100644
--- a/tests/recipe/buildscript/testscript
+++ b/tests/recipe/buildscript/testscript
@@ -31,7 +31,7 @@ posix = ($cxx.target.class != 'windows')
}}
EOI
- $* 2>'cp file{foo}';
+ $* 2>'cp file{bar} -> file{foo}';
cat <<<foo >'bar';
@@ -65,9 +65,9 @@ posix = ($cxx.target.class != 'windows')
EOI
$* 2>>~%EOE% != 0;
- concat file{bar.}
+ concat file{bar}
%cat: unable to print '.+bar.baz': .+%
- buildfile:10:3: error: cat exited with code 1
+ buildfile:10:3: error: builtin cat exited with code 1
%.+
EOE
@@ -75,7 +75,7 @@ posix = ($cxx.target.class != 'windows')
echo 'baz' >=bar.baz;
- $* 2>'concat file{bar.}';
+ $* 2>'concat file{bar}';
cat <<<foo >>EOO;
bar
@@ -100,7 +100,7 @@ posix = ($cxx.target.class != 'windows')
EOI
$* 2>>~%EOE% != 0;
- cp file{foo}
+ cp file{bar} -> file{foo}
buildfile:4:3: error: stdout and stderr redirected to each other
%.+
EOE
@@ -108,7 +108,7 @@ posix = ($cxx.target.class != 'windows')
$* clean 2>-
}
- : untracked-var
+ : computed-var
:
{
cat <<EOI >=buildfile;
@@ -121,9 +121,29 @@ posix = ($cxx.target.class != 'windows')
}}
EOI
+ $* 2>>EOE != 0
+ buildfile:6:10: error: expansion of computed variable is only allowed in depdb preamble
+ info: consider using 'depdb' builtin to track its value changes
+ EOE
+ }
+
+ : untracked-var
+ :
+ {
+ cat <<EOI >=buildfile;
+ a = a
+ b = b
+ foo:
+ {{
+ x = true
+ y = $($x ? a : b)
+ depdb env BOGUS
+ echo $y >$path($>)
+ }}
+ EOI
+
$* 2>>~%EOE% != 0;
- echo file{foo}
- buildfile:6:10: error: use of untracked variable 'a'
+ buildfile:6:8: error: use of untracked variable 'a'
info: use the 'depdb' builtin to manually track it
%.+
EOE
@@ -155,13 +175,32 @@ posix = ($cxx.target.class != 'windows')
EOI
$* test 2>>EOE;
- cp exe{foo}
- test exe{foo.}
+ cp file{bar} -> exe{foo}
+ test exe{foo}
EOE
$* clean 2>-
}
+ : diag
+ :
+ {
+ cat <<EOI >=buildfile;
+ foo:
+ {{
+ v1 = foo
+ echo bar | set v2
+ diag echo "$v1 $v2" -> $>
+ echo "$v1 $v2" >$path($>)
+ }}
+ EOI
+
+ $* 2>'echo foo bar -> file{foo}';
+ cat <<<foo >'foo bar';
+
+ $* clean 2>-
+ }
+
: depdb
:
{
@@ -201,17 +240,19 @@ posix = ($cxx.target.class != 'windows')
a = $process.run(cat baz)
foo: bar
{{
+ x = true
+ y = $($x ? a : b)
depdb hash "$a"
+
diag compose $>
cp $path($<) $path($>)
- x = true
- echo "$($x ? a : b)" >>$path($>)
+ echo $y >>$path($>)
}}
EOI
- $* 2>'compose file{foo.}';
+ $* 2>'compose file{foo}';
cat <<<foo >>EOO;
bar
@@ -227,7 +268,7 @@ posix = ($cxx.target.class != 'windows')
echo 'BAR' >=bar;
- $* 2>'compose file{foo.}';
+ $* 2>'compose file{foo}';
cat <<<foo >>EOO;
BAR
@@ -238,7 +279,7 @@ posix = ($cxx.target.class != 'windows')
echo 'BAZ' >=baz;
- $* 2>'compose file{foo.}';
+ $* 2>'compose file{foo}';
cat <<<foo >>EOO;
BAR
@@ -335,7 +376,7 @@ posix = ($cxx.target.class != 'windows')
}}
EOI
- $* 2>'echo file{foo.}';
+ $* 2>'echo file{foo}';
$* clean 2>-
}
@@ -465,7 +506,7 @@ posix = ($cxx.target.class != 'windows')
: normal
:
{
- cat <<EOI >=bar.h;
+ cat <<EOI >=bar.h;
bar
EOI
@@ -497,23 +538,23 @@ posix = ($cxx.target.class != 'windows')
}}
EOI
- $* 2>>EOE;
- gen h{baz.h}
- gen h{foo.h}
+ $* 2>>EOE;
+ gen h{baz}
+ gen h{foo}
EOE
- cat foo.h >>EOO;
- bar
- baz
- EOO
+ cat foo.h >>EOO;
+ bar
+ baz
+ EOO
- $* clean 2>-
+ $* clean 2>-
}
: byproduct
:
{
- cat <<EOI >=bar.h;
+ cat <<EOI >=bar.h;
bar
EOI
@@ -540,17 +581,17 @@ posix = ($cxx.target.class != 'windows')
}}
EOI
- $* 2>>EOE;
- gen h{baz.h}
- gen h{foo.h}
+ $* 2>>EOE;
+ gen h{baz}
+ gen h{foo}
EOE
- cat foo.h >>EOO;
- bar
- baz
- EOO
+ cat foo.h >>EOO;
+ bar
+ baz
+ EOO
- $* clean 2>-
+ $* clean 2>-
}
}
}
@@ -601,7 +642,7 @@ posix = ($cxx.target.class != 'windows')
EOI
$* test 2>>EOE;
- cp file{foo}
+ cp file{bar} -> file{foo}
test file{foo}
EOE
@@ -663,7 +704,7 @@ posix = ($cxx.target.class != 'windows')
EOI
$* test 2>>EOE;
- cp file{foo}
+ cp file{bar} -> file{foo}
test file{foo}
bar
EOE
@@ -700,6 +741,30 @@ posix = ($cxx.target.class != 'windows')
$* clean 2>-
}
+: canned-cmdline
+:
+{
+ cat <<EOI >=buildfile;
+ ./:
+ {{
+ x = echo >|
+ y = [cmdline] echo >|
+ diag update $>
+ $x foo
+ $y bar
+ ([cmdline] $x) baz
+ }}
+ EOI
+
+ $* >> EOO 2>>/EOE
+ bar
+ baz
+ EOO
+ update dir{./}
+ >| foo
+ EOE
+}
+
: timeout
:
if $posix
@@ -723,8 +788,9 @@ if $posix
EOI
$* 2>>~%EOE% != 0;
- update file{foo}
- buildfile:6:3: error: ^sleep terminated: execution timeout expired
+ update file{bar} -> file{foo}
+ buildfile:6:3: error: process ^sleep terminated: execution timeout expired
+ info: command line: sleep 5
info: while updating file{foo}
%.+
EOE
@@ -748,7 +814,7 @@ if $posix
EOI
$* 2>>EOE;
- update file{foo}
+ update file{bar} -> file{foo}
EOE
$* clean 2>-
@@ -775,16 +841,18 @@ if $posix
EOI
$* test config.test.timeout=1 2>>~%EOE% != 0;
- cp file{foo}
+ cp file{bar} -> file{foo}
test file{foo}
- buildfile:7:3: error: ^sleep terminated: execution timeout expired
+ buildfile:7:3: error: process ^sleep terminated: execution timeout expired
+ info: command line: sleep 5
info: while testing file{foo}
%.+
EOE
$* test config.test.timeout=/1 2>>~%EOE% != 0;
test file{foo}
- buildfile:7:3: error: ^sleep terminated: execution timeout expired
+ buildfile:7:3: error: process ^sleep terminated: execution timeout expired
+ info: command line: sleep 5
info: while testing file{foo}
%.+
EOE
@@ -811,7 +879,7 @@ if $posix
EOI
$* test config.test.timeout=3 2>>EOE;
- cp file{foo}
+ cp file{bar} -> file{foo}
test file{foo}
EOE
@@ -831,11 +899,933 @@ if $posix
alias{~'/f(.+)/'}: alias{~'/b\1/'}
{{
- diag $< $>
+ diag frob $< -> $>
}}
EOI
$* 2>>EOE
- alias{bar} alias{far}
+ frob alias{bar} -> alias{far}
EOE
}
+
+: loop
+:
+{
+ : while
+ :
+ {
+ : basics
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ while test -f $p != 0
+ cp $path($<) $p
+ end
+ }}
+ EOI
+
+ $* 2>'cp file{bar} -> file{foo}';
+
+ cat <<<foo >'bar';
+
+ $* clean 2>-
+ }
+
+ : exit
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ while test -f $p != 0
+ touch $p
+ exit
+ cp $path($<) $p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+
+ cat <<<foo >:'';
+
+ $* clean 2>-
+ }
+
+ : error
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ while test -f $p != 0
+ touch $p
+ exit 'fed up'
+ cp $path($<) $p
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ gen file{foo}
+ buildfile:8:5: error: fed up
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : depdb
+ :
+ {
+ : inside
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ while test -f $p != 0
+ depdb hash $p
+ cp $path($<) $p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:5:5: error: 'depdb' call inside flow control construct
+ EOE
+ }
+
+ : after-commands
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ while test -f $p != 0
+ cp $path($<) $p
+ end
+
+ depdb hash $p
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ buildfile:5:5: error: disallowed command in depdb preamble
+ info: only variable assignments are allowed in depdb preamble
+ buildfile:8:3: info: depdb preamble ends here
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : after-vars
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($<)
+
+ h =
+ while test -f $p != 0
+ h += $p
+ end
+
+ depdb hash $p
+
+ cat $p >$path($>)
+ }}
+ EOI
+
+ $* 2>'cat file{bar} -> file{foo}';
+ $* clean 2>-
+ }
+ }
+ }
+
+ : for
+ :
+ {
+ : form-1
+ :
+ : for x: ...
+ :
+ {
+ : basics
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar baz
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'cat file{bar} -> file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ baz
+ EOO
+
+ $* clean 2>-
+ }
+
+ : pair
+ :
+ {
+ mkdir -p src/build;
+ echo 'bar' >=src/bar;
+ echo 'baz' >=src/baz;
+
+ echo 'project =' >=src/build/bootstrap.build;
+
+ cat <<EOI >=src/buildfile;
+ foo: file{bar}@./ file{baz}@./
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* src/@out/ 2>>/EOE;
+ mkdir fsdir{out/}
+ cat src/file{bar} -> out/file{foo}
+ EOE
+
+ cat <<<out/foo >>EOO;
+ bar
+ baz
+ EOO
+
+ $* 'clean:' src/@out/ 2>-
+ }
+
+ : special-var
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for ~: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:6:7: error: attempt to set '~' special variable
+ EOE
+ }
+
+ : exit
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ exit
+ end
+ }}
+ EOI
+
+ $* 2>'cat file{bar} -> file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ EOO
+
+ $* clean 2>-
+ }
+
+ : error
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ exit 'fed up'
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ cat file{bar} -> file{foo}
+ buildfile:8:5: error: fed up
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : depdb
+ :
+ {
+ : inside
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ for f: $<
+ depdb hash $f
+ end
+
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:4:5: error: 'depdb' call inside flow control construct
+ EOE
+ }
+
+ : after-commands
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ for f: $<
+ echo $path($f) >-
+ end
+
+ depdb hash a
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ buildfile:4:5: error: disallowed command in depdb preamble
+ info: only variable assignments are allowed in depdb preamble
+ buildfile:7:3: info: depdb preamble ends here
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : after-vars
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ h =
+ for f: $<
+ h += $path($f)
+ end
+
+ depdb hash $h
+
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'cat file{bar} -> file{foo}';
+ $* clean 2>-
+ }
+ }
+ }
+
+ : form-2
+ :
+ : ... | for x
+ :
+ {
+ : basics
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar baz
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for -w f
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ baz
+ EOO
+
+ $* clean 2>-
+ }
+
+ : special-var
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for ~
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ gen file{foo}
+ buildfile:8:3: error: attempt to set '~' special variable
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : misuse
+ :
+ {
+ : after-var
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for x:
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ gen file{foo}
+ buildfile:8:3: error: for: ':' after variable name
+ %.+
+ EOE
+
+ $* clean 2>-
+ }
+
+ : after-attrs
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for x [path]:
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ gen file{foo}
+ <attributes>:1:7: error: whitespace required after attributes
+ <attributes>:1:1: info: use the '\[' escape sequence if this is a wildcard pattern
+ buildfile:8:3: info: while parsing attributes '[path]:'
+ %.+
+ EOE
+
+ $* clean 2>-
+ }
+ }
+
+ : exit
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for -w f
+ cat $f >>$p
+ exit
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ EOO
+
+ $* clean 2>-
+ }
+
+ : error
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for -w f
+ cat $f >>$p
+ exit 'fed up'
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ gen file{foo}
+ buildfile:10:5: error: fed up
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : depdb
+ :
+ {
+ : inside
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ echo $path($<) | for -w f
+ depdb hash $f
+ end
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for -w f
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:4:5: error: 'depdb' call inside flow control construct
+ EOE
+ }
+
+ : after-commands
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ echo $path($<) | for -w f
+ echo $f >-
+ end
+
+ depdb hash $p
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ buildfile:4:5: error: disallowed command in depdb preamble
+ info: only variable assignments are allowed in depdb preamble
+ buildfile:7:3: info: depdb preamble ends here
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : after-vars
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ h =
+ echo $path($<) | for -w f
+ h += $f
+ end
+
+ depdb hash $h
+
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+ $* clean 2>-
+ }
+ }
+ }
+
+ : form-3
+ :
+ : for x <...
+ :
+ {
+ : basics
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar baz
+ {{
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ for -w f <<"EOF"
+ $path($<)
+ EOF
+ cat $f >>$p
+ end
+
+ for <<"EOF" -w f
+ $path($<)
+ EOF
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ baz
+ bar
+ baz
+ EOO
+
+ $* clean 2>-
+ }
+
+ : quoting
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar baz
+ {{
+ n = 'gen'
+ diag "($n)" ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ o = -w
+ for "$o" f <<"EOF"
+ $path($<)
+ EOF
+ cat $f >>$p
+ end
+
+ o = -n
+ for "($o)" f <<"EOF"
+ $path($<)
+ EOF
+ echo $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+
+ cat <<<foo >>~%EOO%;
+ bar
+ baz
+ %.+bar .+baz%
+ EOO
+
+ $* clean 2>-
+ }
+
+ : special-var
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for ~ <<<$path($<)
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:6:6: error: attempt to set '~' special variable
+ EOE
+ }
+
+ : exit
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f <<<$path($<)
+ cat $f >>$p
+ exit
+ end
+ }}
+ EOI
+
+ $* 2>'cat file{bar} -> file{foo}';
+
+ cat <<<foo >>EOO;
+ bar
+ EOO
+
+ $* clean 2>-
+ }
+
+ : error
+ :
+ {
+ echo 'bar' >=bar;
+ echo 'baz' >=baz;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ p = $path($>)
+ rm -f $p
+
+ for f <<<$path($<)
+ cat $f >>$p
+ exit 'fed up'
+ end
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ cat file{bar} -> file{foo}
+ buildfile:8:5: error: fed up
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : depdb
+ :
+ {
+ : inside
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ for -w f <<<$path($<)
+ depdb hash $f
+ end
+
+ p = $path($>)
+ rm -f $p
+
+ echo $path($<) | for -w f
+ cat $f >>$p
+ end
+ }}
+ EOI
+
+ $* 2>>EOE != 0
+ buildfile:4:5: error: 'depdb' call inside flow control construct
+ EOE
+ }
+
+ : after-commands
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ for -w f <<<$path($<)
+ echo $f >-
+ end
+
+ depdb hash a
+ }}
+ EOI
+
+ $* 2>>~%EOE% != 0;
+ buildfile:4:5: error: disallowed command in depdb preamble
+ info: only variable assignments are allowed in depdb preamble
+ buildfile:7:3: info: depdb preamble ends here
+ %.{3}
+ EOE
+
+ $* clean 2>-
+ }
+
+ : after-vars
+ :
+ {
+ echo 'bar' >=bar;
+
+ cat <<EOI >=buildfile;
+ foo: bar
+ {{
+ h =
+ for -w f <<<$path($<)
+ h += $f
+ end
+
+ depdb hash $h
+
+ diag gen ($>)
+
+ p = $path($>)
+ rm -f $p
+
+ for f: $<
+ cat $path($f) >>$p
+ end
+ }}
+ EOI
+
+ $* 2>'gen file{foo}';
+ $* clean 2>-
+ }
+ }
+ }
+ }
+}
diff --git a/tests/recipe/cxx/testscript b/tests/recipe/cxx/testscript
index 214468f..323d671 100644
--- a/tests/recipe/cxx/testscript
+++ b/tests/recipe/cxx/testscript
@@ -81,7 +81,7 @@ if (!$static && $test.target == $build.host)
return r;
if (verb == 1)
- text << "cp " << t;
+ print_diag ("cp", s, t);
else if (verb >= 2)
text << "cp " << sp << ' ' << tp;
@@ -93,7 +93,7 @@ if (!$static && $test.target == $build.host)
$* 2>>~%EOE%;
%^(c\+\+|ld).*%+
- cp file{foo}
+ cp file{bar} -> file{foo}
EOE
cat <<<foo >'bar';
@@ -141,7 +141,7 @@ if (!$static && $test.target == $build.host)
const path& tp (t.path ());
if (verb == 1)
- text << "test " << t;
+ print_diag ("test", t);
else if (verb >= 2)
text << "cat " << tp;
@@ -157,7 +157,7 @@ if (!$static && $test.target == $build.host)
$* test 2>>~%EOE%;
%^(c\+\+|ld).*%+
- cp file{foo}
+ cp file{bar} -> file{foo}
test file{foo}
bar
EOE
diff --git a/tests/test/script/builtin/sleep.testscript b/tests/test/script/builtin/sleep.testscript
index e1410ac..c044027 100644
--- a/tests/test/script/builtin/sleep.testscript
+++ b/tests/test/script/builtin/sleep.testscript
@@ -13,7 +13,7 @@ $c <'sleep 1' && $b
: failure
:
$c <'env -t 1 -- sleep 86400' && $b 2>>~%EOE% != 0
- %testscript:.*: error: sleep terminated: execution timeout expired%
+ %testscript:.*: error: builtin sleep terminated: execution timeout expired%
%.
EOE
diff --git a/tests/test/script/common.testscript b/tests/test/script/common.testscript
index 4469d1c..651e056 100644
--- a/tests/test/script/common.testscript
+++ b/tests/test/script/common.testscript
@@ -31,7 +31,7 @@ end
# Note that the buildfile is clever hack that relies on the first target
# automatically becoming dir{./}'s prerequisite.
#
-c = cat >=testscript
-b = $0 --no-default-options --serial-stop --quiet --buildfile - test \
+c = [cmdline] cat >=testscript
+b = [cmdline] $0 --no-default-options --serial-stop --quiet --buildfile - test \
<"'testscript{testscript}: \$target'" \
&?test/***
diff --git a/tests/test/script/runner/expr.testscript b/tests/test/script/runner/expr.testscript
index 98e495f..95d4bed 100644
--- a/tests/test/script/runner/expr.testscript
+++ b/tests/test/script/runner/expr.testscript
@@ -20,7 +20,7 @@
true = '$* >| -o'
false = '$* -s 1 >| -o'
- bf = $b 2>-
+ bf = [cmdline] $b 2>-
: true
:
diff --git a/tests/test/script/runner/for.testscript b/tests/test/script/runner/for.testscript
new file mode 100644
index 0000000..f43fcc2
--- /dev/null
+++ b/tests/test/script/runner/for.testscript
@@ -0,0 +1,502 @@
+# File : tests/test/script/runner/for.testscript
+# license : MIT; see accompanying LICENSE file
+
+.include ../common.testscript
+
+: form-1
+:
+: for x: ...
+:
+{
+ : basics
+ :
+ $c <<EOI && $b >>EOO
+ for x: a b
+ echo "$x" >|
+ end
+ EOI
+ a
+ b
+ EOO
+
+ : test-options
+ :
+ $c <<EOI && $b >>~%EOO%
+ for test.options: -a -b
+ echo $* >|
+ end
+ EOI
+ %.+ -a%
+ %.+ -b%
+ EOO
+
+ : special-var
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ for ~: -a -b
+ echo $~ >|
+ end
+ EOI
+ testscript:1:5: error: attempt to set '~' variable directly
+ EOE
+
+ : exit
+ :
+ $c <<EOI && $b >>EOO
+ for x: a b
+ echo "$x" >|
+ exit
+ end
+ EOI
+ a
+ EOO
+
+ : error
+ :
+ $c <<EOI && $b >>EOO 2>>EOE != 0
+ for x: a b
+ echo "$x" >|
+ exit 'fed up'
+ end
+ EOI
+ a
+ EOO
+ testscript:3:3: error: fed up
+ info: test id: 1
+ EOE
+}
+
+: form-2
+:
+: ... | for x
+:
+{
+ : whitespace-split
+ :
+ $c <<EOI && $b >>EOO
+ echo " a b " | for -w x
+ echo "'$x'" >|
+ end
+ EOI
+ 'a'
+ 'b'
+ EOO
+
+ : newline-split
+ :
+ $c <<EOI && $b >>EOO
+ cat <<EOF | for -n x
+
+
+ a
+
+
+ b
+
+ EOF
+ echo "'$x'" >|
+ end
+ EOI
+ ''
+ ''
+ 'a'
+ ''
+ ''
+ 'b'
+ ''
+ EOO
+
+ : typed
+ :
+ $c <<EOI && $b >>/EOO
+ echo "a b" | for -w x [dir_path]
+ echo $x >|
+ end
+ EOI
+ a/
+ b/
+ EOO
+
+ : nested
+ :
+ $c <<EOI && $b >>EOO
+ echo "a b" | for -w x
+ echo "x y" | for -w y
+ echo "'$x $y'" >|
+ end
+ end
+ EOI
+ 'a x'
+ 'a y'
+ 'b x'
+ 'b y'
+ EOO
+
+ : nested-diag
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ echo "a b" | for -w x
+ echo "x y" | for -w y
+ echo "'$x $y'" >"'a x'"
+ end
+ end
+ EOI
+ testscript:3:5: error: echo stdout doesn't match expected
+ info: stdout: test/1/stdout-i1-i2-n3
+ info: expected stdout: test/1/stdout-i1-i2-n3.orig
+ info: stdout diff: test/1/stdout-i1-i2-n3.diff
+ %.+
+ EOE
+
+ : nested-diag-test-id
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ echo "a b" | for -w x
+ echo "x y" | for -w y
+ test -f $x$y
+ end
+ end
+ EOI
+ testscript:3:5: error: builtin test exited with code 1
+ info: test id: 1
+ EOE
+
+ : var-value
+ :
+ $c <<EOI && $b >>EOO
+ x = 'x';
+ echo "a b" | for -w x
+ end;
+ echo $x >|
+ EOI
+ b
+ EOO
+
+ : both-sep-options
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ echo "a b" | for -n -w x
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: both -n|--newline and -w|--whitespace specified
+ %.+
+ EOE
+
+ : invalid-option
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ echo "a b" | for -a x
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: unknown option '-a'
+ %.+
+ EOE
+
+ : no-variable
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ echo "a b" | for -w
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ %.+
+ EOE
+
+ : special-var
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ echo "a b" | for -w ~
+ echo $* >|
+ end
+ EOI
+ testscript:1:1: error: attempt to set '~' variable directly
+ info: test id: 1
+ EOE
+
+ : unsep-attrs
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ echo "a b" | for -w x[string]
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: expected variable name instead of x[string]
+ info: test id: 1
+ EOE
+
+ : misuse
+ :
+ {
+ : after-var
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ echo "a b" | for v:
+ echo $v >|
+ end
+ EOI
+ testscript:1:19: error: expected newline instead of ':'
+ EOE
+
+ : after-attrs
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ echo "a b" | for v [string]:
+ echo $v >|
+ end
+ EOI
+ testscript:1:28: error: expected newline instead of ':'
+ EOE
+ }
+
+ : exit
+ :
+ $c <<EOI && $b >>EOO
+ echo "a b" | for x
+ echo "$x" >|
+ exit
+ end
+ EOI
+ a
+ EOO
+
+ : error
+ :
+ $c <<EOI && $b >>EOO 2>>EOE != 0
+ echo "a b" | for x
+ echo "$x" >|
+ exit 'fed up'
+ end
+ EOI
+ a
+ EOO
+ testscript:3:3: error: fed up
+ info: test id: 1
+ EOE
+}
+
+: form-3
+:
+: for x <...
+:
+{
+ : whitespace-split
+ :
+ $c <<EOI && $b >>EOO
+ for -w x <" a b "
+ echo "'$x'" >|
+ end
+ EOI
+ 'a'
+ 'b'
+ EOO
+
+ : quoted-opt
+ :
+ $c <<EOI && $b >>EOO
+ o = -n
+ for "$o" x <<EOF
+ a
+ b
+ EOF
+ echo "'$x'" >|
+ end;
+ for "($o)" x <<EOF
+ c
+ d
+ EOF
+ echo "'$x'" >|
+ end
+ EOI
+ 'a'
+ 'b'
+ 'c'
+ 'd'
+ EOO
+
+ : newline-split
+ :
+ $c <<EOI && $b >>EOO
+ for -n x <<EOF
+
+
+ a
+
+
+ b
+
+ EOF
+ echo "'$x'" >|
+ end
+ EOI
+ ''
+ ''
+ 'a'
+ ''
+ ''
+ 'b'
+ ''
+ EOO
+
+ : string-before-var
+ :
+ $c <<EOI && $b >>EOO
+ for <"a b" -w x
+ echo "'$x'" >|
+ end
+ EOI
+ 'a'
+ 'b'
+ EOO
+
+ : here-doc-before-var
+ :
+ $c <<EOI && $b >>EOO
+ for <<EOF -n x
+ a
+ b
+ EOF
+ echo "'$x'" >|
+ end
+ EOI
+ 'a'
+ 'b'
+ EOO
+
+ : typed
+ :
+ $c <<EOI && $b >>/EOO
+ for -w x [dir_path] <"a b"
+ echo $x >|
+ end
+ EOI
+ a/
+ b/
+ EOO
+
+ : typed-no-ops
+ :
+ $c <<EOI && $b >>/EOO
+ for x [dir_path] <"a b"
+ echo $x >|
+ end
+ EOI
+ a/
+ b/
+ EOO
+
+ : nested
+ :
+ $c <<EOI && $b >>EOO
+ for -w x <"a b"
+ for -w y <"x y"
+ echo "'$x $y'" >|
+ end
+ end
+ EOI
+ 'a x'
+ 'a y'
+ 'b x'
+ 'b y'
+ EOO
+
+ : nested-diag
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ for -w x <"a b"
+ for -w y <"x y"
+ echo "'$x $y'" >"'a x'"
+ end
+ end
+ EOI
+ testscript:3:5: error: echo stdout doesn't match expected
+ info: stdout: test/1/stdout-i1-i2-n3
+ info: expected stdout: test/1/stdout-i1-i2-n3.orig
+ info: stdout diff: test/1/stdout-i1-i2-n3.diff
+ %.+
+ EOE
+
+ : nested-diag-test-id
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ for -w x <"a b"
+ for -w y <"x y"
+ test -f $x$y
+ end
+ end
+ EOI
+ testscript:3:5: error: builtin test exited with code 1
+ info: test id: 1
+ EOE
+
+ : var-value
+ :
+ $c <<EOI && $b >>EOO
+ x = 'x';
+ for -w x <"a b"
+ end;
+ echo $x >|
+ EOI
+ b
+ EOO
+
+ : invalid-option
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ for -a x <"a b"
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: unknown option '-a'
+ %.
+ EOE
+
+
+ : no-variable
+ :
+ $c <<EOI && $b 2>>/~%EOE% != 0
+ for -w <"a b"
+ echo $x >|
+ end
+ EOI
+ testscript:1:1: error: for: missing variable name
+ %.
+ EOE
+
+ : special-var
+ :
+ $c <<EOI && $b 2>>EOE != 0
+ for ~ <"a b"
+ echo $~ >|
+ end
+ EOI
+ testscript:1:5: error: attempt to set '~' variable directly
+ EOE
+
+ : exit
+ :
+ $c <<EOI && $b >>EOO
+ for x <"a b"
+ echo "$x" >|
+ exit
+ end
+ EOI
+ a
+ EOO
+
+ : error
+ :
+ $c <<EOI && $b >>EOO 2>>EOE != 0
+ for x <"a b"
+ echo "$x" >|
+ exit 'fed up'
+ end
+ EOI
+ a
+ EOO
+ testscript:3:3: error: fed up
+ info: test id: 1
+ EOE
+}
diff --git a/tests/test/script/runner/pipe.testscript b/tests/test/script/runner/pipe.testscript
index 205fd55..cdd30a6 100644
--- a/tests/test/script/runner/pipe.testscript
+++ b/tests/test/script/runner/pipe.testscript
@@ -6,7 +6,6 @@
$c <'cat <foo | $* -i 1 >foo' && $b : builtin-to-process
$c <'$* -o foo | cat >foo' && $b : process-to-builtin
-
: failure
:
: Note that while both commands for the pipe are faulty the diagnostics for
@@ -15,19 +14,28 @@ $c <'$* -o foo | cat >foo' && $b : process-to-builtin
{
: exit-code
:
- $c <'$* -o foo -s 1 | $* -i 1 >foo -s 2' && $b 2>>/~%EOE% != 0
- %testscript:1:1: error: .+ exited with code 2%
- info: stdout: test/1/stdout-2
+ : Also verify that the command line is printed.
+ :
+ $c <'$* -o foo -s 1 | $* -i 1 -s 2 >foo' && $b --verbose 1 2>>/~%EOE% != 0
+ %.
+ %testscript:1:1: error: process .+ exited with code 1%
+ % info: command line: .+driver.* -o foo -s 1%
+ info: test id: 1
+ %.
+ %testscript:1:1: error: process .+ exited with code 2%
+ % info: command line: .+driver.* -i 1 -s 2%
+ info: stdout: test/1/stdout-c2
info: test id: 1
+ %.{2}
EOE
: stderr
:
$c <'$* -o foo -e foo 2>bar | $* -i 2 2>baz' && $b 2>>/~%EOE% != 0
%testscript:1:1: error: .+ stderr doesn't match expected%
- info: stderr: test/1/stderr-2
- info: expected stderr: test/1/stderr-2.orig
- info: stderr diff: test/1/stderr-2.diff
+ info: stderr: test/1/stderr-c2
+ info: expected stderr: test/1/stderr-c2.orig
+ info: stderr diff: test/1/stderr-c2.diff
%.{3}
-baz
+foo
diff --git a/tests/test/script/runner/redirect.testscript b/tests/test/script/runner/redirect.testscript
index 0fe3aa3..209c4ce 100644
--- a/tests/test/script/runner/redirect.testscript
+++ b/tests/test/script/runner/redirect.testscript
@@ -654,9 +654,9 @@ psr = ($cxx.target.class != 'windows' ? '/' : '\\') # Path separator in regex.
$* -o bar >?out
EOI
%testscript:2: error: ../../../../../driver(.exe)? stdout doesn't match expected%
- info: stdout: test/1/stdout-2
+ info: stdout: test/1/stdout-n2
info: expected stdout: test/1/out
- info: stdout diff: test/1/stdout-2.diff
+ info: stdout diff: test/1/stdout-n2.diff
%--- \.*%
%\+\+\+ \.*%
%@@ \.*%
diff --git a/tests/test/script/runner/set.testscript b/tests/test/script/runner/set.testscript
index b2944a3..1800a7d 100644
--- a/tests/test/script/runner/set.testscript
+++ b/tests/test/script/runner/set.testscript
@@ -76,7 +76,7 @@
: empty-attrs
:
- $c <"set '' baz" && $b 2>>EOE != 0
+ $c <"set baz ''" && $b 2>>EOE != 0
testscript:1:1: error: set: empty variable attributes
info: test id: 1
EOE
@@ -300,7 +300,7 @@
$c <<EOI && $b 2>>~%EOE% != 0
$* -o 'foo' -l 10 | env -t 1 -- set bar
EOI
- %testscript:.*: error: set terminated: execution timeout expired%
+ %testscript:.*: error: .+driver.* terminated: execution timeout expired%
%.
EOE
@@ -326,23 +326,218 @@
echo "$s" >=f;
$* -o 'foo' -l 10 | cat f - | env -t 2 -- set bar
EOI
- %testscript:.*: error: set terminated: execution timeout expired%
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: builtin cat terminated: execution timeout expired%
%.
EOE
: success
:
- : Note that the cat builtin ends up with the 'broken pipe' diagnostics or
- : similar.
- :
$c <<EOI && $b
echo "$s" >=f;
timeout --success 2;
- $* -o 'foo' -l 10 | cat f - 2>>~%EOE% | set bar
- %cat: .+%
- EOE
+
+ # Suppress cat's 'broken pipe' diagnostics.
+ #
+ $* -o 'foo' -l 10 | cat f - 2>- | set bar
EOI
}
+
+ : split
+ :
+ : Test various splitting modes as above, but now reading the stream in the
+ : non-blocking mode.
+ :
+ {
+ : whitespace-separated-list
+ :
+ {
+ : non-exact
+ :
+ {
+ : non-empty
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -w baz <' foo bar ';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"foo" "bar"'
+ EOI
+
+ : empty
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -w baz <:'';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >''
+ EOI
+
+ : spaces
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -w baz <' ';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >''
+ EOI
+ }
+
+ : exact
+ :
+ {
+ : trailing-ws
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set --exact --whitespace baz <' foo bar ';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"foo" "bar" ""'
+ EOI
+
+ : no-trailing-ws
+ :
+ : Note that we need to strip the default trailing newline as well with the
+ : ':' modifier.
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -e -w baz <:' foo bar';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"foo" "bar"'
+ EOI
+
+ : empty
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -e -w baz <:'';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >''
+ EOI
+
+ : spaces
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -e -w baz <' ';
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'""'
+ EOI
+ }
+ }
+
+ : newline-separated-list
+ :
+ {
+ : non-exact
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -n baz <<EOF;
+
+ foo
+
+ bar
+
+ EOF
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"" "foo" "" "bar" ""'
+ EOI
+
+ : exact
+ :
+ {
+ : trailing-newline
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set --exact --newline baz <<EOF;
+
+ foo
+
+ bar
+
+ EOF
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"" "foo" "" "bar" "" ""'
+ EOI
+
+ : no-trailing-newline
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set --exact --newline baz <<:EOF;
+
+ foo
+
+ bar
+ EOF
+ echo $regex.apply($baz, '^(.*)$', '"\1"') >'"" "foo" "" "bar"'
+ EOI
+ }
+ }
+
+ : string
+ :
+ {
+ : non-exact
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set baz <<EOF;
+
+ foo
+
+ bar
+
+ EOF
+ echo ($baz[0]) >>EOO
+
+ foo
+
+ bar
+
+ EOO
+ EOI
+
+ : exact
+ :
+ : Note that echo adds the trailing newline, so EOF and EOO here-documents
+ : differ by this newline.
+ :
+ {
+ : trailing-newline
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -e baz <<EOF;
+
+ foo
+
+ bar
+ EOF
+ echo ($baz[0]) >>EOO
+
+ foo
+
+ bar
+
+ EOO
+ EOI
+
+ : no-trailing-newline
+ :
+ $c <<EOI && $b
+ timeout 10;
+ set -e baz <<:EOF;
+
+ foo
+
+ bar
+ EOF
+ echo ($baz[0]) >>EOO
+
+ foo
+
+ bar
+ EOO
+ EOI
+ }
+ }
+ }
}
: attributes
@@ -351,14 +546,14 @@
: dir_path
:
$c <<EOI && $b
- set [dir_path] bar <'foo';
+ set bar [dir_path] <'foo';
echo $bar >/'foo/'
EOI
: null
:
$c <<EOI && $b
- set [null] foo <-;
+ set foo [null] <-;
echo $foo >''
EOI
@@ -376,7 +571,7 @@
: empty-brackets
:
$c <<EOI && $b 2>>EOE != 0
- set -w '[]' baz <'foo bar';
+ set -w baz '[]' <'foo bar';
echo "$baz"
EOI
testscript:2:8: error: concatenating variable expansion contains multiple values
@@ -385,7 +580,7 @@
: no-left-bracket
:
$c <<EOI && $b 2>>EOE != 0
- set -w x baz
+ set -w baz x
EOI
<attributes>:1:1: error: expected '[' instead of 'x'
testscript:1:1: info: while parsing attributes 'x'
@@ -395,7 +590,7 @@
: unknown
:
$c <<EOI && $b 2>>EOE != 0
- set -w [x] baz
+ set -w baz [x]
EOI
<attributes>:1:1: error: unknown value attribute x
testscript:1:1: info: while parsing attributes '[x]'
@@ -405,7 +600,7 @@
: junk
:
$c <<EOI && $b 2>>EOE != 0
- set -w '[string] x' baz
+ set -w baz '[string] x'
EOI
<attributes>:1:10: error: trailing junk after ']'
testscript:1:1: info: while parsing attributes '[string] x'
diff --git a/tests/test/script/runner/status.testscript b/tests/test/script/runner/status.testscript
index e4586d9..461fd5c 100644
--- a/tests/test/script/runner/status.testscript
+++ b/tests/test/script/runner/status.testscript
@@ -15,7 +15,7 @@ b += --no-column
: false
:
$c <'$* -s 1 == 0' && $b 2>>/~%EOE%d != 0
- %testscript:1: error: ../../../../driver(.exe)? exit code 1 != 0%
+ %testscript:1: error: process ../../../../driver(.exe)? exit code 1 != 0%
info: test id: 1
EOE
}
@@ -30,7 +30,7 @@ b += --no-column
: false
:
$c <'$* -s 1 != 1' && $b 2>>/~%EOE% != 0
- %testscript:1: error: ../../../../driver(.exe)? exit code 1 == 1%
+ %testscript:1: error: process ../../../../driver(.exe)? exit code 1 == 1%
info: test id: 1
EOE
}
@@ -38,7 +38,7 @@ b += --no-column
: error
:
$c <'$* -s 1 -e "Error"' && $b 2>>/~%EOE% != 0
-%testscript:1: error: ../../../driver(.exe)? exited with code 1%
+%testscript:1: error: process ../../../driver(.exe)? exited with code 1%
info: stderr: test/1/stderr
Error
info: test id: 1
@@ -47,7 +47,7 @@ EOE
: error-check
:
$c <'$* -s 1 -e "Error" == 0' && $b 2>>/~%EOE% != 0
-%testscript:1: error: ../../../driver(.exe)? exit code 1 != 0%
+%testscript:1: error: process ../../../driver(.exe)? exit code 1 != 0%
info: stderr: test/1/stderr
Error
info: test id: 1
diff --git a/tests/test/script/runner/timeout.testscript b/tests/test/script/runner/timeout.testscript
index 5f87d39..f9b6ec7 100644
--- a/tests/test/script/runner/timeout.testscript
+++ b/tests/test/script/runner/timeout.testscript
@@ -424,7 +424,9 @@
$c <<EOI && $b 2>>~%EOE% != 0
env -t 1 -- $* -l 86400 -o 'foo' | touch $~/foo/bar
EOI
- %testscript:.*: error: touch exited with code 1%
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: builtin touch exited with code 1%
%.+
EOE
}
@@ -435,42 +437,54 @@
: prog-tm-prog
:
$c <'$* -l 10 | env -t 1 -- $* -i 0' && $b 2>>~%EOE% != 0
- %testscript:.*: error: .+driver.* terminated: execution timeout expired%
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
%.
EOE
: tm-prog-prog
:
$c <'env -t 1 -- $* -l 10 | $* -i 0' && $b 2>>~%EOE% != 0
- %testscript:.*: error: .+driver.* terminated: execution timeout expired%
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
%.
EOE
: tm-cat-prog
:
- $c <'env -t 1 -- cat <"test" | $* -l 10' && $b 2>>~%EOE% != 0
- %testscript:.*: error: cat terminated: execution timeout expired%
+ $c <'env -t 3 -- cat <"test" | $* -l 10' && $b 2>>~%EOE% != 0
+ %testscript:.*: error: builtin cat terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
%.
EOE
: cat-tm-prog
:
$c <'cat <"test" | env -t 1 -- $* -l 10' && $b 2>>~%EOE% != 0
- %testscript:.*: error: .+driver.* terminated: execution timeout expired%
+ %testscript:.*: error: builtin cat terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
%.
EOE
: tm-prog-cat
:
$c <'env -t 1 -- $* -l 10 | cat >-' && $b 2>>~%EOE% != 0
- %testscript:.*: error: .+driver.* terminated: execution timeout expired%
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: builtin cat terminated: execution timeout expired%
%.
EOE
: tm-echo-prog
:
- $c <'env -t 1 -- echo "test" | $* -l 10' && $b 2>>~%EOE% != 0
- %testscript:.*: error: echo terminated: execution timeout expired%
+ $c <'env -t 3 -- echo "test" | $* -l 10' && $b 2>>~%EOE% != 0
+ %testscript:.*: error: builtin echo terminated: execution timeout expired%
+ %.
+ %testscript:.*: error: process .+driver.* terminated: execution timeout expired%
%.
EOE
diff --git a/tests/test/script/runner/while.testscript b/tests/test/script/runner/while.testscript
new file mode 100644
index 0000000..1c58827
--- /dev/null
+++ b/tests/test/script/runner/while.testscript
@@ -0,0 +1,16 @@
+# file : tests/test/script/runner/while.testscript
+# license : MIT; see accompanying LICENSE file
+
+.include ../common.testscript
+
+: basics
+:
+$c <<EOI && $b >>EOO
+ while ($v != "aa")
+ echo "$v" >|
+ v = "$(v)a"
+ end
+ EOI
+
+ a
+ EOO
diff --git a/tests/test/simple/generated/driver.cxx b/tests/test/simple/generated/driver.cxx
index 89dacca..ca6dfcb 100644
--- a/tests/test/simple/generated/driver.cxx
+++ b/tests/test/simple/generated/driver.cxx
@@ -49,13 +49,23 @@ main (int argc, char* argv[])
}
else
{
- ifstream ifs (argv[i]);
+ istream* is;
+ ifstream ifs;
- if (!ifs.is_open ())
- cerr << "unable to open " << argv[1] << endl;
+ if (argv[i] != string ("-"))
+ {
+ ifs.open (argv[i]);
+
+ if (!ifs.is_open ())
+ cerr << "unable to open " << argv[1] << endl;
+
+ is = &ifs;
+ }
+ else
+ is = &cin;
string s;
- r = getline (ifs, s) && s == "1.2.3" ? 0 : 1;
+ r = getline (*is, s) && s == "1.2.3" ? 0 : 1;
}
return r;
diff --git a/tests/test/simple/generated/testscript b/tests/test/simple/generated/testscript
index 9ce40ba..49ddbbd 100644
--- a/tests/test/simple/generated/testscript
+++ b/tests/test/simple/generated/testscript
@@ -43,6 +43,123 @@ driver = $src_root/../exe{driver}
file{output}: in{output} $src_root/manifest #@@ in module
EOI
+: output-mismatch
+:
+{
+ # Get rid of --serial-stop --quiet.
+ #
+ test.options = $regex.apply($test.options, '^(--serial-stop|--quiet)$', '')
+
+ : verbose-0
+ :
+ {
+ echo '1.2.3' >=input;
+ echo '3.4.5' >=output;
+ $* -q <<EOI 2>>/~%EOE% != 0
+ driver = $src_root/../exe{driver}
+ ./: test = $driver
+ ./: $driver
+ ./: test.arguments = '-'
+ ./: file{input}: test.stdin = true
+ ./: file{output}: test.stdout = true
+ EOI
+ %.+
+ -3.4.5
+ error: test dir{./} failed
+ error: process diff exited with code 1
+ EOE
+ }
+
+ : verbose-1
+ :
+ {
+ echo '1.2.3' >=input;
+ echo '3.4.5' >=output;
+ $* <<EOI 2>>/~%EOE% != 0
+ driver = $src_root/../exe{driver}
+ ./: test = $driver
+ ./: $driver
+ ./: test.arguments = '-'
+ ./: file{input}: test.stdin = true
+ ./: file{output}: test.stdout = true
+ EOI
+ test dir{./}
+ %.+
+ -3.4.5
+ error: test dir{./} failed
+ error: process diff exited with code 1
+ % info: test command line: cat .+/input \| .+/driver.* - \| diff -u .+%
+ info: while testing dir{./}
+ info: failed to test dir{./}
+ EOE
+ }
+
+ : verbose-2
+ :
+ {
+ echo '1.2.3' >=input;
+ echo '3.4.5' >=output;
+ $* --verbose 2 <<EOI 2>>/~%EOE% != 0
+ driver = $src_root/../exe{driver}
+ ./: test = $driver
+ ./: $driver
+ ./: test.arguments = '-'
+ ./: file{input}: test.stdin = true
+ ./: file{output}: test.stdout = true
+ EOI
+ %cat .+/input \| .+/driver.* - \| diff -u .+%
+ %.+
+ -3.4.5
+ error: test dir{./} failed
+ error: process diff exited with code 1
+ info: while testing dir{./}
+ info: failed to test dir{./}
+ EOE
+ }
+
+ : verbose-3
+ :
+ {
+ echo '1.2.3' >=input;
+ echo '3.4.5' >=output;
+ $* --verbose 3 <<EOI 2>>/~%EOE% != 0
+ driver = $src_root/../exe{driver}
+ ./: test = $driver
+ ./: $driver
+ ./: test.arguments = '-'
+ ./: file{input}: test.stdin = true
+ ./: file{output}: test.stdout = true
+ EOI
+ %cat .+/input \| .+/driver.* - \| diff -u .+%
+ %.+
+ -3.4.5
+ %error: test .+dir\{.+\} failed%
+ error: process diff exited with code 1
+ % info: while testing .+dir\{.+\}%
+ %info: failed to test .+dir\{.+\}%
+ EOE
+ }
+
+ : input-not-found
+ :
+ {
+ echo '1.2.3' >=input;
+ echo '3.4.5' >=output;
+ $* -q <<EOI 2>>/~%EOE% != 0
+ driver = $src_root/../exe{driver}
+ ./: test = $driver
+ ./: $driver
+ ./: test.arguments = 'foo'
+ ./: file{input}: test.stdin = true
+ ./: file{output}: test.stdout = true
+ EOI
+ unable to open foo
+ error: test dir{./} failed
+ % error: process .+/driver.* exited with code 1%
+ EOE
+ }
+}
+
: timeout
:
{
@@ -59,25 +176,30 @@ EOI
./: $driver
EOI
error: test dir{./} failed
- % error: .+ -s terminated: execution timeout expired%
- % info: test command line: .+%
+ % error: process .+driver.* terminated: execution timeout expired%
EOE
- : output
+ : stdin-stdout
:
+ ln -s $src_base/input.in ./;
ln -s $src_base/output.in ./;
- $* config.test.timeout=1 &output &output.d <<EOI 2>>/~%EOE% != 0
+ $* config.test.timeout=1 --verbose 1 &input &input.d &output &output.d <<EOI 2>>/~%EOE% != 0
driver = $src_root/../exe{driver}
./: test = $driver
./: test.options = -s
./: $driver
+ ./: file{input}: test.stdin = true
./: file{output}: test.stdout = true
+ file{input}: in{input} $src_root/manifest #@@ in module
file{output}: in{output} $src_root/manifest #@@ in module
EOI
+ %version in\{.+\} -> .+%{2}
+ test dir{./}
error: test dir{./} failed
- % error: diff .+ terminated: execution timeout expired%
- % error: .+ -s terminated: execution timeout expired%
- % info: test command line: .+%
+ % error: process .+driver.* terminated: execution timeout expired%
+ % info: test command line: cat .+/input \| .+driver.* -s \| diff -u .+%
+ info: while testing dir{./}
+ info: failed to test dir{./}
EOE
}
@@ -94,8 +216,7 @@ EOI
./: $driver
EOI
error: test dir{./} failed
- % error: .+ -s terminated: execution timeout expired%
- % info: test command line: .+%
+ % error: process .+driver.* terminated: execution timeout expired%
EOE
}
}
diff --git a/tests/type/json/buildfile b/tests/type/json/buildfile
new file mode 100644
index 0000000..5bc6bf2
--- /dev/null
+++ b/tests/type/json/buildfile
@@ -0,0 +1,4 @@
+# file : tests/type/json/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/type/json/testscript b/tests/type/json/testscript
new file mode 100644
index 0000000..36287b7
--- /dev/null
+++ b/tests/type/json/testscript
@@ -0,0 +1,504 @@
+# file : tests/type/json/testscript
+# license : MIT; see accompanying LICENSE file
+
+# See also tests in function/json/.
+
+.include ../../common.testscript
+
+: basics
+:
+{
+ : empty-null
+ :
+ $* <<EOI >>EOO
+ print ([json, null] )
+ print ([json] null)
+ print ([json] )
+ print ([json] "")
+ print ([json_array] )
+ print ([json_object] )
+ print ([json] one@null)
+ print ([json] one@) # @@ Would be more consistent if were null (type hints?)
+ print ([json] one@"")
+ EOI
+ [null]
+
+
+ ""
+ []
+ {}
+ {"one":null}
+ {"one":""}
+ {"one":""}
+ EOO
+
+ : reverse
+ :
+ $* <<EOI >>EOO
+ print ([json] null)
+ print ([json] true)
+ print ([json] 123)
+ print ([json] -123)
+ print ([json] 0xdecaf)
+ print ([json] abc) # @@ Ideally we would like this to be reversed unquoted.
+ print ([json] '"abc"') # @@ Ditto.
+ print (([json] abc)[0]) # @@ Workaround.
+ print ([json] dir/{file1 file2})
+ print ([json] ' ["dir/file1", "dir/file2"] ')
+ print ([json] zero@null one@1 two@abc three@([json] x@123 y@-123) four@([json] null true))
+ print ([json] '{"zero":null,"one":1,"two":"abc","three":{"x":123,"y":-123},"four":[null,true]}')
+ EOI
+
+ true
+ 123
+ -123
+ 0xdecaf
+ "abc"
+ "abc"
+ abc
+ ["dir/file1","dir/file2"]
+ ["dir/file1","dir/file2"]
+ {"zero":null,"one":1,"two":"abc","three":{"x":123,"y":-123},"four":[null,true]}
+ {"zero":null,"one":1,"two":"abc","three":{"x":123,"y":-123},"four":[null,true]}
+ EOO
+
+
+ : hex
+ :
+ $* <<EOI >>EOO
+ print ([json] 0xffffFFFF)
+
+ # These should be in the hexadecimal notation once we switch to JSON5.
+ #
+ print ([json] 0x0 0x01 0xff 0xFFFF)
+ print ([json] ff@0xff FFFF@0xFFFF)
+
+ # @@ This should start working once we switch to type hints in subscript.
+ #
+ #j = [json] ff@0xff
+ #print $value_type($j[ff], true)
+ print 'hexadecimal number'
+ EOI
+ 0xffffffff
+ [0,1,255,65535]
+ {"ff":255,"FFFF":65535}
+ hexadecimal number
+ EOO
+
+ : diagnostics-reverse-invalid
+ :
+ $* <<EOI 2>>EOE != 0
+ o = [json] '{"one":1, "two":}'
+ EOI
+ error: invalid json value in variable o: invalid json input: unexpected byte '}' in value
+ <stdin>:1:5: info: variable o value is assigned here
+ EOE
+
+ : diagnostics-duplicate-member
+ :
+ $* <<EOI 2>>EOE != 0
+ o = [json] one@1 one@2
+ EOI
+ error: invalid json value in variable o: duplicate json object member 'one'
+ <stdin>:1:5: info: variable o value is assigned here
+ EOE
+}
+
+: compare
+:
+{
+ : type
+ :
+ $* <<EOI >>EOO
+ print (([json] null) < ([json] true))
+ print (([json] true) < ([json] 0))
+ print (([json] 123) < ([json] '"0"'))
+ print (([json] abc) < ([json] xxx yyy))
+ print (([json] xxx yyy) < ([json] xxx@null yyy@null))
+ EOI
+ true
+ true
+ true
+ true
+ true
+ EOO
+
+ : simple
+ :
+ $* <<EOI >>EOO
+ print (([json] false) == ([json] false))
+ print (([json] false) < ([json] true))
+
+ print (([json] 123) == ([json] 123))
+ print (([json] -123) == ([json] -123))
+ print (([json] 0xff) == ([json] 255))
+ print (([json] 0) == ([json] -0))
+ print (([json] -1) < ([json] 0))
+ print (([json] 123) < ([json] 234))
+ print (([json] -234) < ([json] -123))
+
+ print (([json] abc) == ([json] abc))
+ print (([json] abc) < ([json] abz))
+ print (([json] abc) < ([json] abcd))
+ EOI
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ EOO
+
+ : array
+ :
+ $* <<EOI >>EOO
+ print (([json] 1 2 3) == ([json] 1 2 3))
+ print (([json] 1 2 3) < ([json] 1 2 4))
+ print (([json] 1 2 3) < ([json] 1 2 3 4))
+ EOI
+ true
+ true
+ true
+ EOO
+
+ : object
+ :
+ $* <<EOI >>EOO
+ print (([json] one@1 two@2 three@3) == ([json] three@3 one@1 two@2))
+ print (([json] one@1 two@2 three@3) < ([json] three@3 one@1 two@4))
+ print (([json] one@1 three@3) < ([json] three@3 one@1 two@2))
+ EOI
+ true
+ true
+ true
+ EOO
+}
+
+: append-prepend
+:
+{
+ : array
+ :
+ $* <<EOI >'[0,1,2,3,4,5,6,7,8]'
+ a = [json] 2 3
+ a += 4
+ a += 5 6
+ a += [json] 7 8
+ a =+ [json] 0 1
+ print $a
+ EOI
+
+ : array-type
+ :
+ $* <<EOI >'[1,2,3,4,5]'
+ [json_array] a =
+ a += 1
+ a += 2 3
+ a += [json_array] 4 5 # @@ Should be possible to use json.
+ print $a
+ EOI
+
+ : object
+ :
+ $* <<EOI >'{"zero":0,"one":6,"two":8,"three":3,"four":4,"five":5,"seven":7}'
+ o = [json] one@1 two@2 three@3
+ o += four@4
+ o += five@5 one@6
+ o += [json] seven@7 two@8
+ o =+ [json] zero@0 three@9
+ print $o
+ EOI
+
+ : object-type
+ :
+ $* <<EOI >'{"one":1,"two":2,"three":3,"four":4,"five":5}'
+ [json_object] o =
+ o += one@1
+ o += two@2 three@3
+ o += [json_object] four@4 five@5 # @@ Should be possible to use json.
+ print $o
+ EOI
+
+ : boolean
+ :
+ $* <<EOI >>EOO
+ b = [json] false
+ b += [json] true
+ print $b
+ EOI
+ true
+ EOO
+
+ : number
+ :
+ $* <<EOI >>EOO
+ n = [json] -2
+ print $value_type($n, true) $n
+ n += 1
+ print $value_type($n, true) $n
+ n += 1
+ print $value_type($n, true) $n
+ n += 1
+ print $value_type($n, true) $n
+ n += [json] -1
+ print $value_type($n, true) $n
+ n += [json] -1
+ print $value_type($n, true) $n
+ EOI
+ signed number -2
+ signed number -1
+ unsigned number 0
+ unsigned number 1
+ unsigned number 0
+ signed number -1
+ EOO
+
+ : string
+ :
+ $* <<EOI >>EOO
+ s = [json] yyy
+ s += [json] zzz
+ s =+ [json] xxx
+ print $s
+ EOI
+ "xxxyyyzzz"
+ EOO
+
+ : invalid
+ :
+ $* <<EOI 2>>EOE != 0
+ a = [json] 1 2 3
+ s = [json] str
+ s += $a
+ print $s
+ EOI
+ error: invalid json value in variable s: unable to append array to string
+ <stdin>:3:6: info: variable s value is assigned here
+ EOE
+}
+
+: subscript
+:
+{
+ : null
+ :
+ $* <<EOI >>EOO
+ j = [json] null
+ print ($j[0])
+ print ($j[one])
+ EOI
+ [null]
+ [null]
+ EOO
+
+ : array
+ :
+ $* <<EOI >>EOO
+ j = [json] 1 2 3 null
+ print ($j[1])
+ print ($j[3])
+ print ($j[4])
+ EOI
+ 2
+ [null]
+ [null]
+ EOO
+
+ : object-name
+ :
+ $* <<EOI >>EOO
+ j = [json] one@1 two@2 three@3 four@null
+ print ($j[two])
+ print ($j[four])
+ print ($j[five])
+ EOI
+ 2
+ [null]
+ [null]
+ EOO
+
+ : object-index
+ :
+ $* <<EOI >>EOO
+ j = [json] one@1 two@2 three@3
+ print ($j[([uint64] 1)])
+ EOI
+ {"two":2}
+ EOO
+
+ : nested
+ :
+ $* <<EOI >>EOO
+ o = [json] one@([json] 1 2 ([json] a@3 b@4) null) two@([json] x@x y@([json] 5 6))
+ print ($o[one][1])
+ print ($o[one][2][b])
+ print ($o[two][y][1])
+ print ($o[two][bogus][junk])
+ print ($o[two][bogus][junk][garbage])
+ print ($o[one][3][junk]) # JSON null
+ print ($o[one][3][junk][garbage])
+
+ a = [json] ([json] one@1 two@([json] 2 3)) ([json] 4 5) null
+ print ($a[0][one])
+ print ($a[0][two][1])
+ print ($a[1][1])
+ print ($a[1][123][junk])
+ print ($a[1][123][junk][garbage])
+ print ($a[2][junk]) # JSON null
+ print ($a[2][junk][garbage])
+ EOI
+ 2
+ 4
+ 6
+ [null]
+ [null]
+ [null]
+ [null]
+ 1
+ 3
+ 5
+ [null]
+ [null]
+ [null]
+ [null]
+ EOO
+
+ : reverse
+ :
+ $* <<EOI >>EOO
+ print (([json] one@null)[one])
+ print (([json] one@true)[one])
+ print (([json] one@123)[one])
+ print (([json] one@-123)[one])
+ print (([json] one@0xdecaf)[one])
+ print (([json] one@abc)[one])
+ EOI
+ [null]
+ true
+ 123
+ -123
+ 912559
+ abc
+ EOO
+
+ : diagnostics-not-object
+ :
+ $* <<EOI 2>>EOE != 0
+ j = [json] 1 2 3
+ print ($j[one])
+ EOI
+ <stdin>:2:11: error: invalid json value subscript: invalid uint64 value 'one'
+ info: json value type is array
+ <stdin>:2:9: info: use the '\[' escape sequence if this is a wildcard pattern
+ EOE
+}
+
+: iteration
+:
+{
+ : null
+ :
+ $* <<EOI
+ for v: ([json] null)
+ print $v
+ EOI
+
+ : simple
+ :
+ $* <<EOI >>EOO
+ for v: ([json] 123)
+ print $v
+ EOI
+ 123
+ EOO
+
+ : array
+ :
+ $* <<EOI >>EOO
+ for v: ([json] 1 2 3)
+ print $v
+ EOI
+ 1
+ 2
+ 3
+ EOO
+
+ : object
+ :
+ $* <<EOI >>EOO
+ for v: ([json] one@1 two@2 three@3)
+ print $v
+ EOI
+ {"one":1}
+ {"two":2}
+ {"three":3}
+ EOO
+
+ : reverse
+ :
+ $* <<EOI >>EOO
+ for v: ([json] null true 123 -123 0xdecaf abc)
+ print $v
+ EOI
+ [null]
+ true
+ 123
+ -123
+ 912559
+ abc
+ EOO
+}
+
+: json-map
+:
+{
+ : basics
+ :
+ $* <<EOI >>EOO
+ m = [json_map] 2@([json] a@1 b@2) 1@([json] 1 2) 0@([json] null) -1@null
+ print $m
+ for p: $m
+ print $first($p) $second($p)
+ print ($m[1])
+ print $type($m[1])
+ print ($m[2][b])
+ print ($m[0])
+ print ($m[-1])
+ EOI
+ -1@null 0@null 1@[1,2] 2@{"a":1,"b":2}
+ -1 null
+ 0 null
+ 1 [1,2]
+ 2 {"a":1,"b":2}
+ [1,2]
+ json
+ 2
+
+
+ EOO
+}
+
+: json-set
+:
+{
+ : basics
+ :
+ $* <<EOI >>EOO
+ s = [json_set] ([json] x@1 y@2) ([json] a@1 b@2)
+ print $s
+ for v: $s
+ print $type($v) $v
+ print ($s[([json] y@2 x@1)])
+ EOI
+ {"a":1,"b":2} {"x":1,"y":2}
+ json {"a":1,"b":2}
+ json {"x":1,"y":2}
+ true
+ EOO
+}
diff --git a/tests/type/map/buildfile b/tests/type/map/buildfile
new file mode 100644
index 0000000..7f2cdcf
--- /dev/null
+++ b/tests/type/map/buildfile
@@ -0,0 +1,4 @@
+# file : tests/type/map/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/type/map/testscript b/tests/type/map/testscript
new file mode 100644
index 0000000..29f5ed4
--- /dev/null
+++ b/tests/type/map/testscript
@@ -0,0 +1,70 @@
+# file : tests/type/map/testscript
+# license : MIT; see accompanying LICENSE file
+
+# See also tests in function/*/ (size(), keys()), type/json/ (json_map).
+
+.include ../../common.testscript
+
+: basics
+:
+$* <<EOI >>EOO
+m = [string_map] a@0 b@2 a@1
+print $m
+m += c@3 b@0
+print $m
+m =+ d@4 b@1
+print $m
+EOI
+a@1 b@2
+a@1 b@0 c@3
+a@1 b@0 c@3 d@4
+EOO
+
+: type
+:
+$* <<EOI >>EOO
+m = [string_map]
+print $type($m)
+EOI
+string_map
+EOO
+
+: subscript
+:
+$* <<EOI >>EOO
+m = [string_map] a@1 b@2 c@3
+print ($m[b])
+print $type($m[b])
+print ($m[z])
+EOI
+2
+string
+[null]
+EOO
+
+: iteration
+:
+$* <<EOI >>EOO
+for p: [string_map] a@1 b@2 c@3
+ print $first($p) $second($p)
+
+for p: [string_map, null]
+ fail bad
+EOI
+a 1
+b 2
+c 3
+EOO
+
+: iteration-index
+:
+$* <<EOI >>EOO
+m = [string_map] a@1 b@2 c@3
+k = $keys($m)
+for i: $integer_sequence(0, $size($k))
+ print $i ($k[$i]) ($m[($k[$i])]) # @@ TMP: nested subscript
+EOI
+0 a 1
+1 b 2
+2 c 3
+EOO
diff --git a/tests/type/set/buildfile b/tests/type/set/buildfile
new file mode 100644
index 0000000..55b37bb
--- /dev/null
+++ b/tests/type/set/buildfile
@@ -0,0 +1,4 @@
+# file : tests/type/set/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/type/set/testscript b/tests/type/set/testscript
new file mode 100644
index 0000000..aca4c2d
--- /dev/null
+++ b/tests/type/set/testscript
@@ -0,0 +1,55 @@
+# file : tests/type/set/testscript
+# license : MIT; see accompanying LICENSE file
+
+# See also tests in function/*/ (size()), type/json/ (json_set).
+
+.include ../../common.testscript
+
+: basics
+:
+$* <<EOI >>EOO
+s = [string_set] a b a
+print $s
+s += c b
+print $s
+s =+ d b
+print $s
+EOI
+a b
+a b c
+a b c d
+EOO
+
+: type
+:
+$* <<EOI >>EOO
+s = [string_set]
+print $type($s)
+EOI
+string_set
+EOO
+
+: subscript
+:
+$* <<EOI >>EOO
+s = [string_set] a b c
+print ($s[b])
+print ($s[z])
+EOI
+true
+false
+EOO
+
+: iteration
+:
+$* <<EOI >>EOO
+for s: [string_set] a b c
+ print $type($s) $s
+
+for s: [string_set, null]
+ fail bad
+EOI
+string a
+string b
+string c
+EOO
diff --git a/tests/type/vector/buildfile b/tests/type/vector/buildfile
new file mode 100644
index 0000000..5b2aa0e
--- /dev/null
+++ b/tests/type/vector/buildfile
@@ -0,0 +1,4 @@
+# file : tests/type/vector/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/type/vector/testscript b/tests/type/vector/testscript
new file mode 100644
index 0000000..9b3aaba
--- /dev/null
+++ b/tests/type/vector/testscript
@@ -0,0 +1,57 @@
+# file : tests/type/vector/testscript
+# license : MIT; see accompanying LICENSE file
+
+# See also tests in function/*/ (size(), find(), etc).
+
+.include ../../common.testscript
+
+: basics
+:
+$* <<EOI >>EOO
+v = [strings] b c
+print $v
+v += d
+print $v
+v =+ a
+print $v
+EOI
+b c
+b c d
+a b c d
+EOO
+
+: type
+:
+$* <<EOI >>EOO
+v = [strings]
+print $type($v)
+EOI
+strings
+EOO
+
+: subscript
+:
+$* <<EOI >>EOO
+v = [strings] a b c
+print ($v[1])
+print $type($v[1])
+print ($v[3])
+EOI
+b
+string
+[null]
+EOO
+
+: iteration
+:
+$* <<EOI >>EOO
+for s: [strings] a b c
+ print $type($s) $s
+
+for s: [strings, null]
+ fail bad
+EOI
+string a
+string b
+string c
+EOO
diff --git a/tests/value/concat.testscript b/tests/value/concat.testscript
index 97391c4..69ec9fc 100644
--- a/tests/value/concat.testscript
+++ b/tests/value/concat.testscript
@@ -3,6 +3,48 @@
.include ../common.testscript
+: null
+:
+{
+ : untyped
+ :
+ $* <<EOI >>/EOO
+ x = [null]
+
+ print y "$x x"
+ print "x $x" y
+
+ print $x"x"
+ print "x"$x
+ print $x$x
+ EOI
+ y x
+ x y
+ x
+ x
+ {}
+ EOO
+
+ : string
+ :
+ $* <<EOI >>/EOO
+ x = [string,null]
+
+ print y "$x x"
+ print "x $x" y
+
+ print $x"x"
+ print "x"$x
+ print $x$x
+ EOI
+ y x
+ x y
+ x
+ x
+ {}
+ EOO
+}
+
: dir_path
:
{
diff --git a/tests/value/reverse.testscript b/tests/value/reverse.testscript
index 9f73981..921d14b 100644
--- a/tests/value/reverse.testscript
+++ b/tests/value/reverse.testscript
@@ -89,3 +89,58 @@
EOO
}
}
+
+: reduce
+:
+: Test empty simple value reduction heuristics.
+:
+{
+ : typed
+ :
+ $* <<EOI >>"EOO"
+ x = [string]
+ n = [string,null]
+ y = [strings] $x
+ y += $x
+ y += $n
+ print $size($y)
+
+ file{*}: y += $x
+ file{x}:
+ print $size($(file{x}: y))
+
+ for i: $x
+ print iteration
+
+ print $null($x[0])
+ EOI
+ 2
+ 3
+ iteration
+ false
+ EOO
+
+ : untyped
+ :
+ $* <<EOI >>"EOO"
+ x =
+ n = [null]
+ y = $x
+ y += $x
+ y += $n
+ print $size($y)
+
+ file{*}: y += $x
+ file{x}:
+ print $size($(file{x}: y))
+
+ for i: $x
+ print iteration
+
+ print $null($x[0])
+ EOI
+ 0
+ 0
+ true
+ EOO
+}
diff --git a/tests/variable/override/testscript b/tests/variable/override/testscript
index 0c8ef5b..7b973c0 100644
--- a/tests/variable/override/testscript
+++ b/tests/variable/override/testscript
@@ -8,18 +8,18 @@
{
: value-version
:
- $* x+=01 y+=01 <<EOI >>EOO
- x = [string] 0
- print $x
+ $* p.x+=01 p.y+=01 <<EOI >>EOO
+ p.x = [string] 0
+ print $p.x
- x = [uint64] 1
- print $x
+ p.x = [uint64] 1
+ print $p.x
- y = 0
- print $y
+ p.y = 0
+ print $p.y
- [uint64] y = [null]
- print $y
+ [uint64] p.y = [null]
+ print $p.y
EOI
001
2
@@ -29,21 +29,21 @@
: value-position
:
- $* x+=01 <<EOI >>EOO
- x = [string] 0
+ $* p.x+=01 <<EOI >>EOO
+ p.x = [string] 0
- print $x
+ print $p.x
dir/
{
- print $x
+ print $p.x
}
- dir/ x = [uint64] 1
+ dir/ p.x = [uint64] 1
- print $x
+ print $p.x
dir/
{
- print $x
+ print $p.x
}
EOI
@@ -59,17 +59,19 @@
: Test overriding cached target type/pattern-specific prepend/append
:
{
- $* x+=X <<EOI >>EOO
- x = 0
- file{*}: x += a
+ $* p.x+=X <<EOI >>EOO
+ p.x = 0
+ file{*}: p.x += a
- print $(file{foo}:x)
+ file{foo}:
- x = 1 # Should invalidate both caches.
- print $(file{foo}:x)
+ print $(file{foo}:p.x)
- file{*}: x += b # Should invalidate both caches.
- print $(file{foo}:x)
+ p.x = 1 # Should invalidate both caches.
+ print $(file{foo}:p.x)
+
+ file{*}: p.x += b # Should invalidate both caches.
+ print $(file{foo}:p.x)
EOI
0 a X
1 a X
@@ -82,24 +84,24 @@
{
: after
:
- $* x=1 x+=2 x=+0 <<EOI >>EOO
- print $x
+ $* p.x=1 p.x+=2 p.x=+0 <<EOI >>EOO
+ print $p.x
EOI
0 1 2
EOO
: before
:
- $* x+=2 x=+0 x=1 <<EOI >>EOO
- print $x
+ $* p.x+=2 p.x=+0 p.x=1 <<EOI >>EOO
+ print $p.x
EOI
1
EOO
: both
:
- $* x=+0 x=1 x+=2 <<EOI >>EOO
- print $x
+ $* p.x=+0 p.x=1 p.x+=2 <<EOI >>EOO
+ print $p.x
EOI
1 2
EOO
@@ -110,9 +112,9 @@
{
: assign
:
- $* x=0 !y=0 x=1 !y=1 <<EOI >>EOO
- print $x
- print $y
+ $* p.x=0 !p.y=0 p.x=1 !p.y=1 <<EOI >>EOO
+ print $p.x
+ print $p.y
EOI
1
1
@@ -120,16 +122,16 @@
: append
:
- $* x=0 x+=1 x+=2 <<EOI >>EOO
- print $x
+ $* p.x=0 p.x+=1 p.x+=2 <<EOI >>EOO
+ print $p.x
EOI
0 1 2
EOO
: prepend
:
- $* x=2 x=+1 x=+0 <<EOI >>EOO
- print $x
+ $* p.x=2 p.x=+1 p.x=+0 <<EOI >>EOO
+ print $p.x
EOI
0 1 2
EOO
diff --git a/tests/variable/private/buildfile b/tests/variable/private/buildfile
new file mode 100644
index 0000000..3b0d20c
--- /dev/null
+++ b/tests/variable/private/buildfile
@@ -0,0 +1,4 @@
+# file : tests/variable/private/buildfile
+# license : MIT; see accompanying LICENSE file
+
+./: testscript $b
diff --git a/tests/variable/private/testscript b/tests/variable/private/testscript
new file mode 100644
index 0000000..ddb78fd
--- /dev/null
+++ b/tests/variable/private/testscript
@@ -0,0 +1,46 @@
+# file : tests/variable/private/testscript
+# license : MIT; see accompanying LICENSE file
+
+# Test public/private variable mode.
+
+buildfile = true
+test.arguments = 'noop(../)'
+
+.include ../../common.testscript
+
++cat <<EOI >=build/bootstrap.build
+project = test
+amalgamation =
+subprojects = subproj
+
+using install
+EOI
++cat <<EOI >=buildfile
+[string] foo = abc
+print $type($foo) $foo
+
+subproj/: install = false
+print $type($(subproj/: install)) $(subproj/: install)
+
+include subproj/
+EOI
+
+: subproj
+:
+mkdir build;
+cat <<EOI >=build/bootstrap.build;
+project = subporj
+EOI
+cat <<EOI >=buildfile;
+[uint64] foo = 0123
+print $type($foo) $foo
+
+[bool] install = true
+print $type($install) $install
+EOI
+$* >>EOO
+string abc
+path false
+uint64 123
+bool true
+EOO
diff --git a/tests/variable/target-specific/testscript b/tests/variable/target-specific/testscript
index 627d8ab..c52712b 100644
--- a/tests/variable/target-specific/testscript
+++ b/tests/variable/target-specific/testscript
@@ -65,13 +65,15 @@ print (foo: bar)
print (foo : bar)
print (foo/: bar)
print (foo/file{fox}: bar)
+print (file{fox}@./: bar)
EOI
-foo:bar
-foo:bar
-foo:bar
-foo:bar
-foo/:bar
-foo/file{fox}:bar
+bar:foo
+bar:foo
+bar:foo
+bar:foo
+bar:foo/
+bar:foo/file{fox}
+bar:file{fox}@./
EOO
: eval-qual-name-expected
diff --git a/tests/variable/target-type-pattern-specific/testscript b/tests/variable/target-type-pattern-specific/testscript
index 016380b..9c600ca 100644
--- a/tests/variable/target-type-pattern-specific/testscript
+++ b/tests/variable/target-type-pattern-specific/testscript
@@ -12,6 +12,9 @@ x = x
y = y
dir{*}: x = X
dir{*}: y += Y
+
+./:
+
print $(./: x)
print $(./: y)
EOI
@@ -26,6 +29,7 @@ dir{*}: x = y
x = z
dir{*-foo}: x = $x # 'z'
+bar-foo/:
print $(bar-foo/: x)
x = G
@@ -59,6 +63,7 @@ print $(file{x-foz}: x)
*: x1 = X1
{*}: x2 = X2
target{*}: x3 = X3
+file{x}:
print $(file{x}: x1)
print $(file{x}: x2)
print $(file{x}: x3)
@@ -89,6 +94,9 @@ dir{*}:
y += Y
z = $x # Note: from scope.
}
+
+./:
+
print $(./: x)
print $(./: y)
print $(./: z)
@@ -108,6 +116,9 @@ file{f*} file{b*}:
x = X
y += Y
}
+
+file{foo bar}:
+
print $(file{foo}: x)
print $(file{bar}: y)
EOI
@@ -123,6 +134,8 @@ EOO
$* <<EOI >>EOO
file{~/'.+\.txt'/i}: x = 1
+ file{foo.txt foo.TXT}:
+
print $(file{foo.txt}: x)
print $(file{foo.TXT}: x)
EOI
@@ -140,6 +153,8 @@ EOO
txt{~/'.+\.tx'/e}: x = 2
txt{~/'.+\.txt'/e}: x = 3
+ txt{foo.x foo.tx foo.txt foo.bar...}:
+
print $(txt{foo.x}: x)
print $(txt{foo.tx}: x)
print $(txt{foo.txt}: x)
@@ -157,6 +172,8 @@ EOO
x = 0
file{~/'(.+)-\1'/}: x = 1
+ file{foo-foo foo-bar}:
+
print $(file{foo-foo}: x)
print $(file{foo-bar}: x)
EOI
@@ -169,6 +186,8 @@ EOO
$* <<EOI >>EOO
foo/dir{~/b.+/}: x = 1
+ foo/dir{bar}:
+
print $(foo/dir{bar}: x)
EOI
1